From f787d67f14e38798055e97778137addd8e55b8c5 Mon Sep 17 00:00:00 2001 From: Joseph Sirianni Date: Tue, 5 Apr 2022 16:53:07 -0400 Subject: [PATCH 01/59] fix setup script link (#9095) --- receiver/mysqlreceiver/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/receiver/mysqlreceiver/README.md b/receiver/mysqlreceiver/README.md index 898493c9ea06..c351242cd5a4 100644 --- a/receiver/mysqlreceiver/README.md +++ b/receiver/mysqlreceiver/README.md @@ -10,7 +10,7 @@ Supported pipeline types: `metrics` This receiver supports MySQL version 8.0 -Collecting most metrics requires the ability to execute `SHOW GLOBAL STATUS`. The `buffer_pool_size` metric requires access to the `information_schema.innodb_metrics` table. Please refer to [setup.sh](./testdata/scripts/setup.sh) for an example of how to configure these permissions. +Collecting most metrics requires the ability to execute `SHOW GLOBAL STATUS`. The `buffer_pool_size` metric requires access to the `information_schema.innodb_metrics` table. Please refer to [setup.sh](./testdata/integration/scripts/setup.sh) for an example of how to configure these permissions. ## Configuration From 94386ed5122fcfb04ab599ce2e1971a621609b11 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Patryk=20Ma=C5=82ek?= Date: Tue, 5 Apr 2022 22:56:54 +0200 Subject: [PATCH 02/59] [processor/routing] Use MoveTo instead of CopyTo (#8991) --- processor/routingprocessor/processor_test.go | 50 +++++++++++++++++++- processor/routingprocessor/router.go | 12 ++--- 2 files changed, 55 insertions(+), 7 deletions(-) diff --git a/processor/routingprocessor/processor_test.go b/processor/routingprocessor/processor_test.go index 50c66d807b5a..5f031e37ced0 100644 --- a/processor/routingprocessor/processor_test.go +++ b/processor/routingprocessor/processor_test.go @@ -464,7 +464,6 @@ func TestMetrics_RoutingWorks_Context(t *testing.T) { rm.Resource().Attributes().InsertString("X-Tenant", "acme") t.Run("non default route is properly used", func(t *testing.T) { - assert.NoError(t, exp.ConsumeMetrics( metadata.NewIncomingContext(context.Background(), metadata.New(map[string]string{ "X-Tenant": "acme", @@ -735,6 +734,54 @@ func TestLogs_AreCorrectlySplitPerResourceAttributeRouting(t *testing.T) { ) } +func Benchmark_MetricsRouting_ResourceAttribute(b *testing.B) { + cfg := &Config{ + FromAttribute: "X-Tenant", + AttributeSource: resourceAttributeSource, + DefaultExporters: []string{"otlp"}, + Table: []RoutingTableItem{ + { + Value: "acme", + Exporters: []string{"otlp/2"}, + }, + }, + } + + runBenchmark := func(b *testing.B, cfg *Config) { + defaultExp := &mockMetricsExporter{} + mExp := &mockMetricsExporter{} + + host := &mockHost{ + Host: componenttest.NewNopHost(), + GetExportersFunc: func() map[config.DataType]map[config.ComponentID]component.Exporter { + return map[config.DataType]map[config.ComponentID]component.Exporter{ + config.MetricsDataType: { + config.NewComponentID("otlp"): defaultExp, + config.NewComponentID("otlp/2"): mExp, + }, + } + }, + } + + exp := newProcessor(zap.NewNop(), cfg) + exp.Start(context.Background(), host) + + for i := 0; i < b.N; i++ { + m := pdata.NewMetrics() + rm := m.ResourceMetrics().AppendEmpty() + + attrs := rm.Resource().Attributes() + attrs.InsertString("X-Tenant", "acme") + attrs.InsertString("X-Tenant1", "acme") + attrs.InsertString("X-Tenant2", "acme") + + exp.ConsumeMetrics(context.Background(), m) + } + } + + runBenchmark(b, cfg) +} + type mockHost struct { component.Host GetExportersFunc func() 
map[config.DataType]map[config.ComponentID]component.Exporter @@ -752,6 +799,7 @@ type mockComponent struct{} func (m *mockComponent) Start(context.Context, component.Host) error { return nil } + func (m *mockComponent) Shutdown(context.Context) error { return nil } diff --git a/processor/routingprocessor/router.go b/processor/routingprocessor/router.go index 2a7adf645255..4565b368d4fa 100644 --- a/processor/routingprocessor/router.go +++ b/processor/routingprocessor/router.go @@ -95,10 +95,10 @@ func (r *router) routeMetricsForResource(_ context.Context, tm pdata.Metrics) [] } if rEntry, ok := routingMap[attrValue]; ok { - resMetrics.CopyTo(rEntry.resMetrics.AppendEmpty()) + resMetrics.MoveTo(rEntry.resMetrics.AppendEmpty()) } else { new := pdata.NewResourceMetricsSlice() - resMetrics.CopyTo(new.AppendEmpty()) + resMetrics.MoveTo(new.AppendEmpty()) routingMap[attrValue] = routingEntry{ exporters: exp, @@ -181,10 +181,10 @@ func (r *router) routeTracesForResource(_ context.Context, tr pdata.Traces) []ro } if rEntry, ok := routingMap[attrValue]; ok { - resSpans.CopyTo(rEntry.resSpans.AppendEmpty()) + resSpans.MoveTo(rEntry.resSpans.AppendEmpty()) } else { new := pdata.NewResourceSpansSlice() - resSpans.CopyTo(new.AppendEmpty()) + resSpans.MoveTo(new.AppendEmpty()) routingMap[attrValue] = routingEntry{ exporters: exp, @@ -267,10 +267,10 @@ func (r *router) routeLogsForResource(_ context.Context, tl pdata.Logs) []routed } if rEntry, ok := routingMap[attrValue]; ok { - resLogs.CopyTo(rEntry.resLogs.AppendEmpty()) + resLogs.MoveTo(rEntry.resLogs.AppendEmpty()) } else { new := pdata.NewResourceLogsSlice() - resLogs.CopyTo(new.AppendEmpty()) + resLogs.MoveTo(new.AppendEmpty()) routingMap[attrValue] = routingEntry{ exporters: exp, From 0ed3674243200228c2018b56a8bac28f1e839c0c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Juraci=20Paix=C3=A3o=20Kr=C3=B6hling?= Date: Wed, 6 Apr 2022 14:28:45 +0200 Subject: [PATCH 03/59] [extension/jaegerremotesampling] Tie in the strategy storages (#8818) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Tie in the strategy storages This change adds support for the strategy stores, previously referenced as client config managers. This implements both the local file strategy store and the remote (gRPC) store. 
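For illustration only (not part of this patch), a minimal sketch of how both sources end up behind Jaeger's single `strategystore.StrategyStore` lookup, using the same packages the extension imports below; the endpoint, strategies-file path, and service name are placeholders:

```go
package main

import (
	"context"
	"fmt"
	"time"

	grpcStore "github.com/jaegertracing/jaeger/cmd/agent/app/configmanager/grpc"
	"github.com/jaegertracing/jaeger/cmd/collector/app/sampling/strategystore"
	"github.com/jaegertracing/jaeger/plugin/sampling/strategystore/static"
	"go.uber.org/zap"
	"google.golang.org/grpc"
)

func main() {
	var store strategystore.StrategyStore

	// local file source: the static store re-reads the file every ReloadInterval
	fileStore, err := static.NewStrategyStore(static.Options{
		StrategiesFile: "/etc/otelcol/sampling_strategies.json", // placeholder path
		ReloadInterval: time.Second,
	}, zap.NewNop())
	if err != nil {
		panic(err)
	}
	store = fileStore

	// remote (gRPC) source: strategies are proxied from another collector
	conn, err := grpc.Dial("jaeger-collector:14250", grpc.WithInsecure()) // placeholder endpoint
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	store = grpcStore.NewConfigManager(conn)

	// either way, callers see one method: service name in, strategy response out
	resp, err := store.GetSamplingStrategy(context.Background(), "my-service")
	fmt.Println(resp, err)
}
```

Because both stores satisfy the same interface, the extension can hand whichever one was configured to the HTTP server unchanged.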
Fixes #6695 Signed-off-by: Juraci Paixão Kröhling * Add changelog Signed-off-by: Juraci Paixão Kröhling * Improved readme Signed-off-by: Juraci Paixão Kröhling --- CHANGELOG.md | 1 + extension/jaegerremotesampling/README.md | 15 +- extension/jaegerremotesampling/config_test.go | 4 +- extension/jaegerremotesampling/extension.go | 86 ++- .../jaegerremotesampling/extension_test.go | 96 ++-- extension/jaegerremotesampling/factory.go | 3 +- extension/jaegerremotesampling/go.mod | 48 +- extension/jaegerremotesampling/go.sum | 530 ++++++++++++++++++ .../internal/clientconfigmanager.go | 40 -- .../jaegerremotesampling/internal/http.go | 24 +- .../internal/http_test.go | 25 +- .../jaegerremotesampling/testdata/config.yaml | 3 +- .../testdata/strategy.json | 1 + 13 files changed, 726 insertions(+), 150 deletions(-) delete mode 100644 extension/jaegerremotesampling/internal/clientconfigmanager.go create mode 100644 extension/jaegerremotesampling/testdata/strategy.json diff --git a/CHANGELOG.md b/CHANGELOG.md index 9e32eb9f9e26..d2981707a41a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,7 @@ generated code (#5270) - Add `make crosslink` target to ensure replace statements are included in `go.mod` for all transitive dependencies within repository (#8822) - `filestorageextension`: Change bbolt DB settings for better performance (#9004) +- `jaegerremotesamplingextension`: Add local and remote sampling stores (#8818) ### 🛑 Breaking changes 🛑 diff --git a/extension/jaegerremotesampling/README.md b/extension/jaegerremotesampling/README.md index 752308e767b8..f38bb89ab0a0 100644 --- a/extension/jaegerremotesampling/README.md +++ b/extension/jaegerremotesampling/README.md @@ -10,21 +10,26 @@ Note that the port `14250` will clash with the Jaeger Receiver. When both are us Although this extension is derived from Jaeger, it can be used by any clients who can consume this standard, such as the [OpenTelemetry Java SDK](https://github.com/open-telemetry/opentelemetry-java/tree/v1.9.1/sdk-extensions/jaeger-remote-sampler). +At this moment, the `reload_interval` option is only effective for the `file` source. In the future, this property will be used to control a local cache for a `remote` source. + +The `file` source can be used to load files from the local file system or from remote HTTP/S sources. The `remote` source must be used with a gRPC server that provides a Jaeger remote sampling service. 
+ ## Configuration ```yaml extensions: jaegerremotesampling: - grpc: - endpoint: :15251 source: remote: endpoint: jaeger-collector:14250 jaegerremotesampling/1: - http: - endpoint: :5878 source: - file: /etc/otel/sampling_strategies.json + reload_interval: 1s + file: /etc/otelcol/sampling_strategies.json + jaegerremotesampling/2: + source: + reload_interval: 1s + file: http://jaeger.example.com/sampling_strategies.json ``` A sampling strategy file could look like: diff --git a/extension/jaegerremotesampling/config_test.go b/extension/jaegerremotesampling/config_test.go index 5264fb571ef5..05de408ed69e 100644 --- a/extension/jaegerremotesampling/config_test.go +++ b/extension/jaegerremotesampling/config_test.go @@ -17,6 +17,7 @@ package jaegerremotesampling import ( "path/filepath" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -60,7 +61,8 @@ func TestLoadConfig(t *testing.T) { HTTPServerSettings: &confighttp.HTTPServerSettings{Endpoint: ":5778"}, GRPCServerSettings: &configgrpc.GRPCServerSettings{NetAddr: confignet.NetAddr{Endpoint: ":14250"}}, Source: Source{ - File: "/etc/otel/sampling_strategies.json", + ReloadInterval: time.Second, + File: "/etc/otelcol/sampling_strategies.json", }, }, ext1) diff --git a/extension/jaegerremotesampling/extension.go b/extension/jaegerremotesampling/extension.go index ea17a726f25f..82deeb3f3713 100644 --- a/extension/jaegerremotesampling/extension.go +++ b/extension/jaegerremotesampling/extension.go @@ -16,8 +16,14 @@ package jaegerremotesampling // import "github.com/open-telemetry/opentelemetry- import ( "context" + "fmt" + grpcStore "github.com/jaegertracing/jaeger/cmd/agent/app/configmanager/grpc" + "github.com/jaegertracing/jaeger/cmd/collector/app/sampling/strategystore" + "github.com/jaegertracing/jaeger/plugin/sampling/strategystore/static" "go.opentelemetry.io/collector/component" + "go.uber.org/zap" + "google.golang.org/grpc" "github.com/open-telemetry/opentelemetry-collector-contrib/extension/jaegerremotesampling/internal" ) @@ -25,39 +31,87 @@ import ( var _ component.Extension = (*jrsExtension)(nil) type jrsExtension struct { - httpServer component.Component + cfg *Config + telemetry component.TelemetrySettings + + httpServer component.Component + samplingStore strategystore.StrategyStore + + closers []func() error } -func newExtension(cfg *Config, telemetry component.TelemetrySettings) (*jrsExtension, error) { - // TODO(jpkroehling): get a proper instance - cfgMgr := internal.NewClientConfigManager() - ext := &jrsExtension{} +func newExtension(cfg *Config, telemetry component.TelemetrySettings) *jrsExtension { + jrse := &jrsExtension{ + cfg: cfg, + telemetry: telemetry, + } + return jrse +} - if cfg.HTTPServerSettings != nil { - httpServer, err := internal.NewHTTP(telemetry, *cfg.HTTPServerSettings, cfgMgr) +func (jrse *jrsExtension) Start(ctx context.Context, host component.Host) error { + // the config validation will take care of ensuring we have one and only one of the following about the + // source of the sampling config: + // - remote (gRPC) + // - local file + // we can then use a simplified logic here to assign the appropriate store + if jrse.cfg.Source.File != "" { + opts := static.Options{ + StrategiesFile: jrse.cfg.Source.File, + ReloadInterval: jrse.cfg.Source.ReloadInterval, + } + ss, err := static.NewStrategyStore(opts, jrse.telemetry.Logger) if err != nil { - return nil, err + return fmt.Errorf("failed to create the local 
file strategy store: %v", err) } - ext.httpServer = httpServer + + // there's a Close function on the concrete type, which is not visible to us... + // how can we close it then? + jrse.samplingStore = ss } - return ext, nil -} + if jrse.cfg.Source.Remote != nil { + opts, err := jrse.cfg.Source.Remote.ToDialOptions(host, jrse.telemetry) + if err != nil { + return fmt.Errorf("error while setting up the remote sampling source: %v", err) + } + conn, err := grpc.Dial(jrse.cfg.Source.Remote.Endpoint, opts...) + if err != nil { + return fmt.Errorf("error while connecting to the remote sampling source: %v", err) + } + + jrse.samplingStore = grpcStore.NewConfigManager(conn) + jrse.closers = append(jrse.closers, func() error { + return conn.Close() + }) + } + + if jrse.cfg.HTTPServerSettings != nil { + httpServer, err := internal.NewHTTP(jrse.telemetry, *jrse.cfg.HTTPServerSettings, jrse.samplingStore) + if err != nil { + return fmt.Errorf("error while creating the HTTP server: %v", err) + } + jrse.httpServer = httpServer + } -func (jrse *jrsExtension) Start(ctx context.Context, host component.Host) error { // then we start our own server interfaces, starting with the HTTP one err := jrse.httpServer.Start(ctx, host) if err != nil { - return err + return fmt.Errorf("error while starting the HTTP server: %v", err) } return nil } func (jrse *jrsExtension) Shutdown(ctx context.Context) error { - err := jrse.httpServer.Shutdown(ctx) - if err != nil { - return err + // we probably don't want to break whenever an error occurs, we want to continue and close the other resources + if err := jrse.httpServer.Shutdown(ctx); err != nil { + jrse.telemetry.Logger.Error("error while shutting down the HTTP server", zap.Error(err)) + } + + for _, closer := range jrse.closers { + if err := closer(); err != nil { + jrse.telemetry.Logger.Error("error while shutting down the sampling store", zap.Error(err)) + } } return nil diff --git a/extension/jaegerremotesampling/extension_test.go b/extension/jaegerremotesampling/extension_test.go index ec1dadc10b21..49dc22540dae 100644 --- a/extension/jaegerremotesampling/extension_test.go +++ b/extension/jaegerremotesampling/extension_test.go @@ -16,89 +16,79 @@ package jaegerremotesampling import ( "context" - "errors" + "fmt" + "net" + "path/filepath" "testing" + "github.com/jaegertracing/jaeger/proto-gen/api_v2" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config/configgrpc" + "google.golang.org/grpc" ) func TestNewExtension(t *testing.T) { // test - e, err := newExtension(createDefaultConfig().(*Config), componenttest.NewNopTelemetrySettings()) - require.NoError(t, err) + cfg := createDefaultConfig().(*Config) + cfg.Source.File = filepath.Join("testdata", "strategy.json") + e := newExtension(cfg, componenttest.NewNopTelemetrySettings()) // verify assert.NotNil(t, e) } -func TestStartAndShutdown(t *testing.T) { +func TestStartAndShutdownLocalFile(t *testing.T) { // prepare - e, err := newExtension(createDefaultConfig().(*Config), componenttest.NewNopTelemetrySettings()) + cfg := createDefaultConfig().(*Config) + cfg.Source.File = filepath.Join("testdata", "strategy.json") + + e := newExtension(cfg, componenttest.NewNopTelemetrySettings()) require.NotNil(t, e) - require.NoError(t, err) require.NoError(t, e.Start(context.Background(), componenttest.NewNopHost())) // test and verify 
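+	// Shutdown below stops the HTTP server; the connection closers registered for a remote source are exercised separately in TestStartAndShutdownRemote.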
assert.NoError(t, e.Shutdown(context.Background())) } -func TestFailedToStartHTTPServer(t *testing.T) { - // prepare - errBooBoo := errors.New("the server made a boo boo") - - e, err := newExtension(createDefaultConfig().(*Config), componenttest.NewNopTelemetrySettings()) - require.NotNil(t, e) +func TestStartAndShutdownRemote(t *testing.T) { + // prepare the socket the mock server will listen at + lis, err := net.Listen("tcp", "localhost:0") require.NoError(t, err) - e.httpServer = &mockComponent{ - StartFunc: func(_ context.Context, _ component.Host) error { - return errBooBoo - }, + // create the mock server + server := grpc.NewServer() + go func() { + err = server.Serve(lis) + require.NoError(t, err) + }() + + // register the service + api_v2.RegisterSamplingManagerServer(server, &samplingServer{}) + + // create the config, pointing to the mock server + cfg := createDefaultConfig().(*Config) + cfg.Source.Remote = &configgrpc.GRPCClientSettings{ + Endpoint: fmt.Sprintf("localhost:%d", lis.Addr().(*net.TCPAddr).Port), + WaitForReady: true, } - // test and verify - assert.Equal(t, errBooBoo, e.Start(context.Background(), componenttest.NewNopHost())) -} - -func TestFailedToShutdownHTTPServer(t *testing.T) { - // prepare - errBooBoo := errors.New("the server made a boo boo") - - e, err := newExtension(createDefaultConfig().(*Config), componenttest.NewNopTelemetrySettings()) + // create the extension + e := newExtension(cfg, componenttest.NewNopTelemetrySettings()) require.NotNil(t, e) - require.NoError(t, err) - e.httpServer = &mockComponent{ - ShutdownFunc: func(_ context.Context) error { - return errBooBoo - }, - } - require.NoError(t, e.Start(context.Background(), componenttest.NewNopHost())) - - // test and verify - assert.Equal(t, errBooBoo, e.Shutdown(context.Background())) -} - -type mockComponent struct { - StartFunc func(_ context.Context, _ component.Host) error - ShutdownFunc func(_ context.Context) error + // test + assert.NoError(t, e.Start(context.Background(), componenttest.NewNopHost())) + assert.NoError(t, e.Shutdown(context.Background())) } -func (s *mockComponent) Start(ctx context.Context, host component.Host) error { - if s.StartFunc == nil { - return nil - } - - return s.StartFunc(ctx, host) +type samplingServer struct { + api_v2.UnimplementedSamplingManagerServer } -func (s *mockComponent) Shutdown(ctx context.Context) error { - if s.ShutdownFunc == nil { - return nil - } - - return s.ShutdownFunc(ctx) +func (s samplingServer) GetSamplingStrategy(ctx context.Context, param *api_v2.SamplingStrategyParameters) (*api_v2.SamplingStrategyResponse, error) { + return &api_v2.SamplingStrategyResponse{ + StrategyType: api_v2.SamplingStrategyType_PROBABILISTIC, + }, nil } diff --git a/extension/jaegerremotesampling/factory.go b/extension/jaegerremotesampling/factory.go index 549b48283b44..dbc6d3a96c15 100644 --- a/extension/jaegerremotesampling/factory.go +++ b/extension/jaegerremotesampling/factory.go @@ -48,9 +48,10 @@ func createDefaultConfig() config.Extension { Endpoint: ":14250", }, }, + Source: Source{}, } } func createExtension(_ context.Context, set component.ExtensionCreateSettings, cfg config.Extension) (component.Extension, error) { - return newExtension(cfg.(*Config), set.TelemetrySettings) + return newExtension(cfg.(*Config), set.TelemetrySettings), nil } diff --git a/extension/jaegerremotesampling/go.mod b/extension/jaegerremotesampling/go.mod index 52800e628458..624ad56799cd 100644 --- a/extension/jaegerremotesampling/go.mod +++ 
b/extension/jaegerremotesampling/go.mod @@ -6,46 +6,86 @@ require ( github.com/jaegertracing/jaeger v1.32.0 github.com/stretchr/testify v1.7.1 go.opentelemetry.io/collector v0.48.0 + go.uber.org/zap v1.21.0 + google.golang.org/grpc v1.45.0 ) require ( cloud.google.com/go/compute v1.5.0 // indirect + contrib.go.opencensus.io/exporter/prometheus v0.4.0 // indirect github.com/apache/thrift v0.16.0 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/cenkalti/backoff/v4 v4.1.2 // indirect + github.com/cespare/xxhash/v2 v2.1.2 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/felixge/httpsnoop v1.0.2 // indirect + github.com/fsnotify/fsnotify v1.5.1 // indirect + github.com/go-kit/log v0.1.0 // indirect + github.com/go-logfmt/logfmt v0.5.0 // indirect github.com/go-logr/logr v1.2.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-ole/go-ole v1.2.6 // indirect + github.com/gogo/googleapis v1.4.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.2 // indirect github.com/golang/snappy v0.0.4 // indirect + github.com/google/uuid v1.3.0 // indirect + github.com/gorilla/mux v1.8.0 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/inconshreveable/mousetrap v1.0.0 // indirect github.com/klauspost/compress v1.15.1 // indirect github.com/knadh/koanf v1.4.0 // indirect + github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect + github.com/magiconair/properties v1.8.6 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/mostynb/go-grpc-compression v1.1.16 // indirect + github.com/opentracing/opentracing-go v1.2.0 // indirect + github.com/pelletier/go-toml v1.9.4 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect + github.com/prometheus/client_golang v1.12.1 // indirect + github.com/prometheus/client_model v0.2.0 // indirect + github.com/prometheus/common v0.32.1 // indirect + github.com/prometheus/procfs v0.7.3 // indirect + github.com/prometheus/statsd_exporter v0.21.0 // indirect github.com/rs/cors v1.8.2 // indirect + github.com/shirou/gopsutil/v3 v3.22.2 // indirect + github.com/spf13/afero v1.6.0 // indirect github.com/spf13/cast v1.4.1 // indirect + github.com/spf13/cobra v1.4.0 // indirect + github.com/spf13/jwalterweatherman v1.1.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/spf13/viper v1.10.1 // indirect + github.com/subosito/gotenv v1.2.0 // indirect + github.com/tklauser/go-sysconf v0.3.9 // indirect + github.com/tklauser/numcpus v0.3.0 // indirect + github.com/uber/jaeger-client-go v2.30.0+incompatible // indirect github.com/uber/jaeger-lib v2.4.1+incompatible // indirect + github.com/yusufpapurcu/wmi v1.2.2 // indirect + go.opencensus.io 
v0.23.0 // indirect go.opentelemetry.io/collector/model v0.48.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 // indirect + go.opentelemetry.io/contrib/zpages v0.31.0 // indirect go.opentelemetry.io/otel v1.6.1 // indirect + go.opentelemetry.io/otel/exporters/prometheus v0.28.0 // indirect go.opentelemetry.io/otel/metric v0.28.0 // indirect + go.opentelemetry.io/otel/sdk v1.6.1 // indirect + go.opentelemetry.io/otel/sdk/metric v0.28.0 // indirect go.opentelemetry.io/otel/trace v1.6.1 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect - go.uber.org/zap v1.21.0 // indirect golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect - golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect golang.org/x/sys v0.0.0-20220209214540-3681064d5158 // indirect golang.org/x/text v0.3.7 // indirect - google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf // indirect - google.golang.org/grpc v1.45.0 // indirect google.golang.org/protobuf v1.28.0 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect + gopkg.in/ini.v1 v1.66.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) diff --git a/extension/jaegerremotesampling/go.sum b/extension/jaegerremotesampling/go.sum index e13787a13bb6..b41b83bb3d3a 100644 --- a/extension/jaegerremotesampling/go.sum +++ b/extension/jaegerremotesampling/go.sum @@ -25,7 +25,9 @@ cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aD cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go v0.98.0/go.mod h1:ua6Ush4NALrHk5QXDWnjvZHN93OuF0HfuEPq9I1X0cM= cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= +cloud.google.com/go v0.100.2 h1:t9Iw5QH5v4XtlEQaCtUY7x6sCABps8sW0acw7e2WQ6Y= cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= @@ -39,6 +41,7 @@ cloud.google.com/go/compute v1.5.0 h1:b1zWmYuuHz7gO9kDcM/EpHGr06UgsYNRpNJzI2kFiL cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/firestore v1.6.1/go.mod h1:asNXNOzBdyVQmEU+ggO8UPodTkEVFW5Qx+rwHnAz+EY= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -49,36 +52,83 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= code.cloudfoundry.org/bytefmt 
v0.0.0-20190710193110-1eb035ffe2b6/go.mod h1:wN/zk7mhREp/oviagqUXY3EwuHhWyOvAdsn5Y4CzOrc= +contrib.go.opencensus.io/exporter/prometheus v0.4.0 h1:0QfIkj9z/iVZgK31D9H9ohjjIDApI2GOPScCKwxedbs= +contrib.go.opencensus.io/exporter/prometheus v0.4.0/go.mod h1:o7cosnyfuPVK0tB8q0QmaQNhGnptITnPQB+z1+qeFB0= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/HdrHistogram/hdrhistogram-go v1.0.1 h1:GX8GAYDuhlFQnI2fRDHQhTlkHMz8bEn0jTI6LJU0mpw= +github.com/HdrHistogram/hdrhistogram-go v1.0.1/go.mod h1:BWJ+nMSHY3L41Zj7CA3uXnloDp7xxV0YvstAE7nKTaM= +github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= +github.com/Shopify/sarama v1.32.0/go.mod h1:+EmJJKZWVT/faR9RcOxJerP+LId4iWdQPBGLy1Y1Njs= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/Shopify/toxiproxy/v2 v2.3.0/go.mod h1:KvQTtB6RjCJY4zqNJn7C7JDFgsG5uoHYDirfUfpIm0c= +github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apache/thrift v0.16.0 h1:qEy6UW60iVOlUy+b9ZR0d5WzUWYGOo4HfopoyBaNmoY= github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-radix 
v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/aws/aws-sdk-go v1.38.68/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= +github.com/aws/aws-sdk-go-v2 v1.7.0/go.mod h1:tb9wi5s61kTDA5qCkcDbt3KRVV74GGslQkl/DRdX/P4= github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= github.com/aws/aws-sdk-go-v2/config v1.8.3/go.mod h1:4AEiLtAb8kLs7vgw2ZV3p2VZ1+hBavOc84hqxVNpCyw= github.com/aws/aws-sdk-go-v2/credentials v1.4.3/go.mod h1:FNNC6nQZQUuyhq5aE5c7ata8o9e4ECGmS4lAXC7o1mQ= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.6.0/go.mod h1:gqlclDEZp4aqJOancXK6TN24aKhT0W0Ae9MHk3wzTMM= github.com/aws/aws-sdk-go-v2/internal/ini v1.2.4/go.mod h1:ZcBrrI3zBKlhGFNYWvju0I3TR93I7YIgAfy82Fh4lcQ= github.com/aws/aws-sdk-go-v2/service/appconfig v1.4.2/go.mod h1:FZ3HkCe+b10uFZZkFdvf98LHW21k49W8o8J366lqVKY= +github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.5.0/go.mod h1:acH3+MQoiMzozT/ivU+DbRg7Ooo2298RdRaWcOv+4vM= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.2/go.mod h1:72HRZDLMtmVQiLG2tLfQcaWLCssELvGl+Zf2WVxMmR8= github.com/aws/aws-sdk-go-v2/service/sso v1.4.2/go.mod h1:NBvT9R1MEF+Ud6ApJKM0G+IkPchKS7p7c2YPKwHmBOk= github.com/aws/aws-sdk-go-v2/service/sts v1.7.2/go.mod h1:8EzeIqfWt2wWT4rJVu3f21TfrhJ8AEMzVybRNSb/b4g= +github.com/aws/smithy-go v1.5.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= +github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k= +github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= +github.com/bsm/sarama-cluster v2.1.13+incompatible/go.mod h1:r7ao+4tTNXvWm+VRpRJchr2kQhqxgmAp2iEX5W96gMM= +github.com/casbin/casbin/v2 v2.31.6/go.mod h1:vByNa/Fchek0KZUgG5wEsl7iFsiviAYKRtgrQfcJqHg= +github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= 
+github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= @@ -87,11 +137,34 @@ github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XP github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod 
h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/crossdock/crossdock-go v0.0.0-20160816171116-049aabb0122b/go.mod h1:v9FBN7gdVTpiD/+LZ7Po0UKvROyT87uLVxTHVky/dlQ= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgraph-io/badger/v3 v3.2103.2/go.mod h1:RHo4/GmYcKKh5Lxu63wLEMHJ70Pac2JqZRYGhlyAo2M= +github.com/dgraph-io/ristretto v0.1.0/go.mod h1:fux0lOrBhrVCJd3lcTHsIJhq1T2rokOu6v9Vcb3Q9ug= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -100,33 +173,107 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.1/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E9/baC+qXE/TeeyBRzgJDws= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= 
github.com/felixge/httpsnoop v1.0.2 h1:+nS9g82KMXccJ/wp0zyRW9ZBHFETmMGtkk+2CTTrW4o= github.com/felixge/httpsnoop v1.0.2/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= +github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= github.com/frankban/quicktest v1.4.0/go.mod h1:36zfPVQyHxymz4cH7wlDmVwDrJuljRB60qkgn7rorfQ= github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og= +github.com/frankban/quicktest v1.14.2/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= +github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.11.0/go.mod h1:73/6Ixaufkvb5Osvkls8C79vuQ49Ba1rUEUYNSf+FUw= +github.com/go-kit/log v0.1.0 h1:DGJh0Sm43HbOeYDNnVZFl8BvcYVvjD5bqYJvp0REbwQ= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY= +github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors 
v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns= +github.com/go-openapi/loads v0.21.1/go.mod h1:/DtAMXXneXFjbQMGEtbamCZb+4x7eGwkvZCvBmwUG+g= +github.com/go-openapi/runtime v0.23.1/go.mod h1:AKurw9fNre+h3ELZfk6ILsfvPN+bvvlaU/M9q/r9hpk= +github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I= +github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg= +github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= +github.com/go-openapi/strfmt v0.21.2/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/validate v0.21.0/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw= +github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/go-zookeeper/zk v1.0.2/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= +github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= +github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= +github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= +github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= +github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= +github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs= +github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= +github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= +github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk= +github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28= +github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo= +github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk= +github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw= 
+github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360= +github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg= +github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE= +github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8= +github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= +github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= +github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= +github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= +github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= +github.com/gocql/gocql v0.0.0-20211222173705-d73e6b1002a7/go.mod h1:3gM2c4D3AnkISwBxGnMMsS8Oy4y2lhbPRsH4xnJrHG8= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/googleapis v1.4.1 h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0= +github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= @@ -154,12 +301,14 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= 
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/flatbuffers v1.12.1/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -174,6 +323,7 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o=
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
@@ -193,54 +343,130 @@ github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLe
github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
+github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
+github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q=
+github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
+github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
+github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=
+github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
+github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
+github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw=
+github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
+github.com/hashicorp/consul/api v1.8.1/go.mod h1:sDjTOq0yUyv5G4h+BqSea7Fn6BU+XbolEz1952UB+mk=
+github.com/hashicorp/consul/api v1.11.0/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M=
+github.com/hashicorp/consul/api v1.12.0/go.mod h1:6pVBMo0ebnYdt2S3H87XhekM/HHrUoTD2XXb/VrZVy0=
+github.com/hashicorp/consul/sdk v0.7.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM=
+github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI=
github.com/hashicorp/go-hclog v0.8.0/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
+github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
+github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
+github.com/hashicorp/go-hclog v1.0.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
+github.com/hashicorp/go-hclog v1.1.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
+github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY=
+github.com/hashicorp/go-plugin v1.4.3/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ=
+github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
github.com/hashicorp/go-retryablehttp v0.5.4/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
+github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
+github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A=
+github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
+github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY=
+github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc=
+github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
+github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
+github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk=
+github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4=
github.com/hashicorp/vault/api v1.0.4/go.mod h1:gDcqh3WGcR1cpF5AJz/B1UFheUEneMoIospckxBxk6Q=
github.com/hashicorp/vault/sdk v0.1.13/go.mod h1:B+hVj7TpuQY1Y/GPbCpffmgd+tSEwvhkWnjtSYCaS2M=
github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
+github.com/hashicorp/yamux v0.0.0-20190923154419-df201c70410d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
+github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
+github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
github.com/jaegertracing/jaeger v1.32.0 h1:aKtCeFMWsJ/TuNx+5mMscOCcGhnkG7ZSYx3zsCpDVAQ=
github.com/jaegertracing/jaeger v1.32.0/go.mod h1:2bCBxuy0Pdb+wGRL5YhjSyrp6Wpz1vvfL4hEYbLfrCc=
+github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs=
+github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM=
+github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
+github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg=
+github.com/jcmturner/gokrb5/v8 v8.4.2/go.mod h1:sb+Xq/fTY5yktf/VxLsE3wlfPqQjp0aWNYyvBVK62bc=
+github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc=
+github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc=
github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
+github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
+github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
+github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
+github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4=
+github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/compress v1.11.12/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
+github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.14.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.14.4/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A=
github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw=
github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
@@ -248,9 +474,38 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4=
+github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
+github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w=
+github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
+github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo=
+github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
+github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE=
+github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
+github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
+github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
+github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
+github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
+github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
+github.com/minio/highwayhash v1.0.1/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
+github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw=
github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
@@ -259,48 +514,176 @@ github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrk
github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
+github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.4.3 h1:OVowDSCllw/YjdLkam3/sm7wEtOy59d8ndGgCcyj8cs=
github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
github.com/mostynb/go-grpc-compression v1.1.16 h1:D9tGUINmcII049pxOj9dl32Fzhp26TrDVQXECoKJqQg=
github.com/mostynb/go-grpc-compression v1.1.16/go.mod h1:xxa6UoYynYS2h+5HB/Hglu81iYAp87ARaNmhhwi0s1s=
+github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/nats-io/jwt v1.2.2/go.mod h1:/xX356yQA6LuXI9xWW7mZNpxgF2mBmGecH+Fj34sP5Q=
+github.com/nats-io/jwt/v2 v2.0.2/go.mod h1:VRP+deawSXyhNjXmxPCHskrR6Mq50BqpEI5SEcNiGlY=
+github.com/nats-io/nats-server/v2 v2.2.6/go.mod h1:sEnFaxqe09cDmfMgACxZbziXnhQFhwk+aKkZjBBRYrI=
+github.com/nats-io/nats.go v1.11.0/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w=
+github.com/nats-io/nkeys v0.2.0/go.mod h1:XdZpAbhgyyODYqjTawOnIOI7VlbKSarI9Gfy1tqEu/s=
+github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4=
+github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/npillmayer/nestext v0.1.3/go.mod h1:h2lrijH8jpicr25dFY+oAJLyzlya6jhnuG+zWp9L0Uk=
+github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
+github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
+github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU=
+github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
+github.com/olivere/elastic v6.2.37+incompatible/go.mod h1:J+q1zQJTgAz9woqsbVRqGeB5G1iqDKVBWLNSYW8yfJ8=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E=
+github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
+github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
+github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY=
+github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
+github.com/opentracing-contrib/go-grpc v0.0.0-20191001143057-db30781987df/go.mod h1:DYR5Eij8rJl8h7gblRrOZ8g0kW1umSpKqYIBTgeDtLo=
+github.com/opentracing-contrib/go-stdlib v1.0.0/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU=
+github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
+github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
+github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
+github.com/openzipkin/zipkin-go v0.2.5/go.mod h1:KpXfKdgRDnnhsxw4pNIH9Md5lyFqKUa4YDFlwRYAMyE=
+github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE=
github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM=
+github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
+github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac=
github.com/pierrec/cmdflag v0.0.2/go.mod h1:a3zKGZ3cdQUfxjd0RGMLZr8xI3nvpJOB+m6o/1X5BmU=
+github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pierrec/lz4/v3 v3.3.4/go.mod h1:280XNCGS8jAcG++AHdd6SeWnzyJ1w9oow2vbORyey8Q=
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
+github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
+github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
+github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw=
+github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
+github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
+github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
+github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
+github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
+github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk=
+github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
+github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
+github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
+github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
+github.com/prometheus/common v0.28.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
+github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4=
+github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
+github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
+github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
+github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=
+github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
+github.com/prometheus/statsd_exporter v0.21.0 h1:hA05Q5RFeIjgwKIYEdFd59xu5Wwaznf33yKI+pyX6T8=
+github.com/prometheus/statsd_exporter v0.21.0/go.mod h1:rbT83sZq2V+p73lHhPZfMc3MLCHmSHelCh9hSGYNLTQ=
+github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
+github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rhnvrm/simples3 v0.6.1/go.mod h1:Y+3vYm2V7Y4VijFoJHHTrja6OgPrJ2cBti8dPGkC3sA=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
+github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.6.2 h1:aIihoIOHCiLZHxyoNQ+ABL4NKhFTgKLBdMLyEAh98m0=
+github.com/rogpeppe/go-internal v1.6.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U=
github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
+github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
+github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
+github.com/sagikazarmark/crypt v0.3.0/go.mod h1:uD/D+6UF4SrIR1uGEv7bBNkNqLGqUr43MRiaGWX1Nig=
+github.com/sagikazarmark/crypt v0.4.0/go.mod h1:ALv2SRj7GxYV4HO9elxH9nS6M9gW+xDNxqmyJ6RfDFM=
github.com/schollz/progressbar/v2 v2.13.2/go.mod h1:6YZjqdthH6SCZKv2rqGryrxPtfmRB/DWZxSMfCXPyD8=
+github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
+github.com/shirou/gopsutil/v3 v3.22.2 h1:wCrArWFkHYIdDxx/FSfF5RB4dpJYW6t7rcp3+zL8uks=
+github.com/shirou/gopsutil/v3 v3.22.2/go.mod h1:WapW1AOOPlHyXr+yOyw3uYx36enocrtSoSBy0L5vUHY=
+github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
+github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
+github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
+github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
+github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
+github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4=
+github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY=
+github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
+github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA=
github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
+github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
+github.com/spf13/cobra v1.3.0/go.mod h1:BrRVncBjOJa/eUcVVm9CE+oC6as8k+VYr4NY7WCi9V4=
+github.com/spf13/cobra v1.4.0 h1:y+wJpx64xcgO1V+RcnwW0LEHxTKRi2ZDPSBjWnrg88Q=
+github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g=
+github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
+github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
+github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
+github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
+github.com/spf13/viper v1.10.0/go.mod h1:SoyBPwAtKDzypXNDFKN5kzH7ppppbGZtls1UpIy5AsM=
+github.com/spf13/viper v1.10.1 h1:nuJZuYpG7gTj/XqiUwg8bA0cp1+M2mC3J4g5luUYBKk=
+github.com/spf13/viper v1.10.1/go.mod h1:IGlFPqhNAPKRxohIzWpI5QEy4kuI7tcl5WvR+8qy1rU=
+github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
+github.com/streadway/amqp v1.0.0/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
+github.com/streadway/handy v0.0.0-20200128134331-0f66f006fb2e/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=
+github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
@@ -309,13 +692,45 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
+github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
+github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
+github.com/tklauser/go-sysconf v0.3.9 h1:JeUVdAOWhhxVcU6Eqr/ATFHgXk/mmiItdKeJPev3vTo=
+github.com/tklauser/go-sysconf v0.3.9/go.mod h1:11DU/5sG7UexIrp/O6g35hrWzu0JxlwQ3LSFUzyeuhs=
+github.com/tklauser/numcpus v0.3.0 h1:ILuRUQBtssgnxw0XXIjKUC56fgnOrFoQQ/4+DeU2biQ=
+github.com/tklauser/numcpus v0.3.0/go.mod h1:yFGUr7TUHQRAhyqBcEg0Ge34zDBAsIvJJcyE6boqnA8=
+github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
+github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o=
+github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVKhn2Um6rjCsSsg=
github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
+github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
+github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI=
+github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
+github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs=
+github.com/xdg-go/scram v1.1.0/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs=
+github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g=
+github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM=
+github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8=
+github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
+github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=
+github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
+go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
+go.etcd.io/etcd/api/v3 v3.5.1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
+go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
+go.etcd.io/etcd/client/pkg/v3 v3.5.1/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
+go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
+go.etcd.io/etcd/client/v2 v2.305.1/go.mod h1:pMEacxZW7o8pg4CrFE7pquyCJJzZvkvdD2RibOCCCGs=
+go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0=
+go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg=
+go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng=
+go.mongodb.org/mongo-driver v1.8.3/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCuAROlKEPY=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
@@ -326,37 +741,65 @@ go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs=
go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU=
+go.opentelemetry.io/collector/model v0.46.0/go.mod h1:uyiyyq8lV45zrJ94MnLip26sorfNLP6J9XmOvaEmy7w=
go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw=
go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0/go.mod h1:SY9qHHUES6W3oZnO1H2W8NvsSovIoXRg/A1AH9px8+I=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0/go.mod h1:PFmBsWbldL1kiWZk9+0LBZz2brhByaGsvp6pRICMlPE=
+go.opentelemetry.io/contrib/zpages v0.31.0 h1:P75HGsEZp/TSdY0QSknzI9ubo7DU3i5MPGHM7h9IwL4=
+go.opentelemetry.io/contrib/zpages v0.31.0/go.mod h1:CAB55C1K7YhinQfNNIdNLgJJ+dVRlb6zQpbGQjeIDf8=
go.opentelemetry.io/otel v1.6.0/go.mod h1:bfJD2DZVw0LBxghOTlgnlI0CV3hLDu9XF/QKOUXMTQQ=
go.opentelemetry.io/otel v1.6.1 h1:6r1YrcTenBvYa1x491d0GGpTVBsNECmrc/K6b+zDeis=
go.opentelemetry.io/otel v1.6.1/go.mod h1:blzUabWHkX6LJewxvadmzafgh/wnvBSDBdOuwkAtrWQ=
+go.opentelemetry.io/otel/exporters/prometheus v0.28.0 h1:6JY0KEQC1WKOjEFu5QJqz+H0kZ51c/lPiudkxGzwaQg=
+go.opentelemetry.io/otel/exporters/prometheus v0.28.0/go.mod h1:nN2uGmk/rLmcbPTaZakIMqYH2Q0T8V1sOnKOHe/HLH0=
go.opentelemetry.io/otel/metric v0.28.0 h1:o5YNh+jxACMODoAo1bI7OES0RUW4jAMae0Vgs2etWAQ=
go.opentelemetry.io/otel/metric v0.28.0/go.mod h1:TrzsfQAmQaB1PDcdhBauLMk7nyyg9hm+GoQq/ekE9Iw=
+go.opentelemetry.io/otel/sdk v1.6.0/go.mod h1:PjLRUfDsoPy0zl7yrDGSUqjj43tL7rEtFdCEiGlxXRM=
go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY=
+go.opentelemetry.io/otel/sdk v1.6.1/go.mod h1:IVYrddmFZ+eJqu2k38qD3WezFR2pymCzm8tdxyh3R4E=
+go.opentelemetry.io/otel/sdk/metric v0.28.0 h1:+1ndwHSiknwZtC8VmXM3xtMsd6kbFxtqti4qevn2J+o=
+go.opentelemetry.io/otel/sdk/metric v0.28.0/go.mod h1:DqJmT0ovBgoW6TJ8CAQyTnwxZPIp3KWtCiDDZ1uHAzU=
go.opentelemetry.io/otel/trace v1.6.0/go.mod h1:qs7BrU5cZ8dXQHBGxHMOxwME/27YH2qEp4/+tZLLwJE=
go.opentelemetry.io/otel/trace v1.6.1 h1:f8c93l5tboBYZna1nWk0W9DYyMzJXDWdZcJZ0Kb400U=
go.opentelemetry.io/otel/trace v1.6.1/go.mod h1:RkFRM1m0puWIq10oxImnGEduNBzxiN7TXluRBtE+5j0=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
+go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/automaxprocs v1.4.0/go.mod h1:/mTEdr7LvHhs0v7mjdxDreTz1OG5zdZGqgOnhWiR/+Q=
go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI=
go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
+go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
+go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak=
go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8=
go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak=
+go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8=
go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20201112155050-0c6587e931a9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
+golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
+golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -392,9 +835,13 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
+golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
@@ -402,9 +849,12 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190921015927-1a5e07d1ff72/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -415,6 +865,7 @@ golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
@@ -422,12 +873,19 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
+golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8=
+golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM=
+golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -445,12 +903,14 @@ golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ
golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 h1:RerP+noqYHUQ8CMRcPlC2nvTa4dcBIjegkuWdcUDuqg=
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -460,23 +920,42 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -487,34 +966,52 @@ golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220111092808-5a964db01320/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220209214540-3681064d5158 h1:rm+CHSpPEEW2IsXUib1ThaHIjuBVZjxNgSKmBLFfD4c=
golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -531,19 +1028,26 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
@@ -576,6 +1080,7 @@ golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82u
golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
@@ -618,7 +1123,9 @@ google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6
google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=
+google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU=
google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I=
+google.golang.org/api v0.62.0/go.mod h1:dKmwPCydfsad4qCH08MSdgWjfHOyfpd4VtDGgRFdavw=
google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo=
google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= @@ -630,6 +1137,7 @@ google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCID google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -652,6 +1160,7 @@ google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= @@ -687,7 +1196,11 @@ google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEc google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211028162531-8db9c33dc351/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211203200212-54befc351ae9/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= @@ -696,12 +1209,14 @@ google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ6 google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= 
google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf h1:SVYXkUz2yZS9FWb2Gm8ivSlbNQzL2Z/NpPKE3RG2jWk= google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= @@ -725,6 +1240,7 @@ google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnD google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= @@ -744,22 +1260,35 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.66.2 h1:XfR1dOYubytKy4Shzc2LHrrGhU0lDCfDGG1yLPmpgsI= +gopkg.in/ini.v1 v1.66.2/go.mod 
h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -772,3 +1301,4 @@ honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/extension/jaegerremotesampling/internal/clientconfigmanager.go b/extension/jaegerremotesampling/internal/clientconfigmanager.go deleted file mode 100644 index 0d3deccd1958..000000000000 --- a/extension/jaegerremotesampling/internal/clientconfigmanager.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package internal // import "github.com/open-telemetry/opentelemetry-collector-contrib/extension/jaegerremotesampling/internal" - -import ( - "context" - - "github.com/jaegertracing/jaeger/cmd/agent/app/configmanager" - "github.com/jaegertracing/jaeger/thrift-gen/baggage" - "github.com/jaegertracing/jaeger/thrift-gen/sampling" -) - -// NewClientConfigManager returns a new Jaeger's configmanager.ClientConfigManager. It might be either -// a proxy to a remote location, or might serve data based on local files. 
-func NewClientConfigManager() configmanager.ClientConfigManager { - return &clientCfgMgr{} -} - -type clientCfgMgr struct { -} - -func (m *clientCfgMgr) GetSamplingStrategy(ctx context.Context, serviceName string) (*sampling.SamplingStrategyResponse, error) { - return sampling.NewSamplingStrategyResponse(), nil -} - -func (m *clientCfgMgr) GetBaggageRestrictions(ctx context.Context, serviceName string) ([]*baggage.BaggageRestriction, error) { - return nil, nil -} diff --git a/extension/jaegerremotesampling/internal/http.go b/extension/jaegerremotesampling/internal/http.go index 631797533142..99c5a7a96fa2 100644 --- a/extension/jaegerremotesampling/internal/http.go +++ b/extension/jaegerremotesampling/internal/http.go @@ -23,36 +23,36 @@ import ( "net/http" "sync" - "github.com/jaegertracing/jaeger/cmd/agent/app/configmanager" + "github.com/jaegertracing/jaeger/cmd/collector/app/sampling/strategystore" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/confighttp" ) var ( - errMissingClientConfigManager = errors.New("the client config manager has not been provided") + errMissingStrategyStore = errors.New("the strategy store has not been provided") ) var _ component.Component = (*SamplingHTTPServer)(nil) type SamplingHTTPServer struct { - telemetry component.TelemetrySettings - settings confighttp.HTTPServerSettings - cfgMgr configmanager.ClientConfigManager + telemetry component.TelemetrySettings + settings confighttp.HTTPServerSettings + strategyStore strategystore.StrategyStore mux *http.ServeMux srv *http.Server shutdownWG *sync.WaitGroup } -func NewHTTP(telemetry component.TelemetrySettings, settings confighttp.HTTPServerSettings, cfgMgr configmanager.ClientConfigManager) (*SamplingHTTPServer, error) { - if cfgMgr == nil { - return nil, errMissingClientConfigManager +func NewHTTP(telemetry component.TelemetrySettings, settings confighttp.HTTPServerSettings, strategyStore strategystore.StrategyStore) (*SamplingHTTPServer, error) { + if strategyStore == nil { + return nil, errMissingStrategyStore } srv := &SamplingHTTPServer{ - telemetry: telemetry, - settings: settings, - cfgMgr: cfgMgr, + telemetry: telemetry, + settings: settings, + strategyStore: strategyStore, shutdownWG: &sync.WaitGroup{}, } @@ -107,7 +107,7 @@ func (h *SamplingHTTPServer) samplingStrategyHandler(rw http.ResponseWriter, r * return } - resp, err := h.cfgMgr.GetSamplingStrategy(r.Context(), svc) + resp, err := h.strategyStore.GetSamplingStrategy(r.Context(), svc) if err != nil { err = fmt.Errorf("failed to get sampling strategy for service %q: %v", svc, err) http.Error(rw, err.Error(), http.StatusInternalServerError) diff --git a/extension/jaegerremotesampling/internal/http_test.go b/extension/jaegerremotesampling/internal/http_test.go index 3d9ce7881f30..0a9107b50526 100644 --- a/extension/jaegerremotesampling/internal/http_test.go +++ b/extension/jaegerremotesampling/internal/http_test.go @@ -24,7 +24,6 @@ import ( "net/url" "testing" - "github.com/jaegertracing/jaeger/thrift-gen/baggage" "github.com/jaegertracing/jaeger/thrift-gen/sampling" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -37,7 +36,7 @@ func TestMissingClientConfigManager(t *testing.T) { s, err := NewHTTP(componenttest.NewNopTelemetrySettings(), confighttp.HTTPServerSettings{}, nil) // verify - assert.Equal(t, errMissingClientConfigManager, err) + assert.Equal(t, errMissingStrategyStore, err) assert.Nil(t, s) } @@ -46,7 +45,7 @@ func 
TestStartAndStop(t *testing.T) { srvSettings := confighttp.HTTPServerSettings{ Endpoint: ":0", } - s, err := NewHTTP(componenttest.NewNopTelemetrySettings(), srvSettings, NewClientConfigManager()) + s, err := NewHTTP(componenttest.NewNopTelemetrySettings(), srvSettings, &mockCfgMgr{}) require.NoError(t, err) require.NotNil(t, s) @@ -72,7 +71,7 @@ func TestEndpointsAreWired(t *testing.T) { for _, tC := range testCases { t.Run(tC.desc, func(t *testing.T) { // prepare - s, err := NewHTTP(componenttest.NewNopTelemetrySettings(), confighttp.HTTPServerSettings{}, NewClientConfigManager()) + s, err := NewHTTP(componenttest.NewNopTelemetrySettings(), confighttp.HTTPServerSettings{}, &mockCfgMgr{}) require.NoError(t, err) require.NotNil(t, s) @@ -98,7 +97,7 @@ func TestEndpointsAreWired(t *testing.T) { func TestServiceNameIsRequired(t *testing.T) { // prepare - s, err := NewHTTP(componenttest.NewNopTelemetrySettings(), confighttp.HTTPServerSettings{}, NewClientConfigManager()) + s, err := NewHTTP(componenttest.NewNopTelemetrySettings(), confighttp.HTTPServerSettings{}, &mockCfgMgr{}) require.NoError(t, err) require.NotNil(t, s) @@ -116,11 +115,11 @@ func TestServiceNameIsRequired(t *testing.T) { } func TestErrorFromClientConfigManager(t *testing.T) { - s, err := NewHTTP(componenttest.NewNopTelemetrySettings(), confighttp.HTTPServerSettings{}, NewClientConfigManager()) + s, err := NewHTTP(componenttest.NewNopTelemetrySettings(), confighttp.HTTPServerSettings{}, &mockCfgMgr{}) require.NoError(t, err) require.NotNil(t, s) - s.cfgMgr = &mockCfgMgr{ + s.strategyStore = &mockCfgMgr{ getSamplingStrategyFunc: func(ctx context.Context, serviceName string) (*sampling.SamplingStrategyResponse, error) { return nil, errors.New("some error") }, @@ -142,20 +141,12 @@ func TestErrorFromClientConfigManager(t *testing.T) { } type mockCfgMgr struct { - getSamplingStrategyFunc func(ctx context.Context, serviceName string) (*sampling.SamplingStrategyResponse, error) - getBaggageRestrictionsFunc func(ctx context.Context, serviceName string) ([]*baggage.BaggageRestriction, error) + getSamplingStrategyFunc func(ctx context.Context, serviceName string) (*sampling.SamplingStrategyResponse, error) } func (m *mockCfgMgr) GetSamplingStrategy(ctx context.Context, serviceName string) (*sampling.SamplingStrategyResponse, error) { if m.getSamplingStrategyFunc != nil { return m.getSamplingStrategyFunc(ctx, serviceName) } - return nil, nil -} - -func (m *mockCfgMgr) GetBaggageRestrictions(ctx context.Context, serviceName string) ([]*baggage.BaggageRestriction, error) { - if m.getBaggageRestrictionsFunc != nil { - return m.getBaggageRestrictionsFunc(ctx, serviceName) - } - return nil, nil + return sampling.NewSamplingStrategyResponse(), nil } diff --git a/extension/jaegerremotesampling/testdata/config.yaml b/extension/jaegerremotesampling/testdata/config.yaml index f67bfe872210..9f1587338ffb 100644 --- a/extension/jaegerremotesampling/testdata/config.yaml +++ b/extension/jaegerremotesampling/testdata/config.yaml @@ -5,7 +5,8 @@ extensions: endpoint: jaeger-collector:14250 jaegerremotesampling/1: source: - file: /etc/otel/sampling_strategies.json + reload_interval: 1s + file: /etc/otelcol/sampling_strategies.json service: extensions: [jaegerremotesampling/1] diff --git a/extension/jaegerremotesampling/testdata/strategy.json b/extension/jaegerremotesampling/testdata/strategy.json new file mode 100644 index 000000000000..d086c64042db --- /dev/null +++ b/extension/jaegerremotesampling/testdata/strategy.json @@ -0,0 +1 @@ 
+{"strategyType":"PROBABILISTIC"} \ No newline at end of file From 698254a0d3051364fe927ae8e32ccbc18dabc437 Mon Sep 17 00:00:00 2001 From: Antoine Toulme Date: Wed, 6 Apr 2022 05:49:54 -0700 Subject: [PATCH 04/59] [exporter/splunkhec] Add traces batching (#8995) * [Splunk HEC exporter] Add traces batching * Update exporter/splunkhecexporter/client.go Co-authored-by: Dmitrii Anoshin Co-authored-by: Dmitrii Anoshin --- CHANGELOG.md | 1 + exporter/splunkhecexporter/client.go | 193 ++++++++- exporter/splunkhecexporter/client_test.go | 164 ++++++-- exporter/splunkhecexporter/config.go | 8 + exporter/splunkhecexporter/config_test.go | 13 + exporter/splunkhecexporter/factory.go | 1 + exporter/splunkhecexporter/go.sum | 398 ++++++++++++++++++ .../splunkhecexporter/tracedata_to_splunk.go | 78 ++-- .../tracedata_to_splunk_test.go | 67 +-- 9 files changed, 795 insertions(+), 128 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d2981707a41a..34b0431fdcab 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,7 @@ ## Unreleased ### 💡 Enhancements 💡 +- `splunkhecexporter`: Add support for batching traces (#8995) - `tanzuobservabilityexporter`: Use resourcetotelemetry helper (#8338) - `cmd/mdatagen`: Add resource attributes definition to metadata.yaml and move `pdata.Metrics` creation to the diff --git a/exporter/splunkhecexporter/client.go b/exporter/splunkhecexporter/client.go index 51f4e467e6dd..8d5a1886aff6 100644 --- a/exporter/splunkhecexporter/client.go +++ b/exporter/splunkhecexporter/client.go @@ -122,12 +122,43 @@ func (c *client) pushTraceData( c.wg.Add(1) defer c.wg.Done() - splunkEvents, _ := traceDataToSplunk(c.logger, td, c.config) - if len(splunkEvents) == 0 { - return nil + gzipWriter := c.zippers.Get().(*gzip.Writer) + defer c.zippers.Put(gzipWriter) + + gzipBuffer := bytes.NewBuffer(make([]byte, 0, c.config.MaxContentLengthLogs)) + gzipWriter.Reset(gzipBuffer) + + // Callback when each batch is to be sent. 
+ send := func(ctx context.Context, buf *bytes.Buffer) (err error) { + localHeaders := map[string]string{} + if td.ResourceSpans().Len() != 0 { + accessToken, found := td.ResourceSpans().At(0).Resource().Attributes().Get(splunk.HecTokenLabel) + if found { + localHeaders["Authorization"] = splunk.HECTokenHeader + " " + accessToken.StringVal() + } + } + + shouldCompress := buf.Len() >= minCompressionLen && !c.config.DisableCompression + + if shouldCompress { + gzipBuffer.Reset() + gzipWriter.Reset(gzipBuffer) + + if _, err = io.Copy(gzipWriter, buf); err != nil { + return fmt.Errorf("failed copying buffer to gzip writer: %v", err) + } + + if err = gzipWriter.Close(); err != nil { + return fmt.Errorf("failed flushing compressed data to gzip writer: %v", err) + } + + return c.postEvents(ctx, gzipBuffer, localHeaders, shouldCompress) + } + + return c.postEvents(ctx, buf, localHeaders, shouldCompress) } - return c.sendSplunkEvents(ctx, splunkEvents) + return c.pushTracesDataInBatches(ctx, td, send) } func (c *client) sendSplunkEvents(ctx context.Context, splunkEvents []*splunk.Event) error { @@ -399,6 +430,71 @@ func (c *client) pushMetricsRecords(ctx context.Context, mds pdata.ResourceMetri return permanentErrors, nil } +func (c *client) pushTracesData(ctx context.Context, tds pdata.ResourceSpansSlice, state *bufferState, send func(context.Context, *bytes.Buffer) error) (permanentErrors []error, sendingError error) { + res := tds.At(state.resource) + spans := res.ScopeSpans().At(state.library).Spans() + bufCap := int(c.config.MaxContentLengthTraces) + + for k := 0; k < spans.Len(); k++ { + if state.bufFront == nil { + state.bufFront = &index{resource: state.resource, library: state.library, record: k} + } + + // Parsing span record to Splunk event. + event := mapSpanToSplunkEvent(res.Resource(), spans.At(k), c.config, c.logger) + // JSON encoding event and writing to buffer. + b, err := jsoniter.Marshal(event) + if err != nil { + permanentErrors = append(permanentErrors, consumererror.NewPermanent(fmt.Errorf("dropped span events: %v, error: %v", event, err))) + continue + } + state.buf.Write(b) + + // Continue adding events to buffer up to capacity. + // 0 capacity is interpreted as unknown/unbound consistent with ContentLength in http.Request. + if state.buf.Len() <= bufCap || bufCap == 0 { + // Tracking length of event bytes below capacity in buffer. + state.bufLen = state.buf.Len() + continue + } + + state.tmpBuf.Reset() + // Storing event bytes over capacity in buffer before truncating. + if bufCap > 0 { + if over := state.buf.Len() - state.bufLen; over <= bufCap { + state.tmpBuf.Write(state.buf.Bytes()[state.bufLen:state.buf.Len()]) + } else { + permanentErrors = append(permanentErrors, consumererror.NewPermanent( + fmt.Errorf("dropped span event: %s, error: event size %d bytes larger than configured max content length %d bytes", string(state.buf.Bytes()[state.bufLen:state.buf.Len()]), over, bufCap))) + } + } + + // Truncating buffer at tracked length below capacity and sending. + state.buf.Truncate(state.bufLen) + if state.buf.Len() > 0 { + if err := send(ctx, state.buf); err != nil { + return permanentErrors, err + } + } + state.buf.Reset() + + // Writing truncated bytes back to buffer. 
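+	// The overflowing event saved in tmpBuf becomes the first event of the next batch.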
+		state.tmpBuf.WriteTo(state.buf)
+
+		if state.buf.Len() > 0 {
+			// This means that the current record had overflowed the buffer and was not sent
+			state.bufFront = &index{resource: state.resource, library: state.library, record: k}
+		} else {
+			// This means that the entire buffer was sent, including the current record
+			state.bufFront = nil
+		}
+
+		state.bufLen = state.buf.Len()
+	}
+
+	return permanentErrors, nil
+}
+
 // pushMetricsDataInBatches sends batches of Splunk events in JSON format.
 // The batch content length is restricted to MaxContentLengthMetrics.
 // md metrics are parsed to Splunk events.
@@ -434,6 +530,41 @@ func (c *client) pushMetricsDataInBatches(ctx context.Context, md pdata.Metrics,
 	return multierr.Combine(permanentErrors...)
 }
 
+// pushTracesDataInBatches sends batches of Splunk events in JSON format.
+// The batch content length is restricted to MaxContentLengthTraces.
+// td traces are parsed to Splunk events.
+func (c *client) pushTracesDataInBatches(ctx context.Context, td pdata.Traces, send func(context.Context, *bytes.Buffer) error) error {
+	var bufState = makeBlankBufferState(c.config.MaxContentLengthTraces)
+	var permanentErrors []error
+
+	var rts = td.ResourceSpans()
+	for i := 0; i < rts.Len(); i++ {
+		ilts := rts.At(i).ScopeSpans()
+		for j := 0; j < ilts.Len(); j++ {
+			var err error
+			var newPermanentErrors []error
+
+			bufState.resource, bufState.library = i, j
+			newPermanentErrors, err = c.pushTracesData(ctx, rts, &bufState, send)
+
+			if err != nil {
+				return consumererror.NewTraces(err, *subTraces(&td, bufState.bufFront))
+			}
+
+			permanentErrors = append(permanentErrors, newPermanentErrors...)
+		}
+	}
+
+	// There are some leftover unsent traces
+	if bufState.buf.Len() > 0 {
+		if err := send(ctx, bufState.buf); err != nil {
+			return consumererror.NewTraces(err, *subTraces(&td, bufState.bufFront))
+		}
+	}
+
+	return multierr.Combine(permanentErrors...)
+}
+
 func (c *client) postEvents(ctx context.Context, events io.Reader, headers map[string]string, compressed bool) error {
 	req, err := http.NewRequestWithContext(ctx, "POST", c.url.String(), events)
 	if err != nil {
@@ -493,6 +624,18 @@ func subMetrics(md *pdata.Metrics, bufFront *index) *pdata.Metrics {
 	return &subset
 }
 
+// subTraces returns a subset of `td` starting from `bufFront`.
It can be nil, in which case it is ignored +func subTraces(td *pdata.Traces, bufFront *index) *pdata.Traces { + if td == nil { + return td + } + + subset := pdata.NewTraces() + subTracesByType(td, bufFront, &subset) + + return &subset +} + func subLogsByType(src *pdata.Logs, from *index, dst *pdata.Logs, profiling bool) { if from == nil { return // All the data of this type was sent successfully @@ -582,6 +725,48 @@ func subMetricsByType(src *pdata.Metrics, from *index, dst *pdata.Metrics) { } } +func subTracesByType(src *pdata.Traces, from *index, dst *pdata.Traces) { + if from == nil { + return // All the data of this type was sent successfully + } + + resources := src.ResourceSpans() + resourcesSub := dst.ResourceSpans() + + for i := from.resource; i < resources.Len(); i++ { + newSub := resourcesSub.AppendEmpty() + resources.At(i).Resource().CopyTo(newSub.Resource()) + + libraries := resources.At(i).ScopeSpans() + librariesSub := newSub.ScopeSpans() + + j := 0 + if i == from.resource { + j = from.library + } + for jSub := 0; j < libraries.Len(); j++ { + lib := libraries.At(j) + + newLibSub := librariesSub.AppendEmpty() + lib.Scope().CopyTo(newLibSub.Scope()) + + traces := lib.Spans() + tracesSub := newLibSub.Spans() + jSub++ + + k := 0 + if i == from.resource && j == from.library { + k = from.record + } + + for kSub := 0; k < traces.Len(); k++ { //revive:disable-line:var-naming + traces.At(k).CopyTo(tracesSub.AppendEmpty()) + kSub++ + } + } + } +} + func encodeBodyEvents(zippers *sync.Pool, evs []*splunk.Event, disableCompression bool) (bodyReader io.Reader, compressed bool, err error) { buf := new(bytes.Buffer) for _, e := range evs { diff --git a/exporter/splunkhecexporter/client_test.go b/exporter/splunkhecexporter/client_test.go index bd7bbee3e4e1..48fb508ad91b 100644 --- a/exporter/splunkhecexporter/client_test.go +++ b/exporter/splunkhecexporter/client_test.go @@ -228,7 +228,7 @@ func runMetricsExport(cfg *Config, metrics pdata.Metrics, t *testing.T) ([][]byt } } -func runTraceExport(disableCompression bool, numberOfTraces int, t *testing.T) (string, error) { +func runTraceExport(testConfig *Config, traces pdata.Traces, t *testing.T) ([][]byte, error) { listener, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { panic(err) @@ -237,7 +237,8 @@ func runTraceExport(disableCompression bool, numberOfTraces int, t *testing.T) ( factory := NewFactory() cfg := factory.CreateDefaultConfig().(*Config) cfg.Endpoint = "http://" + listener.Addr().String() + "/services/collector" - cfg.DisableCompression = disableCompression + cfg.DisableCompression = testConfig.DisableCompression + cfg.MaxContentLengthTraces = testConfig.MaxContentLengthTraces cfg.Token = "1234-1234" receivedRequest := make(chan []byte) @@ -255,15 +256,19 @@ func runTraceExport(disableCompression bool, numberOfTraces int, t *testing.T) ( assert.NoError(t, exporter.Start(context.Background(), componenttest.NewNopHost())) defer exporter.Shutdown(context.Background()) - td := createTraceData(numberOfTraces) - - err = exporter.ConsumeTraces(context.Background(), td) + err = exporter.ConsumeTraces(context.Background(), traces) assert.NoError(t, err) - select { - case request := <-receivedRequest: - return string(request), nil - case <-time.After(1 * time.Second): - return "", errors.New("timeout") + var requests [][]byte + for { + select { + case request := <-receivedRequest: + requests = append(requests, request) + case <-time.After(1 * time.Second): + if len(requests) == 0 { + err = errors.New("timeout") + } + return 
requests, err + } } } @@ -308,13 +313,127 @@ func runLogExport(cfg *Config, ld pdata.Logs, t *testing.T) ([][]byte, error) { } } -func TestReceiveTraces(t *testing.T) { - actual, err := runTraceExport(true, 3, t) - assert.NoError(t, err) - expected := `{"time":1,"host":"unknown","event":{"trace_id":"01010101010101010101010101010101","span_id":"0000000000000001","parent_span_id":"0102030405060708","name":"root","end_time":2000000000,"kind":"SPAN_KIND_UNSPECIFIED","status":{"message":"ok","code":"STATUS_CODE_OK"},"start_time":1000000000},"fields":{"resource":"R1"}}` - expected += `{"time":2,"host":"unknown","event":{"trace_id":"01010101010101010101010101010101","span_id":"0000000000000001","parent_span_id":"","name":"root","end_time":3000000000,"kind":"SPAN_KIND_UNSPECIFIED","status":{"message":"","code":"STATUS_CODE_UNSET"},"start_time":2000000000},"fields":{"resource":"R1"}}` - expected += `{"time":3,"host":"unknown","event":{"trace_id":"01010101010101010101010101010101","span_id":"0000000000000001","parent_span_id":"0102030405060708","name":"root","end_time":4000000000,"kind":"SPAN_KIND_UNSPECIFIED","status":{"message":"ok","code":"STATUS_CODE_OK"},"start_time":3000000000},"fields":{"resource":"R1"}}` - assert.Equal(t, expected, actual) +func TestReceiveTracesBatches(t *testing.T) { + type wantType struct { + batches [][]string + numBatches int + compressed bool + } + + // The test cases depend on the constant minCompressionLen = 1500. + // If the constant changed, the test cases with want.compressed=true must be updated. + require.Equal(t, minCompressionLen, 1500) + + tests := []struct { + name string + conf *Config + traces pdata.Traces + want wantType + }{ + { + name: "all trace events in payload when max content length unknown (configured max content length 0)", + traces: createTraceData(4), + conf: func() *Config { + cfg := NewFactory().CreateDefaultConfig().(*Config) + cfg.MaxContentLengthTraces = 0 + return cfg + }(), + want: wantType{ + batches: [][]string{ + {`"start_time":1`, + `"start_time":2`, + `start_time":3`, + `start_time":4`}, + }, + numBatches: 1, + }, + }, + { + name: "1 trace event per payload (configured max content length is same as event size)", + traces: createTraceData(4), + conf: func() *Config { + cfg := NewFactory().CreateDefaultConfig().(*Config) + cfg.MaxContentLengthTraces = 320 + return cfg + }(), + want: wantType{ + batches: [][]string{ + {`"start_time":1`}, + {`"start_time":2`}, + {`"start_time":3`}, + {`"start_time":4`}, + }, + numBatches: 4, + }, + }, + { + name: "2 trace events per payload (configured max content length is twice event size)", + traces: createTraceData(4), + conf: func() *Config { + cfg := NewFactory().CreateDefaultConfig().(*Config) + cfg.MaxContentLengthTraces = 640 + return cfg + }(), + want: wantType{ + batches: [][]string{ + {`"start_time":1`, `"start_time":2`}, + {`"start_time":3`, `"start_time":4`}, + }, + numBatches: 2, + }, + }, + { + name: "1 compressed batch of 2037 bytes, make sure the event size is more than minCompressionLen=1500 to trigger compression", + traces: createTraceData(10), + conf: func() *Config { + return NewFactory().CreateDefaultConfig().(*Config) + }(), + want: wantType{ + batches: [][]string{ + {`"start_time":1`, `"start_time":2`, `"start_time":3`, `"start_time":4`, `"start_time":7`, `"start_time":8`, `"start_time":9`}, + }, + numBatches: 1, + compressed: true, + }, + }, + { + name: "2 compressed batches - 1832 bytes each, make sure the log size is more than minCompressionLen=1500 to trigger compression", 
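+			// 22 spans under a 3520-byte cap should produce exactly two gzip-compressed posts.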
+ traces: createTraceData(22), + conf: func() *Config { + cfg := NewFactory().CreateDefaultConfig().(*Config) + cfg.MaxContentLengthTraces = 3520 + return cfg + }(), + want: wantType{ + batches: [][]string{ + {`"start_time":1`, `"start_time":2`, `"start_time":5`, `"start_time":6`, `"start_time":7`, `"start_time":8`, `"start_time":9`, `"start_time":10`, `"start_time":11`}, + {`"start_time":15`, `"start_time":16`, `"start_time":17`, `"start_time":18`, `"start_time":19`, `"start_time":20`, `"start_time":21`}, + }, + numBatches: 2, + compressed: true, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got, err := runTraceExport(test.conf, test.traces, t) + + require.NoError(t, err) + require.Len(t, got, test.want.numBatches) + + for i := 0; i < test.want.numBatches; i++ { + require.NotZero(t, got[i]) + if test.want.compressed { + validateCompressedContains(t, test.want.batches[i], got[i]) + } else { + for _, expected := range test.want.batches[i] { + assert.Contains(t, string(got[i]), expected) + } + } + } + }) + } } func TestReceiveLogs(t *testing.T) { @@ -573,12 +692,6 @@ func TestReceiveBatchedMetrics(t *testing.T) { } } -func TestReceiveTracesWithCompression(t *testing.T) { - request, err := runTraceExport(false, 1000, t) - assert.NoError(t, err) - assert.NotEqual(t, "", request) -} - func TestReceiveMetricsWithCompression(t *testing.T) { cfg := NewFactory().CreateDefaultConfig().(*Config) request, err := runMetricsExport(cfg, createMetricsData(1000), t) @@ -628,11 +741,6 @@ func TestErrorReceived(t *testing.T) { assert.EqualError(t, err, "HTTP 500 \"Internal Server Error\"") } -func TestInvalidTraces(t *testing.T) { - _, err := runTraceExport(false, 0, t) - assert.Error(t, err) -} - func TestInvalidLogs(t *testing.T) { config := NewFactory().CreateDefaultConfig().(*Config) config.DisableCompression = false diff --git a/exporter/splunkhecexporter/config.go b/exporter/splunkhecexporter/config.go index 26868031e2c3..df8be937ebfb 100644 --- a/exporter/splunkhecexporter/config.go +++ b/exporter/splunkhecexporter/config.go @@ -32,6 +32,7 @@ const ( hecPath = "services/collector" maxContentLengthLogsLimit = 2 * 1024 * 1024 maxContentLengthMetricsLimit = 2 * 1024 * 1024 + maxContentLengthTracesLimit = 2 * 1024 * 1024 ) // OtelToHecFields defines the mapping of attributes to HEC fields @@ -79,6 +80,9 @@ type Config struct { // Maximum metric data size in bytes per HTTP post. Defaults to the backend limit of 2097152 bytes (2MiB). MaxContentLengthMetrics uint `mapstructure:"max_content_length_metrics"` + // Maximum trace data size in bytes per HTTP post. Defaults to the backend limit of 2097152 bytes (2MiB). + MaxContentLengthTraces uint `mapstructure:"max_content_length_traces"` + // TLSSetting struct exposes TLS client configuration. 
 	TLSSetting configtls.TLSClientSetting `mapstructure:"tls,omitempty"`
 
@@ -126,6 +130,10 @@ func (cfg *Config) validateConfig() error {
 		return fmt.Errorf(`requires "max_content_length_metrics" <= %d`, maxContentLengthMetricsLimit)
 	}
 
+	if cfg.MaxContentLengthTraces > maxContentLengthTracesLimit {
+		return fmt.Errorf(`requires "max_content_length_traces" <= %d`, maxContentLengthTracesLimit)
+	}
+
 	return nil
 }
 
diff --git a/exporter/splunkhecexporter/config_test.go b/exporter/splunkhecexporter/config_test.go
index e5c8ca997d49..e9aee9d8d6ee 100644
--- a/exporter/splunkhecexporter/config_test.go
+++ b/exporter/splunkhecexporter/config_test.go
@@ -64,6 +64,7 @@ func TestLoadConfig(t *testing.T) {
 			MaxConnections:          100,
 			MaxContentLengthLogs:    2 * 1024 * 1024,
 			MaxContentLengthMetrics: 2 * 1024 * 1024,
+			MaxContentLengthTraces:  2 * 1024 * 1024,
 			TimeoutSettings: exporterhelper.TimeoutSettings{
 				Timeout: 10 * time.Second,
 			},
@@ -115,6 +116,7 @@ func TestConfig_getOptionsFromConfig(t *testing.T) {
 		Index                   string
 		MaxContentLengthLogs    uint
 		MaxContentLengthMetrics uint
+		MaxContentLengthTraces  uint
 	}
 	tests := []struct {
 		name    string
@@ -179,6 +181,16 @@ func TestConfig_getOptionsFromConfig(t *testing.T) {
 			want:    nil,
 			wantErr: true,
 		},
+		{
+			name: "Test max content length traces greater than limit",
+			fields: fields{
+				Token:                  "1234",
+				Endpoint:               "https://example.com:8000",
+				MaxContentLengthTraces: maxContentLengthTracesLimit + 1,
+			},
+			want:    nil,
+			wantErr: true,
+		},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
@@ -190,6 +202,7 @@ func TestConfig_getOptionsFromConfig(t *testing.T) {
 				Index:                   tt.fields.Index,
 				MaxContentLengthLogs:    tt.fields.MaxContentLengthLogs,
 				MaxContentLengthMetrics: tt.fields.MaxContentLengthMetrics,
+				MaxContentLengthTraces:  tt.fields.MaxContentLengthTraces,
 			}
 			got, err := cfg.getOptionsFromConfig()
 			if (err != nil) != tt.wantErr {
diff --git a/exporter/splunkhecexporter/factory.go b/exporter/splunkhecexporter/factory.go
index c1c0f2380337..3682406d8f4c 100644
--- a/exporter/splunkhecexporter/factory.go
+++ b/exporter/splunkhecexporter/factory.go
@@ -70,6 +70,7 @@ func createDefaultConfig() config.Exporter {
 		MaxConnections:          defaultMaxIdleCons,
 		MaxContentLengthLogs:    maxContentLengthLogsLimit,
 		MaxContentLengthMetrics: maxContentLengthMetricsLimit,
+		MaxContentLengthTraces:  maxContentLengthTracesLimit,
 		HecToOtelAttrs: splunk.HecToOtelAttrs{
 			Source:     splunk.DefaultSourceLabel,
 			SourceType: splunk.DefaultSourceTypeLabel,
diff --git a/exporter/splunkhecexporter/go.sum b/exporter/splunkhecexporter/go.sum
index 5cf61766c350..643d433ff627 100644
--- a/exporter/splunkhecexporter/go.sum
+++ b/exporter/splunkhecexporter/go.sum
@@ -1,8 +1,49 @@
 cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
+cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
+cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
+cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
+cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
+cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
+cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
+cloud.google.com/go
v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +code.cloudfoundry.org/bytefmt v0.0.0-20190710193110-1eb035ffe2b6/go.mod h1:wN/zk7mhREp/oviagqUXY3EwuHhWyOvAdsn5Y4CzOrc= +contrib.go.opencensus.io/exporter/prometheus v0.4.0/go.mod h1:o7cosnyfuPVK0tB8q0QmaQNhGnptITnPQB+z1+qeFB0= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/antonmedv/expr v1.9.0/go.mod 
h1:5qsM3oLGDND7sDmQGDXHkYfkjYMUX14qsgqmHhwGEk8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= @@ -18,6 +59,9 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= @@ -26,6 +70,10 @@ github.com/census-instrumentation/opencensus-proto v0.3.0 h1:t/LhUZLVitR1Ow2YOnd github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= @@ -34,6 +82,9 @@ github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 
h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -47,27 +98,56 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/felixge/httpsnoop v1.0.2 h1:+nS9g82KMXccJ/wp0zyRW9ZBHFETmMGtkk+2CTTrW4o= +github.com/felixge/httpsnoop v1.0.2/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/frankban/quicktest v1.4.0/go.mod h1:36zfPVQyHxymz4cH7wlDmVwDrJuljRB60qkgn7rorfQ= +github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= +github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= +github.com/gdamore/encoding v1.0.0/go.mod h1:alR0ol34c49FCSBLjhosxzcPHQbf2trDkoo5dl+VrEg= +github.com/gdamore/tcell v1.3.0/go.mod h1:Hjvr+Ofd+gLglo7RYKxxnzCBmev3BzsS67MebKS4zMM= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= 
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= @@ -81,11 +161,16 @@ github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= @@ -93,8 +178,22 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -119,25 +218,52 @@ github.com/hashicorp/vault/api v1.0.4/go.mod h1:gDcqh3WGcR1cpF5AJz/B1UFheUEneMoI github.com/hashicorp/vault/sdk v0.1.13/go.mod h1:B+hVj7TpuQY1Y/GPbCpffmgd+tSEwvhkWnjtSYCaS2M= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= 
+github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.14.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= +github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lucasb-eyer/go-colorful v1.0.2/go.mod h1:0MS4r+7BZKSJ5mw4/S5MPN+qHFF1fYclkSPilDOKW0s= 
+github.com/lucasb-eyer/go-colorful v1.0.3/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= +github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.8/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= @@ -155,43 +281,103 @@ github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mostynb/go-grpc-compression v1.1.16 h1:D9tGUINmcII049pxOj9dl32Fzhp26TrDVQXECoKJqQg= +github.com/mostynb/go-grpc-compression v1.1.16/go.mod h1:xxa6UoYynYS2h+5HB/Hglu81iYAp87ARaNmhhwi0s1s= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/npillmayer/nestext v0.1.3/go.mod h1:h2lrijH8jpicr25dFY+oAJLyzlya6jhnuG+zWp9L0Uk= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM= 
+github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pierrec/cmdflag v0.0.2/go.mod h1:a3zKGZ3cdQUfxjd0RGMLZr8xI3nvpJOB+m6o/1X5BmU= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pierrec/lz4/v3 v3.3.4/go.mod h1:280XNCGS8jAcG++AHdd6SeWnzyJ1w9oow2vbORyey8Q= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.28.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/statsd_exporter v0.21.0/go.mod h1:rbT83sZq2V+p73lHhPZfMc3MLCHmSHelCh9hSGYNLTQ= github.com/rhnvrm/simples3 v0.6.1/go.mod 
h1:Y+3vYm2V7Y4VijFoJHHTrja6OgPrJ2cBti8dPGkC3sA= +github.com/rivo/tview v0.0.0-20200219210816-cd38d7432498/go.mod h1:6lkG1x+13OShEf0EaOCaTQYyB7d5nSbb181KtjlS+84= +github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= +github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/sanity-io/litter v1.2.0/go.mod h1:JF6pZUFgu2Q0sBZ+HSV35P8TVPI1TTzEwyu9FXAw2W4= +github.com/schollz/progressbar/v2 v2.13.2/go.mod h1:6YZjqdthH6SCZKv2rqGryrxPtfmRB/DWZxSMfCXPyD8= +github.com/shirou/gopsutil/v3 v3.22.2/go.mod h1:WapW1AOOPlHyXr+yOyw3uYx36enocrtSoSBy0L5vUHY= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v0.0.0-20161117074351-18a02ba4a312/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/tklauser/go-sysconf v0.3.9/go.mod h1:11DU/5sG7UexIrp/O6g35hrWzu0JxlwQ3LSFUzyeuhs= 
+github.com/tklauser/numcpus v0.3.0/go.mod h1:yFGUr7TUHQRAhyqBcEg0Ge34zDBAsIvJJcyE6boqnA8= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= @@ -199,13 +385,20 @@ go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOU go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0/go.mod h1:SY9qHHUES6W3oZnO1H2W8NvsSovIoXRg/A1AH9px8+I= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0/go.mod h1:PFmBsWbldL1kiWZk9+0LBZz2brhByaGsvp6pRICMlPE= +go.opentelemetry.io/contrib/zpages v0.31.0/go.mod h1:CAB55C1K7YhinQfNNIdNLgJJ+dVRlb6zQpbGQjeIDf8= go.opentelemetry.io/otel v1.6.0/go.mod h1:bfJD2DZVw0LBxghOTlgnlI0CV3hLDu9XF/QKOUXMTQQ= go.opentelemetry.io/otel v1.6.1 h1:6r1YrcTenBvYa1x491d0GGpTVBsNECmrc/K6b+zDeis= go.opentelemetry.io/otel v1.6.1/go.mod h1:blzUabWHkX6LJewxvadmzafgh/wnvBSDBdOuwkAtrWQ= +go.opentelemetry.io/otel/exporters/prometheus v0.28.0/go.mod h1:nN2uGmk/rLmcbPTaZakIMqYH2Q0T8V1sOnKOHe/HLH0= go.opentelemetry.io/otel/metric v0.28.0 h1:o5YNh+jxACMODoAo1bI7OES0RUW4jAMae0Vgs2etWAQ= go.opentelemetry.io/otel/metric v0.28.0/go.mod h1:TrzsfQAmQaB1PDcdhBauLMk7nyyg9hm+GoQq/ekE9Iw= +go.opentelemetry.io/otel/sdk v1.6.0/go.mod h1:PjLRUfDsoPy0zl7yrDGSUqjj43tL7rEtFdCEiGlxXRM= go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.1/go.mod h1:IVYrddmFZ+eJqu2k38qD3WezFR2pymCzm8tdxyh3R4E= +go.opentelemetry.io/otel/sdk/metric v0.28.0/go.mod h1:DqJmT0ovBgoW6TJ8CAQyTnwxZPIp3KWtCiDDZ1uHAzU= go.opentelemetry.io/otel/trace v1.6.0/go.mod h1:qs7BrU5cZ8dXQHBGxHMOxwME/27YH2qEp4/+tZLLwJE= go.opentelemetry.io/otel/trace v1.6.1 h1:f8c93l5tboBYZna1nWk0W9DYyMzJXDWdZcJZ0Kb400U= go.opentelemetry.io/otel/trace v1.6.1/go.mod h1:RkFRM1m0puWIq10oxImnGEduNBzxiN7TXluRBtE+5j0= @@ -220,71 +413,202 @@ go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= go.uber.org/multierr v1.8.0/go.mod 
h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8= go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod 
h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod 
h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c= golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626150813-e07cf5db2756/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220111092808-5a964db01320/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod 
h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod 
h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -292,25 +616,80 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto 
v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0= google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod 
h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -321,24 +700,43 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/exporter/splunkhecexporter/tracedata_to_splunk.go b/exporter/splunkhecexporter/tracedata_to_splunk.go index 50412de9c5f7..1a569d18a29a 100644 --- a/exporter/splunkhecexporter/tracedata_to_splunk.go +++ b/exporter/splunkhecexporter/tracedata_to_splunk.go @@ -57,60 +57,46 @@ type hecSpan struct { Links []hecLink `json:"links,omitempty"` } -func traceDataToSplunk(logger *zap.Logger, data pdata.Traces, config *Config) ([]*splunk.Event, int) { +func mapSpanToSplunkEvent(resource pdata.Resource, span pdata.Span, config *Config, logger *zap.Logger) *splunk.Event { sourceKey := config.HecToOtelAttrs.Source sourceTypeKey := config.HecToOtelAttrs.SourceType indexKey := config.HecToOtelAttrs.Index hostKey := config.HecToOtelAttrs.Host - numDroppedSpans := 0 - splunkEvents := make([]*splunk.Event, 0, data.SpanCount()) - rss := data.ResourceSpans() - for i := 0; i < rss.Len(); i++ { - rs := rss.At(i) - host := unknownHostName - source := config.Source - sourceType := config.SourceType - index := config.Index - commonFields := map[string]interface{}{} - rs.Resource().Attributes().Range(func(k string, v pdata.Value) bool { - switch k { - case hostKey: - host = v.StringVal() - case sourceKey: - source = v.StringVal() - case sourceTypeKey: - sourceType = v.StringVal() - case indexKey: - index = v.StringVal() - case splunk.HecTokenLabel: - // ignore - default: - commonFields[k] = v.AsString() - } - return true - }) - ilss := rs.ScopeSpans() - for sils := 0; sils < ilss.Len(); sils++ { - ils := ilss.At(sils) - spans := ils.Spans() - for si := 0; si < spans.Len(); si++ { - span := spans.At(si) - se := &splunk.Event{ - Time: 
timestampToSecondsWithMillisecondPrecision(span.StartTimestamp()), - Host: host, - Source: source, - SourceType: sourceType, - Index: index, - Event: toHecSpan(logger, span), - Fields: commonFields, - } - splunkEvents = append(splunkEvents, se) - } + host := unknownHostName + source := config.Source + sourceType := config.SourceType + index := config.Index + commonFields := map[string]interface{}{} + resource.Attributes().Range(func(k string, v pdata.Value) bool { + switch k { + case hostKey: + host = v.StringVal() + case sourceKey: + source = v.StringVal() + case sourceTypeKey: + sourceType = v.StringVal() + case indexKey: + index = v.StringVal() + case splunk.HecTokenLabel: + // ignore + default: + commonFields[k] = v.AsString() } + return true + }) + + se := &splunk.Event{ + Time: timestampToSecondsWithMillisecondPrecision(span.StartTimestamp()), + Host: host, + Source: source, + SourceType: sourceType, + Index: index, + Event: toHecSpan(logger, span), + Fields: commonFields, } - return splunkEvents, numDroppedSpans + return se } func toHecSpan(logger *zap.Logger, span pdata.Span) hecSpan { diff --git a/exporter/splunkhecexporter/tracedata_to_splunk_test.go b/exporter/splunkhecexporter/tracedata_to_splunk_test.go index 1c29ba5ac155..81bbc58d40f8 100644 --- a/exporter/splunkhecexporter/tracedata_to_splunk_test.go +++ b/exporter/splunkhecexporter/tracedata_to_splunk_test.go @@ -31,11 +31,10 @@ func Test_traceDataToSplunk(t *testing.T) { ts := pdata.Timestamp(123) tests := []struct { - name string - traceDataFn func() pdata.Traces - wantSplunkEvents []*splunk.Event - configFn func() *Config - wantNumDroppedSpans int + name string + traceDataFn func() pdata.Traces + wantSplunkEvent *splunk.Event + configFn func() *Config }{ { name: "valid", @@ -50,43 +49,10 @@ func Test_traceDataToSplunk(t *testing.T) { initSpan("myspan", &ts, ils.Spans().AppendEmpty()) return traces }, - wantSplunkEvents: []*splunk.Event{ - commonSplunkEvent("myspan", ts), - }, + wantSplunkEvent: commonSplunkEvent("myspan", ts), configFn: func() *Config { return createDefaultConfig().(*Config) }, - wantNumDroppedSpans: 0, - }, - { - name: "empty_rs", - traceDataFn: func() pdata.Traces { - traces := pdata.NewTraces() - traces.ResourceSpans().AppendEmpty() - return traces - }, - configFn: func() *Config { - return createDefaultConfig().(*Config) - }, - wantSplunkEvents: []*splunk.Event{}, - wantNumDroppedSpans: 0, - }, - { - name: "empty_ils", - traceDataFn: func() pdata.Traces { - traces := pdata.NewTraces() - rs := traces.ResourceSpans().AppendEmpty() - rs.Resource().Attributes().InsertString("com.splunk.source", "myservice") - rs.Resource().Attributes().InsertString("host.name", "myhost") - rs.Resource().Attributes().InsertString("com.splunk.sourcetype", "mysourcetype") - rs.ScopeSpans().AppendEmpty() - return traces - }, - configFn: func() *Config { - return createDefaultConfig().(*Config) - }, - wantSplunkEvents: []*splunk.Event{}, - wantNumDroppedSpans: 0, }, { name: "custom_config", @@ -95,9 +61,10 @@ func Test_traceDataToSplunk(t *testing.T) { rs := traces.ResourceSpans().AppendEmpty() rs.Resource().Attributes().InsertString("mysource", "myservice") rs.Resource().Attributes().InsertString("myhost", "myhost") - rs.Resource().Attributes().InsertString("mysourcetype", "mysourcetype") + rs.Resource().Attributes().InsertString("mysourcetype", "othersourcetype") rs.Resource().Attributes().InsertString("myindex", "mysourcetype") - rs.ScopeSpans().AppendEmpty() + ils := rs.ScopeSpans().AppendEmpty() + initSpan("myspan", &ts, 
ils.Spans().AppendEmpty()) return traces }, configFn: func() *Config { @@ -111,8 +78,12 @@ func Test_traceDataToSplunk(t *testing.T) { return cfg }, - wantSplunkEvents: []*splunk.Event{}, - wantNumDroppedSpans: 0, + wantSplunkEvent: func() *splunk.Event { + e := commonSplunkEvent("myspan", ts) + e.Index = "mysourcetype" + e.SourceType = "othersourcetype" + return e + }(), }, } for _, tt := range tests { @@ -120,13 +91,9 @@ func Test_traceDataToSplunk(t *testing.T) { traces := tt.traceDataFn() cfg := tt.configFn() - gotEvents, gotNumDroppedSpans := traceDataToSplunk(logger, traces, cfg) - assert.Equal(t, tt.wantNumDroppedSpans, gotNumDroppedSpans) - require.Equal(t, len(tt.wantSplunkEvents), len(gotEvents)) - for i, want := range tt.wantSplunkEvents { - assert.EqualValues(t, want, gotEvents[i]) - } - assert.Equal(t, tt.wantSplunkEvents, gotEvents) + event := mapSpanToSplunkEvent(traces.ResourceSpans().At(0).Resource(), traces.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0), cfg, logger) + require.NotNil(t, event) + assert.Equal(t, tt.wantSplunkEvent, event) }) } } From 80e6ea282a0cd8d0b56d3d6b8e629ddd60758942 Mon Sep 17 00:00:00 2001 From: Antoine Toulme Date: Wed, 6 Apr 2022 06:46:02 -0700 Subject: [PATCH 05/59] [processor/attributes] Add support to filter on log body (#8996) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Add support to filter on log body * code review * add changelog * Update processor/attributesprocessor/README.md Co-authored-by: Przemek Maciolek <58699843+pmm-sumo@users.noreply.github.com> * Update CHANGELOG.md Co-authored-by: Dmitrii Anoshin Co-authored-by: Przemek Maciolek <58699843+pmm-sumo@users.noreply.github.com> Co-authored-by: Dmitrii Anoshin Co-authored-by: Juraci Paixão Kröhling --- CHANGELOG.md | 1 + .../processor/filterconfig/config.go | 12 ++++++++++-- .../processor/filterlog/filterlog.go | 16 ++++++++++++++++ .../processor/filterlog/filterlog_test.go | 15 ++++++++++++--- processor/attributesprocessor/README.md | 11 ++++++++--- .../attributesprocessor/testdata/config.yaml | 18 ++++++++++++++++++ 6 files changed, 65 insertions(+), 8 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 34b0431fdcab..fc6bcdf6a338 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ - Add `make crosslink` target to ensure replace statements are included in `go.mod` for all transitive dependencies within repository (#8822) - `filestorageextension`: Change bbolt DB settings for better performance (#9004) - `jaegerremotesamplingextension`: Add local and remote sampling stores (#8818) +- `attributesprocessor`: Add support to filter on log body (#8996) ### 🛑 Breaking changes 🛑 diff --git a/internal/coreinternal/processor/filterconfig/config.go b/internal/coreinternal/processor/filterconfig/config.go index d767b101f152..5410e3b0e230 100644 --- a/internal/coreinternal/processor/filterconfig/config.go +++ b/internal/coreinternal/processor/filterconfig/config.go @@ -95,6 +95,10 @@ type MatchProperties struct { // Deprecated: the Name field is removed from the log data model. LogNames []string `mapstructure:"log_names"` + // LogBodies is a list of strings that the LogRecord's body field must match + // against. + LogBodies []string `mapstructure:"log_bodies"` + // MetricNames is a list of strings to match metric name against. // A match occurs if metric name matches at least one item in the list. // This field is optional. 
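To make the intent of the new `LogBodies` property concrete before the validation and matcher hunks that follow, here is a minimal, illustrative sketch — not part of this patch — of how the matcher wired up in the `filterlog` changes below could be driven. It reuses only identifiers visible in this change set; note that the `coreinternal` packages are Go-internal, so a program like this compiles only from within the opentelemetry-collector-contrib repository.

```go
// Illustrative only — not part of this patch.
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/model/pdata"

	"github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/processor/filterconfig"
	"github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/processor/filterlog"
	"github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/processor/filterset"
)

func main() {
	// Regexp-match any log record whose string body starts with "AUTH".
	matcher, err := filterlog.NewMatcher(&filterconfig.MatchProperties{
		Config:    filterset.Config{MatchType: filterset.Regexp},
		LogBodies: []string{"AUTH.*"},
	})
	if err != nil {
		panic(err)
	}

	lr := pdata.NewLogRecord()
	lr.Body().SetStringVal("AUTHENTICATION FAILED")

	// Resource and scope participate in attribute/library matching only,
	// so empty values suffice for a body-only match.
	fmt.Println(matcher.MatchLogRecord(lr, pdata.NewResource(), pdata.NewInstrumentationScope()))
	// Prints: true
}
```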
@@ -123,6 +127,10 @@ func (mp *MatchProperties) ValidateForSpans() error { return errors.New("log_names should not be specified for trace spans") } + if len(mp.LogBodies) > 0 { + return errors.New("log_bodies should not be specified for trace spans") + } + if len(mp.Services) == 0 && len(mp.SpanNames) == 0 && len(mp.Attributes) == 0 && len(mp.Libraries) == 0 && len(mp.Resources) == 0 { return errors.New(`at least one of "services", "span_names", "attributes", "libraries" or "resources" field must be specified`) @@ -137,8 +145,8 @@ func (mp *MatchProperties) ValidateForLogs() error { return errors.New("neither services nor span_names should be specified for log records") } - if len(mp.Attributes) == 0 && len(mp.Libraries) == 0 && len(mp.Resources) == 0 { - return errors.New(`at least one of "attributes", "libraries" or "resources" field must be specified`) + if len(mp.Attributes) == 0 && len(mp.Libraries) == 0 && len(mp.Resources) == 0 && len(mp.LogBodies) == 0 { + return errors.New(`at least one of "attributes", "libraries", "resources" or "log_bodies" field must be specified`) } return nil diff --git a/internal/coreinternal/processor/filterlog/filterlog.go b/internal/coreinternal/processor/filterlog/filterlog.go index c981fc18c655..c22fee6188bb 100644 --- a/internal/coreinternal/processor/filterlog/filterlog.go +++ b/internal/coreinternal/processor/filterlog/filterlog.go @@ -38,6 +38,9 @@ type propertiesMatcher struct { // log names to compare to. nameFilters filterset.FilterSet + + // log bodies to compare to. + bodyFilters filterset.FilterSet } // NewMatcher creates a LogRecord Matcher that matches based on the given MatchProperties. @@ -62,20 +65,33 @@ func NewMatcher(mp *filterconfig.MatchProperties) (Matcher, error) { return nil, fmt.Errorf("error creating log record name filters: %v", err) } } + var bodyFS filterset.FilterSet + if len(mp.LogBodies) > 0 { + bodyFS, err = filterset.CreateFilterSet(mp.LogBodies, &mp.Config) + if err != nil { + return nil, fmt.Errorf("error creating log record body filters: %v", err) + } + } return &propertiesMatcher{ PropertiesMatcher: rm, nameFilters: nameFS, + bodyFilters: bodyFS, }, nil } // MatchLogRecord matches a log record to a set of properties. // There are 3 sets of properties to match against. // The log record names are matched, if specified. +// The log record bodies are matched, if specified. // The attributes are then checked, if specified. // At least one of log record names or attributes must be specified. It is // supported to have more than one of these specified, and all specified must // evaluate to true for a match to occur. 
func (mp *propertiesMatcher) MatchLogRecord(lr pdata.LogRecord, resource pdata.Resource, library pdata.InstrumentationScope) bool { + if lr.Body().Type() == pdata.ValueTypeString && mp.bodyFilters != nil && mp.bodyFilters.Matches(lr.Body().StringVal()) { + return true + } + return mp.PropertiesMatcher.Match(lr.Attributes(), resource, library) } diff --git a/internal/coreinternal/processor/filterlog/filterlog_test.go b/internal/coreinternal/processor/filterlog/filterlog_test.go index 7fc38a285d33..8aa0440048ce 100644 --- a/internal/coreinternal/processor/filterlog/filterlog_test.go +++ b/internal/coreinternal/processor/filterlog/filterlog_test.go @@ -40,14 +40,15 @@ func TestLogRecord_validateMatchesConfiguration_InvalidConfig(t *testing.T) { { name: "empty_property", property: filterconfig.MatchProperties{}, - errorString: "at least one of \"attributes\", \"libraries\" or \"resources\" field must be specified", + errorString: `at least one of "attributes", "libraries", "resources" or "log_bodies" field must be specified`, }, { name: "empty_log_names_and_attributes", property: filterconfig.MatchProperties{ - LogNames: []string{}, + LogNames: []string{}, + LogBodies: []string{}, }, - errorString: "at least one of \"attributes\", \"libraries\" or \"resources\" field must be specified", + errorString: `at least one of "attributes", "libraries", "resources" or "log_bodies" field must be specified`, }, { name: "span_properties", @@ -149,10 +150,18 @@ func TestLogRecord_Matching_True(t *testing.T) { }, }, }, + { + name: "log_body_regexp_match", + properties: &filterconfig.MatchProperties{ + Config: *createConfig(filterset.Regexp), + LogBodies: []string{"AUTH.*"}, + }, + }, } lr := pdata.NewLogRecord() lr.Attributes().InsertString("abc", "def") + lr.Body().SetStringVal("AUTHENTICATION FAILED") for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { diff --git a/processor/attributesprocessor/README.md b/processor/attributesprocessor/README.md index 301a1333edcc..c29b41ae33ff 100644 --- a/processor/attributesprocessor/README.md +++ b/processor/attributesprocessor/README.md @@ -166,13 +166,13 @@ if the input data should be included or excluded from the processor. To configur this option, under `include` and/or `exclude` at least `match_type` and one of the following is required: - For spans, one of `services`, `span_names`, `attributes`, `resources`, or `libraries` must be specified -with a non-empty value for a valid configuration. The `log_names`, `expressions`, `resource_attributes` and +with a non-empty value for a valid configuration. The `log_names`, `log_bodies`, `expressions`, `resource_attributes` and `metric_names` fields are invalid. -- For logs, one of `log_names`, `attributes`, `resources`, or `libraries` must be specified with a +- For logs, one of `log_names`, `log_bodies`, `attributes`, `resources`, or `libraries` must be specified with a non-empty value for a valid configuration. The `span_names`, `metric_names`, `expressions`, `resource_attributes`, and `services` fields are invalid. - For metrics, one of `metric_names`, `resources` must be specified -with a valid non-empty value for a valid configuration. The `span_names`, `log_names`, and +with a valid non-empty value for a valid configuration. The `span_names`, `log_names`, `log_bodies` and `services` fields are invalid. @@ -218,6 +218,11 @@ attributes: # This is an optional field. log_names: [, ..., ] + # The log body must match at least one of the items. + # Currently only string body types are supported. 
+ # This is an optional field. + log_bodies: [, ..., ] + # The metric name must match at least one of the items. # This is an optional field. metric_names: [, ..., ] diff --git a/processor/attributesprocessor/testdata/config.yaml b/processor/attributesprocessor/testdata/config.yaml index e8a7428c34d7..2c68eb918de6 100644 --- a/processor/attributesprocessor/testdata/config.yaml +++ b/processor/attributesprocessor/testdata/config.yaml @@ -307,6 +307,24 @@ processors: action: update value: "SELECT * FROM USERS [obfuscated]" + + # The following demonstrates how to process logs whose body matches regexp + # patterns. This processor will remove the "token" attribute and obfuscate the "password" + # attribute in log records whose body matches "AUTH.*". + attributes/log_body_regexp: + # Specifies the log record properties that must exist for the processor to be applied. + include: + # match_type defines that "log_bodies" is an array of regexes. + match_type: regexp + # The log record body must match the "AUTH.*" pattern. + log_bodies: ["AUTH.*"] + actions: + - key: password + action: update + value: "obfuscated" + - key: token + action: delete + receivers: nop: From 290ae78f214458a11e209c644fb482ff8ba7da29 Mon Sep 17 00:00:00 2001 From: Alex Boten Date: Wed, 6 Apr 2022 06:46:19 -0700 Subject: [PATCH 06/59] [extension/fluentbit] mark extension as deprecated (#9062) * [extension/fluentbit] mark extension as deprecated After many discussions it seems the community is leaning towards removing the components that execute subprocesses. As such, marking the fluentbit extension as deprecated. Fixes https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/6721 * update changelog --- CHANGELOG.md | 1 + extension/fluentbitextension/README.md | 5 ++++- extension/fluentbitextension/factory.go | 11 +++++++++++ 3 files changed, 16 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index fc6bcdf6a338..277ab1bd1eb8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -31,6 +31,7 @@ - `datadogexporter`: Deprecate `service` setting in favor of `service.name` semantic convention (#8784) - `datadogexporter`: Deprecate `version` setting in favor of `service.version` semantic convention (#8784) - `datadogexporter`: Deprecate `GetHostTags` method from `TagsConfig` struct (#8975) +- `fluentbitextension`: Deprecate Fluentbit extension (#9062) ### 🚀 New components 🚀 diff --git a/extension/fluentbitextension/README.md b/extension/fluentbitextension/README.md index ad2caac070dc..afb00957642a 100644 --- a/extension/fluentbitextension/README.md +++ b/extension/fluentbitextension/README.md @@ -1,4 +1,7 @@ -# FluentBit Subprocess Extension +# Deprecated FluentBit Subprocess Extension + +This extension has been deprecated due to security concerns around the ability to specify the execution of +any arbitrary processes via its configuration. See [#6721](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/6721) for additional details.
**This extension is experimental and may receive breaking changes or be removed at any time.** diff --git a/extension/fluentbitextension/factory.go b/extension/fluentbitextension/factory.go index fdee2eb7768d..e1e819dcd5fa 100644 --- a/extension/fluentbitextension/factory.go +++ b/extension/fluentbitextension/factory.go @@ -16,9 +16,11 @@ package fluentbitextension // import "github.com/open-telemetry/opentelemetry-co import ( "context" + "sync" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config" + "go.uber.org/zap" ) const ( @@ -26,6 +28,8 @@ const ( typeStr = "fluentbit" ) +var once sync.Once + // NewFactory creates a factory for FluentBit extension. func NewFactory() component.ExtensionFactory { return component.NewExtensionFactory( @@ -40,7 +44,14 @@ func createDefaultConfig() config.Extension { } } +func logDeprecation(logger *zap.Logger) { + once.Do(func() { + logger.Warn("fluentbit extension is deprecated and will be removed in future versions.") + }) +} + func createExtension(_ context.Context, params component.ExtensionCreateSettings, cfg config.Extension) (component.Extension, error) { + logDeprecation(params.Logger) config := cfg.(*Config) return newProcessManager(config, params.Logger), nil } From a5bc9438446e6f13d9d128560322221f5e73918c Mon Sep 17 00:00:00 2001 From: Dmitrii Anoshin Date: Wed, 6 Apr 2022 06:59:46 -0700 Subject: [PATCH 07/59] Fix recent Changelog entries (#9096) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Fix recent Changelog entries * Update CHANGELOG.md Co-authored-by: Juraci Paixão Kröhling Co-authored-by: Juraci Paixão Kröhling --- CHANGELOG.md | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 277ab1bd1eb8..87848e7745c6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,9 +5,8 @@ ### 💡 Enhancements 💡 - `splunkhecexporter`: Add support for batching traces (#8995) +- `hostmetricsreceiver`: Migrate Processes scraper to the Metrics builder (#8855) - `tanzuobservabilityexporter`: Use resourcetotelemetry helper (#8338) -- `cmd/mdatagen`: Add resource attributes definition to metadata.yaml and move `pdata.Metrics` creation to the - generated code (#5270) - Add `make crosslink` target to ensure replace statements are included in `go.mod` for all transitive dependencies within repository (#8822) - `filestorageextension`: Change bbolt DB settings for better performance (#9004) - `jaegerremotesamplingextension`: Add local and remote sampling stores (#8818) @@ -23,7 +22,7 @@ ### 🧰 Bug fixes 🧰 -- `hostmetricsreceiver`: Use cpu times for time delta in cpu.utilization calculation (#8856) +- `hostmetricsreceiver`: Use cpu times for time delta in cpu.utilization calculation (#8857) - `dynatraceexporter`: Remove overly verbose stacktrace from certain logs (#8989) ### 🚩 Deprecations 🚩 @@ -49,7 +48,7 @@ - `resourcedetectionprocessor`: Add attribute allowlist (#8547) - `datadogexporter`: Metrics payload data and Sketches payload data will be logged if collector is started in debug mode (#8929) - `cmd/mdatagen`: Add resource attributes definition to metadata.yaml and move `pdata.Metrics` creation to the - generated code (#5270) + generated code (#8555) ### 🛑 Breaking changes 🛑 From 4c48a85b73a600b0fc366606d792508864b1db05 Mon Sep 17 00:00:00 2001 From: David Ashpole Date: Wed, 6 Apr 2022 11:45:42 -0400 Subject: [PATCH 08/59] [prometheusremotewriteexporter] Translate resource to the target info metric (#8493) * 
prometheusremotewriteexporter: translate resource to target info * fix go mod and changelog * fix deprecation warnings * Update pkg/translator/prometheusremotewrite/go.mod Co-authored-by: Anthony Mirabella Co-authored-by: Alex Boten --- CHANGELOG.md | 1 + .../exporter_test.go | 2 +- pkg/translator/prometheusremotewrite/go.mod | 3 + pkg/translator/prometheusremotewrite/go.sum | 2 + .../prometheusremotewrite/helper.go | 133 +++++++++++---- .../prometheusremotewrite/helper_test.go | 153 +++++++++++++++++- .../prometheusremotewrite/metrics_to_prw.go | 6 +- 7 files changed, 262 insertions(+), 38 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 87848e7745c6..1ed6a74ff912 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ - `filestorageextension`: Change bbolt DB settings for better performance (#9004) - `jaegerremotesamplingextension`: Add local and remote sampling stores (#8818) - `attributesprocessor`: Add support to filter on log body (#8996) +- `prometheusremotewriteexporter`: Translate resource attributes to the target info metric (#8493) ### 🛑 Breaking changes 🛑 diff --git a/exporter/prometheusremotewriteexporter/exporter_test.go b/exporter/prometheusremotewriteexporter/exporter_test.go index 7037c7a14bcd..0d14b3ed69f1 100644 --- a/exporter/prometheusremotewriteexporter/exporter_test.go +++ b/exporter/prometheusremotewriteexporter/exporter_test.go @@ -441,7 +441,7 @@ func Test_PushMetrics(t *testing.T) { "intSum_case", &intSumBatch, checkFunc, - 2, + 3, http.StatusAccepted, false, false, diff --git a/pkg/translator/prometheusremotewrite/go.mod b/pkg/translator/prometheusremotewrite/go.mod index fe76d36d4228..1c8c1f892c65 100644 --- a/pkg/translator/prometheusremotewrite/go.mod +++ b/pkg/translator/prometheusremotewrite/go.mod @@ -15,8 +15,11 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/kr/pretty v0.3.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.48.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect go.uber.org/atomic v1.9.0 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) + +replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal => ../../../internal/coreinternal diff --git a/pkg/translator/prometheusremotewrite/go.sum b/pkg/translator/prometheusremotewrite/go.sum index 84922e9d04bc..1fd2bdc29b6d 100644 --- a/pkg/translator/prometheusremotewrite/go.sum +++ b/pkg/translator/prometheusremotewrite/go.sum @@ -949,6 +949,8 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.46.0 h1:y7D6FmP4aJSkj3xdwlvlLMHFEX7tr9ZY7XtRZAWP9k0= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.46.0/go.mod h1:wFgFElwA1P+441Aqxbwn2xafbKQGh2NB2AX/Ilap27o= github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= 
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= diff --git a/pkg/translator/prometheusremotewrite/helper.go b/pkg/translator/prometheusremotewrite/helper.go index 24eca679db6d..1ecd0cf07c26 100644 --- a/pkg/translator/prometheusremotewrite/helper.go +++ b/pkg/translator/prometheusremotewrite/helper.go @@ -48,8 +48,10 @@ const ( maxExemplarRunes = 128 // Trace and Span id keys are defined as part of the spec: // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification%2Fmetrics%2Fdatamodel.md#exemplars-2 - traceIDKey = "trace_id" - spanIDKey = "span_id" + traceIDKey = "trace_id" + spanIDKey = "span_id" + infoType = "info" + targetMetricName = "target" ) type bucketBoundsData struct { @@ -75,13 +77,13 @@ func (a ByLabelName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } // creates a new TimeSeries in the map if not found and returns the time series signature. // tsMap will be unmodified if either labels or sample is nil, but can still be modified if the exemplar is nil. func addSample(tsMap map[string]*prompb.TimeSeries, sample *prompb.Sample, labels []prompb.Label, - metric pdata.Metric) string { + datatype string) string { if sample == nil || labels == nil || tsMap == nil { return "" } - sig := timeSeriesSignature(metric, &labels) + sig := timeSeriesSignature(datatype, &labels) ts, ok := tsMap[sig] if ok { @@ -137,9 +139,9 @@ func addExemplar(tsMap map[string]*prompb.TimeSeries, bucketBounds []bucketBound // TYPE-label1-value1- ... -labelN-valueN // the label slice should not contain duplicate label names; this method sorts the slice by label name before creating // the signature. -func timeSeriesSignature(metric pdata.Metric, labels *[]prompb.Label) string { +func timeSeriesSignature(datatype string, labels *[]prompb.Label) string { b := strings.Builder{} - b.WriteString(metric.DataType().String()) + b.WriteString(datatype) sort.Sort(ByLabelName(*labels)) @@ -160,6 +162,23 @@ func createAttributes(resource pdata.Resource, attributes pdata.Map, externalLab // map ensures no duplicate label name l := map[string]prompb.Label{} + // Ensure attributes are sorted by key for consistent merging of keys which + // collide when sanitized. + attributes.Sort() + attributes.Range(func(key string, value pdata.Value) bool { + if existingLabel, alreadyExists := l[sanitize(key)]; alreadyExists { + existingLabel.Value = existingLabel.Value + ";" + value.AsString() + l[sanitize(key)] = existingLabel + } else { + l[sanitize(key)] = prompb.Label{ + Name: sanitize(key), + Value: value.AsString(), + } + } + + return true + }) + // Map service.name + service.namespace to job if serviceName, ok := resource.Attributes().Get(conventions.AttributeServiceName); ok { val := serviceName.AsString() @@ -178,24 +197,6 @@ func createAttributes(resource pdata.Resource, attributes pdata.Map, externalLab Value: instance.AsString(), } } - - // Ensure attributes are sorted by key for consistent merging of keys which - // collide when sanitized. 
- attributes.Sort() - attributes.Range(func(key string, value pdata.Value) bool { - if existingLabel, alreadyExists := l[sanitize(key)]; alreadyExists { - existingLabel.Value = existingLabel.Value + ";" + value.AsString() - l[sanitize(key)] = existingLabel - } else { - l[sanitize(key)] = prompb.Label{ - Name: sanitize(key), - Value: value.AsString(), - } - } - - return true - }) - for key, value := range externalLabels { // External labels have already been sanitized if _, alreadyExists := l[key]; alreadyExists { @@ -280,7 +281,7 @@ func addSingleNumberDataPoint(pt pdata.NumberDataPoint, resource pdata.Resource, if pt.Flags().HasFlag(pdata.MetricDataPointFlagNoRecordedValue) { sample.Value = math.Float64frombits(value.StaleNaN) } - addSample(tsMap, sample, labels, metric) + addSample(tsMap, sample, labels, metric.DataType().String()) } // addSingleHistogramDataPoint converts pt to 2 + min(len(ExplicitBounds), len(BucketCount)) + 1 samples. It @@ -299,7 +300,7 @@ func addSingleHistogramDataPoint(pt pdata.HistogramDataPoint, resource pdata.Res } sumlabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels, nameStr, baseName+sumStr) - addSample(tsMap, sum, sumlabels, metric) + addSample(tsMap, sum, sumlabels, metric.DataType().String()) // treat count as a sample in an individual TimeSeries count := &prompb.Sample{ @@ -311,7 +312,7 @@ func addSingleHistogramDataPoint(pt pdata.HistogramDataPoint, resource pdata.Res } countlabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels, nameStr, baseName+countStr) - addSample(tsMap, count, countlabels, metric) + addSample(tsMap, count, countlabels, metric.DataType().String()) // cumulative count for conversion to cumulative histogram var cumulativeCount uint64 @@ -335,7 +336,7 @@ func addSingleHistogramDataPoint(pt pdata.HistogramDataPoint, resource pdata.Res } boundStr := strconv.FormatFloat(bound, 'f', -1, 64) labels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels, nameStr, baseName+bucketStr, leStr, boundStr) - sig := addSample(tsMap, bucket, labels, metric) + sig := addSample(tsMap, bucket, labels, metric.DataType().String()) bucketBounds = append(bucketBounds, bucketBoundsData{sig: sig, bound: bound}) } @@ -350,7 +351,7 @@ func addSingleHistogramDataPoint(pt pdata.HistogramDataPoint, resource pdata.Res infBucket.Value = float64(cumulativeCount) } infLabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels, nameStr, baseName+bucketStr, leStr, pInfStr) - sig := addSample(tsMap, infBucket, infLabels, metric) + sig := addSample(tsMap, infBucket, infLabels, metric.DataType().String()) bucketBounds = append(bucketBounds, bucketBoundsData{sig: sig, bound: math.Inf(1)}) addExemplars(tsMap, promExemplars, bucketBounds) @@ -411,6 +412,42 @@ func getPromExemplars(pt pdata.HistogramDataPoint) []prompb.Exemplar { return promExemplars } +// mostRecentTimestampInMetric returns the latest timestamp in a batch of metrics +func mostRecentTimestampInMetric(metric pdata.Metric) pdata.Timestamp { + var ts pdata.Timestamp + // handle individual metric based on type + switch metric.DataType() { + case pdata.MetricDataTypeGauge: + dataPoints := metric.Gauge().DataPoints() + for x := 0; x < dataPoints.Len(); x++ { + ts = maxTimestamp(ts, dataPoints.At(x).Timestamp()) + } + case pdata.MetricDataTypeSum: + dataPoints := metric.Sum().DataPoints() + for x := 0; x < dataPoints.Len(); x++ { + ts = maxTimestamp(ts, dataPoints.At(x).Timestamp()) + } + case pdata.MetricDataTypeHistogram: + 
dataPoints := metric.Histogram().DataPoints() + for x := 0; x < dataPoints.Len(); x++ { + ts = maxTimestamp(ts, dataPoints.At(x).Timestamp()) + } + case pdata.MetricDataTypeSummary: + dataPoints := metric.Summary().DataPoints() + for x := 0; x < dataPoints.Len(); x++ { + ts = maxTimestamp(ts, dataPoints.At(x).Timestamp()) + } + } + return ts +} + +func maxTimestamp(a, b pdata.Timestamp) pdata.Timestamp { + if a > b { + return a + } + return b +} + // addSingleSummaryDataPoint converts pt to len(QuantileValues) + 2 samples. func addSingleSummaryDataPoint(pt pdata.SummaryDataPoint, resource pdata.Resource, metric pdata.Metric, settings Settings, tsMap map[string]*prompb.TimeSeries) { @@ -426,7 +463,7 @@ func addSingleSummaryDataPoint(pt pdata.SummaryDataPoint, resource pdata.Resourc sum.Value = math.Float64frombits(value.StaleNaN) } sumlabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels, nameStr, baseName+sumStr) - addSample(tsMap, sum, sumlabels, metric) + addSample(tsMap, sum, sumlabels, metric.DataType().String()) // treat count as a sample in an individual TimeSeries count := &prompb.Sample{ @@ -437,7 +474,7 @@ func addSingleSummaryDataPoint(pt pdata.SummaryDataPoint, resource pdata.Resourc count.Value = math.Float64frombits(value.StaleNaN) } countlabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels, nameStr, baseName+countStr) - addSample(tsMap, count, countlabels, metric) + addSample(tsMap, count, countlabels, metric.DataType().String()) // process each percentile/quantile for i := 0; i < pt.QuantileValues().Len(); i++ { @@ -451,8 +488,40 @@ func addSingleSummaryDataPoint(pt pdata.SummaryDataPoint, resource pdata.Resourc } percentileStr := strconv.FormatFloat(qt.Quantile(), 'f', -1, 64) qtlabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels, nameStr, baseName, quantileStr, percentileStr) - addSample(tsMap, quantile, qtlabels, metric) + addSample(tsMap, quantile, qtlabels, metric.DataType().String()) + } +} + +// addResourceTargetInfo converts the resource to the target info metric +func addResourceTargetInfo(resource pdata.Resource, settings Settings, timestamp pdata.Timestamp, tsMap map[string]*prompb.TimeSeries) { + if resource.Attributes().Len() == 0 { + return + } + // create parameters for addSample + name := targetMetricName + if len(settings.Namespace) > 0 { + name = settings.Namespace + "_" + name + } + // Use resource attributes (other than those used for job+instance) as the + // metric labels for the target info metric + attributes := pdata.NewMap() + resource.Attributes().CopyTo(attributes) + attributes.RemoveIf(func(k string, _ pdata.Value) bool { + switch k { + case conventions.AttributeServiceName, conventions.AttributeServiceNamespace, conventions.AttributeServiceInstanceID: + // Remove resource attributes used for job + instance + return true + default: + return false + } + }) + labels := createAttributes(resource, attributes, settings.ExternalLabels, nameStr, name) + sample := &prompb.Sample{ + Value: float64(1), + // convert ns to ms + Timestamp: convertTimeStamp(timestamp), } + addSample(tsMap, sample, labels, infoType) } // copied from prometheus-go-metric-exporter diff --git a/pkg/translator/prometheusremotewrite/helper_test.go b/pkg/translator/prometheusremotewrite/helper_test.go index 77d789b6cc6c..a5034bfdf226 100644 --- a/pkg/translator/prometheusremotewrite/helper_test.go +++ b/pkg/translator/prometheusremotewrite/helper_test.go @@ -23,6 +23,9 @@ import ( 
"github.com/prometheus/prometheus/prompb" "github.com/stretchr/testify/assert" "go.opentelemetry.io/collector/model/pdata" + conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/testdata" ) // Test_validateMetrics checks validateMetrics return true if a type and temporality combination is valid, false @@ -119,14 +122,14 @@ func Test_addSample(t *testing.T) { } t.Run("empty_case", func(t *testing.T) { tsMap := map[string]*prompb.TimeSeries{} - addSample(tsMap, nil, nil, pdata.NewMetric()) + addSample(tsMap, nil, nil, "") assert.Exactly(t, tsMap, map[string]*prompb.TimeSeries{}) }) // run tests for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - addSample(tt.orig, &tt.testCase[0].sample, tt.testCase[0].labels, tt.testCase[0].metric) - addSample(tt.orig, &tt.testCase[1].sample, tt.testCase[1].labels, tt.testCase[1].metric) + addSample(tt.orig, &tt.testCase[0].sample, tt.testCase[0].labels, tt.testCase[0].metric.DataType().String()) + addSample(tt.orig, &tt.testCase[1].sample, tt.testCase[1].labels, tt.testCase[1].metric.DataType().String()) assert.Exactly(t, tt.want, tt.orig) }) } @@ -171,7 +174,7 @@ func Test_timeSeriesSignature(t *testing.T) { // run tests for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - assert.EqualValues(t, tt.want, timeSeriesSignature(tt.metric, &tt.lbs)) + assert.EqualValues(t, tt.want, timeSeriesSignature(tt.metric.DataType().String(), &tt.lbs)) }) } } @@ -490,3 +493,145 @@ func Test_getPromExemplars(t *testing.T) { }) } } + +func TestAddResourceTargetInfo(t *testing.T) { + resourceAttrMap := map[string]interface{}{ + conventions.AttributeServiceName: "service-name", + conventions.AttributeServiceNamespace: "service-namespace", + conventions.AttributeServiceInstanceID: "service-instance-id", + "resource_attr": "resource-attr-val-1", + } + resourceWithServiceAttrs := pdata.NewResource() + pdata.NewMapFromRaw(resourceAttrMap).CopyTo(resourceWithServiceAttrs.Attributes()) + for _, tc := range []struct { + desc string + resource pdata.Resource + settings Settings + timestamp pdata.Timestamp + expected map[string]*prompb.TimeSeries + }{ + { + desc: "empty resource", + resource: pdata.NewResource(), + expected: map[string]*prompb.TimeSeries{}, + }, + { + desc: "with resource", + resource: testdata.GenerateMetricsNoLibraries().ResourceMetrics().At(0).Resource(), + timestamp: testdata.TestMetricStartTimestamp, + expected: map[string]*prompb.TimeSeries{ + "info-__name__-target-resource_attr-resource-attr-val-1": { + Labels: []prompb.Label{ + { + Name: "__name__", + Value: "target", + }, + { + Name: "resource_attr", + Value: "resource-attr-val-1", + }, + }, + Samples: []prompb.Sample{ + { + Value: 1, + Timestamp: 1581452772000, + }, + }, + }, + }, + }, + { + desc: "with resource, with namespace", + resource: testdata.GenerateMetricsNoLibraries().ResourceMetrics().At(0).Resource(), + timestamp: testdata.TestMetricStartTimestamp, + settings: Settings{Namespace: "foo"}, + expected: map[string]*prompb.TimeSeries{ + "info-__name__-foo_target-resource_attr-resource-attr-val-1": { + Labels: []prompb.Label{ + { + Name: "__name__", + Value: "foo_target", + }, + { + Name: "resource_attr", + Value: "resource-attr-val-1", + }, + }, + Samples: []prompb.Sample{ + { + Value: 1, + Timestamp: 1581452772000, + }, + }, + }, + }, + }, + { + desc: "with resource, with service attributes", + resource: resourceWithServiceAttrs, + timestamp: 
testdata.TestMetricStartTimestamp, + expected: map[string]*prompb.TimeSeries{ + "info-__name__-target-instance-service-instance-id-job-service-namespace/service-name-resource_attr-resource-attr-val-1": { + Labels: []prompb.Label{ + { + Name: "__name__", + Value: "target", + }, + { + Name: "instance", + Value: "service-instance-id", + }, + { + Name: "job", + Value: "service-namespace/service-name", + }, + { + Name: "resource_attr", + Value: "resource-attr-val-1", + }, + }, + Samples: []prompb.Sample{ + { + Value: 1, + Timestamp: 1581452772000, + }, + }, + }, + }, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + tsMap := map[string]*prompb.TimeSeries{} + addResourceTargetInfo(tc.resource, tc.settings, tc.timestamp, tsMap) + assert.Exactly(t, tc.expected, tsMap) + }) + } +} + +func TestMostRecentTimestampInMetric(t *testing.T) { + laterTimestamp := pdata.NewTimestampFromTime(testdata.TestMetricTime.Add(1 * time.Minute)) + metricMultipleTimestamps := testdata.GenerateMetricsOneMetric().ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0).Metrics().At(0) + // the first datapoint timestamp is at testdata.TestMetricTime + metricMultipleTimestamps.Sum().DataPoints().At(1).SetTimestamp(laterTimestamp) + for _, tc := range []struct { + desc string + input pdata.Metric + expected pdata.Timestamp + }{ + { + desc: "empty", + input: pdata.NewMetric(), + expected: pdata.Timestamp(0), + }, + { + desc: "multiple timestamps", + input: metricMultipleTimestamps, + expected: laterTimestamp, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + got := mostRecentTimestampInMetric(tc.input) + assert.Exactly(t, tc.expected, got) + }) + } +} diff --git a/pkg/translator/prometheusremotewrite/metrics_to_prw.go b/pkg/translator/prometheusremotewrite/metrics_to_prw.go index 01ba9ff79e67..80899f59cca3 100644 --- a/pkg/translator/prometheusremotewrite/metrics_to_prw.go +++ b/pkg/translator/prometheusremotewrite/metrics_to_prw.go @@ -47,7 +47,9 @@ func FromMetrics(md pdata.Metrics, settings Settings) (tsMap map[string]*prompb. resourceMetrics := resourceMetricsSlice.At(i) resource := resourceMetrics.Resource() scopeMetricsSlice := resourceMetrics.ScopeMetrics() - // TODO: add resource attributes as labels, probably in next PR + // keep track of the most recent timestamp in the ResourceMetrics for + // use with the "target" info metric + var mostRecentTimestamp pdata.Timestamp for j := 0; j < scopeMetricsSlice.Len(); j++ { scopeMetrics := scopeMetricsSlice.At(j) metricSlice := scopeMetrics.Metrics() @@ -55,6 +57,7 @@ func FromMetrics(md pdata.Metrics, settings Settings) (tsMap map[string]*prompb. // TODO: decide if instrumentation library information should be exported as labels for k := 0; k < metricSlice.Len(); k++ { metric := metricSlice.At(k) + mostRecentTimestamp = maxTimestamp(mostRecentTimestamp, mostRecentTimestampInMetric(metric)) // check for valid type and temporality combination and for matching data field and type if ok := validateMetrics(metric); !ok { @@ -96,6 +99,7 @@ func FromMetrics(md pdata.Metrics, settings Settings) (tsMap map[string]*prompb. 
} } } + addResourceTargetInfo(resource, settings, mostRecentTimestamp, tsMap) } return From a434fd3e35437fa4a7e6181bfb779b2a59efbca1 Mon Sep 17 00:00:00 2001 From: Alex Boten Date: Wed, 6 Apr 2022 08:48:30 -0700 Subject: [PATCH 09/59] [receiver/prometheusexec] mark receiver as deprecated (#9058) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [receiver/prometheusexec] mark receiver as deprecated After many discussions it seems the community is leaning towards removing the components that execute subprocesses. As such, marking the prom_exec receiver as deprecated. Fixes https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/6722 * Update CHANGELOG.md Co-authored-by: Juraci Paixão Kröhling --- CHANGELOG.md | 1 + receiver/prometheusexecreceiver/README.md | 5 ++++- receiver/prometheusexecreceiver/factory.go | 11 +++++++++++ 3 files changed, 16 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1ed6a74ff912..b66993651f79 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -31,6 +31,7 @@ - `datadogexporter`: Deprecate `service` setting in favor of `service.name` semantic convention (#8784) - `datadogexporter`: Deprecate `version` setting in favor of `service.version` semantic convention (#8784) - `datadogexporter`: Deprecate `GetHostTags` method from `TagsConfig` struct (#8975) +- `prometheusexecreceiver`: Deprecate prom_exec receiver (#9058) - `fluentbitextension`: Deprecate Fluentbit extension (#9062) ### 🚀 New components 🚀 diff --git a/receiver/prometheusexecreceiver/README.md b/receiver/prometheusexecreceiver/README.md index d87f0f250534..7c587c79c4bc 100644 --- a/receiver/prometheusexecreceiver/README.md +++ b/receiver/prometheusexecreceiver/README.md @@ -1,4 +1,7 @@ -# prometheus_exec Receiver +# Deprecated prometheus_exec Receiver + +This receiver has been deprecated due to security concerns around the ability to specify the execution of +any arbitrary processes via its configuration. See [#6722](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/6722) for additional details. This receiver makes it easy for a user to collect metrics from third-party services **via Prometheus exporters**. 
It's meant for people who want a diff --git a/receiver/prometheusexecreceiver/factory.go b/receiver/prometheusexecreceiver/factory.go index ce4754d9c7a2..3f2a4f340e48 100644 --- a/receiver/prometheusexecreceiver/factory.go +++ b/receiver/prometheusexecreceiver/factory.go @@ -16,11 +16,13 @@ package prometheusexecreceiver // import "github.com/open-telemetry/opentelemetr import ( "context" + "sync" "time" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/consumer" + "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusexecreceiver/subprocessmanager" ) @@ -34,6 +36,8 @@ const ( defaultTimeoutInterval = 10 * time.Second ) +var once sync.Once + // NewFactory creates a factory for the prometheusexec receiver func NewFactory() component.ReceiverFactory { return component.NewReceiverFactory( @@ -42,6 +46,12 @@ func NewFactory() component.ReceiverFactory { component.WithMetricsReceiver(createMetricsReceiver)) } +func logDeprecation(logger *zap.Logger) { + once.Do(func() { + logger.Warn("prometheus_exec receiver is deprecated and will be removed in future versions.") + }) +} + // createDefaultConfig returns a default config func createDefaultConfig() config.Receiver { return &Config{ @@ -61,6 +71,7 @@ func createMetricsReceiver( cfg config.Receiver, nextConsumer consumer.Metrics, ) (component.MetricsReceiver, error) { + logDeprecation(params.Logger) rCfg := cfg.(*Config) return newPromExecReceiver(params, rCfg, nextConsumer) } From fc32f90dec76e5951661ced1ee4f9e501b6ef36e Mon Sep 17 00:00:00 2001 From: Dmitrii Anoshin Date: Wed, 6 Apr 2022 12:00:26 -0700 Subject: [PATCH 10/59] [Metrics builder] Set schema version from metadata.yaml (#9010) This change sets the `SchemaURL` field on the metrics reported by all metric scrapers. The version is taken from a new metadata field `sem_conv_version`. After this change, metrics from all the scrapers migrated to the new metrics builder will be reported with the latest schema version 1.9.0. This makes the `wrapBySchemaURLSetterConsumer` function redundant, and it can be removed. This approach enforces the `SchemaURL` by making sure it's explicitly defined in the metadata field `sem_conv_version`. We could have made a default version apply in the metrics builder instead of asking for it explicitly, but I don't think it's the best approach. We have all the metrics metadata defined in the yaml file so far, so I don't think we should introduce anything applied implicitly here.
Closes: https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/8841 --- CHANGELOG.md | 2 +- cmd/mdatagen/loader.go | 2 ++ cmd/mdatagen/loader_test.go | 3 +- cmd/mdatagen/metric-metadata.yaml | 4 +++ cmd/mdatagen/metrics_v2.tmpl | 6 ++++ cmd/mdatagen/testdata/all_options.yaml | 1 + receiver/hostmetricsreceiver/factory.go | 30 +------------------ .../hostmetrics_receiver_test.go | 2 +- .../internal/metadata/generated_metrics_v2.go | 2 ++ .../internal/scraper/cpuscraper/metadata.yaml | 2 ++ .../internal/metadata/generated_metrics_v2.go | 2 ++ .../scraper/diskscraper/metadata.yaml | 2 ++ .../internal/metadata/generated_metrics_v2.go | 2 ++ .../scraper/filesystemscraper/metadata.yaml | 2 ++ .../internal/metadata/generated_metrics_v2.go | 2 ++ .../scraper/loadscraper/metadata.yaml | 2 ++ .../internal/metadata/generated_metrics_v2.go | 2 ++ .../scraper/memoryscraper/metadata.yaml | 2 ++ .../internal/metadata/generated_metrics_v2.go | 2 ++ .../scraper/networkscraper/metadata.yaml | 2 ++ .../internal/metadata/generated_metrics_v2.go | 2 ++ .../scraper/pagingscraper/metadata.yaml | 2 ++ .../internal/metadata/generated_metrics_v2.go | 2 ++ .../scraper/processesscraper/metadata.yaml | 2 ++ .../internal/metadata/generated_metrics_v2.go | 2 ++ .../scraper/processscraper/metadata.yaml | 2 ++ 26 files changed, 54 insertions(+), 32 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b66993651f79..8f6aa6958f54 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,7 +4,6 @@ ### 💡 Enhancements 💡 - `splunkhecexporter`: Add support for batching traces (#8995) - - `hostmetricsreceiver`: Migrate Processes scraper to the Metrics builder (#8855) - `tanzuobservabilityexporter`: Use resourcetotelemetry helper (#8338) - Add `make crosslink` target to ensure replace statements are included in `go.mod` for all transitive dependencies within repository (#8822) @@ -12,6 +11,7 @@ - `jaegerremotesamplingextension`: Add local and remote sampling stores (#8818) - `attributesprocessor`: Add support to filter on log body (#8996) - `prometheusremotewriteexporter`: Translate resource attributes to the target info metric (#8493) +- `cmd/mdatagen`: Add `sem_conv_version` field to metadata.yaml that is used to set metrics SchemaURL (#9010) ### 🛑 Breaking changes 🛑 diff --git a/cmd/mdatagen/loader.go b/cmd/mdatagen/loader.go index 91539d727888..c08b9d8d470f 100644 --- a/cmd/mdatagen/loader.go +++ b/cmd/mdatagen/loader.go @@ -157,6 +157,8 @@ type attribute struct { type metadata struct { // Name of the component. Name string `validate:"notblank"` + // SemConvVersion is a version number of OpenTelemetry semantic conventions applied to the scraped metrics. + SemConvVersion string `mapstructure:"sem_conv_version"` // ResourceAttributes that can be emitted by the component. ResourceAttributes map[attributeName]attribute `mapstructure:"resource_attributes" validate:"dive"` // Attributes emitted by one or more metrics. 
diff --git a/cmd/mdatagen/loader_test.go b/cmd/mdatagen/loader_test.go index 7276105a182e..4995a59b89be 100644 --- a/cmd/mdatagen/loader_test.go +++ b/cmd/mdatagen/loader_test.go @@ -33,7 +33,8 @@ func Test_loadMetadata(t *testing.T) { name: "all options", yml: "all_options.yaml", want: metadata{ - Name: "metricreceiver", + Name: "metricreceiver", + SemConvVersion: "1.9.0", Attributes: map[attributeName]attribute{ "enumAttribute": { Description: "Attribute with a known set of values.", diff --git a/cmd/mdatagen/metric-metadata.yaml b/cmd/mdatagen/metric-metadata.yaml index 5e2bb7c05bf5..2baa58a60490 100644 --- a/cmd/mdatagen/metric-metadata.yaml +++ b/cmd/mdatagen/metric-metadata.yaml @@ -1,6 +1,10 @@ # Required: name of the receiver. name: +# Optional: OTel Semantic Conventions version that will be associated with the scraped metrics. +# This attribute should be set for metrics compliant with OTel Semantic Conventions. +sem_conv_version: 1.9.0 + # Optional: map of resource attribute definitions with the key being the attribute name. resource_attributes: : diff --git a/cmd/mdatagen/metrics_v2.tmpl b/cmd/mdatagen/metrics_v2.tmpl index 55e6be78b456..4f0e43df69fd 100644 --- a/cmd/mdatagen/metrics_v2.tmpl +++ b/cmd/mdatagen/metrics_v2.tmpl @@ -6,6 +6,9 @@ import ( "time" "go.opentelemetry.io/collector/model/pdata" + {{- if .SemConvVersion }} + conventions "go.opentelemetry.io/collector/model/semconv/v{{ .SemConvVersion }}" + {{- end }} ) // MetricSettings provides common settings for a particular metric. @@ -162,6 +165,9 @@ func With{{ $name.Render }}(val {{ $attr.Type.Primitive }}) ResourceOption { // just `Emit` function can be called instead. Resource attributes should be provided as ResourceOption arguments. func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { rm := pdata.NewResourceMetrics() + {{- if .SemConvVersion }} + rm.SetSchemaUrl(conventions.SchemaURL) + {{- end }} rm.Resource().Attributes().EnsureCapacity(mb.resourceCapacity) for _, op := range ro { op(rm.Resource()) diff --git a/cmd/mdatagen/testdata/all_options.yaml b/cmd/mdatagen/testdata/all_options.yaml index 2be532e56b27..d940586065a6 100644 --- a/cmd/mdatagen/testdata/all_options.yaml +++ b/cmd/mdatagen/testdata/all_options.yaml @@ -1,4 +1,5 @@ name: metricreceiver +sem_conv_version: 1.9.0 attributes: freeFormAttribute: description: Attribute that can take on any value. diff --git a/receiver/hostmetricsreceiver/factory.go b/receiver/hostmetricsreceiver/factory.go index b1d60ab758cb..92bf4ff0e0a9 100644 --- a/receiver/hostmetricsreceiver/factory.go +++ b/receiver/hostmetricsreceiver/factory.go @@ -21,8 +21,6 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/model/pdata" - conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" "go.opentelemetry.io/collector/receiver/scraperhelper" "go.uber.org/zap" @@ -94,40 +92,14 @@ func createMetricsReceiver( return nil, err } - schemaURLSetterConsumer, err := wrapBySchemaURLSetterConsumer(consumer) - if err != nil { - return nil, err - } - return scraperhelper.NewScraperControllerReceiver( &oCfg.ScraperControllerSettings, set, - schemaURLSetterConsumer, + consumer, addScraperOptions..., ) } -// This function wraps the consumer and returns a new consumer such that the schema URL -// of all metrics that pass through the new consumer is set correctly. 
-func wrapBySchemaURLSetterConsumer(cm consumer.Metrics) (consumer.Metrics, error) { - return consumer.NewMetrics(func(ctx context.Context, md pdata.Metrics) error { - rms := md.ResourceMetrics() - for i := 0; i < rms.Len(); i++ { - rm := rms.At(i) - if rm.SchemaUrl() == "" { - // If no specific SchemaURL is set we assume all collected host metrics - // confirm to our default SchemaURL. The assumption here is that - // the code that produces these metrics uses semantic conventions - // defined in package "conventions". - rm.SetSchemaUrl(conventions.SchemaURL) - } - // Else if the SchemaURL is set we assume the producer of the metric knows - // what it does. We won't touch it. - } - return cm.ConsumeMetrics(ctx, md) - }) -} - func createAddScraperOptions( ctx context.Context, logger *zap.Logger, diff --git a/receiver/hostmetricsreceiver/hostmetrics_receiver_test.go b/receiver/hostmetricsreceiver/hostmetrics_receiver_test.go index 710b7b60109c..453f0be38328 100644 --- a/receiver/hostmetricsreceiver/hostmetrics_receiver_test.go +++ b/receiver/hostmetricsreceiver/hostmetrics_receiver_test.go @@ -30,7 +30,7 @@ import ( "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumertest" "go.opentelemetry.io/collector/model/pdata" - conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + conventions "go.opentelemetry.io/collector/model/semconv/v1.9.0" "go.opentelemetry.io/collector/receiver/scraperhelper" "go.uber.org/zap" diff --git a/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/internal/metadata/generated_metrics_v2.go b/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/internal/metadata/generated_metrics_v2.go index 955fccedd1a9..5c067a44f6ae 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/internal/metadata/generated_metrics_v2.go +++ b/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/internal/metadata/generated_metrics_v2.go @@ -6,6 +6,7 @@ import ( "time" "go.opentelemetry.io/collector/model/pdata" + conventions "go.opentelemetry.io/collector/model/semconv/v1.9.0" ) // MetricSettings provides common settings for a particular metric. @@ -189,6 +190,7 @@ type ResourceOption func(pdata.Resource) // just `Emit` function can be called instead. Resource attributes should be provided as ResourceOption arguments. func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { rm := pdata.NewResourceMetrics() + rm.SetSchemaUrl(conventions.SchemaURL) rm.Resource().Attributes().EnsureCapacity(mb.resourceCapacity) for _, op := range ro { op(rm.Resource()) diff --git a/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/metadata.yaml b/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/metadata.yaml index bbc2dc36ac4c..100a622d2c71 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/metadata.yaml +++ b/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/metadata.yaml @@ -1,5 +1,7 @@ name: hostmetricsreceiver/cpu +sem_conv_version: 1.9.0 + attributes: cpu: description: CPU number starting at 0. 
diff --git a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/internal/metadata/generated_metrics_v2.go b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/internal/metadata/generated_metrics_v2.go index 9572dc236bf1..582d838dff2f 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/internal/metadata/generated_metrics_v2.go +++ b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/internal/metadata/generated_metrics_v2.go @@ -6,6 +6,7 @@ import ( "time" "go.opentelemetry.io/collector/model/pdata" + conventions "go.opentelemetry.io/collector/model/semconv/v1.9.0" ) // MetricSettings provides common settings for a particular metric. @@ -488,6 +489,7 @@ type ResourceOption func(pdata.Resource) // just `Emit` function can be called instead. Resource attributes should be provided as ResourceOption arguments. func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { rm := pdata.NewResourceMetrics() + rm.SetSchemaUrl(conventions.SchemaURL) rm.Resource().Attributes().EnsureCapacity(mb.resourceCapacity) for _, op := range ro { op(rm.Resource()) diff --git a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/metadata.yaml b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/metadata.yaml index ecea081a18f8..280bf9e6473b 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/metadata.yaml +++ b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/metadata.yaml @@ -1,5 +1,7 @@ name: hostmetricsreceiver/disk +sem_conv_version: 1.9.0 + attributes: device: description: Name of the disk. diff --git a/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/internal/metadata/generated_metrics_v2.go b/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/internal/metadata/generated_metrics_v2.go index a97eebdfa71c..4658ef6313e5 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/internal/metadata/generated_metrics_v2.go +++ b/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/internal/metadata/generated_metrics_v2.go @@ -6,6 +6,7 @@ import ( "time" "go.opentelemetry.io/collector/model/pdata" + conventions "go.opentelemetry.io/collector/model/semconv/v1.9.0" ) // MetricSettings provides common settings for a particular metric. @@ -257,6 +258,7 @@ type ResourceOption func(pdata.Resource) // just `Emit` function can be called instead. Resource attributes should be provided as ResourceOption arguments. func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { rm := pdata.NewResourceMetrics() + rm.SetSchemaUrl(conventions.SchemaURL) rm.Resource().Attributes().EnsureCapacity(mb.resourceCapacity) for _, op := range ro { op(rm.Resource()) diff --git a/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/metadata.yaml b/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/metadata.yaml index 5082f66d9774..5aa453a44b2b 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/metadata.yaml +++ b/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/metadata.yaml @@ -1,5 +1,7 @@ name: hostmetricsreceiver/filesystem +sem_conv_version: 1.9.0 + attributes: device: description: Identifier of the filesystem. 
diff --git a/receiver/hostmetricsreceiver/internal/scraper/loadscraper/internal/metadata/generated_metrics_v2.go b/receiver/hostmetricsreceiver/internal/scraper/loadscraper/internal/metadata/generated_metrics_v2.go index 6beca4cb8d24..4922334dd715 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/loadscraper/internal/metadata/generated_metrics_v2.go +++ b/receiver/hostmetricsreceiver/internal/scraper/loadscraper/internal/metadata/generated_metrics_v2.go @@ -6,6 +6,7 @@ import ( "time" "go.opentelemetry.io/collector/model/pdata" + conventions "go.opentelemetry.io/collector/model/semconv/v1.9.0" ) // MetricSettings provides common settings for a particular metric. @@ -236,6 +237,7 @@ type ResourceOption func(pdata.Resource) // just `Emit` function can be called instead. Resource attributes should be provided as ResourceOption arguments. func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { rm := pdata.NewResourceMetrics() + rm.SetSchemaUrl(conventions.SchemaURL) rm.Resource().Attributes().EnsureCapacity(mb.resourceCapacity) for _, op := range ro { op(rm.Resource()) diff --git a/receiver/hostmetricsreceiver/internal/scraper/loadscraper/metadata.yaml b/receiver/hostmetricsreceiver/internal/scraper/loadscraper/metadata.yaml index 55a3f1456c10..dcecd7950f40 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/loadscraper/metadata.yaml +++ b/receiver/hostmetricsreceiver/internal/scraper/loadscraper/metadata.yaml @@ -1,5 +1,7 @@ name: hostmetricsreceiver/load +sem_conv_version: 1.9.0 + attributes: metrics: diff --git a/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/internal/metadata/generated_metrics_v2.go b/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/internal/metadata/generated_metrics_v2.go index 3c4703ae6519..70631c51b68c 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/internal/metadata/generated_metrics_v2.go +++ b/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/internal/metadata/generated_metrics_v2.go @@ -6,6 +6,7 @@ import ( "time" "go.opentelemetry.io/collector/model/pdata" + conventions "go.opentelemetry.io/collector/model/semconv/v1.9.0" ) // MetricSettings provides common settings for a particular metric. @@ -187,6 +188,7 @@ type ResourceOption func(pdata.Resource) // just `Emit` function can be called instead. Resource attributes should be provided as ResourceOption arguments. func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { rm := pdata.NewResourceMetrics() + rm.SetSchemaUrl(conventions.SchemaURL) rm.Resource().Attributes().EnsureCapacity(mb.resourceCapacity) for _, op := range ro { op(rm.Resource()) diff --git a/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/metadata.yaml b/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/metadata.yaml index bde1492ec2df..2518abc535f2 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/metadata.yaml +++ b/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/metadata.yaml @@ -1,5 +1,7 @@ name: hostmetricsreceiver/memory +sem_conv_version: 1.9.0 + attributes: state: description: Breakdown of memory usage by type. 
diff --git a/receiver/hostmetricsreceiver/internal/scraper/networkscraper/internal/metadata/generated_metrics_v2.go b/receiver/hostmetricsreceiver/internal/scraper/networkscraper/internal/metadata/generated_metrics_v2.go index 54f645e74a59..1692273ad068 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/networkscraper/internal/metadata/generated_metrics_v2.go +++ b/receiver/hostmetricsreceiver/internal/scraper/networkscraper/internal/metadata/generated_metrics_v2.go @@ -6,6 +6,7 @@ import ( "time" "go.opentelemetry.io/collector/model/pdata" + conventions "go.opentelemetry.io/collector/model/semconv/v1.9.0" ) // MetricSettings provides common settings for a particular metric. @@ -371,6 +372,7 @@ type ResourceOption func(pdata.Resource) // just `Emit` function can be called instead. Resource attributes should be provided as ResourceOption arguments. func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { rm := pdata.NewResourceMetrics() + rm.SetSchemaUrl(conventions.SchemaURL) rm.Resource().Attributes().EnsureCapacity(mb.resourceCapacity) for _, op := range ro { op(rm.Resource()) diff --git a/receiver/hostmetricsreceiver/internal/scraper/networkscraper/metadata.yaml b/receiver/hostmetricsreceiver/internal/scraper/networkscraper/metadata.yaml index 79054ce1a903..9522a8053cdb 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/networkscraper/metadata.yaml +++ b/receiver/hostmetricsreceiver/internal/scraper/networkscraper/metadata.yaml @@ -1,5 +1,7 @@ name: hostmetricsreceiver/network +sem_conv_version: 1.9.0 + attributes: device: description: Name of the network interface. diff --git a/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/internal/metadata/generated_metrics_v2.go b/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/internal/metadata/generated_metrics_v2.go index 02af4fd8dce7..0cc1be7c0efc 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/internal/metadata/generated_metrics_v2.go +++ b/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/internal/metadata/generated_metrics_v2.go @@ -6,6 +6,7 @@ import ( "time" "go.opentelemetry.io/collector/model/pdata" + conventions "go.opentelemetry.io/collector/model/semconv/v1.9.0" ) // MetricSettings provides common settings for a particular metric. @@ -308,6 +309,7 @@ type ResourceOption func(pdata.Resource) // just `Emit` function can be called instead. Resource attributes should be provided as ResourceOption arguments. func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { rm := pdata.NewResourceMetrics() + rm.SetSchemaUrl(conventions.SchemaURL) rm.Resource().Attributes().EnsureCapacity(mb.resourceCapacity) for _, op := range ro { op(rm.Resource()) diff --git a/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/metadata.yaml b/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/metadata.yaml index beb13dc94bc0..e423c3e56dce 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/metadata.yaml +++ b/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/metadata.yaml @@ -1,5 +1,7 @@ name: hostmetricsreceiver/paging +sem_conv_version: 1.9.0 + attributes: device: description: Name of the page file. 
diff --git a/receiver/hostmetricsreceiver/internal/scraper/processesscraper/internal/metadata/generated_metrics_v2.go b/receiver/hostmetricsreceiver/internal/scraper/processesscraper/internal/metadata/generated_metrics_v2.go index 18aad46f4097..22199624fdfb 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/processesscraper/internal/metadata/generated_metrics_v2.go +++ b/receiver/hostmetricsreceiver/internal/scraper/processesscraper/internal/metadata/generated_metrics_v2.go @@ -6,6 +6,7 @@ import ( "time" "go.opentelemetry.io/collector/model/pdata" + conventions "go.opentelemetry.io/collector/model/semconv/v1.9.0" ) // MetricSettings provides common settings for a particular metric. @@ -187,6 +188,7 @@ type ResourceOption func(pdata.Resource) // just `Emit` function can be called instead. Resource attributes should be provided as ResourceOption arguments. func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { rm := pdata.NewResourceMetrics() + rm.SetSchemaUrl(conventions.SchemaURL) rm.Resource().Attributes().EnsureCapacity(mb.resourceCapacity) for _, op := range ro { op(rm.Resource()) diff --git a/receiver/hostmetricsreceiver/internal/scraper/processesscraper/metadata.yaml b/receiver/hostmetricsreceiver/internal/scraper/processesscraper/metadata.yaml index 4ca475c9b398..0151b1fabd71 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/processesscraper/metadata.yaml +++ b/receiver/hostmetricsreceiver/internal/scraper/processesscraper/metadata.yaml @@ -1,5 +1,7 @@ name: hostmetricsreceiver/processes +sem_conv_version: 1.9.0 + attributes: status: description: Breakdown status of the processes. diff --git a/receiver/hostmetricsreceiver/internal/scraper/processscraper/internal/metadata/generated_metrics_v2.go b/receiver/hostmetricsreceiver/internal/scraper/processscraper/internal/metadata/generated_metrics_v2.go index 37d7224ad873..c48ffce8b4e0 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/processscraper/internal/metadata/generated_metrics_v2.go +++ b/receiver/hostmetricsreceiver/internal/scraper/processscraper/internal/metadata/generated_metrics_v2.go @@ -6,6 +6,7 @@ import ( "time" "go.opentelemetry.io/collector/model/pdata" + conventions "go.opentelemetry.io/collector/model/semconv/v1.9.0" ) // MetricSettings provides common settings for a particular metric. @@ -345,6 +346,7 @@ func WithProcessPid(val int64) ResourceOption { // just `Emit` function can be called instead. Resource attributes should be provided as ResourceOption arguments. func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { rm := pdata.NewResourceMetrics() + rm.SetSchemaUrl(conventions.SchemaURL) rm.Resource().Attributes().EnsureCapacity(mb.resourceCapacity) for _, op := range ro { op(rm.Resource()) diff --git a/receiver/hostmetricsreceiver/internal/scraper/processscraper/metadata.yaml b/receiver/hostmetricsreceiver/internal/scraper/processscraper/metadata.yaml index a18ff1002c0b..0979cccfe934 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/processscraper/metadata.yaml +++ b/receiver/hostmetricsreceiver/internal/scraper/processscraper/metadata.yaml @@ -1,5 +1,7 @@ name: hostmetricsreceiver/process +sem_conv_version: 1.9.0 + resource_attributes: process.pid: description: Process identifier (PID). 
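For context before the next patch: the hunks above all follow one pattern — each scraper's metadata.yaml declares sem_conv_version: 1.9.0, and the generated EmitForResource stamps the matching schema URL onto every emitted ResourceMetrics. A minimal, self-contained sketch of that behavior (not part of the patch, assuming only the two collector model packages already imported in the diffs above):

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/model/pdata"
	conventions "go.opentelemetry.io/collector/model/semconv/v1.9.0"
)

func main() {
	// Same call the generated builders now make before attaching
	// resource attributes.
	rm := pdata.NewResourceMetrics()
	rm.SetSchemaUrl(conventions.SchemaURL)

	// Downstream components can read the declared semantic-conventions
	// version back, e.g. to decide whether a schema transformation applies.
	fmt.Println(rm.SchemaUrl())
}
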
From 57369ff6a978dbd577918c72a1e3d812c39bc69f Mon Sep 17 00:00:00 2001 From: Pablo Baeyens Date: Wed, 6 Apr 2022 21:14:33 +0200 Subject: [PATCH 11/59] [exporter/datadog] Deprecate `env` setting (#9017) * [exporter/datadog] Deprecate `env` setting * Add CHANGELOG entry * Remove TODO * Fix issue numbers --- CHANGELOG.md | 1 + exporter/datadogexporter/README.md | 1 - exporter/datadogexporter/config/config.go | 5 +++++ exporter/datadogexporter/example/config.yaml | 10 ++++++---- exporter/datadogexporter/factory_test.go | 6 ++---- exporter/datadogexporter/testdata/config.yaml | 3 +-- 6 files changed, 15 insertions(+), 11 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8f6aa6958f54..4caea0288485 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -30,6 +30,7 @@ - `datadogexporter`: Deprecate `service` setting in favor of `service.name` semantic convention (#8784) - `datadogexporter`: Deprecate `version` setting in favor of `service.version` semantic convention (#8784) +- `datadogexporter`: Deprecate `env` setting in favor of `deployment.environment` semantic convention (#9017) - `datadogexporter`: Deprecate `GetHostTags` method from `TagsConfig` struct (#8975) - `prometheusexecreceiver`: Deprecate prom_exec receiver (#9058) - `fluentbitextension`: Deprecate Fluentbit extension (#9062) diff --git a/exporter/datadogexporter/README.md b/exporter/datadogexporter/README.md index efc5a2a5e780..f1ccda09712d 100644 --- a/exporter/datadogexporter/README.md +++ b/exporter/datadogexporter/README.md @@ -68,7 +68,6 @@ processors: exporters: datadog/api: hostname: customhostname - env: prod tags: - example:tag diff --git a/exporter/datadogexporter/config/config.go b/exporter/datadogexporter/config/config.go index 54da9b35c6ef..967efd466c7d 100644 --- a/exporter/datadogexporter/config/config.go +++ b/exporter/datadogexporter/config/config.go @@ -207,6 +207,8 @@ type TagsConfig struct { Hostname string `mapstructure:"hostname"` // Env is the environment for unified service tagging. + // Deprecated: [v0.49.0] Set `deployment.environment` semconv instead, see https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/9016 for details. + // This option will be removed in v0.52.0. // It can also be set through the `DD_ENV` environment variable (Deprecated: [v0.47.0] set environment variable explicitly on configuration instead). Env string `mapstructure:"env"` @@ -406,6 +408,9 @@ func (c *Config) Unmarshal(configMap *config.Map) error { if c.Version != "" { c.warnings = append(c.warnings, fmt.Errorf(deprecationTemplate, "version", "v0.52.0", 8783)) } + if c.Env != "" { + c.warnings = append(c.warnings, fmt.Errorf(deprecationTemplate, "env", "v0.52.0", 9016)) + } return nil } diff --git a/exporter/datadogexporter/example/config.yaml b/exporter/datadogexporter/example/config.yaml index cf04b5ee25e2..e26fde0143f9 100644 --- a/exporter/datadogexporter/example/config.yaml +++ b/exporter/datadogexporter/example/config.yaml @@ -15,22 +15,24 @@ exporters: ## @param env - string - optional ## The environment for unified service tagging. + ## Deprecated: [v0.49.0] Set `deployment.environment` semconv instead, see https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/9016 for details. + ## This option will be removed in v0.52.0. ## If unset it will be determined from the `DD_ENV` environment variable (Deprecated: [v0.47.0] set environment variable explicitly on configuration instead). 
# # env: prod ## @param service - string - optional ## The service for unified service tagging. - ## Deprecated: [v0.48.0] Set `service.name` semconv instead, see https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/8781 for details. - ## This option will be removed in v0.51.0. + ## Deprecated: [v0.49.0] Set `service.name` semconv instead, see https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/8781 for details. + ## This option will be removed in v0.52.0. ## If unset it will be determined from the `DD_SERVICE` environment variable (Deprecated: [v0.47.0] set environment variable explicitly on configuration instead). # # service: myservice ## @param version - string - optional ## The version for unified service tagging. - ## Deprecated: [v0.48.0] Set `service.version` semconv instead, see https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/8783 for details. - ## This option will be removed in v0.51.0. + ## Deprecated: [v0.49.0] Set `service.version` semconv instead, see https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/8783 for details. + ## This option will be removed in v0.52.0. ## If unset it will be determined from the `DD_VERSION` environment variable (Deprecated: [v0.47.0] set environment variable explicitly on configuration instead). # # version: myversion diff --git a/exporter/datadogexporter/factory_test.go b/exporter/datadogexporter/factory_test.go index a59e2743a6e7..d0a20cb0373e 100644 --- a/exporter/datadogexporter/factory_test.go +++ b/exporter/datadogexporter/factory_test.go @@ -233,7 +233,6 @@ func TestLoadConfig(t *testing.T) { func TestLoadConfigEnvVariables(t *testing.T) { assert.NoError(t, os.Setenv("DD_API_KEY", "replacedapikey")) assert.NoError(t, os.Setenv("DD_HOST", "testhost")) - assert.NoError(t, os.Setenv("DD_ENV", "testenv")) assert.NoError(t, os.Setenv("DD_SITE", "datadoghq.test")) assert.NoError(t, os.Setenv("DD_TAGS", "envexample:tag envexample2:tag")) assert.NoError(t, os.Setenv("DD_URL", "https://api.datadoghq.com")) @@ -242,7 +241,6 @@ func TestLoadConfigEnvVariables(t *testing.T) { defer func() { assert.NoError(t, os.Unsetenv("DD_API_KEY")) assert.NoError(t, os.Unsetenv("DD_HOST")) - assert.NoError(t, os.Unsetenv("DD_ENV")) assert.NoError(t, os.Unsetenv("DD_SITE")) assert.NoError(t, os.Unsetenv("DD_TAGS")) assert.NoError(t, os.Unsetenv("DD_URL")) @@ -270,7 +268,7 @@ func TestLoadConfigEnvVariables(t *testing.T) { assert.Equal(t, exporterhelper.NewDefaultQueueSettings(), apiConfig.QueueSettings) assert.Equal(t, ddconfig.TagsConfig{ Hostname: "customhostname", - Env: "prod", + Env: "none", EnvVarTags: "envexample:tag envexample2:tag", Tags: []string{"example:tag"}, }, apiConfig.TagsConfig) @@ -317,7 +315,7 @@ func TestLoadConfigEnvVariables(t *testing.T) { assert.Equal(t, exporterhelper.NewDefaultQueueSettings(), defaultConfig.QueueSettings) assert.Equal(t, ddconfig.TagsConfig{ Hostname: "testhost", - Env: "testenv", + Env: "none", EnvVarTags: "envexample:tag envexample2:tag", }, defaultConfig.TagsConfig) assert.Equal(t, ddconfig.APIConfig{ diff --git a/exporter/datadogexporter/testdata/config.yaml b/exporter/datadogexporter/testdata/config.yaml index d783c7e76f94..69179e2de6ce 100644 --- a/exporter/datadogexporter/testdata/config.yaml +++ b/exporter/datadogexporter/testdata/config.yaml @@ -7,6 +7,7 @@ processors: exporters: datadog/api: hostname: customhostname + # Deprecated; kept here to avoid regressions. 
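+    # The replacement is the `deployment.environment` resource attribute; see
+    # https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/9016.
+    # `env` stays in this fixture only to exercise the deprecated path until
+    # its removal in v0.52.0.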
env: prod # Deprecated; kept here to avoid regressions. service: myservice @@ -25,8 +26,6 @@ exporters: datadog/api2: hostname: customhostname - env: prod - tags: - example:tag From 211079fbfd70bcdb45e0d1dbfdd63b2554d9ad87 Mon Sep 17 00:00:00 2001 From: Anuraag Agrawal Date: Thu, 7 Apr 2022 21:54:51 +0900 Subject: [PATCH 12/59] [processor/transform] Move function handling logic to common and generify transform context. (#8972) * Move function handling logic to common and generify transform context. * Fix * Keep traces functions --- processor/transformprocessor/config.go | 3 +- processor/transformprocessor/config_test.go | 4 +- processor/transformprocessor/factory_test.go | 4 +- .../internal/{traces => common}/condition.go | 26 +- .../{traces => common}/condition_test.go | 99 ++++--- .../internal/common/expression.go | 86 ++++++ .../{traces => common}/expression_test.go | 32 ++- .../internal/common/functions.go | 127 +++++++++ .../internal/common/functions_test.go | 122 +++++++++ .../internal/common/parser.go | 42 ++- .../internal/common/parser_test.go | 4 +- .../internal/traces/expression.go | 88 ------ .../internal/traces/functions.go | 106 +------- .../internal/traces/functions_test.go | 101 +------ .../internal/traces/processor.go | 48 +--- .../internal/traces/traces.go | 250 +++++++++--------- .../internal/traces/traces_test.go | 12 +- 17 files changed, 627 insertions(+), 527 deletions(-) rename processor/transformprocessor/internal/{traces => common}/condition.go (58%) rename processor/transformprocessor/internal/{traces => common}/condition_test.go (53%) create mode 100644 processor/transformprocessor/internal/common/expression.go rename processor/transformprocessor/internal/{traces => common}/expression_test.go (75%) create mode 100644 processor/transformprocessor/internal/common/functions.go create mode 100644 processor/transformprocessor/internal/common/functions_test.go delete mode 100644 processor/transformprocessor/internal/traces/expression.go diff --git a/processor/transformprocessor/config.go b/processor/transformprocessor/config.go index e2cf1379c3d7..f45a6f3859fc 100644 --- a/processor/transformprocessor/config.go +++ b/processor/transformprocessor/config.go @@ -17,6 +17,7 @@ package transformprocessor // import "github.com/open-telemetry/opentelemetry-co import ( "go.opentelemetry.io/collector/config" + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/common" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/traces" ) @@ -36,6 +37,6 @@ type Config struct { var _ config.Processor = (*Config)(nil) func (c *Config) Validate() error { - _, err := traces.Parse(c.Traces.Queries, c.Traces.functions) + _, err := common.ParseQueries(c.Traces.Queries, c.Traces.functions, traces.ParsePath) return err } diff --git a/processor/transformprocessor/config_test.go b/processor/transformprocessor/config_test.go index 568bc5b3f5af..541fa0010e15 100644 --- a/processor/transformprocessor/config_test.go +++ b/processor/transformprocessor/config_test.go @@ -24,7 +24,7 @@ import ( "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/service/servicetest" - "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/traces" + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/common" ) func TestLoadingConfig(t *testing.T) { @@ -46,7 +46,7 @@ func TestLoadingConfig(t 
*testing.T) { `keep_keys(attributes, "http.method", "http.path")`, }, - functions: traces.DefaultFunctions(), + functions: common.DefaultFunctions(), }, }) } diff --git a/processor/transformprocessor/factory_test.go b/processor/transformprocessor/factory_test.go index e94384b7580a..410b8d9e991a 100644 --- a/processor/transformprocessor/factory_test.go +++ b/processor/transformprocessor/factory_test.go @@ -25,7 +25,7 @@ import ( "go.opentelemetry.io/collector/consumer/consumertest" "go.opentelemetry.io/collector/model/pdata" - "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/traces" + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/common" ) func TestFactory_Type(t *testing.T) { @@ -41,7 +41,7 @@ func TestFactory_CreateDefaultConfig(t *testing.T) { Traces: TracesConfig{ Queries: []string{}, - functions: traces.DefaultFunctions(), + functions: common.DefaultFunctions(), }, }) assert.NoError(t, configtest.CheckConfigStruct(cfg)) diff --git a/processor/transformprocessor/internal/traces/condition.go b/processor/transformprocessor/internal/common/condition.go similarity index 58% rename from processor/transformprocessor/internal/traces/condition.go rename to processor/transformprocessor/internal/common/condition.go index 9e997f050523..56ed9dc20964 100644 --- a/processor/transformprocessor/internal/traces/condition.go +++ b/processor/transformprocessor/internal/common/condition.go @@ -12,29 +12,27 @@ // See the License for the specific language governing permissions and // limitations under the License. -package traces // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/traces" +package common // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/common" import ( "fmt" - - "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/common" ) -type condFunc = func(ctx spanTransformContext) bool +type condFunc = func(ctx TransformContext) bool -var alwaysTrue = func(ctx spanTransformContext) bool { +var alwaysTrue = func(ctx TransformContext) bool { return true } -func newConditionEvaluator(cond *common.Condition, functions map[string]interface{}) (condFunc, error) { +func newConditionEvaluator(cond *Condition, functions map[string]interface{}, pathParser PathExpressionParser) (condFunc, error) { if cond == nil { return alwaysTrue, nil } - left, err := newGetter(cond.Left, functions) + left, err := NewGetter(cond.Left, functions, pathParser) if err != nil { return nil, err } - right, err := newGetter(cond.Right, functions) + right, err := NewGetter(cond.Right, functions, pathParser) // TODO(anuraaga): Check if both left and right are literals and const-evaluate if err != nil { return nil, err @@ -42,15 +40,15 @@ func newConditionEvaluator(cond *common.Condition, functions map[string]interfac switch cond.Op { case "==": - return func(ctx spanTransformContext) bool { - a := left.get(ctx) - b := right.get(ctx) + return func(ctx TransformContext) bool { + a := left.Get(ctx) + b := right.Get(ctx) return a == b }, nil case "!=": - return func(ctx spanTransformContext) bool { - a := left.get(ctx) - b := right.get(ctx) + return func(ctx TransformContext) bool { + a := left.Get(ctx) + b := right.Get(ctx) return a != b }, nil } diff --git a/processor/transformprocessor/internal/traces/condition_test.go 
b/processor/transformprocessor/internal/common/condition_test.go similarity index 53% rename from processor/transformprocessor/internal/traces/condition_test.go rename to processor/transformprocessor/internal/common/condition_test.go index bc9184248192..5b42e27de725 100644 --- a/processor/transformprocessor/internal/traces/condition_test.go +++ b/processor/transformprocessor/internal/common/condition_test.go @@ -12,15 +12,14 @@ // See the License for the specific language governing permissions and // limitations under the License. -package traces +package common import ( + "fmt" "testing" "github.com/stretchr/testify/assert" "go.opentelemetry.io/collector/model/pdata" - - "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/common" ) func Test_newConditionEvaluator(t *testing.T) { @@ -28,16 +27,16 @@ func Test_newConditionEvaluator(t *testing.T) { span.SetName("bear") tests := []struct { name string - cond *common.Condition + cond *Condition matching pdata.Span }{ { name: "literals match", - cond: &common.Condition{ - Left: common.Value{ + cond: &Condition{ + Left: Value{ String: strp("hello"), }, - Right: common.Value{ + Right: Value{ String: strp("hello"), }, Op: "==", @@ -46,11 +45,11 @@ func Test_newConditionEvaluator(t *testing.T) { }, { name: "literals don't match", - cond: &common.Condition{ - Left: common.Value{ + cond: &Condition{ + Left: Value{ String: strp("hello"), }, - Right: common.Value{ + Right: Value{ String: strp("goodbye"), }, Op: "!=", @@ -59,17 +58,17 @@ func Test_newConditionEvaluator(t *testing.T) { }, { name: "path expression matches", - cond: &common.Condition{ - Left: common.Value{ - Path: &common.Path{ - Fields: []common.Field{ + cond: &Condition{ + Left: Value{ + Path: &Path{ + Fields: []Field{ { Name: "name", }, }, }, }, - Right: common.Value{ + Right: Value{ String: strp("bear"), }, Op: "==", @@ -78,17 +77,17 @@ func Test_newConditionEvaluator(t *testing.T) { }, { name: "path expression not matches", - cond: &common.Condition{ - Left: common.Value{ - Path: &common.Path{ - Fields: []common.Field{ + cond: &Condition{ + Left: Value{ + Path: &Path{ + Fields: []Field{ { Name: "name", }, }, }, }, - Right: common.Value{ + Right: Value{ String: strp("cat"), }, Op: "!=", @@ -103,9 +102,9 @@ func Test_newConditionEvaluator(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - evaluate, err := newConditionEvaluator(tt.cond, DefaultFunctions()) + evaluate, err := newConditionEvaluator(tt.cond, DefaultFunctions(), testParsePath) assert.NoError(t, err) - assert.True(t, evaluate(spanTransformContext{ + assert.True(t, evaluate(testTransformContext{ span: tt.matching, il: pdata.NewInstrumentationScope(), resource: pdata.NewResource(), @@ -114,27 +113,63 @@ func Test_newConditionEvaluator(t *testing.T) { } t.Run("invalid", func(t *testing.T) { - _, err := newConditionEvaluator(&common.Condition{ - Left: common.Value{ + _, err := newConditionEvaluator(&Condition{ + Left: Value{ String: strp("bear"), }, Op: "<>", - Right: common.Value{ + Right: Value{ String: strp("cat"), }, - }, DefaultFunctions()) + }, DefaultFunctions(), testParsePath) assert.Error(t, err) }) } -func strp(s string) *string { - return &s +// Small copy of traces data model for use in common tests + +type testTransformContext struct { + span pdata.Span + il pdata.InstrumentationScope + resource pdata.Resource +} + +func (ctx testTransformContext) GetItem() interface{} { + return ctx.span +} + +func (ctx testTransformContext) 
GetInstrumentationScope() pdata.InstrumentationScope { + return ctx.il +} + +func (ctx testTransformContext) GetResource() pdata.Resource { + return ctx.resource } -func intp(i int64) *int64 { - return &i +// pathGetSetter is a getSetter which has been resolved using a path expression provided by a user. +type testGetSetter struct { + getter ExprFunc + setter func(ctx TransformContext, val interface{}) } -func floatp(f float64) *float64 { - return &f +func (path testGetSetter) Get(ctx TransformContext) interface{} { + return path.getter(ctx) +} + +func (path testGetSetter) Set(ctx TransformContext, val interface{}) { + path.setter(ctx, val) +} + +func testParsePath(val *Path) (GetSetter, error) { + if val != nil && len(val.Fields) > 0 && val.Fields[0].Name == "name" { + return &testGetSetter{ + getter: func(ctx TransformContext) interface{} { + return ctx.GetItem().(pdata.Span).Name() + }, + setter: func(ctx TransformContext, val interface{}) { + ctx.GetItem().(pdata.Span).SetName(val.(string)) + }, + }, nil + } + return nil, fmt.Errorf("bad path %v", val) } diff --git a/processor/transformprocessor/internal/common/expression.go b/processor/transformprocessor/internal/common/expression.go new file mode 100644 index 000000000000..d88a0db1a24a --- /dev/null +++ b/processor/transformprocessor/internal/common/expression.go @@ -0,0 +1,86 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package common // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/common" + +import ( + "fmt" + + "go.opentelemetry.io/collector/model/pdata" +) + +type TransformContext interface { + GetItem() interface{} + GetInstrumentationScope() pdata.InstrumentationScope + GetResource() pdata.Resource +} + +type ExprFunc func(ctx TransformContext) interface{} + +type Getter interface { + Get(ctx TransformContext) interface{} +} + +type Setter interface { + Set(ctx TransformContext, val interface{}) +} + +type GetSetter interface { + Getter + Setter +} + +type literal struct { + value interface{} +} + +func (l literal) Get(ctx TransformContext) interface{} { + return l.value +} + +type exprGetter struct { + expr ExprFunc +} + +func (g exprGetter) Get(ctx TransformContext) interface{} { + return g.expr(ctx) +} + +func NewGetter(val Value, functions map[string]interface{}, pathParser PathExpressionParser) (Getter, error) { + if s := val.String; s != nil { + return &literal{value: *s}, nil + } + if f := val.Float; f != nil { + return &literal{value: *f}, nil + } + if i := val.Int; i != nil { + return &literal{value: *i}, nil + } + + if val.Path != nil { + return pathParser(val.Path) + } + + if val.Invocation == nil { + // In practice, can't happen since the DSL grammar guarantees one is set + return nil, fmt.Errorf("no value field set. 
This is a bug in the transformprocessor") + } + call, err := NewFunctionCall(*val.Invocation, functions, pathParser) + if err != nil { + return nil, err + } + return &exprGetter{ + expr: call, + }, nil +} diff --git a/processor/transformprocessor/internal/traces/expression_test.go b/processor/transformprocessor/internal/common/expression_test.go similarity index 75% rename from processor/transformprocessor/internal/traces/expression_test.go rename to processor/transformprocessor/internal/common/expression_test.go index 63c1078f3071..c715b224b46b 100644 --- a/processor/transformprocessor/internal/traces/expression_test.go +++ b/processor/transformprocessor/internal/common/expression_test.go @@ -12,19 +12,17 @@ // See the License for the specific language governing permissions and // limitations under the License. -package traces +package common import ( "testing" "github.com/stretchr/testify/assert" "go.opentelemetry.io/collector/model/pdata" - - "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/common" ) -func hello() exprFunc { - return func(ctx spanTransformContext) interface{} { +func hello() ExprFunc { + return func(ctx TransformContext) interface{} { return "world" } } @@ -34,35 +32,35 @@ func Test_newGetter(t *testing.T) { span.SetName("bear") tests := []struct { name string - val common.Value + val Value want interface{} }{ { name: "string literal", - val: common.Value{ + val: Value{ String: strp("str"), }, want: "str", }, { name: "float literal", - val: common.Value{ + val: Value{ Float: floatp(1.2), }, want: 1.2, }, { name: "int literal", - val: common.Value{ + val: Value{ Int: intp(12), }, want: int64(12), }, { name: "path expression", - val: common.Value{ - Path: &common.Path{ - Fields: []common.Field{ + val: Value{ + Path: &Path{ + Fields: []Field{ { Name: "name", }, @@ -73,8 +71,8 @@ func Test_newGetter(t *testing.T) { }, { name: "function call", - val: common.Value{ - Invocation: &common.Invocation{ + val: Value{ + Invocation: &Invocation{ Function: "hello", }, }, @@ -86,9 +84,9 @@ func Test_newGetter(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - reader, err := newGetter(tt.val, functions) + reader, err := NewGetter(tt.val, functions, testParsePath) assert.NoError(t, err) - val := reader.get(spanTransformContext{ + val := reader.Get(testTransformContext{ span: span, il: pdata.NewInstrumentationScope(), resource: pdata.NewResource(), @@ -98,7 +96,7 @@ func Test_newGetter(t *testing.T) { } t.Run("empty value", func(t *testing.T) { - _, err := newGetter(common.Value{}, functions) + _, err := NewGetter(Value{}, functions, testParsePath) assert.Error(t, err) }) } diff --git a/processor/transformprocessor/internal/common/functions.go b/processor/transformprocessor/internal/common/functions.go new file mode 100644 index 000000000000..d24d735619b3 --- /dev/null +++ b/processor/transformprocessor/internal/common/functions.go @@ -0,0 +1,127 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package common // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/common" + +import ( + "fmt" + "reflect" + + "go.opentelemetry.io/collector/model/pdata" +) + +var registry = map[string]interface{}{ + "keep_keys": keepKeys, + "set": set, +} + +type PathExpressionParser func(*Path) (GetSetter, error) + +func DefaultFunctions() map[string]interface{} { + return registry +} + +func set(target Setter, value Getter) ExprFunc { + return func(ctx TransformContext) interface{} { + val := value.Get(ctx) + if val != nil { + target.Set(ctx, val) + } + return nil + } +} + +func keepKeys(target GetSetter, keys []string) ExprFunc { + keySet := make(map[string]struct{}, len(keys)) + for _, key := range keys { + keySet[key] = struct{}{} + } + + return func(ctx TransformContext) interface{} { + val := target.Get(ctx) + if val == nil { + return nil + } + + if attrs, ok := val.(pdata.Map); ok { + // TODO(anuraaga): Avoid copying when filtering keys https://github.com/open-telemetry/opentelemetry-collector/issues/4756 + filtered := pdata.NewMap() + filtered.EnsureCapacity(attrs.Len()) + attrs.Range(func(key string, val pdata.Value) bool { + if _, ok := keySet[key]; ok { + filtered.Insert(key, val) + } + return true + }) + target.Set(ctx, filtered) + } + return nil + } +} + +// TODO(anuraaga): See if reflection can be avoided without complicating definition of transform functions. +// Visible for testing +func NewFunctionCall(inv Invocation, functions map[string]interface{}, pathParser PathExpressionParser) (ExprFunc, error) { + if f, ok := functions[inv.Function]; ok { + fType := reflect.TypeOf(f) + args := make([]reflect.Value, 0) + for i := 0; i < fType.NumIn(); i++ { + argType := fType.In(i) + + if argType.Kind() == reflect.Slice { + switch argType.Elem().Kind() { + case reflect.String: + arg := make([]string, 0) + for j := i; j < len(inv.Arguments); j++ { + if inv.Arguments[j].String == nil { + return nil, fmt.Errorf("invalid argument for slice parameter at position %v, must be string", j) + } + arg = append(arg, *inv.Arguments[j].String) + } + args = append(args, reflect.ValueOf(arg)) + default: + return nil, fmt.Errorf("unsupported slice type for function %v", inv.Function) + } + continue + } + + if i >= len(inv.Arguments) { + return nil, fmt.Errorf("not enough arguments for function %v", inv.Function) + } + argDef := inv.Arguments[i] + switch argType.Name() { + case "Setter": + fallthrough + case "GetSetter": + arg, err := pathParser(argDef.Path) + if err != nil { + return nil, fmt.Errorf("invalid argument at position %v %w", i, err) + } + args = append(args, reflect.ValueOf(arg)) + continue + case "Getter": + arg, err := NewGetter(argDef, functions, pathParser) + if err != nil { + return nil, fmt.Errorf("invalid argument at position %v %w", i, err) + } + args = append(args, reflect.ValueOf(arg)) + continue + } + } + val := reflect.ValueOf(f) + ret := val.Call(args) + return ret[0].Interface().(ExprFunc), nil + } + return nil, fmt.Errorf("undefined function %v", inv.Function) +} diff --git a/processor/transformprocessor/internal/common/functions_test.go b/processor/transformprocessor/internal/common/functions_test.go new file mode 100644 index 000000000000..98d3636e5936 --- /dev/null +++ b/processor/transformprocessor/internal/common/functions_test.go @@ -0,0 +1,122 @@ +// Copyright The OpenTelemetry Authors +// +// 
Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package common + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +// Test for valid functions are in internal/traces/functions_test.go as there are many different data model cases. + +func Test_newFunctionCall_invalid(t *testing.T) { + tests := []struct { + name string + inv Invocation + }{ + { + name: "unknown function", + inv: Invocation{ + Function: "unknownfunc", + Arguments: []Value{}, + }, + }, + { + name: "not accessor", + inv: Invocation{ + Function: "set", + Arguments: []Value{ + { + String: strp("not path"), + }, + { + String: strp("cat"), + }, + }, + }, + }, + { + name: "not reader (invalid function)", + inv: Invocation{ + Function: "set", + Arguments: []Value{ + { + Path: &Path{ + Fields: []Field{ + { + Name: "name", + }, + }, + }, + }, + { + Invocation: &Invocation{ + Function: "unknownfunc", + }, + }, + }, + }, + }, + { + name: "not enough args", + inv: Invocation{ + Function: "set", + Arguments: []Value{ + { + Path: &Path{ + Fields: []Field{ + { + Name: "name", + }, + }, + }, + }, + { + Invocation: &Invocation{ + Function: "unknownfunc", + }, + }, + }, + }, + }, + { + name: "not matching slice type", + inv: Invocation{ + Function: "keep_keys", + Arguments: []Value{ + { + Path: &Path{ + Fields: []Field{ + { + Name: "attributes", + }, + }, + }, + }, + { + Int: intp(10), + }, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + _, err := NewFunctionCall(tt.inv, DefaultFunctions(), testParsePath) + assert.Error(t, err) + }) + } +} diff --git a/processor/transformprocessor/internal/common/parser.go b/processor/transformprocessor/internal/common/parser.go index 4626082524d3..8272880c2c0e 100644 --- a/processor/transformprocessor/internal/common/parser.go +++ b/processor/transformprocessor/internal/common/parser.go @@ -17,6 +17,7 @@ package common // import "github.com/open-telemetry/opentelemetry-collector-cont import ( "github.com/alecthomas/participle/v2" "github.com/alecthomas/participle/v2/lexer" + "go.uber.org/multierr" ) // ParsedQuery represents a parsed query. It is the entry point into the query DSL. @@ -65,9 +66,48 @@ type Field struct { MapKey *string `( "[" @String "]" )?` } +// Query holds a top level Query for processing telemetry data. A Query is a combination of a function +// invocation and the condition to match telemetry for invoking the function. 
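+// At processing time the signal-specific processor walks each telemetry item,
+// evaluates Condition against it, and applies Function only on a match (see
+// the ProcessTraces loop in internal/traces/processor.go later in this patch).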
+type Query struct { + Function ExprFunc + Condition condFunc +} + +func ParseQueries(statements []string, functions map[string]interface{}, pathParser PathExpressionParser) ([]Query, error) { + queries := make([]Query, 0) + var errors error + + for _, statement := range statements { + parsed, err := parseQuery(statement) + if err != nil { + errors = multierr.Append(errors, err) + continue + } + function, err := NewFunctionCall(parsed.Invocation, functions, pathParser) + if err != nil { + errors = multierr.Append(errors, err) + continue + } + condition, err := newConditionEvaluator(parsed.Condition, functions, pathParser) + if err != nil { + errors = multierr.Append(errors, err) + continue + } + queries = append(queries, Query{ + Function: function, + Condition: condition, + }) + } + + if errors != nil { + return nil, errors + } + return queries, nil +} + var parser = newParser() -func Parse(raw string) (*ParsedQuery, error) { +func parseQuery(raw string) (*ParsedQuery, error) { parsed := &ParsedQuery{} err := parser.ParseString("", raw, parsed) if err != nil { diff --git a/processor/transformprocessor/internal/common/parser_test.go b/processor/transformprocessor/internal/common/parser_test.go index 21d63023dd69..cf55e5162d3f 100644 --- a/processor/transformprocessor/internal/common/parser_test.go +++ b/processor/transformprocessor/internal/common/parser_test.go @@ -280,7 +280,7 @@ func Test_parse(t *testing.T) { for _, tt := range tests { t.Run(tt.query, func(t *testing.T) { - parsed, err := Parse(tt.query) + parsed, err := parseQuery(tt.query) assert.NoError(t, err) assert.Equal(t, tt.expected, parsed) }) @@ -297,7 +297,7 @@ func Test_parse_failure(t *testing.T) { } for _, tt := range tests { t.Run(tt, func(t *testing.T) { - _, err := Parse(tt) + _, err := parseQuery(tt) assert.Error(t, err) }) } diff --git a/processor/transformprocessor/internal/traces/expression.go b/processor/transformprocessor/internal/traces/expression.go deleted file mode 100644 index da106a26d4fc..000000000000 --- a/processor/transformprocessor/internal/traces/expression.go +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package traces // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/traces" - -import ( - "fmt" - - "go.opentelemetry.io/collector/model/pdata" - - "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/common" -) - -type spanTransformContext struct { - span pdata.Span - il pdata.InstrumentationScope - resource pdata.Resource -} - -type exprFunc func(ctx spanTransformContext) interface{} - -// getter allows reading a value while processing traces. Note that data is not necessarily read from input -// telemetry but may be a literal value or a function invocation. -type getter interface { - get(ctx spanTransformContext) interface{} -} - -// setter allows writing a value to trace data. 
-type setter interface { - set(ctx spanTransformContext, val interface{}) -} - -// getSetter allows reading or writing a value to trace data. -type getSetter interface { - getter - setter -} - -// literal holds a literal value defined as part of a Query. It does not read from telemetry data. -type literal struct { - value interface{} -} - -func (l literal) get(ctx spanTransformContext) interface{} { - return l.value -} - -func newGetter(val common.Value, functions map[string]interface{}) (getter, error) { - if s := val.String; s != nil { - return &literal{value: *s}, nil - } - if f := val.Float; f != nil { - return &literal{value: *f}, nil - } - if i := val.Int; i != nil { - return &literal{value: *i}, nil - } - - if val.Path != nil { - return newPathGetSetter(val.Path.Fields) - } - - if val.Invocation == nil { - // In practice, can't happen since the DSL grammar guarantees one is set - return nil, fmt.Errorf("no value field set. This is a bug in the transformprocessor") - } - - call, err := newFunctionCall(*val.Invocation, functions) - if err != nil { - return nil, err - } - return &pathGetSetter{ - getter: func(ctx spanTransformContext) interface{} { - return call(ctx) - }, - }, nil -} diff --git a/processor/transformprocessor/internal/traces/functions.go b/processor/transformprocessor/internal/traces/functions.go index 061136aea745..b438e82e6fcd 100644 --- a/processor/transformprocessor/internal/traces/functions.go +++ b/processor/transformprocessor/internal/traces/functions.go @@ -15,112 +15,10 @@ package traces // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/traces" import ( - "fmt" - "reflect" - - "go.opentelemetry.io/collector/model/pdata" - "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/common" ) -var registry = map[string]interface{}{ - "keep_keys": keepKeys, - "set": set, -} - func DefaultFunctions() map[string]interface{} { - return registry -} - -func set(target setter, value getter) exprFunc { - return func(ctx spanTransformContext) interface{} { - val := value.get(ctx) - if val != nil { - target.set(ctx, val) - } - return nil - } -} - -func keepKeys(target getSetter, keys []string) exprFunc { - keySet := make(map[string]struct{}, len(keys)) - for _, key := range keys { - keySet[key] = struct{}{} - } - - return func(ctx spanTransformContext) interface{} { - val := target.get(ctx) - if val == nil { - return nil - } - - if attrs, ok := val.(pdata.Map); ok { - // TODO(anuraaga): Avoid copying when filtering keys https://github.com/open-telemetry/opentelemetry-collector/issues/4756 - filtered := pdata.NewMap() - filtered.EnsureCapacity(attrs.Len()) - attrs.Range(func(key string, val pdata.Value) bool { - if _, ok := keySet[key]; ok { - filtered.Insert(key, val) - } - return true - }) - target.set(ctx, filtered) - } - return nil - } -} - -// TODO(anuraaga): See if reflection can be avoided without complicating definition of transform functions. 
-func newFunctionCall(inv common.Invocation, functions map[string]interface{}) (exprFunc, error) { - if f, ok := functions[inv.Function]; ok { - fType := reflect.TypeOf(f) - args := make([]reflect.Value, 0) - for i := 0; i < fType.NumIn(); i++ { - argType := fType.In(i) - - if argType.Kind() == reflect.Slice { - switch argType.Elem().Kind() { - case reflect.String: - arg := make([]string, 0) - for j := i; j < len(inv.Arguments); j++ { - if inv.Arguments[j].String == nil { - return nil, fmt.Errorf("invalid argument for slice parameter at position %v, must be string", j) - } - arg = append(arg, *inv.Arguments[j].String) - } - args = append(args, reflect.ValueOf(arg)) - default: - return nil, fmt.Errorf("unsupported slice type for function %v", inv.Function) - } - continue - } - - if i >= len(inv.Arguments) { - return nil, fmt.Errorf("not enough arguments for function %v", inv.Function) - } - argDef := inv.Arguments[i] - switch argType.Name() { - case "setter": - fallthrough - case "getSetter": - arg, err := newGetSetter(argDef) - if err != nil { - return nil, fmt.Errorf("invalid argument at position %v %w", i, err) - } - args = append(args, reflect.ValueOf(arg)) - continue - case "getter": - arg, err := newGetter(argDef, functions) - if err != nil { - return nil, fmt.Errorf("invalid argument at position %v %w", i, err) - } - args = append(args, reflect.ValueOf(arg)) - continue - } - } - val := reflect.ValueOf(f) - ret := val.Call(args) - return ret[0].Interface().(exprFunc), nil - } - return nil, fmt.Errorf("undefined function %v", inv.Function) + // No trace-only functions yet. + return common.DefaultFunctions() } diff --git a/processor/transformprocessor/internal/traces/functions_test.go b/processor/transformprocessor/internal/traces/functions_test.go index 99eec68586f9..33acb171d095 100644 --- a/processor/transformprocessor/internal/traces/functions_test.go +++ b/processor/transformprocessor/internal/traces/functions_test.go @@ -173,7 +173,7 @@ func Test_newFunctionCall(t *testing.T) { span := pdata.NewSpan() input.CopyTo(span) - evaluate, err := newFunctionCall(tt.inv, DefaultFunctions()) + evaluate, err := common.NewFunctionCall(tt.inv, DefaultFunctions(), ParsePath) assert.NoError(t, err) evaluate(spanTransformContext{ span: span, @@ -187,102 +187,3 @@ func Test_newFunctionCall(t *testing.T) { }) } } - -func Test_newFunctionCall_invalid(t *testing.T) { - tests := []struct { - name string - inv common.Invocation - }{ - { - name: "unknown function", - inv: common.Invocation{ - Function: "unknownfunc", - Arguments: []common.Value{}, - }, - }, - { - name: "not trace accessor", - inv: common.Invocation{ - Function: "set", - Arguments: []common.Value{ - { - String: strp("not path"), - }, - { - String: strp("cat"), - }, - }, - }, - }, - { - name: "not trace reader (invalid function)", - inv: common.Invocation{ - Function: "set", - Arguments: []common.Value{ - { - Path: &common.Path{ - Fields: []common.Field{ - { - Name: "name", - }, - }, - }, - }, - { - Invocation: &common.Invocation{ - Function: "unknownfunc", - }, - }, - }, - }, - }, - { - name: "not enough args", - inv: common.Invocation{ - Function: "set", - Arguments: []common.Value{ - { - Path: &common.Path{ - Fields: []common.Field{ - { - Name: "name", - }, - }, - }, - }, - { - Invocation: &common.Invocation{ - Function: "unknownfunc", - }, - }, - }, - }, - }, - { - name: "not matching slice type", - inv: common.Invocation{ - Function: "keep_keys", - Arguments: []common.Value{ - { - Path: &common.Path{ - Fields: []common.Field{ - { - 
Name: "attributes", - }, - }, - }, - }, - { - Int: intp(10), - }, - }, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - _, err := newFunctionCall(tt.inv, DefaultFunctions()) - assert.Error(t, err) - }) - } -} diff --git a/processor/transformprocessor/internal/traces/processor.go b/processor/transformprocessor/internal/traces/processor.go index f3e73c83bba2..cdf68ace12fe 100644 --- a/processor/transformprocessor/internal/traces/processor.go +++ b/processor/transformprocessor/internal/traces/processor.go @@ -19,26 +19,18 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/model/pdata" - "go.uber.org/multierr" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/common" ) type Processor struct { - queries []Query + queries []common.Query logger *zap.Logger } -// Query holds a top level Query for processing trace data. A Query is a combination of a function -// invocation and the condition to match telemetry for invoking the function. -type Query struct { - function exprFunc - condition condFunc -} - func NewProcessor(statements []string, functions map[string]interface{}, settings component.ProcessorCreateSettings) (*Processor, error) { - queries, err := Parse(statements, functions) + queries, err := common.ParseQueries(statements, functions, ParsePath) if err != nil { return nil, err } @@ -62,8 +54,8 @@ func (p *Processor) ProcessTraces(_ context.Context, td pdata.Traces) (pdata.Tra ctx.span = span for _, statement := range p.queries { - if statement.condition(ctx) { - statement.function(ctx) + if statement.Condition(ctx) { + statement.Function(ctx) } } } @@ -71,35 +63,3 @@ func (p *Processor) ProcessTraces(_ context.Context, td pdata.Traces) (pdata.Tra } return td, nil } - -func Parse(statements []string, functions map[string]interface{}) ([]Query, error) { - queries := make([]Query, 0) - var errors error - - for _, statement := range statements { - parsed, err := common.Parse(statement) - if err != nil { - errors = multierr.Append(errors, err) - continue - } - function, err := newFunctionCall(parsed.Invocation, functions) - if err != nil { - errors = multierr.Append(errors, err) - continue - } - condition, err := newConditionEvaluator(parsed.Condition, functions) - if err != nil { - errors = multierr.Append(errors, err) - continue - } - queries = append(queries, Query{ - function: function, - condition: condition, - }) - } - - if errors != nil { - return nil, errors - } - return queries, nil -} diff --git a/processor/transformprocessor/internal/traces/traces.go b/processor/transformprocessor/internal/traces/traces.go index d09cb4f3e202..fcff3ef966fa 100644 --- a/processor/transformprocessor/internal/traces/traces.go +++ b/processor/transformprocessor/internal/traces/traces.go @@ -24,29 +24,43 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/common" ) +type spanTransformContext struct { + span pdata.Span + il pdata.InstrumentationScope + resource pdata.Resource +} + +func (ctx spanTransformContext) GetItem() interface{} { + return ctx.span +} + +func (ctx spanTransformContext) GetInstrumentationScope() pdata.InstrumentationScope { + return ctx.il +} + +func (ctx spanTransformContext) GetResource() pdata.Resource { + return ctx.resource +} + // pathGetSetter is a getSetter which has been resolved using a path expression provided by a user. 
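// With this refactor the resolved accessor is expressed against the generic
// common.TransformContext, so only ParsePath below remains trace-specific and
// the Get/Set plumbing can be reused by other signals.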
type pathGetSetter struct { - getter exprFunc - setter func(ctx spanTransformContext, val interface{}) + getter common.ExprFunc + setter func(ctx common.TransformContext, val interface{}) } -func (path pathGetSetter) get(ctx spanTransformContext) interface{} { +func (path pathGetSetter) Get(ctx common.TransformContext) interface{} { return path.getter(ctx) } -func (path pathGetSetter) set(ctx spanTransformContext, val interface{}) { +func (path pathGetSetter) Set(ctx common.TransformContext, val interface{}) { path.setter(ctx, val) } -func newGetSetter(val common.Value) (getSetter, error) { - if val.Path == nil { - return nil, fmt.Errorf("must be a trace path expression") - } - - return newPathGetSetter(val.Path.Fields) +func ParsePath(val *common.Path) (common.GetSetter, error) { + return newPathGetSetter(val.Fields) } -func newPathGetSetter(path []common.Field) (getSetter, error) { +func newPathGetSetter(path []common.Field) (common.GetSetter, error) { switch path[0].Name { case "resource": if len(path) == 1 { @@ -62,13 +76,13 @@ func newPathGetSetter(path []common.Field) (getSetter, error) { } case "instrumentation_library": if len(path) == 1 { - return accessInstrumentationLibrary(), nil + return accessInstrumentationScope(), nil } switch path[1].Name { case "name": - return accessInstrumentationLibraryName(), nil + return accessInstrumentationScopeName(), nil case "version": - return accessInstrumentationLibraryVersion(), nil + return accessInstrumentationScopeVersion(), nil } case "trace_id": return accessTraceID(), nil @@ -121,13 +135,13 @@ func newPathGetSetter(path []common.Field) (getSetter, error) { func accessResource() pathGetSetter { return pathGetSetter{ - getter: func(ctx spanTransformContext) interface{} { - return ctx.resource + getter: func(ctx common.TransformContext) interface{} { + return ctx.GetResource() }, - setter: func(ctx spanTransformContext, val interface{}) { + setter: func(ctx common.TransformContext, val interface{}) { if newRes, ok := val.(pdata.Resource); ok { - ctx.resource.Attributes().Clear() - newRes.CopyTo(ctx.resource) + ctx.GetResource().Attributes().Clear() + newRes.CopyTo(ctx.GetResource()) } }, } @@ -135,13 +149,13 @@ func accessResource() pathGetSetter { func accessResourceAttributes() pathGetSetter { return pathGetSetter{ - getter: func(ctx spanTransformContext) interface{} { - return ctx.resource.Attributes() + getter: func(ctx common.TransformContext) interface{} { + return ctx.GetResource().Attributes() }, - setter: func(ctx spanTransformContext, val interface{}) { + setter: func(ctx common.TransformContext, val interface{}) { if attrs, ok := val.(pdata.Map); ok { - ctx.resource.Attributes().Clear() - attrs.CopyTo(ctx.resource.Attributes()) + ctx.GetResource().Attributes().Clear() + attrs.CopyTo(ctx.GetResource().Attributes()) } }, } @@ -149,49 +163,49 @@ func accessResourceAttributes() pathGetSetter { func accessResourceAttributesKey(mapKey *string) pathGetSetter { return pathGetSetter{ - getter: func(ctx spanTransformContext) interface{} { - return getAttr(ctx.resource.Attributes(), *mapKey) + getter: func(ctx common.TransformContext) interface{} { + return getAttr(ctx.GetResource().Attributes(), *mapKey) }, - setter: func(ctx spanTransformContext, val interface{}) { - setAttr(ctx.resource.Attributes(), *mapKey, val) + setter: func(ctx common.TransformContext, val interface{}) { + setAttr(ctx.GetResource().Attributes(), *mapKey, val) }, } } -func accessInstrumentationLibrary() pathGetSetter { +func accessInstrumentationScope() 
pathGetSetter { return pathGetSetter{ - getter: func(ctx spanTransformContext) interface{} { - return ctx.il + getter: func(ctx common.TransformContext) interface{} { + return ctx.GetInstrumentationScope() }, - setter: func(ctx spanTransformContext, val interface{}) { + setter: func(ctx common.TransformContext, val interface{}) { if newIl, ok := val.(pdata.InstrumentationScope); ok { - newIl.CopyTo(ctx.il) + newIl.CopyTo(ctx.GetInstrumentationScope()) } }, } } -func accessInstrumentationLibraryName() pathGetSetter { +func accessInstrumentationScopeName() pathGetSetter { return pathGetSetter{ - getter: func(ctx spanTransformContext) interface{} { - return ctx.il.Name() + getter: func(ctx common.TransformContext) interface{} { + return ctx.GetInstrumentationScope().Name() }, - setter: func(ctx spanTransformContext, val interface{}) { + setter: func(ctx common.TransformContext, val interface{}) { if str, ok := val.(string); ok { - ctx.il.SetName(str) + ctx.GetInstrumentationScope().SetName(str) } }, } } -func accessInstrumentationLibraryVersion() pathGetSetter { +func accessInstrumentationScopeVersion() pathGetSetter { return pathGetSetter{ - getter: func(ctx spanTransformContext) interface{} { - return ctx.il.Version() + getter: func(ctx common.TransformContext) interface{} { + return ctx.GetInstrumentationScope().Version() }, - setter: func(ctx spanTransformContext, val interface{}) { + setter: func(ctx common.TransformContext, val interface{}) { if str, ok := val.(string); ok { - ctx.il.SetVersion(str) + ctx.GetInstrumentationScope().SetVersion(str) } }, } @@ -199,15 +213,15 @@ func accessInstrumentationLibraryVersion() pathGetSetter { func accessTraceID() pathGetSetter { return pathGetSetter{ - getter: func(ctx spanTransformContext) interface{} { - return ctx.span.TraceID() + getter: func(ctx common.TransformContext) interface{} { + return ctx.GetItem().(pdata.Span).TraceID() }, - setter: func(ctx spanTransformContext, val interface{}) { + setter: func(ctx common.TransformContext, val interface{}) { if str, ok := val.(string); ok { id, _ := hex.DecodeString(str) var idArr [16]byte copy(idArr[:16], id) - ctx.span.SetTraceID(pdata.NewTraceID(idArr)) + ctx.GetItem().(pdata.Span).SetTraceID(pdata.NewTraceID(idArr)) } }, } @@ -215,15 +229,15 @@ func accessTraceID() pathGetSetter { func accessSpanID() pathGetSetter { return pathGetSetter{ - getter: func(ctx spanTransformContext) interface{} { - return ctx.span.SpanID() + getter: func(ctx common.TransformContext) interface{} { + return ctx.GetItem().(pdata.Span).SpanID() }, - setter: func(ctx spanTransformContext, val interface{}) { + setter: func(ctx common.TransformContext, val interface{}) { if str, ok := val.(string); ok { id, _ := hex.DecodeString(str) var idArr [8]byte copy(idArr[:8], id) - ctx.span.SetSpanID(pdata.NewSpanID(idArr)) + ctx.GetItem().(pdata.Span).SetSpanID(pdata.NewSpanID(idArr)) } }, } @@ -231,12 +245,12 @@ func accessSpanID() pathGetSetter { func accessTraceState() pathGetSetter { return pathGetSetter{ - getter: func(ctx spanTransformContext) interface{} { - return ctx.span.TraceState() + getter: func(ctx common.TransformContext) interface{} { + return ctx.GetItem().(pdata.Span).TraceState() }, - setter: func(ctx spanTransformContext, val interface{}) { + setter: func(ctx common.TransformContext, val interface{}) { if str, ok := val.(string); ok { - ctx.span.SetTraceState(pdata.TraceState(str)) + ctx.GetItem().(pdata.Span).SetTraceState(pdata.TraceState(str)) } }, } @@ -244,15 +258,15 @@ func accessTraceState() 
pathGetSetter { func accessParentSpanID() pathGetSetter { return pathGetSetter{ - getter: func(ctx spanTransformContext) interface{} { - return ctx.span.ParentSpanID() + getter: func(ctx common.TransformContext) interface{} { + return ctx.GetItem().(pdata.Span).ParentSpanID() }, - setter: func(ctx spanTransformContext, val interface{}) { + setter: func(ctx common.TransformContext, val interface{}) { if str, ok := val.(string); ok { id, _ := hex.DecodeString(str) var idArr [8]byte copy(idArr[:8], id) - ctx.span.SetParentSpanID(pdata.NewSpanID(idArr)) + ctx.GetItem().(pdata.Span).SetParentSpanID(pdata.NewSpanID(idArr)) } }, } @@ -260,12 +274,12 @@ func accessParentSpanID() pathGetSetter { func accessName() pathGetSetter { return pathGetSetter{ - getter: func(ctx spanTransformContext) interface{} { - return ctx.span.Name() + getter: func(ctx common.TransformContext) interface{} { + return ctx.GetItem().(pdata.Span).Name() }, - setter: func(ctx spanTransformContext, val interface{}) { + setter: func(ctx common.TransformContext, val interface{}) { if str, ok := val.(string); ok { - ctx.span.SetName(str) + ctx.GetItem().(pdata.Span).SetName(str) } }, } @@ -273,12 +287,12 @@ func accessName() pathGetSetter { func accessKind() pathGetSetter { return pathGetSetter{ - getter: func(ctx spanTransformContext) interface{} { - return ctx.span.Kind() + getter: func(ctx common.TransformContext) interface{} { + return ctx.GetItem().(pdata.Span).Kind() }, - setter: func(ctx spanTransformContext, val interface{}) { + setter: func(ctx common.TransformContext, val interface{}) { if i, ok := val.(int64); ok { - ctx.span.SetKind(pdata.SpanKind(i)) + ctx.GetItem().(pdata.Span).SetKind(pdata.SpanKind(i)) } }, } @@ -286,12 +300,12 @@ func accessKind() pathGetSetter { func accessStartTimeUnixNano() pathGetSetter { return pathGetSetter{ - getter: func(ctx spanTransformContext) interface{} { - return ctx.span.StartTimestamp().AsTime().UnixNano() + getter: func(ctx common.TransformContext) interface{} { + return ctx.GetItem().(pdata.Span).StartTimestamp().AsTime().UnixNano() }, - setter: func(ctx spanTransformContext, val interface{}) { + setter: func(ctx common.TransformContext, val interface{}) { if i, ok := val.(int64); ok { - ctx.span.SetStartTimestamp(pdata.NewTimestampFromTime(time.Unix(0, i))) + ctx.GetItem().(pdata.Span).SetStartTimestamp(pdata.NewTimestampFromTime(time.Unix(0, i))) } }, } @@ -299,12 +313,12 @@ func accessStartTimeUnixNano() pathGetSetter { func accessEndTimeUnixNano() pathGetSetter { return pathGetSetter{ - getter: func(ctx spanTransformContext) interface{} { - return ctx.span.EndTimestamp().AsTime().UnixNano() + getter: func(ctx common.TransformContext) interface{} { + return ctx.GetItem().(pdata.Span).EndTimestamp().AsTime().UnixNano() }, - setter: func(ctx spanTransformContext, val interface{}) { + setter: func(ctx common.TransformContext, val interface{}) { if i, ok := val.(int64); ok { - ctx.span.SetEndTimestamp(pdata.NewTimestampFromTime(time.Unix(0, i))) + ctx.GetItem().(pdata.Span).SetEndTimestamp(pdata.NewTimestampFromTime(time.Unix(0, i))) } }, } @@ -312,13 +326,13 @@ func accessEndTimeUnixNano() pathGetSetter { func accessAttributes() pathGetSetter { return pathGetSetter{ - getter: func(ctx spanTransformContext) interface{} { - return ctx.span.Attributes() + getter: func(ctx common.TransformContext) interface{} { + return ctx.GetItem().(pdata.Span).Attributes() }, - setter: func(ctx spanTransformContext, val interface{}) { + setter: func(ctx common.TransformContext, val interface{}) 
{ if attrs, ok := val.(pdata.Map); ok { - ctx.span.Attributes().Clear() - attrs.CopyTo(ctx.span.Attributes()) + ctx.GetItem().(pdata.Span).Attributes().Clear() + attrs.CopyTo(ctx.GetItem().(pdata.Span).Attributes()) } }, } @@ -326,23 +340,23 @@ func accessAttributes() pathGetSetter { func accessAttributesKey(mapKey *string) pathGetSetter { return pathGetSetter{ - getter: func(ctx spanTransformContext) interface{} { - return getAttr(ctx.span.Attributes(), *mapKey) + getter: func(ctx common.TransformContext) interface{} { + return getAttr(ctx.GetItem().(pdata.Span).Attributes(), *mapKey) }, - setter: func(ctx spanTransformContext, val interface{}) { - setAttr(ctx.span.Attributes(), *mapKey, val) + setter: func(ctx common.TransformContext, val interface{}) { + setAttr(ctx.GetItem().(pdata.Span).Attributes(), *mapKey, val) }, } } func accessDroppedAttributesCount() pathGetSetter { return pathGetSetter{ - getter: func(ctx spanTransformContext) interface{} { - return ctx.span.DroppedAttributesCount() + getter: func(ctx common.TransformContext) interface{} { + return ctx.GetItem().(pdata.Span).DroppedAttributesCount() }, - setter: func(ctx spanTransformContext, val interface{}) { + setter: func(ctx common.TransformContext, val interface{}) { if i, ok := val.(int64); ok { - ctx.span.SetDroppedAttributesCount(uint32(i)) + ctx.GetItem().(pdata.Span).SetDroppedAttributesCount(uint32(i)) } }, } @@ -350,15 +364,15 @@ func accessDroppedAttributesCount() pathGetSetter { func accessEvents() pathGetSetter { return pathGetSetter{ - getter: func(ctx spanTransformContext) interface{} { - return ctx.span.Events() + getter: func(ctx common.TransformContext) interface{} { + return ctx.GetItem().(pdata.Span).Events() }, - setter: func(ctx spanTransformContext, val interface{}) { + setter: func(ctx common.TransformContext, val interface{}) { if slc, ok := val.(pdata.SpanEventSlice); ok { - ctx.span.Events().RemoveIf(func(event pdata.SpanEvent) bool { + ctx.GetItem().(pdata.Span).Events().RemoveIf(func(event pdata.SpanEvent) bool { return true }) - slc.CopyTo(ctx.span.Events()) + slc.CopyTo(ctx.GetItem().(pdata.Span).Events()) } }, } @@ -366,12 +380,12 @@ func accessEvents() pathGetSetter { func accessDroppedEventsCount() pathGetSetter { return pathGetSetter{ - getter: func(ctx spanTransformContext) interface{} { - return ctx.span.DroppedEventsCount() + getter: func(ctx common.TransformContext) interface{} { + return ctx.GetItem().(pdata.Span).DroppedEventsCount() }, - setter: func(ctx spanTransformContext, val interface{}) { + setter: func(ctx common.TransformContext, val interface{}) { if i, ok := val.(int64); ok { - ctx.span.SetDroppedEventsCount(uint32(i)) + ctx.GetItem().(pdata.Span).SetDroppedEventsCount(uint32(i)) } }, } @@ -379,15 +393,15 @@ func accessDroppedEventsCount() pathGetSetter { func accessLinks() pathGetSetter { return pathGetSetter{ - getter: func(ctx spanTransformContext) interface{} { - return ctx.span.Links() + getter: func(ctx common.TransformContext) interface{} { + return ctx.GetItem().(pdata.Span).Links() }, - setter: func(ctx spanTransformContext, val interface{}) { + setter: func(ctx common.TransformContext, val interface{}) { if slc, ok := val.(pdata.SpanLinkSlice); ok { - ctx.span.Links().RemoveIf(func(event pdata.SpanLink) bool { + ctx.GetItem().(pdata.Span).Links().RemoveIf(func(event pdata.SpanLink) bool { return true }) - slc.CopyTo(ctx.span.Links()) + slc.CopyTo(ctx.GetItem().(pdata.Span).Links()) } }, } @@ -395,12 +409,12 @@ func accessLinks() pathGetSetter { func 
accessDroppedLinksCount() pathGetSetter { return pathGetSetter{ - getter: func(ctx spanTransformContext) interface{} { - return ctx.span.DroppedLinksCount() + getter: func(ctx common.TransformContext) interface{} { + return ctx.GetItem().(pdata.Span).DroppedLinksCount() }, - setter: func(ctx spanTransformContext, val interface{}) { + setter: func(ctx common.TransformContext, val interface{}) { if i, ok := val.(int64); ok { - ctx.span.SetDroppedLinksCount(uint32(i)) + ctx.GetItem().(pdata.Span).SetDroppedLinksCount(uint32(i)) } }, } @@ -408,12 +422,12 @@ func accessDroppedLinksCount() pathGetSetter { func accessStatus() pathGetSetter { return pathGetSetter{ - getter: func(ctx spanTransformContext) interface{} { - return ctx.span.Status() + getter: func(ctx common.TransformContext) interface{} { + return ctx.GetItem().(pdata.Span).Status() }, - setter: func(ctx spanTransformContext, val interface{}) { + setter: func(ctx common.TransformContext, val interface{}) { if status, ok := val.(pdata.SpanStatus); ok { - status.CopyTo(ctx.span.Status()) + status.CopyTo(ctx.GetItem().(pdata.Span).Status()) } }, } @@ -421,12 +435,12 @@ func accessStatus() pathGetSetter { func accessStatusCode() pathGetSetter { return pathGetSetter{ - getter: func(ctx spanTransformContext) interface{} { - return ctx.span.Status().Code() + getter: func(ctx common.TransformContext) interface{} { + return ctx.GetItem().(pdata.Span).Status().Code() }, - setter: func(ctx spanTransformContext, val interface{}) { + setter: func(ctx common.TransformContext, val interface{}) { if i, ok := val.(int64); ok { - ctx.span.Status().SetCode(pdata.StatusCode(i)) + ctx.GetItem().(pdata.Span).Status().SetCode(pdata.StatusCode(i)) } }, } @@ -434,12 +448,12 @@ func accessStatusCode() pathGetSetter { func accessStatusMessage() pathGetSetter { return pathGetSetter{ - getter: func(ctx spanTransformContext) interface{} { - return ctx.span.Status().Message() + getter: func(ctx common.TransformContext) interface{} { + return ctx.GetItem().(pdata.Span).Status().Message() }, - setter: func(ctx spanTransformContext, val interface{}) { + setter: func(ctx common.TransformContext, val interface{}) { if str, ok := val.(string); ok { - ctx.span.Status().SetMessage(str) + ctx.GetItem().(pdata.Span).Status().SetMessage(str) } }, } diff --git a/processor/transformprocessor/internal/traces/traces_test.go b/processor/transformprocessor/internal/traces/traces_test.go index 152b1257df21..f887ecf5d27d 100644 --- a/processor/transformprocessor/internal/traces/traces_test.go +++ b/processor/transformprocessor/internal/traces/traces_test.go @@ -668,14 +668,14 @@ func Test_newPathGetSetter(t *testing.T) { span, il, resource := createTelemetry() - got := accessor.get(spanTransformContext{ + got := accessor.Get(spanTransformContext{ span: span, il: il, resource: resource, }) assert.Equal(t, tt.orig, got) - accessor.set(spanTransformContext{ + accessor.Set(spanTransformContext{ span: span, il: il, resource: resource, @@ -752,3 +752,11 @@ func createTelemetry() (pdata.Span, pdata.InstrumentationScope, pdata.Resource) return span, il, resource } + +func strp(s string) *string { + return &s +} + +func intp(i int64) *int64 { + return &i +} From 03b379ff00254203ac6fe24168f64fb7b21f12f0 Mon Sep 17 00:00:00 2001 From: Roger Coll Date: Thu, 7 Apr 2022 12:59:44 +0000 Subject: [PATCH 13/59] [receiver/podman] Add timeout config option (#9014) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [receiver/podman] Add timeout config option * 
docs: update changelog * feat: add missing license to new test file Co-authored-by: Juraci Paixão Kröhling --- CHANGELOG.md | 1 + receiver/podmanreceiver/README.md | 4 +- receiver/podmanreceiver/config.go | 7 +- receiver/podmanreceiver/config_test.go | 2 + receiver/podmanreceiver/factory.go | 1 + receiver/podmanreceiver/podman_client.go | 23 +++--- receiver/podmanreceiver/podman_client_test.go | 77 +++++++++++++++++++ receiver/podmanreceiver/receiver.go | 4 +- receiver/podmanreceiver/receiver_test.go | 6 +- receiver/podmanreceiver/testdata/config.yaml | 1 + 10 files changed, 112 insertions(+), 14 deletions(-) create mode 100644 receiver/podmanreceiver/podman_client_test.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 4caea0288485..1ef240ae8935 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ - `jaegerremotesamplingextension`: Add local and remote sampling stores (#8818) - `attributesprocessor`: Add support to filter on log body (#8996) - `prometheusremotewriteexporter`: Translate resource attributes to the target info metric (#8493) +- `podmanreceiver`: Add API timeout configuration option (#9014) - `cmd/mdatagen`: Add `sem_conv_version` field to metadata.yaml that is used to set metrics SchemaURL (#9010) ### 🛑 Breaking changes 🛑 diff --git a/receiver/podmanreceiver/README.md b/receiver/podmanreceiver/README.md index 0f649a856628..a13e35f6c326 100644 --- a/receiver/podmanreceiver/README.md +++ b/receiver/podmanreceiver/README.md @@ -19,6 +19,7 @@ The following settings are required: The following settings are optional: - `collection_interval` (default = `10s`): The interval at which to gather container stats. +- `timeout` (default = `5s`): The maximum amount of time to wait for Podman API responses. Example: @@ -26,6 +27,7 @@ Example: receivers: podman_stats: endpoint: unix://run/podman/podman.sock + timeout: 10s collection_interval: 10s ``` @@ -81,4 +83,4 @@ Recommended build tags to use when including this receiver in your build: - `containers_image_openpgp` - `exclude_graphdriver_btrfs` -- `exclude_graphdriver_devicemapper` \ No newline at end of file +- `exclude_graphdriver_devicemapper` diff --git a/receiver/podmanreceiver/config.go b/receiver/podmanreceiver/config.go index 265eec358b5a..cb479424f00d 100644 --- a/receiver/podmanreceiver/config.go +++ b/receiver/podmanreceiver/config.go @@ -16,6 +16,7 @@ package podmanreceiver // import "github.com/open-telemetry/opentelemetry-collec import ( "errors" + "time" "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/receiver/scraperhelper" @@ -27,7 +28,11 @@ type Config struct { scraperhelper.ScraperControllerSettings `mapstructure:",squash"` // The URL of the podman server. Default is "unix:///run/podman/podman.sock" - Endpoint string `mapstructure:"endpoint"` + Endpoint string `mapstructure:"endpoint"` + + // The maximum amount of time to wait for Podman API responses. 
Default is 5s + Timeout time.Duration `mapstructure:"timeout"` + APIVersion string `mapstructure:"api_version"` SSHKey string `mapstructure:"ssh_key"` SSHPassphrase string `mapstructure:"ssh_passphrase"` diff --git a/receiver/podmanreceiver/config_test.go b/receiver/podmanreceiver/config_test.go index 9d730e3c7f07..075d973c211f 100644 --- a/receiver/podmanreceiver/config_test.go +++ b/receiver/podmanreceiver/config_test.go @@ -45,9 +45,11 @@ func TestLoadConfig(t *testing.T) { assert.Equal(t, "podman_stats", dcfg.ID().String()) assert.Equal(t, "unix:///run/podman/podman.sock", dcfg.Endpoint) assert.Equal(t, 10*time.Second, dcfg.CollectionInterval) + assert.Equal(t, 5*time.Second, dcfg.Timeout) ascfg := cfg.Receivers[config.NewComponentIDWithName(typeStr, "all")].(*Config) assert.Equal(t, "podman_stats/all", ascfg.ID().String()) assert.Equal(t, "http://example.com/", ascfg.Endpoint) assert.Equal(t, 2*time.Second, ascfg.CollectionInterval) + assert.Equal(t, 20*time.Second, ascfg.Timeout) } diff --git a/receiver/podmanreceiver/factory.go b/receiver/podmanreceiver/factory.go index 4953b67f6404..952bad065a67 100644 --- a/receiver/podmanreceiver/factory.go +++ b/receiver/podmanreceiver/factory.go @@ -43,6 +43,7 @@ func createDefaultConfig() *Config { CollectionInterval: 10 * time.Second, }, Endpoint: "unix:///run/podman/podman.sock", + Timeout: 5 * time.Second, APIVersion: defaultAPIVersion, } } diff --git a/receiver/podmanreceiver/podman_client.go b/receiver/podmanreceiver/podman_client.go index 8a4bfdc563c7..db43ad3de115 100644 --- a/receiver/podmanreceiver/podman_client.go +++ b/receiver/podmanreceiver/podman_client.go @@ -60,12 +60,16 @@ type containerStatsReport struct { type clientFactory func(logger *zap.Logger, cfg *Config) (client, error) type client interface { - stats() ([]containerStats, error) + ping(context.Context) error + stats(context.Context) ([]containerStats, error) } type podmanClient struct { conn *http.Client endpoint string + + // The maximum amount of time to wait for Podman API responses + timeout time.Duration } func newPodmanClient(logger *zap.Logger, cfg *Config) (client, error) { @@ -76,10 +80,7 @@ func newPodmanClient(logger *zap.Logger, cfg *Config) (client, error) { c := &podmanClient{ conn: connection, endpoint: fmt.Sprintf("http://d/v%s/libpod", cfg.APIVersion), - } - err = c.ping() - if err != nil { - return nil, err + timeout: cfg.Timeout, } return c, nil } @@ -96,11 +97,13 @@ func (c *podmanClient) request(ctx context.Context, path string, params url.Valu return c.conn.Do(req) } -func (c *podmanClient) stats() ([]containerStats, error) { +func (c *podmanClient) stats(ctx context.Context) ([]containerStats, error) { params := url.Values{} params.Add("stream", "false") - resp, err := c.request(context.Background(), "/containers/stats", params) + statsCtx, cancel := context.WithTimeout(ctx, c.timeout) + defer cancel() + resp, err := c.request(statsCtx, "/containers/stats", params) if err != nil { return nil, err } @@ -122,8 +125,10 @@ func (c *podmanClient) stats() ([]containerStats, error) { return report.Stats, nil } -func (c *podmanClient) ping() error { - resp, err := c.request(context.Background(), "/_ping", nil) +func (c *podmanClient) ping(ctx context.Context) error { + pingCtx, cancel := context.WithTimeout(ctx, c.timeout) + defer cancel() + resp, err := c.request(pingCtx, "/_ping", nil) if err != nil { return err } diff --git a/receiver/podmanreceiver/podman_client_test.go b/receiver/podmanreceiver/podman_client_test.go new file mode 100644 index 
000000000000..94faf7c43f1d --- /dev/null +++ b/receiver/podmanreceiver/podman_client_test.go @@ -0,0 +1,77 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package podmanreceiver + +import ( + "context" + "fmt" + "io/ioutil" + "net" + "os" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" +) + +func tmpSock(t *testing.T) (net.Listener, string) { + f, err := ioutil.TempFile(os.TempDir(), "testsock") + if err != nil { + t.Fatal(err) + } + addr := f.Name() + os.Remove(addr) + + listener, err := net.Listen("unix", addr) + if err != nil { + t.Fatal(err) + } + + return listener, addr +} + +func TestWatchingTimeouts(t *testing.T) { + listener, addr := tmpSock(t) + defer listener.Close() + defer os.Remove(addr) + + config := &Config{ + Endpoint: fmt.Sprintf("unix://%s", addr), + Timeout: 50 * time.Millisecond, + } + + cli, err := newPodmanClient(zap.NewNop(), config) + assert.NotNil(t, cli) + assert.Nil(t, err) + + expectedError := "context deadline exceeded" + + shouldHaveTaken := time.Now().Add(100 * time.Millisecond).UnixNano() + + err = cli.ping(context.Background()) + require.Error(t, err) + + containers, err := cli.stats(context.Background()) + require.Error(t, err) + assert.Contains(t, err.Error(), expectedError) + assert.Nil(t, containers) + + assert.GreaterOrEqual( + t, time.Now().UnixNano(), shouldHaveTaken, + "Client timeouts don't appear to have been exercised.", + ) +} diff --git a/receiver/podmanreceiver/receiver.go b/receiver/podmanreceiver/receiver.go index db55c32d7f87..9e2ce5cb5179 100644 --- a/receiver/podmanreceiver/receiver.go +++ b/receiver/podmanreceiver/receiver.go @@ -72,10 +72,10 @@ func (r *receiver) start(context.Context, component.Host) error { return err } -func (r *receiver) scrape(context.Context) (pdata.Metrics, error) { +func (r *receiver) scrape(ctx context.Context) (pdata.Metrics, error) { var err error - stats, err := r.client.stats() + stats, err := r.client.stats(ctx) if err != nil { r.set.Logger.Error("error fetching stats", zap.Error(err)) return pdata.Metrics{}, err diff --git a/receiver/podmanreceiver/receiver_test.go b/receiver/podmanreceiver/receiver_test.go index 97a73722408b..542cfed6e16e 100644 --- a/receiver/podmanreceiver/receiver_test.go +++ b/receiver/podmanreceiver/receiver_test.go @@ -93,7 +93,7 @@ func (c mockClient) factory(logger *zap.Logger, cfg *Config) (client, error) { return c, nil } -func (c mockClient) stats() ([]containerStats, error) { +func (c mockClient) stats(context.Context) ([]containerStats, error) { report := <-c if report.Error != "" { return nil, errors.New(report.Error) @@ -101,6 +101,10 @@ func (c mockClient) stats() ([]containerStats, error) { return report.Stats, nil } +func (c mockClient) ping(context.Context) error { + return nil +} + type mockConsumer chan pdata.Metrics func (m mockConsumer) Capabilities() consumer.Capabilities { diff --git 
a/receiver/podmanreceiver/testdata/config.yaml b/receiver/podmanreceiver/testdata/config.yaml index 71cae16291ca..5e53b86e180f 100644 --- a/receiver/podmanreceiver/testdata/config.yaml +++ b/receiver/podmanreceiver/testdata/config.yaml @@ -3,6 +3,7 @@ receivers: podman_stats/all: endpoint: http://example.com/ collection_interval: 2s + timeout: 20s processors: nop: From 955c2afcf9486bb3a746b3a414ce6ed33d2975bf Mon Sep 17 00:00:00 2001 From: Daniel Jaglowski Date: Thu, 7 Apr 2022 09:17:05 -0400 Subject: [PATCH 14/59] Bump limits on performance tests (#9105) * Bump limits on log performance tests * Bump cpu limit on metric perf tests --- testbed/tests/log_test.go | 8 ++++---- testbed/tests/metric_test.go | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/testbed/tests/log_test.go b/testbed/tests/log_test.go index 026110627c67..1b2f641549e4 100644 --- a/testbed/tests/log_test.go +++ b/testbed/tests/log_test.go @@ -40,7 +40,7 @@ func TestLog10kDPS(t *testing.T) { receiver: testbed.NewOTLPDataReceiver(testbed.GetAvailablePort(t)), resourceSpec: testbed.ResourceSpec{ ExpectedMaxCPU: 30, - ExpectedMaxRAM: 98, + ExpectedMaxRAM: 120, }, }, { @@ -49,7 +49,7 @@ func TestLog10kDPS(t *testing.T) { receiver: testbed.NewOTLPHTTPDataReceiver(testbed.GetAvailablePort(t)), resourceSpec: testbed.ResourceSpec{ ExpectedMaxCPU: 30, - ExpectedMaxRAM: 95, + ExpectedMaxRAM: 120, }, }, { @@ -58,7 +58,7 @@ func TestLog10kDPS(t *testing.T) { receiver: testbed.NewOTLPDataReceiver(testbed.GetAvailablePort(t)), resourceSpec: testbed.ResourceSpec{ ExpectedMaxCPU: 30, - ExpectedMaxRAM: 117, + ExpectedMaxRAM: 120, }, }, { @@ -67,7 +67,7 @@ func TestLog10kDPS(t *testing.T) { receiver: testbed.NewOTLPDataReceiver(testbed.GetAvailablePort(t)), resourceSpec: testbed.ResourceSpec{ ExpectedMaxCPU: 30, - ExpectedMaxRAM: 105, + ExpectedMaxRAM: 120, }, extensions: datasenders.NewLocalFileStorageExtension(), }, diff --git a/testbed/tests/metric_test.go b/testbed/tests/metric_test.go index 2ce05b351d10..3533e8f4d81e 100644 --- a/testbed/tests/metric_test.go +++ b/testbed/tests/metric_test.go @@ -60,7 +60,7 @@ func TestMetric10kDPS(t *testing.T) { testbed.NewOTLPMetricDataSender(testbed.DefaultHost, testbed.GetAvailablePort(t)), testbed.NewOTLPDataReceiver(testbed.GetAvailablePort(t)), testbed.ResourceSpec{ - ExpectedMaxCPU: 50, + ExpectedMaxCPU: 60, ExpectedMaxRAM: 105, }, }, @@ -69,7 +69,7 @@ func TestMetric10kDPS(t *testing.T) { testbed.NewOTLPHTTPMetricDataSender(testbed.DefaultHost, testbed.GetAvailablePort(t)), testbed.NewOTLPHTTPDataReceiver(testbed.GetAvailablePort(t)), testbed.ResourceSpec{ - ExpectedMaxCPU: 55, + ExpectedMaxCPU: 60, ExpectedMaxRAM: 100, }, }, From 129b983658986b7afd5428778b6370edb094de2e Mon Sep 17 00:00:00 2001 From: Bogdan Drutu Date: Thu, 7 Apr 2022 06:32:01 -0700 Subject: [PATCH 15/59] Promote @djaglowski to maintainer role (#9104) @djaglowski has been with the project for a long time, contributing to bunch of components. @djaglowski also owns a critical part of the logging pipeline which is the https://github.com/open-telemetry/opentelemetry-log-collection. With the current proposal of moving the log collection into contrib https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/8850 we would like to have him as maintainer to continue to maintain that library as well as helping the community to maintain the entire project. 
Signed-off-by: Bogdan Drutu --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index e6392efc03c2..3e6143a62882 100644 --- a/README.md +++ b/README.md @@ -68,7 +68,6 @@ Approvers ([@open-telemetry/collector-contrib-approvers](https://github.com/orgs - [Anthony Mirabella](https://github.com/Aneurysm9), AWS - [Anuraag Agrawal](https://github.com/anuraaga), AWS -- [Daniel Jaglowski](https://github.com/djaglowski), observIQ - [David Ashpole](https://github.com/dashpole), Google - [Dmitrii Anoshin](https://github.com/dmitryax), Splunk - [Pablo Baeyens](https://github.com/mx-psi), DataDog @@ -76,6 +75,7 @@ Approvers ([@open-telemetry/collector-contrib-approvers](https://github.com/orgs Maintainers ([@open-telemetry/collector-contrib-maintainer](https://github.com/orgs/open-telemetry/teams/collector-contrib-maintainer)): +- [Daniel Jaglowski](https://github.com/djaglowski), observIQ - [Juraci Paixão Kröhling](https://github.com/jpkrohling), Grafana Labs - [Alex Boten](https://github.com/codeboten), Lightstep - [Bogdan Drutu](https://github.com/BogdanDrutu), Splunk From cd25661252b04535eff3eaa27bf9eef0273cf52e Mon Sep 17 00:00:00 2001 From: Mitchell Armstrong <48131175+armstrmi@users.noreply.github.com> Date: Thu, 7 Apr 2022 09:40:40 -0400 Subject: [PATCH 16/59] [receiver/riak] Riak Metric Receiver (#8548) * Add riakreceiver --- .github/CODEOWNERS | 1 + CHANGELOG.md | 3 +- Makefile | 1 + cmd/configschema/go.mod | 3 + go.mod | 3 + internal/components/components.go | 2 + internal/components/receivers_test.go | 3 + receiver/riakreceiver/Makefile | 1 + receiver/riakreceiver/README.md | 36 + receiver/riakreceiver/client.go | 125 ++ receiver/riakreceiver/client_test.go | 149 ++ receiver/riakreceiver/config.go | 66 + receiver/riakreceiver/config_test.go | 109 ++ receiver/riakreceiver/doc.go | 17 + receiver/riakreceiver/documentation.md | 38 + receiver/riakreceiver/factory.go | 70 + receiver/riakreceiver/factory_test.go | 96 ++ receiver/riakreceiver/go.mod | 77 + receiver/riakreceiver/go.sum | 1197 ++++++++++++++ receiver/riakreceiver/integration_test.go | 90 + .../internal/metadata/generated_metrics_v2.go | 529 ++++++ .../riakreceiver/internal/mocks/client.go | 39 + receiver/riakreceiver/internal/model/model.go | 34 + receiver/riakreceiver/metadata.yaml | 71 + receiver/riakreceiver/scraper.go | 102 ++ receiver/riakreceiver/scraper_test.go | 158 ++ .../apiresponses/get_stats_response.json | 566 +++++++ receiver/riakreceiver/testdata/config.yaml | 19 + .../testdata/integration/Dockerfile.riak | 29 + .../testdata/integration/entrypoint.sh | 41 + .../testdata/integration/expected.json | 214 +++ .../testdata/integration/riak.conf | 1451 +++++++++++++++++ .../testdata/scraper/expected.json | 214 +++ versions.yaml | 1 + 34 files changed, 5554 insertions(+), 1 deletion(-) create mode 100644 receiver/riakreceiver/Makefile create mode 100644 receiver/riakreceiver/README.md create mode 100644 receiver/riakreceiver/client.go create mode 100644 receiver/riakreceiver/client_test.go create mode 100644 receiver/riakreceiver/config.go create mode 100644 receiver/riakreceiver/config_test.go create mode 100644 receiver/riakreceiver/doc.go create mode 100644 receiver/riakreceiver/documentation.md create mode 100644 receiver/riakreceiver/factory.go create mode 100644 receiver/riakreceiver/factory_test.go create mode 100644 receiver/riakreceiver/go.mod create mode 100644 
receiver/riakreceiver/go.sum create mode 100644 receiver/riakreceiver/integration_test.go create mode 100644 receiver/riakreceiver/internal/metadata/generated_metrics_v2.go create mode 100644 receiver/riakreceiver/internal/mocks/client.go create mode 100644 receiver/riakreceiver/internal/model/model.go create mode 100644 receiver/riakreceiver/metadata.yaml create mode 100644 receiver/riakreceiver/scraper.go create mode 100644 receiver/riakreceiver/scraper_test.go create mode 100644 receiver/riakreceiver/testdata/apiresponses/get_stats_response.json create mode 100644 receiver/riakreceiver/testdata/config.yaml create mode 100644 receiver/riakreceiver/testdata/integration/Dockerfile.riak create mode 100644 receiver/riakreceiver/testdata/integration/entrypoint.sh create mode 100644 receiver/riakreceiver/testdata/integration/expected.json create mode 100644 receiver/riakreceiver/testdata/integration/riak.conf create mode 100644 receiver/riakreceiver/testdata/scraper/expected.json diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 10e2e6c22e1a..586f1eac31e1 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -144,6 +144,7 @@ receiver/prometheusreceiver/ @open-telemetry/collector-c receiver/rabbitmqreceiver/ @open-telemetry/collector-contrib-approvers @djaglowski @cpheps receiver/receivercreator/ @open-telemetry/collector-contrib-approvers @jrcamp receiver/redisreceiver/ @open-telemetry/collector-contrib-approvers @pmcollins @dmitryax +receiver/riakreceiver/ @open-telemetry/collector-contrib-approvers @djaglowski @armstrmi receiver/sapmreceiver/ @open-telemetry/collector-contrib-approvers @owais receiver/signalfxreceiver/ @open-telemetry/collector-contrib-approvers @pjanotti @dmitryax receiver/skywalkingreceiver @open-telemetry/collector-contrib-approvers @JaredTan95 diff --git a/CHANGELOG.md b/CHANGELOG.md index 1ef240ae8935..3f1e29b1ea7c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,7 @@ ## Unreleased ### 💡 Enhancements 💡 +- `riakreceiver`: Added implementation of Riak Metric Receiver (#8548) - `splunkhecexporter`: Add support for batching traces (#8995) - `hostmetricsreceiver`: Migrate Processes scraper to the Metrics builder (#8855) - `tanzuobservabilityexporter`: Use resourcetotelemetry helper (#8338) @@ -2002,4 +2003,4 @@ First release of OpenTelemetry Collector Contrib. 
[v0.2.7]: https://github.com/open-telemetry/opentelemetry-collector-contrib/compare/v0.2.6...v0.2.7 [v0.2.6]: https://github.com/open-telemetry/opentelemetry-collector-contrib/compare/v0.0.5...v0.2.6 [v0.0.5]: https://github.com/open-telemetry/opentelemetry-collector-contrib/compare/v0.0.1...v0.0.5 -[v0.0.1]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/v0.0.1 +[v0.0.1]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/v0.0.1 \ No newline at end of file diff --git a/Makefile b/Makefile index 21e3eea736d7..cea19f045d95 100644 --- a/Makefile +++ b/Makefile @@ -27,6 +27,7 @@ INTEGRATION_TEST_MODULES := \ receiver/nginxreceiver \ receiver/postgresqlreceiver \ receiver/redisreceiver \ + receiver/riakreceiver \ receiver/zookeeperreceiver \ extension/observer/dockerobserver diff --git a/cmd/configschema/go.mod b/cmd/configschema/go.mod index 12e9e129e859..625d2ea8d1a6 100644 --- a/cmd/configschema/go.mod +++ b/cmd/configschema/go.mod @@ -350,6 +350,7 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.48.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/receiver/receivercreator v0.48.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/receiver/redisreceiver v0.48.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/riakreceiver v0.48.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/receiver/sapmreceiver v0.48.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/receiver/signalfxreceiver v0.48.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/receiver/simpleprometheusreceiver v0.0.0-00010101000000-000000000000 // indirect @@ -758,6 +759,8 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/recei replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/redisreceiver => ../../receiver/redisreceiver +replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/riakreceiver => ../../receiver/riakreceiver + replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/sapmreceiver => ../../receiver/sapmreceiver replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/signalfxreceiver => ../../receiver/signalfxreceiver diff --git a/go.mod b/go.mod index b464975d4d57..a8a204535e26 100644 --- a/go.mod +++ b/go.mod @@ -110,6 +110,7 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.48.0 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/receivercreator v0.48.0 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/redisreceiver v0.48.0 + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/riakreceiver v0.48.0 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/sapmreceiver v0.48.0 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/signalfxreceiver v0.48.0 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/simpleprometheusreceiver v0.0.0-00010101000000-000000000000 @@ -762,6 +763,8 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/recei replace 
github.com/open-telemetry/opentelemetry-collector-contrib/receiver/redisreceiver => ./receiver/redisreceiver +replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/riakreceiver => ./receiver/riakreceiver + replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/sapmreceiver => ./receiver/sapmreceiver replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/signalfxreceiver => ./receiver/signalfxreceiver diff --git a/internal/components/components.go b/internal/components/components.go index ac92647bd3e0..dc434dce839d 100644 --- a/internal/components/components.go +++ b/internal/components/components.go @@ -130,6 +130,7 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/receivercreator" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/redisreceiver" + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/riakreceiver" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/sapmreceiver" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/signalfxreceiver" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/simpleprometheusreceiver" @@ -207,6 +208,7 @@ func Components() (component.Factories, error) { prometheusreceiver.NewFactory(), receivercreator.NewFactory(), redisreceiver.NewFactory(), + riakreceiver.NewFactory(), sapmreceiver.NewFactory(), signalfxreceiver.NewFactory(), simpleprometheusreceiver.NewFactory(), diff --git a/internal/components/receivers_test.go b/internal/components/receivers_test.go index 7403e400e654..06d78f5a326c 100644 --- a/internal/components/receivers_test.go +++ b/internal/components/receivers_test.go @@ -190,6 +190,9 @@ func TestDefaultReceivers(t *testing.T) { { receiver: "redis", }, + { + receiver: "riak", + }, { receiver: "sapm", }, diff --git a/receiver/riakreceiver/Makefile b/receiver/riakreceiver/Makefile new file mode 100644 index 000000000000..ded7a36092dc --- /dev/null +++ b/receiver/riakreceiver/Makefile @@ -0,0 +1 @@ +include ../../Makefile.Common diff --git a/receiver/riakreceiver/README.md b/receiver/riakreceiver/README.md new file mode 100644 index 000000000000..28bf4dcd2d1f --- /dev/null +++ b/receiver/riakreceiver/README.md @@ -0,0 +1,36 @@ +# Riak Receiver + +Riak metrics will be collected from the [/stats](https://docs.riak.com/riak/kv/2.2.3/developing/api/http/status.1.html) endpoint. + +This Riak receiver will collect metrics for [3.x+](https://github.com/basho/riak/releases) + +Supported pipeline types: `metrics` + +## Configuration + +The following configuration settings are required: + +- `username` +- `password` + +The following configuration settings are optional: + +- `endpoint` (default: `http://localhost:8098`): The URL of the node to be monitored. +- `collection_interval` (default = `60s`): This receiver collects metrics on an interval. Valid time units are `ns`, `us` (or `µs`), `ms`, `s`, `m`, `h`. +- `tls` (defaults defined [here](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configtls/README.md)): TLS control. By default insecure settings are rejected and certificate verification is on. 
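+
+Before adding the receiver to a pipeline, it can help to confirm that the endpoint and credentials are accepted. Below is a minimal, illustrative Go sketch of the same authenticated request the receiver issues against `/stats`; it is not part of the receiver, and the endpoint, the `otelu` user, and the `RIAK_PASSWORD` variable simply mirror the example configuration that follows.
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+	"net/http"
+	"os"
+)
+
+func main() {
+	// Endpoint and user taken from the example configuration below; adjust to match your node.
+	req, err := http.NewRequest(http.MethodGet, "http://localhost:8098/stats", nil)
+	if err != nil {
+		log.Fatal(err)
+	}
+	req.SetBasicAuth("otelu", os.Getenv("RIAK_PASSWORD"))
+
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer resp.Body.Close()
+
+	// A 200 response means the endpoint is reachable and the credentials are accepted.
+	fmt.Println("status:", resp.StatusCode)
+}
+```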
+ +### Example Configuration + +```yaml +receivers: + riak: + endpoint: http://localhost:8098 + username: otelu + password: $RIAK_PASSWORD + collection_interval: 60s +``` + +## Metrics + +Details about the metrics produced by this receiver can be found in [metadata.yaml](./metadata.yaml) + diff --git a/receiver/riakreceiver/client.go b/receiver/riakreceiver/client.go new file mode 100644 index 000000000000..5e5a12e8b1b3 --- /dev/null +++ b/receiver/riakreceiver/client.go @@ -0,0 +1,125 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package riakreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/riakreceiver" + +import ( + "context" + "encoding/json" + "fmt" + "io" + "net/http" + + "go.opentelemetry.io/collector/component" + "go.uber.org/zap" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/riakreceiver/internal/model" +) + +// statsPath is the path to stats endpoint +const statsPath = "/stats" + +type client interface { + // GetStats calls "/stats" endpoint to get list of stats for the target node + GetStats(ctx context.Context) (*model.Stats, error) +} + +var _ client = (*riakClient)(nil) + +type riakClient struct { + client *http.Client + hostEndpoint string + creds riakCredentials + logger *zap.Logger +} + +type riakCredentials struct { + username string + password string +} + +func newClient(cfg *Config, host component.Host, settings component.TelemetrySettings, logger *zap.Logger) (client, error) { + httpClient, err := cfg.ToClient(host.GetExtensions(), settings) + if err != nil { + return nil, fmt.Errorf("failed to create HTTP Client: %w", err) + } + + return &riakClient{ + client: httpClient, + hostEndpoint: cfg.Endpoint, + creds: riakCredentials{ + username: cfg.Username, + password: cfg.Password, + }, + logger: logger, + }, nil +} + +func (c *riakClient) GetStats(ctx context.Context) (*model.Stats, error) { + var stats *model.Stats + + if err := c.get(ctx, statsPath, &stats); err != nil { + c.logger.Debug("Failed to retrieve stats", zap.Error(err)) + return nil, err + } + + return stats, nil +} + +func (c *riakClient) get(ctx context.Context, path string, respObj interface{}) error { + // Construct endpoint and create request + url := c.hostEndpoint + path + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, http.NoBody) + if err != nil { + return fmt.Errorf("failed to create get request for path %s: %w", path, err) + } + + // Set user/pass authentication + req.SetBasicAuth(c.creds.username, c.creds.password) + + // Make request + resp, err := c.client.Do(req) + if err != nil { + return fmt.Errorf("failed to make http request: %w", err) + } + + // Defer body close + defer func() { + if closeErr := resp.Body.Close(); closeErr != nil { + c.logger.Warn("failed to close response body", zap.Error(closeErr)) + } + }() + + // Check for OK status code + if resp.StatusCode != http.StatusOK { + c.logger.Debug("riak API non-200", zap.Error(err), 
zap.Int("status_code", resp.StatusCode)) + + // Attempt to extract the error payload + payloadData, err := io.ReadAll(resp.Body) + if err != nil { + c.logger.Debug("failed to read payload error message", zap.Error(err)) + } else { + c.logger.Debug("riak API Error", zap.ByteString("api_error", payloadData)) + } + + return fmt.Errorf("non 200 code returned %d", resp.StatusCode) + } + + // Decode the payload into the passed in response object + if err := json.NewDecoder(resp.Body).Decode(respObj); err != nil { + return fmt.Errorf("failed to decode response payload: %w", err) + } + + return nil +} diff --git a/receiver/riakreceiver/client_test.go b/receiver/riakreceiver/client_test.go new file mode 100644 index 000000000000..239ad4f28926 --- /dev/null +++ b/receiver/riakreceiver/client_test.go @@ -0,0 +1,149 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package riakreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/riakreceiver" + +import ( + "context" + "encoding/json" + "errors" + "io/ioutil" + "net/http" + "net/http/httptest" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config/confighttp" + "go.opentelemetry.io/collector/config/configtls" + "go.uber.org/zap" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/riakreceiver/internal/model" +) + +const ( + statsAPIResponseFile = "get_stats_response.json" +) + +func TestNewClient(t *testing.T) { + testCase := []struct { + desc string + cfg *Config + expectError error + }{ + { + desc: "Invalid HTTP config", + cfg: &Config{ + HTTPClientSettings: confighttp.HTTPClientSettings{ + Endpoint: defaultEndpoint, + TLSSetting: configtls.TLSClientSetting{ + TLSSetting: configtls.TLSSetting{ + CAFile: "/non/existent", + }, + }, + }, + }, + expectError: errors.New("failed to create HTTP Client"), + }, + { + desc: "Valid Configuration", + cfg: &Config{ + HTTPClientSettings: confighttp.HTTPClientSettings{ + TLSSetting: configtls.TLSClientSetting{}, + Endpoint: defaultEndpoint, + }, + }, + expectError: nil, + }, + } + + for _, tc := range testCase { + t.Run(tc.desc, func(t *testing.T) { + ac, err := newClient(tc.cfg, componenttest.NewNopHost(), componenttest.NewNopTelemetrySettings(), zap.NewNop()) + if tc.expectError != nil { + require.Nil(t, ac) + require.Contains(t, err.Error(), tc.expectError.Error()) + } else { + require.NoError(t, err) + + actualClient, ok := ac.(*riakClient) + require.True(t, ok) + + require.Equal(t, tc.cfg.Username, actualClient.creds.username) + require.Equal(t, tc.cfg.Password, actualClient.creds.password) + require.Equal(t, tc.cfg.Endpoint, actualClient.hostEndpoint) + require.Equal(t, zap.NewNop(), actualClient.logger) + require.NotNil(t, actualClient.client) + } + }) + } +} + +func TestGetStatsDetails(t *testing.T) { + t.Run("Non-200 Response", func(t *testing.T) { + // Setup test 
server + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusUnauthorized) + })) + defer ts.Close() + + tc := createTestClient(t, ts.URL) + + clusters, err := tc.GetStats(context.Background()) + require.Nil(t, clusters) + require.EqualError(t, err, "non 200 code returned 401") + }) + + t.Run("Successful call", func(t *testing.T) { + data := loadAPIResponseData(t, statsAPIResponseFile) + + // Setup test server + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write(data) + })) + defer ts.Close() + + tc := createTestClient(t, ts.URL) + + // Load the valid data into a struct to compare + var expected *model.Stats + err := json.Unmarshal(data, &expected) + require.NoError(t, err) + + clusters, err := tc.GetStats(context.Background()) + require.NoError(t, err) + require.Equal(t, expected, clusters) + }) +} + +func createTestClient(t *testing.T, baseEndpoint string) client { + t.Helper() + cfg := createDefaultConfig().(*Config) + cfg.Endpoint = baseEndpoint + + testClient, err := newClient(cfg, componenttest.NewNopHost(), componenttest.NewNopTelemetrySettings(), zap.NewNop()) + require.NoError(t, err) + return testClient +} + +func loadAPIResponseData(t *testing.T, fileName string) []byte { + t.Helper() + fullPath := filepath.Join("testdata", "apiresponses", fileName) + + data, err := ioutil.ReadFile(fullPath) + require.NoError(t, err) + + return data +} diff --git a/receiver/riakreceiver/config.go b/receiver/riakreceiver/config.go new file mode 100644 index 000000000000..7e6e1ce5f659 --- /dev/null +++ b/receiver/riakreceiver/config.go @@ -0,0 +1,66 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package riakreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/riakreceiver" + +import ( + "errors" + "fmt" + "net/url" + + "go.opentelemetry.io/collector/config/confighttp" + "go.opentelemetry.io/collector/receiver/scraperhelper" + "go.uber.org/multierr" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/riakreceiver/internal/metadata" +) + +// Predefined error responses for configuration validation failures +var ( + errMissingUsername = errors.New(`"username" not specified in config`) + errMissingPassword = errors.New(`"password" not specified in config`) + + errInvalidEndpoint = errors.New(`"endpoint" must be in the form of ://:`) +) + +const defaultEndpoint = "http://localhost:8098" + +// Config defines the configuration for the various elements of the receiver agent. 
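+// ScraperControllerSettings and HTTPClientSettings are squashed below, so their
+// fields (such as collection_interval, endpoint, timeout, and tls) appear at the
+// top level of this receiver's YAML configuration.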
+type Config struct { + scraperhelper.ScraperControllerSettings `mapstructure:",squash"` + confighttp.HTTPClientSettings `mapstructure:",squash"` + Username string `mapstructure:"username"` + Password string `mapstructure:"password"` + Metrics metadata.MetricsSettings `mapstructure:"metrics"` +} + +// Validate validates the configuration by checking for missing or invalid fields +func (cfg *Config) Validate() error { + var err error + if cfg.Username == "" { + err = multierr.Append(err, errMissingUsername) + } + + if cfg.Password == "" { + err = multierr.Append(err, errMissingPassword) + } + + _, parseErr := url.Parse(cfg.Endpoint) + if parseErr != nil { + wrappedErr := fmt.Errorf("%s: %w", errInvalidEndpoint.Error(), parseErr) + err = multierr.Append(err, wrappedErr) + } + + return err +} diff --git a/receiver/riakreceiver/config_test.go b/receiver/riakreceiver/config_test.go new file mode 100644 index 000000000000..1f878b6f0ba7 --- /dev/null +++ b/receiver/riakreceiver/config_test.go @@ -0,0 +1,109 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package riakreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/riakreceiver" + +import ( + "errors" + "fmt" + "testing" + + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/config/confighttp" + "go.uber.org/multierr" +) + +func TestValidate(t *testing.T) { + testCases := []struct { + desc string + cfg *Config + expectedErr error + }{ + { + desc: "missing username, password, and invalid endpoint", + cfg: &Config{ + HTTPClientSettings: confighttp.HTTPClientSettings{ + Endpoint: "invalid://endpoint: 12efg", + }, + }, + expectedErr: multierr.Combine( + errMissingUsername, + errMissingPassword, + fmt.Errorf("%s: %w", errInvalidEndpoint, errors.New(`parse "invalid://endpoint: 12efg": invalid port ": 12efg" after host`)), + ), + }, + { + desc: "missing password and invalid endpoint", + cfg: &Config{ + Username: "otelu", + HTTPClientSettings: confighttp.HTTPClientSettings{ + Endpoint: "invalid://endpoint: 12efg", + }, + }, + expectedErr: multierr.Combine( + errMissingPassword, + fmt.Errorf("%s: %w", errInvalidEndpoint, errors.New(`parse "invalid://endpoint: 12efg": invalid port ": 12efg" after host`)), + ), + }, + { + desc: "missing username and invalid endpoint", + cfg: &Config{ + Password: "otelp", + HTTPClientSettings: confighttp.HTTPClientSettings{ + Endpoint: "invalid://endpoint: 12efg", + }, + }, + expectedErr: multierr.Combine( + errMissingUsername, + fmt.Errorf("%s: %w", errInvalidEndpoint, errors.New(`parse "invalid://endpoint: 12efg": invalid port ": 12efg" after host`)), + ), + }, + { + desc: "invalid endpoint", + cfg: &Config{ + Username: "otelu", + Password: "otelp", + HTTPClientSettings: confighttp.HTTPClientSettings{ + Endpoint: "invalid://endpoint: 12efg", + }, + }, + expectedErr: multierr.Combine( + fmt.Errorf("%s: %w", errInvalidEndpoint, errors.New(`parse "invalid://endpoint: 12efg": invalid port ": 12efg" 
after host`)), + ), + }, + { + desc: "valid config", + cfg: &Config{ + Username: "otelu", + Password: "otelp", + HTTPClientSettings: confighttp.HTTPClientSettings{ + Endpoint: defaultEndpoint, + }, + }, + expectedErr: nil, + }, + } + + for _, tc := range testCases { + t.Run(tc.desc, func(t *testing.T) { + actualErr := tc.cfg.Validate() + if tc.expectedErr != nil { + require.EqualError(t, actualErr, tc.expectedErr.Error()) + } else { + require.NoError(t, actualErr) + } + + }) + } +} diff --git a/receiver/riakreceiver/doc.go b/receiver/riakreceiver/doc.go new file mode 100644 index 000000000000..a44e79193f66 --- /dev/null +++ b/receiver/riakreceiver/doc.go @@ -0,0 +1,17 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:generate mdatagen --experimental-gen metadata.yaml + +package riakreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/riakreceiver" diff --git a/receiver/riakreceiver/documentation.md b/receiver/riakreceiver/documentation.md new file mode 100644 index 000000000000..9f6acf5413ba --- /dev/null +++ b/receiver/riakreceiver/documentation.md @@ -0,0 +1,38 @@ +[comment]: <> (Code generated by mdatagen. DO NOT EDIT.) + +# riakreceiver + +## Metrics + +These are the metrics available for this scraper. + +| Name | Description | Unit | Type | Attributes | +| ---- | ----------- | ---- | ---- | ---------- | +| **riak.memory.limit** | The amount of memory allocated to the node. | By | Sum(Int) |
|
+| **riak.node.operation.count** | The number of operations performed by the node. | {operation} | Sum(Int) | • request |
+| **riak.node.operation.time.mean** | The mean time between request and response for operations performed by the node over the last minute. | us | Gauge(Int) | • request |
+| **riak.node.read_repair.count** | The number of read repairs performed by the node. | {read_repair} | Sum(Int) | |
+| **riak.vnode.index.operation.count** | The number of index operations performed by vnodes on the node. | {operation} | Sum(Int) | • operation |
+| **riak.vnode.operation.count** | The number of operations performed by vnodes on the node. | {operation} | Sum(Int) | • request
| + +**Highlighted metrics** are emitted by default. Other metrics are optional and not emitted by default. +Any metric can be enabled or disabled with the following scraper configuration: + +```yaml +metrics: + : + enabled: +``` + +## Resource attributes + +| Name | Description | Type | +| ---- | ----------- | ---- | +| riak.node.name | The name this node uses to identify itself. | String | + +## Metric attributes + +| Name | Description | +| ---- | ----------- | +| operation | The operation type for index operations. | +| request | The request operation type. | diff --git a/receiver/riakreceiver/factory.go b/receiver/riakreceiver/factory.go new file mode 100644 index 000000000000..11ca087a1a60 --- /dev/null +++ b/receiver/riakreceiver/factory.go @@ -0,0 +1,70 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package riakreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/riakreceiver" + +import ( + "context" + "errors" + "time" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config" + "go.opentelemetry.io/collector/config/confighttp" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/receiver/scraperhelper" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/riakreceiver/internal/metadata" +) + +const typeStr = "riak" + +var errConfigNotRiak = errors.New("config was not a Riak receiver config") + +// NewFactory creates a new receiver factory +func NewFactory() component.ReceiverFactory { + return component.NewReceiverFactory( + typeStr, + createDefaultConfig, + component.WithMetricsReceiver(createMetricsReceiver)) +} + +func createDefaultConfig() config.Receiver { + return &Config{ + ScraperControllerSettings: scraperhelper.ScraperControllerSettings{ + ReceiverSettings: config.NewReceiverSettings(config.NewComponentID(typeStr)), + CollectionInterval: 10 * time.Second, + }, + HTTPClientSettings: confighttp.HTTPClientSettings{ + Endpoint: defaultEndpoint, + Timeout: 10 * time.Second, + }, + Metrics: metadata.DefaultMetricsSettings(), + } +} + +func createMetricsReceiver(ctx context.Context, params component.ReceiverCreateSettings, rConf config.Receiver, consumer consumer.Metrics) (component.MetricsReceiver, error) { + cfg, ok := rConf.(*Config) + if !ok { + return nil, errConfigNotRiak + } + + riakScraper := newScraper(params.Logger, cfg, params.TelemetrySettings) + scraper, err := scraperhelper.NewScraper(typeStr, riakScraper.scrape, scraperhelper.WithStart(riakScraper.start)) + if err != nil { + return nil, err + } + + return scraperhelper.NewScraperControllerReceiver(&cfg.ScraperControllerSettings, params, consumer, scraperhelper.AddScraper(scraper)) +} diff --git a/receiver/riakreceiver/factory_test.go b/receiver/riakreceiver/factory_test.go new file mode 100644 index 000000000000..15430f5ea892 --- /dev/null +++ b/receiver/riakreceiver/factory_test.go @@ -0,0 +1,96 @@ +// Copyright The OpenTelemetry Authors 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package riakreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/riakreceiver" + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config" + "go.opentelemetry.io/collector/config/confighttp" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/receiver/scraperhelper" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/riakreceiver/internal/metadata" +) + +func TestNewFactory(t *testing.T) { + testCases := []struct { + desc string + testFunc func(*testing.T) + }{ + { + desc: "creates a new factory with correct type", + testFunc: func(t *testing.T) { + factory := NewFactory() + require.EqualValues(t, typeStr, factory.Type()) + }, + }, + { + desc: "creates a new factory with valid default config", + testFunc: func(t *testing.T) { + factory := NewFactory() + + var expectedCfg config.Receiver = &Config{ + ScraperControllerSettings: scraperhelper.ScraperControllerSettings{ + ReceiverSettings: config.NewReceiverSettings(config.NewComponentID(typeStr)), + CollectionInterval: 10 * time.Second, + }, + HTTPClientSettings: confighttp.HTTPClientSettings{ + Endpoint: defaultEndpoint, + Timeout: 10 * time.Second, + }, + Metrics: metadata.DefaultMetricsSettings(), + } + + require.Equal(t, expectedCfg, factory.CreateDefaultConfig()) + }, + }, + { + desc: "creates a new factory and CreateMetricReceiver returns no error", + testFunc: func(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig() + _, err := factory.CreateMetricsReceiver( + context.Background(), + componenttest.NewNopReceiverCreateSettings(), + cfg, + consumertest.NewNop(), + ) + require.NoError(t, err) + }, + }, + { + desc: "creates a new factory and CreateMetricReceiver returns error with incorrect config", + testFunc: func(t *testing.T) { + factory := NewFactory() + _, err := factory.CreateMetricsReceiver( + context.Background(), + componenttest.NewNopReceiverCreateSettings(), + nil, + consumertest.NewNop(), + ) + require.ErrorIs(t, err, errConfigNotRiak) + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.desc, tc.testFunc) + } +} diff --git a/receiver/riakreceiver/go.mod b/receiver/riakreceiver/go.mod new file mode 100644 index 000000000000..52dbad5f9753 --- /dev/null +++ b/receiver/riakreceiver/go.mod @@ -0,0 +1,77 @@ +module github.com/open-telemetry/opentelemetry-collector-contrib/receiver/riakreceiver + +go 1.17 + +require ( + go.opentelemetry.io/collector v0.48.0 + go.opentelemetry.io/collector/model v0.48.0 + go.uber.org/multierr v1.8.0 + go.uber.org/zap v1.21.0 +) + +require github.com/stretchr/testify v1.7.1 + +require ( + github.com/open-telemetry/opentelemetry-collector-contrib/internal/scrapertest v0.47.0 + github.com/testcontainers/testcontainers-go v0.12.0 +) + +require 
( + github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect + github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3 // indirect + github.com/Microsoft/hcsshim v0.8.16 // indirect + github.com/cenkalti/backoff v2.2.1+incompatible // indirect + github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68 // indirect + github.com/containerd/containerd v1.5.0-beta.4 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/docker/distribution v2.7.1+incompatible // indirect + github.com/docker/docker v20.10.11+incompatible // indirect + github.com/docker/go-connections v0.4.0 // indirect + github.com/docker/go-units v0.4.0 // indirect + github.com/felixge/httpsnoop v1.0.2 // indirect + github.com/fsnotify/fsnotify v1.5.1 // indirect + github.com/go-logr/logr v1.2.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/protobuf v1.5.2 // indirect + github.com/golang/snappy v0.0.4 // indirect + github.com/google/uuid v1.3.0 // indirect + github.com/klauspost/compress v1.15.1 // indirect + github.com/knadh/koanf v1.4.0 // indirect + github.com/kr/text v0.2.0 // indirect + github.com/magiconair/properties v1.8.6 // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/mitchellh/mapstructure v1.4.3 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/sys/mount v0.2.0 // indirect + github.com/moby/sys/mountinfo v0.5.0 // indirect + github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 // indirect + github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.0.1 // indirect + github.com/opencontainers/runc v1.0.2 // indirect + github.com/pelletier/go-toml v1.9.4 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/rogpeppe/go-internal v1.6.1 // indirect + github.com/rs/cors v1.8.2 // indirect + github.com/sirupsen/logrus v1.8.1 // indirect + github.com/spf13/cast v1.4.1 // indirect + github.com/stretchr/objx v0.2.0 // indirect + go.opencensus.io v0.23.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 // indirect + go.opentelemetry.io/otel v1.6.1 // indirect + go.opentelemetry.io/otel/metric v0.28.0 // indirect + go.opentelemetry.io/otel/trace v1.6.1 // indirect + go.uber.org/atomic v1.9.0 // indirect + golang.org/x/net v0.0.0-20211108170745-6635138e15ea // indirect + golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect + golang.org/x/text v0.3.7 // indirect + google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect + google.golang.org/grpc v1.45.0 // indirect + google.golang.org/protobuf v1.28.0 // indirect + gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect +) diff --git a/receiver/riakreceiver/go.sum b/receiver/riakreceiver/go.sum new file mode 100644 index 
000000000000..a74d734ddc1d --- /dev/null +++ b/receiver/riakreceiver/go.sum @@ -0,0 +1,1197 @@ +bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +code.cloudfoundry.org/bytefmt v0.0.0-20190710193110-1eb035ffe2b6/go.mod h1:wN/zk7mhREp/oviagqUXY3EwuHhWyOvAdsn5Y4CzOrc= +contrib.go.opencensus.io/exporter/prometheus v0.4.0/go.mod h1:o7cosnyfuPVK0tB8q0QmaQNhGnptITnPQB+z1+qeFB0= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/go-ansiterm 
v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= +github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= +github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/Flaque/filet v0.0.0-20201012163910-45f684403088 h1:PnnQln5IGbhLeJOi6hVs+lCeF+B1dRfFKPGXUAez0Ww= +github.com/Flaque/filet v0.0.0-20201012163910-45f684403088/go.mod h1:TK+jB3mBs+8ZMWhU5BqZKnZWJ1MrLo8etNVg51ueTBo= +github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= +github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= +github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= +github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= +github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= +github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3 h1:mw6pDQqv38/WGF1cO/jF5t/jyAJ2yi7CmtFLLO5tGFI= +github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= +github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8= +github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg= +github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= +github.com/Microsoft/hcsshim v0.8.16 h1:8/auA4LFIZFTGrqfKhGBSXwM6/4X1fHa/xniyEHu8ac= +github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= +github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod 
h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= +github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= +github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= +github.com/aws/aws-sdk-go-v2/config v1.8.3/go.mod h1:4AEiLtAb8kLs7vgw2ZV3p2VZ1+hBavOc84hqxVNpCyw= +github.com/aws/aws-sdk-go-v2/credentials v1.4.3/go.mod h1:FNNC6nQZQUuyhq5aE5c7ata8o9e4ECGmS4lAXC7o1mQ= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.6.0/go.mod h1:gqlclDEZp4aqJOancXK6TN24aKhT0W0Ae9MHk3wzTMM= +github.com/aws/aws-sdk-go-v2/internal/ini v1.2.4/go.mod h1:ZcBrrI3zBKlhGFNYWvju0I3TR93I7YIgAfy82Fh4lcQ= +github.com/aws/aws-sdk-go-v2/service/appconfig v1.4.2/go.mod h1:FZ3HkCe+b10uFZZkFdvf98LHW21k49W8o8J366lqVKY= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.2/go.mod h1:72HRZDLMtmVQiLG2tLfQcaWLCssELvGl+Zf2WVxMmR8= +github.com/aws/aws-sdk-go-v2/service/sso v1.4.2/go.mod h1:NBvT9R1MEF+Ud6ApJKM0G+IkPchKS7p7c2YPKwHmBOk= +github.com/aws/aws-sdk-go-v2/service/sts v1.7.2/go.mod h1:8EzeIqfWt2wWT4rJVu3f21TfrhJ8AEMzVybRNSb/b4g= +github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= +github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= 
+github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= +github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= +github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= +github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= +github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= +github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= +github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= +github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= +github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg= +github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc= +github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= +github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= 
+github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE= +github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU= +github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= +github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E= +github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= +github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI= +github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= +github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM= +github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= +github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= +github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68 h1:hkGVFjz+plgr5UfxZUTPFbUFIF/Km6/s+RVRIRHLrrY= +github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= +github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= +github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw= +github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= +github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod 
h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ= +github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU= +github.com/containerd/containerd v1.5.0-beta.4 h1:zjz4MOAOFgdBlwid2nNUlJ3YLpVi/97L36lfMYJex60= +github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI= +github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo= +github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y= +github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e h1:6JKvHHt396/qabvMhnhUZvWaHZzfVfldxE60TK8YLhg= +github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ= +github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= +github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= +github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= +github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU= +github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= +github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= +github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g= +github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= +github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0= +github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA= +github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow= +github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod 
h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c= +github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= +github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= +github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= +github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= +github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= +github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk= +github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg= +github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw= +github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y= +github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM= +github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc= +github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4= +github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= +github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod 
h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw= +github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= +github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ= +github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= +github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8= +github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= +github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= +github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= +github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= +github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v20.10.11+incompatible h1:OqzI/g/W54LczvhnccGqniFoQghHx3pklbLuhfXpqGo= +github.com/docker/docker v20.10.11+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= +github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= +github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod 
h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= +github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/felixge/httpsnoop v1.0.2 h1:+nS9g82KMXccJ/wp0zyRW9ZBHFETmMGtkk+2CTTrW4o= +github.com/felixge/httpsnoop v1.0.2/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/frankban/quicktest v1.4.0/go.mod h1:36zfPVQyHxymz4cH7wlDmVwDrJuljRB60qkgn7rorfQ= +github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= +github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= +github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= +github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= +github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= +github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/ghodss/yaml v1.0.0/go.mod 
h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-redis/redis v6.15.9+incompatible h1:K0pv1D7EQUjfyoMql+r/jZqCLizCGKFlFgcHWWmHQjg= +github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= +github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= +github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod 
h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= +github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU= +github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf 
v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof 
v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= +github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= +github.com/hashicorp/go-hclog v0.8.0/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod 
h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= +github.com/hashicorp/go-retryablehttp v0.5.4/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/vault/api v1.0.4/go.mod h1:gDcqh3WGcR1cpF5AJz/B1UFheUEneMoIospckxBxk6Q= +github.com/hashicorp/vault/sdk v0.1.13/go.mod h1:B+hVj7TpuQY1Y/GPbCpffmgd+tSEwvhkWnjtSYCaS2M= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= +github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= +github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jpillora/backoff v1.0.0/go.mod 
h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.14.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= +github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= +github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod 
h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= +github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= +github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo= +github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= +github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.3 h1:OVowDSCllw/YjdLkam3/sm7wEtOy59d8ndGgCcyj8cs= +github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= 
+github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/sys/mount v0.2.0 h1:WhCW5B355jtxndN5ovugJlMFJawbUODuW8fSnEH6SSM= +github.com/moby/sys/mount v0.2.0/go.mod h1:aAivFE2LB3W4bACsUXChRHQ0qKWsetY4Y9V7sxOougM= +github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= +github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= +github.com/moby/sys/mountinfo v0.5.0 h1:2Ks8/r6lopsxWi9m58nlwjaeSzUX9iiL1vj5qB/9ObI= +github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= +github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= +github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= +github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 h1:rzf0wL0CHVc8CEsgyygG0Mn9CNCCPZqOPaz8RiiHYQk= +github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c h1:nXxl5PrvVm2L/wCy8dQu6DMTwH4oIuGN8GJDAlqDdVE= +github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/mostynb/go-grpc-compression v1.1.16/go.mod h1:xxa6UoYynYS2h+5HB/Hglu81iYAp87ARaNmhhwi0s1s= +github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= +github.com/npillmayer/nestext v0.1.3/go.mod h1:h2lrijH8jpicr25dFY+oAJLyzlya6jhnuG+zWp9L0Uk= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0/go.mod 
h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/scrapertest v0.47.0 h1:61MPuKne5R2/w54YZjI/S1Iw+X7N7rsL3CZ/EDEWdDk= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/scrapertest v0.47.0/go.mod h1:lW6MaaOr/PMf2Tno2WcwHJBkEJYsmhcosVqib/JFVI8= +github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= +github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= +github.com/opencontainers/runc v1.0.2 h1:opHZMaswlyxz1OuGpBE53Dwe4/xF7EZTY0A2L/FpCOg= +github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= +github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.2/go.mod 
h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= +github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= +github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= +github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= +github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM= +github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/pierrec/cmdflag v0.0.2/go.mod h1:a3zKGZ3cdQUfxjd0RGMLZr8xI3nvpJOB+m6o/1X5BmU= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pierrec/lz4/v3 v3.3.4/go.mod h1:280XNCGS8jAcG++AHdd6SeWnzyJ1w9oow2vbORyey8Q= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= +github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= 
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.28.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/statsd_exporter v0.21.0/go.mod h1:rbT83sZq2V+p73lHhPZfMc3MLCHmSHelCh9hSGYNLTQ= +github.com/rhnvrm/simples3 v0.6.1/go.mod h1:Y+3vYm2V7Y4VijFoJHHTrja6OgPrJ2cBti8dPGkC3sA= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= +github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= 
+github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/schollz/progressbar/v2 v2.13.2/go.mod h1:6YZjqdthH6SCZKv2rqGryrxPtfmRB/DWZxSMfCXPyD8= +github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= +github.com/shirou/gopsutil/v3 v3.22.2/go.mod h1:WapW1AOOPlHyXr+yOyw3uYx36enocrtSoSBy0L5vUHY= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= +github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= 
+github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= +github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= +github.com/testcontainers/testcontainers-go v0.12.0 h1:SK0NryGHIx7aifF6YqReORL18aGAA4bsDPtikDVCEyg= +github.com/testcontainers/testcontainers-go v0.12.0/go.mod h1:SIndOQXZng0IW8iWU1Js0ynrfZ8xcxrTtDfF6rD2pxs= +github.com/tklauser/go-sysconf v0.3.9/go.mod h1:11DU/5sG7UexIrp/O6g35hrWzu0JxlwQ3LSFUzyeuhs= +github.com/tklauser/numcpus v0.3.0/go.mod h1:yFGUr7TUHQRAhyqBcEg0Ge34zDBAsIvJJcyE6boqnA8= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= +github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= +github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= +github.com/vishvananda/netns 
v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= +github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= +github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= +github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= +github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= +go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= +go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= +go.opentelemetry.io/collector/model v0.47.0/go.mod h1:tyZ1XdPtljZ9I09pJGcz5ktV9G1AAZ/HDmf6YOMHebc= +go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= +go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0/go.mod h1:SY9qHHUES6W3oZnO1H2W8NvsSovIoXRg/A1AH9px8+I= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= 
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0/go.mod h1:PFmBsWbldL1kiWZk9+0LBZz2brhByaGsvp6pRICMlPE= +go.opentelemetry.io/contrib/zpages v0.31.0/go.mod h1:CAB55C1K7YhinQfNNIdNLgJJ+dVRlb6zQpbGQjeIDf8= +go.opentelemetry.io/otel v1.6.0/go.mod h1:bfJD2DZVw0LBxghOTlgnlI0CV3hLDu9XF/QKOUXMTQQ= +go.opentelemetry.io/otel v1.6.1 h1:6r1YrcTenBvYa1x491d0GGpTVBsNECmrc/K6b+zDeis= +go.opentelemetry.io/otel v1.6.1/go.mod h1:blzUabWHkX6LJewxvadmzafgh/wnvBSDBdOuwkAtrWQ= +go.opentelemetry.io/otel/exporters/prometheus v0.28.0/go.mod h1:nN2uGmk/rLmcbPTaZakIMqYH2Q0T8V1sOnKOHe/HLH0= +go.opentelemetry.io/otel/metric v0.28.0 h1:o5YNh+jxACMODoAo1bI7OES0RUW4jAMae0Vgs2etWAQ= +go.opentelemetry.io/otel/metric v0.28.0/go.mod h1:TrzsfQAmQaB1PDcdhBauLMk7nyyg9hm+GoQq/ekE9Iw= +go.opentelemetry.io/otel/sdk v1.6.0/go.mod h1:PjLRUfDsoPy0zl7yrDGSUqjj43tL7rEtFdCEiGlxXRM= +go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.1/go.mod h1:IVYrddmFZ+eJqu2k38qD3WezFR2pymCzm8tdxyh3R4E= +go.opentelemetry.io/otel/sdk/metric v0.28.0/go.mod h1:DqJmT0ovBgoW6TJ8CAQyTnwxZPIp3KWtCiDDZ1uHAzU= +go.opentelemetry.io/otel/trace v1.6.0/go.mod h1:qs7BrU5cZ8dXQHBGxHMOxwME/27YH2qEp4/+tZLLwJE= +go.opentelemetry.io/otel/trace v1.6.1 h1:f8c93l5tboBYZna1nWk0W9DYyMzJXDWdZcJZ0Kb400U= +go.opentelemetry.io/otel/trace v1.6.1/go.mod h1:RkFRM1m0puWIq10oxImnGEduNBzxiN7TXluRBtE+5j0= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= +go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI= +go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= +go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8= +go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= +golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto 
v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod 
h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211108170745-6635138e15ea h1:FosBMXtOc8Tp9Hbo4ltl1WJSrTVewZU8MPnTPY2HdH8= +golang.org/x/net v0.0.0-20211108170745-6635138e15ea/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys 
v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211109184856-51b60fd695b3/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220111092808-5a964db01320/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools 
v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools 
v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod 
h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto 
v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod 
h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= +gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= +gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= +k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= +k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= +k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= +k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM= +k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= +k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= +k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= +k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/yaml 
v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
+sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
diff --git a/receiver/riakreceiver/integration_test.go b/receiver/riakreceiver/integration_test.go
new file mode 100644
index 000000000000..16f0365e1933
--- /dev/null
+++ b/receiver/riakreceiver/integration_test.go
@@ -0,0 +1,92 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build integration
+// +build integration
+
+package riakreceiver
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"path/filepath"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+	"github.com/testcontainers/testcontainers-go"
+	"github.com/testcontainers/testcontainers-go/wait"
+	"go.opentelemetry.io/collector/component/componenttest"
+	"go.opentelemetry.io/collector/consumer/consumertest"
+
+	"github.com/open-telemetry/opentelemetry-collector-contrib/internal/scrapertest"
+	"github.com/open-telemetry/opentelemetry-collector-contrib/internal/scrapertest/golden"
+)
+
+func riakContainer(t *testing.T) testcontainers.Container {
+	ctx := context.Background()
+	req := testcontainers.ContainerRequest{
+		FromDockerfile: testcontainers.FromDockerfile{
+			Context:    filepath.Join("testdata", "integration"),
+			Dockerfile: "Dockerfile.riak",
+		},
+		ExposedPorts: []string{"8098:8098"},
+		WaitingFor:   wait.ForListeningPort("8098"),
+	}
+
+	require.NoError(t, req.Validate())
+
+	container, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{
+		ContainerRequest: req,
+		Started:          true,
+	})
+	require.NoError(t, err)
+	// Give Riak time to finish booting after the port opens so its stats endpoint is populated.
+	time.Sleep(time.Second * 6)
+	return container
+}
+
+func TestRiakIntegration(t *testing.T) {
+	container := riakContainer(t)
+	defer func() {
+		require.NoError(t, container.Terminate(context.Background()))
+	}()
+	hostname, err := container.Host(context.Background())
+	require.NoError(t, err)
+
+	expectedFile := filepath.Join("testdata", "integration", "expected.json")
+	expectedMetrics, err := golden.ReadMetrics(expectedFile)
+	require.NoError(t, err)
+
+	f := NewFactory()
+	cfg := f.CreateDefaultConfig().(*Config)
+	cfg.ScraperControllerSettings.CollectionInterval = 100 * time.Millisecond
+	cfg.Endpoint = fmt.Sprintf("http://%s", net.JoinHostPort(hostname, "8098"))
+
+	consumer := new(consumertest.MetricsSink)
+	settings := componenttest.NewNopReceiverCreateSettings()
+
+	rcvr, err := f.CreateMetricsReceiver(context.Background(), settings, cfg, consumer)
+	require.NoError(t, err, "failed creating metrics receiver")
+	require.NoError(t, rcvr.Start(context.Background(), componenttest.NewNopHost()))
+	require.Eventuallyf(t, func() bool {
+		return len(consumer.AllMetrics()) > 0
+	}, 2*time.Minute, 1*time.Second, "failed to receive more than 0 metrics")
+	require.NoError(t, rcvr.Shutdown(context.Background()))
+
+	actualMetrics := consumer.AllMetrics()[0]
+
+	require.NoError(t, scrapertest.CompareMetrics(expectedMetrics, actualMetrics, scrapertest.IgnoreMetricValues()))
+}
diff --git
a/receiver/riakreceiver/internal/metadata/generated_metrics_v2.go b/receiver/riakreceiver/internal/metadata/generated_metrics_v2.go new file mode 100644 index 000000000000..2211d72eef58 --- /dev/null +++ b/receiver/riakreceiver/internal/metadata/generated_metrics_v2.go @@ -0,0 +1,529 @@ +// Code generated by mdatagen. DO NOT EDIT. + +package metadata + +import ( + "time" + + "go.opentelemetry.io/collector/model/pdata" +) + +// MetricSettings provides common settings for a particular metric. +type MetricSettings struct { + Enabled bool `mapstructure:"enabled"` +} + +// MetricsSettings provides settings for riakreceiver metrics. +type MetricsSettings struct { + RiakMemoryLimit MetricSettings `mapstructure:"riak.memory.limit"` + RiakNodeOperationCount MetricSettings `mapstructure:"riak.node.operation.count"` + RiakNodeOperationTimeMean MetricSettings `mapstructure:"riak.node.operation.time.mean"` + RiakNodeReadRepairCount MetricSettings `mapstructure:"riak.node.read_repair.count"` + RiakVnodeIndexOperationCount MetricSettings `mapstructure:"riak.vnode.index.operation.count"` + RiakVnodeOperationCount MetricSettings `mapstructure:"riak.vnode.operation.count"` +} + +func DefaultMetricsSettings() MetricsSettings { + return MetricsSettings{ + RiakMemoryLimit: MetricSettings{ + Enabled: true, + }, + RiakNodeOperationCount: MetricSettings{ + Enabled: true, + }, + RiakNodeOperationTimeMean: MetricSettings{ + Enabled: true, + }, + RiakNodeReadRepairCount: MetricSettings{ + Enabled: true, + }, + RiakVnodeIndexOperationCount: MetricSettings{ + Enabled: true, + }, + RiakVnodeOperationCount: MetricSettings{ + Enabled: true, + }, + } +} + +type metricRiakMemoryLimit struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills riak.memory.limit metric with initial data. +func (m *metricRiakMemoryLimit) init() { + m.data.SetName("riak.memory.limit") + m.data.SetDescription("The amount of memory allocated to the node.") + m.data.SetUnit("By") + m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) +} + +func (m *metricRiakMemoryLimit) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { + if !m.settings.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntVal(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricRiakMemoryLimit) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricRiakMemoryLimit) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricRiakMemoryLimit(settings MetricSettings) metricRiakMemoryLimit { + m := metricRiakMemoryLimit{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricRiakNodeOperationCount struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. 
+ capacity int // max observed number of data points added to the metric. +} + +// init fills riak.node.operation.count metric with initial data. +func (m *metricRiakNodeOperationCount) init() { + m.data.SetName("riak.node.operation.count") + m.data.SetDescription("The number of operations performed by the node.") + m.data.SetUnit("{operation}") + m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricRiakNodeOperationCount) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, requestAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntVal(val) + dp.Attributes().Insert(A.Request, pdata.NewValueString(requestAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricRiakNodeOperationCount) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricRiakNodeOperationCount) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricRiakNodeOperationCount(settings MetricSettings) metricRiakNodeOperationCount { + m := metricRiakNodeOperationCount{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricRiakNodeOperationTimeMean struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills riak.node.operation.time.mean metric with initial data. +func (m *metricRiakNodeOperationTimeMean) init() { + m.data.SetName("riak.node.operation.time.mean") + m.data.SetDescription("The mean time between request and response for operations performed by the node over the last minute.") + m.data.SetUnit("us") + m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricRiakNodeOperationTimeMean) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, requestAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntVal(val) + dp.Attributes().Insert(A.Request, pdata.NewValueString(requestAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricRiakNodeOperationTimeMean) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricRiakNodeOperationTimeMean) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricRiakNodeOperationTimeMean(settings MetricSettings) metricRiakNodeOperationTimeMean { + m := metricRiakNodeOperationTimeMean{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricRiakNodeReadRepairCount struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills riak.node.read_repair.count metric with initial data. +func (m *metricRiakNodeReadRepairCount) init() { + m.data.SetName("riak.node.read_repair.count") + m.data.SetDescription("The number of read repairs performed by the node.") + m.data.SetUnit("{read_repair}") + m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) +} + +func (m *metricRiakNodeReadRepairCount) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { + if !m.settings.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntVal(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricRiakNodeReadRepairCount) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricRiakNodeReadRepairCount) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricRiakNodeReadRepairCount(settings MetricSettings) metricRiakNodeReadRepairCount { + m := metricRiakNodeReadRepairCount{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricRiakVnodeIndexOperationCount struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills riak.vnode.index.operation.count metric with initial data. 
+func (m *metricRiakVnodeIndexOperationCount) init() { + m.data.SetName("riak.vnode.index.operation.count") + m.data.SetDescription("The number of index operations performed by vnodes on the node.") + m.data.SetUnit("{operation}") + m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricRiakVnodeIndexOperationCount) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, operationAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntVal(val) + dp.Attributes().Insert(A.Operation, pdata.NewValueString(operationAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricRiakVnodeIndexOperationCount) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricRiakVnodeIndexOperationCount) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricRiakVnodeIndexOperationCount(settings MetricSettings) metricRiakVnodeIndexOperationCount { + m := metricRiakVnodeIndexOperationCount{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricRiakVnodeOperationCount struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills riak.vnode.operation.count metric with initial data. +func (m *metricRiakVnodeOperationCount) init() { + m.data.SetName("riak.vnode.operation.count") + m.data.SetDescription("The number of operations performed by vnodes on the node.") + m.data.SetUnit("{operation}") + m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricRiakVnodeOperationCount) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, requestAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntVal(val) + dp.Attributes().Insert(A.Request, pdata.NewValueString(requestAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricRiakVnodeOperationCount) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricRiakVnodeOperationCount) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricRiakVnodeOperationCount(settings MetricSettings) metricRiakVnodeOperationCount { + m := metricRiakVnodeOperationCount{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +// MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations +// required to produce metric representation defined in metadata and user settings. +type MetricsBuilder struct { + startTime pdata.Timestamp // start time that will be applied to all recorded data points. + metricsCapacity int // maximum observed number of metrics per resource. + resourceCapacity int // maximum observed number of resource attributes. + metricsBuffer pdata.Metrics // accumulates metrics data before emitting. + metricRiakMemoryLimit metricRiakMemoryLimit + metricRiakNodeOperationCount metricRiakNodeOperationCount + metricRiakNodeOperationTimeMean metricRiakNodeOperationTimeMean + metricRiakNodeReadRepairCount metricRiakNodeReadRepairCount + metricRiakVnodeIndexOperationCount metricRiakVnodeIndexOperationCount + metricRiakVnodeOperationCount metricRiakVnodeOperationCount +} + +// metricBuilderOption applies changes to default metrics builder. +type metricBuilderOption func(*MetricsBuilder) + +// WithStartTime sets startTime on the metrics builder. +func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { + return func(mb *MetricsBuilder) { + mb.startTime = startTime + } +} + +func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) *MetricsBuilder { + mb := &MetricsBuilder{ + startTime: pdata.NewTimestampFromTime(time.Now()), + metricsBuffer: pdata.NewMetrics(), + metricRiakMemoryLimit: newMetricRiakMemoryLimit(settings.RiakMemoryLimit), + metricRiakNodeOperationCount: newMetricRiakNodeOperationCount(settings.RiakNodeOperationCount), + metricRiakNodeOperationTimeMean: newMetricRiakNodeOperationTimeMean(settings.RiakNodeOperationTimeMean), + metricRiakNodeReadRepairCount: newMetricRiakNodeReadRepairCount(settings.RiakNodeReadRepairCount), + metricRiakVnodeIndexOperationCount: newMetricRiakVnodeIndexOperationCount(settings.RiakVnodeIndexOperationCount), + metricRiakVnodeOperationCount: newMetricRiakVnodeOperationCount(settings.RiakVnodeOperationCount), + } + for _, op := range options { + op(mb) + } + return mb +} + +// updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity. +func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) { + if mb.metricsCapacity < rm.ScopeMetrics().At(0).Metrics().Len() { + mb.metricsCapacity = rm.ScopeMetrics().At(0).Metrics().Len() + } + if mb.resourceCapacity < rm.Resource().Attributes().Len() { + mb.resourceCapacity = rm.Resource().Attributes().Len() + } +} + +// ResourceOption applies changes to provided resource. +type ResourceOption func(pdata.Resource) + +// WithRiakNodeName sets provided value as "riak.node.name" attribute for current resource. +func WithRiakNodeName(val string) ResourceOption { + return func(r pdata.Resource) { + r.Attributes().UpsertString("riak.node.name", val) + } +} + +// EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for +// recording another set of data points as part of another resource. 
This function can be helpful when one scraper +// needs to emit metrics from several resources. Otherwise calling this function is not required, +// just `Emit` function can be called instead. Resource attributes should be provided as ResourceOption arguments. +func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { + rm := pdata.NewResourceMetrics() + rm.Resource().Attributes().EnsureCapacity(mb.resourceCapacity) + for _, op := range ro { + op(rm.Resource()) + } + ils := rm.ScopeMetrics().AppendEmpty() + ils.Scope().SetName("otelcol/riakreceiver") + ils.Metrics().EnsureCapacity(mb.metricsCapacity) + mb.metricRiakMemoryLimit.emit(ils.Metrics()) + mb.metricRiakNodeOperationCount.emit(ils.Metrics()) + mb.metricRiakNodeOperationTimeMean.emit(ils.Metrics()) + mb.metricRiakNodeReadRepairCount.emit(ils.Metrics()) + mb.metricRiakVnodeIndexOperationCount.emit(ils.Metrics()) + mb.metricRiakVnodeOperationCount.emit(ils.Metrics()) + if ils.Metrics().Len() > 0 { + mb.updateCapacity(rm) + rm.MoveTo(mb.metricsBuffer.ResourceMetrics().AppendEmpty()) + } +} + +// Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for +// recording another set of metrics. This function will be responsible for applying all the transformations required to +// produce metric representation defined in metadata and user settings, e.g. delta or cumulative. +func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pdata.Metrics { + mb.EmitForResource(ro...) + metrics := pdata.NewMetrics() + mb.metricsBuffer.MoveTo(metrics) + return metrics +} + +// RecordRiakMemoryLimitDataPoint adds a data point to riak.memory.limit metric. +func (mb *MetricsBuilder) RecordRiakMemoryLimitDataPoint(ts pdata.Timestamp, val int64) { + mb.metricRiakMemoryLimit.recordDataPoint(mb.startTime, ts, val) +} + +// RecordRiakNodeOperationCountDataPoint adds a data point to riak.node.operation.count metric. +func (mb *MetricsBuilder) RecordRiakNodeOperationCountDataPoint(ts pdata.Timestamp, val int64, requestAttributeValue string) { + mb.metricRiakNodeOperationCount.recordDataPoint(mb.startTime, ts, val, requestAttributeValue) +} + +// RecordRiakNodeOperationTimeMeanDataPoint adds a data point to riak.node.operation.time.mean metric. +func (mb *MetricsBuilder) RecordRiakNodeOperationTimeMeanDataPoint(ts pdata.Timestamp, val int64, requestAttributeValue string) { + mb.metricRiakNodeOperationTimeMean.recordDataPoint(mb.startTime, ts, val, requestAttributeValue) +} + +// RecordRiakNodeReadRepairCountDataPoint adds a data point to riak.node.read_repair.count metric. +func (mb *MetricsBuilder) RecordRiakNodeReadRepairCountDataPoint(ts pdata.Timestamp, val int64) { + mb.metricRiakNodeReadRepairCount.recordDataPoint(mb.startTime, ts, val) +} + +// RecordRiakVnodeIndexOperationCountDataPoint adds a data point to riak.vnode.index.operation.count metric. +func (mb *MetricsBuilder) RecordRiakVnodeIndexOperationCountDataPoint(ts pdata.Timestamp, val int64, operationAttributeValue string) { + mb.metricRiakVnodeIndexOperationCount.recordDataPoint(mb.startTime, ts, val, operationAttributeValue) +} + +// RecordRiakVnodeOperationCountDataPoint adds a data point to riak.vnode.operation.count metric. +func (mb *MetricsBuilder) RecordRiakVnodeOperationCountDataPoint(ts pdata.Timestamp, val int64, requestAttributeValue string) { + mb.metricRiakVnodeOperationCount.recordDataPoint(mb.startTime, ts, val, requestAttributeValue) +} + +// Reset resets metrics builder to its initial state. 
It should be used when external metrics source is restarted, +// and metrics builder should update its startTime and reset it's internal state accordingly. +func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) { + mb.startTime = pdata.NewTimestampFromTime(time.Now()) + for _, op := range options { + op(mb) + } +} + +// Attributes contains the possible metric attributes that can be used. +var Attributes = struct { + // Operation (The operation type for index operations.) + Operation string + // Request (The request operation type.) + Request string +}{ + "operation", + "request", +} + +// A is an alias for Attributes. +var A = Attributes + +// AttributeOperation are the possible values that the attribute "operation" can have. +var AttributeOperation = struct { + Read string + Write string + Delete string +}{ + "read", + "write", + "delete", +} + +// AttributeRequest are the possible values that the attribute "request" can have. +var AttributeRequest = struct { + Put string + Get string +}{ + "put", + "get", +} diff --git a/receiver/riakreceiver/internal/mocks/client.go b/receiver/riakreceiver/internal/mocks/client.go new file mode 100644 index 000000000000..44a09ebdf5ed --- /dev/null +++ b/receiver/riakreceiver/internal/mocks/client.go @@ -0,0 +1,39 @@ +// Code generated by mockery v2.10.0. DO NOT EDIT. + +package mocks + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" + + model "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/riakreceiver/internal/model" +) + +// client is an autogenerated mock type for the client type +type MockClient struct { + mock.Mock +} + +// GetStats provides a mock function with given fields: ctx +func (_m *MockClient) GetStats(ctx context.Context) (*model.Stats, error) { + ret := _m.Called(ctx) + + var r0 *model.Stats + if rf, ok := ret.Get(0).(func(context.Context) *model.Stats); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*model.Stats) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/receiver/riakreceiver/internal/model/model.go b/receiver/riakreceiver/internal/model/model.go new file mode 100644 index 000000000000..c86de6188892 --- /dev/null +++ b/receiver/riakreceiver/internal/model/model.go @@ -0,0 +1,34 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package model // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/riakreceiver/internal/model"
+
+// Stats represents the set of stats scraped from the Riak stats API response
+type Stats struct {
+	// Identifiers
+	Node string `json:"nodename"`
+
+	// Metrics
+	NodeGets           int64 `json:"node_gets"`
+	NodePuts           int64 `json:"node_puts"`
+	NodeGetFsmTimeMean int64 `json:"node_get_fsm_time_mean"`
+	NodePutFsmTimeMean int64 `json:"node_put_fsm_time_mean"`
+	ReadRepairs        int64 `json:"read_repairs_total"`
+	MemAllocated       int64 `json:"mem_allocated"`
+	VnodeGets          int64 `json:"vnode_gets"`
+	VnodePuts          int64 `json:"vnode_puts"`
+	VnodeIndexDeletes  int64 `json:"vnode_index_deletes"`
+	VnodeIndexReads    int64 `json:"vnode_index_reads"`
+	VnodeIndexWrites   int64 `json:"vnode_index_writes"`
+}
diff --git a/receiver/riakreceiver/metadata.yaml b/receiver/riakreceiver/metadata.yaml
new file mode 100644
index 000000000000..9e6c2d6307e9
--- /dev/null
+++ b/receiver/riakreceiver/metadata.yaml
@@ -0,0 +1,71 @@
+name: riakreceiver
+
+resource_attributes:
+  riak.node.name:
+    description: The name this node uses to identify itself.
+    type: string
+
+attributes:
+  request:
+    description: The request operation type.
+    enum:
+      - put
+      - get
+  operation:
+    description: The operation type for index operations.
+    enum:
+      - read
+      - write
+      - delete
+
+metrics:
+  riak.node.operation.count:
+    description: The number of operations performed by the node.
+    unit: "{operation}"
+    sum:
+      monotonic: true
+      aggregation: cumulative
+      value_type: int
+    enabled: true
+    attributes: [request]
+  riak.node.operation.time.mean:
+    description: The mean time between request and response for operations performed by the node over the last minute.
+    unit: us
+    gauge:
+      value_type: int
+    enabled: true
+    attributes: [request]
+  riak.node.read_repair.count:
+    description: The number of read repairs performed by the node.
+    unit: "{read_repair}"
+    sum:
+      monotonic: true
+      aggregation: cumulative
+      value_type: int
+    enabled: true
+  riak.memory.limit:
+    description: The amount of memory allocated to the node.
+    unit: By
+    sum:
+      monotonic: false
+      aggregation: cumulative
+      value_type: int
+    enabled: true
+  riak.vnode.operation.count:
+    description: The number of operations performed by vnodes on the node.
+    unit: "{operation}"
+    sum:
+      monotonic: true
+      aggregation: cumulative
+      value_type: int
+    enabled: true
+    attributes: [request]
+  riak.vnode.index.operation.count:
+    description: The number of index operations performed by vnodes on the node.
+    unit: "{operation}"
+    sum:
+      monotonic: false
+      aggregation: cumulative
+      value_type: int
+    attributes: [operation]
+    enabled: true
diff --git a/receiver/riakreceiver/scraper.go b/receiver/riakreceiver/scraper.go
new file mode 100644
index 000000000000..eac5544d14c1
--- /dev/null
+++ b/receiver/riakreceiver/scraper.go
@@ -0,0 +1,102 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package riakreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/riakreceiver"
+
+import (
+	"context"
+	"errors"
+	"time"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/model/pdata"
+	"go.opentelemetry.io/collector/receiver/scrapererror"
+	"go.uber.org/zap"
+
+	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/riakreceiver/internal/metadata"
+	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/riakreceiver/internal/model"
+)
+
+var errClientNotInit = errors.New("client not initialized")
+
+// riakScraper handles scraping of Riak metrics
+type riakScraper struct {
+	logger   *zap.Logger
+	cfg      *Config
+	settings component.TelemetrySettings
+	client   client
+	mb       *metadata.MetricsBuilder
+}
+
+// newScraper creates a new scraper
+func newScraper(logger *zap.Logger, cfg *Config, settings component.TelemetrySettings) *riakScraper {
+	return &riakScraper{
+		logger:   logger,
+		cfg:      cfg,
+		settings: settings,
+		mb:       metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings()),
+	}
+}
+
+// start starts the scraper by creating a new HTTP client on the scraper
+func (r *riakScraper) start(ctx context.Context, host component.Host) (err error) {
+	r.client, err = newClient(r.cfg, host, r.settings, r.logger)
+	return
+}
+
+// scrape collects metrics from the Riak API
+func (r *riakScraper) scrape(ctx context.Context) (pdata.Metrics, error) {
+	// Validate we don't attempt to scrape without initializing the client
+	if r.client == nil {
+		return pdata.NewMetrics(), errClientNotInit
+	}
+
+	// Get stats for processing
+	stats, err := r.client.GetStats(ctx)
+	if err != nil {
+		return pdata.NewMetrics(), err
+	}
+
+	return r.collectStats(stats)
+}
+
+// collectStats converts the retrieved stats into metrics
+func (r *riakScraper) collectStats(stat *model.Stats) (pdata.Metrics, error) {
+	now := pdata.NewTimestampFromTime(time.Now())
+	var scrapeErrors scrapererror.ScrapeErrors
+	// scrape node.operation.count metric
+	r.mb.RecordRiakNodeOperationCountDataPoint(now, stat.NodeGets, metadata.AttributeRequest.Get)
+	r.mb.RecordRiakNodeOperationCountDataPoint(now, stat.NodePuts, metadata.AttributeRequest.Put)
+
+	// scrape node.operation.time.mean metric
+	r.mb.RecordRiakNodeOperationTimeMeanDataPoint(now, stat.NodeGetFsmTimeMean, metadata.AttributeRequest.Get)
+	r.mb.RecordRiakNodeOperationTimeMeanDataPoint(now, stat.NodePutFsmTimeMean, metadata.AttributeRequest.Put)
+
+	// scrape node.read_repair.count metric
+	r.mb.RecordRiakNodeReadRepairCountDataPoint(now, stat.ReadRepairs)
+
+	// scrape memory.limit metric
+	r.mb.RecordRiakMemoryLimitDataPoint(now, stat.MemAllocated)
+
+	// scrape vnode.operation.count metric
+	r.mb.RecordRiakVnodeOperationCountDataPoint(now, stat.VnodeGets, metadata.AttributeRequest.Get)
+	r.mb.RecordRiakVnodeOperationCountDataPoint(now, stat.VnodePuts, metadata.AttributeRequest.Put)
+
+	// scrape vnode.index.operation.count metric
+	r.mb.RecordRiakVnodeIndexOperationCountDataPoint(now, stat.VnodeIndexReads, metadata.AttributeOperation.Read)
+	r.mb.RecordRiakVnodeIndexOperationCountDataPoint(now, stat.VnodeIndexWrites, metadata.AttributeOperation.Write)
+	r.mb.RecordRiakVnodeIndexOperationCountDataPoint(now, stat.VnodeIndexDeletes, metadata.AttributeOperation.Delete)
+
+	return r.mb.Emit(metadata.WithRiakNodeName(stat.Node)), scrapeErrors.Combine()
+}
diff --git a/receiver/riakreceiver/scraper_test.go b/receiver/riakreceiver/scraper_test.go
new file mode 100644
index
000000000000..13a0d0de7d1d
--- /dev/null
+++ b/receiver/riakreceiver/scraper_test.go
@@ -0,0 +1,158 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package riakreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/riakreceiver"
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"path/filepath"
+	"testing"
+
+	"github.com/stretchr/testify/mock"
+	"github.com/stretchr/testify/require"
+	"go.opentelemetry.io/collector/component/componenttest"
+	"go.opentelemetry.io/collector/config/confighttp"
+	"go.opentelemetry.io/collector/config/configtls"
+	"go.opentelemetry.io/collector/model/pdata"
+	"go.uber.org/zap"
+
+	"github.com/open-telemetry/opentelemetry-collector-contrib/internal/scrapertest"
+	"github.com/open-telemetry/opentelemetry-collector-contrib/internal/scrapertest/golden"
+	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/riakreceiver/internal/mocks"
+	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/riakreceiver/internal/model"
+)
+
+func TestScraperStart(t *testing.T) {
+	testcases := []struct {
+		desc        string
+		scraper     *riakScraper
+		expectError bool
+	}{
+		{
+			desc: "Bad Config",
+			scraper: &riakScraper{
+				cfg: &Config{
+					HTTPClientSettings: confighttp.HTTPClientSettings{
+						Endpoint: defaultEndpoint,
+						TLSSetting: configtls.TLSClientSetting{
+							TLSSetting: configtls.TLSSetting{
+								CAFile: "/non/existent",
+							},
+						},
+					},
+				},
+				settings: componenttest.NewNopTelemetrySettings(),
+			},
+			expectError: true,
+		},
+		{
+			desc: "Valid Config",
+			scraper: &riakScraper{
+				cfg: &Config{
+					HTTPClientSettings: confighttp.HTTPClientSettings{
+						TLSSetting: configtls.TLSClientSetting{},
+						Endpoint:   defaultEndpoint,
+					},
+				},
+				settings: componenttest.NewNopTelemetrySettings(),
+			},
+			expectError: false,
+		},
+	}
+
+	for _, tc := range testcases {
+		t.Run(tc.desc, func(t *testing.T) {
+			err := tc.scraper.start(context.Background(), componenttest.NewNopHost())
+			if tc.expectError {
+				require.Error(t, err)
+			} else {
+				require.NoError(t, err)
+			}
+		})
+	}
+}
+
+func TestScraperScrape(t *testing.T) {
+	testCases := []struct {
+		desc              string
+		setupMockClient   func(t *testing.T) client
+		expectedMetricGen func(t *testing.T) pdata.Metrics
+		expectedErr       error
+	}{
+		{
+			desc: "Nil client",
+			setupMockClient: func(t *testing.T) client {
+				return nil
+			},
+			expectedMetricGen: func(t *testing.T) pdata.Metrics {
+				return pdata.NewMetrics()
+			},
+			expectedErr: errClientNotInit,
+		},
+		{
+			desc: "API Call Failure",
+			setupMockClient: func(t *testing.T) client {
+				mockClient := mocks.MockClient{}
+				mockClient.On("GetStats", mock.Anything).Return(nil, errors.New("some api error"))
+				return &mockClient
+			},
+			expectedMetricGen: func(t *testing.T) pdata.Metrics {
+				return pdata.NewMetrics()
+			},
+			expectedErr: errors.New("some api error"),
+		},
+		{
+			desc: "Successful Collection",
+			setupMockClient: func(t *testing.T) client {
mockClient := mocks.MockClient{} + // use helper function from client tests + data := loadAPIResponseData(t, statsAPIResponseFile) + var stats *model.Stats + err := json.Unmarshal(data, &stats) + require.NoError(t, err) + + mockClient.On("GetStats", mock.Anything).Return(stats, nil) + return &mockClient + }, + expectedMetricGen: func(t *testing.T) pdata.Metrics { + goldenPath := filepath.Join("testdata", "scraper", "expected.json") + expectedMetrics, err := golden.ReadMetrics(goldenPath) + require.NoError(t, err) + return expectedMetrics + }, + expectedErr: nil, + }, + } + + for _, tc := range testCases { + t.Run(tc.desc, func(t *testing.T) { + scraper := newScraper(zap.NewNop(), createDefaultConfig().(*Config), componenttest.NewNopTelemetrySettings()) + scraper.client = tc.setupMockClient(t) + actualMetrics, err := scraper.scrape(context.Background()) + + if tc.expectedErr == nil { + require.NoError(t, err) + } else { + require.EqualError(t, err, tc.expectedErr.Error()) + } + + expectedMetrics := tc.expectedMetricGen(t) + + err = scrapertest.CompareMetrics(expectedMetrics, actualMetrics) + require.NoError(t, err) + }) + } +} diff --git a/receiver/riakreceiver/testdata/apiresponses/get_stats_response.json b/receiver/riakreceiver/testdata/apiresponses/get_stats_response.json new file mode 100644 index 000000000000..45ea9dd48686 --- /dev/null +++ b/receiver/riakreceiver/testdata/apiresponses/get_stats_response.json @@ -0,0 +1,566 @@ +{ + "clusteraae_fsm_active": 0, + "clusteraae_fsm_create": 0, + "clusteraae_fsm_create_error": 0, + "connected_nodes": [], + "consistent_get_objsize_100": 0, + "consistent_get_objsize_95": 0, + "consistent_get_objsize_99": 0, + "consistent_get_objsize_mean": 0, + "consistent_get_objsize_median": 0, + "consistent_get_time_100": 0, + "consistent_get_time_95": 0, + "consistent_get_time_99": 0, + "consistent_get_time_mean": 0, + "consistent_get_time_median": 0, + "consistent_gets": 0, + "consistent_gets_total": 0, + "consistent_put_objsize_100": 0, + "consistent_put_objsize_95": 0, + "consistent_put_objsize_99": 0, + "consistent_put_objsize_mean": 0, + "consistent_put_objsize_median": 0, + "consistent_put_time_100": 0, + "consistent_put_time_95": 0, + "consistent_put_time_99": 0, + "consistent_put_time_mean": 0, + "consistent_put_time_median": 0, + "consistent_puts": 0, + "consistent_puts_total": 0, + "converge_delay_last": 0, + "converge_delay_max": 0, + "converge_delay_mean": 0, + "converge_delay_min": 0, + "coord_local_soft_loaded_total": 0, + "coord_local_unloaded_total": 0, + "coord_redir_least_loaded_total": 0, + "coord_redir_loaded_local_total": 0, + "coord_redir_unloaded_total": 0, + "coord_redirs_total": 0, + "counter_actor_counts_100": 0, + "counter_actor_counts_95": 0, + "counter_actor_counts_99": 0, + "counter_actor_counts_mean": 0, + "counter_actor_counts_median": 0, + "cpu_avg1": 95, + "cpu_avg15": 131, + "cpu_avg5": 123, + "cpu_nprocs": 1067, + "dropped_vnode_requests_total": 0, + "executing_mappers": 0, + "gossip_received": 0, + "handoff_timeouts": 0, + "hll_bytes": 0, + "hll_bytes_100": 0, + "hll_bytes_95": 0, + "hll_bytes_99": 0, + "hll_bytes_mean": 0, + "hll_bytes_median": 0, + "hll_bytes_total": 0, + "ignored_gossip_total": 0, + "index_fsm_active": 0, + "index_fsm_complete": 0, + "index_fsm_create": 0, + "index_fsm_create_error": 0, + "index_fsm_results_100": 0, + "index_fsm_results_95": 0, + "index_fsm_results_99": 0, + "index_fsm_results_mean": 0, + "index_fsm_results_median": 0, + "index_fsm_time_100": 0, + "index_fsm_time_95": 0, + 
"index_fsm_time_99": 0, + "index_fsm_time_mean": 0, + "index_fsm_time_median": 0, + "late_put_fsm_coordinator_ack": 0, + "leveldb_read_block_error": "undefined", + "list_fsm_active": 0, + "list_fsm_create": 0, + "list_fsm_create_error": 0, + "list_fsm_create_error_total": 0, + "list_fsm_create_total": 0, + "map_actor_counts_100": 0, + "map_actor_counts_95": 0, + "map_actor_counts_99": 0, + "map_actor_counts_mean": 0, + "map_actor_counts_median": 0, + "mem_allocated": 3127377920, + "mem_total": 10447507456, + "memory_atom": 744369, + "memory_atom_used": 741992, + "memory_binary": 1007384, + "memory_code": 15193387, + "memory_ets": 6721720, + "memory_processes": 20741216, + "memory_processes_used": 20725312, + "memory_system": 36183888, + "memory_total": 56925104, + "ngrfetch_nofetch": 0, + "ngrfetch_nofetch_total": 0, + "ngrfetch_prefetch": 0, + "ngrfetch_prefetch_total": 0, + "ngrfetch_tofetch": 0, + "ngrfetch_tofetch_total": 0, + "ngrrepl_empty": 0, + "ngrrepl_empty_total": 0, + "ngrrepl_error": 0, + "ngrrepl_error_total": 0, + "ngrrepl_object": 0, + "ngrrepl_object_total": 0, + "node_get_fsm_active": 0, + "node_get_fsm_active_60s": 0, + "node_get_fsm_counter_objsize_100": 0, + "node_get_fsm_counter_objsize_95": 0, + "node_get_fsm_counter_objsize_99": 0, + "node_get_fsm_counter_objsize_mean": 0, + "node_get_fsm_counter_objsize_median": 0, + "node_get_fsm_counter_siblings_100": 0, + "node_get_fsm_counter_siblings_95": 0, + "node_get_fsm_counter_siblings_99": 0, + "node_get_fsm_counter_siblings_mean": 0, + "node_get_fsm_counter_siblings_median": 0, + "node_get_fsm_counter_time_100": 0, + "node_get_fsm_counter_time_95": 0, + "node_get_fsm_counter_time_99": 0, + "node_get_fsm_counter_time_mean": 0, + "node_get_fsm_counter_time_median": 0, + "node_get_fsm_errors": 0, + "node_get_fsm_errors_total": 0, + "node_get_fsm_hll_objsize_100": 0, + "node_get_fsm_hll_objsize_95": 0, + "node_get_fsm_hll_objsize_99": 0, + "node_get_fsm_hll_objsize_mean": 0, + "node_get_fsm_hll_objsize_median": 0, + "node_get_fsm_hll_siblings_100": 0, + "node_get_fsm_hll_siblings_95": 0, + "node_get_fsm_hll_siblings_99": 0, + "node_get_fsm_hll_siblings_mean": 0, + "node_get_fsm_hll_siblings_median": 0, + "node_get_fsm_hll_time_100": 0, + "node_get_fsm_hll_time_95": 0, + "node_get_fsm_hll_time_99": 0, + "node_get_fsm_hll_time_mean": 0, + "node_get_fsm_hll_time_median": 0, + "node_get_fsm_in_rate": 0, + "node_get_fsm_map_objsize_100": 0, + "node_get_fsm_map_objsize_95": 0, + "node_get_fsm_map_objsize_99": 0, + "node_get_fsm_map_objsize_mean": 0, + "node_get_fsm_map_objsize_median": 0, + "node_get_fsm_map_siblings_100": 0, + "node_get_fsm_map_siblings_95": 0, + "node_get_fsm_map_siblings_99": 0, + "node_get_fsm_map_siblings_mean": 0, + "node_get_fsm_map_siblings_median": 0, + "node_get_fsm_map_time_100": 0, + "node_get_fsm_map_time_95": 0, + "node_get_fsm_map_time_99": 0, + "node_get_fsm_map_time_mean": 0, + "node_get_fsm_map_time_median": 0, + "node_get_fsm_objsize_100": 0, + "node_get_fsm_objsize_95": 0, + "node_get_fsm_objsize_99": 0, + "node_get_fsm_objsize_mean": 0, + "node_get_fsm_objsize_median": 0, + "node_get_fsm_out_rate": 0, + "node_get_fsm_rejected": 0, + "node_get_fsm_rejected_60s": 0, + "node_get_fsm_rejected_total": 0, + "node_get_fsm_set_objsize_100": 0, + "node_get_fsm_set_objsize_95": 0, + "node_get_fsm_set_objsize_99": 0, + "node_get_fsm_set_objsize_mean": 0, + "node_get_fsm_set_objsize_median": 0, + "node_get_fsm_set_siblings_100": 0, + "node_get_fsm_set_siblings_95": 0, + "node_get_fsm_set_siblings_99": 0, 
+ "node_get_fsm_set_siblings_mean": 0, + "node_get_fsm_set_siblings_median": 0, + "node_get_fsm_set_time_100": 0, + "node_get_fsm_set_time_95": 0, + "node_get_fsm_set_time_99": 0, + "node_get_fsm_set_time_mean": 0, + "node_get_fsm_set_time_median": 0, + "node_get_fsm_siblings_100": 0, + "node_get_fsm_siblings_95": 0, + "node_get_fsm_siblings_99": 0, + "node_get_fsm_siblings_mean": 0, + "node_get_fsm_siblings_median": 0, + "node_get_fsm_time_100": 0, + "node_get_fsm_time_95": 0, + "node_get_fsm_time_99": 0, + "node_get_fsm_time_mean": 4, + "node_get_fsm_time_median": 0, + "node_gets": 2, + "node_gets_counter": 0, + "node_gets_counter_total": 0, + "node_gets_hll": 0, + "node_gets_hll_total": 0, + "node_gets_map": 0, + "node_gets_map_total": 0, + "node_gets_set": 0, + "node_gets_set_total": 0, + "node_gets_total": 0, + "node_put_fsm_active": 0, + "node_put_fsm_active_60s": 0, + "node_put_fsm_counter_time_100": 0, + "node_put_fsm_counter_time_95": 0, + "node_put_fsm_counter_time_99": 0, + "node_put_fsm_counter_time_mean": 0, + "node_put_fsm_counter_time_median": 0, + "node_put_fsm_hll_time_100": 0, + "node_put_fsm_hll_time_95": 0, + "node_put_fsm_hll_time_99": 0, + "node_put_fsm_hll_time_mean": 0, + "node_put_fsm_hll_time_median": 0, + "node_put_fsm_in_rate": 0, + "node_put_fsm_map_time_100": 0, + "node_put_fsm_map_time_95": 0, + "node_put_fsm_map_time_99": 0, + "node_put_fsm_map_time_mean": 0, + "node_put_fsm_map_time_median": 0, + "node_put_fsm_out_rate": 0, + "node_put_fsm_rejected": 0, + "node_put_fsm_rejected_60s": 0, + "node_put_fsm_rejected_total": 0, + "node_put_fsm_set_time_100": 0, + "node_put_fsm_set_time_95": 0, + "node_put_fsm_set_time_99": 0, + "node_put_fsm_set_time_mean": 0, + "node_put_fsm_set_time_median": 0, + "node_put_fsm_time_100": 0, + "node_put_fsm_time_95": 0, + "node_put_fsm_time_99": 0, + "node_put_fsm_time_mean": 5, + "node_put_fsm_time_median": 0, + "node_puts": 3, + "node_puts_counter": 0, + "node_puts_counter_total": 0, + "node_puts_hll": 0, + "node_puts_hll_total": 0, + "node_puts_map": 0, + "node_puts_map_total": 0, + "node_puts_set": 0, + "node_puts_set_total": 0, + "node_puts_total": 0, + "nodename": "riak@127.0.0.1", + "object_counter_merge": 0, + "object_counter_merge_time_100": 0, + "object_counter_merge_time_95": 0, + "object_counter_merge_time_99": 0, + "object_counter_merge_time_mean": 0, + "object_counter_merge_time_median": 0, + "object_counter_merge_total": 0, + "object_hll_merge": 0, + "object_hll_merge_time_100": 0, + "object_hll_merge_time_95": 0, + "object_hll_merge_time_99": 0, + "object_hll_merge_time_mean": 0, + "object_hll_merge_time_median": 0, + "object_hll_merge_total": 0, + "object_map_merge": 0, + "object_map_merge_time_100": 0, + "object_map_merge_time_95": 0, + "object_map_merge_time_99": 0, + "object_map_merge_time_mean": 0, + "object_map_merge_time_median": 0, + "object_map_merge_total": 0, + "object_merge": 0, + "object_merge_time_100": 0, + "object_merge_time_95": 0, + "object_merge_time_99": 0, + "object_merge_time_mean": 0, + "object_merge_time_median": 0, + "object_merge_total": 0, + "object_set_merge": 0, + "object_set_merge_time_100": 0, + "object_set_merge_time_95": 0, + "object_set_merge_time_99": 0, + "object_set_merge_time_mean": 0, + "object_set_merge_time_median": 0, + "object_set_merge_total": 0, + "pbc_active": 0, + "pbc_connects": 0, + "pbc_connects_total": 0, + "pipeline_active": 0, + "pipeline_create_count": 0, + "pipeline_create_error_count": 0, + "pipeline_create_error_one": 0, + "pipeline_create_one": 0, + 
"postcommit_fail": 0, + "precommit_fail": 0, + "read_repairs": 6, + "read_repairs_counter": 0, + "read_repairs_counter_total": 0, + "read_repairs_fallback_notfound_count": "undefined", + "read_repairs_fallback_notfound_one": "undefined", + "read_repairs_fallback_outofdate_count": "undefined", + "read_repairs_fallback_outofdate_one": "undefined", + "read_repairs_hll": 0, + "read_repairs_hll_total": 0, + "read_repairs_map": 0, + "read_repairs_map_total": 0, + "read_repairs_primary_notfound_count": "undefined", + "read_repairs_primary_notfound_one": "undefined", + "read_repairs_primary_outofdate_count": "undefined", + "read_repairs_primary_outofdate_one": "undefined", + "read_repairs_set": 0, + "read_repairs_set_total": 0, + "read_repairs_total": 0, + "rebalance_delay_last": 0, + "rebalance_delay_max": 0, + "rebalance_delay_mean": 0, + "rebalance_delay_min": 0, + "rejected_handoffs": 0, + "riak_kv_vnodeq_max": 0, + "riak_kv_vnodeq_mean": 0.0, + "riak_kv_vnodeq_median": 0, + "riak_kv_vnodeq_min": 0, + "riak_kv_vnodeq_total": 0, + "riak_kv_vnodes_running": 64, + "riak_pipe_vnodeq_max": 0, + "riak_pipe_vnodeq_mean": 0.0, + "riak_pipe_vnodeq_median": 0, + "riak_pipe_vnodeq_min": 0, + "riak_pipe_vnodeq_total": 0, + "riak_pipe_vnodes_running": 64, + "ring_creation_size": 64, + "ring_members": [ + "riak@127.0.0.1" + ], + "ring_num_partitions": 64, + "ring_ownership": "[{'riak@127.0.0.1',64}]", + "rings_reconciled": 0, + "rings_reconciled_total": 0, + "set_actor_counts_100": 0, + "set_actor_counts_95": 0, + "set_actor_counts_99": 0, + "set_actor_counts_mean": 0, + "set_actor_counts_median": 0, + "skipped_read_repairs": 0, + "skipped_read_repairs_total": 0, + "soft_loaded_vnode_mbox_total": 0, + "storage_backend": "riak_kv_bitcask_backend", + "sys_driver_version": "3.3", + "sys_global_heaps_size": "deprecated", + "sys_heap_type": "private", + "sys_logical_processors": 8, + "sys_monitor_count": 515, + "sys_otp_release": "20", + "sys_port_count": 83, + "sys_process_count": 1558, + "sys_smp_support": true, + "sys_system_architecture": "x86_64-unknown-linux-gnu", + "sys_system_version": "Erlang/OTP 20 [erts-9.3] [source] [64-bit] [smp:8:8] [ds:8:8:10] [async-threads:64] [kernel-poll:true]", + "sys_thread_pool_size": 64, + "sys_threads_enabled": true, + "sys_wordsize": 8, + "tictacaae_queue_microsec__max": 0, + "tictacaae_queue_microsec_mean": 0, + "vnode_counter_update": 0, + "vnode_counter_update_time_100": 0, + "vnode_counter_update_time_95": 0, + "vnode_counter_update_time_99": 0, + "vnode_counter_update_time_mean": 0, + "vnode_counter_update_time_median": 0, + "vnode_counter_update_total": 0, + "vnode_get_fsm_time_100": 0, + "vnode_get_fsm_time_95": 0, + "vnode_get_fsm_time_99": 0, + "vnode_get_fsm_time_mean": 0, + "vnode_get_fsm_time_median": 0, + "vnode_gets": 7, + "vnode_gets_total": 0, + "vnode_head_fsm_time_100": 0, + "vnode_head_fsm_time_95": 0, + "vnode_head_fsm_time_99": 0, + "vnode_head_fsm_time_mean": 0, + "vnode_head_fsm_time_median": 0, + "vnode_heads": 0, + "vnode_heads_total": 0, + "vnode_hll_update": 0, + "vnode_hll_update_time_100": 0, + "vnode_hll_update_time_95": 0, + "vnode_hll_update_time_99": 0, + "vnode_hll_update_time_mean": 0, + "vnode_hll_update_time_median": 0, + "vnode_hll_update_total": 0, + "vnode_index_deletes": 9, + "vnode_index_deletes_postings": 0, + "vnode_index_deletes_postings_total": 0, + "vnode_index_deletes_total": 0, + "vnode_index_reads": 10, + "vnode_index_reads_total": 0, + "vnode_index_refreshes": 0, + "vnode_index_refreshes_total": 0, + 
"vnode_index_writes": 11, + "vnode_index_writes_postings": 0, + "vnode_index_writes_postings_total": 0, + "vnode_index_writes_total": 0, + "vnode_map_update": 0, + "vnode_map_update_time_100": 0, + "vnode_map_update_time_95": 0, + "vnode_map_update_time_99": 0, + "vnode_map_update_time_mean": 0, + "vnode_map_update_time_median": 0, + "vnode_map_update_total": 0, + "vnode_mbox_check_timeout_total": 0, + "vnode_put_fsm_time_100": 0, + "vnode_put_fsm_time_95": 0, + "vnode_put_fsm_time_99": 0, + "vnode_put_fsm_time_mean": 0, + "vnode_put_fsm_time_median": 0, + "vnode_puts": 8, + "vnode_puts_total": 0, + "vnode_set_update": 0, + "vnode_set_update_time_100": 0, + "vnode_set_update_time_95": 0, + "vnode_set_update_time_99": 0, + "vnode_set_update_time_mean": 0, + "vnode_set_update_time_median": 0, + "vnode_set_update_total": 0, + "worker_af1_pool_queuetime_100": 0, + "worker_af1_pool_queuetime_mean": 0, + "worker_af1_pool_total": 0, + "worker_af1_pool_worktime_100": 0, + "worker_af1_pool_worktime_mean": 0, + "worker_af2_pool_queuetime_100": 0, + "worker_af2_pool_queuetime_mean": 0, + "worker_af2_pool_total": 0, + "worker_af2_pool_worktime_100": 0, + "worker_af2_pool_worktime_mean": 0, + "worker_af3_pool_queuetime_100": 0, + "worker_af3_pool_queuetime_mean": 0, + "worker_af3_pool_total": 0, + "worker_af3_pool_worktime_100": 0, + "worker_af3_pool_worktime_mean": 0, + "worker_af4_pool_queuetime_100": 0, + "worker_af4_pool_queuetime_mean": 0, + "worker_af4_pool_total": 0, + "worker_af4_pool_worktime_100": 0, + "worker_af4_pool_worktime_mean": 0, + "worker_be_pool_queuetime_100": 0, + "worker_be_pool_queuetime_mean": 0, + "worker_be_pool_total": 0, + "worker_be_pool_worktime_100": 0, + "worker_be_pool_worktime_mean": 0, + "worker_node_worker_pool_queuetime_100": 0, + "worker_node_worker_pool_queuetime_mean": 0, + "worker_node_worker_pool_total": 0, + "worker_node_worker_pool_worktime_100": 0, + "worker_node_worker_pool_worktime_mean": 0, + "worker_unregistered_queuetime_100": 0, + "worker_unregistered_queuetime_mean": 0, + "worker_unregistered_total": 0, + "worker_unregistered_worktime_100": 0, + "worker_unregistered_worktime_mean": 0, + "worker_vnode_pool_queuetime_100": 0, + "worker_vnode_pool_queuetime_mean": 0, + "worker_vnode_pool_total": 0, + "worker_vnode_pool_worktime_100": 0, + "worker_vnode_pool_worktime_mean": 0, + "write_once_merge": 0, + "write_once_put_objsize_100": 0, + "write_once_put_objsize_95": 0, + "write_once_put_objsize_99": 0, + "write_once_put_objsize_mean": 0, + "write_once_put_objsize_median": 0, + "write_once_put_time_100": 0, + "write_once_put_time_95": 0, + "write_once_put_time_99": 0, + "write_once_put_time_mean": 0, + "write_once_put_time_median": 0, + "write_once_puts": 0, + "write_once_puts_total": 0, + "disk": [ + { + "id": "/", + "size": 107077304, + "used": 10 + }, + { + "id": "/dev", + "size": 65536, + "used": 0 + }, + { + "id": "/dev/shm", + "size": 65536, + "used": 0 + }, + { + "id": "/etc/hosts", + "size": 107077304, + "used": 10 + }, + { + "id": "/proc/acpi", + "size": 5101320, + "used": 0 + }, + { + "id": "/sys/firmware", + "size": 5101320, + "used": 0 + } + ], + "riak_auth_mods_version": "riak_kv-3.0.0", + "canola_version": "2.1.0", + "riaknostic_version": "riak_kv-3.0.2", + "riak_repl_version": "riak_kv-3.0.9", + "ranch_version": "1.6.0", + "riak_kv_version": "riak_kv-3.0.9", + "riak_api_version": "riak_kv-3.0.9", + "ebloom_version": "2.0.0", + "riakhttpc_version": "1.3.1", + "ibrowse_version": "4.4.2", + "riakc_version": "3.0.8+p1", + "riak_pb_version": 
"3.0.8", + "recon_version": "2.5.2", + "redbug_version": "1.2.2", + "runtime_tools_version": "1.12.5", + "hyper_version": "1.0", + "kv_index_tictactree_version": "1.0.1", + "leveled_version": "1.0.6", + "lz4_version": "0.2.2", + "sext_version": "1.4.1", + "riak_dt_version": "riak_kv-3.0.0", + "sidejob_version": "2.1.0", + "riak_pipe_version": "riak_kv-3.0.9", + "riak_core_version": "riak_kv-3.0.9", + "exometer_core_version": "1.5.7", + "os_mon_version": "2.4.4", + "webmachine_version": "1.11.1", + "mochiweb_version": "2.20.0", + "ssl_version": "8.2.4", + "eleveldb_version": "riak_kv-3.0.0", + "cluster_info_version": "2.1.0", + "poolboy_version": "riak_kv-3.0.0", + "pbkdf2_version": "2.1.0", + "basho_stats_version": "1.1.0", + "clique_version": "0.3.11", + "riak_sysmon_version": "2.2.0", + "cuttlefish_version": "2.1.0", + "lager_version": "3.8.0", + "getopt_version": "1.0.1", + "sasl_version": "3.1.1", + "bitcask_version": "2.0.3", + "inets_version": "6.5", + "xmerl_version": "1.3.16", + "observer_version": "2.7", + "parse_trans_version": "3.3.0", + "folsom_version": "0.8.7", + "bear_version": "0.8.7", + "setup_version": "2.0.2", + "hut_version": "1.2.1", + "public_key_version": "1.5.2", + "crypto_version": "4.2.1", + "asn1_version": "5.0.5", + "lager_syslog_version": "2.1.2", + "syslog_version": "1.0.5", + "goldrush_version": "0.1.9", + "compiler_version": "7.1.5", + "syntax_tools_version": "2.1.4", + "stdlib_version": "3.4.4", + "kernel_version": "5.4.3" +} diff --git a/receiver/riakreceiver/testdata/config.yaml b/receiver/riakreceiver/testdata/config.yaml new file mode 100644 index 000000000000..731039266316 --- /dev/null +++ b/receiver/riakreceiver/testdata/config.yaml @@ -0,0 +1,19 @@ +receivers: + riak: + endpoint: http://localhost:8098 + username: otelu + password: $RIAK_PASSWORD + collection_interval: 10s + +processors: + nop: + +exporters: + nop: + +service: + pipelines: + metrics: + receivers: [riak] + processors: [nop] + exporters: [nop] diff --git a/receiver/riakreceiver/testdata/integration/Dockerfile.riak b/receiver/riakreceiver/testdata/integration/Dockerfile.riak new file mode 100644 index 000000000000..4423162f3c21 --- /dev/null +++ b/receiver/riakreceiver/testdata/integration/Dockerfile.riak @@ -0,0 +1,29 @@ +FROM ubuntu:20.04 as download + +RUN \ + apt-get update -qq && \ + apt-get install -qq -y curl && \ + curl -L -o /riak.deb https://files.tiot.jp/riak/kv/3.0/3.0.9/ubuntu/focal64/riak_3.0.9-OTP20.3_amd64.deb + +FROM ubuntu:20.04 + +COPY --from=download /riak.deb /riak.deb + +RUN \ + apt-get update -qq && \ + DEBIAN_FRONTEND=noninteractive apt-get install -qq -y -f /riak.deb + +WORKDIR /var/lib/riak + +EXPOSE 8087 +EXPOSE 8098 + +ENV RIAK_VERSION 3.0.2 +ENV RIAK_HOME /usr/lib/riak +ENV RIAK_FLAVOR KV + +COPY riak.conf /etc/riak/riak.conf +COPY entrypoint.sh /usr/lib/riak/entrypoint.sh +RUN chmod +x /usr/lib/riak/entrypoint.sh + +ENTRYPOINT [ "/usr/lib/riak/entrypoint.sh" ] diff --git a/receiver/riakreceiver/testdata/integration/entrypoint.sh b/receiver/riakreceiver/testdata/integration/entrypoint.sh new file mode 100644 index 000000000000..190ea4e2e64e --- /dev/null +++ b/receiver/riakreceiver/testdata/integration/entrypoint.sh @@ -0,0 +1,41 @@ +#!/bin/bash +# +# Cluster start script to bootstrap a Riak cluster. 
+# +set -ex + +if [[ -x /usr/sbin/riak ]]; then + export RIAK=/usr/sbin/riak +else + export RIAK=$RIAK_HOME/bin/riak +fi +export RIAK_CONF=/etc/riak/riak.conf +export USER_CONF=/etc/riak/user.conf +export RIAK_ADVANCED_CONF=/etc/riak/advanced.config +if [[ -x /usr/sbin/riak-admin ]]; then + export RIAK_ADMIN=/usr/sbin/riak-admin +else + export RIAK_ADMIN=$RIAK_HOME/bin/riak-admin +fi +export SCHEMAS_DIR=/etc/riak/schemas/ + +# Set ports for PB and HTTP +export PB_PORT=${PB_PORT:-8087} +export HTTP_PORT=${HTTP_PORT:-8098} + +# Use ping to discover our HOSTNAME because it's easier and more reliable than other methods +export HOST=$(ping -c1 $HOSTNAME | awk '/^PING/ {print $3}' | sed 's/[()]//g')||'127.0.0.1' + +# CLUSTER_NAME is used to name the nodes and is the value used in the distributed cookie +export CLUSTER_NAME=${CLUSTER_NAME:-riak} + +# The COORDINATOR_NODE is the first node in a cluster to which other nodes will eventually join +export COORDINATOR_NODE=${COORDINATOR_NODE:-$HOSTNAME} +export COORDINATOR_NODE_HOST=$(ping -c1 $COORDINATOR_NODE | awk '/^PING/ {print $3}' | sed 's/[()]//g')||'127.0.0.1' + +# Start the node and wait until fully up +$RIAK start + +# Trap SIGTERM and SIGINT and tail the log file indefinitely +tail -n 1024 -f /var/log/riak/console.log + diff --git a/receiver/riakreceiver/testdata/integration/expected.json b/receiver/riakreceiver/testdata/integration/expected.json new file mode 100644 index 000000000000..5b10bb361826 --- /dev/null +++ b/receiver/riakreceiver/testdata/integration/expected.json @@ -0,0 +1,214 @@ +{ + "resourceMetrics": [ + { + "instrumentationLibraryMetrics": [ + { + "instrumentationLibrary": { + "name": "otelcol/riakreceiver" + }, + "metrics": [ + { + "description": "The amount of memory allocated to the node.", + "name": "riak.memory.limit", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "3330531328", + "startTimeUnixNano": "1648220901148778000", + "timeUnixNano": "1648220901509196000" + } + ] + }, + "unit": "By" + }, + { + "description": "The number of operations performed by the node.", + "name": "riak.node.operation.count", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "0", + "attributes": [ + { + "key": "request", + "value": { + "stringValue": "get" + } + } + ], + "startTimeUnixNano": "1648220901148778000", + "timeUnixNano": "1648220901509196000" + }, + { + "asInt": "0", + "attributes": [ + { + "key": "request", + "value": { + "stringValue": "put" + } + } + ], + "startTimeUnixNano": "1648220901148778000", + "timeUnixNano": "1648220901509196000" + } + ], + "isMonotonic": true + }, + "unit": "{operation}" + }, + { + "description": "The mean time between request and response for operations performed by the node over the last minute.", + "gauge": { + "dataPoints": [ + { + "asInt": "0", + "attributes": [ + { + "key": "request", + "value": { + "stringValue": "get" + } + } + ], + "startTimeUnixNano": "1648220901148778000", + "timeUnixNano": "1648220901509196000" + }, + { + "asInt": "0", + "attributes": [ + { + "key": "request", + "value": { + "stringValue": "put" + } + } + ], + "startTimeUnixNano": "1648220901148778000", + "timeUnixNano": "1648220901509196000" + } + ] + }, + "name": "riak.node.operation.time.mean", + "unit": "us" + }, + { + "description": "The number of read repairs performed by the node.", + "name": "riak.node.read_repair.count", + "sum": { + "aggregationTemporality": 
"AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "0", + "startTimeUnixNano": "1648220901148778000", + "timeUnixNano": "1648220901509196000" + } + ], + "isMonotonic": true + }, + "unit": "{read_repair}" + }, + { + "description": "The number of index operations performed by vnodes on the node.", + "name": "riak.vnode.index.operation.count", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "0", + "attributes": [ + { + "key": "operation", + "value": { + "stringValue": "read" + } + } + ], + "startTimeUnixNano": "1648220901148778000", + "timeUnixNano": "1648220901509196000" + }, + { + "asInt": "0", + "attributes": [ + { + "key": "operation", + "value": { + "stringValue": "write" + } + } + ], + "startTimeUnixNano": "1648220901148778000", + "timeUnixNano": "1648220901509196000" + }, + { + "asInt": "0", + "attributes": [ + { + "key": "operation", + "value": { + "stringValue": "delete" + } + } + ], + "startTimeUnixNano": "1648220901148778000", + "timeUnixNano": "1648220901509196000" + } + ] + }, + "unit": "{operation}" + }, + { + "description": "The number of operations performed by vnodes on the node.", + "name": "riak.vnode.operation.count", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "0", + "attributes": [ + { + "key": "request", + "value": { + "stringValue": "get" + } + } + ], + "startTimeUnixNano": "1648220901148778000", + "timeUnixNano": "1648220901509196000" + }, + { + "asInt": "0", + "attributes": [ + { + "key": "request", + "value": { + "stringValue": "put" + } + } + ], + "startTimeUnixNano": "1648220901148778000", + "timeUnixNano": "1648220901509196000" + } + ], + "isMonotonic": true + }, + "unit": "{operation}" + } + ] + } + ], + "resource": { + "attributes": [ + { + "key": "riak.node.name", + "value": { + "stringValue": "riak@127.0.0.1" + } + } + ] + } + } + ] +} diff --git a/receiver/riakreceiver/testdata/integration/riak.conf b/receiver/riakreceiver/testdata/integration/riak.conf new file mode 100644 index 000000000000..6e89217a3805 --- /dev/null +++ b/receiver/riakreceiver/testdata/integration/riak.conf @@ -0,0 +1,1451 @@ +## Where to emit the default log messages (typically at 'info' +## severity): +## off: disabled +## file: the file specified by log.console.file +## console: to standard output (seen when using `riak attach-direct`) +## both: log.console.file and standard out. +## +## Default: file +## +## Acceptable values: +## - one of: off, file, console, both +log.console = console + +## The severity level of the console log, default is 'info'. +## +## Default: info +## +## Acceptable values: +## - one of: debug, info, notice, warning, error, critical, alert, emergency, none +log.console.level = info + +## When 'log.console' is set to 'file' or 'both', the file where +## console messages will be logged. +## +## Default: $(platform_log_dir)/console.log +## +## Acceptable values: +## - the path to a file +log.console.file = $(platform_log_dir)/console.log + +## The file where error messages will be logged. +## +## Default: $(platform_log_dir)/error.log +## +## Acceptable values: +## - the path to a file +log.error.file = $(platform_log_dir)/error.log + +## When set to 'on', enables log output to syslog. +## +## Default: off +## +## Acceptable values: +## - on or off +log.syslog = off + +## Whether to enable the crash log. 
+## +## Default: on +## +## Acceptable values: +## - on or off +log.crash = on + +## If the crash log is enabled, the file where its messages will +## be written. +## +## Default: $(platform_log_dir)/crash.log +## +## Acceptable values: +## - the path to a file +log.crash.file = $(platform_log_dir)/crash.log + +## Maximum size in bytes of individual messages in the crash log +## +## Default: 64KB +## +## Acceptable values: +## - a byte size with units, e.g. 10GB +log.crash.maximum_message_size = 64KB + +## Maximum size of the crash log in bytes, before it is rotated +## +## Default: 10MB +## +## Acceptable values: +## - a byte size with units, e.g. 10GB +log.crash.size = 10MB + +## The schedule on which to rotate the crash log. For more +## information see: +## https://github.com/basho/lager/blob/master/README.md#internal-log-rotation +## +## Default: $D0 +## +## Acceptable values: +## - text +log.crash.rotation = $D0 + +## The number of rotated crash logs to keep. When set to +## 'current', only the current open log file is kept. +## +## Default: 5 +## +## Acceptable values: +## - an integer +## - the text "current" +log.crash.rotation.keep = 5 + +## +## Default: true +## +## Acceptable values: +## - text +erlang.vm.ignore_break_signal = true + +## Name of the Erlang node +## +## Default: riak@127.0.0.1 +## +## Acceptable values: +## - text +nodename = riak@127.0.0.1 + +## Cookie for distributed node communication. All nodes in the +## same cluster should use the same cookie or they will not be able to +## communicate. +## +## Default: riak +## +## Acceptable values: +## - text +distributed_cookie = riak + +## Sets the number of threads in async thread pool, valid range +## is 0-1024. If thread support is available, the default is 64. +## More information at: http://erlang.org/doc/man/erl.html +## +## Default: 64 +## +## Acceptable values: +## - an integer +erlang.async_threads = 64 + +## The number of concurrent ports/sockets +## Valid range is 1024-134217727 +## +## Default: 262144 +## +## Acceptable values: +## - an integer +erlang.max_ports = 262144 + +## Set scheduler forced wakeup interval. All run queues will be +## scanned each Interval milliseconds. While there are sleeping +## schedulers in the system, one scheduler will be woken for each +## non-empty run queue found. An Interval of zero disables this +## feature, which also is the default. +## This feature is a workaround for lengthy executing native code, and +## native code that do not bump reductions properly. +## More information: http://www.erlang.org/doc/man/erl.html#+sfwi +## +## Default: 500 +## +## Acceptable values: +## - an integer +## erlang.schedulers.force_wakeup_interval = 500 + +## Enable or disable scheduler compaction of load. By default +## scheduler compaction of load is enabled. When enabled, load +## balancing will strive for a load distribution which causes as many +## scheduler threads as possible to be fully loaded (i.e., not run out +## of work). This is accomplished by migrating load (e.g. runnable +## processes) into a smaller set of schedulers when schedulers +## frequently run out of work. When disabled, the frequency with which +## schedulers run out of work will not be taken into account by the +## load balancing logic. +## More information: http://www.erlang.org/doc/man/erl.html#+scl +## +## Default: false +## +## Acceptable values: +## - one of: true, false +## erlang.schedulers.compaction_of_load = false + +## Enable or disable scheduler utilization balancing of load. 
By +## default scheduler utilization balancing is disabled and instead +## scheduler compaction of load is enabled which will strive for a +## load distribution which causes as many scheduler threads as +## possible to be fully loaded (i.e., not run out of work). When +## scheduler utilization balancing is enabled the system will instead +## try to balance scheduler utilization between schedulers. That is, +## strive for equal scheduler utilization on all schedulers. +## More information: http://www.erlang.org/doc/man/erl.html#+sub +## +## Acceptable values: +## - one of: true, false +## erlang.schedulers.utilization_balancing = true + +## Number of partitions in the cluster (only valid when first +## creating the cluster). Must be a power of 2, minimum 8 and maximum +## 1024. +## +## Default: 64 +## +## Acceptable values: +## - an integer +## ring_size = 64 + +## Number of concurrent node-to-node transfers allowed. +## +## Default: 2 +## +## Acceptable values: +## - an integer +## transfer_limit = 2 + +## Default cert location for https can be overridden +## with the ssl config variable, for example: +## +## Acceptable values: +## - the path to a file +## ssl.certfile = $(platform_etc_dir)/cert.pem + +## Default key location for https can be overridden with the ssl +## config variable, for example: +## +## Acceptable values: +## - the path to a file +## ssl.keyfile = $(platform_etc_dir)/key.pem + +## Default signing authority location for https can be overridden +## with the ssl config variable, for example: +## +## Acceptable values: +## - the path to a file +## ssl.cacertfile = $(platform_etc_dir)/cacertfile.pem + +## DTrace support Do not enable 'dtrace' unless your Erlang/OTP +## runtime is compiled to support DTrace. DTrace is available in +## R15B01 (supported by the Erlang/OTP official source package) and in +## R14B04 via a custom source repository & branch. +## +## Default: off +## +## Acceptable values: +## - on or off +dtrace = off + +## Platform-specific installation paths (substituted by rebar) +## +## Default: /usr/lib/riak/bin +## +## Acceptable values: +## - the path to a directory +platform_bin_dir = /usr/lib/riak/bin + +## +## Default: /var/lib/riak +## +## Acceptable values: +## - the path to a directory +platform_data_dir = /var/lib/riak + +## +## Default: /etc/riak +## +## Acceptable values: +## - the path to a directory +platform_etc_dir = /etc/riak + +## +## Default: /usr/lib/riak/lib +## +## Acceptable values: +## - the path to a directory +platform_lib_dir = /usr/lib/riak/lib + +## +## Default: /var/log/riak +## +## Acceptable values: +## - the path to a directory +platform_log_dir = /var/log/riak + +## Enable consensus subsystem. Set to 'on' to enable the +## consensus subsystem used for strongly consistent Riak operations. +## +## Default: off +## +## Acceptable values: +## - on or off +## strong_consistency = on + +## On cluster leave - force full rebalance partitions +## By default on a cluster leave there will first be an attempt to handoff +## vnodes to safe (in terms of target_n_val) locations. In small clusters, +## there may be insufficient safe locations, and a temporary state can be +## created where a single node has a large number of vnodes. +## To mitigate this, a full rebalance (a re-assignment that does not optimise +## based on the starting position), can be forced by setting this option on +## all nodes. 
+## Please carefully consider any cluster plan created with this option before +## committing +## +## Default: off +## +## Acceptable values: +## - on or off +full_rebalance_onleave = off + +## listener.http. is an IP address and TCP port that the Riak +## HTTP interface will bind. +## +## Default: 127.0.0.1:8098 +## +## Acceptable values: +## - an IP/port pair, e.g. 127.0.0.1:10011 +listener.http.internal = 0.0.0.0:8098 + +## listener.protobuf. is an IP address and TCP port that the Riak +## Protocol Buffers interface will bind. +## +## Default: 127.0.0.1:8087 +## +## Acceptable values: +## - an IP/port pair, e.g. 127.0.0.1:10011 +listener.protobuf.internal = 127.0.0.1:8087 + +## The maximum length to which the queue of pending connections +## may grow. If set, it must be an integer > 0. If you anticipate a +## huge number of connections being initialized *simultaneously*, set +## this number higher. +## +## Default: 128 +## +## Acceptable values: +## - an integer +## protobuf.backlog = 128 + +## listener.https. is an IP address and TCP port that the Riak +## HTTPS interface will bind. +## +## Acceptable values: +## - an IP/port pair, e.g. 127.0.0.1:10011 +## listener.https.internal = 127.0.0.1:8098 + +## How Riak will repair out-of-sync keys. Some features require +## this to be set to 'active', including search. +## * active: out-of-sync keys will be repaired in the background +## * passive: out-of-sync keys are only repaired on read +## * active-debug: like active, but outputs verbose debugging +## information +## +## Default: active +## +## Acceptable values: +## - one of: active, passive, active-debug +anti_entropy = active + +## +## Default: passive +## +## Acceptable values: +## - one of: active, passive +tictacaae_active = passive + +## Use hashtree tokens for anti-entropy throttling +## To hold-up the vnode when there is a backlog of activity on the AAE store +## hashtree token bucket may be used to block the vnode every 90 puts until +## the PUT has been completed. This use aae_ping with tictac_aae, and a full +## sync block with legacy anti-entropy +## +## Default: enabled +## +## Acceptable values: +## - enabled or disabled +## aae_tokenbucket = enabled + +## A path under which aae data files will be stored. +## +## Default: $(platform_data_dir)/tictac_aae +## +## Acceptable values: +## - the path to a directory +tictacaae_dataroot = $(platform_data_dir)/tictac_aae + +## Parallel key store type +## When running in parallel mode, which will be the default if the backend does +## not support native tictac aae (i.e. is not leveled), what type of parallel +## key store should be kept - leveled_ko (leveled and key-ordered), or +## leveled_so (leveled and segment ordered). +## When running in native mode, this setting is ignored +## +## Default: leveled_ko +## +## Acceptable values: +## - one of: leveled_ko, leveled_so +## tictacaae_parallelstore = leveled_ko + +## Minimum Rebuild Wait +## The minimum number of hours to wait between rebuilds. Default value is 2 +## weeks +## +## Default: 336 +## +## Acceptable values: +## - an integer +tictacaae_rebuildwait = 336 + +## Maximum Rebuild Delay +## The number of seconds which represents the length of the period in which the +## next rebuild will be scheduled. So if all vnodes are scheduled to rebuild +## at the same time, they will actually rebuild randomly between 0 an this +## value (in seconds) after the rebuild time. 
Default value is 4 days +## +## Default: 345600 +## +## Acceptable values: +## - an integer +tictacaae_rebuilddelay = 345600 + +## Store heads in parallel key stores +## If running a parallel key store, the whole "head" object may be stored to +## allow for fold_heads queries to be run against the parallel store. +## Alternatively, the cost of the parallel key store can be reduced by storing +## only a minimal data set necessary for AAE and monitoring +## +## Default: disabled +## +## Acceptable values: +## - enabled or disabled +## tictacaae_storeheads = disabled + +## Pool Strategy - should a single node_worker_pool or multiple pools be +## used for queueing potentially longer-running "background" queries +## +## Default: dscp +## +## Acceptable values: +## - one of: none, single, dscp +## worker_pool_strategy = dscp + +## Pool Sizes - sizes for individual node_worker_pools +## Only relevant if single or dscp strategy chosen. Set +## `node_worker_pool_size` if a `single` pool strategy is being used, or set +## `af_worker_pool_size` and `be_worker_pool_size` if a multiple pool strategy +## has been chosen. +## Separate assured forwarding pools will be used of `af_worker_pool_size` for +## informational aae_folds (find_keys, object_stats) and functional folds +## (merge_tree_range, fetch_clock_range). The be_pool is used only for tictac +## AAE rebuilds at present +## +## Default: 4 +## +## Acceptable values: +## - an integer +node_worker_pool_size = 4 + +## +## Default: 2 +## +## Acceptable values: +## - an integer +af1_worker_pool_size = 2 + +## +## Default: 1 +## +## Acceptable values: +## - an integer +af2_worker_pool_size = 1 + +## +## Default: 4 +## +## Acceptable values: +## - an integer +af3_worker_pool_size = 4 + +## +## Default: 1 +## +## Acceptable values: +## - an integer +af4_worker_pool_size = 1 + +## +## Default: 1 +## +## Acceptable values: +## - an integer +be_worker_pool_size = 1 + +## Backend PUT Pause (ms). +## If the backend PUT has resulted in a pause request, then how long should +## the vnode pause for? This is measured in ms, and currently only applies +## to the leveled backend +## +## Default: 10 +## +## Acceptable values: +## - an integer +## backend_pause_ms = 10 + +## Whether to allow node to participate in coverage queries. +## This is used as a manual switch to stop nodes in incomplete states +## (E.g. doing a full partition repair, or node replace) from participating +## in coverage queries, as their information may be incomplete (e.g. 2i +## issues seen in these circumstances). +## +## Default: enabled +## +## Acceptable values: +## - enabled or disabled +## participate_in_coverage = enabled + +## Specifies the storage engine used for Riak's key-value data +## and secondary indexes (if supported). +## +## Default: bitcask +## +## Acceptable values: +## - one of: bitcask, leveldb, leveled, memory, multi, prefix_multi +storage_backend = bitcask + +## Simplify prefix_multi configuration for Riak CS. Keep this +## commented out unless Riak is configured for Riak CS. +## +## Acceptable values: +## - an integer +## cs_version = 20000 + +## Controls which binary representation of a riak value is stored +## on disk. +## * 0: Original erlang:term_to_binary format. Higher space overhead. +## * 1: New format for more compact storage of small values. 
+## If using the leveled backend object_format 1 will always be used, when +## persisting data into the backend - even if 0 has been configured here +## +## Default: 1 +## +## Acceptable values: +## - the integer 1 +## - the integer 0 +object.format = 1 + +## Reading or writing objects bigger than this size will write a +## warning in the logs. +## +## Default: 5MB +## +## Acceptable values: +## - a byte size with units, e.g. 10GB +object.size.warning_threshold = 5MB + +## Writing an object bigger than this will send a failure to the +## client. +## +## Default: 50MB +## +## Acceptable values: +## - a byte size with units, e.g. 10GB +object.size.maximum = 50MB + +## Writing an object with more than this number of siblings will +## generate a warning in the logs. +## +## Default: 25 +## +## Acceptable values: +## - an integer +object.siblings.warning_threshold = 25 + +## Writing an object with more than this number of siblings will +## send a failure to the client. +## +## Default: 100 +## +## Acceptable values: +## - an integer +object.siblings.maximum = 100 + +## Whether to allow list buckets. +## +## Default: enabled +## +## Acceptable values: +## - enabled or disabled +## cluster.job.riak_kv.list_buckets = enabled + +## Whether to allow streaming list buckets. +## +## Default: enabled +## +## Acceptable values: +## - enabled or disabled +## cluster.job.riak_kv.stream_list_buckets = enabled + +## Whether to allow list keys. +## +## Default: enabled +## +## Acceptable values: +## - enabled or disabled +## cluster.job.riak_kv.list_keys = enabled + +## Whether to allow streaming list keys. +## +## Default: enabled +## +## Acceptable values: +## - enabled or disabled +## cluster.job.riak_kv.stream_list_keys = enabled + +## Whether to allow secondary index queries. +## +## Default: enabled +## +## Acceptable values: +## - enabled or disabled +## cluster.job.riak_kv.secondary_index = enabled + +## Whether to allow streaming secondary index queries. +## +## Default: enabled +## +## Acceptable values: +## - enabled or disabled +## cluster.job.riak_kv.stream_secondary_index = enabled + +## Whether to allow term-based map-reduce. +## +## Default: enabled +## +## Acceptable values: +## - enabled or disabled +## cluster.job.riak_kv.map_reduce = enabled + +## Whether to allow JavaScript map-reduce. +## +## Default: enabled +## +## Acceptable values: +## - enabled or disabled +## cluster.job.riak_kv.map_reduce_js = enabled + +## For Tictac full-sync does all data need to be sync'd, or should a +## specific bucket be sync'd (bucket), or a specific bucket type (type). +## Note that in most cases sync of all data is lower overhead than sync of +## a subset of data - as cached AAE trees will be used. +## TODO: type is not yet implemented. +## +## Default: disabled +## +## Acceptable values: +## - one of: all, bucket, type, disabled +ttaaefs_scope = disabled + +## For tictac full-sync what registered queue name on this cluster should +## be use for passing references to data which needs to be replicated for AAE +## full-sync. This queue name must be defined as a +## `riak_kv.replq_queuename`, but need not be exlusive to full-sync (i.e. a +## real-time replication queue may be used as well) +## +## Default: q1_ttaaefs +## +## Acceptable values: +## - text +ttaaefs_queuename = q1_ttaaefs + +## For tictac full-sync what is the maximum number of AAE segments to be +## compared per exchange. 
Reducing this will speed up clock compare queries, +## but will increase the number of exchanges required to complete a repair. +## If using range_check to speed-up repairs, this can be reduced as the +## range_check maxresults will be boosted by the ttaaefs_rangeboost When using +## range_check a value of 64 is recommended, which may be reduced to 32 or 16 +## if the cluster has a very large volume of keys and/or limited capacity. +## Only reduce below 16 in exceptional circumstances. +## More capacity to process sync queries can be added by increaseing the af2 +## and af3 queue sizes - but this will be at the risk of there being a bigger +## impact on KV performance when repairs are required. +## +## Default: 64 +## +## Acceptable values: +## - an integer +ttaaefs_maxresults = 64 + +## For tictac full-sync what is the maximum number of AAE segments to be +## compared per exchange. When running a range_check query this will be the +## ttaaefs_max results * ttaaefs_rangeboost. +## When using range_check, a small maxresults can be used, in effect using +## other *_check syncs as discovery queries (to find the range_check for the +## range_check to do the heavy lifting) +## +## Default: 8 +## +## Acceptable values: +## - an integer +ttaaefs_rangeboost = 8 + +## For Tictac bucket full-sync which bucket should be sync'd by this +## node. Only ascii string bucket definitions supported (which will be +## converted using list_to_binary). +## +## Acceptable values: +## - text +## ttaaefs_bucketfilter_name = sample_bucketname + +## For Tictac bucket full-sync what is the bucket type of the bucket name. +## Only ascii string type bucket definitions supported (these +## definitions will be converted to binary using list_to_binary) +## +## Acceptable values: +## - text +## ttaaefs_bucketfilter_type = default + +## For Tictac all full-sync which NVAL should be sync'd by this node. +## This is the `local` nval, as the data in the remote cluster may have an +## alternative nval. +## +## Default: 3 +## +## Acceptable values: +## - an integer +ttaaefs_localnval = 3 + +## For Tictac all full-sync which NVAL should be sync'd in the remote +## cluster. +## +## Default: 3 +## +## Acceptable values: +## - an integer +ttaaefs_remotenval = 3 + +## The network address of the peer node in the cluster with which this +## node will connect to for full_sync purposes. If this peer node is +## unavailable, then this local node will not perform any full-sync actions, +## so alternative peer addresses should eb configured in other nodes. The +## peer address may be a load-balanced IP to avoid this issue. +## +## Acceptable values: +## - text +## ttaaefs_peerip = 127.0.0.1 + +## The port to be used when connecting to the remote peer cluster +## +## Acceptable values: +## - an integer +## ttaaefs_peerport = 8098 + +## The protocol to be used when conecting to the peer in the remote +## cluster. Could be http or pb (but only http currently being tested) +## TODO: Support for SSL? Support for pb. +## +## Default: http +## +## Acceptable values: +## - one of: http, pb +ttaaefs_peerprotocol = http + +## How many times per 24hour period should all the data be checked to +## confirm it is fully sync'd. When running a full (i.e. nval) sync this will +## check all the data under that nval between the clusters, and when the trees +## are out of alignment, will check across all data where the nval matches the +## specified nval. 
+## On large clusters (in terms of key count), this may take a long time - so +## allcheck should be scheduled infrequently, as other checks may be delayed by +## consumption of queue resource by the allcheck. +## The af3_queue size, and the ttaaefs_maxresults, both need to be tuned to +## ensure that the allcheck can run wihtin the 30 minute timeout. +## For per-bucket replication all is a reference to all of the data for that +## bucket, and warnings about sizing are specially relevant. +## +## Default: 24 +## +## Acceptable values: +## - an integer +ttaaefs_allcheck = 24 + +## How many times per 24hour period should no data be checked to +## confirm it is fully sync'd. Use nochecks to align the number of checks +## done by each node - if each node has the same number of slots, they will +## naurally space their checks within the period of the slot. +## +## Default: 0 +## +## Acceptable values: +## - an integer +ttaaefs_nocheck = 0 + +## How many times per 24hour period should the last hours data be checked +## to confirm it is fully sync'd. +## For per-bucket replication, the tree comparison prompted by this will be +## constrained by the time period, as well as the keys and clocks checked for +## repair. For full, nval, replication - the tree comparison is across all +## time, but the keys and clocks checked for repair are constrained by the time +## period. +## Once deltas are outside of the last hour, an hourcheck can do +## nothing to resolve the data, but will still consume resource. +## +## Default: 0 +## +## Acceptable values: +## - an integer +ttaaefs_hourcheck = 0 + +## How many times per 24hour period should the last 24-hours of data be +## checked to confirm it is fully sync'd. +## For per-bucket replication, the tree comparison prompted by this will be +## constrained by the time period, as well as the keys and clocks checked for +## repair. For full, nval, replication - the tree comparison is across all +## time, but the keys and clocks checked for repair are constrained by the time +## period. +## Once deltas are outside of the last hour, a daycheck can do +## nothing to resolve the data, but will still consume resource. +## +## Default: 0 +## +## Acceptable values: +## - an integer +ttaaefs_daycheck = 0 + +## How many times per 24hour period should the a range_check be run. The +## range_check is intended to be a smart check, in that it will: +## - use a last_modified range starting from the last successful check as its +## range if the last check was successful (i.e. showed the clusters to be +## in sync); +## - use a range identified by the last check (a last modified range, and +## perhaps also a specific Bucket) if a range to limit the issues has been +## identified by a previous failure +## - Not run at all if the clusters are out of sync and no range has been +## discovered (this may be the case when running on a sink which is behind a +## source cluster). +## For full, nval, sync operations the range is only relevant to the search +## for objects to repair - the tree comparison is always between all data for +## that nval. 
+## +## Default: 0 +## +## Acceptable values: +## - an integer +ttaaefs_rangecheck = 0 + +## If Tictac AAE full-sync discovers keys to be repaired, should each key +## that is repaired be logged +## +## Default: disabled +## +## Acceptable values: +## - enabled or disabled +## ttaaefs_logrepairs = enabled + +## If Tictac AAE sees difference in trees (for nval-based full +## comparisons) only, should it attempt to repair those trees as well as +## repairing any deltas. Enabling this setting will change the concurrency +## of fetch_clock_nval queries run to find repairs. +## +## Default: disabled +## +## Acceptable values: +## - enabled or disabled +## aae_fetchclocks_repair = enabled + +## Enable this node to act as a real-time replication source +## +## Default: disabled +## +## Acceptable values: +## - enabled or disabled +## replrtq_enablesrc = enabled + +## Limit the size of replication queues (for a queue and priority, i.e. +## each priority on each queue will have this as the limit) +## +## Default: 300000 +## +## Acceptable values: +## - an integer +replrtq_srcqueuelimit = 300000 + +## Limit the number of objects to be cached on the replication queue, +## with objects queued when the priority queue is beyond this limit stored as +## clocks only to be fetched on replication +## +## Default: 1000 +## +## Acceptable values: +## - an integer +replrtq_srcobjectlimit = 1000 + +## Limit the size of an object which may be pushed to the replication +## queue. Objects larger than this will still be replicated, but by being +## re-fetched. The product of replrtq_objectsize and replrtq_srcobjectlimit +## gives a theoretical maximum for the total memory consumed by the +## riak_kv_rpelrtq (in terms of objects). Default of this product is 200MB. +## +## Default: 200KB +## +## Acceptable values: +## - a byte size with units, e.g. 10GB +replrtq_srcobjectsize = 200KB + +## Queue definitions +## Queues should be defined using a pipe '|' delimited string, of two +## colon ':' delimited elements. The first part of each queue definition is +## the ascii name of the queue, the second part indicated the filter to be +## applied which should be either: +## - any (all real-time modifications to be replicated via this queue) +## - block_rtq (no real-time modifications to be replicated) +## - bucketname. +## - bucketprefix. +## - buckettype. +## The latter three options allow for specific buckets to be supported by the +## queue, or only buckets with certain prefixes, or for just buckets of a given +## type. +## If a list of buckets or types need to be supported, then either multiple +## queues need to be defined, or non-persistent extended definitions can be +## made at runtime used the riak_kv_replrtq_src API. +## Example configurtaion might be: +## cluster_a:any|cluster_b:block_rtq|cluster_c:bucketprefix.user +## +## Default: q1_ttaaefs:block_rtq +## +## Acceptable values: +## - text +replrtq_srcqueue = q1_ttaaefs:block_rtq + +## Enable this node zlib compress objects over the wire +## +## Default: disabled +## +## Acceptable values: +## - enabled or disabled +## replrtq_compressonwire = enabled + +## Enable this node to act as a sink and consume from a src cluster +## +## Default: disabled +## +## Acceptable values: +## - enabled or disabled +## replrtq_enablesink = enabled + +## Queue name to be used for peers (replrtq_sinkpeers) that are +## defined without a queue name. Each node is expected to have a single +## queue from which it will consume (by name). 
This queue may be consumed +## from multiple peers - and those peers may sit on multiple clusters. +## If more than one queue name is to be consumed from, real-time changes can +## be made through `riak_kv_replrtq_snk:add_snkqueue/3`. The peer list can +## also be extended to add different queue names into definitions - however +## it is strongly recommended to use a single sinkqueue name per node. +## +## Default: q1_ttaaefs +## +## Acceptable values: +## - text +replrtq_sinkqueue = q1_ttaaefs + +## A list of peers is required to inform the sink node how to reach the +## src. All src nodes will need to have entries consumed - so it is +## recommended that each src node is referred to in multiple sink node +## configurations. +## The list of peers is tokenised as host:port:protocol +## In exceptional circumstances this definition can be extended to +## queuename:host:port:protocol - but restricting the definitions of queuename +## to the single queue specified in replrtq_sinkqueue is strongly recommended. +## +## Acceptable values: +## - text +## replrtq_sinkpeers = 127.0.0.1:8098:http + +## The number of workers to be used for each queue must be configured. +## +## Default: 24 +## +## Acceptable values: +## - an integer +replrtq_sinkworkers = 24 + +## The maximum number of workers to be for any given peer may be +## configured - if not configured will default to the number of sinkworkers +## +## Acceptable values: +## - an integer +## replrtq_sinkpeerlimit = 24 + +## Enable the `recalc` compaction strategy within the leveled backend in +## riak. The default (when disabled) is `retain`, but this will leave +## uncollected garbage within the, journal. +## It is now recommended from Riak KV 2.9.2 to consider the `recalc` strategy. +## This strategy has a side effect of slower startups, and slower recovery +## from a wiped ledger - but it will not keep an overhead of garbage within +## the Journal. +## It should be possible to move from `retain` to `recalc` via configuration +## change. However, it is not possible to switch from `recalc` back to +## `retain`. This switch can only be made for new nodes receiving data +## through riak transfers (not inheriting data on disk). +## The default `retain` strategy retains a history of key changes in the +## journal, whereas the `recalc` strategy discards that history, but will redo +## a diff_index_specs calculation when reloading each object. +## +## Default: disabled +## +## Acceptable values: +## - enabled or disabled +## leveled_reload_recalc = enabled + +## Enable logging of query timings in the index_fsm +## +## Default: disabled +## +## Acceptable values: +## - enabled or disabled +## log_index_fsm = enabled + +## Set the vnode worker pool size +## This is a pool of workers per-vnode, to be used for general queries, in +## particular secondary index queries. This now defaults to 5 workers, prior +## to release 3.0.9 it was set to a default of 10. +## The number of concurrent index queries that can be supported in the cluster +## will be equal to n_val * worker_count. +## The statistic worker_vnode_pool_worktime_mean tracks the average time +## each worker is taking per query in microseconds, so the overall queries +## per second supported will be: +## (1000000 div worker_vnode_pool_worktime) * n_val * worker_count +## It should normally be possible to support >> 100 queries per second with +## just a single worker per vnode. 
+## The statistic worker_vnode_pool_queuetime_mean will track the average time +## a query is spending on a queue, should the vnode pool be exhausted. +## If using tictac_aae this should be set to at least 2, as tree rebuilds use +## this pool as well as queries. Also consider that long-running legacy +## queries (list keys and list buckets, not using aae_fold) also use +## this pool. All aae_fold type queries will use the alternative +## node_worker_pool, unless none is used for the worker_pool_strategy, in which +## case the vnode pool is also used for aae_folds. +## +## Default: 5 +## +## Acceptable values: +## - an integer +worker_pool_size = 5 + +## A path under which bitcask data files will be stored. +## +## Default: $(platform_data_dir)/bitcask +## +## Acceptable values: +## - the path to a directory +bitcask.data_root = $(platform_data_dir)/bitcask + +## Configure how Bitcask writes data to disk. +## erlang: Erlang's built-in file API +## nif: Direct calls to the POSIX C API +## The NIF mode provides higher throughput for certain +## workloads, but has the potential to negatively impact +## the Erlang VM, leading to higher worst-case latencies +## and possible throughput collapse. +## +## Default: erlang +## +## Acceptable values: +## - one of: erlang, nif +bitcask.io_mode = erlang + +## This parameter defines the percentage of total server memory +## to assign to LevelDB. LevelDB will dynamically adjust its internal +## cache sizes to stay within this size. The memory size can +## alternately be assigned as a byte count via leveldb.maximum_memory +## instead. +## +## Default: 70 +## +## Acceptable values: +## - an integer +leveldb.maximum_memory.percent = 70 + +## Enables or disables the compression of data on disk. +## Enabling (default) saves disk space. Disabling may reduce read +## latency but increase overall disk activity. Option can be +## changed at any time, but will not impact data on disk until +## next time a file requires compaction. +## +## Default: on +## +## Acceptable values: +## - on or off +leveldb.compression = on + +## Selection of compression algorithms. snappy is +## original compression supplied for leveldb. lz4 is new +## algorithm that compresses to similar volume but averages twice +## as fast on writes and four times as fast on reads. +## +## Acceptable values: +## - one of: snappy, lz4 +leveldb.compression.algorithm = lz4 + +## +## Default: on +## +## Acceptable values: +## - on or off +## multi_backend.name.leveldb.compression = on + +## +## Acceptable values: +## - one of: snappy, lz4 +## multi_backend.name.leveldb.compression.algorithm = lz4 + +## A path under which leveled data files will be stored. +## +## Default: $(platform_data_dir)/leveled +## +## Acceptable values: +## - the path to a directory +leveled.data_root = $(platform_data_dir)/leveled + +## Strategy for flushing data to disk +## Can be set to riak_sync, sync (if OTP > 16) or none. Use none, and the OS +## will flush when most efficient. Use riak_sync or sync to flush after every +## PUT (not recommended without some hardware support e.g. 
flash drives and/or +## Flash-backed Write Caches) +## +## Default: none +## +## Acceptable values: +## - text +leveled.sync_strategy = none + +## Compression method +## Can be lz4 or native (which will use the Erlang native zlib compression) +## within term_to_binary +## +## Default: native +## +## Acceptable values: +## - text +leveled.compression_method = native + +## Compression point +## The point at which compression is applied to the Journal (the Ledger is +## always compressed). Use on_receipt or on_compact. on_compact is suitable +## when values are unlikely to yield much benefit from compression +## (compression is only attempted when compacting) +## +## Default: on_receipt +## +## Acceptable values: +## - text +leveled.compression_point = on_receipt + +## Log level +## Can be debug, info, warn, error or critical +## Set the minimum log level to be used within leveled. Leveled will log many +## lines to allow for stats to be etracted by those using log indexers such as +## Splunk +## +## Default: info +## +## Acceptable values: +## - text +leveled.log_level = info + +## The approximate size (in bytes) when a Journal file should be rolled. +## Normally keep this as around the size of o(100K) objects. Default is 1GB. +## Note that on startup an actual maximum size will be chosen which varies by +## a random factor from this point - to avoid coordination of roll events +## across vnodes. +## +## Default: 1000000000 +## +## Acceptable values: +## - an integer +leveled.journal_size = 1000000000 + +## The approximate count of objects when a Journal file should be rolled. +## This time measured in object count, a file will be rolled if either the +## object count or the journal size limit is reached. Default 200K. +## Note that on startup an actual maximum size will be chosen which varies by +## a random factor from this point - to avoid coordination of roll events +## across vnodes. +## +## Default: 200000 +## +## Acceptable values: +## - an integer +leveled.journal_objectcount = 200000 + +## The level of the ledger to be pre-loaded into the page cache +## Depending on how much memory is available for the page cache, and how much +## disk I/O activity can be tolerated at startup - then the level at which the +## ledger is forced into the page cache can be controlled by configuration. +## +## Default: 4 +## +## Acceptable values: +## - an integer +leveled.ledger_pagecachelevel = 4 + +## The number of journal compactions per vnode per day +## The higher the value, the more compaction runs, and the sooner space is +## recovered. But each run has a cost +## +## Default: 24 +## +## Acceptable values: +## - an integer +leveled.compaction_runs_perday = 24 + +## The number of times per day to score an individual file for compaction. +## The default value will lead to each file, on average, being scored once +## every 12 hours +## +## Default: 2 +## +## Acceptable values: +## - an integer +leveled.compaction_scores_perday = 2 + +## Compaction Low Hour +## The hour of the day in which journal compaction can start. Use Low hour +## of 0 and High hour of 23 to have no compaction window (i.e. always compact +## regardless of time of day) +## +## Default: 0 +## +## Acceptable values: +## - an integer +leveled.compaction_low_hour = 0 + +## Compaction Top Hour +## The hour of the day, after which journal compaction should stop. +## If low hour > top hour then, compaction will work overnight between low +## hour and top hour (inclusive). 
Timings rely on server's view of local time +## +## Default: 23 +## +## Acceptable values: +## - an integer +leveled.compaction_top_hour = 23 + +## Max Journal Files Per Compaction Run +## In a single compaction run, what is the maximum number of consecutive files +## which may be compacted. If increasing this value, the average number of +## files per run may decrease, unless adjustments are also made to the +## maxrunlength and singlefile compactionpercentage settings. +## +## Default: 4 +## +## Acceptable values: +## - an integer +leveled.max_run_length = 4 + +## The approximate size (in bytes) when a Journal file should be rolled. +## Normally keep this as around the size of o(100K) objects. Default is 1GB. +## Note that on startup an actual maximum size will be chosen which varies by +## a random factor from this point - to avoid coordination of roll events +## across vnodes. +## +## Default: 1000000000 +## +## Acceptable values: +## - an integer +multi_backend.name.leveled.journal_size = 1000000000 + +## The approximate count of objects when a Journal file should be rolled. +## This time measured in object count, a file will be rolled if either the +## object count or the journal size limit is reached. Default 200K. +## Note that on startup an actual maximum size will be chosen which varies by +## a random factor from this point - to avoid coordination of roll events +## across vnodes. +## +## Default: 200000 +## +## Acceptable values: +## - an integer +multi_backend.name.leveled.journal_objectcount = 200000 + +## The level of the ledger to be pre-loaded into the page cache +## Depending on how much memory is available for the page cache, and how much +## disk I/O activity can be tolerated at startup - then the level at which the +## ledger is forced into the page cache can be controlled by configuration. +## +## Default: 4 +## +## Acceptable values: +## - an integer +multi_backend.name.leveled.ledger_pagecachelevel = 4 + +## The number of times per day to score an individual file for compaction +## The default value will lead to each file, on average, being scored once +## every 12 hours +## +## Default: 2 +## +## Acceptable values: +## - an integer +multi_backend.name.leveled.compaction_scores_perday = 2 + +## Path (relative or absolute) to the working directory for the +## replication process +## +## Default: /var/lib/riak/riak_repl/ +## +## Acceptable values: +## - text +mdc.data_root = /var/lib/riak/riak_repl/ + +## The cluster manager will listen for connections from remote +## clusters on this ip and port. Every node runs one cluster manager, +## but only the cluster manager running on the cluster_leader will +## service requests. This can change as nodes enter and leave the +## cluster. The value is a combination of an IP address (**not +## hostname**) followed by a port number +## +## Default: 127.0.0.1:9080 +## +## Acceptable values: +## - an IP/port pair, e.g. 127.0.0.1:10011 +mdc.cluster_manager = 127.0.0.1:9080 + +## The hard limit of fullsync workers that will be running on the +## source side of a cluster across all nodes on that cluster for a +## fullsync to a sink cluster. This means if one has configured +## fullsync for two different clusters, both with a +## max_fssource_cluster of 5, 10 fullsync workers can be in +## progress. 
Only affects nodes on the source cluster on which this +## parameter is defined via the configuration file or command line +## +## Default: 5 +## +## Acceptable values: +## - an integer +mdc.max_fssource_cluster = 5 + +## Limits the number of fullsync workers that will be running on +## each individual node in a source cluster. This is a hard limit for +## all fullsyncs enabled; additional fullsync configurations will not +## increase the number of fullsync workers allowed to run on any node. +## Only affects nodes on the source cluster on which this parameter is +## defined via the configuration file or command line +## +## Default: 1 +## +## Acceptable values: +## - an integer +mdc.max_fssource_node = 1 + +## Limits the number of "soft_exist" that the fullsynce +## coordinator will handle before failing a partition from +## fullsync. The soft_retries is per-fullsync, not per-partition. +## Only affects nodes on the source cluster on which this parameter is +## defined via the configuration file +## +## Default: 100 +## +## Acceptable values: +## - an integer +mdc.max_fssource_soft_retries = 100 + +## Adds a retry wait time. To be used in conjunction with +## soft_retries. When a partition fails to fullsync with a soft_exit, +## it is added to a queue to be retried. The retry wait time is the +## minimum amount of time to elapse before a fullsync is re-attempted +## on that partition. An example of usage: If the remote partition's +## AAE tree is being re-built it can take many minutes, even +## hours. There is no point in rapidly re-trying the same partition +## `max_fssource_soft_retries' times in rapid +## succession. fssource_retry_wait * max_fssource_soft_retries is the +## maximum amount of time that can pass before fullsync discards a +## partition. +## +## Default: 60s +## +## Acceptable values: +## - a time duration with units, e.g. '10s' for 10 seconds +mdc.fssource_retry_wait = 60s + +## Limits the number of fullsync workers allowed to run on each +## individual node in a sink cluster. This is a hard limit for all +## fullsync sources interacting with the sink cluster. Thus, multiple +## simultaneous source connections to the sink cluster will have to +## share the sink node's number of maximum connections. Only affects +## nodes on the sink cluster on which this parameter is defined via +## the configuration file or command line. +## +## Default: 1 +## +## Acceptable values: +## - an integer +mdc.max_fssink_node = 1 + +## Whether to initiate a fullsync on initial connection from the +## secondary cluster +## +## Default: true +## +## Acceptable values: +## - one of: true, false +mdc.fullsync_on_connect = true + +## a single integer value representing the duration to wait in +## minutes between fullsyncs, or a list of {clustername, +## time_in_minutes} pairs for each sink participating in fullsync +## replication. +## +## Acceptable values: +## - a time duration with units, e.g. '10s' for 10 seconds +## mdc.fullsync_interval.all = 30m + +## The maximum size the realtime replication queue can grow to +## before new objects are dropped. Defaults to 100MB. Dropped objects +## will need to be replication with a fullsync. +## +## Default: 100MB +## +## Acceptable values: +## - a byte size with units, e.g. 10GB +mdc.rtq_max_bytes = 100MB + +## Enable Riak CS proxy_get and block filter. +## +## Default: off +## +## Acceptable values: +## - one of: on, off +mdc.proxy_get = off + +## A heartbeat message is sent from the source to the sink every +## heartbeat_interval. 
Setting heartbeat_interval to undefined +## disables the realtime heartbeat. This feature is only available in +## Riak Enterprise 1.3.2+. +## +## Default: 15s +## +## Acceptable values: +## - a time duration with units, e.g. '10s' for 10 seconds +mdc.realtime.heartbeat_interval = 15s + +## If a heartbeat response is not received in +## rt_heartbeat_timeout seconds, then the source connection exits and +## will be re-established. This feature is only available in Riak +## Enterprise 1.3.2+. +## +## Default: 15s +## +## Acceptable values: +## - a time duration with units, e.g. '10s' for 10 seconds +mdc.realtime.heartbeat_timeout = 15s + +## How frequently the stats for fullsync source processes should be +## gathered. Requests for fullsync status always returned the most recently +## gathered data, and thus can be at most as old as this value. +## +## Acceptable values: +## - a time duration with units, e.g. '10s' for 10 seconds +## mdc.fullsync.stat_refresh_interval = 1m + diff --git a/receiver/riakreceiver/testdata/scraper/expected.json b/receiver/riakreceiver/testdata/scraper/expected.json new file mode 100644 index 000000000000..865797352794 --- /dev/null +++ b/receiver/riakreceiver/testdata/scraper/expected.json @@ -0,0 +1,214 @@ +{ + "resourceMetrics": [ + { + "instrumentationLibraryMetrics": [ + { + "instrumentationLibrary": { + "name": "otelcol/riakreceiver" + }, + "metrics": [ + { + "description": "The amount of memory allocated to the node.", + "name": "riak.memory.limit", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "3127377920", + "startTimeUnixNano": "1648220661611816000", + "timeUnixNano": "1648220661612587000" + } + ] + }, + "unit": "By" + }, + { + "description": "The number of operations performed by the node.", + "name": "riak.node.operation.count", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "2", + "attributes": [ + { + "key": "request", + "value": { + "stringValue": "get" + } + } + ], + "startTimeUnixNano": "1648220661611816000", + "timeUnixNano": "1648220661612587000" + }, + { + "asInt": "3", + "attributes": [ + { + "key": "request", + "value": { + "stringValue": "put" + } + } + ], + "startTimeUnixNano": "1648220661611816000", + "timeUnixNano": "1648220661612587000" + } + ], + "isMonotonic": true + }, + "unit": "{operation}" + }, + { + "description": "The mean time between request and response for operations performed by the node over the last minute.", + "gauge": { + "dataPoints": [ + { + "asInt": "4", + "attributes": [ + { + "key": "request", + "value": { + "stringValue": "get" + } + } + ], + "startTimeUnixNano": "1648220661611816000", + "timeUnixNano": "1648220661612587000" + }, + { + "asInt": "5", + "attributes": [ + { + "key": "request", + "value": { + "stringValue": "put" + } + } + ], + "startTimeUnixNano": "1648220661611816000", + "timeUnixNano": "1648220661612587000" + } + ] + }, + "name": "riak.node.operation.time.mean", + "unit": "us" + }, + { + "description": "The number of read repairs performed by the node.", + "name": "riak.node.read_repair.count", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "0", + "startTimeUnixNano": "1648220661611816000", + "timeUnixNano": "1648220661612587000" + } + ], + "isMonotonic": true + }, + "unit": "{read_repair}" + }, + { + "description": "The number of index operations performed by vnodes on the node.", + "name": 
"riak.vnode.index.operation.count", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "10", + "attributes": [ + { + "key": "operation", + "value": { + "stringValue": "read" + } + } + ], + "startTimeUnixNano": "1648220661611816000", + "timeUnixNano": "1648220661612587000" + }, + { + "asInt": "11", + "attributes": [ + { + "key": "operation", + "value": { + "stringValue": "write" + } + } + ], + "startTimeUnixNano": "1648220661611816000", + "timeUnixNano": "1648220661612587000" + }, + { + "asInt": "9", + "attributes": [ + { + "key": "operation", + "value": { + "stringValue": "delete" + } + } + ], + "startTimeUnixNano": "1648220661611816000", + "timeUnixNano": "1648220661612587000" + } + ] + }, + "unit": "{operation}" + }, + { + "description": "The number of operations performed by vnodes on the node.", + "name": "riak.vnode.operation.count", + "sum": { + "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", + "dataPoints": [ + { + "asInt": "7", + "attributes": [ + { + "key": "request", + "value": { + "stringValue": "get" + } + } + ], + "startTimeUnixNano": "1648220661611816000", + "timeUnixNano": "1648220661612587000" + }, + { + "asInt": "8", + "attributes": [ + { + "key": "request", + "value": { + "stringValue": "put" + } + } + ], + "startTimeUnixNano": "1648220661611816000", + "timeUnixNano": "1648220661612587000" + } + ], + "isMonotonic": true + }, + "unit": "{operation}" + } + ] + } + ], + "resource": { + "attributes": [ + { + "key": "riak.node.name", + "value": { + "stringValue": "riak@127.0.0.1" + } + } + ] + } + } + ] +} diff --git a/versions.yaml b/versions.yaml index bb7cf5c47a51..162b854fd986 100644 --- a/versions.yaml +++ b/versions.yaml @@ -170,6 +170,7 @@ module-sets: - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/rabbitmqreceiver - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/receivercreator - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/redisreceiver + - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/riakreceiver - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/sapmreceiver - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/signalfxreceiver - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/simpleprometheusreceiver From ca63585e509811a5341b753fb4c8188f9bcb610e Mon Sep 17 00:00:00 2001 From: Alex Boten Date: Thu, 7 Apr 2022 11:23:56 -0700 Subject: [PATCH 17/59] [extension/fluentbit][receiver/promexec] add deprecation notice (#9103) Adding deprecation notice to both promexec receiver and fluentbit extension. --- extension/fluentbitextension/go.mod | 1 + receiver/prometheusexecreceiver/go.mod | 1 + 2 files changed, 2 insertions(+) diff --git a/extension/fluentbitextension/go.mod b/extension/fluentbitextension/go.mod index f63d0df80871..cdb86fe25bd0 100644 --- a/extension/fluentbitextension/go.mod +++ b/extension/fluentbitextension/go.mod @@ -1,3 +1,4 @@ +// Deprecated: fluentbit extension is deprecated and will be removed in future versions. 
module github.com/open-telemetry/opentelemetry-collector-contrib/extension/fluentbitextension go 1.17 diff --git a/receiver/prometheusexecreceiver/go.mod b/receiver/prometheusexecreceiver/go.mod index 479258d3d931..3d771f60edab 100644 --- a/receiver/prometheusexecreceiver/go.mod +++ b/receiver/prometheusexecreceiver/go.mod @@ -1,3 +1,4 @@ +// Deprecated: prometheus_exec receiver is deprecated and will be removed in future versions. module github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusexecreceiver go 1.17 From a9b5da4ac7aeb7b6c1971495e0cebb0ffde2a20b Mon Sep 17 00:00:00 2001 From: Martin Hickey Date: Fri, 8 Apr 2022 18:48:14 +0100 Subject: [PATCH 18/59] [ci] Check contrib modules are using the correct core collector version (#9112) * Check contrib modules are using the correct core collector version Signed-off-by: Martin Hickey * Updates after review Review: - https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/9112#pullrequestreview-935066186 Signed-off-by: Martin Hickey * Update after review Review: - https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/9112#pullrequestreview-936398313 Signed-off-by: Martin Hickey --- .github/workflows/build-and-test.yml | 8 ++ .../scripts/check-collector-module-version.sh | 82 +++++++++++++++++++ 2 files changed, 90 insertions(+) create mode 100755 .github/workflows/scripts/check-collector-module-version.sh diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml index 8c25d1e0c03b..8781486d00ca 100644 --- a/.github/workflows/build-and-test.yml +++ b/.github/workflows/build-and-test.yml @@ -41,6 +41,14 @@ jobs: - name: Install Tools if: steps.tool-cache.outputs.cache-hit != 'true' run: make install-tools + check-collector-module-version: + runs-on: ubuntu-latest + needs: [setup-environment] + steps: + - name: Checkout Repo + uses: actions/checkout@v2 + - name: Check Collector Module Version + run: ./.github/workflows/scripts/check-collector-module-version.sh lint: runs-on: ubuntu-latest needs: [setup-environment] diff --git a/.github/workflows/scripts/check-collector-module-version.sh b/.github/workflows/scripts/check-collector-module-version.sh new file mode 100755 index 000000000000..911839074c36 --- /dev/null +++ b/.github/workflows/scripts/check-collector-module-version.sh @@ -0,0 +1,82 @@ +#!/usr/bin/env bash + +# Copyright The OpenTelemetry Authors + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# verifies if the collector components are using the main core collector version +# as a dependency. +# +set -eu -o pipefail + +# Return the collector main core version +get_collector_version() { + collector_module="$1" + main_mod_file="$2" + + if grep -q "$collector_module" "$main_mod_file"; then + grep "$collector_module" "$main_mod_file" | (read mod version; + echo $version) + else + echo "Error: failed to retrieve the \"$collector_module\" version from \"$main_mod_file\"." 
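+    # Without the core collector version there is nothing to validate the
+    # other modules against, so fail the CI job immediately.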
+ exit 1 + fi +} + +# Compare the collector main core version against all the collector component +# modules to verify that they are using this version as its dependency +check_collector_versions_correct() { + collector_module="$1" + collector_mod_version="$2" + incorrect_version=0 + mod_files=$(find . -type f -name "go.mod") + + # Loop through all the module files, checking the collector version + for mod_file in $mod_files; do + if grep -q "$collector_module" "$mod_file"; then + mod_line=$(grep "$collector_module" "$mod_file") + version=$(echo "$mod_line" | cut -d" " -f2) + + # To account for a module on its own 'require' line, + # the version field is shifted right by 1 + if [ "$version" == "$collector_module" ]; then + version=$(echo "$mod_line" | cut -d" " -f3) + fi + + if [ "$version" != "$collector_mod_version" ]; then + incorrect_version=$((incorrect_version+1)) + echo "Incorrect version \"$version\" of \"$collector_module\" is included in \"$mod_file\". It should be version \"$collector_mod_version\"." + fi + fi + done + + echo "There are $incorrect_version incorrect \"$collector_module\" version(s) in the module files." + if [ "$incorrect_version" -gt 0 ]; then + exit 1 + fi +} + +# Note space at end of string. This is so it filters for the exact string +# only and does not return string which contains this string as a substring. +COLLECTOR_MODULE="go.opentelemetry.io/collector " + +COLLECTOR_MODEL_MODULE="go.opentelemetry.io/collector/model" +MAIN_MOD_FILE="./go.mod" +COLLECTOR_MOD_VERSION=$(get_collector_version "$COLLECTOR_MODULE" "$MAIN_MOD_FILE") + +# Check the collector module version in each of the module files +check_collector_versions_correct "$COLLECTOR_MODULE" "$COLLECTOR_MOD_VERSION" + +# Check the collector model module version in each of the module files +check_collector_versions_correct "$COLLECTOR_MODEL_MODULE" "$COLLECTOR_MOD_VERSION" From a9ba630997fc247703d7f154be834531e470d7f2 Mon Sep 17 00:00:00 2001 From: Daniel Dyla Date: Fri, 8 Apr 2022 14:20:59 -0400 Subject: [PATCH 19/59] [dynatraceexporter]: Move internal packages to internal folder (#9097) * [dynatraceexporter]: Move internal packages to internal folder * Revert config package internal * Update changelog --- CHANGELOG.md | 1 + .../dynatraceexporter/{ => internal}/serialization/gauge.go | 2 +- .../{ => internal}/serialization/gauge_test.go | 0 .../dynatraceexporter/{ => internal}/serialization/histogram.go | 2 +- .../{ => internal}/serialization/histogram_test.go | 0 .../{ => internal}/serialization/serialization.go | 2 +- exporter/dynatraceexporter/{ => internal}/serialization/sum.go | 2 +- .../dynatraceexporter/{ => internal}/serialization/sum_test.go | 0 exporter/dynatraceexporter/metrics_exporter.go | 2 +- 9 files changed, 6 insertions(+), 5 deletions(-) rename exporter/dynatraceexporter/{ => internal}/serialization/gauge.go (95%) rename exporter/dynatraceexporter/{ => internal}/serialization/gauge_test.go (100%) rename exporter/dynatraceexporter/{ => internal}/serialization/histogram.go (97%) rename exporter/dynatraceexporter/{ => internal}/serialization/histogram_test.go (100%) rename exporter/dynatraceexporter/{ => internal}/serialization/serialization.go (97%) rename exporter/dynatraceexporter/{ => internal}/serialization/sum.go (98%) rename exporter/dynatraceexporter/{ => internal}/serialization/sum_test.go (100%) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3f1e29b1ea7c..f98e86d234ee 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -22,6 +22,7 @@ - A detailed [Upgrade 
Guide](https://github.com/open-telemetry/opentelemetry-log-collection/releases/tag/v0.28.0) is available in the log-collection v0.28.0 release notes. - `datadogexporter`: Remove `OnlyMetadata` method from `Config` struct (#8980) - `datadogexporter`: Remove `GetCensoredKey` method from `APIConfig` struct (#8980) +- `dynatraceexporter`: Make `serialization` package `/internal` (#9097) ### 🧰 Bug fixes 🧰 diff --git a/exporter/dynatraceexporter/serialization/gauge.go b/exporter/dynatraceexporter/internal/serialization/gauge.go similarity index 95% rename from exporter/dynatraceexporter/serialization/gauge.go rename to exporter/dynatraceexporter/internal/serialization/gauge.go index 15cc7c3a2cbf..4f6c79040f0b 100644 --- a/exporter/dynatraceexporter/serialization/gauge.go +++ b/exporter/dynatraceexporter/internal/serialization/gauge.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package serialization // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/dynatraceexporter/serialization" +package serialization // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/dynatraceexporter/internal/serialization" import ( "fmt" diff --git a/exporter/dynatraceexporter/serialization/gauge_test.go b/exporter/dynatraceexporter/internal/serialization/gauge_test.go similarity index 100% rename from exporter/dynatraceexporter/serialization/gauge_test.go rename to exporter/dynatraceexporter/internal/serialization/gauge_test.go diff --git a/exporter/dynatraceexporter/serialization/histogram.go b/exporter/dynatraceexporter/internal/serialization/histogram.go similarity index 97% rename from exporter/dynatraceexporter/serialization/histogram.go rename to exporter/dynatraceexporter/internal/serialization/histogram.go index 62bd429d9352..9164739d0e14 100644 --- a/exporter/dynatraceexporter/serialization/histogram.go +++ b/exporter/dynatraceexporter/internal/serialization/histogram.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package serialization // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/dynatraceexporter/serialization" +package serialization // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/dynatraceexporter/internal/serialization" import ( "errors" diff --git a/exporter/dynatraceexporter/serialization/histogram_test.go b/exporter/dynatraceexporter/internal/serialization/histogram_test.go similarity index 100% rename from exporter/dynatraceexporter/serialization/histogram_test.go rename to exporter/dynatraceexporter/internal/serialization/histogram_test.go diff --git a/exporter/dynatraceexporter/serialization/serialization.go b/exporter/dynatraceexporter/internal/serialization/serialization.go similarity index 97% rename from exporter/dynatraceexporter/serialization/serialization.go rename to exporter/dynatraceexporter/internal/serialization/serialization.go index 68b1ca704c08..7760c90b32ec 100644 --- a/exporter/dynatraceexporter/serialization/serialization.go +++ b/exporter/dynatraceexporter/internal/serialization/serialization.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package serialization // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/dynatraceexporter/serialization" +package serialization // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/dynatraceexporter/internal/serialization" import ( "fmt" diff --git a/exporter/dynatraceexporter/serialization/sum.go b/exporter/dynatraceexporter/internal/serialization/sum.go similarity index 98% rename from exporter/dynatraceexporter/serialization/sum.go rename to exporter/dynatraceexporter/internal/serialization/sum.go index 0bbea506a497..5554ae557aed 100644 --- a/exporter/dynatraceexporter/serialization/sum.go +++ b/exporter/dynatraceexporter/internal/serialization/sum.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package serialization // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/dynatraceexporter/serialization" +package serialization // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/dynatraceexporter/internal/serialization" import ( "fmt" diff --git a/exporter/dynatraceexporter/serialization/sum_test.go b/exporter/dynatraceexporter/internal/serialization/sum_test.go similarity index 100% rename from exporter/dynatraceexporter/serialization/sum_test.go rename to exporter/dynatraceexporter/internal/serialization/sum_test.go diff --git a/exporter/dynatraceexporter/metrics_exporter.go b/exporter/dynatraceexporter/metrics_exporter.go index 6d47fead22d6..faee9fde4725 100644 --- a/exporter/dynatraceexporter/metrics_exporter.go +++ b/exporter/dynatraceexporter/metrics_exporter.go @@ -32,7 +32,7 @@ import ( "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/dynatraceexporter/config" - "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/dynatraceexporter/serialization" + "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/dynatraceexporter/internal/serialization" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/ttlmap" ) From 3e1b50a3cfc0a32a3cc34cbe1e90c471af3e487e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Juraci=20Paix=C3=A3o=20Kr=C3=B6hling?= Date: Fri, 8 Apr 2022 17:11:23 -0300 Subject: [PATCH 20/59] [extension/oauth2clientauth] Use new client auth helpers (#7787) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Juraci Paixão Kröhling --- .../oauth2clientauthextension/extension.go | 31 ++++---------- .../extension_test.go | 40 ++++--------------- .../oauth2clientauthextension/factory.go | 11 ++++- 3 files changed, 26 insertions(+), 56 deletions(-) diff --git a/extension/oauth2clientauthextension/extension.go b/extension/oauth2clientauthextension/extension.go index ffd0c04265a8..56e7837ab517 100644 --- a/extension/oauth2clientauthextension/extension.go +++ b/extension/oauth2clientauthextension/extension.go @@ -19,8 +19,6 @@ import ( "fmt" "net/http" - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/config/configauth" "go.uber.org/multierr" "go.uber.org/zap" "golang.org/x/oauth2" @@ -29,17 +27,14 @@ import ( grpcOAuth "google.golang.org/grpc/credentials/oauth" ) -// ClientCredentialsAuthenticator provides implementation for providing client authentication using OAuth2 client credentials +// clientAuthenticator provides implementation for providing client authentication using OAuth2 
client credentials
 // workflow for both gRPC and HTTP clients.
-type ClientCredentialsAuthenticator struct {
+type clientAuthenticator struct {
 	clientCredentials *clientcredentials.Config
 	logger            *zap.Logger
 	client            *http.Client
 }
 
-// ClientCredentialsAuthenticator implements ClientAuthenticator
-var _ configauth.ClientAuthenticator = (*ClientCredentialsAuthenticator)(nil)
-
 type errorWrappingTokenSource struct {
 	ts       oauth2.TokenSource
 	tokenURL string
 }
 
 var _ oauth2.TokenSource = (*errorWrappingTokenSource)(nil)
 
 // errFailedToGetSecurityToken indicates a problem communicating with the OAuth2 server.
 var errFailedToGetSecurityToken = fmt.Errorf("failed to get security token from token endpoint")
 
-func newClientCredentialsExtension(cfg *Config, logger *zap.Logger) (*ClientCredentialsAuthenticator, error) {
+func newClientAuthenticator(cfg *Config, logger *zap.Logger) (*clientAuthenticator, error) {
 	if cfg.ClientID == "" {
 		return nil, errNoClientIDProvided
 	}
@@ -70,7 +65,7 @@
 	}
 	transport.TLSClientConfig = tlsCfg
 
-	return &ClientCredentialsAuthenticator{
+	return &clientAuthenticator{
 		clientCredentials: &clientcredentials.Config{
 			ClientID:     cfg.ClientID,
 			ClientSecret: cfg.ClientSecret,
@@ -86,16 +81,6 @@
 	}, nil
 }
 
-// Start for ClientCredentialsAuthenticator extension does nothing
-func (o *ClientCredentialsAuthenticator) Start(_ context.Context, _ component.Host) error {
-	return nil
-}
-
-// Shutdown for ClientCredentialsAuthenticator extension does nothing
-func (o *ClientCredentialsAuthenticator) Shutdown(_ context.Context) error {
-	return nil
-}
-
 func (ewts errorWrappingTokenSource) Token() (*oauth2.Token, error) {
 	tok, err := ewts.ts.Token()
 	if err != nil {
@@ -106,9 +91,9 @@
 	return tok, nil
 }
 
-// RoundTripper returns oauth2.Transport, an http.RoundTripper that performs "client-credential" OAuth flow and
+// roundTripper returns oauth2.Transport, an http.RoundTripper that performs the "client-credential" OAuth flow and
 // also auto refreshes OAuth tokens as needed.
-func (o *ClientCredentialsAuthenticator) RoundTripper(base http.RoundTripper) (http.RoundTripper, error) {
+func (o *clientAuthenticator) roundTripper(base http.RoundTripper) (http.RoundTripper, error) {
 	ctx := context.WithValue(context.Background(), oauth2.HTTPClient, o.client)
 	return &oauth2.Transport{
 		Source: errorWrappingTokenSource{
@@ -119,9 +104,9 @@
 	}, nil
 }
 
-// PerRPCCredentials returns gRPC PerRPCCredentials that supports "client-credential" OAuth flow. The underneath
+// perRPCCredentials returns gRPC PerRPCCredentials that supports the "client-credential" OAuth flow. The underlying
 // oauth2.clientcredentials.Config instance will manage tokens, performing auto refresh as necessary.
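+// Note: roundTripper and perRPCCredentials are deliberately unexported; the
+// factory (see factory.go below) wires them into the collector through
+// configauth.NewClientAuthenticator, which also supplies the extension
+// lifecycle in place of the removed Start/Shutdown methods.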
-func (o *ClientCredentialsAuthenticator) PerRPCCredentials() (credentials.PerRPCCredentials, error) { +func (o *clientAuthenticator) perRPCCredentials() (credentials.PerRPCCredentials, error) { ctx := context.WithValue(context.Background(), oauth2.HTTPClient, o.client) return grpcOAuth.TokenSource{ TokenSource: errorWrappingTokenSource{ diff --git a/extension/oauth2clientauthextension/extension_test.go b/extension/oauth2clientauthextension/extension_test.go index b5e3ce7632a6..8e46accb7acb 100644 --- a/extension/oauth2clientauthextension/extension_test.go +++ b/extension/oauth2clientauthextension/extension_test.go @@ -121,7 +121,7 @@ func TestOAuthClientSettings(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - rc, err := newClientCredentialsExtension(test.settings, zap.NewNop()) + rc, err := newClientAuthenticator(test.settings, zap.NewNop()) if test.shouldError { assert.NotNil(t, err) assert.Contains(t, err.Error(), test.expectedError) @@ -185,7 +185,7 @@ func TestRoundTripper(t *testing.T) { for _, testcase := range tests { t.Run(testcase.name, func(t *testing.T) { - oauth2Authenticator, err := newClientCredentialsExtension(testcase.settings, zap.NewNop()) + oauth2Authenticator, err := newClientAuthenticator(testcase.settings, zap.NewNop()) if testcase.shouldError { assert.Error(t, err) assert.Nil(t, oauth2Authenticator) @@ -193,7 +193,7 @@ func TestRoundTripper(t *testing.T) { } assert.NotNil(t, oauth2Authenticator) - roundTripper, err := oauth2Authenticator.RoundTripper(baseRoundTripper) + roundTripper, err := oauth2Authenticator.roundTripper(baseRoundTripper) assert.Nil(t, err) // test roundTripper is an OAuth RoundTripper @@ -239,14 +239,14 @@ func TestOAuth2PerRPCCredentials(t *testing.T) { for _, testcase := range tests { t.Run(testcase.name, func(t *testing.T) { - oauth2Authenticator, err := newClientCredentialsExtension(testcase.settings, zap.NewNop()) + oauth2Authenticator, err := newClientAuthenticator(testcase.settings, zap.NewNop()) if testcase.shouldError { assert.Error(t, err) assert.Nil(t, oauth2Authenticator) return } assert.NoError(t, err) - perRPCCredentials, err := oauth2Authenticator.PerRPCCredentials() + perRPCCredentials, err := oauth2Authenticator.perRPCCredentials() assert.Nil(t, err) // test perRPCCredentials is an grpc OAuthTokenSource _, ok := perRPCCredentials.(grpcOAuth.TokenSource) @@ -255,30 +255,6 @@ func TestOAuth2PerRPCCredentials(t *testing.T) { } } -func TestOAuthExtensionStart(t *testing.T) { - oAuthExtensionAuth, err := newClientCredentialsExtension( - &Config{ - ClientID: "testclientid", - ClientSecret: "testsecret", - TokenURL: "https://example.com/v1/token", - Scopes: []string{"resource.read"}, - }, nil) - assert.Nil(t, err) - assert.Nil(t, oAuthExtensionAuth.Start(context.Background(), nil)) -} - -func TestOAuthExtensionShutdown(t *testing.T) { - oAuthExtensionAuth, err := newClientCredentialsExtension( - &Config{ - ClientID: "testclientid", - ClientSecret: "testsecret", - TokenURL: "https://example.com/v1/token", - Scopes: []string{"resource.read"}, - }, nil) - assert.Nil(t, err) - assert.Nil(t, oAuthExtensionAuth.Shutdown(context.Background())) -} - func TestFailContactingOAuth(t *testing.T) { server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(200) @@ -289,7 +265,7 @@ func TestFailContactingOAuth(t *testing.T) { serverURL, err := url.Parse(server.URL) assert.NoError(t, err) - oauth2Authenticator, err := newClientCredentialsExtension(&Config{ + 
oauth2Authenticator, err := newClientAuthenticator(&Config{
 		ClientID:     "dummy",
 		ClientSecret: "ABC",
 		TokenURL:     serverURL.String(),
 	})
 	assert.Nil(t, err)
 
 	// Test for gRPC connections
-	credential, err := oauth2Authenticator.PerRPCCredentials()
+	credential, err := oauth2Authenticator.perRPCCredentials()
 	assert.Nil(t, err)
 
 	_, err = credential.GetRequestMetadata(context.Background())
@@ -308,7 +284,7 @@
 	setting := confighttp.HTTPClientSettings{
 		Endpoint: "http://example.com/",
 		CustomRoundTripper: func(next http.RoundTripper) (http.RoundTripper, error) {
-			return oauth2Authenticator.RoundTripper(next)
+			return oauth2Authenticator.roundTripper(next)
 		},
 	}
 
diff --git a/extension/oauth2clientauthextension/factory.go b/extension/oauth2clientauthextension/factory.go
index 333e5fc04ec0..397f98904f9d 100644
--- a/extension/oauth2clientauthextension/factory.go
+++ b/extension/oauth2clientauthextension/factory.go
@@ -19,6 +19,7 @@ import (
 
 	"go.opentelemetry.io/collector/component"
 	"go.opentelemetry.io/collector/config"
+	"go.opentelemetry.io/collector/config/configauth"
 )
 
 const (
@@ -41,5 +42,13 @@ func createDefaultConfig() config.Extension {
 }
 
 func createExtension(_ context.Context, set component.ExtensionCreateSettings, cfg config.Extension) (component.Extension, error) {
-	return newClientCredentialsExtension(cfg.(*Config), set.Logger)
+	ca, err := newClientAuthenticator(cfg.(*Config), set.Logger)
+	if err != nil {
+		return nil, err
+	}
+
+	return configauth.NewClientAuthenticator(
+		configauth.WithClientRoundTripper(ca.roundTripper),
+		configauth.WithPerRPCCredentials(ca.perRPCCredentials),
+	), nil
 }

From 6e1dcf6c37cd68d52a96523aa463f1ed0760358e Mon Sep 17 00:00:00 2001
From: Dmitrii Anoshin
Date: Fri, 8 Apr 2022 16:52:58 -0700
Subject: [PATCH 21/59] [exporter/splunkhec] Add an option to disable log or profiling data (#9065)

---
 CHANGELOG.md                              |   1 +
 exporter/splunkhecexporter/README.md      |   6 +-
 exporter/splunkhecexporter/client.go      |  32 ++++-
 exporter/splunkhecexporter/client_test.go | 125 +++++++++++++-----
 exporter/splunkhecexporter/config.go      |   9 ++
 exporter/splunkhecexporter/config_test.go |   2 +
 exporter/splunkhecexporter/factory.go     |   4 +-
 .../splunkhecexporter/testdata/config.yaml |   2 +
 8 files changed, 140 insertions(+), 41 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index f98e86d234ee..4823123c5714 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -14,6 +14,7 @@
 - `prometheusremotewriteexporter`: Translate resource attributes to the target info metric (#8493)
 - `podmanreceiver`: Add API timeout configuration option (#9014)
 - `cmd/mdatagen`: Add `sem_conv_version` field to metadata.yaml that is used to set metrics SchemaURL (#9010)
+- `splunkhecexporter`: Add an option to disable log or profiling data (#9065)
 
 ### 🛑 Breaking changes 🛑
 
diff --git a/exporter/splunkhecexporter/README.md b/exporter/splunkhecexporter/README.md
index 76208b743e38..1a5fdd13bc3b 100644
--- a/exporter/splunkhecexporter/README.md
+++ b/exporter/splunkhecexporter/README.md
@@ -28,7 +28,11 @@ The following configuration options can also be configured:
 - `max_content_length_logs` (default: 2097152): Maximum log data size in bytes per HTTP post limited to 2097152 bytes (2 MiB).
 - `max_content_length_metrics` (default: 2097152): Maximum metric data size in bytes per HTTP post limited to 2097152 bytes (2 MiB).
 - `splunk_app_name` (default: "OpenTelemetry Collector Contrib"): App name is used to track telemetry information for Splunk Apps using HEC by App name.
-- `splunk_app_version` (default: Current OpenTelemetry Collector Contrib Build Version): App version is used to track telemetry information for Splunk Apps using HEC by App version.
+- `splunk_app_version` (default: Current OpenTelemetry Collector Contrib Build Version): App version is used to track telemetry information for Splunk Apps using HEC by App version.
+- `log_data_enabled` (default: true): Specifies whether the log data is exported. Set it to `false` if you want the log
+  data to be dropped instead. Applicable in the `logs` pipeline only.
+- `profiling_data_enabled` (default: true): Specifies whether the profiling data is exported. Set it to `false` if
+  you want the profiling data to be dropped instead. Applicable in the `logs` pipeline only.
 - `hec_metadata_to_otel_attrs/source` (default = 'com.splunk.source'): Specifies the mapping of a specific unified model attribute value to the standard source field of a HEC event.
 - `hec_metadata_to_otel_attrs/sourcetype` (default = 'com.splunk.sourcetype'): Specifies the mapping of a specific unified model attribute value to the standard sourcetype field of a HEC event.
 - `hec_metadata_to_otel_attrs/index` (default = 'com.splunk.index'): Specifies the mapping of a specific unified model attribute value to the standard index field of a HEC event.
diff --git a/exporter/splunkhecexporter/client.go b/exporter/splunkhecexporter/client.go
index 8d5a1886aff6..516abcdc79fb 100644
--- a/exporter/splunkhecexporter/client.go
+++ b/exporter/splunkhecexporter/client.go
@@ -258,6 +258,7 @@ func (c *client) pushLogDataInBatches(ctx context.Context, ld pdata.Logs, send f
 	var permanentErrors []error
 
 	var rls = ld.ResourceLogs()
+	var droppedProfilingDataRecords, droppedLogRecords int
 	for i := 0; i < rls.Len(); i++ {
 		ills := rls.At(i).ScopeLogs()
 		for j := 0; j < ills.Len(); j++ {
@@ -265,25 +266,40 @@
 			var newPermanentErrors []error
 
 			if isProfilingData(ills.At(j)) {
+				if !c.config.ProfilingDataEnabled {
+					droppedProfilingDataRecords += ills.At(j).LogRecords().Len()
+					continue
+				}
 				profilingBufState.resource, profilingBufState.library = i, j
 				newPermanentErrors, err = c.pushLogRecords(ctx, rls, &profilingBufState, profilingHeaders, send)
 			} else {
+				if !c.config.LogDataEnabled {
+					droppedLogRecords += ills.At(j).LogRecords().Len()
+					continue
+				}
 				bufState.resource, bufState.library = i, j
 				newPermanentErrors, err = c.pushLogRecords(ctx, rls, &bufState, nil, send)
 			}
 
 			if err != nil {
-				return consumererror.NewLogs(err, *subLogs(&ld, bufState.bufFront, profilingBufState.bufFront))
+				return consumererror.NewLogs(err, *c.subLogs(&ld, bufState.bufFront, profilingBufState.bufFront))
 			}
 
 			permanentErrors = append(permanentErrors, newPermanentErrors...)
} } + if droppedProfilingDataRecords != 0 { + c.logger.Debug("Profiling data is not allowed", zap.Int("dropped_records", droppedProfilingDataRecords)) + } + if droppedLogRecords != 0 { + c.logger.Debug("Log data is not allowed", zap.Int("dropped_records", droppedLogRecords)) + } + // There's some leftover unsent non-profiling data if bufState.buf.Len() > 0 { if err := send(ctx, bufState.buf, nil); err != nil { - return consumererror.NewLogs(err, *subLogs(&ld, bufState.bufFront, profilingBufState.bufFront)) + return consumererror.NewLogs(err, *c.subLogs(&ld, bufState.bufFront, profilingBufState.bufFront)) } } @@ -291,7 +307,7 @@ func (c *client) pushLogDataInBatches(ctx context.Context, ld pdata.Logs, send f if profilingBufState.buf.Len() > 0 { if err := send(ctx, profilingBufState.buf, profilingHeaders); err != nil { // Non-profiling bufFront is set to nil because all non-profiling data was flushed successfully above. - return consumererror.NewLogs(err, *subLogs(&ld, nil, profilingBufState.bufFront)) + return consumererror.NewLogs(err, *c.subLogs(&ld, nil, profilingBufState.bufFront)) } } @@ -600,14 +616,18 @@ func (c *client) postEvents(ctx context.Context, events io.Reader, headers map[s // subLogs returns a subset of `ld` starting from `profilingBufFront` for profiling data // plus starting from `bufFront` for non-profiling data. Both can be nil, in which case they are ignored -func subLogs(ld *pdata.Logs, bufFront *index, profilingBufFront *index) *pdata.Logs { +func (c *client) subLogs(ld *pdata.Logs, bufFront *index, profilingBufFront *index) *pdata.Logs { if ld == nil { return ld } subset := pdata.NewLogs() - subLogsByType(ld, bufFront, &subset, false) - subLogsByType(ld, profilingBufFront, &subset, true) + if c.config.LogDataEnabled { + subLogsByType(ld, bufFront, &subset, false) + } + if c.config.ProfilingDataEnabled { + subLogsByType(ld, profilingBufFront, &subset, true) + } return &subset } diff --git a/exporter/splunkhecexporter/client_test.go b/exporter/splunkhecexporter/client_test.go index 48fb508ad91b..972ca264dba1 100644 --- a/exporter/splunkhecexporter/client_test.go +++ b/exporter/splunkhecexporter/client_test.go @@ -162,9 +162,14 @@ func createLogDataWithCustomLibraries(numResources int, libraries []string, numR return logs } +type receivedRequest struct { + body []byte + headers http.Header +} + type CapturingData struct { testing *testing.T - receivedRequest chan []byte + receivedRequest chan receivedRequest statusCode int checkCompression bool } @@ -182,12 +187,12 @@ func (c *CapturingData) ServeHTTP(w http.ResponseWriter, r *http.Request) { panic(err) } go func() { - c.receivedRequest <- body + c.receivedRequest <- receivedRequest{body, r.Header} }() w.WriteHeader(c.statusCode) } -func runMetricsExport(cfg *Config, metrics pdata.Metrics, t *testing.T) ([][]byte, error) { +func runMetricsExport(cfg *Config, metrics pdata.Metrics, t *testing.T) ([]receivedRequest, error) { listener, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { panic(err) @@ -197,8 +202,8 @@ func runMetricsExport(cfg *Config, metrics pdata.Metrics, t *testing.T) ([][]byt cfg.Endpoint = "http://" + listener.Addr().String() + "/services/collector" cfg.Token = "1234-1234" - receivedRequest := make(chan []byte) - capture := CapturingData{testing: t, receivedRequest: receivedRequest, statusCode: 200, checkCompression: !cfg.DisableCompression} + rr := make(chan receivedRequest) + capture := CapturingData{testing: t, receivedRequest: rr, statusCode: 200, checkCompression: !cfg.DisableCompression} s 
:= &http.Server{ Handler: &capture, } @@ -214,10 +219,10 @@ func runMetricsExport(cfg *Config, metrics pdata.Metrics, t *testing.T) ([][]byt err = exporter.ConsumeMetrics(context.Background(), metrics) assert.NoError(t, err) - var requests [][]byte + var requests []receivedRequest for { select { - case request := <-receivedRequest: + case request := <-rr: requests = append(requests, request) case <-time.After(1 * time.Second): if len(requests) == 0 { @@ -228,7 +233,7 @@ func runMetricsExport(cfg *Config, metrics pdata.Metrics, t *testing.T) ([][]byt } } -func runTraceExport(testConfig *Config, traces pdata.Traces, t *testing.T) ([][]byte, error) { +func runTraceExport(testConfig *Config, traces pdata.Traces, t *testing.T) ([]receivedRequest, error) { listener, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { panic(err) @@ -241,8 +246,8 @@ func runTraceExport(testConfig *Config, traces pdata.Traces, t *testing.T) ([][] cfg.MaxContentLengthTraces = testConfig.MaxContentLengthTraces cfg.Token = "1234-1234" - receivedRequest := make(chan []byte) - capture := CapturingData{testing: t, receivedRequest: receivedRequest, statusCode: 200, checkCompression: !cfg.DisableCompression} + rr := make(chan receivedRequest) + capture := CapturingData{testing: t, receivedRequest: rr, statusCode: 200, checkCompression: !cfg.DisableCompression} s := &http.Server{ Handler: &capture, } @@ -258,10 +263,10 @@ func runTraceExport(testConfig *Config, traces pdata.Traces, t *testing.T) ([][] err = exporter.ConsumeTraces(context.Background(), traces) assert.NoError(t, err) - var requests [][]byte + var requests []receivedRequest for { select { - case request := <-receivedRequest: + case request := <-rr: requests = append(requests, request) case <-time.After(1 * time.Second): if len(requests) == 0 { @@ -272,7 +277,7 @@ func runTraceExport(testConfig *Config, traces pdata.Traces, t *testing.T) ([][] } } -func runLogExport(cfg *Config, ld pdata.Logs, t *testing.T) ([][]byte, error) { +func runLogExport(cfg *Config, ld pdata.Logs, t *testing.T) ([]receivedRequest, error) { listener, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { panic(err) @@ -281,8 +286,8 @@ func runLogExport(cfg *Config, ld pdata.Logs, t *testing.T) ([][]byte, error) { cfg.Endpoint = "http://" + listener.Addr().String() + "/services/collector" cfg.Token = "1234-1234" - receivedRequest := make(chan []byte) - capture := CapturingData{testing: t, receivedRequest: receivedRequest, statusCode: 200, checkCompression: !cfg.DisableCompression} + rr := make(chan receivedRequest) + capture := CapturingData{testing: t, receivedRequest: rr, statusCode: 200, checkCompression: !cfg.DisableCompression} s := &http.Server{ Handler: &capture, } @@ -299,10 +304,10 @@ func runLogExport(cfg *Config, ld pdata.Logs, t *testing.T) ([][]byte, error) { err = exporter.ConsumeLogs(context.Background(), ld) assert.NoError(t, err) - var requests [][]byte + var requests []receivedRequest for { select { - case request := <-receivedRequest: + case request := <-rr: requests = append(requests, request) case <-time.After(1 * time.Second): if len(requests) == 0 { @@ -425,10 +430,10 @@ func TestReceiveTracesBatches(t *testing.T) { for i := 0; i < test.want.numBatches; i++ { require.NotZero(t, got[i]) if test.want.compressed { - validateCompressedContains(t, test.want.batches[i], got[i]) + validateCompressedContains(t, test.want.batches[i], got[i].body) } else { for _, expected := range test.want.batches[i] { - assert.Contains(t, string(got[i]), expected) + assert.Contains(t, 
string(got[i].body), expected) } } } @@ -548,10 +553,10 @@ func TestReceiveLogs(t *testing.T) { for i := 0; i < test.want.numBatches; i++ { require.NotZero(t, got[i]) if test.want.compressed { - validateCompressedContains(t, test.want.batches[i], got[i]) + validateCompressedContains(t, test.want.batches[i], got[i].body) } else { for _, expected := range test.want.batches[i] { - assert.Contains(t, string(got[i]), expected) + assert.Contains(t, string(got[i].body), expected) } } } @@ -566,7 +571,7 @@ func TestReceiveMetrics(t *testing.T) { actual, err := runMetricsExport(cfg, md, t) assert.Len(t, actual, 1) assert.NoError(t, err) - msg := string(actual[0]) + msg := string(actual[0].body) assert.Contains(t, msg, "\"event\":\"metric\"") assert.Contains(t, msg, "\"time\":1.001") assert.Contains(t, msg, "\"time\":2.002") @@ -681,10 +686,10 @@ func TestReceiveBatchedMetrics(t *testing.T) { for i := 0; i < test.want.numBatches; i++ { require.NotZero(t, got[i]) if test.want.compressed { - validateCompressedContains(t, test.want.batches[i], got[i]) + validateCompressedContains(t, test.want.batches[i], got[i].body) } else { for _, expected := range test.want.batches[i] { - assert.Contains(t, string(got[i]), expected) + assert.Contains(t, string(got[i].body), expected) } } } @@ -700,8 +705,8 @@ func TestReceiveMetricsWithCompression(t *testing.T) { } func TestErrorReceived(t *testing.T) { - receivedRequest := make(chan []byte) - capture := CapturingData{receivedRequest: receivedRequest, statusCode: 500} + rr := make(chan receivedRequest) + capture := CapturingData{receivedRequest: rr, statusCode: 500} listener, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { panic(err) @@ -734,7 +739,7 @@ func TestErrorReceived(t *testing.T) { err = exporter.ConsumeTraces(context.Background(), td) select { - case <-receivedRequest: + case <-rr: case <-time.After(5 * time.Second): t.Fatal("Should have received request") } @@ -907,7 +912,7 @@ func Test_pushLogData_InvalidLog(t *testing.T) { zippers: sync.Pool{New: func() interface{} { return gzip.NewWriter(nil) }}, - config: &Config{}, + config: NewFactory().CreateDefaultConfig().(*Config), logger: zaptest.NewLogger(t), } @@ -918,7 +923,7 @@ func Test_pushLogData_InvalidLog(t *testing.T) { err := c.pushLogData(context.Background(), logs) - assert.Contains(t, err.Error(), "Permanent error: dropped log event: &{ unknown +Inf map[]}, error: splunk.Event.Event: unsupported value: +Inf") + assert.Error(t, err, "Permanent error: dropped log event: &{ unknown +Inf map[]}, error: splunk.Event.Event: unsupported value: +Inf") } func Test_pushLogData_PostError(t *testing.T) { @@ -1140,13 +1145,67 @@ func Test_pushLogData_Small_MaxContentLength(t *testing.T) { } } +func TestAllowedLogDataTypes(t *testing.T) { + tests := []struct { + name string + allowProfilingData bool + allowLogData bool + wantProfilingRecords int + wantLogRecords int + }{ + { + name: "both_allowed", + allowProfilingData: true, + allowLogData: true, + }, + { + name: "logs_allowed", + allowProfilingData: false, + allowLogData: true, + }, + { + name: "profiling_allowed", + allowProfilingData: true, + allowLogData: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + logs := createLogDataWithCustomLibraries(1, []string{"otel.logs", "otel.profiling"}, []int{1, 1}) + cfg := NewFactory().CreateDefaultConfig().(*Config) + cfg.LogDataEnabled = test.allowLogData + cfg.ProfilingDataEnabled = test.allowProfilingData + + requests, err := runLogExport(cfg, logs, t) + 
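+			// Each receivedRequest below is one HEC POST captured by runLogExport's
+			// test server; profiling batches are told apart from plain log batches
+			// by the library name header that the exporter sets on profiling data.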
assert.NoError(t, err) + + seenLogs := false + seenProfiling := false + for _, r := range requests { + if r.headers.Get(libraryHeaderName) == profilingLibraryName { + seenProfiling = true + } else { + seenLogs = true + } + } + assert.Equal(t, test.allowLogData, seenLogs) + assert.Equal(t, test.allowProfilingData, seenProfiling) + }) + } +} + func TestSubLogs(t *testing.T) { // Creating 12 logs (2 resources x 2 libraries x 3 records) logs := createLogData(2, 2, 3) + c := client{ + config: NewFactory().CreateDefaultConfig().(*Config), + } + // Logs subset from leftmost index (resource 0, library 0, record 0). _0_0_0 := &index{resource: 0, library: 0, record: 0} //revive:disable-line:var-naming - got := subLogs(&logs, _0_0_0, nil) + got := c.subLogs(&logs, _0_0_0, nil) // Number of logs in subset should equal original logs. assert.Equal(t, logs.LogRecordCount(), got.LogRecordCount()) @@ -1160,7 +1219,7 @@ func TestSubLogs(t *testing.T) { // Logs subset from some mid index (resource 0, library 1, log 2). _0_1_2 := &index{resource: 0, library: 1, record: 2} //revive:disable-line:var-naming - got = subLogs(&logs, _0_1_2, nil) + got = c.subLogs(&logs, _0_1_2, nil) assert.Equal(t, 7, got.LogRecordCount()) @@ -1173,7 +1232,7 @@ func TestSubLogs(t *testing.T) { // Logs subset from rightmost index (resource 1, library 1, log 2). _1_1_2 := &index{resource: 1, library: 1, record: 2} //revive:disable-line:var-naming - got = subLogs(&logs, _1_1_2, nil) + got = c.subLogs(&logs, _1_1_2, nil) // Number of logs in subset should be 1. assert.Equal(t, 1, got.LogRecordCount()) @@ -1187,7 +1246,7 @@ func TestSubLogs(t *testing.T) { slice := &index{resource: 1, library: 0, record: 5} profSlice := &index{resource: 0, library: 1, record: 8} - got = subLogs(&logs, slice, profSlice) + got = c.subLogs(&logs, slice, profSlice) assert.Equal(t, 5+2+10, got.LogRecordCount()) assert.Equal(t, "otel.logs", got.ResourceLogs().At(0).ScopeLogs().At(0).Scope().Name()) diff --git a/exporter/splunkhecexporter/config.go b/exporter/splunkhecexporter/config.go index df8be937ebfb..710e9f1f720f 100644 --- a/exporter/splunkhecexporter/config.go +++ b/exporter/splunkhecexporter/config.go @@ -52,6 +52,12 @@ type Config struct { exporterhelper.QueueSettings `mapstructure:"sending_queue"` exporterhelper.RetrySettings `mapstructure:"retry_on_failure"` + // LogDataEnabled can be used to disable sending logs by the exporter. + LogDataEnabled bool `mapstructure:"log_data_enabled"` + + // ProfilingDataEnabled can be used to disable sending profiling data by the exporter. + ProfilingDataEnabled bool `mapstructure:"profiling_data_enabled"` + // HEC Token is the authentication token provided by Splunk: https://docs.splunk.com/Documentation/Splunk/latest/Data/UsetheHTTPEventCollector. 
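 	// It is sent with every HEC request in the standard
 	// "Authorization: Splunk <token>" header.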
Token string `mapstructure:"token"`
@@ -155,5 +161,8 @@
 	if err := cfg.QueueSettings.Validate(); err != nil {
 		return fmt.Errorf("sending_queue settings has invalid configuration: %w", err)
 	}
+	if !cfg.LogDataEnabled && !cfg.ProfilingDataEnabled {
+		return errors.New(`either "log_data_enabled" or "profiling_data_enabled" has to be true`)
+	}
 	return nil
 }
diff --git a/exporter/splunkhecexporter/config_test.go b/exporter/splunkhecexporter/config_test.go
index e9aee9d8d6ee..9d1b3e34129b 100644
--- a/exporter/splunkhecexporter/config_test.go
+++ b/exporter/splunkhecexporter/config_test.go
@@ -61,6 +61,8 @@
 			Index:            "metrics",
 			SplunkAppName:    "OpenTelemetry-Collector Splunk Exporter",
 			SplunkAppVersion: "v0.0.1",
+			LogDataEnabled:       true,
+			ProfilingDataEnabled: true,
 			MaxConnections:          100,
 			MaxContentLengthLogs:    2 * 1024 * 1024,
 			MaxContentLengthMetrics: 2 * 1024 * 1024,
diff --git a/exporter/splunkhecexporter/factory.go b/exporter/splunkhecexporter/factory.go
index 3682406d8f4c..f03bab0e6ec3 100644
--- a/exporter/splunkhecexporter/factory.go
+++ b/exporter/splunkhecexporter/factory.go
@@ -60,7 +60,9 @@ func NewFactory() component.ExporterFactory {
 
 func createDefaultConfig() config.Exporter {
 	return &Config{
-		ExporterSettings: config.NewExporterSettings(config.NewComponentID(typeStr)),
+		LogDataEnabled:       true,
+		ProfilingDataEnabled: true,
+		ExporterSettings:     config.NewExporterSettings(config.NewComponentID(typeStr)),
 		TimeoutSettings: exporterhelper.TimeoutSettings{
 			Timeout: defaultHTTPTimeout,
 		},
diff --git a/exporter/splunkhecexporter/testdata/config.yaml b/exporter/splunkhecexporter/testdata/config.yaml
index 7346e86bdca5..491fd7e2faca 100644
--- a/exporter/splunkhecexporter/testdata/config.yaml
+++ b/exporter/splunkhecexporter/testdata/config.yaml
@@ -14,6 +14,8 @@ exporters:
     source: "otel"
     sourcetype: "otel"
     index: "metrics"
+    log_data_enabled: true
+    profiling_data_enabled: true
     tls:
       insecure_skip_verify: false
       ca_file: ""

From 44aca0881d5a1a714bb8fdf94e01ab03a55a1a0c Mon Sep 17 00:00:00 2001
From: David Ashpole
Date: Fri, 8 Apr 2022 21:10:14 -0400
Subject: [PATCH 22/59] fix featuregate for googlecloud exporter by not checking it during NewFactory (#9116)

---
 CHANGELOG.md                            |  1 +
 exporter/googlecloudexporter/factory.go | 56 +++++++++----------
 2 files changed, 20 insertions(+), 37 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 4823123c5714..e474f1294125 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -29,6 +29,7 @@
 
 - `hostmetricsreceiver`: Use cpu times for time delta in cpu.utilization calculation (#8857)
 - `dynatraceexporter`: Remove overly verbose stacktrace from certain logs (#8989)
+- `googlecloudexporter`: fix the `exporter.googlecloud.OTLPDirect` feature gate, which was not applied when the flag was provided (#9116)
 
 ### 🚩 Deprecations 🚩
 
diff --git a/exporter/googlecloudexporter/factory.go b/exporter/googlecloudexporter/factory.go
index e826c0126a22..3faa7c967ed1 100644
--- a/exporter/googlecloudexporter/factory.go
+++ b/exporter/googlecloudexporter/factory.go
@@ -42,14 +42,6 @@ func init() {
 
 // NewFactory creates a factory for the googlecloud exporter
 func NewFactory() component.ExporterFactory {
-	if !featuregate.IsEnabled(pdataExporterFeatureGate) {
-		return component.NewExporterFactory(
-			typeStr,
-			createLegacyDefaultConfig,
-			component.WithTracesExporter(createLegacyTracesExporter),
-			component.WithMetricsExporter(createLegacyMetricsExporter),
-		)
-	}
 	return component.NewExporterFactory(
 		typeStr,
createDefaultConfig, @@ -60,6 +52,15 @@ func NewFactory() component.ExporterFactory { // createDefaultConfig creates the default configuration for exporter. func createDefaultConfig() config.Exporter { + if !featuregate.IsEnabled(pdataExporterFeatureGate) { + return &LegacyConfig{ + ExporterSettings: config.NewExporterSettings(config.NewComponentID(typeStr)), + TimeoutSettings: exporterhelper.TimeoutSettings{Timeout: defaultTimeout}, + RetrySettings: exporterhelper.NewDefaultRetrySettings(), + QueueSettings: exporterhelper.NewDefaultQueueSettings(), + UserAgent: "opentelemetry-collector-contrib {{version}}", + } + } return &Config{ ExporterSettings: config.NewExporterSettings(config.NewComponentID(typeStr)), TimeoutSettings: exporterhelper.TimeoutSettings{Timeout: defaultTimeout}, @@ -69,22 +70,17 @@ func createDefaultConfig() config.Exporter { } } -func createLegacyDefaultConfig() config.Exporter { - return &LegacyConfig{ - ExporterSettings: config.NewExporterSettings(config.NewComponentID(typeStr)), - TimeoutSettings: exporterhelper.TimeoutSettings{Timeout: defaultTimeout}, - RetrySettings: exporterhelper.NewDefaultRetrySettings(), - QueueSettings: exporterhelper.NewDefaultQueueSettings(), - UserAgent: "opentelemetry-collector-contrib {{version}}", - } -} - // createTracesExporter creates a trace exporter based on this config. func createTracesExporter( ctx context.Context, params component.ExporterCreateSettings, cfg config.Exporter) (component.TracesExporter, error) { - eCfg := cfg.(*Config) + var eCfg *Config + if !featuregate.IsEnabled(pdataExporterFeatureGate) { + eCfg = toNewConfig(cfg.(*LegacyConfig)) + } else { + eCfg = cfg.(*Config) + } tExp, err := collector.NewGoogleCloudTracesExporter(ctx, eCfg.Config, params.BuildInfo.Version, eCfg.Timeout) if err != nil { return nil, err @@ -101,20 +97,15 @@ func createTracesExporter( exporterhelper.WithRetry(eCfg.RetrySettings)) } -// createLegacyTracesExporter creates a trace exporter based on this config. -func createLegacyTracesExporter( - ctx context.Context, - params component.ExporterCreateSettings, - cfg config.Exporter) (component.TracesExporter, error) { - eCfg := cfg.(*LegacyConfig) - return createTracesExporter(ctx, params, toNewConfig(eCfg)) -} - // createMetricsExporter creates a metrics exporter based on this config. func createMetricsExporter( ctx context.Context, params component.ExporterCreateSettings, cfg config.Exporter) (component.MetricsExporter, error) { + if !featuregate.IsEnabled(pdataExporterFeatureGate) { + eCfg := cfg.(*LegacyConfig) + return newLegacyGoogleCloudMetricsExporter(eCfg, params) + } eCfg := cfg.(*Config) mExp, err := collector.NewGoogleCloudMetricsExporter(ctx, eCfg.Config, params.TelemetrySettings.Logger, params.BuildInfo.Version, eCfg.Timeout) if err != nil { @@ -131,12 +122,3 @@ func createMetricsExporter( exporterhelper.WithQueue(eCfg.QueueSettings), exporterhelper.WithRetry(eCfg.RetrySettings)) } - -// createLegacyMetricsExporter creates a metrics exporter based on this config. 
-func createLegacyMetricsExporter( - ctx context.Context, - params component.ExporterCreateSettings, - cfg config.Exporter) (component.MetricsExporter, error) { - eCfg := cfg.(*LegacyConfig) - return newLegacyGoogleCloudMetricsExporter(eCfg, params) -} From 439369ef440b8e84137411b6778a56d530a81937 Mon Sep 17 00:00:00 2001 From: Corbin Phelps Date: Fri, 8 Apr 2022 23:27:58 -0400 Subject: [PATCH 23/59] [receiver/mongodbatlas] Refactored to use New metric builder (#9093) * Refactored mongodbatlas to use new metric builder Signed-off-by: Corbin Phelps * Updated changelog with pr number Signed-off-by: Corbin Phelps * Fixed linter issues Signed-off-by: Corbin Phelps * Switched mongodbatlas metric lookup to switch statement Signed-off-by: Corbin Phelps --- CHANGELOG.md | 1 + receiver/mongodbatlasreceiver/config.go | 9 +- receiver/mongodbatlasreceiver/doc.go | 2 +- .../mongodbatlasreceiver/documentation.md | 32 +- receiver/mongodbatlasreceiver/factory.go | 3 + .../internal/metadata/generated_metrics.go | 1150 ----- .../internal/metadata/generated_metrics_v2.go | 4473 +++++++++++++++++ .../internal/metadata/metric_name_mapping.go | 1273 ++--- .../metadata/metric_name_mapping_test.go | 50 - .../internal/metric_conversion.go | 22 +- .../internal/mongodb_atlas_client.go | 25 +- receiver/mongodbatlasreceiver/metadata.yaml | 49 +- receiver/mongodbatlasreceiver/receiver.go | 143 +- 13 files changed, 5357 insertions(+), 1875 deletions(-) delete mode 100644 receiver/mongodbatlasreceiver/internal/metadata/generated_metrics.go create mode 100644 receiver/mongodbatlasreceiver/internal/metadata/generated_metrics_v2.go delete mode 100644 receiver/mongodbatlasreceiver/internal/metadata/metric_name_mapping_test.go diff --git a/CHANGELOG.md b/CHANGELOG.md index e474f1294125..6a4250687cf3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -23,6 +23,7 @@ - A detailed [Upgrade Guide](https://github.com/open-telemetry/opentelemetry-log-collection/releases/tag/v0.28.0) is available in the log-collection v0.28.0 release notes. 
- `datadogexporter`: Remove `OnlyMetadata` method from `Config` struct (#8980)
 - `datadogexporter`: Remove `GetCensoredKey` method from `APIConfig` struct (#8980)
+- `mongodbatlasreceiver`: Updated to use the newer metric builder, which changed some metric and resource attributes (#9093)
 - `dynatraceexporter`: Make `serialization` package `/internal` (#9097)
 
 ### 🧰 Bug fixes 🧰
 
diff --git a/receiver/mongodbatlasreceiver/config.go b/receiver/mongodbatlasreceiver/config.go
index d39a36c52791..730368d81c31 100644
--- a/receiver/mongodbatlasreceiver/config.go
+++ b/receiver/mongodbatlasreceiver/config.go
@@ -18,15 +18,18 @@ import (
 	"go.opentelemetry.io/collector/config"
 	"go.opentelemetry.io/collector/exporter/exporterhelper"
 	"go.opentelemetry.io/collector/receiver/scraperhelper"
+
+	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver/internal/metadata"
 )
 
 var _ config.Receiver = (*Config)(nil)
 
 type Config struct {
 	scraperhelper.ScraperControllerSettings `mapstructure:",squash"`
-	PublicKey                               string `mapstructure:"public_key"`
-	PrivateKey                              string `mapstructure:"private_key"`
-	Granularity                             string `mapstructure:"granularity"`
+	PublicKey                               string                   `mapstructure:"public_key"`
+	PrivateKey                              string                   `mapstructure:"private_key"`
+	Granularity                             string                   `mapstructure:"granularity"`
+	Metrics                                 metadata.MetricsSettings `mapstructure:"metrics"`
 	RetrySettings                           exporterhelper.RetrySettings `mapstructure:"retry_on_failure"`
 }
diff --git a/receiver/mongodbatlasreceiver/doc.go b/receiver/mongodbatlasreceiver/doc.go
index 8debb189ff76..af6ab888013e 100644
--- a/receiver/mongodbatlasreceiver/doc.go
+++ b/receiver/mongodbatlasreceiver/doc.go
@@ -15,6 +15,6 @@
 //go:build !windows
 // +build !windows
 
-//go:generate mdatagen metadata.yaml
+//go:generate mdatagen --experimental-gen metadata.yaml
 
 package mongodbatlasreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver"
diff --git a/receiver/mongodbatlasreceiver/documentation.md b/receiver/mongodbatlasreceiver/documentation.md
index ab74c04e4cfd..ddc55eb7ff0f 100644
--- a/receiver/mongodbatlasreceiver/documentation.md
+++ b/receiver/mongodbatlasreceiver/documentation.md
@@ -18,8 +18,8 @@ These are the metrics available for this scraper.
 | **mongodbatlas.disk.partition.space.max** | Disk partition space Aggregate of MongoDB Metrics DISK_PARTITION_SPACE_FREE, DISK_PARTITION_SPACE_USED | By | Gauge(Double) |
  • disk_status
| | **mongodbatlas.disk.partition.usage.average** | Disk partition usage (%) Aggregate of MongoDB Metrics DISK_PARTITION_SPACE_PERCENT_FREE, DISK_PARTITION_SPACE_PERCENT_USED | 1 | Gauge(Double) |
  • disk_status
| | **mongodbatlas.disk.partition.usage.max** | Disk partition usage (%) Aggregate of MongoDB Metrics MAX_DISK_PARTITION_SPACE_PERCENT_USED, MAX_DISK_PARTITION_SPACE_PERCENT_FREE | 1 | Gauge(Double) |
  • disk_status
| -| **mongodbatlas.disk.partition.utilization.average** | Disk partition utilization (%) MongoDB Metrics DISK_PARTITION_UTILIZATION | 1 | Gauge(Double) |
| -| **mongodbatlas.disk.partition.utilization.max** | Disk partition utilization (%) MongoDB Metrics MAX_DISK_PARTITION_UTILIZATION | 1 | Gauge(Double) |
| +| **mongodbatlas.disk.partition.utilization.average** | Disk partition utilization (%) MongoDB Metrics DISK_PARTITION_UTILIZATION | 1 | Gauge(Double) |
  • disk_status
| +| **mongodbatlas.disk.partition.utilization.max** | Disk partition utilization (%) MongoDB Metrics MAX_DISK_PARTITION_UTILIZATION | 1 | Gauge(Double) |
  • disk_status
| | **mongodbatlas.process.asserts** | Number of assertions per second Aggregate of MongoDB Metrics ASSERT_REGULAR, ASSERT_USER, ASSERT_MSG, ASSERT_WARNING | {assertions}/s | Gauge(Double) |
  • assert_type
| | **mongodbatlas.process.background_flush** | Amount of data flushed in the background MongoDB Metric BACKGROUND_FLUSH_AVG | 1 | Gauge(Double) |
| | **mongodbatlas.process.cache.io** | Cache throughput (per second) Aggregate of MongoDB Metrics CACHE_BYTES_READ_INTO, CACHE_BYTES_WRITTEN_FROM | By | Gauge(Double) |
  • cache_direction
| @@ -63,8 +63,8 @@ These are the metrics available for this scraper. | **mongodbatlas.system.fts.cpu.usage** | Full-text search (%) | 1 | Gauge(Double) |
  • cpu_state
| | **mongodbatlas.system.fts.disk.used** | Full text search disk usage MongoDB Metric FTS_DISK_USAGE | By | Gauge(Double) |
| | **mongodbatlas.system.fts.memory.usage** | Full-text search Aggregate of MongoDB Metrics FTS_MEMORY_MAPPED, FTS_PROCESS_SHARED_MEMORY, FTS_PROCESS_RESIDENT_MEMORY, FTS_PROCESS_VIRTUAL_MEMORY | MiBy | Sum(Double) |
  • memory_state
| -| **mongodbatlas.system.memory.usage.average** | System Memory Usage Aggregate of MongoDB Metrics SYSTEM_MEMORY_AVAILABLE, SYSTEM_MEMORY_BUFFERS, SYSTEM_MEMORY_USED, SYSTEM_MEMORY_CACHED, SYSTEM_MEMORY_SHARED, SYSTEM_MEMORY_FREE | KiBy | Gauge(Double) |
  • memory_state
| -| **mongodbatlas.system.memory.usage.max** | System Memory Usage Aggregate of MongoDB Metrics MAX_SYSTEM_MEMORY_CACHED, MAX_SYSTEM_MEMORY_AVAILABLE, MAX_SYSTEM_MEMORY_USED, MAX_SYSTEM_MEMORY_BUFFERS, MAX_SYSTEM_MEMORY_FREE, MAX_SYSTEM_MEMORY_SHARED | KiBy | Gauge(Double) |
  • memory_state
| +| **mongodbatlas.system.memory.usage.average** | System Memory Usage Aggregate of MongoDB Metrics SYSTEM_MEMORY_AVAILABLE, SYSTEM_MEMORY_BUFFERS, SYSTEM_MEMORY_USED, SYSTEM_MEMORY_CACHED, SYSTEM_MEMORY_SHARED, SYSTEM_MEMORY_FREE | KiBy | Gauge(Double) |
  • memory_status
| +| **mongodbatlas.system.memory.usage.max** | System Memory Usage Aggregate of MongoDB Metrics MAX_SYSTEM_MEMORY_CACHED, MAX_SYSTEM_MEMORY_AVAILABLE, MAX_SYSTEM_MEMORY_USED, MAX_SYSTEM_MEMORY_BUFFERS, MAX_SYSTEM_MEMORY_FREE, MAX_SYSTEM_MEMORY_SHARED | KiBy | Gauge(Double) |
  • memory_status
| | **mongodbatlas.system.network.io.average** | System Network IO Aggregate of MongoDB Metrics SYSTEM_NETWORK_IN, SYSTEM_NETWORK_OUT | By/s | Gauge(Double) |
  • direction
| | **mongodbatlas.system.network.io.max** | System Network IO Aggregate of MongoDB Metrics MAX_SYSTEM_NETWORK_OUT, MAX_SYSTEM_NETWORK_IN | By/s | Gauge(Double) |
  • direction
| | **mongodbatlas.system.paging.io.average** | Swap IO Aggregate of MongoDB Metrics SWAP_IO_IN, SWAP_IO_OUT | {pages}/s | Gauge(Double) |
  • direction
| @@ -72,7 +72,28 @@ These are the metrics available for this scraper. | **mongodbatlas.system.paging.usage.average** | Swap usage Aggregate of MongoDB Metrics SWAP_USAGE_FREE, SWAP_USAGE_USED | KiBy | Gauge(Double) |
  • direction
| | **mongodbatlas.system.paging.usage.max** | Swap usage Aggregate of MongoDB Metrics MAX_SWAP_USAGE_FREE, MAX_SWAP_USAGE_USED | KiBy | Gauge(Double) |
  • direction
 |
-**Highlighted metrics** are emitted by default.
+**Highlighted metrics** are emitted by default. Other metrics are optional and not emitted by default.
+Any metric can be enabled or disabled with the following scraper configuration:
+
+```yaml
+metrics:
+  <metric_name>:
+    enabled: <true|false>
+```
+
+## Resource attributes
+
+| Name | Description | Type |
+| ---- | ----------- | ---- |
+| mongodb_atlas.db.name | Name of the Database | String |
+| mongodb_atlas.disk.partition | Name of a disk partition | String |
+| mongodb_atlas.host.name | Hostname of the process | String |
+| mongodb_atlas.org_name | Organization Name | String |
+| mongodb_atlas.process.id | ID of the process | String |
+| mongodb_atlas.process.port | Port process is bound to | String |
+| mongodb_atlas.process.type_name | Process type | String |
+| mongodb_atlas.project.id | Project ID | String |
+| mongodb_atlas.project.name | Project Name | String |
+
 ## Metric attributes

@@ -93,6 +114,7 @@ These are the metrics available for this scraper.
 | global_lock_state | Which queue is locked |
 | memory_issue_type | Type of memory issue encountered |
 | memory_state | Memory usage type |
+| memory_status | Memory measurement type |
 | object_type | MongoDB object type |
 | operation | Type of database operation |
 | oplog_type | Oplog type |
diff --git a/receiver/mongodbatlasreceiver/factory.go b/receiver/mongodbatlasreceiver/factory.go
index b1f49346b0d8..cceb6fe59a72 100644
--- a/receiver/mongodbatlasreceiver/factory.go
+++ b/receiver/mongodbatlasreceiver/factory.go
@@ -23,6 +23,8 @@ import (
 	"go.opentelemetry.io/collector/consumer"
 	"go.opentelemetry.io/collector/exporter/exporterhelper"
 	"go.opentelemetry.io/collector/receiver/scraperhelper"
+
+	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver/internal/metadata"
 )

 const (
@@ -58,5 +60,6 @@
 		ScraperControllerSettings: scraperhelper.NewDefaultScraperControllerSettings(typeStr),
 		Granularity:               defaultGranularity,
 		RetrySettings:             exporterhelper.NewDefaultRetrySettings(),
+		Metrics:                   metadata.DefaultMetricsSettings(),
 	}
 }
diff --git a/receiver/mongodbatlasreceiver/internal/metadata/generated_metrics.go b/receiver/mongodbatlasreceiver/internal/metadata/generated_metrics.go
deleted file mode 100644
index f86fa1c0caee..000000000000
--- a/receiver/mongodbatlasreceiver/internal/metadata/generated_metrics.go
+++ /dev/null
@@ -1,1150 +0,0 @@
-// Code generated by mdatagen. DO NOT EDIT.
-
-package metadata
-
-import (
-	"go.opentelemetry.io/collector/config"
-	"go.opentelemetry.io/collector/model/pdata"
-)
-
-// Type is the component type name.
-const Type config.Type = "mongoatlasreceiver"
-
-// MetricIntf is an interface to generically interact with generated metric.
-type MetricIntf interface {
-	Name() string
-	New() pdata.Metric
-	Init(metric pdata.Metric)
-}
-
-// Intentionally not exposing this so that it is opaque and can change freely.
-type metricImpl struct {
-	name     string
-	initFunc func(pdata.Metric)
-}
-
-// Name returns the metric name.
-func (m *metricImpl) Name() string {
-	return m.name
-}
-
-// New creates a metric object preinitialized.
-func (m *metricImpl) New() pdata.Metric {
-	metric := pdata.NewMetric()
-	m.Init(metric)
-	return metric
-}
-
-// Init initializes the provided metric object.
-func (m *metricImpl) Init(metric pdata.Metric) { - m.initFunc(metric) -} - -type metricStruct struct { - MongodbatlasDbCounts MetricIntf - MongodbatlasDbSize MetricIntf - MongodbatlasDiskPartitionIopsAverage MetricIntf - MongodbatlasDiskPartitionIopsMax MetricIntf - MongodbatlasDiskPartitionLatencyAverage MetricIntf - MongodbatlasDiskPartitionLatencyMax MetricIntf - MongodbatlasDiskPartitionSpaceAverage MetricIntf - MongodbatlasDiskPartitionSpaceMax MetricIntf - MongodbatlasDiskPartitionUsageAverage MetricIntf - MongodbatlasDiskPartitionUsageMax MetricIntf - MongodbatlasDiskPartitionUtilizationAverage MetricIntf - MongodbatlasDiskPartitionUtilizationMax MetricIntf - MongodbatlasProcessAsserts MetricIntf - MongodbatlasProcessBackgroundFlush MetricIntf - MongodbatlasProcessCacheIo MetricIntf - MongodbatlasProcessCacheSize MetricIntf - MongodbatlasProcessConnections MetricIntf - MongodbatlasProcessCPUChildrenNormalizedUsageAverage MetricIntf - MongodbatlasProcessCPUChildrenNormalizedUsageMax MetricIntf - MongodbatlasProcessCPUChildrenUsageAverage MetricIntf - MongodbatlasProcessCPUChildrenUsageMax MetricIntf - MongodbatlasProcessCPUNormalizedUsageAverage MetricIntf - MongodbatlasProcessCPUNormalizedUsageMax MetricIntf - MongodbatlasProcessCPUUsageAverage MetricIntf - MongodbatlasProcessCPUUsageMax MetricIntf - MongodbatlasProcessCursors MetricIntf - MongodbatlasProcessDbDocumentRate MetricIntf - MongodbatlasProcessDbOperationsRate MetricIntf - MongodbatlasProcessDbOperationsTime MetricIntf - MongodbatlasProcessDbQueryExecutorScanned MetricIntf - MongodbatlasProcessDbQueryTargetingScannedPerReturned MetricIntf - MongodbatlasProcessDbStorage MetricIntf - MongodbatlasProcessFtsCPUUsage MetricIntf - MongodbatlasProcessGlobalLock MetricIntf - MongodbatlasProcessIndexBtreeMissRatio MetricIntf - MongodbatlasProcessIndexCounters MetricIntf - MongodbatlasProcessJournalingCommits MetricIntf - MongodbatlasProcessJournalingDataFiles MetricIntf - MongodbatlasProcessJournalingWritten MetricIntf - MongodbatlasProcessMemoryUsage MetricIntf - MongodbatlasProcessNetworkIo MetricIntf - MongodbatlasProcessNetworkRequests MetricIntf - MongodbatlasProcessOplogRate MetricIntf - MongodbatlasProcessOplogTime MetricIntf - MongodbatlasProcessPageFaults MetricIntf - MongodbatlasProcessRestarts MetricIntf - MongodbatlasProcessTickets MetricIntf - MongodbatlasSystemCPUNormalizedUsageAverage MetricIntf - MongodbatlasSystemCPUNormalizedUsageMax MetricIntf - MongodbatlasSystemCPUUsageAverage MetricIntf - MongodbatlasSystemCPUUsageMax MetricIntf - MongodbatlasSystemFtsCPUNormalizedUsage MetricIntf - MongodbatlasSystemFtsCPUUsage MetricIntf - MongodbatlasSystemFtsDiskUsed MetricIntf - MongodbatlasSystemFtsMemoryUsage MetricIntf - MongodbatlasSystemMemoryUsageAverage MetricIntf - MongodbatlasSystemMemoryUsageMax MetricIntf - MongodbatlasSystemNetworkIoAverage MetricIntf - MongodbatlasSystemNetworkIoMax MetricIntf - MongodbatlasSystemPagingIoAverage MetricIntf - MongodbatlasSystemPagingIoMax MetricIntf - MongodbatlasSystemPagingUsageAverage MetricIntf - MongodbatlasSystemPagingUsageMax MetricIntf -} - -// Names returns a list of all the metric name strings. 
-func (m *metricStruct) Names() []string { - return []string{ - "mongodbatlas.db.counts", - "mongodbatlas.db.size", - "mongodbatlas.disk.partition.iops.average", - "mongodbatlas.disk.partition.iops.max", - "mongodbatlas.disk.partition.latency.average", - "mongodbatlas.disk.partition.latency.max", - "mongodbatlas.disk.partition.space.average", - "mongodbatlas.disk.partition.space.max", - "mongodbatlas.disk.partition.usage.average", - "mongodbatlas.disk.partition.usage.max", - "mongodbatlas.disk.partition.utilization.average", - "mongodbatlas.disk.partition.utilization.max", - "mongodbatlas.process.asserts", - "mongodbatlas.process.background_flush", - "mongodbatlas.process.cache.io", - "mongodbatlas.process.cache.size", - "mongodbatlas.process.connections", - "mongodbatlas.process.cpu.children.normalized.usage.average", - "mongodbatlas.process.cpu.children.normalized.usage.max", - "mongodbatlas.process.cpu.children.usage.average", - "mongodbatlas.process.cpu.children.usage.max", - "mongodbatlas.process.cpu.normalized.usage.average", - "mongodbatlas.process.cpu.normalized.usage.max", - "mongodbatlas.process.cpu.usage.average", - "mongodbatlas.process.cpu.usage.max", - "mongodbatlas.process.cursors", - "mongodbatlas.process.db.document.rate", - "mongodbatlas.process.db.operations.rate", - "mongodbatlas.process.db.operations.time", - "mongodbatlas.process.db.query_executor.scanned", - "mongodbatlas.process.db.query_targeting.scanned_per_returned", - "mongodbatlas.process.db.storage", - "mongodbatlas.process.fts.cpu.usage", - "mongodbatlas.process.global_lock", - "mongodbatlas.process.index.btree_miss_ratio", - "mongodbatlas.process.index.counters", - "mongodbatlas.process.journaling.commits", - "mongodbatlas.process.journaling.data_files", - "mongodbatlas.process.journaling.written", - "mongodbatlas.process.memory.usage", - "mongodbatlas.process.network.io", - "mongodbatlas.process.network.requests", - "mongodbatlas.process.oplog.rate", - "mongodbatlas.process.oplog.time", - "mongodbatlas.process.page_faults", - "mongodbatlas.process.restarts", - "mongodbatlas.process.tickets", - "mongodbatlas.system.cpu.normalized.usage.average", - "mongodbatlas.system.cpu.normalized.usage.max", - "mongodbatlas.system.cpu.usage.average", - "mongodbatlas.system.cpu.usage.max", - "mongodbatlas.system.fts.cpu.normalized.usage", - "mongodbatlas.system.fts.cpu.usage", - "mongodbatlas.system.fts.disk.used", - "mongodbatlas.system.fts.memory.usage", - "mongodbatlas.system.memory.usage.average", - "mongodbatlas.system.memory.usage.max", - "mongodbatlas.system.network.io.average", - "mongodbatlas.system.network.io.max", - "mongodbatlas.system.paging.io.average", - "mongodbatlas.system.paging.io.max", - "mongodbatlas.system.paging.usage.average", - "mongodbatlas.system.paging.usage.max", - } -} - -var metricsByName = map[string]MetricIntf{ - "mongodbatlas.db.counts": Metrics.MongodbatlasDbCounts, - "mongodbatlas.db.size": Metrics.MongodbatlasDbSize, - "mongodbatlas.disk.partition.iops.average": Metrics.MongodbatlasDiskPartitionIopsAverage, - "mongodbatlas.disk.partition.iops.max": Metrics.MongodbatlasDiskPartitionIopsMax, - "mongodbatlas.disk.partition.latency.average": Metrics.MongodbatlasDiskPartitionLatencyAverage, - "mongodbatlas.disk.partition.latency.max": Metrics.MongodbatlasDiskPartitionLatencyMax, - "mongodbatlas.disk.partition.space.average": Metrics.MongodbatlasDiskPartitionSpaceAverage, - "mongodbatlas.disk.partition.space.max": Metrics.MongodbatlasDiskPartitionSpaceMax, - 
"mongodbatlas.disk.partition.usage.average": Metrics.MongodbatlasDiskPartitionUsageAverage, - "mongodbatlas.disk.partition.usage.max": Metrics.MongodbatlasDiskPartitionUsageMax, - "mongodbatlas.disk.partition.utilization.average": Metrics.MongodbatlasDiskPartitionUtilizationAverage, - "mongodbatlas.disk.partition.utilization.max": Metrics.MongodbatlasDiskPartitionUtilizationMax, - "mongodbatlas.process.asserts": Metrics.MongodbatlasProcessAsserts, - "mongodbatlas.process.background_flush": Metrics.MongodbatlasProcessBackgroundFlush, - "mongodbatlas.process.cache.io": Metrics.MongodbatlasProcessCacheIo, - "mongodbatlas.process.cache.size": Metrics.MongodbatlasProcessCacheSize, - "mongodbatlas.process.connections": Metrics.MongodbatlasProcessConnections, - "mongodbatlas.process.cpu.children.normalized.usage.average": Metrics.MongodbatlasProcessCPUChildrenNormalizedUsageAverage, - "mongodbatlas.process.cpu.children.normalized.usage.max": Metrics.MongodbatlasProcessCPUChildrenNormalizedUsageMax, - "mongodbatlas.process.cpu.children.usage.average": Metrics.MongodbatlasProcessCPUChildrenUsageAverage, - "mongodbatlas.process.cpu.children.usage.max": Metrics.MongodbatlasProcessCPUChildrenUsageMax, - "mongodbatlas.process.cpu.normalized.usage.average": Metrics.MongodbatlasProcessCPUNormalizedUsageAverage, - "mongodbatlas.process.cpu.normalized.usage.max": Metrics.MongodbatlasProcessCPUNormalizedUsageMax, - "mongodbatlas.process.cpu.usage.average": Metrics.MongodbatlasProcessCPUUsageAverage, - "mongodbatlas.process.cpu.usage.max": Metrics.MongodbatlasProcessCPUUsageMax, - "mongodbatlas.process.cursors": Metrics.MongodbatlasProcessCursors, - "mongodbatlas.process.db.document.rate": Metrics.MongodbatlasProcessDbDocumentRate, - "mongodbatlas.process.db.operations.rate": Metrics.MongodbatlasProcessDbOperationsRate, - "mongodbatlas.process.db.operations.time": Metrics.MongodbatlasProcessDbOperationsTime, - "mongodbatlas.process.db.query_executor.scanned": Metrics.MongodbatlasProcessDbQueryExecutorScanned, - "mongodbatlas.process.db.query_targeting.scanned_per_returned": Metrics.MongodbatlasProcessDbQueryTargetingScannedPerReturned, - "mongodbatlas.process.db.storage": Metrics.MongodbatlasProcessDbStorage, - "mongodbatlas.process.fts.cpu.usage": Metrics.MongodbatlasProcessFtsCPUUsage, - "mongodbatlas.process.global_lock": Metrics.MongodbatlasProcessGlobalLock, - "mongodbatlas.process.index.btree_miss_ratio": Metrics.MongodbatlasProcessIndexBtreeMissRatio, - "mongodbatlas.process.index.counters": Metrics.MongodbatlasProcessIndexCounters, - "mongodbatlas.process.journaling.commits": Metrics.MongodbatlasProcessJournalingCommits, - "mongodbatlas.process.journaling.data_files": Metrics.MongodbatlasProcessJournalingDataFiles, - "mongodbatlas.process.journaling.written": Metrics.MongodbatlasProcessJournalingWritten, - "mongodbatlas.process.memory.usage": Metrics.MongodbatlasProcessMemoryUsage, - "mongodbatlas.process.network.io": Metrics.MongodbatlasProcessNetworkIo, - "mongodbatlas.process.network.requests": Metrics.MongodbatlasProcessNetworkRequests, - "mongodbatlas.process.oplog.rate": Metrics.MongodbatlasProcessOplogRate, - "mongodbatlas.process.oplog.time": Metrics.MongodbatlasProcessOplogTime, - "mongodbatlas.process.page_faults": Metrics.MongodbatlasProcessPageFaults, - "mongodbatlas.process.restarts": Metrics.MongodbatlasProcessRestarts, - "mongodbatlas.process.tickets": Metrics.MongodbatlasProcessTickets, - "mongodbatlas.system.cpu.normalized.usage.average": 
Metrics.MongodbatlasSystemCPUNormalizedUsageAverage, - "mongodbatlas.system.cpu.normalized.usage.max": Metrics.MongodbatlasSystemCPUNormalizedUsageMax, - "mongodbatlas.system.cpu.usage.average": Metrics.MongodbatlasSystemCPUUsageAverage, - "mongodbatlas.system.cpu.usage.max": Metrics.MongodbatlasSystemCPUUsageMax, - "mongodbatlas.system.fts.cpu.normalized.usage": Metrics.MongodbatlasSystemFtsCPUNormalizedUsage, - "mongodbatlas.system.fts.cpu.usage": Metrics.MongodbatlasSystemFtsCPUUsage, - "mongodbatlas.system.fts.disk.used": Metrics.MongodbatlasSystemFtsDiskUsed, - "mongodbatlas.system.fts.memory.usage": Metrics.MongodbatlasSystemFtsMemoryUsage, - "mongodbatlas.system.memory.usage.average": Metrics.MongodbatlasSystemMemoryUsageAverage, - "mongodbatlas.system.memory.usage.max": Metrics.MongodbatlasSystemMemoryUsageMax, - "mongodbatlas.system.network.io.average": Metrics.MongodbatlasSystemNetworkIoAverage, - "mongodbatlas.system.network.io.max": Metrics.MongodbatlasSystemNetworkIoMax, - "mongodbatlas.system.paging.io.average": Metrics.MongodbatlasSystemPagingIoAverage, - "mongodbatlas.system.paging.io.max": Metrics.MongodbatlasSystemPagingIoMax, - "mongodbatlas.system.paging.usage.average": Metrics.MongodbatlasSystemPagingUsageAverage, - "mongodbatlas.system.paging.usage.max": Metrics.MongodbatlasSystemPagingUsageMax, -} - -func (m *metricStruct) ByName(n string) MetricIntf { - return metricsByName[n] -} - -// Metrics contains a set of methods for each metric that help with -// manipulating those metrics. -var Metrics = &metricStruct{ - &metricImpl{ - "mongodbatlas.db.counts", - func(metric pdata.Metric) { - metric.SetName("mongodbatlas.db.counts") - metric.SetDescription("Database feature size") - metric.SetUnit("{objects}") - metric.SetDataType(pdata.MetricDataTypeGauge) - }, - }, - &metricImpl{ - "mongodbatlas.db.size", - func(metric pdata.Metric) { - metric.SetName("mongodbatlas.db.size") - metric.SetDescription("Database feature size") - metric.SetUnit("By") - metric.SetDataType(pdata.MetricDataTypeGauge) - }, - }, - &metricImpl{ - "mongodbatlas.disk.partition.iops.average", - func(metric pdata.Metric) { - metric.SetName("mongodbatlas.disk.partition.iops.average") - metric.SetDescription("Disk partition iops") - metric.SetUnit("{ops}/s") - metric.SetDataType(pdata.MetricDataTypeGauge) - }, - }, - &metricImpl{ - "mongodbatlas.disk.partition.iops.max", - func(metric pdata.Metric) { - metric.SetName("mongodbatlas.disk.partition.iops.max") - metric.SetDescription("Disk partition iops") - metric.SetUnit("{ops}/s") - metric.SetDataType(pdata.MetricDataTypeGauge) - }, - }, - &metricImpl{ - "mongodbatlas.disk.partition.latency.average", - func(metric pdata.Metric) { - metric.SetName("mongodbatlas.disk.partition.latency.average") - metric.SetDescription("Disk partition latency") - metric.SetUnit("ms") - metric.SetDataType(pdata.MetricDataTypeGauge) - }, - }, - &metricImpl{ - "mongodbatlas.disk.partition.latency.max", - func(metric pdata.Metric) { - metric.SetName("mongodbatlas.disk.partition.latency.max") - metric.SetDescription("Disk partition latency") - metric.SetUnit("ms") - metric.SetDataType(pdata.MetricDataTypeGauge) - }, - }, - &metricImpl{ - "mongodbatlas.disk.partition.space.average", - func(metric pdata.Metric) { - metric.SetName("mongodbatlas.disk.partition.space.average") - metric.SetDescription("Disk partition space") - metric.SetUnit("By") - metric.SetDataType(pdata.MetricDataTypeGauge) - }, - }, - &metricImpl{ - "mongodbatlas.disk.partition.space.max", - func(metric 
pdata.Metric) { - metric.SetName("mongodbatlas.disk.partition.space.max") - metric.SetDescription("Disk partition space") - metric.SetUnit("By") - metric.SetDataType(pdata.MetricDataTypeGauge) - }, - }, - &metricImpl{ - "mongodbatlas.disk.partition.usage.average", - func(metric pdata.Metric) { - metric.SetName("mongodbatlas.disk.partition.usage.average") - metric.SetDescription("Disk partition usage (%)") - metric.SetUnit("1") - metric.SetDataType(pdata.MetricDataTypeGauge) - }, - }, - &metricImpl{ - "mongodbatlas.disk.partition.usage.max", - func(metric pdata.Metric) { - metric.SetName("mongodbatlas.disk.partition.usage.max") - metric.SetDescription("Disk partition usage (%)") - metric.SetUnit("1") - metric.SetDataType(pdata.MetricDataTypeGauge) - }, - }, - &metricImpl{ - "mongodbatlas.disk.partition.utilization.average", - func(metric pdata.Metric) { - metric.SetName("mongodbatlas.disk.partition.utilization.average") - metric.SetDescription("Disk partition utilization (%)") - metric.SetUnit("1") - metric.SetDataType(pdata.MetricDataTypeGauge) - }, - }, - &metricImpl{ - "mongodbatlas.disk.partition.utilization.max", - func(metric pdata.Metric) { - metric.SetName("mongodbatlas.disk.partition.utilization.max") - metric.SetDescription("Disk partition utilization (%)") - metric.SetUnit("1") - metric.SetDataType(pdata.MetricDataTypeGauge) - }, - }, - &metricImpl{ - "mongodbatlas.process.asserts", - func(metric pdata.Metric) { - metric.SetName("mongodbatlas.process.asserts") - metric.SetDescription("Number of assertions per second") - metric.SetUnit("{assertions}/s") - metric.SetDataType(pdata.MetricDataTypeGauge) - }, - }, - &metricImpl{ - "mongodbatlas.process.background_flush", - func(metric pdata.Metric) { - metric.SetName("mongodbatlas.process.background_flush") - metric.SetDescription("Amount of data flushed in the background") - metric.SetUnit("1") - metric.SetDataType(pdata.MetricDataTypeGauge) - }, - }, - &metricImpl{ - "mongodbatlas.process.cache.io", - func(metric pdata.Metric) { - metric.SetName("mongodbatlas.process.cache.io") - metric.SetDescription("Cache throughput (per second)") - metric.SetUnit("By") - metric.SetDataType(pdata.MetricDataTypeGauge) - }, - }, - &metricImpl{ - "mongodbatlas.process.cache.size", - func(metric pdata.Metric) { - metric.SetName("mongodbatlas.process.cache.size") - metric.SetDescription("Cache sizes") - metric.SetUnit("By") - metric.SetDataType(pdata.MetricDataTypeSum) - metric.Sum().SetIsMonotonic(false) - metric.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) - }, - }, - &metricImpl{ - "mongodbatlas.process.connections", - func(metric pdata.Metric) { - metric.SetName("mongodbatlas.process.connections") - metric.SetDescription("Number of current connections") - metric.SetUnit("{connections}") - metric.SetDataType(pdata.MetricDataTypeSum) - metric.Sum().SetIsMonotonic(false) - metric.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) - }, - }, - &metricImpl{ - "mongodbatlas.process.cpu.children.normalized.usage.average", - func(metric pdata.Metric) { - metric.SetName("mongodbatlas.process.cpu.children.normalized.usage.average") - metric.SetDescription("CPU Usage for child processes, normalized to pct") - metric.SetUnit("1") - metric.SetDataType(pdata.MetricDataTypeGauge) - }, - }, - &metricImpl{ - "mongodbatlas.process.cpu.children.normalized.usage.max", - func(metric pdata.Metric) { - metric.SetName("mongodbatlas.process.cpu.children.normalized.usage.max") - metric.SetDescription("CPU Usage for 
child processes, normalized to pct") - metric.SetUnit("1") - metric.SetDataType(pdata.MetricDataTypeGauge) - }, - }, - &metricImpl{ - "mongodbatlas.process.cpu.children.usage.average", - func(metric pdata.Metric) { - metric.SetName("mongodbatlas.process.cpu.children.usage.average") - metric.SetDescription("CPU Usage for child processes (%)") - metric.SetUnit("1") - metric.SetDataType(pdata.MetricDataTypeGauge) - }, - }, - &metricImpl{ - "mongodbatlas.process.cpu.children.usage.max", - func(metric pdata.Metric) { - metric.SetName("mongodbatlas.process.cpu.children.usage.max") - metric.SetDescription("CPU Usage for child processes (%)") - metric.SetUnit("1") - metric.SetDataType(pdata.MetricDataTypeGauge) - }, - }, - &metricImpl{ - "mongodbatlas.process.cpu.normalized.usage.average", - func(metric pdata.Metric) { - metric.SetName("mongodbatlas.process.cpu.normalized.usage.average") - metric.SetDescription("CPU Usage, normalized to pct") - metric.SetUnit("1") - metric.SetDataType(pdata.MetricDataTypeGauge) - }, - }, - &metricImpl{ - "mongodbatlas.process.cpu.normalized.usage.max", - func(metric pdata.Metric) { - metric.SetName("mongodbatlas.process.cpu.normalized.usage.max") - metric.SetDescription("CPU Usage, normalized to pct") - metric.SetUnit("1") - metric.SetDataType(pdata.MetricDataTypeGauge) - }, - }, - &metricImpl{ - "mongodbatlas.process.cpu.usage.average", - func(metric pdata.Metric) { - metric.SetName("mongodbatlas.process.cpu.usage.average") - metric.SetDescription("CPU Usage (%)") - metric.SetUnit("1") - metric.SetDataType(pdata.MetricDataTypeGauge) - }, - }, - &metricImpl{ - "mongodbatlas.process.cpu.usage.max", - func(metric pdata.Metric) { - metric.SetName("mongodbatlas.process.cpu.usage.max") - metric.SetDescription("CPU Usage (%)") - metric.SetUnit("1") - metric.SetDataType(pdata.MetricDataTypeGauge) - }, - }, - &metricImpl{ - "mongodbatlas.process.cursors", - func(metric pdata.Metric) { - metric.SetName("mongodbatlas.process.cursors") - metric.SetDescription("Number of cursors") - metric.SetUnit("{cursors}") - metric.SetDataType(pdata.MetricDataTypeGauge) - }, - }, - &metricImpl{ - "mongodbatlas.process.db.document.rate", - func(metric pdata.Metric) { - metric.SetName("mongodbatlas.process.db.document.rate") - metric.SetDescription("Document access rates") - metric.SetUnit("{documents}/s") - metric.SetDataType(pdata.MetricDataTypeGauge) - }, - }, - &metricImpl{ - "mongodbatlas.process.db.operations.rate", - func(metric pdata.Metric) { - metric.SetName("mongodbatlas.process.db.operations.rate") - metric.SetDescription("DB Operation Rates") - metric.SetUnit("{operations}/s") - metric.SetDataType(pdata.MetricDataTypeGauge) - }, - }, - &metricImpl{ - "mongodbatlas.process.db.operations.time", - func(metric pdata.Metric) { - metric.SetName("mongodbatlas.process.db.operations.time") - metric.SetDescription("DB Operation Times") - metric.SetUnit("ms") - metric.SetDataType(pdata.MetricDataTypeSum) - metric.Sum().SetIsMonotonic(true) - metric.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) - }, - }, - &metricImpl{ - "mongodbatlas.process.db.query_executor.scanned", - func(metric pdata.Metric) { - metric.SetName("mongodbatlas.process.db.query_executor.scanned") - metric.SetDescription("Scanned objects") - metric.SetUnit("{objects}/s") - metric.SetDataType(pdata.MetricDataTypeGauge) - }, - }, - &metricImpl{ - "mongodbatlas.process.db.query_targeting.scanned_per_returned", - func(metric pdata.Metric) { - 
metric.SetName("mongodbatlas.process.db.query_targeting.scanned_per_returned") - metric.SetDescription("Scanned objects per returned") - metric.SetUnit("{scanned}/{returned}") - metric.SetDataType(pdata.MetricDataTypeGauge) - }, - }, - &metricImpl{ - "mongodbatlas.process.db.storage", - func(metric pdata.Metric) { - metric.SetName("mongodbatlas.process.db.storage") - metric.SetDescription("Storage used by the database") - metric.SetUnit("By") - metric.SetDataType(pdata.MetricDataTypeGauge) - }, - }, - &metricImpl{ - "mongodbatlas.process.fts.cpu.usage", - func(metric pdata.Metric) { - metric.SetName("mongodbatlas.process.fts.cpu.usage") - metric.SetDescription("Full text search CPU (%)") - metric.SetUnit("1") - metric.SetDataType(pdata.MetricDataTypeGauge) - }, - }, - &metricImpl{ - "mongodbatlas.process.global_lock", - func(metric pdata.Metric) { - metric.SetName("mongodbatlas.process.global_lock") - metric.SetDescription("Number and status of locks") - metric.SetUnit("{locks}") - metric.SetDataType(pdata.MetricDataTypeGauge) - }, - }, - &metricImpl{ - "mongodbatlas.process.index.btree_miss_ratio", - func(metric pdata.Metric) { - metric.SetName("mongodbatlas.process.index.btree_miss_ratio") - metric.SetDescription("Index miss ratio (%)") - metric.SetUnit("1") - metric.SetDataType(pdata.MetricDataTypeGauge) - }, - }, - &metricImpl{ - "mongodbatlas.process.index.counters", - func(metric pdata.Metric) { - metric.SetName("mongodbatlas.process.index.counters") - metric.SetDescription("Indexes") - metric.SetUnit("{indexes}") - metric.SetDataType(pdata.MetricDataTypeGauge) - }, - }, - &metricImpl{ - "mongodbatlas.process.journaling.commits", - func(metric pdata.Metric) { - metric.SetName("mongodbatlas.process.journaling.commits") - metric.SetDescription("Journaling commits") - metric.SetUnit("{commits}") - metric.SetDataType(pdata.MetricDataTypeGauge) - }, - }, - &metricImpl{ - "mongodbatlas.process.journaling.data_files", - func(metric pdata.Metric) { - metric.SetName("mongodbatlas.process.journaling.data_files") - metric.SetDescription("Data file sizes") - metric.SetUnit("MiBy") - metric.SetDataType(pdata.MetricDataTypeGauge) - }, - }, - &metricImpl{ - "mongodbatlas.process.journaling.written", - func(metric pdata.Metric) { - metric.SetName("mongodbatlas.process.journaling.written") - metric.SetDescription("Journals written") - metric.SetUnit("MiBy") - metric.SetDataType(pdata.MetricDataTypeGauge) - }, - }, - &metricImpl{ - "mongodbatlas.process.memory.usage", - func(metric pdata.Metric) { - metric.SetName("mongodbatlas.process.memory.usage") - metric.SetDescription("Memory Usage") - metric.SetUnit("By") - metric.SetDataType(pdata.MetricDataTypeGauge) - }, - }, - &metricImpl{ - "mongodbatlas.process.network.io", - func(metric pdata.Metric) { - metric.SetName("mongodbatlas.process.network.io") - metric.SetDescription("Network IO") - metric.SetUnit("By/s") - metric.SetDataType(pdata.MetricDataTypeGauge) - }, - }, - &metricImpl{ - "mongodbatlas.process.network.requests", - func(metric pdata.Metric) { - metric.SetName("mongodbatlas.process.network.requests") - metric.SetDescription("Network requests") - metric.SetUnit("{requests}") - metric.SetDataType(pdata.MetricDataTypeSum) - metric.Sum().SetIsMonotonic(true) - metric.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) - }, - }, - &metricImpl{ - "mongodbatlas.process.oplog.rate", - func(metric pdata.Metric) { - metric.SetName("mongodbatlas.process.oplog.rate") - metric.SetDescription("Execution rate by operation") - 
metric.SetUnit("GiBy/h") - metric.SetDataType(pdata.MetricDataTypeGauge) - }, - }, - &metricImpl{ - "mongodbatlas.process.oplog.time", - func(metric pdata.Metric) { - metric.SetName("mongodbatlas.process.oplog.time") - metric.SetDescription("Execution time by operation") - metric.SetUnit("s") - metric.SetDataType(pdata.MetricDataTypeGauge) - }, - }, - &metricImpl{ - "mongodbatlas.process.page_faults", - func(metric pdata.Metric) { - metric.SetName("mongodbatlas.process.page_faults") - metric.SetDescription("Page faults") - metric.SetUnit("{faults}/s") - metric.SetDataType(pdata.MetricDataTypeGauge) - }, - }, - &metricImpl{ - "mongodbatlas.process.restarts", - func(metric pdata.Metric) { - metric.SetName("mongodbatlas.process.restarts") - metric.SetDescription("Restarts in last hour") - metric.SetUnit("{restarts}/h") - metric.SetDataType(pdata.MetricDataTypeGauge) - }, - }, - &metricImpl{ - "mongodbatlas.process.tickets", - func(metric pdata.Metric) { - metric.SetName("mongodbatlas.process.tickets") - metric.SetDescription("Tickets") - metric.SetUnit("{tickets}") - metric.SetDataType(pdata.MetricDataTypeGauge) - }, - }, - &metricImpl{ - "mongodbatlas.system.cpu.normalized.usage.average", - func(metric pdata.Metric) { - metric.SetName("mongodbatlas.system.cpu.normalized.usage.average") - metric.SetDescription("System CPU Normalized to pct") - metric.SetUnit("1") - metric.SetDataType(pdata.MetricDataTypeGauge) - }, - }, - &metricImpl{ - "mongodbatlas.system.cpu.normalized.usage.max", - func(metric pdata.Metric) { - metric.SetName("mongodbatlas.system.cpu.normalized.usage.max") - metric.SetDescription("System CPU Normalized to pct") - metric.SetUnit("1") - metric.SetDataType(pdata.MetricDataTypeGauge) - }, - }, - &metricImpl{ - "mongodbatlas.system.cpu.usage.average", - func(metric pdata.Metric) { - metric.SetName("mongodbatlas.system.cpu.usage.average") - metric.SetDescription("System CPU Usage (%)") - metric.SetUnit("1") - metric.SetDataType(pdata.MetricDataTypeGauge) - }, - }, - &metricImpl{ - "mongodbatlas.system.cpu.usage.max", - func(metric pdata.Metric) { - metric.SetName("mongodbatlas.system.cpu.usage.max") - metric.SetDescription("System CPU Usage (%)") - metric.SetUnit("1") - metric.SetDataType(pdata.MetricDataTypeGauge) - }, - }, - &metricImpl{ - "mongodbatlas.system.fts.cpu.normalized.usage", - func(metric pdata.Metric) { - metric.SetName("mongodbatlas.system.fts.cpu.normalized.usage") - metric.SetDescription("Full text search disk usage (%)") - metric.SetUnit("1") - metric.SetDataType(pdata.MetricDataTypeGauge) - }, - }, - &metricImpl{ - "mongodbatlas.system.fts.cpu.usage", - func(metric pdata.Metric) { - metric.SetName("mongodbatlas.system.fts.cpu.usage") - metric.SetDescription("Full-text search (%)") - metric.SetUnit("1") - metric.SetDataType(pdata.MetricDataTypeGauge) - }, - }, - &metricImpl{ - "mongodbatlas.system.fts.disk.used", - func(metric pdata.Metric) { - metric.SetName("mongodbatlas.system.fts.disk.used") - metric.SetDescription("Full text search disk usage") - metric.SetUnit("By") - metric.SetDataType(pdata.MetricDataTypeGauge) - }, - }, - &metricImpl{ - "mongodbatlas.system.fts.memory.usage", - func(metric pdata.Metric) { - metric.SetName("mongodbatlas.system.fts.memory.usage") - metric.SetDescription("Full-text search") - metric.SetUnit("MiBy") - metric.SetDataType(pdata.MetricDataTypeSum) - metric.Sum().SetIsMonotonic(true) - metric.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) - }, - }, - &metricImpl{ - 
"mongodbatlas.system.memory.usage.average", - func(metric pdata.Metric) { - metric.SetName("mongodbatlas.system.memory.usage.average") - metric.SetDescription("System Memory Usage") - metric.SetUnit("KiBy") - metric.SetDataType(pdata.MetricDataTypeGauge) - }, - }, - &metricImpl{ - "mongodbatlas.system.memory.usage.max", - func(metric pdata.Metric) { - metric.SetName("mongodbatlas.system.memory.usage.max") - metric.SetDescription("System Memory Usage") - metric.SetUnit("KiBy") - metric.SetDataType(pdata.MetricDataTypeGauge) - }, - }, - &metricImpl{ - "mongodbatlas.system.network.io.average", - func(metric pdata.Metric) { - metric.SetName("mongodbatlas.system.network.io.average") - metric.SetDescription("System Network IO") - metric.SetUnit("By/s") - metric.SetDataType(pdata.MetricDataTypeGauge) - }, - }, - &metricImpl{ - "mongodbatlas.system.network.io.max", - func(metric pdata.Metric) { - metric.SetName("mongodbatlas.system.network.io.max") - metric.SetDescription("System Network IO") - metric.SetUnit("By/s") - metric.SetDataType(pdata.MetricDataTypeGauge) - }, - }, - &metricImpl{ - "mongodbatlas.system.paging.io.average", - func(metric pdata.Metric) { - metric.SetName("mongodbatlas.system.paging.io.average") - metric.SetDescription("Swap IO") - metric.SetUnit("{pages}/s") - metric.SetDataType(pdata.MetricDataTypeGauge) - }, - }, - &metricImpl{ - "mongodbatlas.system.paging.io.max", - func(metric pdata.Metric) { - metric.SetName("mongodbatlas.system.paging.io.max") - metric.SetDescription("Swap IO") - metric.SetUnit("{pages}/s") - metric.SetDataType(pdata.MetricDataTypeGauge) - }, - }, - &metricImpl{ - "mongodbatlas.system.paging.usage.average", - func(metric pdata.Metric) { - metric.SetName("mongodbatlas.system.paging.usage.average") - metric.SetDescription("Swap usage") - metric.SetUnit("KiBy") - metric.SetDataType(pdata.MetricDataTypeGauge) - }, - }, - &metricImpl{ - "mongodbatlas.system.paging.usage.max", - func(metric pdata.Metric) { - metric.SetName("mongodbatlas.system.paging.usage.max") - metric.SetDescription("Swap usage") - metric.SetUnit("KiBy") - metric.SetDataType(pdata.MetricDataTypeGauge) - }, - }, -} - -// M contains a set of methods for each metric that help with -// manipulating those metrics. M is an alias for Metrics -var M = Metrics - -// Attributes contains the possible metric attributes that can be used. 
-var Attributes = struct { - // AssertType (MongoDB assertion type) - AssertType string - // BtreeCounterType (Database index effectiveness) - BtreeCounterType string - // CacheDirection (Whether read into or written from) - CacheDirection string - // CacheStatus (Cache status) - CacheStatus string - // ClusterRole (Whether process is acting as replica or primary) - ClusterRole string - // CPUState (CPU state) - CPUState string - // CursorState (Whether cursor is open or timed out) - CursorState string - // Direction (Network traffic direction) - Direction string - // DiskDirection (Measurement type for disk operation) - DiskDirection string - // DiskStatus (Disk measurement type) - DiskStatus string - // DocumentStatus (Status of documents in the database) - DocumentStatus string - // ExecutionType (Type of command) - ExecutionType string - // GlobalLockState (Which queue is locked) - GlobalLockState string - // MemoryIssueType (Type of memory issue encountered) - MemoryIssueType string - // MemoryState (Memory usage type) - MemoryState string - // ObjectType (MongoDB object type) - ObjectType string - // Operation (Type of database operation) - Operation string - // OplogType (Oplog type) - OplogType string - // ScannedType (Objects or indexes scanned during query) - ScannedType string - // StorageStatus (Views on database size) - StorageStatus string - // TicketType (Type of ticket available) - TicketType string -}{ - "assert_type", - "btree_counter_type", - "cache_direction", - "cache_status", - "cluster_role", - "cpu_state", - "cursor_state", - "direction", - "disk_direction", - "disk_status", - "document_status", - "execution_type", - "global_lock_state", - "memory_issue_type", - "memory_state", - "object_type", - "operation", - "oplog_type", - "scanned_type", - "storage_status", - "ticket_type", -} - -// A is an alias for Attributes. -var A = Attributes - -// AttributeAssertType are the possible values that the attribute "assert_type" can have. -var AttributeAssertType = struct { - Regular string - Warning string - Msg string - User string -}{ - "regular", - "warning", - "msg", - "user", -} - -// AttributeBtreeCounterType are the possible values that the attribute "btree_counter_type" can have. -var AttributeBtreeCounterType = struct { - Accesses string - Hits string - Misses string -}{ - "accesses", - "hits", - "misses", -} - -// AttributeCacheDirection are the possible values that the attribute "cache_direction" can have. -var AttributeCacheDirection = struct { - ReadInto string - WrittenFrom string -}{ - "read_into", - "written_from", -} - -// AttributeCacheStatus are the possible values that the attribute "cache_status" can have. -var AttributeCacheStatus = struct { - Dirty string - Used string -}{ - "dirty", - "used", -} - -// AttributeClusterRole are the possible values that the attribute "cluster_role" can have. -var AttributeClusterRole = struct { - Primary string - Replica string -}{ - "primary", - "replica", -} - -// AttributeCPUState are the possible values that the attribute "cpu_state" can have. -var AttributeCPUState = struct { - Kernel string - User string - Nice string - Iowait string - Irq string - Softirq string - Guest string - Steal string -}{ - "kernel", - "user", - "nice", - "iowait", - "irq", - "softirq", - "guest", - "steal", -} - -// AttributeCursorState are the possible values that the attribute "cursor_state" can have. 
-var AttributeCursorState = struct { - TimedOut string - Open string -}{ - "timed_out", - "open", -} - -// AttributeDirection are the possible values that the attribute "direction" can have. -var AttributeDirection = struct { - Receive string - Transmit string -}{ - "receive", - "transmit", -} - -// AttributeDiskDirection are the possible values that the attribute "disk_direction" can have. -var AttributeDiskDirection = struct { - Read string - Write string - Total string -}{ - "read", - "write", - "total", -} - -// AttributeDiskStatus are the possible values that the attribute "disk_status" can have. -var AttributeDiskStatus = struct { - Free string - Used string -}{ - "free", - "used", -} - -// AttributeDocumentStatus are the possible values that the attribute "document_status" can have. -var AttributeDocumentStatus = struct { - Returned string - Inserted string - Updated string - Deleted string -}{ - "returned", - "inserted", - "updated", - "deleted", -} - -// AttributeExecutionType are the possible values that the attribute "execution_type" can have. -var AttributeExecutionType = struct { - Reads string - Writes string - Commands string -}{ - "reads", - "writes", - "commands", -} - -// AttributeGlobalLockState are the possible values that the attribute "global_lock_state" can have. -var AttributeGlobalLockState = struct { - CurrentQueueTotal string - CurrentQueueReaders string - CurrentQueueWriters string -}{ - "current_queue_total", - "current_queue_readers", - "current_queue_writers", -} - -// AttributeMemoryIssueType are the possible values that the attribute "memory_issue_type" can have. -var AttributeMemoryIssueType = struct { - ExtraInfo string - GlobalAccessesNotInMemory string - ExceptionsThrown string -}{ - "extra_info", - "global_accesses_not_in_memory", - "exceptions_thrown", -} - -// AttributeMemoryState are the possible values that the attribute "memory_state" can have. -var AttributeMemoryState = struct { - Resident string - Virtual string - Mapped string - Computed string -}{ - "resident", - "virtual", - "mapped", - "computed", -} - -// AttributeObjectType are the possible values that the attribute "object_type" can have. -var AttributeObjectType = struct { - Collection string - Index string - Extent string - Object string - View string - Storage string - Data string -}{ - "collection", - "index", - "extent", - "object", - "view", - "storage", - "data", -} - -// AttributeOperation are the possible values that the attribute "operation" can have. -var AttributeOperation = struct { - Cmd string - Query string - Update string - Delete string - Getmore string - Insert string -}{ - "cmd", - "query", - "update", - "delete", - "getmore", - "insert", -} - -// AttributeOplogType are the possible values that the attribute "oplog_type" can have. -var AttributeOplogType = struct { - SlaveLagMasterTime string - MasterTime string - MasterLagTimeDiff string -}{ - "slave_lag_master_time", - "master_time", - "master_lag_time_diff", -} - -// AttributeScannedType are the possible values that the attribute "scanned_type" can have. -var AttributeScannedType = struct { - IndexItems string - Objects string -}{ - "index_items", - "objects", -} - -// AttributeStorageStatus are the possible values that the attribute "storage_status" can have. 
-var AttributeStorageStatus = struct { - Total string - DataSize string - IndexSize string - DataSizeWoSystem string -}{ - "total", - "data_size", - "index_size", - "data_size_wo_system", -} - -// AttributeTicketType are the possible values that the attribute "ticket_type" can have. -var AttributeTicketType = struct { - AvailableReads string - AvailableWrites string -}{ - "available_reads", - "available_writes", -} diff --git a/receiver/mongodbatlasreceiver/internal/metadata/generated_metrics_v2.go b/receiver/mongodbatlasreceiver/internal/metadata/generated_metrics_v2.go new file mode 100644 index 000000000000..ad6396335861 --- /dev/null +++ b/receiver/mongodbatlasreceiver/internal/metadata/generated_metrics_v2.go @@ -0,0 +1,4473 @@ +// Code generated by mdatagen. DO NOT EDIT. + +package metadata + +import ( + "time" + + "go.opentelemetry.io/collector/model/pdata" +) + +// MetricSettings provides common settings for a particular metric. +type MetricSettings struct { + Enabled bool `mapstructure:"enabled"` +} + +// MetricsSettings provides settings for mongoatlasreceiver metrics. +type MetricsSettings struct { + MongodbatlasDbCounts MetricSettings `mapstructure:"mongodbatlas.db.counts"` + MongodbatlasDbSize MetricSettings `mapstructure:"mongodbatlas.db.size"` + MongodbatlasDiskPartitionIopsAverage MetricSettings `mapstructure:"mongodbatlas.disk.partition.iops.average"` + MongodbatlasDiskPartitionIopsMax MetricSettings `mapstructure:"mongodbatlas.disk.partition.iops.max"` + MongodbatlasDiskPartitionLatencyAverage MetricSettings `mapstructure:"mongodbatlas.disk.partition.latency.average"` + MongodbatlasDiskPartitionLatencyMax MetricSettings `mapstructure:"mongodbatlas.disk.partition.latency.max"` + MongodbatlasDiskPartitionSpaceAverage MetricSettings `mapstructure:"mongodbatlas.disk.partition.space.average"` + MongodbatlasDiskPartitionSpaceMax MetricSettings `mapstructure:"mongodbatlas.disk.partition.space.max"` + MongodbatlasDiskPartitionUsageAverage MetricSettings `mapstructure:"mongodbatlas.disk.partition.usage.average"` + MongodbatlasDiskPartitionUsageMax MetricSettings `mapstructure:"mongodbatlas.disk.partition.usage.max"` + MongodbatlasDiskPartitionUtilizationAverage MetricSettings `mapstructure:"mongodbatlas.disk.partition.utilization.average"` + MongodbatlasDiskPartitionUtilizationMax MetricSettings `mapstructure:"mongodbatlas.disk.partition.utilization.max"` + MongodbatlasProcessAsserts MetricSettings `mapstructure:"mongodbatlas.process.asserts"` + MongodbatlasProcessBackgroundFlush MetricSettings `mapstructure:"mongodbatlas.process.background_flush"` + MongodbatlasProcessCacheIo MetricSettings `mapstructure:"mongodbatlas.process.cache.io"` + MongodbatlasProcessCacheSize MetricSettings `mapstructure:"mongodbatlas.process.cache.size"` + MongodbatlasProcessConnections MetricSettings `mapstructure:"mongodbatlas.process.connections"` + MongodbatlasProcessCPUChildrenNormalizedUsageAverage MetricSettings `mapstructure:"mongodbatlas.process.cpu.children.normalized.usage.average"` + MongodbatlasProcessCPUChildrenNormalizedUsageMax MetricSettings `mapstructure:"mongodbatlas.process.cpu.children.normalized.usage.max"` + MongodbatlasProcessCPUChildrenUsageAverage MetricSettings `mapstructure:"mongodbatlas.process.cpu.children.usage.average"` + MongodbatlasProcessCPUChildrenUsageMax MetricSettings `mapstructure:"mongodbatlas.process.cpu.children.usage.max"` + MongodbatlasProcessCPUNormalizedUsageAverage MetricSettings `mapstructure:"mongodbatlas.process.cpu.normalized.usage.average"` + 
MongodbatlasProcessCPUNormalizedUsageMax MetricSettings `mapstructure:"mongodbatlas.process.cpu.normalized.usage.max"` + MongodbatlasProcessCPUUsageAverage MetricSettings `mapstructure:"mongodbatlas.process.cpu.usage.average"` + MongodbatlasProcessCPUUsageMax MetricSettings `mapstructure:"mongodbatlas.process.cpu.usage.max"` + MongodbatlasProcessCursors MetricSettings `mapstructure:"mongodbatlas.process.cursors"` + MongodbatlasProcessDbDocumentRate MetricSettings `mapstructure:"mongodbatlas.process.db.document.rate"` + MongodbatlasProcessDbOperationsRate MetricSettings `mapstructure:"mongodbatlas.process.db.operations.rate"` + MongodbatlasProcessDbOperationsTime MetricSettings `mapstructure:"mongodbatlas.process.db.operations.time"` + MongodbatlasProcessDbQueryExecutorScanned MetricSettings `mapstructure:"mongodbatlas.process.db.query_executor.scanned"` + MongodbatlasProcessDbQueryTargetingScannedPerReturned MetricSettings `mapstructure:"mongodbatlas.process.db.query_targeting.scanned_per_returned"` + MongodbatlasProcessDbStorage MetricSettings `mapstructure:"mongodbatlas.process.db.storage"` + MongodbatlasProcessFtsCPUUsage MetricSettings `mapstructure:"mongodbatlas.process.fts.cpu.usage"` + MongodbatlasProcessGlobalLock MetricSettings `mapstructure:"mongodbatlas.process.global_lock"` + MongodbatlasProcessIndexBtreeMissRatio MetricSettings `mapstructure:"mongodbatlas.process.index.btree_miss_ratio"` + MongodbatlasProcessIndexCounters MetricSettings `mapstructure:"mongodbatlas.process.index.counters"` + MongodbatlasProcessJournalingCommits MetricSettings `mapstructure:"mongodbatlas.process.journaling.commits"` + MongodbatlasProcessJournalingDataFiles MetricSettings `mapstructure:"mongodbatlas.process.journaling.data_files"` + MongodbatlasProcessJournalingWritten MetricSettings `mapstructure:"mongodbatlas.process.journaling.written"` + MongodbatlasProcessMemoryUsage MetricSettings `mapstructure:"mongodbatlas.process.memory.usage"` + MongodbatlasProcessNetworkIo MetricSettings `mapstructure:"mongodbatlas.process.network.io"` + MongodbatlasProcessNetworkRequests MetricSettings `mapstructure:"mongodbatlas.process.network.requests"` + MongodbatlasProcessOplogRate MetricSettings `mapstructure:"mongodbatlas.process.oplog.rate"` + MongodbatlasProcessOplogTime MetricSettings `mapstructure:"mongodbatlas.process.oplog.time"` + MongodbatlasProcessPageFaults MetricSettings `mapstructure:"mongodbatlas.process.page_faults"` + MongodbatlasProcessRestarts MetricSettings `mapstructure:"mongodbatlas.process.restarts"` + MongodbatlasProcessTickets MetricSettings `mapstructure:"mongodbatlas.process.tickets"` + MongodbatlasSystemCPUNormalizedUsageAverage MetricSettings `mapstructure:"mongodbatlas.system.cpu.normalized.usage.average"` + MongodbatlasSystemCPUNormalizedUsageMax MetricSettings `mapstructure:"mongodbatlas.system.cpu.normalized.usage.max"` + MongodbatlasSystemCPUUsageAverage MetricSettings `mapstructure:"mongodbatlas.system.cpu.usage.average"` + MongodbatlasSystemCPUUsageMax MetricSettings `mapstructure:"mongodbatlas.system.cpu.usage.max"` + MongodbatlasSystemFtsCPUNormalizedUsage MetricSettings `mapstructure:"mongodbatlas.system.fts.cpu.normalized.usage"` + MongodbatlasSystemFtsCPUUsage MetricSettings `mapstructure:"mongodbatlas.system.fts.cpu.usage"` + MongodbatlasSystemFtsDiskUsed MetricSettings `mapstructure:"mongodbatlas.system.fts.disk.used"` + MongodbatlasSystemFtsMemoryUsage MetricSettings `mapstructure:"mongodbatlas.system.fts.memory.usage"` + MongodbatlasSystemMemoryUsageAverage 
MetricSettings `mapstructure:"mongodbatlas.system.memory.usage.average"` + MongodbatlasSystemMemoryUsageMax MetricSettings `mapstructure:"mongodbatlas.system.memory.usage.max"` + MongodbatlasSystemNetworkIoAverage MetricSettings `mapstructure:"mongodbatlas.system.network.io.average"` + MongodbatlasSystemNetworkIoMax MetricSettings `mapstructure:"mongodbatlas.system.network.io.max"` + MongodbatlasSystemPagingIoAverage MetricSettings `mapstructure:"mongodbatlas.system.paging.io.average"` + MongodbatlasSystemPagingIoMax MetricSettings `mapstructure:"mongodbatlas.system.paging.io.max"` + MongodbatlasSystemPagingUsageAverage MetricSettings `mapstructure:"mongodbatlas.system.paging.usage.average"` + MongodbatlasSystemPagingUsageMax MetricSettings `mapstructure:"mongodbatlas.system.paging.usage.max"` +} + +func DefaultMetricsSettings() MetricsSettings { + return MetricsSettings{ + MongodbatlasDbCounts: MetricSettings{ + Enabled: true, + }, + MongodbatlasDbSize: MetricSettings{ + Enabled: true, + }, + MongodbatlasDiskPartitionIopsAverage: MetricSettings{ + Enabled: true, + }, + MongodbatlasDiskPartitionIopsMax: MetricSettings{ + Enabled: true, + }, + MongodbatlasDiskPartitionLatencyAverage: MetricSettings{ + Enabled: true, + }, + MongodbatlasDiskPartitionLatencyMax: MetricSettings{ + Enabled: true, + }, + MongodbatlasDiskPartitionSpaceAverage: MetricSettings{ + Enabled: true, + }, + MongodbatlasDiskPartitionSpaceMax: MetricSettings{ + Enabled: true, + }, + MongodbatlasDiskPartitionUsageAverage: MetricSettings{ + Enabled: true, + }, + MongodbatlasDiskPartitionUsageMax: MetricSettings{ + Enabled: true, + }, + MongodbatlasDiskPartitionUtilizationAverage: MetricSettings{ + Enabled: true, + }, + MongodbatlasDiskPartitionUtilizationMax: MetricSettings{ + Enabled: true, + }, + MongodbatlasProcessAsserts: MetricSettings{ + Enabled: true, + }, + MongodbatlasProcessBackgroundFlush: MetricSettings{ + Enabled: true, + }, + MongodbatlasProcessCacheIo: MetricSettings{ + Enabled: true, + }, + MongodbatlasProcessCacheSize: MetricSettings{ + Enabled: true, + }, + MongodbatlasProcessConnections: MetricSettings{ + Enabled: true, + }, + MongodbatlasProcessCPUChildrenNormalizedUsageAverage: MetricSettings{ + Enabled: true, + }, + MongodbatlasProcessCPUChildrenNormalizedUsageMax: MetricSettings{ + Enabled: true, + }, + MongodbatlasProcessCPUChildrenUsageAverage: MetricSettings{ + Enabled: true, + }, + MongodbatlasProcessCPUChildrenUsageMax: MetricSettings{ + Enabled: true, + }, + MongodbatlasProcessCPUNormalizedUsageAverage: MetricSettings{ + Enabled: true, + }, + MongodbatlasProcessCPUNormalizedUsageMax: MetricSettings{ + Enabled: true, + }, + MongodbatlasProcessCPUUsageAverage: MetricSettings{ + Enabled: true, + }, + MongodbatlasProcessCPUUsageMax: MetricSettings{ + Enabled: true, + }, + MongodbatlasProcessCursors: MetricSettings{ + Enabled: true, + }, + MongodbatlasProcessDbDocumentRate: MetricSettings{ + Enabled: true, + }, + MongodbatlasProcessDbOperationsRate: MetricSettings{ + Enabled: true, + }, + MongodbatlasProcessDbOperationsTime: MetricSettings{ + Enabled: true, + }, + MongodbatlasProcessDbQueryExecutorScanned: MetricSettings{ + Enabled: true, + }, + MongodbatlasProcessDbQueryTargetingScannedPerReturned: MetricSettings{ + Enabled: true, + }, + MongodbatlasProcessDbStorage: MetricSettings{ + Enabled: true, + }, + MongodbatlasProcessFtsCPUUsage: MetricSettings{ + Enabled: true, + }, + MongodbatlasProcessGlobalLock: MetricSettings{ + Enabled: true, + }, + MongodbatlasProcessIndexBtreeMissRatio: MetricSettings{ 
+ Enabled: true, + }, + MongodbatlasProcessIndexCounters: MetricSettings{ + Enabled: true, + }, + MongodbatlasProcessJournalingCommits: MetricSettings{ + Enabled: true, + }, + MongodbatlasProcessJournalingDataFiles: MetricSettings{ + Enabled: true, + }, + MongodbatlasProcessJournalingWritten: MetricSettings{ + Enabled: true, + }, + MongodbatlasProcessMemoryUsage: MetricSettings{ + Enabled: true, + }, + MongodbatlasProcessNetworkIo: MetricSettings{ + Enabled: true, + }, + MongodbatlasProcessNetworkRequests: MetricSettings{ + Enabled: true, + }, + MongodbatlasProcessOplogRate: MetricSettings{ + Enabled: true, + }, + MongodbatlasProcessOplogTime: MetricSettings{ + Enabled: true, + }, + MongodbatlasProcessPageFaults: MetricSettings{ + Enabled: true, + }, + MongodbatlasProcessRestarts: MetricSettings{ + Enabled: true, + }, + MongodbatlasProcessTickets: MetricSettings{ + Enabled: true, + }, + MongodbatlasSystemCPUNormalizedUsageAverage: MetricSettings{ + Enabled: true, + }, + MongodbatlasSystemCPUNormalizedUsageMax: MetricSettings{ + Enabled: true, + }, + MongodbatlasSystemCPUUsageAverage: MetricSettings{ + Enabled: true, + }, + MongodbatlasSystemCPUUsageMax: MetricSettings{ + Enabled: true, + }, + MongodbatlasSystemFtsCPUNormalizedUsage: MetricSettings{ + Enabled: true, + }, + MongodbatlasSystemFtsCPUUsage: MetricSettings{ + Enabled: true, + }, + MongodbatlasSystemFtsDiskUsed: MetricSettings{ + Enabled: true, + }, + MongodbatlasSystemFtsMemoryUsage: MetricSettings{ + Enabled: true, + }, + MongodbatlasSystemMemoryUsageAverage: MetricSettings{ + Enabled: true, + }, + MongodbatlasSystemMemoryUsageMax: MetricSettings{ + Enabled: true, + }, + MongodbatlasSystemNetworkIoAverage: MetricSettings{ + Enabled: true, + }, + MongodbatlasSystemNetworkIoMax: MetricSettings{ + Enabled: true, + }, + MongodbatlasSystemPagingIoAverage: MetricSettings{ + Enabled: true, + }, + MongodbatlasSystemPagingIoMax: MetricSettings{ + Enabled: true, + }, + MongodbatlasSystemPagingUsageAverage: MetricSettings{ + Enabled: true, + }, + MongodbatlasSystemPagingUsageMax: MetricSettings{ + Enabled: true, + }, + } +} + +type metricMongodbatlasDbCounts struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodbatlas.db.counts metric with initial data. +func (m *metricMongodbatlasDbCounts) init() { + m.data.SetName("mongodbatlas.db.counts") + m.data.SetDescription("Database feature size") + m.data.SetUnit("{objects}") + m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricMongodbatlasDbCounts) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, objectTypeAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) + dp.Attributes().Insert(A.ObjectType, pdata.NewValueString(objectTypeAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbatlasDbCounts) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricMongodbatlasDbCounts) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbatlasDbCounts(settings MetricSettings) metricMongodbatlasDbCounts { + m := metricMongodbatlasDbCounts{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMongodbatlasDbSize struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodbatlas.db.size metric with initial data. +func (m *metricMongodbatlasDbSize) init() { + m.data.SetName("mongodbatlas.db.size") + m.data.SetDescription("Database feature size") + m.data.SetUnit("By") + m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricMongodbatlasDbSize) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, objectTypeAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) + dp.Attributes().Insert(A.ObjectType, pdata.NewValueString(objectTypeAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbatlasDbSize) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbatlasDbSize) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbatlasDbSize(settings MetricSettings) metricMongodbatlasDbSize { + m := metricMongodbatlasDbSize{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMongodbatlasDiskPartitionIopsAverage struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodbatlas.disk.partition.iops.average metric with initial data. +func (m *metricMongodbatlasDiskPartitionIopsAverage) init() { + m.data.SetName("mongodbatlas.disk.partition.iops.average") + m.data.SetDescription("Disk partition iops") + m.data.SetUnit("{ops}/s") + m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricMongodbatlasDiskPartitionIopsAverage) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, diskDirectionAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) + dp.Attributes().Insert(A.DiskDirection, pdata.NewValueString(diskDirectionAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
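+// The saved high-water mark feeds the EnsureCapacity call in init(), so after
+// a few cycles the data point slice is pre-allocated to the largest size seen
+// and later collections avoid re-growing it.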
+func (m *metricMongodbatlasDiskPartitionIopsAverage) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbatlasDiskPartitionIopsAverage) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbatlasDiskPartitionIopsAverage(settings MetricSettings) metricMongodbatlasDiskPartitionIopsAverage { + m := metricMongodbatlasDiskPartitionIopsAverage{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMongodbatlasDiskPartitionIopsMax struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodbatlas.disk.partition.iops.max metric with initial data. +func (m *metricMongodbatlasDiskPartitionIopsMax) init() { + m.data.SetName("mongodbatlas.disk.partition.iops.max") + m.data.SetDescription("Disk partition iops") + m.data.SetUnit("{ops}/s") + m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricMongodbatlasDiskPartitionIopsMax) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, diskDirectionAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) + dp.Attributes().Insert(A.DiskDirection, pdata.NewValueString(diskDirectionAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbatlasDiskPartitionIopsMax) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbatlasDiskPartitionIopsMax) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbatlasDiskPartitionIopsMax(settings MetricSettings) metricMongodbatlasDiskPartitionIopsMax { + m := metricMongodbatlasDiskPartitionIopsMax{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMongodbatlasDiskPartitionLatencyAverage struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodbatlas.disk.partition.latency.average metric with initial data. 
+func (m *metricMongodbatlasDiskPartitionLatencyAverage) init() { + m.data.SetName("mongodbatlas.disk.partition.latency.average") + m.data.SetDescription("Disk partition latency") + m.data.SetUnit("ms") + m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricMongodbatlasDiskPartitionLatencyAverage) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, diskDirectionAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) + dp.Attributes().Insert(A.DiskDirection, pdata.NewValueString(diskDirectionAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbatlasDiskPartitionLatencyAverage) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbatlasDiskPartitionLatencyAverage) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbatlasDiskPartitionLatencyAverage(settings MetricSettings) metricMongodbatlasDiskPartitionLatencyAverage { + m := metricMongodbatlasDiskPartitionLatencyAverage{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMongodbatlasDiskPartitionLatencyMax struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodbatlas.disk.partition.latency.max metric with initial data. +func (m *metricMongodbatlasDiskPartitionLatencyMax) init() { + m.data.SetName("mongodbatlas.disk.partition.latency.max") + m.data.SetDescription("Disk partition latency") + m.data.SetUnit("ms") + m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricMongodbatlasDiskPartitionLatencyMax) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, diskDirectionAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) + dp.Attributes().Insert(A.DiskDirection, pdata.NewValueString(diskDirectionAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbatlasDiskPartitionLatencyMax) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
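+// A rough sketch of one collection cycle (value illustrative; "read" is one
+// of the disk_direction attribute values):
+//
+//	m := newMetricMongodbatlasDiskPartitionLatencyMax(settings)
+//	m.recordDataPoint(start, now, 12.5, "read")
+//	m.emit(dest) // dest is a pdata.MetricSlice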
+func (m *metricMongodbatlasDiskPartitionLatencyMax) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbatlasDiskPartitionLatencyMax(settings MetricSettings) metricMongodbatlasDiskPartitionLatencyMax { + m := metricMongodbatlasDiskPartitionLatencyMax{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMongodbatlasDiskPartitionSpaceAverage struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodbatlas.disk.partition.space.average metric with initial data. +func (m *metricMongodbatlasDiskPartitionSpaceAverage) init() { + m.data.SetName("mongodbatlas.disk.partition.space.average") + m.data.SetDescription("Disk partition space") + m.data.SetUnit("By") + m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricMongodbatlasDiskPartitionSpaceAverage) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, diskStatusAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) + dp.Attributes().Insert(A.DiskStatus, pdata.NewValueString(diskStatusAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbatlasDiskPartitionSpaceAverage) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbatlasDiskPartitionSpaceAverage) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbatlasDiskPartitionSpaceAverage(settings MetricSettings) metricMongodbatlasDiskPartitionSpaceAverage { + m := metricMongodbatlasDiskPartitionSpaceAverage{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMongodbatlasDiskPartitionSpaceMax struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodbatlas.disk.partition.space.max metric with initial data. 
+func (m *metricMongodbatlasDiskPartitionSpaceMax) init() { + m.data.SetName("mongodbatlas.disk.partition.space.max") + m.data.SetDescription("Disk partition space") + m.data.SetUnit("By") + m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricMongodbatlasDiskPartitionSpaceMax) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, diskStatusAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) + dp.Attributes().Insert(A.DiskStatus, pdata.NewValueString(diskStatusAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbatlasDiskPartitionSpaceMax) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbatlasDiskPartitionSpaceMax) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbatlasDiskPartitionSpaceMax(settings MetricSettings) metricMongodbatlasDiskPartitionSpaceMax { + m := metricMongodbatlasDiskPartitionSpaceMax{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMongodbatlasDiskPartitionUsageAverage struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodbatlas.disk.partition.usage.average metric with initial data. +func (m *metricMongodbatlasDiskPartitionUsageAverage) init() { + m.data.SetName("mongodbatlas.disk.partition.usage.average") + m.data.SetDescription("Disk partition usage (%)") + m.data.SetUnit("1") + m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricMongodbatlasDiskPartitionUsageAverage) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, diskStatusAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) + dp.Attributes().Insert(A.DiskStatus, pdata.NewValueString(diskStatusAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbatlasDiskPartitionUsageAverage) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricMongodbatlasDiskPartitionUsageAverage) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbatlasDiskPartitionUsageAverage(settings MetricSettings) metricMongodbatlasDiskPartitionUsageAverage { + m := metricMongodbatlasDiskPartitionUsageAverage{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMongodbatlasDiskPartitionUsageMax struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodbatlas.disk.partition.usage.max metric with initial data. +func (m *metricMongodbatlasDiskPartitionUsageMax) init() { + m.data.SetName("mongodbatlas.disk.partition.usage.max") + m.data.SetDescription("Disk partition usage (%)") + m.data.SetUnit("1") + m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricMongodbatlasDiskPartitionUsageMax) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, diskStatusAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) + dp.Attributes().Insert(A.DiskStatus, pdata.NewValueString(diskStatusAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbatlasDiskPartitionUsageMax) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbatlasDiskPartitionUsageMax) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbatlasDiskPartitionUsageMax(settings MetricSettings) metricMongodbatlasDiskPartitionUsageMax { + m := metricMongodbatlasDiskPartitionUsageMax{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMongodbatlasDiskPartitionUtilizationAverage struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodbatlas.disk.partition.utilization.average metric with initial data. 
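+// (Like the other disk partition measurements, utilization is generated as an
+// *.average/*.max pair; the two variants are identical apart from the metric
+// name and the aggregation Atlas applies to the underlying samples.)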
+func (m *metricMongodbatlasDiskPartitionUtilizationAverage) init() { + m.data.SetName("mongodbatlas.disk.partition.utilization.average") + m.data.SetDescription("Disk partition utilization (%)") + m.data.SetUnit("1") + m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricMongodbatlasDiskPartitionUtilizationAverage) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, diskStatusAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) + dp.Attributes().Insert(A.DiskStatus, pdata.NewValueString(diskStatusAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbatlasDiskPartitionUtilizationAverage) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbatlasDiskPartitionUtilizationAverage) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbatlasDiskPartitionUtilizationAverage(settings MetricSettings) metricMongodbatlasDiskPartitionUtilizationAverage { + m := metricMongodbatlasDiskPartitionUtilizationAverage{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMongodbatlasDiskPartitionUtilizationMax struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodbatlas.disk.partition.utilization.max metric with initial data. +func (m *metricMongodbatlasDiskPartitionUtilizationMax) init() { + m.data.SetName("mongodbatlas.disk.partition.utilization.max") + m.data.SetDescription("Disk partition utilization (%)") + m.data.SetUnit("1") + m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricMongodbatlasDiskPartitionUtilizationMax) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, diskStatusAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) + dp.Attributes().Insert(A.DiskStatus, pdata.NewValueString(diskStatusAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbatlasDiskPartitionUtilizationMax) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricMongodbatlasDiskPartitionUtilizationMax) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbatlasDiskPartitionUtilizationMax(settings MetricSettings) metricMongodbatlasDiskPartitionUtilizationMax { + m := metricMongodbatlasDiskPartitionUtilizationMax{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMongodbatlasProcessAsserts struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodbatlas.process.asserts metric with initial data. +func (m *metricMongodbatlasProcessAsserts) init() { + m.data.SetName("mongodbatlas.process.asserts") + m.data.SetDescription("Number of assertions per second") + m.data.SetUnit("{assertions}/s") + m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricMongodbatlasProcessAsserts) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, assertTypeAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) + dp.Attributes().Insert(A.AssertType, pdata.NewValueString(assertTypeAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbatlasProcessAsserts) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbatlasProcessAsserts) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbatlasProcessAsserts(settings MetricSettings) metricMongodbatlasProcessAsserts { + m := metricMongodbatlasProcessAsserts{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMongodbatlasProcessBackgroundFlush struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodbatlas.process.background_flush metric with initial data. +func (m *metricMongodbatlasProcessBackgroundFlush) init() { + m.data.SetName("mongodbatlas.process.background_flush") + m.data.SetDescription("Amount of data flushed in the background") + m.data.SetUnit("1") + m.data.SetDataType(pdata.MetricDataTypeGauge) +} + +func (m *metricMongodbatlasProcessBackgroundFlush) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
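+// (Because this gauge carries no attributes, init above skips the
+// EnsureCapacity pre-sizing used by the attributed metrics; it normally holds
+// a single data point per collection.)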
+func (m *metricMongodbatlasProcessBackgroundFlush) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbatlasProcessBackgroundFlush) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbatlasProcessBackgroundFlush(settings MetricSettings) metricMongodbatlasProcessBackgroundFlush { + m := metricMongodbatlasProcessBackgroundFlush{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMongodbatlasProcessCacheIo struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodbatlas.process.cache.io metric with initial data. +func (m *metricMongodbatlasProcessCacheIo) init() { + m.data.SetName("mongodbatlas.process.cache.io") + m.data.SetDescription("Cache throughput (per second)") + m.data.SetUnit("By") + m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricMongodbatlasProcessCacheIo) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, cacheDirectionAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) + dp.Attributes().Insert(A.CacheDirection, pdata.NewValueString(cacheDirectionAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbatlasProcessCacheIo) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbatlasProcessCacheIo) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbatlasProcessCacheIo(settings MetricSettings) metricMongodbatlasProcessCacheIo { + m := metricMongodbatlasProcessCacheIo{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMongodbatlasProcessCacheSize struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodbatlas.process.cache.size metric with initial data. 
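+// Unlike the gauges above, this metric is modeled as a non-monotonic
+// cumulative Sum, so its data points are appended under m.data.Sum() rather
+// than m.data.Gauge().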
+func (m *metricMongodbatlasProcessCacheSize) init() { + m.data.SetName("mongodbatlas.process.cache.size") + m.data.SetDescription("Cache sizes") + m.data.SetUnit("By") + m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricMongodbatlasProcessCacheSize) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, cacheStatusAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) + dp.Attributes().Insert(A.CacheStatus, pdata.NewValueString(cacheStatusAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbatlasProcessCacheSize) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbatlasProcessCacheSize) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbatlasProcessCacheSize(settings MetricSettings) metricMongodbatlasProcessCacheSize { + m := metricMongodbatlasProcessCacheSize{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMongodbatlasProcessConnections struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodbatlas.process.connections metric with initial data. +func (m *metricMongodbatlasProcessConnections) init() { + m.data.SetName("mongodbatlas.process.connections") + m.data.SetDescription("Number of current connections") + m.data.SetUnit("{connections}") + m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.Sum().SetIsMonotonic(false) + m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) +} + +func (m *metricMongodbatlasProcessConnections) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64) { + if !m.settings.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbatlasProcessConnections) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricMongodbatlasProcessConnections) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbatlasProcessConnections(settings MetricSettings) metricMongodbatlasProcessConnections { + m := metricMongodbatlasProcessConnections{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMongodbatlasProcessCPUChildrenNormalizedUsageAverage struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodbatlas.process.cpu.children.normalized.usage.average metric with initial data. +func (m *metricMongodbatlasProcessCPUChildrenNormalizedUsageAverage) init() { + m.data.SetName("mongodbatlas.process.cpu.children.normalized.usage.average") + m.data.SetDescription("CPU Usage for child processes, normalized to pct") + m.data.SetUnit("1") + m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricMongodbatlasProcessCPUChildrenNormalizedUsageAverage) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, cpuStateAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) + dp.Attributes().Insert(A.CPUState, pdata.NewValueString(cpuStateAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbatlasProcessCPUChildrenNormalizedUsageAverage) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbatlasProcessCPUChildrenNormalizedUsageAverage) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbatlasProcessCPUChildrenNormalizedUsageAverage(settings MetricSettings) metricMongodbatlasProcessCPUChildrenNormalizedUsageAverage { + m := metricMongodbatlasProcessCPUChildrenNormalizedUsageAverage{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMongodbatlasProcessCPUChildrenNormalizedUsageMax struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodbatlas.process.cpu.children.normalized.usage.max metric with initial data. 
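+// (Unit "1" marks a dimensionless ratio; the *.normalized.* variants report
+// usage divided by the host's CPU count, keeping values comparable across
+// instance sizes.)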
+func (m *metricMongodbatlasProcessCPUChildrenNormalizedUsageMax) init() { + m.data.SetName("mongodbatlas.process.cpu.children.normalized.usage.max") + m.data.SetDescription("CPU Usage for child processes, normalized to pct") + m.data.SetUnit("1") + m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricMongodbatlasProcessCPUChildrenNormalizedUsageMax) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, cpuStateAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) + dp.Attributes().Insert(A.CPUState, pdata.NewValueString(cpuStateAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbatlasProcessCPUChildrenNormalizedUsageMax) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbatlasProcessCPUChildrenNormalizedUsageMax) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbatlasProcessCPUChildrenNormalizedUsageMax(settings MetricSettings) metricMongodbatlasProcessCPUChildrenNormalizedUsageMax { + m := metricMongodbatlasProcessCPUChildrenNormalizedUsageMax{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMongodbatlasProcessCPUChildrenUsageAverage struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodbatlas.process.cpu.children.usage.average metric with initial data. +func (m *metricMongodbatlasProcessCPUChildrenUsageAverage) init() { + m.data.SetName("mongodbatlas.process.cpu.children.usage.average") + m.data.SetDescription("CPU Usage for child processes (%)") + m.data.SetUnit("1") + m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricMongodbatlasProcessCPUChildrenUsageAverage) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, cpuStateAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) + dp.Attributes().Insert(A.CPUState, pdata.NewValueString(cpuStateAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbatlasProcessCPUChildrenUsageAverage) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricMongodbatlasProcessCPUChildrenUsageAverage) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbatlasProcessCPUChildrenUsageAverage(settings MetricSettings) metricMongodbatlasProcessCPUChildrenUsageAverage { + m := metricMongodbatlasProcessCPUChildrenUsageAverage{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMongodbatlasProcessCPUChildrenUsageMax struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodbatlas.process.cpu.children.usage.max metric with initial data. +func (m *metricMongodbatlasProcessCPUChildrenUsageMax) init() { + m.data.SetName("mongodbatlas.process.cpu.children.usage.max") + m.data.SetDescription("CPU Usage for child processes (%)") + m.data.SetUnit("1") + m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricMongodbatlasProcessCPUChildrenUsageMax) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, cpuStateAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) + dp.Attributes().Insert(A.CPUState, pdata.NewValueString(cpuStateAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbatlasProcessCPUChildrenUsageMax) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbatlasProcessCPUChildrenUsageMax) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbatlasProcessCPUChildrenUsageMax(settings MetricSettings) metricMongodbatlasProcessCPUChildrenUsageMax { + m := metricMongodbatlasProcessCPUChildrenUsageMax{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMongodbatlasProcessCPUNormalizedUsageAverage struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodbatlas.process.cpu.normalized.usage.average metric with initial data. 
+func (m *metricMongodbatlasProcessCPUNormalizedUsageAverage) init() { + m.data.SetName("mongodbatlas.process.cpu.normalized.usage.average") + m.data.SetDescription("CPU Usage, normalized to pct") + m.data.SetUnit("1") + m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricMongodbatlasProcessCPUNormalizedUsageAverage) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, cpuStateAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) + dp.Attributes().Insert(A.CPUState, pdata.NewValueString(cpuStateAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbatlasProcessCPUNormalizedUsageAverage) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbatlasProcessCPUNormalizedUsageAverage) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbatlasProcessCPUNormalizedUsageAverage(settings MetricSettings) metricMongodbatlasProcessCPUNormalizedUsageAverage { + m := metricMongodbatlasProcessCPUNormalizedUsageAverage{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMongodbatlasProcessCPUNormalizedUsageMax struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodbatlas.process.cpu.normalized.usage.max metric with initial data. +func (m *metricMongodbatlasProcessCPUNormalizedUsageMax) init() { + m.data.SetName("mongodbatlas.process.cpu.normalized.usage.max") + m.data.SetDescription("CPU Usage, normalized to pct") + m.data.SetUnit("1") + m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricMongodbatlasProcessCPUNormalizedUsageMax) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, cpuStateAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) + dp.Attributes().Insert(A.CPUState, pdata.NewValueString(cpuStateAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbatlasProcessCPUNormalizedUsageMax) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricMongodbatlasProcessCPUNormalizedUsageMax) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbatlasProcessCPUNormalizedUsageMax(settings MetricSettings) metricMongodbatlasProcessCPUNormalizedUsageMax { + m := metricMongodbatlasProcessCPUNormalizedUsageMax{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMongodbatlasProcessCPUUsageAverage struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodbatlas.process.cpu.usage.average metric with initial data. +func (m *metricMongodbatlasProcessCPUUsageAverage) init() { + m.data.SetName("mongodbatlas.process.cpu.usage.average") + m.data.SetDescription("CPU Usage (%)") + m.data.SetUnit("1") + m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricMongodbatlasProcessCPUUsageAverage) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, cpuStateAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) + dp.Attributes().Insert(A.CPUState, pdata.NewValueString(cpuStateAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbatlasProcessCPUUsageAverage) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbatlasProcessCPUUsageAverage) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbatlasProcessCPUUsageAverage(settings MetricSettings) metricMongodbatlasProcessCPUUsageAverage { + m := metricMongodbatlasProcessCPUUsageAverage{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMongodbatlasProcessCPUUsageMax struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodbatlas.process.cpu.usage.max metric with initial data. 
+func (m *metricMongodbatlasProcessCPUUsageMax) init() { + m.data.SetName("mongodbatlas.process.cpu.usage.max") + m.data.SetDescription("CPU Usage (%)") + m.data.SetUnit("1") + m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricMongodbatlasProcessCPUUsageMax) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, cpuStateAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) + dp.Attributes().Insert(A.CPUState, pdata.NewValueString(cpuStateAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbatlasProcessCPUUsageMax) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbatlasProcessCPUUsageMax) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbatlasProcessCPUUsageMax(settings MetricSettings) metricMongodbatlasProcessCPUUsageMax { + m := metricMongodbatlasProcessCPUUsageMax{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMongodbatlasProcessCursors struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodbatlas.process.cursors metric with initial data. +func (m *metricMongodbatlasProcessCursors) init() { + m.data.SetName("mongodbatlas.process.cursors") + m.data.SetDescription("Number of cursors") + m.data.SetUnit("{cursors}") + m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricMongodbatlasProcessCursors) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, cursorStateAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) + dp.Attributes().Insert(A.CursorState, pdata.NewValueString(cursorStateAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbatlasProcessCursors) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricMongodbatlasProcessCursors) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbatlasProcessCursors(settings MetricSettings) metricMongodbatlasProcessCursors { + m := metricMongodbatlasProcessCursors{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMongodbatlasProcessDbDocumentRate struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodbatlas.process.db.document.rate metric with initial data. +func (m *metricMongodbatlasProcessDbDocumentRate) init() { + m.data.SetName("mongodbatlas.process.db.document.rate") + m.data.SetDescription("Document access rates") + m.data.SetUnit("{documents}/s") + m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricMongodbatlasProcessDbDocumentRate) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, documentStatusAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) + dp.Attributes().Insert(A.DocumentStatus, pdata.NewValueString(documentStatusAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbatlasProcessDbDocumentRate) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbatlasProcessDbDocumentRate) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbatlasProcessDbDocumentRate(settings MetricSettings) metricMongodbatlasProcessDbDocumentRate { + m := metricMongodbatlasProcessDbDocumentRate{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMongodbatlasProcessDbOperationsRate struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodbatlas.process.db.operations.rate metric with initial data. 
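+// This metric is dimensioned by two attributes, operation and cluster_role,
+// so recordDataPoint below inserts both values on every data point.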
+func (m *metricMongodbatlasProcessDbOperationsRate) init() { + m.data.SetName("mongodbatlas.process.db.operations.rate") + m.data.SetDescription("DB Operation Rates") + m.data.SetUnit("{operations}/s") + m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricMongodbatlasProcessDbOperationsRate) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, operationAttributeValue string, clusterRoleAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) + dp.Attributes().Insert(A.Operation, pdata.NewValueString(operationAttributeValue)) + dp.Attributes().Insert(A.ClusterRole, pdata.NewValueString(clusterRoleAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbatlasProcessDbOperationsRate) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbatlasProcessDbOperationsRate) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbatlasProcessDbOperationsRate(settings MetricSettings) metricMongodbatlasProcessDbOperationsRate { + m := metricMongodbatlasProcessDbOperationsRate{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMongodbatlasProcessDbOperationsTime struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodbatlas.process.db.operations.time metric with initial data. +func (m *metricMongodbatlasProcessDbOperationsTime) init() { + m.data.SetName("mongodbatlas.process.db.operations.time") + m.data.SetDescription("DB Operation Times") + m.data.SetUnit("ms") + m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricMongodbatlasProcessDbOperationsTime) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, executionTypeAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) + dp.Attributes().Insert(A.ExecutionType, pdata.NewValueString(executionTypeAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbatlasProcessDbOperationsTime) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
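+// Unlike the non-monotonic cache.size and connections Sums above, this Sum is
+// flagged monotonic with cumulative temporality in init(), since total
+// operation time only ever accumulates.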
+func (m *metricMongodbatlasProcessDbOperationsTime) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbatlasProcessDbOperationsTime(settings MetricSettings) metricMongodbatlasProcessDbOperationsTime { + m := metricMongodbatlasProcessDbOperationsTime{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMongodbatlasProcessDbQueryExecutorScanned struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodbatlas.process.db.query_executor.scanned metric with initial data. +func (m *metricMongodbatlasProcessDbQueryExecutorScanned) init() { + m.data.SetName("mongodbatlas.process.db.query_executor.scanned") + m.data.SetDescription("Scanned objects") + m.data.SetUnit("{objects}/s") + m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricMongodbatlasProcessDbQueryExecutorScanned) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, scannedTypeAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) + dp.Attributes().Insert(A.ScannedType, pdata.NewValueString(scannedTypeAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbatlasProcessDbQueryExecutorScanned) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbatlasProcessDbQueryExecutorScanned) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbatlasProcessDbQueryExecutorScanned(settings MetricSettings) metricMongodbatlasProcessDbQueryExecutorScanned { + m := metricMongodbatlasProcessDbQueryExecutorScanned{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMongodbatlasProcessDbQueryTargetingScannedPerReturned struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodbatlas.process.db.query_targeting.scanned_per_returned metric with initial data. 
+func (m *metricMongodbatlasProcessDbQueryTargetingScannedPerReturned) init() { + m.data.SetName("mongodbatlas.process.db.query_targeting.scanned_per_returned") + m.data.SetDescription("Scanned objects per returned") + m.data.SetUnit("{scanned}/{returned}") + m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricMongodbatlasProcessDbQueryTargetingScannedPerReturned) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, scannedTypeAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) + dp.Attributes().Insert(A.ScannedType, pdata.NewValueString(scannedTypeAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbatlasProcessDbQueryTargetingScannedPerReturned) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbatlasProcessDbQueryTargetingScannedPerReturned) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbatlasProcessDbQueryTargetingScannedPerReturned(settings MetricSettings) metricMongodbatlasProcessDbQueryTargetingScannedPerReturned { + m := metricMongodbatlasProcessDbQueryTargetingScannedPerReturned{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMongodbatlasProcessDbStorage struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodbatlas.process.db.storage metric with initial data. +func (m *metricMongodbatlasProcessDbStorage) init() { + m.data.SetName("mongodbatlas.process.db.storage") + m.data.SetDescription("Storage used by the database") + m.data.SetUnit("By") + m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricMongodbatlasProcessDbStorage) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, storageStatusAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) + dp.Attributes().Insert(A.StorageStatus, pdata.NewValueString(storageStatusAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbatlasProcessDbStorage) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricMongodbatlasProcessDbStorage) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbatlasProcessDbStorage(settings MetricSettings) metricMongodbatlasProcessDbStorage { + m := metricMongodbatlasProcessDbStorage{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMongodbatlasProcessFtsCPUUsage struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodbatlas.process.fts.cpu.usage metric with initial data. +func (m *metricMongodbatlasProcessFtsCPUUsage) init() { + m.data.SetName("mongodbatlas.process.fts.cpu.usage") + m.data.SetDescription("Full text search CPU (%)") + m.data.SetUnit("1") + m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricMongodbatlasProcessFtsCPUUsage) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, cpuStateAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) + dp.Attributes().Insert(A.CPUState, pdata.NewValueString(cpuStateAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbatlasProcessFtsCPUUsage) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbatlasProcessFtsCPUUsage) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbatlasProcessFtsCPUUsage(settings MetricSettings) metricMongodbatlasProcessFtsCPUUsage { + m := metricMongodbatlasProcessFtsCPUUsage{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMongodbatlasProcessGlobalLock struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodbatlas.process.global_lock metric with initial data. +func (m *metricMongodbatlasProcessGlobalLock) init() { + m.data.SetName("mongodbatlas.process.global_lock") + m.data.SetDescription("Number and status of locks") + m.data.SetUnit("{locks}") + m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricMongodbatlasProcessGlobalLock) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, globalLockStateAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) + dp.Attributes().Insert(A.GlobalLockState, pdata.NewValueString(globalLockStateAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
+func (m *metricMongodbatlasProcessGlobalLock) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbatlasProcessGlobalLock) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbatlasProcessGlobalLock(settings MetricSettings) metricMongodbatlasProcessGlobalLock { + m := metricMongodbatlasProcessGlobalLock{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMongodbatlasProcessIndexBtreeMissRatio struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodbatlas.process.index.btree_miss_ratio metric with initial data. +func (m *metricMongodbatlasProcessIndexBtreeMissRatio) init() { + m.data.SetName("mongodbatlas.process.index.btree_miss_ratio") + m.data.SetDescription("Index miss ratio (%)") + m.data.SetUnit("1") + m.data.SetDataType(pdata.MetricDataTypeGauge) +} + +func (m *metricMongodbatlasProcessIndexBtreeMissRatio) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbatlasProcessIndexBtreeMissRatio) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbatlasProcessIndexBtreeMissRatio) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbatlasProcessIndexBtreeMissRatio(settings MetricSettings) metricMongodbatlasProcessIndexBtreeMissRatio { + m := metricMongodbatlasProcessIndexBtreeMissRatio{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMongodbatlasProcessIndexCounters struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodbatlas.process.index.counters metric with initial data. 
+func (m *metricMongodbatlasProcessIndexCounters) init() { + m.data.SetName("mongodbatlas.process.index.counters") + m.data.SetDescription("Indexes") + m.data.SetUnit("{indexes}") + m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricMongodbatlasProcessIndexCounters) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, btreeCounterTypeAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) + dp.Attributes().Insert(A.BtreeCounterType, pdata.NewValueString(btreeCounterTypeAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbatlasProcessIndexCounters) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbatlasProcessIndexCounters) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbatlasProcessIndexCounters(settings MetricSettings) metricMongodbatlasProcessIndexCounters { + m := metricMongodbatlasProcessIndexCounters{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMongodbatlasProcessJournalingCommits struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodbatlas.process.journaling.commits metric with initial data. +func (m *metricMongodbatlasProcessJournalingCommits) init() { + m.data.SetName("mongodbatlas.process.journaling.commits") + m.data.SetDescription("Journaling commits") + m.data.SetUnit("{commits}") + m.data.SetDataType(pdata.MetricDataTypeGauge) +} + +func (m *metricMongodbatlasProcessJournalingCommits) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbatlasProcessJournalingCommits) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbatlasProcessJournalingCommits) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbatlasProcessJournalingCommits(settings MetricSettings) metricMongodbatlasProcessJournalingCommits { + m := metricMongodbatlasProcessJournalingCommits{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMongodbatlasProcessJournalingDataFiles struct { + data pdata.Metric // data buffer for generated metric. 
+ settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodbatlas.process.journaling.data_files metric with initial data. +func (m *metricMongodbatlasProcessJournalingDataFiles) init() { + m.data.SetName("mongodbatlas.process.journaling.data_files") + m.data.SetDescription("Data file sizes") + m.data.SetUnit("MiBy") + m.data.SetDataType(pdata.MetricDataTypeGauge) +} + +func (m *metricMongodbatlasProcessJournalingDataFiles) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbatlasProcessJournalingDataFiles) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbatlasProcessJournalingDataFiles) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbatlasProcessJournalingDataFiles(settings MetricSettings) metricMongodbatlasProcessJournalingDataFiles { + m := metricMongodbatlasProcessJournalingDataFiles{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMongodbatlasProcessJournalingWritten struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodbatlas.process.journaling.written metric with initial data. +func (m *metricMongodbatlasProcessJournalingWritten) init() { + m.data.SetName("mongodbatlas.process.journaling.written") + m.data.SetDescription("Journals written") + m.data.SetUnit("MiBy") + m.data.SetDataType(pdata.MetricDataTypeGauge) +} + +func (m *metricMongodbatlasProcessJournalingWritten) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbatlasProcessJournalingWritten) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricMongodbatlasProcessJournalingWritten) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbatlasProcessJournalingWritten(settings MetricSettings) metricMongodbatlasProcessJournalingWritten { + m := metricMongodbatlasProcessJournalingWritten{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMongodbatlasProcessMemoryUsage struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodbatlas.process.memory.usage metric with initial data. +func (m *metricMongodbatlasProcessMemoryUsage) init() { + m.data.SetName("mongodbatlas.process.memory.usage") + m.data.SetDescription("Memory Usage") + m.data.SetUnit("By") + m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricMongodbatlasProcessMemoryUsage) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, memoryStateAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) + dp.Attributes().Insert(A.MemoryState, pdata.NewValueString(memoryStateAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbatlasProcessMemoryUsage) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbatlasProcessMemoryUsage) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbatlasProcessMemoryUsage(settings MetricSettings) metricMongodbatlasProcessMemoryUsage { + m := metricMongodbatlasProcessMemoryUsage{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMongodbatlasProcessNetworkIo struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodbatlas.process.network.io metric with initial data. +func (m *metricMongodbatlasProcessNetworkIo) init() { + m.data.SetName("mongodbatlas.process.network.io") + m.data.SetDescription("Network IO") + m.data.SetUnit("By/s") + m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricMongodbatlasProcessNetworkIo) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, directionAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) + dp.Attributes().Insert(A.Direction, pdata.NewValueString(directionAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
+func (m *metricMongodbatlasProcessNetworkIo) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbatlasProcessNetworkIo) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbatlasProcessNetworkIo(settings MetricSettings) metricMongodbatlasProcessNetworkIo { + m := metricMongodbatlasProcessNetworkIo{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMongodbatlasProcessNetworkRequests struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodbatlas.process.network.requests metric with initial data. +func (m *metricMongodbatlasProcessNetworkRequests) init() { + m.data.SetName("mongodbatlas.process.network.requests") + m.data.SetDescription("Network requests") + m.data.SetUnit("{requests}") + m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) +} + +func (m *metricMongodbatlasProcessNetworkRequests) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64) { + if !m.settings.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbatlasProcessNetworkRequests) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbatlasProcessNetworkRequests) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbatlasProcessNetworkRequests(settings MetricSettings) metricMongodbatlasProcessNetworkRequests { + m := metricMongodbatlasProcessNetworkRequests{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMongodbatlasProcessOplogRate struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodbatlas.process.oplog.rate metric with initial data. 
+func (m *metricMongodbatlasProcessOplogRate) init() { + m.data.SetName("mongodbatlas.process.oplog.rate") + m.data.SetDescription("Execution rate by operation") + m.data.SetUnit("GiBy/h") + m.data.SetDataType(pdata.MetricDataTypeGauge) +} + +func (m *metricMongodbatlasProcessOplogRate) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbatlasProcessOplogRate) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbatlasProcessOplogRate) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbatlasProcessOplogRate(settings MetricSettings) metricMongodbatlasProcessOplogRate { + m := metricMongodbatlasProcessOplogRate{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMongodbatlasProcessOplogTime struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodbatlas.process.oplog.time metric with initial data. +func (m *metricMongodbatlasProcessOplogTime) init() { + m.data.SetName("mongodbatlas.process.oplog.time") + m.data.SetDescription("Execution time by operation") + m.data.SetUnit("s") + m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricMongodbatlasProcessOplogTime) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, oplogTypeAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) + dp.Attributes().Insert(A.OplogType, pdata.NewValueString(oplogTypeAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbatlasProcessOplogTime) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbatlasProcessOplogTime) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbatlasProcessOplogTime(settings MetricSettings) metricMongodbatlasProcessOplogTime { + m := metricMongodbatlasProcessOplogTime{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMongodbatlasProcessPageFaults struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. 
+} + +// init fills mongodbatlas.process.page_faults metric with initial data. +func (m *metricMongodbatlasProcessPageFaults) init() { + m.data.SetName("mongodbatlas.process.page_faults") + m.data.SetDescription("Page faults") + m.data.SetUnit("{faults}/s") + m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricMongodbatlasProcessPageFaults) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, memoryIssueTypeAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) + dp.Attributes().Insert(A.MemoryIssueType, pdata.NewValueString(memoryIssueTypeAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbatlasProcessPageFaults) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbatlasProcessPageFaults) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbatlasProcessPageFaults(settings MetricSettings) metricMongodbatlasProcessPageFaults { + m := metricMongodbatlasProcessPageFaults{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMongodbatlasProcessRestarts struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodbatlas.process.restarts metric with initial data. +func (m *metricMongodbatlasProcessRestarts) init() { + m.data.SetName("mongodbatlas.process.restarts") + m.data.SetDescription("Restarts in last hour") + m.data.SetUnit("{restarts}/h") + m.data.SetDataType(pdata.MetricDataTypeGauge) +} + +func (m *metricMongodbatlasProcessRestarts) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbatlasProcessRestarts) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbatlasProcessRestarts) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbatlasProcessRestarts(settings MetricSettings) metricMongodbatlasProcessRestarts { + m := metricMongodbatlasProcessRestarts{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMongodbatlasProcessTickets struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. 
+ capacity int // max observed number of data points added to the metric. +} + +// init fills mongodbatlas.process.tickets metric with initial data. +func (m *metricMongodbatlasProcessTickets) init() { + m.data.SetName("mongodbatlas.process.tickets") + m.data.SetDescription("Tickets") + m.data.SetUnit("{tickets}") + m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricMongodbatlasProcessTickets) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, ticketTypeAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) + dp.Attributes().Insert(A.TicketType, pdata.NewValueString(ticketTypeAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbatlasProcessTickets) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbatlasProcessTickets) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbatlasProcessTickets(settings MetricSettings) metricMongodbatlasProcessTickets { + m := metricMongodbatlasProcessTickets{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMongodbatlasSystemCPUNormalizedUsageAverage struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodbatlas.system.cpu.normalized.usage.average metric with initial data. +func (m *metricMongodbatlasSystemCPUNormalizedUsageAverage) init() { + m.data.SetName("mongodbatlas.system.cpu.normalized.usage.average") + m.data.SetDescription("System CPU Normalized to pct") + m.data.SetUnit("1") + m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricMongodbatlasSystemCPUNormalizedUsageAverage) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, cpuStateAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) + dp.Attributes().Insert(A.CPUState, pdata.NewValueString(cpuStateAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbatlasSystemCPUNormalizedUsageAverage) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricMongodbatlasSystemCPUNormalizedUsageAverage) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbatlasSystemCPUNormalizedUsageAverage(settings MetricSettings) metricMongodbatlasSystemCPUNormalizedUsageAverage { + m := metricMongodbatlasSystemCPUNormalizedUsageAverage{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMongodbatlasSystemCPUNormalizedUsageMax struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodbatlas.system.cpu.normalized.usage.max metric with initial data. +func (m *metricMongodbatlasSystemCPUNormalizedUsageMax) init() { + m.data.SetName("mongodbatlas.system.cpu.normalized.usage.max") + m.data.SetDescription("System CPU Normalized to pct") + m.data.SetUnit("1") + m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricMongodbatlasSystemCPUNormalizedUsageMax) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, cpuStateAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) + dp.Attributes().Insert(A.CPUState, pdata.NewValueString(cpuStateAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbatlasSystemCPUNormalizedUsageMax) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbatlasSystemCPUNormalizedUsageMax) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbatlasSystemCPUNormalizedUsageMax(settings MetricSettings) metricMongodbatlasSystemCPUNormalizedUsageMax { + m := metricMongodbatlasSystemCPUNormalizedUsageMax{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMongodbatlasSystemCPUUsageAverage struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodbatlas.system.cpu.usage.average metric with initial data. 
+func (m *metricMongodbatlasSystemCPUUsageAverage) init() { + m.data.SetName("mongodbatlas.system.cpu.usage.average") + m.data.SetDescription("System CPU Usage (%)") + m.data.SetUnit("1") + m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricMongodbatlasSystemCPUUsageAverage) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, cpuStateAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) + dp.Attributes().Insert(A.CPUState, pdata.NewValueString(cpuStateAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbatlasSystemCPUUsageAverage) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbatlasSystemCPUUsageAverage) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbatlasSystemCPUUsageAverage(settings MetricSettings) metricMongodbatlasSystemCPUUsageAverage { + m := metricMongodbatlasSystemCPUUsageAverage{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMongodbatlasSystemCPUUsageMax struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodbatlas.system.cpu.usage.max metric with initial data. +func (m *metricMongodbatlasSystemCPUUsageMax) init() { + m.data.SetName("mongodbatlas.system.cpu.usage.max") + m.data.SetDescription("System CPU Usage (%)") + m.data.SetUnit("1") + m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricMongodbatlasSystemCPUUsageMax) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, cpuStateAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) + dp.Attributes().Insert(A.CPUState, pdata.NewValueString(cpuStateAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbatlasSystemCPUUsageMax) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricMongodbatlasSystemCPUUsageMax) emit(metrics pdata.MetricSlice) {
+	if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+		m.updateCapacity()
+		m.data.MoveTo(metrics.AppendEmpty())
+		m.init()
+	}
+}
+
+func newMetricMongodbatlasSystemCPUUsageMax(settings MetricSettings) metricMongodbatlasSystemCPUUsageMax {
+	m := metricMongodbatlasSystemCPUUsageMax{settings: settings}
+	if settings.Enabled {
+		m.data = pdata.NewMetric()
+		m.init()
+	}
+	return m
+}
+
+type metricMongodbatlasSystemFtsCPUNormalizedUsage struct {
+	data     pdata.Metric   // data buffer for generated metric.
+	settings MetricSettings // metric settings provided by user.
+	capacity int            // max observed number of data points added to the metric.
+}
+
+// init fills mongodbatlas.system.fts.cpu.normalized.usage metric with initial data.
+func (m *metricMongodbatlasSystemFtsCPUNormalizedUsage) init() {
+	m.data.SetName("mongodbatlas.system.fts.cpu.normalized.usage")
+	m.data.SetDescription("Full text search CPU normalized usage (%)")
+	m.data.SetUnit("1")
+	m.data.SetDataType(pdata.MetricDataTypeGauge)
+	m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricMongodbatlasSystemFtsCPUNormalizedUsage) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, cpuStateAttributeValue string) {
+	if !m.settings.Enabled {
+		return
+	}
+	dp := m.data.Gauge().DataPoints().AppendEmpty()
+	dp.SetStartTimestamp(start)
+	dp.SetTimestamp(ts)
+	dp.SetDoubleVal(val)
+	dp.Attributes().Insert(A.CPUState, pdata.NewValueString(cpuStateAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricMongodbatlasSystemFtsCPUNormalizedUsage) updateCapacity() {
+	if m.data.Gauge().DataPoints().Len() > m.capacity {
+		m.capacity = m.data.Gauge().DataPoints().Len()
+	}
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricMongodbatlasSystemFtsCPUNormalizedUsage) emit(metrics pdata.MetricSlice) {
+	if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+		m.updateCapacity()
+		m.data.MoveTo(metrics.AppendEmpty())
+		m.init()
+	}
+}
+
+func newMetricMongodbatlasSystemFtsCPUNormalizedUsage(settings MetricSettings) metricMongodbatlasSystemFtsCPUNormalizedUsage {
+	m := metricMongodbatlasSystemFtsCPUNormalizedUsage{settings: settings}
+	if settings.Enabled {
+		m.data = pdata.NewMetric()
+		m.init()
+	}
+	return m
+}
+
+type metricMongodbatlasSystemFtsCPUUsage struct {
+	data     pdata.Metric   // data buffer for generated metric.
+	settings MetricSettings // metric settings provided by user.
+	capacity int            // max observed number of data points added to the metric.
+}
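For context, every generated type in this file follows the same record/emit lifecycle: recordDataPoint buffers points while the metric is enabled, and emit moves the buffered metric into the caller's slice and re-initializes the buffer for the next scrape. Below is a minimal sketch of how a scraper in the same internal/metadata package might drive one of these types; the wrapper function name, timestamps, values, and attribute strings are invented for illustration, and the import path assumes the model/pdata module this patch builds against.

```go
package metadata // hypothetical placement: same package as the generated code

import (
	"time"

	"go.opentelemetry.io/collector/model/pdata"
)

func exampleRecordAndEmit() {
	m := newMetricMongodbatlasSystemFtsCPUNormalizedUsage(MetricSettings{Enabled: true})

	start := pdata.NewTimestampFromTime(time.Now().Add(-time.Minute))
	now := pdata.NewTimestampFromTime(time.Now())

	// recordDataPoint silently drops points when the metric is disabled.
	m.recordDataPoint(start, now, 12.5, "kernel")
	m.recordDataPoint(start, now, 30.1, "user")

	// emit moves the buffered metric into dest and re-inits the buffer,
	// so the same instance can accumulate the next scrape's points.
	dest := pdata.NewMetricSlice()
	m.emit(dest) // dest.Len() == 1
}
```

+
+// init fills mongodbatlas.system.fts.cpu.usage metric with initial data.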
+func (m *metricMongodbatlasSystemFtsCPUUsage) init() {
+	m.data.SetName("mongodbatlas.system.fts.cpu.usage")
+	m.data.SetDescription("Full text search CPU usage (%)")
+	m.data.SetUnit("1")
+	m.data.SetDataType(pdata.MetricDataTypeGauge)
+	m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricMongodbatlasSystemFtsCPUUsage) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, cpuStateAttributeValue string) {
+	if !m.settings.Enabled {
+		return
+	}
+	dp := m.data.Gauge().DataPoints().AppendEmpty()
+	dp.SetStartTimestamp(start)
+	dp.SetTimestamp(ts)
+	dp.SetDoubleVal(val)
+	dp.Attributes().Insert(A.CPUState, pdata.NewValueString(cpuStateAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricMongodbatlasSystemFtsCPUUsage) updateCapacity() {
+	if m.data.Gauge().DataPoints().Len() > m.capacity {
+		m.capacity = m.data.Gauge().DataPoints().Len()
+	}
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricMongodbatlasSystemFtsCPUUsage) emit(metrics pdata.MetricSlice) {
+	if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+		m.updateCapacity()
+		m.data.MoveTo(metrics.AppendEmpty())
+		m.init()
+	}
+}
+
+func newMetricMongodbatlasSystemFtsCPUUsage(settings MetricSettings) metricMongodbatlasSystemFtsCPUUsage {
+	m := metricMongodbatlasSystemFtsCPUUsage{settings: settings}
+	if settings.Enabled {
+		m.data = pdata.NewMetric()
+		m.init()
+	}
+	return m
+}
+
+type metricMongodbatlasSystemFtsDiskUsed struct {
+	data     pdata.Metric   // data buffer for generated metric.
+	settings MetricSettings // metric settings provided by user.
+	capacity int            // max observed number of data points added to the metric.
+}
+
+// init fills mongodbatlas.system.fts.disk.used metric with initial data.
+func (m *metricMongodbatlasSystemFtsDiskUsed) init() {
+	m.data.SetName("mongodbatlas.system.fts.disk.used")
+	m.data.SetDescription("Full text search disk usage")
+	m.data.SetUnit("By")
+	m.data.SetDataType(pdata.MetricDataTypeGauge)
+}
+
+func (m *metricMongodbatlasSystemFtsDiskUsed) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64) {
+	if !m.settings.Enabled {
+		return
+	}
+	dp := m.data.Gauge().DataPoints().AppendEmpty()
+	dp.SetStartTimestamp(start)
+	dp.SetTimestamp(ts)
+	dp.SetDoubleVal(val)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricMongodbatlasSystemFtsDiskUsed) updateCapacity() {
+	if m.data.Gauge().DataPoints().Len() > m.capacity {
+		m.capacity = m.data.Gauge().DataPoints().Len()
+	}
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricMongodbatlasSystemFtsDiskUsed) emit(metrics pdata.MetricSlice) {
+	if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+		m.updateCapacity()
+		m.data.MoveTo(metrics.AppendEmpty())
+		m.init()
+	}
+}
+
+func newMetricMongodbatlasSystemFtsDiskUsed(settings MetricSettings) metricMongodbatlasSystemFtsDiskUsed {
+	m := metricMongodbatlasSystemFtsDiskUsed{settings: settings}
+	if settings.Enabled {
+		m.data = pdata.NewMetric()
+		m.init()
+	}
+	return m
+}
+
+type metricMongodbatlasSystemFtsMemoryUsage struct {
+	data     pdata.Metric   // data buffer for generated metric.
+	settings MetricSettings // metric settings provided by user.
+	capacity int            // max observed number of data points added to the metric.
+}
+
+// init fills mongodbatlas.system.fts.memory.usage metric with initial data.
+func (m *metricMongodbatlasSystemFtsMemoryUsage) init() {
+	m.data.SetName("mongodbatlas.system.fts.memory.usage")
+	m.data.SetDescription("Full text search memory usage")
+	m.data.SetUnit("MiBy")
+	m.data.SetDataType(pdata.MetricDataTypeSum)
+	m.data.Sum().SetIsMonotonic(true)
+	m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative)
+	m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricMongodbatlasSystemFtsMemoryUsage) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, memoryStateAttributeValue string) {
+	if !m.settings.Enabled {
+		return
+	}
+	dp := m.data.Sum().DataPoints().AppendEmpty()
+	dp.SetStartTimestamp(start)
+	dp.SetTimestamp(ts)
+	dp.SetDoubleVal(val)
+	dp.Attributes().Insert(A.MemoryState, pdata.NewValueString(memoryStateAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricMongodbatlasSystemFtsMemoryUsage) updateCapacity() {
+	if m.data.Sum().DataPoints().Len() > m.capacity {
+		m.capacity = m.data.Sum().DataPoints().Len()
+	}
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricMongodbatlasSystemFtsMemoryUsage) emit(metrics pdata.MetricSlice) {
+	if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
+		m.updateCapacity()
+		m.data.MoveTo(metrics.AppendEmpty())
+		m.init()
+	}
+}
+
+func newMetricMongodbatlasSystemFtsMemoryUsage(settings MetricSettings) metricMongodbatlasSystemFtsMemoryUsage {
+	m := metricMongodbatlasSystemFtsMemoryUsage{settings: settings}
+	if settings.Enabled {
+		m.data = pdata.NewMetric()
+		m.init()
+	}
+	return m
+}
+
+type metricMongodbatlasSystemMemoryUsageAverage struct {
+	data     pdata.Metric   // data buffer for generated metric.
+	settings MetricSettings // metric settings provided by user.
+	capacity int            // max observed number of data points added to the metric.
+}
+
+// init fills mongodbatlas.system.memory.usage.average metric with initial data.
+func (m *metricMongodbatlasSystemMemoryUsageAverage) init() {
+	m.data.SetName("mongodbatlas.system.memory.usage.average")
+	m.data.SetDescription("System Memory Usage")
+	m.data.SetUnit("KiBy")
+	m.data.SetDataType(pdata.MetricDataTypeGauge)
+	m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricMongodbatlasSystemMemoryUsageAverage) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, memoryStatusAttributeValue string) {
+	if !m.settings.Enabled {
+		return
+	}
+	dp := m.data.Gauge().DataPoints().AppendEmpty()
+	dp.SetStartTimestamp(start)
+	dp.SetTimestamp(ts)
+	dp.SetDoubleVal(val)
+	dp.Attributes().Insert(A.MemoryStatus, pdata.NewValueString(memoryStatusAttributeValue))
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricMongodbatlasSystemMemoryUsageAverage) updateCapacity() {
+	if m.data.Gauge().DataPoints().Len() > m.capacity {
+		m.capacity = m.data.Gauge().DataPoints().Len()
+	}
+}
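The updateCapacity/EnsureCapacity pair above is a simple high-water-mark optimization: after each emit, init pre-allocates the next data-point slice to the largest size observed so far, avoiding incremental slice growth on steady-state scrapes. A minimal sketch of the pdata behavior this relies on (the wrapper function name is invented for illustration):

```go
package metadata // hypothetical placement alongside the generated code

import "go.opentelemetry.io/collector/model/pdata"

func exampleCapacityReuse() {
	dps := pdata.NewNumberDataPointSlice()
	dps.EnsureCapacity(4) // reserves backing storage; Len() is still 0

	for i := 0; i < 4; i++ {
		dp := dps.AppendEmpty() // no reallocation within the reserved capacity
		dp.SetDoubleVal(float64(i))
	}
	// After emit(), init() calls EnsureCapacity(m.capacity) again, so a
	// metric that peaked at 4 data points starts the next scrape cycle
	// with room for 4 already reserved.
}
```

+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.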
+func (m *metricMongodbatlasSystemMemoryUsageAverage) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbatlasSystemMemoryUsageAverage(settings MetricSettings) metricMongodbatlasSystemMemoryUsageAverage { + m := metricMongodbatlasSystemMemoryUsageAverage{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMongodbatlasSystemMemoryUsageMax struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodbatlas.system.memory.usage.max metric with initial data. +func (m *metricMongodbatlasSystemMemoryUsageMax) init() { + m.data.SetName("mongodbatlas.system.memory.usage.max") + m.data.SetDescription("System Memory Usage") + m.data.SetUnit("KiBy") + m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricMongodbatlasSystemMemoryUsageMax) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, memoryStatusAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) + dp.Attributes().Insert(A.MemoryStatus, pdata.NewValueString(memoryStatusAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbatlasSystemMemoryUsageMax) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbatlasSystemMemoryUsageMax) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbatlasSystemMemoryUsageMax(settings MetricSettings) metricMongodbatlasSystemMemoryUsageMax { + m := metricMongodbatlasSystemMemoryUsageMax{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMongodbatlasSystemNetworkIoAverage struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodbatlas.system.network.io.average metric with initial data. 
+func (m *metricMongodbatlasSystemNetworkIoAverage) init() { + m.data.SetName("mongodbatlas.system.network.io.average") + m.data.SetDescription("System Network IO") + m.data.SetUnit("By/s") + m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricMongodbatlasSystemNetworkIoAverage) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, directionAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) + dp.Attributes().Insert(A.Direction, pdata.NewValueString(directionAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbatlasSystemNetworkIoAverage) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbatlasSystemNetworkIoAverage) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbatlasSystemNetworkIoAverage(settings MetricSettings) metricMongodbatlasSystemNetworkIoAverage { + m := metricMongodbatlasSystemNetworkIoAverage{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMongodbatlasSystemNetworkIoMax struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodbatlas.system.network.io.max metric with initial data. +func (m *metricMongodbatlasSystemNetworkIoMax) init() { + m.data.SetName("mongodbatlas.system.network.io.max") + m.data.SetDescription("System Network IO") + m.data.SetUnit("By/s") + m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricMongodbatlasSystemNetworkIoMax) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, directionAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) + dp.Attributes().Insert(A.Direction, pdata.NewValueString(directionAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbatlasSystemNetworkIoMax) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricMongodbatlasSystemNetworkIoMax) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbatlasSystemNetworkIoMax(settings MetricSettings) metricMongodbatlasSystemNetworkIoMax { + m := metricMongodbatlasSystemNetworkIoMax{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMongodbatlasSystemPagingIoAverage struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodbatlas.system.paging.io.average metric with initial data. +func (m *metricMongodbatlasSystemPagingIoAverage) init() { + m.data.SetName("mongodbatlas.system.paging.io.average") + m.data.SetDescription("Swap IO") + m.data.SetUnit("{pages}/s") + m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricMongodbatlasSystemPagingIoAverage) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, directionAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) + dp.Attributes().Insert(A.Direction, pdata.NewValueString(directionAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbatlasSystemPagingIoAverage) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbatlasSystemPagingIoAverage) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbatlasSystemPagingIoAverage(settings MetricSettings) metricMongodbatlasSystemPagingIoAverage { + m := metricMongodbatlasSystemPagingIoAverage{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMongodbatlasSystemPagingIoMax struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodbatlas.system.paging.io.max metric with initial data. +func (m *metricMongodbatlasSystemPagingIoMax) init() { + m.data.SetName("mongodbatlas.system.paging.io.max") + m.data.SetDescription("Swap IO") + m.data.SetUnit("{pages}/s") + m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricMongodbatlasSystemPagingIoMax) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, directionAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) + dp.Attributes().Insert(A.Direction, pdata.NewValueString(directionAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
+func (m *metricMongodbatlasSystemPagingIoMax) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbatlasSystemPagingIoMax) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbatlasSystemPagingIoMax(settings MetricSettings) metricMongodbatlasSystemPagingIoMax { + m := metricMongodbatlasSystemPagingIoMax{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMongodbatlasSystemPagingUsageAverage struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodbatlas.system.paging.usage.average metric with initial data. +func (m *metricMongodbatlasSystemPagingUsageAverage) init() { + m.data.SetName("mongodbatlas.system.paging.usage.average") + m.data.SetDescription("Swap usage") + m.data.SetUnit("KiBy") + m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricMongodbatlasSystemPagingUsageAverage) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, directionAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) + dp.Attributes().Insert(A.Direction, pdata.NewValueString(directionAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbatlasSystemPagingUsageAverage) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbatlasSystemPagingUsageAverage) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbatlasSystemPagingUsageAverage(settings MetricSettings) metricMongodbatlasSystemPagingUsageAverage { + m := metricMongodbatlasSystemPagingUsageAverage{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +type metricMongodbatlasSystemPagingUsageMax struct { + data pdata.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodbatlas.system.paging.usage.max metric with initial data. 
+func (m *metricMongodbatlasSystemPagingUsageMax) init() { + m.data.SetName("mongodbatlas.system.paging.usage.max") + m.data.SetDescription("Swap usage") + m.data.SetUnit("KiBy") + m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricMongodbatlasSystemPagingUsageMax) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, directionAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) + dp.Attributes().Insert(A.Direction, pdata.NewValueString(directionAttributeValue)) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbatlasSystemPagingUsageMax) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbatlasSystemPagingUsageMax) emit(metrics pdata.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbatlasSystemPagingUsageMax(settings MetricSettings) metricMongodbatlasSystemPagingUsageMax { + m := metricMongodbatlasSystemPagingUsageMax{settings: settings} + if settings.Enabled { + m.data = pdata.NewMetric() + m.init() + } + return m +} + +// MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations +// required to produce metric representation defined in metadata and user settings. +type MetricsBuilder struct { + startTime pdata.Timestamp // start time that will be applied to all recorded data points. + metricsCapacity int // maximum observed number of metrics per resource. + resourceCapacity int // maximum observed number of resource attributes. + metricsBuffer pdata.Metrics // accumulates metrics data before emitting. 
+ metricMongodbatlasDbCounts metricMongodbatlasDbCounts + metricMongodbatlasDbSize metricMongodbatlasDbSize + metricMongodbatlasDiskPartitionIopsAverage metricMongodbatlasDiskPartitionIopsAverage + metricMongodbatlasDiskPartitionIopsMax metricMongodbatlasDiskPartitionIopsMax + metricMongodbatlasDiskPartitionLatencyAverage metricMongodbatlasDiskPartitionLatencyAverage + metricMongodbatlasDiskPartitionLatencyMax metricMongodbatlasDiskPartitionLatencyMax + metricMongodbatlasDiskPartitionSpaceAverage metricMongodbatlasDiskPartitionSpaceAverage + metricMongodbatlasDiskPartitionSpaceMax metricMongodbatlasDiskPartitionSpaceMax + metricMongodbatlasDiskPartitionUsageAverage metricMongodbatlasDiskPartitionUsageAverage + metricMongodbatlasDiskPartitionUsageMax metricMongodbatlasDiskPartitionUsageMax + metricMongodbatlasDiskPartitionUtilizationAverage metricMongodbatlasDiskPartitionUtilizationAverage + metricMongodbatlasDiskPartitionUtilizationMax metricMongodbatlasDiskPartitionUtilizationMax + metricMongodbatlasProcessAsserts metricMongodbatlasProcessAsserts + metricMongodbatlasProcessBackgroundFlush metricMongodbatlasProcessBackgroundFlush + metricMongodbatlasProcessCacheIo metricMongodbatlasProcessCacheIo + metricMongodbatlasProcessCacheSize metricMongodbatlasProcessCacheSize + metricMongodbatlasProcessConnections metricMongodbatlasProcessConnections + metricMongodbatlasProcessCPUChildrenNormalizedUsageAverage metricMongodbatlasProcessCPUChildrenNormalizedUsageAverage + metricMongodbatlasProcessCPUChildrenNormalizedUsageMax metricMongodbatlasProcessCPUChildrenNormalizedUsageMax + metricMongodbatlasProcessCPUChildrenUsageAverage metricMongodbatlasProcessCPUChildrenUsageAverage + metricMongodbatlasProcessCPUChildrenUsageMax metricMongodbatlasProcessCPUChildrenUsageMax + metricMongodbatlasProcessCPUNormalizedUsageAverage metricMongodbatlasProcessCPUNormalizedUsageAverage + metricMongodbatlasProcessCPUNormalizedUsageMax metricMongodbatlasProcessCPUNormalizedUsageMax + metricMongodbatlasProcessCPUUsageAverage metricMongodbatlasProcessCPUUsageAverage + metricMongodbatlasProcessCPUUsageMax metricMongodbatlasProcessCPUUsageMax + metricMongodbatlasProcessCursors metricMongodbatlasProcessCursors + metricMongodbatlasProcessDbDocumentRate metricMongodbatlasProcessDbDocumentRate + metricMongodbatlasProcessDbOperationsRate metricMongodbatlasProcessDbOperationsRate + metricMongodbatlasProcessDbOperationsTime metricMongodbatlasProcessDbOperationsTime + metricMongodbatlasProcessDbQueryExecutorScanned metricMongodbatlasProcessDbQueryExecutorScanned + metricMongodbatlasProcessDbQueryTargetingScannedPerReturned metricMongodbatlasProcessDbQueryTargetingScannedPerReturned + metricMongodbatlasProcessDbStorage metricMongodbatlasProcessDbStorage + metricMongodbatlasProcessFtsCPUUsage metricMongodbatlasProcessFtsCPUUsage + metricMongodbatlasProcessGlobalLock metricMongodbatlasProcessGlobalLock + metricMongodbatlasProcessIndexBtreeMissRatio metricMongodbatlasProcessIndexBtreeMissRatio + metricMongodbatlasProcessIndexCounters metricMongodbatlasProcessIndexCounters + metricMongodbatlasProcessJournalingCommits metricMongodbatlasProcessJournalingCommits + metricMongodbatlasProcessJournalingDataFiles metricMongodbatlasProcessJournalingDataFiles + metricMongodbatlasProcessJournalingWritten metricMongodbatlasProcessJournalingWritten + metricMongodbatlasProcessMemoryUsage metricMongodbatlasProcessMemoryUsage + metricMongodbatlasProcessNetworkIo metricMongodbatlasProcessNetworkIo + metricMongodbatlasProcessNetworkRequests 
metricMongodbatlasProcessNetworkRequests + metricMongodbatlasProcessOplogRate metricMongodbatlasProcessOplogRate + metricMongodbatlasProcessOplogTime metricMongodbatlasProcessOplogTime + metricMongodbatlasProcessPageFaults metricMongodbatlasProcessPageFaults + metricMongodbatlasProcessRestarts metricMongodbatlasProcessRestarts + metricMongodbatlasProcessTickets metricMongodbatlasProcessTickets + metricMongodbatlasSystemCPUNormalizedUsageAverage metricMongodbatlasSystemCPUNormalizedUsageAverage + metricMongodbatlasSystemCPUNormalizedUsageMax metricMongodbatlasSystemCPUNormalizedUsageMax + metricMongodbatlasSystemCPUUsageAverage metricMongodbatlasSystemCPUUsageAverage + metricMongodbatlasSystemCPUUsageMax metricMongodbatlasSystemCPUUsageMax + metricMongodbatlasSystemFtsCPUNormalizedUsage metricMongodbatlasSystemFtsCPUNormalizedUsage + metricMongodbatlasSystemFtsCPUUsage metricMongodbatlasSystemFtsCPUUsage + metricMongodbatlasSystemFtsDiskUsed metricMongodbatlasSystemFtsDiskUsed + metricMongodbatlasSystemFtsMemoryUsage metricMongodbatlasSystemFtsMemoryUsage + metricMongodbatlasSystemMemoryUsageAverage metricMongodbatlasSystemMemoryUsageAverage + metricMongodbatlasSystemMemoryUsageMax metricMongodbatlasSystemMemoryUsageMax + metricMongodbatlasSystemNetworkIoAverage metricMongodbatlasSystemNetworkIoAverage + metricMongodbatlasSystemNetworkIoMax metricMongodbatlasSystemNetworkIoMax + metricMongodbatlasSystemPagingIoAverage metricMongodbatlasSystemPagingIoAverage + metricMongodbatlasSystemPagingIoMax metricMongodbatlasSystemPagingIoMax + metricMongodbatlasSystemPagingUsageAverage metricMongodbatlasSystemPagingUsageAverage + metricMongodbatlasSystemPagingUsageMax metricMongodbatlasSystemPagingUsageMax +} + +// metricBuilderOption applies changes to default metrics builder. +type metricBuilderOption func(*MetricsBuilder) + +// WithStartTime sets startTime on the metrics builder. 
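+// A usage sketch (`settings` and `componentStart` are assumed caller-side values,
+// not defined in this file):
+//
+//	mb := NewMetricsBuilder(settings, WithStartTime(pdata.NewTimestampFromTime(componentStart)))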
+func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { + return func(mb *MetricsBuilder) { + mb.startTime = startTime + } +} + +func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) *MetricsBuilder { + mb := &MetricsBuilder{ + startTime: pdata.NewTimestampFromTime(time.Now()), + metricsBuffer: pdata.NewMetrics(), + metricMongodbatlasDbCounts: newMetricMongodbatlasDbCounts(settings.MongodbatlasDbCounts), + metricMongodbatlasDbSize: newMetricMongodbatlasDbSize(settings.MongodbatlasDbSize), + metricMongodbatlasDiskPartitionIopsAverage: newMetricMongodbatlasDiskPartitionIopsAverage(settings.MongodbatlasDiskPartitionIopsAverage), + metricMongodbatlasDiskPartitionIopsMax: newMetricMongodbatlasDiskPartitionIopsMax(settings.MongodbatlasDiskPartitionIopsMax), + metricMongodbatlasDiskPartitionLatencyAverage: newMetricMongodbatlasDiskPartitionLatencyAverage(settings.MongodbatlasDiskPartitionLatencyAverage), + metricMongodbatlasDiskPartitionLatencyMax: newMetricMongodbatlasDiskPartitionLatencyMax(settings.MongodbatlasDiskPartitionLatencyMax), + metricMongodbatlasDiskPartitionSpaceAverage: newMetricMongodbatlasDiskPartitionSpaceAverage(settings.MongodbatlasDiskPartitionSpaceAverage), + metricMongodbatlasDiskPartitionSpaceMax: newMetricMongodbatlasDiskPartitionSpaceMax(settings.MongodbatlasDiskPartitionSpaceMax), + metricMongodbatlasDiskPartitionUsageAverage: newMetricMongodbatlasDiskPartitionUsageAverage(settings.MongodbatlasDiskPartitionUsageAverage), + metricMongodbatlasDiskPartitionUsageMax: newMetricMongodbatlasDiskPartitionUsageMax(settings.MongodbatlasDiskPartitionUsageMax), + metricMongodbatlasDiskPartitionUtilizationAverage: newMetricMongodbatlasDiskPartitionUtilizationAverage(settings.MongodbatlasDiskPartitionUtilizationAverage), + metricMongodbatlasDiskPartitionUtilizationMax: newMetricMongodbatlasDiskPartitionUtilizationMax(settings.MongodbatlasDiskPartitionUtilizationMax), + metricMongodbatlasProcessAsserts: newMetricMongodbatlasProcessAsserts(settings.MongodbatlasProcessAsserts), + metricMongodbatlasProcessBackgroundFlush: newMetricMongodbatlasProcessBackgroundFlush(settings.MongodbatlasProcessBackgroundFlush), + metricMongodbatlasProcessCacheIo: newMetricMongodbatlasProcessCacheIo(settings.MongodbatlasProcessCacheIo), + metricMongodbatlasProcessCacheSize: newMetricMongodbatlasProcessCacheSize(settings.MongodbatlasProcessCacheSize), + metricMongodbatlasProcessConnections: newMetricMongodbatlasProcessConnections(settings.MongodbatlasProcessConnections), + metricMongodbatlasProcessCPUChildrenNormalizedUsageAverage: newMetricMongodbatlasProcessCPUChildrenNormalizedUsageAverage(settings.MongodbatlasProcessCPUChildrenNormalizedUsageAverage), + metricMongodbatlasProcessCPUChildrenNormalizedUsageMax: newMetricMongodbatlasProcessCPUChildrenNormalizedUsageMax(settings.MongodbatlasProcessCPUChildrenNormalizedUsageMax), + metricMongodbatlasProcessCPUChildrenUsageAverage: newMetricMongodbatlasProcessCPUChildrenUsageAverage(settings.MongodbatlasProcessCPUChildrenUsageAverage), + metricMongodbatlasProcessCPUChildrenUsageMax: newMetricMongodbatlasProcessCPUChildrenUsageMax(settings.MongodbatlasProcessCPUChildrenUsageMax), + metricMongodbatlasProcessCPUNormalizedUsageAverage: newMetricMongodbatlasProcessCPUNormalizedUsageAverage(settings.MongodbatlasProcessCPUNormalizedUsageAverage), + metricMongodbatlasProcessCPUNormalizedUsageMax: newMetricMongodbatlasProcessCPUNormalizedUsageMax(settings.MongodbatlasProcessCPUNormalizedUsageMax), + 
metricMongodbatlasProcessCPUUsageAverage: newMetricMongodbatlasProcessCPUUsageAverage(settings.MongodbatlasProcessCPUUsageAverage), + metricMongodbatlasProcessCPUUsageMax: newMetricMongodbatlasProcessCPUUsageMax(settings.MongodbatlasProcessCPUUsageMax), + metricMongodbatlasProcessCursors: newMetricMongodbatlasProcessCursors(settings.MongodbatlasProcessCursors), + metricMongodbatlasProcessDbDocumentRate: newMetricMongodbatlasProcessDbDocumentRate(settings.MongodbatlasProcessDbDocumentRate), + metricMongodbatlasProcessDbOperationsRate: newMetricMongodbatlasProcessDbOperationsRate(settings.MongodbatlasProcessDbOperationsRate), + metricMongodbatlasProcessDbOperationsTime: newMetricMongodbatlasProcessDbOperationsTime(settings.MongodbatlasProcessDbOperationsTime), + metricMongodbatlasProcessDbQueryExecutorScanned: newMetricMongodbatlasProcessDbQueryExecutorScanned(settings.MongodbatlasProcessDbQueryExecutorScanned), + metricMongodbatlasProcessDbQueryTargetingScannedPerReturned: newMetricMongodbatlasProcessDbQueryTargetingScannedPerReturned(settings.MongodbatlasProcessDbQueryTargetingScannedPerReturned), + metricMongodbatlasProcessDbStorage: newMetricMongodbatlasProcessDbStorage(settings.MongodbatlasProcessDbStorage), + metricMongodbatlasProcessFtsCPUUsage: newMetricMongodbatlasProcessFtsCPUUsage(settings.MongodbatlasProcessFtsCPUUsage), + metricMongodbatlasProcessGlobalLock: newMetricMongodbatlasProcessGlobalLock(settings.MongodbatlasProcessGlobalLock), + metricMongodbatlasProcessIndexBtreeMissRatio: newMetricMongodbatlasProcessIndexBtreeMissRatio(settings.MongodbatlasProcessIndexBtreeMissRatio), + metricMongodbatlasProcessIndexCounters: newMetricMongodbatlasProcessIndexCounters(settings.MongodbatlasProcessIndexCounters), + metricMongodbatlasProcessJournalingCommits: newMetricMongodbatlasProcessJournalingCommits(settings.MongodbatlasProcessJournalingCommits), + metricMongodbatlasProcessJournalingDataFiles: newMetricMongodbatlasProcessJournalingDataFiles(settings.MongodbatlasProcessJournalingDataFiles), + metricMongodbatlasProcessJournalingWritten: newMetricMongodbatlasProcessJournalingWritten(settings.MongodbatlasProcessJournalingWritten), + metricMongodbatlasProcessMemoryUsage: newMetricMongodbatlasProcessMemoryUsage(settings.MongodbatlasProcessMemoryUsage), + metricMongodbatlasProcessNetworkIo: newMetricMongodbatlasProcessNetworkIo(settings.MongodbatlasProcessNetworkIo), + metricMongodbatlasProcessNetworkRequests: newMetricMongodbatlasProcessNetworkRequests(settings.MongodbatlasProcessNetworkRequests), + metricMongodbatlasProcessOplogRate: newMetricMongodbatlasProcessOplogRate(settings.MongodbatlasProcessOplogRate), + metricMongodbatlasProcessOplogTime: newMetricMongodbatlasProcessOplogTime(settings.MongodbatlasProcessOplogTime), + metricMongodbatlasProcessPageFaults: newMetricMongodbatlasProcessPageFaults(settings.MongodbatlasProcessPageFaults), + metricMongodbatlasProcessRestarts: newMetricMongodbatlasProcessRestarts(settings.MongodbatlasProcessRestarts), + metricMongodbatlasProcessTickets: newMetricMongodbatlasProcessTickets(settings.MongodbatlasProcessTickets), + metricMongodbatlasSystemCPUNormalizedUsageAverage: newMetricMongodbatlasSystemCPUNormalizedUsageAverage(settings.MongodbatlasSystemCPUNormalizedUsageAverage), + metricMongodbatlasSystemCPUNormalizedUsageMax: newMetricMongodbatlasSystemCPUNormalizedUsageMax(settings.MongodbatlasSystemCPUNormalizedUsageMax), + metricMongodbatlasSystemCPUUsageAverage: 
newMetricMongodbatlasSystemCPUUsageAverage(settings.MongodbatlasSystemCPUUsageAverage), + metricMongodbatlasSystemCPUUsageMax: newMetricMongodbatlasSystemCPUUsageMax(settings.MongodbatlasSystemCPUUsageMax), + metricMongodbatlasSystemFtsCPUNormalizedUsage: newMetricMongodbatlasSystemFtsCPUNormalizedUsage(settings.MongodbatlasSystemFtsCPUNormalizedUsage), + metricMongodbatlasSystemFtsCPUUsage: newMetricMongodbatlasSystemFtsCPUUsage(settings.MongodbatlasSystemFtsCPUUsage), + metricMongodbatlasSystemFtsDiskUsed: newMetricMongodbatlasSystemFtsDiskUsed(settings.MongodbatlasSystemFtsDiskUsed), + metricMongodbatlasSystemFtsMemoryUsage: newMetricMongodbatlasSystemFtsMemoryUsage(settings.MongodbatlasSystemFtsMemoryUsage), + metricMongodbatlasSystemMemoryUsageAverage: newMetricMongodbatlasSystemMemoryUsageAverage(settings.MongodbatlasSystemMemoryUsageAverage), + metricMongodbatlasSystemMemoryUsageMax: newMetricMongodbatlasSystemMemoryUsageMax(settings.MongodbatlasSystemMemoryUsageMax), + metricMongodbatlasSystemNetworkIoAverage: newMetricMongodbatlasSystemNetworkIoAverage(settings.MongodbatlasSystemNetworkIoAverage), + metricMongodbatlasSystemNetworkIoMax: newMetricMongodbatlasSystemNetworkIoMax(settings.MongodbatlasSystemNetworkIoMax), + metricMongodbatlasSystemPagingIoAverage: newMetricMongodbatlasSystemPagingIoAverage(settings.MongodbatlasSystemPagingIoAverage), + metricMongodbatlasSystemPagingIoMax: newMetricMongodbatlasSystemPagingIoMax(settings.MongodbatlasSystemPagingIoMax), + metricMongodbatlasSystemPagingUsageAverage: newMetricMongodbatlasSystemPagingUsageAverage(settings.MongodbatlasSystemPagingUsageAverage), + metricMongodbatlasSystemPagingUsageMax: newMetricMongodbatlasSystemPagingUsageMax(settings.MongodbatlasSystemPagingUsageMax), + } + for _, op := range options { + op(mb) + } + return mb +} + +// updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity. +func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) { + if mb.metricsCapacity < rm.ScopeMetrics().At(0).Metrics().Len() { + mb.metricsCapacity = rm.ScopeMetrics().At(0).Metrics().Len() + } + if mb.resourceCapacity < rm.Resource().Attributes().Len() { + mb.resourceCapacity = rm.Resource().Attributes().Len() + } +} + +// ResourceOption applies changes to provided resource. +type ResourceOption func(pdata.Resource) + +// WithMongodbAtlasDbName sets provided value as "mongodb_atlas.db.name" attribute for current resource. +func WithMongodbAtlasDbName(val string) ResourceOption { + return func(r pdata.Resource) { + r.Attributes().UpsertString("mongodb_atlas.db.name", val) + } +} + +// WithMongodbAtlasDiskPartition sets provided value as "mongodb_atlas.disk.partition" attribute for current resource. +func WithMongodbAtlasDiskPartition(val string) ResourceOption { + return func(r pdata.Resource) { + r.Attributes().UpsertString("mongodb_atlas.disk.partition", val) + } +} + +// WithMongodbAtlasHostName sets provided value as "mongodb_atlas.host.name" attribute for current resource. +func WithMongodbAtlasHostName(val string) ResourceOption { + return func(r pdata.Resource) { + r.Attributes().UpsertString("mongodb_atlas.host.name", val) + } +} + +// WithMongodbAtlasOrgName sets provided value as "mongodb_atlas.org_name" attribute for current resource. 
+func WithMongodbAtlasOrgName(val string) ResourceOption {
+	return func(r pdata.Resource) {
+		r.Attributes().UpsertString("mongodb_atlas.org_name", val)
+	}
+}
+
+// WithMongodbAtlasProcessID sets provided value as "mongodb_atlas.process.id" attribute for current resource.
+func WithMongodbAtlasProcessID(val string) ResourceOption {
+	return func(r pdata.Resource) {
+		r.Attributes().UpsertString("mongodb_atlas.process.id", val)
+	}
+}
+
+// WithMongodbAtlasProcessPort sets provided value as "mongodb_atlas.process.port" attribute for current resource.
+func WithMongodbAtlasProcessPort(val string) ResourceOption {
+	return func(r pdata.Resource) {
+		r.Attributes().UpsertString("mongodb_atlas.process.port", val)
+	}
+}
+
+// WithMongodbAtlasProcessTypeName sets provided value as "mongodb_atlas.process.type_name" attribute for current resource.
+func WithMongodbAtlasProcessTypeName(val string) ResourceOption {
+	return func(r pdata.Resource) {
+		r.Attributes().UpsertString("mongodb_atlas.process.type_name", val)
+	}
+}
+
+// WithMongodbAtlasProjectID sets provided value as "mongodb_atlas.project.id" attribute for current resource.
+func WithMongodbAtlasProjectID(val string) ResourceOption {
+	return func(r pdata.Resource) {
+		r.Attributes().UpsertString("mongodb_atlas.project.id", val)
+	}
+}
+
+// WithMongodbAtlasProjectName sets provided value as "mongodb_atlas.project.name" attribute for current resource.
+func WithMongodbAtlasProjectName(val string) ResourceOption {
+	return func(r pdata.Resource) {
+		r.Attributes().UpsertString("mongodb_atlas.project.name", val)
+	}
+}
+
+// EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for
+// recording another set of data points as part of another resource. This function can be helpful when one scraper
+// needs to emit metrics from several resources. Otherwise calling this function is not required;
+// `Emit` can be called on its own instead. Resource attributes should be provided as ResourceOption arguments.
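+// A usage sketch (the recorded value, the `now` timestamp, and the resource names
+// below are illustrative assumptions, not values produced by this receiver):
+//
+//	mb.RecordMongodbatlasProcessConnectionsDataPoint(now, 27)
+//	mb.EmitForResource(WithMongodbAtlasProjectName("my-project"), WithMongodbAtlasHostName("host-0"))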
+func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { + rm := pdata.NewResourceMetrics() + rm.Resource().Attributes().EnsureCapacity(mb.resourceCapacity) + for _, op := range ro { + op(rm.Resource()) + } + ils := rm.ScopeMetrics().AppendEmpty() + ils.Scope().SetName("otelcol/mongoatlasreceiver") + ils.Metrics().EnsureCapacity(mb.metricsCapacity) + mb.metricMongodbatlasDbCounts.emit(ils.Metrics()) + mb.metricMongodbatlasDbSize.emit(ils.Metrics()) + mb.metricMongodbatlasDiskPartitionIopsAverage.emit(ils.Metrics()) + mb.metricMongodbatlasDiskPartitionIopsMax.emit(ils.Metrics()) + mb.metricMongodbatlasDiskPartitionLatencyAverage.emit(ils.Metrics()) + mb.metricMongodbatlasDiskPartitionLatencyMax.emit(ils.Metrics()) + mb.metricMongodbatlasDiskPartitionSpaceAverage.emit(ils.Metrics()) + mb.metricMongodbatlasDiskPartitionSpaceMax.emit(ils.Metrics()) + mb.metricMongodbatlasDiskPartitionUsageAverage.emit(ils.Metrics()) + mb.metricMongodbatlasDiskPartitionUsageMax.emit(ils.Metrics()) + mb.metricMongodbatlasDiskPartitionUtilizationAverage.emit(ils.Metrics()) + mb.metricMongodbatlasDiskPartitionUtilizationMax.emit(ils.Metrics()) + mb.metricMongodbatlasProcessAsserts.emit(ils.Metrics()) + mb.metricMongodbatlasProcessBackgroundFlush.emit(ils.Metrics()) + mb.metricMongodbatlasProcessCacheIo.emit(ils.Metrics()) + mb.metricMongodbatlasProcessCacheSize.emit(ils.Metrics()) + mb.metricMongodbatlasProcessConnections.emit(ils.Metrics()) + mb.metricMongodbatlasProcessCPUChildrenNormalizedUsageAverage.emit(ils.Metrics()) + mb.metricMongodbatlasProcessCPUChildrenNormalizedUsageMax.emit(ils.Metrics()) + mb.metricMongodbatlasProcessCPUChildrenUsageAverage.emit(ils.Metrics()) + mb.metricMongodbatlasProcessCPUChildrenUsageMax.emit(ils.Metrics()) + mb.metricMongodbatlasProcessCPUNormalizedUsageAverage.emit(ils.Metrics()) + mb.metricMongodbatlasProcessCPUNormalizedUsageMax.emit(ils.Metrics()) + mb.metricMongodbatlasProcessCPUUsageAverage.emit(ils.Metrics()) + mb.metricMongodbatlasProcessCPUUsageMax.emit(ils.Metrics()) + mb.metricMongodbatlasProcessCursors.emit(ils.Metrics()) + mb.metricMongodbatlasProcessDbDocumentRate.emit(ils.Metrics()) + mb.metricMongodbatlasProcessDbOperationsRate.emit(ils.Metrics()) + mb.metricMongodbatlasProcessDbOperationsTime.emit(ils.Metrics()) + mb.metricMongodbatlasProcessDbQueryExecutorScanned.emit(ils.Metrics()) + mb.metricMongodbatlasProcessDbQueryTargetingScannedPerReturned.emit(ils.Metrics()) + mb.metricMongodbatlasProcessDbStorage.emit(ils.Metrics()) + mb.metricMongodbatlasProcessFtsCPUUsage.emit(ils.Metrics()) + mb.metricMongodbatlasProcessGlobalLock.emit(ils.Metrics()) + mb.metricMongodbatlasProcessIndexBtreeMissRatio.emit(ils.Metrics()) + mb.metricMongodbatlasProcessIndexCounters.emit(ils.Metrics()) + mb.metricMongodbatlasProcessJournalingCommits.emit(ils.Metrics()) + mb.metricMongodbatlasProcessJournalingDataFiles.emit(ils.Metrics()) + mb.metricMongodbatlasProcessJournalingWritten.emit(ils.Metrics()) + mb.metricMongodbatlasProcessMemoryUsage.emit(ils.Metrics()) + mb.metricMongodbatlasProcessNetworkIo.emit(ils.Metrics()) + mb.metricMongodbatlasProcessNetworkRequests.emit(ils.Metrics()) + mb.metricMongodbatlasProcessOplogRate.emit(ils.Metrics()) + mb.metricMongodbatlasProcessOplogTime.emit(ils.Metrics()) + mb.metricMongodbatlasProcessPageFaults.emit(ils.Metrics()) + mb.metricMongodbatlasProcessRestarts.emit(ils.Metrics()) + mb.metricMongodbatlasProcessTickets.emit(ils.Metrics()) + mb.metricMongodbatlasSystemCPUNormalizedUsageAverage.emit(ils.Metrics()) + 
mb.metricMongodbatlasSystemCPUNormalizedUsageMax.emit(ils.Metrics()) + mb.metricMongodbatlasSystemCPUUsageAverage.emit(ils.Metrics()) + mb.metricMongodbatlasSystemCPUUsageMax.emit(ils.Metrics()) + mb.metricMongodbatlasSystemFtsCPUNormalizedUsage.emit(ils.Metrics()) + mb.metricMongodbatlasSystemFtsCPUUsage.emit(ils.Metrics()) + mb.metricMongodbatlasSystemFtsDiskUsed.emit(ils.Metrics()) + mb.metricMongodbatlasSystemFtsMemoryUsage.emit(ils.Metrics()) + mb.metricMongodbatlasSystemMemoryUsageAverage.emit(ils.Metrics()) + mb.metricMongodbatlasSystemMemoryUsageMax.emit(ils.Metrics()) + mb.metricMongodbatlasSystemNetworkIoAverage.emit(ils.Metrics()) + mb.metricMongodbatlasSystemNetworkIoMax.emit(ils.Metrics()) + mb.metricMongodbatlasSystemPagingIoAverage.emit(ils.Metrics()) + mb.metricMongodbatlasSystemPagingIoMax.emit(ils.Metrics()) + mb.metricMongodbatlasSystemPagingUsageAverage.emit(ils.Metrics()) + mb.metricMongodbatlasSystemPagingUsageMax.emit(ils.Metrics()) + if ils.Metrics().Len() > 0 { + mb.updateCapacity(rm) + rm.MoveTo(mb.metricsBuffer.ResourceMetrics().AppendEmpty()) + } +} + +// Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for +// recording another set of metrics. This function will be responsible for applying all the transformations required to +// produce metric representation defined in metadata and user settings, e.g. delta or cumulative. +func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pdata.Metrics { + mb.EmitForResource(ro...) + metrics := pdata.NewMetrics() + mb.metricsBuffer.MoveTo(metrics) + return metrics +} + +// RecordMongodbatlasDbCountsDataPoint adds a data point to mongodbatlas.db.counts metric. +func (mb *MetricsBuilder) RecordMongodbatlasDbCountsDataPoint(ts pdata.Timestamp, val float64, objectTypeAttributeValue string) { + mb.metricMongodbatlasDbCounts.recordDataPoint(mb.startTime, ts, val, objectTypeAttributeValue) +} + +// RecordMongodbatlasDbSizeDataPoint adds a data point to mongodbatlas.db.size metric. +func (mb *MetricsBuilder) RecordMongodbatlasDbSizeDataPoint(ts pdata.Timestamp, val float64, objectTypeAttributeValue string) { + mb.metricMongodbatlasDbSize.recordDataPoint(mb.startTime, ts, val, objectTypeAttributeValue) +} + +// RecordMongodbatlasDiskPartitionIopsAverageDataPoint adds a data point to mongodbatlas.disk.partition.iops.average metric. +func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionIopsAverageDataPoint(ts pdata.Timestamp, val float64, diskDirectionAttributeValue string) { + mb.metricMongodbatlasDiskPartitionIopsAverage.recordDataPoint(mb.startTime, ts, val, diskDirectionAttributeValue) +} + +// RecordMongodbatlasDiskPartitionIopsMaxDataPoint adds a data point to mongodbatlas.disk.partition.iops.max metric. +func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionIopsMaxDataPoint(ts pdata.Timestamp, val float64, diskDirectionAttributeValue string) { + mb.metricMongodbatlasDiskPartitionIopsMax.recordDataPoint(mb.startTime, ts, val, diskDirectionAttributeValue) +} + +// RecordMongodbatlasDiskPartitionLatencyAverageDataPoint adds a data point to mongodbatlas.disk.partition.latency.average metric. 
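+// For example (the latency value is an illustrative assumption):
+//
+//	mb.RecordMongodbatlasDiskPartitionLatencyAverageDataPoint(ts, 3.2, AttributeDiskDirection.Read)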
+func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionLatencyAverageDataPoint(ts pdata.Timestamp, val float64, diskDirectionAttributeValue string) { + mb.metricMongodbatlasDiskPartitionLatencyAverage.recordDataPoint(mb.startTime, ts, val, diskDirectionAttributeValue) +} + +// RecordMongodbatlasDiskPartitionLatencyMaxDataPoint adds a data point to mongodbatlas.disk.partition.latency.max metric. +func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionLatencyMaxDataPoint(ts pdata.Timestamp, val float64, diskDirectionAttributeValue string) { + mb.metricMongodbatlasDiskPartitionLatencyMax.recordDataPoint(mb.startTime, ts, val, diskDirectionAttributeValue) +} + +// RecordMongodbatlasDiskPartitionSpaceAverageDataPoint adds a data point to mongodbatlas.disk.partition.space.average metric. +func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionSpaceAverageDataPoint(ts pdata.Timestamp, val float64, diskStatusAttributeValue string) { + mb.metricMongodbatlasDiskPartitionSpaceAverage.recordDataPoint(mb.startTime, ts, val, diskStatusAttributeValue) +} + +// RecordMongodbatlasDiskPartitionSpaceMaxDataPoint adds a data point to mongodbatlas.disk.partition.space.max metric. +func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionSpaceMaxDataPoint(ts pdata.Timestamp, val float64, diskStatusAttributeValue string) { + mb.metricMongodbatlasDiskPartitionSpaceMax.recordDataPoint(mb.startTime, ts, val, diskStatusAttributeValue) +} + +// RecordMongodbatlasDiskPartitionUsageAverageDataPoint adds a data point to mongodbatlas.disk.partition.usage.average metric. +func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionUsageAverageDataPoint(ts pdata.Timestamp, val float64, diskStatusAttributeValue string) { + mb.metricMongodbatlasDiskPartitionUsageAverage.recordDataPoint(mb.startTime, ts, val, diskStatusAttributeValue) +} + +// RecordMongodbatlasDiskPartitionUsageMaxDataPoint adds a data point to mongodbatlas.disk.partition.usage.max metric. +func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionUsageMaxDataPoint(ts pdata.Timestamp, val float64, diskStatusAttributeValue string) { + mb.metricMongodbatlasDiskPartitionUsageMax.recordDataPoint(mb.startTime, ts, val, diskStatusAttributeValue) +} + +// RecordMongodbatlasDiskPartitionUtilizationAverageDataPoint adds a data point to mongodbatlas.disk.partition.utilization.average metric. +func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionUtilizationAverageDataPoint(ts pdata.Timestamp, val float64, diskStatusAttributeValue string) { + mb.metricMongodbatlasDiskPartitionUtilizationAverage.recordDataPoint(mb.startTime, ts, val, diskStatusAttributeValue) +} + +// RecordMongodbatlasDiskPartitionUtilizationMaxDataPoint adds a data point to mongodbatlas.disk.partition.utilization.max metric. +func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionUtilizationMaxDataPoint(ts pdata.Timestamp, val float64, diskStatusAttributeValue string) { + mb.metricMongodbatlasDiskPartitionUtilizationMax.recordDataPoint(mb.startTime, ts, val, diskStatusAttributeValue) +} + +// RecordMongodbatlasProcessAssertsDataPoint adds a data point to mongodbatlas.process.asserts metric. +func (mb *MetricsBuilder) RecordMongodbatlasProcessAssertsDataPoint(ts pdata.Timestamp, val float64, assertTypeAttributeValue string) { + mb.metricMongodbatlasProcessAsserts.recordDataPoint(mb.startTime, ts, val, assertTypeAttributeValue) +} + +// RecordMongodbatlasProcessBackgroundFlushDataPoint adds a data point to mongodbatlas.process.background_flush metric. 
+func (mb *MetricsBuilder) RecordMongodbatlasProcessBackgroundFlushDataPoint(ts pdata.Timestamp, val float64) { + mb.metricMongodbatlasProcessBackgroundFlush.recordDataPoint(mb.startTime, ts, val) +} + +// RecordMongodbatlasProcessCacheIoDataPoint adds a data point to mongodbatlas.process.cache.io metric. +func (mb *MetricsBuilder) RecordMongodbatlasProcessCacheIoDataPoint(ts pdata.Timestamp, val float64, cacheDirectionAttributeValue string) { + mb.metricMongodbatlasProcessCacheIo.recordDataPoint(mb.startTime, ts, val, cacheDirectionAttributeValue) +} + +// RecordMongodbatlasProcessCacheSizeDataPoint adds a data point to mongodbatlas.process.cache.size metric. +func (mb *MetricsBuilder) RecordMongodbatlasProcessCacheSizeDataPoint(ts pdata.Timestamp, val float64, cacheStatusAttributeValue string) { + mb.metricMongodbatlasProcessCacheSize.recordDataPoint(mb.startTime, ts, val, cacheStatusAttributeValue) +} + +// RecordMongodbatlasProcessConnectionsDataPoint adds a data point to mongodbatlas.process.connections metric. +func (mb *MetricsBuilder) RecordMongodbatlasProcessConnectionsDataPoint(ts pdata.Timestamp, val float64) { + mb.metricMongodbatlasProcessConnections.recordDataPoint(mb.startTime, ts, val) +} + +// RecordMongodbatlasProcessCPUChildrenNormalizedUsageAverageDataPoint adds a data point to mongodbatlas.process.cpu.children.normalized.usage.average metric. +func (mb *MetricsBuilder) RecordMongodbatlasProcessCPUChildrenNormalizedUsageAverageDataPoint(ts pdata.Timestamp, val float64, cpuStateAttributeValue string) { + mb.metricMongodbatlasProcessCPUChildrenNormalizedUsageAverage.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue) +} + +// RecordMongodbatlasProcessCPUChildrenNormalizedUsageMaxDataPoint adds a data point to mongodbatlas.process.cpu.children.normalized.usage.max metric. +func (mb *MetricsBuilder) RecordMongodbatlasProcessCPUChildrenNormalizedUsageMaxDataPoint(ts pdata.Timestamp, val float64, cpuStateAttributeValue string) { + mb.metricMongodbatlasProcessCPUChildrenNormalizedUsageMax.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue) +} + +// RecordMongodbatlasProcessCPUChildrenUsageAverageDataPoint adds a data point to mongodbatlas.process.cpu.children.usage.average metric. +func (mb *MetricsBuilder) RecordMongodbatlasProcessCPUChildrenUsageAverageDataPoint(ts pdata.Timestamp, val float64, cpuStateAttributeValue string) { + mb.metricMongodbatlasProcessCPUChildrenUsageAverage.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue) +} + +// RecordMongodbatlasProcessCPUChildrenUsageMaxDataPoint adds a data point to mongodbatlas.process.cpu.children.usage.max metric. +func (mb *MetricsBuilder) RecordMongodbatlasProcessCPUChildrenUsageMaxDataPoint(ts pdata.Timestamp, val float64, cpuStateAttributeValue string) { + mb.metricMongodbatlasProcessCPUChildrenUsageMax.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue) +} + +// RecordMongodbatlasProcessCPUNormalizedUsageAverageDataPoint adds a data point to mongodbatlas.process.cpu.normalized.usage.average metric. +func (mb *MetricsBuilder) RecordMongodbatlasProcessCPUNormalizedUsageAverageDataPoint(ts pdata.Timestamp, val float64, cpuStateAttributeValue string) { + mb.metricMongodbatlasProcessCPUNormalizedUsageAverage.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue) +} + +// RecordMongodbatlasProcessCPUNormalizedUsageMaxDataPoint adds a data point to mongodbatlas.process.cpu.normalized.usage.max metric. 
+func (mb *MetricsBuilder) RecordMongodbatlasProcessCPUNormalizedUsageMaxDataPoint(ts pdata.Timestamp, val float64, cpuStateAttributeValue string) { + mb.metricMongodbatlasProcessCPUNormalizedUsageMax.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue) +} + +// RecordMongodbatlasProcessCPUUsageAverageDataPoint adds a data point to mongodbatlas.process.cpu.usage.average metric. +func (mb *MetricsBuilder) RecordMongodbatlasProcessCPUUsageAverageDataPoint(ts pdata.Timestamp, val float64, cpuStateAttributeValue string) { + mb.metricMongodbatlasProcessCPUUsageAverage.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue) +} + +// RecordMongodbatlasProcessCPUUsageMaxDataPoint adds a data point to mongodbatlas.process.cpu.usage.max metric. +func (mb *MetricsBuilder) RecordMongodbatlasProcessCPUUsageMaxDataPoint(ts pdata.Timestamp, val float64, cpuStateAttributeValue string) { + mb.metricMongodbatlasProcessCPUUsageMax.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue) +} + +// RecordMongodbatlasProcessCursorsDataPoint adds a data point to mongodbatlas.process.cursors metric. +func (mb *MetricsBuilder) RecordMongodbatlasProcessCursorsDataPoint(ts pdata.Timestamp, val float64, cursorStateAttributeValue string) { + mb.metricMongodbatlasProcessCursors.recordDataPoint(mb.startTime, ts, val, cursorStateAttributeValue) +} + +// RecordMongodbatlasProcessDbDocumentRateDataPoint adds a data point to mongodbatlas.process.db.document.rate metric. +func (mb *MetricsBuilder) RecordMongodbatlasProcessDbDocumentRateDataPoint(ts pdata.Timestamp, val float64, documentStatusAttributeValue string) { + mb.metricMongodbatlasProcessDbDocumentRate.recordDataPoint(mb.startTime, ts, val, documentStatusAttributeValue) +} + +// RecordMongodbatlasProcessDbOperationsRateDataPoint adds a data point to mongodbatlas.process.db.operations.rate metric. +func (mb *MetricsBuilder) RecordMongodbatlasProcessDbOperationsRateDataPoint(ts pdata.Timestamp, val float64, operationAttributeValue string, clusterRoleAttributeValue string) { + mb.metricMongodbatlasProcessDbOperationsRate.recordDataPoint(mb.startTime, ts, val, operationAttributeValue, clusterRoleAttributeValue) +} + +// RecordMongodbatlasProcessDbOperationsTimeDataPoint adds a data point to mongodbatlas.process.db.operations.time metric. +func (mb *MetricsBuilder) RecordMongodbatlasProcessDbOperationsTimeDataPoint(ts pdata.Timestamp, val float64, executionTypeAttributeValue string) { + mb.metricMongodbatlasProcessDbOperationsTime.recordDataPoint(mb.startTime, ts, val, executionTypeAttributeValue) +} + +// RecordMongodbatlasProcessDbQueryExecutorScannedDataPoint adds a data point to mongodbatlas.process.db.query_executor.scanned metric. +func (mb *MetricsBuilder) RecordMongodbatlasProcessDbQueryExecutorScannedDataPoint(ts pdata.Timestamp, val float64, scannedTypeAttributeValue string) { + mb.metricMongodbatlasProcessDbQueryExecutorScanned.recordDataPoint(mb.startTime, ts, val, scannedTypeAttributeValue) +} + +// RecordMongodbatlasProcessDbQueryTargetingScannedPerReturnedDataPoint adds a data point to mongodbatlas.process.db.query_targeting.scanned_per_returned metric. 
+func (mb *MetricsBuilder) RecordMongodbatlasProcessDbQueryTargetingScannedPerReturnedDataPoint(ts pdata.Timestamp, val float64, scannedTypeAttributeValue string) { + mb.metricMongodbatlasProcessDbQueryTargetingScannedPerReturned.recordDataPoint(mb.startTime, ts, val, scannedTypeAttributeValue) +} + +// RecordMongodbatlasProcessDbStorageDataPoint adds a data point to mongodbatlas.process.db.storage metric. +func (mb *MetricsBuilder) RecordMongodbatlasProcessDbStorageDataPoint(ts pdata.Timestamp, val float64, storageStatusAttributeValue string) { + mb.metricMongodbatlasProcessDbStorage.recordDataPoint(mb.startTime, ts, val, storageStatusAttributeValue) +} + +// RecordMongodbatlasProcessFtsCPUUsageDataPoint adds a data point to mongodbatlas.process.fts.cpu.usage metric. +func (mb *MetricsBuilder) RecordMongodbatlasProcessFtsCPUUsageDataPoint(ts pdata.Timestamp, val float64, cpuStateAttributeValue string) { + mb.metricMongodbatlasProcessFtsCPUUsage.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue) +} + +// RecordMongodbatlasProcessGlobalLockDataPoint adds a data point to mongodbatlas.process.global_lock metric. +func (mb *MetricsBuilder) RecordMongodbatlasProcessGlobalLockDataPoint(ts pdata.Timestamp, val float64, globalLockStateAttributeValue string) { + mb.metricMongodbatlasProcessGlobalLock.recordDataPoint(mb.startTime, ts, val, globalLockStateAttributeValue) +} + +// RecordMongodbatlasProcessIndexBtreeMissRatioDataPoint adds a data point to mongodbatlas.process.index.btree_miss_ratio metric. +func (mb *MetricsBuilder) RecordMongodbatlasProcessIndexBtreeMissRatioDataPoint(ts pdata.Timestamp, val float64) { + mb.metricMongodbatlasProcessIndexBtreeMissRatio.recordDataPoint(mb.startTime, ts, val) +} + +// RecordMongodbatlasProcessIndexCountersDataPoint adds a data point to mongodbatlas.process.index.counters metric. +func (mb *MetricsBuilder) RecordMongodbatlasProcessIndexCountersDataPoint(ts pdata.Timestamp, val float64, btreeCounterTypeAttributeValue string) { + mb.metricMongodbatlasProcessIndexCounters.recordDataPoint(mb.startTime, ts, val, btreeCounterTypeAttributeValue) +} + +// RecordMongodbatlasProcessJournalingCommitsDataPoint adds a data point to mongodbatlas.process.journaling.commits metric. +func (mb *MetricsBuilder) RecordMongodbatlasProcessJournalingCommitsDataPoint(ts pdata.Timestamp, val float64) { + mb.metricMongodbatlasProcessJournalingCommits.recordDataPoint(mb.startTime, ts, val) +} + +// RecordMongodbatlasProcessJournalingDataFilesDataPoint adds a data point to mongodbatlas.process.journaling.data_files metric. +func (mb *MetricsBuilder) RecordMongodbatlasProcessJournalingDataFilesDataPoint(ts pdata.Timestamp, val float64) { + mb.metricMongodbatlasProcessJournalingDataFiles.recordDataPoint(mb.startTime, ts, val) +} + +// RecordMongodbatlasProcessJournalingWrittenDataPoint adds a data point to mongodbatlas.process.journaling.written metric. +func (mb *MetricsBuilder) RecordMongodbatlasProcessJournalingWrittenDataPoint(ts pdata.Timestamp, val float64) { + mb.metricMongodbatlasProcessJournalingWritten.recordDataPoint(mb.startTime, ts, val) +} + +// RecordMongodbatlasProcessMemoryUsageDataPoint adds a data point to mongodbatlas.process.memory.usage metric. 
+func (mb *MetricsBuilder) RecordMongodbatlasProcessMemoryUsageDataPoint(ts pdata.Timestamp, val float64, memoryStateAttributeValue string) { + mb.metricMongodbatlasProcessMemoryUsage.recordDataPoint(mb.startTime, ts, val, memoryStateAttributeValue) +} + +// RecordMongodbatlasProcessNetworkIoDataPoint adds a data point to mongodbatlas.process.network.io metric. +func (mb *MetricsBuilder) RecordMongodbatlasProcessNetworkIoDataPoint(ts pdata.Timestamp, val float64, directionAttributeValue string) { + mb.metricMongodbatlasProcessNetworkIo.recordDataPoint(mb.startTime, ts, val, directionAttributeValue) +} + +// RecordMongodbatlasProcessNetworkRequestsDataPoint adds a data point to mongodbatlas.process.network.requests metric. +func (mb *MetricsBuilder) RecordMongodbatlasProcessNetworkRequestsDataPoint(ts pdata.Timestamp, val float64) { + mb.metricMongodbatlasProcessNetworkRequests.recordDataPoint(mb.startTime, ts, val) +} + +// RecordMongodbatlasProcessOplogRateDataPoint adds a data point to mongodbatlas.process.oplog.rate metric. +func (mb *MetricsBuilder) RecordMongodbatlasProcessOplogRateDataPoint(ts pdata.Timestamp, val float64) { + mb.metricMongodbatlasProcessOplogRate.recordDataPoint(mb.startTime, ts, val) +} + +// RecordMongodbatlasProcessOplogTimeDataPoint adds a data point to mongodbatlas.process.oplog.time metric. +func (mb *MetricsBuilder) RecordMongodbatlasProcessOplogTimeDataPoint(ts pdata.Timestamp, val float64, oplogTypeAttributeValue string) { + mb.metricMongodbatlasProcessOplogTime.recordDataPoint(mb.startTime, ts, val, oplogTypeAttributeValue) +} + +// RecordMongodbatlasProcessPageFaultsDataPoint adds a data point to mongodbatlas.process.page_faults metric. +func (mb *MetricsBuilder) RecordMongodbatlasProcessPageFaultsDataPoint(ts pdata.Timestamp, val float64, memoryIssueTypeAttributeValue string) { + mb.metricMongodbatlasProcessPageFaults.recordDataPoint(mb.startTime, ts, val, memoryIssueTypeAttributeValue) +} + +// RecordMongodbatlasProcessRestartsDataPoint adds a data point to mongodbatlas.process.restarts metric. +func (mb *MetricsBuilder) RecordMongodbatlasProcessRestartsDataPoint(ts pdata.Timestamp, val float64) { + mb.metricMongodbatlasProcessRestarts.recordDataPoint(mb.startTime, ts, val) +} + +// RecordMongodbatlasProcessTicketsDataPoint adds a data point to mongodbatlas.process.tickets metric. +func (mb *MetricsBuilder) RecordMongodbatlasProcessTicketsDataPoint(ts pdata.Timestamp, val float64, ticketTypeAttributeValue string) { + mb.metricMongodbatlasProcessTickets.recordDataPoint(mb.startTime, ts, val, ticketTypeAttributeValue) +} + +// RecordMongodbatlasSystemCPUNormalizedUsageAverageDataPoint adds a data point to mongodbatlas.system.cpu.normalized.usage.average metric. +func (mb *MetricsBuilder) RecordMongodbatlasSystemCPUNormalizedUsageAverageDataPoint(ts pdata.Timestamp, val float64, cpuStateAttributeValue string) { + mb.metricMongodbatlasSystemCPUNormalizedUsageAverage.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue) +} + +// RecordMongodbatlasSystemCPUNormalizedUsageMaxDataPoint adds a data point to mongodbatlas.system.cpu.normalized.usage.max metric. +func (mb *MetricsBuilder) RecordMongodbatlasSystemCPUNormalizedUsageMaxDataPoint(ts pdata.Timestamp, val float64, cpuStateAttributeValue string) { + mb.metricMongodbatlasSystemCPUNormalizedUsageMax.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue) +} + +// RecordMongodbatlasSystemCPUUsageAverageDataPoint adds a data point to mongodbatlas.system.cpu.usage.average metric. 
+func (mb *MetricsBuilder) RecordMongodbatlasSystemCPUUsageAverageDataPoint(ts pdata.Timestamp, val float64, cpuStateAttributeValue string) { + mb.metricMongodbatlasSystemCPUUsageAverage.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue) +} + +// RecordMongodbatlasSystemCPUUsageMaxDataPoint adds a data point to mongodbatlas.system.cpu.usage.max metric. +func (mb *MetricsBuilder) RecordMongodbatlasSystemCPUUsageMaxDataPoint(ts pdata.Timestamp, val float64, cpuStateAttributeValue string) { + mb.metricMongodbatlasSystemCPUUsageMax.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue) +} + +// RecordMongodbatlasSystemFtsCPUNormalizedUsageDataPoint adds a data point to mongodbatlas.system.fts.cpu.normalized.usage metric. +func (mb *MetricsBuilder) RecordMongodbatlasSystemFtsCPUNormalizedUsageDataPoint(ts pdata.Timestamp, val float64, cpuStateAttributeValue string) { + mb.metricMongodbatlasSystemFtsCPUNormalizedUsage.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue) +} + +// RecordMongodbatlasSystemFtsCPUUsageDataPoint adds a data point to mongodbatlas.system.fts.cpu.usage metric. +func (mb *MetricsBuilder) RecordMongodbatlasSystemFtsCPUUsageDataPoint(ts pdata.Timestamp, val float64, cpuStateAttributeValue string) { + mb.metricMongodbatlasSystemFtsCPUUsage.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue) +} + +// RecordMongodbatlasSystemFtsDiskUsedDataPoint adds a data point to mongodbatlas.system.fts.disk.used metric. +func (mb *MetricsBuilder) RecordMongodbatlasSystemFtsDiskUsedDataPoint(ts pdata.Timestamp, val float64) { + mb.metricMongodbatlasSystemFtsDiskUsed.recordDataPoint(mb.startTime, ts, val) +} + +// RecordMongodbatlasSystemFtsMemoryUsageDataPoint adds a data point to mongodbatlas.system.fts.memory.usage metric. +func (mb *MetricsBuilder) RecordMongodbatlasSystemFtsMemoryUsageDataPoint(ts pdata.Timestamp, val float64, memoryStateAttributeValue string) { + mb.metricMongodbatlasSystemFtsMemoryUsage.recordDataPoint(mb.startTime, ts, val, memoryStateAttributeValue) +} + +// RecordMongodbatlasSystemMemoryUsageAverageDataPoint adds a data point to mongodbatlas.system.memory.usage.average metric. +func (mb *MetricsBuilder) RecordMongodbatlasSystemMemoryUsageAverageDataPoint(ts pdata.Timestamp, val float64, memoryStatusAttributeValue string) { + mb.metricMongodbatlasSystemMemoryUsageAverage.recordDataPoint(mb.startTime, ts, val, memoryStatusAttributeValue) +} + +// RecordMongodbatlasSystemMemoryUsageMaxDataPoint adds a data point to mongodbatlas.system.memory.usage.max metric. +func (mb *MetricsBuilder) RecordMongodbatlasSystemMemoryUsageMaxDataPoint(ts pdata.Timestamp, val float64, memoryStatusAttributeValue string) { + mb.metricMongodbatlasSystemMemoryUsageMax.recordDataPoint(mb.startTime, ts, val, memoryStatusAttributeValue) +} + +// RecordMongodbatlasSystemNetworkIoAverageDataPoint adds a data point to mongodbatlas.system.network.io.average metric. +func (mb *MetricsBuilder) RecordMongodbatlasSystemNetworkIoAverageDataPoint(ts pdata.Timestamp, val float64, directionAttributeValue string) { + mb.metricMongodbatlasSystemNetworkIoAverage.recordDataPoint(mb.startTime, ts, val, directionAttributeValue) +} + +// RecordMongodbatlasSystemNetworkIoMaxDataPoint adds a data point to mongodbatlas.system.network.io.max metric. 
+func (mb *MetricsBuilder) RecordMongodbatlasSystemNetworkIoMaxDataPoint(ts pdata.Timestamp, val float64, directionAttributeValue string) {
+	mb.metricMongodbatlasSystemNetworkIoMax.recordDataPoint(mb.startTime, ts, val, directionAttributeValue)
+}
+
+// RecordMongodbatlasSystemPagingIoAverageDataPoint adds a data point to mongodbatlas.system.paging.io.average metric.
+func (mb *MetricsBuilder) RecordMongodbatlasSystemPagingIoAverageDataPoint(ts pdata.Timestamp, val float64, directionAttributeValue string) {
+	mb.metricMongodbatlasSystemPagingIoAverage.recordDataPoint(mb.startTime, ts, val, directionAttributeValue)
+}
+
+// RecordMongodbatlasSystemPagingIoMaxDataPoint adds a data point to mongodbatlas.system.paging.io.max metric.
+func (mb *MetricsBuilder) RecordMongodbatlasSystemPagingIoMaxDataPoint(ts pdata.Timestamp, val float64, directionAttributeValue string) {
+	mb.metricMongodbatlasSystemPagingIoMax.recordDataPoint(mb.startTime, ts, val, directionAttributeValue)
+}
+
+// RecordMongodbatlasSystemPagingUsageAverageDataPoint adds a data point to mongodbatlas.system.paging.usage.average metric.
+func (mb *MetricsBuilder) RecordMongodbatlasSystemPagingUsageAverageDataPoint(ts pdata.Timestamp, val float64, directionAttributeValue string) {
+	mb.metricMongodbatlasSystemPagingUsageAverage.recordDataPoint(mb.startTime, ts, val, directionAttributeValue)
+}
+
+// RecordMongodbatlasSystemPagingUsageMaxDataPoint adds a data point to mongodbatlas.system.paging.usage.max metric.
+func (mb *MetricsBuilder) RecordMongodbatlasSystemPagingUsageMaxDataPoint(ts pdata.Timestamp, val float64, directionAttributeValue string) {
+	mb.metricMongodbatlasSystemPagingUsageMax.recordDataPoint(mb.startTime, ts, val, directionAttributeValue)
+}
+
+// Reset resets the metrics builder to its initial state. It should be used when the external metrics source is
+// restarted, and the metrics builder should update its startTime and reset its internal state accordingly.
+func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) {
+	mb.startTime = pdata.NewTimestampFromTime(time.Now())
+	for _, op := range options {
+		op(mb)
+	}
+}
+
+// Attributes contains the possible metric attributes that can be used.
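+// For example, Attributes.Direction (or A.Direction via the alias defined below)
+// resolves to the attribute key "direction"; recordDataPoint pairs such keys with
+// well-known values, e.g.:
+//
+//	dp.Attributes().Insert(A.Direction, pdata.NewValueString(AttributeDirection.Receive))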
+var Attributes = struct { + // AssertType (MongoDB assertion type) + AssertType string + // BtreeCounterType (Database index effectiveness) + BtreeCounterType string + // CacheDirection (Whether read into or written from) + CacheDirection string + // CacheStatus (Cache status) + CacheStatus string + // ClusterRole (Whether process is acting as replica or primary) + ClusterRole string + // CPUState (CPU state) + CPUState string + // CursorState (Whether cursor is open or timed out) + CursorState string + // Direction (Network traffic direction) + Direction string + // DiskDirection (Measurement type for disk operation) + DiskDirection string + // DiskStatus (Disk measurement type) + DiskStatus string + // DocumentStatus (Status of documents in the database) + DocumentStatus string + // ExecutionType (Type of command) + ExecutionType string + // GlobalLockState (Which queue is locked) + GlobalLockState string + // MemoryIssueType (Type of memory issue encountered) + MemoryIssueType string + // MemoryState (Memory usage type) + MemoryState string + // MemoryStatus (Memory measurement type) + MemoryStatus string + // ObjectType (MongoDB object type) + ObjectType string + // Operation (Type of database operation) + Operation string + // OplogType (Oplog type) + OplogType string + // ScannedType (Objects or indexes scanned during query) + ScannedType string + // StorageStatus (Views on database size) + StorageStatus string + // TicketType (Type of ticket available) + TicketType string +}{ + "assert_type", + "btree_counter_type", + "cache_direction", + "cache_status", + "cluster_role", + "cpu_state", + "cursor_state", + "direction", + "disk_direction", + "disk_status", + "document_status", + "execution_type", + "global_lock_state", + "memory_issue_type", + "memory_state", + "memory_status", + "object_type", + "operation", + "oplog_type", + "scanned_type", + "storage_status", + "ticket_type", +} + +// A is an alias for Attributes. +var A = Attributes + +// AttributeAssertType are the possible values that the attribute "assert_type" can have. +var AttributeAssertType = struct { + Regular string + Warning string + Msg string + User string +}{ + "regular", + "warning", + "msg", + "user", +} + +// AttributeBtreeCounterType are the possible values that the attribute "btree_counter_type" can have. +var AttributeBtreeCounterType = struct { + Accesses string + Hits string + Misses string +}{ + "accesses", + "hits", + "misses", +} + +// AttributeCacheDirection are the possible values that the attribute "cache_direction" can have. +var AttributeCacheDirection = struct { + ReadInto string + WrittenFrom string +}{ + "read_into", + "written_from", +} + +// AttributeCacheStatus are the possible values that the attribute "cache_status" can have. +var AttributeCacheStatus = struct { + Dirty string + Used string +}{ + "dirty", + "used", +} + +// AttributeClusterRole are the possible values that the attribute "cluster_role" can have. +var AttributeClusterRole = struct { + Primary string + Replica string +}{ + "primary", + "replica", +} + +// AttributeCPUState are the possible values that the attribute "cpu_state" can have. +var AttributeCPUState = struct { + Kernel string + User string + Nice string + Iowait string + Irq string + Softirq string + Guest string + Steal string +}{ + "kernel", + "user", + "nice", + "iowait", + "irq", + "softirq", + "guest", + "steal", +} + +// AttributeCursorState are the possible values that the attribute "cursor_state" can have. 
+var AttributeCursorState = struct { + TimedOut string + Open string +}{ + "timed_out", + "open", +} + +// AttributeDirection are the possible values that the attribute "direction" can have. +var AttributeDirection = struct { + Receive string + Transmit string +}{ + "receive", + "transmit", +} + +// AttributeDiskDirection are the possible values that the attribute "disk_direction" can have. +var AttributeDiskDirection = struct { + Read string + Write string + Total string +}{ + "read", + "write", + "total", +} + +// AttributeDiskStatus are the possible values that the attribute "disk_status" can have. +var AttributeDiskStatus = struct { + Free string + Used string +}{ + "free", + "used", +} + +// AttributeDocumentStatus are the possible values that the attribute "document_status" can have. +var AttributeDocumentStatus = struct { + Returned string + Inserted string + Updated string + Deleted string +}{ + "returned", + "inserted", + "updated", + "deleted", +} + +// AttributeExecutionType are the possible values that the attribute "execution_type" can have. +var AttributeExecutionType = struct { + Reads string + Writes string + Commands string +}{ + "reads", + "writes", + "commands", +} + +// AttributeGlobalLockState are the possible values that the attribute "global_lock_state" can have. +var AttributeGlobalLockState = struct { + CurrentQueueTotal string + CurrentQueueReaders string + CurrentQueueWriters string +}{ + "current_queue_total", + "current_queue_readers", + "current_queue_writers", +} + +// AttributeMemoryIssueType are the possible values that the attribute "memory_issue_type" can have. +var AttributeMemoryIssueType = struct { + ExtraInfo string + GlobalAccessesNotInMemory string + ExceptionsThrown string +}{ + "extra_info", + "global_accesses_not_in_memory", + "exceptions_thrown", +} + +// AttributeMemoryState are the possible values that the attribute "memory_state" can have. +var AttributeMemoryState = struct { + Resident string + Virtual string + Mapped string + Computed string + Shared string + Free string + Used string +}{ + "resident", + "virtual", + "mapped", + "computed", + "shared", + "free", + "used", +} + +// AttributeMemoryStatus are the possible values that the attribute "memory_status" can have. +var AttributeMemoryStatus = struct { + Available string + Buffers string + Cached string + Free string + Shared string + Used string +}{ + "available", + "buffers", + "cached", + "free", + "shared", + "used", +} + +// AttributeObjectType are the possible values that the attribute "object_type" can have. +var AttributeObjectType = struct { + Collection string + Index string + Extent string + Object string + View string + Storage string + Data string +}{ + "collection", + "index", + "extent", + "object", + "view", + "storage", + "data", +} + +// AttributeOperation are the possible values that the attribute "operation" can have. +var AttributeOperation = struct { + Cmd string + Query string + Update string + Delete string + Getmore string + Insert string + ScanAndOrder string +}{ + "cmd", + "query", + "update", + "delete", + "getmore", + "insert", + "scan_and_order", +} + +// AttributeOplogType are the possible values that the attribute "oplog_type" can have. +var AttributeOplogType = struct { + SlaveLagMasterTime string + MasterTime string + MasterLagTimeDiff string +}{ + "slave_lag_master_time", + "master_time", + "master_lag_time_diff", +} + +// AttributeScannedType are the possible values that the attribute "scanned_type" can have. 
+var AttributeScannedType = struct {
+	IndexItems string
+	Objects    string
+}{
+	"index_items",
+	"objects",
+}
+
+// AttributeStorageStatus are the possible values that the attribute "storage_status" can have.
+var AttributeStorageStatus = struct {
+	Total            string
+	DataSize         string
+	IndexSize        string
+	DataSizeWoSystem string
+}{
+	"total",
+	"data_size",
+	"index_size",
+	"data_size_wo_system",
+}
+
+// AttributeTicketType are the possible values that the attribute "ticket_type" can have.
+var AttributeTicketType = struct {
+	AvailableReads  string
+	AvailableWrites string
+}{
+	"available_reads",
+	"available_writes",
+}
diff --git a/receiver/mongodbatlasreceiver/internal/metadata/metric_name_mapping.go b/receiver/mongodbatlasreceiver/internal/metadata/metric_name_mapping.go
index 8a623c729566..25f982703c5a 100644
--- a/receiver/mongodbatlasreceiver/internal/metadata/metric_name_mapping.go
+++ b/receiver/mongodbatlasreceiver/internal/metadata/metric_name_mapping.go
@@ -15,707 +15,856 @@ package metadata // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver/internal/metadata"
 import (
-	"fmt"
 	"time"
 
 	"go.mongodb.org/atlas/mongodbatlas"
 	"go.opentelemetry.io/collector/model/pdata"
 )
 
-type metricMappingData struct {
-	metricName string
-	attributes map[string]pdata.Value
-}
+// metricRecordFunc records the data point to the metric builder at the supplied timestamp
+type metricRecordFunc func(*MetricsBuilder, *mongodbatlas.DataPoints, pdata.Timestamp)
 
-var metricNameMapping = map[string]metricMappingData{
+// getRecordFunc returns the metricRecordFunc that matches the metric name. Nil if none is found.
+func getRecordFunc(metricName string) metricRecordFunc {
+	switch metricName {
 	// MongoDB CPU usage. For hosts with more than one CPU core, these values can exceed 100%.
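+	// Illustrative usage (a sketch, not part of this change; the measurement
+	// loop and names like "meas" are assumptions): a caller resolves the
+	// record function once per measurement, then applies it per data point:
+	//
+	//	if record := getRecordFunc(meas.Name); record != nil {
+	//		for _, dp := range meas.DataPoints {
+	//			if dp.Value != nil { // Atlas can return sparse data points
+	//				record(mb, dp, ts) // ts: pdata.Timestamp parsed from dp.Timestamp
+	//			}
+	//		}
+	//	}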
- "PROCESS_CPU_USER": {"mongodbatlas.process.cpu.usage.average", map[string]pdata.Value{ - "cpu_state": pdata.NewValueString("user"), - }}, + case "PROCESS_CPU_USER": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessCPUUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.User) + } - "MAX_PROCESS_CPU_USER": {"mongodbatlas.process.cpu.usage.max", map[string]pdata.Value{ - "cpu_state": pdata.NewValueString("user"), - }}, + case "MAX_PROCESS_CPU_USER": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessCPUUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.User) + } - "PROCESS_CPU_KERNEL": {"mongodbatlas.process.cpu.usage.average", map[string]pdata.Value{ - "cpu_state": pdata.NewValueString("kernel"), - }}, + case "PROCESS_CPU_KERNEL": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessCPUUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.Kernel) + } - "MAX_PROCESS_CPU_KERNEL": {"mongodbatlas.process.cpu.usage.max", map[string]pdata.Value{ - "cpu_state": pdata.NewValueString("kernel"), - }}, + case "MAX_PROCESS_CPU_KERNEL": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessCPUUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.Kernel) + } - "PROCESS_CPU_CHILDREN_USER": {"mongodbatlas.process.cpu.children.usage.average", map[string]pdata.Value{ - "cpu_state": pdata.NewValueString("user"), - }}, + case "PROCESS_CPU_CHILDREN_USER": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessCPUChildrenUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.User) + } - "MAX_PROCESS_CPU_CHILDREN_USER": {"mongodbatlas.process.cpu.children.usage.max", map[string]pdata.Value{ - "cpu_state": pdata.NewValueString("user"), - }}, + case "MAX_PROCESS_CPU_CHILDREN_USER": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessCPUChildrenUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.User) + } - "PROCESS_CPU_CHILDREN_KERNEL": {"mongodbatlas.process.cpu.children.usage.average", map[string]pdata.Value{ - "cpu_state": pdata.NewValueString("kernel"), - }}, + case "PROCESS_CPU_CHILDREN_KERNEL": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessCPUChildrenUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.Kernel) + } - "MAX_PROCESS_CPU_CHILDREN_KERNEL": {"mongodbatlas.process.cpu.children.usage.max", map[string]pdata.Value{ - "cpu_state": pdata.NewValueString("kernel"), - }}, + case "MAX_PROCESS_CPU_CHILDREN_KERNEL": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessCPUChildrenUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.Kernel) + } // MongoDB CPU usage scaled to a range of 0% to 100%. Atlas computes this value by dividing by the number of CPU cores. 
- "PROCESS_NORMALIZED_CPU_USER": {"mongodbatlas.process.cpu.normalized.usage.average", map[string]pdata.Value{ - "cpu_state": pdata.NewValueString("user"), - }}, + case "PROCESS_NORMALIZED_CPU_USER": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessCPUNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.User) + } - "MAX_PROCESS_NORMALIZED_CPU_USER": {"mongodbatlas.process.cpu.normalized.usage.max", map[string]pdata.Value{ - "cpu_state": pdata.NewValueString("user"), - }}, + case "MAX_PROCESS_NORMALIZED_CPU_USER": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessCPUNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.User) + } - "PROCESS_NORMALIZED_CPU_KERNEL": {"mongodbatlas.process.cpu.normalized.usage.average", map[string]pdata.Value{ - "cpu_state": pdata.NewValueString("kernel"), - }}, + case "PROCESS_NORMALIZED_CPU_KERNEL": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessCPUNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.Kernel) + } - "MAX_PROCESS_NORMALIZED_CPU_KERNEL": {"mongodbatlas.process.cpu.normalized.usage.max", map[string]pdata.Value{ - "cpu_state": pdata.NewValueString("kernel"), - }}, + case "MAX_PROCESS_NORMALIZED_CPU_KERNEL": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessCPUNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.Kernel) + } - "PROCESS_NORMALIZED_CPU_CHILDREN_USER": {"mongodbatlas.process.cpu.children.normalized.usage.average", map[string]pdata.Value{ - "cpu_state": pdata.NewValueString("user"), - }}, + case "PROCESS_NORMALIZED_CPU_CHILDREN_USER": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessCPUChildrenNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.User) + } // Context: Process - "MAX_PROCESS_NORMALIZED_CPU_CHILDREN_USER": {"mongodbatlas.process.cpu.children.normalized.usage.max", map[string]pdata.Value{ - "cpu_state": pdata.NewValueString("user"), - }}, + case "MAX_PROCESS_NORMALIZED_CPU_CHILDREN_USER": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessCPUChildrenNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.User) + } - "PROCESS_NORMALIZED_CPU_CHILDREN_KERNEL": {"mongodbatlas.process.cpu.children.normalized.usage.average", map[string]pdata.Value{ - "cpu_state": pdata.NewValueString("kernel"), - }}, + case "PROCESS_NORMALIZED_CPU_CHILDREN_KERNEL": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessCPUChildrenNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.Kernel) + } - "MAX_PROCESS_NORMALIZED_CPU_CHILDREN_KERNEL": {"mongodbatlas.process.cpu.children.normalized.usage.max", map[string]pdata.Value{ - "cpu_state": pdata.NewValueString("kernel"), - }}, + case "MAX_PROCESS_NORMALIZED_CPU_CHILDREN_KERNEL": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessCPUChildrenNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.Kernel) + } // Rate of asserts for a MongoDB process found in the asserts document that the serverStatus command generates. 
- "ASSERT_REGULAR": {"mongodbatlas.process.asserts", map[string]pdata.Value{ - "assert_type": pdata.NewValueString("regular"), - }}, + case "ASSERT_REGULAR": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessAssertsDataPoint(ts, float64(*dp.Value), AttributeAssertType.Regular) + } - "ASSERT_WARNING": {"mongodbatlas.process.asserts", map[string]pdata.Value{ - "assert_type": pdata.NewValueString("warning"), - }}, + case "ASSERT_WARNING": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessAssertsDataPoint(ts, float64(*dp.Value), AttributeAssertType.Warning) + } - "ASSERT_MSG": {"mongodbatlas.process.asserts", map[string]pdata.Value{ - "assert_type": pdata.NewValueString("msg"), - }}, + case "ASSERT_MSG": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessAssertsDataPoint(ts, float64(*dp.Value), AttributeAssertType.Msg) + } - "ASSERT_USER": {"mongodbatlas.process.asserts", map[string]pdata.Value{ - "assert_type": pdata.NewValueString("user"), - }}, + case "ASSERT_USER": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessAssertsDataPoint(ts, float64(*dp.Value), AttributeAssertType.User) + } // Amount of data flushed in the background. - "BACKGROUND_FLUSH_AVG": {"mongodbatlas.process.background_flush", map[string]pdata.Value{}}, + case "BACKGROUND_FLUSH_AVG": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessBackgroundFlushDataPoint(ts, float64(*dp.Value)) + } // Amount of bytes in the WiredTiger storage engine cache and tickets found in the wiredTiger.cache and wiredTiger.concurrentTransactions documents that the serverStatus command generates. 
- "CACHE_BYTES_READ_INTO": {"mongodbatlas.process.cache.io", map[string]pdata.Value{ - "cache_direction": pdata.NewValueString("read_into"), - }}, + case "CACHE_BYTES_READ_INTO": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessCacheIoDataPoint(ts, float64(*dp.Value), AttributeCacheDirection.ReadInto) + } - "CACHE_BYTES_WRITTEN_FROM": {"mongodbatlas.process.cache.io", map[string]pdata.Value{ - "cache_direction": pdata.NewValueString("written_from"), - }}, + case "CACHE_BYTES_WRITTEN_FROM": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessCacheIoDataPoint(ts, float64(*dp.Value), AttributeCacheDirection.WrittenFrom) + } - "CACHE_DIRTY_BYTES": {"mongodbatlas.process.cache.size", map[string]pdata.Value{ - "cache_status": pdata.NewValueString("dirty"), - }}, + case "CACHE_DIRTY_BYTES": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessCacheSizeDataPoint(ts, float64(*dp.Value), AttributeCacheStatus.Dirty) + } - "CACHE_USED_BYTES": {"mongodbatlas.process.cache.size", map[string]pdata.Value{ - "cache_status": pdata.NewValueString("used"), - }}, + case "CACHE_USED_BYTES": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessCacheSizeDataPoint(ts, float64(*dp.Value), AttributeCacheStatus.Used) + } - "TICKETS_AVAILABLE_READS": {"mongodbatlas.process.tickets", map[string]pdata.Value{ - "ticket_type": pdata.NewValueString("available_reads"), - }}, + case "TICKETS_AVAILABLE_READS": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessTicketsDataPoint(ts, float64(*dp.Value), AttributeTicketType.AvailableReads) + } - "TICKETS_AVAILABLE_WRITE": {"mongodbatlas.process.tickets", map[string]pdata.Value{ - "ticket_type": pdata.NewValueString("available_writes"), - }}, + case "TICKETS_AVAILABLE_WRITE": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessTicketsDataPoint(ts, float64(*dp.Value), AttributeTicketType.AvailableWrites) + } // Number of connections to a MongoDB process found in the connections document that the serverStatus command generates. - "CONNECTIONS": {"mongodbatlas.process.connections", map[string]pdata.Value{}}, + case "CONNECTIONS": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessConnectionsDataPoint(ts, float64(*dp.Value)) + } // Number of cursors for a MongoDB process found in the metrics.cursor document that the serverStatus command generates. - "CURSORS_TOTAL_OPEN": {"mongodbatlas.process.cursors", map[string]pdata.Value{ - "cursor_state": pdata.NewValueString("open"), - }}, + case "CURSORS_TOTAL_OPEN": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessCursorsDataPoint(ts, float64(*dp.Value), AttributeCursorState.Open) + } - "CURSORS_TOTAL_TIMED_OUT": {"mongodbatlas.process.cursors", map[string]pdata.Value{ - "cursor_state": pdata.NewValueString("timed_out"), - }}, + case "CURSORS_TOTAL_TIMED_OUT": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessCursorsDataPoint(ts, float64(*dp.Value), AttributeCursorState.TimedOut) + } // Numbers of Memory Issues and Page Faults for a MongoDB process. 
- "EXTRA_INFO_PAGE_FAULTS": {"mongodbatlas.process.page_faults", map[string]pdata.Value{ - "memory_issue_type": pdata.NewValueString("extra_info"), - }}, + case "EXTRA_INFO_PAGE_FAULTS": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessPageFaultsDataPoint(ts, float64(*dp.Value), AttributeMemoryIssueType.ExtraInfo) + } - "GLOBAL_ACCESSES_NOT_IN_MEMORY": {"mongodbatlas.process.page_faults", map[string]pdata.Value{ - "memory_issue_type": pdata.NewValueString("global_accesses_not_in_memory"), - }}, - "GLOBAL_PAGE_FAULT_EXCEPTIONS_THROWN": {"mongodbatlas.process.page_faults", map[string]pdata.Value{ - "memory_issue_type": pdata.NewValueString("exceptions_thrown"), - }}, + case "GLOBAL_ACCESSES_NOT_IN_MEMORY": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessPageFaultsDataPoint(ts, float64(*dp.Value), AttributeMemoryIssueType.GlobalAccessesNotInMemory) + } + case "GLOBAL_PAGE_FAULT_EXCEPTIONS_THROWN": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessPageFaultsDataPoint(ts, float64(*dp.Value), AttributeMemoryIssueType.ExceptionsThrown) + } // Number of operations waiting on locks for the MongoDB process that the serverStatus command generates. Cloud Manager computes these values based on the type of storage engine. - "GLOBAL_LOCK_CURRENT_QUEUE_TOTAL": {"mongodbatlas.process.global_lock", map[string]pdata.Value{ - "global_lock_state": pdata.NewValueString("current_queue_total"), - }}, - "GLOBAL_LOCK_CURRENT_QUEUE_READERS": {"mongodbatlas.process.global_lock", map[string]pdata.Value{ - "global_lock_state": pdata.NewValueString("current_queue_readers"), - }}, - "GLOBAL_LOCK_CURRENT_QUEUE_WRITERS": {"mongodbatlas.process.global_lock", map[string]pdata.Value{ - "global_lock_state": pdata.NewValueString("current_queue_writers"), - }}, + case "GLOBAL_LOCK_CURRENT_QUEUE_TOTAL": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessGlobalLockDataPoint(ts, float64(*dp.Value), AttributeGlobalLockState.CurrentQueueTotal) + } + case "GLOBAL_LOCK_CURRENT_QUEUE_READERS": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessGlobalLockDataPoint(ts, float64(*dp.Value), AttributeGlobalLockState.CurrentQueueReaders) + } + case "GLOBAL_LOCK_CURRENT_QUEUE_WRITERS": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessGlobalLockDataPoint(ts, float64(*dp.Value), AttributeGlobalLockState.CurrentQueueWriters) + } // Number of index btree operations. 
- "INDEX_COUNTERS_BTREE_ACCESSES": {"mongodbatlas.process.index.counters", map[string]pdata.Value{ - "btree_counter_type": pdata.NewValueString("accesses"), - }}, - "INDEX_COUNTERS_BTREE_HITS": {"mongodbatlas.process.index.counters", map[string]pdata.Value{ - "btree_counter_type": pdata.NewValueString("hits"), - }}, - "INDEX_COUNTERS_BTREE_MISSES": {"mongodbatlas.process.index.counters", map[string]pdata.Value{ - "btree_counter_type": pdata.NewValueString("misses"), - }}, - "INDEX_COUNTERS_BTREE_MISS_RATIO": {"mongodbatlas.process.index.btree_miss_ratio", map[string]pdata.Value{}}, + case "INDEX_COUNTERS_BTREE_ACCESSES": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessIndexCountersDataPoint(ts, float64(*dp.Value), AttributeBtreeCounterType.Accesses) + } + case "INDEX_COUNTERS_BTREE_HITS": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessIndexCountersDataPoint(ts, float64(*dp.Value), AttributeBtreeCounterType.Hits) + } + case "INDEX_COUNTERS_BTREE_MISSES": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessIndexCountersDataPoint(ts, float64(*dp.Value), AttributeBtreeCounterType.Misses) + } + case "INDEX_COUNTERS_BTREE_MISS_RATIO": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessIndexBtreeMissRatioDataPoint(ts, float64(*dp.Value)) + } // Number of journaling operations. - "JOURNALING_COMMITS_IN_WRITE_LOCK": {"mongodbatlas.process.journaling.commits", map[string]pdata.Value{}}, - "JOURNALING_MB": {"mongodbatlas.process.journaling.written", map[string]pdata.Value{}}, - "JOURNALING_WRITE_DATA_FILES_MB": {"mongodbatlas.process.journaling.data_files", map[string]pdata.Value{}}, + case "JOURNALING_COMMITS_IN_WRITE_LOCK": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessJournalingCommitsDataPoint(ts, float64(*dp.Value)) + } + case "JOURNALING_MB": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessJournalingWrittenDataPoint(ts, float64(*dp.Value)) + } + case "JOURNALING_WRITE_DATA_FILES_MB": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessJournalingDataFilesDataPoint(ts, float64(*dp.Value)) + } // Amount of memory for a MongoDB process found in the mem document that the serverStatus command collects. 
- "MEMORY_RESIDENT": {"mongodbatlas.process.memory.usage", map[string]pdata.Value{ - "memory_state": pdata.NewValueString("resident"), - }}, - "MEMORY_VIRTUAL": {"mongodbatlas.process.memory.usage", map[string]pdata.Value{ - "memory_state": pdata.NewValueString("virtual"), - }}, - - "MEMORY_MAPPED": {"mongodbatlas.process.memory.usage", map[string]pdata.Value{ - "memory_state": pdata.NewValueString("mapped"), - }}, - "COMPUTED_MEMORY": {"mongodbatlas.process.memory.usage", map[string]pdata.Value{ - "memory_state": pdata.NewValueString("computed"), - }}, + case "MEMORY_RESIDENT": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessMemoryUsageDataPoint(ts, float64(*dp.Value), AttributeMemoryState.Resident) + } + case "MEMORY_VIRTUAL": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessMemoryUsageDataPoint(ts, float64(*dp.Value), AttributeMemoryState.Virtual) + } + + case "MEMORY_MAPPED": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessMemoryUsageDataPoint(ts, float64(*dp.Value), AttributeMemoryState.Mapped) + } + case "COMPUTED_MEMORY": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessMemoryUsageDataPoint(ts, float64(*dp.Value), AttributeMemoryState.Computed) + } // Amount of throughput for MongoDB process found in the network document that the serverStatus command collects. - "NETWORK_BYTES_IN": {"mongodbatlas.process.network.io", map[string]pdata.Value{ - "direction": pdata.NewValueString("receive"), - }}, - "NETWORK_BYTES_OUT": {"mongodbatlas.process.network.io", map[string]pdata.Value{ - "direction": pdata.NewValueString("transmit"), - }}, - "NETWORK_NUM_REQUESTS": {"mongodbatlas.process.network.requests", map[string]pdata.Value{}}, + case "NETWORK_BYTES_IN": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessNetworkIoDataPoint(ts, float64(*dp.Value), AttributeDirection.Receive) + } + case "NETWORK_BYTES_OUT": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessNetworkIoDataPoint(ts, float64(*dp.Value), AttributeDirection.Transmit) + } + case "NETWORK_NUM_REQUESTS": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessNetworkRequestsDataPoint(ts, float64(*dp.Value)) + } // Durations and throughput of the MongoDB process' oplog. 
- "OPLOG_SLAVE_LAG_MASTER_TIME": {"mongodbatlas.process.oplog.time", map[string]pdata.Value{ - "oplog_type": pdata.NewValueString("slave_lag_master_time"), - }}, - "OPLOG_MASTER_TIME": {"mongodbatlas.process.oplog.time", map[string]pdata.Value{ - "oplog_type": pdata.NewValueString("master_time"), - }}, - "OPLOG_MASTER_LAG_TIME_DIFF": {"mongodbatlas.process.oplog.time", map[string]pdata.Value{ - "oplog_type": pdata.NewValueString("master_lag_time_diff"), - }}, - "OPLOG_RATE_GB_PER_HOUR": {"mongodbatlas.process.oplog.rate", map[string]pdata.Value{}}, + case "OPLOG_SLAVE_LAG_MASTER_TIME": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessOplogTimeDataPoint(ts, float64(*dp.Value), AttributeOplogType.SlaveLagMasterTime) + } + case "OPLOG_MASTER_TIME": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessOplogTimeDataPoint(ts, float64(*dp.Value), AttributeOplogType.MasterTime) + } + case "OPLOG_MASTER_LAG_TIME_DIFF": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessOplogTimeDataPoint(ts, float64(*dp.Value), AttributeOplogType.MasterLagTimeDiff) + } + case "OPLOG_RATE_GB_PER_HOUR": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessOplogRateDataPoint(ts, float64(*dp.Value)) + } // Number of database operations on a MongoDB process since the process last started. - "DB_STORAGE_TOTAL": {"mongodbatlas.process.db.storage", map[string]pdata.Value{ - "storage_status": pdata.NewValueString("total"), - }}, + case "DB_STORAGE_TOTAL": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessDbStorageDataPoint(ts, float64(*dp.Value), AttributeStorageStatus.Total) + } - "DB_DATA_SIZE_TOTAL": {"mongodbatlas.process.db.storage", map[string]pdata.Value{ - "storage_status": pdata.NewValueString("data_size"), - }}, - "DB_INDEX_SIZE_TOTAL": {"mongodbatlas.process.db.storage", map[string]pdata.Value{ - "storage_status": pdata.NewValueString("index_size"), - }}, - "DB_DATA_SIZE_TOTAL_WO_SYSTEM": {"mongodbatlas.process.db.storage", map[string]pdata.Value{ - "storage_status": pdata.NewValueString("data_size_wo_system"), - }}, + case "DB_DATA_SIZE_TOTAL": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessDbStorageDataPoint(ts, float64(*dp.Value), AttributeStorageStatus.DataSize) + } + case "DB_INDEX_SIZE_TOTAL": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessDbStorageDataPoint(ts, float64(*dp.Value), AttributeStorageStatus.IndexSize) + } + case "DB_DATA_SIZE_TOTAL_WO_SYSTEM": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessDbStorageDataPoint(ts, float64(*dp.Value), AttributeStorageStatus.DataSizeWoSystem) + } // Rate of database operations on a MongoDB process since the process last started found in the opcounters document that the serverStatus command collects. 
- "OPCOUNTER_CMD": {"mongodbatlas.process.db.operations.rate", map[string]pdata.Value{ - "operation": pdata.NewValueString("cmd"), - "role": pdata.NewValueString("primary"), - }}, - "OPCOUNTER_QUERY": {"mongodbatlas.process.db.operations.rate", map[string]pdata.Value{ - "operation": pdata.NewValueString("query"), - "role": pdata.NewValueString("primary"), - }}, - "OPCOUNTER_UPDATE": {"mongodbatlas.process.db.operations.rate", map[string]pdata.Value{ - "operation": pdata.NewValueString("update"), - "role": pdata.NewValueString("primary"), - }}, - "OPCOUNTER_DELETE": {"mongodbatlas.process.db.operations.rate", map[string]pdata.Value{ - "operation": pdata.NewValueString("delete"), - "role": pdata.NewValueString("primary"), - }}, - "OPCOUNTER_GETMORE": {"mongodbatlas.process.db.operations.rate", map[string]pdata.Value{ - "operation": pdata.NewValueString("getmore"), - "role": pdata.NewValueString("primary"), - }}, - "OPCOUNTER_INSERT": {"mongodbatlas.process.db.operations.rate", map[string]pdata.Value{ - "operation": pdata.NewValueString("insert"), - "role": pdata.NewValueString("primary"), - }}, + case "OPCOUNTER_CMD": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperation.Cmd, AttributeClusterRole.Primary) + } + case "OPCOUNTER_QUERY": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperation.Query, AttributeClusterRole.Primary) + } + case "OPCOUNTER_UPDATE": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperation.Update, AttributeClusterRole.Primary) + } + case "OPCOUNTER_DELETE": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperation.Delete, AttributeClusterRole.Primary) + } + case "OPCOUNTER_GETMORE": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperation.Getmore, AttributeClusterRole.Primary) + } + case "OPCOUNTER_INSERT": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperation.Insert, AttributeClusterRole.Primary) + } // Rate of database operations on MongoDB secondaries found in the opcountersRepl document that the serverStatus command collects. 
- "OPCOUNTER_REPL_CMD": {"mongodbatlas.process.db.operations.rate", map[string]pdata.Value{ - "operation": pdata.NewValueString("cmd"), - "role": pdata.NewValueString("replica"), - }}, - "OPCOUNTER_REPL_UPDATE": {"mongodbatlas.process.db.operations.rate", map[string]pdata.Value{ - "operation": pdata.NewValueString("update"), - "role": pdata.NewValueString("replica"), - }}, - "OPCOUNTER_REPL_DELETE": {"mongodbatlas.process.db.operations.rate", map[string]pdata.Value{ - "operation": pdata.NewValueString("delete"), - "role": pdata.NewValueString("replica"), - }}, - "OPCOUNTER_REPL_INSERT": {"mongodbatlas.process.db.operations.rate", map[string]pdata.Value{ - "operation": pdata.NewValueString("insert"), - "role": pdata.NewValueString("replica"), - }}, + case "OPCOUNTER_REPL_CMD": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperation.Cmd, AttributeClusterRole.Replica) + } + case "OPCOUNTER_REPL_UPDATE": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperation.Update, AttributeClusterRole.Replica) + } + case "OPCOUNTER_REPL_DELETE": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperation.Delete, AttributeClusterRole.Replica) + } + case "OPCOUNTER_REPL_INSERT": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperation.Insert, AttributeClusterRole.Replica) + } // Average rate of documents returned, inserted, updated, or deleted per second during a selected time period. 
- "DOCUMENT_METRICS_RETURNED": {"mongodbatlas.process.db.document.rate", map[string]pdata.Value{ - "document_status": pdata.NewValueString("returned"), - }}, - "DOCUMENT_METRICS_INSERTED": {"mongodbatlas.process.db.document.rate", map[string]pdata.Value{ - "document_status": pdata.NewValueString("inserted"), - }}, - "DOCUMENT_METRICS_UPDATED": {"mongodbatlas.process.db.document.rate", map[string]pdata.Value{ - "document_status": pdata.NewValueString("updated"), - }}, - "DOCUMENT_METRICS_DELETED": {"mongodbatlas.process.db.document.rate", map[string]pdata.Value{ - "document_status": pdata.NewValueString("deleted"), - }}, + case "DOCUMENT_METRICS_RETURNED": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessDbDocumentRateDataPoint(ts, float64(*dp.Value), AttributeDocumentStatus.Returned) + } + case "DOCUMENT_METRICS_INSERTED": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessDbDocumentRateDataPoint(ts, float64(*dp.Value), AttributeDocumentStatus.Inserted) + } + case "DOCUMENT_METRICS_UPDATED": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessDbDocumentRateDataPoint(ts, float64(*dp.Value), AttributeDocumentStatus.Updated) + } + case "DOCUMENT_METRICS_DELETED": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessDbDocumentRateDataPoint(ts, float64(*dp.Value), AttributeDocumentStatus.Deleted) + } // Average rate for operations per second during a selected time period that perform a sort but cannot perform the sort using an index. - "OPERATIONS_SCAN_AND_ORDER": {"mongodbatlas.process.db.operations.rate", map[string]pdata.Value{ - "operation": pdata.NewValueString("scan_and_order"), - }}, + case "OPERATIONS_SCAN_AND_ORDER": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperation.ScanAndOrder, AttributeClusterRole.Primary) + } // Average execution time in milliseconds per read, write, or command operation during a selected time period. - "OP_EXECUTION_TIME_READS": {"mongodbatlas.process.db.operations.time", map[string]pdata.Value{ - "execution_type": pdata.NewValueString("reads"), - }}, - "OP_EXECUTION_TIME_WRITES": {"mongodbatlas.process.db.operations.time", map[string]pdata.Value{ - "execution_type": pdata.NewValueString("writes"), - }}, - "OP_EXECUTION_TIME_COMMANDS": {"mongodbatlas.process.db.operations.time", map[string]pdata.Value{ - "execution_type": pdata.NewValueString("commands"), - }}, + case "OP_EXECUTION_TIME_READS": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessDbOperationsTimeDataPoint(ts, float64(*dp.Value), AttributeExecutionType.Reads) + } + case "OP_EXECUTION_TIME_WRITES": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessDbOperationsTimeDataPoint(ts, float64(*dp.Value), AttributeExecutionType.Writes) + } + case "OP_EXECUTION_TIME_COMMANDS": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessDbOperationsTimeDataPoint(ts, float64(*dp.Value), AttributeExecutionType.Commands) + } // Number of times the host restarted within the previous hour. 
- "RESTARTS_IN_LAST_HOUR": {"mongodbatlas.process.restarts", map[string]pdata.Value{}}, + case "RESTARTS_IN_LAST_HOUR": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessRestartsDataPoint(ts, float64(*dp.Value)) + } // Average rate per second to scan index items during queries and query-plan evaluations found in the value of totalKeysExamined from the explain command. - "QUERY_EXECUTOR_SCANNED": {"mongodbatlas.process.db.query_executor.scanned", map[string]pdata.Value{ - "scanned_type": pdata.NewValueString("index_items"), - }}, + case "QUERY_EXECUTOR_SCANNED": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessDbQueryExecutorScannedDataPoint(ts, float64(*dp.Value), AttributeScannedType.IndexItems) + } // Average rate of documents scanned per second during queries and query-plan evaluations found in the value of totalDocsExamined from the explain command. - "QUERY_EXECUTOR_SCANNED_OBJECTS": {"mongodbatlas.process.db.query_executor.scanned", map[string]pdata.Value{ - "scanned_type": pdata.NewValueString("objects"), - }}, + case "QUERY_EXECUTOR_SCANNED_OBJECTS": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessDbQueryExecutorScannedDataPoint(ts, float64(*dp.Value), AttributeScannedType.Objects) + } // Ratio of the number of index items scanned to the number of documents returned. - "QUERY_TARGETING_SCANNED_PER_RETURNED": {"mongodbatlas.process.db.query_targeting.scanned_per_returned", map[string]pdata.Value{ - "scanned_type": pdata.NewValueString("index_items"), - }}, + case "QUERY_TARGETING_SCANNED_PER_RETURNED": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessDbQueryTargetingScannedPerReturnedDataPoint(ts, float64(*dp.Value), AttributeScannedType.IndexItems) + } // Ratio of the number of documents scanned to the number of documents returned. - "QUERY_TARGETING_SCANNED_OBJECTS_PER_RETURNED": {"mongodbatlas.process.db.query_targeting.scanned_per_returned", map[string]pdata.Value{ - "scanned_type": pdata.NewValueString("objects"), - }}, + case "QUERY_TARGETING_SCANNED_OBJECTS_PER_RETURNED": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasProcessDbQueryTargetingScannedPerReturnedDataPoint(ts, float64(*dp.Value), AttributeScannedType.Objects) + } // CPU usage of processes on the host. For hosts with more than one CPU core, this value can exceed 100%. 
- "SYSTEM_CPU_USER": {"mongodbatlas.system.cpu.usage.average", map[string]pdata.Value{ - "cpu_state": pdata.NewValueString("user"), - }}, - "MAX_SYSTEM_CPU_USER": {"mongodbatlas.system.cpu.usage.max", map[string]pdata.Value{ - "cpu_state": pdata.NewValueString("user"), - }}, - "SYSTEM_CPU_KERNEL": {"mongodbatlas.system.cpu.usage.average", map[string]pdata.Value{ - "cpu_state": pdata.NewValueString("kernel"), - }}, - "MAX_SYSTEM_CPU_KERNEL": {"mongodbatlas.system.cpu.usage.max", map[string]pdata.Value{ - "cpu_state": pdata.NewValueString("kernel"), - }}, - "SYSTEM_CPU_NICE": {"mongodbatlas.system.cpu.usage.average", map[string]pdata.Value{ - "cpu_state": pdata.NewValueString("nice"), - }}, - "MAX_SYSTEM_CPU_NICE": {"mongodbatlas.system.cpu.usage.max", map[string]pdata.Value{ - "cpu_state": pdata.NewValueString("nice"), - }}, - "SYSTEM_CPU_IOWAIT": {"mongodbatlas.system.cpu.usage.average", map[string]pdata.Value{ - "cpu_state": pdata.NewValueString("iowait"), - }}, - "MAX_SYSTEM_CPU_IOWAIT": {"mongodbatlas.system.cpu.usage.max", map[string]pdata.Value{ - "cpu_state": pdata.NewValueString("iowait"), - }}, - "SYSTEM_CPU_IRQ": {"mongodbatlas.system.cpu.usage.average", map[string]pdata.Value{ - "cpu_state": pdata.NewValueString("irq"), - }}, - "MAX_SYSTEM_CPU_IRQ": {"mongodbatlas.system.cpu.usage.max", map[string]pdata.Value{ - "cpu_state": pdata.NewValueString("irq"), - }}, - "SYSTEM_CPU_SOFTIRQ": {"mongodbatlas.system.cpu.usage.average", map[string]pdata.Value{ - "cpu_state": pdata.NewValueString("softirq"), - }}, - "MAX_SYSTEM_CPU_SOFTIRQ": {"mongodbatlas.system.cpu.usage.max", map[string]pdata.Value{ - "cpu_state": pdata.NewValueString("softirq"), - }}, - "SYSTEM_CPU_GUEST": {"mongodbatlas.system.cpu.usage.average", map[string]pdata.Value{ - "cpu_state": pdata.NewValueString("guest"), - }}, - "MAX_SYSTEM_CPU_GUEST": {"mongodbatlas.system.cpu.usage.max", map[string]pdata.Value{ - "cpu_state": pdata.NewValueString("guest"), - }}, - "SYSTEM_CPU_STEAL": {"mongodbatlas.system.cpu.usage.average", map[string]pdata.Value{ - "cpu_state": pdata.NewValueString("steal"), - }}, - "MAX_SYSTEM_CPU_STEAL": {"mongodbatlas.system.cpu.usage.max", map[string]pdata.Value{ - "cpu_state": pdata.NewValueString("steal"), - }}, + case "SYSTEM_CPU_USER": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasSystemCPUUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.User) + } + case "MAX_SYSTEM_CPU_USER": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasSystemCPUUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.User) + } + case "SYSTEM_CPU_KERNEL": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasSystemCPUUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.Kernel) + } + case "MAX_SYSTEM_CPU_KERNEL": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasSystemCPUUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.Kernel) + } + case "SYSTEM_CPU_NICE": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasSystemCPUUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.Nice) + } + case "MAX_SYSTEM_CPU_NICE": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasSystemCPUUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.Nice) + } + 
case "SYSTEM_CPU_IOWAIT": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasSystemCPUUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.Iowait) + } + case "MAX_SYSTEM_CPU_IOWAIT": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasSystemCPUUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.Iowait) + } + case "SYSTEM_CPU_IRQ": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasSystemCPUUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.Irq) + } + case "MAX_SYSTEM_CPU_IRQ": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasSystemCPUUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.Irq) + } + case "SYSTEM_CPU_SOFTIRQ": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasSystemCPUUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.Softirq) + } + case "MAX_SYSTEM_CPU_SOFTIRQ": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasSystemCPUUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.Softirq) + } + case "SYSTEM_CPU_GUEST": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasSystemCPUUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.Guest) + } + case "MAX_SYSTEM_CPU_GUEST": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasSystemCPUUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.Guest) + } + case "SYSTEM_CPU_STEAL": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasSystemCPUUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.Steal) + } + case "MAX_SYSTEM_CPU_STEAL": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasSystemCPUUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.Steal) + } // CPU usage of processes on the host scaled to a range of 0 to 100% by dividing by the number of CPU cores. 
- "SYSTEM_NORMALIZED_CPU_USER": {"mongodbatlas.system.cpu.normalized.usage.average", map[string]pdata.Value{ - "cpu_state": pdata.NewValueString("user"), - }}, - "MAX_SYSTEM_NORMALIZED_CPU_USER": {"mongodbatlas.system.cpu.normalized.usage.max", map[string]pdata.Value{ - "cpu_state": pdata.NewValueString("user"), - }}, - "MAX_SYSTEM_NORMALIZED_CPU_NICE": {"mongodbatlas.system.cpu.normalized.usage.max", map[string]pdata.Value{ - "cpu_state": pdata.NewValueString("nice"), - }}, - "SYSTEM_NORMALIZED_CPU_KERNEL": {"mongodbatlas.system.cpu.normalized.usage.average", map[string]pdata.Value{ - "cpu_state": pdata.NewValueString("kernel"), - }}, - "MAX_SYSTEM_NORMALIZED_CPU_KERNEL": {"mongodbatlas.system.cpu.normalized.usage.max", map[string]pdata.Value{ - "cpu_state": pdata.NewValueString("kernel"), - }}, - "SYSTEM_NORMALIZED_CPU_NICE": {"mongodbatlas.system.cpu.normalized.usage.average", map[string]pdata.Value{ - "cpu_state": pdata.NewValueString("nice"), - }}, - "SYSTEM_NORMALIZED_CPU_IOWAIT": {"mongodbatlas.system.cpu.normalized.usage.average", map[string]pdata.Value{ - "cpu_state": pdata.NewValueString("iowait"), - }}, - "MAX_SYSTEM_NORMALIZED_CPU_IOWAIT": {"mongodbatlas.system.cpu.normalized.usage.max", map[string]pdata.Value{ - "cpu_state": pdata.NewValueString("iowait"), - }}, - "SYSTEM_NORMALIZED_CPU_IRQ": {"mongodbatlas.system.cpu.normalized.usage.average", map[string]pdata.Value{ - "cpu_state": pdata.NewValueString("irq"), - }}, - "MAX_SYSTEM_NORMALIZED_CPU_IRQ": {"mongodbatlas.system.cpu.normalized.usage.max", map[string]pdata.Value{ - "cpu_state": pdata.NewValueString("irq"), - }}, - "SYSTEM_NORMALIZED_CPU_SOFTIRQ": {"mongodbatlas.system.cpu.normalized.usage.average", map[string]pdata.Value{ - "cpu_state": pdata.NewValueString("softirq"), - }}, - "MAX_SYSTEM_NORMALIZED_CPU_SOFTIRQ": {"mongodbatlas.system.cpu.normalized.usage.max", map[string]pdata.Value{ - "cpu_state": pdata.NewValueString("softirq"), - }}, - "SYSTEM_NORMALIZED_CPU_GUEST": {"mongodbatlas.system.cpu.normalized.usage.average", map[string]pdata.Value{ - "cpu_state": pdata.NewValueString("guest"), - }}, - "MAX_SYSTEM_NORMALIZED_CPU_GUEST": {"mongodbatlas.system.cpu.normalized.usage.max", map[string]pdata.Value{ - "cpu_state": pdata.NewValueString("guest"), - }}, - "SYSTEM_NORMALIZED_CPU_STEAL": {"mongodbatlas.system.cpu.normalized.usage.average", map[string]pdata.Value{ - "cpu_state": pdata.NewValueString("steal"), - }}, - "MAX_SYSTEM_NORMALIZED_CPU_STEAL": {"mongodbatlas.system.cpu.normalized.usage.max", map[string]pdata.Value{ - "cpu_state": pdata.NewValueString("steal"), - }}, + case "SYSTEM_NORMALIZED_CPU_USER": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasSystemCPUNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.User) + } + case "MAX_SYSTEM_NORMALIZED_CPU_USER": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasSystemCPUNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.User) + } + case "MAX_SYSTEM_NORMALIZED_CPU_NICE": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasSystemCPUNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.Nice) + } + case "SYSTEM_NORMALIZED_CPU_KERNEL": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasSystemCPUNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.Kernel) + } + case 
"MAX_SYSTEM_NORMALIZED_CPU_KERNEL": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasSystemCPUNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.Kernel) + } + case "SYSTEM_NORMALIZED_CPU_NICE": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasSystemCPUNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.Nice) + } + case "SYSTEM_NORMALIZED_CPU_IOWAIT": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasSystemCPUNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.Iowait) + } + case "MAX_SYSTEM_NORMALIZED_CPU_IOWAIT": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasSystemCPUNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.Iowait) + } + case "SYSTEM_NORMALIZED_CPU_IRQ": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasSystemCPUNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.Irq) + } + case "MAX_SYSTEM_NORMALIZED_CPU_IRQ": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasSystemCPUNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.Irq) + } + case "SYSTEM_NORMALIZED_CPU_SOFTIRQ": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasSystemCPUNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.Softirq) + } + case "MAX_SYSTEM_NORMALIZED_CPU_SOFTIRQ": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasSystemCPUNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.Softirq) + } + case "SYSTEM_NORMALIZED_CPU_GUEST": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasSystemCPUNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.Guest) + } + case "MAX_SYSTEM_NORMALIZED_CPU_GUEST": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasSystemCPUNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.Guest) + } + case "SYSTEM_NORMALIZED_CPU_STEAL": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasSystemCPUNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.Steal) + } + case "MAX_SYSTEM_NORMALIZED_CPU_STEAL": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasSystemCPUNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.Steal) + } // Physical memory usage, in bytes, that the host uses. 
- "SYSTEM_MEMORY_AVAILABLE": {"mongodbatlas.system.memory.usage.average", map[string]pdata.Value{ - "memory_status": pdata.NewValueString("available"), - }}, - "MAX_SYSTEM_MEMORY_AVAILABLE": {"mongodbatlas.system.memory.usage.max", map[string]pdata.Value{ - "memory_status": pdata.NewValueString("available"), - }}, - "SYSTEM_MEMORY_BUFFERS": {"mongodbatlas.system.memory.usage.average", map[string]pdata.Value{ - "memory_status": pdata.NewValueString("buffers"), - }}, - "MAX_SYSTEM_MEMORY_BUFFERS": {"mongodbatlas.system.memory.usage.max", map[string]pdata.Value{ - "memory_status": pdata.NewValueString("buffers"), - }}, - "SYSTEM_MEMORY_CACHED": {"mongodbatlas.system.memory.usage.average", map[string]pdata.Value{ - "memory_status": pdata.NewValueString("cached"), - }}, - "MAX_SYSTEM_MEMORY_CACHED": {"mongodbatlas.system.memory.usage.max", map[string]pdata.Value{ - "memory_status": pdata.NewValueString("cached"), - }}, - "SYSTEM_MEMORY_FREE": {"mongodbatlas.system.memory.usage.average", map[string]pdata.Value{ - "memory_status": pdata.NewValueString("free"), - }}, - "MAX_SYSTEM_MEMORY_FREE": {"mongodbatlas.system.memory.usage.average", map[string]pdata.Value{ - "memory_status": pdata.NewValueString("free"), - }}, - "SYSTEM_MEMORY_SHARED": {"mongodbatlas.system.memory.usage.average", map[string]pdata.Value{ - "memory_status": pdata.NewValueString("shared"), - }}, - "MAX_SYSTEM_MEMORY_SHARED": {"mongodbatlas.system.memory.usage.max", map[string]pdata.Value{ - "memory_status": pdata.NewValueString("shared"), - }}, - "SYSTEM_MEMORY_USED": {"mongodbatlas.system.memory.usage.average", map[string]pdata.Value{ - "memory_status": pdata.NewValueString("used"), - }}, - "MAX_SYSTEM_MEMORY_USED": {"mongodbatlas.system.memory.usage.max", map[string]pdata.Value{ - "memory_status": pdata.NewValueString("used"), - }}, + case "SYSTEM_MEMORY_AVAILABLE": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasSystemMemoryUsageAverageDataPoint(ts, float64(*dp.Value), AttributeMemoryStatus.Available) + } + case "MAX_SYSTEM_MEMORY_AVAILABLE": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasSystemMemoryUsageMaxDataPoint(ts, float64(*dp.Value), AttributeMemoryStatus.Available) + } + case "SYSTEM_MEMORY_BUFFERS": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasSystemMemoryUsageAverageDataPoint(ts, float64(*dp.Value), AttributeMemoryStatus.Buffers) + } + case "MAX_SYSTEM_MEMORY_BUFFERS": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasSystemMemoryUsageMaxDataPoint(ts, float64(*dp.Value), AttributeMemoryStatus.Buffers) + } + case "SYSTEM_MEMORY_CACHED": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasSystemMemoryUsageAverageDataPoint(ts, float64(*dp.Value), AttributeMemoryStatus.Cached) + } + case "MAX_SYSTEM_MEMORY_CACHED": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasSystemMemoryUsageMaxDataPoint(ts, float64(*dp.Value), AttributeMemoryStatus.Cached) + } + case "SYSTEM_MEMORY_FREE": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasSystemMemoryUsageAverageDataPoint(ts, float64(*dp.Value), AttributeMemoryStatus.Free) + } + case "MAX_SYSTEM_MEMORY_FREE": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, 
ts pdata.Timestamp) {
+			mb.RecordMongodbatlasSystemMemoryUsageMaxDataPoint(ts, float64(*dp.Value), AttributeMemoryStatus.Free)
+		}
+	case "SYSTEM_MEMORY_SHARED":
+		return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) {
+			mb.RecordMongodbatlasSystemMemoryUsageAverageDataPoint(ts, float64(*dp.Value), AttributeMemoryStatus.Shared)
+		}
+	case "MAX_SYSTEM_MEMORY_SHARED":
+		return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) {
+			mb.RecordMongodbatlasSystemMemoryUsageMaxDataPoint(ts, float64(*dp.Value), AttributeMemoryStatus.Shared)
+		}
+	case "SYSTEM_MEMORY_USED":
+		return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) {
+			mb.RecordMongodbatlasSystemMemoryUsageAverageDataPoint(ts, float64(*dp.Value), AttributeMemoryStatus.Used)
+		}
+	case "MAX_SYSTEM_MEMORY_USED":
+		return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) {
+			mb.RecordMongodbatlasSystemMemoryUsageMaxDataPoint(ts, float64(*dp.Value), AttributeMemoryStatus.Used)
+		}
 
 	// Average rate of physical bytes per second that the eth0 network interface received and transmitted.
-	"SYSTEM_NETWORK_IN": {"mongodbatlas.system.network.io.average", map[string]pdata.Value{
-		"direction": pdata.NewValueString("receive"),
-	}},
-	"MAX_SYSTEM_NETWORK_IN": {"mongodbatlas.system.network.io.max", map[string]pdata.Value{
-		"direction": pdata.NewValueString("receive"),
-	}},
-	"SYSTEM_NETWORK_OUT": {"mongodbatlas.system.network.io.average", map[string]pdata.Value{
-		"direction": pdata.NewValueString("transmit"),
-	}},
-	"MAX_SYSTEM_NETWORK_OUT": {"mongodbatlas.system.network.io.max", map[string]pdata.Value{
-		"direction": pdata.NewValueString("transmit"),
-	}},
+	case "SYSTEM_NETWORK_IN":
+		return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) {
+			mb.RecordMongodbatlasSystemNetworkIoAverageDataPoint(ts, float64(*dp.Value), AttributeDirection.Receive)
+		}
+	case "MAX_SYSTEM_NETWORK_IN":
+		return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) {
+			mb.RecordMongodbatlasSystemNetworkIoMaxDataPoint(ts, float64(*dp.Value), AttributeDirection.Receive)
+		}
+	case "SYSTEM_NETWORK_OUT":
+		return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) {
+			mb.RecordMongodbatlasSystemNetworkIoAverageDataPoint(ts, float64(*dp.Value), AttributeDirection.Transmit)
+		}
+	case "MAX_SYSTEM_NETWORK_OUT":
+		return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) {
+			mb.RecordMongodbatlasSystemNetworkIoMaxDataPoint(ts, float64(*dp.Value), AttributeDirection.Transmit)
+		}
 
 	// Total amount of memory that swap uses.
- "SWAP_USAGE_USED": {"mongodbatlas.system.paging.usage.average", map[string]pdata.Value{ - "memory_state": pdata.NewValueString("used"), - }}, - "MAX_SWAP_USAGE_USED": {"mongodbatlas.system.paging.usage.max", map[string]pdata.Value{ - "memory_state": pdata.NewValueString("used"), - }}, - "SWAP_USAGE_FREE": {"mongodbatlas.system.paging.usage.average", map[string]pdata.Value{ - "memory_state": pdata.NewValueString("free"), - }}, - "MAX_SWAP_USAGE_FREE": {"mongodbatlas.system.paging.usage.max", map[string]pdata.Value{ - "memory_state": pdata.NewValueString("free"), - }}, + case "SWAP_USAGE_USED": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasSystemPagingUsageAverageDataPoint(ts, float64(*dp.Value), AttributeMemoryState.Used) + } + case "MAX_SWAP_USAGE_USED": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasSystemPagingUsageMaxDataPoint(ts, float64(*dp.Value), AttributeMemoryState.Used) + } + case "SWAP_USAGE_FREE": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasSystemPagingUsageAverageDataPoint(ts, float64(*dp.Value), AttributeMemoryState.Free) + } + case "MAX_SWAP_USAGE_FREE": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasSystemPagingUsageMaxDataPoint(ts, float64(*dp.Value), AttributeMemoryState.Free) + } // Total amount of memory written and read from swap. - "SWAP_IO_IN": {"mongodbatlas.system.paging.io.average", map[string]pdata.Value{ - "direction": pdata.NewValueString("in"), - }}, - "MAX_SWAP_IO_IN": {"mongodbatlas.system.paging.io.max", map[string]pdata.Value{ - "direction": pdata.NewValueString("in"), - }}, - "SWAP_IO_OUT": {"mongodbatlas.system.paging.io.average", map[string]pdata.Value{ - "direction": pdata.NewValueString("out"), - }}, - "MAX_SWAP_IO_OUT": {"mongodbatlas.system.paging.io.max", map[string]pdata.Value{ - "direction": pdata.NewValueString("out"), - }}, + case "SWAP_IO_IN": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasSystemPagingIoAverageDataPoint(ts, float64(*dp.Value), AttributeDirection.Receive) + } + case "MAX_SWAP_IO_IN": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasSystemPagingIoMaxDataPoint(ts, float64(*dp.Value), AttributeDirection.Receive) + } + case "SWAP_IO_OUT": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasSystemPagingIoAverageDataPoint(ts, float64(*dp.Value), AttributeDirection.Transmit) + } + case "MAX_SWAP_IO_OUT": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasSystemPagingIoMaxDataPoint(ts, float64(*dp.Value), AttributeDirection.Transmit) + } // Memory usage, in bytes, that Atlas Search processes use. 
- "FTS_PROCESS_RESIDENT_MEMORY": {"mongodbatlas.system.fts.memory.usage", map[string]pdata.Value{ - "memory_state": pdata.NewValueString("resident"), - }}, - "FTS_PROCESS_VIRTUAL_MEMORY": {"mongodbatlas.system.fts.memory.usage", map[string]pdata.Value{ - "memory_state": pdata.NewValueString("virtual"), - }}, - "FTS_PROCESS_SHARED_MEMORY": {"mongodbatlas.system.fts.memory.usage", map[string]pdata.Value{ - "memory_state": pdata.NewValueString("shared"), - }}, - "FTS_MEMORY_MAPPED": {"mongodbatlas.system.fts.memory.usage", map[string]pdata.Value{ - "memory_state": pdata.NewValueString("mapped"), - }}, + case "FTS_PROCESS_RESIDENT_MEMORY": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasSystemFtsMemoryUsageDataPoint(ts, float64(*dp.Value), AttributeMemoryState.Resident) + } + case "FTS_PROCESS_VIRTUAL_MEMORY": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasSystemFtsMemoryUsageDataPoint(ts, float64(*dp.Value), AttributeMemoryState.Virtual) + } + case "FTS_PROCESS_SHARED_MEMORY": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasSystemFtsMemoryUsageDataPoint(ts, float64(*dp.Value), AttributeMemoryState.Shared) + } + case "FTS_MEMORY_MAPPED": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasSystemFtsMemoryUsageDataPoint(ts, float64(*dp.Value), AttributeMemoryState.Mapped) + } // Disk space, in bytes, that Atlas Search indexes use. - "FTS_DISK_USAGE": {"mongodbatlas.system.fts.disk.used", map[string]pdata.Value{}}, + case "FTS_DISK_USAGE": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasSystemFtsDiskUsedDataPoint(ts, float64(*dp.Value)) + } // Percentage of CPU that Atlas Search processes use. 
- "FTS_PROCESS_CPU_USER": {"mongodbatlas.system.fts.cpu.usage", map[string]pdata.Value{ - "cpu_state": pdata.NewValueString("user"), - }}, - "FTS_PROCESS_CPU_KERNEL": {"mongodbatlas.system.fts.cpu.usage", map[string]pdata.Value{ - "cpu_state": pdata.NewValueString("kernel"), - }}, - "FTS_PROCESS_NORMALIZED_CPU_USER": {"mongodbatlas.system.fts.cpu.normalized.usage", map[string]pdata.Value{ - "cpu_state": pdata.NewValueString("user"), - }}, - "FTS_PROCESS_NORMALIZED_CPU_KERNEL": {"mongodbatlas.system.fts.cpu.normalized.usage", map[string]pdata.Value{ - "cpu_state": pdata.NewValueString("kernel"), - }}, + case "FTS_PROCESS_CPU_USER": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasSystemFtsCPUUsageDataPoint(ts, float64(*dp.Value), AttributeCPUState.User) + } + case "FTS_PROCESS_CPU_KERNEL": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasSystemFtsCPUUsageDataPoint(ts, float64(*dp.Value), AttributeCPUState.Kernel) + } + case "FTS_PROCESS_NORMALIZED_CPU_USER": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasSystemFtsCPUNormalizedUsageDataPoint(ts, float64(*dp.Value), AttributeCPUState.User) + } + case "FTS_PROCESS_NORMALIZED_CPU_KERNEL": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasSystemFtsCPUNormalizedUsageDataPoint(ts, float64(*dp.Value), AttributeCPUState.Kernel) + } // Process Disk Measurements (https://docs.atlas.mongodb.com/reference/api/process-disks-measurements/) // Measures throughput of I/O operations for the disk partition used for MongoDB. - "DISK_PARTITION_IOPS_READ": {"mongodbatlas.disk.partition.iops.average", map[string]pdata.Value{ - "disk_direction": pdata.NewValueString("read"), - }}, - - "MAX_DISK_PARTITION_IOPS_READ": {"mongodbatlas.disk.partition.iops.average", map[string]pdata.Value{ - "disk_direction": pdata.NewValueString("read"), - }}, - - "DISK_PARTITION_IOPS_WRITE": {"mongodbatlas.disk.partition.iops.average", map[string]pdata.Value{ - "disk_direction": pdata.NewValueString("write"), - }}, + case "DISK_PARTITION_IOPS_READ": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasDiskPartitionIopsAverageDataPoint(ts, float64(*dp.Value), AttributeDiskDirection.Read) + } - "MAX_DISK_PARTITION_IOPS_WRITE": {"mongodbatlas.disk.partition.iops.max", map[string]pdata.Value{ - "disk_direction": pdata.NewValueString("write"), - }}, + case "MAX_DISK_PARTITION_IOPS_READ": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasDiskPartitionIopsAverageDataPoint(ts, float64(*dp.Value), AttributeDiskDirection.Read) + } - "DISK_PARTITION_IOPS_TOTAL": {"mongodbatlas.disk.partition.iops.average", map[string]pdata.Value{ - "disk_direction": pdata.NewValueString("total"), - }}, + case "DISK_PARTITION_IOPS_WRITE": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasDiskPartitionIopsAverageDataPoint(ts, float64(*dp.Value), AttributeDiskDirection.Write) + } - "MAX_DISK_PARTITION_IOPS_TOTAL": {"mongodbatlas.disk.partition.iops.max", map[string]pdata.Value{ - "disk_direction": pdata.NewValueString("total"), - }}, + case "MAX_DISK_PARTITION_IOPS_WRITE": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasDiskPartitionIopsMaxDataPoint(ts, 
float64(*dp.Value), AttributeDiskDirection.Write) + } - "DISK_PARTITION_UTILIZATION": {"mongodbatlas.disk.partition.utilization.average", map[string]pdata.Value{}}, + case "DISK_PARTITION_IOPS_TOTAL": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasDiskPartitionIopsAverageDataPoint(ts, float64(*dp.Value), AttributeDiskDirection.Total) + } - "MAX_DISK_PARTITION_UTILIZATION": {"mongodbatlas.disk.partition.utilization.max", map[string]pdata.Value{}}, + case "MAX_DISK_PARTITION_IOPS_TOTAL": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasDiskPartitionIopsMaxDataPoint(ts, float64(*dp.Value), AttributeDiskDirection.Total) + } // The percentage of time during which requests are being issued to and serviced by the partition. // This includes requests from any process, not just MongoDB processes. - "DISK_PARTITION_LATENCY_READ": {"mongodbatlas.disk.partition.latency.average", map[string]pdata.Value{ - "disk_direction": pdata.NewValueString("read"), - }}, + case "DISK_PARTITION_LATENCY_READ": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasDiskPartitionLatencyAverageDataPoint(ts, float64(*dp.Value), AttributeDiskDirection.Read) + } - "MAX_DISK_PARTITION_LATENCY_READ": {"mongodbatlas.disk.partition.latency.max", map[string]pdata.Value{ - "disk_direction": pdata.NewValueString("read"), - }}, + case "MAX_DISK_PARTITION_LATENCY_READ": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasDiskPartitionLatencyMaxDataPoint(ts, float64(*dp.Value), AttributeDiskDirection.Read) + } - "DISK_PARTITION_LATENCY_WRITE": {"mongodbatlas.disk.partition.latency.average", map[string]pdata.Value{ - "disk_direction": pdata.NewValueString("write"), - }}, + case "DISK_PARTITION_LATENCY_WRITE": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasDiskPartitionLatencyAverageDataPoint(ts, float64(*dp.Value), AttributeDiskDirection.Write) + } - "MAX_DISK_PARTITION_LATENCY_WRITE": {"mongodbatlas.disk.partition.latency.max", map[string]pdata.Value{ - "disk_direction": pdata.NewValueString("write"), - }}, + case "MAX_DISK_PARTITION_LATENCY_WRITE": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasDiskPartitionLatencyMaxDataPoint(ts, float64(*dp.Value), AttributeDiskDirection.Write) + } // Measures latency per operation type of the disk partition used by MongoDB. 
- "DISK_PARTITION_SPACE_FREE": {"mongodbatlas.disk.partition.space.average", map[string]pdata.Value{ - "disk_status": pdata.NewValueString("free"), - }}, - - "MAX_DISK_PARTITION_SPACE_FREE": {"mongodbatlas.disk.partition.space.max", map[string]pdata.Value{ - "disk_status": pdata.NewValueString("free"), - }}, - - "DISK_PARTITION_SPACE_USED": {"mongodbatlas.disk.partition.space.average", map[string]pdata.Value{ - "disk_status": pdata.NewValueString("used"), - }}, - - "MAX_DISK_PARTITION_SPACE_USED": {"mongodbatlas.disk.partition.space.max", map[string]pdata.Value{ - "disk_status": pdata.NewValueString("used"), - }}, - - "DISK_PARTITION_SPACE_PERCENT_FREE": {"mongodbatlas.disk.partition.utilization.average", map[string]pdata.Value{ - "disk_status": pdata.NewValueString("free"), - }}, - "MAX_DISK_PARTITION_SPACE_PERCENT_FREE": {"mongodbatlas.disk.partition.utilization.max", map[string]pdata.Value{ - "disk_status": pdata.NewValueString("free"), - }}, - "DISK_PARTITION_SPACE_PERCENT_USED": {"mongodbatlas.disk.partition.utilization.average", map[string]pdata.Value{ - "disk_status": pdata.NewValueString("used"), - }}, - "MAX_DISK_PARTITION_SPACE_PERCENT_USED": {"mongodbatlas.disk.partition.utilization.max", map[string]pdata.Value{ - "disk_status": pdata.NewValueString("used"), - }}, + case "DISK_PARTITION_SPACE_FREE": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasDiskPartitionSpaceAverageDataPoint(ts, float64(*dp.Value), AttributeDiskStatus.Free) + } - // Process Database Measurements (https://docs.atlas.mongodb.com/reference/api/process-disks-measurements/) - "DATABASE_COLLECTION_COUNT": {"mongodbatlas.db.counts", map[string]pdata.Value{ - "object_type": pdata.NewValueString("collection"), - }}, - "DATABASE_INDEX_COUNT": {"mongodbatlas.db.counts", map[string]pdata.Value{ - "object_type": pdata.NewValueString("index"), - }}, - "DATABASE_EXTENT_COUNT": {"mongodbatlas.db.counts", map[string]pdata.Value{ - "object_type": pdata.NewValueString("extent"), - }}, - "DATABASE_OBJECT_COUNT": {"mongodbatlas.db.counts", map[string]pdata.Value{ - "object_type": pdata.NewValueString("object"), - }}, - "DATABASE_VIEW_COUNT": {"mongodbatlas.db.counts", map[string]pdata.Value{ - "object_type": pdata.NewValueString("view"), - }}, - "DATABASE_AVERAGE_OBJECT_SIZE": {"mongodbatlas.db.size", map[string]pdata.Value{ - "object_type": pdata.NewValueString("object"), - }}, - "DATABASE_STORAGE_SIZE": {"mongodbatlas.db.size", map[string]pdata.Value{ - "object_type": pdata.NewValueString("storage"), - }}, - "DATABASE_INDEX_SIZE": {"mongodbatlas.db.size", map[string]pdata.Value{ - "object_type": pdata.NewValueString("index"), - }}, - "DATABASE_DATA_SIZE": {"mongodbatlas.db.size", map[string]pdata.Value{ - "object_type": pdata.NewValueString("data"), - }}, -} + case "MAX_DISK_PARTITION_SPACE_FREE": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasDiskPartitionSpaceMaxDataPoint(ts, float64(*dp.Value), AttributeDiskStatus.Free) + } -func mappedMetricByName(name string) (MetricIntf, map[string]pdata.Value) { - info, found := metricNameMapping[name] - if !found { - return nil, nil - } + case "DISK_PARTITION_SPACE_USED": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasDiskPartitionSpaceAverageDataPoint(ts, float64(*dp.Value), AttributeDiskStatus.Used) + } - metricinf := Metrics.ByName(info.metricName) - return metricinf, info.attributes -} + case 
"MAX_DISK_PARTITION_SPACE_USED": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasDiskPartitionSpaceMaxDataPoint(ts, float64(*dp.Value), AttributeDiskStatus.Used) + } -func MeasurementsToMetric(meas *mongodbatlas.Measurements, buildUnrecognized bool) (*pdata.Metric, error) { - intf, attrs := mappedMetricByName(meas.Name) - if intf == nil { - return nil, nil // Not an error- simply skipping undocumented metrics - } - m := pdata.NewMetric() - intf.Init(m) - switch m.DataType() { - case pdata.MetricDataTypeGauge: - datapoints := m.Gauge().DataPoints() - err := addDataPoints(datapoints, meas, attrs) - if err != nil { - return nil, err - } - case pdata.MetricDataTypeSum: - datapoints := m.Sum().DataPoints() - err := addDataPoints(datapoints, meas, attrs) - if err != nil { - return nil, err + case "DISK_PARTITION_SPACE_PERCENT_FREE": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasDiskPartitionUtilizationAverageDataPoint(ts, float64(*dp.Value), AttributeDiskStatus.Free) + } + case "MAX_DISK_PARTITION_SPACE_PERCENT_FREE": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasDiskPartitionUtilizationMaxDataPoint(ts, float64(*dp.Value), AttributeDiskStatus.Free) + } + case "DISK_PARTITION_SPACE_PERCENT_USED": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasDiskPartitionUtilizationAverageDataPoint(ts, float64(*dp.Value), AttributeDiskStatus.Used) + } + case "MAX_DISK_PARTITION_SPACE_PERCENT_USED": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasDiskPartitionUtilizationMaxDataPoint(ts, float64(*dp.Value), AttributeDiskStatus.Used) + } + + // Process Database Measurements (https://docs.atlas.mongodb.com/reference/api/process-disks-measurements/) + case "DATABASE_COLLECTION_COUNT": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasDbCountsDataPoint(ts, float64(*dp.Value), AttributeObjectType.Collection) + } + case "DATABASE_INDEX_COUNT": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasDbCountsDataPoint(ts, float64(*dp.Value), AttributeObjectType.Index) + } + case "DATABASE_EXTENT_COUNT": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasDbCountsDataPoint(ts, float64(*dp.Value), AttributeObjectType.Extent) } + case "DATABASE_OBJECT_COUNT": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasDbCountsDataPoint(ts, float64(*dp.Value), AttributeObjectType.Object) + } + case "DATABASE_VIEW_COUNT": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasDbCountsDataPoint(ts, float64(*dp.Value), AttributeObjectType.View) + } + case "DATABASE_AVERAGE_OBJECT_SIZE": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasDbSizeDataPoint(ts, float64(*dp.Value), AttributeObjectType.Object) + } + case "DATABASE_STORAGE_SIZE": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasDbSizeDataPoint(ts, float64(*dp.Value), AttributeObjectType.Storage) + } + case "DATABASE_INDEX_SIZE": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts 
pdata.Timestamp) { + mb.RecordMongodbatlasDbSizeDataPoint(ts, float64(*dp.Value), AttributeObjectType.Index) + } + case "DATABASE_DATA_SIZE": + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + mb.RecordMongodbatlasDbSizeDataPoint(ts, float64(*dp.Value), AttributeObjectType.Data) + } + default: - return nil, fmt.Errorf("unrecognized data type for metric '%s'", meas.Name) + return nil } +} - return &m, nil +func MeasurementsToMetric(mb *MetricsBuilder, meas *mongodbatlas.Measurements, buildUnrecognized bool) error { + recordFunc := getRecordFunc(meas.Name) + if recordFunc == nil { + return nil + } + + return addDataPoint(mb, meas, recordFunc) } -func addDataPoints(datapoints pdata.NumberDataPointSlice, meas *mongodbatlas.Measurements, attrs map[string]pdata.Value) error { +func addDataPoint(mb *MetricsBuilder, meas *mongodbatlas.Measurements, recordFunc metricRecordFunc) error { for _, point := range meas.DataPoints { if point.Value != nil { - dp := datapoints.AppendEmpty() curTime, err := time.Parse(time.RFC3339, point.Timestamp) if err != nil { return err } - for k, v := range attrs { - dp.Attributes().Upsert(k, v) - } - dp.SetTimestamp(pdata.NewTimestampFromTime(curTime)) - dp.SetDoubleVal(float64(*point.Value)) + recordFunc(mb, point, pdata.NewTimestampFromTime(curTime)) } } return nil diff --git a/receiver/mongodbatlasreceiver/internal/metadata/metric_name_mapping_test.go b/receiver/mongodbatlasreceiver/internal/metadata/metric_name_mapping_test.go deleted file mode 100644 index 98083c0d4c0d..000000000000 --- a/receiver/mongodbatlasreceiver/internal/metadata/metric_name_mapping_test.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metadata - -import ( - "fmt" - "strings" - "testing" - - "go.opentelemetry.io/collector/model/pdata" -) - -func TestKnownMetricsMapped(t *testing.T) { - missingMetrics := make([]string, 0) - wrongNames := make([]string, 0) - // Test all at once so we get one failure with all - // missing or unmatching metrics. 
- for mongodbName, metricData := range metricNameMapping { - m := pdata.NewMetric() - metricf, _ := mappedMetricByName(mongodbName) - if metricf == nil { - missingMetrics = append(missingMetrics, mongodbName) - } else { - metricf.Init(m) - if metricData.metricName != m.Name() { - wrongNames = append(wrongNames, fmt.Sprintf("found: %s, expected: %s", m.Name(), metricData.metricName)) - } - } - } - - if len(missingMetrics) > 0 { - t.Errorf("Missing metrics with MongoDB names: %s", strings.Join(missingMetrics, ", ")) - } - - if len(wrongNames) > 0 { - t.Errorf("Mismatching names found: %s", strings.Join(wrongNames, ",")) - } -} diff --git a/receiver/mongodbatlasreceiver/internal/metric_conversion.go b/receiver/mongodbatlasreceiver/internal/metric_conversion.go index 183856d7a972..40be503e9ac0 100644 --- a/receiver/mongodbatlasreceiver/internal/metric_conversion.go +++ b/receiver/mongodbatlasreceiver/internal/metric_conversion.go @@ -19,33 +19,25 @@ import ( "github.com/hashicorp/go-multierror" "go.mongodb.org/atlas/mongodbatlas" - "go.opentelemetry.io/collector/model/pdata" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver/internal/metadata" ) func processMeasurements( - resource pdata.Resource, + mb *metadata.MetricsBuilder, measurements []*mongodbatlas.Measurements, -) (pdata.Metrics, error) { +) error { allErrors := make([]error, 0) - metricSlice := pdata.NewMetrics() - rm := metricSlice.ResourceMetrics().AppendEmpty() - resource.CopyTo(rm.Resource()) - ilms := rm.ScopeMetrics().AppendEmpty() + for _, meas := range measurements { - metric, err := metadata.MeasurementsToMetric(meas, false) + err := metadata.MeasurementsToMetric(mb, meas, false) if err != nil { allErrors = append(allErrors, err) - } else { - if metric != nil { - // TODO: still handling skipping metrics, there's got to be better - metric.CopyTo(ilms.Metrics().AppendEmpty()) - } } } + if len(allErrors) > 0 { - return metricSlice, multierror.Append(errors.New("errors occurred while processing measurements"), allErrors...) + return multierror.Append(errors.New("errors occurred while processing measurements"), allErrors...) } - return metricSlice, nil + return nil } diff --git a/receiver/mongodbatlasreceiver/internal/mongodb_atlas_client.go b/receiver/mongodbatlasreceiver/internal/mongodb_atlas_client.go index f11995b61c91..c3b51dc4527f 100644 --- a/receiver/mongodbatlasreceiver/internal/mongodb_atlas_client.go +++ b/receiver/mongodbatlasreceiver/internal/mongodb_atlas_client.go @@ -25,8 +25,9 @@ import ( "github.com/pkg/errors" "go.mongodb.org/atlas/mongodbatlas" "go.opentelemetry.io/collector/exporter/exporterhelper" - "go.opentelemetry.io/collector/model/pdata" "go.uber.org/zap" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver/internal/metadata" ) type clientRoundTripper struct { @@ -309,14 +310,14 @@ func (s *MongoDBAtlasClient) ProcessDatabases( // ProcessMetrics returns a set of metrics associated with the specified running process. 
func (s *MongoDBAtlasClient) ProcessMetrics( ctx context.Context, - resource pdata.Resource, + mb *metadata.MetricsBuilder, projectID string, host string, port int, start string, end string, resolution string, -) (pdata.Metrics, error) { +) error { allMeasurements := make([]*mongodbatlas.Measurements, 0) pageNum := 1 for { @@ -340,7 +341,7 @@ func (s *MongoDBAtlasClient) ProcessMetrics( break } } - return processMeasurements(resource, allMeasurements) + return processMeasurements(mb, allMeasurements) } func (s *MongoDBAtlasClient) getProcessMeasurementsPage( @@ -375,7 +376,7 @@ func (s *MongoDBAtlasClient) getProcessMeasurementsPage( // ProcessDatabaseMetrics returns metrics about a particular database running within a MongoDB Atlas process func (s *MongoDBAtlasClient) ProcessDatabaseMetrics( ctx context.Context, - resource pdata.Resource, + mb *metadata.MetricsBuilder, projectID string, host string, port int, @@ -383,7 +384,7 @@ func (s *MongoDBAtlasClient) ProcessDatabaseMetrics( start string, end string, resolution string, -) (pdata.Metrics, error) { +) error { allMeasurements := make([]*mongodbatlas.Measurements, 0) pageNum := 1 for { @@ -399,7 +400,7 @@ func (s *MongoDBAtlasClient) ProcessDatabaseMetrics( resolution, ) if err != nil { - return pdata.Metrics{}, err + return err } pageNum++ allMeasurements = append(allMeasurements, measurements...) @@ -407,7 +408,7 @@ func (s *MongoDBAtlasClient) ProcessDatabaseMetrics( break } } - return processMeasurements(resource, allMeasurements) + return processMeasurements(mb, allMeasurements) } func (s *MongoDBAtlasClient) getProcessDatabaseMeasurementsPage( @@ -489,7 +490,7 @@ func (s *MongoDBAtlasClient) getProcessDisksPage( // ProcessDiskMetrics returns metrics supplied for a particular disk partition used by a MongoDB Atlas process func (s *MongoDBAtlasClient) ProcessDiskMetrics( ctx context.Context, - resource pdata.Resource, + mb *metadata.MetricsBuilder, projectID string, host string, port int, @@ -497,7 +498,7 @@ func (s *MongoDBAtlasClient) ProcessDiskMetrics( start string, end string, resolution string, -) (pdata.Metrics, error) { +) error { allMeasurements := make([]*mongodbatlas.Measurements, 0) pageNum := 1 for { @@ -513,7 +514,7 @@ func (s *MongoDBAtlasClient) ProcessDiskMetrics( resolution, ) if err != nil { - return pdata.Metrics{}, err + return err } pageNum++ allMeasurements = append(allMeasurements, measurements...) 
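Stepping back from the client plumbing for a moment: the refactor in metric_name_mapping.go above replaces the old name-to-(metric, attributes) lookup table with a `switch` that returns a typed record function, so each Atlas measurement is written through the generated `MetricsBuilder` instead of being assembled by hand into `pdata` structures. Below is a minimal, self-contained sketch of that dispatch pattern; the `MetricsBuilder` and `DataPoints` types are simplified stand-ins for the generated builder and the `go.mongodb.org/atlas` type, not the real API.

```go
package main

import (
	"fmt"
	"time"
)

// Simplified stand-ins for the generated MetricsBuilder and the
// go.mongodb.org/atlas DataPoints type; illustration only.
type MetricsBuilder struct{}

func (mb *MetricsBuilder) RecordFtsDiskUsedDataPoint(ts time.Time, val float64) {
	fmt.Printf("mongodbatlas.system.fts.disk.used = %v @ %s\n", val, ts.Format(time.RFC3339))
}

type DataPoints struct {
	Timestamp string   // RFC3339 string, as the Atlas API returns it
	Value     *float32 // nil when Atlas reports no sample for an interval
}

// metricRecordFunc is the dispatch type: a closure that already knows
// which typed Record*DataPoint method (and which attribute) to call.
type metricRecordFunc func(mb *MetricsBuilder, dp *DataPoints, ts time.Time)

// getRecordFunc returns nil for undocumented metric names; callers treat
// nil as "skip this measurement", not as an error.
func getRecordFunc(name string) metricRecordFunc {
	switch name {
	case "FTS_DISK_USAGE":
		return func(mb *MetricsBuilder, dp *DataPoints, ts time.Time) {
			mb.RecordFtsDiskUsedDataPoint(ts, float64(*dp.Value))
		}
	default:
		return nil
	}
}

func main() {
	mb := &MetricsBuilder{}
	v := float32(1024)
	points := []*DataPoints{
		{Timestamp: "2022-04-05T16:53:07Z", Value: &v},
		{Timestamp: "2022-04-05T16:54:07Z", Value: nil}, // skipped, like addDataPoint's nil guard
	}
	record := getRecordFunc("FTS_DISK_USAGE")
	for _, dp := range points {
		if dp.Value == nil {
			continue
		}
		ts, err := time.Parse(time.RFC3339, dp.Timestamp)
		if err != nil {
			fmt.Println("bad timestamp:", err)
			continue
		}
		record(mb, dp, ts)
	}
}
```

Note that the real `addDataPoint` returns the timestamp parse error to its caller; the sketch continues instead so it can be run as-is.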
@@ -521,7 +522,7 @@ func (s *MongoDBAtlasClient) ProcessDiskMetrics( break } } - return processMeasurements(resource, allMeasurements) + return processMeasurements(mb, allMeasurements) } func (s *MongoDBAtlasClient) processDiskMeasurementsPage( diff --git a/receiver/mongodbatlasreceiver/metadata.yaml b/receiver/mongodbatlasreceiver/metadata.yaml index 0364ab90b0ef..8a03e605aed9 100644 --- a/receiver/mongodbatlasreceiver/metadata.yaml +++ b/receiver/mongodbatlasreceiver/metadata.yaml @@ -1,5 +1,33 @@ name: mongoatlasreceiver +resource_attributes: + mongodb_atlas.org_name: + description: Organization Name + type: string + mongodb_atlas.project.name: + description: Project Name + type: string + mongodb_atlas.project.id: + description: Project ID + type: string + mongodb_atlas.host.name: + description: Hostname of the process + type: string + mongodb_atlas.process.port: + description: Port process is bound to + type: string + mongodb_atlas.process.type_name: + description: Process type + type: string + mongodb_atlas.process.id: + description: ID of the process + type: string + mongodb_atlas.db.name: + description: Name of the Database + type: string + mongodb_atlas.disk.partition: + description: Name of a disk partition + type: string attributes: cpu_state: description: CPU state @@ -64,6 +92,9 @@ attributes: - virtual - mapped - computed + - shared + - free + - used direction: description: Network traffic direction enum: @@ -85,6 +116,7 @@ attributes: - delete - getmore - insert + - scan_and_order cluster_role: description: Whether process is acting as replica or primary enum: @@ -119,6 +151,15 @@ attributes: enum: - free - used + memory_status: + description: Memory measurement type + enum: + - available + - buffers + - cached + - free + - shared + - used object_type: description: MongoDB object type enum: @@ -178,7 +219,7 @@ metrics: sum: value_type: double monotonic: false - aggregation: cumulative + aggregation: cumulative mongodbatlas.process.cpu.usage.max: enabled: true description: CPU Usage (%) @@ -461,7 +502,7 @@ metrics: description: System Memory Usage extended_documentation: Aggregate of MongoDB Metrics MAX_SYSTEM_MEMORY_CACHED, MAX_SYSTEM_MEMORY_AVAILABLE, MAX_SYSTEM_MEMORY_USED, MAX_SYSTEM_MEMORY_BUFFERS, MAX_SYSTEM_MEMORY_FREE, MAX_SYSTEM_MEMORY_SHARED unit: KiBy - attributes: [memory_state] + attributes: [memory_status] gauge: value_type: double mongodbatlas.system.memory.usage.average: @@ -469,7 +510,7 @@ metrics: description: System Memory Usage extended_documentation: Aggregate of MongoDB Metrics SYSTEM_MEMORY_AVAILABLE, SYSTEM_MEMORY_BUFFERS, SYSTEM_MEMORY_USED, SYSTEM_MEMORY_CACHED, SYSTEM_MEMORY_SHARED, SYSTEM_MEMORY_FREE unit: KiBy - attributes: [memory_state] + attributes: [memory_status] gauge: value_type: double mongodbatlas.system.network.io.max: @@ -549,6 +590,7 @@ metrics: description: Disk partition utilization (%) extended_documentation: MongoDB Metrics MAX_DISK_PARTITION_UTILIZATION unit: 1 + attributes: [disk_status] gauge: value_type: double mongodbatlas.disk.partition.utilization.average: @@ -556,6 +598,7 @@ metrics: description: Disk partition utilization (%) extended_documentation: MongoDB Metrics DISK_PARTITION_UTILIZATION unit: 1 + attributes: [disk_status] gauge: value_type: double mongodbatlas.disk.partition.latency.max: diff --git a/receiver/mongodbatlasreceiver/receiver.go b/receiver/mongodbatlasreceiver/receiver.go index 26dfe2395983..5a6c1da8da15 100644 --- a/receiver/mongodbatlasreceiver/receiver.go +++ b/receiver/mongodbatlasreceiver/receiver.go 
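The receiver.go hunks that follow wire this together: rather than hand-building a `pdata.Resource` per process, database, and disk and shuffling `ResourceMetricsSlice`s around, the scraper records everything into one shared builder and then calls `EmitForResource` with `With*` options. A rough sketch of that accumulate-then-flush contract, using toy types in place of the generated `metadata` package:

```go
package main

import "fmt"

// Toy stand-ins for the generated metadata package; the real builder
// emits pdata.Metrics and has one With* option per resource attribute.
type ResourceOption func(attrs map[string]string)

func WithMongodbAtlasOrgName(v string) ResourceOption {
	return func(attrs map[string]string) { attrs["mongodb_atlas.org_name"] = v }
}

func WithMongodbAtlasHostName(v string) ResourceOption {
	return func(attrs map[string]string) { attrs["mongodb_atlas.host.name"] = v }
}

type MetricsBuilder struct{ pending int }

func (mb *MetricsBuilder) RecordDataPoint(v float64) { mb.pending++ }

// EmitForResource attaches every data point recorded since the previous
// call to a resource described by the options, then resets the buffer.
func (mb *MetricsBuilder) EmitForResource(opts ...ResourceOption) {
	attrs := map[string]string{}
	for _, opt := range opts {
		opt(attrs)
	}
	fmt.Printf("emitting %d point(s) for %v\n", mb.pending, attrs)
	mb.pending = 0
}

func main() {
	mb := &MetricsBuilder{}
	for _, host := range []string{"host-a", "host-b"} {
		// record all process-level measurements for this host...
		mb.RecordDataPoint(1.0)
		mb.RecordDataPoint(2.0)
		// ...then flush them under that host's resource.
		mb.EmitForResource(
			WithMongodbAtlasOrgName("my-org"),
			WithMongodbAtlasHostName(host),
		)
	}
}
```

This is also why the patch threads `orgName` through the `extractProcess*Metrics` helpers: each `EmitForResource` call has to restate the full attribute set, since the builder no longer inherits a copied parent resource.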
@@ -26,6 +26,7 @@ import ( "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver/internal" + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbatlasreceiver/internal/metadata" ) type receiver struct { @@ -33,6 +34,7 @@ type receiver struct { cfg *Config client *internal.MongoDBAtlasClient lastRun time.Time + mb *metadata.MetricsBuilder } type timeconstraints struct { @@ -46,18 +48,17 @@ func newMongoDBAtlasScraper(log *zap.Logger, cfg *Config) (scraperhelper.Scraper if err != nil { return nil, err } - recv := &receiver{log: log, cfg: cfg, client: client} + recv := &receiver{log: log, cfg: cfg, client: client, mb: metadata.NewMetricsBuilder(cfg.Metrics)} return scraperhelper.NewScraper(typeStr, recv.scrape, scraperhelper.WithShutdown(recv.shutdown)) } func (s *receiver) scrape(ctx context.Context) (pdata.Metrics, error) { now := time.Now() - metrics, err := s.poll(ctx, s.timeConstraints(now)) - if err != nil { + if err := s.poll(ctx, s.timeConstraints(now)); err != nil { return pdata.Metrics{}, err } s.lastRun = now - return metrics, nil + return s.mb.Emit(), nil } func (s *receiver) timeConstraints(now time.Time) timeconstraints { @@ -78,108 +79,95 @@ func (s *receiver) shutdown(context.Context) error { return s.client.Shutdown() } -func (s *receiver) poll(ctx context.Context, time timeconstraints) (pdata.Metrics, error) { - resourceAttributes := pdata.NewMap() - allMetrics := pdata.NewMetrics() +func (s *receiver) poll(ctx context.Context, time timeconstraints) error { orgs, err := s.client.Organizations(ctx) if err != nil { - return pdata.Metrics{}, errors.Wrap(err, "error retrieving organizations") + return errors.Wrap(err, "error retrieving organizations") } for _, org := range orgs { - resourceAttributes.InsertString("mongodb_atlas.org_name", org.Name) projects, err := s.client.Projects(ctx, org.ID) if err != nil { - return pdata.Metrics{}, errors.Wrap(err, "error retrieving projects") + return errors.Wrap(err, "error retrieving projects") } for _, project := range projects { - resourceAttributes.InsertString("mongodb_atlas.project.name", project.Name) - resourceAttributes.InsertString("mongodb_atlas.project.id", project.ID) processes, err := s.client.Processes(ctx, project.ID) if err != nil { - return pdata.Metrics{}, errors.Wrap(err, "error retrieving MongoDB Atlas processes") + return errors.Wrap(err, "error retrieving MongoDB Atlas processes") } for _, process := range processes { - resource := pdata.NewResource() - resourceAttributes.CopyTo(resource.Attributes()) - resource.Attributes().InsertString("host.name", process.Hostname) - resource.Attributes().InsertString("process.port", strconv.Itoa(process.Port)) - resource.Attributes().InsertString("process.type_name", process.TypeName) - resource.Attributes().InsertString("process.id", process.ID) - resourceMetrics, err := s.extractProcessMetrics( + if err := s.extractProcessMetrics( ctx, time, + org.Name, project, process, - resource, - ) - if err != nil { - return pdata.Metrics{}, err + ); err != nil { + return err } - resourceMetrics.MoveAndAppendTo(allMetrics.ResourceMetrics()) + s.mb.EmitForResource( + metadata.WithMongodbAtlasOrgName(org.Name), + metadata.WithMongodbAtlasProjectName(project.Name), + metadata.WithMongodbAtlasProjectID(project.ID), + metadata.WithMongodbAtlasHostName(process.Hostname), + metadata.WithMongodbAtlasProcessPort(strconv.Itoa(process.Port)), + metadata.WithMongodbAtlasProcessTypeName(process.TypeName), + 
metadata.WithMongodbAtlasProcessID(process.ID), + ) } } } - return allMetrics, nil + return nil } func (s *receiver) extractProcessMetrics( ctx context.Context, time timeconstraints, + orgName string, project *mongodbatlas.Project, process *mongodbatlas.Process, - resource pdata.Resource, -) (pdata.ResourceMetricsSlice, error) { - processMetrics := pdata.NewResourceMetricsSlice() +) error { // This receiver will support both logs and metrics- if one pipeline // or the other is not configured, it will be nil. - metrics, err := + if err := s.client.ProcessMetrics( ctx, - resource, + s.mb, project.ID, process.Hostname, process.Port, time.start, time.end, time.resolution, - ) - if err != nil { - return pdata.ResourceMetricsSlice{}, errors.Wrap( + ); err != nil { + return errors.Wrap( err, "error when polling process metrics from MongoDB Atlas", ) } - metrics.ResourceMetrics().MoveAndAppendTo(processMetrics) - databaseMetrics, err := s.extractProcessDatabaseMetrics(ctx, time, project, process, resource) - if err != nil { - return pdata.ResourceMetricsSlice{}, errors.Wrap( + if err := s.extractProcessDatabaseMetrics(ctx, time, orgName, project, process); err != nil { + return errors.Wrap( err, "error when polling process database metrics from MongoDB Atlas", ) } - databaseMetrics.MoveAndAppendTo(processMetrics) - diskMetrics, err := s.extractProcessDiskMetrics(ctx, time, project, process, resource) - if err != nil { - return pdata.ResourceMetricsSlice{}, errors.Wrap( + if err := s.extractProcessDiskMetrics(ctx, time, orgName, project, process); err != nil { + return errors.Wrap( err, "error when polling process disk metrics from MongoDB Atlas", ) } - diskMetrics.MoveAndAppendTo(processMetrics) - - return processMetrics, nil + return nil } func (s *receiver) extractProcessDatabaseMetrics( ctx context.Context, time timeconstraints, + orgName string, project *mongodbatlas.Project, process *mongodbatlas.Process, - resource pdata.Resource, -) (pdata.ResourceMetricsSlice, error) { - pdMetrics := pdata.NewResourceMetricsSlice() +) error { processDatabases, err := s.client.ProcessDatabases( ctx, project.ID, @@ -187,17 +175,13 @@ func (s *receiver) extractProcessDatabaseMetrics( process.Port, ) if err != nil { - return pdata.ResourceMetricsSlice{}, errors.Wrap(err, "error retrieving process databases") + return errors.Wrap(err, "error retrieving process databases") } for _, db := range processDatabases { - dbResource := pdata.NewResource() - resource.CopyTo(dbResource) - resource.Attributes(). 
- InsertString("mongodb_atlas.db.name", db.DatabaseName) - metrics, err := s.client.ProcessDatabaseMetrics( + if err := s.client.ProcessDatabaseMetrics( ctx, - resource, + s.mb, project.ID, process.Hostname, process.Port, @@ -205,34 +189,37 @@ func (s *receiver) extractProcessDatabaseMetrics( time.start, time.end, time.resolution, - ) - if err != nil { - return pdata.ResourceMetricsSlice{}, errors.Wrap( + ); err != nil { + return errors.Wrap( err, "error when polling database metrics from MongoDB Atlas", ) } - metrics.ResourceMetrics().MoveAndAppendTo(pdMetrics) + s.mb.EmitForResource( + metadata.WithMongodbAtlasOrgName(orgName), + metadata.WithMongodbAtlasProjectName(project.Name), + metadata.WithMongodbAtlasProjectID(project.ID), + metadata.WithMongodbAtlasHostName(process.Hostname), + metadata.WithMongodbAtlasProcessPort(strconv.Itoa(process.Port)), + metadata.WithMongodbAtlasProcessTypeName(process.TypeName), + metadata.WithMongodbAtlasProcessID(process.ID), + metadata.WithMongodbAtlasDbName(db.DatabaseName), + ) } - return pdMetrics, nil + return nil } func (s *receiver) extractProcessDiskMetrics( ctx context.Context, time timeconstraints, + orgName string, project *mongodbatlas.Project, process *mongodbatlas.Process, - resource pdata.Resource, -) (pdata.ResourceMetricsSlice, error) { - pdMetrics := pdata.NewResourceMetricsSlice() +) error { for _, disk := range s.client.ProcessDisks(ctx, project.ID, process.Hostname, process.Port) { - diskResource := pdata.NewResource() - resource.CopyTo(diskResource) - diskResource.Attributes(). - InsertString("mongodb_atlas.disk.partition", disk.PartitionName) - metrics, err := s.client.ProcessDiskMetrics( + if err := s.client.ProcessDiskMetrics( ctx, - diskResource, + s.mb, project.ID, process.Hostname, process.Port, @@ -240,14 +227,22 @@ func (s *receiver) extractProcessDiskMetrics( time.start, time.end, time.resolution, - ) - if err != nil { - return pdata.ResourceMetricsSlice{}, errors.Wrap( + ); err != nil { + return errors.Wrap( err, "error when polling from MongoDB Atlas", ) } - metrics.ResourceMetrics().MoveAndAppendTo(pdMetrics) + s.mb.EmitForResource( + metadata.WithMongodbAtlasOrgName(orgName), + metadata.WithMongodbAtlasProjectName(project.Name), + metadata.WithMongodbAtlasProjectID(project.ID), + metadata.WithMongodbAtlasHostName(process.Hostname), + metadata.WithMongodbAtlasProcessPort(strconv.Itoa(process.Port)), + metadata.WithMongodbAtlasProcessTypeName(process.TypeName), + metadata.WithMongodbAtlasProcessID(process.ID), + metadata.WithMongodbAtlasDiskPartition(disk.PartitionName), + ) } - return pdMetrics, nil + return nil } From 83d292cd8d5665f4bd69c209f77b76da6ac7dd19 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miko=C5=82aj=20=C5=9Awi=C4=85tek?= Date: Mon, 11 Apr 2022 16:45:12 +0200 Subject: [PATCH 24/59] [extension/filestorage] use correct bbolt options for compaction (#9134) --- CHANGELOG.md | 3 ++- extension/storage/filestorage/client.go | 13 +++++++------ 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6a4250687cf3..10a0f39a62df 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -28,6 +28,7 @@ ### 🧰 Bug fixes 🧰 +- `filestorageextension`: use correct bbolt options for compaction (#9134) - `hostmetricsreceiver`: Use cpu times for time delta in cpu.utilization calculation (#8857) - `dynatraceexporter`: Remove overly verbose stacktrace from certain logs (#8989) - `googlecloudexporter`: fix the `exporter.googlecloud.OTLPDirect` fature-gate, which was not applied when the flag was provided 
(#9116) @@ -2007,4 +2008,4 @@ First release of OpenTelemetry Collector Contrib. [v0.2.7]: https://github.com/open-telemetry/opentelemetry-collector-contrib/compare/v0.2.6...v0.2.7 [v0.2.6]: https://github.com/open-telemetry/opentelemetry-collector-contrib/compare/v0.0.5...v0.2.6 [v0.0.5]: https://github.com/open-telemetry/opentelemetry-collector-contrib/compare/v0.0.1...v0.0.5 -[v0.0.1]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/v0.0.1 \ No newline at end of file +[v0.0.1]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/v0.0.1 diff --git a/extension/storage/filestorage/client.go b/extension/storage/filestorage/client.go index 09ed200d1922..ff9a2f4d3ce0 100644 --- a/extension/storage/filestorage/client.go +++ b/extension/storage/filestorage/client.go @@ -31,13 +31,17 @@ type fileStorageClient struct { db *bbolt.DB } -func newClient(filePath string, timeout time.Duration) (*fileStorageClient, error) { - options := &bbolt.Options{ +func bboltOptions(timeout time.Duration) *bbolt.Options { + return &bbolt.Options{ Timeout: timeout, NoSync: true, NoFreelistSync: true, FreelistType: bbolt.FreelistMapType, } +} + +func newClient(filePath string, timeout time.Duration) (*fileStorageClient, error) { + options := bboltOptions(timeout) db, err := bbolt.Open(filePath, 0600, options) if err != nil { return nil, err @@ -121,10 +125,7 @@ func (c *fileStorageClient) Compact(ctx context.Context, compactionDirectory str } // use temporary file as compaction target - options := &bbolt.Options{ - Timeout: timeout, - NoSync: true, - } + options := bboltOptions(timeout) // cannot reuse newClient as db shouldn't contain any bucket db, err := bbolt.Open(file.Name(), 0600, options) From 69fe7957848b0c83a28a362ce61d0f282fa69e9b Mon Sep 17 00:00:00 2001 From: Brandon Johnson Date: Mon, 11 Apr 2022 15:37:16 -0400 Subject: [PATCH 25/59] [receiver/windowsperfcountersreceiver] Emit double values instead of integer values (#9138) * emit double values instead of integral values --- CHANGELOG.md | 1 + .../testdata/scraper/sum_metric.json | 2 +- .../windowsperfcounters_scraper.go | 11 +---------- 3 files changed, 3 insertions(+), 11 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 10a0f39a62df..c5db81dafa2b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -32,6 +32,7 @@ - `hostmetricsreceiver`: Use cpu times for time delta in cpu.utilization calculation (#8857) - `dynatraceexporter`: Remove overly verbose stacktrace from certain logs (#8989) - `googlecloudexporter`: fix the `exporter.googlecloud.OTLPDirect` fature-gate, which was not applied when the flag was provided (#9116) +- `windowsperfcountersreceiver`: fix exported values being integers instead of doubles (#9138) ### 🚩 Deprecations 🚩 diff --git a/receiver/windowsperfcountersreceiver/testdata/scraper/sum_metric.json b/receiver/windowsperfcountersreceiver/testdata/scraper/sum_metric.json index 518db2dfa87b..28a5a81fd75f 100644 --- a/receiver/windowsperfcountersreceiver/testdata/scraper/sum_metric.json +++ b/receiver/windowsperfcountersreceiver/testdata/scraper/sum_metric.json @@ -12,7 +12,7 @@ "aggregationTemporality": "AGGREGATION_TEMPORALITY_CUMULATIVE", "dataPoints": [ { - "asInt": "19446169600", + "asDouble": "19446169600", "timeUnixNano": "1646862225775600200" } ] diff --git a/receiver/windowsperfcountersreceiver/windowsperfcounters_scraper.go b/receiver/windowsperfcountersreceiver/windowsperfcounters_scraper.go index 0390da65d91b..828986666d1a 100644 --- 
a/receiver/windowsperfcountersreceiver/windowsperfcounters_scraper.go +++ b/receiver/windowsperfcountersreceiver/windowsperfcounters_scraper.go @@ -165,15 +165,6 @@ func (s *scraper) scrape(context.Context) (pdata.Metrics, error) { return md, errs } -func initializeNumberDataPointAsDouble(dataPoint pdata.NumberDataPoint, now pdata.Timestamp, instanceLabel string, value float64) { - if instanceLabel != "" { - dataPoint.Attributes().InsertString(instanceLabelName, instanceLabel) - } - - dataPoint.SetTimestamp(now) - dataPoint.SetDoubleVal(value) -} - func initializeMetricDps(metric pdata.Metric, now pdata.Timestamp, counterValues []win_perf_counters.CounterValue, attributes map[string]string) { var dps pdata.NumberDataPointSlice @@ -197,6 +188,6 @@ func initializeMetricDps(metric pdata.Metric, now pdata.Timestamp, counterValues } dp.SetTimestamp(now) - dp.SetIntVal(int64(counterValue.Value)) + dp.SetDoubleVal(counterValue.Value) } } From c78cb9e829d716033b3906a2569ec58bfcbe348b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Juraci=20Paix=C3=A3o=20Kr=C3=B6hling?= Date: Mon, 11 Apr 2022 17:07:48 -0300 Subject: [PATCH 26/59] Fix flaky test in jaegerremotesampling (#9137) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes #9113 Signed-off-by: Juraci Paixão Kröhling --- extension/jaegerremotesampling/extension_test.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/extension/jaegerremotesampling/extension_test.go b/extension/jaegerremotesampling/extension_test.go index 49dc22540dae..afc915c976d6 100644 --- a/extension/jaegerremotesampling/extension_test.go +++ b/extension/jaegerremotesampling/extension_test.go @@ -59,14 +59,15 @@ func TestStartAndShutdownRemote(t *testing.T) { // create the mock server server := grpc.NewServer() + + // register the service + api_v2.RegisterSamplingManagerServer(server, &samplingServer{}) + go func() { err = server.Serve(lis) require.NoError(t, err) }() - // register the service - api_v2.RegisterSamplingManagerServer(server, &samplingServer{}) - // create the config, pointing to the mock server cfg := createDefaultConfig().(*Config) cfg.Source.Remote = &configgrpc.GRPCClientSettings{ From ee0c9ceb6a1ae53574e1ca590e30c52b7015d0f5 Mon Sep 17 00:00:00 2001 From: Antoine Toulme Date: Mon, 11 Apr 2022 13:11:18 -0700 Subject: [PATCH 27/59] remove log names from filters (#9131) --- CHANGELOG.md | 1 + .../coreinternal/processor/filterconfig/config.go | 9 --------- .../coreinternal/processor/filterlog/filterlog.go | 11 ----------- .../processor/filterlog/filterlog_test.go | 3 +-- .../processor/filterspan/filterspan_test.go | 4 ++-- processor/attributesprocessor/README.md | 10 +++------- processor/attributesprocessor/factory.go | 4 ---- 7 files changed, 7 insertions(+), 35 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c5db81dafa2b..0e0bc1368ca3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -25,6 +25,7 @@ - `datadogexporter`: Remove `GetCensoredKey` method from `APIConfig` struct (#8980) - `mongodbatlasreceiver`: Updated to uses newer metric builder which changed some metric and resource attributes (#9093) - `dynatraceexporter`: Make `serialization` package `/internal` (#9097) +- `attributesprocessor`: Remove log names from filters (#9131) ### 🧰 Bug fixes 🧰 diff --git a/internal/coreinternal/processor/filterconfig/config.go b/internal/coreinternal/processor/filterconfig/config.go index 5410e3b0e230..04c4f1e86a58 100644 --- a/internal/coreinternal/processor/filterconfig/config.go +++ 
b/internal/coreinternal/processor/filterconfig/config.go @@ -90,11 +90,6 @@ type MatchProperties struct { // This is an optional field. SpanNames []string `mapstructure:"span_names"` - // LogNames is a list of strings that the LogRecord's name field must match - // against. - // Deprecated: the Name field is removed from the log data model. - LogNames []string `mapstructure:"log_names"` - // LogBodies is a list of strings that the LogRecord's body field must match // against. LogBodies []string `mapstructure:"log_bodies"` @@ -123,10 +118,6 @@ type MatchProperties struct { // ValidateForSpans validates properties for spans. func (mp *MatchProperties) ValidateForSpans() error { - if len(mp.LogNames) > 0 { - return errors.New("log_names should not be specified for trace spans") - } - if len(mp.LogBodies) > 0 { return errors.New("log_bodies should not be specified for trace spans") } diff --git a/internal/coreinternal/processor/filterlog/filterlog.go b/internal/coreinternal/processor/filterlog/filterlog.go index c22fee6188bb..65cd246146b6 100644 --- a/internal/coreinternal/processor/filterlog/filterlog.go +++ b/internal/coreinternal/processor/filterlog/filterlog.go @@ -36,9 +36,6 @@ type Matcher interface { type propertiesMatcher struct { filtermatcher.PropertiesMatcher - // log names to compare to. - nameFilters filterset.FilterSet - // log bodies to compare to. bodyFilters filterset.FilterSet } @@ -58,13 +55,6 @@ func NewMatcher(mp *filterconfig.MatchProperties) (Matcher, error) { return nil, err } - var nameFS filterset.FilterSet - if len(mp.LogNames) > 0 { - nameFS, err = filterset.CreateFilterSet(mp.LogNames, &mp.Config) - if err != nil { - return nil, fmt.Errorf("error creating log record name filters: %v", err) - } - } var bodyFS filterset.FilterSet if len(mp.LogBodies) > 0 { bodyFS, err = filterset.CreateFilterSet(mp.LogBodies, &mp.Config) @@ -75,7 +65,6 @@ func NewMatcher(mp *filterconfig.MatchProperties) (Matcher, error) { return &propertiesMatcher{ PropertiesMatcher: rm, - nameFilters: nameFS, bodyFilters: bodyFS, }, nil } diff --git a/internal/coreinternal/processor/filterlog/filterlog_test.go b/internal/coreinternal/processor/filterlog/filterlog_test.go index 8aa0440048ce..db6f8c243add 100644 --- a/internal/coreinternal/processor/filterlog/filterlog_test.go +++ b/internal/coreinternal/processor/filterlog/filterlog_test.go @@ -43,9 +43,8 @@ func TestLogRecord_validateMatchesConfiguration_InvalidConfig(t *testing.T) { errorString: `at least one of "attributes", "libraries", "resources" or "log_bodies" field must be specified`, }, { - name: "empty_log_names_and_attributes", + name: "empty_log_bodies_and_attributes", property: filterconfig.MatchProperties{ - LogNames: []string{}, LogBodies: []string{}, }, errorString: `at least one of "attributes", "libraries", "resources" or "log_bodies" field must be specified`, diff --git a/internal/coreinternal/processor/filterspan/filterspan_test.go b/internal/coreinternal/processor/filterspan/filterspan_test.go index 089726ccd86a..1893a655188c 100644 --- a/internal/coreinternal/processor/filterspan/filterspan_test.go +++ b/internal/coreinternal/processor/filterspan/filterspan_test.go @@ -54,9 +54,9 @@ func TestSpan_validateMatchesConfiguration_InvalidConfig(t *testing.T) { { name: "log_properties", property: filterconfig.MatchProperties{ - LogNames: []string{"log"}, + LogBodies: []string{"log"}, }, - errorString: "log_names should not be specified for trace spans", + errorString: "log_bodies should not be specified for trace spans", }, { name: 
"invalid_match_type", diff --git a/processor/attributesprocessor/README.md b/processor/attributesprocessor/README.md index c29b41ae33ff..74715aed1dd7 100644 --- a/processor/attributesprocessor/README.md +++ b/processor/attributesprocessor/README.md @@ -166,13 +166,13 @@ if the input data should be included or excluded from the processor. To configur this option, under `include` and/or `exclude` at least `match_type` and one of the following is required: - For spans, one of `services`, `span_names`, `attributes`, `resources`, or `libraries` must be specified -with a non-empty value for a valid configuration. The `log_names`, `log_bodies`, `expressions`, `resource_attributes` and +with a non-empty value for a valid configuration. The `log_bodies`, `expressions`, `resource_attributes` and `metric_names` fields are invalid. -- For logs, one of `log_names`, `log_bodies`, `attributes`, `resources`, or `libraries` must be specified with a +- For logs, one of `log_bodies`, `attributes`, `resources`, or `libraries` must be specified with a non-empty value for a valid configuration. The `span_names`, `metric_names`, `expressions`, `resource_attributes`, and `services` fields are invalid. - For metrics, one of `metric_names`, `resources` must be specified -with a valid non-empty value for a valid configuration. The `span_names`, `log_names`, `log_bodies` and +with a valid non-empty value for a valid configuration. The `span_names`, `log_bodies` and `services` fields are invalid. @@ -214,10 +214,6 @@ attributes: # This is an optional field. span_names: [, ..., ] - # The log name must match at least one of the items. - # This is an optional field. - log_names: [, ..., ] - # The log body must match at least one of the items. # Currently only string body types are supported. # This is an optional field. 
diff --git a/processor/attributesprocessor/factory.go b/processor/attributesprocessor/factory.go index 74a33a073bc4..4374fba336a7 100644 --- a/processor/attributesprocessor/factory.go +++ b/processor/attributesprocessor/factory.go @@ -98,10 +98,6 @@ func createLogProcessor( return nil, fmt.Errorf("error creating \"attributes\" processor: %w of processor %v", err, cfg.ID()) } - if (oCfg.Include != nil && len(oCfg.Include.LogNames) > 0) || (oCfg.Exclude != nil && len(oCfg.Exclude.LogNames) > 0) { - set.Logger.Warn("log_names setting is deprecated and will be removed soon") - } - include, err := filterlog.NewMatcher(oCfg.Include) if err != nil { return nil, err From c03519ac7f2895b0e3da35d91c53d07c6f4dc02b Mon Sep 17 00:00:00 2001 From: Pablo Baeyens Date: Mon, 11 Apr 2022 22:30:54 +0200 Subject: [PATCH 28/59] [exporter/datadog] Add `host_metadata` section (#9100) * [exporter/datadog] Add `host_metadata` section * Add CHANGELOG entry * Apply suggestions from code review Co-authored-by: Albert Vaca Cintora * Apply suggestions from code review Co-authored-by: Kylian Serrania Co-authored-by: Albert Vaca Cintora Co-authored-by: Kylian Serrania --- CHANGELOG.md | 5 ++ exporter/datadogexporter/README.md | 4 +- exporter/datadogexporter/config/config.go | 74 +++++++++++++++++-- .../datadogexporter/config/config_test.go | 48 +++++++++--- .../datadogexporter/config/warn_envvars.go | 4 + .../config/warning_deprecated.go | 31 ++++++++ exporter/datadogexporter/example/config.yaml | 52 ++++++++++++- exporter/datadogexporter/factory.go | 5 ++ exporter/datadogexporter/factory_test.go | 46 +++++++++--- exporter/datadogexporter/hostmetadata.go | 14 ++-- exporter/datadogexporter/hostmetadata_test.go | 62 +++++++++------- exporter/datadogexporter/metrics_exporter.go | 2 +- .../datadogexporter/metrics_exporter_test.go | 4 +- exporter/datadogexporter/testdata/config.yaml | 16 +++- exporter/datadogexporter/traces_exporter.go | 2 +- .../datadogexporter/traces_exporter_test.go | 7 +- 16 files changed, 309 insertions(+), 67 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0e0bc1368ca3..10c4ca103980 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,7 @@ - `podmanreceiver`: Add API timeout configuration option (#9014) - `cmd/mdatagen`: Add `sem_conv_version` field to metadata.yaml that is used to set metrics SchemaURL (#9010) - `splunkheceporter`: Add an option to disable log or profiling data (#9065) +- `datadogexporter`: Add `host_metadata` configuration section to configure host metadata export (#9100) ### 🛑 Breaking changes 🛑 @@ -41,9 +42,13 @@ - `datadogexporter`: Deprecate `version` setting in favor of `service.version` semantic convention (#8784) - `datadogexporter`: Deprecate `env` setting in favor of `deployment.environment` semantic convention (#9017) - `datadogexporter`: Deprecate `GetHostTags` method from `TagsConfig` struct (#8975) +- `datadogexporter`: Deprecate `tags` setting in favor of `host_metadata::tags` (#9100) +- `datadogexporter`: Deprecate `send_metadata` setting in favor of `host_metadata::enabled` (#9100) +- `datadogexporter`: Deprecate `use_resource_metadata` setting in favor of `host_metadata::hostname_source` (#9100) - `prometheusexecreceiver`: Deprecate prom_exec receiver (#9058) - `fluentbitextension`: Deprecate Fluentbit extension (#9062) + ### 🚀 New components 🚀 ### 🧰 Bug fixes 🧰 diff --git a/exporter/datadogexporter/README.md b/exporter/datadogexporter/README.md index f1ccda09712d..1f441f347c8c 100644 --- a/exporter/datadogexporter/README.md +++ 
b/exporter/datadogexporter/README.md @@ -69,8 +69,8 @@ exporters: datadog/api: hostname: customhostname - tags: - - example:tag + host_metadata: + tags: [example:tag] api: key: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa diff --git a/exporter/datadogexporter/config/config.go b/exporter/datadogexporter/config/config.go index 967efd466c7d..e2e2b54d4c91 100644 --- a/exporter/datadogexporter/config/config.go +++ b/exporter/datadogexporter/config/config.go @@ -31,7 +31,7 @@ import ( var ( errUnsetAPIKey = errors.New("api.key is not set") - errNoMetadata = errors.New("only_metadata can't be enabled when send_metadata or use_resource_metadata is disabled") + errNoMetadata = errors.New("only_metadata can't be enabled when host_metadata::enabled = false or host_metadata::hostname_source != first_resource") ) // TODO: Import these from translator when we eliminate cyclic dependency. @@ -228,10 +228,11 @@ type TagsConfig struct { // Superseded by Tags if the latter is set. // Should not be set in the user-provided config. // - // Deprecated: [v0.47.0] Use Tags instead. + // Deprecated: [v0.47.0] Use `host_metadata::tags` HostMetadataConfig.Tags instead. EnvVarTags string `mapstructure:"envvartags"` // Tags is the list of default tags to add to every metric or trace. + // Deprecated: [v0.49.0] Use `host_metadata::tags` (HostMetadataConfig.Tags) Tags []string `mapstructure:"tags"` } @@ -250,6 +251,64 @@ func (t *TagsConfig) GetHostTags() []string { return tags } +// HostnameSource is the source for the hostname of host metadata. +type HostnameSource string + +const ( + // HostnameSourceFirstResource picks the host metadata hostname from the resource + // attributes on the first OTLP payload that gets to the exporter. If it is lacking any + // hostname-like attributes, it will fallback to 'config_or_system' behavior (see below). + // + // Do not use this hostname source if receiving data from multiple hosts. + HostnameSourceFirstResource HostnameSource = "first_resource" + + // HostnameSourceConfigOrSystem picks the host metadata hostname from the 'hostname' setting, + // and if this is empty, from available system APIs and cloud provider endpoints. + HostnameSourceConfigOrSystem HostnameSource = "config_or_system" +) + +var _ encoding.TextUnmarshaler = (*HostnameSource)(nil) + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +func (sm *HostnameSource) UnmarshalText(in []byte) error { + switch mode := HostnameSource(in); mode { + case HostnameSourceFirstResource, + HostnameSourceConfigOrSystem: + *sm = mode + return nil + default: + return fmt.Errorf("invalid host metadata hostname source %q", mode) + } +} + +// HostMetadataConfig defines the host metadata related configuration. +// Host metadata is the information used for populating the infrastructure list, +// the host map and providing host tags functionality. +// +// The exporter will send host metadata for a single host, whose name is chosen +// according to `host_metadata::hostname_source`. +type HostMetadataConfig struct { + // Enabled enables the host metadata functionality. + Enabled bool `mapstructure:"enabled"` + + // HostnameSource is the source for the hostname of host metadata. + // Valid values are 'first_resource' and 'config_or_system': + // - 'first_resource' picks the host metadata hostname from the resource + // attributes on the first OTLP payload that gets to the exporter. + // If the first payload lacks hostname-like attributes, it will fallback to 'config_or_system'. 
+	// Do not use this hostname source if receiving data from multiple hosts.
+	// - 'config_or_system' picks the host metadata hostname from the 'hostname' setting,
+	// If this is empty it will use available system APIs and cloud provider endpoints.
+	//
+	// The current default is 'first_resource'.
+	HostnameSource HostnameSource `mapstructure:"hostname_source"`
+
+	// Tags is a list of host tags.
+	// These tags will be attached to telemetry signals that have the host metadata hostname.
+	// To attach tags to telemetry signals regardless of the host, use a processor instead.
+	Tags []string `mapstructure:"tags"`
+}

 // LimitedTLSClientSetting is a subset of TLSClientSetting, see LimitedHTTPClientSettings for more details
 type LimitedTLSClientSettings struct {
 	// InsecureSkipVerify controls whether a client verifies the server's
@@ -281,10 +340,14 @@ type Config struct {
 	// Traces defines the Traces exporter specific configuration
 	Traces TracesConfig `mapstructure:"traces"`

+	// HostMetadata defines the host metadata specific configuration
+	HostMetadata HostMetadataConfig `mapstructure:"host_metadata"`
+
 	// SendMetadata defines whether to send host metadata
 	// This is undocumented and only used for unit testing.
 	//
 	// This can't be disabled if `only_metadata` is true.
+	// Deprecated: [v0.49.0] Use `host_metadata::enabled` (HostMetadata.Enabled) instead.
 	SendMetadata bool `mapstructure:"send_metadata"`

 	// OnlyMetadata defines whether to only send metadata
@@ -292,8 +355,8 @@ type Config struct {
 	// metadata about a host is sent to the backend even
 	// when telemetry data is reported via a different host.
 	//
-	// This flag is incompatible with disabling `send_metadata`
-	// or `use_resource_metadata`.
+	// This flag is incompatible with disabling host metadata,
+	// `use_resource_metadata`, or `host_metadata::hostname_source != first_resource`
 	OnlyMetadata bool `mapstructure:"only_metadata"`

 	// UseResourceMetadata defines whether to use resource attributes
@@ -302,6 +365,7 @@ type Config struct {
 	// By default this is true: the first resource attribute getting to
 	// the exporter will be used for host metadata.
 	// Disable this in the Collector if you are using an agent-collector setup.
+	// Deprecated: [v0.49.0] Use `host_metadata::hostname_source` (HostMetadata.HostnameSource) instead.
 	UseResourceMetadata bool `mapstructure:"use_resource_metadata"`

 	// warnings stores non-fatal configuration errors.
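One detail of the diff above worth calling out: `HostnameSource` implements `encoding.TextUnmarshaler`, so a bad `hostname_source` value is rejected while the collector unmarshals its configuration instead of surfacing later at runtime (this is what the new `TestUnmarshal` case below exercises). A trimmed, runnable copy of the pattern, with a small driver added for illustration:

```go
package main

import (
	"encoding"
	"fmt"
)

// HostnameSource as in the patch: a string enum validated at unmarshal time.
type HostnameSource string

const (
	HostnameSourceFirstResource  HostnameSource = "first_resource"
	HostnameSourceConfigOrSystem HostnameSource = "config_or_system"
)

// Compile-time check that the TextUnmarshaler contract is satisfied.
var _ encoding.TextUnmarshaler = (*HostnameSource)(nil)

// UnmarshalText rejects anything outside the known set of sources.
func (sm *HostnameSource) UnmarshalText(in []byte) error {
	switch mode := HostnameSource(in); mode {
	case HostnameSourceFirstResource, HostnameSourceConfigOrSystem:
		*sm = mode
		return nil
	default:
		return fmt.Errorf("invalid host metadata hostname source %q", mode)
	}
}

func main() {
	var src HostnameSource
	fmt.Println(src.UnmarshalText([]byte("first_resource")), src) // <nil> first_resource
	fmt.Println(src.UnmarshalText([]byte("bogus")))               // invalid host metadata hostname source "bogus"
}
```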
@@ -314,7 +378,7 @@ func (c *Config) Sanitize(logger *zap.Logger) error { c.TagsConfig.Env = "none" } - if c.OnlyMetadata && (!c.SendMetadata || !c.UseResourceMetadata) { + if c.OnlyMetadata && (!c.HostMetadata.Enabled || c.HostMetadata.HostnameSource != HostnameSourceFirstResource) { return errNoMetadata } diff --git a/exporter/datadogexporter/config/config_test.go b/exporter/datadogexporter/config/config_test.go index f68891f069a2..66e70d7a2228 100644 --- a/exporter/datadogexporter/config/config_test.go +++ b/exporter/datadogexporter/config/config_test.go @@ -180,16 +180,44 @@ func TestSpanNameRemappingsValidation(t *testing.T) { require.Error(t, err) } -func TestInvalidSumMode(t *testing.T) { - cfgMap := config.NewMapFromStringMap(map[string]interface{}{ - "metrics": map[string]interface{}{ - "sums": map[string]interface{}{ - "cumulative_monotonic_mode": "invalid_mode", - }, +func TestUnmarshal(t *testing.T) { + tests := []struct { + name string + configMap *config.Map + cfg Config + err string + }{ + { + name: "invalid cumulative monotonic mode", + configMap: config.NewMapFromStringMap(map[string]interface{}{ + "metrics": map[string]interface{}{ + "sums": map[string]interface{}{ + "cumulative_monotonic_mode": "invalid_mode", + }, + }, + }), + err: "1 error(s) decoding:\n\n* error decoding 'metrics.sums.cumulative_monotonic_mode': invalid cumulative monotonic sum mode \"invalid_mode\"", + }, + { + name: "invalid host metadata hostname source", + configMap: config.NewMapFromStringMap(map[string]interface{}{ + "host_metadata": map[string]interface{}{ + "hostname_source": "invalid_source", + }, + }), + err: "1 error(s) decoding:\n\n* error decoding 'host_metadata.hostname_source': invalid host metadata hostname source \"invalid_source\"", }, - }) + } - cfg := futureDefaultConfig() - err := cfg.Unmarshal(cfgMap) - assert.EqualError(t, err, "1 error(s) decoding:\n\n* error decoding 'metrics.sums.cumulative_monotonic_mode': invalid cumulative monotonic sum mode \"invalid_mode\"") + for _, testInstance := range tests { + t.Run(testInstance.name, func(t *testing.T) { + cfg := futureDefaultConfig() + err := cfg.Unmarshal(testInstance.configMap) + if err != nil || testInstance.err != "" { + assert.EqualError(t, err, testInstance.err) + } else { + assert.Equal(t, testInstance.cfg, cfg) + } + }) + } } diff --git a/exporter/datadogexporter/config/warn_envvars.go b/exporter/datadogexporter/config/warn_envvars.go index e7a786270a37..9e2176809a59 100644 --- a/exporter/datadogexporter/config/warn_envvars.go +++ b/exporter/datadogexporter/config/warn_envvars.go @@ -51,6 +51,10 @@ func futureDefaultConfig() *Config { SampleRate: 1, IgnoreResources: []string{}, }, + HostMetadata: HostMetadataConfig{ + Enabled: true, + HostnameSource: HostnameSourceFirstResource, + }, SendMetadata: true, UseResourceMetadata: true, } diff --git a/exporter/datadogexporter/config/warning_deprecated.go b/exporter/datadogexporter/config/warning_deprecated.go index 91d9a9c536ce..d4b732e29e05 100644 --- a/exporter/datadogexporter/config/warning_deprecated.go +++ b/exporter/datadogexporter/config/warning_deprecated.go @@ -53,6 +53,37 @@ var renamedSettings = []renameError{ } }, }, + { + oldName: "tags", + newName: "host_metadata::tags", + oldRemovedIn: "v0.52.0", + issueNumber: 9099, + updateFn: func(c *Config) { + c.HostMetadata.Tags = c.Tags + }, + }, + { + oldName: "send_metadata", + newName: "host_metadata::enabled", + oldRemovedIn: "v0.52.0", + issueNumber: 9099, + updateFn: func(c *Config) { + c.HostMetadata.Enabled = 
c.SendMetadata + }, + }, + { + oldName: "use_resource_metadata", + newName: "host_metadata::hostname_source", + oldRemovedIn: "v0.52.0", + issueNumber: 9099, + updateFn: func(c *Config) { + if c.UseResourceMetadata { + c.HostMetadata.HostnameSource = HostnameSourceFirstResource + } else { + c.HostMetadata.HostnameSource = HostnameSourceConfigOrSystem + } + }, + }, } // Error implements the error interface. diff --git a/exporter/datadogexporter/example/config.yaml b/exporter/datadogexporter/example/config.yaml index e26fde0143f9..5f743496171f 100644 --- a/exporter/datadogexporter/example/config.yaml +++ b/exporter/datadogexporter/example/config.yaml @@ -38,16 +38,32 @@ exporters: # version: myversion ## @param tags - list of strings - optional - default: [] - ## The list of default tags to add to every metric or trace. + ## The list of tags to send as host tags. + ## Deprecated: [v0.49.0] Use `host_metadata::tags` instead. + ## This option will be removed in v0.52.0. ## If unset it will be determined from the `DD_TAGS` environment variable, specified ## as a list of space-separated strings (Deprecated: [v0.47.0] use 'env' config source instead). # # tags: [] + ## @param send_metadata - boolean - optional - default: true + ## Deprecated: [v0.49.0] Use `host_metadata::enabled` instead. + ## This option will be removed in v0.52.0. + # + # send_metadata: true + + ## @param use_resource_metadata - boolean - optional - default: true + ## Deprecated: [v0.49.0] Use `host_metadata::hostname_source` instead. + ## This option will be removed in v0.52.0. + # + # use_resource_metadata: true + ## @param only_metadata - boolean - optional - default: false ## Whether to send only metadata. This is useful for agent-collector ## setups, so that metadata about a host is sent to the backend even ## when telemetry data is reported via a different host. + # + # only_metadata: false ## @param api - custom object - required. ## Specific API configuration. @@ -177,6 +193,40 @@ exporters: ## https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/1909 # # span_name_as_resource_name: true + + ## @param host_metadata - custom object - optional + ## Host metadata specific configuration. + ## Host metadata is the information used for populating the infrastructure list, the host map and providing host tags functionality within the Datadog app. + ## + ## The exporter will only send host metadata for a single host, whose name is chosen + ## according to `host_metadata::hostname_source`. + # + # host_metadata: + ## @param enabled - boolean - optional - default: true + ## Enable the host metadata functionality + # + # enabled: true + + ## @param hostname_source - enum - optional - default: first_resource + ## Source for the hostname of host metadata. + ## Valid values are 'first_resource' and 'config_or_system': + ## - 'first_resource' picks the host metadata hostname from the resource attributes on the first OTLP payload that gets to the exporter. + ## If the first payload lacks hostname-like attributes, it will fall back to 'config_or_system' behavior. + ## Do not use this hostname source if receiving data from multiple hosts. + ## + ## - 'config_or_system' picks the host metadata hostname from the 'hostname' setting, falling back to system and cloud provider APIs. + ## + ## The current default is 'first_resource'. + # + # hostname_source: first_resource + + ## @param tags - list of strings - optional - default: empty list + ## List of host tags to be sent as part of the host metadata.
+ ## These tags will be attached to telemetry signals that have the host metadata hostname. + ## + ## To attach tags to telemetry signals regardless of the host, use a processor instead. + # + # tags: [] service: diff --git a/exporter/datadogexporter/factory.go b/exporter/datadogexporter/factory.go index cff6b4afc18f..902c7d5e9f42 100644 --- a/exporter/datadogexporter/factory.go +++ b/exporter/datadogexporter/factory.go @@ -108,6 +108,11 @@ func (*factory) createDefaultConfig() config.Exporter { IgnoreResources: []string{}, }, + HostMetadata: ddconfig.HostMetadataConfig{ + Enabled: true, + HostnameSource: ddconfig.HostnameSourceFirstResource, + }, + SendMetadata: true, UseResourceMetadata: true, } diff --git a/exporter/datadogexporter/factory_test.go b/exporter/datadogexporter/factory_test.go index d0a20cb0373e..2e314e4c6a55 100644 --- a/exporter/datadogexporter/factory_test.go +++ b/exporter/datadogexporter/factory_test.go @@ -106,6 +106,11 @@ func TestCreateDefaultConfig(t *testing.T) { EnvVarTags: "TAGS", }, + HostMetadata: ddconfig.HostMetadataConfig{ + Enabled: true, + HostnameSource: ddconfig.HostnameSourceFirstResource, + }, + SendMetadata: true, OnlyMetadata: false, UseResourceMetadata: true, @@ -218,6 +223,12 @@ func TestLoadConfig(t *testing.T) { }, IgnoreResources: []string{}, }, + + HostMetadata: ddconfig.HostMetadataConfig{ + Enabled: true, + HostnameSource: ddconfig.HostnameSourceFirstResource, + }, + SendMetadata: true, OnlyMetadata: false, UseResourceMetadata: true, @@ -226,6 +237,16 @@ func TestLoadConfig(t *testing.T) { invalidConfig := cfg.Exporters[config.NewComponentIDWithName(typeStr, "invalid")].(*ddconfig.Config) err = invalidConfig.Sanitize(zap.NewNop()) require.Error(t, err) + + hostMetadataConfig := cfg.Exporters[config.NewComponentIDWithName(typeStr, "hostmetadata")].(*ddconfig.Config) + err = hostMetadataConfig.Sanitize(zap.NewNop()) + require.NoError(t, err) + + assert.Equal(t, ddconfig.HostMetadataConfig{ + Enabled: true, + HostnameSource: ddconfig.HostnameSourceConfigOrSystem, + Tags: []string{"example:one"}, + }, hostMetadataConfig.HostMetadata) } // TestLoadConfigEnvVariables tests that the loading configuration takes into account @@ -270,7 +291,6 @@ func TestLoadConfigEnvVariables(t *testing.T) { Hostname: "customhostname", Env: "none", EnvVarTags: "envexample:tag envexample2:tag", - Tags: []string{"example:tag"}, }, apiConfig.TagsConfig) assert.Equal(t, ddconfig.APIConfig{ @@ -301,6 +321,12 @@ func TestLoadConfigEnvVariables(t *testing.T) { }, IgnoreResources: []string{}, }, apiConfig.Traces) + assert.Equal(t, + ddconfig.HostMetadataConfig{ + Enabled: true, + HostnameSource: ddconfig.HostnameSourceFirstResource, + Tags: []string{"example:tag"}, + }, apiConfig.HostMetadata) defaultConfig := cfg.Exporters[config.NewComponentIDWithName(typeStr, "default2")].(*ddconfig.Config) err = defaultConfig.Sanitize(zap.NewNop()) @@ -363,7 +389,7 @@ func TestCreateAPIMetricsExporter(t *testing.T) { // Use the mock server for API key validation c := (cfg.Exporters[config.NewComponentIDWithName(typeStr, "api")]).(*ddconfig.Config) c.Metrics.TCPAddr.Endpoint = server.URL - c.SendMetadata = false + c.HostMetadata.Enabled = false ctx := context.Background() exp, err := factory.CreateMetricsExporter( @@ -393,7 +419,7 @@ func TestCreateAPITracesExporter(t *testing.T) { // Use the mock server for API key validation c := (cfg.Exporters[config.NewComponentIDWithName(typeStr, "api")]).(*ddconfig.Config) c.Metrics.TCPAddr.Endpoint = server.URL - c.SendMetadata = false + 
c.HostMetadata.Enabled = false ctx := context.Background() exp, err := factory.CreateTracesExporter( @@ -423,13 +449,15 @@ func TestOnlyMetadata(t *testing.T) { RetrySettings: exporterhelper.NewDefaultRetrySettings(), QueueSettings: exporterhelper.NewDefaultQueueSettings(), - API: ddconfig.APIConfig{Key: "notnull"}, - Metrics: ddconfig.MetricsConfig{TCPAddr: confignet.TCPAddr{Endpoint: server.URL}}, - Traces: ddconfig.TracesConfig{TCPAddr: confignet.TCPAddr{Endpoint: server.URL}}, + API: ddconfig.APIConfig{Key: "notnull"}, + Metrics: ddconfig.MetricsConfig{TCPAddr: confignet.TCPAddr{Endpoint: server.URL}}, + Traces: ddconfig.TracesConfig{TCPAddr: confignet.TCPAddr{Endpoint: server.URL}}, + OnlyMetadata: true, - SendMetadata: true, - OnlyMetadata: true, - UseResourceMetadata: true, + HostMetadata: ddconfig.HostMetadataConfig{ + Enabled: true, + HostnameSource: ddconfig.HostnameSourceFirstResource, + }, } expTraces, err := factory.CreateTracesExporter( diff --git a/exporter/datadogexporter/hostmetadata.go b/exporter/datadogexporter/hostmetadata.go index 3f4cbbe93d32..cb33aae32ce3 100644 --- a/exporter/datadogexporter/hostmetadata.go +++ b/exporter/datadogexporter/hostmetadata.go @@ -23,16 +23,16 @@ import ( ) // getHostTags gets the host tags extracted from the configuration. -func getHostTags(t *config.TagsConfig) []string { - tags := t.Tags +func getHostTags(c *config.Config) []string { + tags := c.HostMetadata.Tags if len(tags) == 0 { //lint:ignore SA1019 Will be removed when environment variable detection is removed - tags = strings.Split(t.EnvVarTags, " ") //nolint + tags = strings.Split(c.EnvVarTags, " ") //nolint } - if t.Env != "none" { - tags = append(tags, fmt.Sprintf("env:%s", t.Env)) + if c.Env != "none" { + tags = append(tags, fmt.Sprintf("env:%s", c.Env)) } return tags } @@ -41,10 +41,10 @@ func getHostTags(t *config.TagsConfig) []string { func newMetadataConfigfromConfig(cfg *config.Config) metadata.PusherConfig { return metadata.PusherConfig{ ConfigHostname: cfg.Hostname, - ConfigTags: getHostTags(&cfg.TagsConfig), + ConfigTags: getHostTags(cfg), MetricsEndpoint: cfg.Metrics.Endpoint, APIKey: cfg.API.Key, - UseResourceMetadata: cfg.UseResourceMetadata, + UseResourceMetadata: cfg.HostMetadata.HostnameSource == config.HostnameSourceFirstResource, InsecureSkipVerify: cfg.TLSSetting.InsecureSkipVerify, TimeoutSettings: cfg.TimeoutSettings, RetrySettings: cfg.RetrySettings, diff --git a/exporter/datadogexporter/hostmetadata_test.go b/exporter/datadogexporter/hostmetadata_test.go index 7a4620fc7c22..0b34228a4559 100644 --- a/exporter/datadogexporter/hostmetadata_test.go +++ b/exporter/datadogexporter/hostmetadata_test.go @@ -23,13 +23,18 @@ import ( ) func TestHostTags(t *testing.T) { - tc := config.TagsConfig{ - Hostname: "customhost", - Env: "customenv", - // Service and version should be only used for traces - Service: "customservice", - Version: "customversion", - Tags: []string{"key1:val1", "key2:val2"}, + c := config.Config{ + TagsConfig: config.TagsConfig{ + Hostname: "customhost", + Env: "customenv", + // Service and version should be only used for traces + Service: "customservice", + Version: "customversion", + }, + + HostMetadata: config.HostMetadataConfig{ + Tags: []string{"key1:val1", "key2:val2"}, + }, } assert.ElementsMatch(t, @@ -38,17 +43,22 @@ func TestHostTags(t *testing.T) { "key1:val1", "key2:val2", }, - getHostTags(&tc), + getHostTags(&c), ) - tc = config.TagsConfig{ - Hostname: "customhost", - Env: "customenv", - // Service and version should be only used for 
traces - Service: "customservice", - Version: "customversion", - Tags: []string{"key1:val1", "key2:val2"}, - EnvVarTags: "key3:val3 key4:val4", + c = config.Config{ + TagsConfig: config.TagsConfig{ + Hostname: "customhost", + Env: "customenv", + // Service and version should be only used for traces + Service: "customservice", + Version: "customversion", + EnvVarTags: "key3:val3 key4:val4", + }, + + HostMetadata: config.HostMetadataConfig{ + Tags: []string{"key1:val1", "key2:val2"}, + }, } assert.ElementsMatch(t, @@ -57,16 +67,18 @@ func TestHostTags(t *testing.T) { "key1:val1", "key2:val2", }, - getHostTags(&tc), + getHostTags(&c), ) - tc = config.TagsConfig{ - Hostname: "customhost", - Env: "customenv", - // Service and version should be only used for traces - Service: "customservice", - Version: "customversion", - EnvVarTags: "key3:val3 key4:val4", + c = config.Config{ + TagsConfig: config.TagsConfig{ + Hostname: "customhost", + Env: "customenv", + // Service and version should be only used for traces + Service: "customservice", + Version: "customversion", + EnvVarTags: "key3:val3 key4:val4", + }, } assert.ElementsMatch(t, @@ -75,6 +87,6 @@ func TestHostTags(t *testing.T) { "key3:val3", "key4:val4", }, - getHostTags(&tc), + getHostTags(&c), ) } diff --git a/exporter/datadogexporter/metrics_exporter.go b/exporter/datadogexporter/metrics_exporter.go index 85b6bcb20694..5f04cf522950 100644 --- a/exporter/datadogexporter/metrics_exporter.go +++ b/exporter/datadogexporter/metrics_exporter.go @@ -161,7 +161,7 @@ func (exp *metricsExporter) PushMetricsData(ctx context.Context, md pdata.Metric // Start host metadata with resource attributes from // the first payload. - if exp.cfg.SendMetadata { + if exp.cfg.HostMetadata.Enabled { exp.onceMetadata.Do(func() { attrs := pdata.NewMap() if md.ResourceMetrics().Len() > 0 { diff --git a/exporter/datadogexporter/metrics_exporter_test.go b/exporter/datadogexporter/metrics_exporter_test.go index 5a313ed48daf..c74425c91e69 100644 --- a/exporter/datadogexporter/metrics_exporter_test.go +++ b/exporter/datadogexporter/metrics_exporter_test.go @@ -63,8 +63,8 @@ func TestNewExporter(t *testing.T) { require.NoError(t, err) assert.Equal(t, len(server.MetadataChan), 0) - cfg.SendMetadata = true - cfg.UseResourceMetadata = true + cfg.HostMetadata.Enabled = true + cfg.HostMetadata.HostnameSource = config.HostnameSourceFirstResource err = exp.ConsumeMetrics(context.Background(), testutils.TestMetrics.Clone()) require.NoError(t, err) body := <-server.MetadataChan diff --git a/exporter/datadogexporter/testdata/config.yaml b/exporter/datadogexporter/testdata/config.yaml index 69179e2de6ce..f2c1c068951a 100644 --- a/exporter/datadogexporter/testdata/config.yaml +++ b/exporter/datadogexporter/testdata/config.yaml @@ -14,6 +14,7 @@ exporters: # Deprecated; kept here to avoid regressions. version: myversion + # Deprecated; kept here to avoid regressions. 
tags: - example:tag @@ -26,8 +27,9 @@ exporters: datadog/api2: hostname: customhostname - tags: - - example:tag + + host_metadata: + tags: [example:tag] api: key: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa @@ -45,6 +47,16 @@ exporters: api: key: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa + datadog/hostmetadata: + api: + key: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa + # Deprecated; kept here to test rename system + tags: [example:one] + # Deprecated; kept here to test rename system + send_metadata: true + # Deprecated; kept here to test rename system + use_resource_metadata: false + datadog/default2: datadog/invalid: diff --git a/exporter/datadogexporter/traces_exporter.go b/exporter/datadogexporter/traces_exporter.go index d4ee257365c8..16d897c7787f 100644 --- a/exporter/datadogexporter/traces_exporter.go +++ b/exporter/datadogexporter/traces_exporter.go @@ -116,7 +116,7 @@ func (exp *traceExporter) pushTraceData( // Start host metadata with resource attributes from // the first payload. - if exp.cfg.SendMetadata { + if exp.cfg.HostMetadata.Enabled { exp.onceMetadata.Do(func() { attrs := pdata.NewMap() if td.ResourceSpans().Len() > 0 { diff --git a/exporter/datadogexporter/traces_exporter_test.go b/exporter/datadogexporter/traces_exporter_test.go index 16fd7b7fceb3..0a2acba161b0 100644 --- a/exporter/datadogexporter/traces_exporter_test.go +++ b/exporter/datadogexporter/traces_exporter_test.go @@ -188,8 +188,11 @@ func TestPushTraceData(t *testing.T) { SampleRate: 1, TCPAddr: confignet.TCPAddr{Endpoint: server.URL}, }, - SendMetadata: true, - UseResourceMetadata: true, + + HostMetadata: config.HostMetadataConfig{ + Enabled: true, + HostnameSource: config.HostnameSourceFirstResource, + }, } params := componenttest.NewNopExporterCreateSettings() From 67a8a8d017c873c2c5ac5d727aa9ff11e24ddde9 Mon Sep 17 00:00:00 2001 From: Sam DeHaan Date: Mon, 11 Apr 2022 16:33:14 -0400 Subject: [PATCH 29/59] [cmd/mdatagen] Generate more mdatagen attribute documentation (#8985) * Enhance mdatagen attribute documentation * ensure example has attribute without value/enums * Add changelog entry * Add testing of mdatagen documentation generation * Commit all updated documentation.md files * Satisfy linter --- CHANGELOG.md | 2 + cmd/mdatagen/documentation.tmpl | 6 +-- cmd/mdatagen/main.go | 1 + cmd/mdatagen/main_test.go | 47 +++++++++++++----- cmd/mdatagen/testdata/documentation_v1.md | 20 ++++++++ cmd/mdatagen/testdata/documentation_v2.md | 27 +++++++++++ receiver/apachereceiver/documentation.md | 10 ++-- receiver/couchbasereceiver/documentation.md | 4 +- receiver/couchdbreceiver/documentation.md | 12 ++--- .../elasticsearchreceiver/documentation.md | 34 ++++++------- .../scraper/cpuscraper/documentation.md | 8 ++-- .../scraper/diskscraper/documentation.md | 8 ++-- .../filesystemscraper/documentation.md | 14 +++--- .../scraper/loadscraper/documentation.md | 4 +- .../scraper/memoryscraper/documentation.md | 6 +-- .../scraper/networkscraper/documentation.md | 12 ++--- .../scraper/pagingscraper/documentation.md | 12 ++--- .../scraper/processesscraper/documentation.md | 6 +-- .../scraper/processscraper/documentation.md | 8 ++-- .../kafkametricsreceiver/documentation.md | 10 ++-- .../kubeletstatsreceiver/documentation.md | 8 ++-- receiver/memcachedreceiver/documentation.md | 14 +++--- .../mongodbatlasreceiver/documentation.md | 48 +++++++++---------- receiver/mongodbreceiver/documentation.md | 14 +++--- receiver/mysqlreceiver/documentation.md | 32 ++++++------- receiver/nginxreceiver/documentation.md | 6 +-- 
receiver/postgresqlreceiver/documentation.md | 14 +++--- receiver/rabbitmqreceiver/documentation.md | 6 +-- receiver/redisreceiver/documentation.md | 8 ++-- receiver/riakreceiver/documentation.md | 8 ++-- receiver/zookeeperreceiver/documentation.md | 8 ++-- 31 files changed, 245 insertions(+), 172 deletions(-) create mode 100644 cmd/mdatagen/testdata/documentation_v1.md create mode 100644 cmd/mdatagen/testdata/documentation_v2.md diff --git a/CHANGELOG.md b/CHANGELOG.md index 10c4ca103980..67c870d0ffd8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,8 @@ - `splunkheceporter`: Add an option to disable log or profiling data (#9065) - `datadogexporter`: Add `host_metadata` configuration section to configure host metadata export (#9100) +- `cmd/mdatagen`: Update documentation generated for attributes to list enumerated values and show the "value" that will be visible on metrics when it is different from the attribute key in metadata.yaml (#8983) + ### 🛑 Breaking changes 🛑 - `filelogreceiver`, `journaldreceiver`, `syslogreceiver`, `tcplogreceiver`, `udplogreceiver`: diff --git a/cmd/mdatagen/documentation.tmpl b/cmd/mdatagen/documentation.tmpl index 370a0ec184b5..19e7c061b918 100644 --- a/cmd/mdatagen/documentation.tmpl +++ b/cmd/mdatagen/documentation.tmpl @@ -38,8 +38,8 @@ metrics: ## Metric attributes -| Name | Description | -| ---- | ----------- | +| Name | Description | Values | +| ---- | ----------- | ------ | {{- range $attributeName, $attributeInfo := .Attributes }} -| {{ $attributeName }} | {{ $attributeInfo.Description }} | +| {{ $attributeName }}{{- if $attributeInfo.Value }} ({{ $attributeInfo.Value }}){{- end}} | {{ $attributeInfo.Description }} | {{ stringsJoin $attributeInfo.Enum ", " }} | {{- end }} diff --git a/cmd/mdatagen/main.go b/cmd/mdatagen/main.go index c2e6be55a541..1a7be8d7b609 100644 --- a/cmd/mdatagen/main.go +++ b/cmd/mdatagen/main.go @@ -130,6 +130,7 @@ func generateDocumentation(ymlDir string, thisDir string, md metadata, useExpGen "publicVar": func(s string) (string, error) { return formatIdentifier(s, true) }, + "stringsJoin": strings.Join, }).ParseFiles(path.Join(thisDir, "documentation.tmpl"))) buf := bytes.Buffer{} diff --git a/cmd/mdatagen/main_test.go b/cmd/mdatagen/main_test.go index 58b7cf4daf38..5c9d8ff0c751 100644 --- a/cmd/mdatagen/main_test.go +++ b/cmd/mdatagen/main_test.go @@ -26,6 +26,16 @@ import ( const ( validMetadata = ` name: metricreceiver +attributes: + cpu_type: + value: type + description: The type of CPU consumption + enum: + - user + - io_wait + - system + host: + description: The type of CPU consumption metrics: system.cpu.time: enabled: true @@ -35,7 +45,7 @@ metrics: sum: aggregation: cumulative value_type: double - attributes: [] + attributes: [host, cpu_type] ` ) @@ -45,20 +55,23 @@ func Test_runContents(t *testing.T) { useExpGen bool } tests := []struct { - name string - args args - want string - wantErr string + name string + args args + expectedDocumentation string + want string + wantErr string }{ { - name: "valid metadata", - args: args{validMetadata, false}, - want: "", + name: "valid metadata", + args: args{validMetadata, false}, + expectedDocumentation: "testdata/documentation_v1.md", + want: "", }, { - name: "valid metadata v2", - args: args{validMetadata, true}, - want: "", + name: "valid metadata v2", + args: args{validMetadata, true}, + expectedDocumentation: "testdata/documentation_v2.md", + want: "", }, { name: "invalid yaml", @@ -91,7 +104,17 @@ func Test_runContents(t *testing.T) { } require.FileExists(t, 
genFilePath) - require.FileExists(t, filepath.Join(tmpdir, "documentation.md")) + actualDocumentation := filepath.Join(tmpdir, "documentation.md") + require.FileExists(t, actualDocumentation) + if tt.expectedDocumentation != "" { + expectedFileBytes, err := ioutil.ReadFile(tt.expectedDocumentation) + require.NoError(t, err) + + actualFileBytes, err := ioutil.ReadFile(actualDocumentation) + require.NoError(t, err) + + require.Equal(t, expectedFileBytes, actualFileBytes) + } } }) } diff --git a/cmd/mdatagen/testdata/documentation_v1.md b/cmd/mdatagen/testdata/documentation_v1.md new file mode 100644 index 000000000000..08b448399c10 --- /dev/null +++ b/cmd/mdatagen/testdata/documentation_v1.md @@ -0,0 +1,20 @@ +[comment]: <> (Code generated by mdatagen. DO NOT EDIT.) + +# metricreceiver + +## Metrics + +These are the metrics available for this scraper. + +| Name | Description | Unit | Type | Attributes | +| ---- | ----------- | ---- | ---- | ---------- | +| **system.cpu.time** | Total CPU seconds broken down by different states. Additional information on CPU Time can be found [here](https://en.wikipedia.org/wiki/CPU_time). | s | Sum(Double) |
  • host
  • cpu_type
| + +**Highlighted metrics** are emitted by default. + +## Metric attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| cpu_type (type) | The type of CPU consumption | user, io_wait, system | +| host | The type of CPU consumption | | diff --git a/cmd/mdatagen/testdata/documentation_v2.md b/cmd/mdatagen/testdata/documentation_v2.md new file mode 100644 index 000000000000..bbfefb6b08d0 --- /dev/null +++ b/cmd/mdatagen/testdata/documentation_v2.md @@ -0,0 +1,27 @@ +[comment]: <> (Code generated by mdatagen. DO NOT EDIT.) + +# metricreceiver + +## Metrics + +These are the metrics available for this scraper. + +| Name | Description | Unit | Type | Attributes | +| ---- | ----------- | ---- | ---- | ---------- | +| **system.cpu.time** | Total CPU seconds broken down by different states. Additional information on CPU Time can be found [here](https://en.wikipedia.org/wiki/CPU_time). | s | Sum(Double) |
  • host
  • cpu_type
| + +**Highlighted metrics** are emitted by default. Other metrics are optional and not emitted by default. +Any metric can be enabled or disabled with the following scraper configuration: + +```yaml +metrics: + : + enabled: +``` + +## Metric attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| cpu_type (type) | The type of CPU consumption | user, io_wait, system | +| host | The type of CPU consumption | | diff --git a/receiver/apachereceiver/documentation.md b/receiver/apachereceiver/documentation.md index 392e54d0a24f..247c870df6ec 100644 --- a/receiver/apachereceiver/documentation.md +++ b/receiver/apachereceiver/documentation.md @@ -26,8 +26,8 @@ metrics: ## Metric attributes -| Name | Description | -| ---- | ----------- | -| scoreboard_state | The state of a connection. | -| server_name | The name of the Apache HTTP server. | -| workers_state | The state of workers. | +| Name | Description | Values | +| ---- | ----------- | ------ | +| scoreboard_state (state) | The state of a connection. | open, waiting, starting, reading, sending, keepalive, dnslookup, closing, logging, finishing, idle_cleanup | +| server_name | The name of the Apache HTTP server. | | +| workers_state (state) | The state of workers. | busy, idle | diff --git a/receiver/couchbasereceiver/documentation.md b/receiver/couchbasereceiver/documentation.md index a9d8ebe8492e..af65d09f58ba 100644 --- a/receiver/couchbasereceiver/documentation.md +++ b/receiver/couchbasereceiver/documentation.md @@ -20,5 +20,5 @@ metrics: ## Metric attributes -| Name | Description | -| ---- | ----------- | +| Name | Description | Values | +| ---- | ----------- | ------ | diff --git a/receiver/couchdbreceiver/documentation.md b/receiver/couchdbreceiver/documentation.md index 59a07961182a..fc5ff65fc583 100644 --- a/receiver/couchdbreceiver/documentation.md +++ b/receiver/couchdbreceiver/documentation.md @@ -34,9 +34,9 @@ metrics: ## Metric attributes -| Name | Description | -| ---- | ----------- | -| http.method | An HTTP request method. | -| http.status_code | An HTTP status code. | -| operation | The operation type. | -| view | The view type. | +| Name | Description | Values | +| ---- | ----------- | ------ | +| http.method | An HTTP request method. | COPY, DELETE, GET, HEAD, OPTIONS, POST, PUT | +| http.status_code | An HTTP status code. | | +| operation | The operation type. | writes, reads | +| view | The view type. | temporary_view_reads, view_reads | diff --git a/receiver/elasticsearchreceiver/documentation.md b/receiver/elasticsearchreceiver/documentation.md index 2ee889d55a39..84a5a3ec0580 100644 --- a/receiver/elasticsearchreceiver/documentation.md +++ b/receiver/elasticsearchreceiver/documentation.md @@ -49,20 +49,20 @@ metrics: ## Metric attributes -| Name | Description | -| ---- | ----------- | -| cache_name | The name of cache. | -| collector_name | The name of the garbage collector. | -| direction | The direction of network data. | -| disk_usage_state | The state of a section of space on disk. | -| document_state | The state of the document. | -| elasticsearch.cluster.name | The name of the elasticsearch cluster. | -| elasticsearch.node.name | The name of the elasticsearch node. | -| fs_direction | The direction of filesystem IO. | -| health_status | The health status of the cluster. | -| memory_pool_name | The name of the JVM memory pool. | -| operation | The type of operation. | -| shard_state | The state of the shard. | -| task_state | The state of the task. 
| -| thread_pool_name | The name of the thread pool. | -| thread_state | The state of the thread. | +| Name | Description | Values | +| ---- | ----------- | ------ | +| cache_name | The name of cache. | fielddata, query | +| collector_name (name) | The name of the garbage collector. | | +| direction | The direction of network data. | received, sent | +| disk_usage_state (state) | The state of a section of space on disk. | used, free | +| document_state (state) | The state of the document. | active, deleted | +| elasticsearch.cluster.name | The name of the elasticsearch cluster. | | +| elasticsearch.node.name | The name of the elasticsearch node. | | +| fs_direction (direction) | The direction of filesystem IO. | read, write | +| health_status (status) | The health status of the cluster. | green, yellow, red | +| memory_pool_name (name) | The name of the JVM memory pool. | | +| operation (operation) | The type of operation. | index, delete, get, query, fetch, scroll, suggest, merge, refresh, flush, warmer | +| shard_state (state) | The state of the shard. | active, relocating, initializing, unassigned | +| task_state (state) | The state of the task. | rejected, completed | +| thread_pool_name | The name of the thread pool. | | +| thread_state (state) | The state of the thread. | active, idle | diff --git a/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/documentation.md b/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/documentation.md index a1359673dc08..40b95a760716 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/documentation.md +++ b/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/documentation.md @@ -22,7 +22,7 @@ metrics: ## Metric attributes -| Name | Description | -| ---- | ----------- | -| cpu | CPU number starting at 0. | -| state | Breakdown of CPU usage by type. | +| Name | Description | Values | +| ---- | ----------- | ------ | +| cpu | CPU number starting at 0. | | +| state | Breakdown of CPU usage by type. | idle, interrupt, nice, softirq, steal, system, user, wait | diff --git a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/documentation.md b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/documentation.md index 3a1143ca09d4..29b84dbded26 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/documentation.md +++ b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/documentation.md @@ -27,7 +27,7 @@ metrics: ## Metric attributes -| Name | Description | -| ---- | ----------- | -| device | Name of the disk. | -| direction | Direction of flow of bytes/operations (read or write). | +| Name | Description | Values | +| ---- | ----------- | ------ | +| device | Name of the disk. | | +| direction | Direction of flow of bytes/operations (read or write). | read, write | diff --git a/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/documentation.md b/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/documentation.md index 3983180d9775..041a367ab32e 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/documentation.md +++ b/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/documentation.md @@ -23,10 +23,10 @@ metrics: ## Metric attributes -| Name | Description | -| ---- | ----------- | -| device | Identifier of the filesystem. | -| mode | Mountpoint mode such "ro", "rw", etc. | -| mountpoint | Mountpoint path. | -| state | Breakdown of filesystem usage by type. | -| type | Filesystem type, such as, "ext4", "tmpfs", etc. 
| +| Name | Description | Values | +| ---- | ----------- | ------ | +| device | Identifier of the filesystem. | | +| mode | Mountpoint mode such "ro", "rw", etc. | | +| mountpoint | Mountpoint path. | | +| state | Breakdown of filesystem usage by type. | free, reserved, used | +| type | Filesystem type, such as, "ext4", "tmpfs", etc. | | diff --git a/receiver/hostmetricsreceiver/internal/scraper/loadscraper/documentation.md b/receiver/hostmetricsreceiver/internal/scraper/loadscraper/documentation.md index a445bc4a12f7..ab2fac72ee5f 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/loadscraper/documentation.md +++ b/receiver/hostmetricsreceiver/internal/scraper/loadscraper/documentation.md @@ -23,5 +23,5 @@ metrics: ## Metric attributes -| Name | Description | -| ---- | ----------- | +| Name | Description | Values | +| ---- | ----------- | ------ | diff --git a/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/documentation.md b/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/documentation.md index 81cf3f554928..ea7d4f534f6a 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/documentation.md +++ b/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/documentation.md @@ -22,6 +22,6 @@ metrics: ## Metric attributes -| Name | Description | -| ---- | ----------- | -| state | Breakdown of memory usage by type. | +| Name | Description | Values | +| ---- | ----------- | ------ | +| state | Breakdown of memory usage by type. | buffered, cached, inactive, free, slab_reclaimable, slab_unreclaimable, used | diff --git a/receiver/hostmetricsreceiver/internal/scraper/networkscraper/documentation.md b/receiver/hostmetricsreceiver/internal/scraper/networkscraper/documentation.md index dedf15002e2a..681f1adab581 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/networkscraper/documentation.md +++ b/receiver/hostmetricsreceiver/internal/scraper/networkscraper/documentation.md @@ -25,9 +25,9 @@ metrics: ## Metric attributes -| Name | Description | -| ---- | ----------- | -| device | Name of the network interface. | -| direction | Direction of flow of bytes/operations (receive or transmit). | -| protocol | Network protocol, e.g. TCP or UDP. | -| state | State of the network connection. | +| Name | Description | Values | +| ---- | ----------- | ------ | +| device | Name of the network interface. | | +| direction | Direction of flow of bytes/operations (receive or transmit). | receive, transmit | +| protocol | Network protocol, e.g. TCP or UDP. | tcp | +| state | State of the network connection. | | diff --git a/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/documentation.md b/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/documentation.md index b2c2f4b6c3e6..7cbcde3058e9 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/documentation.md +++ b/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/documentation.md @@ -24,9 +24,9 @@ metrics: ## Metric attributes -| Name | Description | -| ---- | ----------- | -| device | Name of the page file. | -| direction | Page In or Page Out. | -| state | Breakdown of paging usage by type. | -| type | Type of fault. | +| Name | Description | Values | +| ---- | ----------- | ------ | +| device | Name of the page file. | | +| direction | Page In or Page Out. | page_in, page_out | +| state | Breakdown of paging usage by type. | cached, free, used | +| type | Type of fault. 
| major, minor | diff --git a/receiver/hostmetricsreceiver/internal/scraper/processesscraper/documentation.md b/receiver/hostmetricsreceiver/internal/scraper/processesscraper/documentation.md index 0aa165ba5089..b4f1900c2dca 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/processesscraper/documentation.md +++ b/receiver/hostmetricsreceiver/internal/scraper/processesscraper/documentation.md @@ -22,6 +22,6 @@ metrics: ## Metric attributes -| Name | Description | -| ---- | ----------- | -| status | Breakdown status of the processes. | +| Name | Description | Values | +| ---- | ----------- | ------ | +| status | Breakdown status of the processes. | blocked, daemon, detached, idle, locked, orphan, paging, running, sleeping, stopped, system, unknown, zombies | diff --git a/receiver/hostmetricsreceiver/internal/scraper/processscraper/documentation.md b/receiver/hostmetricsreceiver/internal/scraper/processscraper/documentation.md index 6bc364703c16..2027dee584ea 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/processscraper/documentation.md +++ b/receiver/hostmetricsreceiver/internal/scraper/processscraper/documentation.md @@ -35,7 +35,7 @@ metrics: ## Metric attributes -| Name | Description | -| ---- | ----------- | -| direction | Direction of flow of bytes (read or write). | -| state | Breakdown of CPU usage by type. | +| Name | Description | Values | +| ---- | ----------- | ------ | +| direction | Direction of flow of bytes (read or write). | read, write | +| state | Breakdown of CPU usage by type. | system, user, wait | diff --git a/receiver/kafkametricsreceiver/documentation.md b/receiver/kafkametricsreceiver/documentation.md index bc6984c4642c..0b7470c9511b 100644 --- a/receiver/kafkametricsreceiver/documentation.md +++ b/receiver/kafkametricsreceiver/documentation.md @@ -24,8 +24,8 @@ These are the metrics available for this scraper. ## Metric attributes -| Name | Description | -| ---- | ----------- | -| group | The ID (string) of a consumer group | -| partition | The number (integer) of the partition | -| topic | The ID (integer) of a topic | +| Name | Description | Values | +| ---- | ----------- | ------ | +| group | The ID (string) of a consumer group | | +| partition | The number (integer) of the partition | | +| topic | The ID (integer) of a topic | | diff --git a/receiver/kubeletstatsreceiver/documentation.md b/receiver/kubeletstatsreceiver/documentation.md index 940ca6053ace..9acd22d4a2fa 100644 --- a/receiver/kubeletstatsreceiver/documentation.md +++ b/receiver/kubeletstatsreceiver/documentation.md @@ -31,7 +31,7 @@ These are the metrics available for this scraper. ## Metric attributes -| Name | Description | -| ---- | ----------- | -| direction | Direction of flow of bytes/operations (receive or transmit). | -| interface | Name of the network interface. | +| Name | Description | Values | +| ---- | ----------- | ------ | +| direction | Direction of flow of bytes/operations (receive or transmit). | receive, transmit | +| interface | Name of the network interface. | | diff --git a/receiver/memcachedreceiver/documentation.md b/receiver/memcachedreceiver/documentation.md index 2deb8e17ba9e..578d45cd009a 100644 --- a/receiver/memcachedreceiver/documentation.md +++ b/receiver/memcachedreceiver/documentation.md @@ -24,10 +24,10 @@ These are the metrics available for this scraper. ## Metric attributes -| Name | Description | -| ---- | ----------- | -| command | The type of command. | -| direction | Direction of data flow. | -| operation | The type of operation. 
| -| state | The type of CPU usage. | -| type | Result of cache request. | +| Name | Description | Values | +| ---- | ----------- | ------ | +| command | The type of command. | get, set, flush, touch | +| direction | Direction of data flow. | sent, received | +| operation | The type of operation. | increment, decrement, get | +| state | The type of CPU usage. | system, user | +| type | Result of cache request. | hit, miss | diff --git a/receiver/mongodbatlasreceiver/documentation.md b/receiver/mongodbatlasreceiver/documentation.md index ddc55eb7ff0f..bfb41f87ff2a 100644 --- a/receiver/mongodbatlasreceiver/documentation.md +++ b/receiver/mongodbatlasreceiver/documentation.md @@ -97,27 +97,27 @@ metrics: ## Metric attributes -| Name | Description | -| ---- | ----------- | -| assert_type | MongoDB assertion type | -| btree_counter_type | Database index effectiveness | -| cache_direction | Whether read into or written from | -| cache_status | Cache status | -| cluster_role | Whether process is acting as replica or primary | -| cpu_state | CPU state | -| cursor_state | Whether cursor is open or timed out | -| direction | Network traffic direction | -| disk_direction | Measurement type for disk operation | -| disk_status | Disk measurement type | -| document_status | Status of documents in the database | -| execution_type | Type of command | -| global_lock_state | Which queue is locked | -| memory_issue_type | Type of memory issue encountered | -| memory_state | Memory usage type | -| memory_status | Memory measurement type | -| object_type | MongoDB object type | -| operation | Type of database operation | -| oplog_type | Oplog type | -| scanned_type | Objects or indexes scanned during query | -| storage_status | Views on database size | -| ticket_type | Type of ticket available | +| Name | Description | Values | +| ---- | ----------- | ------ | +| assert_type | MongoDB assertion type | regular, warning, msg, user | +| btree_counter_type | Database index effectiveness | accesses, hits, misses | +| cache_direction | Whether read into or written from | read_into, written_from | +| cache_status | Cache status | dirty, used | +| cluster_role | Whether process is acting as replica or primary | primary, replica | +| cpu_state | CPU state | kernel, user, nice, iowait, irq, softirq, guest, steal | +| cursor_state | Whether cursor is open or timed out | timed_out, open | +| direction | Network traffic direction | receive, transmit | +| disk_direction | Measurement type for disk operation | read, write, total | +| disk_status | Disk measurement type | free, used | +| document_status | Status of documents in the database | returned, inserted, updated, deleted | +| execution_type | Type of command | reads, writes, commands | +| global_lock_state | Which queue is locked | current_queue_total, current_queue_readers, current_queue_writers | +| memory_issue_type | Type of memory issue encountered | extra_info, global_accesses_not_in_memory, exceptions_thrown | +| memory_state | Memory usage type | resident, virtual, mapped, computed, shared, free, used | +| memory_status | Memory measurement type | available, buffers, cached, free, shared, used | +| object_type | MongoDB object type | collection, index, extent, object, view, storage, data | +| operation | Type of database operation | cmd, query, update, delete, getmore, insert, scan_and_order | +| oplog_type | Oplog type | slave_lag_master_time, master_time, master_lag_time_diff | +| scanned_type | Objects or indexes scanned during query | index_items, objects 
| +| storage_status | Views on database size | total, data_size, index_size, data_size_wo_system | +| ticket_type | Type of ticket available | available_reads, available_writes | diff --git a/receiver/mongodbreceiver/documentation.md b/receiver/mongodbreceiver/documentation.md index a0327bd6807a..8b8c0f28121c 100644 --- a/receiver/mongodbreceiver/documentation.md +++ b/receiver/mongodbreceiver/documentation.md @@ -32,10 +32,10 @@ metrics: ## Metric attributes -| Name | Description | -| ---- | ----------- | -| connection_type | The status of the connection. | -| database | The name of a database. | -| memory_type | The type of memory used. | -| operation | The MongoDB operation being counted. | -| type | The result of a cache request. | +| Name | Description | Values | +| ---- | ----------- | ------ | +| connection_type (type) | The status of the connection. | active, available, current | +| database | The name of a database. | | +| memory_type (type) | The type of memory used. | resident, virtual | +| operation | The MongoDB operation being counted. | insert, query, update, delete, getmore, command | +| type | The result of a cache request. | hit, miss | diff --git a/receiver/mysqlreceiver/documentation.md b/receiver/mysqlreceiver/documentation.md index 4035cad7bad1..50440a477cb5 100644 --- a/receiver/mysqlreceiver/documentation.md +++ b/receiver/mysqlreceiver/documentation.md @@ -37,19 +37,19 @@ metrics: ## Metric attributes -| Name | Description | -| ---- | ----------- | -| buffer_pool_data | The status of buffer pool data. | -| buffer_pool_operations | The buffer pool operations types. | -| buffer_pool_pages | The buffer pool pages types. | -| command | The command types. | -| double_writes | The doublewrite types. | -| handler | The handler types. | -| locks | The table locks type. | -| log_operations | The log operation types. | -| operations | The operation types. | -| page_operations | The page operation types. | -| row_locks | The row lock type. | -| row_operations | The row operation type. | -| sorts | The sort count type. | -| threads | The thread count type. | +| Name | Description | Values | +| ---- | ----------- | ------ | +| buffer_pool_data (status) | The status of buffer pool data. | dirty, clean | +| buffer_pool_operations (operation) | The buffer pool operations types. | read_ahead_rnd, read_ahead, read_ahead_evicted, read_requests, reads, wait_free, write_requests | +| buffer_pool_pages (kind) | The buffer pool pages types. | data, free, misc | +| command (command) | The command types. | execute, close, fetch, prepare, reset, send_long_data | +| double_writes (kind) | The doublewrite types. | pages_written, writes | +| handler (kind) | The handler types. | commit, delete, discover, external_lock, mrr_init, prepare, read_first, read_key, read_last, read_next, read_prev, read_rnd, read_rnd_next, rollback, savepoint, savepoint_rollback, update, write | +| locks (kind) | The table locks type. | immediate, waited | +| log_operations (operation) | The log operation types. | waits, write_requests, writes | +| operations (operation) | The operation types. | fsyncs, reads, writes | +| page_operations (operation) | The page operation types. | created, read, written | +| row_locks (kind) | The row lock type. | waits, time | +| row_operations (operation) | The row operation type. | deleted, inserted, read, updated | +| sorts (kind) | The sort count type. | merge_passes, range, rows, scan | +| threads (kind) | The thread count type. 
| cached, connected, created, running | diff --git a/receiver/nginxreceiver/documentation.md b/receiver/nginxreceiver/documentation.md index 2d1b2a7a8f91..804831a7ae2b 100644 --- a/receiver/nginxreceiver/documentation.md +++ b/receiver/nginxreceiver/documentation.md @@ -24,6 +24,6 @@ metrics: ## Metric attributes -| Name | Description | -| ---- | ----------- | -| state | The state of a connection | +| Name | Description | Values | +| ---- | ----------- | ------ | +| state | The state of a connection | active, reading, writing, waiting | diff --git a/receiver/postgresqlreceiver/documentation.md b/receiver/postgresqlreceiver/documentation.md index 8efdf5abcbf1..50a052d100e2 100644 --- a/receiver/postgresqlreceiver/documentation.md +++ b/receiver/postgresqlreceiver/documentation.md @@ -27,10 +27,10 @@ metrics: ## Metric attributes -| Name | Description | -| ---- | ----------- | -| database | The name of the database. | -| operation | The database operation. | -| source | The block read source type. | -| state | The tuple (row) state. | -| table | The schema name followed by the table name. | +| Name | Description | Values | +| ---- | ----------- | ------ | +| database | The name of the database. | | +| operation | The database operation. | ins, upd, del, hot_upd | +| source | The block read source type. | heap_read, heap_hit, idx_read, idx_hit, toast_read, toast_hit, tidx_read, tidx_hit | +| state | The tuple (row) state. | dead, live | +| table | The schema name followed by the table name. | | diff --git a/receiver/rabbitmqreceiver/documentation.md b/receiver/rabbitmqreceiver/documentation.md index ee7a7a76e025..68f829d38cf1 100644 --- a/receiver/rabbitmqreceiver/documentation.md +++ b/receiver/rabbitmqreceiver/documentation.md @@ -34,6 +34,6 @@ metrics: ## Metric attributes -| Name | Description | -| ---- | ----------- | -| message.state | The state of messages in a queue. | +| Name | Description | Values | +| ---- | ----------- | ------ | +| message.state (state) | The state of messages in a queue. | ready, unacknowledged | diff --git a/receiver/redisreceiver/documentation.md b/receiver/redisreceiver/documentation.md index a453165955d8..483f39d6fc60 100644 --- a/receiver/redisreceiver/documentation.md +++ b/receiver/redisreceiver/documentation.md @@ -49,7 +49,7 @@ metrics: ## Metric attributes -| Name | Description | -| ---- | ----------- | -| db | Redis database identifier | -| state | Redis CPU usage state | +| Name | Description | Values | +| ---- | ----------- | ------ | +| db | Redis database identifier | | +| state | Redis CPU usage state | | diff --git a/receiver/riakreceiver/documentation.md b/receiver/riakreceiver/documentation.md index 9f6acf5413ba..3caec48b98af 100644 --- a/receiver/riakreceiver/documentation.md +++ b/receiver/riakreceiver/documentation.md @@ -32,7 +32,7 @@ metrics: ## Metric attributes -| Name | Description | -| ---- | ----------- | -| operation | The operation type for index operations. | -| request | The request operation type. | +| Name | Description | Values | +| ---- | ----------- | ------ | +| operation | The operation type for index operations. | read, write, delete | +| request | The request operation type. 
| put, get | diff --git a/receiver/zookeeperreceiver/documentation.md b/receiver/zookeeperreceiver/documentation.md index 316aee43a3fe..9a70a8c9c12f 100644 --- a/receiver/zookeeperreceiver/documentation.md +++ b/receiver/zookeeperreceiver/documentation.md @@ -42,7 +42,7 @@ metrics: ## Metric attributes -| Name | Description | -| ---- | ----------- | -| direction | State of a packet based on io direction. | -| state | State of followers | +| Name | Description | Values | +| ---- | ----------- | ------ | +| direction | State of a packet based on io direction. | received, sent | +| state | State of followers | synced, unsynced | From 0e06e0d1644b4f05c94a3ccdbea189d12cc09ece Mon Sep 17 00:00:00 2001 From: Daniel Jaglowski Date: Mon, 11 Apr 2022 16:51:34 -0400 Subject: [PATCH 30/59] Update log-collection to v0.29.0 (final independent release) (#9139) * Update log-collection to v0.29.0 * Update changelog * Update documented usages of library --- CHANGELOG.md | 4 +- cmd/configschema/go.mod | 2 +- cmd/configschema/go.sum | 4 +- examples/kubernetes/otel-collector-config.yml | 28 ++-- examples/kubernetes/otel-collector.yaml | 26 ++-- go.mod | 2 +- go.sum | 4 +- internal/stanza/go.mod | 2 +- internal/stanza/go.sum | 4 +- receiver/filelogreceiver/README.md | 2 +- receiver/filelogreceiver/go.mod | 2 +- receiver/filelogreceiver/go.sum | 4 +- receiver/journaldreceiver/go.mod | 2 +- receiver/journaldreceiver/go.sum | 4 +- receiver/prometheusreceiver/go.sum | 3 +- receiver/riakreceiver/go.sum | 122 ------------------ receiver/syslogreceiver/go.mod | 2 +- receiver/syslogreceiver/go.sum | 4 +- receiver/syslogreceiver/syslog_test.go | 4 +- receiver/tcplogreceiver/go.mod | 2 +- receiver/tcplogreceiver/go.sum | 4 +- receiver/udplogreceiver/go.mod | 2 +- receiver/udplogreceiver/go.sum | 4 +- 23 files changed, 57 insertions(+), 180 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 67c870d0ffd8..0f482a469b27 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -22,8 +22,8 @@ ### 🛑 Breaking changes 🛑 - `filelogreceiver`, `journaldreceiver`, `syslogreceiver`, `tcplogreceiver`, `udplogreceiver`: - - Updated data model to align with stable logs data model, which includes various breaking changes. (#8835) - - A detailed [Upgrade Guide](https://github.com/open-telemetry/opentelemetry-log-collection/releases/tag/v0.28.0) is available in the log-collection v0.28.0 release notes. + - Updated data model to align with stable logs data model, which includes various breaking changes. (#9139, #8835) + - A detailed [Upgrade Guide](https://github.com/open-telemetry/opentelemetry-log-collection/releases/tag/v0.29.0) is available in the log-collection v0.29.0 release notes.
- `datadogexporter`: Remove `OnlyMetadata` method from `Config` struct (#8980) - `datadogexporter`: Remove `GetCensoredKey` method from `APIConfig` struct (#8980) - `mongodbatlasreceiver`: Updated to uses newer metric builder which changed some metric and resource attributes (#9093) diff --git a/cmd/configschema/go.mod b/cmd/configschema/go.mod index 625d2ea8d1a6..70908f00ec56 100644 --- a/cmd/configschema/go.mod +++ b/cmd/configschema/go.mod @@ -364,7 +364,7 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/receiver/windowsperfcountersreceiver v0.48.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.48.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zookeeperreceiver v0.48.0 // indirect - github.com/open-telemetry/opentelemetry-log-collection v0.28.0 // indirect + github.com/open-telemetry/opentelemetry-log-collection v0.29.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.0.2 // indirect github.com/opencontainers/runc v1.1.0 // indirect diff --git a/cmd/configschema/go.sum b/cmd/configschema/go.sum index 18a0493a2721..0a145d50e531 100644 --- a/cmd/configschema/go.sum +++ b/cmd/configschema/go.sum @@ -1671,8 +1671,8 @@ github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+t github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.17.0 h1:9Luw4uT5HTjHTN8+aNcSThgH1vdXnmdJ8xIfZ4wyTRE= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= -github.com/open-telemetry/opentelemetry-log-collection v0.28.0 h1:qKreydspLlcAZnuxkbn3yqgz9Aj/5kznHLMHhYVVYwc= -github.com/open-telemetry/opentelemetry-log-collection v0.28.0/go.mod h1:ybKbU2jpv6JHt3gf649dCKWyORXq2HKaF1k5QRVUG5U= +github.com/open-telemetry/opentelemetry-log-collection v0.29.0 h1:8P3dU3fuuUkdzkCvfMJyaqpvD1Hf4nnno6PlLjq3t50= +github.com/open-telemetry/opentelemetry-log-collection v0.29.0/go.mod h1:jc765D8x90g+kMfDSSb0MxoEURxoCxz1fV0dutvzlrc= github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= diff --git a/examples/kubernetes/otel-collector-config.yml b/examples/kubernetes/otel-collector-config.yml index 7d2665a86567..e3952edf7f46 100644 --- a/examples/kubernetes/otel-collector-config.yml +++ b/examples/kubernetes/otel-collector-config.yml @@ -25,7 +25,7 @@ receivers: regex: '^(?P