From 017cbd2d735097550810de5ed8dbafc32190763d Mon Sep 17 00:00:00 2001 From: Piotr <17101802+thampiotr@users.noreply.github.com> Date: Thu, 21 Mar 2024 10:32:29 +0000 Subject: [PATCH 01/83] Fix incorrect clustering beta labels (#6740) --- .../flow/reference/components/loki.source.kubernetes.md | 4 ++-- docs/sources/flow/reference/components/loki.source.podlogs.md | 4 ++-- .../components/prometheus.operator.servicemonitors.md | 4 ++-- docs/sources/flow/reference/components/prometheus.scrape.md | 4 ++-- docs/sources/flow/reference/components/pyroscope.scrape.md | 4 ++-- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/docs/sources/flow/reference/components/loki.source.kubernetes.md b/docs/sources/flow/reference/components/loki.source.kubernetes.md index 66194a3db465..eb79e6cf817d 100644 --- a/docs/sources/flow/reference/components/loki.source.kubernetes.md +++ b/docs/sources/flow/reference/components/loki.source.kubernetes.md @@ -94,7 +94,7 @@ inside a `client` block. [authorization]: #authorization-block [oauth2]: #oauth2-block [tls_config]: #tls_config-block -[clustering]: #clustering-beta +[clustering]: #clustering-block ### client block @@ -143,7 +143,7 @@ Name | Type | Description {{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} -### clustering (beta) +### clustering block Name | Type | Description | Default | Required ---- | ---- | ----------- | ------- | -------- diff --git a/docs/sources/flow/reference/components/loki.source.podlogs.md b/docs/sources/flow/reference/components/loki.source.podlogs.md index 7c204593b28a..5220c43e373e 100644 --- a/docs/sources/flow/reference/components/loki.source.podlogs.md +++ b/docs/sources/flow/reference/components/loki.source.podlogs.md @@ -157,7 +157,7 @@ inside a `client` block. 
[tls_config]: #tls_config-block [selector]: #selector-block [match_expression]: #match_expression-block -[clustering]: #clustering-beta +[clustering]: #clustering-block ### client block @@ -242,7 +242,7 @@ The `operator` argument must be one of the following strings: Both `selector` and `namespace_selector` can make use of multiple `match_expression` inner blocks which are treated as AND clauses. -### clustering (beta) +### clustering block Name | Type | Description | Default | Required ---- | ---- | ----------- | ------- | -------- diff --git a/docs/sources/flow/reference/components/prometheus.operator.servicemonitors.md b/docs/sources/flow/reference/components/prometheus.operator.servicemonitors.md index b3e89eee3210..24a1b886aa30 100644 --- a/docs/sources/flow/reference/components/prometheus.operator.servicemonitors.md +++ b/docs/sources/flow/reference/components/prometheus.operator.servicemonitors.md @@ -75,7 +75,7 @@ inside a `client` block. [match_expression]: #match_expression-block [rule]: #rule-block [scrape]: #scrape-block -[clustering]: #clustering-beta +[clustering]: #clustering-block ### client block @@ -164,7 +164,7 @@ The `operator` argument must be one of the following strings: If there are multiple `match_expressions` blocks inside of a `selector` block, they are combined together with AND clauses. -### clustering (beta) +### clustering block Name | Type | Description | Default | Required ---- | ---- | ----------- | ------- | -------- diff --git a/docs/sources/flow/reference/components/prometheus.scrape.md b/docs/sources/flow/reference/components/prometheus.scrape.md index e329bfe4e535..6cd15ddb2553 100644 --- a/docs/sources/flow/reference/components/prometheus.scrape.md +++ b/docs/sources/flow/reference/components/prometheus.scrape.md @@ -118,7 +118,7 @@ an `oauth2` block. 
[authorization]: #authorization-block [oauth2]: #oauth2-block [tls_config]: #tls_config-block -[clustering]: #clustering-beta +[clustering]: #clustering-block ### basic_auth block @@ -136,7 +136,7 @@ an `oauth2` block. {{< docs/shared lookup="flow/reference/components/tls-config-block.md" source="agent" version="" >}} -### clustering (beta) +### clustering block Name | Type | Description | Default | Required ---- | ---- | ----------- | ------- | -------- diff --git a/docs/sources/flow/reference/components/pyroscope.scrape.md b/docs/sources/flow/reference/components/pyroscope.scrape.md index 813035c8e230..9d00df3a8c3b 100644 --- a/docs/sources/flow/reference/components/pyroscope.scrape.md +++ b/docs/sources/flow/reference/components/pyroscope.scrape.md @@ -210,7 +210,7 @@ the defaults documented in [profile.mutex][] will be used. [profile.godeltaprof_block]: #profilegodeltaprof_block-block [profile.custom]: #profilecustom-block [pprof]: https://github.com/google/pprof/blob/main/doc/README.md -[clustering]: #clustering-beta +[clustering]: #clustering-block [fgprof]: https://github.com/felixge/fgprof [godeltaprof]: https://github.com/grafana/pyroscope-go/tree/main/godeltaprof @@ -389,7 +389,7 @@ Name | Type | Description | Default | Required When the `delta` argument is `true`, a `seconds` query parameter is automatically added to requests. The `seconds` used will be equal to `scrape_interval - 1`. 
-### clustering (beta) +### clustering block Name | Type | Description | Default | Required ---- | ---- | ----------- | ------- | -------- From 7b0ff9a5f9b9e9c3f3b4a410e5288232e24b753e Mon Sep 17 00:00:00 2001 From: Erik Baranowski <39704712+erikbaranowski@users.noreply.github.com> Date: Thu, 21 Mar 2024 10:12:45 -0400 Subject: [PATCH 02/83] Fix an issue where the default values of some component's arguments change whenever that argument is explicitly configured (#6730) * Fix an issue where the default values of some component's arguments change whenever that argument is explicitly configured. Signed-off-by: erikbaranowski <39704712+erikbaranowski@users.noreply.github.com> * component/all: add test to verify SetToDefault do not share pointers --------- Signed-off-by: erikbaranowski <39704712+erikbaranowski@users.noreply.github.com> Co-authored-by: Robert Fratto --- CHANGELOG.md | 4 + go.mod | 1 - go.sum | 3 - internal/component/all/all_test.go | 176 ++++++++++++++++++ .../component/discovery/kubelet/kubelet.go | 16 +- .../discovery/kubelet/kubelet_test.go | 12 +- internal/component/faro/receiver/arguments.go | 57 +++--- .../component/otelcol/config_debug_metrics.go | 9 +- internal/component/otelcol/config_queue.go | 25 ++- internal/component/otelcol/config_retry.go | 19 +- .../otelcol/connector/host_info/host_info.go | 11 +- .../connector/servicegraph/servicegraph.go | 85 ++++----- .../otelcol/exporter/exporter_test.go | 4 +- .../exporter/loadbalancing/loadbalancing.go | 59 +++--- .../otelcol/exporter/logging/logging.go | 15 +- .../component/otelcol/exporter/otlp/otlp.go | 34 ++-- .../otelcol/exporter/otlphttp/otlphttp.go | 35 ++-- .../jaeger_remote_sampling.go | 37 ++-- .../otelcol/processor/discovery/discovery.go | 23 +-- .../processor/discovery/discovery_test.go | 12 +- .../internal/system/config.go | 34 ++-- .../resourcedetection/resourcedetection.go | 36 ++-- .../resourcedetection_test.go | 65 ++++--- .../otelcol/receiver/jaeger/jaeger.go | 5 +- 
.../component/otelcol/receiver/kafka/kafka.go | 83 +++++---- .../otelcol/receiver/opencensus/opencensus.go | 23 +-- .../receiver/opencensus/opencensus_test.go | 8 +- .../component/otelcol/receiver/otlp/otlp.go | 42 ++--- .../otelcol/receiver/receiver_test.go | 4 +- .../otelcol/receiver/vcenter/vcenter.go | 128 +++++++------ .../otelcol/receiver/zipkin/zipkin.go | 15 +- .../prometheus/exporter/azure/azure.go | 24 ++- .../prometheus/exporter/cadvisor/cadvisor.go | 46 +++-- .../prometheus/exporter/self/self.go | 5 +- .../testdata-v2/integrations_v2.river | 4 +- .../testdata-v2/unsupported.river | 4 +- .../staticconvert/testdata/traces_multi.river | 8 +- .../staticconvert/testdata/unsupported.river | 4 +- 38 files changed, 658 insertions(+), 517 deletions(-) create mode 100644 internal/component/all/all_test.go diff --git a/CHANGELOG.md b/CHANGELOG.md index b7170b58c2a8..1c97705997ac 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -40,6 +40,10 @@ Main (unreleased) - Update gcp_exporter to a newer version with a patch for incorrect delta histograms (@kgeckhart) +- Fix an issue where the default values of some component's arguments change + whenever that argument is explicitly configured. This issue only affected a + small subset of arguments across 15 components. (@erikbaranowski, @rfratto) + ### Other changes - Clustering for Grafana Agent in Flow mode has graduated from beta to stable. 
diff --git a/go.mod b/go.mod index 170fc3063f34..20083bafb94e 100644 --- a/go.mod +++ b/go.mod @@ -620,7 +620,6 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.87.0 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.87.0 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/vcenterreceiver v0.87.0 - github.com/prometheus/tsdb v0.10.0 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.42.0 golang.org/x/crypto/x509roots/fallback v0.0.0-20240208163226-62c9f1799c91 k8s.io/apimachinery v0.28.3 diff --git a/go.sum b/go.sum index ce676740a1ec..7f5d05326ee6 100644 --- a/go.sum +++ b/go.sum @@ -560,7 +560,6 @@ github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc h1:8WFBn63wegobsY github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc/go.mod h1:c9O8+fpSOX1DM8cPNSkX/qsBWdkD4yd2dpciOWQjpBw= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/digitalocean/godo v1.1.1/go.mod h1:h6faOIcZ8lWIwNQ+DN7b3CgX4Kwby5T+nbpNqkUIozU= github.com/digitalocean/godo v1.10.0/go.mod h1:h6faOIcZ8lWIwNQ+DN7b3CgX4Kwby5T+nbpNqkUIozU= github.com/digitalocean/godo v1.104.1 h1:SZNxjAsskM/su0YW9P8Wx3gU0W1Z13b6tZlYNpl5BnA= @@ -2013,8 +2012,6 @@ github.com/prometheus/snmp_exporter v0.24.1/go.mod h1:j6uIGkdR0DXvKn7HJtSkeDj//U github.com/prometheus/statsd_exporter v0.22.7/go.mod h1:N/TevpjkIh9ccs6nuzY3jQn9dFqnUakOjnEuMPJJJnI= github.com/prometheus/statsd_exporter v0.22.8 h1:Qo2D9ZzaQG+id9i5NYNGmbf1aa/KxKbB9aKfMS+Yib0= 
github.com/prometheus/statsd_exporter v0.22.8/go.mod h1:/DzwbTEaFTE0Ojz5PqcSk6+PFHOPWGxdXVr6yC8eFOM= -github.com/prometheus/tsdb v0.10.0 h1:If5rVCMTp6W2SiRAQFlbpJNgVlgMEd+U2GZckwK38ic= -github.com/prometheus/tsdb v0.10.0/go.mod h1:oi49uRhEe9dPUTlS3JRZOwJuVi6tmh10QSgwXEyGCt4= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= diff --git a/internal/component/all/all_test.go b/internal/component/all/all_test.go new file mode 100644 index 000000000000..bbb3d205f824 --- /dev/null +++ b/internal/component/all/all_test.go @@ -0,0 +1,176 @@ +package all + +import ( + "fmt" + "reflect" + "testing" + + "github.com/grafana/agent/internal/component" + "github.com/grafana/river" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// TestSetDefault_NoPointerReuse ensures that calls to SetDefault do not re-use +// pointers. The test iterates through all registered components, and then +// recursively traverses through its Arguments type to guarantee that no two +// calls to SetDefault result in pointer reuse. +// +// Nested types that also implement river.Defaulter are also checked. 
+func TestSetDefault_NoPointerReuse(t *testing.T) { + allComponents := component.AllNames() + for _, componentName := range allComponents { + reg, ok := component.Get(componentName) + require.True(t, ok, "Expected component %q to exist", componentName) + + t.Run(reg.Name, func(t *testing.T) { + testNoReusePointer(t, reg) + }) + } +} + +func testNoReusePointer(t *testing.T, reg component.Registration) { + t.Helper() + + var ( + args1 = reg.CloneArguments() + args2 = reg.CloneArguments() + ) + + if args1, ok := args1.(river.Defaulter); ok { + args1.SetToDefault() + } + if args2, ok := args2.(river.Defaulter); ok { + args2.SetToDefault() + } + + rv1, rv2 := reflect.ValueOf(args1), reflect.ValueOf(args2) + ty := rv1.Type().Elem() + + // Edge case: if the component's arguments type is an empty struct, skip. + // Not skipping causes the test to fail, due to an optimization in + // reflect.New where initializing the same zero-length object results in the + // same pointer. + if rv1.Elem().NumField() == 0 { + return + } + + if path, shared := sharePointer(rv1, rv2); shared { + fullPath := fmt.Sprintf("%s.%s.%s", ty.PkgPath(), ty.Name(), path) + + assert.Fail(t, + fmt.Sprintf("Detected SetToDefault pointer reuse at %s", fullPath), + "Types implementing river.Defaulter must not reuse pointers across multiple calls. Doing so leads to default values being changed when unmarshaling configuration files. If you're seeing this error, check the path above and ensure that copies are being made of any pointers in all instances of SetToDefault calls where that field is used.", + ) + } +} + +func sharePointer(a, b reflect.Value) (string, bool) { + // We want to recursively check a and b, so if they're nil they need to be + // initialized to see if any of their inner values have shared pointers after + // being initialized with defaults. 
+ initValue(a) + initValue(b) + + // From the documentation of reflect.Value.Pointer, values of chan, func, + // map, pointer, slice, and unsafe pointer are all pointer values. + // + // Additionally, we want to recurse into values (even if they don't have + // addresses) to see if there's shared pointers inside of them. + switch a.Kind() { + case reflect.Chan, reflect.Func, reflect.UnsafePointer: + return "", a.Pointer() == b.Pointer() + + case reflect.Map: + if pointersMatch(a, b) { + return "", true + } + + iter := a.MapRange() + for iter.Next() { + aValue, bValue := iter.Value(), b.MapIndex(iter.Key()) + if !bValue.IsValid() { + continue + } + if path, shared := sharePointer(aValue, bValue); shared { + return path, true + } + } + return "", false + + case reflect.Pointer: + if pointersMatch(a, b) { + return "", true + } else { + // Recursively navigate inside of the pointer. + return sharePointer(a.Elem(), b.Elem()) + } + + case reflect.Interface: + if a.UnsafeAddr() == b.UnsafeAddr() { + return "", true + } + return sharePointer(a.Elem(), b.Elem()) + + case reflect.Slice: + if pointersMatch(a, b) { + // If the slices are preallocated immutable pointers such as []string{}, we can ignore + if a.Len() == 0 && a.Cap() == 0 && b.Len() == 0 && b.Cap() == 0 { + return "", false + } + return "", true + } + + size := min(a.Len(), b.Len()) + for i := 0; i < size; i++ { + if path, shared := sharePointer(a.Index(i), b.Index(i)); shared { + return path, true + } + } + return "", false + } + + // Recurse into non-pointer types. + switch a.Kind() { + case reflect.Array: + for i := 0; i < a.Len(); i++ { + if path, shared := sharePointer(a.Index(i), b.Index(i)); shared { + return path, true + } + } + return "", false + + case reflect.Struct: + // Check to make sure there are no shared pointers between args1 and args2. 
+ for i := 0; i < a.NumField(); i++ { + if path, shared := sharePointer(a.Field(i), b.Field(i)); shared { + fullPath := a.Type().Field(i).Name + if path != "" { + fullPath += "." + path + } + return fullPath, true + } + } + return "", false + } + + return "", false +} + +func pointersMatch(a, b reflect.Value) bool { + if a.IsNil() || b.IsNil() { + return false + } + return a.Pointer() == b.Pointer() +} + +// initValue initializes nil pointers. If the nil pointer implements +// river.Defaulter, it is also set to default values. +func initValue(rv reflect.Value) { + if rv.Kind() == reflect.Pointer && rv.IsNil() { + rv.Set(reflect.New(rv.Type().Elem())) + if defaulter, ok := rv.Interface().(river.Defaulter); ok { + defaulter.SetToDefault() + } + } +} diff --git a/internal/component/discovery/kubelet/kubelet.go b/internal/component/discovery/kubelet/kubelet.go index a753c1e7341e..1fcad0bcbd43 100644 --- a/internal/component/discovery/kubelet/kubelet.go +++ b/internal/component/discovery/kubelet/kubelet.go @@ -78,17 +78,15 @@ type Arguments struct { Namespaces []string `river:"namespaces,attr,optional"` } -// DefaultConfig holds defaults for SDConfig. -var DefaultConfig = Arguments{ - URL: config.URL{ - URL: defaultKubeletURL, - }, - HTTPClientConfig: config.DefaultHTTPClientConfig, -} - // SetToDefault implements river.Defaulter. func (args *Arguments) SetToDefault() { - *args = DefaultConfig + cloneDefaultKubeletUrl := *defaultKubeletURL + *args = Arguments{ + URL: config.URL{ + URL: &cloneDefaultKubeletUrl, + }, + HTTPClientConfig: config.DefaultHTTPClientConfig, + } } // Validate implements river.Validator. 
diff --git a/internal/component/discovery/kubelet/kubelet_test.go b/internal/component/discovery/kubelet/kubelet_test.go index 078162fe7a88..89292f06be8b 100644 --- a/internal/component/discovery/kubelet/kubelet_test.go +++ b/internal/component/discovery/kubelet/kubelet_test.go @@ -51,7 +51,9 @@ func TestPodDeletion(t *testing.T) { Items: []v1.Pod{pod2}, } - kubeletDiscovery, err := NewKubeletDiscovery(DefaultConfig) + var args Arguments + args.SetToDefault() + kubeletDiscovery, err := NewKubeletDiscovery(args) require.NoError(t, err) _, err = kubeletDiscovery.refresh(podList1) @@ -100,7 +102,9 @@ func TestDiscoveryPodWithoutPod(t *testing.T) { Items: []v1.Pod{pod1, pod2}, } - kubeletDiscovery, err := NewKubeletDiscovery(DefaultConfig) + var args Arguments + args.SetToDefault() + kubeletDiscovery, err := NewKubeletDiscovery(args) require.NoError(t, err) _, err = kubeletDiscovery.refresh(podList1) @@ -109,7 +113,9 @@ func TestDiscoveryPodWithoutPod(t *testing.T) { } func TestWithDefaultKubeletHost(t *testing.T) { - kubeletDiscovery, err := NewKubeletDiscovery(DefaultConfig) + var args Arguments + args.SetToDefault() + kubeletDiscovery, err := NewKubeletDiscovery(args) require.NoError(t, err) require.Equal(t, "https://localhost:10250/pods", kubeletDiscovery.url) } diff --git a/internal/component/faro/receiver/arguments.go b/internal/component/faro/receiver/arguments.go index 62d2e413be50..0169f0e80e2c 100644 --- a/internal/component/faro/receiver/arguments.go +++ b/internal/component/faro/receiver/arguments.go @@ -10,33 +10,6 @@ import ( "github.com/grafana/river/rivertypes" ) -// Defaults for various arguments. 
-var ( - DefaultArguments = Arguments{ - Server: DefaultServerArguments, - SourceMaps: DefaultSourceMapsArguments, - } - - DefaultServerArguments = ServerArguments{ - Host: "127.0.0.1", - Port: 12347, - RateLimiting: DefaultRateLimitingArguments, - MaxAllowedPayloadSize: 5 * units.MiB, - } - - DefaultRateLimitingArguments = RateLimitingArguments{ - Enabled: true, - Rate: 50, - BurstSize: 100, - } - - DefaultSourceMapsArguments = SourceMapsArguments{ - Download: true, - DownloadFromOrigins: []string{"*"}, - DownloadTimeout: time.Second, - } -) - // Arguments configures the app_agent_receiver component. type Arguments struct { LogLabels map[string]string `river:"extra_log_labels,attr,optional"` @@ -49,7 +22,10 @@ type Arguments struct { var _ river.Defaulter = (*Arguments)(nil) // SetToDefault applies default settings. -func (args *Arguments) SetToDefault() { *args = DefaultArguments } +func (args *Arguments) SetToDefault() { + args.Server.SetToDefault() + args.SourceMaps.SetToDefault() +} // ServerArguments configures the HTTP server where telemetry information will // be sent from Faro clients. @@ -63,6 +39,15 @@ type ServerArguments struct { RateLimiting RateLimitingArguments `river:"rate_limiting,block,optional"` } +func (s *ServerArguments) SetToDefault() { + *s = ServerArguments{ + Host: "127.0.0.1", + Port: 12347, + MaxAllowedPayloadSize: 5 * units.MiB, + } + s.RateLimiting.SetToDefault() +} + // RateLimitingArguments configures rate limiting for the HTTP server. type RateLimitingArguments struct { Enabled bool `river:"enabled,attr,optional"` @@ -70,6 +55,14 @@ type RateLimitingArguments struct { BurstSize float64 `river:"burst_size,attr,optional"` } +func (r *RateLimitingArguments) SetToDefault() { + *r = RateLimitingArguments{ + Enabled: true, + Rate: 50, + BurstSize: 100, + } +} + // SourceMapsArguments configures how app_agent_receiver will retrieve source // maps for transforming stack traces. 
type SourceMapsArguments struct { @@ -79,6 +72,14 @@ type SourceMapsArguments struct { Locations []LocationArguments `river:"location,block,optional"` } +func (s *SourceMapsArguments) SetToDefault() { + *s = SourceMapsArguments{ + Download: true, + DownloadFromOrigins: []string{"*"}, + DownloadTimeout: time.Second, + } +} + // LocationArguments specifies an individual location where source maps will be loaded. type LocationArguments struct { Path string `river:"path,attr"` diff --git a/internal/component/otelcol/config_debug_metrics.go b/internal/component/otelcol/config_debug_metrics.go index f387f64cbfdf..c0a47a9d08fd 100644 --- a/internal/component/otelcol/config_debug_metrics.go +++ b/internal/component/otelcol/config_debug_metrics.go @@ -5,12 +5,9 @@ type DebugMetricsArguments struct { DisableHighCardinalityMetrics bool `river:"disable_high_cardinality_metrics,attr,optional"` } -// DefaultDebugMetricsArguments holds default settings for DebugMetricsArguments. -var DefaultDebugMetricsArguments = DebugMetricsArguments{ - DisableHighCardinalityMetrics: true, -} - // SetToDefault implements river.Defaulter. func (args *DebugMetricsArguments) SetToDefault() { - *args = DefaultDebugMetricsArguments + *args = DebugMetricsArguments{ + DisableHighCardinalityMetrics: true, + } } diff --git a/internal/component/otelcol/config_queue.go b/internal/component/otelcol/config_queue.go index b6a61294e9cd..a15a7983b812 100644 --- a/internal/component/otelcol/config_queue.go +++ b/internal/component/otelcol/config_queue.go @@ -16,22 +16,19 @@ type QueueArguments struct { // TODO(rfratto): queues can send to persistent storage through an extension. } -// DefaultQueueArguments holds default settings for QueueArguments. 
-var DefaultQueueArguments = QueueArguments{ - Enabled: true, - NumConsumers: 10, - - // Copied from [upstream](https://github.com/open-telemetry/opentelemetry-collector/blob/241334609fc47927b4a8533dfca28e0f65dad9fe/exporter/exporterhelper/queue_sender.go#L50-L53) - // - // By default, batches are 8192 spans, for a total of up to 8 million spans in the queue - // This can be estimated at 1-4 GB worth of maximum memory usage - // This default is probably still too high, and may be adjusted further down in a future release - QueueSize: 1000, -} - // SetToDefault implements river.Defaulter. func (args *QueueArguments) SetToDefault() { - *args = DefaultQueueArguments + *args = QueueArguments{ + Enabled: true, + NumConsumers: 10, + + // Copied from [upstream](https://github.com/open-telemetry/opentelemetry-collector/blob/241334609fc47927b4a8533dfca28e0f65dad9fe/exporter/exporterhelper/queue_sender.go#L50-L53) + // + // By default, batches are 8192 spans, for a total of up to 8 million spans in the queue + // This can be estimated at 1-4 GB worth of maximum memory usage + // This default is probably still too high, and may be adjusted further down in a future release + QueueSize: 1000, + } } // Convert converts args into the upstream type. diff --git a/internal/component/otelcol/config_retry.go b/internal/component/otelcol/config_retry.go index 8b1d7b35991a..12a2ffea85b9 100644 --- a/internal/component/otelcol/config_retry.go +++ b/internal/component/otelcol/config_retry.go @@ -24,19 +24,16 @@ var ( _ river.Validator = (*RetryArguments)(nil) ) -// DefaultRetryArguments holds default settings for RetryArguments. -var DefaultRetryArguments = RetryArguments{ - Enabled: true, - InitialInterval: 5 * time.Second, - RandomizationFactor: 0.5, - Multiplier: 1.5, - MaxInterval: 30 * time.Second, - MaxElapsedTime: 5 * time.Minute, -} - // SetToDefault implements river.Defaulter. 
func (args *RetryArguments) SetToDefault() { - *args = DefaultRetryArguments + *args = RetryArguments{ + Enabled: true, + InitialInterval: 5 * time.Second, + RandomizationFactor: 0.5, + Multiplier: 1.5, + MaxInterval: 30 * time.Second, + MaxElapsedTime: 5 * time.Minute, + } } // Validate returns an error if args is invalid. diff --git a/internal/component/otelcol/connector/host_info/host_info.go b/internal/component/otelcol/connector/host_info/host_info.go index 9fbb4dcee6b5..b97f55397b16 100644 --- a/internal/component/otelcol/connector/host_info/host_info.go +++ b/internal/component/otelcol/connector/host_info/host_info.go @@ -43,15 +43,12 @@ var ( _ connector.Arguments = (*Arguments)(nil) ) -// DefaultArguments holds default settings for Arguments. -var DefaultArguments = Arguments{ - HostIdentifiers: []string{"host.id"}, - MetricsFlushInterval: 60 * time.Second, -} - // SetToDefault implements river.Defaulter. func (args *Arguments) SetToDefault() { - *args = DefaultArguments + *args = Arguments{ + HostIdentifiers: []string{"host.id"}, + MetricsFlushInterval: 60 * time.Second, + } } // Validate implements river.Validator. diff --git a/internal/component/otelcol/connector/servicegraph/servicegraph.go b/internal/component/otelcol/connector/servicegraph/servicegraph.go index a637a94a7329..dfd25b2be497 100644 --- a/internal/component/otelcol/connector/servicegraph/servicegraph.go +++ b/internal/component/otelcol/connector/servicegraph/servicegraph.go @@ -65,55 +65,56 @@ type StoreConfig struct { TTL time.Duration `river:"ttl,attr,optional"` } +func (sc *StoreConfig) SetToDefault() { + *sc = StoreConfig{ + MaxItems: 1000, + TTL: 2 * time.Second, + } +} + var ( _ river.Validator = (*Arguments)(nil) _ river.Defaulter = (*Arguments)(nil) ) -// DefaultArguments holds default settings for Arguments. 
-var DefaultArguments = Arguments{ - LatencyHistogramBuckets: []time.Duration{ - 2 * time.Millisecond, - 4 * time.Millisecond, - 6 * time.Millisecond, - 8 * time.Millisecond, - 10 * time.Millisecond, - 50 * time.Millisecond, - 100 * time.Millisecond, - 200 * time.Millisecond, - 400 * time.Millisecond, - 800 * time.Millisecond, - 1 * time.Second, - 1400 * time.Millisecond, - 2 * time.Second, - 5 * time.Second, - 10 * time.Second, - 15 * time.Second, - }, - Dimensions: []string{}, - Store: StoreConfig{ - MaxItems: 1000, - TTL: 2 * time.Second, - }, - CacheLoop: 1 * time.Minute, - StoreExpirationLoop: 2 * time.Second, - //TODO: Add VirtualNodePeerAttributes when it's no longer controlled by - // the "processor.servicegraph.virtualNode" feature gate. - // VirtualNodePeerAttributes: []string{ - // semconv.AttributeDBName, - // semconv.AttributeNetSockPeerAddr, - // semconv.AttributeNetPeerName, - // semconv.AttributeRPCService, - // semconv.AttributeNetSockPeerName, - // semconv.AttributeNetPeerName, - // semconv.AttributeHTTPURL, - // semconv.AttributeHTTPTarget, - // }, -} - // SetToDefault implements river.Defaulter. func (args *Arguments) SetToDefault() { - *args = DefaultArguments + *args = Arguments{ + LatencyHistogramBuckets: []time.Duration{ + 2 * time.Millisecond, + 4 * time.Millisecond, + 6 * time.Millisecond, + 8 * time.Millisecond, + 10 * time.Millisecond, + 50 * time.Millisecond, + 100 * time.Millisecond, + 200 * time.Millisecond, + 400 * time.Millisecond, + 800 * time.Millisecond, + 1 * time.Second, + 1400 * time.Millisecond, + 2 * time.Second, + 5 * time.Second, + 10 * time.Second, + 15 * time.Second, + }, + Dimensions: []string{}, + CacheLoop: 1 * time.Minute, + StoreExpirationLoop: 2 * time.Second, + //TODO: Add VirtualNodePeerAttributes when it's no longer controlled by + // the "processor.servicegraph.virtualNode" feature gate. 
+ // VirtualNodePeerAttributes: []string{ + // semconv.AttributeDBName, + // semconv.AttributeNetSockPeerAddr, + // semconv.AttributeNetPeerName, + // semconv.AttributeRPCService, + // semconv.AttributeNetSockPeerName, + // semconv.AttributeNetPeerName, + // semconv.AttributeHTTPURL, + // semconv.AttributeHTTPTarget, + // }, + } + args.Store.SetToDefault() } // Validate implements river.Validator. diff --git a/internal/component/otelcol/exporter/exporter_test.go b/internal/component/otelcol/exporter/exporter_test.go index 8c9489af71c4..dd61355ec62c 100644 --- a/internal/component/otelcol/exporter/exporter_test.go +++ b/internal/component/otelcol/exporter/exporter_test.go @@ -138,7 +138,9 @@ func (fa fakeExporterArgs) Exporters() map[otelcomponent.DataType]map[otelcompon } func (fe fakeExporterArgs) DebugMetricsConfig() otelcol.DebugMetricsArguments { - return otelcol.DefaultDebugMetricsArguments + var dma otelcol.DebugMetricsArguments + dma.SetToDefault() + return dma } type fakeExporter struct { diff --git a/internal/component/otelcol/exporter/loadbalancing/loadbalancing.go b/internal/component/otelcol/exporter/loadbalancing/loadbalancing.go index c355f27315bc..d0c1ee9f27b8 100644 --- a/internal/component/otelcol/exporter/loadbalancing/loadbalancing.go +++ b/internal/component/otelcol/exporter/loadbalancing/loadbalancing.go @@ -55,27 +55,13 @@ var ( _ river.Validator = &Arguments{} ) -var ( - // DefaultArguments holds default values for Arguments. - DefaultArguments = Arguments{ - Protocol: Protocol{ - OTLP: DefaultOTLPConfig, - }, - RoutingKey: "traceID", - DebugMetrics: otelcol.DefaultDebugMetricsArguments, - } - - DefaultOTLPConfig = OtlpConfig{ - Timeout: otelcol.DefaultTimeout, - Queue: otelcol.DefaultQueueArguments, - Retry: otelcol.DefaultRetryArguments, - Client: DefaultGRPCClientArguments, - } -) - // SetToDefault implements river.Defaulter. 
func (args *Arguments) SetToDefault() { - *args = DefaultArguments + *args = Arguments{ + RoutingKey: "traceID", + } + args.DebugMetrics.SetToDefault() + args.Protocol.OTLP.SetToDefault() } // Validate implements river.Validator. @@ -124,18 +110,23 @@ type OtlpConfig struct { Client GRPCClientArguments `river:"client,block"` } -func (OtlpConfig *OtlpConfig) SetToDefault() { - *OtlpConfig = DefaultOTLPConfig +func (oc *OtlpConfig) SetToDefault() { + *oc = OtlpConfig{ + Timeout: otelcol.DefaultTimeout, + } + oc.Client.SetToDefault() + oc.Retry.SetToDefault() + oc.Queue.SetToDefault() } -func (otlpConfig OtlpConfig) Convert() otlpexporter.Config { +func (oc OtlpConfig) Convert() otlpexporter.Config { return otlpexporter.Config{ TimeoutSettings: exporterhelper.TimeoutSettings{ - Timeout: otlpConfig.Timeout, + Timeout: oc.Timeout, }, - QueueSettings: *otlpConfig.Queue.Convert(), - RetrySettings: *otlpConfig.Retry.Convert(), - GRPCClientSettings: *otlpConfig.Client.Convert(), + QueueSettings: *oc.Queue.Convert(), + RetrySettings: *oc.Retry.Convert(), + GRPCClientSettings: *oc.Client.Convert(), } } @@ -316,16 +307,12 @@ func (args *GRPCClientArguments) Extensions() map[otelcomponent.ID]otelextension return m } -// DefaultGRPCClientArguments holds component-specific default settings for -// GRPCClientArguments. -var DefaultGRPCClientArguments = GRPCClientArguments{ - Headers: map[string]string{}, - Compression: otelcol.CompressionTypeGzip, - WriteBufferSize: 512 * 1024, - BalancerName: otelcol.DefaultBalancerName, -} - // SetToDefault implements river.Defaulter. 
func (args *GRPCClientArguments) SetToDefault() { - *args = DefaultGRPCClientArguments + *args = GRPCClientArguments{ + Headers: map[string]string{}, + Compression: otelcol.CompressionTypeGzip, + WriteBufferSize: 512 * 1024, + BalancerName: otelcol.DefaultBalancerName, + } } diff --git a/internal/component/otelcol/exporter/logging/logging.go b/internal/component/otelcol/exporter/logging/logging.go index 71dac49faf06..1e08fe2104b3 100644 --- a/internal/component/otelcol/exporter/logging/logging.go +++ b/internal/component/otelcol/exporter/logging/logging.go @@ -38,17 +38,14 @@ type Arguments struct { var _ exporter.Arguments = Arguments{} -// DefaultArguments holds default values for Arguments. -var DefaultArguments = Arguments{ - Verbosity: configtelemetry.LevelNormal, - SamplingInitial: 2, - SamplingThereafter: 500, - DebugMetrics: otelcol.DefaultDebugMetricsArguments, -} - // SetToDefault implements river.Defaulter. func (args *Arguments) SetToDefault() { - *args = DefaultArguments + *args = Arguments{ + Verbosity: configtelemetry.LevelNormal, + SamplingInitial: 2, + SamplingThereafter: 500, + } + args.DebugMetrics.SetToDefault() } // Convert implements exporter.Arguments. diff --git a/internal/component/otelcol/exporter/otlp/otlp.go b/internal/component/otelcol/exporter/otlp/otlp.go index d50c87622657..a86aec221954 100644 --- a/internal/component/otelcol/exporter/otlp/otlp.go +++ b/internal/component/otelcol/exporter/otlp/otlp.go @@ -43,18 +43,16 @@ type Arguments struct { var _ exporter.Arguments = Arguments{} -// DefaultArguments holds default values for Arguments. -var DefaultArguments = Arguments{ - Timeout: otelcol.DefaultTimeout, - Queue: otelcol.DefaultQueueArguments, - Retry: otelcol.DefaultRetryArguments, - Client: DefaultGRPCClientArguments, - DebugMetrics: otelcol.DefaultDebugMetricsArguments, -} - // SetToDefault implements river.Defaulter. 
func (args *Arguments) SetToDefault() { - *args = DefaultArguments + *args = Arguments{ + Timeout: otelcol.DefaultTimeout, + } + + args.Queue.SetToDefault() + args.Retry.SetToDefault() + args.Client.SetToDefault() + args.DebugMetrics.SetToDefault() } // Convert implements exporter.Arguments. @@ -88,16 +86,12 @@ func (args Arguments) DebugMetricsConfig() otelcol.DebugMetricsArguments { // component-specific defaults. type GRPCClientArguments otelcol.GRPCClientArguments -// DefaultGRPCClientArguments holds component-specific default settings for -// GRPCClientArguments. -var DefaultGRPCClientArguments = GRPCClientArguments{ - Headers: map[string]string{}, - Compression: otelcol.CompressionTypeGzip, - WriteBufferSize: 512 * 1024, - BalancerName: otelcol.DefaultBalancerName, -} - // SetToDefault implements river.Defaulter. func (args *GRPCClientArguments) SetToDefault() { - *args = DefaultGRPCClientArguments + *args = GRPCClientArguments{ + Headers: map[string]string{}, + Compression: otelcol.CompressionTypeGzip, + WriteBufferSize: 512 * 1024, + BalancerName: otelcol.DefaultBalancerName, + } } diff --git a/internal/component/otelcol/exporter/otlphttp/otlphttp.go b/internal/component/otelcol/exporter/otlphttp/otlphttp.go index 246d9df846cc..a5c4dbe1de61 100644 --- a/internal/component/otelcol/exporter/otlphttp/otlphttp.go +++ b/internal/component/otelcol/exporter/otlphttp/otlphttp.go @@ -48,17 +48,13 @@ type Arguments struct { var _ exporter.Arguments = Arguments{} -// DefaultArguments holds default values for Arguments. -var DefaultArguments = Arguments{ - Queue: otelcol.DefaultQueueArguments, - Retry: otelcol.DefaultRetryArguments, - Client: DefaultHTTPClientArguments, - DebugMetrics: otelcol.DefaultDebugMetricsArguments, -} - // SetToDefault implements river.Defaulter. 
func (args *Arguments) SetToDefault() { - *args = DefaultArguments + *args = Arguments{} + args.Queue.SetToDefault() + args.Retry.SetToDefault() + args.Client.SetToDefault() + args.DebugMetrics.SetToDefault() } // Convert implements exporter.Arguments. @@ -102,11 +98,17 @@ type HTTPClientArguments otelcol.HTTPClientArguments // Default server settings. var ( - DefaultMaxIddleConns = 100 - DefaultIdleConnTimeout = 90 * time.Second - DefaultHTTPClientArguments = HTTPClientArguments{ - MaxIdleConns: &DefaultMaxIddleConns, - IdleConnTimeout: &DefaultIdleConnTimeout, + DefaultMaxIdleConns = 100 + DefaultIdleConnTimeout = 90 * time.Second +) + +// SetToDefault implements river.Defaulter. +func (args *HTTPClientArguments) SetToDefault() { + maxIdleConns := DefaultMaxIdleConns + idleConnTimeout := DefaultIdleConnTimeout + *args = HTTPClientArguments{ + MaxIdleConns: &maxIdleConns, + IdleConnTimeout: &idleConnTimeout, Timeout: 30 * time.Second, Headers: map[string]string{}, @@ -114,9 +116,4 @@ var ( ReadBufferSize: 0, WriteBufferSize: 512 * 1024, } -) - -// SetToDefault implements river.Defaulter. -func (args *HTTPClientArguments) SetToDefault() { - *args = DefaultHTTPClientArguments } diff --git a/internal/component/otelcol/extension/jaeger_remote_sampling/jaeger_remote_sampling.go b/internal/component/otelcol/extension/jaeger_remote_sampling/jaeger_remote_sampling.go index b01b92ccc119..6269e9982acc 100644 --- a/internal/component/otelcol/extension/jaeger_remote_sampling/jaeger_remote_sampling.go +++ b/internal/component/otelcol/extension/jaeger_remote_sampling/jaeger_remote_sampling.go @@ -37,18 +37,6 @@ type ( HTTPServerArguments otelcol.HTTPServerArguments ) -// Default server settings. -var ( - DefaultGRPCServerArguments = GRPCServerArguments{ - Endpoint: "0.0.0.0:14250", - Transport: "tcp", - } - - DefaultHTTPServerArguments = HTTPServerArguments{ - Endpoint: "0.0.0.0:5778", - } -) - // Arguments configures the otelcol.extension.jaegerremotesampling component. 
type Arguments struct { GRPC *GRPCServerArguments `river:"grpc,block,optional"` @@ -127,12 +115,17 @@ func (a *ArgumentsSource) Validate() error { // SetToDefault implements river.Defaulter. func (args *GRPCServerArguments) SetToDefault() { - *args = DefaultGRPCServerArguments + *args = GRPCServerArguments{ + Endpoint: "0.0.0.0:14250", + Transport: "tcp", + } } // SetToDefault implements river.Defaulter. func (args *HTTPServerArguments) SetToDefault() { - *args = DefaultHTTPServerArguments + *args = HTTPServerArguments{ + Endpoint: "0.0.0.0:5778", + } } // GRPCClientArguments is used to configure @@ -140,16 +133,12 @@ func (args *HTTPServerArguments) SetToDefault() { // component-specific defaults. type GRPCClientArguments otelcol.GRPCClientArguments -// DefaultGRPCClientArguments holds component-specific -// default settings for GRPCClientArguments. -var DefaultGRPCClientArguments = GRPCClientArguments{ - Headers: map[string]string{}, - Compression: otelcol.CompressionTypeGzip, - WriteBufferSize: 512 * 1024, - BalancerName: otelcol.DefaultBalancerName, -} - // SetToDefault implements river.Defaulter. func (args *GRPCClientArguments) SetToDefault() { - *args = DefaultGRPCClientArguments + *args = GRPCClientArguments{ + Headers: map[string]string{}, + Compression: otelcol.CompressionTypeGzip, + WriteBufferSize: 512 * 1024, + BalancerName: otelcol.DefaultBalancerName, + } } diff --git a/internal/component/otelcol/processor/discovery/discovery.go b/internal/component/otelcol/processor/discovery/discovery.go index 08073fa58b4c..226899139930 100644 --- a/internal/component/otelcol/processor/discovery/discovery.go +++ b/internal/component/otelcol/processor/discovery/discovery.go @@ -45,21 +45,18 @@ var ( _ river.Validator = (*Arguments)(nil) ) -// DefaultArguments holds default settings for Arguments. 
-var DefaultArguments = Arguments{ - OperationType: promsdconsumer.OperationTypeUpsert, - PodAssociations: []string{ - promsdconsumer.PodAssociationIPLabel, - promsdconsumer.PodAssociationOTelIPLabel, - promsdconsumer.PodAssociationk8sIPLabel, - promsdconsumer.PodAssociationHostnameLabel, - promsdconsumer.PodAssociationConnectionIP, - }, -} - // SetToDefault implements river.Defaulter. func (args *Arguments) SetToDefault() { - *args = DefaultArguments + *args = Arguments{ + OperationType: promsdconsumer.OperationTypeUpsert, + PodAssociations: []string{ + promsdconsumer.PodAssociationIPLabel, + promsdconsumer.PodAssociationOTelIPLabel, + promsdconsumer.PodAssociationk8sIPLabel, + promsdconsumer.PodAssociationHostnameLabel, + promsdconsumer.PodAssociationConnectionIP, + }, + } } // Validate implements river.Validator. diff --git a/internal/component/otelcol/processor/discovery/discovery_test.go b/internal/component/otelcol/processor/discovery/discovery_test.go index 5c7788d247d2..ac741673325d 100644 --- a/internal/component/otelcol/processor/discovery/discovery_test.go +++ b/internal/component/otelcol/processor/discovery/discovery_test.go @@ -57,8 +57,10 @@ func Test_DefaultConfig(t *testing.T) { var args discovery.Arguments require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + var defaultArgs discovery.Arguments + defaultArgs.SetToDefault() require.Equal(t, args.OperationType, promsdconsumer.OperationTypeUpsert) - require.Equal(t, args.PodAssociations, discovery.DefaultArguments.PodAssociations) + require.Equal(t, args.PodAssociations, defaultArgs.PodAssociations) var inputTrace = `{ "resourceSpans": [{ @@ -157,8 +159,10 @@ func Test_Insert(t *testing.T) { var args discovery.Arguments require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + var defaultArgs discovery.Arguments + defaultArgs.SetToDefault() require.Equal(t, args.OperationType, promsdconsumer.OperationTypeInsert) - require.Equal(t, args.PodAssociations, discovery.DefaultArguments.PodAssociations) + 
require.Equal(t, args.PodAssociations, defaultArgs.PodAssociations) var inputTrace = `{ "resourceSpans": [{ @@ -278,8 +282,10 @@ func Test_Update(t *testing.T) { var args discovery.Arguments require.NoError(t, river.Unmarshal([]byte(cfg), &args)) + var defaultArgs discovery.Arguments + defaultArgs.SetToDefault() require.Equal(t, args.OperationType, promsdconsumer.OperationTypeUpdate) - require.Equal(t, args.PodAssociations, discovery.DefaultArguments.PodAssociations) + require.Equal(t, args.PodAssociations, defaultArgs.PodAssociations) var inputTrace = `{ "resourceSpans": [{ diff --git a/internal/component/otelcol/processor/resourcedetection/internal/system/config.go b/internal/component/otelcol/processor/resourcedetection/internal/system/config.go index c661cf6e8391..3c72a13228a8 100644 --- a/internal/component/otelcol/processor/resourcedetection/internal/system/config.go +++ b/internal/component/otelcol/processor/resourcedetection/internal/system/config.go @@ -19,28 +19,26 @@ type Config struct { ResourceAttributes ResourceAttributesConfig `river:"resource_attributes,block,optional"` } -var DefaultArguments = Config{ - HostnameSources: []string{"dns", "os"}, - ResourceAttributes: ResourceAttributesConfig{ - HostArch: rac.ResourceAttributeConfig{Enabled: false}, - HostCPUCacheL2Size: rac.ResourceAttributeConfig{Enabled: false}, - HostCPUFamily: rac.ResourceAttributeConfig{Enabled: false}, - HostCPUModelID: rac.ResourceAttributeConfig{Enabled: false}, - HostCPUModelName: rac.ResourceAttributeConfig{Enabled: false}, - HostCPUStepping: rac.ResourceAttributeConfig{Enabled: false}, - HostCPUVendorID: rac.ResourceAttributeConfig{Enabled: false}, - HostID: rac.ResourceAttributeConfig{Enabled: false}, - HostName: rac.ResourceAttributeConfig{Enabled: true}, - OsDescription: rac.ResourceAttributeConfig{Enabled: false}, - OsType: rac.ResourceAttributeConfig{Enabled: true}, - }, -} - var _ river.Defaulter = (*Config)(nil) // SetToDefault implements river.Defaulter. 
func (c *Config) SetToDefault() { - *c = DefaultArguments + *c = Config{ + HostnameSources: []string{"dns", "os"}, + ResourceAttributes: ResourceAttributesConfig{ + HostArch: rac.ResourceAttributeConfig{Enabled: false}, + HostCPUCacheL2Size: rac.ResourceAttributeConfig{Enabled: false}, + HostCPUFamily: rac.ResourceAttributeConfig{Enabled: false}, + HostCPUModelID: rac.ResourceAttributeConfig{Enabled: false}, + HostCPUModelName: rac.ResourceAttributeConfig{Enabled: false}, + HostCPUStepping: rac.ResourceAttributeConfig{Enabled: false}, + HostCPUVendorID: rac.ResourceAttributeConfig{Enabled: false}, + HostID: rac.ResourceAttributeConfig{Enabled: false}, + HostName: rac.ResourceAttributeConfig{Enabled: true}, + OsDescription: rac.ResourceAttributeConfig{Enabled: false}, + OsType: rac.ResourceAttributeConfig{Enabled: true}, + }, + } } // Validate config diff --git a/internal/component/otelcol/processor/resourcedetection/resourcedetection.go b/internal/component/otelcol/processor/resourcedetection/resourcedetection.go index de32c1c6aea1..f8630d6c275c 100644 --- a/internal/component/otelcol/processor/resourcedetection/resourcedetection.go +++ b/internal/component/otelcol/processor/resourcedetection/resourcedetection.go @@ -19,7 +19,6 @@ import ( "github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/gcp" "github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/heroku" "github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/k8snode" - kubernetes_node "github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/k8snode" "github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/openshift" "github.com/grafana/agent/internal/component/otelcol/processor/resourcedetection/internal/system" "github.com/grafana/agent/internal/featuregate" @@ -114,21 +113,11 @@ type 
DetectorConfig struct { OpenShiftConfig openshift.Config `river:"openshift,block,optional"` // KubernetesNode contains user-specified configurations for the K8SNode detector - KubernetesNodeConfig kubernetes_node.Config `river:"kubernetes_node,block,optional"` + KubernetesNodeConfig k8snode.Config `river:"kubernetes_node,block,optional"` } -var ( - _ processor.Arguments = Arguments{} - _ river.Validator = (*Arguments)(nil) - _ river.Defaulter = (*Arguments)(nil) -) - -// DefaultArguments holds default settings for Arguments. -var DefaultArguments = Arguments{ - Detectors: []string{"env"}, - Override: true, - Timeout: 5 * time.Second, - DetectorConfig: DetectorConfig{ +func (dc *DetectorConfig) SetToDefault() { + *dc = DetectorConfig{ EC2Config: ec2.DefaultArguments, ECSConfig: ecs.DefaultArguments, EKSConfig: eks.DefaultArguments, @@ -140,15 +129,26 @@ var DefaultArguments = Arguments{ DockerConfig: docker.DefaultArguments, GcpConfig: gcp.DefaultArguments, HerokuConfig: heroku.DefaultArguments, - SystemConfig: system.DefaultArguments, OpenShiftConfig: openshift.DefaultArguments, - KubernetesNodeConfig: kubernetes_node.DefaultArguments, - }, + KubernetesNodeConfig: k8snode.DefaultArguments, + } + dc.SystemConfig.SetToDefault() } +var ( + _ processor.Arguments = Arguments{} + _ river.Validator = (*Arguments)(nil) + _ river.Defaulter = (*Arguments)(nil) +) + // SetToDefault implements river.Defaulter. func (args *Arguments) SetToDefault() { - *args = DefaultArguments + *args = Arguments{ + Detectors: []string{"env"}, + Override: true, + Timeout: 5 * time.Second, + } + args.DetectorConfig.SetToDefault() } // Validate implements river.Validator. 
diff --git a/internal/component/otelcol/processor/resourcedetection/resourcedetection_test.go b/internal/component/otelcol/processor/resourcedetection/resourcedetection_test.go index 437c2cf6bacc..d52c3f65e323 100644 --- a/internal/component/otelcol/processor/resourcedetection/resourcedetection_test.go +++ b/internal/component/otelcol/processor/resourcedetection/resourcedetection_test.go @@ -26,6 +26,9 @@ import ( ) func TestArguments_UnmarshalRiver(t *testing.T) { + var defaultArgs system.Config + defaultArgs.SetToDefault() + tests := []struct { testName string cfg string @@ -77,7 +80,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) { "docker": docker.DefaultArguments.Convert(), "gcp": gcp.DefaultArguments.Convert(), "heroku": heroku.DefaultArguments.Convert(), - "system": system.DefaultArguments.Convert(), + "system": defaultArgs.Convert(), "openshift": openshift.DefaultArguments.Convert(), "k8snode": kubernetes_node.DefaultArguments.Convert(), }, @@ -102,7 +105,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) { "docker": docker.DefaultArguments.Convert(), "gcp": gcp.DefaultArguments.Convert(), "heroku": heroku.DefaultArguments.Convert(), - "system": system.DefaultArguments.Convert(), + "system": defaultArgs.Convert(), "openshift": openshift.DefaultArguments.Convert(), "k8snode": kubernetes_node.DefaultArguments.Convert(), }, @@ -143,7 +146,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) { "docker": docker.DefaultArguments.Convert(), "gcp": gcp.DefaultArguments.Convert(), "heroku": heroku.DefaultArguments.Convert(), - "system": system.DefaultArguments.Convert(), + "system": defaultArgs.Convert(), "openshift": openshift.DefaultArguments.Convert(), "k8snode": kubernetes_node.DefaultArguments.Convert(), }, @@ -185,7 +188,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) { "docker": docker.DefaultArguments.Convert(), "gcp": gcp.DefaultArguments.Convert(), "heroku": heroku.DefaultArguments.Convert(), - "system": system.DefaultArguments.Convert(), + 
"system": defaultArgs.Convert(), "openshift": openshift.DefaultArguments.Convert(), "k8snode": kubernetes_node.DefaultArguments.Convert(), }, @@ -238,7 +241,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) { "docker": docker.DefaultArguments.Convert(), "gcp": gcp.DefaultArguments.Convert(), "heroku": heroku.DefaultArguments.Convert(), - "system": system.DefaultArguments.Convert(), + "system": defaultArgs.Convert(), "openshift": openshift.DefaultArguments.Convert(), "k8snode": kubernetes_node.DefaultArguments.Convert(), }, @@ -300,7 +303,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) { "docker": docker.DefaultArguments.Convert(), "gcp": gcp.DefaultArguments.Convert(), "heroku": heroku.DefaultArguments.Convert(), - "system": system.DefaultArguments.Convert(), + "system": defaultArgs.Convert(), "openshift": openshift.DefaultArguments.Convert(), "k8snode": kubernetes_node.DefaultArguments.Convert(), }, @@ -362,7 +365,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) { "docker": docker.DefaultArguments.Convert(), "gcp": gcp.DefaultArguments.Convert(), "heroku": heroku.DefaultArguments.Convert(), - "system": system.DefaultArguments.Convert(), + "system": defaultArgs.Convert(), "openshift": openshift.DefaultArguments.Convert(), "k8snode": kubernetes_node.DefaultArguments.Convert(), }, @@ -399,7 +402,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) { "docker": docker.DefaultArguments.Convert(), "gcp": gcp.DefaultArguments.Convert(), "heroku": heroku.DefaultArguments.Convert(), - "system": system.DefaultArguments.Convert(), + "system": defaultArgs.Convert(), "openshift": openshift.DefaultArguments.Convert(), "k8snode": kubernetes_node.DefaultArguments.Convert(), }, @@ -441,7 +444,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) { "docker": docker.DefaultArguments.Convert(), "gcp": gcp.DefaultArguments.Convert(), "heroku": heroku.DefaultArguments.Convert(), - "system": system.DefaultArguments.Convert(), + "system": defaultArgs.Convert(), "openshift": 
openshift.DefaultArguments.Convert(), "k8snode": kubernetes_node.DefaultArguments.Convert(), }, @@ -502,7 +505,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) { "docker": docker.DefaultArguments.Convert(), "gcp": gcp.DefaultArguments.Convert(), "heroku": heroku.DefaultArguments.Convert(), - "system": system.DefaultArguments.Convert(), + "system": defaultArgs.Convert(), "openshift": openshift.DefaultArguments.Convert(), "k8snode": kubernetes_node.DefaultArguments.Convert(), }, @@ -571,7 +574,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) { "docker": docker.DefaultArguments.Convert(), "gcp": gcp.DefaultArguments.Convert(), "heroku": heroku.DefaultArguments.Convert(), - "system": system.DefaultArguments.Convert(), + "system": defaultArgs.Convert(), "openshift": openshift.DefaultArguments.Convert(), "k8snode": kubernetes_node.DefaultArguments.Convert(), }, @@ -608,7 +611,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) { "docker": docker.DefaultArguments.Convert(), "gcp": gcp.DefaultArguments.Convert(), "heroku": heroku.DefaultArguments.Convert(), - "system": system.DefaultArguments.Convert(), + "system": defaultArgs.Convert(), "openshift": openshift.DefaultArguments.Convert(), "k8snode": kubernetes_node.DefaultArguments.Convert(), }, @@ -650,7 +653,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) { "docker": docker.DefaultArguments.Convert(), "gcp": gcp.DefaultArguments.Convert(), "heroku": heroku.DefaultArguments.Convert(), - "system": system.DefaultArguments.Convert(), + "system": defaultArgs.Convert(), "openshift": openshift.DefaultArguments.Convert(), "k8snode": kubernetes_node.DefaultArguments.Convert(), }, @@ -677,7 +680,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) { "docker": docker.DefaultArguments.Convert(), "gcp": gcp.DefaultArguments.Convert(), "heroku": heroku.DefaultArguments.Convert(), - "system": system.DefaultArguments.Convert(), + "system": defaultArgs.Convert(), "openshift": openshift.DefaultArguments.Convert(), "k8snode": 
kubernetes_node.DefaultArguments.Convert(), }, @@ -767,7 +770,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) { "consul": consul.DefaultArguments.Convert(), "docker": docker.DefaultArguments.Convert(), "heroku": heroku.DefaultArguments.Convert(), - "system": system.DefaultArguments.Convert(), + "system": defaultArgs.Convert(), "openshift": openshift.DefaultArguments.Convert(), "k8snode": kubernetes_node.DefaultArguments.Convert(), }, @@ -794,7 +797,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) { "docker": docker.DefaultArguments.Convert(), "gcp": gcp.DefaultArguments.Convert(), "heroku": heroku.DefaultArguments.Convert(), - "system": system.DefaultArguments.Convert(), + "system": defaultArgs.Convert(), "openshift": openshift.DefaultArguments.Convert(), "k8snode": kubernetes_node.DefaultArguments.Convert(), }, @@ -836,7 +839,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) { "consul": consul.DefaultArguments.Convert(), "gcp": gcp.DefaultArguments.Convert(), "heroku": heroku.DefaultArguments.Convert(), - "system": system.DefaultArguments.Convert(), + "system": defaultArgs.Convert(), "openshift": openshift.DefaultArguments.Convert(), "k8snode": kubernetes_node.DefaultArguments.Convert(), }, @@ -863,7 +866,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) { "docker": docker.DefaultArguments.Convert(), "gcp": gcp.DefaultArguments.Convert(), "heroku": heroku.DefaultArguments.Convert(), - "system": system.DefaultArguments.Convert(), + "system": defaultArgs.Convert(), "openshift": openshift.DefaultArguments.Convert(), "k8snode": kubernetes_node.DefaultArguments.Convert(), }, @@ -928,7 +931,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) { "docker": docker.DefaultArguments.Convert(), "gcp": gcp.DefaultArguments.Convert(), "heroku": heroku.DefaultArguments.Convert(), - "system": system.DefaultArguments.Convert(), + "system": defaultArgs.Convert(), "openshift": openshift.DefaultArguments.Convert(), "k8snode": kubernetes_node.DefaultArguments.Convert(), 
}, @@ -955,7 +958,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) { "docker": docker.DefaultArguments.Convert(), "gcp": gcp.DefaultArguments.Convert(), "heroku": heroku.DefaultArguments.Convert(), - "system": system.DefaultArguments.Convert(), + "system": defaultArgs.Convert(), "openshift": openshift.DefaultArguments.Convert(), "k8snode": kubernetes_node.DefaultArguments.Convert(), }, @@ -1007,7 +1010,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) { "docker": docker.DefaultArguments.Convert(), "gcp": gcp.DefaultArguments.Convert(), "heroku": heroku.DefaultArguments.Convert(), - "system": system.DefaultArguments.Convert(), + "system": defaultArgs.Convert(), "openshift": openshift.DefaultArguments.Convert(), "k8snode": kubernetes_node.DefaultArguments.Convert(), }, @@ -1034,7 +1037,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) { "docker": docker.DefaultArguments.Convert(), "gcp": gcp.DefaultArguments.Convert(), "heroku": heroku.DefaultArguments.Convert(), - "system": system.DefaultArguments.Convert(), + "system": defaultArgs.Convert(), "openshift": openshift.DefaultArguments.Convert(), "k8snode": kubernetes_node.DefaultArguments.Convert(), }, @@ -1088,7 +1091,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) { "docker": docker.DefaultArguments.Convert(), "gcp": gcp.DefaultArguments.Convert(), "heroku": heroku.DefaultArguments.Convert(), - "system": system.DefaultArguments.Convert(), + "system": defaultArgs.Convert(), "openshift": openshift.DefaultArguments.Convert(), "k8snode": kubernetes_node.DefaultArguments.Convert(), }, @@ -1115,7 +1118,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) { "docker": docker.DefaultArguments.Convert(), "gcp": gcp.DefaultArguments.Convert(), "heroku": heroku.DefaultArguments.Convert(), - "system": system.DefaultArguments.Convert(), + "system": defaultArgs.Convert(), "openshift": openshift.DefaultArguments.Convert(), "k8snode": kubernetes_node.DefaultArguments.Convert(), }, @@ -1178,7 +1181,7 @@ func 
TestArguments_UnmarshalRiver(t *testing.T) { "consul": consul.DefaultArguments.Convert(), "docker": docker.DefaultArguments.Convert(), "gcp": gcp.DefaultArguments.Convert(), - "system": system.DefaultArguments.Convert(), + "system": defaultArgs.Convert(), "openshift": openshift.DefaultArguments.Convert(), "k8snode": kubernetes_node.DefaultArguments.Convert(), }, @@ -1205,7 +1208,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) { "docker": docker.DefaultArguments.Convert(), "gcp": gcp.DefaultArguments.Convert(), "heroku": heroku.DefaultArguments.Convert(), - "system": system.DefaultArguments.Convert(), + "system": defaultArgs.Convert(), "openshift": openshift.DefaultArguments.Convert(), "k8snode": kubernetes_node.DefaultArguments.Convert(), }, @@ -1253,7 +1256,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) { "docker": docker.DefaultArguments.Convert(), "gcp": gcp.DefaultArguments.Convert(), "heroku": heroku.DefaultArguments.Convert(), - "system": system.DefaultArguments.Convert(), + "system": defaultArgs.Convert(), "openshift": openshift.DefaultArguments.Convert(), }, }, @@ -1291,7 +1294,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) { "docker": docker.DefaultArguments.Convert(), "gcp": gcp.DefaultArguments.Convert(), "heroku": heroku.DefaultArguments.Convert(), - "system": system.DefaultArguments.Convert(), + "system": defaultArgs.Convert(), "openshift": openshift.DefaultArguments.Convert(), "k8snode": kubernetes_node.DefaultArguments.Convert(), }, @@ -1397,7 +1400,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) { "docker": docker.DefaultArguments.Convert(), "gcp": gcp.DefaultArguments.Convert(), "heroku": heroku.DefaultArguments.Convert(), - "system": system.DefaultArguments.Convert(), + "system": defaultArgs.Convert(), "openshift": openshift.DefaultArguments.Convert(), "k8snode": kubernetes_node.DefaultArguments.Convert(), }, @@ -1467,7 +1470,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) { "docker": docker.DefaultArguments.Convert(), 
"gcp": gcp.DefaultArguments.Convert(), "heroku": heroku.DefaultArguments.Convert(), - "system": system.DefaultArguments.Convert(), + "system": defaultArgs.Convert(), "k8snode": kubernetes_node.DefaultArguments.Convert(), }, }, @@ -1494,7 +1497,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) { "docker": docker.DefaultArguments.Convert(), "gcp": gcp.DefaultArguments.Convert(), "heroku": heroku.DefaultArguments.Convert(), - "system": system.DefaultArguments.Convert(), + "system": defaultArgs.Convert(), "openshift": openshift.DefaultArguments.Convert(), "k8snode": kubernetes_node.DefaultArguments.Convert(), }, diff --git a/internal/component/otelcol/receiver/jaeger/jaeger.go b/internal/component/otelcol/receiver/jaeger/jaeger.go index 27632e040e03..c7817cc68ab2 100644 --- a/internal/component/otelcol/receiver/jaeger/jaeger.go +++ b/internal/component/otelcol/receiver/jaeger/jaeger.go @@ -44,9 +44,8 @@ var _ receiver.Arguments = Arguments{} // SetToDefault implements river.Defaulter. func (args *Arguments) SetToDefault() { - *args = Arguments{ - DebugMetrics: otelcol.DefaultDebugMetricsArguments, - } + *args = Arguments{} + args.DebugMetrics.SetToDefault() } // Validate implements river.Validator. diff --git a/internal/component/otelcol/receiver/kafka/kafka.go b/internal/component/otelcol/receiver/kafka/kafka.go index dd798f1cee20..d52223f1adee 100644 --- a/internal/component/otelcol/receiver/kafka/kafka.go +++ b/internal/component/otelcol/receiver/kafka/kafka.go @@ -54,43 +54,25 @@ type Arguments struct { var _ receiver.Arguments = Arguments{} -// DefaultArguments holds default values for Arguments. -var DefaultArguments = Arguments{ - // We use the defaults from the upstream OpenTelemetry Collector component - // for compatibility, even though that means using a client and group ID of - // "otel-collector". 
- - Topic: "otlp_spans", - Encoding: "otlp_proto", - Brokers: []string{"localhost:9092"}, - ClientID: "otel-collector", - GroupID: "otel-collector", - InitialOffset: "latest", - Metadata: MetadataArguments{ - IncludeAllTopics: true, - Retry: MetadataRetryArguments{ - MaxRetries: 3, - Backoff: 250 * time.Millisecond, - }, - }, - AutoCommit: AutoCommitArguments{ - Enable: true, - Interval: time.Second, - }, - MessageMarking: MessageMarkingArguments{ - AfterExecution: false, - IncludeUnsuccessful: false, - }, - HeaderExtraction: HeaderExtraction{ - ExtractHeaders: false, - Headers: []string{}, - }, - DebugMetrics: otelcol.DefaultDebugMetricsArguments, -} - // SetToDefault implements river.Defaulter. func (args *Arguments) SetToDefault() { - *args = DefaultArguments + *args = Arguments{ + // We use the defaults from the upstream OpenTelemetry Collector component + // for compatibility, even though that means using a client and group ID of + // "otel-collector". + + Topic: "otlp_spans", + Encoding: "otlp_proto", + Brokers: []string{"localhost:9092"}, + ClientID: "otel-collector", + GroupID: "otel-collector", + InitialOffset: "latest", + } + args.Metadata.SetToDefault() + args.AutoCommit.SetToDefault() + args.MessageMarking.SetToDefault() + args.HeaderExtraction.SetToDefault() + args.DebugMetrics.SetToDefault() } // Convert implements receiver.Arguments. @@ -247,6 +229,16 @@ type MetadataArguments struct { Retry MetadataRetryArguments `river:"retry,block,optional"` } +func (args *MetadataArguments) SetToDefault() { + *args = MetadataArguments{ + IncludeAllTopics: true, + Retry: MetadataRetryArguments{ + MaxRetries: 3, + Backoff: 250 * time.Millisecond, + }, + } +} + // Convert converts args into the upstream type. 
func (args MetadataArguments) Convert() kafkaexporter.Metadata { return kafkaexporter.Metadata{ @@ -278,6 +270,13 @@ type AutoCommitArguments struct { Interval time.Duration `river:"interval,attr,optional"` } +func (args *AutoCommitArguments) SetToDefault() { + *args = AutoCommitArguments{ + Enable: true, + Interval: time.Second, + } +} + // Convert converts args into the upstream type. func (args AutoCommitArguments) Convert() kafkareceiver.AutoCommit { return kafkareceiver.AutoCommit{ @@ -292,6 +291,13 @@ type MessageMarkingArguments struct { IncludeUnsuccessful bool `river:"include_unsuccessful,attr,optional"` } +func (args *MessageMarkingArguments) SetToDefault() { + *args = MessageMarkingArguments{ + AfterExecution: false, + IncludeUnsuccessful: false, + } +} + // Convert converts args into the upstream type. func (args MessageMarkingArguments) Convert() kafkareceiver.MessageMarking { return kafkareceiver.MessageMarking{ @@ -305,6 +311,13 @@ type HeaderExtraction struct { Headers []string `river:"headers,attr,optional"` } +func (h *HeaderExtraction) SetToDefault() { + *h = HeaderExtraction{ + ExtractHeaders: false, + Headers: []string{}, + } +} + // Convert converts HeaderExtraction into the upstream type. func (h HeaderExtraction) Convert() kafkareceiver.HeaderExtraction { return kafkareceiver.HeaderExtraction{ diff --git a/internal/component/otelcol/receiver/opencensus/opencensus.go b/internal/component/otelcol/receiver/opencensus/opencensus.go index 1a4ac11573c6..2de11c257831 100644 --- a/internal/component/otelcol/receiver/opencensus/opencensus.go +++ b/internal/component/otelcol/receiver/opencensus/opencensus.go @@ -40,21 +40,18 @@ type Arguments struct { var _ receiver.Arguments = Arguments{} -// Default server settings. -var DefaultArguments = Arguments{ - GRPC: otelcol.GRPCServerArguments{ - Endpoint: "0.0.0.0:55678", - Transport: "tcp", - - ReadBufferSize: 512 * units.Kibibyte, - // We almost write 0 bytes, so no need to tune WriteBufferSize. 
- }, - DebugMetrics: otelcol.DefaultDebugMetricsArguments, -} - // SetToDefault implements river.Defaulter. func (args *Arguments) SetToDefault() { - *args = DefaultArguments + *args = Arguments{ + GRPC: otelcol.GRPCServerArguments{ + Endpoint: "0.0.0.0:55678", + Transport: "tcp", + + ReadBufferSize: 512 * units.Kibibyte, + // We almost write 0 bytes, so no need to tune WriteBufferSize. + }, + } + args.DebugMetrics.SetToDefault() } // Convert implements receiver.Arguments. diff --git a/internal/component/otelcol/receiver/opencensus/opencensus_test.go b/internal/component/otelcol/receiver/opencensus/opencensus_test.go index 8cc95042a26e..aa8536a3e3a9 100644 --- a/internal/component/otelcol/receiver/opencensus/opencensus_test.go +++ b/internal/component/otelcol/receiver/opencensus/opencensus_test.go @@ -54,10 +54,12 @@ func TestDefaultArguments_UnmarshalRiver(t *testing.T) { require.True(t, ok) + var defaultArgs opencensus.Arguments + defaultArgs.SetToDefault() // Check the gRPC arguments - require.Equal(t, opencensus.DefaultArguments.GRPC.Endpoint, otelArgs.NetAddr.Endpoint) - require.Equal(t, opencensus.DefaultArguments.GRPC.Transport, otelArgs.NetAddr.Transport) - require.Equal(t, int(opencensus.DefaultArguments.GRPC.ReadBufferSize), otelArgs.ReadBufferSize) + require.Equal(t, defaultArgs.GRPC.Endpoint, otelArgs.NetAddr.Endpoint) + require.Equal(t, defaultArgs.GRPC.Transport, otelArgs.NetAddr.Transport) + require.Equal(t, int(defaultArgs.GRPC.ReadBufferSize), otelArgs.ReadBufferSize) } func TestArguments_UnmarshalRiver(t *testing.T) { diff --git a/internal/component/otelcol/receiver/otlp/otlp.go b/internal/component/otelcol/receiver/otlp/otlp.go index 8e59fab2be23..9beb1f7ca3f9 100644 --- a/internal/component/otelcol/receiver/otlp/otlp.go +++ b/internal/component/otelcol/receiver/otlp/otlp.go @@ -71,9 +71,8 @@ var _ receiver.Arguments = Arguments{} // SetToDefault implements river.Defaulter. 
func (args *Arguments) SetToDefault() { - *args = Arguments{ - DebugMetrics: otelcol.DefaultDebugMetricsArguments, - } + *args = Arguments{} + args.DebugMetrics.SetToDefault() } // Convert implements receiver.Arguments. @@ -107,26 +106,6 @@ type ( GRPCServerArguments otelcol.GRPCServerArguments ) -// Default server settings. -var ( - DefaultGRPCServerArguments = GRPCServerArguments{ - Endpoint: "0.0.0.0:4317", - Transport: "tcp", - - ReadBufferSize: 512 * units.Kibibyte, - // We almost write 0 bytes, so no need to tune WriteBufferSize. - } - - DefaultHTTPConfigArguments = HTTPConfigArguments{ - HTTPServerArguments: &otelcol.HTTPServerArguments{ - Endpoint: "0.0.0.0:4318", - }, - MetricsURLPath: "/v1/metrics", - LogsURLPath: "/v1/logs", - TracesURLPath: "/v1/traces", - } -) - // Validate implements river.Validator. func (args *Arguments) Validate() error { if args.HTTP != nil { @@ -155,12 +134,25 @@ func validateURL(url string, urlName string) error { // SetToDefault implements river.Defaulter. func (args *GRPCServerArguments) SetToDefault() { - *args = DefaultGRPCServerArguments + *args = GRPCServerArguments{ + Endpoint: "0.0.0.0:4317", + Transport: "tcp", + + ReadBufferSize: 512 * units.Kibibyte, + // We almost write 0 bytes, so no need to tune WriteBufferSize. + } } // SetToDefault implements river.Defaulter. func (args *HTTPConfigArguments) SetToDefault() { - *args = DefaultHTTPConfigArguments + *args = HTTPConfigArguments{ + HTTPServerArguments: &otelcol.HTTPServerArguments{ + Endpoint: "0.0.0.0:4318", + }, + MetricsURLPath: "/v1/metrics", + LogsURLPath: "/v1/logs", + TracesURLPath: "/v1/traces", + } } // DebugMetricsConfig implements receiver.Arguments. 
diff --git a/internal/component/otelcol/receiver/receiver_test.go b/internal/component/otelcol/receiver/receiver_test.go index 2e6164a3f152..989b40fa9ffa 100644 --- a/internal/component/otelcol/receiver/receiver_test.go +++ b/internal/component/otelcol/receiver/receiver_test.go @@ -129,5 +129,7 @@ func (fa fakeReceiverArgs) NextConsumers() *otelcol.ConsumerArguments { } func (fa fakeReceiverArgs) DebugMetricsConfig() otelcol.DebugMetricsArguments { - return otelcol.DefaultDebugMetricsArguments + var args otelcol.DebugMetricsArguments + args.SetToDefault() + return args } diff --git a/internal/component/otelcol/receiver/vcenter/vcenter.go b/internal/component/otelcol/receiver/vcenter/vcenter.go index a44105a3054f..68f3eea716ed 100644 --- a/internal/component/otelcol/receiver/vcenter/vcenter.go +++ b/internal/component/otelcol/receiver/vcenter/vcenter.go @@ -86,6 +86,50 @@ type MetricsConfig struct { VcenterVMNetworkUsage MetricConfig `river:"vcenter.vm.network.usage,block,optional"` } +func (args *MetricsConfig) SetToDefault() { + *args = MetricsConfig{ + VcenterClusterCPUEffective: MetricConfig{Enabled: true}, + VcenterClusterCPULimit: MetricConfig{Enabled: true}, + VcenterClusterHostCount: MetricConfig{Enabled: true}, + VcenterClusterMemoryEffective: MetricConfig{Enabled: true}, + VcenterClusterMemoryLimit: MetricConfig{Enabled: true}, + VcenterClusterMemoryUsed: MetricConfig{Enabled: true}, + VcenterClusterVMCount: MetricConfig{Enabled: true}, + VcenterDatastoreDiskUsage: MetricConfig{Enabled: true}, + VcenterDatastoreDiskUtilization: MetricConfig{Enabled: true}, + VcenterHostCPUUsage: MetricConfig{Enabled: true}, + VcenterHostCPUUtilization: MetricConfig{Enabled: true}, + VcenterHostDiskLatencyAvg: MetricConfig{Enabled: true}, + VcenterHostDiskLatencyMax: MetricConfig{Enabled: true}, + VcenterHostDiskThroughput: MetricConfig{Enabled: true}, + VcenterHostMemoryUsage: MetricConfig{Enabled: true}, + VcenterHostMemoryUtilization: MetricConfig{Enabled: true}, + 
VcenterHostNetworkPacketCount: MetricConfig{Enabled: true}, + VcenterHostNetworkPacketErrors: MetricConfig{Enabled: true}, + VcenterHostNetworkThroughput: MetricConfig{Enabled: true}, + VcenterHostNetworkUsage: MetricConfig{Enabled: true}, + VcenterResourcePoolCPUShares: MetricConfig{Enabled: true}, + VcenterResourcePoolCPUUsage: MetricConfig{Enabled: true}, + VcenterResourcePoolMemoryShares: MetricConfig{Enabled: true}, + VcenterResourcePoolMemoryUsage: MetricConfig{Enabled: true}, + VcenterVMCPUUsage: MetricConfig{Enabled: true}, + VcenterVMCPUUtilization: MetricConfig{Enabled: true}, + VcenterVMDiskLatencyAvg: MetricConfig{Enabled: true}, + VcenterVMDiskLatencyMax: MetricConfig{Enabled: true}, + VcenterVMDiskThroughput: MetricConfig{Enabled: true}, + VcenterVMDiskUsage: MetricConfig{Enabled: true}, + VcenterVMDiskUtilization: MetricConfig{Enabled: true}, + VcenterVMMemoryBallooned: MetricConfig{Enabled: true}, + VcenterVMMemorySwapped: MetricConfig{Enabled: true}, + VcenterVMMemorySwappedSsd: MetricConfig{Enabled: true}, + VcenterVMMemoryUsage: MetricConfig{Enabled: true}, + VcenterVMMemoryUtilization: MetricConfig{Enabled: false}, + VcenterVMNetworkPacketCount: MetricConfig{Enabled: true}, + VcenterVMNetworkThroughput: MetricConfig{Enabled: true}, + VcenterVMNetworkUsage: MetricConfig{Enabled: true}, + } +} + func (args *MetricsConfig) Convert() map[string]interface{} { if args == nil { return nil @@ -157,6 +201,18 @@ type ResourceAttributesConfig struct { VcenterVMName ResourceAttributeConfig `river:"vcenter.vm.name,block,optional"` } +func (args *ResourceAttributesConfig) SetToDefault() { + *args = ResourceAttributesConfig{ + VcenterClusterName: ResourceAttributeConfig{Enabled: true}, + VcenterDatastoreName: ResourceAttributeConfig{Enabled: true}, + VcenterHostName: ResourceAttributeConfig{Enabled: true}, + VcenterResourcePoolInventoryPath: ResourceAttributeConfig{Enabled: true}, + VcenterResourcePoolName: ResourceAttributeConfig{Enabled: true}, + 
VcenterVMID: ResourceAttributeConfig{Enabled: true}, + VcenterVMName: ResourceAttributeConfig{Enabled: true}, + } +} + func (args *ResourceAttributesConfig) Convert() map[string]interface{} { if args == nil { return nil @@ -180,6 +236,12 @@ type MetricsBuilderConfig struct { ResourceAttributes ResourceAttributesConfig `river:"resource_attributes,block,optional"` } +func (mbc *MetricsBuilderConfig) SetToDefault() { + *mbc = MetricsBuilderConfig{} + mbc.Metrics.SetToDefault() + mbc.ResourceAttributes.SetToDefault() +} + func (args *MetricsBuilderConfig) Convert() map[string]interface{} { if args == nil { return nil @@ -213,69 +275,13 @@ type Arguments struct { var _ receiver.Arguments = Arguments{} -var ( - // DefaultArguments holds default values for Arguments. - DefaultArguments = Arguments{ - ScraperControllerArguments: otelcol.DefaultScraperControllerArguments, - MetricsBuilderConfig: MetricsBuilderConfig{ - Metrics: MetricsConfig{ - VcenterClusterCPUEffective: MetricConfig{Enabled: true}, - VcenterClusterCPULimit: MetricConfig{Enabled: true}, - VcenterClusterHostCount: MetricConfig{Enabled: true}, - VcenterClusterMemoryEffective: MetricConfig{Enabled: true}, - VcenterClusterMemoryLimit: MetricConfig{Enabled: true}, - VcenterClusterMemoryUsed: MetricConfig{Enabled: true}, - VcenterClusterVMCount: MetricConfig{Enabled: true}, - VcenterDatastoreDiskUsage: MetricConfig{Enabled: true}, - VcenterDatastoreDiskUtilization: MetricConfig{Enabled: true}, - VcenterHostCPUUsage: MetricConfig{Enabled: true}, - VcenterHostCPUUtilization: MetricConfig{Enabled: true}, - VcenterHostDiskLatencyAvg: MetricConfig{Enabled: true}, - VcenterHostDiskLatencyMax: MetricConfig{Enabled: true}, - VcenterHostDiskThroughput: MetricConfig{Enabled: true}, - VcenterHostMemoryUsage: MetricConfig{Enabled: true}, - VcenterHostMemoryUtilization: MetricConfig{Enabled: true}, - VcenterHostNetworkPacketCount: MetricConfig{Enabled: true}, - VcenterHostNetworkPacketErrors: MetricConfig{Enabled: true}, - 
VcenterHostNetworkThroughput: MetricConfig{Enabled: true}, - VcenterHostNetworkUsage: MetricConfig{Enabled: true}, - VcenterResourcePoolCPUShares: MetricConfig{Enabled: true}, - VcenterResourcePoolCPUUsage: MetricConfig{Enabled: true}, - VcenterResourcePoolMemoryShares: MetricConfig{Enabled: true}, - VcenterResourcePoolMemoryUsage: MetricConfig{Enabled: true}, - VcenterVMCPUUsage: MetricConfig{Enabled: true}, - VcenterVMCPUUtilization: MetricConfig{Enabled: true}, - VcenterVMDiskLatencyAvg: MetricConfig{Enabled: true}, - VcenterVMDiskLatencyMax: MetricConfig{Enabled: true}, - VcenterVMDiskThroughput: MetricConfig{Enabled: true}, - VcenterVMDiskUsage: MetricConfig{Enabled: true}, - VcenterVMDiskUtilization: MetricConfig{Enabled: true}, - VcenterVMMemoryBallooned: MetricConfig{Enabled: true}, - VcenterVMMemorySwapped: MetricConfig{Enabled: true}, - VcenterVMMemorySwappedSsd: MetricConfig{Enabled: true}, - VcenterVMMemoryUsage: MetricConfig{Enabled: true}, - VcenterVMMemoryUtilization: MetricConfig{Enabled: false}, - VcenterVMNetworkPacketCount: MetricConfig{Enabled: true}, - VcenterVMNetworkThroughput: MetricConfig{Enabled: true}, - VcenterVMNetworkUsage: MetricConfig{Enabled: true}, - }, - ResourceAttributes: ResourceAttributesConfig{ - VcenterClusterName: ResourceAttributeConfig{Enabled: true}, - VcenterDatastoreName: ResourceAttributeConfig{Enabled: true}, - VcenterHostName: ResourceAttributeConfig{Enabled: true}, - VcenterResourcePoolInventoryPath: ResourceAttributeConfig{Enabled: true}, - VcenterResourcePoolName: ResourceAttributeConfig{Enabled: true}, - VcenterVMID: ResourceAttributeConfig{Enabled: true}, - VcenterVMName: ResourceAttributeConfig{Enabled: true}, - }, - }, - DebugMetrics: otelcol.DefaultDebugMetricsArguments, - } -) - // SetToDefault implements river.Defaulter. 
func (args *Arguments) SetToDefault() { - *args = DefaultArguments + *args = Arguments{ + ScraperControllerArguments: otelcol.DefaultScraperControllerArguments, + } + args.MetricsBuilderConfig.SetToDefault() + args.DebugMetrics.SetToDefault() } // Convert implements receiver.Arguments. diff --git a/internal/component/otelcol/receiver/zipkin/zipkin.go b/internal/component/otelcol/receiver/zipkin/zipkin.go index 46c6d74b9c80..6cf15ac2a15f 100644 --- a/internal/component/otelcol/receiver/zipkin/zipkin.go +++ b/internal/component/otelcol/receiver/zipkin/zipkin.go @@ -39,17 +39,14 @@ type Arguments struct { var _ receiver.Arguments = Arguments{} -// DefaultArguments holds default settings for otelcol.receiver.zipkin. -var DefaultArguments = Arguments{ - HTTPServer: otelcol.HTTPServerArguments{ - Endpoint: "0.0.0.0:9411", - }, - DebugMetrics: otelcol.DefaultDebugMetricsArguments, -} - // SetToDefault implements river.Defaulter. func (args *Arguments) SetToDefault() { - *args = DefaultArguments + *args = Arguments{ + HTTPServer: otelcol.HTTPServerArguments{ + Endpoint: "0.0.0.0:9411", + }, + } + args.DebugMetrics.SetToDefault() } // Convert implements receiver.Arguments. 
diff --git a/internal/component/prometheus/exporter/azure/azure.go b/internal/component/prometheus/exporter/azure/azure.go index 391e9c593233..3671cfa78325 100644 --- a/internal/component/prometheus/exporter/azure/azure.go +++ b/internal/component/prometheus/exporter/azure/azure.go @@ -41,21 +41,19 @@ type Arguments struct { Regions []string `river:"regions,attr,optional"` } -var DefaultArguments = Arguments{ - Timespan: "PT1M", - MetricNameTemplate: "azure_{type}_{metric}_{aggregation}_{unit}", - MetricHelpTemplate: "Azure metric {metric} for {type} with aggregation {aggregation} as {unit}", - IncludedResourceTags: []string{"owner"}, - AzureCloudEnvironment: "azurecloud", - // Dimensions do not always apply to all metrics for a service, which requires you to configure multiple exporters - // to fully monitor a service which is tedious. Turning off validation eliminates this complexity. The underlying - // sdk will only give back the dimensions which are valid for particular metrics. - ValidateDimensions: false, -} - // SetToDefault implements river.Defaulter. func (a *Arguments) SetToDefault() { - *a = DefaultArguments + *a = Arguments{ + Timespan: "PT1M", + MetricNameTemplate: "azure_{type}_{metric}_{aggregation}_{unit}", + MetricHelpTemplate: "Azure metric {metric} for {type} with aggregation {aggregation} as {unit}", + IncludedResourceTags: []string{"owner"}, + AzureCloudEnvironment: "azurecloud", + // Dimensions do not always apply to all metrics for a service, which requires you to configure multiple exporters + // to fully monitor a service which is tedious. Turning off validation eliminates this complexity. The underlying + // sdk will only give back the dimensions which are valid for particular metrics. + ValidateDimensions: false, + } } // Validate implements river.Validator. 
diff --git a/internal/component/prometheus/exporter/cadvisor/cadvisor.go b/internal/component/prometheus/exporter/cadvisor/cadvisor.go index ce6d1f1ca815..e28b4e1c7dab 100644 --- a/internal/component/prometheus/exporter/cadvisor/cadvisor.go +++ b/internal/component/prometheus/exporter/cadvisor/cadvisor.go @@ -26,30 +26,6 @@ func createExporter(opts component.Options, args component.Arguments, defaultIns return integrations.NewIntegrationWithInstanceKey(opts.Logger, a.Convert(), defaultInstanceKey) } -// DefaultArguments holds non-zero default options for Arguments when it is -// unmarshaled from river. -var DefaultArguments = Arguments{ - StoreContainerLabels: true, - AllowlistedContainerLabels: []string{""}, - EnvMetadataAllowlist: []string{""}, - RawCgroupPrefixAllowlist: []string{""}, - ResctrlInterval: 0, - StorageDuration: 2 * time.Minute, - - ContainerdHost: "/run/containerd/containerd.sock", - ContainerdNamespace: "k8s.io", - - // TODO(@tpaschalis) Do we need the default cert/key/ca since tls is disabled by default? - DockerHost: "unix:///var/run/docker.sock", - UseDockerTLS: false, - DockerTLSCert: "cert.pem", - DockerTLSKey: "key.pem", - DockerTLSCA: "ca.pem", - - DockerOnly: false, - DisableRootCgroupStats: false, -} - // Arguments configures the prometheus.exporter.cadvisor component. type Arguments struct { StoreContainerLabels bool `river:"store_container_labels,attr,optional"` @@ -74,7 +50,27 @@ type Arguments struct { // SetToDefault implements river.Defaulter. func (a *Arguments) SetToDefault() { - *a = DefaultArguments + *a = Arguments{ + StoreContainerLabels: true, + AllowlistedContainerLabels: []string{""}, + EnvMetadataAllowlist: []string{""}, + RawCgroupPrefixAllowlist: []string{""}, + ResctrlInterval: 0, + StorageDuration: 2 * time.Minute, + + ContainerdHost: "/run/containerd/containerd.sock", + ContainerdNamespace: "k8s.io", + + // TODO(@tpaschalis) Do we need the default cert/key/ca since tls is disabled by default? 
+ DockerHost: "unix:///var/run/docker.sock", + UseDockerTLS: false, + DockerTLSCert: "cert.pem", + DockerTLSKey: "key.pem", + DockerTLSCA: "ca.pem", + + DockerOnly: false, + DisableRootCgroupStats: false, + } } // Convert returns the upstream-compatible configuration struct. diff --git a/internal/component/prometheus/exporter/self/self.go b/internal/component/prometheus/exporter/self/self.go index 3d0af166dfcb..32735c19e5ca 100644 --- a/internal/component/prometheus/exporter/self/self.go +++ b/internal/component/prometheus/exporter/self/self.go @@ -30,12 +30,9 @@ type Arguments struct{} // Exports holds the values exported by the prometheus.exporter.self component. type Exports struct{} -// DefaultArguments defines the default settings -var DefaultArguments = Arguments{} - // SetToDefault implements river.Defaulter func (args *Arguments) SetToDefault() { - *args = DefaultArguments + *args = Arguments{} } func (a *Arguments) Convert() *agent.Config { diff --git a/internal/converter/internal/staticconvert/testdata-v2/integrations_v2.river b/internal/converter/internal/staticconvert/testdata-v2/integrations_v2.river index c609330be6c7..6fc587de7d74 100644 --- a/internal/converter/internal/staticconvert/testdata-v2/integrations_v2.river +++ b/internal/converter/internal/staticconvert/testdata-v2/integrations_v2.river @@ -689,15 +689,13 @@ faro.receiver "integrations_app_agent_receiver" { max_allowed_payload_size = "4MiB786KiB832B" rate_limiting { - enabled = true rate = 100 burst_size = 50 } } sourcemaps { - download_from_origins = ["*"] - download_timeout = "1s" + download = false } output { diff --git a/internal/converter/internal/staticconvert/testdata-v2/unsupported.river b/internal/converter/internal/staticconvert/testdata-v2/unsupported.river index c9585a88c5dc..2ec9a53004a6 100644 --- a/internal/converter/internal/staticconvert/testdata-v2/unsupported.river +++ b/internal/converter/internal/staticconvert/testdata-v2/unsupported.river @@ -29,15 +29,13 @@ 
faro.receiver "integrations_app_agent_receiver" { max_allowed_payload_size = "4MiB786KiB832B" rate_limiting { - enabled = true rate = 100 burst_size = 50 } } sourcemaps { - download_from_origins = ["*"] - download_timeout = "1s" + download = false } output { diff --git a/internal/converter/internal/staticconvert/testdata/traces_multi.river b/internal/converter/internal/staticconvert/testdata/traces_multi.river index b7e4a4ea7615..acf5f44e0676 100644 --- a/internal/converter/internal/staticconvert/testdata/traces_multi.river +++ b/internal/converter/internal/staticconvert/testdata/traces_multi.river @@ -3,7 +3,9 @@ otelcol.receiver.otlp "trace_config_1_default" { include_metadata = true } - http { } + http { + include_metadata = true + } output { metrics = [] @@ -40,7 +42,9 @@ otelcol.receiver.otlp "trace_config_2_default" { include_metadata = true } - http { } + http { + include_metadata = true + } output { metrics = [] diff --git a/internal/converter/internal/staticconvert/testdata/unsupported.river b/internal/converter/internal/staticconvert/testdata/unsupported.river index 3e9a55630b65..cd5e525bef91 100644 --- a/internal/converter/internal/staticconvert/testdata/unsupported.river +++ b/internal/converter/internal/staticconvert/testdata/unsupported.river @@ -55,7 +55,9 @@ otelcol.receiver.otlp "default" { include_metadata = true } - http { } + http { + include_metadata = true + } output { metrics = [] From 514c541a0f4e0e097e9fda45934d7b19c03f1f46 Mon Sep 17 00:00:00 2001 From: Erik Baranowski <39704712+erikbaranowski@users.noreply.github.com> Date: Thu, 21 Mar 2024 10:42:14 -0400 Subject: [PATCH 03/83] don't reuse pointers for the windows exporter defaults (#6744) Signed-off-by: Erik Baranowski <39704712+erikbaranowski@users.noreply.github.com> --- .../windows/config_default_windows_test.go | 50 +++---- .../exporter/windows/config_windows.go | 132 +++++++++--------- 2 files changed, 90 insertions(+), 92 deletions(-) diff --git 
a/internal/component/prometheus/exporter/windows/config_default_windows_test.go b/internal/component/prometheus/exporter/windows/config_default_windows_test.go index 7242ac42e525..6cdd9cf60283 100644 --- a/internal/component/prometheus/exporter/windows/config_default_windows_test.go +++ b/internal/component/prometheus/exporter/windows/config_default_windows_test.go @@ -12,28 +12,30 @@ func TestRiverUnmarshalWithDefaultConfig(t *testing.T) { err := river.Unmarshal([]byte(""), &args) require.NoError(t, err) - require.Equal(t, DefaultArguments.EnabledCollectors, args.EnabledCollectors) - require.Equal(t, DefaultArguments.Dfsr.SourcesEnabled, args.Dfsr.SourcesEnabled) - require.Equal(t, DefaultArguments.Exchange.EnabledList, args.Exchange.EnabledList) - require.Equal(t, DefaultArguments.IIS.AppExclude, args.IIS.AppExclude) - require.Equal(t, DefaultArguments.IIS.AppInclude, args.IIS.AppInclude) - require.Equal(t, DefaultArguments.IIS.SiteExclude, args.IIS.SiteExclude) - require.Equal(t, DefaultArguments.IIS.SiteInclude, args.IIS.SiteInclude) - require.Equal(t, DefaultArguments.LogicalDisk.Exclude, args.LogicalDisk.Exclude) - require.Equal(t, DefaultArguments.LogicalDisk.Include, args.LogicalDisk.Include) - require.Equal(t, DefaultArguments.MSMQ.Where, args.MSMQ.Where) - require.Equal(t, DefaultArguments.MSSQL.EnabledClasses, args.MSSQL.EnabledClasses) - require.Equal(t, DefaultArguments.Network.Exclude, args.Network.Exclude) - require.Equal(t, DefaultArguments.Network.Include, args.Network.Include) - require.Equal(t, DefaultArguments.PhysicalDisk.Exclude, args.PhysicalDisk.Exclude) - require.Equal(t, DefaultArguments.PhysicalDisk.Include, args.PhysicalDisk.Include) - require.Equal(t, DefaultArguments.Process.Exclude, args.Process.Exclude) - require.Equal(t, DefaultArguments.Process.Include, args.Process.Include) - require.Equal(t, DefaultArguments.ScheduledTask.Exclude, args.ScheduledTask.Exclude) - require.Equal(t, DefaultArguments.ScheduledTask.Include, 
args.ScheduledTask.Include) - require.Equal(t, DefaultArguments.Service.UseApi, args.Service.UseApi) - require.Equal(t, DefaultArguments.Service.Where, args.Service.Where) - require.Equal(t, DefaultArguments.SMTP.Exclude, args.SMTP.Exclude) - require.Equal(t, DefaultArguments.SMTP.Include, args.SMTP.Include) - require.Equal(t, DefaultArguments.TextFile.TextFileDirectory, args.TextFile.TextFileDirectory) + var defaultArgs Arguments + defaultArgs.SetToDefault() + require.Equal(t, defaultArgs.EnabledCollectors, args.EnabledCollectors) + require.Equal(t, defaultArgs.Dfsr.SourcesEnabled, args.Dfsr.SourcesEnabled) + require.Equal(t, defaultArgs.Exchange.EnabledList, args.Exchange.EnabledList) + require.Equal(t, defaultArgs.IIS.AppExclude, args.IIS.AppExclude) + require.Equal(t, defaultArgs.IIS.AppInclude, args.IIS.AppInclude) + require.Equal(t, defaultArgs.IIS.SiteExclude, args.IIS.SiteExclude) + require.Equal(t, defaultArgs.IIS.SiteInclude, args.IIS.SiteInclude) + require.Equal(t, defaultArgs.LogicalDisk.Exclude, args.LogicalDisk.Exclude) + require.Equal(t, defaultArgs.LogicalDisk.Include, args.LogicalDisk.Include) + require.Equal(t, defaultArgs.MSMQ.Where, args.MSMQ.Where) + require.Equal(t, defaultArgs.MSSQL.EnabledClasses, args.MSSQL.EnabledClasses) + require.Equal(t, defaultArgs.Network.Exclude, args.Network.Exclude) + require.Equal(t, defaultArgs.Network.Include, args.Network.Include) + require.Equal(t, defaultArgs.PhysicalDisk.Exclude, args.PhysicalDisk.Exclude) + require.Equal(t, defaultArgs.PhysicalDisk.Include, args.PhysicalDisk.Include) + require.Equal(t, defaultArgs.Process.Exclude, args.Process.Exclude) + require.Equal(t, defaultArgs.Process.Include, args.Process.Include) + require.Equal(t, defaultArgs.ScheduledTask.Exclude, args.ScheduledTask.Exclude) + require.Equal(t, defaultArgs.ScheduledTask.Include, args.ScheduledTask.Include) + require.Equal(t, defaultArgs.Service.UseApi, args.Service.UseApi) + require.Equal(t, defaultArgs.Service.Where, 
args.Service.Where) + require.Equal(t, defaultArgs.SMTP.Exclude, args.SMTP.Exclude) + require.Equal(t, defaultArgs.SMTP.Include, args.SMTP.Include) + require.Equal(t, defaultArgs.TextFile.TextFileDirectory, args.TextFile.TextFileDirectory) } diff --git a/internal/component/prometheus/exporter/windows/config_windows.go b/internal/component/prometheus/exporter/windows/config_windows.go index 773e3f5bfc0e..d1e138b9b520 100644 --- a/internal/component/prometheus/exporter/windows/config_windows.go +++ b/internal/component/prometheus/exporter/windows/config_windows.go @@ -7,74 +7,70 @@ import ( col "github.com/prometheus-community/windows_exporter/pkg/collector" ) -// DefaultArguments holds non-zero default options for Arguments when it is -// unmarshaled from YAML. -var DefaultArguments = Arguments{ - EnabledCollectors: strings.Split(windows_integration.DefaultConfig.EnabledCollectors, ","), - Dfsr: DfsrConfig{ - SourcesEnabled: strings.Split(col.ConfigDefaults.Dfsr.DfsrEnabledCollectors, ","), - }, - Exchange: ExchangeConfig{ - EnabledList: strings.Split(col.ConfigDefaults.Exchange.CollectorsEnabled, ","), - }, - IIS: IISConfig{ - AppBlackList: col.ConfigDefaults.Iis.AppExclude, - AppWhiteList: col.ConfigDefaults.Iis.AppInclude, - SiteBlackList: col.ConfigDefaults.Iis.SiteExclude, - SiteWhiteList: col.ConfigDefaults.Iis.SiteInclude, - AppInclude: col.ConfigDefaults.Iis.AppInclude, - AppExclude: col.ConfigDefaults.Iis.AppExclude, - SiteInclude: col.ConfigDefaults.Iis.SiteInclude, - SiteExclude: col.ConfigDefaults.Iis.SiteExclude, - }, - LogicalDisk: LogicalDiskConfig{ - BlackList: col.ConfigDefaults.LogicalDisk.VolumeExclude, - WhiteList: col.ConfigDefaults.LogicalDisk.VolumeInclude, - Include: col.ConfigDefaults.LogicalDisk.VolumeInclude, - Exclude: col.ConfigDefaults.LogicalDisk.VolumeExclude, - }, - MSMQ: MSMQConfig{ - Where: col.ConfigDefaults.Msmq.QueryWhereClause, - }, - MSSQL: MSSQLConfig{ - EnabledClasses: 
strings.Split(col.ConfigDefaults.Mssql.EnabledCollectors, ","), - }, - Network: NetworkConfig{ - BlackList: col.ConfigDefaults.Net.NicExclude, - WhiteList: col.ConfigDefaults.Net.NicInclude, - Include: col.ConfigDefaults.Net.NicInclude, - Exclude: col.ConfigDefaults.Net.NicExclude, - }, - PhysicalDisk: PhysicalDiskConfig{ - Exclude: col.ConfigDefaults.PhysicalDisk.DiskExclude, - Include: col.ConfigDefaults.PhysicalDisk.DiskInclude, - }, - Process: ProcessConfig{ - BlackList: col.ConfigDefaults.Process.ProcessExclude, - WhiteList: col.ConfigDefaults.Process.ProcessInclude, - Include: col.ConfigDefaults.Process.ProcessInclude, - Exclude: col.ConfigDefaults.Process.ProcessExclude, - }, - ScheduledTask: ScheduledTaskConfig{ - Include: col.ConfigDefaults.ScheduledTask.TaskInclude, - Exclude: col.ConfigDefaults.ScheduledTask.TaskExclude, - }, - Service: ServiceConfig{ - UseApi: "false", - Where: col.ConfigDefaults.Service.ServiceWhereClause, - }, - SMTP: SMTPConfig{ - BlackList: col.ConfigDefaults.Smtp.ServerExclude, - WhiteList: col.ConfigDefaults.Smtp.ServerInclude, - Include: col.ConfigDefaults.Smtp.ServerInclude, - Exclude: col.ConfigDefaults.Smtp.ServerExclude, - }, - TextFile: TextFileConfig{ - TextFileDirectory: col.ConfigDefaults.Textfile.TextFileDirectories, - }, -} - // SetToDefault implements river.Defaulter. 
func (a *Arguments) SetToDefault() { - *a = DefaultArguments + *a = Arguments{ + EnabledCollectors: strings.Split(windows_integration.DefaultConfig.EnabledCollectors, ","), + Dfsr: DfsrConfig{ + SourcesEnabled: strings.Split(col.ConfigDefaults.Dfsr.DfsrEnabledCollectors, ","), + }, + Exchange: ExchangeConfig{ + EnabledList: strings.Split(col.ConfigDefaults.Exchange.CollectorsEnabled, ","), + }, + IIS: IISConfig{ + AppBlackList: col.ConfigDefaults.Iis.AppExclude, + AppWhiteList: col.ConfigDefaults.Iis.AppInclude, + SiteBlackList: col.ConfigDefaults.Iis.SiteExclude, + SiteWhiteList: col.ConfigDefaults.Iis.SiteInclude, + AppInclude: col.ConfigDefaults.Iis.AppInclude, + AppExclude: col.ConfigDefaults.Iis.AppExclude, + SiteInclude: col.ConfigDefaults.Iis.SiteInclude, + SiteExclude: col.ConfigDefaults.Iis.SiteExclude, + }, + LogicalDisk: LogicalDiskConfig{ + BlackList: col.ConfigDefaults.LogicalDisk.VolumeExclude, + WhiteList: col.ConfigDefaults.LogicalDisk.VolumeInclude, + Include: col.ConfigDefaults.LogicalDisk.VolumeInclude, + Exclude: col.ConfigDefaults.LogicalDisk.VolumeExclude, + }, + MSMQ: MSMQConfig{ + Where: col.ConfigDefaults.Msmq.QueryWhereClause, + }, + MSSQL: MSSQLConfig{ + EnabledClasses: strings.Split(col.ConfigDefaults.Mssql.EnabledCollectors, ","), + }, + Network: NetworkConfig{ + BlackList: col.ConfigDefaults.Net.NicExclude, + WhiteList: col.ConfigDefaults.Net.NicInclude, + Include: col.ConfigDefaults.Net.NicInclude, + Exclude: col.ConfigDefaults.Net.NicExclude, + }, + PhysicalDisk: PhysicalDiskConfig{ + Exclude: col.ConfigDefaults.PhysicalDisk.DiskExclude, + Include: col.ConfigDefaults.PhysicalDisk.DiskInclude, + }, + Process: ProcessConfig{ + BlackList: col.ConfigDefaults.Process.ProcessExclude, + WhiteList: col.ConfigDefaults.Process.ProcessInclude, + Include: col.ConfigDefaults.Process.ProcessInclude, + Exclude: col.ConfigDefaults.Process.ProcessExclude, + }, + ScheduledTask: ScheduledTaskConfig{ + Include: 
col.ConfigDefaults.ScheduledTask.TaskInclude, + Exclude: col.ConfigDefaults.ScheduledTask.TaskExclude, + }, + Service: ServiceConfig{ + UseApi: "false", + Where: col.ConfigDefaults.Service.ServiceWhereClause, + }, + SMTP: SMTPConfig{ + BlackList: col.ConfigDefaults.Smtp.ServerExclude, + WhiteList: col.ConfigDefaults.Smtp.ServerInclude, + Include: col.ConfigDefaults.Smtp.ServerInclude, + Exclude: col.ConfigDefaults.Smtp.ServerExclude, + }, + TextFile: TextFileConfig{ + TextFileDirectory: col.ConfigDefaults.Textfile.TextFileDirectories, + }, + } } From 6554bf60a71525e2bc11446066d6a137fff37c96 Mon Sep 17 00:00:00 2001 From: Robert Fratto Date: Thu, 21 Mar 2024 15:34:26 -0400 Subject: [PATCH 04/83] docs: exempt converters from backwards compatibility guarantees (#6747) Converter code is not cheap to maintain, and in some cases can bring in many dependencies that are otherwise unused, making project maintainance more difficult. This change explicitly marks converter code as exempt from backwards compatibility guarntees, allowing its removal between minor versions if a converter is no longer needed and maintainers don't want to wait for the next major release to remove it. The RFC for backwards compatibility guarantees is also updated to make it clear that some functionality may opt out of backwards compatibility guarantees, which includes the `convert` and `tools` commands. --- docs/rfcs/0008-backwards-compatibility.md | 2 ++ docs/sources/flow/reference/cli/convert.md | 6 +++++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/docs/rfcs/0008-backwards-compatibility.md b/docs/rfcs/0008-backwards-compatibility.md index 56d4bac647a3..28d4c23c2e50 100644 --- a/docs/rfcs/0008-backwards-compatibility.md +++ b/docs/rfcs/0008-backwards-compatibility.md @@ -63,6 +63,8 @@ It's impossible to guarantee that full backwards compatibility is achieved. Ther - Other telemetry data: metrics, logs, and traces may change between releases. 
Only telemetry data which is used in official dashboards is protected under backwards compatibility. +- Tagged as exempt: functionality which is explicitly marked as exempt from backwards compatibility guarantees may include breaking changes or removal between minor releases. + ### Avoiding major release burnout  As a new major release implies a user must put extra effort into upgrading, it is possible to burn out users by releasing breaking changes too frequently.  diff --git a/docs/sources/flow/reference/cli/convert.md b/docs/sources/flow/reference/cli/convert.md index 3b44d662e87a..793acfb2031a 100644 --- a/docs/sources/flow/reference/cli/convert.md +++ b/docs/sources/flow/reference/cli/convert.md @@ -17,6 +17,10 @@ weight: 100 The `convert` command converts a supported configuration format to {{< param "PRODUCT_NAME" >}} River format. +{{< admonition type="caution" >}} +This command has no backward compatibility guarantees and may change or be removed between releases. +{{< /admonition >}} + ## Usage Usage: @@ -113,4 +117,4 @@ flags with a space between each flag, for example `--extra-args="-enable-feature If you have unsupported features in a Static mode source configuration, you will receive [errors][] when you convert to a Flow mode configuration. The converter will also raise warnings for configuration options that may require your attention. -Refer to [Migrate from Grafana Agent Static to {{< param "PRODUCT_NAME" >}}]({{< relref "../../tasks/migrate/from-static/" >}}) for a detailed migration guide. \ No newline at end of file +Refer to [Migrate from Grafana Agent Static to {{< param "PRODUCT_NAME" >}}]({{< relref "../../tasks/migrate/from-static/" >}}) for a detailed migration guide. 
From 60fdd26c5f9c02a2c67b518d80eb3a479f4b3c21 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C4=90=E1=BB=97=20Tr=E1=BB=8Dng=20H=E1=BA=A3i?= <41283691+hainenber@users.noreply.github.com> Date: Fri, 22 Mar 2024 02:39:22 +0700 Subject: [PATCH 05/83] feat(exporter/otelcol): support converting `vcenterreceiver` (#6714) * feat(otelcol/exporter): support converting `vcenterreceiver` Signed-off-by: hainenber * fix(converter/otelcol): add converter to `MetricsBuilderConfig` attribute + use helper for converting TLS setting Signed-off-by: hainenber --------- Signed-off-by: hainenber Signed-off-by: erikbaranowski <39704712+erikbaranowski@users.noreply.github.com> --- .../converter_vcenterreceiver.go | 145 ++++++++++++++++++ .../testdata/vcenterreceiver.river | 28 ++++ .../testdata/vcenterreceiver.yaml | 24 +++ 3 files changed, 197 insertions(+) create mode 100644 internal/converter/internal/otelcolconvert/converter_vcenterreceiver.go create mode 100644 internal/converter/internal/otelcolconvert/testdata/vcenterreceiver.river create mode 100644 internal/converter/internal/otelcolconvert/testdata/vcenterreceiver.yaml diff --git a/internal/converter/internal/otelcolconvert/converter_vcenterreceiver.go b/internal/converter/internal/otelcolconvert/converter_vcenterreceiver.go new file mode 100644 index 000000000000..b4b791d8ffbb --- /dev/null +++ b/internal/converter/internal/otelcolconvert/converter_vcenterreceiver.go @@ -0,0 +1,145 @@ +package otelcolconvert + +import ( + "fmt" + + "github.com/grafana/agent/internal/component/otelcol" + "github.com/grafana/agent/internal/component/otelcol/receiver/vcenter" + "github.com/grafana/agent/internal/converter/diag" + "github.com/grafana/agent/internal/converter/internal/common" + "github.com/grafana/river/rivertypes" + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/vcenterreceiver" + "go.opentelemetry.io/collector/component" +) + +func init() { + converters = 
append(converters, vcenterReceiverConverter{}) +} + +type vcenterReceiverConverter struct{} + +func (vcenterReceiverConverter) Factory() component.Factory { return vcenterreceiver.NewFactory() } + +func (vcenterReceiverConverter) InputComponentName() string { return "" } + +func (vcenterReceiverConverter) ConvertAndAppend(state *state, id component.InstanceID, cfg component.Config) diag.Diagnostics { + var diags diag.Diagnostics + + label := state.FlowComponentLabel() + + args := toVcenterReceiver(state, id, cfg.(*vcenterreceiver.Config)) + block := common.NewBlockWithOverride([]string{"otelcol", "receiver", "vcenter"}, label, args) + + diags.Add( + diag.SeverityLevelInfo, + fmt.Sprintf("Converted %s into %s", stringifyInstanceID(id), stringifyBlock(block)), + ) + + state.Body().AppendBlock(block) + return diags +} + +func toVcenterReceiver(state *state, id component.InstanceID, cfg *vcenterreceiver.Config) *vcenter.Arguments { + var ( + nextMetrics = state.Next(id, component.DataTypeMetrics) + nextTraces = state.Next(id, component.DataTypeTraces) + ) + + return &vcenter.Arguments{ + Endpoint: cfg.Endpoint, + Username: cfg.Username, + Password: rivertypes.Secret(cfg.Password), + + DebugMetrics: common.DefaultValue[vcenter.Arguments]().DebugMetrics, + + MetricsBuilderConfig: toMetricsBuildConfig(encodeMapstruct(cfg.MetricsBuilderConfig)), + + ScraperControllerArguments: otelcol.ScraperControllerArguments{ + CollectionInterval: cfg.CollectionInterval, + InitialDelay: cfg.InitialDelay, + Timeout: cfg.Timeout, + }, + + TLS: toTLSClientArguments(cfg.TLSClientSetting), + + Output: &otelcol.ConsumerArguments{ + Metrics: toTokenizedConsumers(nextMetrics), + Traces: toTokenizedConsumers(nextTraces), + }, + } +} + +func toMetricsBuildConfig(cfg map[string]any) vcenter.MetricsBuilderConfig { + return vcenter.MetricsBuilderConfig{ + Metrics: toVcenterMetricsConfig(encodeMapstruct(cfg["metrics"])), + ResourceAttributes: 
toVcenterResourceAttributesConfig(encodeMapstruct(cfg["resource_attributes"])), + } +} + +func toVcenterMetricConfig(cfg map[string]any) vcenter.MetricConfig { + return vcenter.MetricConfig{ + Enabled: cfg["enabled"].(bool), + } +} + +func toVcenterMetricsConfig(cfg map[string]any) vcenter.MetricsConfig { + return vcenter.MetricsConfig{ + VcenterClusterCPUEffective: toVcenterMetricConfig(encodeMapstruct(cfg["vcenter.cluster.cpu.effective"])), + VcenterClusterCPULimit: toVcenterMetricConfig(encodeMapstruct(cfg["vcenter.cluster.cpu.limit"])), + VcenterClusterHostCount: toVcenterMetricConfig(encodeMapstruct(cfg["vcenter.cluster.host.count"])), + VcenterClusterMemoryEffective: toVcenterMetricConfig(encodeMapstruct(cfg["vcenter.cluster.memory.effective"])), + VcenterClusterMemoryLimit: toVcenterMetricConfig(encodeMapstruct(cfg["vcenter.cluster.memory.limit"])), + VcenterClusterMemoryUsed: toVcenterMetricConfig(encodeMapstruct(cfg["vcenter.cluster.memory.used"])), + VcenterClusterVMCount: toVcenterMetricConfig(encodeMapstruct(cfg["vcenter.cluster.vm.count"])), + VcenterDatastoreDiskUsage: toVcenterMetricConfig(encodeMapstruct(cfg["vcenter.datastore.disk.usage"])), + VcenterDatastoreDiskUtilization: toVcenterMetricConfig(encodeMapstruct(cfg["vcenter.datastore.disk.utilization"])), + VcenterHostCPUUsage: toVcenterMetricConfig(encodeMapstruct(cfg["vcenter.host.cpu.usage"])), + VcenterHostCPUUtilization: toVcenterMetricConfig(encodeMapstruct(cfg["vcenter.host.cpu.utilization"])), + VcenterHostDiskLatencyAvg: toVcenterMetricConfig(encodeMapstruct(cfg["vcenter.host.disk.latency.avg"])), + VcenterHostDiskLatencyMax: toVcenterMetricConfig(encodeMapstruct(cfg["vcenter.host.disk.latency.max"])), + VcenterHostDiskThroughput: toVcenterMetricConfig(encodeMapstruct(cfg["vcenter.host.disk.throughput"])), + VcenterHostMemoryUsage: toVcenterMetricConfig(encodeMapstruct(cfg["vcenter.host.memory.usage"])), + VcenterHostMemoryUtilization: 
toVcenterMetricConfig(encodeMapstruct(cfg["vcenter.host.memory.utilization"])), + VcenterHostNetworkPacketCount: toVcenterMetricConfig(encodeMapstruct(cfg["vcenter.host.network.packet.count"])), + VcenterHostNetworkPacketErrors: toVcenterMetricConfig(encodeMapstruct(cfg["vcenter.host.network.packet.errors"])), + VcenterHostNetworkThroughput: toVcenterMetricConfig(encodeMapstruct(cfg["vcenter.host.network.throughput"])), + VcenterHostNetworkUsage: toVcenterMetricConfig(encodeMapstruct(cfg["vcenter.host.network.usage"])), + VcenterResourcePoolCPUShares: toVcenterMetricConfig(encodeMapstruct(cfg["vcenter.resource_pool.cpu.shares"])), + VcenterResourcePoolCPUUsage: toVcenterMetricConfig(encodeMapstruct(cfg["vcenter.resource_pool.cpu.usage"])), + VcenterResourcePoolMemoryShares: toVcenterMetricConfig(encodeMapstruct(cfg["vcenter.resource_pool.memory.shares"])), + VcenterResourcePoolMemoryUsage: toVcenterMetricConfig(encodeMapstruct(cfg["vcenter.resource_pool.memory.usage"])), + VcenterVMCPUUsage: toVcenterMetricConfig(encodeMapstruct(cfg["vcenter.vm.cpu.usage"])), + VcenterVMCPUUtilization: toVcenterMetricConfig(encodeMapstruct(cfg["vcenter.vm.cpu.utilization"])), + VcenterVMDiskLatencyAvg: toVcenterMetricConfig(encodeMapstruct(cfg["vcenter.vm.disk.latency.avg"])), + VcenterVMDiskLatencyMax: toVcenterMetricConfig(encodeMapstruct(cfg["vcenter.vm.disk.latency.max"])), + VcenterVMDiskThroughput: toVcenterMetricConfig(encodeMapstruct(cfg["vcenter.vm.disk.throughput"])), + VcenterVMDiskUsage: toVcenterMetricConfig(encodeMapstruct(cfg["vcenter.vm.disk.usage"])), + VcenterVMDiskUtilization: toVcenterMetricConfig(encodeMapstruct(cfg["vcenter.vm.disk.utilization"])), + VcenterVMMemoryBallooned: toVcenterMetricConfig(encodeMapstruct(cfg["vcenter.vm.memory.ballooned"])), + VcenterVMMemorySwapped: toVcenterMetricConfig(encodeMapstruct(cfg["vcenter.vm.memory.swapped"])), + VcenterVMMemorySwappedSsd: toVcenterMetricConfig(encodeMapstruct(cfg["vcenter.vm.memory.swapped_ssd"])), + 
VcenterVMMemoryUsage: toVcenterMetricConfig(encodeMapstruct(cfg["vcenter.vm.memory.usage"])), + VcenterVMMemoryUtilization: toVcenterMetricConfig(encodeMapstruct(cfg["vcenter.vm.memory.utilization"])), + VcenterVMNetworkPacketCount: toVcenterMetricConfig(encodeMapstruct(cfg["vcenter.vm.network.packet.count"])), + VcenterVMNetworkThroughput: toVcenterMetricConfig(encodeMapstruct(cfg["vcenter.vm.network.throughput"])), + VcenterVMNetworkUsage: toVcenterMetricConfig(encodeMapstruct(cfg["vcenter.vm.network.usage"])), + } +} + +func toVcenterResourceAttributesConfig(cfg map[string]any) vcenter.ResourceAttributesConfig { + return vcenter.ResourceAttributesConfig{ + VcenterClusterName: toVcenterResourceAttributeConfig(encodeMapstruct(cfg["vcenter.cluster.name"])), + VcenterDatastoreName: toVcenterResourceAttributeConfig(encodeMapstruct(cfg["vcenter.datastore.name"])), + VcenterHostName: toVcenterResourceAttributeConfig(encodeMapstruct(cfg["vcenter.host.name"])), + VcenterResourcePoolInventoryPath: toVcenterResourceAttributeConfig(encodeMapstruct(cfg["vcenter.resource_pool.inventory_path"])), + VcenterResourcePoolName: toVcenterResourceAttributeConfig(encodeMapstruct(cfg["vcenter.resource_pool.name"])), + VcenterVMID: toVcenterResourceAttributeConfig(encodeMapstruct(cfg["vcenter.vm.id"])), + VcenterVMName: toVcenterResourceAttributeConfig(encodeMapstruct(cfg["vcenter.vm.name"])), + } +} + +func toVcenterResourceAttributeConfig(cfg map[string]any) vcenter.ResourceAttributeConfig { + return vcenter.ResourceAttributeConfig{ + Enabled: cfg["enabled"].(bool), + } +} diff --git a/internal/converter/internal/otelcolconvert/testdata/vcenterreceiver.river b/internal/converter/internal/otelcolconvert/testdata/vcenterreceiver.river new file mode 100644 index 000000000000..905459d7c6c8 --- /dev/null +++ b/internal/converter/internal/otelcolconvert/testdata/vcenterreceiver.river @@ -0,0 +1,28 @@ +otelcol.receiver.vcenter "default" { + endpoint = "http://localhost:15672" + username = 
"otelu" + password = "abc" + + metrics { + vcenter.host.cpu.utilization { + enabled = false + } + } + + resource_attributes { + vcenter.cluster.name { + enabled = false + } + } + + output { + metrics = [] + traces = [otelcol.exporter.otlp.default.input] + } +} + +otelcol.exporter.otlp "default" { + client { + endpoint = "database:4317" + } +} diff --git a/internal/converter/internal/otelcolconvert/testdata/vcenterreceiver.yaml b/internal/converter/internal/otelcolconvert/testdata/vcenterreceiver.yaml new file mode 100644 index 000000000000..d0c37fb707b9 --- /dev/null +++ b/internal/converter/internal/otelcolconvert/testdata/vcenterreceiver.yaml @@ -0,0 +1,24 @@ +receivers: + vcenter: + endpoint: http://localhost:15672 + username: otelu + password: "abc" + collection_interval: 1m + initial_delay: 1s + metrics: + vcenter.host.cpu.utilization: + enabled: false + resource_attributes: + vcenter.cluster.name: + enabled: false + +exporters: + otlp: + endpoint: database:4317 + +service: + pipelines: + traces: + receivers: [vcenter] + processors: [] + exporters: [otlp] From da363b7d243a8bf2373fe5d08a9a18f0734362ef Mon Sep 17 00:00:00 2001 From: Paulin Todev Date: Fri, 22 Mar 2024 09:34:52 +0000 Subject: [PATCH 06/83] Upgrade OTel from v0.87.0 to v0.96.0 (#6725) * Upgrade OTel from v0.87.0 to v0.96.0 * Fixes for linter and unit tests * Use the grafana fork of the Collector * Fix issues with converters --- CHANGELOG.md | 103 +++ docs/sources/_index.md | 2 +- docs/sources/_index.md.t | 2 +- .../otelcol.connector.servicegraph.md | 3 + .../otelcol.connector.spanmetrics.md | 69 +- .../components/otelcol.exporter.otlp.md | 7 + .../components/otelcol.exporter.otlphttp.md | 37 +- ...telcol.extension.jaeger_remote_sampling.md | 12 +- .../components/otelcol.processor.filter.md | 5 +- .../otelcol.processor.resourcedetection.md | 35 +- .../otelcol.processor.tail_sampling.md | 12 +- .../components/otelcol.processor.transform.md | 7 +- .../components/otelcol.receiver.kafka.md | 3 +- 
.../components/otelcol.receiver.otlp.md | 16 +- .../components/otelcol.receiver.vcenter.md | 2 +- .../components/otelcol-tls-config-block.md | 35 +- .../static/configuration/traces-config.md | 9 +- go.mod | 378 ++++---- go.sum | 809 +++++++++--------- internal/component/otelcol/auth/auth.go | 4 +- .../component/otelcol/config_compression.go | 18 +- internal/component/otelcol/config_grpc.go | 10 +- internal/component/otelcol/config_http.go | 35 +- internal/component/otelcol/config_retry.go | 6 +- internal/component/otelcol/config_tls.go | 40 +- .../component/otelcol/connector/connector.go | 4 +- .../connector/servicegraph/servicegraph.go | 14 +- .../servicegraph/servicegraph_test.go | 14 +- .../connector/spanmetrics/spanmetrics.go | 42 +- .../connector/spanmetrics/spanmetrics_test.go | 58 +- .../otelcol/connector/spanmetrics/types.go | 27 +- .../component/otelcol/exporter/exporter.go | 4 +- .../exporter/loadbalancing/loadbalancing.go | 10 +- .../loadbalancing/loadbalancing_test.go | 15 +- .../component/otelcol/exporter/otlp/otlp.go | 6 +- .../otelcol/exporter/otlphttp/otlphttp.go | 38 +- .../component/otelcol/extension/extension.go | 4 +- .../internal/jaegerremotesampling/config.go | 26 +- .../jaegerremotesampling/config_test.go | 43 +- .../jaegerremotesampling/extension.go | 30 +- .../jaegerremotesampling/extension_test.go | 23 +- .../internal/jaegerremotesampling/factory.go | 57 +- .../jaegerremotesampling/factory_test.go | 17 +- .../jaegerremotesampling/internal/grpc.go | 25 +- .../internal/grpc_test.go | 27 +- .../jaegerremotesampling/internal/http.go | 28 +- .../internal/http_test.go | 39 +- .../internal/internal_test.go | 13 +- .../internal/metadata/generated_status.go | 25 + .../internal/remote_strategy_cache.go | 13 +- .../internal/remote_strategy_cache_test.go | 13 +- .../internal/remote_strategy_store.go | 15 +- .../jaeger_remote_sampling.go | 4 +- .../otelcol/processor/filter/filter_test.go | 2 +- .../component/otelcol/processor/processor.go | 4 +- 
.../internal/aws/ecs/config.go | 3 + .../internal/aws/eks/config.go | 15 +- .../internal/azure/aks/config.go | 15 +- .../internal/system/config.go | 6 + .../resourcedetection_test.go | 22 +- .../otelcol/processor/tail_sampling/types.go | 13 +- .../otelcol/receiver/jaeger/jaeger.go | 4 +- .../component/otelcol/receiver/kafka/kafka.go | 3 + .../otelcol/receiver/opencensus/opencensus.go | 4 +- .../component/otelcol/receiver/otlp/otlp.go | 8 +- .../internal/staleness_end_to_end_test.go | 2 +- .../otelcol/receiver/prometheus/prometheus.go | 4 +- .../component/otelcol/receiver/receiver.go | 4 +- .../otelcol/receiver/zipkin/zipkin.go | 4 +- .../otelcol/receiver/zipkin/zipkin_test.go | 8 +- .../converter_jaegerreceiver.go | 4 +- ...converter_jaegerremotesamplingextension.go | 8 +- .../otelcolconvert/converter_kafkareceiver.go | 2 + .../converter_loadbalancingexporter.go | 4 +- .../converter_opencensusreceiver.go | 2 +- .../otelcolconvert/converter_otlpexporter.go | 11 +- .../converter_otlphttpexporter.go | 27 +- .../otelcolconvert/converter_otlpreceiver.go | 29 +- .../converter_spanmetricsconnector.go | 21 +- .../converter_tailsamplingprocessor.go | 3 +- .../converter_zipkinreceiver.go | 2 +- .../internal/otelcolconvert/otelcolconvert.go | 2 +- .../otelcolconvert/testdata/bearertoken.river | 5 +- .../otelcolconvert/testdata/kafka.river | 1 + .../testdata/memorylimiter.river | 3 +- .../testdata/memorylimiter.yaml | 1 + .../otelcolconvert/testdata/oauth2.river | 3 +- .../otelcolconvert/testdata/otlphttp.river | 3 +- internal/flow/tracing/tracing.go | 2 +- internal/static/traces/config.go | 48 +- internal/static/traces/config_test.go | 19 + internal/static/traces/instance.go | 20 +- .../servicegraphprocessor/processor_test.go | 4 +- internal/static/traces/traceutils/server.go | 13 +- internal/util/otel_feature_gate.go | 26 +- internal/util/otel_feature_gate_test.go | 3 - internal/util/testappender/compare.go | 6 +- 97 files changed, 1448 insertions(+), 1238 deletions(-) 
create mode 100644 internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/internal/metadata/generated_status.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 1c97705997ac..cc3d6b73637a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -65,6 +65,109 @@ v0.40.3 (2024-03-14) - Upgrade to Go 1.22.1 (@thampiotr) +- Upgrade from OpenTelemetry Collector v0.87.0 to v0.96.0: + * [ottl]: Fix bug where named parameters needed a space after the equal sign (`=`) +https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/28511 + * [exporters] Additional enqueue_failed metrics +https://github.com/open-telemetry/opentelemetry-collector/issues/8673 + * [otelcol.receiver.kafka]: Fix issue where counting number of logs emitted could cause panic + * [otelcol.processor.k8sattributes]: The time format of k8s.pod.start_time attribute value migrated to RFC3339: +Before: 2023-07-10 12:34:39.740638 -0700 PDT m=+0.020184946 +After: 2023-07-10T12:39:53.112485-07:00 +https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/28817 + * [otelcol.processor.tail_sampling] A new `upper_threshold_ms` argument for the `latency` policy. +https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/26115 + * [otelcol.connector.spanmetrics] Add a new `events` metric. +https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/27451 + * [otelcol.connector.spanmetrics] A new `max_per_data_point` argument for exemplar generation. 
+ * https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/22620 + * [ottl] Add IsBool Converter +https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/27897 + * [otelcol.processor.tail_sampling] Optimize memory performance of tailsamplingprocessor +https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/27889 + * [otelcol.connector.servicegraph] Add a `metrics_flush_interval` argument. +https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/27679 + * [ottl] Add IsDouble Converter +https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/27895 + * [ottl] Add new `silent` ErrorMode +https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/29710 + * [otelcol.connector.spanmetrics] A new `resource_metrics_cache_size` argument. +https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/27654 + * [ottl] Add IsInt Converter +https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/27894 + * [ottl] Validate that all path elements are used +https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/30042 + * [ottl] Validate Keys are used +https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/30162 + * [otelcol.receiver.vcenter] Add statement of support for version 8 of ESXi and vCenter +https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/30274 + * [ottl] Add Hour converter +https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/29468 + * [otelcol.connector.spanmetrics] A new `resource_metrics_key_attributes` argument to fix broken spanmetrics counters + after a span producing service restart, when resource attributes contain dynamic/ephemeral values (e.g. process id). 
+https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/29711 + * [ottl] Issue with the hash value of a match group in the replace_pattern editors +https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/29409 + * [ottl] Fix bug where IsBool wasn't usable +https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/30151 + * [ottl] Add flatten function +https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/30455 + * [ottl] Fix bugs with parsing of string escapes in OTTL +https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/23238 + * [ottl]: Add functions for parsing CSV +https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/30921 + * [ottl] Allow users to specify the format of the hashed replacement string in the `replace_pattern` editors +https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/27820 + * [ottl] Add ParseKeyValue function +https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/30998 + * [otelcol.receiver.opencensus] Fix memory leak on shutdown +https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/31152 + * [otelcol.processor.memory_limiter] Fix leaking goroutine +https://github.com/open-telemetry/opentelemetry-collector/issues/9099 + * Additional `http2_read_idle_timeout` and `http2_ping_timeout` arguments for HTTP clients +https://github.com/open-telemetry/opentelemetry-collector/pull/9022 + * [otelcol.auth.bearer] Fix for "401 Unauthorized" on HTTP connections +https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/24656 +* Update to OTLP 1.1 +https://github.com/open-telemetry/opentelemetry-collector/pull/9588 + * [otelcol.auth.basic] Accept empty usernames. 
+https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/30470 + * [exporters] Do not re-enqueue failed batches, rely on the `retry_on_failure` strategy instead. +https://github.com/open-telemetry/opentelemetry-collector/issues/8382 + * [otelcol.exporter.otlphttp] A `Host` header is added automatically. +https://github.com/open-telemetry/opentelemetry-collector/issues/9395 + * [exporters] PartialSuccess is treated as success, logged as warning. +https://github.com/open-telemetry/opentelemetry-collector/issues/9243 + * [otelcol.exporter.otlphttp] Supports JSON encoding through an additional `encoding` argument. +https://github.com/open-telemetry/opentelemetry-collector/issues/6945 + * [exporters] A new `include_system_ca_certs_pool` argument for TLS config. +https://github.com/open-telemetry/opentelemetry-collector/issues/7774 + * [otelcol.receiver.vcenter] The receiver emits vCenter performance metrics with object metric label dimension. +https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/30615 + * [otelcol.processor.transform] Add copy_metric function +https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/30846 + * [otelcol.exporter.loadbalancing] Optimized CPU performance +https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/30141 + * [otelcol.processor.k8sattributes] Set attributes from namespace/node labels or annotations even if node/namespaces attribute are not set. 
+https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/28837 + * [otelcol.receiver.kafka] An additional `resolve_canonical_bootstrap_servers_only` argument +https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/26022 + * [otelcol.receiver.kafka] Add Azure Resource Log Support +https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/18210 + * [otelcol.processor.resourcedetection] Add a `k8s.cluster.name` resource attribute for AKS and EKS. +https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/26794 + * [otelcol.processor.resourcedetection] Add detection of `host.ip` to system detector. +https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/24450 + * [otelcol.processor.resourcedetection] Add detection of `host.mac` to system detector. +https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/29587 + * [otelcol.processor.resourcedetection] Change type of `host.cpu.model.id` and `host.cpu.model.family` to string. +https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/29025 + * [otelcol.processor.resourcedetection] Add a `aws.ecs.task.id` attribute +https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/8274 + * [otelcol.exporter.otlp] Additional RPC debug metrics such as `rpc_client_duration_milliseconds`. + * [otelcol.receiver.otlp] Additional RPC debug metrics such as `rpc_server_duration_milliseconds`. 
+ + v0.40.2 (2024-03-05) -------------------- diff --git a/docs/sources/_index.md b/docs/sources/_index.md index c83771319829..c1579be48469 100644 --- a/docs/sources/_index.md +++ b/docs/sources/_index.md @@ -10,7 +10,7 @@ description: Grafana Agent is a flexible, performant, vendor-neutral, telemetry weight: 350 cascade: AGENT_RELEASE: v0.40.3 - OTEL_VERSION: v0.87.0 + OTEL_VERSION: v0.96.0 --- # Grafana Agent diff --git a/docs/sources/_index.md.t b/docs/sources/_index.md.t index f4c9bfc9eb0d..af7244fff0d9 100644 --- a/docs/sources/_index.md.t +++ b/docs/sources/_index.md.t @@ -10,7 +10,7 @@ description: Grafana Agent is a flexible, performant, vendor-neutral, telemetry weight: 350 cascade: AGENT_RELEASE: $AGENT_VERSION - OTEL_VERSION: v0.87.0 + OTEL_VERSION: v0.96.0 --- # Grafana Agent diff --git a/docs/sources/flow/reference/components/otelcol.connector.servicegraph.md b/docs/sources/flow/reference/components/otelcol.connector.servicegraph.md index 06f20833f0e2..a37a300111e7 100644 --- a/docs/sources/flow/reference/components/otelcol.connector.servicegraph.md +++ b/docs/sources/flow/reference/components/otelcol.connector.servicegraph.md @@ -65,6 +65,7 @@ Name | Type | Description | Default | Required `dimensions` | `list(string)` | A list of dimensions to add with the default dimensions. | `[]` | no `cache_loop` | `duration` | Configures how often to delete series which have not been updated. | `"1m"` | no `store_expiration_loop` | `duration` | The time to expire old entries from the store periodically. | `"2s"` | no +`metrics_flush_interval` | `duration` | The interval at which metrics are flushed to downstream components. | `"0s"` | no Service graphs work by inspecting traces and looking for spans with parent-children relationship that represent a request. @@ -113,6 +114,8 @@ Additional labels can be included using the `dimensions` configuration option: * Firstly the resource attributes will be searched. 
If the attribute is not found, the span attributes will be searched. +When `metrics_flush_interval` is set to `0s`, metrics will be flushed on every received batch of traces. + [Span Kind]: https://opentelemetry.io/docs/concepts/signals/traces/#span-kind ## Blocks diff --git a/docs/sources/flow/reference/components/otelcol.connector.spanmetrics.md b/docs/sources/flow/reference/components/otelcol.connector.spanmetrics.md index ffc5f408cc59..2ef2ff67ab39 100644 --- a/docs/sources/flow/reference/components/otelcol.connector.spanmetrics.md +++ b/docs/sources/flow/reference/components/otelcol.connector.spanmetrics.md @@ -67,13 +67,15 @@ otelcol.connector.spanmetrics "LABEL" { `otelcol.connector.spanmetrics` supports the following arguments: -| Name | Type | Description | Default | Required | -| ------------------------- | -------------- | --------------------------------------------------------------------- | -------------- | -------- | -| `dimensions_cache_size` | `number` | How many dimensions to cache. | `1000` | no | -| `aggregation_temporality` | `string` | Configures whether to reset the metrics after flushing. | `"CUMULATIVE"` | no | -| `metrics_flush_interval` | `duration` | How often to flush generated metrics. | `"15s"` | no | -| `namespace` | `string` | Metric namespace. | `""` | no | -| `exclude_dimensions` | `list(string)` | List of dimensions to be excluded from the default set of dimensions. | `false` | no | +| Name | Type | Description | Default | Required | +| --------------------------------- | -------------- | ------------------------------------------------------------------------------------------ | -------------- | -------- | +| `aggregation_temporality` | `string` | Configures whether to reset the metrics after flushing. | `"CUMULATIVE"` | no | +| `dimensions_cache_size` | `number` | How many dimensions to cache. | `1000` | no | +| `exclude_dimensions` | `list(string)` | List of dimensions to be excluded from the default set of dimensions. 
| `[]` | no | +| `metrics_flush_interval` | `duration` | How often to flush generated metrics. | `"15s"` | no | +| `namespace` | `string` | Metric namespace. | `""` | no | +| `resource_metrics_cache_size` | `number` | The size of the cache holding metrics for a service. | `1000` | no | +| `resource_metrics_key_attributes` | `list(string)` | Span resources with the same values for those resource attributes are aggregated together. | `[]` | no | Adjusting `dimensions_cache_size` can improve the Agent process' memory usage. @@ -84,19 +86,28 @@ The supported values for `aggregation_temporality` are: If `namespace` is set, the generated metric name will be added a `namespace.` prefix. +`resource_metrics_cache_size` is mostly relevant for cumulative temporality. It helps avoid issues with increasing memory and with incorrect metric timestamp resets. + +`resource_metrics_key_attributes` can be used to avoid situations where resource attributes may change across service restarts, +causing metric counters to break (and duplicate). A resource does not need to have all of the attributes. +The list must include enough attributes to properly identify unique resources or risk aggregating data from more than one service and span. +For example, `["service.name", "telemetry.sdk.language", "telemetry.sdk.name"]`. + ## Blocks The following blocks are supported inside the definition of `otelcol.connector.spanmetrics`: -| Hierarchy | Block | Description | Required | -| ----------------------- | --------------- | ------------------------------------------------------- | -------- | -| dimension | [dimension][] | Dimensions to be added in addition to the default ones. | no | -| histogram | [histogram][] | Configures the histogram derived from spans durations. | yes | -| histogram > exponential | [exponential][] | Configuration for a histogram with exponential buckets. | no | -| histogram > explicit | [explicit][] | Configuration for a histogram with explicit buckets. 
| no | -| exemplars | [exemplars][] | Configures how to attach exemplars to histograms. | no | -| output | [output][] | Configures where to send telemetry data. | yes | +| Hierarchy | Block | Description | Required | +| ----------------------- | --------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | -------- | +| dimension | [dimension][] | Dimensions to be added in addition to the default ones. | no | +| events | [events][] | Configures the events metric. | no | +| events > dimension | [dimension][] | Span event attributes to add as dimensions to the events metric, _on top of_ the default ones and the ones configured in the top-level `dimension` block. | no | +| exemplars | [exemplars][] | Configures how to attach exemplars to histograms. | no | +| histogram | [histogram][] | Configures the histogram derived from spans durations. | yes | +| histogram > explicit | [explicit][] | Configuration for a histogram with explicit buckets. | no | +| histogram > exponential | [exponential][] | Configuration for a histogram with exponential buckets. | no | +| output | [output][] | Configures where to send telemetry data. | yes | It is necessary to specify either a "[exponential][]" or an "[explicit][]" block: @@ -108,6 +119,7 @@ It is necessary to specify either a "[exponential][]" or an "[explicit][]" block [exponential]: #exponential-block [explicit]: #explicit-block [exemplars]: #exemplars-block +[events]: #events-block [output]: #output-block ### dimension block @@ -128,8 +140,8 @@ The following attributes are supported: | Name | Type | Description | Default | Required | | --------- | -------- | ------------------------------------------------ | ------- | -------- | -| `name` | `string` | Span attribute or resource attribute to look up. | | yes | | `default` | `string` | Value to use if the attribute is missing. 
| null | no | +| `name` | `string` | Span attribute or resource attribute to look up. | | yes | `otelcol.connector.spanmetrics` will look for the `name` attribute in the span's collection of attributes. If it is not found, the resource attributes will be checked. @@ -139,6 +151,20 @@ If the attribute is missing in both the span and resource attributes: - If `default` is not set, the dimension will be omitted. - If `default` is set, the dimension will be added and its value will be set to the value of `default`. +### events block + +The `events` block configures the `events` metric, which tracks [span events][span-events]. + +The following attributes are supported: + +| Name | Type | Description | Default | Required | +| --------- | ------ | -------------------------- | ------- | -------- | +| `enabled` | `bool` | Enables all events metric. | `false` | no | + +At least one `dimension` block is required if `enabled` is set to `true`. + +[span-events]: https://opentelemetry.io/docs/concepts/signals/traces/#span-events + ### histogram block The `histogram` block configures the histogram derived from spans' durations. @@ -147,8 +173,8 @@ The following attributes are supported: | Name | Type | Description | Default | Required | | --------- | -------- | ------------------------------- | ------- | -------- | -| `unit` | `string` | Configures the histogram units. | `"ms"` | no | | `disable` | `bool` | Disable all histogram metrics. | `false` | no | +| `unit` | `string` | Configures the histogram units. | `"ms"` | no | The supported values for `unit` are: @@ -181,9 +207,12 @@ The `exemplars` block configures how to attach exemplars to histograms. The following attributes are supported: -| Name | Type | Description | Default | Required | -| --------- | ------ | -------------------------------------------------- | ------- | -------- | -| `enabled` | `bool` | Configures whether to add exemplars to histograms. 
| `false` | no | +| Name | Type | Description | Default | Required | +| -------------------- | -------- | --------------------------------------------------------------------------- | ------- | -------- | +| `enabled` | `bool` | Configures whether to add exemplars to histograms. | `false` | no | +| `max_per_data_point` | `number` | Limits the number of exemplars that can be added to a unique dimension set. | `null` | no | + +`max_per_data_point` can help with reducing memory consumption. ### output block diff --git a/docs/sources/flow/reference/components/otelcol.exporter.otlp.md b/docs/sources/flow/reference/components/otelcol.exporter.otlp.md index fce2576d8e29..58b428070367 100644 --- a/docs/sources/flow/reference/components/otelcol.exporter.otlp.md +++ b/docs/sources/flow/reference/components/otelcol.exporter.otlp.md @@ -177,6 +177,13 @@ information. * `exporter_sent_spans_ratio_total` (counter): Number of spans successfully sent to destination. * `exporter_send_failed_spans_ratio_total` (counter): Number of spans in failed attempts to send to destination. +* `exporter_queue_capacity_ratio` (gauge): Fixed capacity of the retry queue (in batches) +* `exporter_queue_size_ratio` (gauge): Current size of the retry queue (in batches) +* `rpc_client_duration_milliseconds` (histogram): Measures the duration of inbound RPC. +* `rpc_client_request_size_bytes` (histogram): Measures size of RPC request messages (uncompressed). +* `rpc_client_requests_per_rpc` (histogram): Measures the number of messages received per RPC. Should be 1 for all non-streaming RPCs. +* `rpc_client_response_size_bytes` (histogram): Measures size of RPC response messages (uncompressed). +* `rpc_client_responses_per_rpc` (histogram): Measures the number of messages received per RPC. Should be 1 for all non-streaming RPCs. 
## Examples diff --git a/docs/sources/flow/reference/components/otelcol.exporter.otlphttp.md b/docs/sources/flow/reference/components/otelcol.exporter.otlphttp.md index eccaf51f9f22..e0d4dda49477 100644 --- a/docs/sources/flow/reference/components/otelcol.exporter.otlphttp.md +++ b/docs/sources/flow/reference/components/otelcol.exporter.otlphttp.md @@ -73,24 +73,33 @@ The `client` block configures the HTTP client used by the component. The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`endpoint` | `string` | The target URL to send telemetry data to. | | yes -`read_buffer_size` | `string` | Size of the read buffer the HTTP client uses for reading server responses. | `0` | no -`write_buffer_size` | `string` | Size of the write buffer the HTTP client uses for writing requests. | `"512KiB"` | no -`timeout` | `duration` | Time to wait before marking a request as failed. | `"30s"` | no -`headers` | `map(string)` | Additional headers to send with the request. | `{}` | no -`compression` | `string` | Compression mechanism to use for requests. | `"gzip"` | no -`max_idle_conns` | `int` | Limits the number of idle HTTP connections the client can keep open. | `100` | no -`max_idle_conns_per_host` | `int` | Limits the number of idle HTTP connections the host can keep open. | `0` | no -`max_conns_per_host` | `int` | Limits the total (dialing,active, and idle) number of connections per host. | `0` | no -`idle_conn_timeout` | `duration` | Time to wait before an idle connection closes itself. | `"90s"` | no -`disable_keep_alives`| `bool` | Disable HTTP keep-alive. | `false` | no -`auth` | `capsule(otelcol.Handler)` | Handler from an `otelcol.auth` component to use for authenticating requests. 
| | no +Name | Type | Description | Default | Required +------------------------- | -------------------------- | ----------- | ------- | -------- +`endpoint` | `string` | The target URL to send telemetry data to. | | yes +`encoding` | `string` | The encoding to use for messages. Should be either `"proto"` or `"json"`. | `"proto"` | no +`read_buffer_size` | `string` | Size of the read buffer the HTTP client uses for reading server responses. | `0` | no +`write_buffer_size` | `string` | Size of the write buffer the HTTP client uses for writing requests. | `"512KiB"` | no +`timeout` | `duration` | Time to wait before marking a request as failed. | `"30s"` | no +`headers` | `map(string)` | Additional headers to send with the request. | `{}` | no +`compression` | `string` | Compression mechanism to use for requests. | `"gzip"` | no +`max_idle_conns` | `int` | Limits the number of idle HTTP connections the client can keep open. | `100` | no +`max_idle_conns_per_host` | `int` | Limits the number of idle HTTP connections the host can keep open. | `0` | no +`max_conns_per_host` | `int` | Limits the total (dialing,active, and idle) number of connections per host. | `0` | no +`idle_conn_timeout` | `duration` | Time to wait before an idle connection closes itself. | `"90s"` | no +`disable_keep_alives` | `bool` | Disable HTTP keep-alive. | `false` | no +`http2_read_idle_timeout` | `duration` | Timeout after which a health check using ping frame will be carried out if no frame is received on the connection. | `0s` | no +`http2_ping_timeout` | `duration` | Timeout after which the connection will be closed if a response to Ping is not received. | `15s` | no +`auth` | `capsule(otelcol.Handler)` | Handler from an `otelcol.auth` component to use for authenticating requests. | | no + +When setting `headers`, note that: + - Certain headers such as `Content-Length` and `Connection` are automatically written when needed and values in `headers` may be ignored. 
+ - The `Host` header is automatically derived from the `endpoint` value. However, this automatic assignment can be overridden by explicitly setting a `Host` header in `headers`. Setting `disable_keep_alives` to `true` will result in significant overhead establishing a new HTTP(s) connection for every request. Before enabling this option, consider whether changes to idle connection settings can achieve your goal. +If `http2_ping_timeout` is unset or set to `0s`, it will default to `15s`. + {{< docs/shared lookup="flow/reference/components/otelcol-compression-field.md" source="agent" version="" >}} ### tls block diff --git a/docs/sources/flow/reference/components/otelcol.extension.jaeger_remote_sampling.md b/docs/sources/flow/reference/components/otelcol.extension.jaeger_remote_sampling.md index 893d38b5911e..f229db000c05 100644 --- a/docs/sources/flow/reference/components/otelcol.extension.jaeger_remote_sampling.md +++ b/docs/sources/flow/reference/components/otelcol.extension.jaeger_remote_sampling.md @@ -92,17 +92,7 @@ Name | Type | Description | Default | Required The `tls` block configures TLS settings used for a server. If the `tls` block isn't provided, TLS won't be used for connections to the server. -The following arguments are supported: - -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`ca_file` | `string` | Path to the CA file. | | no -`cert_file` | `string` | Path to the TLS certificate. | | no -`key_file` | `string` | Path to the TLS certificate key. | | no -`min_version` | `string` | Minimum acceptable TLS version for connections. | `"TLS 1.2"` | no -`max_version` | `string` | Maximum acceptable TLS version for connections. | `"TLS 1.3"` | no -`reload_interval` | `duration` | Frequency to reload the certificates. | | no -`client_ca_file` | `string` | Path to the CA file used to authenticate client certificates. 
| | no +{{< docs/shared lookup="flow/reference/components/otelcol-tls-config-block.md" source="agent" version="" >}} ### cors block diff --git a/docs/sources/flow/reference/components/otelcol.processor.filter.md b/docs/sources/flow/reference/components/otelcol.processor.filter.md index c82be95aa09b..c5392a6037eb 100644 --- a/docs/sources/flow/reference/components/otelcol.processor.filter.md +++ b/docs/sources/flow/reference/components/otelcol.processor.filter.md @@ -87,8 +87,9 @@ Name | Type | Description `error_mode` | `string` | How to react to errors if they occur while processing a statement. | `"propagate"` | no The supported values for `error_mode` are: -* `ignore`: Ignore errors returned by statements and continue on to the next statement. This is the recommended mode. -* `propagate`: Return the error up the pipeline. This will result in the payload being dropped from the Agent. +* `ignore`: Ignore errors returned by conditions, log them, and continue on to the next condition. This is the recommended mode. +* `silent`: Ignore errors returned by conditions, do not log them, and continue on to the next condition. +* `propagate`: Return the error up the pipeline. This will result in the payload being dropped from {{< param "PRODUCT_ROOT_NAME" >}}. ## Blocks diff --git a/docs/sources/flow/reference/components/otelcol.processor.resourcedetection.md b/docs/sources/flow/reference/components/otelcol.processor.resourcedetection.md index 2cc2224fa6b6..9f4f5d882e68 100644 --- a/docs/sources/flow/reference/components/otelcol.processor.resourcedetection.md +++ b/docs/sources/flow/reference/components/otelcol.processor.resourcedetection.md @@ -197,6 +197,7 @@ Block | Description [aws.ecs.launchtype][res-attr-cfg] | Toggles the `aws.ecs.launchtype` resource attribute.
Sets `enabled` to `true` by default. | no [aws.ecs.task.arn][res-attr-cfg] | Toggles the `aws.ecs.task.arn` resource attribute.
Sets `enabled` to `true` by default. | no [aws.ecs.task.family][res-attr-cfg] | Toggles the `aws.ecs.task.family` resource attribute.
Sets `enabled` to `true` by default. | no +[aws.ecs.task.id][res-attr-cfg] | Toggles the `aws.ecs.task.id` resource attribute.
Sets `enabled` to `true` by default. | no [aws.ecs.task.revision][res-attr-cfg] | Toggles the `aws.ecs.task.revision` resource attribute.
Sets `enabled` to `true` by default. | no [aws.log.group.arns][res-attr-cfg] | Toggles the `aws.log.group.arns` resource attribute.
Sets `enabled` to `true` by default. | no [aws.log.group.names][res-attr-cfg] | Toggles the `aws.log.group.names` resource attribute.
Sets `enabled` to `true` by default. | no @@ -222,10 +223,11 @@ Block | Description The `resource_attributes` block supports the following blocks: -Block | Description | Required -------------------------------- | ------------------------------------------------------------------------------------------- | -------- -[cloud.platform][res-attr-cfg] | Toggles the `cloud.platform` resource attribute.
Sets `enabled` to `true` by default. | no -[cloud.provider][res-attr-cfg] | Toggles the `cloud.provider` resource attribute.
Sets `enabled` to `true` by default. | no +Block | Description | Required +-------------------------------- | ---------------------------------------------------------------------------------------------- | -------- +[cloud.platform][res-attr-cfg] | Toggles the `cloud.platform` resource attribute.
Sets `enabled` to `true` by default. | no +[cloud.provider][res-attr-cfg] | Toggles the `cloud.provider` resource attribute.
Sets `enabled` to `true` by default. | no +[k8s.cluster.name][res-attr-cfg] | Toggles the `k8s.cluster.name` resource attribute.
Sets `enabled` to `false` by default. | no Example values: * `cloud.provider`: `"aws"` @@ -354,15 +356,30 @@ Block | Description The `resource_attributes` block supports the following blocks: -Block | Description | Required -------------------------------- | ------------------------------------------------------------------------------------------- | -------- -[cloud.platform][res-attr-cfg] | Toggles the `cloud.platform` resource attribute.
Sets `enabled` to `true` by default. | no -[cloud.provider][res-attr-cfg] | Toggles the `cloud.provider` resource attribute.
Sets `enabled` to `true` by default. | no +Block | Description | Required +-------------------------------- | ---------------------------------------------------------------------------------------------- | -------- +[cloud.platform][res-attr-cfg] | Toggles the `cloud.platform` resource attribute.
Sets `enabled` to `true` by default. | no +[cloud.provider][res-attr-cfg] | Toggles the `cloud.provider` resource attribute.
Sets `enabled` to `true` by default. | no +[k8s.cluster.name][res-attr-cfg] | Toggles the `k8s.cluster.name` resource attribute.
Sets `enabled` to `false` by default. | no

Example values:
* `cloud.provider`: `"azure"`
* `cloud.platform`: `"azure_vm"`

+Azure AKS cluster name is derived from the Azure Instance Metadata Service's (IMDS) infrastructure resource group field.
+This field contains the resource group and name of the cluster, separated by underscores. For example: `MC_<resource group>_<cluster name>_<location>`.
+
+Example:
+ - Resource group: `my-resource-group`
+ - Cluster name: `my-cluster`
+ - Location: `eastus`
+ - Generated name: `MC_my-resource-group_my-cluster_eastus`
+
+The cluster name is detected if it does not contain underscores and if a custom infrastructure resource group name was not used.
+
+If accurate parsing cannot be performed, the infrastructure resource group value is returned.
+This value can be used to uniquely identify the cluster, because Azure will not allow users to create multiple clusters with the same infrastructure resource group name.
+
### consul

The `consul` block queries a Consul agent and reads its configuration endpoint to retrieve values for resource attributes.

@@ -625,6 +642,8 @@ Block | Description
[host.cpu.stepping][res-attr-cfg] | Toggles the `host.cpu.stepping` resource attribute.
Sets `enabled` to `false` by default. | no [host.cpu.vendor.id][res-attr-cfg] | Toggles the `host.cpu.vendor.id` resource attribute.
Sets `enabled` to `false` by default. | no [host.id][res-attr-cfg] | Toggles the `host.id` resource attribute.
Sets `enabled` to `false` by default. | no +[host.ip][res-attr-cfg] | Toggles the `host.ip` resource attribute.
Sets `enabled` to `false` by default. | no +[host.mac][res-attr-cfg] | Toggles the `host.mac` resource attribute.
Sets `enabled` to `false` by default. | no [host.name][res-attr-cfg] | Toggles the `host.name` resource attribute.
Sets `enabled` to `true` by default. | no [os.description][res-attr-cfg] | Toggles the `os.description` resource attribute.
Sets `enabled` to `false` by default. | no [os.type][res-attr-cfg] | Toggles the `os.type` resource attribute.
Sets `enabled` to `true` by default. | no diff --git a/docs/sources/flow/reference/components/otelcol.processor.tail_sampling.md b/docs/sources/flow/reference/components/otelcol.processor.tail_sampling.md index 32ff9ac4f7ac..baeb5593db53 100644 --- a/docs/sources/flow/reference/components/otelcol.processor.tail_sampling.md +++ b/docs/sources/flow/reference/components/otelcol.processor.tail_sampling.md @@ -155,7 +155,12 @@ The following arguments are supported: Name | Type | Description | Default | Required ---- | ---- | ----------- | ------- | -------- -`threshold_ms` | `number` | The latency threshold for sampling, in milliseconds. | | yes +`threshold_ms` | `number` | Lower latency threshold for sampling, in milliseconds. | | yes +`upper_threshold_ms` | `number` | Upper latency threshold for sampling, in milliseconds. | `0` | no + +For a trace to be sampled, its latency should be greater than `threshold_ms` and lower than or equal to `upper_threshold_ms`. + +An `upper_threshold_ms` of `0` will result in a policy which samples anything greater than `threshold_ms`. ### numeric_attribute block @@ -260,8 +265,9 @@ Name | Type | Description | Default | Required `spanevent` | `list(string)` | OTTL conditions for span events. | `[]` | no The supported values for `error_mode` are: -* `ignore`: Errors cause evaluation to continue to the next statement. -* `propagate`: Errors cause the evaluation to be false and an error is returned. +* `ignore`: Ignore errors returned by conditions, log them, and continue on to the next condition. This is the recommended mode. +* `silent`: Ignore errors returned by conditions, do not log them, and continue on to the next condition. +* `propagate`: Return the error up the pipeline. This will result in the payload being dropped from {{< param "PRODUCT_ROOT_NAME" >}}. At least one of `span` or `spanevent` should be specified. Both `span` and `spanevent` can also be specified. 
diff --git a/docs/sources/flow/reference/components/otelcol.processor.transform.md b/docs/sources/flow/reference/components/otelcol.processor.transform.md index 65e8bd5b6ca2..06ecc32e044a 100644 --- a/docs/sources/flow/reference/components/otelcol.processor.transform.md +++ b/docs/sources/flow/reference/components/otelcol.processor.transform.md @@ -29,6 +29,7 @@ there is also a set of metrics-only functions: * [convert_gauge_to_sum][] * [convert_summary_count_val_to_sum][] * [convert_summary_sum_val_to_sum][] +* [copy_metric][] [OTTL][] statements can also contain constructs such as: * [Booleans][OTTL booleans]: @@ -111,8 +112,9 @@ Name | Type | Description | Default | Required `error_mode` | `string` | How to react to errors if they occur while processing a statement. | `"propagate"` | no The supported values for `error_mode` are: -* `ignore`: Ignore errors returned by statements and continue on to the next statement. This is the recommended mode. -* `propagate`: Return the error up the pipeline. This will result in the payload being dropped from the Agent. +* `ignore`: Ignore errors returned by conditions, log them, and continue on to the next condition. This is the recommended mode. +* `silent`: Ignore errors returned by conditions, do not log them, and continue on to the next condition. +* `propagate`: Return the error up the pipeline. This will result in the payload being dropped from {{< param "PRODUCT_ROOT_NAME" >}}. 
## Blocks @@ -580,6 +582,7 @@ each `"` with a `\"`, and each `\` with a `\\` inside a [normal][river-strings] [convert_gauge_to_sum]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/{{< param "OTEL_VERSION" >}}/processor/transformprocessor#convert_gauge_to_sum [convert_summary_count_val_to_sum]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/{{< param "OTEL_VERSION" >}}/processor/transformprocessor#convert_summary_count_val_to_sum [convert_summary_sum_val_to_sum]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/{{< param "OTEL_VERSION" >}}/processor/transformprocessor#convert_summary_sum_val_to_sum +[copy_metric]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/{{< param "OTEL_VERSION" >}}/processor/transformprocessor#copy_metric [OTTL booleans]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/{{< param "OTEL_VERSION" >}}/pkg/ottl#booleans [OTTL math expressions]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/{{< param "OTEL_VERSION" >}}/pkg/ottl#math-expressions [OTTL boolean expressions]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/{{< param "OTEL_VERSION" >}}/pkg/ottl#boolean-expressions diff --git a/docs/sources/flow/reference/components/otelcol.receiver.kafka.md b/docs/sources/flow/reference/components/otelcol.receiver.kafka.md index a1bcf950dedc..042db0227a92 100644 --- a/docs/sources/flow/reference/components/otelcol.receiver.kafka.md +++ b/docs/sources/flow/reference/components/otelcol.receiver.kafka.md @@ -50,6 +50,7 @@ Name | Type | Description | Default | Required `group_id` | `string` | Consumer group to consume messages from. | `"otel-collector"` | no `client_id` | `string` | Consumer client ID to use. | `"otel-collector"` | no `initial_offset` | `string` | Initial offset to use if no offset was previously committed. 
| `"latest"` | no
+`resolve_canonical_bootstrap_servers_only` | `bool` | Whether to resolve then reverse-lookup broker IPs during startup. | `false` | no

The `encoding` argument determines how to decode messages read from Kafka.
`encoding` must be one of the following strings:
@@ -64,7 +65,7 @@ The `encoding` argument determines how to decode messages read from Kafka.
* `"text"`: Decode the log message as text and insert it into the body of a log record. By default, UTF-8 is used to decode. A different encoding can be chosen by using `text_<encoding>`. For example, `text_utf-8` or `text_shift_jis`.
* `"json"`: Decode the JSON payload and insert it into the body of a log record.
-
+* `"azure_resource_logs"`: The payload is converted from Azure Resource Logs format to an OTLP log.

`"otlp_proto"` must be used to read all telemetry types from Kafka; other encodings are signal-specific.

diff --git a/docs/sources/flow/reference/components/otelcol.receiver.otlp.md b/docs/sources/flow/reference/components/otelcol.receiver.otlp.md
index 55bb0db34536..86b3633c4ac5 100644
--- a/docs/sources/flow/reference/components/otelcol.receiver.otlp.md
+++ b/docs/sources/flow/reference/components/otelcol.receiver.otlp.md
@@ -94,17 +94,7 @@ Name | Type | Description | Default | Required

The `tls` block configures TLS settings used for a server. If the `tls` block
isn't provided, TLS won't be used for connections to the server.

-The following arguments are supported:
-
-Name | Type | Description | Default | Required
----- | ---- | ----------- | ------- | --------
-`ca_file` | `string` | Path to the CA file. | | no
-`cert_file` | `string` | Path to the TLS certificate. | | no
-`key_file` | `string` | Path to the TLS certificate key. | | no
-`min_version` | `string` | Minimum acceptable TLS version for connections. | `"TLS 1.2"` | no
-`max_version` | `string` | Maximum acceptable TLS version for connections. | `"TLS 1.3"` | no
-`reload_interval` | `duration` | Frequency to reload the certificates. 
| | no -`client_ca_file` | `string` | Path to the CA file used to authenticate client certificates. | | no +{{< docs/shared lookup="flow/reference/components/otelcol-tls-config-block.md" source="agent" version="" >}} ### keepalive block @@ -212,6 +202,10 @@ information. * `receiver_accepted_spans_ratio_total` (counter): Number of spans successfully pushed into the pipeline. * `receiver_refused_spans_ratio_total` (counter): Number of spans that could not be pushed into the pipeline. * `rpc_server_duration_milliseconds` (histogram): Duration of RPC requests from a gRPC server. +* `rpc_server_request_size_bytes` (histogram): Measures size of RPC request messages (uncompressed). +* `rpc_server_requests_per_rpc` (histogram): Measures the number of messages received per RPC. Should be 1 for all non-streaming RPCs. +* `rpc_server_response_size_bytes` (histogram): Measures size of RPC response messages (uncompressed). +* `rpc_server_responses_per_rpc` (histogram): Measures the number of messages received per RPC. Should be 1 for all non-streaming RPCs. 
## Example diff --git a/docs/sources/flow/reference/components/otelcol.receiver.vcenter.md b/docs/sources/flow/reference/components/otelcol.receiver.vcenter.md index a7f0f70ced05..d24741a59b9b 100644 --- a/docs/sources/flow/reference/components/otelcol.receiver.vcenter.md +++ b/docs/sources/flow/reference/components/otelcol.receiver.vcenter.md @@ -34,7 +34,7 @@ The full list of metrics that can be collected can be found in [vcenter receiver This receiver has been built to support ESXi and vCenter versions: -- 7.5 +- 8 - 7.0 - 6.7 diff --git a/docs/sources/shared/flow/reference/components/otelcol-tls-config-block.md b/docs/sources/shared/flow/reference/components/otelcol-tls-config-block.md index caf4d45001ae..beae95b5e7e9 100644 --- a/docs/sources/shared/flow/reference/components/otelcol-tls-config-block.md +++ b/docs/sources/shared/flow/reference/components/otelcol-tls-config-block.md @@ -12,20 +12,22 @@ headless: true The following arguments are supported: -Name | Type | Description | Default | Required ------------------------|------------|---------------------------------------------------------|-------------|--------- -`ca_file` | `string` | Path to the CA file. | | no -`ca_pem` | `string` | CA PEM-encoded text to validate the server with. | | no -`cert_file` | `string` | Path to the TLS certificate. | | no -`cert_pem` | `string` | Certificate PEM-encoded text for client authentication. | | no -`insecure_skip_verify` | `boolean` | Ignores insecure server TLS certificates. | | no -`insecure` | `boolean` | Disables TLS when connecting to the configured server. | | no -`key_file` | `string` | Path to the TLS certificate key. | | no -`key_pem` | `secret` | Key PEM-encoded text for client authentication. | | no -`max_version` | `string` | Maximum acceptable TLS version for connections. | `"TLS 1.3"` | no -`min_version` | `string` | Minimum acceptable TLS version for connections. 
| `"TLS 1.2"` | no -`reload_interval` | `duration` | The duration after which the certificate is reloaded. | `"0s"` | no -`server_name` | `string` | Verifies the hostname of server certificates when set. | | no +Name | Type | Description | Default | Required +-------------------------------|----------------|----------------------------------------------------------------------------------------------|-------------|--------- +`ca_file` | `string` | Path to the CA file. | | no +`ca_pem` | `string` | CA PEM-encoded text to validate the server with. | | no +`cert_file` | `string` | Path to the TLS certificate. | | no +`cert_pem` | `string` | Certificate PEM-encoded text for client authentication. | | no +`insecure_skip_verify` | `boolean` | Ignores insecure server TLS certificates. | | no +`include_system_ca_certs_pool` | `boolean` | Whether to load the system certificate authorities pool alongside the certificate authority. | `false` | no +`insecure` | `boolean` | Disables TLS when connecting to the configured server. | | no +`key_file` | `string` | Path to the TLS certificate key. | | no +`key_pem` | `secret` | Key PEM-encoded text for client authentication. | | no +`max_version` | `string` | Maximum acceptable TLS version for connections. | `"TLS 1.3"` | no +`min_version` | `string` | Minimum acceptable TLS version for connections. | `"TLS 1.2"` | no +`cipher_suites` | `list(string)` | A list of TLS cipher suites that the TLS transport can use. | `[]` | no +`reload_interval` | `duration` | The duration after which the certificate is reloaded. | `"0s"` | no +`server_name` | `string` | Verifies the hostname of server certificates when set. | | no If the server doesn't support TLS, you must set the `insecure` argument to `true`. 
@@ -38,3 +40,8 @@ The following pairs of arguments are mutually exclusive and can't both be set si * `ca_pem` and `ca_file` * `cert_pem` and `cert_file` * `key_pem` and `key_file` + +If `cipher_suites` is left blank, a safe default list is used. +See the [Go TLS documentation][golang-tls] for a list of supported cipher suites. + +[golang-tls]: https://go.dev/src/crypto/tls/cipher_suites.go \ No newline at end of file diff --git a/docs/sources/static/configuration/traces-config.md b/docs/sources/static/configuration/traces-config.md index 4ff3bfc85e2a..51b026d74fac 100644 --- a/docs/sources/static/configuration/traces-config.md +++ b/docs/sources/static/configuration/traces-config.md @@ -125,9 +125,16 @@ remote_write: # Maximum acceptable TLS version. # If not set, it is handled by crypto/tls - currently it is "1.3". [ max_version: | default = "" ] - # ReloadInterval specifies the duration after which the certificate will be reloaded. + # The duration after which the certificate will be reloaded. # If not set, it will never be reloaded. [ reload_interval: ] + # If true, load system CA certificates pool in addition to the certificates + # configured in this struct. + [ include_system_ca_certs_pool: ] + # A list of TLS cipher suites that the TLS transport can use. + # If left blank, a safe default list is used. + # See https://go.dev/src/crypto/tls/cipher_suites.go for a list of supported cipher suites. 
+ [ cipher_suites: ] # Controls TLS settings of the exporter's client: # https://prometheus.io/docs/prometheus/2.45/configuration/configuration/#tls_config diff --git a/go.mod b/go.mod index 20083bafb94e..bd466bed8759 100644 --- a/go.mod +++ b/go.mod @@ -9,19 +9,19 @@ retract ( ) require ( - cloud.google.com/go/pubsub v1.33.0 + cloud.google.com/go/pubsub v1.34.0 github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0-beta.1 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 github.com/Azure/go-autorest/autorest v0.11.29 - github.com/IBM/sarama v1.42.1 + github.com/IBM/sarama v1.43.0 github.com/Lusitaniae/apache_exporter v0.11.1-0.20220518131644-f9522724dab4 github.com/Masterminds/sprig/v3 v3.2.3 github.com/PuerkitoBio/rehttp v1.3.0 github.com/alecthomas/kingpin/v2 v2.4.0 github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 - github.com/aws/aws-sdk-go v1.45.25 // indirect - github.com/aws/aws-sdk-go-v2 v1.25.0 - github.com/aws/aws-sdk-go-v2/config v1.27.0 + github.com/aws/aws-sdk-go v1.50.27 // indirect + github.com/aws/aws-sdk-go-v2 v1.25.2 + github.com/aws/aws-sdk-go-v2/config v1.27.4 github.com/aws/aws-sdk-go-v2/service/s3 v1.49.0 github.com/bmatcuk/doublestar v1.3.4 github.com/burningalchemist/sql_exporter v0.0.0-20240103092044-466b38b6abc4 @@ -30,13 +30,13 @@ require ( github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf github.com/coreos/go-systemd/v22 v22.5.0 github.com/davidmparrott/kafka_exporter/v2 v2.0.1 - github.com/docker/docker v24.0.7+incompatible - github.com/docker/go-connections v0.4.0 + github.com/docker/docker v24.0.9+incompatible + github.com/docker/go-connections v0.5.0 github.com/drone/envsubst/v2 v2.0.0-20210730161058-179042472c46 github.com/fatih/color v1.15.0 github.com/fatih/structs v1.1.0 
github.com/fortytw2/leaktest v1.3.0 - github.com/fsnotify/fsnotify v1.6.0 + github.com/fsnotify/fsnotify v1.7.0 github.com/github/smimesign v0.2.0 github.com/go-git/go-git/v5 v5.11.0 github.com/go-kit/log v0.2.1 @@ -53,8 +53,8 @@ require ( github.com/google/go-jsonnet v0.18.0 github.com/google/pprof v0.0.0-20240117000934-35fc243c5815 github.com/google/renameio/v2 v2.0.0 - github.com/google/uuid v1.4.0 - github.com/gorilla/mux v1.8.0 + github.com/google/uuid v1.6.0 + github.com/gorilla/mux v1.8.1 github.com/grafana/ckit v0.0.0-20230906125525-c046c99a5c04 github.com/grafana/cloudflare-go v0.0.0-20230110200409-c627cf6792f2 github.com/grafana/dskit v0.0.0-20240104111617-ea101a3b86eb @@ -67,14 +67,14 @@ require ( github.com/grafana/river v0.3.1-0.20240123144725-960753160cd1 github.com/grafana/snowflake-prometheus-exporter v0.0.0-20221213150626-862cad8e9538 github.com/grafana/tail v0.0.0-20230510142333-77b18831edf0 - github.com/grafana/vmware_exporter v0.0.4-beta + github.com/grafana/vmware_exporter v0.0.5-beta github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 - github.com/hashicorp/consul/api v1.25.1 + github.com/hashicorp/consul/api v1.27.0 github.com/hashicorp/go-cleanhttp v0.5.2 github.com/hashicorp/go-discover v0.0.0-20230724184603-e89ebd1b2f65 github.com/hashicorp/go-multierror v1.1.1 github.com/hashicorp/golang-lru v1.0.2 - github.com/hashicorp/golang-lru/v2 v2.0.5 + github.com/hashicorp/golang-lru/v2 v2.0.7 github.com/hashicorp/vault/api v1.10.0 github.com/hashicorp/vault/api/auth/approle v0.2.0 github.com/hashicorp/vault/api/auth/aws v0.2.0 @@ -86,14 +86,14 @@ require ( github.com/heroku/x v0.0.61 github.com/iamseth/oracledb_exporter v0.0.0-20230918193147-95e16f21ceee 
github.com/influxdata/go-syslog/v3 v3.0.1-0.20230911200830-875f5bc594a4 - github.com/jaegertracing/jaeger v1.50.0 + github.com/jaegertracing/jaeger v1.54.0 github.com/jmespath/go-jmespath v0.4.0 github.com/json-iterator/go v1.1.12 - github.com/klauspost/compress v1.17.4 + github.com/klauspost/compress v1.17.7 github.com/lib/pq v1.10.9 github.com/mackerelio/go-osstat v0.2.3 github.com/miekg/dns v1.1.56 - github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4 + github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c github.com/mitchellh/reflectwalk v1.0.2 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f github.com/ncabatoff/process-exporter v0.7.10 @@ -102,34 +102,33 @@ require ( github.com/oklog/run v1.1.0 github.com/olekukonko/tablewriter v0.0.5 github.com/oliver006/redis_exporter v1.54.0 - github.com/open-telemetry/opentelemetry-collector-contrib/connector/servicegraphconnector v0.87.0 - github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.87.0 - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.87.0 - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter v0.87.0 - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter v0.87.0 - github.com/open-telemetry/opentelemetry-collector-contrib/extension/basicauthextension v0.87.0 - github.com/open-telemetry/opentelemetry-collector-contrib/extension/bearertokenauthextension v0.87.0 - github.com/open-telemetry/opentelemetry-collector-contrib/extension/headerssetterextension v0.87.0 - github.com/open-telemetry/opentelemetry-collector-contrib/extension/jaegerremotesampling v0.87.0 - 
github.com/open-telemetry/opentelemetry-collector-contrib/extension/oauth2clientauthextension v0.87.0 - github.com/open-telemetry/opentelemetry-collector-contrib/extension/sigv4authextension v0.87.0 - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.87.0 - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.87.0 - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/loki v0.87.0 - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.87.0 - github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.87.0 - github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.87.0 - github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.87.0 - github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.87.0 - github.com/open-telemetry/opentelemetry-collector-contrib/processor/servicegraphprocessor v0.87.0 - github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanmetricsprocessor v0.87.0 - github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanprocessor v0.87.0 - github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.87.0 - github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.87.0 - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.87.0 - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.87.0 - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver v0.87.0 - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.87.0 + 
github.com/open-telemetry/opentelemetry-collector-contrib/connector/servicegraphconnector v0.96.0 + github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.96.0 + github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.96.0 + github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter v0.96.0 + github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter v0.96.0 + github.com/open-telemetry/opentelemetry-collector-contrib/extension/basicauthextension v0.96.0 + github.com/open-telemetry/opentelemetry-collector-contrib/extension/bearertokenauthextension v0.96.0 + github.com/open-telemetry/opentelemetry-collector-contrib/extension/headerssetterextension v0.96.0 + github.com/open-telemetry/opentelemetry-collector-contrib/extension/jaegerremotesampling v0.96.0 + github.com/open-telemetry/opentelemetry-collector-contrib/extension/oauth2clientauthextension v0.96.0 + github.com/open-telemetry/opentelemetry-collector-contrib/extension/sigv4authextension v0.96.0 + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.96.0 + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.96.0 + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/loki v0.96.0 + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.96.0 + github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.96.0 + github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.96.0 + github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.96.0 + github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.96.0 + 
github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanmetricsprocessor v0.95.0 + github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanprocessor v0.96.0 + github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.96.0 + github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.96.0 + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.96.0 + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.96.0 + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver v0.96.0 + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.96.0 github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e github.com/opentracing-contrib/go-stdlib v1.0.0 // indirect github.com/opentracing/opentracing-go v1.2.0 @@ -147,9 +146,9 @@ require ( github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.66.0 github.com/prometheus-operator/prometheus-operator/pkg/client v0.66.0 github.com/prometheus/blackbox_exporter v0.24.1-0.20230623125439-bd22efa1c900 - github.com/prometheus/client_golang v1.18.0 - github.com/prometheus/client_model v0.5.0 - github.com/prometheus/common v0.46.0 + github.com/prometheus/client_golang v1.19.0 + github.com/prometheus/client_model v0.6.0 + github.com/prometheus/common v0.48.0 github.com/prometheus/consul_exporter v0.8.0 github.com/prometheus/memcached_exporter v0.13.0 github.com/prometheus/mysqld_exporter v0.14.0 @@ -160,13 +159,13 @@ require ( github.com/prometheus/statsd_exporter v0.22.8 github.com/richardartoul/molecule v1.0.1-0.20221107223329-32cfee06a052 github.com/rs/cors v1.10.1 - 
github.com/shirou/gopsutil/v3 v3.23.9 + github.com/shirou/gopsutil/v3 v3.24.1 github.com/sijms/go-ora/v2 v2.7.6 github.com/sirupsen/logrus v1.9.3 github.com/spaolacci/murmur3 v1.1.0 - github.com/spf13/cobra v1.7.0 - github.com/stretchr/testify v1.8.4 - github.com/testcontainers/testcontainers-go v0.25.0 + github.com/spf13/cobra v1.8.0 + github.com/stretchr/testify v1.9.0 + github.com/testcontainers/testcontainers-go v0.27.0 github.com/testcontainers/testcontainers-go/modules/k3s v0.0.0-20230615142642-c175df34bd1d github.com/uber/jaeger-client-go v2.30.0+incompatible github.com/vincent-petithory/dataurl v1.0.0 @@ -175,77 +174,76 @@ require ( github.com/wk8/go-ordered-map v0.2.0 github.com/xdg-go/scram v1.1.2 github.com/zeebo/xxh3 v1.0.2 - go.opentelemetry.io/collector v0.87.0 - go.opentelemetry.io/collector/component v0.87.0 - go.opentelemetry.io/collector/config/configauth v0.87.0 - go.opentelemetry.io/collector/config/configcompression v0.87.0 - go.opentelemetry.io/collector/config/configgrpc v0.87.0 - go.opentelemetry.io/collector/config/confighttp v0.87.0 - go.opentelemetry.io/collector/config/confignet v0.87.0 - go.opentelemetry.io/collector/config/configopaque v0.87.0 - go.opentelemetry.io/collector/config/configtelemetry v0.87.0 - go.opentelemetry.io/collector/config/configtls v0.87.0 - go.opentelemetry.io/collector/confmap v0.87.0 - go.opentelemetry.io/collector/connector v0.87.0 - go.opentelemetry.io/collector/consumer v0.87.0 - go.opentelemetry.io/collector/exporter v0.87.0 - go.opentelemetry.io/collector/exporter/loggingexporter v0.87.0 - go.opentelemetry.io/collector/exporter/otlpexporter v0.87.0 - go.opentelemetry.io/collector/exporter/otlphttpexporter v0.87.0 - go.opentelemetry.io/collector/extension v0.87.0 - go.opentelemetry.io/collector/extension/auth v0.87.0 - go.opentelemetry.io/collector/featuregate 
v1.0.0-rcv0016 - go.opentelemetry.io/collector/otelcol v0.87.0 - go.opentelemetry.io/collector/pdata v1.0.0-rcv0016 - go.opentelemetry.io/collector/processor v0.87.0 - go.opentelemetry.io/collector/processor/batchprocessor v0.87.0 - go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.87.0 - go.opentelemetry.io/collector/receiver v0.87.0 - go.opentelemetry.io/collector/receiver/otlpreceiver v0.87.0 - go.opentelemetry.io/collector/semconv v0.87.0 - go.opentelemetry.io/collector/service v0.87.0 + go.opentelemetry.io/collector v0.96.0 + go.opentelemetry.io/collector/component v0.96.0 + go.opentelemetry.io/collector/config/configauth v0.96.0 + go.opentelemetry.io/collector/config/configcompression v0.96.0 + go.opentelemetry.io/collector/config/configgrpc v0.96.0 + go.opentelemetry.io/collector/config/confighttp v0.96.0 + go.opentelemetry.io/collector/config/confignet v0.96.0 + go.opentelemetry.io/collector/config/configopaque v1.3.0 + go.opentelemetry.io/collector/config/configtelemetry v0.96.0 + go.opentelemetry.io/collector/config/configtls v0.96.0 + go.opentelemetry.io/collector/confmap v0.96.0 + go.opentelemetry.io/collector/connector v0.96.0 + go.opentelemetry.io/collector/consumer v0.96.0 + go.opentelemetry.io/collector/exporter v0.96.0 + go.opentelemetry.io/collector/exporter/loggingexporter v0.96.0 + go.opentelemetry.io/collector/exporter/otlpexporter v0.96.0 + go.opentelemetry.io/collector/exporter/otlphttpexporter v0.96.0 + go.opentelemetry.io/collector/extension v0.96.0 + go.opentelemetry.io/collector/extension/auth v0.96.0 + go.opentelemetry.io/collector/featuregate v1.3.0 + go.opentelemetry.io/collector/otelcol v0.96.0 + go.opentelemetry.io/collector/pdata v1.3.0 + go.opentelemetry.io/collector/processor v0.96.0 + go.opentelemetry.io/collector/processor/batchprocessor v0.96.0 + go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.96.0 + go.opentelemetry.io/collector/receiver v0.96.0 + 
go.opentelemetry.io/collector/receiver/otlpreceiver v0.96.0 + go.opentelemetry.io/collector/semconv v0.96.0 + go.opentelemetry.io/collector/service v0.96.0 go.opentelemetry.io/contrib/instrumentation/github.com/gorilla/mux/otelmux v0.45.0 - go.opentelemetry.io/otel v1.21.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 - go.opentelemetry.io/otel/exporters/prometheus v0.42.0 - go.opentelemetry.io/otel/metric v1.21.0 - go.opentelemetry.io/otel/sdk v1.21.0 - go.opentelemetry.io/otel/sdk/metric v1.20.0 - go.opentelemetry.io/otel/trace v1.21.0 - go.opentelemetry.io/proto/otlp v1.0.0 + go.opentelemetry.io/otel v1.24.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.24.0 + go.opentelemetry.io/otel/exporters/prometheus v0.46.0 + go.opentelemetry.io/otel/metric v1.24.0 + go.opentelemetry.io/otel/sdk v1.24.0 + go.opentelemetry.io/otel/sdk/metric v1.24.0 + go.opentelemetry.io/otel/trace v1.24.0 + go.opentelemetry.io/proto/otlp v1.1.0 go.uber.org/atomic v1.11.0 - go.uber.org/goleak v1.2.1 + go.uber.org/goleak v1.3.0 go.uber.org/multierr v1.11.0 - go.uber.org/zap v1.26.0 + go.uber.org/zap v1.27.0 golang.org/x/crypto v0.20.0 - golang.org/x/exp v0.0.0-20231206192017-f3f8817b8deb + golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc golang.org/x/net v0.21.0 - golang.org/x/oauth2 v0.16.0 + golang.org/x/oauth2 v0.17.0 golang.org/x/sys v0.17.0 golang.org/x/text v0.14.0 golang.org/x/time v0.5.0 - google.golang.org/api v0.152.0 - google.golang.org/grpc v1.61.0 + google.golang.org/api v0.155.0 + google.golang.org/grpc v1.62.0 google.golang.org/protobuf v1.33.0 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 gotest.tools v2.2.0+incompatible - k8s.io/api v0.28.3 - k8s.io/apiextensions-apiserver v0.28.0 - k8s.io/client-go v0.28.3 - k8s.io/component-base v0.28.1 - k8s.io/klog/v2 v2.100.1 - k8s.io/utils v0.0.0-20230726121419-3b25d923346b - sigs.k8s.io/controller-runtime v0.16.2 - sigs.k8s.io/yaml v1.3.0 + k8s.io/api v0.29.2 + k8s.io/apiextensions-apiserver v0.29.0 
+ k8s.io/client-go v0.29.2 + k8s.io/component-base v0.29.0 + k8s.io/klog/v2 v2.110.1 + k8s.io/utils v0.0.0-20240102154912-e7106e64919e + sigs.k8s.io/controller-runtime v0.17.2 + sigs.k8s.io/yaml v1.4.0 ) require ( - cloud.google.com/go v0.111.0 // indirect + cloud.google.com/go v0.112.0 // indirect cloud.google.com/go/compute v1.23.3 // indirect cloud.google.com/go/compute/metadata v0.2.4-0.20230617002413-005d2dfb6b68 // indirect cloud.google.com/go/iam v1.1.5 // indirect - contrib.go.opencensus.io/exporter/prometheus v0.4.2 // indirect github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect github.com/99designs/keyring v1.2.2 // indirect github.com/AlekSi/pointer v1.1.0 // indirect @@ -277,31 +275,30 @@ require ( github.com/Microsoft/hcsshim v0.11.4 // indirect github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 // indirect - github.com/alecthomas/participle/v2 v2.1.0 // indirect + github.com/alecthomas/participle/v2 v2.1.1 // indirect github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 // indirect github.com/andybalholm/brotli v1.0.5 // indirect - github.com/antonmedv/expr v1.15.3 // indirect github.com/apache/arrow/go/v12 v12.0.1 // indirect github.com/apache/thrift v0.19.0 // indirect github.com/armon/go-metrics v0.4.1 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/avvmoto/buf-readerat v0.0.0-20171115124131-a17c8cb89270 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.0 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.17.0 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.0 + github.com/aws/aws-sdk-go-v2/credentials v1.17.4 // indirect + 
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.2 github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.0 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.0 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.0 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.2 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.2 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.0 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.0 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.1 // indirect github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.0 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.0 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.2 // indirect github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.0 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.19.0 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.22.0 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.27.0 // indirect - github.com/aws/smithy-go v1.20.0 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.20.1 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.1 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.28.1 // indirect + github.com/aws/smithy-go v1.20.1 // indirect github.com/beevik/ntp v1.3.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver v3.5.2-0.20180723201105-3c1074078d32+incompatible // indirect @@ -314,7 +311,7 @@ require ( github.com/cespare/xxhash v1.1.0 // indirect github.com/checkpoint-restore/go-criu/v5 
v5.3.0 // indirect github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58 // indirect - github.com/cncf/xds/go v0.0.0-20231109132714-523115ebc101 // indirect + github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa // indirect github.com/containerd/cgroups v1.1.0 // indirect github.com/containerd/console v1.0.3 // indirect github.com/containerd/containerd v1.7.11 // indirect @@ -337,7 +334,7 @@ require ( github.com/docker/go-units v0.5.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/dvsekhvalnov/jose2go v1.6.0 // indirect - github.com/eapache/go-resiliency v1.4.0 // indirect + github.com/eapache/go-resiliency v1.6.0 // indirect github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 // indirect github.com/eapache/queue v1.1.0 // indirect github.com/edsrzf/mmap-go v1.1.0 // indirect @@ -347,14 +344,14 @@ require ( github.com/ema/qdisc v1.0.0 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/emirpasic/gods v1.18.1 // indirect - github.com/envoyproxy/go-control-plane v0.11.1 // indirect - github.com/envoyproxy/protoc-gen-validate v1.0.2 // indirect + github.com/envoyproxy/go-control-plane v0.12.0 // indirect + github.com/envoyproxy/protoc-gen-validate v1.0.4 // indirect github.com/euank/go-kmsg-parser v2.0.0+incompatible // indirect - github.com/evanphx/json-patch/v5 v5.6.0 // indirect + github.com/evanphx/json-patch/v5 v5.8.0 // indirect github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb // indirect github.com/fatih/camelcase v1.0.0 // indirect github.com/felixge/fgprof v0.9.3 // indirect - github.com/felixge/httpsnoop v1.0.3 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect github.com/form3tech-oss/jwt-go v3.2.5+incompatible // 
indirect github.com/gabriel-vasile/mimetype v1.4.2 // indirect github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect @@ -400,12 +397,12 @@ require ( github.com/grafana/gomemcache v0.0.0-20231204155601-7de47a8c3cb0 // indirect github.com/grafana/loki/pkg/push v0.0.0-20231212100434-384e5c2dc872 // k180 branch github.com/grobie/gomemcache v0.0.0-20230213081705-239240bbc445 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 // indirect github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c // indirect github.com/hashicorp/cronexpr v1.1.2 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-envparse v0.1.0 // indirect - github.com/hashicorp/go-hclog v1.5.0 // indirect + github.com/hashicorp/go-hclog v1.6.2 // indirect github.com/hashicorp/go-immutable-radix v1.3.1 // indirect github.com/hashicorp/go-msgpack v0.5.5 // indirect github.com/hashicorp/go-retryablehttp v0.7.4 // indirect @@ -478,7 +475,7 @@ require ( github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mna/redisc v1.3.2 // indirect - github.com/moby/patternmatcher v0.5.0 // indirect + github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/sys/mountinfo v0.6.2 // indirect github.com/moby/sys/sequential v0.5.0 // indirect github.com/moby/term v0.5.0 // indirect @@ -486,24 +483,24 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/montanaflynn/stats v0.7.0 // indirect github.com/morikuni/aec v1.0.0 // indirect - github.com/mostynb/go-grpc-compression v1.2.1 // indirect + github.com/mostynb/go-grpc-compression v1.2.2 // indirect 
github.com/mrunalp/fileutils v0.5.1 // indirect github.com/mtibben/percent v0.2.1 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/ncabatoff/go-seq v0.0.0-20180805175032-b08ef85ed833 // indirect github.com/nicolai86/scaleway-sdk v1.10.2-0.20180628010248-798f60e20bb2 // indirect github.com/oklog/ulid v1.3.1 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.87.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.87.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.87.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.87.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.87.0 - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.87.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.87.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus v0.87.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.87.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.96.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.96.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.96.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.96.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.96.0 + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry 
v0.96.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.96.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus v0.96.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.96.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/image-spec v1.1.0-rc4 // indirect + github.com/opencontainers/image-spec v1.1.0-rc5 // indirect github.com/opencontainers/runc v1.1.12 // indirect github.com/opencontainers/runtime-spec v1.1.0-rc.1 // indirect github.com/opencontainers/selinux v1.11.0 // indirect @@ -512,8 +509,8 @@ require ( github.com/ovh/go-ovh v1.4.3 // indirect github.com/packethost/packngo v0.1.1-0.20180711074735-b9cb5096f54c // indirect github.com/patrickmn/go-cache v2.1.0+incompatible // indirect - github.com/pelletier/go-toml/v2 v2.0.8 // indirect - github.com/pierrec/lz4/v4 v4.1.18 // indirect + github.com/pelletier/go-toml/v2 v2.1.0 // indirect + github.com/pierrec/lz4/v4 v4.1.21 // indirect github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect github.com/prometheus-community/go-runit v0.1.0 // indirect @@ -539,16 +536,15 @@ require ( github.com/snowflakedb/gosnowflake v1.7.2-0.20240103203018-f1d625f17408 // indirect github.com/softlayer/softlayer-go v0.0.0-20180806151055-260589d94c7d // indirect github.com/soheilhy/cmux v0.1.5 // indirect - github.com/spf13/afero v1.9.5 // indirect - github.com/spf13/cast v1.5.1 // indirect - github.com/spf13/jwalterweatherman v1.1.0 // indirect + github.com/spf13/afero v1.11.0 // indirect + github.com/spf13/cast v1.6.0 // indirect 
github.com/spf13/pflag v1.0.5 - github.com/spf13/viper v1.16.0 // indirect - github.com/stretchr/objx v0.5.0 // indirect - github.com/subosito/gotenv v1.4.2 // indirect + github.com/spf13/viper v1.18.2 // indirect + github.com/stretchr/objx v0.5.2 // indirect + github.com/subosito/gotenv v1.6.0 // indirect github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect github.com/tencentcloud/tencentcloud-sdk-go v1.0.162 // indirect - github.com/tg123/go-htpasswd v1.2.1 // indirect + github.com/tg123/go-htpasswd v1.2.2 // indirect github.com/tilinna/clock v1.1.0 github.com/tklauser/go-sysconf v0.3.12 // indirect github.com/tklauser/numcpus v0.6.1 // indirect @@ -557,7 +553,7 @@ require ( github.com/vertica/vertica-sql-go v1.3.3 // indirect github.com/vishvananda/netlink v1.2.1-beta.2 // indirect github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f // indirect - github.com/vmware/govmomi v0.32.0 // indirect + github.com/vmware/govmomi v0.36.1 // indirect github.com/vultr/govultr/v2 v2.17.2 // indirect github.com/willf/bitset v1.1.11 // indirect github.com/willf/bloom v2.0.3+incompatible // indirect @@ -573,28 +569,27 @@ require ( github.com/xo/dburl v0.20.0 // indirect github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d // indirect github.com/yusufpapurcu/wmi v1.2.3 // indirect - go.etcd.io/etcd/api/v3 v3.5.9 // indirect - go.etcd.io/etcd/client/pkg/v3 v3.5.9 // indirect - go.etcd.io/etcd/client/v3 v3.5.9 // indirect + go.etcd.io/etcd/api/v3 v3.5.10 // indirect + go.etcd.io/etcd/client/pkg/v3 v3.5.10 // indirect + go.etcd.io/etcd/client/v3 v3.5.10 // indirect go.mongodb.org/mongo-driver v1.12.0 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/collector/config/internal v0.87.0 // indirect - 
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 // indirect - go.opentelemetry.io/contrib/propagators/b3 v1.19.0 // indirect - go.opentelemetry.io/otel/bridge/opencensus v0.42.0 // indirect + go.opentelemetry.io/collector/config/internal v0.96.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect + go.opentelemetry.io/otel/bridge/opencensus v1.24.0 // indirect go4.org/netipx v0.0.0-20230125063823-8449b0a6169f // indirect golang.org/x/mod v0.14.0 // indirect - golang.org/x/sync v0.5.0 // indirect + golang.org/x/sync v0.6.0 // indirect golang.org/x/term v0.17.0 // indirect - golang.org/x/tools v0.16.0 + golang.org/x/tools v0.16.1 golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect gonum.org/v1/gonum v0.14.0 // indirect google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto v0.0.0-20231212172506-995d672761c0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20231211222908-989df2bf70f3 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917 // indirect + google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240125205218-1f4bbc51befe // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240125205218-1f4bbc51befe // indirect gopkg.in/alecthomas/kingpin.v2 v2.2.6 // indirect gopkg.in/fsnotify/fsnotify.v1 v1.4.7 // indirect gopkg.in/inf.v0 v0.9.1 // indirect @@ -604,7 +599,7 @@ require ( howett.net/plist v1.0.0 // indirect k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.3.0 // 
indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect ) require github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab // indirect @@ -616,13 +611,17 @@ require ( github.com/grafana/jfr-parser/pprof v0.0.0-20240126072739-986e71dc0361 github.com/grafana/jsonparser v0.0.0-20240209175146-098958973a2d github.com/natefinch/atomic v1.0.1 - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.87.0 - github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.87.0 - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.87.0 - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/vcenterreceiver v0.87.0 - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.42.0 + github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.96.0 + github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.96.0 + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.96.0 + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/vcenterreceiver v0.96.0 + go.opentelemetry.io/collector/config/configretry v0.96.0 + go.opentelemetry.io/collector/confmap/converter/expandconverter v0.96.0 + go.opentelemetry.io/collector/confmap/provider/fileprovider v0.96.0 + go.opentelemetry.io/collector/confmap/provider/yamlprovider v0.96.0 + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.24.0 golang.org/x/crypto/x509roots/fallback v0.0.0-20240208163226-62c9f1799c91 - k8s.io/apimachinery v0.28.3 + k8s.io/apimachinery v0.29.2 ) require ( @@ -630,7 +629,7 @@ require ( github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4 v4.2.1 // indirect 
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2 v2.2.1 // indirect github.com/DataDog/sketches-go v1.4.4 // indirect - github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.20.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.21.0 // indirect github.com/Shopify/sarama v1.38.1 // indirect github.com/Showmax/go-fqdn v1.0.0 // indirect github.com/Workiva/go-datastructures v1.1.0 // indirect @@ -650,43 +649,55 @@ require ( github.com/containerd/log v0.1.0 // indirect github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc // indirect github.com/drone/envsubst v1.0.3 // indirect + github.com/expr-lang/expr v1.16.1 // indirect github.com/go-jose/go-jose/v3 v3.0.3 // indirect + github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 // indirect github.com/golang-jwt/jwt/v5 v5.0.0 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/grafana/jfr-parser v0.8.0 // indirect + github.com/hashicorp/go-version v1.6.0 // indirect github.com/hetznercloud/hcloud-go/v2 v2.4.0 // indirect github.com/influxdata/tdigest v0.0.2-0.20210216194612-fc98d27c9e8b // indirect github.com/julienschmidt/httprouter v1.3.0 // indirect - github.com/knadh/koanf/v2 v2.0.1 // indirect + github.com/knadh/koanf/v2 v2.1.0 // indirect github.com/lightstep/go-expohisto v1.0.0 // indirect github.com/metalmatze/signal v0.0.0-20210307161603-1c9aa721a97a // indirect github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 // indirect github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3 // indirect github.com/mitchellh/hashstructure/v2 v2.0.2 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.87.0 // indirect - 
github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.87.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.87.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka v0.87.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.87.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.87.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.96.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.96.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.96.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka v0.96.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.96.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure v0.96.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.96.0 // indirect github.com/openshift/api v3.9.0+incompatible // indirect github.com/openshift/client-go v0.0.0-20210521082421-73d9475a9142 // indirect github.com/pjbgf/sha1cd v0.3.0 // indirect github.com/prometheus-community/prom-label-proxy v0.6.0 // indirect + github.com/relvacode/iso8601 v1.4.0 // indirect + github.com/sagikazarmark/locafero v0.4.0 // indirect + github.com/sagikazarmark/slog-shim v0.1.0 // indirect github.com/sercand/kuberesolver/v5 v5.1.1 // indirect github.com/skeema/knownhosts v1.2.1 // indirect github.com/sony/gobreaker v0.5.0 // indirect + github.com/sourcegraph/conc 
v0.3.0 // indirect github.com/tidwall/gjson v1.10.2 // indirect github.com/tidwall/match v1.1.1 // indirect github.com/tidwall/pretty v1.2.0 // indirect github.com/tidwall/tinylru v1.1.0 // indirect github.com/tidwall/wal v1.1.7 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.42.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.42.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v0.42.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.19.0 // indirect + go.opentelemetry.io/collector/confmap/provider/envprovider v0.96.0 // indirect + go.opentelemetry.io/collector/confmap/provider/httpprovider v0.96.0 // indirect + go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.96.0 // indirect + go.opentelemetry.io/contrib/config v0.4.0 // indirect + go.opentelemetry.io/contrib/propagators/b3 v1.24.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.24.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.24.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.24.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.24.0 // indirect ) // NOTE: replace directives below must always be *temporary*. @@ -705,7 +716,8 @@ require ( // Replace directives from Prometheus replace ( k8s.io/klog => github.com/simonpasquier/klog-gokit v0.3.0 - k8s.io/klog/v2 => github.com/simonpasquier/klog-gokit/v3 v3.3.0 + // Prometheus uses v3.3.0, but we will get a compilation error from another module if we use it. 
+ k8s.io/klog/v2 => github.com/simonpasquier/klog-gokit/v3 v3.4.0 ) // TODO(marctc): remove replace directive once: @@ -756,8 +768,8 @@ replace ( // https://github.com/open-telemetry/opentelemetry-collector/pull/7696 // https://github.com/open-telemetry/opentelemetry-collector/issues/4970 replace ( - go.opentelemetry.io/collector/otelcol => github.com/grafana/opentelemetry-collector/otelcol v0.0.0-20231018134914-c0109e052230 - go.opentelemetry.io/collector/service => github.com/grafana/opentelemetry-collector/service v0.0.0-20231018134914-c0109e052230 + go.opentelemetry.io/collector/otelcol => github.com/grafana/opentelemetry-collector/otelcol v0.0.0-20240321103955-8919a1c85cbe + go.opentelemetry.io/collector/service => github.com/grafana/opentelemetry-collector/service v0.0.0-20240321103955-8919a1c85cbe ) // Required to avoid an ambiguous import with github.com/tencentcloud/tencentcloud-sdk-go diff --git a/go.sum b/go.sum index 7f5d05326ee6..1fe561b288b1 100644 --- a/go.sum +++ b/go.sum @@ -4,7 +4,6 @@ cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7h cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= @@ -17,7 +16,6 @@ cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOY cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= cloud.google.com/go v0.72.0/go.mod 
h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= @@ -28,8 +26,8 @@ cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aD cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= -cloud.google.com/go v0.111.0 h1:YHLKNupSD1KqjDbQ3+LVdQ81h/UJbJyZG203cEfnQgM= -cloud.google.com/go v0.111.0/go.mod h1:0mibmpKP1TyOOFYQY5izo0LnT+ecvOQ0Sg3OdmMiNRU= +cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM= +cloud.google.com/go v0.112.0/go.mod h1:3jEEVwZ/MHU4djK5t5RHuKOA/GbLddgTdVubX1qnPD4= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -50,20 +48,17 @@ cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2k cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/pubsub v1.33.0 h1:6SPCPvWav64tj0sVX/+npCBKhUi/UjJehy9op/V3p2g= -cloud.google.com/go/pubsub v1.33.0/go.mod h1:f+w71I33OMyxf9VpMVcZbnG5KSUkCOUHYpFd5U1GdRc= +cloud.google.com/go/pubsub v1.34.0 
h1:ZtPbfwfi5rLaPeSvDC29fFoE20/tQvGrUS6kVJZJvkU= +cloud.google.com/go/pubsub v1.34.0/go.mod h1:alj4l4rBg+N3YTFDDC+/YyFTs6JAjam2QfYsddcAW4c= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= code.cloudfoundry.org/clock v1.0.0/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8= collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= connectrpc.com/connect v1.14.0 h1:PDS+J7uoz5Oui2VEOMcfz6Qft7opQM9hPiKvtGC01pA= connectrpc.com/connect v1.14.0/go.mod h1:uoAq5bmhhn43TwhaKdGKN/bZcGtzPW1v+ngDTn5u+8s= -contrib.go.opencensus.io/exporter/prometheus v0.4.2 h1:sqfsYl5GIY/L570iT+l93ehxaWJs2/OwXtiWwew3oAg= -contrib.go.opencensus.io/exporter/prometheus v0.4.2/go.mod h1:dvEHbiKmgvbr5pjaF9fpw1KeYcjrnC1J8B+JKjsZyRQ= dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= @@ -187,12 +182,12 @@ github.com/DmitriyVTitov/size v1.5.0 h1:/PzqxYrOyOUX1BXj6J9OuVRVGe+66VL4D9FlUaW5 github.com/DmitriyVTitov/size v1.5.0/go.mod h1:le6rNI4CoLQV1b9gzp1+3d7hMAD/uu2QcJ+aYbNgiU0= github.com/GehirnInc/crypt v0.0.0-20200316065508-bb7000b8a962 h1:KeNholpO2xKjgaaSyd+DyQRrsQjhbSeS7qe4nEw8aQw= github.com/GehirnInc/crypt v0.0.0-20200316065508-bb7000b8a962/go.mod h1:kC29dT1vFpj7py2OvG1khBdQpo3kInWP+6QipLbdngo= 
-github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.20.0 h1:tk85AYGwOf6VNtoOQi8w/kVDi2vmPxp3/OU2FsUpdcA= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.20.0/go.mod h1:Xx0VKh7GJ4si3rmElbh19Mejxz68ibWg/J30ZOMrqzU= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.21.0 h1:aNyyrkRcLMWFum5qgYbXl6Ut+MMOmfH/kLjZJ5YJP/I= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.21.0/go.mod h1:BEOBnuYVyPt9wxVRQqqpKUK9FXVcL2+LOjZ8apLa9ao= github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM= github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= -github.com/IBM/sarama v1.42.1 h1:wugyWa15TDEHh2kvq2gAy1IHLjEjuYOYgXz/ruC/OSQ= -github.com/IBM/sarama v1.42.1/go.mod h1:Xxho9HkHd4K/MDUo/T/sOqwtX/17D33++E9Wib6hUdQ= +github.com/IBM/sarama v1.43.0 h1:YFFDn8mMI2QL0wOrG0J2sFoVIAFl7hS9JQi2YZsXtJc= +github.com/IBM/sarama v1.43.0/go.mod h1:zlE6HEbC/SMQ9mhEYaF7nNLYOUyrs0obySKCckWP9BM= github.com/Jeffail/gabs v1.1.0/go.mod h1:6xMvQMK4k33lb7GUUpaAPh6nKMmemQeg5d4gn7/bOXc= github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c h1:RGWPOewvKIROun94nF7v2cua9qP+thov/7M50KEoeSU= github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk= @@ -259,8 +254,8 @@ github.com/alecthomas/assert/v2 v2.3.0 h1:mAsH2wmvjsuvyBvAmCtm7zFsBlb8mIHx5ySLVd github.com/alecthomas/assert/v2 v2.3.0/go.mod h1:pXcQ2Asjp247dahGEmsZ6ru0UVwnkhktn7S0bBDLxvQ= github.com/alecthomas/kingpin/v2 v2.4.0 h1:f48lwail6p8zpO1bC4TxtqACaGqHYA22qkHjHpqDjYY= github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= -github.com/alecthomas/participle/v2 v2.1.0 h1:z7dElHRrOEEq45F2TG5cbQihMtNTv8vwldytDj7Wrz4= 
-github.com/alecthomas/participle/v2 v2.1.0/go.mod h1:Y1+hAs8DHPmc3YUFzqllV+eSQ9ljPTk0ZkPMtEdAx2c= +github.com/alecthomas/participle/v2 v2.1.1 h1:hrjKESvSqGHzRb4yW1ciisFJ4p3MGYih6icjJvbsmV8= +github.com/alecthomas/participle/v2 v2.1.1/go.mod h1:Y1+hAs8DHPmc3YUFzqllV+eSQ9ljPTk0ZkPMtEdAx2c= github.com/alecthomas/repr v0.2.0 h1:HAzS41CIzNW5syS8Mf9UwXhNH1J9aix/BvDRf1Ml2Yk= github.com/alecthomas/repr v0.2.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -282,8 +277,6 @@ github.com/andybalholm/brotli v1.0.5/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHG github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/antonmedv/expr v1.15.3 h1:q3hOJZNvLvhqE8OHBs1cFRdbXFNKuA+bHmRaI+AmRmI= -github.com/antonmedv/expr v1.15.3/go.mod h1:0E/6TxnOlRNp81GMzX9QfDPAmHo2Phg00y4JUv1ihsE= github.com/apache/arrow/go/v12 v12.0.1 h1:JsR2+hzYYjgSUkBSaahpqCetqZMr76djX80fF/DiJbg= github.com/apache/arrow/go/v12 v12.0.1/go.mod h1:weuTY7JvTG/HDPtMQxEUp7pU73vkLWMLpY67QwZ/WWw= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= @@ -317,30 +310,30 @@ github.com/aws/aws-sdk-go v1.30.27/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZve github.com/aws/aws-sdk-go v1.34.34/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.38.68/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= 
-github.com/aws/aws-sdk-go v1.45.25 h1:c4fLlh5sLdK2DCRTY1z0hyuJZU4ygxX8m1FswL6/nF4= -github.com/aws/aws-sdk-go v1.45.25/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go v1.50.27 h1:96ifhrSuja+AzdP3W/T2337igqVQ2FcSIJYkk+0rCeA= +github.com/aws/aws-sdk-go v1.50.27/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/aws/aws-sdk-go-v2 v1.7.0/go.mod h1:tb9wi5s61kTDA5qCkcDbt3KRVV74GGslQkl/DRdX/P4= github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= -github.com/aws/aws-sdk-go-v2 v1.25.0 h1:sv7+1JVJxOu/dD/sz/csHX7jFqmP001TIY7aytBWDSQ= -github.com/aws/aws-sdk-go-v2 v1.25.0/go.mod h1:G104G1Aho5WqF+SR3mDIobTABQzpYV0WxMsKxlMggOA= +github.com/aws/aws-sdk-go-v2 v1.25.2 h1:/uiG1avJRgLGiQM9X3qJM8+Qa6KRGK5rRPuXE0HUM+w= +github.com/aws/aws-sdk-go-v2 v1.25.2/go.mod h1:Evoc5AsmtveRt1komDwIsjHFyrP5tDuF1D1U+6z6pNo= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.0 h1:2UO6/nT1lCZq1LqM67Oa4tdgP1CvL1sLSxvuD+VrOeE= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.0/go.mod h1:5zGj2eA85ClyedTDK+Whsu+w9yimnVIZvhvBKrDquM8= github.com/aws/aws-sdk-go-v2/config v1.8.3/go.mod h1:4AEiLtAb8kLs7vgw2ZV3p2VZ1+hBavOc84hqxVNpCyw= -github.com/aws/aws-sdk-go-v2/config v1.27.0 h1:J5sdGCAHuWKIXLeXiqr8II/adSvetkx0qdZwdbXXpb0= -github.com/aws/aws-sdk-go-v2/config v1.27.0/go.mod h1:cfh8v69nuSUohNFMbIISP2fhmblGmYEOKs5V53HiHnk= +github.com/aws/aws-sdk-go-v2/config v1.27.4 h1:AhfWb5ZwimdsYTgP7Od8E9L1u4sKmDW2ZVeLcf2O42M= +github.com/aws/aws-sdk-go-v2/config v1.27.4/go.mod h1:zq2FFXK3A416kiukwpsd+rD4ny6JC7QSkp4QdN1Mp2g= github.com/aws/aws-sdk-go-v2/credentials v1.4.3/go.mod h1:FNNC6nQZQUuyhq5aE5c7ata8o9e4ECGmS4lAXC7o1mQ= -github.com/aws/aws-sdk-go-v2/credentials 
v1.17.0 h1:lMW2x6sKBsiAJrpi1doOXqWFyEPoE886DTb1X0wb7So= -github.com/aws/aws-sdk-go-v2/credentials v1.17.0/go.mod h1:uT41FIH8cCIxOdUYIL0PYyHlL1NoneDuDSCwg5VE/5o= +github.com/aws/aws-sdk-go-v2/credentials v1.17.4 h1:h5Vztbd8qLppiPwX+y0Q6WiwMZgpd9keKe2EAENgAuI= +github.com/aws/aws-sdk-go-v2/credentials v1.17.4/go.mod h1:+30tpwrkOgvkJL1rUZuRLoxcJwtI/OkeBLYnHxJtVe0= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.6.0/go.mod h1:gqlclDEZp4aqJOancXK6TN24aKhT0W0Ae9MHk3wzTMM= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.0 h1:xWCwjjvVz2ojYTP4kBKUuUh9ZrXfcAXpflhOUUeXg1k= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.0/go.mod h1:j3fACuqXg4oMTQOR2yY7m0NmJY0yBK4L4sLsRXq1Ins= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.2 h1:AK0J8iYBFeUk2Ax7O8YpLtFsfhdOByh2QIkHmigpRYk= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.2/go.mod h1:iRlGzMix0SExQEviAyptRWRGdYNo3+ufW/lCzvKVTUc= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.0 h1:FHVyVIJpOeQZCnYj9EVKTWahb4WDNFEUOKCx/dOUPcM= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.0/go.mod h1:SL/aJzGL0LsQPQ1y2HMNbJGrm/Xh6aVCGq6ki+DLGEw= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.0 h1:NPs/EqVO+ajwOoq56EfcGKa3L3ruWuazkIw1BqxwOPw= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.0/go.mod h1:D+duLy2ylgatV+yTlQ8JTuLfDD0BnFvnQRc+o6tbZ4M= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.0 h1:ks7KGMVUMoDzcxNWUlEdI+/lokMFD136EL6DWmUOV80= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.0/go.mod h1:hL6BWM/d/qz113fVitZjbXR0E+RCTU1+x+1Idyn5NgE= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.2 h1:bNo4LagzUKbjdxE0tIcR9pMzLR2U/Tgie1Hq1HQ3iH8= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.2/go.mod h1:wRQv0nN6v9wDXuWThpovGQjqF1HFdcgWjporw14lS8k= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.2 
h1:EtOU5jsPdIQNP+6Q2C5e3d65NKT1PeCiQk+9OdzO12Q= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.2/go.mod h1:tyF5sKccmDz0Bv4NrstEr+/9YkSPJHrcO7UsUKf7pWM= github.com/aws/aws-sdk-go-v2/internal/ini v1.2.4/go.mod h1:ZcBrrI3zBKlhGFNYWvju0I3TR93I7YIgAfy82Fh4lcQ= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY= @@ -360,13 +353,13 @@ github.com/aws/aws-sdk-go-v2/service/databasemigrationservice v1.36.0 h1:aQD36/N github.com/aws/aws-sdk-go-v2/service/databasemigrationservice v1.36.0/go.mod h1:EF/UkL+0uEqcqr0sKFJJIT3Jbcxgt2oWz9R0vaLNSVU= github.com/aws/aws-sdk-go-v2/service/ec2 v1.147.0 h1:m9+QgPg/qzlxL0Oxb/dD12jzeWfuQGn9XqCWyDAipi8= github.com/aws/aws-sdk-go-v2/service/ec2 v1.147.0/go.mod h1:ntWksNNQcXImRQMdxab74tp+H94neF/TwQJ9Ndxb04k= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.0 h1:a33HuFlO0KsveiP90IUJh8Xr/cx9US2PqkSroaLc+o8= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.0/go.mod h1:SxIkWpByiGbhbHYTo9CMTUnx2G4p4ZQMrDPcRRy//1c= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.1 h1:EyBZibRTVAs6ECHZOw5/wlylS9OcTzwyjeQMudmREjE= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.1/go.mod h1:JKpmtYhhPs7D97NL/ltqz7yCkERFW5dOlHyVl66ZYF8= github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.0 h1:UiSyK6ent6OKpkMJN3+k5HZ4sk4UfchEaaW5wv7SblQ= github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.0/go.mod h1:l7kzl8n8DXoRyFz5cIMG70HnPauWa649TUhgw8Rq6lo= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.2/go.mod h1:72HRZDLMtmVQiLG2tLfQcaWLCssELvGl+Zf2WVxMmR8= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.0 h1:SHN/umDLTmFTmYfI+gkanz6da3vK8Kvj/5wkqnTHbuA= 
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.0/go.mod h1:l8gPU5RYGOFHJqWEpPMoRTP0VoaWQSkJdKo+hwWnnDA= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.2 h1:5ffmXjPtwRExp1zc7gENLgCPyHFbhEPwVTkTiH9niSk= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.2/go.mod h1:Ru7vg1iQ7cR4i7SZ/JTLYN9kaXtbL69UdgG0OQWQxW0= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.0 h1:l5puwOHr7IxECuPMIuZG7UKOzAnF24v6t4l+Z5Moay4= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.0/go.mod h1:Oov79flWa/n7Ni+lQC3z+VM7PoRM47omRqbJU9B5Y7E= github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.20.0 h1:MaTOKZEPC2ANMAKzZgXbBC7OCD3BTv/BKk1dH7dKA6o= @@ -378,19 +371,19 @@ github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.27.0/go.mod h1:JsJDZFHwLG github.com/aws/aws-sdk-go-v2/service/shield v1.24.0 h1:DasZw37v6ciRecoPkslCl8rHmoPfzfwpnR48pxWJaGg= github.com/aws/aws-sdk-go-v2/service/shield v1.24.0/go.mod h1:sq11Jfbf0XW0SoJ4esedM4kCsBPmjzakxfpvG1Z+pgs= github.com/aws/aws-sdk-go-v2/service/sso v1.4.2/go.mod h1:NBvT9R1MEF+Ud6ApJKM0G+IkPchKS7p7c2YPKwHmBOk= -github.com/aws/aws-sdk-go-v2/service/sso v1.19.0 h1:u6OkVDxtBPnxPkZ9/63ynEe+8kHbtS5IfaC4PzVxzWM= -github.com/aws/aws-sdk-go-v2/service/sso v1.19.0/go.mod h1:YqbU3RS/pkDVu+v+Nwxvn0i1WB0HkNWEePWbmODEbbs= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.22.0 h1:6DL0qu5+315wbsAEEmzK+P9leRwNbkp+lGjPC+CEvb8= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.22.0/go.mod h1:olUAyg+FaoFaL/zFaeQQONjOZ9HXoxgvI/c7mQTYz7M= +github.com/aws/aws-sdk-go-v2/service/sso v1.20.1 h1:utEGkfdQ4L6YW/ietH7111ZYglLJvS+sLriHJ1NBJEQ= +github.com/aws/aws-sdk-go-v2/service/sso v1.20.1/go.mod h1:RsYqzYr2F2oPDdpy+PdhephuZxTfjHQe7SOBcZGoAU8= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.1 h1:9/GylMS45hGGFCcMrUZDVayQE1jYSIN6da9jo7RAYIw= 
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.1/go.mod h1:YjAPFn4kGFqKC54VsHs5fn5B6d+PCY2tziEa3U/GB5Y= github.com/aws/aws-sdk-go-v2/service/storagegateway v1.26.0 h1:mUZTy6ckniofJCEiHSISSX7CuioLWHvGyiEIC0ZqxWQ= github.com/aws/aws-sdk-go-v2/service/storagegateway v1.26.0/go.mod h1:vs7VbPSVlTiuEHVruOY+zqOJLmaW0lcJDj0lzFHuvZs= github.com/aws/aws-sdk-go-v2/service/sts v1.7.2/go.mod h1:8EzeIqfWt2wWT4rJVu3f21TfrhJ8AEMzVybRNSb/b4g= -github.com/aws/aws-sdk-go-v2/service/sts v1.27.0 h1:cjTRjh700H36MQ8M0LnDn33W3JmwC77mdxIIyPWCdpM= -github.com/aws/aws-sdk-go-v2/service/sts v1.27.0/go.mod h1:nXfOBMWPokIbOY+Gi7a1psWMSvskUCemZzI+SMB7Akc= +github.com/aws/aws-sdk-go-v2/service/sts v1.28.1 h1:3I2cBEYgKhrWlwyZgfpSO2BpaMY1LHPqXYk/QGlu2ew= +github.com/aws/aws-sdk-go-v2/service/sts v1.28.1/go.mod h1:uQ7YYKZt3adCRrdCBREm1CD3efFLOUNH77MrUCvx5oA= github.com/aws/smithy-go v1.5.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= -github.com/aws/smithy-go v1.20.0 h1:6+kZsCXZwKxZS9RfISnPc4EXlHoyAkm2hPuM8X2BrrQ= -github.com/aws/smithy-go v1.20.0/go.mod h1:uo5RKksAl4PzhqaAbjd4rLgFoq5koTsQKYuGe7dklGc= +github.com/aws/smithy-go v1.20.1 h1:4SZlSlMr36UEqC7XOyRVb27XMeZubNcBNN+9IgEPIQw= +github.com/aws/smithy-go v1.20.1/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E= github.com/axiomhq/hyperloglog v0.0.0-20240124082744-24bca3a5b39b h1:F3yMzKumBUQ6Fn0sYI1YQ16vQRucpZOfBQ9HXWl5+XI= github.com/axiomhq/hyperloglog v0.0.0-20240124082744-24bca3a5b39b/go.mod h1:k08r+Yj1PRAmuayFiRK6MYuR5Ve4IuZtTfxErMIh0+c= github.com/aybabtme/iocontrol v0.0.0-20150809002002-ad15bcfc95a0 h1:0NmehRCgyk5rljDQLKUO+cRJCnduDyn11+zGZIc9Z48= @@ -478,8 +471,8 @@ github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnht github.com/cncf/udpa/go 
v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20231109132714-523115ebc101 h1:7To3pQ+pZo0i3dsWEbinPNFs5gPSBOsJtx3wTT94VBY= -github.com/cncf/xds/go v0.0.0-20231109132714-523115ebc101/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ= +github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM= github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= @@ -522,7 +515,7 @@ github.com/couchbase/goutils v0.0.0-20180530154633-e865a1461c8a/go.mod h1:BQwMFl github.com/cpuguy83/dockercfg v0.3.1 h1:/FpZ+JaygUR/lZP2NlFI2DVfrOEMAIKP5wWEJdoYe9E= github.com/cpuguy83/dockercfg v0.3.1/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.11/go.mod 
h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= @@ -577,11 +570,12 @@ github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m3 github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v17.12.0-ce-rc1.0.20200916142827-bd33bbf0497b+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v20.10.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v24.0.7+incompatible h1:Wo6l37AuwP3JaMnZa226lzVXGA3F9Ig1seQen0cKYlM= -github.com/docker/docker v24.0.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v24.0.9+incompatible h1:HPGzNmwfLZWdxHqK9/II92pyi1EpYKsAqcl4G0Of9v0= +github.com/docker/docker v24.0.9+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.3.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= -github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= +github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -602,8 +596,8 @@ github.com/dvsekhvalnov/jose2go v1.6.0 h1:Y9gnSnP4qEI0+/uQkHvFXeD2PLPJeXEL+ySMEA github.com/dvsekhvalnov/jose2go v1.6.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= github.com/eapache/go-resiliency 
v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= -github.com/eapache/go-resiliency v1.4.0 h1:3OK9bWpPk5q6pbFAaYSEwD9CLUSHG8bnZuqX2yMt3B0= -github.com/eapache/go-resiliency v1.4.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= +github.com/eapache/go-resiliency v1.6.0 h1:CqGDTLtpwuWKn6Nj3uNUdflaq+/kIPsg0gfNzHton30= +github.com/eapache/go-resiliency v1.6.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 h1:Oy0F4ALJ04o5Qqpdz8XLIpNA3WM/iSIXqxtqo7UGVws= github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3/go.mod h1:YvSRo5mw33fLEx1+DlK6L2VV43tJt5Eyel9n9XBcR+0= @@ -643,11 +637,11 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/go-control-plane v0.11.1 h1:wSUXTlLfiAQRWs2F+p+EKOY9rUyis1MyGqJ2DIk5HpM= -github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= +github.com/envoyproxy/go-control-plane v0.12.0 h1:4X+VP1GHd1Mhj6IB5mMeGbLCleqxjletLK6K0rbxyZI= +github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod 
h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= -github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= +github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A= +github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= github.com/ericchiang/k8s v1.2.0/go.mod h1:/OmBgSq2cd9IANnsGHGlEz27nwMZV2YxlpXuQtU3Bz4= github.com/euank/go-kmsg-parser v2.0.0+incompatible h1:cHD53+PLQuuQyLZeriD1V/esuG4MuU0Pjs5y6iknohY= github.com/euank/go-kmsg-parser v2.0.0+incompatible/go.mod h1:MhmAMZ8V4CYH4ybgdRwPr2TU5ThnS43puaKEMpja1uw= @@ -656,8 +650,10 @@ github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLi github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.5.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= -github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= -github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= +github.com/evanphx/json-patch/v5 v5.8.0 h1:lRj6N9Nci7MvzrXuX6HFzU8XjmhPiXPlsKEy1u0KQro= +github.com/evanphx/json-patch/v5 v5.8.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= +github.com/expr-lang/expr v1.16.1 h1:Na8CUcMdyGbnNpShY7kzcHCU7WqxuL+hnxgHZ4vaz/A= +github.com/expr-lang/expr v1.16.1/go.mod h1:uCkhfG+x7fcZ5A5sXHKuQ07jGZRl6J0FCAaf2k4PtVQ= github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb h1:IT4JYU7k4ikYg1SCxNI1/Tieq/NFvh6dzLdgi7eu0tM= github.com/facette/natsort 
v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW64qjjJq8M2u4dxNaBiDfKK+z/3eGDpXEQhc= github.com/fatih/camelcase v1.0.0 h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8= @@ -673,8 +669,8 @@ github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/felixge/fgprof v0.9.3 h1:VvyZxILNuCiUCSXtPtYmmtGvb65nqXh2QFWc0Wpf2/g= github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNun8eiPw= -github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= -github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/form3tech-oss/jwt-go v3.2.5+incompatible h1:/l4kBbb4/vGSsdtB5nUe8L7B9mImVMaBPw9L/0TBHU8= @@ -687,12 +683,13 @@ github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq github.com/frankban/quicktest v1.10.2/go.mod h1:K+q6oSqb0W0Ininfk863uOk1lMy69l/P6txr3mVT54s= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/frankban/quicktest v1.13.0/go.mod h1:qLE0fzW0VuyUAJgPU19zByoIr0HtCHN/r/VLSOOIySU= -github.com/frankban/quicktest v1.14.5 h1:dfYrrRyLtiqT9GyKXgdh+k4inNeTvmGbuSgZ3lx3GhA= -github.com/frankban/quicktest v1.14.5/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/frankban/quicktest v1.14.6 
h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU= github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -738,8 +735,8 @@ github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-logr/zapr v1.2.4 h1:QHVo+6stLbfJmYGkQ7uGHUCu5hnAFAj6mDe6Ea0SeOo= -github.com/go-logr/zapr v1.2.4/go.mod h1:FyHWQIzQORZ0QVE1BtVHv3cKtNLuXsbNLtpuhNapBOA= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= 
github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= @@ -813,6 +810,8 @@ github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr6 github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg= github.com/go-test/deep v1.1.0/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= +github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 h1:TQcrn6Wq+sKGkpyPvppOz99zsMBaUOKXq6HSv655U1c= +github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/go-zookeeper/zk v1.0.2/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg= github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= @@ -880,8 +879,6 @@ github.com/golang-sql/sqlexp v0.1.0/go.mod h1:J4ad9Vo8ZCWQ2GMrC4UCQy1JpCbwU9m3EO github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= -github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod 
h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -949,7 +946,6 @@ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -980,7 +976,6 @@ github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= @@ -1000,8 +995,8 @@ github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= -github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= @@ -1012,7 +1007,6 @@ github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qK github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= -github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gopcua/opcua v0.1.12/go.mod h1:a6QH4F9XeODklCmWuvaOdL8v9H0d73CEKUHWVZLQyE8= github.com/gophercloud/gophercloud v0.0.0-20180828235145-f29afc2cceca/go.mod h1:3WdhXV3rUYy9p6AUW8d94kr+HS62Y4VL9mBnFxsD8q4= github.com/gophercloud/gophercloud v1.7.0 h1:fyJGKh0LBvIZKLvBWvQdIgkaV5yTM3Jh9EYUh+UNCAs= @@ -1026,8 +1020,8 @@ github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH github.com/gorilla/handlers v1.5.1/go.mod 
h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= -github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= @@ -1066,10 +1060,10 @@ github.com/grafana/mysqld_exporter v0.12.2-0.20231005125903-364b9c41e595 h1:I9sR github.com/grafana/mysqld_exporter v0.12.2-0.20231005125903-364b9c41e595/go.mod h1:U8ifHC5pT2WuVTO7ki4KZmWLjfEKfktQiU3bh0J8scw= github.com/grafana/node_exporter v0.18.1-grafana-r01.0.20231004161416-702318429731 h1:vyyIYY2sLpmgFIckJ1vSO/oYkvB0thDF6UiFYp5PThM= github.com/grafana/node_exporter v0.18.1-grafana-r01.0.20231004161416-702318429731/go.mod h1:vOZxEzxm0nZmuNqjtIfvtmvdRtJik9POmcN5mQVLf5E= -github.com/grafana/opentelemetry-collector/otelcol v0.0.0-20231018134914-c0109e052230 h1:TT+IIRYVmpUGfZhzEgpqf0mQFhTKaeMbV+0RKleguf4= -github.com/grafana/opentelemetry-collector/otelcol v0.0.0-20231018134914-c0109e052230/go.mod h1:BSCvlVMyWLH3704kfGUv8KWRRN2nvNJBOBcfKttXTDM= -github.com/grafana/opentelemetry-collector/service v0.0.0-20231018134914-c0109e052230 h1:f054R0C2NPdYXRl6kV+tGSI69Y4Vl/vduo2m/iIuSFE= -github.com/grafana/opentelemetry-collector/service v0.0.0-20231018134914-c0109e052230/go.mod 
h1:kBdpzrqR2wJkOdg50yzp4dv+2XBMyeqTgF4lCx0hSpQ= +github.com/grafana/opentelemetry-collector/otelcol v0.0.0-20240321103955-8919a1c85cbe h1:XffwtyK11B/undScvvYBi/LSWG7ob43lzkdhxmxZkJw= +github.com/grafana/opentelemetry-collector/otelcol v0.0.0-20240321103955-8919a1c85cbe/go.mod h1:Xo58hEmoZFLyOIs9Wk400ME9gEFV+ttxCGcls6NxbhI= +github.com/grafana/opentelemetry-collector/service v0.0.0-20240321103955-8919a1c85cbe h1:LEmmaAnTjtp7pWCsnc8iMfuHIHzDbYIiCXnxpMTOLms= +github.com/grafana/opentelemetry-collector/service v0.0.0-20240321103955-8919a1c85cbe/go.mod h1:9El7PPhnV+2xPXLlyileLaUa5mOE+vw6sswmcZBaUlc= github.com/grafana/postgres_exporter v0.8.1-0.20210722175051-db35d7c2f520 h1:HnFWqxhoSF3WC7sKAdMZ+SRXvHLVZlZ3sbQjuUlTqkw= github.com/grafana/postgres_exporter v0.8.1-0.20210722175051-db35d7c2f520/go.mod h1:+HPXgiOV0InDHcZ2jNijL1SOKvo0eEPege5fQA0+ICI= github.com/grafana/prometheus v1.8.2-0.20240130142130-51b39f24d406 h1:LVIOYe5j92m10wluP5hgeHqSkOLnZzcPxhYCkdbLXCE= @@ -1092,8 +1086,8 @@ github.com/grafana/stackdriver_exporter v0.0.0-20240228143257-3a2c9acef5a2 h1:xB github.com/grafana/stackdriver_exporter v0.0.0-20240228143257-3a2c9acef5a2/go.mod h1:Ce7MjYSAUzZZeFb5jBNqSUUZ45w5IMdnNEKfz3jJRos= github.com/grafana/tail v0.0.0-20230510142333-77b18831edf0 h1:bjh0PVYSVVFxzINqPFYJmAmJNrWPgnVjuSdYJGHmtFU= github.com/grafana/tail v0.0.0-20230510142333-77b18831edf0/go.mod h1:7t5XR+2IA8P2qggOAHTj/GCZfoLBle3OvNSYh1VkRBU= -github.com/grafana/vmware_exporter v0.0.4-beta h1:Tb8Edm/wDYh0Lvhm38HLNTlkflUrlPGB+jD+/hW4xHI= -github.com/grafana/vmware_exporter v0.0.4-beta/go.mod h1:+SsUoWeCJ3nDm1gkw8xqBp4NNSUIrGVoEbnwJeeoIVU= +github.com/grafana/vmware_exporter v0.0.5-beta h1:2JCqzIWJzns8FN78wPsueC9rT3e3kZo2OUoL5kGMjdM= +github.com/grafana/vmware_exporter v0.0.5-beta/go.mod h1:1CecUZII0zVsVcHtNfNeTTcxK7EksqAsAn/TCCB0Mh4= github.com/gregjones/httpcache 
v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grobie/gomemcache v0.0.0-20230213081705-239240bbc445 h1:FlKQKUYPZ5yDCN248M3R7x8yu2E3yEZ0H7aLomE4EoE= github.com/grobie/gomemcache v0.0.0-20230213081705-239240bbc445/go.mod h1:L69/dBlPQlWkcnU76WgcppK5e4rrxzQdi6LhLnK/ytA= @@ -1103,8 +1097,8 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vb github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.0 h1:RtRsiaGvWxcwd8y3BiRZxsylPT8hLWZ5SPcfI+3IDNk= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.0/go.mod h1:TzP6duP4Py2pHLVPPQp42aoYI92+PCrVotyR5e8Vqlk= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 h1:/c3QmbOGMGTOumP2iT/rCwB7b0QDGLKzqOmktBjT+Is= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1/go.mod h1:5SN9VR2LTsRFsrEC6FHgRbTWrTHu6tqPeKxEQv15giM= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 h1:MJG/KsmcqMwFAkh8mTnAwhyKoB+sTAnY4CACC110tbU= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU= @@ -1118,14 +1112,14 @@ github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoP github.com/hashicorp/consul/api v1.8.1/go.mod h1:sDjTOq0yUyv5G4h+BqSea7Fn6BU+XbolEz1952UB+mk= github.com/hashicorp/consul/api v1.12.0/go.mod h1:6pVBMo0ebnYdt2S3H87XhekM/HHrUoTD2XXb/VrZVy0= github.com/hashicorp/consul/api v1.13.0/go.mod 
h1:ZlVrynguJKcYr54zGaDbaL3fOvKC9m72FhPvA8T35KQ= -github.com/hashicorp/consul/api v1.25.1 h1:CqrdhYzc8XZuPnhIYZWH45toM0LB9ZeYr/gvpLVI3PE= -github.com/hashicorp/consul/api v1.25.1/go.mod h1:iiLVwR/htV7mas/sy0O+XSuEnrdBUUydemjxcUrAt4g= +github.com/hashicorp/consul/api v1.27.0 h1:gmJ6DPKQog1426xsdmgk5iqDyoRiNc+ipBdJOqKQFjc= +github.com/hashicorp/consul/api v1.27.0/go.mod h1:JkekNRSou9lANFdt+4IKx3Za7XY0JzzpQjEb4Ivo1c8= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.7.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= -github.com/hashicorp/consul/sdk v0.14.1 h1:ZiwE2bKb+zro68sWzZ1SgHF3kRMBZ94TwOCFRF4ylPs= -github.com/hashicorp/consul/sdk v0.14.1/go.mod h1:vFt03juSzocLRFo59NkeQHHmQa6+g7oU0pfzdI1mUhg= +github.com/hashicorp/consul/sdk v0.15.1 h1:kKIGxc7CZtflcF5DLfHeq7rOQmRq3vk7kwISN9bif8Q= +github.com/hashicorp/consul/sdk v0.15.1/go.mod h1:7pxqqhqoaPqnBnzXD1StKed62LqJeClzVsUEy85Zr0A= github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A= github.com/hashicorp/cronexpr v1.1.2/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -1149,8 +1143,8 @@ github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrj github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= 
-github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c= -github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-hclog v1.6.2 h1:NOtoftovWkDheyUM/8JW3QMiXyxJK3uHRK7wV04nD2I= +github.com/hashicorp/go-hclog v1.6.2/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= @@ -1210,8 +1204,8 @@ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/golang-lru/v2 v2.0.5 h1:wW7h1TG88eUIJ2i69gaE3uNVtEPIagzhGvHgwfx2Vm4= -github.com/hashicorp/golang-lru/v2 v2.0.5/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/hcl v0.0.0-20180906183839-65a6292f0157/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= @@ -1363,8 +1357,8 @@ github.com/jackc/pgx/v4 v4.18.2/go.mod h1:Ey4Oru5tH5sB6tV7hDmfWFahwF15Eb7DNXlRKx github.com/jackc/puddle 
v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jaegertracing/jaeger v1.50.0 h1:qsOcPeB3nAc3h8tx+gnZ3JODAZfqbYmQr45jPEwBd2w= -github.com/jaegertracing/jaeger v1.50.0/go.mod h1:MVGvxf4+Pcn31gz9RnLo0097w3khKFwJIprIZHOt89s= +github.com/jaegertracing/jaeger v1.54.0 h1:BfQiFxrE/2Fw+qU24qjSuUGsgJQLwKHi1TXBy6J3qKo= +github.com/jaegertracing/jaeger v1.54.0/go.mod h1:wNmtyrAJ/sJAgOvC9BltyKErJY8glTHCvWLTsvhaqkY= github.com/jarcoal/httpmock v0.0.0-20180424175123-9c70cfe4a1da/go.mod h1:ks+b9deReOc7jgqp+e7LuFiCBH6Rm5hL32cLcEAArb4= github.com/jarcoal/httpmock v1.3.0 h1:2RJ8GP0IIaWwcC9Fp2BmVi8Kog3v2Hn7VXM3fTd+nuc= github.com/jarcoal/httpmock v1.3.0/go.mod h1:3yb8rc4BI7TCBhFY8ng0gjuLKJNquuDNiPaZjnENuYg= @@ -1452,20 +1446,19 @@ github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYs github.com/klauspost/compress v1.11.12/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.17.3/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= -github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= -github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= +github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg= +github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg= github.com/klauspost/cpuid/v2 v2.2.5/go.mod 
h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/knadh/koanf v1.5.0 h1:q2TSd/3Pyc/5yP9ldIrSdIz26MCcyNQzW0pEAugLPNs= github.com/knadh/koanf v1.5.0/go.mod h1:Hgyjp4y8v44hpZtPzs7JZfRAW5AhN7KfZcwv1RYggDs= -github.com/knadh/koanf/v2 v2.0.1 h1:1dYGITt1I23x8cfx8ZnldtezdyaZtfAuRtIFOiRzK7g= -github.com/knadh/koanf/v2 v2.0.1/go.mod h1:ZeiIlIDXTE7w1lMT6UVcNiRAS2/rCeLn/GdLNvY1Dus= +github.com/knadh/koanf/v2 v2.1.0 h1:eh4QmHHBuU8BybfIJ8mB8K8gsGCD/AUQTdwGq/GzId8= +github.com/knadh/koanf/v2 v2.1.0/go.mod h1:4mnTRbZCK+ALuBXHZMjDfG9y714L7TykVnZkXbMU3Es= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= @@ -1613,16 +1606,16 @@ github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0/go.mod 
h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4 h1:BpfhmLKZf+SjVanKKhCgf3bg+511DmU9eDQTen7LLbY= -github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374mizHuIWj+OSJCajGr/phAmuMug9qIX3l9CflE= +github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mna/redisc v1.3.2 h1:sc9C+nj6qmrTFnsXb70xkjAHpXKtjjBuE6v2UcQV0ZE= github.com/mna/redisc v1.3.2/go.mod h1:CplIoaSTDi5h9icnj4FLbRgHoNKCHDNJDVRztWDGeSQ= -github.com/moby/patternmatcher v0.5.0 h1:YCZgJOeULcxLw1Q+sVR636pmS7sPEn1Qo2iAN6M7DBo= -github.com/moby/patternmatcher v0.5.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= +github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= +github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78= @@ -1645,8 +1638,8 @@ github.com/montanaflynn/stats v0.7.0 h1:r3y12KyNxj/Sb/iOE46ws+3mS1+MZca1wlHQFPsY github.com/montanaflynn/stats 
v0.7.0/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= -github.com/mostynb/go-grpc-compression v1.2.1 h1:16tdYxBZSD8p9AUmvw4F7Nyc2T4/eE7XsIXrgxSEcJI= -github.com/mostynb/go-grpc-compression v1.2.1/go.mod h1:oidYvYyefMmhcuvU8fLJ8FfZyTyVzJ6SkmD5fIKgRe8= +github.com/mostynb/go-grpc-compression v1.2.2 h1:XaDbnRvt2+1vgr0b/l0qh4mJAfIxE0bKXtz2Znl3GGI= +github.com/mostynb/go-grpc-compression v1.2.2/go.mod h1:GOCr2KBxXcblCuczg3YdLQlcin1/NfyDA348ckuCH6w= github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= github.com/mrunalp/fileutils v0.5.1 h1:F+S7ZlNKnrwHfSwdlgNSkKo67ReVf8o9fel6C3dkm/Q= github.com/mrunalp/fileutils v0.5.1/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= @@ -1713,8 +1706,8 @@ github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+ github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.11.0 h1:WgqUCUt/lT6yXoQ8Wef0fsNn5cAuMK7+KT9UFRz2tcU= -github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM= +github.com/onsi/ginkgo/v2 v2.14.0 h1:vSmGj2Z5YPb9JwCWT6z6ihcUvDhuXLc3sJiqd3jMKAY= +github.com/onsi/ginkgo/v2 v2.14.0/go.mod h1:JkUdW7JkN0V6rFvsHcJ478egV3XH9NxpD27Hal/PhZw= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.2/go.mod 
h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= @@ -1723,110 +1716,112 @@ github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1Cpa github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8= github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= -github.com/open-telemetry/opentelemetry-collector-contrib/connector/servicegraphconnector v0.87.0 h1:ArBXfq0KQ89DV9th/MU/snH205Uh6jFCnIiwd/wKp+s= -github.com/open-telemetry/opentelemetry-collector-contrib/connector/servicegraphconnector v0.87.0/go.mod h1:hN1ufLEIhE10FeG7L/yKMXMr9B0hcyrvqiZ3vR/qq/c= -github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.87.0 h1:RUlUN9Rtd8pVq3tI6pbmpiCTGiAzDCJcwT4EMGnOeBg= -github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.87.0/go.mod h1:vKVg+UCn1agpq5A3EaFol1bbkxAlm3CCQQHlTb9+sZo= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.87.0 h1:JADmKOCiLljFDjic3Fb5yyPsvNj7IBJyZU6QrnOUZH0= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.87.0/go.mod h1:WKjmyVi+Xhhvuvj2J+1Z0fXvY38MKRbREe2aR5UPOIw= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter v0.87.0 h1:+apdZt5DPPIxjBrayu1muKbvUK3zqsfgb+3fMh6Hnyo= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter v0.87.0/go.mod h1:JXVmcuySy3xyo3JjoU+CrNWy/C12Fw6JB1HWXf26HwQ= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter v0.87.0 h1:5LmBAlLycadwA3AHI2rqPuDjx1HFb/PSn3946Eyp3Jw= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter v0.87.0/go.mod 
h1:lgOFfu/GLf6LbvZwlumkUv3iBLqRdtBentZKcrrqb3Y= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.87.0 h1:52+RVfmzj+JePVJuD07gfppdzF9fsKASIRGzTC05QIg= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.87.0/go.mod h1:VQ7QIry+qNpzGr2/1HrS/IzV9JXoWnqLIsgSi3qPvhM= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/basicauthextension v0.87.0 h1:p4pPpRv9zOT/kOQT8GJPhl2drySkTDIpLEhLjXjo5yc= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/basicauthextension v0.87.0/go.mod h1:vbU0PUtyhWa3iwIJn7blygKYVnt2GzEAA66zlPbLz90= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/bearertokenauthextension v0.87.0 h1:UVFqhd0y7IGSabrHUiDX4efC7qW71tq/FyDFPcBFaJE= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/bearertokenauthextension v0.87.0/go.mod h1:xPWViWgSZhXRGGeByF+awZSb0CwnTHyt9RGXYZ7AwPg= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/headerssetterextension v0.87.0 h1:YMVikePSZOjuB6mdXUQdxiSssexzj+8yD2DzZHEiy4g= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/headerssetterextension v0.87.0/go.mod h1:HVqsfJuqdPN6vz+x/uHr6sg9MPj0DeWng6Ja4mfdNpk= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/jaegerremotesampling v0.87.0 h1:le3sa1Vkn2IxRqahljtWf47rTPkaA05BxPGGoYY96Zw= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/jaegerremotesampling v0.87.0/go.mod h1:Ik+BslrriohE2WlcxZDvJ9KkYji/L4FaXDwaLm2ADAk= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/oauth2clientauthextension v0.87.0 h1:HeNHs47RQP8nrcujyJY8DI14H1GwN3luXg871LaFfCA= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/oauth2clientauthextension v0.87.0/go.mod h1:DRpgdIDMa+CFE96SoEPwigGBuZbwSNWotTgkJlrZMVc= 
-github.com/open-telemetry/opentelemetry-collector-contrib/extension/sigv4authextension v0.87.0 h1:Z4o71/rS7mmpJ/9uzta3/nTaT+vKt0CU35o4inDLA9Y= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/sigv4authextension v0.87.0/go.mod h1:clScLUe8m0CTZMcV0scqq+fFFvw5Q1dASkYlYsrRptM= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.87.0 h1:JJsQ6iMFIDb7W6uLh6LQ5k4XOgWolr7ugVBoeV4l7hQ= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.87.0/go.mod h1:rDdtaUrMV6TJHqssyiYSfsLfFN1pIg4JOTDaE9AUapQ= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.87.0 h1:W4Ty2pSyge/qNAOILO6HqyKrAcgALs0bn5CmpGZJXVo= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.87.0/go.mod h1:3EFmVoLcdM8Adj75N8TGJ4txDB29oW1chTLCFiL/wxs= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.87.0 h1:ekT4/I9J484j4yR/0VHj5AGtgv8KmNd+e4oXxNJNR/o= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.87.0/go.mod h1:waI3VDdKYW7es1LmLY35SHJYNwUX+JJN719wXmP3WAc= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.87.0 h1:jtCKA0Mfc5RgZzPGuxXioW8oCSmJsayqXSCTlaA67xM= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.87.0/go.mod h1:vt3N5XEF1QigYUz4NagTDL3/Gd8bivCLcYFoi91Luy8= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.87.0 h1:+T/u+x1tO7FShn0DBLB9mqAE2MnXoAZ+u70q5wSbt9E= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.87.0/go.mod h1:/K1o7YNhLJ0WIoI0+pQFbhf/lqCX4FJC33J7uY0ey/U= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8stest v0.87.0 h1:om2WAvLFABdCdJWjE6GdK9xEn8me0z9UN3gz091SiG4= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8stest 
v0.87.0/go.mod h1:ntSfqIeoGj0O+pXXyqDG9iTAw/PQg2JsO26EJ1GAKto= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka v0.87.0 h1:kDamu7uZHRmeJWqaJg42LSgprRGokmQ4t8ACslzS0GU= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka v0.87.0/go.mod h1:EAw9aBkrDIDWQvRBdJiDkaJmCqcgZpiZzYZEvOjg4uI= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.87.0 h1:8pVElJ4AMIiJxS+sxnK9CX73RED7iv/FYbqkvvX01ig= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.87.0/go.mod h1:zRQU4eN6rNXeVKD8g2p2Czb88o/Hd2BkVdar5nCk0+k= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.87.0 h1:sx1ye7Y2rJ2qi11i2ih9T7BocxaV0uaBBf7B8ijCYpU= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.87.0/go.mod h1:AobBiNPFNHUm0MJFTieajasG/xNMjMYI7BGGTSKh0xg= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.87.0 h1:sy75u6ZwBvRwv9RjEF65SqlkBsAeZFqF4+eFOLhIsJQ= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.87.0/go.mod h1:trWrnucNKCkBEYpe5IBo+RgGQtzwMT454sGkWsggmDM= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.87.0 h1:jtdLqdRyt29EUd6H6p431/XmGHwS6BHB5s/EqCfEWoc= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.87.0/go.mod h1:JUVS9CCWJyXSJn8cTmS2Dq3vC3jlhJozjg3SZdZ/4OY= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.87.0 h1:9RtkoPmUPRW1NrOawEiWsxOZ/dBlym5DzhLXjRpM9tM= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.87.0/go.mod h1:xvO0/6zTw6UBl7g4hZpvapfvANNSnj6sQcSnF6jqSSg= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.87.0 h1:zA50pvJziZjWQiN9MZIkT6Ii3hMSaCKa6jvs1vCYT5g= 
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.87.0/go.mod h1:IjdtiiTTNlAkspcNyAjHysWAZs5U48alWGUodTkkxhI= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.87.0 h1:JFPnEdsGaGhay69k7QJJuT7gq3XQn8fzYL1gm4Oqpj0= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.87.0/go.mod h1:EWuhJl1M5r6lsFeQntHKxkjKzsmM2T4vLk8zWS5TL2A= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.87.0 h1:WRgrvgi/fHuOrqlXgILssE+Bujdd3rCoB3FWfd37g/s= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.87.0/go.mod h1:OvjvWY8zLsGEiYQRQIyLE4FhmOhJmveebbwVhBVxB54= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/loki v0.87.0 h1:H0nkDuusL0FAMGy3ANaVjmpDfdQjmz2nJa4SzJ5s6Jw= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/loki v0.87.0/go.mod h1:U7TXNkK7vwi687WZYi7iZFmM1/0G06qsTIHqkTmsyjA= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus v0.87.0 h1:25XSKNdhAuuLvJdPJ0Je4vRGJmG/8iXOspzeMIr4zSE= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus v0.87.0/go.mod h1:hKArXrn+iYk888KKQThhdPEgPf2GMay2CBe7NnTnmTs= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.87.0 h1:1eceF0bEseOnk7K6U5OdrEcFKvxEdjnqHTzwNAw2pxA= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.87.0/go.mod h1:shG9MpBWsBTzns2MYKRFiRymJXhdNb3snGyjgTW5mDg= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.87.0 h1:uQDcjWlVodE6nYzsRI5LPxZ0X0Ki3fYLJJ3SFK8+MgM= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.87.0/go.mod h1:xeUhbksYHZ6PkkKidaK95zztJOQcemwxdS+SHracC3Y= 
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.87.0 h1:RljU9Xodt7Ptc0enTRuTwUotGi2BuiWBqCUVQwT1otY= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.87.0/go.mod h1:ybZnD0ldx1tEm6xgJ5wP5tK2x6AY8PNpTonCpOBVI6Y= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.87.0 h1:9MVdMcdtc+Gl0DAaeZ+PdJzskIg1K8FKuYql4h6pQC0= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.87.0/go.mod h1:NLScciQgJO4tKQ7vXqiUkzjk6O3bo2aMVkMcmYsSDQY= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.87.0 h1:EJHxvRiZbgq25s6U+4iYSv4D4GAonfQ6hiNFxhll634= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.87.0/go.mod h1:ZSLBv4EAicncp1IfpVweKyTZWWR4Yb0deRlsDiw1eI0= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.87.0 h1:mm9DXnoWNHckL0MnYdmCNOU5DOomwdGeUl9t51bQ/Ac= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.87.0/go.mod h1:g6H0fB9TW03Lb8M+H0BXtgQp7gPncIwf3Fk73xOs9EA= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.87.0 h1:QJKdtNcsxBhG2ZwSzYRVI0oxUqBJJvhfWf0OnjHU3jY= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.87.0/go.mod h1:skMmFcl+gxyiOQXvwHc0IKpC73iyQ7zl9r1aRNmPMwI= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.87.0 h1:gEv7UNu4K5ptvKIpWQmVS+0XMrIzqZWczcjyhLnsx9M= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.87.0/go.mod h1:6Rnjwj4bZU7Ab+nLD1YqQlbdsnsKoOR/OzyI42+PyE8= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/servicegraphprocessor 
v0.87.0 h1:BIGb6dfmaTlDE7KbiQUhnD9SvL5HanbJbWJrnzURfPY= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/servicegraphprocessor v0.87.0/go.mod h1:EnaQxXfCCWkSEfsQbGOvYbeJ/EuqvtMYTLTq8RN6TiY= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanmetricsprocessor v0.87.0 h1:4l/QetnprIMethZYfD2RK+MfMR83f6QycYb9bhJFItc= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanmetricsprocessor v0.87.0/go.mod h1:MQlwEsqYNvM8oTaI6pHOG/NBv2wrpE3daRi8hr9caao= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanprocessor v0.87.0 h1:ElQCXm3L6LxiqT4k/jHKCVA1GnbbMYVgd8+pYDubH8o= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanprocessor v0.87.0/go.mod h1:e8r2y116jZ32ugu/x5pzIxb0UVGVFfSezt8v7ilXgCE= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.87.0 h1:+eb5MvKfh3gPss00kvgPTS7SDLJND6zJEcZoQlxOiIE= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.87.0/go.mod h1:iUrecf5kSV8pGF7OaM/brFOJs4OMEhogclBncGT5QtI= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.87.0 h1:1TK0+QULqgwwvE8JJxpQlugRdUw6knt0vYMkI65Jac0= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.87.0/go.mod h1:UYmAgkCKmtMyt40ffRherZJcU3zeCJjq4nZ7hVxApT4= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.87.0 h1:8LN1Ky+Q6L6dmzm3k7Bec4fmlYs1OuJ7vaMuVnVIBLo= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.87.0/go.mod h1:xb97OESLQFviQ0ikbUmneISHuRG91Uf+97EymDW4yus= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.87.0 h1:+rsOWSP9SDxbnwmRmQcsdZZJJeHvuKjPFN10jQXgsQI= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver 
v0.87.0/go.mod h1:2ALknylZKXTYYM7TnHAJKBxy3Z85IhowQiWCcEdfk5Y= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver v0.87.0 h1:AgW1CDlKYfm5jwk/k6uibKqhxshUOMXm6P5ze2AJmMc= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver v0.87.0/go.mod h1:LIGa2oqb+geqkmWvteeDjzulK1PfDYCY8Jp6pI0ey2A= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.87.0 h1:fwmow4M0aJUsmY9DGUMe6yykd0TvgB6PpLS+Z590R5s= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.87.0/go.mod h1:ZLfpGguza42G+SwGEZ5/plr1wa3D7GA7I6KJyARgHPA= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/vcenterreceiver v0.87.0 h1:TI5m4trLA3cVMQSRyxU14MzCzHXDk56+sc+9TY01uw0= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/vcenterreceiver v0.87.0/go.mod h1:IA/xIUE0Fl8lc7hkEOkVyYcTF7sE7AGawI9s8ipqRKc= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.87.0 h1:0DeNqM3fhNYPsfmPbaZ1PyBJ2vtOSFpMGadRKvryXfs= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.87.0/go.mod h1:tSxkxxWCcGh/vh1mHflhQTlwulkwWM1yyEABa6DXSmY= +github.com/open-telemetry/opentelemetry-collector-contrib/connector/servicegraphconnector v0.96.0 h1:hfpAlT/CWcPzb4HfFAE+u+uay3d3QUBqXOGhwBU0ihY= +github.com/open-telemetry/opentelemetry-collector-contrib/connector/servicegraphconnector v0.96.0/go.mod h1:/NA9T4O1WOlkUwvTXBz5wmuddpC0cc2cDLEBH5ck9eM= +github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.96.0 h1:KAlAzuzvYq0xZWRR+N2qUJhE7/pvmNFYlcN5yW8Km60= +github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.96.0/go.mod h1:KcZjtSdoelUWRwGtVaiEX16Hw8mFH+JnYrN+r4Ox550= 
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.96.0 h1:2FnXGN9xxIcIz7f4hdX+OgsGowWC1D35oNtX5ErnLBc= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.96.0/go.mod h1:VPyawEuVpqKg3oemeDnYwDfBbh9gjGbrVVXl4OeHK60= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter v0.96.0 h1:3+Ca2P/XLCSSc3299+4fjQf2sPMepiewR+KSBzzIGvg= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter v0.96.0/go.mod h1:uZDY3FgHszo41nC5zXfqwISt3YyDoQ5BG7FuvqInWKM= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter v0.96.0 h1:gsaLBki5KNaZnXp23kGXv0TxeZWZcgw79PiqVF1nweM= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter v0.96.0/go.mod h1:2ywOhG08VvgjT8KnwIO/ZjIHpfWDlwZjMxVh0tBMo8A= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.96.0 h1:Fm1uUP4l6Tq3nmVGwmCjHYSR8Cq6XGufQ9/phvSry+k= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.96.0/go.mod h1:KEjojept0S9JcfkpBmOpI1tqriXDhmPOqUve7DTzsMk= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/basicauthextension v0.96.0 h1:ALIpxeAXHp4K11a4QQsEn5P9quEFevlc1WdsX+lFHjA= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/basicauthextension v0.96.0/go.mod h1:6M0tTAzffURxRHLEEL5JFn0XVIPkoixE1cBzDbXkkFI= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/bearertokenauthextension v0.96.0 h1:BM3CZHNvO8p132nrpgutwego0073LCIYOl/u6BbpGdg= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/bearertokenauthextension v0.96.0/go.mod h1:3BtBeb4OAKxxzGi9z/N6QQfByw+c2yQgZdvew7KhkOI= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/headerssetterextension v0.96.0 
h1:iQ2WhoskCdCODq5OmtpI4fA9V68HWXM1rR1vJwQRZLU= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/headerssetterextension v0.96.0/go.mod h1:DEoDcA7H8/PW2vG7ET67tScrHLleUuBV7Gy8HuoTtEA= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/jaegerremotesampling v0.96.0 h1:ny3RUx++7VuDrtGPbSoTPx3yNgoLIHkPMWGljVY0ViM= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/jaegerremotesampling v0.96.0/go.mod h1:0Un+n9jSemPvzKv/gv81IBDlGHsAobM0NSCpwe1ElAc= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/oauth2clientauthextension v0.96.0 h1:CV5DmP9XGujLLaqnWi2rEoUIBh3YCx9Jra2Eo2p4CXU= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/oauth2clientauthextension v0.96.0/go.mod h1:rjNN7v6/a84r6Eb+pKceqYDAmPOVpJaA/29agiieKAI= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/sigv4authextension v0.96.0 h1:YnPi0BZwqrZeHWb+DJpZ23lMThTZPiCTYsyUwolkTiM= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/sigv4authextension v0.96.0/go.mod h1:Ynut4t5ljCzNsyVp+5QGU2HI5/oQjO9DXaVOE9faFFc= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.96.0 h1:GI8hvKwMD4YE+CUeDT+v+Fce6lD+ppaq6MQ08mVUGh8= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.96.0/go.mod h1:Mfb4Plf9pyVZGc+gxB1k95Lx1XgKu8UwBPnGvF3KrdA= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.96.0 h1:uG8YgKM932zjruNwAicIKrGpW09bt+Ckcw5Zi4gn1qU= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.96.0/go.mod h1:/NVf7ci5xbUiSwqttXqqdsJMjH/C38cHhhBLnXrECA0= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.96.0 h1:tyNJ1qYXm1jMJV2NbskYosfo7xIyRP7YvbdcvldXAeA= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.96.0/go.mod 
h1:f3d2OcVhcMGgcMkyf614jPfAD8eE+zlJ6Pd5P43qWyI= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.96.0 h1:OedRxe8lJl1ltht3fWYIVqcLC1hGAVLZgwePuPeo71M= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.96.0/go.mod h1:eyzWPLQomfVZZZlRaKyFIupGTOjrpcUKVHz4rH4kGhg= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.96.0 h1:XIMJQMKDIozb6sLy/5TOphwWHx8wy/uGNajMC7BW2aU= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.96.0/go.mod h1:bjU0eqQSHk5HN9I+HLFNB4plj+5LDtHOVjYqbFw0r6I= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8stest v0.96.0 h1:k6vK9RZETkSh3Oa8SmphnKlew8yMGWftofckS81vmQs= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8stest v0.96.0/go.mod h1:2WpWMiJCQ0X11IcOOazSQleCrNSHNU0pyJabXl93Oh4= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka v0.96.0 h1:lsOJsYuELFPTm3cm2b11DwVD18H9Mz3fWVNaRPdUxPg= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka v0.96.0/go.mod h1:iZi3xMSwNdGs6eAQrq2C3eWm3+PE40tQV84fNw42ERI= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.96.0 h1:iRAyOfiGlU/gTLbL6qVsxuJMphyiYonb0Y24iNUWvFU= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.96.0/go.mod h1:jRIQFBjkd5cO+IjLyj/3H8aN59zIeo3Ig8X8yZ5OPBw= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.96.0 h1:i4PpId1NbxqVrtTb4VLYyIE57ZhTlgEEmUbTUyULlDk= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.96.0/go.mod h1:Fa/tTxfSAMyDGcbon9CqBuMe0083BY+B4/4PBhop2Ko= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.96.0 h1:wNeHYUxCxUHmT6YmUBFM8V1UyR80yV+bUIgWwNydrOU= 
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.96.0/go.mod h1:tV2btxxNu9EhivKU2Meq0HagHHrSkpCiwrQTcwRrsss= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.96.0 h1:6xhEYeFRjui33hCCP8tD9B2R2VCGNdNrzi+pivp0osk= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.96.0/go.mod h1:7CDMyrWBi/iST+UVvheDNjZX8VWyboTJqkXHz7gpLDI= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.96.0 h1:nVptseHpC27Zq7Fq9yF7WOgNHrCntwQ9syRI217C3sk= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.96.0/go.mod h1:GxkZncXE27WmKiuI2TgR9+P/btT8sSPvY3zezKa5JEs= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.96.0 h1:iynLFjnG869r53AIhiavbEVMZoPqCba7Mijm+9MRdOo= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.96.0/go.mod h1:FuTdjIZj7Un07dcbJs06IF1DJiYfpQkc4oklhNWE8fg= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.96.0 h1:nzAR1IjPcbgLNFmJElLPyRlLOfijAkQcWo4L9CXixu4= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.96.0/go.mod h1:Zn0A4V5t3uNr2FYsgnzT4t0OBqdOk8jcPjgHgy3jHG0= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.96.0 h1:MvQZTcguOaRNPoj7aGOF+0c5eG7/n5G3ktEtTKA9cuE= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.96.0/go.mod h1:AnyAMKQjT3kLArnrD0Gm5qcUK8o77fFKS4Id3MU6qGI= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure v0.96.0 h1:ZKH4+0dAqGW0Yc/W3NeP4zwcWouUoLIPgjzP0Dq9qew= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure v0.96.0/go.mod h1:6jYdZIsLvWzVyJ7gvJ3dpTAw3WgSsSitc3+M0PzxoUM= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.96.0 h1:nRk4vyYsMkFht1Mo3n1d2X7WxLex0LzIWtQhE5/c2P8= 
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.96.0/go.mod h1:XTq2FQlb1ao2NHoZvkBC+LPUdMO4DG/4FJ7LqAyE8YU= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/loki v0.96.0 h1:Bh0rVndvEV9FNPi7jOB9HCGwLEiC86NyUfbyOWYhmQE= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/loki v0.96.0/go.mod h1:XCAClBjwnMbqNoq47dUmJVmCytAgAL4OYXa205kb5dQ= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus v0.96.0 h1:LZ2wG6AzwfOFc1bFPo1G8A9EbVcYb7pJOi9vpvoEiYE= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus v0.96.0/go.mod h1:dT01MSqUkDuSiXBpbk8IpeFe5FEYPQWKnuckJxciyBc= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.96.0 h1:t2SRxwm1Fx+Y+GDzRyESKCWaSQ4CGT/Cf6FaHTStnaA= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.96.0/go.mod h1:Ovei5kLnN5gCKdNGu+F6Hxz2DNPHzEGG31IjLMjUIZ4= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.96.0 h1:ouHtTPQw3Hm53i/mwZmjG2yUWlUnGq2Hv2QGfb+aM+M= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.96.0/go.mod h1:xOy/cudXXCv1rgxWakUnJote6+yaeQCg8CsZxkGYu28= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.96.0 h1:rIU8xMvn8sW22/Qg1BRmUMWaVhKqfNA6XkdWaXM0v2A= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.96.0/go.mod h1:zhqxjkw5cM9reIfN7prd4RObR12jmze/bUWQU4auDB4= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.96.0 h1:Xr4J7mX8QZTlsruw+9uAyZYsef5l2gVxNAqMcmjQ43c= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.96.0/go.mod h1:5u0tb6il3OC+ba7aV8gLx6NaN0A3NrR82Mxnux7JOew= 
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.96.0 h1:v50yY2krDn1Wf3GEj+RFdUxVqWBjPep0VocHI1WfST0= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.96.0/go.mod h1:IBH5fviypbWAiYT52+A8u1NbUe0pmVLZZ7/B5n7LZgg= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.96.0 h1:gYk6w7/H9PDdjO0Jp7JZWSXW9owReBldRsAo3jCDeds= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.96.0/go.mod h1:tQxlJSq1zgSjnHdQVnTfn/+lNo8REx0vebUf3LZzqxc= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.96.0 h1:jCX3fN6i7a+bOL8+/Qk8FE5x+Ps2fVgR9aQc0MPcZ8w= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.96.0/go.mod h1:fX0WCKzhLEF5I2CRMHzxdTAKXsveyAlorMzUBGMKptk= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.96.0 h1:FPkPbJcV2mxIppHHkyJY4hAFAtxs2PwlmO+KeflN+Ck= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.96.0/go.mod h1:byeQDum5hz1W6ko+rZjnQqcGWDEBgSw7o4GpgLxHmk4= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanmetricsprocessor v0.95.0 h1:zL6QvBvBvP4SqC/fBwH73wYIGzrs6p/pI8oIB8MsjLk= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanmetricsprocessor v0.95.0/go.mod h1:hlgS5QXAk0Yq07Hqho+YzfHnmVnNapbuYiWIOg8bo8k= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanprocessor v0.96.0 h1:xk76zUw+de12cTub55C8dFFX7LrHit6+gn7DFSXfbUA= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanprocessor v0.96.0/go.mod h1:zo7zFs2ouCcWZFTm8e+rB7cKoBPMczQc4UxINpCGVtk= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor 
v0.96.0 h1:I7pyP5UMHf+Xc7WnO/PN4ff15IbTmrNlcJn/LEngzWU= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.96.0/go.mod h1:dMQQJpxvUVsvii1WU/NaUzWmUf4H63ycRC1YG6RZA+M= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.96.0 h1:kqxZ0V2h6kv+AU4Dl2vp57/ayycJy9w3krWe9vBt/IA= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.96.0/go.mod h1:nSzmYMNiaw/CtKrmfG93D2Wpln0ZTvEPZ6oW/UECHuM= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.96.0 h1:5rdHJH2SKp9+g3ypk7wlRfMq1a7xRKqwvTffZHIOVgQ= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.96.0/go.mod h1:yk9+s0wSHn8WKzvBSa63puaPhCrjr+rmkfJ4/4NVyeQ= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.96.0 h1:V3DvS2g8qPp2Pr0i39iS37iByUlk7JvE6iEA6Ia1F58= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.96.0/go.mod h1:SpDMTfNxJhLoh90tzVbFVR6jBznomtSSfv1+mKR1s9I= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver v0.96.0 h1:gK3nBuj0qhtt8HT4MuiW60KfNcnAA1hjdqnwdIbxHaU= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver v0.96.0/go.mod h1:xc2JC4VmYfGsjaH834h0O+nCTHcddAGZkt5fJxQF7LE= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.96.0 h1:SK1GpgAte9WhTSeY6NiO6vHB+BhFF7akPlK7fyMO+ps= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.96.0/go.mod h1:yrd0L+k2JKVpyVXObHpHZXUlxgWX/RlGHz5RLxEUN2Q= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/vcenterreceiver v0.96.0 h1:C7riRI0ehDu4k6lf/ei8OObT3jGJJ5PbJ7sRO/QSMMQ= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/vcenterreceiver 
v0.96.0/go.mod h1:OvyUlG4f37oXFVqOBXi0+KdoQjmjjPuHkASu5DTFjXw= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.96.0 h1:y9QNvhQ0XjJOJid4jNlEliJQI4+AFdEaN6weB9jMWaY= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.96.0/go.mod h1:9xJFaECGKFH3U9ToFeE9f9/L5zw0fSY3jJCgHzFOBR0= github.com/openconfig/gnmi v0.0.0-20180912164834-33a1865c3029/go.mod h1:t+O9It+LKzfOAhKTT5O0ehDix+MTqbtT0T9t+7zzOvc= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/image-spec v1.1.0-rc4 h1:oOxKUJWnFC4YGHCCMNql1x4YaDfYBTS5Y4x/Cgeo1E0= -github.com/opencontainers/image-spec v1.1.0-rc4/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= +github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI= +github.com/opencontainers/image-spec v1.1.0-rc5/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= github.com/opencontainers/runc v1.1.12 h1:BOIssBaW1La0/qbNZHXOOa71dZfZEQOzW7dqQf3phss= @@ -1882,8 +1877,8 @@ github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTK github.com/pborman/getopt v0.0.0-20180811024354-2b5b3bfb099b/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o= 
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= -github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ= -github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4= +github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= +github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= github.com/percona/exporter_shared v0.7.4-0.20211108113423-8555cdbac68b h1:tPnodYuNto6iPkeBCKJKw2HLeEYCiRmN2cpcMzTs8W4= github.com/percona/exporter_shared v0.7.4-0.20211108113423-8555cdbac68b/go.mod h1:bweWrCdYX+iAONTNUNIIkXGDjGg8dbFL0VBxuUv0wus= github.com/percona/mongodb_exporter v0.39.1-0.20230706092307-28432707eb65 h1:SOB6SH1o//vt7uOWA47Nvahd0lVOLH1vjrBzEECuB+o= @@ -1898,8 +1893,8 @@ github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM= github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ= -github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= +github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI= github.com/pkg/browser 
v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= @@ -1910,7 +1905,6 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= -github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= @@ -1952,10 +1946,8 @@ github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66Id github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= -github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= -github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= +github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU= +github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k= 
github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= @@ -1963,8 +1955,8 @@ github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1: github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= -github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= +github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos= +github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8= github.com/prometheus/common v0.0.0-20180326160409-38c53a9f4bfc/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= @@ -1977,10 +1969,8 @@ github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9 github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.31.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.32.1/go.mod 
h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.35.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= -github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= -github.com/prometheus/common v0.46.0 h1:doXzt5ybi1HBKpsZOL0sSkaNHJJqkyfEWZGGqqScV0Y= -github.com/prometheus/common v0.46.0/go.mod h1:Tp0qkxpb9Jsg54QMe+EAmqXkSV7Evdy1BTn+g2pa/hQ= +github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE= +github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/consul_exporter v0.8.0 h1:2z3drFic65WFoHaJRKkmnJRRlBLmmxVqT8L9LO2yxAo= @@ -2004,18 +1994,18 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= github.com/prometheus/snmp_exporter v0.24.1 h1:AihTbJHurMo8bjtjJde8U+4gMEvpvYvT21Xbd4SzJgY= github.com/prometheus/snmp_exporter v0.24.1/go.mod h1:j6uIGkdR0DXvKn7HJtSkeDj//UY0sWmdd6XhvdBjln0= -github.com/prometheus/statsd_exporter v0.22.7/go.mod h1:N/TevpjkIh9ccs6nuzY3jQn9dFqnUakOjnEuMPJJJnI= 
github.com/prometheus/statsd_exporter v0.22.8 h1:Qo2D9ZzaQG+id9i5NYNGmbf1aa/KxKbB9aKfMS+Yib0= github.com/prometheus/statsd_exporter v0.22.8/go.mod h1:/DzwbTEaFTE0Ojz5PqcSk6+PFHOPWGxdXVr6yC8eFOM= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/relvacode/iso8601 v1.4.0 h1:GsInVSEJfkYuirYFxa80nMLbH2aydgZpIf52gYZXUJs= +github.com/relvacode/iso8601 v1.4.0/go.mod h1:FlNp+jz+TXpyRqgmM7tnzHHzBnz776kmAH2h3sZCn0I= github.com/remeh/sizedwaitgroup v1.0.0 h1:VNGGFwNo/R5+MJBf6yrsr110p0m4/OX4S3DCy7Kyl5E= github.com/remeh/sizedwaitgroup v1.0.0/go.mod h1:3j2R4OIe/SeS6YDhICBy22RWjJC5eNCJ1V+9+NVNYlo= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= @@ -2036,8 +2026,8 @@ github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= -github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= -github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal 
v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/rs/cors v1.10.1 h1:L0uuZVXIKlI1SShY2nhFfo44TYvDPQ1w4oFkUJNfhyo= github.com/rs/cors v1.10.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= @@ -2053,6 +2043,10 @@ github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIH github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= github.com/safchain/ethtool v0.3.0 h1:gimQJpsI6sc1yIqP/y8GYgiXn/NjgvpM0RNoWLVVmP0= github.com/safchain/ethtool v0.3.0/go.mod h1:SA9BwrgyAqNo7M+uaL6IYbxpm5wk3L7Mm6ocLW+CJUs= +github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= +github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= +github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= +github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= github.com/samber/lo v1.38.1 h1:j2XEAqXKb09Am4ebOg31SpvzUTTs6EN3VfgeLUhPdXM= github.com/samber/lo v1.38.1/go.mod h1:+m/ZKRl6ClXCE2Lgf3MsQlWfh4bn1bz6CXEOxnEXnEA= github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= @@ -2076,8 +2070,8 @@ github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/shirou/gopsutil v0.0.0-20181107111621-48177ef5f880/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shirou/gopsutil v2.20.9+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= -github.com/shirou/gopsutil/v3 v3.23.9 
h1:ZI5bWVeu2ep4/DIxB4U9okeYJ7zp/QLTO4auRb/ty/E= -github.com/shirou/gopsutil/v3 v3.23.9/go.mod h1:x/NWSb71eMcjFIO0vhyGW5nZ7oSIgVjrCnADckb85GA= +github.com/shirou/gopsutil/v3 v3.24.1 h1:R3t6ondCEvmARp3wxODhXMTLC/klMa87h2PHUw5m7QI= +github.com/shirou/gopsutil/v3 v3.24.1/go.mod h1:UU7a2MSBQa+kW1uuDq8DeEBS8kmrnQwsv2b5O513rwU= github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= @@ -2098,8 +2092,8 @@ github.com/siebenmann/go-kstat v0.0.0-20210513183136-173c9b0a9973/go.mod h1:G81a github.com/sijms/go-ora/v2 v2.7.6 h1:QyR1CKFxG+VVk2+LdHoHF4NxDSvcQ3deBXtZCrahSq4= github.com/sijms/go-ora/v2 v2.7.6/go.mod h1:EHxlY6x7y9HAsdfumurRfTd+v8NrEOTR3Xl4FWlH6xk= github.com/simonpasquier/klog-gokit v0.3.0/go.mod h1:+SUlDQNrhVtGt2FieaqNftzzk8P72zpWlACateWxA9k= -github.com/simonpasquier/klog-gokit/v3 v3.3.0 h1:HMzH999kO5gEgJTaWWO+xjncW5oycspcsBnjn9b853Q= -github.com/simonpasquier/klog-gokit/v3 v3.3.0/go.mod h1:uSbnWC3T7kt1dQyY9sjv0Ao1SehMAJdVnUNSKhjaDsg= +github.com/simonpasquier/klog-gokit/v3 v3.4.0 h1:2eD2INbzUHuGNynPP86BCB8H6Lwfp6wlkOcuyTr3VWM= +github.com/simonpasquier/klog-gokit/v3 v3.4.0/go.mod h1:RREVB5Cc6yYHsweRfhUyM1ZP+Odb8ehxLfY8jaiqvjg= github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= @@ -2131,28 +2125,28 @@ github.com/soniah/gosnmp v1.25.0/go.mod h1:8YvfZxH388NIIw2A+X5z2Oh97VcNhtmxDLt5Q github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= 
github.com/sony/gobreaker v0.5.0 h1:dRCvqm0P490vZPmy7ppEk2qCnCieBooFJ+YoXGYB+yg= github.com/sony/gobreaker v0.5.0/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= +github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= +github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM= -github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= -github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= +github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= +github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= -github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= -github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= 
-github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= +github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= +github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.16.0 h1:rGGH0XDZhdUOryiDWjmIvUSWpbNqisK8Wk0Vyefw8hc= -github.com/spf13/viper v1.16.0/go.mod h1:yg78JgCJcbrQOvV9YLXgkLaZqUidkY9K+Dd1FofRzQg= +github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ= +github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk= github.com/streadway/amqp v0.0.0-20180528204448-e5adc2ada8b8/go.mod h1:1WNBiOZtZQLpVAyu0iTduoJL9hEsMloAK5XWrtW0xdY= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= @@ -2163,8 +2157,9 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= 
-github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= @@ -2177,13 +2172,13 @@ github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1F github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/stvp/go-udp-testing v0.0.0-20201019212854-469649b16807 h1:LUsDduamlucuNnWcaTbXQ6aLILFcLXADpOzeEH3U+OI= github.com/stvp/go-udp-testing v0.0.0-20201019212854-469649b16807/go.mod h1:7jxmlfBCDBXRzr0eAQJ48XC1hBu1np4CS5+cHEYfwpc= -github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= -github.com/subosito/gotenv v1.4.2/go.mod 
h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62/go.mod h1:qUzPVlSj2UgxJkVbH0ZwuuiR46U8RBMDT5KLY78Ifpw= @@ -2191,12 +2186,12 @@ github.com/tedsuo/ifrit v0.0.0-20191009134036-9a97d0632f00/go.mod h1:eyZnKCc955u github.com/tencentcloud/tencentcloud-sdk-go v1.0.162 h1:8fDzz4GuVg4skjY2B0nMN7h6uN61EDVkuLyI2+qGHhI= github.com/tencentcloud/tencentcloud-sdk-go v1.0.162/go.mod h1:asUz5BPXxgoPGaRgZaVm1iGcUAuHyYUo1nXqKa83cvI= github.com/tent/http-link-go v0.0.0-20130702225549-ac974c61c2f9/go.mod h1:RHkNRtSLfOK7qBTHaeSX1D6BNpI3qw7NTxsmNr4RvN8= -github.com/testcontainers/testcontainers-go v0.25.0 h1:erH6cQjsaJrH+rJDU9qIf89KFdhK0Bft0aEZHlYC3Vs= -github.com/testcontainers/testcontainers-go v0.25.0/go.mod h1:4sC9SiJyzD1XFi59q8umTQYWxnkweEc5OjVtTUlJzqQ= +github.com/testcontainers/testcontainers-go v0.27.0 h1:IeIrJN4twonTDuMuBNQdKZ+K97yd7VrmNGu+lDpYcDk= +github.com/testcontainers/testcontainers-go v0.27.0/go.mod h1:+HgYZcd17GshBUZv9b+jKFJ198heWPQq3KQIp2+N+7U= github.com/testcontainers/testcontainers-go/modules/k3s v0.0.0-20230615142642-c175df34bd1d h1:KyYCHo9iBoQYw5AzcozD/77uNbFlRjTmMTA7QjSxHOQ= github.com/testcontainers/testcontainers-go/modules/k3s v0.0.0-20230615142642-c175df34bd1d/go.mod h1:Pa91ahCbzRB6d9FBi6UAjurTEm7WmyBVeuklLkwAKKs= -github.com/tg123/go-htpasswd v1.2.1 h1:i4wfsX1KvvkyoMiHZzjS0VzbAPWfxzI8INcZAKtutoU= -github.com/tg123/go-htpasswd v1.2.1/go.mod 
h1:erHp1B86KXdwQf1X5ZrLb7erXZnWueEQezb2dql4q58= +github.com/tg123/go-htpasswd v1.2.2 h1:tmNccDsQ+wYsoRfiONzIhDm5OkVHQzN3w4FOBAlN6BY= +github.com/tg123/go-htpasswd v1.2.2/go.mod h1:FcIrK0J+6zptgVwK1JDlqyajW/1B4PtuJ/FLWl7nx8A= github.com/tidwall/gjson v1.6.0/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls= github.com/tidwall/gjson v1.10.2 h1:APbLGOM0rrEkd8WBw9C24nllro4ajFuJu0Sc9hRz8Bo= github.com/tidwall/gjson v1.10.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= @@ -2244,8 +2239,8 @@ github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1 github.com/vjeantet/grok v1.0.0/go.mod h1:/FWYEVYekkm+2VjcFmO9PufDU5FgXHUz9oy2EGqmQBo= github.com/vmware/govmomi v0.18.0/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= github.com/vmware/govmomi v0.19.0/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= -github.com/vmware/govmomi v0.32.0 h1:Rsdi/HAX5Ebf9Byp/FvBir4sfM7yP5DBUeRlbC6vLBo= -github.com/vmware/govmomi v0.32.0/go.mod h1:JA63Pg0SgQcSjk+LuPzjh3rJdcWBo/ZNCIwbb1qf2/0= +github.com/vmware/govmomi v0.36.1 h1:+E/nlfteQ8JvC0xhuKAfpnMsuIeGeGj7rJwqENUcWm8= +github.com/vmware/govmomi v0.36.1/go.mod h1:mtGWtM+YhTADHlCgJBiskSRPOZRsN9MSjPzaZLte/oQ= github.com/vultr/govultr/v2 v2.17.2 h1:gej/rwr91Puc/tgh+j33p/BLR16UrIPnSr+AIwYWZQs= github.com/vultr/govultr/v2 v2.17.2/go.mod h1:ZFOKGWmgjytfyjeyAdhQlSWwTjh2ig+X49cAp50dzXI= github.com/wavefronthq/wavefront-sdk-go v0.9.2/go.mod h1:hQI6y8M9OtTCtc0xdwh+dCER4osxXdEAeCpacjpDZEU= @@ -2308,22 +2303,22 @@ github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0= github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.7 
h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ= -go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= +go.etcd.io/bbolt v1.3.8 h1:xs88BrvEv273UsB79e0hcVrlUWmS0a8upikMFhSyAtA= +go.etcd.io/bbolt v1.3.8/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= go.etcd.io/etcd/api/v3 v3.5.4/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A= -go.etcd.io/etcd/api/v3 v3.5.9 h1:4wSsluwyTbGGmyjJktOf3wFQoTBIURXHnq9n/G/JQHs= -go.etcd.io/etcd/api/v3 v3.5.9/go.mod h1:uyAal843mC8uUVSLWz6eHa/d971iDGnCRpmKd2Z+X8k= +go.etcd.io/etcd/api/v3 v3.5.10 h1:szRajuUUbLyppkhs9K6BRtjY37l66XQQmw7oZRANE4k= +go.etcd.io/etcd/api/v3 v3.5.10/go.mod h1:TidfmT4Uycad3NM/o25fG3J07odo4GBB9hoxaodFCtI= go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= go.etcd.io/etcd/client/pkg/v3 v3.5.4/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= -go.etcd.io/etcd/client/pkg/v3 v3.5.9 h1:oidDC4+YEuSIQbsR94rY9gur91UPL6DnxDCIYd2IGsE= -go.etcd.io/etcd/client/pkg/v3 v3.5.9/go.mod h1:y+CzeSmkMpWN2Jyu1npecjB9BBnABxGM4pN8cGuJeL4= +go.etcd.io/etcd/client/pkg/v3 v3.5.10 h1:kfYIdQftBnbAq8pUWFXfpuuxFSKzlmM5cSn76JByiT0= +go.etcd.io/etcd/client/pkg/v3 v3.5.10/go.mod h1:DYivfIviIuQ8+/lCq4vcxuseg2P2XbHygkKwFo9fc8U= go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= go.etcd.io/etcd/client/v3 v3.5.4/go.mod h1:ZaRkVgBZC+L+dLCjTcF1hRXpgZXQPOvnA/Ak/gq3kiY= -go.etcd.io/etcd/client/v3 v3.5.9 h1:r5xghnU7CwbUxD/fbUtRyJGaYNfDun8sp/gTr1hew6E= -go.etcd.io/etcd/client/v3 v3.5.9/go.mod h1:i/Eo5LrZ5IKqpbtpPDuaUnDOUv471oDg8cjQaUr2MbA= +go.etcd.io/etcd/client/v3 v3.5.10 h1:W9TXNZ+oB3MCd/8UjxHTWK5J9Nquw9fQBLJd5ne5/Ao= +go.etcd.io/etcd/client/v3 v3.5.10/go.mod 
h1:RVeBnDz2PUEZqTpgqwAtUd8nAPf5kjyFyND7P1VkOKc= go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng= go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8= @@ -2340,107 +2335,121 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/collector v0.87.0 h1:160HewHp+/wzr62BzWjQgIvdTtzpaYTlCnGVb8DYnM0= -go.opentelemetry.io/collector v0.87.0/go.mod h1:VsAXXIK0D1na+Ysoy1/GIx0GgkH8vQqA6zwosddFz7A= -go.opentelemetry.io/collector/component v0.87.0 h1:Q+lwM5WAa2x4a5lgyaF6SjFBpIij5gyjsoiv9KFG36A= -go.opentelemetry.io/collector/component v0.87.0/go.mod h1:LsfDQRkwJRHOSHNnM1/pdi/6EQNj41WpIxpZRqSdI0E= -go.opentelemetry.io/collector/config/configauth v0.87.0 h1:FufZLHvJ+VcAM2xi404TpuYnpO1Rmeq7XtHleQLavrs= -go.opentelemetry.io/collector/config/configauth v0.87.0/go.mod h1:xT8mIo1b57j0znSOssEFaJtE3rGw/kTZZucP5lEw6OU= -go.opentelemetry.io/collector/config/configcompression v0.87.0 h1:hWRT47RJbjbowDGQMXQO/dt/pzyYjMcf+rroW8b8fws= -go.opentelemetry.io/collector/config/configcompression v0.87.0/go.mod h1:LaavoxZsro5lL7qh1g9DMifG0qixWPEecW18Qr8bpag= -go.opentelemetry.io/collector/config/configgrpc v0.87.0 h1:5fH+ja4hLGoYww1RG+bpJVVAzdlAvrGiQjy7tEo3YJ0= -go.opentelemetry.io/collector/config/configgrpc v0.87.0/go.mod h1:0Iv6apeYihw6MKsC6p/rYLxLfO/9ZRmZ1GL0d4LxnII= -go.opentelemetry.io/collector/config/confighttp v0.87.0 h1:FOC4ArxbvJRiwABXsv/bSrRlD3m9nAEAACEYXmpNC+g= -go.opentelemetry.io/collector/config/confighttp v0.87.0/go.mod h1:Vt4DECSuhncd/bTKU3pB6MUjHwBKfPqiIkFg5fHJHIE= -go.opentelemetry.io/collector/config/confignet v0.87.0 
h1:ULV44732QN0wTCtSIdYG04I+6wjZWzOCme/J4pqKYWg= -go.opentelemetry.io/collector/config/confignet v0.87.0/go.mod h1:cpO8JYWGONaViOygKVw+Hd2UoBcn2cUiyi0WWeFTwJY= -go.opentelemetry.io/collector/config/configopaque v0.87.0 h1:+qqJG1oEzX4+/YNbgeaXW9YM0BPWSj5XCi5y2zZLhDY= -go.opentelemetry.io/collector/config/configopaque v0.87.0/go.mod h1:TPCHaU+QXiEV+JXbgyr6mSErTI9chwQyasDVMdJr3eY= -go.opentelemetry.io/collector/config/configtelemetry v0.87.0 h1:xUqayM9b41OvXkjU3p8RkUr8hUrCjfDUmO+oKhRNSwc= -go.opentelemetry.io/collector/config/configtelemetry v0.87.0/go.mod h1:+LAXM5WFMW/UbTlAuSs6L/W72WC+q8TBJt/6z39FPOU= -go.opentelemetry.io/collector/config/configtls v0.87.0 h1:EXa9Plr74+r9t2/59dTyjR3y53zqwigHN0dQsI8VGiQ= -go.opentelemetry.io/collector/config/configtls v0.87.0/go.mod h1:3UoeynehS/NNhg1Qbt3xQdgPyrkWnjBRLUG2Gw7BFFc= -go.opentelemetry.io/collector/config/internal v0.87.0 h1:wffyWbpanr2HFQaPPp5bG62KqJYlw5EdPxwR0iG+Lbo= -go.opentelemetry.io/collector/config/internal v0.87.0/go.mod h1:42VsQ/1kP2qnvzjNi+dfNP+KyCFRADejyrJ8m2GVL3M= -go.opentelemetry.io/collector/confmap v0.87.0 h1:LFnyDKIOMtlJm5EsdcFN2t0rcU/QLbS9QEs/awM2HOA= -go.opentelemetry.io/collector/confmap v0.87.0/go.mod h1:inqYRP70+bMrUwGGnuhcWyyufxyU3VQT6rl3/EX0f+g= -go.opentelemetry.io/collector/connector v0.87.0 h1:Y00shHpxBSxliE/liJex2JMdYpJxbakfCUbaXe9eVMU= -go.opentelemetry.io/collector/connector v0.87.0/go.mod h1:qk+c3IeAdRkpUjXLh3PqAnC8BkKuMF7EhA5GpGNu7AI= -go.opentelemetry.io/collector/consumer v0.87.0 h1:oR5XKZoVF/hwz0FnrYPaHcbbQazHifMsxpENMR7ivvo= -go.opentelemetry.io/collector/consumer v0.87.0/go.mod h1:lui5rg1byAT7QPbCY733StCDc/TPxS3hVNXKoVQ3LsI= -go.opentelemetry.io/collector/exporter v0.87.0 h1:DZ0QT2yp1qACmHMxs6W2ho5RPqdevCx9R/LFCxnxi9w= -go.opentelemetry.io/collector/exporter v0.87.0/go.mod h1:SGobdCR0xwQElJT2Sbofo7BprMlV8XeXdsNP9fsNaKY= -go.opentelemetry.io/collector/exporter/loggingexporter v0.87.0 h1:F/WkglGgCSHOFYjafYEAwD/qGpZ5HpawLMWu/Jcf0SE= -go.opentelemetry.io/collector/exporter/loggingexporter 
v0.87.0/go.mod h1:rYi0mKzgRH6xwsrYN9gb+WBccfoP1SpJ9U0xklrhV7g= -go.opentelemetry.io/collector/exporter/otlpexporter v0.87.0 h1:1seSC+OX1QnbpED0Kuo1DbWQSER+vy88yp4zxBubY4A= -go.opentelemetry.io/collector/exporter/otlpexporter v0.87.0/go.mod h1:Q4aS69GcAdcJLssnEd8ddt2rX97s/CkW/n1DdgdIaHQ= -go.opentelemetry.io/collector/exporter/otlphttpexporter v0.87.0 h1:EqexJl7mzozDw4KY9mzR14uij0QlB9zIg8CfNGJYNt0= -go.opentelemetry.io/collector/exporter/otlphttpexporter v0.87.0/go.mod h1:uwNO6qMa82a0EeokQx3YEiMl+R8HJulaDpUSS6T3pkg= -go.opentelemetry.io/collector/extension v0.87.0 h1:EMIaEequ5rjWzoid6vNImjQGVMfzbME+8JSa5XACYKs= -go.opentelemetry.io/collector/extension v0.87.0/go.mod h1:D3srNZC99QVTAdLNUVuqfmmgJge4sQHDrnt5XWscvxI= -go.opentelemetry.io/collector/extension/auth v0.87.0 h1:na1OumQSd5l+JvUiMr3oaiW6fuiDr7mEnydwQwmE+nk= -go.opentelemetry.io/collector/extension/auth v0.87.0/go.mod h1:b7T9VefuK1GzSp5z1yjbkAvTxpWvflUmYoawTcGGuOs= -go.opentelemetry.io/collector/extension/zpagesextension v0.87.0 h1:vSaCojdWMq34LDw2qR6To0PkSe4p+1BtP2Xr37PHH7w= -go.opentelemetry.io/collector/extension/zpagesextension v0.87.0/go.mod h1:aXIFi7aIGD2uQCJPCF8uRSPJEK0+jjiZfvOehNGA1ZU= -go.opentelemetry.io/collector/featuregate v1.0.0-rcv0016 h1:/6N9990tbjotvXgrXpV5AbaFiyxTdFEXDypGBHVDSQM= -go.opentelemetry.io/collector/featuregate v1.0.0-rcv0016/go.mod h1:fLmJMf1AoHttkF8p5oJAc4o5ZpHu8yO5XYJ7gbLCLzo= -go.opentelemetry.io/collector/pdata v1.0.0-rcv0016 h1:qCPXSQCoD3qeWFb1RuIks8fw9Atxpk78bmtVdi15KhE= -go.opentelemetry.io/collector/pdata v1.0.0-rcv0016/go.mod h1:OdN0alYOlYhHXu6BDlGehrZWgtBuiDsz/rlNeJeXiNg= -go.opentelemetry.io/collector/processor v0.87.0 h1:aUGtRyeQk0WgQwp2rZBvJ1j+6+WJO8XMb1kjtanIWo8= -go.opentelemetry.io/collector/processor v0.87.0/go.mod h1:FHqpqdm/uyjjhNQxXJBhvQDIwjnP01EW9M6t0xVaRR4= -go.opentelemetry.io/collector/processor/batchprocessor v0.87.0 h1:/a2yjC8XMg1j/9hnpDbxTKbG/AyWac2xsQSx0PmFz1M= -go.opentelemetry.io/collector/processor/batchprocessor v0.87.0/go.mod 
h1:uY8Lu7zFtNZC39ylu8bphgqO0c3VIqVdegKxXlHo9Po= -go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.87.0 h1:pWR4fPyKOBo0YWi745pai6ae7jFdlRvRiEg7VmtpGNw= -go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.87.0/go.mod h1:Id8f4KVl5p5Uzn7RlfFwufdaiINQTKILcTCLQFsSH6c= -go.opentelemetry.io/collector/receiver v0.87.0 h1:4HpA5Rxb1jcMywCB8y5aNTXiqSt3n7oaFLfQbAkSaWM= -go.opentelemetry.io/collector/receiver v0.87.0/go.mod h1:uApnlS81KGGfQJrzbCdBZWsB5DQJgcPTsYlb9CFdE3s= -go.opentelemetry.io/collector/receiver/otlpreceiver v0.87.0 h1:iXO30EKZwEP1TEuLlQjxVaeVeffDkdJqz9DuqjzME9c= -go.opentelemetry.io/collector/receiver/otlpreceiver v0.87.0/go.mod h1:1IE82wJuyGW0z0BeJ3A0SoPxsPlqf9aefCycbtuxUO0= -go.opentelemetry.io/collector/semconv v0.87.0 h1:BsG1jdLLRCBRlvUujk4QA86af7r/ZXnizczQpEs/gg8= -go.opentelemetry.io/collector/semconv v0.87.0/go.mod h1:j/8THcqVxFna1FpvA2zYIsUperEtOaRaqoLYIN4doWw= +go.opentelemetry.io/collector v0.96.0 h1:qXA3biNps8LPYYCTJwepGu58sW0XInmwnQbkkWZchIg= +go.opentelemetry.io/collector v0.96.0/go.mod h1:/i3zyRg23r7vloTLzKG/mRI2VkEt1Q4ARXbe3vKnAaE= +go.opentelemetry.io/collector/component v0.96.0 h1:O7F8F1YWOHNCqK5NH6vkGI6S1ObR4aPMFq3nHUxdWs0= +go.opentelemetry.io/collector/component v0.96.0/go.mod h1:HsiWaGHT+npm+c54iuUes1MpZJuGKZzS+ts2iaKt/Lo= +go.opentelemetry.io/collector/config/configauth v0.96.0 h1:nnRLtaPVafazVij60/Q6qL32WEWHOlPee5E+5D3pN4c= +go.opentelemetry.io/collector/config/configauth v0.96.0/go.mod h1:XABE3s1OiLzjhHv6R/eMOp8fYFweF6/Naa9NgDD+Ntg= +go.opentelemetry.io/collector/config/configcompression v0.96.0 h1:mbP0YbYTfbpovxcZE6JrBYmWg5G1Dozj7eOuLAdqcI4= +go.opentelemetry.io/collector/config/configcompression v0.96.0/go.mod h1:owL6s04LI1fPrNZvXiRm6o4B0jaxb3z/oFEcgrakFK4= +go.opentelemetry.io/collector/config/configgrpc v0.96.0 h1:FxCtsN8V4zYYq5wlSYAjBs3OEI1AbjfzmzSPkHYZKkY= +go.opentelemetry.io/collector/config/configgrpc v0.96.0/go.mod h1:uUxDCwvWvyf331boTH8/gZhUXXST2r1ps5+ZAvxZl4o= 
+go.opentelemetry.io/collector/config/confighttp v0.96.0 h1:/piTkhB+UhhkvHc2PmHBuZzvp0okWTGiL/kZIh+zMmQ= +go.opentelemetry.io/collector/config/confighttp v0.96.0/go.mod h1:KWac7J9mNFjtN4dQz8AUmFVBr7c2UOfo5OM7wfdPToI= +go.opentelemetry.io/collector/config/confignet v0.96.0 h1:ZUwziVVxWgcRMqukfKfdEjxfgmfhGsX6J3GEzF/Pupk= +go.opentelemetry.io/collector/config/confignet v0.96.0/go.mod h1:BVw5xkQ7TH2wH75cbph+dtOoxq1baWLuhdSYIAvuVu0= +go.opentelemetry.io/collector/config/configopaque v1.3.0 h1:J60RL/XxGmBF+OX2+Gx+yAo/p7YwjSsOOlPlo1yXotA= +go.opentelemetry.io/collector/config/configopaque v1.3.0/go.mod h1:+vgBSjB0aSA5SnYAbLlWAcfqgNsrX/65/8EjMKCBGyk= +go.opentelemetry.io/collector/config/configretry v0.96.0 h1:rdZqq/ddPCjZCYYuqDGxrC93uHzQWhX5MQ9tt5uMSpM= +go.opentelemetry.io/collector/config/configretry v0.96.0/go.mod h1:Nq7hp4nk+zeH0LYYsx348NHl02O89FnV45hcCCmqdtg= +go.opentelemetry.io/collector/config/configtelemetry v0.96.0 h1:Q9bSLPUzJUFG+P8eQ7W25Feko8yjdB7dK98V7hmUxCA= +go.opentelemetry.io/collector/config/configtelemetry v0.96.0/go.mod h1:tl8sI2RE3LSgJ0HjpadYpIwsKzw/CRA0nZUXLzMAZS0= +go.opentelemetry.io/collector/config/configtls v0.96.0 h1:SPsL0ZzmNscRtKYCECXfvEE8tB6BqNdnWAgB42KCPeE= +go.opentelemetry.io/collector/config/configtls v0.96.0/go.mod h1:/LHiDf3jMuEY+rXu3DMWBmArcf0DPIc3V0aKQeaTEdQ= +go.opentelemetry.io/collector/config/internal v0.96.0 h1:/HJtvjB9/XJRFs+g0XpRInRdUz0O7yeIbe0Av/Dg/TM= +go.opentelemetry.io/collector/config/internal v0.96.0/go.mod h1:74acJyU1E+bFidoy0tjTORZGttdjDYnKhkqGjao/bUA= +go.opentelemetry.io/collector/confmap v0.96.0 h1:415ELCfC8S3xjiNFLneDWJi6h7j7SUw8A8pZtINEQdI= +go.opentelemetry.io/collector/confmap v0.96.0/go.mod h1:q/dWHLvkk1vgvAF0l5dbgQSiPOmGwpv0FwcNaGpqsfM= +go.opentelemetry.io/collector/confmap/converter/expandconverter v0.96.0 h1:ISINaNKyTKIQQO5ijXXb2fagAmyZgndluqgF6Eccl50= +go.opentelemetry.io/collector/confmap/converter/expandconverter v0.96.0/go.mod h1:eBpkcWGSKntw8xzifdg9okqHZ0o3kIqHjfyGM3afPbc= 
+go.opentelemetry.io/collector/confmap/provider/envprovider v0.96.0 h1:ylJ12FL62H7liRCvxfvZQBSc0u4ggtTvjxML97jBwlE= +go.opentelemetry.io/collector/confmap/provider/envprovider v0.96.0/go.mod h1:vxIfEbehiq8JwlWqDgzWO14kl0K/g1jc/+J6LANnpj0= +go.opentelemetry.io/collector/confmap/provider/fileprovider v0.96.0 h1:1PrjESnmkJ231dCsO/n5SGGUPKc7J4qk4tYtvjoJbMs= +go.opentelemetry.io/collector/confmap/provider/fileprovider v0.96.0/go.mod h1:rJXmOz1Q9nTl3Mubxb2HvAy7KWoi3Re2ettuo0BFUVw= +go.opentelemetry.io/collector/confmap/provider/httpprovider v0.96.0 h1:ulbACw+gY1Wp9tiewDz3+w8lTfUdCT7YJ/qb//JqD7s= +go.opentelemetry.io/collector/confmap/provider/httpprovider v0.96.0/go.mod h1:QqSQiNy44Kw39cHIqCB7Zyq5f2SU1bX7AgmUhZblxRI= +go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.96.0 h1:Hb51q5K2F5IYPS0/fwixQr2Metzb8RnZO0lT25z0ehM= +go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.96.0/go.mod h1:mD+9d+cBvlAm7tgLEgfDbHBaLgr+9PIPJI8K5cMNU7o= +go.opentelemetry.io/collector/confmap/provider/yamlprovider v0.96.0 h1:/uuau+nbiLvw9Zan78ySruDr9S12iLPw5d9i6h8379I= +go.opentelemetry.io/collector/confmap/provider/yamlprovider v0.96.0/go.mod h1:NPK3XddOGCuNavzunXD94dP9jLU4BAMs4mI9pOW0N80= +go.opentelemetry.io/collector/connector v0.96.0 h1:wPrX7Yc70Xko7nFin5gYxe6xPCkV7mHcKeQ75Y0HsTU= +go.opentelemetry.io/collector/connector v0.96.0/go.mod h1:6laIl3khhccApCiWjPeupU9BKw6KRoqLDt0OHS2OIp0= +go.opentelemetry.io/collector/consumer v0.96.0 h1:JN4JHelp5EGMGoC2UVelTMG6hyZjgtgdLLt5eZfVynU= +go.opentelemetry.io/collector/consumer v0.96.0/go.mod h1:Vn+qzzKgekDFayCVV8peSH5Btx1xrt/bmzD9gTxgidQ= +go.opentelemetry.io/collector/exporter v0.96.0 h1:SmOSaP+zUNq0nl+BcllsCSsYePdUNIIUfW5sXKKaUlI= +go.opentelemetry.io/collector/exporter v0.96.0/go.mod h1:DcuGaxcINhOV2LgojDI56r3830cUtuCsNadINMIU23c= +go.opentelemetry.io/collector/exporter/loggingexporter v0.96.0 h1:fKHt4iTcD7C0utDzeww6ZYVlDYaC0dw9wtzVwLha4CM= +go.opentelemetry.io/collector/exporter/loggingexporter v0.96.0/go.mod 
h1:vpuKdiIQ6yjwZbKiiAs/MV8rZMKiQfPF55vX8UxO8fk= +go.opentelemetry.io/collector/exporter/otlpexporter v0.96.0 h1:vZEd10B/zj7WkBWSVegDkGOwv7FZhUwyk60E2zkYwL4= +go.opentelemetry.io/collector/exporter/otlpexporter v0.96.0/go.mod h1:tzJFwn1fR88rdGKkpvygMawu2Prc53bFhuKFk3EnpS8= +go.opentelemetry.io/collector/exporter/otlphttpexporter v0.96.0 h1:c7pBEGYxlsnPPfs4ECIr2E+mEh25OTV+v20tv3pCIyU= +go.opentelemetry.io/collector/exporter/otlphttpexporter v0.96.0/go.mod h1:wmDxflnOrp0JRmB6gPBtNMPFD80V62+tZDZ2ZQNj3Cs= +go.opentelemetry.io/collector/extension v0.96.0 h1:b02WX/2XxDf/PlqboYwWUSmiT2BXXWSntlnDlGiJuWw= +go.opentelemetry.io/collector/extension v0.96.0/go.mod h1:RrjDbQUCPKZmR9mfZ6kVQ0J8OfrcYnf09U+6ZyToV/Q= +go.opentelemetry.io/collector/extension/auth v0.96.0 h1:10ZSoVCF0WI8IYS+kD7lbdvbvOdfUBGEQ0c4G1mVuCU= +go.opentelemetry.io/collector/extension/auth v0.96.0/go.mod h1:UX0SpWMwRvzEaVr6fxP2CNooQ2JnuTEnTGYD8kCAjWc= +go.opentelemetry.io/collector/extension/zpagesextension v0.96.0 h1:eSjSBqWIN+OiEBgZhRq8B2LDmqZMgBRMNAF1oGfp4XI= +go.opentelemetry.io/collector/extension/zpagesextension v0.96.0/go.mod h1:wWBmrP4H/gEInjtCiwZHlDj5+MuWI8bLybCSIrNXoNE= +go.opentelemetry.io/collector/featuregate v1.3.0 h1:nrFSx+zfjdisjE9oCx25Aep3nJ9RaUjeE1qFL6eovoU= +go.opentelemetry.io/collector/featuregate v1.3.0/go.mod h1:mm8+xyQfgDmqhyegZRNIQmoKsNnDTwWKFLsdMoXAb7A= +go.opentelemetry.io/collector/pdata v1.3.0 h1:JRYN7tVHYFwmtQhIYbxWeiKSa2L1nCohyAs8sYqKFZo= +go.opentelemetry.io/collector/pdata v1.3.0/go.mod h1:t7W0Undtes53HODPdSujPLTnfSR5fzT+WpL+RTaaayo= +go.opentelemetry.io/collector/processor v0.96.0 h1:TGo7tLbLJo9tBZ9NNoSlB7xBP5osUXThKxCmg96gSko= +go.opentelemetry.io/collector/processor v0.96.0/go.mod h1:fvTTODSFY97D6Fc/iwBOL3outreBvZBlaHT2ciEWNZQ= +go.opentelemetry.io/collector/processor/batchprocessor v0.96.0 h1:5SbJUx9q4w05QSl7p1xOEdNLOmmPZDE9SXi0OST1SbI= +go.opentelemetry.io/collector/processor/batchprocessor v0.96.0/go.mod h1:NSlJS7EQ4zR4aisHAoKZEC2KP9LcSgMw8Yap2cCAO4I= 
+go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.96.0 h1:BOAg0hhFCSsuFsCXdwAb5tGgPHRG5D9bwsWERHRc1So= +go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.96.0/go.mod h1:TnvJD4MgxhHX0/y9lVtfeiDP6033/VyhM8FO2TT9klE= +go.opentelemetry.io/collector/receiver v0.96.0 h1:OrlcuyFCBQpbWNb2klzTdz1ZXMk0acRDh7fbaQtP4eo= +go.opentelemetry.io/collector/receiver v0.96.0/go.mod h1:fb5Vr2+tAkzB4qE6+lNaMsZwaeE8qZvG3IBdzK5hCRY= +go.opentelemetry.io/collector/receiver/otlpreceiver v0.96.0 h1:3tQG3+Bd5Ur/htNHTSHcwS25Cq+hLhC5+mti63x8NsY= +go.opentelemetry.io/collector/receiver/otlpreceiver v0.96.0/go.mod h1:LgauKnCOayfutm4Y+WN3AaqK3cM+R7o+v3uXsq/be64= +go.opentelemetry.io/collector/semconv v0.96.0 h1:DrZy8BpzJDnN2zFxXRj6BhfGYxNlqpFHBqyuS9fVHRY= +go.opentelemetry.io/collector/semconv v0.96.0/go.mod h1:zOm/U3pgMIWcvrcnPbR9Xx2HinoXj46ERMK8PUV9wrs= +go.opentelemetry.io/contrib/config v0.4.0 h1:Xb+ncYOqseLroMuBesGNRgVQolXcXOhMj7EhGwJCdHs= +go.opentelemetry.io/contrib/config v0.4.0/go.mod h1:drNk2xRqLWW4/amk6Uh1S+sDAJTc7bcEEN1GfJzj418= go.opentelemetry.io/contrib/instrumentation/github.com/gorilla/mux/otelmux v0.45.0 h1:CaagQrotQLgtDlHU6u9pE/Mf4mAwiLD8wrReIVt06lY= go.opentelemetry.io/contrib/instrumentation/github.com/gorilla/mux/otelmux v0.45.0/go.mod h1:LOjFy00/ZMyMYfKFPta6kZe2cDUc1sNo/qtv1pSORWA= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 h1:PzIubN4/sjByhDRHLviCjJuweBXWFZWhghjg7cS28+M= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0/go.mod h1:Ct6zzQEuGK3WpJs2n4dn+wfJYzd/+hNnxMRTWjGn30M= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 h1:x8Z78aZx8cOF0+Kkazoc7lwUNMGy0LrzEMxTm4BbTxg= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0/go.mod h1:62CPTSry9QZtOaSsE3tOzhx6LzDhHnXJ6xHeMNNiM6Q= -go.opentelemetry.io/contrib/propagators/b3 v1.19.0 h1:ulz44cpm6V5oAeg5Aw9HyqGFMS6XM7untlMEhD7YzzA= 
-go.opentelemetry.io/contrib/propagators/b3 v1.19.0/go.mod h1:OzCmE2IVS+asTI+odXQstRGVfXQ4bXv9nMBRK0nNyqQ= -go.opentelemetry.io/contrib/zpages v0.45.0 h1:jIwHHGoWzJoZdbIUtWdErjL85Gni6BignnAFqDtMRL4= -go.opentelemetry.io/contrib/zpages v0.45.0/go.mod h1:4mIdA5hqH6hEx9sZgV50qKfQO8aIYolUZboHmz+G7vw= -go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc= -go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= -go.opentelemetry.io/otel/bridge/opencensus v0.42.0 h1:QvC+bcZkWMphWPiVqRQygMj6M0/3TOuJEO+erRA7kI8= -go.opentelemetry.io/otel/bridge/opencensus v0.42.0/go.mod h1:XJojP7g5DqYdiyArix/H9i1XzPPlIUc9dGLKtF9copI= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.42.0 h1:ZtfnDL+tUrs1F0Pzfwbg2d59Gru9NCH3bgSHBM6LDwU= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.42.0/go.mod h1:hG4Fj/y8TR/tlEDREo8tWstl9fO9gcFkn4xrx0Io8xU= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.42.0 h1:NmnYCiR0qNufkldjVvyQfZTHSdzeHoZ41zggMsdMcLM= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.42.0/go.mod h1:UVAO61+umUsHLtYb8KXXRoHtxUkdOPkYidzW3gipRLQ= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.42.0 h1:wNMDy/LVGLj2h3p6zg4d0gypKfWKSWI14E1C4smOgl8= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.42.0/go.mod h1:YfbDdXAAkemWJK3H/DshvlrxqFB2rtW4rY6ky/3x/H0= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 h1:3d+S281UTjM+AbF31XSOYn1qXn3BgIdWl8HNEpx08Jk= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg= 
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0/go.mod h1:oVdCUtjq9MK9BlS7TtucsQwUcXcymNiEDjgDD2jMtZU= -go.opentelemetry.io/otel/exporters/prometheus v0.42.0 h1:jwV9iQdvp38fxXi8ZC+lNpxjK16MRcZlpDYvbuO1FiA= -go.opentelemetry.io/otel/exporters/prometheus v0.42.0/go.mod h1:f3bYiqNqhoPxkvI2LrXqQVC546K7BuRDL/kKuxkujhA= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v0.42.0 h1:4jJuoeOo9W6hZnz+r046fyoH5kykZPRvKfUXJVfMpB0= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v0.42.0/go.mod h1:/MtYTE1SfC2QIcE0bDot6fIX+h+WvXjgTqgn9P0LNPE= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.19.0 h1:Nw7Dv4lwvGrI68+wULbcq7su9K2cebeCUrDjVrUJHxM= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.19.0/go.mod h1:1MsF6Y7gTqosgoZvHlzcaaM8DIMNZgJh87ykokoNH7Y= -go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4= -go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= -go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8= -go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= -go.opentelemetry.io/otel/sdk/metric v1.20.0 h1:5eD40l/H2CqdKmbSV7iht2KMK0faAIL2pVYzJOWobGk= -go.opentelemetry.io/otel/sdk/metric v1.20.0/go.mod h1:AGvpC+YF/jblITiafMTYgvRBUiwi9hZf0EYE2E5XlS8= -go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc= -go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0 h1:UNQQKPfTDe1J81ViolILjTKPr9WetKW6uei2hFgJmFs= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0/go.mod h1:r9vWsPS/3AQItv3OSlEJ/E4mbrhUbbw18meOjArPtKQ= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp 
v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= +go.opentelemetry.io/contrib/propagators/b3 v1.24.0 h1:n4xwCdTx3pZqZs2CjS/CUZAs03y3dZcGhC/FepKtEUY= +go.opentelemetry.io/contrib/propagators/b3 v1.24.0/go.mod h1:k5wRxKRU2uXx2F8uNJ4TaonuEO/V7/5xoz7kdsDACT8= +go.opentelemetry.io/contrib/zpages v0.49.0 h1:Wk217PkNBxcKWnIQpwtbZZE286K4ZY9uajnM5woSeLU= +go.opentelemetry.io/contrib/zpages v0.49.0/go.mod h1:6alLi5mmkZWbAtZMRPd1ffIgkTcsU9OTHQF2NbSOhrQ= +go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= +go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= +go.opentelemetry.io/otel/bridge/opencensus v1.24.0 h1:Vlhy5ee5k5R0zASpH+9AgHiJH7xnKACI3XopO1tUZfY= +go.opentelemetry.io/otel/bridge/opencensus v1.24.0/go.mod h1:jRjVXV/X38jyrnHtvMGN8+9cejZB21JvXAAvooF2s+Q= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.24.0 h1:f2jriWfOdldanBwS9jNBdeOKAQN7b4ugAMaNu1/1k9g= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.24.0/go.mod h1:B+bcQI1yTY+N0vqMpoZbEN7+XU4tNM0DmUiOwebFJWI= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.24.0 h1:mM8nKi6/iFQ0iqst80wDHU2ge198Ye/TfN0WBS5U24Y= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.24.0/go.mod h1:0PrIIzDteLSmNyxqcGYRL4mDIo8OTuBAOI/Bn1URxac= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.24.0 h1:t6wl9SPayj+c7lEIFgm4ooDBZVb01IhLB4InpomhRw8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.24.0/go.mod h1:iSDOcsnSA5INXzZtwaBPrKp/lWu/V14Dd+llD0oI2EA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.24.0 h1:Mw5xcxMwlqoJd97vwPxA8isEaIoxsta9/Q51+TTJLGE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.24.0/go.mod h1:CQNu9bj7o7mC6U7+CA/schKEYakYXWr79ucDHTMGhCM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0 h1:Xw8U6u2f8DK2XAkGRFV7BBLENgnTGX9i4rQRxJf+/vs= 
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0/go.mod h1:6KW1Fm6R/s6Z3PGXwSJN2K4eT6wQB3vXX6CVnYX9NmM= +go.opentelemetry.io/otel/exporters/prometheus v0.46.0 h1:I8WIFXR351FoLJYuloU4EgXbtNX2URfU/85pUPheIEQ= +go.opentelemetry.io/otel/exporters/prometheus v0.46.0/go.mod h1:ztwVUHe5DTR/1v7PeuGRnU5Bbd4QKYwApWmuutKsJSs= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.24.0 h1:JYE2HM7pZbOt5Jhk8ndWZTUWYOVift2cHjXVMkPdmdc= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.24.0/go.mod h1:yMb/8c6hVsnma0RpsBMNo0fEiQKeclawtgaIaOp2MLY= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.24.0 h1:s0PHtIkN+3xrbDOpt2M8OTG92cWqUESvzh2MxiR5xY8= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.24.0/go.mod h1:hZlFbDbRt++MMPCCfSJfmhkGIWnX1h3XjkfxZUjLrIA= +go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= +go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco= +go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw= +go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg= +go.opentelemetry.io/otel/sdk/metric v1.24.0 h1:yyMQrPzF+k88/DbH7o4FMAs80puqd+9osbiBrJrz/w8= +go.opentelemetry.io/otel/sdk/metric v1.24.0/go.mod h1:I6Y5FjH6rvEnTTAYQz3Mmv2kl6Ek5IIrmwTLqMrrOE0= +go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= +go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= -go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= +go.opentelemetry.io/proto/otlp v1.1.0 h1:2Di21piLrCqJ3U3eXGCTPHE9R8Nh+0uglSnOyxikMeI= +go.opentelemetry.io/proto/otlp v1.1.0/go.mod h1:GpBHCBWiqvVLDqmHZsoMM3C5ySeKTC7ej/RNTae6MdY= 
go.starlark.net v0.0.0-20200901195727-6e684ef5eeee/go.mod h1:f0znQkUKRrkk36XxWbGjMqQM8wGv/xHBVE2qc3B5oFU= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= @@ -2451,8 +2460,8 @@ go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= -go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= -go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= @@ -2465,8 +2474,8 @@ go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= -go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= -go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= go4.org/netipx v0.0.0-20230125063823-8449b0a6169f h1:ketMxHg+vWm3yccyYiq+uK8D3fRmna2Fcj+awpQp84s= go4.org/netipx v0.0.0-20230125063823-8449b0a6169f/go.mod 
h1:tgPU4N2u9RByaTN3NC2p9xOzyFpte4jYwsIIRF7XlSc= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -2495,7 +2504,6 @@ golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= @@ -2526,8 +2534,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20231206192017-f3f8817b8deb h1:c0vyKkb6yr3KR7jEfJaOSv4lG7xPkbN6r52aJz1d8a8= -golang.org/x/exp v0.0.0-20231206192017-f3f8817b8deb/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI= +golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc h1:ao2WRsKSzW6KuUY9IWPwWahcHCgR0s52IfwutMfEbdM= +golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= 
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -2623,10 +2631,7 @@ golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= @@ -2651,9 +2656,8 @@ golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= -golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= 
+golang.org/x/oauth2 v0.17.0 h1:6m3ZPmLEFdVxKKWnKq4VqZ60gutO35zm+zrAHVmHyDQ= +golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -2666,11 +2670,10 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= -golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -2759,7 +2762,6 @@ golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys 
v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -2776,11 +2778,9 @@ golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211031064116-611d5d643895/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220708085239-5a0f0661e09d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -2795,14 +2795,13 @@ golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= @@ -2907,7 +2906,6 @@ golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= 
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= @@ -2916,8 +2914,8 @@ golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.16.0 h1:GO788SKMRunPIBCXiQyo2AaexLstOrVhuAL5YwsckQM= -golang.org/x/tools v0.16.0/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= +golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA= +golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -2968,8 +2966,8 @@ google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00 google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= -google.golang.org/api v0.152.0 h1:t0r1vPnfMc260S2Ci+en7kfCZaLOPs5KI0sVV/6jZrY= -google.golang.org/api v0.152.0/go.mod h1:3qNJX5eOmhiWYc67jRA/3GsDw97UFb5ivv7Y2PrriAY= +google.golang.org/api v0.155.0 h1:vBmGhCYs0djJttDNynWo44zosHlPvHmA0XiN2zP2DtA= +google.golang.org/api v0.155.0/go.mod h1:GI5qK5f40kCpHfPn6+YzGAByIKWv8ujFnmoWm7Igduk= google.golang.org/appengine v1.1.0/go.mod 
h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -3021,9 +3019,7 @@ google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -3043,12 +3039,12 @@ google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEc google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20231212172506-995d672761c0 h1:YJ5pD9rF8o9Qtta0Cmy9rdBwkSjrTCT6XTiUQVOtIos= -google.golang.org/genproto 
v0.0.0-20231212172506-995d672761c0/go.mod h1:l/k7rMz0vFTBPy+tFSGvXEd3z+BcoG1k7EHbqm+YBsY= -google.golang.org/genproto/googleapis/api v0.0.0-20231211222908-989df2bf70f3 h1:EWIeHfGuUf00zrVZGEgYFxok7plSAXBGcH7NNdMAWvA= -google.golang.org/genproto/googleapis/api v0.0.0-20231211222908-989df2bf70f3/go.mod h1:k2dtGpRrbsSyKcNPKKI5sstZkrNCZwpU/ns96JoHbGg= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917 h1:6G8oQ016D88m1xAKljMlBOOGWDZkes4kMhgGFlf8WcQ= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917/go.mod h1:xtjpI3tXFPP051KaWnhvxkiubL/6dJ18vLVf7q2pTOU= +google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ= +google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro= +google.golang.org/genproto/googleapis/api v0.0.0-20240125205218-1f4bbc51befe h1:0poefMBYvYbs7g5UkjS6HcxBPaTRAmznle9jnxYoAI8= +google.golang.org/genproto/googleapis/api v0.0.0-20240125205218-1f4bbc51befe/go.mod h1:4jWUdICTdgc3Ibxmr8nAJiiLHwQBY0UI0XZcEMaFKaA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240125205218-1f4bbc51befe h1:bQnxqljG/wqi4NTXu2+DJ3n7APcEA882QZ1JvhQAq9o= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240125205218-1f4bbc51befe/go.mod h1:PAREbraiVEVGVdTZsVWjSbbTtSyGbAgIIvni8a8CD5s= google.golang.org/grpc v0.0.0-20180920234847-8997b5fa0873/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= @@ -3085,8 +3081,8 @@ google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnD google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= 
-google.golang.org/grpc v1.61.0 h1:TOvOcuXn30kRao+gfcvsebNEa5iZIiLkisYEkf7R7o0= -google.golang.org/grpc v1.61.0/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs= +google.golang.org/grpc v1.62.0 h1:HQKZ/fa1bXkX1oFOvSjmZEUL8wLSaZTjCcLAlmZRtdk= +google.golang.org/grpc v1.62.0/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -3102,7 +3098,6 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= @@ -3178,22 +3173,22 @@ howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g= k8s.io/api v0.0.0-20180806132203-61b11ee65332/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= k8s.io/api v0.0.0-20190325185214-7544f9db76f6/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= k8s.io/api v0.21.1/go.mod h1:FstGROTmsSHBarKc8bylzXih8BLNYTiS3TZcsoEDg2s= -k8s.io/api v0.28.3 h1:Gj1HtbSdB4P08C8rs9AR94MfSGpRhJgsS+GF9V26xMM= -k8s.io/api v0.28.3/go.mod h1:MRCV/jr1dW87/qJnZ57U5Pak65LGmQVkKTzf3AtKFHc= -k8s.io/apiextensions-apiserver v0.28.0 
h1:CszgmBL8CizEnj4sj7/PtLGey6Na3YgWyGCPONv7E9E= -k8s.io/apiextensions-apiserver v0.28.0/go.mod h1:uRdYiwIuu0SyqJKriKmqEN2jThIJPhVmOWETm8ud1VE= +k8s.io/api v0.29.2 h1:hBC7B9+MU+ptchxEqTNW2DkUosJpp1P+Wn6YncZ474A= +k8s.io/api v0.29.2/go.mod h1:sdIaaKuU7P44aoyyLlikSLayT6Vb7bvJNCX105xZXY0= +k8s.io/apiextensions-apiserver v0.29.0 h1:0VuspFG7Hj+SxyF/Z/2T0uFbI5gb5LRgEyUVE3Q4lV0= +k8s.io/apiextensions-apiserver v0.29.0/go.mod h1:TKmpy3bTS0mr9pylH0nOt/QzQRrW7/h7yLdRForMZwc= k8s.io/apimachinery v0.0.0-20180821005732-488889b0007f/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= k8s.io/apimachinery v0.0.0-20190223001710-c182ff3b9841/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= k8s.io/apimachinery v0.17.1/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= k8s.io/apimachinery v0.21.1/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY= -k8s.io/apimachinery v0.28.3 h1:B1wYx8txOaCQG0HmYF6nbpU8dg6HvA06x5tEffvOe7A= -k8s.io/apimachinery v0.28.3/go.mod h1:uQTKmIqs+rAYaq+DFaoD2X7pcjLOqbQX2AOiO0nIpb8= +k8s.io/apimachinery v0.29.2 h1:EWGpfJ856oj11C52NRCHuU7rFDwxev48z+6DSlGNsV8= +k8s.io/apimachinery v0.29.2/go.mod h1:6HVkd1FwxIagpYrHSwJlQqZI3G9LfYWRPAkUvLnXTKU= k8s.io/client-go v0.21.1/go.mod h1:/kEw4RgW+3xnBGzvp9IWxKSNA+lXn3A7AuH3gdOAzLs= -k8s.io/client-go v0.28.3 h1:2OqNb72ZuTZPKCl+4gTKvqao0AMOl9f3o2ijbAj3LI4= -k8s.io/client-go v0.28.3/go.mod h1:LTykbBp9gsA7SwqirlCXBWtK0guzfhpoW4qSm7i9dxo= +k8s.io/client-go v0.29.2 h1:FEg85el1TeZp+/vYJM7hkDlSTFZ+c5nnK44DJ4FyoRg= +k8s.io/client-go v0.29.2/go.mod h1:knlvFZE58VpqbQpJNbCbctTVXcd35mMyAAwBdpt4jrA= k8s.io/code-generator v0.21.1/go.mod h1:hUlps5+9QaTrKx+jiM4rmq7YmH8wPOIko64uZCHDh6Q= -k8s.io/component-base v0.28.1 h1:LA4AujMlK2mr0tZbQDZkjWbdhTV5bRyEyAFe0TJxlWg= -k8s.io/component-base v0.28.1/go.mod h1:jI11OyhbX21Qtbav7JkhehyBsIRfnO8oEgoAR12ArIU= +k8s.io/component-base v0.29.0 h1:T7rjd5wvLnPBV1vC4zWd/iWRbV8Mdxs+nGaoaFzGw3s= +k8s.io/component-base v0.29.0/go.mod h1:sADonFTQ9Zc9yFLghpDpmNXEdHyQmFIGbiuZbqAXQ1M= k8s.io/gengo 
v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= @@ -3202,8 +3197,8 @@ k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iL k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20240102154912-e7106e64919e h1:eQ/4ljkx21sObifjzXwlPKpdGLrCfRziVtos3ofG/sQ= +k8s.io/utils v0.0.0-20240102154912-e7106e64919e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= modernc.org/httpfs v1.0.0/go.mod h1:BSkfoMUcahSijQD5J/Vu4UMOxzmEf5SNRwyXC4PJBEw= modernc.org/libc v1.3.1/go.mod h1:f8sp9GAfEyGYh3lsRIKtBh/XwACdFvGznxm6GJmQvXk= modernc.org/mathutil v1.1.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= @@ -3214,17 +3209,17 @@ rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8 rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/controller-runtime v0.16.2 h1:mwXAVuEk3EQf478PQwQ48zGOXvW27UJc8NHktQVuIPU= -sigs.k8s.io/controller-runtime v0.16.2/go.mod h1:vpMu3LpI5sYWtujJOa2uPK61nB5rbwlN7BAB8aSLvGU= +sigs.k8s.io/controller-runtime v0.17.2 h1:FwHwD1CTUemg0pW2otk7/U5/i5m2ymzvOXdbeGOUvw0= +sigs.k8s.io/controller-runtime 
v0.17.2/go.mod h1:+MngTvIQQQhfXtwfdGw/UOQ/aIaqsYywfCINOtwMO/s= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.3.0 h1:UZbZAZfX0wV2zr7YZorDz6GXROfDFj6LvqCRm4VUVKk= -sigs.k8s.io/structured-merge-diff/v4 v4.3.0/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= -sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= -sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= diff --git a/internal/component/otelcol/auth/auth.go b/internal/component/otelcol/auth/auth.go index f824a92acd76..10251a9b4d53 100644 --- a/internal/component/otelcol/auth/auth.go +++ b/internal/component/otelcol/auth/auth.go @@ -140,9 +140,7 @@ func (a *Auth) Update(args component.Arguments) error { TracerProvider: a.opts.Tracer, MeterProvider: metric.NewMeterProvider(metric.WithReader(promExporter)), - ReportComponentStatus: 
func(*otelcomponent.StatusEvent) error { - return nil - }, + ReportStatus: func(*otelcomponent.StatusEvent) {}, }, BuildInfo: otelcomponent.BuildInfo{ diff --git a/internal/component/otelcol/config_compression.go b/internal/component/otelcol/config_compression.go index 3639f9654a12..541b7ab497cd 100644 --- a/internal/component/otelcol/config_compression.go +++ b/internal/component/otelcol/config_compression.go @@ -37,18 +37,18 @@ func (ct *CompressionType) UnmarshalText(in []byte) error { } } -var compressionMappings = map[CompressionType]configcompression.CompressionType{ - CompressionTypeGzip: configcompression.Gzip, - CompressionTypeZlib: configcompression.Zlib, - CompressionTypeDeflate: configcompression.Deflate, - CompressionTypeSnappy: configcompression.Snappy, - CompressionTypeZstd: configcompression.Zstd, - CompressionTypeNone: configcompression.CompressionType("none"), - CompressionTypeEmpty: configcompression.CompressionType(""), +var compressionMappings = map[CompressionType]configcompression.Type{ + CompressionTypeGzip: configcompression.TypeGzip, + CompressionTypeZlib: configcompression.TypeZlib, + CompressionTypeDeflate: configcompression.TypeDeflate, + CompressionTypeSnappy: configcompression.TypeSnappy, + CompressionTypeZstd: configcompression.TypeZstd, + CompressionTypeNone: configcompression.Type("none"), + CompressionTypeEmpty: configcompression.Type(""), } // Convert converts ct into the upstream type. 
-func (ct CompressionType) Convert() configcompression.CompressionType { +func (ct CompressionType) Convert() configcompression.Type { upstream, ok := compressionMappings[ct] if !ok { // This line should never hit unless compressionMappings wasn't updated diff --git a/internal/component/otelcol/config_grpc.go b/internal/component/otelcol/config_grpc.go index da6cca47edfb..667c3a54c9c0 100644 --- a/internal/component/otelcol/config_grpc.go +++ b/internal/component/otelcol/config_grpc.go @@ -42,13 +42,13 @@ type GRPCServerArguments struct { } // Convert converts args into the upstream type. -func (args *GRPCServerArguments) Convert() *otelconfiggrpc.GRPCServerSettings { +func (args *GRPCServerArguments) Convert() *otelconfiggrpc.ServerConfig { if args == nil { return nil } - return &otelconfiggrpc.GRPCServerSettings{ - NetAddr: confignet.NetAddr{ + return &otelconfiggrpc.ServerConfig{ + NetAddr: confignet.AddrConfig{ Endpoint: args.Endpoint, Transport: args.Transport, }, @@ -154,7 +154,7 @@ type GRPCClientArguments struct { } // Convert converts args into the upstream type. -func (args *GRPCClientArguments) Convert() *otelconfiggrpc.GRPCClientSettings { +func (args *GRPCClientArguments) Convert() *otelconfiggrpc.ClientConfig { if args == nil { return nil } @@ -176,7 +176,7 @@ func (args *GRPCClientArguments) Convert() *otelconfiggrpc.GRPCClientSettings { balancerName = DefaultBalancerName } - return &otelconfiggrpc.GRPCClientSettings{ + return &otelconfiggrpc.ClientConfig{ Endpoint: args.Endpoint, Compression: args.Compression.Convert(), diff --git a/internal/component/otelcol/config_http.go b/internal/component/otelcol/config_http.go index f94da5cd84a4..2c9fad86d47c 100644 --- a/internal/component/otelcol/config_http.go +++ b/internal/component/otelcol/config_http.go @@ -34,12 +34,12 @@ type HTTPServerArguments struct { } // Convert converts args into the upstream type. 
-func (args *HTTPServerArguments) Convert() *otelconfighttp.HTTPServerSettings { +func (args *HTTPServerArguments) Convert() *otelconfighttp.ServerConfig { if args == nil { return nil } - return &otelconfighttp.HTTPServerSettings{ + return &otelconfighttp.ServerConfig{ Endpoint: args.Endpoint, TLSSetting: args.TLS.Convert(), CORS: args.CORS.Convert(), @@ -58,12 +58,12 @@ type CORSArguments struct { } // Convert converts args into the upstream type. -func (args *CORSArguments) Convert() *otelconfighttp.CORSSettings { +func (args *CORSArguments) Convert() *otelconfighttp.CORSConfig { if args == nil { return nil } - return &otelconfighttp.CORSSettings{ + return &otelconfighttp.CORSConfig{ AllowedOrigins: args.AllowedOrigins, AllowedHeaders: args.AllowedHeaders, @@ -85,11 +85,13 @@ type HTTPClientArguments struct { Timeout time.Duration `river:"timeout,attr,optional"` Headers map[string]string `river:"headers,attr,optional"` // CustomRoundTripper func(next http.RoundTripper) (http.RoundTripper, error) TODO (@tpaschalis) - MaxIdleConns *int `river:"max_idle_conns,attr,optional"` - MaxIdleConnsPerHost *int `river:"max_idle_conns_per_host,attr,optional"` - MaxConnsPerHost *int `river:"max_conns_per_host,attr,optional"` - IdleConnTimeout *time.Duration `river:"idle_conn_timeout,attr,optional"` - DisableKeepAlives bool `river:"disable_keep_alives,attr,optional"` + MaxIdleConns *int `river:"max_idle_conns,attr,optional"` + MaxIdleConnsPerHost *int `river:"max_idle_conns_per_host,attr,optional"` + MaxConnsPerHost *int `river:"max_conns_per_host,attr,optional"` + IdleConnTimeout *time.Duration `river:"idle_conn_timeout,attr,optional"` + DisableKeepAlives bool `river:"disable_keep_alives,attr,optional"` + HTTP2ReadIdleTimeout time.Duration `river:"http2_read_idle_timeout,attr,optional"` + HTTP2PingTimeout time.Duration `river:"http2_ping_timeout,attr,optional"` // Auth is a binding to an otelcol.auth.* component extension which handles // authentication. 
@@ -97,7 +99,7 @@ type HTTPClientArguments struct { } // Convert converts args into the upstream type. -func (args *HTTPClientArguments) Convert() *otelconfighttp.HTTPClientSettings { +func (args *HTTPClientArguments) Convert() *otelconfighttp.ClientConfig { if args == nil { return nil } @@ -113,7 +115,7 @@ func (args *HTTPClientArguments) Convert() *otelconfighttp.HTTPClientSettings { opaqueHeaders[headerName] = configopaque.String(headerVal) } - return &otelconfighttp.HTTPClientSettings{ + return &otelconfighttp.ClientConfig{ Endpoint: args.Endpoint, Compression: args.Compression.Convert(), @@ -125,10 +127,13 @@ func (args *HTTPClientArguments) Convert() *otelconfighttp.HTTPClientSettings { Timeout: args.Timeout, Headers: opaqueHeaders, // CustomRoundTripper: func(http.RoundTripper) (http.RoundTripper, error) { panic("not implemented") }, TODO (@tpaschalis) - MaxIdleConns: args.MaxIdleConns, - MaxIdleConnsPerHost: args.MaxIdleConnsPerHost, - MaxConnsPerHost: args.MaxConnsPerHost, - IdleConnTimeout: args.IdleConnTimeout, + MaxIdleConns: args.MaxIdleConns, + MaxIdleConnsPerHost: args.MaxIdleConnsPerHost, + MaxConnsPerHost: args.MaxConnsPerHost, + IdleConnTimeout: args.IdleConnTimeout, + DisableKeepAlives: args.DisableKeepAlives, + HTTP2ReadIdleTimeout: args.HTTP2ReadIdleTimeout, + HTTP2PingTimeout: args.HTTP2PingTimeout, Auth: auth, } diff --git a/internal/component/otelcol/config_retry.go b/internal/component/otelcol/config_retry.go index 12a2ffea85b9..1a7365325a04 100644 --- a/internal/component/otelcol/config_retry.go +++ b/internal/component/otelcol/config_retry.go @@ -5,7 +5,7 @@ import ( "time" "github.com/grafana/river" - otelexporterhelper "go.opentelemetry.io/collector/exporter/exporterhelper" + "go.opentelemetry.io/collector/config/configretry" ) // RetryArguments holds shared settings for components which can retry @@ -50,12 +50,12 @@ func (args *RetryArguments) Validate() error { } // Convert converts args into the upstream type. 
-func (args *RetryArguments) Convert() *otelexporterhelper.RetrySettings { +func (args *RetryArguments) Convert() *configretry.BackOffConfig { if args == nil { return nil } - return &otelexporterhelper.RetrySettings{ + return &configretry.BackOffConfig{ Enabled: args.Enabled, InitialInterval: args.InitialInterval, RandomizationFactor: args.RandomizationFactor, diff --git a/internal/component/otelcol/config_tls.go b/internal/component/otelcol/config_tls.go index 58e02ad68739..6aaa5a35b070 100644 --- a/internal/component/otelcol/config_tls.go +++ b/internal/component/otelcol/config_tls.go @@ -54,15 +54,17 @@ func (args *TLSClientArguments) Convert() *otelconfigtls.TLSClientSetting { } type TLSSetting struct { - CA string `river:"ca_pem,attr,optional"` - CAFile string `river:"ca_file,attr,optional"` - Cert string `river:"cert_pem,attr,optional"` - CertFile string `river:"cert_file,attr,optional"` - Key rivertypes.Secret `river:"key_pem,attr,optional"` - KeyFile string `river:"key_file,attr,optional"` - MinVersion string `river:"min_version,attr,optional"` - MaxVersion string `river:"max_version,attr,optional"` - ReloadInterval time.Duration `river:"reload_interval,attr,optional"` + CA string `river:"ca_pem,attr,optional"` + CAFile string `river:"ca_file,attr,optional"` + Cert string `river:"cert_pem,attr,optional"` + CertFile string `river:"cert_file,attr,optional"` + Key rivertypes.Secret `river:"key_pem,attr,optional"` + KeyFile string `river:"key_file,attr,optional"` + MinVersion string `river:"min_version,attr,optional"` + MaxVersion string `river:"max_version,attr,optional"` + ReloadInterval time.Duration `river:"reload_interval,attr,optional"` + CipherSuites []string `river:"cipher_suites,attr,optional"` + IncludeSystemCACertsPool bool `river:"include_system_ca_certs_pool,attr,optional"` } func (args *TLSSetting) Convert() *otelconfigtls.TLSSetting { @@ -71,15 +73,17 @@ func (args *TLSSetting) Convert() *otelconfigtls.TLSSetting { } return 
&otelconfigtls.TLSSetting{ - CAPem: configopaque.String(args.CA), - CAFile: args.CAFile, - CertPem: configopaque.String(args.Cert), - CertFile: args.CertFile, - KeyPem: configopaque.String(string(args.Key)), - KeyFile: args.KeyFile, - MinVersion: args.MinVersion, - MaxVersion: args.MaxVersion, - ReloadInterval: args.ReloadInterval, + CAPem: configopaque.String(args.CA), + CAFile: args.CAFile, + CertPem: configopaque.String(args.Cert), + CertFile: args.CertFile, + KeyPem: configopaque.String(string(args.Key)), + KeyFile: args.KeyFile, + MinVersion: args.MinVersion, + MaxVersion: args.MaxVersion, + ReloadInterval: args.ReloadInterval, + CipherSuites: args.CipherSuites, + IncludeSystemCACertsPool: args.IncludeSystemCACertsPool, } } diff --git a/internal/component/otelcol/connector/connector.go b/internal/component/otelcol/connector/connector.go index 546465641abc..88f2a0bd3453 100644 --- a/internal/component/otelcol/connector/connector.go +++ b/internal/component/otelcol/connector/connector.go @@ -150,9 +150,7 @@ func (p *Connector) Update(args component.Arguments) error { TracerProvider: p.opts.Tracer, MeterProvider: metric.NewMeterProvider(metric.WithReader(promExporter)), - ReportComponentStatus: func(*otelcomponent.StatusEvent) error { - return nil - }, + ReportStatus: func(*otelcomponent.StatusEvent) {}, }, BuildInfo: otelcomponent.BuildInfo{ diff --git a/internal/component/otelcol/connector/servicegraph/servicegraph.go b/internal/component/otelcol/connector/servicegraph/servicegraph.go index dfd25b2be497..24987536a95e 100644 --- a/internal/component/otelcol/connector/servicegraph/servicegraph.go +++ b/internal/component/otelcol/connector/servicegraph/servicegraph.go @@ -10,7 +10,6 @@ import ( "github.com/grafana/agent/internal/featuregate" "github.com/grafana/river" "github.com/open-telemetry/opentelemetry-collector-contrib/connector/servicegraphconnector" - 
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/servicegraphprocessor" otelcomponent "go.opentelemetry.io/collector/component" otelextension "go.opentelemetry.io/collector/extension" ) @@ -54,6 +53,10 @@ type Arguments struct { // the "processor.servicegraph.virtualNode" feature gate. // VirtualNodePeerAttributes []string `river:"virtual_node_peer_attributes,attr,optional"` + // MetricsFlushInterval is the interval at which metrics are flushed to the exporter. + // If set to 0, metrics are flushed on every received batch of traces. + MetricsFlushInterval time.Duration `river:"metrics_flush_interval,attr,optional"` + // Output configures where to send processed data. Required. Output *otelcol.ConsumerArguments `river:"output,block"` } @@ -140,19 +143,20 @@ func (args *Arguments) Validate() error { // Convert implements connector.Arguments. func (args Arguments) Convert() (otelcomponent.Config, error) { - return &servicegraphprocessor.Config{ + return &servicegraphconnector.Config{ // Never set a metric exporter. // The consumer of metrics will be set via Otel's Connector API. // // MetricsExporter: "", LatencyHistogramBuckets: args.LatencyHistogramBuckets, Dimensions: args.Dimensions, - Store: servicegraphprocessor.StoreConfig{ + Store: servicegraphconnector.StoreConfig{ MaxItems: args.Store.MaxItems, TTL: args.Store.TTL, }, - CacheLoop: args.CacheLoop, - StoreExpirationLoop: args.StoreExpirationLoop, + CacheLoop: args.CacheLoop, + StoreExpirationLoop: args.StoreExpirationLoop, + MetricsFlushInterval: args.MetricsFlushInterval, //TODO: Add VirtualNodePeerAttributes when it's no longer controlled by // the "processor.servicegraph.virtualNode" feature gate. 
// VirtualNodePeerAttributes: args.VirtualNodePeerAttributes, diff --git a/internal/component/otelcol/connector/servicegraph/servicegraph_test.go b/internal/component/otelcol/connector/servicegraph/servicegraph_test.go index f437c5ce45b0..71ca973d2a40 100644 --- a/internal/component/otelcol/connector/servicegraph/servicegraph_test.go +++ b/internal/component/otelcol/connector/servicegraph/servicegraph_test.go @@ -6,7 +6,7 @@ import ( "github.com/grafana/agent/internal/component/otelcol/connector/servicegraph" "github.com/grafana/river" - "github.com/open-telemetry/opentelemetry-collector-contrib/processor/servicegraphprocessor" + "github.com/open-telemetry/opentelemetry-collector-contrib/connector/servicegraphconnector" "github.com/stretchr/testify/require" ) @@ -14,7 +14,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) { tests := []struct { testName string cfg string - expected servicegraphprocessor.Config + expected servicegraphconnector.Config errorMsg string }{ { @@ -22,7 +22,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) { cfg: ` output {} `, - expected: servicegraphprocessor.Config{ + expected: servicegraphconnector.Config{ LatencyHistogramBuckets: []time.Duration{ 2 * time.Millisecond, 4 * time.Millisecond, @@ -42,7 +42,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) { 15 * time.Second, }, Dimensions: []string{}, - Store: servicegraphprocessor.StoreConfig{ + Store: servicegraphconnector.StoreConfig{ MaxItems: 1000, TTL: 2 * time.Second, }, @@ -76,14 +76,14 @@ func TestArguments_UnmarshalRiver(t *testing.T) { output {} `, - expected: servicegraphprocessor.Config{ + expected: servicegraphconnector.Config{ LatencyHistogramBuckets: []time.Duration{ 2 * time.Millisecond, 4 * time.Second, 6 * time.Hour, }, Dimensions: []string{"foo", "bar"}, - Store: servicegraphprocessor.StoreConfig{ + Store: servicegraphconnector.StoreConfig{ MaxItems: 333, TTL: 12 * time.Hour, }, @@ -148,7 +148,7 @@ func 
TestArguments_UnmarshalRiver(t *testing.T) { actualPtr, err := args.Convert() require.NoError(t, err) - actual := actualPtr.(*servicegraphprocessor.Config) + actual := actualPtr.(*servicegraphconnector.Config) require.Equal(t, tc.expected, *actual) }) diff --git a/internal/component/otelcol/connector/spanmetrics/spanmetrics.go b/internal/component/otelcol/connector/spanmetrics/spanmetrics.go index cc376f6a1f97..e168e476a2e7 100644 --- a/internal/component/otelcol/connector/spanmetrics/spanmetrics.go +++ b/internal/component/otelcol/connector/spanmetrics/spanmetrics.go @@ -45,6 +45,19 @@ type Arguments struct { // indefinitely over the lifetime of the collector. DimensionsCacheSize int `river:"dimensions_cache_size,attr,optional"` + // ResourceMetricsCacheSize defines the size of the cache holding metrics for a service. This is mostly relevant for + // cumulative temporality to avoid memory leaks and correct metric timestamp resets. + ResourceMetricsCacheSize int `river:"resource_metrics_cache_size,attr,optional"` + + // ResourceMetricsKeyAttributes filters the resource attributes used to create the resource metrics key hash. + // This can be used to avoid situations where resource attributes may change across service restarts, causing + // metric counters to break (and duplicate). A resource does not need to have all of the attributes. The list + // must include enough attributes to properly identify unique resources or risk aggregating data from more + // than one service and span. + // e.g. ["service.name", "telemetry.sdk.language", "telemetry.sdk.name"] + // See https://opentelemetry.io/docs/specs/semconv/resource/ for possible attributes. + ResourceMetricsKeyAttributes []string `river:"resource_metrics_key_attributes,attr,optional"` + AggregationTemporality string `river:"aggregation_temporality,attr,optional"` Histogram HistogramConfig `river:"histogram,block"` @@ -58,6 +71,9 @@ type Arguments struct { // Exemplars defines the configuration for exemplars. 
Exemplars ExemplarsConfig `river:"exemplars,block,optional"` + // Events defines the configuration for events section of spans. + Events EventsConfig `river:"events,block,optional"` + // Output configures where to send processed data. Required. Output *otelcol.ConsumerArguments `river:"output,block"` } @@ -75,9 +91,10 @@ const ( // DefaultArguments holds default settings for Arguments. var DefaultArguments = Arguments{ - DimensionsCacheSize: 1000, - AggregationTemporality: AggregationTemporalityCumulative, - MetricsFlushInterval: 15 * time.Second, + DimensionsCacheSize: 1000, + AggregationTemporality: AggregationTemporalityCumulative, + MetricsFlushInterval: 15 * time.Second, + ResourceMetricsCacheSize: 1000, } // SetToDefault implements river.Defaulter. @@ -149,14 +166,17 @@ func (args Arguments) Convert() (otelcomponent.Config, error) { excludeDimensions := append([]string(nil), args.ExcludeDimensions...) return &spanmetricsconnector.Config{ - Dimensions: dimensions, - ExcludeDimensions: excludeDimensions, - DimensionsCacheSize: args.DimensionsCacheSize, - AggregationTemporality: aggregationTemporality, - Histogram: *histogram, - MetricsFlushInterval: args.MetricsFlushInterval, - Namespace: args.Namespace, - Exemplars: *args.Exemplars.Convert(), + Dimensions: dimensions, + ExcludeDimensions: excludeDimensions, + DimensionsCacheSize: args.DimensionsCacheSize, + ResourceMetricsCacheSize: args.ResourceMetricsCacheSize, + ResourceMetricsKeyAttributes: args.ResourceMetricsKeyAttributes, + AggregationTemporality: aggregationTemporality, + Histogram: *histogram, + MetricsFlushInterval: args.MetricsFlushInterval, + Namespace: args.Namespace, + Exemplars: *args.Exemplars.Convert(), + Events: args.Events.Convert(), }, nil } diff --git a/internal/component/otelcol/connector/spanmetrics/spanmetrics_test.go b/internal/component/otelcol/connector/spanmetrics/spanmetrics_test.go index 1672172f765d..60aea802a307 100644 --- 
a/internal/component/otelcol/connector/spanmetrics/spanmetrics_test.go +++ b/internal/component/otelcol/connector/spanmetrics/spanmetrics_test.go @@ -36,10 +36,11 @@ func TestArguments_UnmarshalRiver(t *testing.T) { output {} `, expected: spanmetricsconnector.Config{ - Dimensions: []spanmetricsconnector.Dimension{}, - ExcludeDimensions: nil, - DimensionsCacheSize: 1000, - AggregationTemporality: "AGGREGATION_TEMPORALITY_CUMULATIVE", + Dimensions: []spanmetricsconnector.Dimension{}, + ExcludeDimensions: nil, + DimensionsCacheSize: 1000, + AggregationTemporality: "AGGREGATION_TEMPORALITY_CUMULATIVE", + ResourceMetricsCacheSize: 1000, Histogram: spanmetricsconnector.HistogramConfig{ Disable: false, Unit: 0, @@ -70,6 +71,10 @@ func TestArguments_UnmarshalRiver(t *testing.T) { Exemplars: spanmetricsconnector.ExemplarsConfig{ Enabled: false, }, + Events: spanmetricsconnector.EventsConfig{ + Enabled: false, + Dimensions: []spanmetricsconnector.Dimension{}, + }, }, }, { @@ -82,10 +87,11 @@ func TestArguments_UnmarshalRiver(t *testing.T) { output {} `, expected: spanmetricsconnector.Config{ - Dimensions: []spanmetricsconnector.Dimension{}, - DimensionsCacheSize: 1000, - ExcludeDimensions: nil, - AggregationTemporality: "AGGREGATION_TEMPORALITY_CUMULATIVE", + Dimensions: []spanmetricsconnector.Dimension{}, + DimensionsCacheSize: 1000, + ExcludeDimensions: nil, + AggregationTemporality: "AGGREGATION_TEMPORALITY_CUMULATIVE", + ResourceMetricsCacheSize: 1000, Histogram: spanmetricsconnector.HistogramConfig{ Disable: false, Unit: 0, @@ -94,6 +100,10 @@ func TestArguments_UnmarshalRiver(t *testing.T) { }, MetricsFlushInterval: 15 * time.Second, Namespace: "", + Events: spanmetricsconnector.EventsConfig{ + Enabled: false, + Dimensions: []spanmetricsconnector.Dimension{}, + }, }, }, { @@ -109,6 +119,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) { exclude_dimensions = ["test_exclude_dim1", "test_exclude_dim2"] dimensions_cache_size = 333 aggregation_temporality = "DELTA" + 
resource_metrics_cache_size = 12345 histogram { disable = true unit = "s" @@ -121,7 +132,12 @@ func TestArguments_UnmarshalRiver(t *testing.T) { exemplars { enabled = true } - + events { + enabled = true + dimension { + name = "exception1" + } + } output {} `, expected: spanmetricsconnector.Config{ @@ -129,9 +145,10 @@ func TestArguments_UnmarshalRiver(t *testing.T) { {Name: "http.status_code", Default: nil}, {Name: "http.method", Default: getStringPtr("GET")}, }, - ExcludeDimensions: []string{"test_exclude_dim1", "test_exclude_dim2"}, - DimensionsCacheSize: 333, - AggregationTemporality: "AGGREGATION_TEMPORALITY_DELTA", + ExcludeDimensions: []string{"test_exclude_dim1", "test_exclude_dim2"}, + DimensionsCacheSize: 333, + AggregationTemporality: "AGGREGATION_TEMPORALITY_DELTA", + ResourceMetricsCacheSize: 12345, Histogram: spanmetricsconnector.HistogramConfig{ Disable: true, Unit: 1, @@ -149,6 +166,12 @@ func TestArguments_UnmarshalRiver(t *testing.T) { Exemplars: spanmetricsconnector.ExemplarsConfig{ Enabled: true, }, + Events: spanmetricsconnector.EventsConfig{ + Enabled: true, + Dimensions: []spanmetricsconnector.Dimension{ + {Name: "exception1", Default: nil}, + }, + }, }, }, { @@ -164,9 +187,10 @@ func TestArguments_UnmarshalRiver(t *testing.T) { output {} `, expected: spanmetricsconnector.Config{ - Dimensions: []spanmetricsconnector.Dimension{}, - DimensionsCacheSize: 1000, - AggregationTemporality: "AGGREGATION_TEMPORALITY_CUMULATIVE", + Dimensions: []spanmetricsconnector.Dimension{}, + DimensionsCacheSize: 1000, + AggregationTemporality: "AGGREGATION_TEMPORALITY_CUMULATIVE", + ResourceMetricsCacheSize: 1000, Histogram: spanmetricsconnector.HistogramConfig{ Unit: 0, Exponential: &spanmetricsconnector.ExponentialHistogramConfig{MaxSize: 123}, @@ -174,6 +198,10 @@ func TestArguments_UnmarshalRiver(t *testing.T) { }, MetricsFlushInterval: 15 * time.Second, Namespace: "", + Events: spanmetricsconnector.EventsConfig{ + Enabled: false, + Dimensions: 
[]spanmetricsconnector.Dimension{}, + }, }, }, { diff --git a/internal/component/otelcol/connector/spanmetrics/types.go b/internal/component/otelcol/connector/spanmetrics/types.go index bba03fbe047d..a85de8f64d34 100644 --- a/internal/component/otelcol/connector/spanmetrics/types.go +++ b/internal/component/otelcol/connector/spanmetrics/types.go @@ -37,7 +37,7 @@ const ( // The unit is a private type in an internal Otel package, // so we need to convert it to a map and then back to the internal type. // ConvertMetricUnit matches the Unit type in this internal package: -// https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/v0.87.0/connector/spanmetricsconnector/internal/metrics/unit.go +// https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/v0.96.0/connector/spanmetricsconnector/internal/metrics/unit.go func ConvertMetricUnit(unit string) (map[string]interface{}, error) { switch unit { case MetricsUnitMilliseconds: @@ -123,12 +123,14 @@ func (hc HistogramConfig) Convert() (*spanmetricsconnector.HistogramConfig, erro } type ExemplarsConfig struct { - Enabled bool `river:"enabled,attr,optional"` + Enabled bool `river:"enabled,attr,optional"` + MaxPerDataPoint *int `river:"max_per_data_point,attr,optional"` } func (ec ExemplarsConfig) Convert() *spanmetricsconnector.ExemplarsConfig { return &spanmetricsconnector.ExemplarsConfig{ - Enabled: ec.Enabled, + Enabled: ec.Enabled, + MaxPerDataPoint: ec.MaxPerDataPoint, } } @@ -197,3 +199,22 @@ func (hc ExplicitHistogramConfig) Convert() *spanmetricsconnector.ExplicitHistog Buckets: append([]time.Duration{}, hc.Buckets...), } } + +type EventsConfig struct { + // Enabled is a flag to enable events. + Enabled bool `river:"enabled,attr,optional"` + // Dimensions defines the list of dimensions to add to the events metric. 
+ Dimensions []Dimension `river:"dimension,block,optional"` +} + +func (ec EventsConfig) Convert() spanmetricsconnector.EventsConfig { + dimensions := make([]spanmetricsconnector.Dimension, 0, len(ec.Dimensions)) + for _, d := range ec.Dimensions { + dimensions = append(dimensions, d.Convert()) + } + + return spanmetricsconnector.EventsConfig{ + Enabled: ec.Enabled, + Dimensions: dimensions, + } +} diff --git a/internal/component/otelcol/exporter/exporter.go b/internal/component/otelcol/exporter/exporter.go index 2524070aff7f..df222dbc5848 100644 --- a/internal/component/otelcol/exporter/exporter.go +++ b/internal/component/otelcol/exporter/exporter.go @@ -174,9 +174,7 @@ func (e *Exporter) Update(args component.Arguments) error { TracerProvider: e.opts.Tracer, MeterProvider: metric.NewMeterProvider(metricOpts...), - ReportComponentStatus: func(*otelcomponent.StatusEvent) error { - return nil - }, + ReportStatus: func(*otelcomponent.StatusEvent) {}, }, BuildInfo: otelcomponent.BuildInfo{ diff --git a/internal/component/otelcol/exporter/loadbalancing/loadbalancing.go b/internal/component/otelcol/exporter/loadbalancing/loadbalancing.go index d0c1ee9f27b8..dedade66cd8c 100644 --- a/internal/component/otelcol/exporter/loadbalancing/loadbalancing.go +++ b/internal/component/otelcol/exporter/loadbalancing/loadbalancing.go @@ -124,9 +124,9 @@ func (oc OtlpConfig) Convert() otlpexporter.Config { TimeoutSettings: exporterhelper.TimeoutSettings{ Timeout: oc.Timeout, }, - QueueSettings: *oc.Queue.Convert(), - RetrySettings: *oc.Retry.Convert(), - GRPCClientSettings: *oc.Client.Convert(), + QueueConfig: *oc.Queue.Convert(), + RetryConfig: *oc.Retry.Convert(), + ClientConfig: *oc.Client.Convert(), } } @@ -260,7 +260,7 @@ type GRPCClientArguments struct { var _ river.Defaulter = &GRPCClientArguments{} // Convert converts args into the upstream type. 
-func (args *GRPCClientArguments) Convert() *otelconfiggrpc.GRPCClientSettings { +func (args *GRPCClientArguments) Convert() *otelconfiggrpc.ClientConfig { if args == nil { return nil } @@ -281,7 +281,7 @@ func (args *GRPCClientArguments) Convert() *otelconfiggrpc.GRPCClientSettings { balancerName = otelcol.DefaultBalancerName } - return &otelconfiggrpc.GRPCClientSettings{ + return &otelconfiggrpc.ClientConfig{ Compression: args.Compression.Convert(), TLSSetting: *args.TLS.Convert(), diff --git a/internal/component/otelcol/exporter/loadbalancing/loadbalancing_test.go b/internal/component/otelcol/exporter/loadbalancing/loadbalancing_test.go index 8034531ffbf9..c4fa8efbc7d6 100644 --- a/internal/component/otelcol/exporter/loadbalancing/loadbalancing_test.go +++ b/internal/component/otelcol/exporter/loadbalancing/loadbalancing_test.go @@ -11,13 +11,14 @@ import ( "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/config/configgrpc" "go.opentelemetry.io/collector/config/configopaque" + "go.opentelemetry.io/collector/config/configretry" "go.opentelemetry.io/collector/exporter/exporterhelper" "go.opentelemetry.io/collector/exporter/otlpexporter" ) func TestConfigConversion(t *testing.T) { var ( - defaultRetrySettings = exporterhelper.NewDefaultRetrySettings() + defaultRetrySettings = configretry.NewDefaultBackOffConfig() defaultTimeoutSettings = exporterhelper.NewDefaultTimeoutSettings() defaultQueueSettings = exporterhelper.QueueSettings{ @@ -28,16 +29,16 @@ func TestConfigConversion(t *testing.T) { defaultProtocol = loadbalancingexporter.Protocol{ OTLP: otlpexporter.Config{ - GRPCClientSettings: configgrpc.GRPCClientSettings{ + ClientConfig: configgrpc.ClientConfig{ Endpoint: "", Compression: "gzip", WriteBufferSize: 512 * 1024, Headers: map[string]configopaque.String{}, BalancerName: otelcol.DefaultBalancerName, }, - RetrySettings: defaultRetrySettings, + RetryConfig: defaultRetrySettings, TimeoutSettings: defaultTimeoutSettings, - 
QueueSettings: defaultQueueSettings, + QueueConfig: defaultQueueSettings, }, } ) @@ -120,9 +121,9 @@ func TestConfigConversion(t *testing.T) { TimeoutSettings: exporterhelper.TimeoutSettings{ Timeout: 1 * time.Second, }, - RetrySettings: defaultRetrySettings, - QueueSettings: defaultQueueSettings, - GRPCClientSettings: configgrpc.GRPCClientSettings{ + RetryConfig: defaultRetrySettings, + QueueConfig: defaultQueueSettings, + ClientConfig: configgrpc.ClientConfig{ Endpoint: "", Compression: "gzip", WriteBufferSize: 512 * 1024, diff --git a/internal/component/otelcol/exporter/otlp/otlp.go b/internal/component/otelcol/exporter/otlp/otlp.go index a86aec221954..2f9013a95b78 100644 --- a/internal/component/otelcol/exporter/otlp/otlp.go +++ b/internal/component/otelcol/exporter/otlp/otlp.go @@ -61,9 +61,9 @@ func (args Arguments) Convert() (otelcomponent.Config, error) { TimeoutSettings: otelpexporterhelper.TimeoutSettings{ Timeout: args.Timeout, }, - QueueSettings: *args.Queue.Convert(), - RetrySettings: *args.Retry.Convert(), - GRPCClientSettings: *(*otelcol.GRPCClientArguments)(&args.Client).Convert(), + QueueConfig: *args.Queue.Convert(), + RetryConfig: *args.Retry.Convert(), + ClientConfig: *(*otelcol.GRPCClientArguments)(&args.Client).Convert(), }, nil } diff --git a/internal/component/otelcol/exporter/otlphttp/otlphttp.go b/internal/component/otelcol/exporter/otlphttp/otlphttp.go index a5c4dbe1de61..66292554068d 100644 --- a/internal/component/otelcol/exporter/otlphttp/otlphttp.go +++ b/internal/component/otelcol/exporter/otlphttp/otlphttp.go @@ -44,13 +44,22 @@ type Arguments struct { TracesEndpoint string `river:"traces_endpoint,attr,optional"` MetricsEndpoint string `river:"metrics_endpoint,attr,optional"` LogsEndpoint string `river:"logs_endpoint,attr,optional"` + + Encoding string `river:"encoding,attr,optional"` } var _ exporter.Arguments = Arguments{} +const ( + EncodingProto string = "proto" + EncodingJSON string = "json" +) + // SetToDefault implements 
river.Defaulter. func (args *Arguments) SetToDefault() { - *args = Arguments{} + *args = Arguments{ + Encoding: EncodingProto, + } args.Queue.SetToDefault() args.Retry.SetToDefault() args.Client.SetToDefault() @@ -60,12 +69,13 @@ func (args *Arguments) SetToDefault() { // Convert implements exporter.Arguments. func (args Arguments) Convert() (otelcomponent.Config, error) { return &otlphttpexporter.Config{ - HTTPClientSettings: *(*otelcol.HTTPClientArguments)(&args.Client).Convert(), - QueueSettings: *args.Queue.Convert(), - RetrySettings: *args.Retry.Convert(), - TracesEndpoint: args.TracesEndpoint, - MetricsEndpoint: args.MetricsEndpoint, - LogsEndpoint: args.LogsEndpoint, + ClientConfig: *(*otelcol.HTTPClientArguments)(&args.Client).Convert(), + QueueConfig: *args.Queue.Convert(), + RetryConfig: *args.Retry.Convert(), + TracesEndpoint: args.TracesEndpoint, + MetricsEndpoint: args.MetricsEndpoint, + LogsEndpoint: args.LogsEndpoint, + Encoding: otlphttpexporter.EncodingType(args.Encoding), }, nil } @@ -89,6 +99,9 @@ func (args *Arguments) Validate() error { if args.Client.Endpoint == "" && args.TracesEndpoint == "" && args.MetricsEndpoint == "" && args.LogsEndpoint == "" { return errors.New("at least one endpoint must be specified") } + if args.Encoding != EncodingProto && args.Encoding != EncodingJSON { + return errors.New("invalid encoding type") + } return nil } @@ -110,10 +123,11 @@ func (args *HTTPClientArguments) SetToDefault() { MaxIdleConns: &maxIdleConns, IdleConnTimeout: &idleConnTimeout, - Timeout: 30 * time.Second, - Headers: map[string]string{}, - Compression: otelcol.CompressionTypeGzip, - ReadBufferSize: 0, - WriteBufferSize: 512 * 1024, + Timeout: 30 * time.Second, + Headers: map[string]string{}, + Compression: otelcol.CompressionTypeGzip, + ReadBufferSize: 0, + WriteBufferSize: 512 * 1024, + HTTP2PingTimeout: 15 * time.Second, } } diff --git a/internal/component/otelcol/extension/extension.go b/internal/component/otelcol/extension/extension.go 
index 8bdafad80661..7372ed02aff5 100644 --- a/internal/component/otelcol/extension/extension.go +++ b/internal/component/otelcol/extension/extension.go @@ -117,9 +117,7 @@ func (e *Extension) Update(args component.Arguments) error { TracerProvider: e.opts.Tracer, MeterProvider: metric.NewMeterProvider(metric.WithReader(promExporter)), - ReportComponentStatus: func(*otelcomponent.StatusEvent) error { - return nil - }, + ReportStatus: func(*otelcomponent.StatusEvent) {}, }, BuildInfo: otelcomponent.BuildInfo{ diff --git a/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/config.go b/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/config.go index 75e907170263..3c4465a50051 100644 --- a/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/config.go +++ b/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/config.go @@ -1,23 +1,13 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 -package jaegerremotesampling +package jaegerremotesampling // import "github.com/open-telemetry/opentelemetry-collector-contrib/extension/jaegerremotesampling" import ( "errors" "time" + "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/configgrpc" "go.opentelemetry.io/collector/config/confighttp" ) @@ -31,8 +21,8 @@ var ( // Config has the configuration for the extension enabling the health check // extension, used to report the health status of the service. type Config struct { - *confighttp.HTTPServerSettings `mapstructure:"http"` - *configgrpc.GRPCServerSettings `mapstructure:"grpc"` + HTTPServerConfig *confighttp.ServerConfig `mapstructure:"http"` + GRPCServerConfig *configgrpc.ServerConfig `mapstructure:"grpc"` // Source configures the source for the strategies file. One of `remote` or `file` has to be specified. Source Source `mapstructure:"source"` @@ -40,7 +30,7 @@ type Config struct { type Source struct { // Remote defines the remote location for the file - Remote *configgrpc.GRPCClientSettings `mapstructure:"remote"` + Remote *configgrpc.ClientConfig `mapstructure:"remote"` // File specifies a local file as the strategies source File string `mapstructure:"file"` @@ -53,9 +43,11 @@ type Source struct { Contents string `mapstructure:"contents"` } +var _ component.Config = (*Config)(nil) + // Validate checks if the extension configuration is valid func (cfg *Config) Validate() error { - if cfg.HTTPServerSettings == nil && cfg.GRPCServerSettings == nil { + if cfg.HTTPServerConfig == nil && cfg.GRPCServerConfig == nil { return errAtLeastOneProtocol } diff --git a/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/config_test.go b/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/config_test.go index c208e9f4a7ea..74efe6d7cf46 100644 --- 
a/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/config_test.go +++ b/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/config_test.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package jaegerremotesampling @@ -21,7 +10,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - otelcomponent "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/configgrpc" "go.opentelemetry.io/collector/config/confighttp" "go.opentelemetry.io/collector/config/confignet" @@ -32,29 +21,29 @@ func TestLoadConfig(t *testing.T) { t.Parallel() tests := []struct { - id otelcomponent.ID - expected otelcomponent.Config + id component.ID + expected component.Config }{ { - id: otelcomponent.NewID(typeStr), + id: component.NewID(typeStr), expected: &Config{ - HTTPServerSettings: &confighttp.HTTPServerSettings{Endpoint: ":5778"}, - GRPCServerSettings: &configgrpc.GRPCServerSettings{NetAddr: confignet.NetAddr{ + HTTPServerConfig: &confighttp.ServerConfig{Endpoint: ":5778"}, + GRPCServerConfig: &configgrpc.ServerConfig{NetAddr: confignet.AddrConfig{ Endpoint: ":14250", Transport: "tcp", }}, Source: Source{ - Remote: &configgrpc.GRPCClientSettings{ + Remote: &configgrpc.ClientConfig{ Endpoint: 
"jaeger-collector:14250", }, }, }, }, { - id: otelcomponent.NewIDWithName(typeStr, "1"), + id: component.NewIDWithName(typeStr, "1"), expected: &Config{ - HTTPServerSettings: &confighttp.HTTPServerSettings{Endpoint: ":5778"}, - GRPCServerSettings: &configgrpc.GRPCServerSettings{NetAddr: confignet.NetAddr{ + HTTPServerConfig: &confighttp.ServerConfig{Endpoint: ":5778"}, + GRPCServerConfig: &configgrpc.ServerConfig{NetAddr: confignet.AddrConfig{ Endpoint: ":14250", Transport: "tcp", }}, @@ -73,8 +62,8 @@ func TestLoadConfig(t *testing.T) { cfg := factory.CreateDefaultConfig() sub, err := cm.Sub(tt.id.String()) require.NoError(t, err) - require.NoError(t, otelcomponent.UnmarshalConfig(sub, cfg)) - assert.NoError(t, otelcomponent.ValidateConfig(cfg)) + require.NoError(t, component.UnmarshalConfig(sub, cfg)) + assert.NoError(t, component.ValidateConfig(cfg)) assert.Equal(t, tt.expected, cfg) }) } @@ -94,16 +83,16 @@ func TestValidate(t *testing.T) { { desc: "no sources", cfg: Config{ - GRPCServerSettings: &configgrpc.GRPCServerSettings{}, + GRPCServerConfig: &configgrpc.ServerConfig{}, }, expected: errNoSources, }, { desc: "too many sources", cfg: Config{ - GRPCServerSettings: &configgrpc.GRPCServerSettings{}, + GRPCServerConfig: &configgrpc.ServerConfig{}, Source: Source{ - Remote: &configgrpc.GRPCClientSettings{}, + Remote: &configgrpc.ClientConfig{}, File: "/tmp/some-file", }, }, diff --git a/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/extension.go b/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/extension.go index 84b8ca8a9f52..0a116ac485b1 100644 --- a/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/extension.go +++ b/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/extension.go @@ -1,18 +1,7 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the 
"License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jaegerremotesampling +// SPDX-License-Identifier: Apache-2.0 + +package jaegerremotesampling // import "github.com/open-telemetry/opentelemetry-collector-contrib/extension/jaegerremotesampling" import ( "context" @@ -21,14 +10,14 @@ import ( "github.com/jaegertracing/jaeger/cmd/collector/app/sampling/strategystore" "github.com/jaegertracing/jaeger/plugin/sampling/strategystore/static" "go.opentelemetry.io/collector/component" - otelextension "go.opentelemetry.io/collector/extension" + "go.opentelemetry.io/collector/extension" "go.uber.org/zap" "github.com/grafana/agent/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/internal" "github.com/grafana/agent/internal/component/otelcol/extension/jaeger_remote_sampling/internal/strategy_store" ) -var _ otelextension.Extension = (*jrsExtension)(nil) +var _ extension.Extension = (*jrsExtension)(nil) type jrsExtension struct { cfg *Config @@ -54,7 +43,6 @@ func (jrse *jrsExtension) Start(ctx context.Context, host component.Host) error // source of the sampling config: // - remote (gRPC) // - local file - // - contents (string) // we can then use a simplified logic here to assign the appropriate store if jrse.cfg.Source.File != "" { opts := static.Options{ @@ -94,8 +82,8 @@ func (jrse *jrsExtension) Start(ctx context.Context, host component.Host) error jrse.samplingStore = ss } - if jrse.cfg.HTTPServerSettings != nil { - httpServer, err := 
internal.NewHTTP(jrse.telemetry, *jrse.cfg.HTTPServerSettings, jrse.samplingStore) + if jrse.cfg.HTTPServerConfig != nil { + httpServer, err := internal.NewHTTP(jrse.telemetry, *jrse.cfg.HTTPServerConfig, jrse.samplingStore) if err != nil { return fmt.Errorf("error while creating the HTTP server: %w", err) } @@ -106,8 +94,8 @@ func (jrse *jrsExtension) Start(ctx context.Context, host component.Host) error } } - if jrse.cfg.GRPCServerSettings != nil { - grpcServer, err := internal.NewGRPC(jrse.telemetry, *jrse.cfg.GRPCServerSettings, jrse.samplingStore) + if jrse.cfg.GRPCServerConfig != nil { + grpcServer, err := internal.NewGRPC(jrse.telemetry, *jrse.cfg.GRPCServerConfig, jrse.samplingStore) if err != nil { return fmt.Errorf("error while creating the gRPC server: %w", err) } diff --git a/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/extension_test.go b/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/extension_test.go index f0a4501a218b..53be6b87d7d8 100644 --- a/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/extension_test.go +++ b/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/extension_test.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package jaegerremotesampling @@ -105,12 +94,13 @@ func TestRemote(t *testing.T) { err = server.Serve(lis) require.NoError(t, err) }() + defer func() { server.Stop() }() // create the config, pointing to the mock server cfg := testConfig() - cfg.GRPCServerSettings.NetAddr.Endpoint = "127.0.0.1:0" + cfg.GRPCServerConfig.NetAddr.Endpoint = "127.0.0.1:0" cfg.Source.ReloadInterval = tc.reloadInterval - cfg.Source.Remote = &configgrpc.GRPCClientSettings{ + cfg.Source.Remote = &configgrpc.ClientConfig{ Endpoint: fmt.Sprintf("127.0.0.1:%d", lis.Addr().(*net.TCPAddr).Port), TLSSetting: configtls.TLSClientSetting{ Insecure: true, // test only @@ -131,6 +121,7 @@ func TestRemote(t *testing.T) { resp, err := http.Get("http://127.0.0.1:5778/sampling?service=foo") assert.NoError(t, err) assert.Equal(t, 200, resp.StatusCode) + assert.NoError(t, resp.Body.Close()) } // shut down the server @@ -174,7 +165,7 @@ func (s *samplingServer) GetSamplingStrategy(ctx context.Context, params *api_v2 func testConfig() *Config { cfg := createDefaultConfig().(*Config) - cfg.HTTPServerSettings.Endpoint = "127.0.0.1:5778" - cfg.GRPCServerSettings.NetAddr.Endpoint = "127.0.0.1:14250" + cfg.HTTPServerConfig.Endpoint = "127.0.0.1:5778" + cfg.GRPCServerConfig.NetAddr.Endpoint = "127.0.0.1:14250" return cfg } diff --git a/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/factory.go b/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/factory.go index 1699aa7c90d0..70462cde2511 100644 --- a/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/factory.go +++ b/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/factory.go @@ -1,27 +1,19 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the 
License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jaegerremotesampling +// SPDX-License-Identifier: Apache-2.0 + +package jaegerremotesampling // import "github.com/open-telemetry/opentelemetry-collector-contrib/extension/jaegerremotesampling" import ( "context" + "sync" + "go.opentelemetry.io/collector/component" otelcomponent "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/configgrpc" "go.opentelemetry.io/collector/config/confighttp" "go.opentelemetry.io/collector/config/confignet" - otelextension "go.opentelemetry.io/collector/extension" + "go.opentelemetry.io/collector/extension" + "go.uber.org/zap" ) const ( @@ -29,9 +21,9 @@ const ( typeStr = "jaegerremotesampling" ) -// NewFactory creates a factory for the OIDC Authenticator extension. -func NewFactory() otelextension.Factory { - return otelextension.NewFactory( +// NewFactory creates a factory for the jaeger remote sampling extension. 
+func NewFactory() extension.Factory { + return extension.NewFactory( typeStr, createDefaultConfig, createExtension, @@ -39,13 +31,13 @@ func NewFactory() otelextension.Factory { ) } -func createDefaultConfig() otelcomponent.Config { +func createDefaultConfig() component.Config { return &Config{ - HTTPServerSettings: &confighttp.HTTPServerSettings{ + HTTPServerConfig: &confighttp.ServerConfig{ Endpoint: ":5778", }, - GRPCServerSettings: &configgrpc.GRPCServerSettings{ - NetAddr: confignet.NetAddr{ + GRPCServerConfig: &configgrpc.ServerConfig{ + NetAddr: confignet.AddrConfig{ Endpoint: ":14250", Transport: "tcp", }, @@ -54,6 +46,25 @@ func createDefaultConfig() otelcomponent.Config { } } -func createExtension(_ context.Context, set otelextension.CreateSettings, cfg otelcomponent.Config) (otelcomponent.Component, error) { +var once sync.Once + +func logDeprecation(logger *zap.Logger) { + once.Do(func() { + logger.Warn("jaegerremotesampling extension will deprecate Thrift-gen and replace it with Proto-gen to be compatible with jaeger 1.42.0 and higher. 
See https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/18485 for more details.") + }) +} + +// nolint +// var protoGate = featuregate.GlobalRegistry().MustRegister( +// "extension.jaegerremotesampling.replaceThriftWithProto", +// featuregate.StageStable, +// featuregate.WithRegisterDescription( +// "When enabled, the jaegerremotesampling will use Proto-gen over Thrift-gen.", +// ), +// featuregate.WithRegisterToVersion("0.92.0"), +// ) + +func createExtension(_ context.Context, set extension.CreateSettings, cfg component.Config) (extension.Extension, error) { + logDeprecation(set.Logger) return newExtension(cfg.(*Config), set.TelemetrySettings), nil } diff --git a/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/factory_test.go b/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/factory_test.go index dc08b4552a4e..da6bb9df2400 100644 --- a/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/factory_test.go +++ b/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/factory_test.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package jaegerremotesampling @@ -29,8 +18,8 @@ import ( func TestCreateDefaultConfig(t *testing.T) { // prepare and test expected := &Config{ - HTTPServerSettings: &confighttp.HTTPServerSettings{Endpoint: ":5778"}, - GRPCServerSettings: &configgrpc.GRPCServerSettings{NetAddr: confignet.NetAddr{ + HTTPServerConfig: &confighttp.ServerConfig{Endpoint: ":5778"}, + GRPCServerConfig: &configgrpc.ServerConfig{NetAddr: confignet.AddrConfig{ Endpoint: ":14250", Transport: "tcp", }}, diff --git a/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/internal/grpc.go b/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/internal/grpc.go index 4352f6966e8b..4e437834c594 100644 --- a/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/internal/grpc.go +++ b/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/internal/grpc.go @@ -1,18 +1,7 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package internal +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "github.com/open-telemetry/opentelemetry-collector-contrib/extension/jaegerremotesampling/internal" import ( "context" @@ -44,7 +33,7 @@ type grpcServer interface { // NewGRPC returns a new sampling gRPC Server. 
func NewGRPC( telemetry component.TelemetrySettings, - settings configgrpc.GRPCServerSettings, + settings configgrpc.ServerConfig, strategyStore strategystore.StrategyStore, ) (*SamplingGRPCServer, error) { @@ -62,13 +51,13 @@ func NewGRPC( // SamplingGRPCServer implements component.Component to make the life cycle easy to manage. type SamplingGRPCServer struct { telemetry component.TelemetrySettings - settings configgrpc.GRPCServerSettings + settings configgrpc.ServerConfig strategyStore strategystore.StrategyStore grpcServer grpcServer } -func (s *SamplingGRPCServer) Start(_ context.Context, host component.Host) error { +func (s *SamplingGRPCServer) Start(ctx context.Context, host component.Host) error { server, err := s.settings.ToServer(host, s.telemetry) if err != nil { return err @@ -82,7 +71,7 @@ func (s *SamplingGRPCServer) Start(_ context.Context, host component.Host) error healthServer.SetServingStatus("jaeger.api_v2.SamplingManager", grpc_health_v1.HealthCheckResponse_SERVING) grpc_health_v1.RegisterHealthServer(server, healthServer) - listener, err := s.settings.ToListener() + listener, err := s.settings.NetAddr.Listen(ctx) if err != nil { return fmt.Errorf("failed to listen on gRPC port: %w", err) } diff --git a/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/internal/grpc_test.go b/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/internal/grpc_test.go index 95bc895e505b..731b6a40412c 100644 --- a/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/internal/grpc_test.go +++ b/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/internal/grpc_test.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package internal @@ -28,16 +17,16 @@ import ( ) func TestMissingClientConfigManagerGRPC(t *testing.T) { - s, err := NewGRPC(componenttest.NewNopTelemetrySettings(), configgrpc.GRPCServerSettings{}, nil) + s, err := NewGRPC(componenttest.NewNopTelemetrySettings(), configgrpc.ServerConfig{}, nil) assert.Equal(t, errMissingStrategyStore, err) assert.Nil(t, s) } func TestStartAndStopGRPC(t *testing.T) { // prepare - srvSettings := configgrpc.GRPCServerSettings{ - NetAddr: confignet.NetAddr{ - Endpoint: ":0", + srvSettings := configgrpc.ServerConfig{ + NetAddr: confignet.AddrConfig{ + Endpoint: "127.0.0.1:0", Transport: "tcp", }, } @@ -97,6 +86,6 @@ type grpcServerMock struct { timeToGracefulStop time.Duration } -func (g *grpcServerMock) Serve(lis net.Listener) error { return nil } -func (g *grpcServerMock) Stop() {} -func (g *grpcServerMock) GracefulStop() { time.Sleep(g.timeToGracefulStop) } +func (g *grpcServerMock) Serve(_ net.Listener) error { return nil } +func (g *grpcServerMock) Stop() {} +func (g *grpcServerMock) GracefulStop() { time.Sleep(g.timeToGracefulStop) } diff --git a/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/internal/http.go b/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/internal/http.go index 87e240eec05f..b376723c3fe0 100644 --- a/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/internal/http.go +++ 
b/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/internal/http.go @@ -1,18 +1,7 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package internal +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "github.com/open-telemetry/opentelemetry-collector-contrib/extension/jaegerremotesampling/internal" import ( "context" @@ -36,7 +25,7 @@ var _ component.Component = (*SamplingHTTPServer)(nil) type SamplingHTTPServer struct { telemetry component.TelemetrySettings - settings confighttp.HTTPServerSettings + settings confighttp.ServerConfig strategyStore strategystore.StrategyStore mux *http.ServeMux @@ -44,7 +33,7 @@ type SamplingHTTPServer struct { shutdownWG *sync.WaitGroup } -func NewHTTP(telemetry component.TelemetrySettings, settings confighttp.HTTPServerSettings, strategyStore strategystore.StrategyStore) (*SamplingHTTPServer, error) { +func NewHTTP(telemetry component.TelemetrySettings, settings confighttp.ServerConfig, strategyStore strategystore.StrategyStore) (*SamplingHTTPServer, error) { if strategyStore == nil { return nil, errMissingStrategyStore } @@ -58,11 +47,8 @@ func NewHTTP(telemetry component.TelemetrySettings, settings confighttp.HTTPServ } srv.mux = http.NewServeMux() - // the legacy endpoint - srv.mux.Handle("/", http.HandlerFunc(srv.samplingStrategyHandler)) - // the new endpoint -- not strictly necessary, as the previous one 
would match it - // already, but good to have it explicit here + // SEE: https://www.jaegertracing.io/docs/1.41/apis/#remote-sampling-configuration-stable srv.mux.Handle("/sampling", http.HandlerFunc(srv.samplingStrategyHandler)) return srv, nil @@ -86,7 +72,7 @@ func (h *SamplingHTTPServer) Start(_ context.Context, host component.Host) error defer h.shutdownWG.Done() if err := h.srv.Serve(hln); err != nil && !errors.Is(err, http.ErrServerClosed) { - host.ReportFatalError(err) + h.telemetry.ReportStatus(component.NewFatalErrorEvent(err)) } }() diff --git a/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/internal/http_test.go b/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/internal/http_test.go index fa31cfcb524b..18bdb4736463 100644 --- a/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/internal/http_test.go +++ b/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/internal/http_test.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package internal @@ -32,15 +21,15 @@ import ( ) func TestMissingClientConfigManagerHTTP(t *testing.T) { - s, err := NewHTTP(componenttest.NewNopTelemetrySettings(), confighttp.HTTPServerSettings{}, nil) + s, err := NewHTTP(componenttest.NewNopTelemetrySettings(), confighttp.ServerConfig{}, nil) assert.Equal(t, errMissingStrategyStore, err) assert.Nil(t, s) } func TestStartAndStopHTTP(t *testing.T) { // prepare - srvSettings := confighttp.HTTPServerSettings{ - Endpoint: ":0", + srvSettings := confighttp.ServerConfig{ + Endpoint: "127.0.0.1:0", } s, err := NewHTTP(componenttest.NewNopTelemetrySettings(), srvSettings, &mockCfgMgr{}) require.NoError(t, err) @@ -56,10 +45,6 @@ func TestEndpointsAreWired(t *testing.T) { desc string endpoint string }{ - { - desc: "legacy", - endpoint: "/", - }, { desc: "new", endpoint: "/sampling", @@ -68,7 +53,15 @@ func TestEndpointsAreWired(t *testing.T) { for _, tC := range testCases { t.Run(tC.desc, func(t *testing.T) { // prepare - s, err := NewHTTP(componenttest.NewNopTelemetrySettings(), confighttp.HTTPServerSettings{}, &mockCfgMgr{}) + s, err := NewHTTP(componenttest.NewNopTelemetrySettings(), confighttp.ServerConfig{}, &mockCfgMgr{ + getSamplingStrategyFunc: func(ctx context.Context, serviceName string) (*api_v2.SamplingStrategyResponse, error) { + return &api_v2.SamplingStrategyResponse{ + ProbabilisticSampling: &api_v2.ProbabilisticSamplingStrategy{ + SamplingRate: 1, + }, + }, nil + }, + }) require.NoError(t, err) require.NotNil(t, s) @@ -87,14 +80,14 @@ func TestEndpointsAreWired(t *testing.T) { resp.Body.Close() body := string(samplingStrategiesBytes) - assert.Equal(t, `{}`, body) + assert.Equal(t, `{"probabilisticSampling":{"samplingRate":1}}`, body) }) } } func TestServiceNameIsRequired(t *testing.T) { // prepare - s, err := NewHTTP(componenttest.NewNopTelemetrySettings(), confighttp.HTTPServerSettings{}, &mockCfgMgr{}) + s, err := NewHTTP(componenttest.NewNopTelemetrySettings(), 
confighttp.ServerConfig{}, &mockCfgMgr{}) require.NoError(t, err) require.NotNil(t, s) @@ -112,7 +105,7 @@ func TestServiceNameIsRequired(t *testing.T) { } func TestErrorFromClientConfigManager(t *testing.T) { - s, err := NewHTTP(componenttest.NewNopTelemetrySettings(), confighttp.HTTPServerSettings{}, &mockCfgMgr{}) + s, err := NewHTTP(componenttest.NewNopTelemetrySettings(), confighttp.ServerConfig{}, &mockCfgMgr{}) require.NoError(t, err) require.NotNil(t, s) diff --git a/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/internal/internal_test.go b/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/internal/internal_test.go index 3e611a70aa35..0a88f8501b30 100644 --- a/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/internal/internal_test.go +++ b/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/internal/internal_test.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package internal diff --git a/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/internal/metadata/generated_status.go b/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/internal/metadata/generated_status.go new file mode 100644 index 000000000000..fed185a1e5cf --- /dev/null +++ b/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/internal/metadata/generated_status.go @@ -0,0 +1,25 @@ +// Code generated by mdatagen. DO NOT EDIT. + +package metadata + +import ( + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/trace" +) + +var ( + Type = component.MustNewType("jaegerremotesampling") +) + +const ( + ExtensionStability = component.StabilityLevelAlpha +) + +func Meter(settings component.TelemetrySettings) metric.Meter { + return settings.MeterProvider.Meter("otelcol") +} + +func Tracer(settings component.TelemetrySettings) trace.Tracer { + return settings.TracerProvider.Tracer("otelcol") +} diff --git a/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/internal/remote_strategy_cache.go b/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/internal/remote_strategy_cache.go index c8cd90a88f54..07c15b9376e2 100644 --- a/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/internal/remote_strategy_cache.go +++ b/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/internal/remote_strategy_cache.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package internal // import "github.com/open-telemetry/opentelemetry-collector-contrib/extension/jaegerremotesampling/internal" diff --git a/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/internal/remote_strategy_cache_test.go b/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/internal/remote_strategy_cache_test.go index 4ecb89ad1de8..7bd6c4fa767c 100644 --- a/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/internal/remote_strategy_cache_test.go +++ b/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/internal/remote_strategy_cache_test.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package internal diff --git a/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/internal/remote_strategy_store.go b/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/internal/remote_strategy_store.go index a7f85fb86e18..d6d9bd811b11 100644 --- a/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/internal/remote_strategy_store.go +++ b/internal/component/otelcol/extension/jaeger_remote_sampling/internal/jaegerremotesampling/internal/remote_strategy_store.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package internal // import "github.com/open-telemetry/opentelemetry-collector-contrib/extension/jaegerremotesampling/internal" @@ -41,7 +30,7 @@ type grpcRemoteStrategyStore struct { // for service-specific outbound GetSamplingStrategy calls. 
func NewRemoteStrategyStore( conn *grpc.ClientConn, - grpcClientSettings *configgrpc.GRPCClientSettings, + grpcClientSettings *configgrpc.ClientConfig, reloadInterval time.Duration, ) (strategystore.StrategyStore, io.Closer) { diff --git a/internal/component/otelcol/extension/jaeger_remote_sampling/jaeger_remote_sampling.go b/internal/component/otelcol/extension/jaeger_remote_sampling/jaeger_remote_sampling.go index 6269e9982acc..04ac8010cba2 100644 --- a/internal/component/otelcol/extension/jaeger_remote_sampling/jaeger_remote_sampling.go +++ b/internal/component/otelcol/extension/jaeger_remote_sampling/jaeger_remote_sampling.go @@ -59,8 +59,8 @@ var ( // Convert implements extension.Arguments. func (args Arguments) Convert() (otelcomponent.Config, error) { return &jaegerremotesampling.Config{ - HTTPServerSettings: (*otelcol.HTTPServerArguments)(args.HTTP).Convert(), - GRPCServerSettings: (*otelcol.GRPCServerArguments)(args.GRPC).Convert(), + HTTPServerConfig: (*otelcol.HTTPServerArguments)(args.HTTP).Convert(), + GRPCServerConfig: (*otelcol.GRPCServerArguments)(args.GRPC).Convert(), Source: jaegerremotesampling.Source{ Remote: (*otelcol.GRPCClientArguments)(args.Source.Remote).Convert(), File: args.Source.File, diff --git a/internal/component/otelcol/processor/filter/filter_test.go b/internal/component/otelcol/processor/filter/filter_test.go index a86d2d3fcdd7..aade36c458d1 100644 --- a/internal/component/otelcol/processor/filter/filter_test.go +++ b/internal/component/otelcol/processor/filter/filter_test.go @@ -154,7 +154,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) { } output {} `, - errMsg: `unable to parse OTTL statement "match() where UnknowFunction(\"http.method\")": undefined function "UnknowFunction"`, + errMsg: `unable to parse OTTL condition "UnknowFunction(\"http.method\")": undefined function "UnknowFunction"`, }, } diff --git a/internal/component/otelcol/processor/processor.go b/internal/component/otelcol/processor/processor.go index 
43d626ba5e5d..0ccbfe4c9bcc 100644 --- a/internal/component/otelcol/processor/processor.go +++ b/internal/component/otelcol/processor/processor.go @@ -136,9 +136,7 @@ func (p *Processor) Update(args component.Arguments) error { TracerProvider: p.opts.Tracer, MeterProvider: metric.NewMeterProvider(metric.WithReader(promExporter)), - ReportComponentStatus: func(*otelcomponent.StatusEvent) error { - return nil - }, + ReportStatus: func(*otelcomponent.StatusEvent) {}, }, BuildInfo: otelcomponent.BuildInfo{ diff --git a/internal/component/otelcol/processor/resourcedetection/internal/aws/ecs/config.go b/internal/component/otelcol/processor/resourcedetection/internal/aws/ecs/config.go index d5c82182a980..34f339775536 100644 --- a/internal/component/otelcol/processor/resourcedetection/internal/aws/ecs/config.go +++ b/internal/component/otelcol/processor/resourcedetection/internal/aws/ecs/config.go @@ -18,6 +18,7 @@ var DefaultArguments = Config{ AwsEcsLaunchtype: rac.ResourceAttributeConfig{Enabled: true}, AwsEcsTaskArn: rac.ResourceAttributeConfig{Enabled: true}, AwsEcsTaskFamily: rac.ResourceAttributeConfig{Enabled: true}, + AwsEcsTaskID: rac.ResourceAttributeConfig{Enabled: true}, AwsEcsTaskRevision: rac.ResourceAttributeConfig{Enabled: true}, AwsLogGroupArns: rac.ResourceAttributeConfig{Enabled: true}, AwsLogGroupNames: rac.ResourceAttributeConfig{Enabled: true}, @@ -54,6 +55,7 @@ type ResourceAttributesConfig struct { AwsEcsLaunchtype rac.ResourceAttributeConfig `river:"aws.ecs.launchtype,block,optional"` AwsEcsTaskArn rac.ResourceAttributeConfig `river:"aws.ecs.task.arn,block,optional"` AwsEcsTaskFamily rac.ResourceAttributeConfig `river:"aws.ecs.task.family,block,optional"` + AwsEcsTaskID rac.ResourceAttributeConfig `river:"aws.ecs.task.id,block,optional"` AwsEcsTaskRevision rac.ResourceAttributeConfig `river:"aws.ecs.task.revision,block,optional"` AwsLogGroupArns rac.ResourceAttributeConfig `river:"aws.log.group.arns,block,optional"` AwsLogGroupNames 
rac.ResourceAttributeConfig `river:"aws.log.group.names,block,optional"` @@ -72,6 +74,7 @@ func (r ResourceAttributesConfig) Convert() map[string]interface{} { "aws.ecs.launchtype": r.AwsEcsLaunchtype.Convert(), "aws.ecs.task.arn": r.AwsEcsTaskArn.Convert(), "aws.ecs.task.family": r.AwsEcsTaskFamily.Convert(), + "aws.ecs.task.id": r.AwsEcsTaskID.Convert(), "aws.ecs.task.revision": r.AwsEcsTaskRevision.Convert(), "aws.log.group.arns": r.AwsLogGroupArns.Convert(), "aws.log.group.names": r.AwsLogGroupNames.Convert(), diff --git a/internal/component/otelcol/processor/resourcedetection/internal/aws/eks/config.go b/internal/component/otelcol/processor/resourcedetection/internal/aws/eks/config.go index a0b2ca60c4c4..1f305d0fc21f 100644 --- a/internal/component/otelcol/processor/resourcedetection/internal/aws/eks/config.go +++ b/internal/component/otelcol/processor/resourcedetection/internal/aws/eks/config.go @@ -14,8 +14,9 @@ type Config struct { // DefaultArguments holds default settings for Config. var DefaultArguments = Config{ ResourceAttributes: ResourceAttributesConfig{ - CloudPlatform: rac.ResourceAttributeConfig{Enabled: true}, - CloudProvider: rac.ResourceAttributeConfig{Enabled: true}, + CloudPlatform: rac.ResourceAttributeConfig{Enabled: true}, + CloudProvider: rac.ResourceAttributeConfig{Enabled: true}, + K8sClusterName: rac.ResourceAttributeConfig{Enabled: false}, }, } @@ -34,13 +35,15 @@ func (args Config) Convert() map[string]interface{} { // ResourceAttributesConfig provides config for eks resource attributes. 
type ResourceAttributesConfig struct { - CloudPlatform rac.ResourceAttributeConfig `river:"cloud.platform,block,optional"` - CloudProvider rac.ResourceAttributeConfig `river:"cloud.provider,block,optional"` + CloudPlatform rac.ResourceAttributeConfig `river:"cloud.platform,block,optional"` + CloudProvider rac.ResourceAttributeConfig `river:"cloud.provider,block,optional"` + K8sClusterName rac.ResourceAttributeConfig `river:"k8s.cluster.name,block,optional"` } func (r ResourceAttributesConfig) Convert() map[string]interface{} { return map[string]interface{}{ - "cloud.platform": r.CloudPlatform.Convert(), - "cloud.provider": r.CloudProvider.Convert(), + "cloud.platform": r.CloudPlatform.Convert(), + "cloud.provider": r.CloudProvider.Convert(), + "k8s.cluster.name": r.K8sClusterName.Convert(), } } diff --git a/internal/component/otelcol/processor/resourcedetection/internal/azure/aks/config.go b/internal/component/otelcol/processor/resourcedetection/internal/azure/aks/config.go index b23e0fdba613..93c967b2391c 100644 --- a/internal/component/otelcol/processor/resourcedetection/internal/azure/aks/config.go +++ b/internal/component/otelcol/processor/resourcedetection/internal/azure/aks/config.go @@ -14,8 +14,9 @@ type Config struct { // DefaultArguments holds default settings for Config. var DefaultArguments = Config{ ResourceAttributes: ResourceAttributesConfig{ - CloudPlatform: rac.ResourceAttributeConfig{Enabled: true}, - CloudProvider: rac.ResourceAttributeConfig{Enabled: true}, + CloudPlatform: rac.ResourceAttributeConfig{Enabled: true}, + CloudProvider: rac.ResourceAttributeConfig{Enabled: true}, + K8sClusterName: rac.ResourceAttributeConfig{Enabled: false}, }, } @@ -34,13 +35,15 @@ func (args Config) Convert() map[string]interface{} { // ResourceAttributesConfig provides config for aks resource attributes. 
type ResourceAttributesConfig struct { - CloudPlatform rac.ResourceAttributeConfig `river:"cloud.platform,block,optional"` - CloudProvider rac.ResourceAttributeConfig `river:"cloud.provider,block,optional"` + CloudPlatform rac.ResourceAttributeConfig `river:"cloud.platform,block,optional"` + CloudProvider rac.ResourceAttributeConfig `river:"cloud.provider,block,optional"` + K8sClusterName rac.ResourceAttributeConfig `river:"k8s.cluster.name,block,optional"` } func (r ResourceAttributesConfig) Convert() map[string]interface{} { return map[string]interface{}{ - "cloud.platform": r.CloudPlatform.Convert(), - "cloud.provider": r.CloudProvider.Convert(), + "cloud.platform": r.CloudPlatform.Convert(), + "cloud.provider": r.CloudProvider.Convert(), + "k8s.cluster.name": r.K8sClusterName.Convert(), } } diff --git a/internal/component/otelcol/processor/resourcedetection/internal/system/config.go b/internal/component/otelcol/processor/resourcedetection/internal/system/config.go index 3c72a13228a8..84cb4c1a99e7 100644 --- a/internal/component/otelcol/processor/resourcedetection/internal/system/config.go +++ b/internal/component/otelcol/processor/resourcedetection/internal/system/config.go @@ -34,6 +34,8 @@ func (c *Config) SetToDefault() { HostCPUStepping: rac.ResourceAttributeConfig{Enabled: false}, HostCPUVendorID: rac.ResourceAttributeConfig{Enabled: false}, HostID: rac.ResourceAttributeConfig{Enabled: false}, + HostIP: rac.ResourceAttributeConfig{Enabled: false}, + HostMac: rac.ResourceAttributeConfig{Enabled: false}, HostName: rac.ResourceAttributeConfig{Enabled: true}, OsDescription: rac.ResourceAttributeConfig{Enabled: false}, OsType: rac.ResourceAttributeConfig{Enabled: true}, @@ -71,6 +73,8 @@ type ResourceAttributesConfig struct { HostCPUStepping rac.ResourceAttributeConfig `river:"host.cpu.stepping,block,optional"` HostCPUVendorID rac.ResourceAttributeConfig `river:"host.cpu.vendor.id,block,optional"` HostID rac.ResourceAttributeConfig 
`river:"host.id,block,optional"` + HostIP rac.ResourceAttributeConfig `river:"host.ip,block,optional"` + HostMac rac.ResourceAttributeConfig `river:"host.mac,block,optional"` HostName rac.ResourceAttributeConfig `river:"host.name,block,optional"` OsDescription rac.ResourceAttributeConfig `river:"os.description,block,optional"` OsType rac.ResourceAttributeConfig `river:"os.type,block,optional"` @@ -86,6 +90,8 @@ func (r ResourceAttributesConfig) Convert() map[string]interface{} { "host.cpu.stepping": r.HostCPUStepping.Convert(), "host.cpu.vendor.id": r.HostCPUVendorID.Convert(), "host.id": r.HostID.Convert(), + "host.ip": r.HostIP.Convert(), + "host.mac": r.HostMac.Convert(), "host.name": r.HostName.Convert(), "os.description": r.OsDescription.Convert(), "os.type": r.OsType.Convert(), diff --git a/internal/component/otelcol/processor/resourcedetection/resourcedetection_test.go b/internal/component/otelcol/processor/resourcedetection/resourcedetection_test.go index d52c3f65e323..cb48f4f4dc48 100644 --- a/internal/component/otelcol/processor/resourcedetection/resourcedetection_test.go +++ b/internal/component/otelcol/processor/resourcedetection/resourcedetection_test.go @@ -251,22 +251,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) { cfg: ` detectors = ["ecs"] ecs { - resource_attributes { - aws.ecs.cluster.arn { enabled = true } - aws.ecs.launchtype { enabled = true } - aws.ecs.task.arn { enabled = true } - aws.ecs.task.family { enabled = true } - aws.ecs.task.revision { enabled = true } - aws.log.group.arns { enabled = true } - aws.log.group.names { enabled = false } - // aws.log.stream.arns { enabled = true } - // aws.log.stream.names { enabled = true } - // cloud.account.id { enabled = true } - // cloud.availability_zone { enabled = true } - // cloud.platform { enabled = true } - // cloud.provider { enabled = true } - // cloud.region { enabled = true } - } + resource_attributes {} } output {} `, @@ -281,9 +266,10 @@ func TestArguments_UnmarshalRiver(t 
*testing.T) { "aws.ecs.launchtype": map[string]interface{}{"enabled": true}, "aws.ecs.task.arn": map[string]interface{}{"enabled": true}, "aws.ecs.task.family": map[string]interface{}{"enabled": true}, + "aws.ecs.task.id": map[string]interface{}{"enabled": true}, "aws.ecs.task.revision": map[string]interface{}{"enabled": true}, "aws.log.group.arns": map[string]interface{}{"enabled": true}, - "aws.log.group.names": map[string]interface{}{"enabled": false}, + "aws.log.group.names": map[string]interface{}{"enabled": true}, "aws.log.stream.arns": map[string]interface{}{"enabled": true}, "aws.log.stream.names": map[string]interface{}{"enabled": true}, "cloud.account.id": map[string]interface{}{"enabled": true}, @@ -318,6 +304,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) { aws.ecs.launchtype { enabled = true } aws.ecs.task.arn { enabled = true } aws.ecs.task.family { enabled = true } + aws.ecs.task.id { enabled = true } aws.ecs.task.revision { enabled = true } aws.log.group.arns { enabled = true } aws.log.group.names { enabled = false } @@ -343,6 +330,7 @@ func TestArguments_UnmarshalRiver(t *testing.T) { "aws.ecs.launchtype": map[string]interface{}{"enabled": true}, "aws.ecs.task.arn": map[string]interface{}{"enabled": true}, "aws.ecs.task.family": map[string]interface{}{"enabled": true}, + "aws.ecs.task.id": map[string]interface{}{"enabled": true}, "aws.ecs.task.revision": map[string]interface{}{"enabled": true}, "aws.log.group.arns": map[string]interface{}{"enabled": true}, "aws.log.group.names": map[string]interface{}{"enabled": false}, diff --git a/internal/component/otelcol/processor/tail_sampling/types.go b/internal/component/otelcol/processor/tail_sampling/types.go index 90087cbbb6fe..1561c29f33bb 100644 --- a/internal/component/otelcol/processor/tail_sampling/types.go +++ b/internal/component/otelcol/processor/tail_sampling/types.go @@ -65,11 +65,14 @@ type SharedPolicyConfig struct { type LatencyConfig struct { // ThresholdMs in milliseconds. 
ThresholdMs int64 `river:"threshold_ms,attr"` + // Upper bound in milliseconds. + UpperThresholdmsMs int64 `river:"upper_threshold_ms,attr,optional"` } func (latencyConfig LatencyConfig) Convert() tsp.LatencyCfg { return tsp.LatencyCfg{ - ThresholdMs: latencyConfig.ThresholdMs, + ThresholdMs: latencyConfig.ThresholdMs, + UpperThresholdmsMs: latencyConfig.UpperThresholdmsMs, } } @@ -208,9 +211,11 @@ func (booleanAttributeConfig BooleanAttributeConfig) Convert() tsp.BooleanAttrib type ErrorMode string const ( - // "ignore" causes evaluation to continue to the next statement. + // "ignore" ignores errors returned by conditions, logs them, and continues on to the next condition. ErrorModeIgnore ErrorMode = "ignore" - // "propagate" causes the evaluation to be false and an error is returned. + // "silent" ignores errors returned by conditions, does not log them, and continues on to the next condition. + ErrorModeSilent ErrorMode = "silent" + // "propagate" causes the evaluation to be false and an error is returned. The data is dropped. ErrorModePropagate ErrorMode = "propagate" ) @@ -253,7 +258,7 @@ func (e *ErrorMode) UnmarshalText(text []byte) error { str := ErrorMode(strings.ToLower(string(text))) switch str { - case ErrorModeIgnore, ErrorModePropagate: + case ErrorModeIgnore, ErrorModePropagate, ErrorModeSilent: *e = str return nil default: diff --git a/internal/component/otelcol/receiver/jaeger/jaeger.go b/internal/component/otelcol/receiver/jaeger/jaeger.go index c7817cc68ab2..29acbc16381b 100644 --- a/internal/component/otelcol/receiver/jaeger/jaeger.go +++ b/internal/component/otelcol/receiver/jaeger/jaeger.go @@ -112,7 +112,7 @@ func (args *GRPC) SetToDefault() { } // Convert converts proto into the upstream type. 
-func (args *GRPC) Convert() *otelconfiggrpc.GRPCServerSettings { +func (args *GRPC) Convert() *otelconfiggrpc.ServerConfig { if args == nil { return nil } @@ -134,7 +134,7 @@ func (args *ThriftHTTP) SetToDefault() { } // Convert converts proto into the upstream type. -func (args *ThriftHTTP) Convert() *otelconfighttp.HTTPServerSettings { +func (args *ThriftHTTP) Convert() *otelconfighttp.ServerConfig { if args == nil { return nil } diff --git a/internal/component/otelcol/receiver/kafka/kafka.go b/internal/component/otelcol/receiver/kafka/kafka.go index d52223f1adee..529b6c5b2edd 100644 --- a/internal/component/otelcol/receiver/kafka/kafka.go +++ b/internal/component/otelcol/receiver/kafka/kafka.go @@ -39,6 +39,8 @@ type Arguments struct { ClientID string `river:"client_id,attr,optional"` InitialOffset string `river:"initial_offset,attr,optional"` + ResolveCanonicalBootstrapServersOnly bool `river:"resolve_canonical_bootstrap_servers_only,attr,optional"` + Authentication AuthenticationArguments `river:"authentication,block,optional"` Metadata MetadataArguments `river:"metadata,block,optional"` AutoCommit AutoCommitArguments `river:"autocommit,block,optional"` @@ -93,6 +95,7 @@ func (args Arguments) Convert() (otelcomponent.Config, error) { result.GroupID = args.GroupID result.ClientID = args.ClientID result.InitialOffset = args.InitialOffset + result.ResolveCanonicalBootstrapServersOnly = args.ResolveCanonicalBootstrapServersOnly result.Metadata = args.Metadata.Convert() result.AutoCommit = args.AutoCommit.Convert() result.MessageMarking = args.MessageMarking.Convert() diff --git a/internal/component/otelcol/receiver/opencensus/opencensus.go b/internal/component/otelcol/receiver/opencensus/opencensus.go index 2de11c257831..c97182a5bdb7 100644 --- a/internal/component/otelcol/receiver/opencensus/opencensus.go +++ b/internal/component/otelcol/receiver/opencensus/opencensus.go @@ -57,8 +57,8 @@ func (args *Arguments) SetToDefault() { // Convert implements 
receiver.Arguments. func (args Arguments) Convert() (otelcomponent.Config, error) { return &opencensusreceiver.Config{ - CorsOrigins: args.CorsAllowedOrigins, - GRPCServerSettings: *args.GRPC.Convert(), + CorsOrigins: args.CorsAllowedOrigins, + ServerConfig: *args.GRPC.Convert(), }, nil } diff --git a/internal/component/otelcol/receiver/otlp/otlp.go b/internal/component/otelcol/receiver/otlp/otlp.go index 9beb1f7ca3f9..e8b04634bc5a 100644 --- a/internal/component/otelcol/receiver/otlp/otlp.go +++ b/internal/component/otelcol/receiver/otlp/otlp.go @@ -60,10 +60,10 @@ func (args *HTTPConfigArguments) Convert() *otlpreceiver.HTTPConfig { } return &otlpreceiver.HTTPConfig{ - HTTPServerSettings: args.HTTPServerArguments.Convert(), - TracesURLPath: args.TracesURLPath, - MetricsURLPath: args.MetricsURLPath, - LogsURLPath: args.LogsURLPath, + ServerConfig: args.HTTPServerArguments.Convert(), + TracesURLPath: args.TracesURLPath, + MetricsURLPath: args.MetricsURLPath, + LogsURLPath: args.LogsURLPath, } } diff --git a/internal/component/otelcol/receiver/prometheus/internal/staleness_end_to_end_test.go b/internal/component/otelcol/receiver/prometheus/internal/staleness_end_to_end_test.go index 224c3cd4e1f0..2c5ef6c02637 100644 --- a/internal/component/otelcol/receiver/prometheus/internal/staleness_end_to_end_test.go +++ b/internal/component/otelcol/receiver/prometheus/internal/staleness_end_to_end_test.go @@ -158,7 +158,7 @@ service: require.NoError(t, err) appSettings := otelcol.CollectorSettings{ - Factories: factories, + Factories: func() (otelcol.Factories, error) { return factories, nil }, ConfigProvider: configProvider, BuildInfo: component.BuildInfo{ Command: "otelcol", diff --git a/internal/component/otelcol/receiver/prometheus/prometheus.go b/internal/component/otelcol/receiver/prometheus/prometheus.go index ba033337887d..241605df693f 100644 --- a/internal/component/otelcol/receiver/prometheus/prometheus.go +++ 
b/internal/component/otelcol/receiver/prometheus/prometheus.go @@ -123,9 +123,7 @@ func (c *Component) Update(newConfig component.Arguments) error { TracerProvider: traceNoop.NewTracerProvider(), MeterProvider: metricNoop.NewMeterProvider(), - ReportComponentStatus: func(*otelcomponent.StatusEvent) error { - return nil - }, + ReportStatus: func(*otelcomponent.StatusEvent) {}, }, BuildInfo: otelcomponent.BuildInfo{ diff --git a/internal/component/otelcol/receiver/receiver.go b/internal/component/otelcol/receiver/receiver.go index 55cfebc604cd..13c52ccfd112 100644 --- a/internal/component/otelcol/receiver/receiver.go +++ b/internal/component/otelcol/receiver/receiver.go @@ -134,9 +134,7 @@ func (r *Receiver) Update(args component.Arguments) error { TracerProvider: r.opts.Tracer, MeterProvider: metric.NewMeterProvider(metricOpts...), - ReportComponentStatus: func(*otelcomponent.StatusEvent) error { - return nil - }, + ReportStatus: func(*otelcomponent.StatusEvent) {}, }, BuildInfo: otelcomponent.BuildInfo{ diff --git a/internal/component/otelcol/receiver/zipkin/zipkin.go b/internal/component/otelcol/receiver/zipkin/zipkin.go index 6cf15ac2a15f..0e8b87343b44 100644 --- a/internal/component/otelcol/receiver/zipkin/zipkin.go +++ b/internal/component/otelcol/receiver/zipkin/zipkin.go @@ -52,8 +52,8 @@ func (args *Arguments) SetToDefault() { // Convert implements receiver.Arguments. 
func (args Arguments) Convert() (otelcomponent.Config, error) { return &zipkinreceiver.Config{ - ParseStringTags: args.ParseStringTags, - HTTPServerSettings: *args.HTTPServer.Convert(), + ParseStringTags: args.ParseStringTags, + ServerConfig: *args.HTTPServer.Convert(), }, nil } diff --git a/internal/component/otelcol/receiver/zipkin/zipkin_test.go b/internal/component/otelcol/receiver/zipkin/zipkin_test.go index 4377bb5e3328..30ffabc11d60 100644 --- a/internal/component/otelcol/receiver/zipkin/zipkin_test.go +++ b/internal/component/otelcol/receiver/zipkin/zipkin_test.go @@ -69,10 +69,10 @@ func TestArguments_UnmarshalRiver(t *testing.T) { require.True(t, ok) // Check the arguments - require.Equal(t, otelArgs.HTTPServerSettings.Endpoint, httpAddr) - require.Equal(t, len(otelArgs.HTTPServerSettings.CORS.AllowedOrigins), 2) - require.Equal(t, otelArgs.HTTPServerSettings.CORS.AllowedOrigins[0], "https://*.test.com") - require.Equal(t, otelArgs.HTTPServerSettings.CORS.AllowedOrigins[1], "https://test.com") + require.Equal(t, otelArgs.ServerConfig.Endpoint, httpAddr) + require.Equal(t, len(otelArgs.ServerConfig.CORS.AllowedOrigins), 2) + require.Equal(t, otelArgs.ServerConfig.CORS.AllowedOrigins[0], "https://*.test.com") + require.Equal(t, otelArgs.ServerConfig.CORS.AllowedOrigins[1], "https://test.com") require.Equal(t, otelArgs.ParseStringTags, true) }) } diff --git a/internal/converter/internal/otelcolconvert/converter_jaegerreceiver.go b/internal/converter/internal/otelcolconvert/converter_jaegerreceiver.go index c7cb41c5ea58..360520b94847 100644 --- a/internal/converter/internal/otelcolconvert/converter_jaegerreceiver.go +++ b/internal/converter/internal/otelcolconvert/converter_jaegerreceiver.go @@ -62,14 +62,14 @@ func toJaegerReceiver(state *state, id component.InstanceID, cfg *jaegerreceiver } } -func toJaegerGRPCArguments(cfg *configgrpc.GRPCServerSettings) *jaeger.GRPC { +func toJaegerGRPCArguments(cfg *configgrpc.ServerConfig) *jaeger.GRPC { if cfg == nil { 
return nil } return &jaeger.GRPC{GRPCServerArguments: toGRPCServerArguments(cfg)} } -func toJaegerThriftHTTPArguments(cfg *confighttp.HTTPServerSettings) *jaeger.ThriftHTTP { +func toJaegerThriftHTTPArguments(cfg *confighttp.ServerConfig) *jaeger.ThriftHTTP { if cfg == nil { return nil } diff --git a/internal/converter/internal/otelcolconvert/converter_jaegerremotesamplingextension.go b/internal/converter/internal/otelcolconvert/converter_jaegerremotesamplingextension.go index 2076a7290d7f..df1cda709fca 100644 --- a/internal/converter/internal/otelcolconvert/converter_jaegerremotesamplingextension.go +++ b/internal/converter/internal/otelcolconvert/converter_jaegerremotesamplingextension.go @@ -47,12 +47,12 @@ func toJaegerRemoteSamplingExtension(cfg *jaegerremotesampling.Config) *jaeger_r } var grpc *jaeger_remote_sampling.GRPCServerArguments - if cfg.GRPCServerSettings != nil { - grpc = (*jaeger_remote_sampling.GRPCServerArguments)(toGRPCServerArguments(cfg.GRPCServerSettings)) + if cfg.GRPCServerConfig != nil { + grpc = (*jaeger_remote_sampling.GRPCServerArguments)(toGRPCServerArguments(cfg.GRPCServerConfig)) } var http *jaeger_remote_sampling.HTTPServerArguments - if cfg.HTTPServerSettings != nil { - http = (*jaeger_remote_sampling.HTTPServerArguments)(toHTTPServerArguments(cfg.HTTPServerSettings)) + if cfg.HTTPServerConfig != nil { + http = (*jaeger_remote_sampling.HTTPServerArguments)(toHTTPServerArguments(cfg.HTTPServerConfig)) } var remote *jaeger_remote_sampling.GRPCClientArguments if cfg.Source.Remote != nil { diff --git a/internal/converter/internal/otelcolconvert/converter_kafkareceiver.go b/internal/converter/internal/otelcolconvert/converter_kafkareceiver.go index ae941514e988..2dbc84db0c3e 100644 --- a/internal/converter/internal/otelcolconvert/converter_kafkareceiver.go +++ b/internal/converter/internal/otelcolconvert/converter_kafkareceiver.go @@ -59,6 +59,8 @@ func toKafkaReceiver(state *state, id component.InstanceID, cfg *kafkareceiver.C 
ClientID: cfg.ClientID, InitialOffset: cfg.InitialOffset, + ResolveCanonicalBootstrapServersOnly: cfg.ResolveCanonicalBootstrapServersOnly, + Authentication: toKafkaAuthentication(encodeMapstruct(cfg.Authentication)), Metadata: toKafkaMetadata(cfg.Metadata), AutoCommit: toKafkaAutoCommit(cfg.AutoCommit), diff --git a/internal/converter/internal/otelcolconvert/converter_loadbalancingexporter.go b/internal/converter/internal/otelcolconvert/converter_loadbalancingexporter.go index a01136e1d2d6..9f73d5935940 100644 --- a/internal/converter/internal/otelcolconvert/converter_loadbalancingexporter.go +++ b/internal/converter/internal/otelcolconvert/converter_loadbalancingexporter.go @@ -81,8 +81,8 @@ func toProtocol(cfg loadbalancingexporter.Protocol) loadbalancing.Protocol { // remove unwanted fields. OTLP: loadbalancing.OtlpConfig{ Timeout: cfg.OTLP.Timeout, - Queue: toQueueArguments(cfg.OTLP.QueueSettings), - Retry: toRetryArguments(cfg.OTLP.RetrySettings), + Queue: toQueueArguments(cfg.OTLP.QueueConfig), + Retry: toRetryArguments(cfg.OTLP.RetryConfig), Client: loadbalancing.GRPCClientArguments{ Compression: otelcol.CompressionType(cfg.OTLP.Compression), diff --git a/internal/converter/internal/otelcolconvert/converter_opencensusreceiver.go b/internal/converter/internal/otelcolconvert/converter_opencensusreceiver.go index 78ae892ce456..cbc49366ad5a 100644 --- a/internal/converter/internal/otelcolconvert/converter_opencensusreceiver.go +++ b/internal/converter/internal/otelcolconvert/converter_opencensusreceiver.go @@ -48,7 +48,7 @@ func toOpencensusReceiver(state *state, id component.InstanceID, cfg *opencensus return &opencensus.Arguments{ CorsAllowedOrigins: cfg.CorsOrigins, - GRPC: *toGRPCServerArguments(&cfg.GRPCServerSettings), + GRPC: *toGRPCServerArguments(&cfg.ServerConfig), DebugMetrics: common.DefaultValue[opencensus.Arguments]().DebugMetrics, diff --git a/internal/converter/internal/otelcolconvert/converter_otlpexporter.go 
b/internal/converter/internal/otelcolconvert/converter_otlpexporter.go index 230478144c08..c13e550f6158 100644 --- a/internal/converter/internal/otelcolconvert/converter_otlpexporter.go +++ b/internal/converter/internal/otelcolconvert/converter_otlpexporter.go @@ -13,6 +13,7 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/configgrpc" "go.opentelemetry.io/collector/config/configopaque" + "go.opentelemetry.io/collector/config/configretry" "go.opentelemetry.io/collector/config/configtls" "go.opentelemetry.io/collector/exporter/exporterhelper" "go.opentelemetry.io/collector/exporter/otlpexporter" @@ -59,12 +60,12 @@ func toOtelcolExporterOTLP(cfg *otlpexporter.Config) *otlp.Arguments { return &otlp.Arguments{ Timeout: cfg.Timeout, - Queue: toQueueArguments(cfg.QueueSettings), - Retry: toRetryArguments(cfg.RetrySettings), + Queue: toQueueArguments(cfg.QueueConfig), + Retry: toRetryArguments(cfg.RetryConfig), DebugMetrics: common.DefaultValue[otlp.Arguments]().DebugMetrics, - Client: otlp.GRPCClientArguments(toGRPCClientArguments(cfg.GRPCClientSettings)), + Client: otlp.GRPCClientArguments(toGRPCClientArguments(cfg.ClientConfig)), } } @@ -76,7 +77,7 @@ func toQueueArguments(cfg exporterhelper.QueueSettings) otelcol.QueueArguments { } } -func toRetryArguments(cfg exporterhelper.RetrySettings) otelcol.RetryArguments { +func toRetryArguments(cfg configretry.BackOffConfig) otelcol.RetryArguments { return otelcol.RetryArguments{ Enabled: cfg.Enabled, InitialInterval: cfg.InitialInterval, @@ -87,7 +88,7 @@ func toRetryArguments(cfg exporterhelper.RetrySettings) otelcol.RetryArguments { } } -func toGRPCClientArguments(cfg configgrpc.GRPCClientSettings) otelcol.GRPCClientArguments { +func toGRPCClientArguments(cfg configgrpc.ClientConfig) otelcol.GRPCClientArguments { var a *auth.Handler if cfg.Auth != nil { a = &auth.Handler{} diff --git a/internal/converter/internal/otelcolconvert/converter_otlphttpexporter.go 
b/internal/converter/internal/otelcolconvert/converter_otlphttpexporter.go index d978e89c6a2e..64f1ff69e13d 100644 --- a/internal/converter/internal/otelcolconvert/converter_otlphttpexporter.go +++ b/internal/converter/internal/otelcolconvert/converter_otlphttpexporter.go @@ -57,14 +57,15 @@ func (otlpHTTPExporterConverter) ConvertAndAppend(state *state, id component.Ins func toOtelcolExporterOTLPHTTP(cfg *otlphttpexporter.Config) *otlphttp.Arguments { return &otlphttp.Arguments{ - Client: otlphttp.HTTPClientArguments(toHTTPClientArguments(cfg.HTTPClientSettings)), - Queue: toQueueArguments(cfg.QueueSettings), - Retry: toRetryArguments(cfg.RetrySettings), + Client: otlphttp.HTTPClientArguments(toHTTPClientArguments(cfg.ClientConfig)), + Queue: toQueueArguments(cfg.QueueConfig), + Retry: toRetryArguments(cfg.RetryConfig), + Encoding: string(cfg.Encoding), DebugMetrics: common.DefaultValue[otlphttp.Arguments]().DebugMetrics, } } -func toHTTPClientArguments(cfg confighttp.HTTPClientSettings) otelcol.HTTPClientArguments { +func toHTTPClientArguments(cfg confighttp.ClientConfig) otelcol.HTTPClientArguments { var a *auth.Handler if cfg.Auth != nil { a = &auth.Handler{} @@ -72,7 +73,7 @@ func toHTTPClientArguments(cfg confighttp.HTTPClientSettings) otelcol.HTTPClient var mic *int var ict *time.Duration - defaults := confighttp.NewDefaultHTTPClientSettings() + defaults := confighttp.NewDefaultClientConfig() if mic = cfg.MaxIdleConns; mic == nil { mic = defaults.MaxIdleConns } @@ -86,13 +87,15 @@ func toHTTPClientArguments(cfg confighttp.HTTPClientSettings) otelcol.HTTPClient ReadBufferSize: units.Base2Bytes(cfg.ReadBufferSize), WriteBufferSize: units.Base2Bytes(cfg.WriteBufferSize), - Timeout: cfg.Timeout, - Headers: toHeadersMap(cfg.Headers), - MaxIdleConns: mic, - MaxIdleConnsPerHost: cfg.MaxIdleConnsPerHost, - MaxConnsPerHost: cfg.MaxConnsPerHost, - IdleConnTimeout: ict, - DisableKeepAlives: cfg.DisableKeepAlives, + Timeout: cfg.Timeout, + Headers: 
toHeadersMap(cfg.Headers), + MaxIdleConns: mic, + MaxIdleConnsPerHost: cfg.MaxIdleConnsPerHost, + MaxConnsPerHost: cfg.MaxConnsPerHost, + IdleConnTimeout: ict, + DisableKeepAlives: cfg.DisableKeepAlives, + HTTP2PingTimeout: cfg.HTTP2PingTimeout, + HTTP2ReadIdleTimeout: cfg.HTTP2ReadIdleTimeout, Auth: a, } diff --git a/internal/converter/internal/otelcolconvert/converter_otlpreceiver.go b/internal/converter/internal/otelcolconvert/converter_otlpreceiver.go index 6bbb1d7526b8..d649210ec952 100644 --- a/internal/converter/internal/otelcolconvert/converter_otlpreceiver.go +++ b/internal/converter/internal/otelcolconvert/converter_otlpreceiver.go @@ -64,7 +64,7 @@ func toOtelcolReceiverOTLP(state *state, id component.InstanceID, cfg *otlprecei } } -func toGRPCServerArguments(cfg *configgrpc.GRPCServerSettings) *otelcol.GRPCServerArguments { +func toGRPCServerArguments(cfg *configgrpc.ServerConfig) *otelcol.GRPCServerArguments { if cfg == nil { return nil } @@ -100,15 +100,18 @@ func toTLSServerArguments(cfg *configtls.TLSServerSetting) *otelcol.TLSServerArg func toTLSSetting(cfg configtls.TLSSetting) otelcol.TLSSetting { return otelcol.TLSSetting{ - CA: string(cfg.CAPem), - CAFile: cfg.CAFile, - Cert: string(cfg.CertPem), - CertFile: cfg.CertFile, - Key: rivertypes.Secret(cfg.KeyPem), - KeyFile: cfg.KeyFile, - MinVersion: cfg.MinVersion, - MaxVersion: cfg.MaxVersion, - ReloadInterval: cfg.ReloadInterval, + CA: string(cfg.CAPem), + CAFile: cfg.CAFile, + Cert: string(cfg.CertPem), + CertFile: cfg.CertFile, + Key: rivertypes.Secret(cfg.KeyPem), + KeyFile: cfg.KeyFile, + MinVersion: cfg.MinVersion, + MaxVersion: cfg.MaxVersion, + ReloadInterval: cfg.ReloadInterval, + IncludeSystemCACertsPool: cfg.IncludeSystemCACertsPool, + //TODO(ptodev): Do we need to copy this slice? 
+ CipherSuites: cfg.CipherSuites, } } @@ -154,7 +157,7 @@ func toHTTPConfigArguments(cfg *otlpreceiver.HTTPConfig) *otlp.HTTPConfigArgumen } return &otlp.HTTPConfigArguments{ - HTTPServerArguments: toHTTPServerArguments(cfg.HTTPServerSettings), + HTTPServerArguments: toHTTPServerArguments(cfg.ServerConfig), TracesURLPath: cfg.TracesURLPath, MetricsURLPath: cfg.MetricsURLPath, @@ -162,7 +165,7 @@ func toHTTPConfigArguments(cfg *otlpreceiver.HTTPConfig) *otlp.HTTPConfigArgumen } } -func toHTTPServerArguments(cfg *confighttp.HTTPServerSettings) *otelcol.HTTPServerArguments { +func toHTTPServerArguments(cfg *confighttp.ServerConfig) *otelcol.HTTPServerArguments { if cfg == nil { return nil } @@ -179,7 +182,7 @@ func toHTTPServerArguments(cfg *confighttp.HTTPServerSettings) *otelcol.HTTPServ } } -func toCORSArguments(cfg *confighttp.CORSSettings) *otelcol.CORSArguments { +func toCORSArguments(cfg *confighttp.CORSConfig) *otelcol.CORSArguments { if cfg == nil { return nil } diff --git a/internal/converter/internal/otelcolconvert/converter_spanmetricsconnector.go b/internal/converter/internal/otelcolconvert/converter_spanmetricsconnector.go index 5b26f953e268..e38f016f9ac6 100644 --- a/internal/converter/internal/otelcolconvert/converter_spanmetricsconnector.go +++ b/internal/converter/internal/otelcolconvert/converter_spanmetricsconnector.go @@ -79,6 +79,14 @@ func toSpanmetricsConnector(state *state, id component.InstanceID, cfg *spanmetr }) } + var eventDimensions []spanmetrics.Dimension + for _, d := range cfg.Dimensions { + eventDimensions = append(eventDimensions, spanmetrics.Dimension{ + Name: d.Name, + Default: d.Default, + }) + } + return &spanmetrics.Arguments{ Dimensions: dimensions, ExcludeDimensions: cfg.ExcludeDimensions, @@ -90,10 +98,17 @@ func toSpanmetricsConnector(state *state, id component.InstanceID, cfg *spanmetr Exponential: exponential, Explicit: explicit, }, - MetricsFlushInterval: cfg.MetricsFlushInterval, - Namespace: cfg.Namespace, + 
MetricsFlushInterval: cfg.MetricsFlushInterval, + Namespace: cfg.Namespace, + ResourceMetricsCacheSize: cfg.ResourceMetricsCacheSize, + ResourceMetricsKeyAttributes: cfg.ResourceMetricsKeyAttributes, Exemplars: spanmetrics.ExemplarsConfig{ - Enabled: cfg.Exemplars.Enabled, + Enabled: cfg.Exemplars.Enabled, + MaxPerDataPoint: cfg.Exemplars.MaxPerDataPoint, + }, + Events: spanmetrics.EventsConfig{ + Enabled: cfg.Events.Enabled, + Dimensions: eventDimensions, }, Output: &otelcol.ConsumerArguments{ diff --git a/internal/converter/internal/otelcolconvert/converter_tailsamplingprocessor.go b/internal/converter/internal/otelcolconvert/converter_tailsamplingprocessor.go index 1bd2828c5f4d..18e7ab4a26f2 100644 --- a/internal/converter/internal/otelcolconvert/converter_tailsamplingprocessor.go +++ b/internal/converter/internal/otelcolconvert/converter_tailsamplingprocessor.go @@ -166,7 +166,8 @@ func toAndSubPolicyCfg(cfgs []tailsamplingprocessor.AndSubPolicyCfg) []tail_samp func toLatencyConfig(cfg tailsamplingprocessor.LatencyCfg) tail_sampling.LatencyConfig { return tail_sampling.LatencyConfig{ - ThresholdMs: cfg.ThresholdMs, + ThresholdMs: cfg.ThresholdMs, + UpperThresholdmsMs: cfg.UpperThresholdmsMs, } } diff --git a/internal/converter/internal/otelcolconvert/converter_zipkinreceiver.go b/internal/converter/internal/otelcolconvert/converter_zipkinreceiver.go index 66c0f7d52e3f..f2c3cc82cf66 100644 --- a/internal/converter/internal/otelcolconvert/converter_zipkinreceiver.go +++ b/internal/converter/internal/otelcolconvert/converter_zipkinreceiver.go @@ -45,7 +45,7 @@ func toZipkinReceiver(state *state, id component.InstanceID, cfg *zipkinreceiver return &zipkin.Arguments{ ParseStringTags: cfg.ParseStringTags, - HTTPServer: *toHTTPServerArguments(&cfg.HTTPServerSettings), + HTTPServer: *toHTTPServerArguments(&cfg.ServerConfig), DebugMetrics: common.DefaultValue[zipkin.Arguments]().DebugMetrics, diff --git a/internal/converter/internal/otelcolconvert/otelcolconvert.go 
b/internal/converter/internal/otelcolconvert/otelcolconvert.go index 8262053c0c90..7c10b23acc2f 100644 --- a/internal/converter/internal/otelcolconvert/otelcolconvert.go +++ b/internal/converter/internal/otelcolconvert/otelcolconvert.go @@ -92,7 +92,7 @@ func readOpentelemetryConfig(in []byte) (*otelcol.Config, error) { Providers: map[string]confmap.Provider{ provider.Scheme(): provider, }, - Converters: []confmap.Converter{expandconverter.New()}, + Converters: []confmap.Converter{expandconverter.New(confmap.ConverterSettings{})}, }, }) if err != nil { diff --git a/internal/converter/internal/otelcolconvert/testdata/bearertoken.river b/internal/converter/internal/otelcolconvert/testdata/bearertoken.river index 83a26c92d80d..2442c044a655 100644 --- a/internal/converter/internal/otelcolconvert/testdata/bearertoken.river +++ b/internal/converter/internal/otelcolconvert/testdata/bearertoken.river @@ -33,7 +33,8 @@ otelcol.exporter.otlp "default_withauth" { otelcol.exporter.otlphttp "default_withauth" { client { - endpoint = "database:4318" - auth = otelcol.auth.bearer.default_withscheme.handler + endpoint = "database:4318" + http2_ping_timeout = "0s" + auth = otelcol.auth.bearer.default_withscheme.handler } } diff --git a/internal/converter/internal/otelcolconvert/testdata/kafka.river b/internal/converter/internal/otelcolconvert/testdata/kafka.river index b98eabaf2f76..c2f11b594b98 100644 --- a/internal/converter/internal/otelcolconvert/testdata/kafka.river +++ b/internal/converter/internal/otelcolconvert/testdata/kafka.river @@ -1,6 +1,7 @@ otelcol.receiver.kafka "default" { brokers = ["broker:9092"] protocol_version = "2.0.0" + topic = "" authentication { plaintext { diff --git a/internal/converter/internal/otelcolconvert/testdata/memorylimiter.river b/internal/converter/internal/otelcolconvert/testdata/memorylimiter.river index 3bbf6a2d4d55..630a51732ad2 100644 --- a/internal/converter/internal/otelcolconvert/testdata/memorylimiter.river +++ 
b/internal/converter/internal/otelcolconvert/testdata/memorylimiter.river @@ -11,7 +11,8 @@ otelcol.receiver.otlp "default" { } otelcol.processor.memory_limiter "default" { - check_interval = "1s" + check_interval = "1s" + limit_percentage = 90 output { metrics = [otelcol.exporter.otlp.default.input] diff --git a/internal/converter/internal/otelcolconvert/testdata/memorylimiter.yaml b/internal/converter/internal/otelcolconvert/testdata/memorylimiter.yaml index 8dded5038724..72d73ad0a096 100644 --- a/internal/converter/internal/otelcolconvert/testdata/memorylimiter.yaml +++ b/internal/converter/internal/otelcolconvert/testdata/memorylimiter.yaml @@ -10,6 +10,7 @@ exporters: processors: memory_limiter: + limit_percentage: 90 check_interval: 1s diff --git a/internal/converter/internal/otelcolconvert/testdata/oauth2.river b/internal/converter/internal/otelcolconvert/testdata/oauth2.river index 9a125399bcee..054492b8bd63 100644 --- a/internal/converter/internal/otelcolconvert/testdata/oauth2.river +++ b/internal/converter/internal/otelcolconvert/testdata/oauth2.river @@ -39,6 +39,7 @@ otelcol.exporter.otlp "default_withauth" { otelcol.exporter.otlphttp "default_noauth" { client { - endpoint = "database:4318" + endpoint = "database:4318" + http2_ping_timeout = "0s" } } diff --git a/internal/converter/internal/otelcolconvert/testdata/otlphttp.river b/internal/converter/internal/otelcolconvert/testdata/otlphttp.river index c1260fe87fa3..8e456338f8c8 100644 --- a/internal/converter/internal/otelcolconvert/testdata/otlphttp.river +++ b/internal/converter/internal/otelcolconvert/testdata/otlphttp.river @@ -12,6 +12,7 @@ otelcol.receiver.otlp "default" { otelcol.exporter.otlphttp "default" { client { - endpoint = "database:4318" + endpoint = "database:4318" + http2_ping_timeout = "0s" } } diff --git a/internal/flow/tracing/tracing.go b/internal/flow/tracing/tracing.go index 9d1a174e330e..cd7acd8e4f1f 100644 --- a/internal/flow/tracing/tracing.go +++ 
b/internal/flow/tracing/tracing.go @@ -14,7 +14,7 @@ import ( "go.opentelemetry.io/otel/exporters/otlp/otlptrace" "go.opentelemetry.io/otel/sdk/resource" tracesdk "go.opentelemetry.io/otel/sdk/trace" - semconv "go.opentelemetry.io/otel/semconv/v1.21.0" + semconv "go.opentelemetry.io/otel/semconv/v1.24.0" "go.opentelemetry.io/otel/trace" ) diff --git a/internal/static/traces/config.go b/internal/static/traces/config.go index 12aa4c784e1f..80de470ed812 100644 --- a/internal/static/traces/config.go +++ b/internal/static/traces/config.go @@ -125,15 +125,15 @@ type InstanceConfig struct { RemoteWrite []RemoteWriteConfig `yaml:"remote_write,omitempty"` // Receivers: - // https://github.com/open-telemetry/opentelemetry-collector/blob/v0.87.0/receiver/README.md + // https://github.com/open-telemetry/opentelemetry-collector/blob/v0.96.0/receiver/README.md Receivers ReceiverMap `yaml:"receivers,omitempty"` // Batch: - // https://github.com/open-telemetry/opentelemetry-collector/tree/v0.87.0/processor/batchprocessor + // https://github.com/open-telemetry/opentelemetry-collector/tree/v0.96.0/processor/batchprocessor Batch map[string]interface{} `yaml:"batch,omitempty"` // Attributes: - // https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/v0.87.0/processor + // https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/v0.96.0/processor Attributes map[string]interface{} `yaml:"attributes,omitempty"` // prom service discovery config @@ -142,25 +142,25 @@ type InstanceConfig struct { PodAssociations []string `yaml:"prom_sd_pod_associations,omitempty"` // SpanMetricsProcessor: - // https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/v0.87.0/processor/spanmetricsprocessor + // https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/v0.95.0/processor/spanmetricsprocessor SpanMetrics *SpanMetricsConfig `yaml:"spanmetrics,omitempty"` // AutomaticLogging 
AutomaticLogging *automaticloggingprocessor.AutomaticLoggingConfig `yaml:"automatic_logging,omitempty"` // TailSampling defines a sampling strategy for the pipeline - // https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/v0.87.0/processor/tailsamplingprocessor + // https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/v0.96.0/processor/tailsamplingprocessor TailSampling *tailSamplingConfig `yaml:"tail_sampling,omitempty"` // LoadBalancing is used to distribute spans of the same trace to the same agent instance - // https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/v0.87.0/exporter/loadbalancingexporter + // https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/v0.96.0/exporter/loadbalancingexporter LoadBalancing *loadBalancingConfig `yaml:"load_balancing"` // ServiceGraphs ServiceGraphs *serviceGraphsConfig `yaml:"service_graphs,omitempty"` // Jaeger's Remote Sampling extension: - // https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/v0.87.0/extension/jaegerremotesampling + // https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/v0.96.0/extension/jaegerremotesampling JaegerRemoteSampling []JaegerRemoteSamplingConfig `yaml:"jaeger_remote_sampling"` } @@ -266,18 +266,20 @@ var DefaultRemoteWriteConfig = RemoteWriteConfig{ // TLSClientSetting configures the oauth2client extension TLS; compatible with configtls.TLSClientSetting type TLSClientSetting struct { - CAFile string `yaml:"ca_file,omitempty"` - CAPem SecretString `yaml:"ca_pem,omitempty"` - CertFile string `yaml:"cert_file,omitempty"` - CertPem SecretString `yaml:"cert_pem,omitempty"` - KeyFile string `yaml:"key_file,omitempty"` - KeyPem SecretString `yaml:"key_pem,omitempty"` - MinVersion string `yaml:"min_version,omitempty"` - MaxVersion string `yaml:"max_version,omitempty"` - ReloadInterval time.Duration `yaml:"reload_interval"` - Insecure bool 
`yaml:"insecure"` - InsecureSkipVerify bool `yaml:"insecure_skip_verify"` - ServerNameOverride string `yaml:"server_name_override,omitempty"` + CAFile string `yaml:"ca_file,omitempty"` + CAPem SecretString `yaml:"ca_pem,omitempty"` + IncludeSystemCACertsPool bool `yaml:"include_system_ca_certs_pool"` + CertFile string `yaml:"cert_file,omitempty"` + CertPem SecretString `yaml:"cert_pem,omitempty"` + KeyFile string `yaml:"key_file,omitempty"` + KeyPem SecretString `yaml:"key_pem,omitempty"` + MinVersion string `yaml:"min_version,omitempty"` + MaxVersion string `yaml:"max_version,omitempty"` + CipherSuites []string `yaml:"cipher_suites,omitempty"` + ReloadInterval time.Duration `yaml:"reload_interval"` + Insecure bool `yaml:"insecure"` + InsecureSkipVerify bool `yaml:"insecure_skip_verify"` + ServerNameOverride string `yaml:"server_name_override,omitempty"` } // OAuth2Config configures the oauth2client extension for a remote_write exporter @@ -326,8 +328,8 @@ type RemoteWriteConfig struct { BasicAuth *prom_config.BasicAuth `yaml:"basic_auth,omitempty"` Oauth2 *OAuth2Config `yaml:"oauth2,omitempty"` Headers map[string]string `yaml:"headers,omitempty"` - SendingQueue map[string]interface{} `yaml:"sending_queue,omitempty"` // https://github.com/open-telemetry/opentelemetry-collector/blob/v0.87.0/exporter/exporterhelper/queued_retry.go - RetryOnFailure map[string]interface{} `yaml:"retry_on_failure,omitempty"` // https://github.com/open-telemetry/opentelemetry-collector/blob/v0.87.0/exporter/exporterhelper/queued_retry.go + SendingQueue map[string]interface{} `yaml:"sending_queue,omitempty"` // https://github.com/open-telemetry/opentelemetry-collector/blob/v0.96.0/exporter/exporterhelper/queued_retry.go + RetryOnFailure map[string]interface{} `yaml:"retry_on_failure,omitempty"` // https://github.com/open-telemetry/opentelemetry-collector/blob/v0.96.0/exporter/exporterhelper/queued_retry.go } // UnmarshalYAML implements yaml.Unmarshaler. 
@@ -380,7 +382,7 @@ type SpanMetricsConfig struct { // tailSamplingConfig is the configuration for tail-based sampling type tailSamplingConfig struct { // Policies are the strategies used for sampling. Multiple policies can be used in the same pipeline. - // For more information, refer to https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/v0.87.0/processor/tailsamplingprocessor + // For more information, refer to https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/v0.96.0/processor/tailsamplingprocessor Policies []policy `yaml:"policies"` // DecisionWait defines the time to wait for a complete trace before making a decision DecisionWait time.Duration `yaml:"decision_wait,omitempty"` @@ -625,7 +627,7 @@ func (c *InstanceConfig) loadBalancingExporter() (map[string]interface{}, error) } // formatPolicies creates sampling policies (i.e. rules) compatible with OTel's tail sampling processor -// https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/v0.87.0/processor/tailsamplingprocessor +// https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/v0.96.0/processor/tailsamplingprocessor func formatPolicies(cfg []policy) ([]map[string]interface{}, error) { policies := make([]map[string]interface{}, 0, len(cfg)) for i, policy := range cfg { diff --git a/internal/static/traces/config_test.go b/internal/static/traces/config_test.go index fb8443abab96..687eaf30de58 100644 --- a/internal/static/traces/config_test.go +++ b/internal/static/traces/config_test.go @@ -1159,6 +1159,8 @@ remote_write: scopes: ["api.metrics"] timeout: 2s `, + //TODO(ptodev): Look into why we need to add a "cipher_suites: []" explicitly. + // The expected config should unmarshal it to [], but instead it sets it to nil. 
expectedConfig: ` receivers: push_receiver: {} @@ -1172,6 +1174,8 @@ extensions: token_url: https://example.com/oauth2/default/v1/token scopes: ["api.metrics"] timeout: 2s + tls: + cipher_suites: [] exporters: otlphttp/0: endpoint: example.com:12345 @@ -1217,6 +1221,8 @@ remote_write: min_version: 1.3 reload_interval: 1h `, + //TODO(ptodev): Look into why we need to add a "cipher_suites: []" explicitly. + // The expected config should unmarshal it to [], but instead it sets it to nil. expectedConfig: ` receivers: push_receiver: {} @@ -1240,6 +1246,7 @@ extensions: key_file: keyfile min_version: 1.3 reload_interval: 1h + cipher_suites: [] exporters: otlphttp/0: endpoint: example.com:12345 @@ -1285,6 +1292,8 @@ remote_write: max_version: 1.2 reload_interval: 1h `, + //TODO: Look into why we need to add a "cipher_suites: []" explicitly. + // The expected config should unmarshal it to [], but instead it sets it to nil. expectedConfig: ` receivers: push_receiver: {} @@ -1308,6 +1317,7 @@ extensions: key_pem: test_secret_key_pem_string max_version: 1.2 reload_interval: 1h + cipher_suites: [] exporters: otlphttp/0: endpoint: example.com:12345 @@ -1351,6 +1361,8 @@ remote_write: scopes: ["api.metrics"] timeout: 2s `, + //TODO: Look into why we need to add a "cipher_suites: []" explicitly. + // The expected config should unmarshal it to [], but instead it sets it to nil. expectedConfig: ` receivers: push_receiver: {} @@ -1364,12 +1376,16 @@ extensions: token_url: https://example.com/oauth2/default/v1/token scopes: ["api.metrics"] timeout: 2s + tls: + cipher_suites: [] oauth2client/otlp1: client_id: anotherclientid client_secret: anotherclientsecret token_url: https://example.com/oauth2/default/v1/token scopes: ["api.metrics"] timeout: 2s + tls: + cipher_suites: [] exporters: otlphttp/0: endpoint: example.com:12345 @@ -1415,6 +1431,8 @@ remote_write: tls: insecure: true `, + //TODO: Look into why we need to add a "cipher_suites: []" explicitly. 
+ // The expected config should unmarshal it to [], but instead it sets it to nil. expectedConfig: ` receivers: push_receiver: {} @@ -1430,6 +1448,7 @@ extensions: timeout: 2s tls: insecure: true + cipher_suites: [] exporters: otlphttp/0: endpoint: http://example.com:12345 diff --git a/internal/static/traces/instance.go b/internal/static/traces/instance.go index 0c2e3fcb1924..9e518a06794e 100644 --- a/internal/static/traces/instance.go +++ b/internal/static/traces/instance.go @@ -148,17 +148,15 @@ func (i *Instance) buildAndStartPipeline(ctx context.Context, cfg InstanceConfig } i.service, err = service.New(ctx, service.Settings{ - BuildInfo: appinfo, - Receivers: receiver.NewBuilder(otelConfig.Receivers, i.factories.Receivers), - Processors: processor.NewBuilder(otelConfig.Processors, i.factories.Processors), - Exporters: otelexporter.NewBuilder(otelConfig.Exporters, i.factories.Exporters), - Connectors: connector.NewBuilder(otelConfig.Connectors, i.factories.Connectors), - Extensions: extension.NewBuilder(otelConfig.Extensions, i.factories.Extensions), - OtelMetricViews: servicegraphprocessor.OtelMetricViews(), - OtelMetricReader: promExporter, - DisableProcessMetrics: true, - UseExternalMetricsServer: true, - TracerProvider: noop.NewTracerProvider(), + BuildInfo: appinfo, + Receivers: receiver.NewBuilder(otelConfig.Receivers, i.factories.Receivers), + Processors: processor.NewBuilder(otelConfig.Processors, i.factories.Processors), + Exporters: otelexporter.NewBuilder(otelConfig.Exporters, i.factories.Exporters), + Connectors: connector.NewBuilder(otelConfig.Connectors, i.factories.Connectors), + Extensions: extension.NewBuilder(otelConfig.Extensions, i.factories.Extensions), + OtelMetricViews: servicegraphprocessor.OtelMetricViews(), + OtelMetricReader: promExporter, + TracerProvider: noop.NewTracerProvider(), //TODO: Plug in an AsyncErrorChannel to shut down the Agent in case of a fatal event LoggingOptions: []zap.Option{ zap.WrapCore(func(zapcore.Core) 
zapcore.Core { diff --git a/internal/static/traces/servicegraphprocessor/processor_test.go b/internal/static/traces/servicegraphprocessor/processor_test.go index d2ba70f4b018..705a14bdd8f2 100644 --- a/internal/static/traces/servicegraphprocessor/processor_test.go +++ b/internal/static/traces/servicegraphprocessor/processor_test.go @@ -83,9 +83,7 @@ func TestConsumeMetrics(t *testing.T) { TelemetrySettings: component.TelemetrySettings{ MeterProvider: getTestMeterProvider(t, reg), - ReportComponentStatus: func(*otelcomponent.StatusEvent) error { - return nil - }, + ReportStatus: func(*otelcomponent.StatusEvent) {}, }, BuildInfo: component.BuildInfo{}, } diff --git a/internal/static/traces/traceutils/server.go b/internal/static/traces/traceutils/server.go index f22c445953e3..48f634d3a39b 100644 --- a/internal/static/traces/traceutils/server.go +++ b/internal/static/traces/traceutils/server.go @@ -147,13 +147,12 @@ func newServer(addr string, callback func(ptrace.Traces)) (*server, error) { } svc, err := service.New(context.Background(), service.Settings{ - Receivers: receiver.NewBuilder(otelCfg.Receivers, factories.Receivers), - Processors: processor.NewBuilder(otelCfg.Processors, factories.Processors), - Exporters: otelexporter.NewBuilder(otelCfg.Exporters, factories.Exporters), - Connectors: connector.NewBuilder(otelCfg.Connectors, factories.Connectors), - Extensions: extension.NewBuilder(otelCfg.Extensions, factories.Extensions), - UseExternalMetricsServer: false, - TracerProvider: noop.NewTracerProvider(), + Receivers: receiver.NewBuilder(otelCfg.Receivers, factories.Receivers), + Processors: processor.NewBuilder(otelCfg.Processors, factories.Processors), + Exporters: otelexporter.NewBuilder(otelCfg.Exporters, factories.Exporters), + Connectors: connector.NewBuilder(otelCfg.Connectors, factories.Connectors), + Extensions: extension.NewBuilder(otelCfg.Extensions, factories.Extensions), + TracerProvider: noop.NewTracerProvider(), }, otelCfg.Service) if err != nil { 
return nil, fmt.Errorf("failed to create Otel service: %w", err) diff --git a/internal/util/otel_feature_gate.go b/internal/util/otel_feature_gate.go index d2f4797668e7..51a5d917a8e9 100644 --- a/internal/util/otel_feature_gate.go +++ b/internal/util/otel_feature_gate.go @@ -4,7 +4,11 @@ import ( "fmt" "go.opentelemetry.io/collector/featuregate" - _ "go.opentelemetry.io/collector/obsreport" + + // Register the feature gates. + // The "service" package uses DisableHighCardinalityMetricsfeatureGate, so import "service". + // We cannot import DisableHighCardinalityMetricsfeatureGate directly because it's not exported. + _ "go.opentelemetry.io/collector/service" ) // Enables a set of feature gates in Otel's Global Feature Gate Registry. @@ -22,11 +26,6 @@ func EnableOtelFeatureGates(fgNames ...string) error { } var ( - // useOtelForInternalMetrics is required so that the Collector service configures Collector components using the Otel SDK - // instead of OpenCensus. If this is not specified, then the OtelMetricViews and OtelMetricReader parameters which we - // pass to service.New() below will not be taken into account. This would mean that metrics from custom components such as - // the one in pkg/traces/servicegraphprocessor would not work. - // // disableHighCardinalityMetrics is required so that we don't include labels containing ports and IP addresses in gRPC metrics. // Example metric with high cardinality... 
// rpc_server_duration_bucket{net_sock_peer_addr="127.0.0.1",net_sock_peer_port="59947",rpc_grpc_status_code="0",rpc_method="Export",rpc_service="opentelemetry.proto.collector.trace.v1.TraceService",rpc_system="grpc",traces_config="default",le="7500"} 294 @@ -36,24 +35,11 @@ var ( // https://opentelemetry.io/docs/specs/otel/metrics/semantic_conventions/rpc-metrics/ // https://github.com/open-telemetry/opentelemetry-go-contrib/pull/2700 // https://github.com/open-telemetry/opentelemetry-collector/pull/6788/files - // - // TODO: Remove "telemetry.useOtelForInternalMetrics" when Collector components - // use OpenTelemetry metrics by default. staticModeOtelFeatureGates = []string{ - "telemetry.useOtelForInternalMetrics", "telemetry.disableHighCardinalityMetrics", } - // Enable the "telemetry.useOtelForInternalMetrics" Collector feature gate. - // Currently, Collector components uses OpenCensus metrics by default. - // Those metrics cannot be integrated with Agent Flow, - // so we need to always use OpenTelemetry metrics. - // - // TODO: Remove "telemetry.useOtelForInternalMetrics" when Collector components - // use OpenTelemetry metrics by default. - flowModeOtelFeatureGates = []string{ - "telemetry.useOtelForInternalMetrics", - } + flowModeOtelFeatureGates = []string{} ) // Enables a set of feature gates which should always be enabled for Static mode. diff --git a/internal/util/otel_feature_gate_test.go b/internal/util/otel_feature_gate_test.go index d4b49ea92c91..76fd8ef1f72f 100644 --- a/internal/util/otel_feature_gate_test.go +++ b/internal/util/otel_feature_gate_test.go @@ -5,9 +5,6 @@ import ( "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/featuregate" - - // Register the feature gates. 
- _ "go.opentelemetry.io/collector/exporter/exporterhelper" ) func Test_FeatureGates(t *testing.T) { diff --git a/internal/util/testappender/compare.go b/internal/util/testappender/compare.go index 1e2c6a1b970a..92e48bd1dc07 100644 --- a/internal/util/testappender/compare.go +++ b/internal/util/testappender/compare.go @@ -44,9 +44,11 @@ func (c Comparer) Compare(families []*dto.MetricFamily, expect string) error { buf bytes.Buffer ) if c.OpenMetrics { - enc = expfmt.NewEncoder(&buf, expfmt.FmtOpenMetrics_1_0_0) + expFormat := expfmt.NewFormat(expfmt.TypeOpenMetrics) + enc = expfmt.NewEncoder(&buf, expFormat) } else { - enc = expfmt.NewEncoder(&buf, expfmt.FmtText) + expFormat := expfmt.NewFormat(expfmt.TypeTextPlain) + enc = expfmt.NewEncoder(&buf, expFormat) } for _, f := range families { if err := enc.Encode(f); err != nil { From c41138787d77c149e3040432bf28816394ed5acd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C4=90=E1=BB=97=20Tr=E1=BB=8Dng=20H=E1=BA=A3i?= <41283691+hainenber@users.noreply.github.com> Date: Fri, 22 Mar 2024 20:10:14 +0700 Subject: [PATCH 07/83] feat(converter/otelcol): support converting `sigv4authextension` (#6713) Signed-off-by: hainenber --- .../converter_sigv4authextension.go | 55 +++++++++++++++++++ .../otelcolconvert/otelcolconvert_test.go | 1 + .../sigv4auth.river | 28 ++++++++++ .../otelcol_without_validation/sigv4auth.yaml | 35 ++++++++++++ .../internal/otelcolconvert/utils.go | 42 ++++++++++++++ .../converter/internal/test_common/testing.go | 5 +- 6 files changed, 164 insertions(+), 2 deletions(-) create mode 100644 internal/converter/internal/otelcolconvert/converter_sigv4authextension.go create mode 100644 internal/converter/internal/otelcolconvert/testdata/otelcol_without_validation/sigv4auth.river create mode 100644 internal/converter/internal/otelcolconvert/testdata/otelcol_without_validation/sigv4auth.yaml diff --git a/internal/converter/internal/otelcolconvert/converter_sigv4authextension.go 
b/internal/converter/internal/otelcolconvert/converter_sigv4authextension.go new file mode 100644 index 000000000000..de22285cdb9b --- /dev/null +++ b/internal/converter/internal/otelcolconvert/converter_sigv4authextension.go @@ -0,0 +1,55 @@ +package otelcolconvert + +import ( + "fmt" + + "github.com/grafana/agent/internal/component/otelcol/auth/sigv4" + "github.com/grafana/agent/internal/converter/diag" + "github.com/grafana/agent/internal/converter/internal/common" + "github.com/open-telemetry/opentelemetry-collector-contrib/extension/sigv4authextension" + "go.opentelemetry.io/collector/component" +) + +func init() { + converters = append(converters, sigV4AuthExtensionConverter{}) +} + +type sigV4AuthExtensionConverter struct{} + +func (sigV4AuthExtensionConverter) Factory() component.Factory { + return sigv4authextension.NewFactory() +} + +func (sigV4AuthExtensionConverter) InputComponentName() string { + return "otelcol.auth.sigv4" +} + +func (sigV4AuthExtensionConverter) ConvertAndAppend(state *state, id component.InstanceID, cfg component.Config) diag.Diagnostics { + var diags diag.Diagnostics + + label := state.FlowComponentLabel() + + args := toSigV4AuthExtension(cfg.(*sigv4authextension.Config)) + block := common.NewBlockWithOverride([]string{"otelcol", "auth", "sigv4"}, label, args) + + diags.Add( + diag.SeverityLevelInfo, + fmt.Sprintf("Converted %s into %s", stringifyInstanceID(id), stringifyBlock(block)), + ) + + state.Body().AppendBlock(block) + + return diags +} + +func toSigV4AuthExtension(cfg *sigv4authextension.Config) *sigv4.Arguments { + return &sigv4.Arguments{ + Region: cfg.Region, + Service: cfg.Service, + AssumeRole: sigv4.AssumeRole{ + ARN: cfg.AssumeRole.ARN, + SessionName: cfg.AssumeRole.SessionName, + STSRegion: cfg.AssumeRole.STSRegion, + }, + } +} diff --git a/internal/converter/internal/otelcolconvert/otelcolconvert_test.go b/internal/converter/internal/otelcolconvert/otelcolconvert_test.go index 
570a5dc08aef..ef35060e1c0b 100644 --- a/internal/converter/internal/otelcolconvert/otelcolconvert_test.go +++ b/internal/converter/internal/otelcolconvert/otelcolconvert_test.go @@ -10,6 +10,7 @@ import ( func TestConvert(t *testing.T) { // TODO(rfratto): support -update flag. test_common.TestDirectory(t, "testdata", ".yaml", true, []string{}, otelcolconvert.Convert) + test_common.TestDirectory(t, "testdata/otelcol_without_validation", ".yaml", true, []string{}, otelcolconvert.ConvertWithoutValidation) } // TestConvertErrors tests errors specifically regarding the reading of diff --git a/internal/converter/internal/otelcolconvert/testdata/otelcol_without_validation/sigv4auth.river b/internal/converter/internal/otelcolconvert/testdata/otelcol_without_validation/sigv4auth.river new file mode 100644 index 000000000000..ec425d72aa7e --- /dev/null +++ b/internal/converter/internal/otelcolconvert/testdata/otelcol_without_validation/sigv4auth.river @@ -0,0 +1,28 @@ +otelcol.auth.sigv4 "default" { + region = "ap-southeast-1" + service = "s3" + + assume_role { + arn = "arn:aws:iam::123456789012:role/aws-service-role/access" + sts_region = "us-east-1" + } +} + +otelcol.receiver.otlp "default" { + grpc { } + + http { } + + output { + metrics = [otelcol.exporter.otlp.default.input] + logs = [otelcol.exporter.otlp.default.input] + traces = [otelcol.exporter.otlp.default.input] + } +} + +otelcol.exporter.otlp "default" { + client { + endpoint = "database:4317" + auth = otelcol.auth.sigv4.default.handler + } +} diff --git a/internal/converter/internal/otelcolconvert/testdata/otelcol_without_validation/sigv4auth.yaml b/internal/converter/internal/otelcolconvert/testdata/otelcol_without_validation/sigv4auth.yaml new file mode 100644 index 000000000000..a244a5911eb5 --- /dev/null +++ b/internal/converter/internal/otelcolconvert/testdata/otelcol_without_validation/sigv4auth.yaml @@ -0,0 +1,35 @@ +extensions: + sigv4auth: + region: "ap-southeast-1" + service: "s3" + assume_role: + 
arn: "arn:aws:iam::123456789012:role/aws-service-role/access" + sts_region: "us-east-1" + +receivers: + otlp: + protocols: + grpc: + http: + +exporters: + otlp: + auth: + authenticator: sigv4auth + endpoint: database:4317 + +service: + extensions: [sigv4auth] + pipelines: + metrics: + receivers: [otlp] + processors: [] + exporters: [otlp] + logs: + receivers: [otlp] + processors: [] + exporters: [otlp] + traces: + receivers: [otlp] + processors: [] + exporters: [otlp] \ No newline at end of file diff --git a/internal/converter/internal/otelcolconvert/utils.go b/internal/converter/internal/otelcolconvert/utils.go index d3515919ff11..176a5a420aa6 100644 --- a/internal/converter/internal/otelcolconvert/utils.go +++ b/internal/converter/internal/otelcolconvert/utils.go @@ -1,9 +1,12 @@ package otelcolconvert import ( + "bytes" "fmt" "strings" + "github.com/grafana/agent/internal/converter/diag" + "github.com/grafana/agent/internal/converter/internal/common" "github.com/grafana/river/token/builder" "go.opentelemetry.io/collector/component" ) @@ -32,3 +35,42 @@ func stringifyKind(k component.Kind) string { func stringifyBlock(block *builder.Block) string { return fmt.Sprintf("%s.%s", strings.Join(block.Name, "."), block.Label) } + +// ConvertWithoutValidation is similar to `otelcolconvert.go`'s Convert but without validating generated configs +// This is to help testing `sigv4authextension` converter as its Validate() method calls up external cloud +// service and we can't inject mock SigV4 credential provider since the attribute is set as internal in the +// upstream. +// Remove this once credentials provider is open for mocking. 
+func ConvertWithoutValidation(in []byte, extraArgs []string) ([]byte, diag.Diagnostics) { + var diags diag.Diagnostics + + if len(extraArgs) > 0 { + diags.Add(diag.SeverityLevelCritical, fmt.Sprintf("extra arguments are not supported for the otelcol converter: %s", extraArgs)) + return nil, diags + } + + cfg, err := readOpentelemetryConfig(in) + if err != nil { + diags.Add(diag.SeverityLevelCritical, err.Error()) + return nil, diags + } + + f := builder.NewFile() + + diags.AddAll(AppendConfig(f, cfg, "")) + diags.AddAll(common.ValidateNodes(f)) + + var buf bytes.Buffer + if _, err := f.WriteTo(&buf); err != nil { + diags.Add(diag.SeverityLevelCritical, fmt.Sprintf("failed to render Flow config: %s", err.Error())) + return nil, diags + } + + if len(buf.Bytes()) == 0 { + return nil, diags + } + + prettyByte, newDiags := common.PrettyPrint(buf.Bytes()) + diags.AddAll(newDiags) + return prettyByte, diags +} diff --git a/internal/converter/internal/test_common/testing.go b/internal/converter/internal/test_common/testing.go index fec09f30af06..f3de5823e88e 100644 --- a/internal/converter/internal/test_common/testing.go +++ b/internal/converter/internal/test_common/testing.go @@ -44,8 +44,9 @@ const ( // configuration generated by calling convert in step 1. 
func TestDirectory(t *testing.T, folderPath string, sourceSuffix string, loadFlowConfig bool, extraArgs []string, convert func(in []byte, extraArgs []string) ([]byte, diag.Diagnostics)) { require.NoError(t, filepath.WalkDir(folderPath, func(path string, d fs.DirEntry, _ error) error { - if d.IsDir() { - return nil + // Only skip iterating child folders + if d.IsDir() && path != folderPath { + return filepath.SkipDir } if strings.HasSuffix(path, sourceSuffix) { From 89e10ff3fd1bbba8787a5661373d66bea2d5582f Mon Sep 17 00:00:00 2001 From: Clayton Cornell <131809008+clayton-cornell@users.noreply.github.com> Date: Fri, 22 Mar 2024 10:05:16 -0700 Subject: [PATCH 08/83] Update cypher suite support (#6755) --- docs/sources/flow/reference/config-blocks/http.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/sources/flow/reference/config-blocks/http.md b/docs/sources/flow/reference/config-blocks/http.md index 03a52010a8c0..3b023deb6b52 100644 --- a/docs/sources/flow/reference/config-blocks/http.md +++ b/docs/sources/flow/reference/config-blocks/http.md @@ -104,8 +104,8 @@ The set of cipher suites specified may be from the following: | ----------------------------------------------- | -------------------------------- | | `TLS_RSA_WITH_AES_128_CBC_SHA` | no | | `TLS_RSA_WITH_AES_256_CBC_SHA` | no | -| `TLS_RSA_WITH_AES_128_GCM_SHA256` | yes | -| `TLS_RSA_WITH_AES_256_GCM_SHA384` | yes | +| `TLS_RSA_WITH_AES_128_GCM_SHA256` | no | +| `TLS_RSA_WITH_AES_256_GCM_SHA384` | no | | `TLS_AES_128_GCM_SHA256` | no | | `TLS_AES_256_GCM_SHA384` | no | | `TLS_CHACHA20_POLY1305_SHA256` | no | From 0a83daec6cae5a4a1a2ce90ab2144da84361182b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C4=90=E1=BB=97=20Tr=E1=BB=8Dng=20H=E1=BA=A3i?= <41283691+hainenber@users.noreply.github.com> Date: Sun, 24 Mar 2024 01:17:31 +0700 Subject: [PATCH 09/83] doc(flow/get-started): fix wrong doc ref in Windows installation guide (#6736) Signed-off-by: hainenber Co-authored-by: Clayton Cornell 
<131809008+clayton-cornell@users.noreply.github.com> --- docs/sources/flow/get-started/install/windows.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/flow/get-started/install/windows.md b/docs/sources/flow/get-started/install/windows.md index a20ed3449792..b71ed6118970 100644 --- a/docs/sources/flow/get-started/install/windows.md +++ b/docs/sources/flow/get-started/install/windows.md @@ -84,7 +84,7 @@ This includes any configuration files in the installation directory. ## Next steps -- [Run {{< param "PRODUCT_NAME" >}}][Start] +- [Run {{< param "PRODUCT_NAME" >}}][Run] - [Configure {{< param "PRODUCT_NAME" >}}][Configure] [latest]: https://github.com/grafana/agent/releases/latest From eb56b0f7fa57c8c0b9608ccf6c8751569f7b126d Mon Sep 17 00:00:00 2001 From: Nick Pillitteri <56quarters@users.noreply.github.com> Date: Mon, 25 Mar 2024 12:25:01 -0400 Subject: [PATCH 10/83] Reduce default sync_interval for mimir.rules.kubernetes (#6753) Changes the default interval on which rules are synced from Mimir via `mimir.rules.kubernetes` when there are otherwise no changes to the `PrometheusRule` resources. This reduces load on the Mimir ruler APIs. Changes to `PrometheusRule` resources still result in immediately applying changes and syncing state with Mimir. Signed-off-by: Nick Pillitteri --- CHANGELOG.md | 3 +++ .../flow/reference/components/mimir.rules.kubernetes.md | 4 ++-- internal/component/mimir/rules/kubernetes/types.go | 2 +- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index cc3d6b73637a..e601c1651f44 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,9 @@ Main (unreleased) - The default listen port for `otelcol.receiver.opencensus` has changed from 4317 to 55678 to align with upstream. (@rfratto) +- The default sync interval for `mimir.rules.kubernetes` has changed from `30s` + to `5m` to reduce load on Mimir. 
(@56quarters) + ### Enhancements - Add support for importing folders as single module to `import.file`. (@wildum) diff --git a/docs/sources/flow/reference/components/mimir.rules.kubernetes.md b/docs/sources/flow/reference/components/mimir.rules.kubernetes.md index 9a8672005b5f..17bb3c63fc37 100644 --- a/docs/sources/flow/reference/components/mimir.rules.kubernetes.md +++ b/docs/sources/flow/reference/components/mimir.rules.kubernetes.md @@ -53,7 +53,7 @@ Name | Type | Description `tenant_id` | `string` | Mimir tenant ID. | | no `use_legacy_routes` | `bool` | Whether to use [deprecated][gem-2_2] ruler API endpoints. | false | no `prometheus_http_prefix` | `string` | Path prefix for [Mimir's Prometheus endpoint][gem-path-prefix]. | `/prometheus` | no -`sync_interval` | `duration` | Amount of time between reconciliations with Mimir. | "30s" | no +`sync_interval` | `duration` | Amount of time between reconciliations with Mimir. | "5m" | no `mimir_namespace_prefix` | `string` | Prefix used to differentiate multiple {{< param "PRODUCT_NAME" >}} deployments. | "agent" | no `bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no `bearer_token` | `secret` | Bearer token to authenticate with. | | no @@ -89,7 +89,7 @@ unique value for each deployment. If `use_legacy_routes` is set to `true`, `mimir.rules.kubernetes` contacts Mimir on a `/api/v1/rules` endpoint. -If `prometheus_http_prefix` is set to `/mimir`, `mimir.rules.kubernetes` contacts Mimir on a `/mimir/config/v1/rules` endpoint. +If `prometheus_http_prefix` is set to `/mimir`, `mimir.rules.kubernetes` contacts Mimir on a `/mimir/config/v1/rules` endpoint. This is useful if you configure Mimir to use a different [prefix][gem-path-prefix] for its Prometheus endpoints than the default one. `prometheus_http_prefix` is ignored if `use_legacy_routes` is set to `true`. 
diff --git a/internal/component/mimir/rules/kubernetes/types.go b/internal/component/mimir/rules/kubernetes/types.go index 564d6b4f0e67..5e662fb5644d 100644 --- a/internal/component/mimir/rules/kubernetes/types.go +++ b/internal/component/mimir/rules/kubernetes/types.go @@ -22,7 +22,7 @@ type Arguments struct { } var DefaultArguments = Arguments{ - SyncInterval: 30 * time.Second, + SyncInterval: 5 * time.Minute, MimirNameSpacePrefix: "agent", HTTPClientConfig: config.DefaultHTTPClientConfig, PrometheusHTTPPrefix: "/prometheus", From 19b05d27e1a51fd3c8185bacbfa168b238f329d7 Mon Sep 17 00:00:00 2001 From: Brett Jones Date: Mon, 25 Mar 2024 12:34:59 -0500 Subject: [PATCH 11/83] [Fixes #6565] Implement Luhn Loki processor (#6574) * implement luhn loki processor * Apply suggestions from code review Co-authored-by: Clayton Cornell <131809008+clayton-cornell@users.noreply.github.com> * use paypal generated credit card in docs * finish implementing luhn filter, remove dead code * fix lint --------- Co-authored-by: Clayton Cornell <131809008+clayton-cornell@users.noreply.github.com> Co-authored-by: mattdurham --- CHANGELOG.md | 2 + .../flow/reference/components/loki.process.md | 43 +++++ .../component/loki/process/stages/luhn.go | 147 ++++++++++++++++++ .../loki/process/stages/luhn_test.go | 54 +++++++ .../component/loki/process/stages/pipeline.go | 1 + .../component/loki/process/stages/stage.go | 6 + 6 files changed, 253 insertions(+) create mode 100644 internal/component/loki/process/stages/luhn.go create mode 100644 internal/component/loki/process/stages/luhn_test.go diff --git a/CHANGELOG.md b/CHANGELOG.md index e601c1651f44..d9e5763e426d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -280,6 +280,8 @@ v0.40.0 (2024-02-27) - Python profiling using eBPF is now aggregated now by kernel space. 
[PR](https://github.com/grafana/pyroscope/pull/2996) (@korniltsev) +- Add Luhn filter to `loki.process` to filter PCI data from log data + ### Bugfixes - Fix an issue in `remote.s3` where the exported content of an object would be an empty string if `remote.s3` failed to fully retrieve diff --git a/docs/sources/flow/reference/components/loki.process.md b/docs/sources/flow/reference/components/loki.process.md index f30efb576793..05eb467d63c2 100644 --- a/docs/sources/flow/reference/components/loki.process.md +++ b/docs/sources/flow/reference/components/loki.process.md @@ -65,6 +65,7 @@ The following blocks are supported inside the definition of `loki.process`: | stage.labels | [stage.labels][] | Configures a `labels` processing stage. | no | | stage.limit | [stage.limit][] | Configures a `limit` processing stage. | no | | stage.logfmt | [stage.logfmt][] | Configures a `logfmt` processing stage. | no | +| stage.luhn | [stage.luhn][] | Configures a `luhn` processing stage. | no | | stage.match | [stage.match][] | Configures a `match` processing stage. | no | | stage.metrics | [stage.metrics][] | Configures a `metrics` stage. | no | | stage.multiline | [stage.multiline][] | Configures a `multiline` processing stage. | no | @@ -95,6 +96,7 @@ file. [stage.labels]: #stagelabels-block [stage.limit]: #stagelimit-block [stage.logfmt]: #stagelogfmt-block +[stage.luhn]: #stageluhn-block [stage.match]: #stagematch-block [stage.metrics]: #stagemetrics-block [stage.multiline]: #stagemultiline-block @@ -566,6 +568,47 @@ set of extracted data, with the value of `user=foo`. The second stage parses the contents of `extra` and appends the `username: foo` key-value pair to the set of extracted data. +### stage.luhn block + +The `stage.luhn` inner block configures a processing stage that reads incoming +log lines and redacts strings that match a Luhn algorithm. 
+ +The [Luhn algorithm][] is a simple checksum formula used to validate various +identification numbers, such as credit card numbers, IMEI numbers, National +Provider Identifier numbers in the US, and Canadian Social Insurance Numbers. +Many Payment Card Industry environments require these numbers to be redacted. + +[Luhn algorithm]: https://en.wikipedia.org/wiki/Luhn_algorithm + +The following arguments are supported: + +| Name | Type | Description | Default | Required | +| ------------- | ------------- | ---------------------------------------------- | ---------------- | -------- | +| `replacement` | `string` | String to substitute the matched patterns with | `"**REDACTED**"` | no | +| `source` | `string` | Source of the data to parse. | `""` | no | +| `minLength` | `int` | Minimum length of digits to consider | `13` | no | + + +The `source` field defines the source of data to search. When `source` is +missing or empty, the stage parses the log line itself, but it can also be used +to parse a previously extracted value. + +The following example log line contains an approved credit card number. 
+ +``` +time=2012-11-01T22:08:41+00:00 app=loki level=WARN duration=125 message="credit card approved 4032032513548443" extra="user=foo" + +stage.luhn { + replacement = "**DELETED**" +} +``` + +The stage parses the log line, redacts the credit card number, and produces the following updated log line: + +``` +time=2012-11-01T22:08:41+00:00 app=loki level=INFO duration=125 message="credit card approved **DELETED**" extra="user=foo" +``` + ### stage.match block The `stage.match` inner block configures a filtering stage that can conditionally diff --git a/internal/component/loki/process/stages/luhn.go b/internal/component/loki/process/stages/luhn.go new file mode 100644 index 000000000000..60610e659f5a --- /dev/null +++ b/internal/component/loki/process/stages/luhn.go @@ -0,0 +1,147 @@ +package stages + +import ( + "strconv" + "strings" + "time" + "unicode" + + "github.com/prometheus/common/model" +) + +// LuhnFilterConfig configures a processing stage that filters out Luhn-valid numbers. +type LuhnFilterConfig struct { + Replacement string `river:"replacement,attr,optional"` + Source *string `river:"source,attr,optional"` + MinLength int `river:"min_length,attr,optional"` +} + +// validateLuhnFilterConfig validates the LuhnFilterConfig. +func validateLuhnFilterConfig(c LuhnFilterConfig) error { + if c.Replacement == "" { + c.Replacement = "**REDACTED**" + } + if c.MinLength < 1 { + c.MinLength = 13 + } + if c.Source != nil && *c.Source == "" { + return ErrEmptyRegexStageSource + } + return nil +} + +// newLuhnFilterStage creates a new LuhnFilterStage. +func newLuhnFilterStage(config LuhnFilterConfig) (Stage, error) { + if err := validateLuhnFilterConfig(config); err != nil { + return nil, err + } + return toStage(&luhnFilterStage{ + config: &config, + }), nil +} + +// luhnFilterStage applies Luhn algorithm filtering to log entries. +type luhnFilterStage struct { + config *LuhnFilterConfig +} + +// Process implements Stage. 
+func (r *luhnFilterStage) Process(labels model.LabelSet, extracted map[string]interface{}, t *time.Time, entry *string) { + input := entry + if r.config.Source != nil { + value, ok := extracted[*r.config.Source] + if !ok { + return + } + strVal, ok := value.(string) + if !ok { + return + } + input = &strVal + } + + if input == nil { + return + } + + // Replace Luhn-valid numbers in the input. + updatedEntry := replaceLuhnValidNumbers(*input, r.config.Replacement, r.config.MinLength) + *entry = updatedEntry +} + +// replaceLuhnValidNumbers scans the input for Luhn-valid numbers and replaces them. + +func replaceLuhnValidNumbers(input, replacement string, minLength int) string { + var sb strings.Builder + var currentNumber strings.Builder + + flushNumber := func() { + // If the number is at least minLength, check if it's a Luhn-valid number. + if currentNumber.Len() >= minLength { + numberStr := currentNumber.String() + number, err := strconv.Atoi(numberStr) + if err == nil && isLuhn(number) { + // If the number is Luhn-valid, replace it. + sb.WriteString(replacement) + } else { + // If the number is not Luhn-valid, write it as is. + sb.WriteString(numberStr) + } + } else if currentNumber.Len() > 0 { + // If the number is less than minLength but not empty, write it as is. + sb.WriteString(currentNumber.String()) + } + // Reset the current number. + currentNumber.Reset() + } + + // Iterate over the input, replacing Luhn-valid numbers. + for _, char := range input { + // If the character is a digit, add it to the current number. + if unicode.IsDigit(char) { + currentNumber.WriteRune(char) + } else { + // If the character is not a digit, flush the current number and write the character. 
+ flushNumber() + sb.WriteRune(char) + } + } + flushNumber() // Ensure any trailing number is processed + + return sb.String() +} + +// isLuhn check number is valid or not based on Luhn algorithm +func isLuhn(number int) bool { + // Luhn algorithm is a simple checksum formula used to validate a + // variety of identification numbers, such as credit card numbers, IMEI + // numbers, National Provider Identifier numbers in the US, and + // Canadian Social Insurance Numbers. This is a simple implementation + // of the Luhn algorithm. + // https://en.wikipedia.org/wiki/Luhn_algorithm + return (number%10+checksum(number/10))%10 == 0 +} + +func checksum(number int) int { + var luhn int + + for i := 0; number > 0; i++ { + cur := number % 10 + + if i%2 == 0 { // even + cur *= 2 + if cur > 9 { + cur = cur%10 + cur/10 + } + } + + luhn += cur + number /= 10 + } + return luhn % 10 +} + +// Name implements Stage. +func (r *luhnFilterStage) Name() string { + return StageTypeLuhn +} diff --git a/internal/component/loki/process/stages/luhn_test.go b/internal/component/loki/process/stages/luhn_test.go new file mode 100644 index 000000000000..ef618aa863bb --- /dev/null +++ b/internal/component/loki/process/stages/luhn_test.go @@ -0,0 +1,54 @@ +package stages + +import ( + "testing" +) + +// Test cases for the Luhn algorithm validation +func TestIsLuhnValid(t *testing.T) { + cases := []struct { + input int + want bool + }{ + {4539_1488_0343_6467, true}, // Valid Luhn number + {1234_5678_1234_5670, true}, // Another valid Luhn number + {499_2739_8112_1717, false}, // Invalid Luhn number + {1234567812345678, false}, // Another invalid Luhn number + {3782_822463_10005, true}, // Short, valid Luhn number + {123, false}, // Short, invalid Luhn number + } + + for _, c := range cases { + got := isLuhn(c.input) + if got != c.want { + t.Errorf("isLuhnValid(%q) == %t, want %t", c.input, got, c.want) + } + } +} + +// TestReplaceLuhnValidNumbers tests the replaceLuhnValidNumbers function. 
+func TestReplaceLuhnValidNumbers(t *testing.T) { + cases := []struct { + input string + replacement string + want string + }{ + // Test case with a single Luhn-valid number + {"My credit card number is 3530111333300000.", "**REDACTED**", "My credit card number is **REDACTED**."}, + // Test case with multiple Luhn-valid numbers + {"Cards 4532015112830366 and 6011111111111117 are valid.", "**REDACTED**", "Cards **REDACTED** and **REDACTED** are valid."}, + // Test case with no Luhn-valid numbers + {"No valid numbers here.", "**REDACTED**", "No valid numbers here."}, + // Test case with mixed content + {"Valid: 4556737586899855, invalid: 1234.", "**REDACTED**", "Valid: **REDACTED**, invalid: 1234."}, + // Test case with edge cases + {"Edge cases: 0, 00, 000, 1.", "**REDACTED**", "Edge cases: 0, 00, 000, 1."}, + } + + for _, c := range cases { + got := replaceLuhnValidNumbers(c.input, c.replacement, 13) + if got != c.want { + t.Errorf("replaceLuhnValidNumbers(%q, %q) == %q, want %q", c.input, c.replacement, got, c.want) + } + } +} diff --git a/internal/component/loki/process/stages/pipeline.go b/internal/component/loki/process/stages/pipeline.go index fb1be291e5d5..e24642c84cba 100644 --- a/internal/component/loki/process/stages/pipeline.go +++ b/internal/component/loki/process/stages/pipeline.go @@ -28,6 +28,7 @@ type StageConfig struct { LabelsConfig *LabelsConfig `river:"labels,block,optional"` LimitConfig *LimitConfig `river:"limit,block,optional"` LogfmtConfig *LogfmtConfig `river:"logfmt,block,optional"` + LuhnFilterConfig *LuhnFilterConfig `river:"luhn,block,optional"` MatchConfig *MatchConfig `river:"match,block,optional"` MetricsConfig *MetricsConfig `river:"metrics,block,optional"` MultilineConfig *MultilineConfig `river:"multiline,block,optional"` diff --git a/internal/component/loki/process/stages/stage.go b/internal/component/loki/process/stages/stage.go index a5657d570ea0..0958bfdf09b2 100644 --- a/internal/component/loki/process/stages/stage.go +++ 
b/internal/component/loki/process/stages/stage.go @@ -28,6 +28,7 @@ const ( StageTypeLabelDrop = "labeldrop" StageTypeLimit = "limit" StageTypeLogfmt = "logfmt" + StageTypeLuhn = "luhn" StageTypeMatch = "match" StageTypeMetric = "metrics" StageTypeMultiline = "multiline" @@ -136,6 +137,11 @@ func New(logger log.Logger, jobName *string, cfg StageConfig, registerer prometh if err != nil { return nil, err } + case cfg.LuhnFilterConfig != nil: + s, err = newLuhnFilterStage(*cfg.LuhnFilterConfig) + if err != nil { + return nil, err + } case cfg.MetricsConfig != nil: s, err = newMetricStage(logger, *cfg.MetricsConfig, registerer) if err != nil { From 5be00e4e2c4dde09b6f8430d3ede70b85a193891 Mon Sep 17 00:00:00 2001 From: Karsten Jeschkies Date: Tue, 26 Mar 2024 15:13:14 +0100 Subject: [PATCH 12/83] otelcol.exporter.prometheus: log dropped delta sum (#6764) --- .../otelcol/exporter/prometheus/internal/convert/convert.go | 1 + 1 file changed, 1 insertion(+) diff --git a/internal/component/otelcol/exporter/prometheus/internal/convert/convert.go b/internal/component/otelcol/exporter/prometheus/internal/convert/convert.go index a8f4d4a91c02..2103b044d4be 100644 --- a/internal/component/otelcol/exporter/prometheus/internal/convert/convert.go +++ b/internal/component/otelcol/exporter/prometheus/internal/convert/convert.go @@ -425,6 +425,7 @@ func (conv *Converter) consumeSum(app storage.Appender, memResource *memorySerie case m.Sum().AggregationTemporality() == pmetric.AggregationTemporalityCumulative && !m.Sum().IsMonotonic(): convType = textparse.MetricTypeGauge case m.Sum().AggregationTemporality() == pmetric.AggregationTemporalityDelta && m.Sum().IsMonotonic(): + level.Debug(conv.log).Log("msg", "dropped unsupported delta sum") // Drop non-cumulative summaries for now, which is permitted by the spec. // // TODO(rfratto): implement delta-to-cumulative for sums. 
From 586fcb9a54a4a15e18fdaee6b8857f802fa7a220 Mon Sep 17 00:00:00 2001 From: Erik Baranowski <39704712+erikbaranowski@users.noreply.github.com> Date: Tue, 26 Mar 2024 10:16:03 -0400 Subject: [PATCH 13/83] Implementation for static traces promsdprocessor conversion (#6722) * static traces promsdprocessor conversion to flow mode Signed-off-by: erikbaranowski <39704712+erikbaranowski@users.noreply.github.com> --------- Signed-off-by: erikbaranowski <39704712+erikbaranowski@users.noreply.github.com> --- .../internal/otelcolconvert/converter.go | 26 +- .../converter_attributesprocessor.go | 12 +- .../converter_basicauthextension.go | 4 +- .../converter_batchprocessor.go | 12 +- .../converter_bearertokenauthextension.go | 8 +- .../converter_filterprocessor.go | 12 +- .../converter_headerssetterextension.go | 4 +- .../otelcolconvert/converter_helpers.go | 2 +- .../converter_jaegerreceiver.go | 8 +- ...converter_jaegerremotesamplingextension.go | 4 +- .../converter_k8sattributesprocessor.go | 12 +- .../otelcolconvert/converter_kafkareceiver.go | 12 +- .../converter_loadbalancingexporter.go | 4 +- .../converter_loggingexporter.go | 4 +- .../converter_memorylimiterprocessor.go | 12 +- .../converter_oauth2clientauthextension.go | 4 +- .../converter_opencensusreceiver.go | 10 +- .../otelcolconvert/converter_otlpexporter.go | 4 +- .../converter_otlphttpexporter.go | 4 +- .../otelcolconvert/converter_otlpreceiver.go | 12 +- ...converter_probabilisticsamplerprocessor.go | 10 +- .../converter_sigv4authextension.go | 4 +- .../converter_spanmetricsconnector.go | 8 +- .../otelcolconvert/converter_spanprocessor.go | 8 +- .../converter_tailsamplingprocessor.go | 8 +- .../converter_transformprocessor.go | 12 +- .../converter_vcenterreceiver.go | 10 +- .../converter_zipkinreceiver.go | 8 +- .../internal/otelcolconvert/otelcolconvert.go | 17 +- .../internal/otelcolconvert/utils.go | 10 +- .../build/prometheus_blocks.go | 14 +- .../prometheusconvert/prometheusconvert.go | 2 +- 
.../internal/build/service_discovery.go | 2 +- .../internal/build/builder_traces.go | 17 +- .../build/converter_discoveryprocessor.go | 104 +++++++ .../staticconvert/testdata/traces.river | 265 +++--------------- .../staticconvert/testdata/traces.yaml | 163 ++--------- 37 files changed, 318 insertions(+), 514 deletions(-) create mode 100644 internal/converter/internal/staticconvert/internal/build/converter_discoveryprocessor.go diff --git a/internal/converter/internal/otelcolconvert/converter.go b/internal/converter/internal/otelcolconvert/converter.go index 77d74f61c208..dda3262a4a01 100644 --- a/internal/converter/internal/otelcolconvert/converter.go +++ b/internal/converter/internal/otelcolconvert/converter.go @@ -11,9 +11,9 @@ import ( "go.opentelemetry.io/collector/otelcol" ) -// componentConverter represents a converter which converts an OpenTelemetry +// ComponentConverter represents a converter which converts an OpenTelemetry // Collector component into a Flow component. -type componentConverter interface { +type ComponentConverter interface { // Factory should return the factory for the OpenTelemetry Collector // component. Factory() component.Factory @@ -39,25 +39,25 @@ type componentConverter interface { // ConvertAndAppend may be called more than once with the same component used // in different pipelines. Use [state.FlowComponentLabel] to get a guaranteed // unique Flow component label for the current state. - ConvertAndAppend(state *state, id component.InstanceID, cfg component.Config) diag.Diagnostics + ConvertAndAppend(state *State, id component.InstanceID, cfg component.Config) diag.Diagnostics } // List of component converters. This slice is appended to by init functions in // other files. -var converters []componentConverter +var converters []ComponentConverter -// state represents the state of the conversion. The state tracks: +// State represents the State of the conversion. 
The State tracks: // // - The OpenTelemetry Collector config being converted. // - The current OpenTelemetry Collector pipelines being converted. // - The current OpenTelemetry Collector component being converted. -type state struct { +type State struct { cfg *otelcol.Config // Input config. file *builder.File // Output file. group *pipelineGroup // Current pipeline group being converted. // converterLookup maps a converter key to the associated converter instance. - converterLookup map[converterKey]componentConverter + converterLookup map[converterKey]ComponentConverter // extensionLookup maps OTel extensions to Flow component IDs. extensionLookup map[component.ID]componentID @@ -74,18 +74,18 @@ type converterKey struct { // Body returns the body of the file being generated. Implementations of // [componentConverter] should use this to append components. -func (state *state) Body() *builder.Body { return state.file.Body() } +func (state *State) Body() *builder.Body { return state.file.Body() } // FlowComponentLabel returns the unique Flow label for the OpenTelemetry // Component component being converted. It is safe to use this label to create // multiple Flow components in a chain. -func (state *state) FlowComponentLabel() string { +func (state *State) FlowComponentLabel() string { return state.flowLabelForComponent(state.componentID) } // flowLabelForComponent returns the unique Flow label for the given // OpenTelemetry Collector component. -func (state *state) flowLabelForComponent(c component.InstanceID) string { +func (state *State) flowLabelForComponent(c component.InstanceID) string { const defaultLabel = "default" // We need to prove that it's possible to statelessly compute the label for a @@ -144,7 +144,7 @@ func (state *state) flowLabelForComponent(c component.InstanceID) string { // Next returns the set of Flow component IDs for a given data type that the // current component being converted should forward data to. 
-func (state *state) Next(c component.InstanceID, dataType component.DataType) []componentID { +func (state *State) Next(c component.InstanceID, dataType component.DataType) []componentID { instances := state.nextInstances(c, dataType) var ids []componentID @@ -177,7 +177,7 @@ func (state *state) Next(c component.InstanceID, dataType component.DataType) [] return ids } -func (state *state) nextInstances(c component.InstanceID, dataType component.DataType) []component.InstanceID { +func (state *State) nextInstances(c component.InstanceID, dataType component.DataType) []component.InstanceID { switch dataType { case component.DataTypeMetrics: return state.group.NextMetrics(c) @@ -191,7 +191,7 @@ func (state *state) nextInstances(c component.InstanceID, dataType component.Dat } } -func (state *state) LookupExtension(id component.ID) componentID { +func (state *State) LookupExtension(id component.ID) componentID { cid, ok := state.extensionLookup[id] if !ok { panic(fmt.Sprintf("no component name found for extension %q", id.Name())) diff --git a/internal/converter/internal/otelcolconvert/converter_attributesprocessor.go b/internal/converter/internal/otelcolconvert/converter_attributesprocessor.go index c9b9486b26ed..ac37c70e2458 100644 --- a/internal/converter/internal/otelcolconvert/converter_attributesprocessor.go +++ b/internal/converter/internal/otelcolconvert/converter_attributesprocessor.go @@ -25,7 +25,7 @@ func (attributesProcessorConverter) InputComponentName() string { return "otelcol.processor.attributes" } -func (attributesProcessorConverter) ConvertAndAppend(state *state, id component.InstanceID, cfg component.Config) diag.Diagnostics { +func (attributesProcessorConverter) ConvertAndAppend(state *State, id component.InstanceID, cfg component.Config) diag.Diagnostics { var diags diag.Diagnostics label := state.FlowComponentLabel() @@ -35,14 +35,14 @@ func (attributesProcessorConverter) ConvertAndAppend(state *state, id component. 
diags.Add( diag.SeverityLevelInfo, - fmt.Sprintf("Converted %s into %s", stringifyInstanceID(id), stringifyBlock(block)), + fmt.Sprintf("Converted %s into %s", StringifyInstanceID(id), StringifyBlock(block)), ) state.Body().AppendBlock(block) return diags } -func toAttributesProcessor(state *state, id component.InstanceID, cfg *attributesprocessor.Config) *attributes.Arguments { +func toAttributesProcessor(state *State, id component.InstanceID, cfg *attributesprocessor.Config) *attributes.Arguments { var ( nextMetrics = state.Next(id, component.DataTypeMetrics) nextTraces = state.Next(id, component.DataTypeTraces) @@ -53,9 +53,9 @@ func toAttributesProcessor(state *state, id component.InstanceID, cfg *attribute Match: toMatchConfig(cfg), Actions: toAttrActionKeyValue(encodeMapslice(cfg.Actions)), Output: &otelcol.ConsumerArguments{ - Metrics: toTokenizedConsumers(nextMetrics), - Logs: toTokenizedConsumers(nextLogs), - Traces: toTokenizedConsumers(nextTraces)}, + Metrics: ToTokenizedConsumers(nextMetrics), + Logs: ToTokenizedConsumers(nextLogs), + Traces: ToTokenizedConsumers(nextTraces)}, } } diff --git a/internal/converter/internal/otelcolconvert/converter_basicauthextension.go b/internal/converter/internal/otelcolconvert/converter_basicauthextension.go index ac07fc6566b6..8f741e84013a 100644 --- a/internal/converter/internal/otelcolconvert/converter_basicauthextension.go +++ b/internal/converter/internal/otelcolconvert/converter_basicauthextension.go @@ -23,7 +23,7 @@ func (basicAuthConverterConverter) Factory() component.Factory { func (basicAuthConverterConverter) InputComponentName() string { return "otelcol.auth.basic" } -func (basicAuthConverterConverter) ConvertAndAppend(state *state, id component.InstanceID, cfg component.Config) diag.Diagnostics { +func (basicAuthConverterConverter) ConvertAndAppend(state *State, id component.InstanceID, cfg component.Config) diag.Diagnostics { var diags diag.Diagnostics label := state.FlowComponentLabel() @@ -33,7 +33,7 
@@ func (basicAuthConverterConverter) ConvertAndAppend(state *state, id component.I diags.Add( diag.SeverityLevelInfo, - fmt.Sprintf("Converted %s into %s", stringifyInstanceID(id), stringifyBlock(block)), + fmt.Sprintf("Converted %s into %s", StringifyInstanceID(id), StringifyBlock(block)), ) state.Body().AppendBlock(block) diff --git a/internal/converter/internal/otelcolconvert/converter_batchprocessor.go b/internal/converter/internal/otelcolconvert/converter_batchprocessor.go index df234aee2f7f..f6a6b2cabaf2 100644 --- a/internal/converter/internal/otelcolconvert/converter_batchprocessor.go +++ b/internal/converter/internal/otelcolconvert/converter_batchprocessor.go @@ -25,7 +25,7 @@ func (batchProcessorConverter) InputComponentName() string { return "otelcol.processor.batch" } -func (batchProcessorConverter) ConvertAndAppend(state *state, id component.InstanceID, cfg component.Config) diag.Diagnostics { +func (batchProcessorConverter) ConvertAndAppend(state *State, id component.InstanceID, cfg component.Config) diag.Diagnostics { var diags diag.Diagnostics label := state.FlowComponentLabel() @@ -35,14 +35,14 @@ func (batchProcessorConverter) ConvertAndAppend(state *state, id component.Insta diags.Add( diag.SeverityLevelInfo, - fmt.Sprintf("Converted %s into %s", stringifyInstanceID(id), stringifyBlock(block)), + fmt.Sprintf("Converted %s into %s", StringifyInstanceID(id), StringifyBlock(block)), ) state.Body().AppendBlock(block) return diags } -func toBatchProcessor(state *state, id component.InstanceID, cfg *batchprocessor.Config) *batch.Arguments { +func toBatchProcessor(state *State, id component.InstanceID, cfg *batchprocessor.Config) *batch.Arguments { var ( nextMetrics = state.Next(id, component.DataTypeMetrics) nextLogs = state.Next(id, component.DataTypeLogs) @@ -56,9 +56,9 @@ func toBatchProcessor(state *state, id component.InstanceID, cfg *batchprocessor MetadataKeys: cfg.MetadataKeys, MetadataCardinalityLimit: cfg.MetadataCardinalityLimit, Output: 
&otelcol.ConsumerArguments{ - Metrics: toTokenizedConsumers(nextMetrics), - Logs: toTokenizedConsumers(nextLogs), - Traces: toTokenizedConsumers(nextTraces), + Metrics: ToTokenizedConsumers(nextMetrics), + Logs: ToTokenizedConsumers(nextLogs), + Traces: ToTokenizedConsumers(nextTraces), }, } } diff --git a/internal/converter/internal/otelcolconvert/converter_bearertokenauthextension.go b/internal/converter/internal/otelcolconvert/converter_bearertokenauthextension.go index 63f134ec8abd..8801a682d71b 100644 --- a/internal/converter/internal/otelcolconvert/converter_bearertokenauthextension.go +++ b/internal/converter/internal/otelcolconvert/converter_bearertokenauthextension.go @@ -26,7 +26,7 @@ func (bearerTokenAuthExtensionConverter) Factory() component.Factory { func (bearerTokenAuthExtensionConverter) InputComponentName() string { return "otelcol.auth.bearer" } -func (bearerTokenAuthExtensionConverter) ConvertAndAppend(state *state, id component.InstanceID, cfg component.Config) diag.Diagnostics { +func (bearerTokenAuthExtensionConverter) ConvertAndAppend(state *State, id component.InstanceID, cfg component.Config) diag.Diagnostics { var diags diag.Diagnostics label := state.FlowComponentLabel() @@ -52,7 +52,7 @@ func (bearerTokenAuthExtensionConverter) ConvertAndAppend(state *state, id compo diags.Add( diag.SeverityLevelInfo, - fmt.Sprintf("Converted %s into %s", stringifyInstanceID(id), stringifyBlock(block)), + fmt.Sprintf("Converted %s into %s", StringifyInstanceID(id), StringifyBlock(block)), ) state.Body().AppendBlock(block) @@ -65,7 +65,7 @@ func toBearerTokenAuthExtension(cfg *bearertokenauthextension.Config) *bearer.Ar Token: rivertypes.Secret(string(cfg.BearerToken)), } } -func toBearerTokenAuthExtensionWithFilename(state *state, cfg *bearertokenauthextension.Config) (*bearer.Arguments, string) { +func toBearerTokenAuthExtensionWithFilename(state *State, cfg *bearertokenauthextension.Config) (*bearer.Arguments, string) { label := 
state.FlowComponentLabel() args := &file.Arguments{ Filename: cfg.Filename, @@ -78,5 +78,5 @@ func toBearerTokenAuthExtensionWithFilename(state *state, cfg *bearertokenauthex return &bearer.Arguments{ Scheme: cfg.Scheme, - }, fmt.Sprintf("%s.content", stringifyBlock(block)) + }, fmt.Sprintf("%s.content", StringifyBlock(block)) } diff --git a/internal/converter/internal/otelcolconvert/converter_filterprocessor.go b/internal/converter/internal/otelcolconvert/converter_filterprocessor.go index 71cb6749d35e..406d39345020 100644 --- a/internal/converter/internal/otelcolconvert/converter_filterprocessor.go +++ b/internal/converter/internal/otelcolconvert/converter_filterprocessor.go @@ -25,7 +25,7 @@ func (filterProcessorConverter) InputComponentName() string { return "otelcol.processor.filter" } -func (filterProcessorConverter) ConvertAndAppend(state *state, id component.InstanceID, cfg component.Config) diag.Diagnostics { +func (filterProcessorConverter) ConvertAndAppend(state *State, id component.InstanceID, cfg component.Config) diag.Diagnostics { var diags diag.Diagnostics label := state.FlowComponentLabel() @@ -35,14 +35,14 @@ func (filterProcessorConverter) ConvertAndAppend(state *state, id component.Inst diags.Add( diag.SeverityLevelInfo, - fmt.Sprintf("Converted %s into %s", stringifyInstanceID(id), stringifyBlock(block)), + fmt.Sprintf("Converted %s into %s", StringifyInstanceID(id), StringifyBlock(block)), ) state.Body().AppendBlock(block) return diags } -func toFilterProcessor(state *state, id component.InstanceID, cfg *filterprocessor.Config) *filter.Arguments { +func toFilterProcessor(state *State, id component.InstanceID, cfg *filterprocessor.Config) *filter.Arguments { var ( nextMetrics = state.Next(id, component.DataTypeMetrics) nextLogs = state.Next(id, component.DataTypeLogs) @@ -63,9 +63,9 @@ func toFilterProcessor(state *state, id component.InstanceID, cfg *filterprocess LogRecord: cfg.Logs.LogConditions, }, Output: &otelcol.ConsumerArguments{ - 
Metrics: toTokenizedConsumers(nextMetrics), - Logs: toTokenizedConsumers(nextLogs), - Traces: toTokenizedConsumers(nextTraces), + Metrics: ToTokenizedConsumers(nextMetrics), + Logs: ToTokenizedConsumers(nextLogs), + Traces: ToTokenizedConsumers(nextTraces), }, } } diff --git a/internal/converter/internal/otelcolconvert/converter_headerssetterextension.go b/internal/converter/internal/otelcolconvert/converter_headerssetterextension.go index 799bc96042a2..b66288fab6c8 100644 --- a/internal/converter/internal/otelcolconvert/converter_headerssetterextension.go +++ b/internal/converter/internal/otelcolconvert/converter_headerssetterextension.go @@ -23,7 +23,7 @@ func (headersSetterExtensionConverter) Factory() component.Factory { func (headersSetterExtensionConverter) InputComponentName() string { return "otelcol.auth.headers" } -func (headersSetterExtensionConverter) ConvertAndAppend(state *state, id component.InstanceID, cfg component.Config) diag.Diagnostics { +func (headersSetterExtensionConverter) ConvertAndAppend(state *State, id component.InstanceID, cfg component.Config) diag.Diagnostics { var diags diag.Diagnostics label := state.FlowComponentLabel() @@ -33,7 +33,7 @@ func (headersSetterExtensionConverter) ConvertAndAppend(state *state, id compone diags.Add( diag.SeverityLevelInfo, - fmt.Sprintf("Converted %s into %s", stringifyInstanceID(id), stringifyBlock(block)), + fmt.Sprintf("Converted %s into %s", StringifyInstanceID(id), StringifyBlock(block)), ) state.Body().AppendBlock(block) diff --git a/internal/converter/internal/otelcolconvert/converter_helpers.go b/internal/converter/internal/otelcolconvert/converter_helpers.go index dbef1c481af5..cb2343e650a8 100644 --- a/internal/converter/internal/otelcolconvert/converter_helpers.go +++ b/internal/converter/internal/otelcolconvert/converter_helpers.go @@ -30,7 +30,7 @@ func (tc tokenizedConsumer) RiverTokenize() []builder.Token { }} } -func toTokenizedConsumers(components []componentID) []otelcol.Consumer { 
+func ToTokenizedConsumers(components []componentID) []otelcol.Consumer { res := make([]otelcol.Consumer, 0, len(components)) for _, component := range components { diff --git a/internal/converter/internal/otelcolconvert/converter_jaegerreceiver.go b/internal/converter/internal/otelcolconvert/converter_jaegerreceiver.go index 360520b94847..75b3261170ce 100644 --- a/internal/converter/internal/otelcolconvert/converter_jaegerreceiver.go +++ b/internal/converter/internal/otelcolconvert/converter_jaegerreceiver.go @@ -24,7 +24,7 @@ func (jaegerReceiverConverter) Factory() component.Factory { return jaegerreceiv func (jaegerReceiverConverter) InputComponentName() string { return "" } -func (jaegerReceiverConverter) ConvertAndAppend(state *state, id component.InstanceID, cfg component.Config) diag.Diagnostics { +func (jaegerReceiverConverter) ConvertAndAppend(state *State, id component.InstanceID, cfg component.Config) diag.Diagnostics { var diags diag.Diagnostics label := state.FlowComponentLabel() @@ -34,14 +34,14 @@ func (jaegerReceiverConverter) ConvertAndAppend(state *state, id component.Insta diags.Add( diag.SeverityLevelInfo, - fmt.Sprintf("Converted %s into %s", stringifyInstanceID(id), stringifyBlock(block)), + fmt.Sprintf("Converted %s into %s", StringifyInstanceID(id), StringifyBlock(block)), ) state.Body().AppendBlock(block) return diags } -func toJaegerReceiver(state *state, id component.InstanceID, cfg *jaegerreceiver.Config) *jaeger.Arguments { +func toJaegerReceiver(state *State, id component.InstanceID, cfg *jaegerreceiver.Config) *jaeger.Arguments { var ( nextTraces = state.Next(id, component.DataTypeTraces) ) @@ -57,7 +57,7 @@ func toJaegerReceiver(state *state, id component.InstanceID, cfg *jaegerreceiver DebugMetrics: common.DefaultValue[jaeger.Arguments]().DebugMetrics, Output: &otelcol.ConsumerArguments{ - Traces: toTokenizedConsumers(nextTraces), + Traces: ToTokenizedConsumers(nextTraces), }, } } diff --git 
a/internal/converter/internal/otelcolconvert/converter_jaegerremotesamplingextension.go b/internal/converter/internal/otelcolconvert/converter_jaegerremotesamplingextension.go index df1cda709fca..dcf17bc2352d 100644 --- a/internal/converter/internal/otelcolconvert/converter_jaegerremotesamplingextension.go +++ b/internal/converter/internal/otelcolconvert/converter_jaegerremotesamplingextension.go @@ -24,7 +24,7 @@ func (jaegerRemoteSamplingExtensionConverter) InputComponentName() string { return "otelcol.extension.jaeger_remote_sampling" } -func (jaegerRemoteSamplingExtensionConverter) ConvertAndAppend(state *state, id component.InstanceID, cfg component.Config) diag.Diagnostics { +func (jaegerRemoteSamplingExtensionConverter) ConvertAndAppend(state *State, id component.InstanceID, cfg component.Config) diag.Diagnostics { var diags diag.Diagnostics label := state.FlowComponentLabel() @@ -34,7 +34,7 @@ func (jaegerRemoteSamplingExtensionConverter) ConvertAndAppend(state *state, id diags.Add( diag.SeverityLevelInfo, - fmt.Sprintf("Converted %s into %s", stringifyInstanceID(id), stringifyBlock(block)), + fmt.Sprintf("Converted %s into %s", StringifyInstanceID(id), StringifyBlock(block)), ) state.Body().AppendBlock(block) diff --git a/internal/converter/internal/otelcolconvert/converter_k8sattributesprocessor.go b/internal/converter/internal/otelcolconvert/converter_k8sattributesprocessor.go index abd109bbd954..045fbed5b80f 100644 --- a/internal/converter/internal/otelcolconvert/converter_k8sattributesprocessor.go +++ b/internal/converter/internal/otelcolconvert/converter_k8sattributesprocessor.go @@ -25,7 +25,7 @@ func (k8sAttributesProcessorConverter) InputComponentName() string { return "otelcol.processor.k8sattributes" } -func (k8sAttributesProcessorConverter) ConvertAndAppend(state *state, id component.InstanceID, cfg component.Config) diag.Diagnostics { +func (k8sAttributesProcessorConverter) ConvertAndAppend(state *State, id component.InstanceID, cfg 
component.Config) diag.Diagnostics { var diags diag.Diagnostics label := state.FlowComponentLabel() @@ -35,14 +35,14 @@ func (k8sAttributesProcessorConverter) ConvertAndAppend(state *state, id compone diags.Add( diag.SeverityLevelInfo, - fmt.Sprintf("Converted %s into %s", stringifyInstanceID(id), stringifyBlock(block)), + fmt.Sprintf("Converted %s into %s", StringifyInstanceID(id), StringifyBlock(block)), ) state.Body().AppendBlock(block) return diags } -func toK8SAttributesProcessor(state *state, id component.InstanceID, cfg *k8sattributesprocessor.Config) *k8sattributes.Arguments { +func toK8SAttributesProcessor(state *State, id component.InstanceID, cfg *k8sattributesprocessor.Config) *k8sattributes.Arguments { var ( nextMetrics = state.Next(id, component.DataTypeMetrics) nextLogs = state.Next(id, component.DataTypeLogs) @@ -67,9 +67,9 @@ func toK8SAttributesProcessor(state *state, id component.InstanceID, cfg *k8satt Exclude: toExclude(cfg.Exclude), Output: &otelcol.ConsumerArguments{ - Metrics: toTokenizedConsumers(nextMetrics), - Logs: toTokenizedConsumers(nextLogs), - Traces: toTokenizedConsumers(nextTraces), + Metrics: ToTokenizedConsumers(nextMetrics), + Logs: ToTokenizedConsumers(nextLogs), + Traces: ToTokenizedConsumers(nextTraces), }, } } diff --git a/internal/converter/internal/otelcolconvert/converter_kafkareceiver.go b/internal/converter/internal/otelcolconvert/converter_kafkareceiver.go index 2dbc84db0c3e..7f46b5157b4b 100644 --- a/internal/converter/internal/otelcolconvert/converter_kafkareceiver.go +++ b/internal/converter/internal/otelcolconvert/converter_kafkareceiver.go @@ -26,7 +26,7 @@ func (kafkaReceiverConverter) Factory() component.Factory { return kafkareceiver func (kafkaReceiverConverter) InputComponentName() string { return "" } -func (kafkaReceiverConverter) ConvertAndAppend(state *state, id component.InstanceID, cfg component.Config) diag.Diagnostics { +func (kafkaReceiverConverter) ConvertAndAppend(state *State, id 
component.InstanceID, cfg component.Config) diag.Diagnostics { var diags diag.Diagnostics label := state.FlowComponentLabel() @@ -36,14 +36,14 @@ func (kafkaReceiverConverter) ConvertAndAppend(state *state, id component.Instan diags.Add( diag.SeverityLevelInfo, - fmt.Sprintf("Converted %s into %s", stringifyInstanceID(id), stringifyBlock(block)), + fmt.Sprintf("Converted %s into %s", StringifyInstanceID(id), StringifyBlock(block)), ) state.Body().AppendBlock(block) return diags } -func toKafkaReceiver(state *state, id component.InstanceID, cfg *kafkareceiver.Config) *kafka.Arguments { +func toKafkaReceiver(state *State, id component.InstanceID, cfg *kafkareceiver.Config) *kafka.Arguments { var ( nextMetrics = state.Next(id, component.DataTypeMetrics) nextLogs = state.Next(id, component.DataTypeLogs) @@ -70,9 +70,9 @@ func toKafkaReceiver(state *state, id component.InstanceID, cfg *kafkareceiver.C DebugMetrics: common.DefaultValue[kafka.Arguments]().DebugMetrics, Output: &otelcol.ConsumerArguments{ - Metrics: toTokenizedConsumers(nextMetrics), - Logs: toTokenizedConsumers(nextLogs), - Traces: toTokenizedConsumers(nextTraces), + Metrics: ToTokenizedConsumers(nextMetrics), + Logs: ToTokenizedConsumers(nextLogs), + Traces: ToTokenizedConsumers(nextTraces), }, } } diff --git a/internal/converter/internal/otelcolconvert/converter_loadbalancingexporter.go b/internal/converter/internal/otelcolconvert/converter_loadbalancingexporter.go index 9f73d5935940..4475badd27e3 100644 --- a/internal/converter/internal/otelcolconvert/converter_loadbalancingexporter.go +++ b/internal/converter/internal/otelcolconvert/converter_loadbalancingexporter.go @@ -28,7 +28,7 @@ func (loadbalancingExporterConverter) InputComponentName() string { return "otelcol.exporter.loadbalancing" } -func (loadbalancingExporterConverter) ConvertAndAppend(state *state, id component.InstanceID, cfg component.Config) diag.Diagnostics { +func (loadbalancingExporterConverter) ConvertAndAppend(state *State, id 
component.InstanceID, cfg component.Config) diag.Diagnostics { var diags diag.Diagnostics label := state.FlowComponentLabel() @@ -46,7 +46,7 @@ func (loadbalancingExporterConverter) ConvertAndAppend(state *state, id componen diags.Add( diag.SeverityLevelInfo, - fmt.Sprintf("Converted %s into %s", stringifyInstanceID(id), stringifyBlock(block)), + fmt.Sprintf("Converted %s into %s", StringifyInstanceID(id), StringifyBlock(block)), ) state.Body().AppendBlock(block) diff --git a/internal/converter/internal/otelcolconvert/converter_loggingexporter.go b/internal/converter/internal/otelcolconvert/converter_loggingexporter.go index 76d85cd2f06e..131ec2584715 100644 --- a/internal/converter/internal/otelcolconvert/converter_loggingexporter.go +++ b/internal/converter/internal/otelcolconvert/converter_loggingexporter.go @@ -25,7 +25,7 @@ func (loggingExporterConverter) InputComponentName() string { return "otelcol.exporter.logging" } -func (loggingExporterConverter) ConvertAndAppend(state *state, id component.InstanceID, cfg component.Config) diag.Diagnostics { +func (loggingExporterConverter) ConvertAndAppend(state *State, id component.InstanceID, cfg component.Config) diag.Diagnostics { var diags diag.Diagnostics label := state.FlowComponentLabel() @@ -34,7 +34,7 @@ func (loggingExporterConverter) ConvertAndAppend(state *state, id component.Inst diags.Add( diag.SeverityLevelInfo, - fmt.Sprintf("Converted %s into %s", stringifyInstanceID(id), stringifyBlock(block)), + fmt.Sprintf("Converted %s into %s", StringifyInstanceID(id), StringifyBlock(block)), ) diags.AddAll(common.ValidateSupported(common.NotEquals, diff --git a/internal/converter/internal/otelcolconvert/converter_memorylimiterprocessor.go b/internal/converter/internal/otelcolconvert/converter_memorylimiterprocessor.go index f870cf484855..c370fbd28293 100644 --- a/internal/converter/internal/otelcolconvert/converter_memorylimiterprocessor.go +++ 
b/internal/converter/internal/otelcolconvert/converter_memorylimiterprocessor.go @@ -25,7 +25,7 @@ func (memoryLimiterProcessorConverter) Factory() component.Factory { func (memoryLimiterProcessorConverter) InputComponentName() string { return "otelcol.processor.memory_limiter" } -func (memoryLimiterProcessorConverter) ConvertAndAppend(state *state, id component.InstanceID, cfg component.Config) diag.Diagnostics { +func (memoryLimiterProcessorConverter) ConvertAndAppend(state *State, id component.InstanceID, cfg component.Config) diag.Diagnostics { var diags diag.Diagnostics label := state.FlowComponentLabel() @@ -35,7 +35,7 @@ func (memoryLimiterProcessorConverter) ConvertAndAppend(state *state, id compone diags.Add( diag.SeverityLevelInfo, - fmt.Sprintf("Converted %s into %s", stringifyInstanceID(id), stringifyBlock(block)), + fmt.Sprintf("Converted %s into %s", StringifyInstanceID(id), StringifyBlock(block)), ) state.Body().AppendBlock(block) @@ -43,7 +43,7 @@ func (memoryLimiterProcessorConverter) ConvertAndAppend(state *state, id compone return diags } -func toMemoryLimiterProcessor(state *state, id component.InstanceID, cfg *memorylimiterprocessor.Config) *memorylimiter.Arguments { +func toMemoryLimiterProcessor(state *State, id component.InstanceID, cfg *memorylimiterprocessor.Config) *memorylimiter.Arguments { var ( nextMetrics = state.Next(id, component.DataTypeMetrics) nextLogs = state.Next(id, component.DataTypeLogs) @@ -57,9 +57,9 @@ func toMemoryLimiterProcessor(state *state, id component.InstanceID, cfg *memory MemoryLimitPercentage: cfg.MemoryLimitPercentage, MemorySpikePercentage: cfg.MemorySpikePercentage, Output: &otelcol.ConsumerArguments{ - Metrics: toTokenizedConsumers(nextMetrics), - Logs: toTokenizedConsumers(nextLogs), - Traces: toTokenizedConsumers(nextTraces), + Metrics: ToTokenizedConsumers(nextMetrics), + Logs: ToTokenizedConsumers(nextLogs), + Traces: ToTokenizedConsumers(nextTraces), }, } } diff --git 
a/internal/converter/internal/otelcolconvert/converter_oauth2clientauthextension.go b/internal/converter/internal/otelcolconvert/converter_oauth2clientauthextension.go index 14ba01ea91c9..f23d47b49ded 100644 --- a/internal/converter/internal/otelcolconvert/converter_oauth2clientauthextension.go +++ b/internal/converter/internal/otelcolconvert/converter_oauth2clientauthextension.go @@ -23,7 +23,7 @@ func (oauth2ClientAuthExtensionConverter) Factory() component.Factory { func (oauth2ClientAuthExtensionConverter) InputComponentName() string { return "otelcol.auth.oauth2" } -func (oauth2ClientAuthExtensionConverter) ConvertAndAppend(state *state, id component.InstanceID, cfg component.Config) diag.Diagnostics { +func (oauth2ClientAuthExtensionConverter) ConvertAndAppend(state *State, id component.InstanceID, cfg component.Config) diag.Diagnostics { var diags diag.Diagnostics label := state.FlowComponentLabel() @@ -33,7 +33,7 @@ func (oauth2ClientAuthExtensionConverter) ConvertAndAppend(state *state, id comp diags.Add( diag.SeverityLevelInfo, - fmt.Sprintf("Converted %s into %s", stringifyInstanceID(id), stringifyBlock(block)), + fmt.Sprintf("Converted %s into %s", StringifyInstanceID(id), StringifyBlock(block)), ) state.Body().AppendBlock(block) diff --git a/internal/converter/internal/otelcolconvert/converter_opencensusreceiver.go b/internal/converter/internal/otelcolconvert/converter_opencensusreceiver.go index cbc49366ad5a..4e88a0d273ae 100644 --- a/internal/converter/internal/otelcolconvert/converter_opencensusreceiver.go +++ b/internal/converter/internal/otelcolconvert/converter_opencensusreceiver.go @@ -23,7 +23,7 @@ func (opencensusReceiverConverter) Factory() component.Factory { func (opencensusReceiverConverter) InputComponentName() string { return "" } -func (opencensusReceiverConverter) ConvertAndAppend(state *state, id component.InstanceID, cfg component.Config) diag.Diagnostics { +func (opencensusReceiverConverter) ConvertAndAppend(state *State, id 
component.InstanceID, cfg component.Config) diag.Diagnostics { var diags diag.Diagnostics label := state.FlowComponentLabel() @@ -33,14 +33,14 @@ func (opencensusReceiverConverter) ConvertAndAppend(state *state, id component.I diags.Add( diag.SeverityLevelInfo, - fmt.Sprintf("Converted %s into %s", stringifyInstanceID(id), stringifyBlock(block)), + fmt.Sprintf("Converted %s into %s", StringifyInstanceID(id), StringifyBlock(block)), ) state.Body().AppendBlock(block) return diags } -func toOpencensusReceiver(state *state, id component.InstanceID, cfg *opencensusreceiver.Config) *opencensus.Arguments { +func toOpencensusReceiver(state *State, id component.InstanceID, cfg *opencensusreceiver.Config) *opencensus.Arguments { var ( nextMetrics = state.Next(id, component.DataTypeMetrics) nextTraces = state.Next(id, component.DataTypeTraces) @@ -53,8 +53,8 @@ func toOpencensusReceiver(state *state, id component.InstanceID, cfg *opencensus DebugMetrics: common.DefaultValue[opencensus.Arguments]().DebugMetrics, Output: &otelcol.ConsumerArguments{ - Metrics: toTokenizedConsumers(nextMetrics), - Traces: toTokenizedConsumers(nextTraces), + Metrics: ToTokenizedConsumers(nextMetrics), + Traces: ToTokenizedConsumers(nextTraces), }, } } diff --git a/internal/converter/internal/otelcolconvert/converter_otlpexporter.go b/internal/converter/internal/otelcolconvert/converter_otlpexporter.go index c13e550f6158..7f6881b489b6 100644 --- a/internal/converter/internal/otelcolconvert/converter_otlpexporter.go +++ b/internal/converter/internal/otelcolconvert/converter_otlpexporter.go @@ -31,7 +31,7 @@ func (otlpExporterConverter) Factory() component.Factory { func (otlpExporterConverter) InputComponentName() string { return "otelcol.exporter.otlp" } -func (otlpExporterConverter) ConvertAndAppend(state *state, id component.InstanceID, cfg component.Config) diag.Diagnostics { +func (otlpExporterConverter) ConvertAndAppend(state *State, id component.InstanceID, cfg component.Config) 
diag.Diagnostics { var diags diag.Diagnostics label := state.FlowComponentLabel() @@ -49,7 +49,7 @@ func (otlpExporterConverter) ConvertAndAppend(state *state, id component.Instanc diags.Add( diag.SeverityLevelInfo, - fmt.Sprintf("Converted %s into %s", stringifyInstanceID(id), stringifyBlock(block)), + fmt.Sprintf("Converted %s into %s", StringifyInstanceID(id), StringifyBlock(block)), ) state.Body().AppendBlock(block) diff --git a/internal/converter/internal/otelcolconvert/converter_otlphttpexporter.go b/internal/converter/internal/otelcolconvert/converter_otlphttpexporter.go index 64f1ff69e13d..b24813e351fa 100644 --- a/internal/converter/internal/otelcolconvert/converter_otlphttpexporter.go +++ b/internal/converter/internal/otelcolconvert/converter_otlphttpexporter.go @@ -30,7 +30,7 @@ func (otlpHTTPExporterConverter) InputComponentName() string { return "otelcol.exporter.otlphttp" } -func (otlpHTTPExporterConverter) ConvertAndAppend(state *state, id component.InstanceID, cfg component.Config) diag.Diagnostics { +func (otlpHTTPExporterConverter) ConvertAndAppend(state *State, id component.InstanceID, cfg component.Config) diag.Diagnostics { var diags diag.Diagnostics label := state.FlowComponentLabel() @@ -48,7 +48,7 @@ func (otlpHTTPExporterConverter) ConvertAndAppend(state *state, id component.Ins diags.Add( diag.SeverityLevelInfo, - fmt.Sprintf("Converted %s into %s", stringifyInstanceID(id), stringifyBlock(block)), + fmt.Sprintf("Converted %s into %s", StringifyInstanceID(id), StringifyBlock(block)), ) state.Body().AppendBlock(block) diff --git a/internal/converter/internal/otelcolconvert/converter_otlpreceiver.go b/internal/converter/internal/otelcolconvert/converter_otlpreceiver.go index d649210ec952..90384ba64d6e 100644 --- a/internal/converter/internal/otelcolconvert/converter_otlpreceiver.go +++ b/internal/converter/internal/otelcolconvert/converter_otlpreceiver.go @@ -26,7 +26,7 @@ func (otlpReceiverConverter) Factory() component.Factory { return 
otlpreceiver.N func (otlpReceiverConverter) InputComponentName() string { return "" } -func (otlpReceiverConverter) ConvertAndAppend(state *state, id component.InstanceID, cfg component.Config) diag.Diagnostics { +func (otlpReceiverConverter) ConvertAndAppend(state *State, id component.InstanceID, cfg component.Config) diag.Diagnostics { var diags diag.Diagnostics label := state.FlowComponentLabel() @@ -36,14 +36,14 @@ func (otlpReceiverConverter) ConvertAndAppend(state *state, id component.Instanc diags.Add( diag.SeverityLevelInfo, - fmt.Sprintf("Converted %s into %s", stringifyInstanceID(id), stringifyBlock(block)), + fmt.Sprintf("Converted %s into %s", StringifyInstanceID(id), StringifyBlock(block)), ) state.Body().AppendBlock(block) return diags } -func toOtelcolReceiverOTLP(state *state, id component.InstanceID, cfg *otlpreceiver.Config) *otlp.Arguments { +func toOtelcolReceiverOTLP(state *State, id component.InstanceID, cfg *otlpreceiver.Config) *otlp.Arguments { var ( nextMetrics = state.Next(id, component.DataTypeMetrics) nextLogs = state.Next(id, component.DataTypeLogs) @@ -57,9 +57,9 @@ func toOtelcolReceiverOTLP(state *state, id component.InstanceID, cfg *otlprecei DebugMetrics: common.DefaultValue[otlp.Arguments]().DebugMetrics, Output: &otelcol.ConsumerArguments{ - Metrics: toTokenizedConsumers(nextMetrics), - Logs: toTokenizedConsumers(nextLogs), - Traces: toTokenizedConsumers(nextTraces), + Metrics: ToTokenizedConsumers(nextMetrics), + Logs: ToTokenizedConsumers(nextLogs), + Traces: ToTokenizedConsumers(nextTraces), }, } } diff --git a/internal/converter/internal/otelcolconvert/converter_probabilisticsamplerprocessor.go b/internal/converter/internal/otelcolconvert/converter_probabilisticsamplerprocessor.go index de800410ae3e..ac4a0db4c895 100644 --- a/internal/converter/internal/otelcolconvert/converter_probabilisticsamplerprocessor.go +++ b/internal/converter/internal/otelcolconvert/converter_probabilisticsamplerprocessor.go @@ -25,7 +25,7 @@ func 
(probabilisticSamplerProcessorConverter) InputComponentName() string { return "otelcol.processor.probabilistic_sampler" } -func (probabilisticSamplerProcessorConverter) ConvertAndAppend(state *state, id component.InstanceID, cfg component.Config) diag.Diagnostics { +func (probabilisticSamplerProcessorConverter) ConvertAndAppend(state *State, id component.InstanceID, cfg component.Config) diag.Diagnostics { var diags diag.Diagnostics label := state.FlowComponentLabel() @@ -35,14 +35,14 @@ func (probabilisticSamplerProcessorConverter) ConvertAndAppend(state *state, id diags.Add( diag.SeverityLevelInfo, - fmt.Sprintf("Converted %s into %s", stringifyInstanceID(id), stringifyBlock(block)), + fmt.Sprintf("Converted %s into %s", StringifyInstanceID(id), StringifyBlock(block)), ) state.Body().AppendBlock(block) return diags } -func toProbabilisticSamplerProcessor(state *state, id component.InstanceID, cfg *probabilisticsamplerprocessor.Config) *probabilistic_sampler.Arguments { +func toProbabilisticSamplerProcessor(state *State, id component.InstanceID, cfg *probabilisticsamplerprocessor.Config) *probabilistic_sampler.Arguments { var ( nextTraces = state.Next(id, component.DataTypeTraces) nextLogs = state.Next(id, component.DataTypeLogs) @@ -55,8 +55,8 @@ func toProbabilisticSamplerProcessor(state *state, id component.InstanceID, cfg FromAttribute: cfg.FromAttribute, SamplingPriority: cfg.SamplingPriority, Output: &otelcol.ConsumerArguments{ - Logs: toTokenizedConsumers(nextLogs), - Traces: toTokenizedConsumers(nextTraces), + Logs: ToTokenizedConsumers(nextLogs), + Traces: ToTokenizedConsumers(nextTraces), }, } } diff --git a/internal/converter/internal/otelcolconvert/converter_sigv4authextension.go b/internal/converter/internal/otelcolconvert/converter_sigv4authextension.go index de22285cdb9b..bd68d4ec202b 100644 --- a/internal/converter/internal/otelcolconvert/converter_sigv4authextension.go +++ 
b/internal/converter/internal/otelcolconvert/converter_sigv4authextension.go @@ -24,7 +24,7 @@ func (sigV4AuthExtensionConverter) InputComponentName() string { return "otelcol.auth.sigv4" } -func (sigV4AuthExtensionConverter) ConvertAndAppend(state *state, id component.InstanceID, cfg component.Config) diag.Diagnostics { +func (sigV4AuthExtensionConverter) ConvertAndAppend(state *State, id component.InstanceID, cfg component.Config) diag.Diagnostics { var diags diag.Diagnostics label := state.FlowComponentLabel() @@ -34,7 +34,7 @@ func (sigV4AuthExtensionConverter) ConvertAndAppend(state *state, id component.I diags.Add( diag.SeverityLevelInfo, - fmt.Sprintf("Converted %s into %s", stringifyInstanceID(id), stringifyBlock(block)), + fmt.Sprintf("Converted %s into %s", StringifyInstanceID(id), StringifyBlock(block)), ) state.Body().AppendBlock(block) diff --git a/internal/converter/internal/otelcolconvert/converter_spanmetricsconnector.go b/internal/converter/internal/otelcolconvert/converter_spanmetricsconnector.go index e38f016f9ac6..0b6a8f6528e2 100644 --- a/internal/converter/internal/otelcolconvert/converter_spanmetricsconnector.go +++ b/internal/converter/internal/otelcolconvert/converter_spanmetricsconnector.go @@ -26,7 +26,7 @@ func (spanmetricsConnectorConverter) InputComponentName() string { return "otelcol.connector.spanmetrics" } -func (spanmetricsConnectorConverter) ConvertAndAppend(state *state, id component.InstanceID, cfg component.Config) diag.Diagnostics { +func (spanmetricsConnectorConverter) ConvertAndAppend(state *State, id component.InstanceID, cfg component.Config) diag.Diagnostics { var diags diag.Diagnostics label := state.FlowComponentLabel() @@ -36,14 +36,14 @@ func (spanmetricsConnectorConverter) ConvertAndAppend(state *state, id component diags.Add( diag.SeverityLevelInfo, - fmt.Sprintf("Converted %s into %s", stringifyInstanceID(id), stringifyBlock(block)), + fmt.Sprintf("Converted %s into %s", StringifyInstanceID(id), 
StringifyBlock(block)), ) state.Body().AppendBlock(block) return diags } -func toSpanmetricsConnector(state *state, id component.InstanceID, cfg *spanmetricsconnector.Config) *spanmetrics.Arguments { +func toSpanmetricsConnector(state *State, id component.InstanceID, cfg *spanmetricsconnector.Config) *spanmetrics.Arguments { if cfg == nil { return nil } @@ -112,7 +112,7 @@ func toSpanmetricsConnector(state *state, id component.InstanceID, cfg *spanmetr }, Output: &otelcol.ConsumerArguments{ - Metrics: toTokenizedConsumers(nextMetrics), + Metrics: ToTokenizedConsumers(nextMetrics), }, } } diff --git a/internal/converter/internal/otelcolconvert/converter_spanprocessor.go b/internal/converter/internal/otelcolconvert/converter_spanprocessor.go index 6604fc960199..f46a24b266de 100644 --- a/internal/converter/internal/otelcolconvert/converter_spanprocessor.go +++ b/internal/converter/internal/otelcolconvert/converter_spanprocessor.go @@ -22,7 +22,7 @@ func (spanProcessorConverter) Factory() component.Factory { return spanprocessor func (spanProcessorConverter) InputComponentName() string { return "otelcol.processor.span" } -func (spanProcessorConverter) ConvertAndAppend(state *state, id component.InstanceID, cfg component.Config) diag.Diagnostics { +func (spanProcessorConverter) ConvertAndAppend(state *State, id component.InstanceID, cfg component.Config) diag.Diagnostics { var diags diag.Diagnostics label := state.FlowComponentLabel() @@ -32,14 +32,14 @@ func (spanProcessorConverter) ConvertAndAppend(state *state, id component.Instan diags.Add( diag.SeverityLevelInfo, - fmt.Sprintf("Converted %s into %s", stringifyInstanceID(id), stringifyBlock(block)), + fmt.Sprintf("Converted %s into %s", StringifyInstanceID(id), StringifyBlock(block)), ) state.Body().AppendBlock(block) return diags } -func toSpanProcessor(state *state, id component.InstanceID, cfg *spanprocessor.Config) *span.Arguments { +func toSpanProcessor(state *State, id component.InstanceID, cfg 
*spanprocessor.Config) *span.Arguments { var ( nextTraces = state.Next(id, component.DataTypeTraces) ) @@ -72,7 +72,7 @@ func toSpanProcessor(state *state, id component.InstanceID, cfg *spanprocessor.C }, SetStatus: setStatus, Output: &otelcol.ConsumerArguments{ - Traces: toTokenizedConsumers(nextTraces), + Traces: ToTokenizedConsumers(nextTraces), }, } } diff --git a/internal/converter/internal/otelcolconvert/converter_tailsamplingprocessor.go b/internal/converter/internal/otelcolconvert/converter_tailsamplingprocessor.go index 18e7ab4a26f2..439a84f8572a 100644 --- a/internal/converter/internal/otelcolconvert/converter_tailsamplingprocessor.go +++ b/internal/converter/internal/otelcolconvert/converter_tailsamplingprocessor.go @@ -26,7 +26,7 @@ func (tailSamplingProcessorConverter) InputComponentName() string { return "otelcol.processor.tail_sampling" } -func (tailSamplingProcessorConverter) ConvertAndAppend(state *state, id component.InstanceID, cfg component.Config) diag.Diagnostics { +func (tailSamplingProcessorConverter) ConvertAndAppend(state *State, id component.InstanceID, cfg component.Config) diag.Diagnostics { var diags diag.Diagnostics label := state.FlowComponentLabel() @@ -36,14 +36,14 @@ func (tailSamplingProcessorConverter) ConvertAndAppend(state *state, id componen diags.Add( diag.SeverityLevelInfo, - fmt.Sprintf("Converted %s into %s", stringifyInstanceID(id), stringifyBlock(block)), + fmt.Sprintf("Converted %s into %s", StringifyInstanceID(id), StringifyBlock(block)), ) state.Body().AppendBlock(block) return diags } -func toTailSamplingProcessor(state *state, id component.InstanceID, cfg *tailsamplingprocessor.Config) *tail_sampling.Arguments { +func toTailSamplingProcessor(state *State, id component.InstanceID, cfg *tailsamplingprocessor.Config) *tail_sampling.Arguments { var ( nextTraces = state.Next(id, component.DataTypeTraces) ) @@ -57,7 +57,7 @@ func toTailSamplingProcessor(state *state, id component.InstanceID, cfg *tailsam NumTraces: 
cfg.NumTraces, ExpectedNewTracesPerSec: cfg.ExpectedNewTracesPerSec, Output: &otelcol.ConsumerArguments{ - Traces: toTokenizedConsumers(nextTraces), + Traces: ToTokenizedConsumers(nextTraces), }, } } diff --git a/internal/converter/internal/otelcolconvert/converter_transformprocessor.go b/internal/converter/internal/otelcolconvert/converter_transformprocessor.go index 694046bb21e9..aa3160ca29c2 100644 --- a/internal/converter/internal/otelcolconvert/converter_transformprocessor.go +++ b/internal/converter/internal/otelcolconvert/converter_transformprocessor.go @@ -25,7 +25,7 @@ func (transformProcessorConverter) InputComponentName() string { return "otelcol.processor.transform" } -func (transformProcessorConverter) ConvertAndAppend(state *state, id component.InstanceID, cfg component.Config) diag.Diagnostics { +func (transformProcessorConverter) ConvertAndAppend(state *State, id component.InstanceID, cfg component.Config) diag.Diagnostics { var diags diag.Diagnostics label := state.FlowComponentLabel() @@ -35,14 +35,14 @@ func (transformProcessorConverter) ConvertAndAppend(state *state, id component.I diags.Add( diag.SeverityLevelInfo, - fmt.Sprintf("Converted %s into %s", stringifyInstanceID(id), stringifyBlock(block)), + fmt.Sprintf("Converted %s into %s", StringifyInstanceID(id), StringifyBlock(block)), ) state.Body().AppendBlock(block) return diags } -func toTransformProcessor(state *state, id component.InstanceID, cfg *transformprocessor.Config) *transform.Arguments { +func toTransformProcessor(state *State, id component.InstanceID, cfg *transformprocessor.Config) *transform.Arguments { var ( nextMetrics = state.Next(id, component.DataTypeMetrics) nextLogs = state.Next(id, component.DataTypeLogs) @@ -55,9 +55,9 @@ func toTransformProcessor(state *state, id component.InstanceID, cfg *transformp MetricStatements: toContextStatements(encodeMapslice(cfg.MetricStatements)), LogStatements: toContextStatements(encodeMapslice(cfg.LogStatements)), Output: 
&otelcol.ConsumerArguments{ - Metrics: toTokenizedConsumers(nextMetrics), - Logs: toTokenizedConsumers(nextLogs), - Traces: toTokenizedConsumers(nextTraces), + Metrics: ToTokenizedConsumers(nextMetrics), + Logs: ToTokenizedConsumers(nextLogs), + Traces: ToTokenizedConsumers(nextTraces), }, } } diff --git a/internal/converter/internal/otelcolconvert/converter_vcenterreceiver.go b/internal/converter/internal/otelcolconvert/converter_vcenterreceiver.go index b4b791d8ffbb..76855e48bfc9 100644 --- a/internal/converter/internal/otelcolconvert/converter_vcenterreceiver.go +++ b/internal/converter/internal/otelcolconvert/converter_vcenterreceiver.go @@ -22,7 +22,7 @@ func (vcenterReceiverConverter) Factory() component.Factory { return vcenterrece func (vcenterReceiverConverter) InputComponentName() string { return "" } -func (vcenterReceiverConverter) ConvertAndAppend(state *state, id component.InstanceID, cfg component.Config) diag.Diagnostics { +func (vcenterReceiverConverter) ConvertAndAppend(state *State, id component.InstanceID, cfg component.Config) diag.Diagnostics { var diags diag.Diagnostics label := state.FlowComponentLabel() @@ -32,14 +32,14 @@ func (vcenterReceiverConverter) ConvertAndAppend(state *state, id component.Inst diags.Add( diag.SeverityLevelInfo, - fmt.Sprintf("Converted %s into %s", stringifyInstanceID(id), stringifyBlock(block)), + fmt.Sprintf("Converted %s into %s", StringifyInstanceID(id), StringifyBlock(block)), ) state.Body().AppendBlock(block) return diags } -func toVcenterReceiver(state *state, id component.InstanceID, cfg *vcenterreceiver.Config) *vcenter.Arguments { +func toVcenterReceiver(state *State, id component.InstanceID, cfg *vcenterreceiver.Config) *vcenter.Arguments { var ( nextMetrics = state.Next(id, component.DataTypeMetrics) nextTraces = state.Next(id, component.DataTypeTraces) @@ -63,8 +63,8 @@ func toVcenterReceiver(state *state, id component.InstanceID, cfg *vcenterreceiv TLS: toTLSClientArguments(cfg.TLSClientSetting), 
Output: &otelcol.ConsumerArguments{ - Metrics: toTokenizedConsumers(nextMetrics), - Traces: toTokenizedConsumers(nextTraces), + Metrics: ToTokenizedConsumers(nextMetrics), + Traces: ToTokenizedConsumers(nextTraces), }, } } diff --git a/internal/converter/internal/otelcolconvert/converter_zipkinreceiver.go b/internal/converter/internal/otelcolconvert/converter_zipkinreceiver.go index f2c3cc82cf66..0096300de62a 100644 --- a/internal/converter/internal/otelcolconvert/converter_zipkinreceiver.go +++ b/internal/converter/internal/otelcolconvert/converter_zipkinreceiver.go @@ -21,7 +21,7 @@ func (zipkinReceiverConverter) Factory() component.Factory { return zipkinreceiv func (zipkinReceiverConverter) InputComponentName() string { return "" } -func (zipkinReceiverConverter) ConvertAndAppend(state *state, id component.InstanceID, cfg component.Config) diag.Diagnostics { +func (zipkinReceiverConverter) ConvertAndAppend(state *State, id component.InstanceID, cfg component.Config) diag.Diagnostics { var diags diag.Diagnostics label := state.FlowComponentLabel() @@ -31,14 +31,14 @@ func (zipkinReceiverConverter) ConvertAndAppend(state *state, id component.Insta diags.Add( diag.SeverityLevelInfo, - fmt.Sprintf("Converted %s into %s", stringifyInstanceID(id), stringifyBlock(block)), + fmt.Sprintf("Converted %s into %s", StringifyInstanceID(id), StringifyBlock(block)), ) state.Body().AppendBlock(block) return diags } -func toZipkinReceiver(state *state, id component.InstanceID, cfg *zipkinreceiver.Config) *zipkin.Arguments { +func toZipkinReceiver(state *State, id component.InstanceID, cfg *zipkinreceiver.Config) *zipkin.Arguments { var ( nextTraces = state.Next(id, component.DataTypeTraces) ) @@ -50,7 +50,7 @@ func toZipkinReceiver(state *state, id component.InstanceID, cfg *zipkinreceiver DebugMetrics: common.DefaultValue[zipkin.Arguments]().DebugMetrics, Output: &otelcol.ConsumerArguments{ - Traces: toTokenizedConsumers(nextTraces), + Traces: ToTokenizedConsumers(nextTraces), 
}, } } diff --git a/internal/converter/internal/otelcolconvert/otelcolconvert.go b/internal/converter/internal/otelcolconvert/otelcolconvert.go index 7c10b23acc2f..d24e072b94f1 100644 --- a/internal/converter/internal/otelcolconvert/otelcolconvert.go +++ b/internal/converter/internal/otelcolconvert/otelcolconvert.go @@ -65,7 +65,7 @@ func Convert(in []byte, extraArgs []string) ([]byte, diag.Diagnostics) { f := builder.NewFile() - diags.AddAll(AppendConfig(f, cfg, "")) + diags.AddAll(AppendConfig(f, cfg, "", nil)) diags.AddAll(common.ValidateNodes(f)) var buf bytes.Buffer @@ -143,7 +143,7 @@ func getFactories() otelcol.Factories { // AppendConfig converts the provided OpenTelemetry config into an equivalent // Flow config and appends the result to the provided file. -func AppendConfig(file *builder.File, cfg *otelcol.Config, labelPrefix string) diag.Diagnostics { +func AppendConfig(file *builder.File, cfg *otelcol.Config, labelPrefix string, extraConverters []ComponentConverter) diag.Diagnostics { var diags diag.Diagnostics groups, err := createPipelineGroups(cfg.Service.Pipelines) @@ -153,7 +153,7 @@ func AppendConfig(file *builder.File, cfg *otelcol.Config, labelPrefix string) d } // TODO(rfratto): should this be deduplicated to avoid creating factories // twice? 
- converterTable := buildConverterTable() + converterTable := buildConverterTable(extraConverters) // Connector components are defined on the top level of the OpenTelemetry // config, but inside of the pipeline definitions they act like regular @@ -188,7 +188,7 @@ func AppendConfig(file *builder.File, cfg *otelcol.Config, labelPrefix string) d for _, ext := range cfg.Service.Extensions { cid := component.InstanceID{Kind: component.KindExtension, ID: ext} - state := &state{ + state := &State{ cfg: cfg, file: file, // We pass an empty pipelineGroup to make calls to @@ -237,7 +237,7 @@ func AppendConfig(file *builder.File, cfg *otelcol.Config, labelPrefix string) d for _, id := range componentSet.ids { componentID := component.InstanceID{Kind: componentSet.kind, ID: id} - state := &state{ + state := &State{ cfg: cfg, file: file, group: &group, @@ -289,10 +289,11 @@ func validateNoDuplicateReceivers(groups []pipelineGroup, connectorIDs []compone return diags } -func buildConverterTable() map[converterKey]componentConverter { - table := make(map[converterKey]componentConverter) +func buildConverterTable(extraConverters []ComponentConverter) map[converterKey]ComponentConverter { + table := make(map[converterKey]ComponentConverter) + allConverters := append(converters, extraConverters...) 
- for _, conv := range converters { + for _, conv := range allConverters { fact := conv.Factory() switch fact.(type) { diff --git a/internal/converter/internal/otelcolconvert/utils.go b/internal/converter/internal/otelcolconvert/utils.go index 176a5a420aa6..59563f46778a 100644 --- a/internal/converter/internal/otelcolconvert/utils.go +++ b/internal/converter/internal/otelcolconvert/utils.go @@ -11,11 +11,11 @@ import ( "go.opentelemetry.io/collector/component" ) -func stringifyInstanceID(id component.InstanceID) string { - return fmt.Sprintf("%s/%s", stringifyKind(id.Kind), id.ID) +func StringifyInstanceID(id component.InstanceID) string { + return fmt.Sprintf("%s/%s", StringifyKind(id.Kind), id.ID) } -func stringifyKind(k component.Kind) string { +func StringifyKind(k component.Kind) string { switch k { case component.KindReceiver: return "receiver" @@ -32,7 +32,7 @@ func stringifyKind(k component.Kind) string { } } -func stringifyBlock(block *builder.Block) string { +func StringifyBlock(block *builder.Block) string { return fmt.Sprintf("%s.%s", strings.Join(block.Name, "."), block.Label) } @@ -57,7 +57,7 @@ func ConvertWithoutValidation(in []byte, extraArgs []string) ([]byte, diag.Diagn f := builder.NewFile() - diags.AddAll(AppendConfig(f, cfg, "")) + diags.AddAll(AppendConfig(f, cfg, "", nil)) diags.AddAll(common.ValidateNodes(f)) var buf bytes.Buffer diff --git a/internal/converter/internal/prometheusconvert/build/prometheus_blocks.go b/internal/converter/internal/prometheusconvert/build/prometheus_blocks.go index adbe26077f48..eafeecdeb94c 100644 --- a/internal/converter/internal/prometheusconvert/build/prometheus_blocks.go +++ b/internal/converter/internal/prometheusconvert/build/prometheus_blocks.go @@ -29,7 +29,7 @@ func NewPrometheusBlocks() *PrometheusBlocks { } } -// AppendToFile attaches prometheus blocks in a specific order. +// AppendToBody attaches prometheus blocks in a specific order. // // Order of blocks: // 1. 
Discovery component(s) @@ -37,25 +37,25 @@ func NewPrometheusBlocks() *PrometheusBlocks { // 3. Prometheus scrape component(s) // 4. Prometheus relabel component(s) (if any) // 5. Prometheus remote_write -func (pb *PrometheusBlocks) AppendToFile(f *builder.File) { +func (pb *PrometheusBlocks) AppendToBody(body *builder.Body) { for _, promBlock := range pb.DiscoveryBlocks { - f.Body().AppendBlock(promBlock.block) + body.AppendBlock(promBlock.block) } for _, promBlock := range pb.DiscoveryRelabelBlocks { - f.Body().AppendBlock(promBlock.block) + body.AppendBlock(promBlock.block) } for _, promBlock := range pb.PrometheusScrapeBlocks { - f.Body().AppendBlock(promBlock.block) + body.AppendBlock(promBlock.block) } for _, promBlock := range pb.PrometheusRelabelBlocks { - f.Body().AppendBlock(promBlock.block) + body.AppendBlock(promBlock.block) } for _, promBlock := range pb.PrometheusRemoteWriteBlocks { - f.Body().AppendBlock(promBlock.block) + body.AppendBlock(promBlock.block) } } diff --git a/internal/converter/internal/prometheusconvert/prometheusconvert.go b/internal/converter/internal/prometheusconvert/prometheusconvert.go index e5cdd84b4e0f..c38400b4c56a 100644 --- a/internal/converter/internal/prometheusconvert/prometheusconvert.go +++ b/internal/converter/internal/prometheusconvert/prometheusconvert.go @@ -111,7 +111,7 @@ func AppendAllNested(f *builder.File, promConfig *prom_config.Config, jobNameToC diags := validate(promConfig) diags.AddAll(pb.GetScrapeInfo()) - pb.AppendToFile(f) + pb.AppendToBody(f.Body()) return diags } diff --git a/internal/converter/internal/promtailconvert/internal/build/service_discovery.go b/internal/converter/internal/promtailconvert/internal/build/service_discovery.go index 533f5c8c2b69..dba4fff5ac61 100644 --- a/internal/converter/internal/promtailconvert/internal/build/service_discovery.go +++ b/internal/converter/internal/promtailconvert/internal/build/service_discovery.go @@ -22,7 +22,7 @@ func (s *ScrapeConfigBuilder) AppendSDs() 
{ pb := build.NewPrometheusBlocks() targets := prometheusconvert.AppendServiceDiscoveryConfigs(pb, sdConfigs, common.LabelForParts(s.globalCtx.LabelPrefix, s.cfg.JobName)) - pb.AppendToFile(s.f) + pb.AppendToBody(s.f.Body()) targetLiterals := make([]discovery.Target, 0) for _, target := range targets { diff --git a/internal/converter/internal/staticconvert/internal/build/builder_traces.go b/internal/converter/internal/staticconvert/internal/build/builder_traces.go index b37b65892456..6762d8e580f2 100644 --- a/internal/converter/internal/staticconvert/internal/build/builder_traces.go +++ b/internal/converter/internal/staticconvert/internal/build/builder_traces.go @@ -12,6 +12,10 @@ import ( "go.opentelemetry.io/collector/otelcol" ) +// List of component converters. This slice is appended to by init functions in +// other files. +var converters []otelcolconvert.ComponentConverter + func (b *ConfigBuilder) appendTraces() { if reflect.DeepEqual(b.cfg.Traces, traces.Config{}) { return @@ -24,17 +28,18 @@ func (b *ConfigBuilder) appendTraces() { continue } - // Remove the push receiver which is an implementation detail for static mode and unnecessary for the otel config. - removeReceiver(otelCfg, "traces", "push_receiver") - - b.translateAutomaticLogging(otelCfg, cfg) - // Only prefix component labels if we are doing more than 1 trace config. labelPrefix := "" if len(b.cfg.Traces.Configs) > 1 { labelPrefix = cfg.Name } - b.diags.AddAll(otelcolconvert.AppendConfig(b.f, otelCfg, labelPrefix)) + + // Remove the push receiver which is an implementation detail for static mode and unnecessary for the otel config. 
+ removeReceiver(otelCfg, "traces", "push_receiver") + + b.translateAutomaticLogging(otelCfg, cfg) + + b.diags.AddAll(otelcolconvert.AppendConfig(b.f, otelCfg, labelPrefix, converters)) } } diff --git a/internal/converter/internal/staticconvert/internal/build/converter_discoveryprocessor.go b/internal/converter/internal/staticconvert/internal/build/converter_discoveryprocessor.go new file mode 100644 index 000000000000..42142dc610e6 --- /dev/null +++ b/internal/converter/internal/staticconvert/internal/build/converter_discoveryprocessor.go @@ -0,0 +1,104 @@ +package build + +import ( + "fmt" + + "github.com/grafana/agent/internal/component/discovery" + "github.com/grafana/agent/internal/component/otelcol" + otelcol_discovery "github.com/grafana/agent/internal/component/otelcol/processor/discovery" + "github.com/grafana/agent/internal/converter/diag" + "github.com/grafana/agent/internal/converter/internal/common" + "github.com/grafana/agent/internal/converter/internal/otelcolconvert" + "github.com/grafana/agent/internal/converter/internal/prometheusconvert" + "github.com/grafana/agent/internal/converter/internal/prometheusconvert/build" + prometheus_component "github.com/grafana/agent/internal/converter/internal/prometheusconvert/component" + "github.com/grafana/agent/internal/static/traces/promsdprocessor" + "github.com/grafana/river/scanner" + prom_config "github.com/prometheus/prometheus/config" + "go.opentelemetry.io/collector/component" + "gopkg.in/yaml.v3" +) + +func init() { + converters = append(converters, discoveryProcessorConverter{}) +} + +type discoveryProcessorConverter struct{} + +func (discoveryProcessorConverter) Factory() component.Factory { + return promsdprocessor.NewFactory() +} + +func (discoveryProcessorConverter) InputComponentName() string { + return "otelcol.processor.discovery" +} + +func (discoveryProcessorConverter) ConvertAndAppend(state 
*otelcolconvert.State, id component.InstanceID, cfg component.Config) diag.Diagnostics { + label := state.FlowComponentLabel() + + args, diags := toDiscoveryProcessor(state, id, cfg.(*promsdprocessor.Config), label) + block := common.NewBlockWithOverride([]string{"otelcol", "processor", "discovery"}, label, args) + + diags.Add( + diag.SeverityLevelInfo, + fmt.Sprintf("Converted %s into %s", otelcolconvert.StringifyInstanceID(id), otelcolconvert.StringifyBlock(block)), + ) + + state.Body().AppendBlock(block) + return diags +} + +func toDiscoveryProcessor(state *otelcolconvert.State, id component.InstanceID, cfg *promsdprocessor.Config, label string) (*otelcol_discovery.Arguments, diag.Diagnostics) { + var ( + diags diag.Diagnostics + nextMetrics = state.Next(id, component.DataTypeMetrics) + nextLogs = state.Next(id, component.DataTypeLogs) + nextTraces = state.Next(id, component.DataTypeTraces) + ) + + // We need to Marshal/Unmarshal the scrape configs to translate them + // into their actual types for the conversion. + out, err := yaml.Marshal(cfg.ScrapeConfigs) + if err != nil { + diags.Add(diag.SeverityLevelCritical, fmt.Sprintf("unable to marshal scrapeConfigs interface{} to yaml: %s", err)) + return nil, diags + } + scrapeConfigs := make([]*prom_config.ScrapeConfig, 0) + err = yaml.Unmarshal(out, &scrapeConfigs) + if err != nil { + diags.Add(diag.SeverityLevelCritical, fmt.Sprintf("unable to unmarshal bytes to []*config.ScrapeConfig: %s", err)) + return nil, diags + } + + // Append the prometheus blocks to the file. prom_sd_processor makes use of + // only the ServiceDiscoveryConfigs and RelabelConfigs from its ScrapeConfigs. + // Other fields are ignored which is poorly designed Static mode config structure + // but correct for the conversion. 
+ targets := []discovery.Target{} + pb := build.NewPrometheusBlocks() + for _, scrapeConfig := range scrapeConfigs { + labelConcat := scrapeConfig.JobName + if label != "" { + labelConcat = label + "_" + scrapeConfig.JobName + } + label, _ := scanner.SanitizeIdentifier(labelConcat) + scrapeTargets := prometheusconvert.AppendServiceDiscoveryConfigs(pb, scrapeConfig.ServiceDiscoveryConfigs, label) + promDiscoveryRelabelExports := prometheus_component.AppendDiscoveryRelabel(pb, scrapeConfig.RelabelConfigs, scrapeTargets, label) + if promDiscoveryRelabelExports != nil { + scrapeTargets = promDiscoveryRelabelExports.Output + } + targets = append(targets, scrapeTargets...) + } + pb.AppendToBody(state.Body()) + + return &otelcol_discovery.Arguments{ + Targets: targets, + OperationType: cfg.OperationType, + PodAssociations: cfg.PodAssociations, + Output: &otelcol.ConsumerArguments{ + Metrics: otelcolconvert.ToTokenizedConsumers(nextMetrics), + Logs: otelcolconvert.ToTokenizedConsumers(nextLogs), + Traces: otelcolconvert.ToTokenizedConsumers(nextTraces), + }, + }, diags +} diff --git a/internal/converter/internal/staticconvert/testdata/traces.river b/internal/converter/internal/staticconvert/testdata/traces.river index 750e89cd5540..716aefad0e3b 100644 --- a/internal/converter/internal/staticconvert/testdata/traces.river +++ b/internal/converter/internal/staticconvert/testdata/traces.river @@ -10,48 +10,63 @@ otelcol.receiver.otlp "default" { output { metrics = [] logs = [] - traces = [otelcol.processor.attributes.default.input] + traces = [otelcol.processor.discovery.default.input] } } -otelcol.processor.attributes "default" { - action { - key = "db.table" - action = "delete" - } +discovery.azure "default_prometheus1" { + subscription_id = "subscription1" - action { - key = "redacted_span" - value = true - action = "upsert" + oauth { + client_id = "client1" + tenant_id = "tenant1" + client_secret = "secret1" } - action { - key = "copy_key" - from_attribute = "key_original" 
- action = "update" + managed_identity { + client_id = "client1" } +} - action { - key = "account_id" - value = 2245 - action = "insert" +discovery.lightsail "default_prometheus1" { + region = "us-east-1" + access_key = "YOUR_ACCESS_KEY" + secret_key = "YOUR_SECRET_KEY" + port = 8080 +} + +discovery.relabel "default_prometheus1" { + targets = concat( + discovery.azure.default_prometheus1.targets, + discovery.lightsail.default_prometheus1.targets, + ) + + rule { + source_labels = ["__address1__"] + target_label = "__param_target1" } - action { - key = "account_password" - action = "delete" + rule { + source_labels = ["__address2__"] + target_label = "__param_target2" } +} - action { - key = "account_email" - action = "hash" +otelcol.processor.discovery "default" { + targets = discovery.relabel.default_prometheus1.output + pod_associations = [] + + output { + metrics = [] + logs = [] + traces = [otelcol.processor.attributes.default.input] } +} +otelcol.processor.attributes "default" { action { - key = "http.status_code" - converted_type = "int" - action = "convert" + key = "db.table" + action = "delete" } output { @@ -66,202 +81,6 @@ otelcol.processor.tail_sampling "default" { name = "test-policy-1" type = "always_sample" } - - policy { - name = "test-policy-2" - type = "latency" - - latency { - threshold_ms = 5000 - } - } - - policy { - name = "test-policy-3" - type = "numeric_attribute" - - numeric_attribute { - key = "key1" - min_value = 50 - max_value = 100 - } - } - - policy { - name = "test-policy-4" - type = "probabilistic" - - probabilistic { - sampling_percentage = 10 - } - } - - policy { - name = "test-policy-5" - type = "status_code" - - status_code { - status_codes = ["ERROR", "UNSET"] - } - } - - policy { - name = "test-policy-6" - type = "string_attribute" - - string_attribute { - key = "key2" - values = ["value1", "value2"] - } - } - - policy { - name = "test-policy-7" - type = "string_attribute" - - string_attribute { - key = "key2" - values = 
["value1", "val*"] - enabled_regex_matching = true - cache_max_size = 10 - } - } - - policy { - name = "test-policy-8" - type = "rate_limiting" - - rate_limiting { - spans_per_second = 35 - } - } - - policy { - name = "test-policy-9" - type = "string_attribute" - - string_attribute { - key = "http.url" - values = ["\\/health", "\\/metrics"] - enabled_regex_matching = true - invert_match = true - } - } - - policy { - name = "test-policy-10" - type = "span_count" - - span_count { - min_spans = 2 - max_spans = 20 - } - } - - policy { - name = "test-policy-11" - type = "trace_state" - - trace_state { - key = "key3" - values = ["value1", "value2"] - } - } - - policy { - name = "test-policy-12" - type = "boolean_attribute" - - boolean_attribute { - key = "key4" - value = true - } - } - - policy { - name = "test-policy-11" - type = "ottl_condition" - - ottl_condition { - error_mode = "ignore" - span = ["attributes[\"test_attr_key_1\"] == \"test_attr_val_1\"", "attributes[\"test_attr_key_2\"] != \"test_attr_val_1\""] - spanevent = ["name != \"test_span_event_name\"", "attributes[\"test_event_attr_key_2\"] != \"test_event_attr_val_1\""] - } - } - - policy { - name = "and-policy-1" - type = "and" - - and { - and_sub_policy { - name = "test-and-policy-1" - type = "numeric_attribute" - - numeric_attribute { - key = "key1" - min_value = 50 - max_value = 100 - } - } - - and_sub_policy { - name = "test-and-policy-2" - type = "string_attribute" - - string_attribute { - key = "key2" - values = ["value1", "value2"] - } - } - } - } - - policy { - name = "composite-policy-1" - type = "composite" - - composite { - max_total_spans_per_second = 1000 - policy_order = ["test-composite-policy-1", "test-composite-policy-2", "test-composite-policy-3"] - - composite_sub_policy { - name = "test-composite-policy-1" - type = "numeric_attribute" - - numeric_attribute { - key = "key1" - min_value = 50 - max_value = 100 - } - } - - composite_sub_policy { - name = "test-composite-policy-2" - type = 
"string_attribute" - - string_attribute { - key = "key2" - values = ["value1", "value2"] - } - } - - composite_sub_policy { - name = "test-composite-policy-3" - type = "always_sample" - } - - rate_allocation { - policy = "test-composite-policy-1" - percent = 50 - } - - rate_allocation { - policy = "test-composite-policy-2" - percent = 25 - } - } - } decision_wait = "5s" output { diff --git a/internal/converter/internal/staticconvert/testdata/traces.yaml b/internal/converter/internal/staticconvert/testdata/traces.yaml index f5e2fffec4fe..2553da25200a 100644 --- a/internal/converter/internal/staticconvert/testdata/traces.yaml +++ b/internal/converter/internal/staticconvert/testdata/traces.yaml @@ -10,6 +10,23 @@ traces: - endpoint: http://localhost:1234/write automatic_logging: backend: "stdout" + scrape_configs: + - job_name: "prometheus1" + azure_sd_configs: + - subscription_id: "subscription1" + tenant_id: "tenant1" + client_id: "client1" + client_secret: "secret1" + lightsail_sd_configs: + - region: 'us-east-1' + access_key: 'YOUR_ACCESS_KEY' + secret_key: 'YOUR_SECRET_KEY' + port: 8080 + relabel_configs: + - source_labels: [__address1__] + target_label: __param_target1 + - source_labels: [__address2__] + target_label: __param_target2 tail_sampling: policies: [ @@ -17,150 +34,8 @@ traces: name: test-policy-1, type: always_sample }, - { - name: test-policy-2, - type: latency, - latency: {threshold_ms: 5000} - }, - { - name: test-policy-3, - type: numeric_attribute, - numeric_attribute: {key: key1, min_value: 50, max_value: 100} - }, - { - name: test-policy-4, - type: probabilistic, - probabilistic: {sampling_percentage: 10} - }, - { - name: test-policy-5, - type: status_code, - status_code: {status_codes: [ERROR, UNSET]} - }, - { - name: test-policy-6, - type: string_attribute, - string_attribute: {key: key2, values: [value1, value2]} - }, - { - name: test-policy-7, - type: string_attribute, - string_attribute: {key: key2, values: [value1, val*], 
enabled_regex_matching: true, cache_max_size: 10} - }, - { - name: test-policy-8, - type: rate_limiting, - rate_limiting: {spans_per_second: 35} - }, - { - name: test-policy-9, - type: string_attribute, - string_attribute: {key: http.url, values: [\/health, \/metrics], enabled_regex_matching: true, invert_match: true} - }, - { - name: test-policy-10, - type: span_count, - span_count: {min_spans: 2, max_spans: 20} - }, - { - name: test-policy-11, - type: trace_state, - trace_state: { key: key3, values: [value1, value2] } - }, - { - name: test-policy-12, - type: boolean_attribute, - boolean_attribute: {key: key4, value: true} - }, - { - name: test-policy-11, - type: ottl_condition, - ottl_condition: { - error_mode: ignore, - span: [ - "attributes[\"test_attr_key_1\"] == \"test_attr_val_1\"", - "attributes[\"test_attr_key_2\"] != \"test_attr_val_1\"", - ], - spanevent: [ - "name != \"test_span_event_name\"", - "attributes[\"test_event_attr_key_2\"] != \"test_event_attr_val_1\"", - ] - } - }, - { - name: and-policy-1, - type: and, - and: { - and_sub_policy: - [ - { - name: test-and-policy-1, - type: numeric_attribute, - numeric_attribute: { key: key1, min_value: 50, max_value: 100 } - }, - { - name: test-and-policy-2, - type: string_attribute, - string_attribute: { key: key2, values: [ value1, value2 ] } - }, - ] - } - }, - { - name: composite-policy-1, - type: composite, - composite: - { - max_total_spans_per_second: 1000, - policy_order: [test-composite-policy-1, test-composite-policy-2, test-composite-policy-3], - composite_sub_policy: - [ - { - name: test-composite-policy-1, - type: numeric_attribute, - numeric_attribute: {key: key1, min_value: 50, max_value: 100} - }, - { - name: test-composite-policy-2, - type: string_attribute, - string_attribute: {key: key2, values: [value1, value2]} - }, - { - name: test-composite-policy-3, - type: always_sample - } - ], - rate_allocation: - [ - { - policy: test-composite-policy-1, - percent: 50 - }, - { - policy: 
test-composite-policy-2, - percent: 25 - } - ] - } - }, - ] + ] attributes: actions: - key: db.table - action: delete - - key: redacted_span - value: true - action: upsert - - key: copy_key - from_attribute: key_original - action: update - - key: account_id - value: 2245 - action: insert - - key: account_password - action: delete - - key: account_email - action: hash - - key: http.status_code - action: convert - converted_type: int \ No newline at end of file + action: delete \ No newline at end of file From 59e7451d24573e935785a1c40c86dff3a18605d1 Mon Sep 17 00:00:00 2001 From: mattdurham Date: Tue, 26 Mar 2024 14:56:31 -0400 Subject: [PATCH 14/83] Add support for changing over bookmark paths using the legacy bookmak path (#6653) * Add support for changing over bookmark paths using the legacy bookmark path * PR feedback * simplify --- CHANGELOG.md | 2 + .../components/loki.source.windowsevent.md | 30 +++++++------ .../loki/source/windowsevent/arguments.go | 1 + .../source/windowsevent/component_test.go | 44 +++++++++++++++++++ .../source/windowsevent/component_windows.go | 44 ++++++++++++++----- .../internal/build/windows_events.go | 2 +- .../testdata/windowsevents.river | 2 +- .../testdata/windowsevents_relabel.river | 2 +- .../staticconvert/testdata/sanitize.river | 4 +- 9 files changed, 101 insertions(+), 30 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d9e5763e426d..02d1b094fc09 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -30,6 +30,8 @@ Main (unreleased) - Increased the alert interval and renamed the `ClusterSplitBrain` alert to `ClusterNodeCountMismatch` in the Grafana Agent Mixin to better match the alert conditions. (@thampiotr) +- Add conversion from static to flow mode for `loki.source.windowsevent` via `legacy_bookmark_path`. 
(@mattdurham) + ### Features - Added a new CLI flag `--stability.level` which defines the minimum stability diff --git a/docs/sources/flow/reference/components/loki.source.windowsevent.md b/docs/sources/flow/reference/components/loki.source.windowsevent.md index 522e9e683e54..58bc3431bd9b 100644 --- a/docs/sources/flow/reference/components/loki.source.windowsevent.md +++ b/docs/sources/flow/reference/components/loki.source.windowsevent.md @@ -32,19 +32,20 @@ log entries to the list of receivers passed in `forward_to`. `loki.source.windowsevent` supports the following arguments: -Name | Type | Description | Default | Required ------------------------- |----------------------|--------------------------------------------------------------------------------|----------------------------| -------- -`locale` | `number` | Locale ID for event rendering. 0 default is Windows Locale. | `0` | no -`eventlog_name` | `string` | Event log to read from. | | See below. -`xpath_query` | `string` | Event log to read from. | `"*"` | See below. -`bookmark_path` | `string` | Keeps position in event log. | `"DATA_PATH/bookmark.xml"` | no -`poll_interval` | `duration` | How often to poll the event log. | `"3s"` | no -`exclude_event_data` | `bool` | Exclude event data. | `false` | no -`exclude_user_data` | `bool` | Exclude user data. | `false` | no -`exclude_event_message` | `bool` | Exclude the human-friendly event message. | `false` | no -`use_incoming_timestamp` | `bool` | When false, assigns the current timestamp to the log when it was processed. | `false` | no -`forward_to` | `list(LogsReceiver)` | List of receivers to send log entries to. | | yes -`labels` | `map(string)` | The labels to associate with incoming logs. 
| | no +Name | Type | Description | Default | Required +------------------------ |----------------------|-----------------------------------------------------------------------------|----------------------------| -------- +`locale` | `number` | Locale ID for event rendering. 0 default is Windows Locale. | `0` | no +`eventlog_name` | `string` | Event log to read from. | | See below. +`xpath_query` | `string` | Event log to read from. | `"*"` | See below. +`bookmark_path` | `string` | Keeps position in event log. | `"DATA_PATH/bookmark.xml"` | no +`poll_interval` | `duration` | How often to poll the event log. | `"3s"` | no +`exclude_event_data` | `bool` | Exclude event data. | `false` | no +`exclude_user_data` | `bool` | Exclude user data. | `false` | no +`exclude_event_message` | `bool` | Exclude the human-friendly event message. | `false` | no +`use_incoming_timestamp` | `bool` | When false, assigns the current timestamp to the log when it was processed. | `false` | no +`forward_to` | `list(LogsReceiver)` | List of receivers to send log entries to. | | yes +`labels` | `map(string)` | The labels to associate with incoming logs. | | no +`legacy_bookmark_path` | `string` | The location of the Grafana Agent Static bookmark path. | `` | no > **NOTE**: `eventlog_name` is required if `xpath_query` does not specify the event log. @@ -52,6 +53,9 @@ Name | Type | Description > When using the XML form you can specify `event_log` in the `xpath_query`. > If using short form, you must define `eventlog_name`. +{{< admonition type="note" >}} +`legacy_bookmark_path` is used to convert the Grafana Agent Static to a {{< param "PRODUCT_NAME" >}} bookmark, if `bookmark_path` does not exist. 
+{{< /admonition >}} ## Component health diff --git a/internal/component/loki/source/windowsevent/arguments.go b/internal/component/loki/source/windowsevent/arguments.go index 8c26ed1e1ee3..61e6e6956fc8 100644 --- a/internal/component/loki/source/windowsevent/arguments.go +++ b/internal/component/loki/source/windowsevent/arguments.go @@ -24,6 +24,7 @@ type Arguments struct { UseIncomingTimestamp bool `river:"use_incoming_timestamp,attr,optional"` ForwardTo []loki.LogsReceiver `river:"forward_to,attr"` Labels map[string]string `river:"labels,attr,optional"` + LegacyBookmarkPath string `river:"legacy_bookmark_path,attr,optional"` } func defaultArgs() Arguments { diff --git a/internal/component/loki/source/windowsevent/component_test.go b/internal/component/loki/source/windowsevent/component_test.go index 49488e0313bd..f0559b91d0a3 100644 --- a/internal/component/loki/source/windowsevent/component_test.go +++ b/internal/component/loki/source/windowsevent/component_test.go @@ -4,6 +4,8 @@ package windowsevent import ( "context" + "os" + "path/filepath" "strings" "testing" "time" @@ -69,3 +71,45 @@ func TestEventLogger(t *testing.T) { cancelFunc() require.True(t, found) } + +func TestLegacyBookmarkConversion(t *testing.T) { + + bookmarkText := ` + +` + var loggerName = "agent_test" + //Setup Windows Event log with the log source name and logging levels + _ = eventlog.InstallAsEventCreate(loggerName, eventlog.Info|eventlog.Warning|eventlog.Error) + dataPath := t.TempDir() + legacyPath := filepath.Join(t.TempDir(), "legacy.xml") + err := os.WriteFile(legacyPath, []byte(bookmarkText), 644) + require.NoError(t, err) + rec := loki.NewLogsReceiver() + c, err := New(component.Options{ + ID: "loki.source.windowsevent.test", + Logger: util.TestFlowLogger(t), + DataPath: dataPath, + OnStateChange: func(e component.Exports) { + + }, + Registerer: prometheus.DefaultRegisterer, + Tracer: nil, + }, Arguments{ + Locale: 0, + EventLogName: "Application", + XPathQuery: "*", + 
BookmarkPath: "", + PollInterval: 10 * time.Millisecond, + ExcludeEventData: false, + ExcludeUserdata: false, + ExcludeEventMessage: false, + UseIncomingTimestamp: false, + ForwardTo: []loki.LogsReceiver{rec}, + Labels: map[string]string{"job": "windows"}, + LegacyBookmarkPath: legacyPath, + }) + require.NoError(t, err) + dd, _ := os.ReadFile(c.args.BookmarkPath) + // The New function will convert via calling update. + require.True(t, string(dd) == bookmarkText) +} diff --git a/internal/component/loki/source/windowsevent/component_windows.go b/internal/component/loki/source/windowsevent/component_windows.go index 58a4a6223417..e94fcd6a2a58 100644 --- a/internal/component/loki/source/windowsevent/component_windows.go +++ b/internal/component/loki/source/windowsevent/component_windows.go @@ -110,18 +110,9 @@ func (c *Component) Update(args component.Arguments) error { newArgs.BookmarkPath = path.Join(c.opts.DataPath, "bookmark.xml") } - // Create the bookmark file and parent folders if they don't exist. - _, err := os.Stat(newArgs.BookmarkPath) - if os.IsNotExist(err) { - err := os.MkdirAll(path.Dir(newArgs.BookmarkPath), 644) - if err != nil { - return err - } - f, err := os.Create(newArgs.BookmarkPath) - if err != nil { - return err - } - _ = f.Close() + err := createBookmark(newArgs) + if err != nil { + return err } winTarget, err := NewTarget(c.opts.Logger, c.handle, nil, convertConfig(newArgs)) @@ -142,6 +133,35 @@ func (c *Component) Update(args component.Arguments) error { return nil } +// createBookmark will create bookmark for saving the positions file. +// If LegacyBookMark is specified and the BookmarkPath doesnt exist it will copy over the legacy bookmark to the new path. 
+func createBookmark(args Arguments) error { + _, err := os.Stat(args.BookmarkPath) + // If the bookmark path does not exist then we should check to see if + if os.IsNotExist(err) { + err = os.MkdirAll(path.Dir(args.BookmarkPath), 644) + if err != nil { + return err + } + // Check to see if we need to convert the legacy bookmark to a new one. + // This will only trigger if the new bookmark path does not exist and legacy does. + _, legacyErr := os.Stat(args.LegacyBookmarkPath) + if legacyErr == nil { + bb, readErr := os.ReadFile(args.LegacyBookmarkPath) + if readErr == nil { + _ = os.WriteFile(args.BookmarkPath, bb, 644) + } + } else { + f, err := os.Create(args.BookmarkPath) + if err != nil { + return err + } + _ = f.Close() + } + } + return nil +} + func convertConfig(arg Arguments) *scrapeconfig.WindowsEventsTargetConfig { return &scrapeconfig.WindowsEventsTargetConfig{ Locale: uint32(arg.Locale), diff --git a/internal/converter/internal/promtailconvert/internal/build/windows_events.go b/internal/converter/internal/promtailconvert/internal/build/windows_events.go index e1784535b3fe..d67113d17150 100644 --- a/internal/converter/internal/promtailconvert/internal/build/windows_events.go +++ b/internal/converter/internal/promtailconvert/internal/build/windows_events.go @@ -15,7 +15,7 @@ func (s *ScrapeConfigBuilder) AppendWindowsEventsConfig() { Locale: int(winCfg.Locale), EventLogName: winCfg.EventlogName, XPathQuery: winCfg.Query, - BookmarkPath: winCfg.BookmarkPath, + LegacyBookmarkPath: winCfg.BookmarkPath, PollInterval: winCfg.PollInterval, ExcludeEventData: winCfg.ExcludeEventData, ExcludeUserdata: winCfg.ExcludeUserData, diff --git a/internal/converter/internal/promtailconvert/testdata/windowsevents.river b/internal/converter/internal/promtailconvert/testdata/windowsevents.river index 640ef93d8080..2aef42887759 100644 --- a/internal/converter/internal/promtailconvert/testdata/windowsevents.river +++ 
b/internal/converter/internal/promtailconvert/testdata/windowsevents.river @@ -2,7 +2,6 @@ loki.source.windowsevent "fun" { locale = 1033 eventlog_name = "Application" xpath_query = "Event/System[EventID=1000]" - bookmark_path = "C:/Users/username/Desktop/bookmark.txt" poll_interval = "10s" exclude_event_data = true exclude_user_data = true @@ -13,6 +12,7 @@ loki.source.windowsevent "fun" { host = "localhost", job = "windows", } + legacy_bookmark_path = "C:/Users/username/Desktop/bookmark.txt" } loki.write "default" { diff --git a/internal/converter/internal/promtailconvert/testdata/windowsevents_relabel.river b/internal/converter/internal/promtailconvert/testdata/windowsevents_relabel.river index 39d28dea7a67..c35203273fc5 100644 --- a/internal/converter/internal/promtailconvert/testdata/windowsevents_relabel.river +++ b/internal/converter/internal/promtailconvert/testdata/windowsevents_relabel.river @@ -11,7 +11,6 @@ loki.source.windowsevent "fun" { locale = 1033 eventlog_name = "Application" xpath_query = "Event/System[EventID=1000]" - bookmark_path = "C:/Users/username/Desktop/bookmark.txt" poll_interval = "10s" exclude_event_data = true exclude_user_data = true @@ -19,6 +18,7 @@ loki.source.windowsevent "fun" { use_incoming_timestamp = true forward_to = [loki.relabel.fun.receiver] labels = {} + legacy_bookmark_path = "C:/Users/username/Desktop/bookmark.txt" } loki.write "default" { diff --git a/internal/converter/internal/staticconvert/testdata/sanitize.river b/internal/converter/internal/staticconvert/testdata/sanitize.river index 7b2bf9ef8ffb..193657f9a2c6 100644 --- a/internal/converter/internal/staticconvert/testdata/sanitize.river +++ b/internal/converter/internal/staticconvert/testdata/sanitize.river @@ -42,13 +42,13 @@ loki.relabel "logs_integrations_integrations_windows_exporter_application" { loki.source.windowsevent "logs_integrations_integrations_windows_exporter_application" { eventlog_name = "Application" xpath_query = "" - bookmark_path = 
"C:\\grafana_test\\Grafana Agent\\bookmarks.xml" poll_interval = "0s" use_incoming_timestamp = true forward_to = [loki.relabel.logs_integrations_integrations_windows_exporter_application.receiver] labels = { job = "integrations/windows_exporter", } + legacy_bookmark_path = "C:\\grafana_test\\Grafana Agent\\bookmarks.xml" } loki.process "logs_integrations_integrations_windows_exporter_system" { @@ -79,13 +79,13 @@ loki.relabel "logs_integrations_integrations_windows_exporter_system" { loki.source.windowsevent "logs_integrations_integrations_windows_exporter_system" { eventlog_name = "System" xpath_query = "" - bookmark_path = "C:\\grafana_test\\Grafana Agent\\bookmarks.xml" poll_interval = "0s" use_incoming_timestamp = true forward_to = [loki.relabel.logs_integrations_integrations_windows_exporter_system.receiver] labels = { job = "integrations/windows_exporter", } + legacy_bookmark_path = "C:\\grafana_test\\Grafana Agent\\bookmarks.xml" } loki.write "logs_integrations" { From e908a2f1a3d58206b6688c08b8e19eced7abf38e Mon Sep 17 00:00:00 2001 From: mattdurham Date: Wed, 27 Mar 2024 05:07:53 -0400 Subject: [PATCH 15/83] Finish otel upgrade (#6772) * handle otel feedback * minor documentation and test updates for updating otel to 0.96.0 * remove todos --- CHANGELOG.md | 2 +- .../components/otelcol.connector.spanmetrics.md | 4 ++-- .../components/otelcol.exporter.otlphttp.md | 4 +++- .../reference/components/extract-field-block.md | 14 +++++++------- .../otelcol/exporter/otlphttp/otlphttp.go | 3 ++- internal/static/traces/config.go | 10 +++------- internal/static/traces/config_test.go | 12 ++---------- 7 files changed, 20 insertions(+), 29 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 02d1b094fc09..575b1eee41b1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -85,7 +85,7 @@ https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/26115 * [otelcol.connector.spanmetrics] Add a new `events` metric. 
https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/27451 * [otelcol.connector.spanmetrics] A new `max_per_data_point` argument for exemplar generation. - * https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/22620 + * https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/29242 * [ottl] Add IsBool Converter https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/27897 * [otelcol.processor.tail_sampling] Optimize memory performance of tailsamplingprocessor diff --git a/docs/sources/flow/reference/components/otelcol.connector.spanmetrics.md b/docs/sources/flow/reference/components/otelcol.connector.spanmetrics.md index 2ef2ff67ab39..bfbcec6a129a 100644 --- a/docs/sources/flow/reference/components/otelcol.connector.spanmetrics.md +++ b/docs/sources/flow/reference/components/otelcol.connector.spanmetrics.md @@ -75,7 +75,7 @@ otelcol.connector.spanmetrics "LABEL" { | `metrics_flush_interval` | `duration` | How often to flush generated metrics. | `"15s"` | no | | `namespace` | `string` | Metric namespace. | `""` | no | | `resource_metrics_cache_size` | `number` | The size of the cache holding metrics for a service. | `1000` | no | -| `resource_metrics_key_attributes` | `list(string)` | Span resources with the same values for those resource attributes are aggregated together. | `[]` | no | +| `resource_metrics_key_attributes` | `list(string)` | Limits the resource attributes used to create the metrics. | `[]` | no | Adjusting `dimensions_cache_size` can improve the Agent process' memory usage. @@ -808,4 +808,4 @@ Connecting some components may not be sensible or components may require further Refer to the linked documentation for more details. 
{{< /admonition >}} - \ No newline at end of file + diff --git a/docs/sources/flow/reference/components/otelcol.exporter.otlphttp.md b/docs/sources/flow/reference/components/otelcol.exporter.otlphttp.md index e0d4dda49477..bf743e547912 100644 --- a/docs/sources/flow/reference/components/otelcol.exporter.otlphttp.md +++ b/docs/sources/flow/reference/components/otelcol.exporter.otlphttp.md @@ -100,6 +100,8 @@ Before enabling this option, consider whether changes to idle connection setting If `http2_ping_timeout` is unset or set to `0s`, it will default to `15s`. +If `http2_read_idle_timeout` is unset or set to `0s`, then no health check will be performed. + {{< docs/shared lookup="flow/reference/components/otelcol-compression-field.md" source="agent" version="" >}} ### tls block @@ -177,4 +179,4 @@ Connecting some components may not be sensible or components may require further Refer to the linked documentation for more details. {{< /admonition >}} - \ No newline at end of file + diff --git a/docs/sources/shared/flow/reference/components/extract-field-block.md b/docs/sources/shared/flow/reference/components/extract-field-block.md index 207f2bc6053d..51ae70ed2a63 100644 --- a/docs/sources/shared/flow/reference/components/extract-field-block.md +++ b/docs/sources/shared/flow/reference/components/extract-field-block.md @@ -12,13 +12,13 @@ headless: true The following attributes are supported: -Name | Type | Description | Default | Required -------------|----------|----------------------------------------------------------------------------------------|---------|--------- -`from` | `string` | The source of the labels or annotations. Allowed values are `pod` and `namespace`. | `pod` | no -`key_regex` | `string` | A regular expression used to extract a key that matches the regular expression. | `""` | no -`key` | `string` | The annotation or label name. This key must exactly match an annotation or label name. 
| `""` | no -`regex` | `string` | An optional field used to extract a sub-string from a complex field value. | `""` | no -`tag_name` | `string` | The name of the resource attribute added to logs, metrics, or spans. | `""` | no +Name | Type | Description | Default | Required +------------|----------|-----------------------------------------------------------------------------------------------|---------|--------- +`from` | `string` | The source of the labels or annotations. Allowed values are `pod`, `namespace`, and `node`. | `pod` | no +`key_regex` | `string` | A regular expression used to extract a key that matches the regular expression. | `""` | no +`key` | `string` | The annotation or label name. This key must exactly match an annotation or label name. | `""` | no +`regex` | `string` | An optional field used to extract a sub-string from a complex field value. | `""` | no +`tag_name` | `string` | The name of the resource attribute added to logs, metrics, or spans. | `""` | no When you don't specify the `tag_name`, a default tag name is used with the format: * `k8s.pod.annotations.` diff --git a/internal/component/otelcol/exporter/otlphttp/otlphttp.go b/internal/component/otelcol/exporter/otlphttp/otlphttp.go index 66292554068d..667bc394ab4d 100644 --- a/internal/component/otelcol/exporter/otlphttp/otlphttp.go +++ b/internal/component/otelcol/exporter/otlphttp/otlphttp.go @@ -3,6 +3,7 @@ package otlphttp import ( "errors" + "fmt" "time" "github.com/grafana/agent/internal/component" @@ -100,7 +101,7 @@ func (args *Arguments) Validate() error { return errors.New("at least one endpoint must be specified") } if args.Encoding != EncodingProto && args.Encoding != EncodingJSON { - return errors.New("invalid encoding type") + return fmt.Errorf("invalid encoding type %s", args.Encoding) } return nil } diff --git a/internal/static/traces/config.go b/internal/static/traces/config.go index 80de470ed812..93364ea5ee37 100644 --- a/internal/static/traces/config.go +++ 
b/internal/static/traces/config.go @@ -168,9 +168,7 @@ type InstanceConfig struct { // Hides the value of the string during marshaling. type SecretString string -var ( - _ yaml.Marshaler = (*SecretString)(nil) -) +var _ yaml.Marshaler = (*SecretString)(nil) // MarshalYAML implements yaml.Marshaler. func (s SecretString) MarshalYAML() (interface{}, error) { @@ -182,9 +180,7 @@ func (s SecretString) MarshalYAML() (interface{}, error) { // ReceiverMap will marshal as YAML to the text "". type JaegerRemoteSamplingConfig map[string]interface{} -var ( - _ yaml.Marshaler = (*JaegerRemoteSamplingConfig)(nil) -) +var _ yaml.Marshaler = (*JaegerRemoteSamplingConfig)(nil) // MarshalYAML implements yaml.Marshaler. func (jrsm JaegerRemoteSamplingConfig) MarshalYAML() (interface{}, error) { @@ -1024,7 +1020,7 @@ func otelcolConfigFromStringMap(otelMapStructure map[string]interface{}, factori func validateConfigFromFactories(factories otelcol.Factories) error { var errs error - //TODO: We should not use componenttest in non-test code + // TODO: We should not use componenttest in non-test code for _, factory := range factories.Receivers { errs = multierr.Append(errs, componenttest.CheckConfigStruct(factory.CreateDefaultConfig())) } diff --git a/internal/static/traces/config_test.go b/internal/static/traces/config_test.go index 687eaf30de58..b7b2af4946e0 100644 --- a/internal/static/traces/config_test.go +++ b/internal/static/traces/config_test.go @@ -1159,8 +1159,8 @@ remote_write: scopes: ["api.metrics"] timeout: 2s `, - //TODO(ptodev): Look into why we need to add a "cipher_suites: []" explicitly. - // The expected config should unmarshal it to [], but instead it sets it to nil. + // The tls.ciphersuites would appear that it should output nothing or nil but during the conversion otelcolConfigFromStringMap the otelcol.Unmarshal converts the nil array to a blank one []. This is specifically used + // in confmap.go that transforms nil arrays into [] on the zeroSliceHookFunc. 
expectedConfig: ` receivers: push_receiver: {} @@ -1221,8 +1221,6 @@ remote_write: min_version: 1.3 reload_interval: 1h `, - //TODO(ptodev): Look into why we need to add a "cipher_suites: []" explicitly. - // The expected config should unmarshal it to [], but instead it sets it to nil. expectedConfig: ` receivers: push_receiver: {} @@ -1292,8 +1290,6 @@ remote_write: max_version: 1.2 reload_interval: 1h `, - //TODO: Look into why we need to add a "cipher_suites: []" explicitly. - // The expected config should unmarshal it to [], but instead it sets it to nil. expectedConfig: ` receivers: push_receiver: {} @@ -1361,8 +1357,6 @@ remote_write: scopes: ["api.metrics"] timeout: 2s `, - //TODO: Look into why we need to add a "cipher_suites: []" explicitly. - // The expected config should unmarshal it to [], but instead it sets it to nil. expectedConfig: ` receivers: push_receiver: {} @@ -1431,8 +1425,6 @@ remote_write: tls: insecure: true `, - //TODO: Look into why we need to add a "cipher_suites: []" explicitly. - // The expected config should unmarshal it to [], but instead it sets it to nil. 
expectedConfig: ` receivers: push_receiver: {} From ef2f5f368cef313931630bc053de49abd3300b16 Mon Sep 17 00:00:00 2001 From: Paschalis Tsilias Date: Wed, 27 Mar 2024 16:03:19 +0200 Subject: [PATCH 16/83] otelcolconvert: support converting servicegraph connector (#6681) * otelcolconvert: support converting servicegraph connector Signed-off-by: Paschalis Tsilias * fix for otel convert connectors Signed-off-by: erikbaranowski <39704712+erikbaranowski@users.noreply.github.com> --------- Signed-off-by: Paschalis Tsilias Signed-off-by: erikbaranowski <39704712+erikbaranowski@users.noreply.github.com> Co-authored-by: Erik Baranowski <39704712+erikbaranowski@users.noreply.github.com> --- .../converter_servicegraphconnector.go | 67 +++++++++++++++++++ .../internal/otelcolconvert/pipeline_group.go | 19 ++++++ .../testdata/inconsistent_processor.river | 4 +- .../testdata/servicegraph.river | 37 ++++++++++ .../otelcolconvert/testdata/servicegraph.yaml | 35 ++++++++++ .../otelcolconvert/testdata/spanmetrics.river | 2 +- .../testdata/spanmetrics_full.river | 2 +- 7 files changed, 162 insertions(+), 4 deletions(-) create mode 100644 internal/converter/internal/otelcolconvert/converter_servicegraphconnector.go create mode 100644 internal/converter/internal/otelcolconvert/testdata/servicegraph.river create mode 100644 internal/converter/internal/otelcolconvert/testdata/servicegraph.yaml diff --git a/internal/converter/internal/otelcolconvert/converter_servicegraphconnector.go b/internal/converter/internal/otelcolconvert/converter_servicegraphconnector.go new file mode 100644 index 000000000000..1211e6184347 --- /dev/null +++ b/internal/converter/internal/otelcolconvert/converter_servicegraphconnector.go @@ -0,0 +1,67 @@ +package otelcolconvert + +import ( + "fmt" + + "github.com/grafana/agent/internal/component/otelcol" + "github.com/grafana/agent/internal/component/otelcol/connector/servicegraph" + "github.com/grafana/agent/internal/converter/diag" + 
"github.com/grafana/agent/internal/converter/internal/common" + "github.com/open-telemetry/opentelemetry-collector-contrib/connector/servicegraphconnector" + "go.opentelemetry.io/collector/component" +) + +func init() { + converters = append(converters, servicegraphConnectorConverter{}) +} + +type servicegraphConnectorConverter struct{} + +func (servicegraphConnectorConverter) Factory() component.Factory { + return servicegraphconnector.NewFactory() +} + +func (servicegraphConnectorConverter) InputComponentName() string { + return "otelcol.connector.servicegraph" +} + +func (servicegraphConnectorConverter) ConvertAndAppend(state *State, id component.InstanceID, cfg component.Config) diag.Diagnostics { + var diags diag.Diagnostics + + label := state.FlowComponentLabel() + + args := toServicegraphConnector(state, id, cfg.(*servicegraphconnector.Config)) + block := common.NewBlockWithOverride([]string{"otelcol", "connector", "servicegraph"}, label, args) + + diags.Add( + diag.SeverityLevelInfo, + fmt.Sprintf("Converted %s into %s", StringifyInstanceID(id), StringifyBlock(block)), + ) + + state.Body().AppendBlock(block) + return diags +} + +func toServicegraphConnector(state *State, id component.InstanceID, cfg *servicegraphconnector.Config) *servicegraph.Arguments { + if cfg == nil { + return nil + } + var ( + nextMetrics = state.Next(id, component.DataTypeMetrics) + ) + + return &servicegraph.Arguments{ + LatencyHistogramBuckets: cfg.LatencyHistogramBuckets, + Dimensions: cfg.Dimensions, + Store: servicegraph.StoreConfig{ + MaxItems: cfg.Store.MaxItems, + TTL: cfg.Store.TTL, + }, + CacheLoop: cfg.CacheLoop, + StoreExpirationLoop: cfg.StoreExpirationLoop, + MetricsFlushInterval: cfg.MetricsFlushInterval, + Output: &otelcol.ConsumerArguments{ + Metrics: ToTokenizedConsumers(nextMetrics), + }, + } +} diff --git a/internal/converter/internal/otelcolconvert/pipeline_group.go b/internal/converter/internal/otelcolconvert/pipeline_group.go index 
3c6f278aad38..b1b185fecbe3 100644 --- a/internal/converter/internal/otelcolconvert/pipeline_group.go +++ b/internal/converter/internal/otelcolconvert/pipeline_group.go @@ -175,6 +175,11 @@ func (group pipelineGroup) NextTraces(fromID component.InstanceID) []component.I func nextInPipeline(pipeline *pipelines.PipelineConfig, fromID component.InstanceID) []component.InstanceID { switch fromID.Kind { case component.KindReceiver, component.KindConnector: + // Validate this receiver is part of the pipeline. + if !findInComponentIds(fromID, pipeline.Receivers) { + return nil + } + // Receivers and connectors should either send to the first processor // if one exists or to every exporter otherwise. if len(pipeline.Processors) > 0 { @@ -183,6 +188,11 @@ func nextInPipeline(pipeline *pipelines.PipelineConfig, fromID component.Instanc return toComponentInstanceIDs(component.KindExporter, pipeline.Exporters) case component.KindProcessor: + // Validate this processor is part of the pipeline. + if !findInComponentIds(fromID, pipeline.Processors) { + return nil + } + // Processors should send to the next processor if one exists or to every // exporter otherwise. 
processorIndex := slices.Index(pipeline.Processors, fromID.ID) @@ -217,3 +227,12 @@ func toComponentInstanceIDs(kind component.Kind, ids []component.ID) []component return res } + +func findInComponentIds(fromID component.InstanceID, componentIDs []component.ID) bool { + for _, id := range componentIDs { + if fromID.ID == id { + return true + } + } + return false +} diff --git a/internal/converter/internal/otelcolconvert/testdata/inconsistent_processor.river b/internal/converter/internal/otelcolconvert/testdata/inconsistent_processor.river index 141cfb77953f..199156c6ac43 100644 --- a/internal/converter/internal/otelcolconvert/testdata/inconsistent_processor.river +++ b/internal/converter/internal/otelcolconvert/testdata/inconsistent_processor.river @@ -12,9 +12,9 @@ otelcol.receiver.otlp "default" { otelcol.processor.batch "default" { output { - metrics = [otelcol.exporter.otlp.default.input] + metrics = [] logs = [otelcol.exporter.otlp.default.input] - traces = [otelcol.exporter.otlp.default.input] + traces = [] } } diff --git a/internal/converter/internal/otelcolconvert/testdata/servicegraph.river b/internal/converter/internal/otelcolconvert/testdata/servicegraph.river new file mode 100644 index 000000000000..96aeccfaff97 --- /dev/null +++ b/internal/converter/internal/otelcolconvert/testdata/servicegraph.river @@ -0,0 +1,37 @@ +otelcol.receiver.otlp "default" { + grpc { } + + http { } + + output { + metrics = [] + logs = [] + traces = [otelcol.connector.servicegraph.default.input] + } +} + +otelcol.exporter.otlp "default" { + sending_queue { + queue_size = 5000 + } + + client { + endpoint = "database:4317" + } +} + +otelcol.connector.servicegraph "default" { + latency_histogram_buckets = ["100ms", "250ms", "1s", "5s", "10s"] + dimensions = ["dimension-1", "dimension-2"] + + store { + max_items = 10 + ttl = "1s" + } + cache_loop = "2m0s" + store_expiration_loop = "5s" + + output { + metrics = [otelcol.exporter.otlp.default.input] + } +} diff --git 
a/internal/converter/internal/otelcolconvert/testdata/servicegraph.yaml b/internal/converter/internal/otelcolconvert/testdata/servicegraph.yaml new file mode 100644 index 000000000000..f033a9022d6f --- /dev/null +++ b/internal/converter/internal/otelcolconvert/testdata/servicegraph.yaml @@ -0,0 +1,35 @@ +receivers: + otlp: + protocols: + grpc: + http: + +connectors: + servicegraph: + latency_histogram_buckets: [100ms, 250ms, 1s, 5s, 10s] + dimensions: + - dimension-1 + - dimension-2 + store: + ttl: 1s + max_items: 10 + cache_loop: 2m + store_expiration_loop: 5s + +exporters: + otlp: + # Our defaults have drifted from upstream, so we explicitly set our + # defaults below (balancer_name and queue_size). + endpoint: database:4317 + balancer_name: pick_first + sending_queue: + queue_size: 5000 + +service: + pipelines: + traces: + receivers: [otlp] + exporters: [servicegraph] + metrics: + receivers: [servicegraph] + exporters: [otlp] diff --git a/internal/converter/internal/otelcolconvert/testdata/spanmetrics.river b/internal/converter/internal/otelcolconvert/testdata/spanmetrics.river index 655a859f8e92..7cb422b5b096 100644 --- a/internal/converter/internal/otelcolconvert/testdata/spanmetrics.river +++ b/internal/converter/internal/otelcolconvert/testdata/spanmetrics.river @@ -4,7 +4,7 @@ otelcol.receiver.otlp "default" { http { } output { - metrics = [otelcol.exporter.otlp.default.input] + metrics = [] logs = [otelcol.exporter.otlp.default.input] traces = [otelcol.connector.spanmetrics.default.input] } diff --git a/internal/converter/internal/otelcolconvert/testdata/spanmetrics_full.river b/internal/converter/internal/otelcolconvert/testdata/spanmetrics_full.river index ad20c4553708..f2806e3d12df 100644 --- a/internal/converter/internal/otelcolconvert/testdata/spanmetrics_full.river +++ b/internal/converter/internal/otelcolconvert/testdata/spanmetrics_full.river @@ -4,7 +4,7 @@ otelcol.receiver.otlp "default_traces" { http { } output { - metrics = 
[otelcol.exporter.otlp.default_metrics_backend.input] + metrics = [] logs = [] traces = [otelcol.exporter.otlp.default_traces_backend.input, otelcol.connector.spanmetrics.default.input] } From 5e037ebddd871e36e8a619b141e5e4987f13f954 Mon Sep 17 00:00:00 2001 From: mattdurham Date: Wed, 27 Mar 2024 10:14:54 -0400 Subject: [PATCH 17/83] Add initial support for converting static mode positions file. (#6765) * Add initial support for converting static mode positions file. * add more doc * clarify docs * remove extra text * add full unit test * fix linting * fix linting * Update docs/sources/flow/reference/components/loki.source.file.md Co-authored-by: Clayton Cornell <131809008+clayton-cornell@users.noreply.github.com> * Update docs/sources/flow/reference/components/loki.source.file.md Co-authored-by: Clayton Cornell <131809008+clayton-cornell@users.noreply.github.com> * instead of using os.temp use the test harness * fix go.mod issue * pr feedback * pr feedback * fix spelling mistake * osx hates cleaning up files --------- Co-authored-by: Clayton Cornell <131809008+clayton-cornell@users.noreply.github.com> --- CHANGELOG.md | 2 + .../reference/components/loki.source.file.md | 22 ++- .../common/loki/positions/positions.go | 63 +++++++ .../common/loki/positions/positions_test.go | 80 +++++++++ internal/component/loki/source/file/file.go | 14 +- .../loki/source/file/legacy_file_test.go | 165 ++++++++++++++++++ 6 files changed, 335 insertions(+), 11 deletions(-) create mode 100644 internal/component/loki/source/file/legacy_file_test.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 575b1eee41b1..b544365fef3a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -32,6 +32,8 @@ Main (unreleased) - Add conversion from static to flow mode for `loki.source.windowsevent` via `legacy_bookmark_path`. (@mattdurham) +- Add ability to convert static mode positions file to `loki.source.file` compatible via `legacy_positions_file` argument. 
(@mattdurham) + ### Features - Added a new CLI flag `--stability.level` which defines the minimum stability diff --git a/docs/sources/flow/reference/components/loki.source.file.md b/docs/sources/flow/reference/components/loki.source.file.md index 683b66cabfc4..8fe8354850bf 100644 --- a/docs/sources/flow/reference/components/loki.source.file.md +++ b/docs/sources/flow/reference/components/loki.source.file.md @@ -37,12 +37,13 @@ log entries to the list of receivers passed in `forward_to`. `loki.source.file` supports the following arguments: -| Name | Type | Description | Default | Required | -| --------------- | -------------------- | ----------------------------------------------------------------------------------- | ------- | -------- | -| `targets` | `list(map(string))` | List of files to read from. | | yes | -| `forward_to` | `list(LogsReceiver)` | List of receivers to send log entries to. | | yes | -| `encoding` | `string` | The encoding to convert from when reading files. | `""` | no | -| `tail_from_end` | `bool` | Whether a log file should be tailed from the end if a stored position is not found. | `false` | no | +| Name | Type | Description | Default | Required | +| ------------------------| -------------------- | ----------------------------------------------------------------------------------- | ------- | -------- | +| `targets` | `list(map(string))` | List of files to read from. | | yes | +| `forward_to` | `list(LogsReceiver)` | List of receivers to send log entries to. | | yes | +| `encoding` | `string` | The encoding to convert from when reading files. | `""` | no | +| `tail_from_end` | `bool` | Whether a log file should be tailed from the end if a stored position is not found. | `false` | no | +| `legacy_positions_file` | `string` | Allows conversion from legacy positions file. | `""` | no | The `encoding` argument must be a valid [IANA encoding][] name. If not set, it defaults to UTF-8. @@ -50,6 +51,15 @@ defaults to UTF-8. 
You can use the `tail_from_end` argument when you want to tail a large file without reading its entire content. When set to true, only new logs will be read, ignoring the existing ones. + +{{< admonition type="note" >}} +The `legacy_positions_file` argument is used when you are transitioning from legacy. The legacy positions file will be rewritten into the new format. +This operation will only occur if the new positions file does not exist and the `legacy_positions_file` is valid. +Once converted successfully, the `legacy_positions_file` will be deleted. +If you add any labels before `loki.source.file`, then the positions file will conversion will not work. +The legacy positions file did not have a concept of labels in the positions file, so the conversion assumes no labels. +{{< /admonition >}} + ## Blocks The following blocks are supported inside the definition of `loki.source.file`: diff --git a/internal/component/common/loki/positions/positions.go b/internal/component/common/loki/positions/positions.go index 0201d39e4748..8733cbe06a8d 100644 --- a/internal/component/common/loki/positions/positions.go +++ b/internal/component/common/loki/positions/positions.go @@ -96,6 +96,69 @@ type Positions interface { Stop() } +// LegacyFile is the copied struct for the static mode positions file. +type LegacyFile struct { + Positions map[string]string `yaml:"positions"` +} + +// ConvertLegacyPositionsFile will convert the legacy positions file to the new format if: +// 1. There is no file at the newpath +// 2. There is a file at the legacy path and that it is valid yaml +// If all the above is true then the legacy file will be deleted. +func ConvertLegacyPositionsFile(legacyPath, newPath string, l log.Logger) { + legacyPositions := readLegacyFile(legacyPath, l) + // LegacyPositions did not exist or was invalid so return. + if legacyPositions == nil { + return + } + fi, err := os.Stat(newPath) + // If the newpath exists, then don't convert. 
+ if err == nil && fi.Size() > 0 { + level.Info(l).Log("msg", "new positions file already exists", "path", newPath) + return + } + + newPositions := make(map[Entry]string) + for k, v := range legacyPositions.Positions { + newPositions[Entry{ + Path: k, + // This is a map of labels but must be an empty map since that is what the new positions expects. + Labels: "{}", + }] = v + } + // After conversion remove the file. + err = writePositionFile(newPath, newPositions) + if err != nil { + level.Error(l).Log("msg", "error writing new positions file from legacy", "path", newPath, "error", err) + } + + // Finally remove the old path. + _ = os.Remove(legacyPath) +} + +func readLegacyFile(legacyPath string, l log.Logger) *LegacyFile { + oldFile, err := os.Stat(legacyPath) + // If the old file doesn't exist or is empty then return early. + if err != nil || oldFile.Size() == 0 { + level.Info(l).Log("msg", "no legacy positions file found", "path", legacyPath) + return nil + } + // Try to read and parse the legacy file. + clean := filepath.Clean(legacyPath) + buf, err := os.ReadFile(clean) + if err != nil { + level.Error(l).Log("msg", "error reading legacy positions file", "path", clean, "error", err) + return nil + } + legacyPositions := &LegacyFile{} + err = yaml.UnmarshalStrict(buf, legacyPositions) + if err != nil { + level.Error(l).Log("msg", "error parsing legacy positions file", "path", clean, "error", err) + return nil + } + return legacyPositions +} + // New makes a new Positions. 
func New(logger log.Logger, cfg Config) (Positions, error) { positionData, err := readPositionsFile(cfg, logger) diff --git a/internal/component/common/loki/positions/positions_test.go b/internal/component/common/loki/positions/positions_test.go index ab62cfcc5fd8..0357e618a500 100644 --- a/internal/component/common/loki/positions/positions_test.go +++ b/internal/component/common/loki/positions/positions_test.go @@ -6,12 +6,14 @@ package positions import ( "os" + "path/filepath" "strings" "testing" "time" "github.com/go-kit/log" "github.com/stretchr/testify/require" + "gopkg.in/yaml.v2" util_log "github.com/grafana/loki/pkg/util/log" ) @@ -37,6 +39,84 @@ func tempFilename(t *testing.T) string { return name } +func writeLegacy(t *testing.T, tmpDir string) string { + legacy := filepath.Join(tmpDir, "legacy") + legacyPositions := LegacyFile{ + Positions: make(map[string]string), + } + // Filename and byte offset + legacyPositions.Positions["/tmp/random.log"] = "17623" + buf, err := yaml.Marshal(legacyPositions) + require.NoError(t, err) + err = os.WriteFile(legacy, buf, 0644) + require.NoError(t, err) + return legacy +} + +func TestLegacyConversion(t *testing.T) { + tmpDir := t.TempDir() + legacy := writeLegacy(t, tmpDir) + positionsPath := filepath.Join(tmpDir, "positions") + ConvertLegacyPositionsFile(legacy, positionsPath, log.NewNopLogger()) + ps, err := readPositionsFile(Config{ + PositionsFile: positionsPath, + }, log.NewNopLogger()) + require.NoError(t, err) + require.Len(t, ps, 1) + for k, v := range ps { + require.True(t, k.Path == "/tmp/random.log") + require.True(t, v == "17623") + } + // Ensure old file is deleted. + _, err = os.Stat(legacy) + require.True(t, os.IsNotExist(err)) +} + +func TestLegacyConversionWithNewFile(t *testing.T) { + tmpDir := t.TempDir() + legacy := writeLegacy(t, tmpDir) + // Write a new file. 
+ positionsPath := filepath.Join(tmpDir, "positions") + err := writePositionFile(positionsPath, map[Entry]string{ + {Path: "/tmp/newrandom.log", Labels: ""}: "100", + }) + require.NoError(t, err) + + // In this state nothing should be overwritten. + ConvertLegacyPositionsFile(legacy, positionsPath, log.NewNopLogger()) + ps, err := readPositionsFile(Config{ + PositionsFile: positionsPath, + }, log.NewNopLogger()) + require.NoError(t, err) + require.Len(t, ps, 1) + for k, v := range ps { + require.True(t, k.Path == "/tmp/newrandom.log") + require.True(t, v == "100") + } +} + +func TestLegacyConversionWithNoLegacyFile(t *testing.T) { + tmpDir := t.TempDir() + legacy := filepath.Join(tmpDir, "legacy") + positionsPath := filepath.Join(tmpDir, "positions") + // Write a new file. + err := writePositionFile(positionsPath, map[Entry]string{ + {Path: "/tmp/newrandom.log", Labels: ""}: "100", + }) + require.NoError(t, err) + + ConvertLegacyPositionsFile(legacy, positionsPath, log.NewNopLogger()) + ps, err := readPositionsFile(Config{ + PositionsFile: positionsPath, + }, log.NewNopLogger()) + require.NoError(t, err) + require.Len(t, ps, 1) + for k, v := range ps { + require.True(t, k.Path == "/tmp/newrandom.log") + require.True(t, v == "100") + } +} + func TestReadPositionsOK(t *testing.T) { temp := tempFilename(t) defer func() { diff --git a/internal/component/loki/source/file/file.go b/internal/component/loki/source/file/file.go index 63598afe86c1..311abcaa6c88 100644 --- a/internal/component/loki/source/file/file.go +++ b/internal/component/loki/source/file/file.go @@ -45,6 +45,7 @@ type Arguments struct { DecompressionConfig DecompressionConfig `river:"decompression,block,optional"` FileWatch FileWatch `river:"file_watch,block,optional"` TailFromEnd bool `river:"tail_from_end,attr,optional"` + LegacyPositionsFile string `river:"legacy_positions_file,attr,optional"` } type FileWatch struct { @@ -70,9 +71,7 @@ type DecompressionConfig struct { Format CompressionFormat 
`river:"format,attr"` } -var ( - _ component.Component = (*Component)(nil) -) +var _ component.Component = (*Component)(nil) // Component implements the loki.source.file component. type Component struct { @@ -95,9 +94,14 @@ func New(o component.Options, args Arguments) (*Component, error) { if err != nil && !os.IsExist(err) { return nil, err } + newPositionsPath := filepath.Join(o.DataPath, "positions.yml") + // Check to see if we can convert the legacy positions file to the new format. + if args.LegacyPositionsFile != "" { + positions.ConvertLegacyPositionsFile(args.LegacyPositionsFile, newPositionsPath, o.Logger) + } positionsFile, err := positions.New(o.Logger, positions.Config{ SyncPeriod: 10 * time.Second, - PositionsFile: filepath.Join(o.DataPath, "positions.yml"), + PositionsFile: newPositionsPath, IgnoreInvalidYaml: false, ReadOnly: false, }) @@ -197,7 +201,7 @@ func (c *Component) Update(args component.Arguments) error { for _, target := range newArgs.Targets { path := target[pathLabel] - var labels = make(model.LabelSet) + labels := make(model.LabelSet) for k, v := range target { if strings.HasPrefix(k, model.ReservedLabelPrefix) { continue diff --git a/internal/component/loki/source/file/legacy_file_test.go b/internal/component/loki/source/file/legacy_file_test.go new file mode 100644 index 000000000000..c73d46f432d7 --- /dev/null +++ b/internal/component/loki/source/file/legacy_file_test.go @@ -0,0 +1,165 @@ +//go:build linux + +package file + +import ( + "context" + "fmt" + "net" + "net/http" + "os" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/grafana/agent/internal/component/common/loki/positions" + "github.com/grafana/loki/pkg/loghttp/push" + "gopkg.in/yaml.v2" + + logkit "github.com/go-kit/log" + "github.com/grafana/agent/internal/component" + "github.com/grafana/agent/internal/component/common/loki" + "github.com/grafana/agent/internal/component/discovery" + 
"github.com/grafana/agent/internal/static/logs" + "github.com/grafana/agent/internal/util" + "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/require" + "go.uber.org/atomic" +) + +// TestFullEndToEndLegacyConversion tests using static mode to tail a log, then converting and then writing new entries and ensuring +// the previously tailed data does not come through. +func TestFullEndToEndLegacyConversion(t *testing.T) { + // + // Create a temporary file to tail + // + positionsDir := t.TempDir() + tmpFileDir := t.TempDir() + + tmpFile, err := os.CreateTemp(tmpFileDir, "*.log") + require.NoError(t, err) + + // + // Listen for push requests and pass them through to a channel + // + lis, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, lis.Close()) + }) + var written atomic.Bool + go func() { + _ = http.Serve(lis, http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + _, err := push.ParseRequest(logkit.NewNopLogger(), "user_id", r, nil, nil, push.ParseLokiRequest) + require.NoError(t, err) + _, _ = rw.Write(nil) + written.Store(true) + })) + }() + + // + // Launch Loki so it starts tailing the file and writes to our server. + // + cfgText := util.Untab(fmt.Sprintf(` +positions_directory: %s +configs: +- name: default + clients: + - url: http://%s/loki/api/v1/push + batchwait: 50ms + batchsize: 1 + scrape_configs: + - job_name: system + static_configs: + - targets: [localhost] + labels: + job: test + __path__: %s + `, positionsDir, lis.Addr().String(), tmpFile.Name())) + + var cfg logs.Config + dec := yaml.NewDecoder(strings.NewReader(cfgText)) + dec.SetStrict(true) + require.NoError(t, dec.Decode(&cfg)) + require.NoError(t, cfg.ApplyDefaults()) + logger := logkit.NewNopLogger() + l, err := logs.New(prometheus.NewRegistry(), &cfg, logger, false) + require.NoError(t, err) + // + // Write a log line and wait for it to come through. 
+ // + fmt.Fprintf(tmpFile, "Hello, world!\n") + // Ensure we have written and received the data. + require.Eventually(t, func() bool { + return written.Load() + }, 10*time.Second, 100*time.Millisecond) + + // Stop the tailer. + l.Stop() + + // Read the legacy file so we can ensure it is the same as the the new file logically. + oldPositions, err := os.ReadFile(filepath.Join(positionsDir, "default.yml")) + require.NoError(t, err) + + legacy := positions.LegacyFile{Positions: make(map[string]string)} + err = yaml.UnmarshalStrict(oldPositions, legacy) + require.NoError(t, err) + + opts := component.Options{ + Logger: util.TestFlowLogger(t), + Registerer: prometheus.NewRegistry(), + OnStateChange: func(e component.Exports) {}, + DataPath: t.TempDir(), + } + + // Create the Logs receiver component which will convert the legacy positions file into the new format. + ch1 := loki.NewLogsReceiver() + args := Arguments{} + args.LegacyPositionsFile = filepath.Join(positionsDir, "default.yml") + args.ForwardTo = []loki.LogsReceiver{ch1} + args.Targets = []discovery.Target{ + { + "__path__": tmpFile.Name(), + }, + } + + // New will do the actual conversion + c, err := New(opts, args) + require.NoError(t, err) + require.NotNil(t, c) + + // Before we actually start the component check to see if the legacy file and new file are logically the same. + buf, err := os.ReadFile(filepath.Join(opts.DataPath, "positions.yml")) + require.NoError(t, err) + newPositions := positions.File{Positions: make(map[positions.Entry]string)} + err = yaml.UnmarshalStrict(buf, newPositions) + require.NoError(t, err) + + require.Len(t, newPositions.Positions, 1) + require.Len(t, legacy.Positions, 1) + + for k, v := range newPositions.Positions { + val, found := legacy.Positions[k.Path] + require.True(t, found) + require.True(t, val == v) + } + + // Write some data, we should see this data but not old data. 
+ fmt.Fprintf(tmpFile, "new thing!\n") + ctx := context.Background() + ctx, cncl := context.WithTimeout(ctx, 10*time.Second) + defer cncl() + go func() { + runErr := c.Run(ctx) + require.NoError(t, runErr) + }() + + // Check for the new data ensuring that we do not see the old data. + require.Eventually(t, func() bool { + entry := <-ch1.Chan() + // We don't want to reread the hello world so if something went wrong then the conversion didnt work. + require.False(t, strings.Contains(entry.Line, "Hello, world!")) + return strings.Contains(entry.Line, "new thing!") + }, 5*time.Second, 100*time.Millisecond) +} From 88780b8ee7ee74234ca5d86535a3ab1d553c098e Mon Sep 17 00:00:00 2001 From: Erik Baranowski <39704712+erikbaranowski@users.noreply.github.com> Date: Fri, 29 Mar 2024 11:05:08 -0400 Subject: [PATCH 18/83] wire in the otel converter and create a migration guide (#6790) * wire in the otel converter and create a migration guide Signed-off-by: erikbaranowski <39704712+erikbaranowski@users.noreply.github.com> * Apply suggestions from code review Co-authored-by: Clayton Cornell <131809008+clayton-cornell@users.noreply.github.com> --------- Signed-off-by: erikbaranowski <39704712+erikbaranowski@users.noreply.github.com> Co-authored-by: Clayton Cornell <131809008+clayton-cornell@users.noreply.github.com> --- CHANGELOG.md | 2 + docs/sources/flow/reference/cli/convert.md | 33 ++- docs/sources/flow/reference/cli/run.md | 2 +- .../flow/tasks/migrate/from-operator.md | 2 +- .../flow/tasks/migrate/from-otelcol.md | 274 ++++++++++++++++++ internal/converter/converter.go | 6 + 6 files changed, 310 insertions(+), 9 deletions(-) create mode 100644 docs/sources/flow/tasks/migrate/from-otelcol.md diff --git a/CHANGELOG.md b/CHANGELOG.md index b544365fef3a..2409fd3568bc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -34,6 +34,8 @@ Main (unreleased) - Add ability to convert static mode positions file to `loki.source.file` compatible via `legacy_positions_file` argument. 
(@mattdurham) +- Added support for `otelcol` configuration conversion in `grafana-agent convert` and `grafana-agent run` commands. (@rfratto, @erikbaranowski, @tpaschalis, @hainenber) + ### Features - Added a new CLI flag `--stability.level` which defines the minimum stability diff --git a/docs/sources/flow/reference/cli/convert.md b/docs/sources/flow/reference/cli/convert.md index 793acfb2031a..73761440eec4 100644 --- a/docs/sources/flow/reference/cli/convert.md +++ b/docs/sources/flow/reference/cli/convert.md @@ -48,12 +48,13 @@ The following flags are supported: * `--report`, `-r`: The filepath and filename where the report is written. -* `--source-format`, `-f`: Required. The format of the source file. Supported formats: [prometheus], [promtail], [static]. +* `--source-format`, `-f`: Required. The format of the source file. Supported formats: [otelcol], [prometheus], [promtail], [static]. * `--bypass-errors`, `-b`: Enable bypassing errors when converting. * `--extra-args`, `e`: Extra arguments from the original format used by the converter. +[otelcol]: #opentelemetry-collector [prometheus]: #prometheus [promtail]: #promtail [static]: #static @@ -72,6 +73,17 @@ Errors are defined as non-critical issues identified during the conversion where an output can still be generated. These can be bypassed using the `--bypass-errors` flag. +### OpenTelemetry Collector + +You can use the `--source-format=otelcol` to convert the source configuration from an [OpenTelemetry Collector](https://opentelemetry.io/docs/collector/configuration/) to a {{< param "PRODUCT_NAME" >}} configuration. + +Many OpenTelemetry Collector components are supported. +Review the `otelcol.*` component information in the [Component Reference][] for more information about `otelcol` components that you can convert. +If a source configuration has unsupported features, you will receive [errors] when you convert it to a flow configuration. 
+The converter raises warnings for configuration options that may require your attention. + +Refer to [Migrate from OpenTelemetry Collector to {{< param "PRODUCT_NAME" >}}][migrate-otelcol] for a detailed migration guide. + ### Prometheus Using the `--source-format=prometheus` will convert the source configuration from @@ -86,7 +98,7 @@ This includes Prometheus features such as and many supported *_sd_configs. Unsupported features in a source configuration result in [errors]. -Refer to [Migrate from Prometheus to {{< param "PRODUCT_NAME" >}}]({{< relref "../../tasks/migrate/from-prometheus/" >}}) for a detailed migration guide. +Refer to [Migrate from Prometheus to {{< param "PRODUCT_NAME" >}}][migrate-prometheus] for a detailed migration guide. ### Promtail @@ -100,21 +112,28 @@ are supported and can be converted to {{< param "PRODUCT_NAME" >}} configuration If you have unsupported features in a source configuration, you will receive [errors] when you convert to a flow configuration. The converter will also raise warnings for configuration options that may require your attention. -Refer to [Migrate from Promtail to {{< param "PRODUCT_NAME" >}}]({{< relref "../../tasks/migrate/from-promtail/" >}}) for a detailed migration guide. +Refer to [Migrate from Promtail to {{< param "PRODUCT_NAME" >}}][migrate-promtail] for a detailed migration guide. ### Static Using the `--source-format=static` will convert the source configuration from a -[Grafana Agent Static]({{< relref "../../../static" >}}) configuration to a {{< param "PRODUCT_NAME" >}} configuration. +[Grafana Agent Static][] configuration to a {{< param "PRODUCT_NAME" >}} configuration. Include `--extra-args` for passing additional command line flags from the original format. 
For example, `--extra-args="-enable-features=integrations-next"` will convert a Grafana Agent Static -[integrations-next]({{< relref "../../../static/configuration/integrations/integrations-next/" >}}) -configuration to a {{< param "PRODUCT_NAME" >}} configuration. You can also +[integrations-next][] configuration to a {{< param "PRODUCT_NAME" >}} configuration. You can also expand environment variables with `--extra-args="-config.expand-env"`. You can combine multiple command line flags with a space between each flag, for example `--extra-args="-enable-features=integrations-next -config.expand-env"`. If you have unsupported features in a Static mode source configuration, you will receive [errors][] when you convert to a Flow mode configuration. The converter will also raise warnings for configuration options that may require your attention. -Refer to [Migrate from Grafana Agent Static to {{< param "PRODUCT_NAME" >}}]({{< relref "../../tasks/migrate/from-static/" >}}) for a detailed migration guide. +Refer to [Migrate from Grafana Agent Static to {{< param "PRODUCT_NAME" >}}][migrate-static] for a detailed migration guide. + +[Component Reference]: ../../components/ +[migrate-otelcol]: ../../../tasks/migrate/from-otelcol/ +[migrate-prometheus]: ../../../tasks/migrate/from-prometheus/ +[migrate-promtail]: ../../../tasks/migrate/from-promtail/ +[migrate-static]: ../../../tasks/migrate/from-static/ +[Grafana Agent Static]: ../../../../static/ +[integrations-next]: ../../../../static/configuration/integrations/integrations-next/ \ No newline at end of file diff --git a/docs/sources/flow/reference/cli/run.md b/docs/sources/flow/reference/cli/run.md index c81c13f7374d..71c17837eb46 100644 --- a/docs/sources/flow/reference/cli/run.md +++ b/docs/sources/flow/reference/cli/run.md @@ -61,7 +61,7 @@ The following flags are supported: * `--cluster.advertise-interfaces`: List of interfaces used to infer an address to advertise. 
Set to `all` to use all available network interfaces on the system. (default `"eth0,en0"`). * `--cluster.max-join-peers`: Number of peers to join from the discovered set (default `5`). * `--cluster.name`: Name to prevent nodes without this identifier from joining the cluster (default `""`). -* `--config.format`: The format of the source file. Supported formats: `flow`, `prometheus`, `promtail`, `static` (default `"flow"`). +* `--config.format`: The format of the source file. Supported formats: `flow`, `otelcol`, `prometheus`, `promtail`, `static` (default `"flow"`). * `--config.bypass-conversion-errors`: Enable bypassing errors when converting (default `false`). * `--config.extra-args`: Extra arguments from the original format used by the converter. diff --git a/docs/sources/flow/tasks/migrate/from-operator.md b/docs/sources/flow/tasks/migrate/from-operator.md index c40f7d640533..1b19ac260a94 100644 --- a/docs/sources/flow/tasks/migrate/from-operator.md +++ b/docs/sources/flow/tasks/migrate/from-operator.md @@ -10,7 +10,7 @@ canonical: https://grafana.com/docs/agent/latest/flow/tasks/migrate/from-operato description: Migrate from Grafana Agent Operator to Grafana Agent Flow menuTitle: Migrate from Operator title: Migrate from Grafana Agent Operator to Grafana Agent Flow -weight: 320 +weight: 300 --- # Migrate from Grafana Agent Operator to {{% param "PRODUCT_NAME" %}} diff --git a/docs/sources/flow/tasks/migrate/from-otelcol.md b/docs/sources/flow/tasks/migrate/from-otelcol.md new file mode 100644 index 000000000000..60b9fa44e4a2 --- /dev/null +++ b/docs/sources/flow/tasks/migrate/from-otelcol.md @@ -0,0 +1,274 @@ +--- +canonical: https://grafana.com/docs/agent/latest/flow/tasks/migrate/from-otelcol/ +description: Learn how to migrate from OpenTelemetry Collector to Grafana Agent Flow +menuTitle: Migrate from OpenTelemetry Collector +title: Migrate from OpenTelemetry Collector to Grafana Agent Flow +weight: 310 +--- + +# Migrate from OpenTelemetry Collector to {{% 
param "PRODUCT_NAME" %}} + +The built-in {{< param "PRODUCT_ROOT_NAME" >}} convert command can migrate your [OpenTelemetry Collector][] configuration to a {{< param "PRODUCT_NAME" >}} configuration. + +This topic describes how to: + +* Convert an OpenTelemetry Collector configuration to a {{< param "PRODUCT_NAME" >}} configuration. +* Run an OpenTelemetry Collector configuration natively using {{< param "PRODUCT_NAME" >}}. + +## Components used in this topic + +* [otelcol.receiver.otlp][] +* [otelcol.processor.memory_limiter][] +* [otelcol.exporter.otlp][] + +## Before you begin + +* You must have an existing OpenTelemetry Collector configuration. +* You must have a set of OpenTelemetry Collector applications ready to push telemetry data to {{< param "PRODUCT_NAME" >}}. +* You must be familiar with the concept of [Components][] in {{< param "PRODUCT_NAME" >}}. + +## Convert an OpenTelemetry Collector configuration + +To fully migrate your configuration from [OpenTelemetry Collector] to {{< param "PRODUCT_NAME" >}}, you must convert your OpenTelemetry Collector configuration into a {{< param "PRODUCT_NAME" >}} configuration. +This conversion will enable you to take full advantage of the many additional features available in {{< param "PRODUCT_NAME" >}}. + +> In this task, you will use the [convert][] CLI command to output a {{< param "PRODUCT_NAME" >}} +> configuration from a OpenTelemetry Collector configuration. + +1. Open a terminal window and run the following command. + + {{< code >}} + + ```static-binary + AGENT_MODE=flow grafana-agent convert --source-format=otelcol --output= + ``` + + ```flow-binary + grafana-agent-flow convert --source-format=otelcol --output= + ``` + + {{< /code >}} + + Replace the following: + + - _``_: The full path to the OpenTelemetry Collector configuration. + - _``_: The full path to output the {{< param "PRODUCT_NAME" >}} configuration. + +1. 
[Run][] {{< param "PRODUCT_NAME" >}} using the new {{< param "PRODUCT_NAME" >}} configuration from _``_: + +### Debugging + +1. If the `convert` command can't convert an OpenTelemetry Collector configuration, diagnostic information is sent to `stderr`.\ + You can bypass any non-critical issues and output the {{< param "PRODUCT_NAME" >}} configuration using a best-effort conversion by including the `--bypass-errors` flag. + + {{< admonition type="caution" >}} + If you bypass the errors, the behavior of the converted configuration may not match the original OpenTelemetry Collector configuration. + Make sure you fully test the converted configuration before using it in a production environment. + {{< /admonition >}} + + {{< code >}} + + ```static-binary + AGENT_MODE=flow grafana-agent convert --source-format=otelcol --bypass-errors --output= + ``` + + ```flow-binary + grafana-agent-flow convert --source-format=otelcol --bypass-errors --output= + ``` + + {{< /code >}} + + Replace the following: + + - _``_: The full path to the OpenTelemetry Collector configuration. + - _``_: The full path to output the {{< param "PRODUCT_NAME" >}} configuration. + +1. You can also output a diagnostic report by including the `--report` flag. + + {{< code >}} + + ```static-binary + AGENT_MODE=flow grafana-agent convert --source-format=otelcol --report= --output= + ``` + + ```flow-binary + grafana-agent-flow convert --source-format=otelcol --report= --output= + ``` + + {{< /code >}} + + Replace the following: + + - _``_: The full path to the OpenTelemetry Collector configuration. + - _``_: The full path to output the {{< param "PRODUCT_NAME" >}} configuration. + - _``_: The output path for the report. 
+ + Using the [example][] OpenTelemetry Collector configuration below, the diagnostic report provides the following information: + + ```plaintext + (Info) Converted receiver/otlp into otelcol.receiver.otlp.default + (Info) Converted processor/memory_limiter into otelcol.processor.memory_limiter.default + (Info) Converted exporter/otlp into otelcol.exporter.otlp.default + + A configuration file was generated successfully. + ``` + +## Run an OpenTelemetry Collector configuration + +If you’re not ready to completely switch to a {{< param "PRODUCT_NAME" >}} configuration, you can run {{< param "PRODUCT_ROOT_NAME" >}} using your existing OpenTelemetry Collector configuration. +The `--config.format=otelcol` flag tells {{< param "PRODUCT_ROOT_NAME" >}} to convert your OpenTelemetry Collector configuration to a {{< param "PRODUCT_NAME" >}} configuration and load it directly without saving the new configuration. +This allows you to try {{< param "PRODUCT_NAME" >}} without modifying your existing OpenTelemetry Collector configuration infrastructure. + +> In this task, you will use the [run][] CLI command to run {{< param "PRODUCT_NAME" >}} +> using an OpenTelemetry Collector configuration. + +[Run][] {{< param "PRODUCT_NAME" >}} and include the command line flag `--config.format=otelcol`. +Your configuration file must be a valid OpenTelemetry Collector configuration file rather than a {{< param "PRODUCT_NAME" >}} configuration file. + +### Debugging + +1. You can follow the convert CLI command [debugging][] instructions to generate a diagnostic report. + +1. Refer to the {{< param "PRODUCT_NAME" >}} [Debugging][DebuggingUI] for more information about a running {{< param "PRODUCT_NAME" >}}. + +1. If your OpenTelemetry Collector configuration can't be converted and loaded directly into {{< param "PRODUCT_NAME" >}}, diagnostic information is sent to `stderr`. 
+ You can bypass any non-critical issues and start the Agent by including the `--config.bypass-conversion-errors` flag in addition to `--config.format=otelcol`. + + {{< admonition type="caution" >}} + If you bypass the errors, the behavior of the converted configuration may not match the original Prometheus configuration. + Do not use this flag in a production environment. + {{< /admonition >}} + +## Example + +This example demonstrates converting an OpenTelemetry Collector configuration file to a {{< param "PRODUCT_NAME" >}} configuration file. + +The following OpenTelemetry Collector configuration file provides the input for the conversion. + +```yaml +receivers: + otlp: + protocols: + grpc: + http: + +exporters: + otlp: + endpoint: database:4317 + +processors: + memory_limiter: + limit_percentage: 90 + check_interval: 1s + + +service: + pipelines: + metrics: + receivers: [otlp] + processors: [memory_limiter] + exporters: [otlp] + logs: + receivers: [otlp] + processors: [memory_limiter] + exporters: [otlp] + traces: + receivers: [otlp] + processors: [memory_limiter] + exporters: [otlp] +``` + +The convert command takes the YAML file as input and outputs a [River][] file. + +{{< code >}} + +```static-binary +AGENT_MODE=flow grafana-agent convert --source-format=otelcol --output= +``` + +```flow-binary +grafana-agent-flow convert --source-format=otelcol --output= +``` + +{{< /code >}} + +Replace the following: + +- _``_: The full path to the OpenTelemetry Collector configuration. +- _``_: The full path to output the {{< param "PRODUCT_NAME" >}} configuration. 
+ +The new {{< param "PRODUCT_NAME" >}} configuration file looks like this: + +```river +otelcol.receiver.otlp "default" { + grpc { } + + http { } + + output { + metrics = [otelcol.processor.memory_limiter.default.input] + logs = [otelcol.processor.memory_limiter.default.input] + traces = [otelcol.processor.memory_limiter.default.input] + } +} + +otelcol.processor.memory_limiter "default" { + check_interval = "1s" + limit_percentage = 90 + + output { + metrics = [otelcol.exporter.otlp.default.input] + logs = [otelcol.exporter.otlp.default.input] + traces = [otelcol.exporter.otlp.default.input] + } +} + +otelcol.exporter.otlp "default" { + client { + endpoint = "database:4317" + } +} +``` + +## Limitations + +Configuration conversion is done on a best-effort basis. {{< param "PRODUCT_ROOT_NAME" >}} will issue warnings or errors where the conversion can't be performed. + +After the configuration is converted, review the {{< param "PRODUCT_NAME" >}} configuration file created and verify that it's correct before starting to use it in a production environment. + +The following list is specific to the convert command and not {{< param "PRODUCT_NAME" >}}: + +* Components are supported which directly embed upstream OpenTelemetry Collector features. You can get a general idea of which exist in + {{< param "PRODUCT_NAME" >}} for conversion by reviewing the `otelcol.*` components in the [Component Reference][]. + Any additional unsupported features are returned as errors during conversion. +* Check if you are using any extra command line arguments with OpenTelemetry Collector that aren't present in your configuration file. +* Metamonitoring metrics exposed by {{< param "PRODUCT_NAME" >}} usually match OpenTelemetry Collector metamonitoring metrics but will use a different name. + Make sure that you use the new metric names, for example, in your alerts and dashboards queries. 
+* The logs produced by {{< param "PRODUCT_NAME" >}} differ from those produced by OpenTelemetry Collector. +* {{< param "PRODUCT_ROOT_NAME" >}} exposes the {{< param "PRODUCT_NAME" >}} [UI][]. + +[OpenTelemetry Collector]: https://opentelemetry.io/docs/collector/configuration/ +[debugging]: #debugging +[example]: #example + +{{% docs/reference %}} +[otelcol.receiver.otlp]: "/docs/agent/ -> /docs/agent//flow/reference/components/otelcol.receiver.otlp.md" +[otelcol.processor.memory_limiter]: "/docs/agent/ -> /docs/agent//flow/reference/components/otelcol.processor.memory_limiter.md" +[otelcol.exporter.otlp]: "/docs/agent/ -> /docs/agent//flow/reference/components/otelcol.exporter.otlp.md" +[Components]: "/docs/agent/ -> /docs/agent//flow/concepts/components.md" +[Components]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/components.md" +[Component Reference]: "/docs/agent/ -> /docs/agent//flow/reference/components/_index.md" +[Component Reference]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components/_index.md" +[convert]: "/docs/agent/ -> /docs/agent//flow/reference/cli/convert.md" +[convert]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/cli/convert.md" +[run]: "/docs/agent/ -> /docs/agent//flow/reference/cli/run.md" +[run]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/cli/run.md" +[Run]: "/docs/agent/ -> /docs/agent//flow/get-started/run/" +[Run]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/get-started/run/" +[DebuggingUI]: "/docs/agent/ -> /docs/agent//flow/tasks/debug.md" +[DebuggingUI]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/debug.md" +[River]: "/docs/agent/ -> /docs/agent//flow/concepts/config-language/_index.md" +[River]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/concepts/config-language/_index.md" +[UI]: "/docs/agent/ -> 
/docs/agent//flow/tasks/debug#grafana-agent-flow-ui" +[UI]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/debug#grafana-agent-flow-ui" +{{% /docs/reference %}} diff --git a/internal/converter/converter.go b/internal/converter/converter.go index 3b9cf459bbf8..ce2414e8c4ed 100644 --- a/internal/converter/converter.go +++ b/internal/converter/converter.go @@ -6,6 +6,7 @@ import ( "fmt" "github.com/grafana/agent/internal/converter/diag" + "github.com/grafana/agent/internal/converter/internal/otelcolconvert" "github.com/grafana/agent/internal/converter/internal/prometheusconvert" "github.com/grafana/agent/internal/converter/internal/promtailconvert" "github.com/grafana/agent/internal/converter/internal/staticconvert" @@ -15,6 +16,8 @@ import ( type Input string const ( + // InputOtelCol indicates that the input file is an OpenTelemetry Collector YAML file. + InputOtelCol Input = "otelcol" // InputPrometheus indicates that the input file is a prometheus YAML file. InputPrometheus Input = "prometheus" // InputPromtail indicates that the input file is a promtail YAML file. @@ -24,6 +27,7 @@ const ( ) var SupportedFormats = []string{ + string(InputOtelCol), string(InputPrometheus), string(InputPromtail), string(InputStatic), @@ -47,6 +51,8 @@ var SupportedFormats = []string{ // error is returned alongside the resulting config. 
func Convert(in []byte, kind Input, extraArgs []string) ([]byte, diag.Diagnostics) { switch kind { + case InputOtelCol: + return otelcolconvert.Convert(in, extraArgs) case InputPrometheus: return prometheusconvert.Convert(in, extraArgs) case InputPromtail: From f4ab69d587904ef8ae93dfa7316e39390727bce1 Mon Sep 17 00:00:00 2001 From: Erik Baranowski <39704712+erikbaranowski@users.noreply.github.com> Date: Mon, 1 Apr 2024 10:50:03 -0400 Subject: [PATCH 19/83] wire up static traces converter test for all major scenarios and squash a bug along the way (#6801) * wire up static traces converter test for all major scenarios and squash a bug along the way Signed-off-by: erikbaranowski <39704712+erikbaranowski@users.noreply.github.com> --------- Signed-off-by: erikbaranowski <39704712+erikbaranowski@users.noreply.github.com> --- .../internal/build/builder_traces.go | 32 +++++++++---- .../staticconvert/testdata/traces.river | 38 +++++++++++++-- .../staticconvert/testdata/traces.yaml | 48 +++++++++++++++---- 3 files changed, 97 insertions(+), 21 deletions(-) diff --git a/internal/converter/internal/staticconvert/internal/build/builder_traces.go b/internal/converter/internal/staticconvert/internal/build/builder_traces.go index 6762d8e580f2..da2411b9eaa3 100644 --- a/internal/converter/internal/staticconvert/internal/build/builder_traces.go +++ b/internal/converter/internal/staticconvert/internal/build/builder_traces.go @@ -75,13 +75,19 @@ func removeReceiver(otelCfg *otelcol.Config, pipelineType otel_component.Type, r } delete(otelCfg.Receivers, otel_component.NewID(receiverType)) - spr := make([]otel_component.ID, 0, len(otelCfg.Service.Pipelines[otel_component.NewID(pipelineType)].Receivers)-1) - for _, r := range otelCfg.Service.Pipelines[otel_component.NewID(pipelineType)].Receivers { - if r != otel_component.NewID(receiverType) { - spr = append(spr, r) + for ix, p := range otelCfg.Service.Pipelines { + if ix.Type() != pipelineType { + continue + } + + spr := 
make([]otel_component.ID, 0) + for _, r := range p.Receivers { + if r.Type() != receiverType { + spr = append(spr, r) + } } + otelCfg.Service.Pipelines[ix].Receivers = spr } - otelCfg.Service.Pipelines[otel_component.NewID(pipelineType)].Receivers = spr } // removeProcessor removes a processor from the otel config for a specific pipeline type. @@ -91,11 +97,17 @@ func removeProcessor(otelCfg *otelcol.Config, pipelineType otel_component.Type, } delete(otelCfg.Processors, otel_component.NewID(processorType)) - spr := make([]otel_component.ID, 0, len(otelCfg.Service.Pipelines[otel_component.NewID(pipelineType)].Processors)-1) - for _, r := range otelCfg.Service.Pipelines[otel_component.NewID(pipelineType)].Processors { - if r != otel_component.NewID(processorType) { - spr = append(spr, r) + for ix, p := range otelCfg.Service.Pipelines { + if ix.Type() != pipelineType { + continue + } + + spr := make([]otel_component.ID, 0) + for _, r := range p.Processors { + if r.Type() != processorType { + spr = append(spr, r) + } } + otelCfg.Service.Pipelines[ix].Processors = spr } - otelCfg.Service.Pipelines[otel_component.NewID(pipelineType)].Processors = spr } diff --git a/internal/converter/internal/staticconvert/testdata/traces.river b/internal/converter/internal/staticconvert/testdata/traces.river index 716aefad0e3b..85364b105d81 100644 --- a/internal/converter/internal/staticconvert/testdata/traces.river +++ b/internal/converter/internal/staticconvert/testdata/traces.river @@ -1,3 +1,18 @@ +otelcol.extension.jaeger_remote_sampling "default_0" { + grpc { } + + http { } + + source { + remote { + endpoint = "jaeger-collector:14250" + compression = "" + write_buffer_size = "0B" + } + reload_interval = "30s" + } +} + otelcol.receiver.otlp "default" { grpc { include_metadata = true @@ -54,7 +69,8 @@ discovery.relabel "default_prometheus1" { otelcol.processor.discovery "default" { targets = discovery.relabel.default_prometheus1.output - pod_associations = [] + operation_type = 
"insert" + pod_associations = ["ip", "net.host.ip"] output { metrics = [] @@ -84,7 +100,19 @@ otelcol.processor.tail_sampling "default" { decision_wait = "5s" output { - traces = [otelcol.exporter.otlp.default_0.input, otelcol.exporter.logging.default.input] + traces = [otelcol.processor.batch.default.input] + } +} + +otelcol.processor.batch "default" { + timeout = "5s" + send_batch_size = 2048 + send_batch_max_size = 4096 + + output { + metrics = [] + logs = [] + traces = [otelcol.exporter.otlp.default_0.input, otelcol.exporter.logging.default.input] } } @@ -94,7 +122,11 @@ otelcol.exporter.otlp "default_0" { } client { - endpoint = "http://localhost:1234/write" + endpoint = "tempo.example.com:14250" + + tls { + insecure = true + } } } diff --git a/internal/converter/internal/staticconvert/testdata/traces.yaml b/internal/converter/internal/staticconvert/testdata/traces.yaml index 2553da25200a..251f4d24ee56 100644 --- a/internal/converter/internal/staticconvert/testdata/traces.yaml +++ b/internal/converter/internal/staticconvert/testdata/traces.yaml @@ -1,15 +1,24 @@ traces: configs: - name: trace_config + attributes: + actions: + - key: db.table + action: delete + batch: + timeout: 5s + send_batch_size: 2048 + send_batch_max_size: 4096 + remote_write: + - endpoint: tempo.example.com:14250 + insecure: true + automatic_logging: + backend: "stdout" receivers: otlp: protocols: grpc: http: - remote_write: - - endpoint: http://localhost:1234/write - automatic_logging: - backend: "stdout" scrape_configs: - job_name: "prometheus1" azure_sd_configs: @@ -27,6 +36,13 @@ traces: target_label: __param_target1 - source_labels: [__address2__] target_label: __param_target2 + prom_sd_operation_type: "insert" + prom_sd_pod_associations: + - ip + - net.host.ip + # spanmetrics: + # namespace: testing + # metrics_instance: default tail_sampling: policies: [ @@ -35,7 +51,23 @@ traces: type: always_sample }, ] - attributes: - actions: - - key: db.table - action: delete \ No newline at 
end of file + # load_balancing: + # resolver: + # static: + # hostnames: + # - tempo1.example.com + # - tempo2.example.com + # service_graphs: + # enabled: true + jaeger_remote_sampling: + - source: + reload_interval: 30s + remote: + endpoint: jaeger-collector:14250 + +# This metrics config is needed when we enable spanmetrics for traces +# +# metrics: +# global: +# remote_write: +# - url: http://localhost:9009/api/prom/push From 787a094003a3d970d4d37c529bffa9f5ec5e18fb Mon Sep 17 00:00:00 2001 From: William Dumont Date: Tue, 2 Apr 2024 15:24:22 +0200 Subject: [PATCH 20/83] fix routing key in loadbalancingexporter converter (#6809) --- .../converter_loadbalancingexporter.go | 6 +- .../staticconvert/testdata/traces.river | 70 ++++++++++++++----- .../staticconvert/testdata/traces.yaml | 12 ++-- 3 files changed, 63 insertions(+), 25 deletions(-) diff --git a/internal/converter/internal/otelcolconvert/converter_loadbalancingexporter.go b/internal/converter/internal/otelcolconvert/converter_loadbalancingexporter.go index 4475badd27e3..de09d99dde22 100644 --- a/internal/converter/internal/otelcolconvert/converter_loadbalancingexporter.go +++ b/internal/converter/internal/otelcolconvert/converter_loadbalancingexporter.go @@ -54,10 +54,14 @@ func (loadbalancingExporterConverter) ConvertAndAppend(state *State, id componen } func toLoadbalancingExporter(cfg *loadbalancingexporter.Config) *loadbalancing.Arguments { + routingKey := "traceID" + if cfg.RoutingKey != "" { + routingKey = cfg.RoutingKey + } return &loadbalancing.Arguments{ Protocol: toProtocol(cfg.Protocol), Resolver: toResolver(cfg.Resolver), - RoutingKey: cfg.RoutingKey, + RoutingKey: routingKey, DebugMetrics: common.DefaultValue[loadbalancing.Arguments]().DebugMetrics, } diff --git a/internal/converter/internal/staticconvert/testdata/traces.river b/internal/converter/internal/staticconvert/testdata/traces.river index 85364b105d81..f8d8a4e8f584 100644 --- 
a/internal/converter/internal/staticconvert/testdata/traces.river +++ b/internal/converter/internal/staticconvert/testdata/traces.river @@ -13,7 +13,7 @@ otelcol.extension.jaeger_remote_sampling "default_0" { } } -otelcol.receiver.otlp "default" { +otelcol.receiver.otlp "_0_default" { grpc { include_metadata = true } @@ -25,11 +25,11 @@ otelcol.receiver.otlp "default" { output { metrics = [] logs = [] - traces = [otelcol.processor.discovery.default.input] + traces = [otelcol.processor.discovery._0_default.input] } } -discovery.azure "default_prometheus1" { +discovery.azure "_0_default_prometheus1" { subscription_id = "subscription1" oauth { @@ -43,17 +43,17 @@ discovery.azure "default_prometheus1" { } } -discovery.lightsail "default_prometheus1" { +discovery.lightsail "_0_default_prometheus1" { region = "us-east-1" access_key = "YOUR_ACCESS_KEY" secret_key = "YOUR_SECRET_KEY" port = 8080 } -discovery.relabel "default_prometheus1" { +discovery.relabel "_0_default_prometheus1" { targets = concat( - discovery.azure.default_prometheus1.targets, - discovery.lightsail.default_prometheus1.targets, + discovery.azure._0_default_prometheus1.targets, + discovery.lightsail._0_default_prometheus1.targets, ) rule { @@ -67,19 +67,19 @@ discovery.relabel "default_prometheus1" { } } -otelcol.processor.discovery "default" { - targets = discovery.relabel.default_prometheus1.output +otelcol.processor.discovery "_0_default" { + targets = discovery.relabel._0_default_prometheus1.output operation_type = "insert" pod_associations = ["ip", "net.host.ip"] output { metrics = [] logs = [] - traces = [otelcol.processor.attributes.default.input] + traces = [otelcol.processor.attributes._0_default.input] } } -otelcol.processor.attributes "default" { +otelcol.processor.attributes "_0_default" { action { key = "db.table" action = "delete" @@ -88,11 +88,45 @@ otelcol.processor.attributes "default" { output { metrics = [] logs = [] - traces = [otelcol.processor.tail_sampling.default.input] + traces 
= [otelcol.exporter.loadbalancing._0_default.input, otelcol.exporter.logging._0_default.input] } } -otelcol.processor.tail_sampling "default" { +otelcol.exporter.loadbalancing "_0_default" { + protocol { + otlp { + retry { + max_elapsed_time = "1m0s" + } + + client { + compression = "none" + } + } + } + + resolver { + static { + hostnames = ["tempo1.example.com", "tempo2.example.com"] + } + } +} + +otelcol.exporter.logging "_0_default" { } + +otelcol.receiver.otlp "_1_lb" { + grpc { + endpoint = "0.0.0.0:4318" + } + + output { + metrics = [] + logs = [] + traces = [otelcol.processor.tail_sampling._1_default.input] + } +} + +otelcol.processor.tail_sampling "_1_default" { policy { name = "test-policy-1" type = "always_sample" @@ -100,11 +134,11 @@ otelcol.processor.tail_sampling "default" { decision_wait = "5s" output { - traces = [otelcol.processor.batch.default.input] + traces = [otelcol.processor.batch._1_default.input] } } -otelcol.processor.batch "default" { +otelcol.processor.batch "_1_default" { timeout = "5s" send_batch_size = 2048 send_batch_max_size = 4096 @@ -112,11 +146,11 @@ otelcol.processor.batch "default" { output { metrics = [] logs = [] - traces = [otelcol.exporter.otlp.default_0.input, otelcol.exporter.logging.default.input] + traces = [otelcol.exporter.otlp._1_0.input, otelcol.exporter.logging._1_default.input] } } -otelcol.exporter.otlp "default_0" { +otelcol.exporter.otlp "_1_0" { retry_on_failure { max_elapsed_time = "1m0s" } @@ -130,4 +164,4 @@ otelcol.exporter.otlp "default_0" { } } -otelcol.exporter.logging "default" { } +otelcol.exporter.logging "_1_default" { } diff --git a/internal/converter/internal/staticconvert/testdata/traces.yaml b/internal/converter/internal/staticconvert/testdata/traces.yaml index 251f4d24ee56..5a4cb2dfd332 100644 --- a/internal/converter/internal/staticconvert/testdata/traces.yaml +++ b/internal/converter/internal/staticconvert/testdata/traces.yaml @@ -51,12 +51,12 @@ traces: type: always_sample }, ] - # 
load_balancing: - # resolver: - # static: - # hostnames: - # - tempo1.example.com - # - tempo2.example.com + load_balancing: + resolver: + static: + hostnames: + - tempo1.example.com + - tempo2.example.com # service_graphs: # enabled: true jaeger_remote_sampling: From 1570f972051381b7510dcecfa35da9f30dd8f38c Mon Sep 17 00:00:00 2001 From: William Dumont Date: Tue, 2 Apr 2024 17:06:37 +0200 Subject: [PATCH 21/83] Fix custom component registry access (#6811) * lock before before evaluating to prevent using a not fully initialized custom component registry * update changelog * add test that was creating a panic --- CHANGELOG.md | 2 + internal/flow/internal/controller/loader.go | 33 +++++----- .../testdata/import_file/import_file_17.txtar | 65 +++++++++++++++++++ 3 files changed, 84 insertions(+), 16 deletions(-) create mode 100644 internal/flow/testdata/import_file/import_file_17.txtar diff --git a/CHANGELOG.md b/CHANGELOG.md index 2409fd3568bc..db1ccf5a9a39 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -53,6 +53,8 @@ Main (unreleased) whenever that argument is explicitly configured. This issue only affected a small subset of arguments across 15 components. (@erikbaranowski, @rfratto) +- Fix a bug where a panic could occur when reloading custom components. (@wildum) + ### Other changes - Clustering for Grafana Agent in Flow mode has graduated from beta to stable. diff --git a/internal/flow/internal/controller/loader.go b/internal/flow/internal/controller/loader.go index 8921d5ff1859..d9b328160497 100644 --- a/internal/flow/internal/controller/loader.go +++ b/internal/flow/internal/controller/loader.go @@ -37,21 +37,21 @@ type Loader struct { // pool for evaluation in EvaluateDependants, because the queue is full. This is an unlikely scenario, but when // it happens we should avoid retrying too often to give other goroutines a chance to progress. Having a backoff // also prevents log spamming with errors. 
- backoffConfig backoff.Config + backoffConfig backoff.Config + + mut sync.RWMutex + graph *dag.Graph + originalGraph *dag.Graph + componentNodes []ComponentNode + declareNodes map[string]*DeclareNode + importConfigNodes map[string]*ImportConfigNode + serviceNodes []*ServiceNode + cache *valueCache + blocks []*ast.BlockStmt // Most recently loaded blocks, used for writing + cm *controllerMetrics + cc *controllerCollector + moduleExportIndex int componentNodeManager *ComponentNodeManager - - mut sync.RWMutex - graph *dag.Graph - originalGraph *dag.Graph - componentNodes []ComponentNode - declareNodes map[string]*DeclareNode - importConfigNodes map[string]*ImportConfigNode - serviceNodes []*ServiceNode - cache *valueCache - blocks []*ast.BlockStmt // Most recently loaded blocks, used for writing - cm *controllerMetrics - cc *controllerCollector - moduleExportIndex int } // LoaderOptions holds options for creating a Loader. @@ -787,10 +787,11 @@ func (l *Loader) concurrentEvalFn(n dag.Node, spanCtx context.Context, tracer tr switch n := n.(type) { case BlockNode: ectx := l.cache.BuildContext() - evalErr := n.Evaluate(ectx) - // Only obtain loader lock after we have evaluated the node, allowing for concurrent evaluation. + // RLock before evaluate to prevent Evaluating while the config is being reloaded l.mut.RLock() + evalErr := n.Evaluate(ectx) + err = l.postEvaluate(l.log, n, evalErr) // Additional post-evaluation steps necessary for module exports. diff --git a/internal/flow/testdata/import_file/import_file_17.txtar b/internal/flow/testdata/import_file/import_file_17.txtar new file mode 100644 index 000000000000..8313fa642d33 --- /dev/null +++ b/internal/flow/testdata/import_file/import_file_17.txtar @@ -0,0 +1,65 @@ +This is a simple test with several levels of nesting. +This test would often panic when re-evaluation could run concurrently with a config reload. +Now that it is protected with mutex, this test should always pass. 
+ +-- main.river -- +import.file "a_namespace" { + filename = "module.river" +} + +a_namespace.a "default" { + a_argument = 47 +} + +testcomponents.summation "sum" { + input = 10 +} + +-- module.river -- +declare "a" { + import.file "b_namespace" { + filename = "nested_module.river" + } + + argument "a_argument" { + comment = "Where to send collected metrics." + } + + b_namespace.b "default" { + b_argument = 147 + } + + export "a_export" { + value = argument.a_argument.value + b_namespace.b.default.b_export + 1 + } +} + +-- nested_module.river -- +declare "b" { + import.file "c_namespace" { + filename = "other_nested_module.river" + } + + argument "b_argument" { + optional = false + } + + c_namespace.c "default" { + c_argument = 101 + } + + export "b_export" { + value = argument.b_argument.value + c_namespace.c.default.c_export + 1 + } +} + +-- other_nested_module.river -- +declare "c" { + argument "c_argument" { + optional = false + } + + export "c_export" { + value = argument.c_argument.value + 1 + } +} \ No newline at end of file From d1afb4871e32be70207f7f793fc126d75e238add Mon Sep 17 00:00:00 2001 From: mattdurham Date: Tue, 2 Apr 2024 12:25:34 -0400 Subject: [PATCH 22/83] This is to handle myriad ways that git revision can be used. (#6803) * add more complex behavior to git to handle branches,tags and hashes * add changelog * small order change * simplify changes * clean up * lint changes * remove chdir * standardize pull frequency and eventually timing --- CHANGELOG.md | 2 + internal/flow/import_git_test.go | 277 +++++++++++++++++++++++++++---- internal/vcs/git.go | 69 ++++---- 3 files changed, 280 insertions(+), 68 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index db1ccf5a9a39..0e8bf05d97b7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -55,6 +55,8 @@ Main (unreleased) - Fix a bug where a panic could occur when reloading custom components. (@wildum) +- The `import.git` config block did not work with branches or tags this now fixes that behavior. 
(@mattdurham) + ### Other changes - Clustering for Grafana Agent in Flow mode has graduated from beta to stable. diff --git a/internal/flow/import_git_test.go b/internal/flow/import_git_test.go index c2b4c4f9211c..12027c7cdfce 100644 --- a/internal/flow/import_git_test.go +++ b/internal/flow/import_git_test.go @@ -3,10 +3,13 @@ package flow_test import ( + "bufio" + "bytes" "context" "os" "os/exec" "path/filepath" + "strings" "sync" "testing" "time" @@ -25,19 +28,77 @@ func TestPullUpdating(t *testing.T) { // file based git repo then committing a file, running the component, then updating the file in the repo. testRepo := t.TempDir() - contents := `declare "add" { - argument "a" {} - argument "b" {} + main := ` +import.git "testImport" { + repository = "` + testRepo + `" + path = "math.river" + pull_frequency = "1s" +} + +testImport.add "cc" { + a = 1 + b = 1 +} +` + // Create our git repository. + runGit(t, testRepo, "init", testRepo) + + // Add the file we want. + math := filepath.Join(testRepo, "math.river") + err := os.WriteFile(math, []byte(contents), 0666) + require.NoError(t, err) + + runGit(t, testRepo, "add", ".") + + runGit(t, testRepo, "commit", "-m \"test\"") + + defer verifyNoGoroutineLeaks(t) + ctrl, f := setup(t, main) + err = ctrl.LoadSource(f, nil) + require.NoError(t, err) + ctx, cancel := context.WithCancel(context.Background()) + + var wg sync.WaitGroup + defer func() { + cancel() + wg.Wait() + }() + + wg.Add(1) + go func() { + defer wg.Done() + ctrl.Run(ctx) + }() + + // Check for initial condition + require.Eventually(t, func() bool { + export := getExport[map[string]interface{}](t, ctrl, "", "testImport.add.cc") + return export["sum"] == 2 + }, 5*time.Second, 100*time.Millisecond) + + err = os.WriteFile(math, []byte(contentsMore), 0666) + require.NoError(t, err) + + runGit(t, testRepo, "add", ".") + + runGit(t, testRepo, "commit", "-m \"test2\"") + + // Check for final condition. 
+ require.Eventually(t, func() bool { + export := getExport[map[string]interface{}](t, ctrl, "", "testImport.add.cc") + return export["sum"] == 3 + }, 5*time.Second, 100*time.Millisecond) +} + +func TestPullUpdatingFromBranch(t *testing.T) { + testRepo := t.TempDir() - export "sum" { - value = argument.a.value + argument.b.value - } -}` main := ` import.git "testImport" { repository = "` + testRepo + `" path = "math.river" - pull_frequency = "5s" + pull_frequency = "1s" + revision = "testor" } testImport.add "cc" { @@ -45,21 +106,99 @@ testImport.add "cc" { b = 1 } ` - init := exec.Command("git", "init", testRepo) - err := init.Run() + runGit(t, testRepo, "init", testRepo) + + runGit(t, testRepo, "checkout", "-b", "testor") + + math := filepath.Join(testRepo, "math.river") + err := os.WriteFile(math, []byte(contents), 0666) + require.NoError(t, err) + + runGit(t, testRepo, "add", ".") + + runGit(t, testRepo, "commit", "-m \"test\"") + + defer verifyNoGoroutineLeaks(t) + ctrl, f := setup(t, main) + err = ctrl.LoadSource(f, nil) require.NoError(t, err) + ctx, cancel := context.WithCancel(context.Background()) + + var wg sync.WaitGroup + defer func() { + cancel() + wg.Wait() + }() + + wg.Add(1) + go func() { + defer wg.Done() + ctrl.Run(ctx) + }() + + // Check for initial condition + require.Eventually(t, func() bool { + export := getExport[map[string]interface{}](t, ctrl, "", "testImport.add.cc") + return export["sum"] == 2 + }, 5*time.Second, 100*time.Millisecond) + + err = os.WriteFile(math, []byte(contentsMore), 0666) + require.NoError(t, err) + + runGit(t, testRepo, "add", ".") + + runGit(t, testRepo, "commit", "-m \"test2\"") + + // Check for final condition. 
+ require.Eventually(t, func() bool { + export := getExport[map[string]interface{}](t, ctrl, "", "testImport.add.cc") + return export["sum"] == 3 + }, 5*time.Second, 100*time.Millisecond) +} + +func TestPullUpdatingFromHash(t *testing.T) { + testRepo := t.TempDir() + + runGit(t, testRepo, "init", testRepo) math := filepath.Join(testRepo, "math.river") - err = os.WriteFile(math, []byte(contents), 0666) + err := os.WriteFile(math, []byte(contents), 0666) require.NoError(t, err) - add := exec.Command("git", "add", ".") - add.Dir = testRepo - err = add.Run() + + runGit(t, testRepo, "add", ".") + + runGit(t, testRepo, "commit", "-m \"test\"") + + getHead := exec.Command("git", "rev-parse", "HEAD") + var stdBuffer bytes.Buffer + getHead.Dir = testRepo + getHead.Stdout = bufio.NewWriter(&stdBuffer) + err = getHead.Run() require.NoError(t, err) - commit := exec.Command("git", "commit", "-m \"test\"") - commit.Dir = testRepo - err = commit.Run() + hash := stdBuffer.String() + hash = strings.TrimSpace(hash) + + main := ` +import.git "testImport" { + repository = "` + testRepo + `" + path = "math.river" + pull_frequency = "1s" + revision = "` + hash + `" +} + +testImport.add "cc" { + a = 1 + b = 1 +} +` + + // After this update the sum should still be 2 and not 3 since it is pinned to the initial hash. + err = os.WriteFile(math, []byte(contentsMore), 0666) require.NoError(t, err) + runGit(t, testRepo, "add", ".") + + runGit(t, testRepo, "commit", "-m \"test2\"") + defer verifyNoGoroutineLeaks(t) ctrl, f := setup(t, main) err = ctrl.LoadSource(f, nil) @@ -78,33 +217,103 @@ testImport.add "cc" { ctrl.Run(ctx) }() - // Check for initial condition + // Check for final condition. 
require.Eventually(t, func() bool { export := getExport[map[string]interface{}](t, ctrl, "", "testImport.add.cc") return export["sum"] == 2 - }, 3*time.Second, 10*time.Millisecond) + }, 5*time.Second, 100*time.Millisecond) +} - contentsMore := `declare "add" { - argument "a" {} - argument "b" {} +func TestPullUpdatingFromTag(t *testing.T) { + testRepo := t.TempDir() - export "sum" { - value = argument.a.value + argument.b.value + 1 - } -}` + runGit(t, testRepo, "init", testRepo) + + math := filepath.Join(testRepo, "math.river") + err := os.WriteFile(math, []byte(contents), 0666) + require.NoError(t, err) + + runGit(t, testRepo, "add", ".") + + runGit(t, testRepo, "commit", "-m \"test\"") + + runGit(t, testRepo, "tag", "-a", "tagtest", "-m", "testtag") + + main := ` +import.git "testImport" { + repository = "` + testRepo + `" + path = "math.river" + pull_frequency = "1s" + revision = "tagtest" +} + +testImport.add "cc" { + a = 1 + b = 1 +} +` + + // After this update the sum should still be 2 and not 3 since it is pinned to the tag. err = os.WriteFile(math, []byte(contentsMore), 0666) require.NoError(t, err) - add2 := exec.Command("git", "add", ".") - add2.Dir = testRepo - add2.Run() - commit2 := exec.Command("git", "commit", "-m \"test2\"") - commit2.Dir = testRepo - commit2.Run() + runGit(t, testRepo, "add", ".") + + runGit(t, testRepo, "commit", "-m \"test2\"") + + defer verifyNoGoroutineLeaks(t) + + ctrl, f := setup(t, main) + err = ctrl.LoadSource(f, nil) + require.NoError(t, err) + ctx, cancel := context.WithCancel(context.Background()) + + var wg sync.WaitGroup + defer func() { + cancel() + wg.Wait() + }() + + wg.Add(1) + go func() { + defer wg.Done() + ctrl.Run(ctx) + }() // Check for final condition. 
require.Eventually(t, func() bool { export := getExport[map[string]interface{}](t, ctrl, "", "testImport.add.cc") - return export["sum"] == 3 - }, 20*time.Second, 1*time.Millisecond) + return export["sum"] == 2 + }, 5*time.Second, 100*time.Millisecond) +} + +func runGit(t *testing.T, dir string, args ...string) { + exe := exec.Command("git", args...) + var stdErr bytes.Buffer + exe.Stderr = bufio.NewWriter(&stdErr) + exe.Dir = dir + err := exe.Run() + errTxt := stdErr.String() + if err != nil { + t.Error(errTxt) + } + require.NoErrorf(t, err, "command git %v failed", args) } + +const contents = `declare "add" { + argument "a" {} + argument "b" {} + + export "sum" { + value = argument.a.value + argument.b.value + } +}` + +const contentsMore = `declare "add" { + argument "a" {} + argument "b" {} + + export "sum" { + value = argument.a.value + argument.b.value + 1 + } +}` diff --git a/internal/vcs/git.go b/internal/vcs/git.go index 788c1029f785..41bd20949070 100644 --- a/internal/vcs/git.go +++ b/internal/vcs/git.go @@ -86,28 +86,18 @@ func NewGitRepo(ctx context.Context, storagePath string, opts GitRepoOptions) (* } } - // Finally, hard reset to our requested revision. - hash, err := findRevision(opts.Revision, repo) - if err != nil { - return nil, InvalidRevisionError{Revision: opts.Revision} - } - - workTree, err := repo.Worktree() - if err != nil { - return nil, err - } - err = workTree.Reset(&git.ResetOptions{ - Commit: hash, - Mode: git.HardReset, - }) - if err != nil { - return nil, err + checkoutErr := checkout(opts.Revision, repo) + if checkoutErr != nil { + return nil, UpdateFailedError{ + Repository: opts.Repository, + Inner: checkoutErr, + } } return &GitRepo{ opts: opts, repo: repo, - workTree: workTree, + workTree: wt, }, err } @@ -119,7 +109,6 @@ func isRepoCloned(dir string) bool { // Update updates the repository by pulling new content and re-checking out to // latest version of Revision. 
func (repo *GitRepo) Update(ctx context.Context) error { - var err error pullRepoErr := repo.workTree.PullContext(ctx, &git.PullOptions{ RemoteName: "origin", Force: true, @@ -132,17 +121,12 @@ func (repo *GitRepo) Update(ctx context.Context) error { } } - // Find the latest revision being requested and hard-reset to it. - hash, err := findRevision(repo.opts.Revision, repo.repo) - if err != nil { - return InvalidRevisionError{Revision: repo.opts.Revision} - } - err = repo.workTree.Reset(&git.ResetOptions{ - Commit: hash, - Mode: git.HardReset, - }) - if err != nil { - return err + checkoutErr := checkout(repo.opts.Revision, repo.repo) + if checkoutErr != nil { + return UpdateFailedError{ + Repository: repo.opts.Repository, + Inner: checkoutErr, + } } return nil @@ -186,24 +170,41 @@ func (repo *GitRepo) CurrentRevision() (string, error) { return ref.Hash().String(), nil } -func findRevision(rev string, repo *git.Repository) (plumbing.Hash, error) { +// Depending on the type of revision we need to handle checkout differently. +// Tags are checked out as branches +// Branches as branches +// Commits are commits +func checkout(rev string, repo *git.Repository) error { // Try looking for the revision in the following order: // // 1. Search by tag name. // 2. Search by remote ref name. // 3. Try to resolve the revision directly. 
+ wt, err := repo.Worktree() + if err != nil { + return err + } if tagRef, err := repo.Tag(rev); err == nil { - return tagRef.Hash(), nil + return wt.Checkout(&git.CheckoutOptions{ + Branch: tagRef.Name(), + Force: true, + }) } if remoteRef, err := repo.Reference(plumbing.NewRemoteReferenceName("origin", rev), true); err == nil { - return remoteRef.Hash(), nil + return wt.Checkout(&git.CheckoutOptions{ + Branch: remoteRef.Name(), + Force: true, + }) } if hash, err := repo.ResolveRevision(plumbing.Revision(rev)); err == nil { - return *hash, nil + return wt.Checkout(&git.CheckoutOptions{ + Hash: *hash, + Force: true, + }) } - return plumbing.ZeroHash, plumbing.ErrReferenceNotFound + return plumbing.ErrReferenceNotFound } From d8d8872d15076004a2faff40e8d5bfff48125fe9 Mon Sep 17 00:00:00 2001 From: mattdurham Date: Tue, 2 Apr 2024 16:24:35 -0400 Subject: [PATCH 23/83] Update log conversion for legacy file path (#6812) * Update log conversion. * linter feedback * fix test --- CHANGELOG.md | 2 + .../common/loki/positions/positions.go | 4 -- .../common/loki/positions/positions_test.go | 3 -- .../internal/build/scrape_builder.go | 6 ++- .../promtailconvert/promtailconvert.go | 9 ++-- .../promtailconvert/testdata/azure.river | 5 ++- .../promtailconvert/testdata/consul.river | 5 ++- .../testdata/consulagent.river | 5 ++- .../testdata/digitalocean.river | 5 ++- .../testdata/dockerswarm.river | 5 ++- .../promtailconvert/testdata/ec2.river | 5 ++- .../promtailconvert/testdata/file.river | 5 ++- .../promtailconvert/testdata/gce.river | 5 ++- .../testdata/globalpositions.river | 43 +++++++++++++++++++ .../testdata/globalpositions.yaml | 21 +++++++++ .../promtailconvert/testdata/kubernetes.river | 5 ++- .../promtailconvert/testdata/marathon.river | 5 ++- .../testdata/mixed_pipeline.river | 5 ++- .../promtailconvert/testdata/nerve.river | 5 ++- .../promtailconvert/testdata/openstack.river | 5 ++- .../testdata/pipeline_stages_cri_empty.river | 5 ++- 
.../testdata/pipeline_stages_drop.river | 5 ++- .../pipeline_stages_match_nested.river | 5 ++- .../testdata/pipeline_stages_part1.river | 5 ++- .../testdata/pipeline_stages_part2.river | 5 ++- .../pipeline_stages_structured_metadata.river | 5 ++- .../testdata/sd_pipeline_example.river | 5 ++- .../promtailconvert/testdata/serverset.river | 5 ++- .../testdata/static_pipeline_example.river | 1 + .../promtailconvert/testdata/triton.river | 5 ++- .../internal/promtailconvert/validate.go | 9 ---- .../testdata/promtail_prom.river | 2 + 32 files changed, 144 insertions(+), 66 deletions(-) create mode 100644 internal/converter/internal/promtailconvert/testdata/globalpositions.river create mode 100644 internal/converter/internal/promtailconvert/testdata/globalpositions.yaml diff --git a/CHANGELOG.md b/CHANGELOG.md index 0e8bf05d97b7..e9476af3aa28 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -36,6 +36,8 @@ Main (unreleased) - Added support for `otelcol` configuration conversion in `grafana-agent convert` and `grafana-agent run` commands. (@rfratto, @erikbaranowski, @tpaschalis, @hainenber) +- Add automatic conversion for `legacy_positions_file` in component `loki.source.file`. (@mattdurham) + ### Features - Added a new CLI flag `--stability.level` which defines the minimum stability diff --git a/internal/component/common/loki/positions/positions.go b/internal/component/common/loki/positions/positions.go index 8733cbe06a8d..f985ff849ead 100644 --- a/internal/component/common/loki/positions/positions.go +++ b/internal/component/common/loki/positions/positions.go @@ -104,7 +104,6 @@ type LegacyFile struct { // ConvertLegacyPositionsFile will convert the legacy positions file to the new format if: // 1. There is no file at the newpath // 2. There is a file at the legacy path and that it is valid yaml -// If all the above is true then the legacy file will be deleted. 
func ConvertLegacyPositionsFile(legacyPath, newPath string, l log.Logger) { legacyPositions := readLegacyFile(legacyPath, l) // LegacyPositions did not exist or was invalid so return. @@ -131,9 +130,6 @@ func ConvertLegacyPositionsFile(legacyPath, newPath string, l log.Logger) { if err != nil { level.Error(l).Log("msg", "error writing new positions file from legacy", "path", newPath, "error", err) } - - // Finally remove the old path. - _ = os.Remove(legacyPath) } func readLegacyFile(legacyPath string, l log.Logger) *LegacyFile { diff --git a/internal/component/common/loki/positions/positions_test.go b/internal/component/common/loki/positions/positions_test.go index 0357e618a500..9048720f11e4 100644 --- a/internal/component/common/loki/positions/positions_test.go +++ b/internal/component/common/loki/positions/positions_test.go @@ -67,9 +67,6 @@ func TestLegacyConversion(t *testing.T) { require.True(t, k.Path == "/tmp/random.log") require.True(t, v == "17623") } - // Ensure old file is deleted. 
- _, err = os.Stat(legacy) - require.True(t, os.IsNotExist(err)) } func TestLegacyConversionWithNewFile(t *testing.T) { diff --git a/internal/converter/internal/promtailconvert/internal/build/scrape_builder.go b/internal/converter/internal/promtailconvert/internal/build/scrape_builder.go index 91d0a34a48f8..80aa9c3904f4 100644 --- a/internal/converter/internal/promtailconvert/internal/build/scrape_builder.go +++ b/internal/converter/internal/promtailconvert/internal/build/scrape_builder.go @@ -11,11 +11,13 @@ import ( filematch "github.com/grafana/agent/internal/component/local/file_match" "github.com/grafana/agent/internal/component/loki/process" "github.com/grafana/agent/internal/component/loki/process/stages" + lokirelabel "github.com/grafana/agent/internal/component/loki/relabel" lokisourcefile "github.com/grafana/agent/internal/component/loki/source/file" "github.com/grafana/agent/internal/converter/diag" "github.com/grafana/agent/internal/converter/internal/common" "github.com/grafana/agent/internal/converter/internal/prometheusconvert/component" + "github.com/grafana/loki/clients/pkg/promtail/positions" "github.com/grafana/loki/clients/pkg/promtail/scrapeconfig" "github.com/grafana/loki/clients/pkg/promtail/targets/file" "github.com/grafana/river/scanner" @@ -42,7 +44,6 @@ func NewScrapeConfigBuilder( diags *diag.Diagnostics, cfg *scrapeconfig.Config, globalCtx *GlobalContext, - ) *ScrapeConfigBuilder { return &ScrapeConfigBuilder{ @@ -61,7 +62,7 @@ func (s *ScrapeConfigBuilder) Sanitize() { } } -func (s *ScrapeConfigBuilder) AppendLokiSourceFile(watchConfig *file.WatchConfig) { +func (s *ScrapeConfigBuilder) AppendLokiSourceFile(watchConfig *file.WatchConfig, positionsCfg *positions.Config) { // If there were no targets expressions collected, that means // we didn't have any components that produced SD targets, so // we can skip this component. 
@@ -76,6 +77,7 @@ func (s *ScrapeConfigBuilder) AppendLokiSourceFile(watchConfig *file.WatchConfig Encoding: s.cfg.Encoding, DecompressionConfig: convertDecompressionConfig(s.cfg.DecompressionCfg), FileWatch: convertFileWatchConfig(watchConfig), + LegacyPositionsFile: positionsCfg.PositionsFile, } overrideHook := func(val interface{}) interface{} { if _, ok := val.([]discovery.Target); ok { diff --git a/internal/converter/internal/promtailconvert/promtailconvert.go b/internal/converter/internal/promtailconvert/promtailconvert.go index 8c3664881c1b..515579f11472 100644 --- a/internal/converter/internal/promtailconvert/promtailconvert.go +++ b/internal/converter/internal/promtailconvert/promtailconvert.go @@ -93,8 +93,8 @@ func Convert(in []byte, extraArgs []string) ([]byte, diag.Diagnostics) { func AppendAll(f *builder.File, cfg *promtailcfg.Config, labelPrefix string, diags diag.Diagnostics) diag.Diagnostics { validateTopLevelConfig(cfg, &diags) - var writeReceivers = make([]loki.LogsReceiver, len(cfg.ClientConfigs)) - var writeBlocks = make([]*builder.Block, len(cfg.ClientConfigs)) + writeReceivers := make([]loki.LogsReceiver, len(cfg.ClientConfigs)) + writeBlocks := make([]*builder.Block, len(cfg.ClientConfigs)) // Each client config needs to be a separate remote_write, // because they may have different ExternalLabels fields. 
for i, cc := range cfg.ClientConfigs { @@ -108,7 +108,7 @@ func AppendAll(f *builder.File, cfg *promtailcfg.Config, labelPrefix string, dia } for _, sc := range cfg.ScrapeConfig { - appendScrapeConfig(f, &sc, &diags, gc, &cfg.Global.FileWatch) + appendScrapeConfig(f, &sc, &diags, gc, &cfg.Global.FileWatch, &cfg.PositionsConfig) } for _, write := range writeBlocks { @@ -137,6 +137,7 @@ func appendScrapeConfig( diags *diag.Diagnostics, gctx *build.GlobalContext, watchConfig *file.WatchConfig, + positionsCfg *positions.Config, ) { b := build.NewScrapeConfigBuilder(f, diags, cfg, gctx) @@ -151,7 +152,7 @@ func appendScrapeConfig( // If any relabelling is required, it will be done via a discovery.relabel component. // The files will be watched and the globs in file paths will be expanded using discovery.file component. // The log entries are sent to loki.process if processing is needed, or directly to loki.write components. - b.AppendLokiSourceFile(watchConfig) + b.AppendLokiSourceFile(watchConfig, positionsCfg) // Append all the components that produce logs directly. // If any relabelling is required, it will be done via a loki.relabel component. 
diff --git a/internal/converter/internal/promtailconvert/testdata/azure.river b/internal/converter/internal/promtailconvert/testdata/azure.river index 90a652e05dab..3a26563523a9 100644 --- a/internal/converter/internal/promtailconvert/testdata/azure.river +++ b/internal/converter/internal/promtailconvert/testdata/azure.river @@ -17,6 +17,7 @@ local.file_match "fun" { } loki.source.file "fun" { - targets = local.file_match.fun.targets - forward_to = [] + targets = local.file_match.fun.targets + forward_to = [] + legacy_positions_file = "/var/log/positions.yaml" } diff --git a/internal/converter/internal/promtailconvert/testdata/consul.river b/internal/converter/internal/promtailconvert/testdata/consul.river index 72563a502d95..4e3e4a42f459 100644 --- a/internal/converter/internal/promtailconvert/testdata/consul.river +++ b/internal/converter/internal/promtailconvert/testdata/consul.river @@ -33,6 +33,7 @@ local.file_match "fun" { } loki.source.file "fun" { - targets = local.file_match.fun.targets - forward_to = [] + targets = local.file_match.fun.targets + forward_to = [] + legacy_positions_file = "/var/log/positions.yaml" } diff --git a/internal/converter/internal/promtailconvert/testdata/consulagent.river b/internal/converter/internal/promtailconvert/testdata/consulagent.river index cbadc748d70c..518b9422d2cb 100644 --- a/internal/converter/internal/promtailconvert/testdata/consulagent.river +++ b/internal/converter/internal/promtailconvert/testdata/consulagent.river @@ -32,6 +32,7 @@ local.file_match "fun" { } loki.source.file "fun" { - targets = local.file_match.fun.targets - forward_to = [] + targets = local.file_match.fun.targets + forward_to = [] + legacy_positions_file = "/var/log/positions.yaml" } diff --git a/internal/converter/internal/promtailconvert/testdata/digitalocean.river b/internal/converter/internal/promtailconvert/testdata/digitalocean.river index fb71e471c56f..4e567a19ff99 100644 --- 
a/internal/converter/internal/promtailconvert/testdata/digitalocean.river +++ b/internal/converter/internal/promtailconvert/testdata/digitalocean.river @@ -8,6 +8,7 @@ local.file_match "fun" { } loki.source.file "fun" { - targets = local.file_match.fun.targets - forward_to = [] + targets = local.file_match.fun.targets + forward_to = [] + legacy_positions_file = "/var/log/positions.yaml" } diff --git a/internal/converter/internal/promtailconvert/testdata/dockerswarm.river b/internal/converter/internal/promtailconvert/testdata/dockerswarm.river index f436e8fc9010..17ea37ef93d8 100644 --- a/internal/converter/internal/promtailconvert/testdata/dockerswarm.river +++ b/internal/converter/internal/promtailconvert/testdata/dockerswarm.river @@ -33,6 +33,7 @@ local.file_match "fun" { } loki.source.file "fun" { - targets = local.file_match.fun.targets - forward_to = [] + targets = local.file_match.fun.targets + forward_to = [] + legacy_positions_file = "/var/log/positions.yaml" } diff --git a/internal/converter/internal/promtailconvert/testdata/ec2.river b/internal/converter/internal/promtailconvert/testdata/ec2.river index 94d70e08c802..b1b264fef899 100644 --- a/internal/converter/internal/promtailconvert/testdata/ec2.river +++ b/internal/converter/internal/promtailconvert/testdata/ec2.river @@ -10,6 +10,7 @@ local.file_match "fun" { } loki.source.file "fun" { - targets = local.file_match.fun.targets - forward_to = [] + targets = local.file_match.fun.targets + forward_to = [] + legacy_positions_file = "/var/log/positions.yaml" } diff --git a/internal/converter/internal/promtailconvert/testdata/file.river b/internal/converter/internal/promtailconvert/testdata/file.river index 03b1c400c4ce..10297de361d0 100644 --- a/internal/converter/internal/promtailconvert/testdata/file.river +++ b/internal/converter/internal/promtailconvert/testdata/file.river @@ -15,6 +15,7 @@ local.file_match "fun" { } loki.source.file "fun" { - targets = local.file_match.fun.targets - forward_to = [] + 
targets = local.file_match.fun.targets + forward_to = [] + legacy_positions_file = "/var/log/positions.yaml" } diff --git a/internal/converter/internal/promtailconvert/testdata/gce.river b/internal/converter/internal/promtailconvert/testdata/gce.river index e11993c8bfdb..d9abfe8b4492 100644 --- a/internal/converter/internal/promtailconvert/testdata/gce.river +++ b/internal/converter/internal/promtailconvert/testdata/gce.river @@ -9,6 +9,7 @@ local.file_match "fun" { } loki.source.file "fun" { - targets = local.file_match.fun.targets - forward_to = [] + targets = local.file_match.fun.targets + forward_to = [] + legacy_positions_file = "/var/log/positions.yaml" } diff --git a/internal/converter/internal/promtailconvert/testdata/globalpositions.river b/internal/converter/internal/promtailconvert/testdata/globalpositions.river new file mode 100644 index 000000000000..d26820f5c8e1 --- /dev/null +++ b/internal/converter/internal/promtailconvert/testdata/globalpositions.river @@ -0,0 +1,43 @@ +discovery.file "fun" { + files = ["/etc/prometheus/targets/*.json"] +} + +discovery.file "fun_2" { + files = ["/etc/agent/targets/*.json"] + refresh_interval = "30m0s" +} + +local.file_match "fun" { + path_targets = concat( + discovery.file.fun.targets, + discovery.file.fun_2.targets, + ) +} + +loki.source.file "fun" { + targets = local.file_match.fun.targets + forward_to = [] + legacy_positions_file = "/good/positions.yml" +} + +discovery.file "fun2" { + files = ["/etc/prometheus/targets2/*.json"] +} + +discovery.file "fun2_2" { + files = ["/etc/agent/targets2/*.json"] + refresh_interval = "30m0s" +} + +local.file_match "fun2" { + path_targets = concat( + discovery.file.fun2.targets, + discovery.file.fun2_2.targets, + ) +} + +loki.source.file "fun2" { + targets = local.file_match.fun2.targets + forward_to = [] + legacy_positions_file = "/good/positions.yml" +} diff --git a/internal/converter/internal/promtailconvert/testdata/globalpositions.yaml 
b/internal/converter/internal/promtailconvert/testdata/globalpositions.yaml new file mode 100644 index 000000000000..29298f6cb69e --- /dev/null +++ b/internal/converter/internal/promtailconvert/testdata/globalpositions.yaml @@ -0,0 +1,21 @@ +positions: + filename: /good/positions.yml +scrape_configs: + - job_name: fun + file_sd_configs: + - files: + - /etc/prometheus/targets/*.json + refresh_interval: 5m + - files: + - /etc/agent/targets/*.json + refresh_interval: 30m + - job_name: fun2 + file_sd_configs: + - files: + - /etc/prometheus/targets2/*.json + refresh_interval: 5m + - files: + - /etc/agent/targets2/*.json + refresh_interval: 30m +tracing: {enabled: false} +server: {register_instrumentation: false} \ No newline at end of file diff --git a/internal/converter/internal/promtailconvert/testdata/kubernetes.river b/internal/converter/internal/promtailconvert/testdata/kubernetes.river index a343f038ab2e..eb54f0a9af42 100644 --- a/internal/converter/internal/promtailconvert/testdata/kubernetes.river +++ b/internal/converter/internal/promtailconvert/testdata/kubernetes.river @@ -72,6 +72,7 @@ local.file_match "fun" { } loki.source.file "fun" { - targets = local.file_match.fun.targets - forward_to = [] + targets = local.file_match.fun.targets + forward_to = [] + legacy_positions_file = "/var/log/positions.yaml" } diff --git a/internal/converter/internal/promtailconvert/testdata/marathon.river b/internal/converter/internal/promtailconvert/testdata/marathon.river index 03abe507541b..9c97f18cc03e 100644 --- a/internal/converter/internal/promtailconvert/testdata/marathon.river +++ b/internal/converter/internal/promtailconvert/testdata/marathon.river @@ -21,6 +21,7 @@ local.file_match "fun" { } loki.source.file "fun" { - targets = local.file_match.fun.targets - forward_to = [] + targets = local.file_match.fun.targets + forward_to = [] + legacy_positions_file = "/var/log/positions.yaml" } diff --git 
a/internal/converter/internal/promtailconvert/testdata/mixed_pipeline.river b/internal/converter/internal/promtailconvert/testdata/mixed_pipeline.river index 24fe5221cfc5..6dd55cf2a576 100644 --- a/internal/converter/internal/promtailconvert/testdata/mixed_pipeline.river +++ b/internal/converter/internal/promtailconvert/testdata/mixed_pipeline.river @@ -37,8 +37,9 @@ loki.process "uber_pipeline" { } loki.source.file "uber_pipeline" { - targets = local.file_match.uber_pipeline.targets - forward_to = [loki.process.uber_pipeline.receiver] + targets = local.file_match.uber_pipeline.targets + forward_to = [loki.process.uber_pipeline.receiver] + legacy_positions_file = "/tmp/positions.yaml" } loki.source.api "uber_pipeline" { diff --git a/internal/converter/internal/promtailconvert/testdata/nerve.river b/internal/converter/internal/promtailconvert/testdata/nerve.river index 4f224f34cd5e..9d5faac92696 100644 --- a/internal/converter/internal/promtailconvert/testdata/nerve.river +++ b/internal/converter/internal/promtailconvert/testdata/nerve.river @@ -9,6 +9,7 @@ local.file_match "fun" { } loki.source.file "fun" { - targets = local.file_match.fun.targets - forward_to = [] + targets = local.file_match.fun.targets + forward_to = [] + legacy_positions_file = "/var/log/positions.yaml" } diff --git a/internal/converter/internal/promtailconvert/testdata/openstack.river b/internal/converter/internal/promtailconvert/testdata/openstack.river index 703518396e68..0d6253e11441 100644 --- a/internal/converter/internal/promtailconvert/testdata/openstack.river +++ b/internal/converter/internal/promtailconvert/testdata/openstack.river @@ -42,6 +42,7 @@ local.file_match "prometheus1" { } loki.source.file "prometheus1" { - targets = local.file_match.prometheus1.targets - forward_to = [] + targets = local.file_match.prometheus1.targets + forward_to = [] + legacy_positions_file = "/var/log/positions.yaml" } diff --git 
a/internal/converter/internal/promtailconvert/testdata/pipeline_stages_cri_empty.river b/internal/converter/internal/promtailconvert/testdata/pipeline_stages_cri_empty.river index a8890b1a3908..773ae329028b 100644 --- a/internal/converter/internal/promtailconvert/testdata/pipeline_stages_cri_empty.river +++ b/internal/converter/internal/promtailconvert/testdata/pipeline_stages_cri_empty.river @@ -14,8 +14,9 @@ loki.process "example" { } loki.source.file "example" { - targets = local.file_match.example.targets - forward_to = [loki.process.example.receiver] + targets = local.file_match.example.targets + forward_to = [loki.process.example.receiver] + legacy_positions_file = "/var/log/positions.yaml" } loki.write "default" { diff --git a/internal/converter/internal/promtailconvert/testdata/pipeline_stages_drop.river b/internal/converter/internal/promtailconvert/testdata/pipeline_stages_drop.river index 5dcb9effcd4c..a3167a73441c 100644 --- a/internal/converter/internal/promtailconvert/testdata/pipeline_stages_drop.river +++ b/internal/converter/internal/promtailconvert/testdata/pipeline_stages_drop.river @@ -52,8 +52,9 @@ loki.process "example" { } loki.source.file "example" { - targets = local.file_match.example.targets - forward_to = [loki.process.example.receiver] + targets = local.file_match.example.targets + forward_to = [loki.process.example.receiver] + legacy_positions_file = "/var/log/positions.yaml" } loki.write "default" { diff --git a/internal/converter/internal/promtailconvert/testdata/pipeline_stages_match_nested.river b/internal/converter/internal/promtailconvert/testdata/pipeline_stages_match_nested.river index 6012d37e354b..33dcf837a4b4 100644 --- a/internal/converter/internal/promtailconvert/testdata/pipeline_stages_match_nested.river +++ b/internal/converter/internal/promtailconvert/testdata/pipeline_stages_match_nested.river @@ -30,8 +30,9 @@ loki.process "example" { } loki.source.file "example" { - targets = local.file_match.example.targets - 
forward_to = [loki.process.example.receiver] + targets = local.file_match.example.targets + forward_to = [loki.process.example.receiver] + legacy_positions_file = "/var/log/positions.yaml" } loki.write "default" { diff --git a/internal/converter/internal/promtailconvert/testdata/pipeline_stages_part1.river b/internal/converter/internal/promtailconvert/testdata/pipeline_stages_part1.river index fc02a5ee9d91..657c4fd7567a 100644 --- a/internal/converter/internal/promtailconvert/testdata/pipeline_stages_part1.river +++ b/internal/converter/internal/promtailconvert/testdata/pipeline_stages_part1.river @@ -90,8 +90,9 @@ loki.process "example" { } loki.source.file "example" { - targets = local.file_match.example.targets - forward_to = [loki.process.example.receiver] + targets = local.file_match.example.targets + forward_to = [loki.process.example.receiver] + legacy_positions_file = "/var/log/positions.yaml" } loki.write "default" { diff --git a/internal/converter/internal/promtailconvert/testdata/pipeline_stages_part2.river b/internal/converter/internal/promtailconvert/testdata/pipeline_stages_part2.river index afe10179c0f0..9e26d3d45c5f 100644 --- a/internal/converter/internal/promtailconvert/testdata/pipeline_stages_part2.river +++ b/internal/converter/internal/promtailconvert/testdata/pipeline_stages_part2.river @@ -99,8 +99,9 @@ loki.process "example" { } loki.source.file "example" { - targets = local.file_match.example.targets - forward_to = [loki.process.example.receiver] + targets = local.file_match.example.targets + forward_to = [loki.process.example.receiver] + legacy_positions_file = "/var/log/positions.yaml" } loki.write "default" { diff --git a/internal/converter/internal/promtailconvert/testdata/pipeline_stages_structured_metadata.river b/internal/converter/internal/promtailconvert/testdata/pipeline_stages_structured_metadata.river index 32fb65c1cab9..585d280ebbbf 100644 --- 
a/internal/converter/internal/promtailconvert/testdata/pipeline_stages_structured_metadata.river +++ b/internal/converter/internal/promtailconvert/testdata/pipeline_stages_structured_metadata.river @@ -24,8 +24,9 @@ loki.process "example" { } loki.source.file "example" { - targets = local.file_match.example.targets - forward_to = [loki.process.example.receiver] + targets = local.file_match.example.targets + forward_to = [loki.process.example.receiver] + legacy_positions_file = "/var/log/positions.yaml" } loki.write "default" { diff --git a/internal/converter/internal/promtailconvert/testdata/sd_pipeline_example.river b/internal/converter/internal/promtailconvert/testdata/sd_pipeline_example.river index 2b5e283f075d..33f11795b424 100644 --- a/internal/converter/internal/promtailconvert/testdata/sd_pipeline_example.river +++ b/internal/converter/internal/promtailconvert/testdata/sd_pipeline_example.river @@ -38,8 +38,9 @@ loki.process "funny_one" { } loki.source.file "funny_one" { - targets = local.file_match.funny_one.targets - forward_to = [loki.process.funny_one.receiver] + targets = local.file_match.funny_one.targets + forward_to = [loki.process.funny_one.receiver] + legacy_positions_file = "/var/log/positions.yaml" } loki.write "default" { diff --git a/internal/converter/internal/promtailconvert/testdata/serverset.river b/internal/converter/internal/promtailconvert/testdata/serverset.river index 500c733c81a1..1e250bb2e9eb 100644 --- a/internal/converter/internal/promtailconvert/testdata/serverset.river +++ b/internal/converter/internal/promtailconvert/testdata/serverset.river @@ -8,6 +8,7 @@ local.file_match "fun" { } loki.source.file "fun" { - targets = local.file_match.fun.targets - forward_to = [] + targets = local.file_match.fun.targets + forward_to = [] + legacy_positions_file = "/var/log/positions.yaml" } diff --git a/internal/converter/internal/promtailconvert/testdata/static_pipeline_example.river 
b/internal/converter/internal/promtailconvert/testdata/static_pipeline_example.river index 3b7e4b20b436..e5a7e5f28a50 100644 --- a/internal/converter/internal/promtailconvert/testdata/static_pipeline_example.river +++ b/internal/converter/internal/promtailconvert/testdata/static_pipeline_example.river @@ -79,6 +79,7 @@ loki.source.file "example" { initial_delay = "30s" format = "z" } + legacy_positions_file = "/var/log/positions.yaml" } loki.write "default" { diff --git a/internal/converter/internal/promtailconvert/testdata/triton.river b/internal/converter/internal/promtailconvert/testdata/triton.river index d320c84f06b0..2b4386357dc8 100644 --- a/internal/converter/internal/promtailconvert/testdata/triton.river +++ b/internal/converter/internal/promtailconvert/testdata/triton.river @@ -9,6 +9,7 @@ local.file_match "fun" { } loki.source.file "fun" { - targets = local.file_match.fun.targets - forward_to = [] + targets = local.file_match.fun.targets + forward_to = [] + legacy_positions_file = "/var/log/positions.yaml" } diff --git a/internal/converter/internal/promtailconvert/validate.go b/internal/converter/internal/promtailconvert/validate.go index 182203e2876b..04e82f0f665f 100644 --- a/internal/converter/internal/promtailconvert/validate.go +++ b/internal/converter/internal/promtailconvert/validate.go @@ -8,15 +8,6 @@ import ( // validateTopLevelConfig validates the top-level config for any unsupported features. There may still be some // other unsupported features in scope of each config block, which are raised by their respective conversion code. func validateTopLevelConfig(cfg *promtailcfg.Config, diags *diag.Diagnostics) { - // The positions global config is not supported in Flow Mode. 
- if cfg.PositionsConfig != DefaultPositionsConfig() { - diags.Add( - diag.SeverityLevelInfo, - "global positions configuration is not supported - each Flow Mode's loki.source.file component "+ - "has its own positions file in the component's data directory", - ) - } - // WAL support is still work in progress and not documented. Enabling it won't work, so it's an error. if cfg.WAL.Enabled { diags.Add( diff --git a/internal/converter/internal/staticconvert/testdata/promtail_prom.river b/internal/converter/internal/staticconvert/testdata/promtail_prom.river index 1744d37aee5c..a80fad7ba5e2 100644 --- a/internal/converter/internal/staticconvert/testdata/promtail_prom.river +++ b/internal/converter/internal/staticconvert/testdata/promtail_prom.river @@ -69,6 +69,7 @@ loki.source.file "logs_name_jobName" { min_poll_frequency = "1s" max_poll_frequency = "5s" } + legacy_positions_file = "/path/name.yml" } loki.write "logs_name" { @@ -120,6 +121,7 @@ loki.source.file "logs_name2_jobName" { min_poll_frequency = "1s" max_poll_frequency = "5s" } + legacy_positions_file = "/path/name2.yml" } loki.write "logs_name2" { From 1a035e4f2768d7ff9e9be63d8ae6bc730c5cd31d Mon Sep 17 00:00:00 2001 From: Paschalis Tsilias Date: Wed, 3 Apr 2024 10:39:01 +0300 Subject: [PATCH 24/83] flow: separate component and controller path IDs into new labels (#6786) Signed-off-by: Paschalis Tsilias --- internal/flow/internal/controller/loader.go | 14 ++++- internal/flow/internal/controller/metrics.go | 16 +++--- .../controller/node_builtin_component.go | 6 +- .../controller/node_builtin_component_test.go | 25 +++++++++ .../internal/controller/node_config_import.go | 6 +- .../controller/node_custom_component.go | 3 +- .../alerts/controller.libsonnet | 2 +- .../dashboards/controller.libsonnet | 4 +- .../dashboards/prometheus.libsonnet | 55 ++++++++++--------- 9 files changed, 86 insertions(+), 45 deletions(-) diff --git a/internal/flow/internal/controller/loader.go 
b/internal/flow/internal/controller/loader.go index d9b328160497..2514d9f4c873 100644 --- a/internal/flow/internal/controller/loader.go +++ b/internal/flow/internal/controller/loader.go @@ -75,12 +75,14 @@ func NewLoader(opts LoaderOptions) *Loader { reg = opts.ComponentRegistry ) + parent, id := splitPath(globals.ControllerID) + if reg == nil { reg = NewDefaultComponentRegistry(opts.ComponentGlobals.MinStability) } l := &Loader{ - log: log.With(globals.Logger, "controller_id", globals.ControllerID), + log: log.With(globals.Logger, "controller_path", parent, "controller_id", id), tracer: tracing.WrapTracerForLoader(globals.TraceProvider, globals.ControllerID), globals: globals, services: services, @@ -99,9 +101,9 @@ func NewLoader(opts LoaderOptions) *Loader { graph: &dag.Graph{}, originalGraph: &dag.Graph{}, cache: newValueCache(), - cm: newControllerMetrics(globals.ControllerID), + cm: newControllerMetrics(parent, id), } - l.cc = newControllerCollector(l, globals.ControllerID) + l.cc = newControllerCollector(l, parent, id) if globals.Registerer != nil { globals.Registerer.MustRegister(l.cc) @@ -909,3 +911,9 @@ func (l *Loader) collectCustomComponentReferences(stmts ast.Body, uniqueReferenc } } } + +func splitPath(id string) (string, string) { + parent, id := path.Split(id) + parent, _ = strings.CutSuffix(parent, "/") + return "/" + parent, id +} diff --git a/internal/flow/internal/controller/metrics.go b/internal/flow/internal/controller/metrics.go index e60d8a51e391..f640f11b3253 100644 --- a/internal/flow/internal/controller/metrics.go +++ b/internal/flow/internal/controller/metrics.go @@ -18,7 +18,7 @@ type controllerMetrics struct { } // newControllerMetrics inits the metrics for the components controller -func newControllerMetrics(id string) *controllerMetrics { +func newControllerMetrics(parent, id string) *controllerMetrics { cm := &controllerMetrics{ slowComponentThreshold: 1 * time.Minute, } @@ -31,14 +31,14 @@ func newControllerMetrics(id string) 
*controllerMetrics { cm.controllerEvaluation = prometheus.NewGauge(prometheus.GaugeOpts{ Name: "agent_component_controller_evaluating", Help: "Tracks if the controller is currently in the middle of a graph evaluation", - ConstLabels: map[string]string{"controller_id": id}, + ConstLabels: map[string]string{"controller_path": parent, "controller_id": id}, }) cm.componentEvaluationTime = prometheus.NewHistogram( prometheus.HistogramOpts{ Name: "agent_component_evaluation_seconds", Help: "Time spent performing component evaluation", - ConstLabels: map[string]string{"controller_id": id}, + ConstLabels: map[string]string{"controller_path": parent, "controller_id": id}, Buckets: evaluationTimesBuckets, NativeHistogramBucketFactor: 1.1, NativeHistogramMaxBucketNumber: 100, @@ -49,7 +49,7 @@ func newControllerMetrics(id string) *controllerMetrics { prometheus.HistogramOpts{ Name: "agent_component_dependencies_wait_seconds", Help: "Time spent by components waiting to be evaluated after their dependency is updated.", - ConstLabels: map[string]string{"controller_id": id}, + ConstLabels: map[string]string{"controller_path": parent, "controller_id": id}, Buckets: evaluationTimesBuckets, NativeHistogramBucketFactor: 1.1, NativeHistogramMaxBucketNumber: 100, @@ -60,13 +60,13 @@ func newControllerMetrics(id string) *controllerMetrics { cm.evaluationQueueSize = prometheus.NewGauge(prometheus.GaugeOpts{ Name: "agent_component_evaluation_queue_size", Help: "Tracks the number of components waiting to be evaluated in the worker pool", - ConstLabels: map[string]string{"controller_id": id}, + ConstLabels: map[string]string{"controller_path": parent, "controller_id": id}, }) cm.slowComponentEvaluationTime = prometheus.NewCounterVec(prometheus.CounterOpts{ Name: "agent_component_evaluation_slow_seconds", Help: fmt.Sprintf("Number of seconds spent evaluating components that take longer than %v to evaluate", cm.slowComponentThreshold), - ConstLabels: map[string]string{"controller_id": id}, + 
ConstLabels: map[string]string{"controller_path": parent, "controller_id": id}, }, []string{"component_id"}) return cm @@ -100,14 +100,14 @@ type controllerCollector struct { runningComponentsTotal *prometheus.Desc } -func newControllerCollector(l *Loader, id string) *controllerCollector { +func newControllerCollector(l *Loader, parent, id string) *controllerCollector { return &controllerCollector{ l: l, runningComponentsTotal: prometheus.NewDesc( "agent_component_controller_running_components", "Total number of running components.", []string{"health_type"}, - map[string]string{"controller_id": id}, + map[string]string{"controller_path": parent, "controller_id": id}, ), } } diff --git a/internal/flow/internal/controller/node_builtin_component.go b/internal/flow/internal/controller/node_builtin_component.go index 028ca2362777..6bf5c6bf36e5 100644 --- a/internal/flow/internal/controller/node_builtin_component.go +++ b/internal/flow/internal/controller/node_builtin_component.go @@ -165,11 +165,13 @@ func NewBuiltinComponentNode(globals ComponentGlobals, reg component.Registratio func getManagedOptions(globals ComponentGlobals, cn *BuiltinComponentNode) component.Options { cn.registry = prometheus.NewRegistry() + parent, id := splitPath(cn.globalID) return component.Options{ ID: cn.globalID, - Logger: log.With(globals.Logger, "component", cn.globalID), + Logger: log.With(globals.Logger, "component_path", parent, "component_id", id), Registerer: prometheus.WrapRegistererWith(prometheus.Labels{ - "component_id": cn.globalID, + "component_path": parent, + "component_id": id, }, cn.registry), Tracer: tracing.WrapTracer(globals.TraceProvider, cn.globalID), diff --git a/internal/flow/internal/controller/node_builtin_component_test.go b/internal/flow/internal/controller/node_builtin_component_test.go index 3be8307dd828..9e777490cd7f 100644 --- a/internal/flow/internal/controller/node_builtin_component_test.go +++ 
b/internal/flow/internal/controller/node_builtin_component_test.go @@ -37,3 +37,28 @@ func TestLocalID(t *testing.T) { }) require.Equal(t, "/data/local.id", filepath.ToSlash(mo.DataPath)) } + +func TestSplitPath(t *testing.T) { + var testcases = []struct { + input string + path string + id string + }{ + {"", "/", ""}, + {"remotecfg", "/", "remotecfg"}, + {"prometheus.remote_write", "/", "prometheus.remote_write"}, + {"custom_component.default/prometheus.remote_write", "/custom_component.default", "prometheus.remote_write"}, + + {"local.file.default", "/", "local.file.default"}, + {"a_namespace.a.default/local.file.default", "/a_namespace.a.default", "local.file.default"}, + {"a_namespace.a.default/b_namespace.b.default/local.file.default", "/a_namespace.a.default/b_namespace.b.default", "local.file.default"}, + + {"a_namespace.a.default/b_namespace.b.default/c_namespace.c.default", "/a_namespace.a.default/b_namespace.b.default", "c_namespace.c.default"}, + } + + for _, tt := range testcases { + path, id := splitPath(tt.input) + require.Equal(t, tt.path, path) + require.Equal(t, tt.id, id) + } +} diff --git a/internal/flow/internal/controller/node_config_import.go b/internal/flow/internal/controller/node_config_import.go index 7cc95bdb4d1f..e87a1223cc5b 100644 --- a/internal/flow/internal/controller/node_config_import.go +++ b/internal/flow/internal/controller/node_config_import.go @@ -90,11 +90,13 @@ func NewImportConfigNode(block *ast.BlockStmt, globals ComponentGlobals, sourceT func getImportManagedOptions(globals ComponentGlobals, cn *ImportConfigNode) component.Options { cn.registry = prometheus.NewRegistry() + parent, id := splitPath(cn.globalID) return component.Options{ ID: cn.globalID, - Logger: log.With(globals.Logger, "config", cn.globalID), + Logger: log.With(globals.Logger, "config_path", parent, "config_id", id), Registerer: prometheus.WrapRegistererWith(prometheus.Labels{ - "config_id": cn.globalID, + "config_path": parent, + "config_id": id, }, 
cn.registry), Tracer: tracing.WrapTracer(globals.TraceProvider, cn.globalID), DataPath: filepath.Join(globals.DataPath, cn.globalID), diff --git a/internal/flow/internal/controller/node_custom_component.go b/internal/flow/internal/controller/node_custom_component.go index 8b3e05a74e89..d0d53d804e5c 100644 --- a/internal/flow/internal/controller/node_custom_component.go +++ b/internal/flow/internal/controller/node_custom_component.go @@ -104,6 +104,7 @@ func NewCustomComponentNode(globals ComponentGlobals, b *ast.BlockStmt, getConfi componentName := b.GetBlockName() importNamespace, customComponentName := ExtractImportAndDeclare(componentName) + parent, node := splitPath(globalID) cn := &CustomComponentNode{ id: id, @@ -115,7 +116,7 @@ func NewCustomComponentNode(globals ComponentGlobals, b *ast.BlockStmt, getConfi customComponentName: customComponentName, moduleController: globals.NewModuleController(globalID), OnBlockNodeUpdate: globals.OnBlockNodeUpdate, - logger: log.With(globals.Logger, "component", globalID), + logger: log.With(globals.Logger, "component_path", parent, "component_id", node), getConfig: getConfig, block: b, diff --git a/operations/agent-flow-mixin/alerts/controller.libsonnet b/operations/agent-flow-mixin/alerts/controller.libsonnet index 3aeb5eabbb10..5e3454ecb2c8 100644 --- a/operations/agent-flow-mixin/alerts/controller.libsonnet +++ b/operations/agent-flow-mixin/alerts/controller.libsonnet @@ -6,7 +6,7 @@ alert.newGroup( // Component evaluations are taking too long, which can lead to e.g. stale targets. 
alert.newRule( 'SlowComponentEvaluations', - 'sum by (cluster, namespace, component_id) (rate(agent_component_evaluation_slow_seconds[10m])) > 0', + 'sum by (cluster, namespace, component_path, component_id) (rate(agent_component_evaluation_slow_seconds[10m])) > 0', 'Flow component evaluations are taking too long.', '15m', ), diff --git a/operations/agent-flow-mixin/dashboards/controller.libsonnet b/operations/agent-flow-mixin/dashboards/controller.libsonnet index ec059de98189..e58aa00e2a47 100644 --- a/operations/agent-flow-mixin/dashboards/controller.libsonnet +++ b/operations/agent-flow-mixin/dashboards/controller.libsonnet @@ -264,10 +264,10 @@ local filename = 'agent-flow-controller.json'; panel.withQueries([ panel.newQuery( expr=||| - sum by (component_id) (rate(agent_component_evaluation_slow_seconds{cluster="$cluster", namespace="$namespace"}[$__rate_interval])) + sum by (component_path, component_id) (rate(agent_component_evaluation_slow_seconds{cluster="$cluster", namespace="$namespace"}[$__rate_interval])) / scalar(sum(rate(agent_component_evaluation_seconds_sum{cluster="$cluster", namespace="$namespace"}[$__rate_interval]))) |||, - legendFormat='{{component_id}}', + legendFormat='{{component path}} {{component_id}}', ), ]) ), diff --git a/operations/agent-flow-mixin/dashboards/prometheus.libsonnet b/operations/agent-flow-mixin/dashboards/prometheus.libsonnet index 21ae79f3b063..d88f41e82662 100644 --- a/operations/agent-flow-mixin/dashboards/prometheus.libsonnet +++ b/operations/agent-flow-mixin/dashboards/prometheus.libsonnet @@ -105,13 +105,13 @@ local remoteWritePanels(y_offset) = [ panel.withQueries([ panel.newQuery( expr=||| - sum by (instance, component_id) ( - prometheus_remote_storage_highest_timestamp_in_seconds{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"$component"} + sum by (instance, component_path, component_id) ( + prometheus_remote_storage_highest_timestamp_in_seconds{cluster="$cluster", 
namespace="$namespace", instance=~"$instance", component_path=~"$component_path", component_id=~"$component"} - ignoring(url, remote_name) group_right(instance) - prometheus_remote_storage_queue_highest_sent_timestamp_seconds{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"$component", url=~"$url"} + prometheus_remote_storage_queue_highest_sent_timestamp_seconds{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_path=~"$component_path", component_id=~"$component", url=~"$url"} ) |||, - legendFormat='{{instance}} / {{component_id}}', + legendFormat='{{instance}} / {{component_path}} {{component_id}}', ), ]) ), @@ -130,11 +130,11 @@ local remoteWritePanels(y_offset) = [ panel.newQuery( expr=||| sum without (remote_name, url) ( - rate(prometheus_remote_storage_bytes_total{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"$component", url=~"$url"}[$__rate_interval]) + - rate(prometheus_remote_storage_metadata_bytes_total{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"$component", url=~"$url"}[$__rate_interval]) + rate(prometheus_remote_storage_bytes_total{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_path=~"$component_path", component_id=~"$component", url=~"$url"}[$__rate_interval]) + + rate(prometheus_remote_storage_metadata_bytes_total{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_path=~"$component_path", component_id=~"$component", url=~"$url"}[$__rate_interval]) ) |||, - legendFormat='{{instance}} / {{component_id}}', + legendFormat='{{instance}} / {{component_path}} {{component_id}}', ), ]) ), @@ -152,7 +152,7 @@ local remoteWritePanels(y_offset) = [ panel.newQuery( expr=||| histogram_quantile(0.99, sum by (le) ( - rate(prometheus_remote_storage_sent_batch_duration_seconds_bucket{cluster="$cluster",namespace="$namespace",instance=~"$instance", component_id=~"$component", 
url=~"$url"}[$__rate_interval]) + rate(prometheus_remote_storage_sent_batch_duration_seconds_bucket{cluster="$cluster",namespace="$namespace",instance=~"$instance", component_path=~"$component_path", component_id=~"$component", url=~"$url"}[$__rate_interval]) )) |||, legendFormat='99th percentile', @@ -160,15 +160,15 @@ local remoteWritePanels(y_offset) = [ panel.newQuery( expr=||| histogram_quantile(0.50, sum by (le) ( - rate(prometheus_remote_storage_sent_batch_duration_seconds_bucket{cluster="$cluster",namespace="$namespace",instance=~"$instance", component_id=~"$component", url=~"$url"}[$__rate_interval]) + rate(prometheus_remote_storage_sent_batch_duration_seconds_bucket{cluster="$cluster",namespace="$namespace",instance=~"$instance", component_path=~"$component_path", component_id=~"$component", url=~"$url"}[$__rate_interval]) )) |||, legendFormat='50th percentile', ), panel.newQuery( expr=||| - sum(rate(prometheus_remote_storage_sent_batch_duration_seconds_sum{cluster="$cluster",namespace="$namespace",instance=~"$instance", component_id=~"$component"}[$__rate_interval])) / - sum(rate(prometheus_remote_storage_sent_batch_duration_seconds_count{cluster="$cluster",namespace="$namespace",instance=~"$instance", component_id=~"$component"}[$__rate_interval])) + sum(rate(prometheus_remote_storage_sent_batch_duration_seconds_sum{cluster="$cluster",namespace="$namespace",instance=~"$instance", component_path=~"$component_path", component_id=~"$component"}[$__rate_interval])) / + sum(rate(prometheus_remote_storage_sent_batch_duration_seconds_count{cluster="$cluster",namespace="$namespace",instance=~"$instance", component_path=~"$component_path", component_id=~"$component"}[$__rate_interval])) |||, legendFormat='Average', ), @@ -223,15 +223,15 @@ local remoteWritePanels(y_offset) = [ panel.newQuery( expr=||| sum without (remote_name, url) ( - prometheus_remote_storage_shards{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"$component", 
url=~"$url"} + prometheus_remote_storage_shards{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_path=~"$component_path", component_id=~"$component", url=~"$url"} ) |||, - legendFormat='{{instance}} / {{component_id}}', + legendFormat='{{instance}} / {{component_path}} {{component_id}}', ), panel.newQuery( expr=||| min ( - prometheus_remote_storage_shards_min{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"$component", url=~"$url"} + prometheus_remote_storage_shards_min{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_path=~"$component_path", component_id=~"$component", url=~"$url"} ) |||, legendFormat='Minimum', @@ -239,7 +239,7 @@ local remoteWritePanels(y_offset) = [ panel.newQuery( expr=||| max ( - prometheus_remote_storage_shards_max{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"$component", url=~"$url"} + prometheus_remote_storage_shards_max{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_path=~"$component_path", component_id=~"$component", url=~"$url"} ) |||, legendFormat='Maximum', @@ -260,10 +260,10 @@ local remoteWritePanels(y_offset) = [ panel.newQuery( expr=||| sum without (url, remote_name) ( - rate(prometheus_remote_storage_samples_total{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"$component", url=~"$url"}[$__rate_interval]) + rate(prometheus_remote_storage_samples_total{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_path=~"$component_path", component_id=~"$component", url=~"$url"}[$__rate_interval]) ) |||, - legendFormat='{{instance}} / {{component_id}}', + legendFormat='{{instance}} / {{component_path}} {{component_id}}', ), ]) ), @@ -282,10 +282,10 @@ local remoteWritePanels(y_offset) = [ panel.newQuery( expr=||| sum without (url,remote_name) ( - rate(prometheus_remote_storage_samples_failed_total{cluster="$cluster", 
namespace="$namespace", instance=~"$instance", component_id=~"$component", url=~"$url"}[$__rate_interval]) + rate(prometheus_remote_storage_samples_failed_total{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_path=~"$component_path", component_id=~"$component", url=~"$url"}[$__rate_interval]) ) |||, - legendFormat='{{instance}} / {{component_id}}', + legendFormat='{{instance}} / {{component_path}} {{component_id}}', ), ]) ), @@ -304,10 +304,10 @@ local remoteWritePanels(y_offset) = [ panel.newQuery( expr=||| sum without (url,remote_name) ( - rate(prometheus_remote_storage_samples_retried_total{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"$component", url=~"$url"}[$__rate_interval]) + rate(prometheus_remote_storage_samples_retried_total{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_path=~"$component_path", component_id=~"$component", url=~"$url"}[$__rate_interval]) ) |||, - legendFormat='{{instance}} / {{component_id}}', + legendFormat='{{instance}} / {{component_path}} {{component_id}}', ), ]) ), @@ -333,7 +333,7 @@ local remoteWritePanels(y_offset) = [ panel.withQueries([ panel.newQuery( expr=||| - sum(agent_wal_storage_active_series{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"$component", url=~"$url"}) + sum(agent_wal_storage_active_series{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_path=~"$component_path", component_id=~"$component", url=~"$url"}) |||, legendFormat='Series', ), @@ -356,9 +356,9 @@ local remoteWritePanels(y_offset) = [ panel.withQueries([ panel.newQuery( expr=||| - agent_wal_storage_active_series{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id!="", component_id=~"$component", url=~"$url"} + agent_wal_storage_active_series{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id!="", component_path=~"$component_path", 
component_id=~"$component", url=~"$url"} |||, - legendFormat='{{instance}} / {{component_id}}', + legendFormat='{{instance}} / {{component_path}} {{component_id}}', ), ]) ), @@ -379,9 +379,9 @@ local remoteWritePanels(y_offset) = [ panel.withQueries([ panel.newQuery( expr=||| - sum by (component_id) (agent_wal_storage_active_series{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id!="", component_id=~"$component", url=~"$url"}) + sum by (component_path, component_id) (agent_wal_storage_active_series{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id!="", component_path=~"$component_path", component_id=~"$component", url=~"$url"}) |||, - legendFormat='{{component_id}}', + legendFormat='{{component_path}} {{component_id}}', ), ]) ), @@ -406,6 +406,9 @@ local remoteWritePanels(y_offset) = [ dashboard.newMultiTemplateVariable('instance', ||| label_values(agent_component_controller_running_components{cluster="$cluster", namespace="$namespace"}, instance) |||), + dashboard.newMultiTemplateVariable('component_path', ||| + label_values(agent_wal_samples_appended_total{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"prometheus\\.remote_write\\..*", component_path=~".*"}, component_path) + |||), dashboard.newMultiTemplateVariable('component', ||| label_values(agent_wal_samples_appended_total{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"prometheus\\.remote_write\\..*"}, component_id) |||), From 2f0ac737e23c9fbacec839f01ee1630f26cdcba4 Mon Sep 17 00:00:00 2001 From: Piotr <17101802+thampiotr@users.noreply.github.com> Date: Wed, 3 Apr 2024 09:54:52 +0100 Subject: [PATCH 25/83] Update postgres exporter (#6780) * Update postgres exporter * Make collector work with first DSN * Enhance docs and test * Update depcheck --- .github/depcheck.yml | 2 +- CHANGELOG.md | 19 +++++- .../prometheus.exporter.postgres.md | 19 +++++- go.mod | 3 +- go.sum | 39 
+----------- .../prometheus/exporter/postgres/postgres.go | 25 ++++++-- .../exporter/postgres/postgres_test.go | 49 ++++++++++++++- .../postgres_exporter/postgres_exporter.go | 60 ++++++++++++++----- 8 files changed, 149 insertions(+), 67 deletions(-) diff --git a/.github/depcheck.yml b/.github/depcheck.yml index cc72fb1f6fc7..b3eeb7c9946b 100644 --- a/.github/depcheck.yml +++ b/.github/depcheck.yml @@ -23,7 +23,7 @@ github_repos: - github.com/google/dnsmasq_exporter v0.2.0 - github.com/ncabatoff/process-exporter v0.7.5 - github.com/prometheus/mysqld_exporter v0.13.0 - - github.com/prometheus-community/postgres_exporter v0.10.0 + - github.com/prometheus-community/postgres_exporter v0.15.0 - github.com/prometheus-community/windows_exporter v0.16.0 - github.com/percona/mongodb_exporter v0.20.7 - project: github.com/prometheus/prometheus diff --git a/CHANGELOG.md b/CHANGELOG.md index e9476af3aa28..a99df03268d8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,7 +16,21 @@ Main (unreleased) 4317 to 55678 to align with upstream. (@rfratto) - The default sync interval for `mimir.rules.kubernetes` has changed from `30s` - to `5m` to reduce load on Mimir. (@56quarters) + to `5m` to reduce load on Mimir. (@56quarters) + +- `prometheus.exporter.postgres` has been updated to the latest upstream + version which changes the set of exported metrics. The following metrics were + removed: `pg_stat_database_session_time`, `pg_stat_database_sessions`, + `pg_stat_database_sessions_abandoned`, `pg_stat_database_sessions_fatal`, + `pg_stat_database_sessions_killed`, `pg_stat_database_idle_in_transaction_time`, + `pg_stat_database_checksum_failures`, `pg_stat_database_checksum_last_failure`, + `pg_stat_database_active_time`. 
The following metrics were + renamed: `pg_stat_bgwriter_buffers_alloc`, `pg_stat_bgwriter_buffers_backend`, + `pg_stat_bgwriter_buffers_backend_fsync`, `pg_stat_bgwriter_buffers_checkpoint`, + `pg_stat_bgwriter_buffers_clean`, `pg_stat_bgwriter_checkpoint_sync_time`, + `pg_stat_bgwriter_checkpoint_write_time`, `pg_stat_bgwriter_checkpoints_req`, + `pg_stat_bgwriter_checkpoints_timed`, `pg_stat_bgwriter_maxwritten_clean`, + `pg_stat_bgwriter_stats_reset` - the new names include the `_total` suffix. (@thampiotr) ### Enhancements @@ -59,6 +73,9 @@ Main (unreleased) - The `import.git` config block did not work with branches or tags this now fixes that behavior. (@mattdurham) +- Fixed an issue where creating a `prometheus.exporter.postgres` component with + multiple `data_source_names` would result in an error. (@thampiotr) + ### Other changes - Clustering for Grafana Agent in Flow mode has graduated from beta to stable. diff --git a/docs/sources/flow/reference/components/prometheus.exporter.postgres.md b/docs/sources/flow/reference/components/prometheus.exporter.postgres.md index d5f6cc78ea5e..5778217cfa95 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.postgres.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.postgres.md @@ -32,11 +32,12 @@ prometheus.exporter.postgres "LABEL" { The following arguments are supported: | Name | Type | Description | Default | Required | -| ---------------------------- | -------------- | ----------------------------------------------------------------------------- | ------- | -------- | +|------------------------------|----------------|-------------------------------------------------------------------------------|---------|----------| | `data_source_names` | `list(secret)` | Specifies the Postgres server(s) to connect to. | | yes | | `disable_settings_metrics` | `bool` | Disables collection of metrics from pg_settings. 
| `false` | no | | `disable_default_metrics` | `bool` | When `true`, only exposes metrics supplied from `custom_queries_config_path`. | `false` | no | | `custom_queries_config_path` | `string` | Path to YAML file containing custom queries to expose as metrics. | "" | no | +| `enabled_collectors` | `list(string)` | List of collectors to enable. See below for more detail. | [] | no | The format for connection strings in `data_source_names` can be found in the [official postgresql documentation](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING). @@ -44,6 +45,22 @@ See examples for the `custom_queries_config_path` file in the [postgres_exporter **NOTE**: There are a number of environment variables that are not recommended for use, as they will affect _all_ `prometheus.exporter.postgres` components. A full list can be found in the [postgres_exporter repository](https://github.com/prometheus-community/postgres_exporter#environment-variables). +By default, the same set of metrics is enabled as in the upstream [postgres_exporter](https://github.com/prometheus-community/postgres_exporter/). If `custom_queries_config_path` is set, additional metrics defined in the given config file will be exposed. +If `disable_default_metrics` is set to `true`, only the metrics defined in the `custom_queries_config_path` file will be exposed. + +A subset of metrics collectors can be controlled by setting the `enabled_collectors` argument. The following collectors are available for selection: +`database`, `database_wraparound`, `locks`, `long_running_transactions`, `postmaster`, `process_idle`, +`replication`, `replication_slot`, `stat_activity_autovacuum`, `stat_bgwriter`, `stat_database`, +`stat_statements`, `stat_user_tables`, `stat_wal_receiver`, `statio_user_indexes`, `statio_user_tables`, +`wal`, `xlog_location`. 
+ +By default, the following collectors are enabled: `database`, `locks`, `replication`, `replication_slot`, `stat_bgwriter`, `stat_database`, +`stat_user_tables`, `statio_user_tables`, `wal`. + +{{< admonition type="note" >}} +Due to a limitation of the upstream exporter, when multiple `data_source_names` are used, the collectors that are controlled via the `enabled_collectors` argument will only be applied to the first data source in the list. +{{< /admonition >}} + ## Blocks The following blocks are supported: diff --git a/go.mod b/go.mod index bd466bed8759..0ab4536248dc 100644 --- a/go.mod +++ b/go.mod @@ -301,7 +301,6 @@ require ( github.com/aws/smithy-go v1.20.1 // indirect github.com/beevik/ntp v1.3.0 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/blang/semver v3.5.2-0.20180723201105-3c1074078d32+incompatible // indirect github.com/blang/semver/v4 v4.0.0 // indirect github.com/boynux/squid-exporter v1.10.5-0.20230618153315-c1fae094e18e github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b // indirect @@ -752,7 +751,7 @@ replace ( // https://github.com/grafana/cadvisor/tree/grafana-v0.47-noglobals github.com/google/cadvisor => github.com/grafana/cadvisor v0.0.0-20231110094609-5f7917925dea - github.com/prometheus-community/postgres_exporter => github.com/grafana/postgres_exporter v0.8.1-0.20210722175051-db35d7c2f520 + github.com/prometheus-community/postgres_exporter => github.com/grafana/postgres_exporter v0.15.1-0.20240402092333-fad5f95ea113 // exporter-package-v0.15.0 branch // TODO(marctc): remove once this PR is merged upstream: https://github.com/prometheus/mysqld_exporter/pull/774 github.com/prometheus/mysqld_exporter => github.com/grafana/mysqld_exporter v0.12.2-0.20231005125903-364b9c41e595 diff --git a/go.sum b/go.sum index 1fe561b288b1..d571819b47d9 100644 --- a/go.sum +++ b/go.sum @@ 
-309,11 +309,9 @@ github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN github.com/aws/aws-sdk-go v1.30.27/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= github.com/aws/aws-sdk-go v1.34.34/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.38.68/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.50.27 h1:96ifhrSuja+AzdP3W/T2337igqVQ2FcSIJYkk+0rCeA= github.com/aws/aws-sdk-go v1.50.27/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= -github.com/aws/aws-sdk-go-v2 v1.7.0/go.mod h1:tb9wi5s61kTDA5qCkcDbt3KRVV74GGslQkl/DRdX/P4= github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= github.com/aws/aws-sdk-go-v2 v1.25.2 h1:/uiG1avJRgLGiQM9X3qJM8+Qa6KRGK5rRPuXE0HUM+w= github.com/aws/aws-sdk-go-v2 v1.25.2/go.mod h1:Evoc5AsmtveRt1komDwIsjHFyrP5tDuF1D1U+6z6pNo= @@ -348,7 +346,6 @@ github.com/aws/aws-sdk-go-v2/service/apigatewayv2 v1.19.0/go.mod h1:sfDv1ZbBmaID github.com/aws/aws-sdk-go-v2/service/appconfig v1.4.2/go.mod h1:FZ3HkCe+b10uFZZkFdvf98LHW21k49W8o8J366lqVKY= github.com/aws/aws-sdk-go-v2/service/autoscaling v1.38.0 h1:BnElrrgowaG50hoUCbBc5lq5XX7Fr7F4nvZovCDjevk= github.com/aws/aws-sdk-go-v2/service/autoscaling v1.38.0/go.mod h1:6ioQn0JPZSvTdXmnUAQa9h7x8m+KU63rkgiAD1ZLnqc= -github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.5.0/go.mod h1:acH3+MQoiMzozT/ivU+DbRg7Ooo2298RdRaWcOv+4vM= github.com/aws/aws-sdk-go-v2/service/databasemigrationservice v1.36.0 h1:aQD36/NeII5cKl5tDgGgFRIIVCVofPsYQ/tYJnlVkqY= github.com/aws/aws-sdk-go-v2/service/databasemigrationservice v1.36.0/go.mod 
h1:EF/UkL+0uEqcqr0sKFJJIT3Jbcxgt2oWz9R0vaLNSVU= github.com/aws/aws-sdk-go-v2/service/ec2 v1.147.0 h1:m9+QgPg/qzlxL0Oxb/dD12jzeWfuQGn9XqCWyDAipi8= @@ -380,7 +377,6 @@ github.com/aws/aws-sdk-go-v2/service/storagegateway v1.26.0/go.mod h1:vs7VbPSVlT github.com/aws/aws-sdk-go-v2/service/sts v1.7.2/go.mod h1:8EzeIqfWt2wWT4rJVu3f21TfrhJ8AEMzVybRNSb/b4g= github.com/aws/aws-sdk-go-v2/service/sts v1.28.1 h1:3I2cBEYgKhrWlwyZgfpSO2BpaMY1LHPqXYk/QGlu2ew= github.com/aws/aws-sdk-go-v2/service/sts v1.28.1/go.mod h1:uQ7YYKZt3adCRrdCBREm1CD3efFLOUNH77MrUCvx5oA= -github.com/aws/smithy-go v1.5.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= github.com/aws/smithy-go v1.20.1 h1:4SZlSlMr36UEqC7XOyRVb27XMeZubNcBNN+9IgEPIQw= github.com/aws/smithy-go v1.20.1/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E= @@ -406,9 +402,6 @@ github.com/bitly/go-hostpool v0.1.0/go.mod h1:4gOCgp6+NZnVqlKyZ/iBZFTAJKembaVENU github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= github.com/bkaradzic/go-lz4 v1.0.0 h1:RXc4wYsyz985CkXXeX04y4VnZFGG8Rd43pRaHsOXAKk= github.com/bkaradzic/go-lz4 v1.0.0/go.mod h1:0YdlkowM3VswSROI7qDxhRvJ3sLhlFrRRwjwegp5jy4= -github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/blang/semver v3.5.2-0.20180723201105-3c1074078d32+incompatible h1:8fBbhRkI5/0ocLFbrhPgnGUm0ogc+Gko1cRodPWDKX4= -github.com/blang/semver v3.5.2-0.20180723201105-3c1074078d32+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/bmatcuk/doublestar v1.3.4 
h1:gPypJ5xD31uhX6Tf54sDPUOBXTqKH4c9aPY66CyQrS0= @@ -424,7 +417,6 @@ github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b h1:6+ZFm0flnudZzdS github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b/go.mod h1:S/7n9copUssQ56c7aAgHqftWO4LTf4xY6CGWt8Bc+3M= github.com/caio/go-tdigest v2.3.0+incompatible/go.mod h1:sHQM/ubZStBUmF1WbB8FAm8q9GjDajLC5T7ydxE3JHI= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= -github.com/casbin/casbin/v2 v2.31.6/go.mod h1:vByNa/Fchek0KZUgG5wEsl7iFsiviAYKRtgrQfcJqHg= github.com/cenkalti/backoff v2.0.0+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= @@ -719,7 +711,6 @@ github.com/go-jose/go-jose/v3 v3.0.3/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQr github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= -github.com/go-kit/kit v0.11.0/go.mod h1:73/6Ixaufkvb5Osvkls8C79vuQ49Ba1rUEUYNSf+FUw= github.com/go-kit/kit v0.13.0 h1:OoneCcHKHQ03LfBpoQCUfCluwd2Vt3ohz+kvbJneZAU= github.com/go-kit/kit v0.13.0/go.mod h1:phqEHMMUbyrCFCTgH48JueqrM3md2HcAZ8N3XE4FKDg= github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= @@ -812,7 +803,6 @@ github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg= github.com/go-test/deep v1.1.0/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 h1:TQcrn6Wq+sKGkpyPvppOz99zsMBaUOKXq6HSv655U1c= 
github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= -github.com/go-zookeeper/zk v1.0.2/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg= github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= @@ -1064,8 +1054,8 @@ github.com/grafana/opentelemetry-collector/otelcol v0.0.0-20240321103955-8919a1c github.com/grafana/opentelemetry-collector/otelcol v0.0.0-20240321103955-8919a1c85cbe/go.mod h1:Xo58hEmoZFLyOIs9Wk400ME9gEFV+ttxCGcls6NxbhI= github.com/grafana/opentelemetry-collector/service v0.0.0-20240321103955-8919a1c85cbe h1:LEmmaAnTjtp7pWCsnc8iMfuHIHzDbYIiCXnxpMTOLms= github.com/grafana/opentelemetry-collector/service v0.0.0-20240321103955-8919a1c85cbe/go.mod h1:9El7PPhnV+2xPXLlyileLaUa5mOE+vw6sswmcZBaUlc= -github.com/grafana/postgres_exporter v0.8.1-0.20210722175051-db35d7c2f520 h1:HnFWqxhoSF3WC7sKAdMZ+SRXvHLVZlZ3sbQjuUlTqkw= -github.com/grafana/postgres_exporter v0.8.1-0.20210722175051-db35d7c2f520/go.mod h1:+HPXgiOV0InDHcZ2jNijL1SOKvo0eEPege5fQA0+ICI= +github.com/grafana/postgres_exporter v0.15.1-0.20240402092333-fad5f95ea113 h1:fDgs4u+TGjrv7TIGJObZwKSkOFbhLpnBhnebBU7gWiQ= +github.com/grafana/postgres_exporter v0.15.1-0.20240402092333-fad5f95ea113/go.mod h1:4mAJeXjzbzXMWIffmquWgGR2W0PEV/OiW+AEPb5Ghik= github.com/grafana/prometheus v1.8.2-0.20240130142130-51b39f24d406 h1:LVIOYe5j92m10wluP5hgeHqSkOLnZzcPxhYCkdbLXCE= github.com/grafana/prometheus v1.8.2-0.20240130142130-51b39f24d406/go.mod h1:SRw624aMAxTfryAcP8rOjg4S/sHHaetx2lyJJ2nM83g= github.com/grafana/pyroscope-go/godeltaprof v0.1.7 h1:C11j63y7gymiW8VugJ9ZW0pWfxTZugdSJyC48olk5KY= @@ -1109,14 +1099,12 @@ 
github.com/hashicorp/consul v1.5.1 h1:p7tRmQ4m3ZMYkGQkuyjLXKbdU1weeumgZFqZOvw7o4 github.com/hashicorp/consul v1.5.1/go.mod h1:QsmgXh2YA9Njv6y3/FHXqHYhsMye++3oBoAZ6SR8R8I= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= -github.com/hashicorp/consul/api v1.8.1/go.mod h1:sDjTOq0yUyv5G4h+BqSea7Fn6BU+XbolEz1952UB+mk= github.com/hashicorp/consul/api v1.12.0/go.mod h1:6pVBMo0ebnYdt2S3H87XhekM/HHrUoTD2XXb/VrZVy0= github.com/hashicorp/consul/api v1.13.0/go.mod h1:ZlVrynguJKcYr54zGaDbaL3fOvKC9m72FhPvA8T35KQ= github.com/hashicorp/consul/api v1.27.0 h1:gmJ6DPKQog1426xsdmgk5iqDyoRiNc+ipBdJOqKQFjc= github.com/hashicorp/consul/api v1.27.0/go.mod h1:JkekNRSou9lANFdt+4IKx3Za7XY0JzzpQjEb4Ivo1c8= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/consul/sdk v0.7.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= github.com/hashicorp/consul/sdk v0.15.1 h1:kKIGxc7CZtflcF5DLfHeq7rOQmRq3vk7kwISN9bif8Q= github.com/hashicorp/consul/sdk v0.15.1/go.mod h1:7pxqqhqoaPqnBnzXD1StKed62LqJeClzVsUEy85Zr0A= @@ -1218,7 +1206,6 @@ github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/ github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/memberlist v0.1.4/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/memberlist v0.1.5/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/memberlist v0.2.2/go.mod 
h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM= github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0= @@ -1229,7 +1216,6 @@ github.com/hashicorp/raft v1.0.1-0.20190409200437-d9fe23f7d472/go.mod h1:DVSAWIt github.com/hashicorp/raft-boltdb v0.0.0-20150201200839-d1e82c1ec3f1/go.mod h1:pNv7Wc3ycL6F5oOWn+tPGo2gWD4a5X+yp/ntwdKLjRk= github.com/hashicorp/serf v0.8.1/go.mod h1:h/Ru6tmZazX7WO/GDmwdpS975F019L4t5ng5IgwbNrE= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= @@ -1298,7 +1284,6 @@ github.com/influxdata/go-syslog/v2 v2.0.1/go.mod h1:hjvie1UTaD5E1fTnDmxaCw8RRDrT github.com/influxdata/go-syslog/v3 v3.0.1-0.20230911200830-875f5bc594a4 h1:2r2WiFeAwiJ/uyx1qIKnV1L4C9w/2V8ehlbJY4gjFaM= github.com/influxdata/go-syslog/v3 v3.0.1-0.20230911200830-875f5bc594a4/go.mod h1:1yEQhaLb/cETXCqQmdh7lDjupNAReO7c83AHyK2dJ48= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= -github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= github.com/influxdata/tail v1.0.1-0.20200707181643-03a791b270e4/go.mod h1:VeiWgI3qaGdJWust2fP27a6J+koITo/1c/UhxeOxgaM= github.com/influxdata/tdigest 
v0.0.2-0.20210216194612-fc98d27c9e8b h1:i44CesU68ZBRvtCjBi3QSosCIKrjmMbYlQMFAwVLds4= github.com/influxdata/tdigest v0.0.2-0.20210216194612-fc98d27c9e8b/go.mod h1:Z0kXnxzbTC2qrx4NaIzYkE1k66+6oEDQTvL95hQFh5Y= @@ -1443,7 +1428,6 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/klauspost/asmfmt v1.3.2 h1:4Ri7ox3EwapiOjCki+hw14RyKk201CN4rzyCJRFLpK4= github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE= github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.11.12/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.17.3/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg= @@ -1486,7 +1470,6 @@ github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.10.1/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= @@ -1579,7 +1562,6 @@ github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 h1:AMFGa4R4MiIpsp github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= 
github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3 h1:+n/aFZefKZp7spd8DFdX7uMikMLXX4oubIzJF4kv/wI= github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE= -github.com/minio/highwayhash v1.0.1/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible h1:aKW/4cBs+yK6gpqU3K/oIwk9Q/XICqd3zOX/UFuvqmk= github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= @@ -1658,17 +1640,11 @@ github.com/natefinch/atomic v1.0.1 h1:ZPYKxkqQOx3KZ+RsbnP/YsgvxWQPGxjC0oBt2AhwV0 github.com/natefinch/atomic v1.0.1/go.mod h1:N/D/ELrljoqDyT3rZrsUmtsuzvHkeB/wWjHV22AZRbM= github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= -github.com/nats-io/jwt v1.2.2/go.mod h1:/xX356yQA6LuXI9xWW7mZNpxgF2mBmGecH+Fj34sP5Q= -github.com/nats-io/jwt/v2 v2.0.2/go.mod h1:VRP+deawSXyhNjXmxPCHskrR6Mq50BqpEI5SEcNiGlY= github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= github.com/nats-io/nats-server/v2 v2.1.4/go.mod h1:Jw1Z28soD/QasIA2uWjXyM9El1jly3YwyFOuR8tH1rg= -github.com/nats-io/nats-server/v2 v2.2.6/go.mod h1:sEnFaxqe09cDmfMgACxZbziXnhQFhwk+aKkZjBBRYrI= github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= -github.com/nats-io/nats.go v1.11.0/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w= github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= 
-github.com/nats-io/nkeys v0.2.0/go.mod h1:XdZpAbhgyyODYqjTawOnIOI7VlbKSarI9Gfy1tqEu/s= -github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/ncabatoff/fakescraper v0.0.0-20201102132415-4b37ba603d65/go.mod h1:Tx6UMSMyIsjLG/VU/F6xA1+0XI+/f9o1dGJnf1l+bPg= github.com/ncabatoff/go-seq v0.0.0-20180805175032-b08ef85ed833 h1:t4WWQ9I797y7QUgeEjeXnVb+oYuEDQc6gLvrZJTYo94= @@ -1852,7 +1828,6 @@ github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxS github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= -github.com/openzipkin/zipkin-go v0.2.5/go.mod h1:KpXfKdgRDnnhsxw4pNIH9Md5lyFqKUa4YDFlwRYAMyE= github.com/openzipkin/zipkin-go v0.4.2 h1:zjqfqHjUpPmB3c1GlCvvgsM1G4LkvqQbBDueDOCg/jA= github.com/openzipkin/zipkin-go v0.4.2/go.mod h1:ZeVkFjuuBiSy13y8vpSDCjMi9GoI3hPpCJSBx/EYFhY= github.com/openzipkin/zipkin-go-opentracing v0.3.4/go.mod h1:js2AbwmHW0YD9DwIw2JhQWmbfFi/UnWyYwdVhqbCDOE= @@ -1975,7 +1950,6 @@ github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdD github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/consul_exporter v0.8.0 h1:2z3drFic65WFoHaJRKkmnJRRlBLmmxVqT8L9LO2yxAo= github.com/prometheus/consul_exporter v0.8.0/go.mod h1:KHTgkT1/oLpXKC4+mKZV63hZSMHuKskUnHoenEave4Y= -github.com/prometheus/exporter-toolkit v0.6.0/go.mod h1:ZUBIj498ePooX9t/2xtDjeQYwvRpiPP2lh5u4iblj2g= github.com/prometheus/exporter-toolkit v0.7.0/go.mod 
h1:ZUBIj498ePooX9t/2xtDjeQYwvRpiPP2lh5u4iblj2g= github.com/prometheus/exporter-toolkit v0.7.1/go.mod h1:ZUBIj498ePooX9t/2xtDjeQYwvRpiPP2lh5u4iblj2g= github.com/prometheus/exporter-toolkit v0.11.0 h1:yNTsuZ0aNCNFQ3aFTD2uhPOvr4iD7fdBvKPAEGkNf+g= @@ -2150,9 +2124,7 @@ github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMV github.com/streadway/amqp v0.0.0-20180528204448-e5adc2ada8b8/go.mod h1:1WNBiOZtZQLpVAyu0iTduoJL9hEsMloAK5XWrtW0xdY= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= -github.com/streadway/amqp v1.0.0/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= -github.com/streadway/handy v0.0.0-20200128134331-0f66f006fb2e/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= @@ -2306,16 +2278,12 @@ go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.8 h1:xs88BrvEv273UsB79e0hcVrlUWmS0a8upikMFhSyAtA= go.etcd.io/bbolt v1.3.8/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= -go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= go.etcd.io/etcd/api/v3 v3.5.4/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A= go.etcd.io/etcd/api/v3 v3.5.10 h1:szRajuUUbLyppkhs9K6BRtjY37l66XQQmw7oZRANE4k= 
go.etcd.io/etcd/api/v3 v3.5.10/go.mod h1:TidfmT4Uycad3NM/o25fG3J07odo4GBB9hoxaodFCtI= -go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= go.etcd.io/etcd/client/pkg/v3 v3.5.4/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= go.etcd.io/etcd/client/pkg/v3 v3.5.10 h1:kfYIdQftBnbAq8pUWFXfpuuxFSKzlmM5cSn76JByiT0= go.etcd.io/etcd/client/pkg/v3 v3.5.10/go.mod h1:DYivfIviIuQ8+/lCq4vcxuseg2P2XbHygkKwFo9fc8U= -go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= -go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= go.etcd.io/etcd/client/v3 v3.5.4/go.mod h1:ZaRkVgBZC+L+dLCjTcF1hRXpgZXQPOvnA/Ak/gq3kiY= go.etcd.io/etcd/client/v3 v3.5.10 h1:W9TXNZ+oB3MCd/8UjxHTWK5J9Nquw9fQBLJd5ne5/Ao= go.etcd.io/etcd/client/v3 v3.5.10/go.mod h1:RVeBnDz2PUEZqTpgqwAtUd8nAPf5kjyFyND7P1VkOKc= @@ -2497,13 +2465,11 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= 
-golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= @@ -2684,7 +2650,6 @@ golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/internal/component/prometheus/exporter/postgres/postgres.go b/internal/component/prometheus/exporter/postgres/postgres.go index d67eeafb61f1..5fddac862184 100644 --- a/internal/component/prometheus/exporter/postgres/postgres.go +++ b/internal/component/prometheus/exporter/postgres/postgres.go @@ -27,7 +27,7 @@ func init() { func createExporter(opts component.Options, args component.Arguments, defaultInstanceKey string) (integrations.Integration, string, error) { a := args.(Arguments) - return integrations.NewIntegrationWithInstanceKey(opts.Logger, a.Convert(), defaultInstanceKey) + return integrations.NewIntegrationWithInstanceKey(opts.Logger, a.convert(opts.ID), defaultInstanceKey) 
} func parsePostgresURL(url string) (map[string]string, error) { @@ -80,15 +80,26 @@ type Arguments struct { DataSourceNames []rivertypes.Secret `river:"data_source_names,attr,optional"` // Attributes - DisableSettingsMetrics bool `river:"disable_settings_metrics,attr,optional"` - DisableDefaultMetrics bool `river:"disable_default_metrics,attr,optional"` - CustomQueriesConfigPath string `river:"custom_queries_config_path,attr,optional"` + DisableSettingsMetrics bool `river:"disable_settings_metrics,attr,optional"` + DisableDefaultMetrics bool `river:"disable_default_metrics,attr,optional"` + CustomQueriesConfigPath string `river:"custom_queries_config_path,attr,optional"` + EnabledCollectors []string `river:"enabled_collectors,attr,optional"` // Blocks AutoDiscovery AutoDiscovery `river:"autodiscovery,block,optional"` } -// Autodiscovery controls discovery of databases outside any specified in DataSourceNames. +func (a *Arguments) Validate() error { + if a.DisableDefaultMetrics && a.CustomQueriesConfigPath == "" { + return fmt.Errorf("custom_queries_config_path must be set when disable_default_metrics is true") + } + if a.DisableDefaultMetrics && len(a.EnabledCollectors) != 0 { + return fmt.Errorf("enabled_collectors cannot be set when disable_default_metrics is true") + } + return nil +} + +// AutoDiscovery controls discovery of databases outside any specified in DataSourceNames. 
type AutoDiscovery struct { Enabled bool `river:"enabled,attr,optional"` DatabaseAllowlist []string `river:"database_allowlist,attr,optional"` @@ -100,7 +111,7 @@ func (a *Arguments) SetToDefault() { *a = DefaultArguments } -func (a *Arguments) Convert() *postgres_exporter.Config { +func (a *Arguments) convert(instanceName string) *postgres_exporter.Config { return &postgres_exporter.Config{ DataSourceNames: a.convertDataSourceNames(), DisableSettingsMetrics: a.DisableSettingsMetrics, @@ -109,6 +120,8 @@ func (a *Arguments) Convert() *postgres_exporter.Config { IncludeDatabases: a.AutoDiscovery.DatabaseAllowlist, DisableDefaultMetrics: a.DisableDefaultMetrics, QueryPath: a.CustomQueriesConfigPath, + Instance: instanceName, + EnabledCollectors: a.EnabledCollectors, } } diff --git a/internal/component/prometheus/exporter/postgres/postgres_test.go b/internal/component/prometheus/exporter/postgres/postgres_test.go index b4a84f6c4f9b..ad20bce7e44c 100644 --- a/internal/component/prometheus/exporter/postgres/postgres_test.go +++ b/internal/component/prometheus/exporter/postgres/postgres_test.go @@ -46,8 +46,9 @@ func TestRiverConfigConvert(t *testing.T) { var exampleRiverConfig = ` data_source_names = ["postgresql://username:password@localhost:5432/database?sslmode=disable"] disable_settings_metrics = true - disable_default_metrics = true + disable_default_metrics = false custom_queries_config_path = "/tmp/queries.yaml" + enabled_collectors = ["collector1", "collector2"] autodiscovery { enabled = false @@ -59,7 +60,7 @@ func TestRiverConfigConvert(t *testing.T) { err := river.Unmarshal([]byte(exampleRiverConfig), &args) require.NoError(t, err) - c := args.Convert() + c := args.convert("test-instance") expected := postgres_exporter.Config{ DataSourceNames: []config_util.Secret{config_util.Secret("postgresql://username:password@localhost:5432/database?sslmode=disable")}, @@ -67,12 +68,54 @@ func TestRiverConfigConvert(t *testing.T) { AutodiscoverDatabases: false, 
ExcludeDatabases: []string{"exclude1", "exclude2"}, IncludeDatabases: []string{"include1"}, - DisableDefaultMetrics: true, + DisableDefaultMetrics: false, QueryPath: "/tmp/queries.yaml", + Instance: "test-instance", + EnabledCollectors: []string{"collector1", "collector2"}, } require.Equal(t, expected, *c) } +func TestRiverConfigValidate(t *testing.T) { + var tc = []struct { + name string + args Arguments + expectedErr string + }{ + { + name: "no errors on default config", + args: Arguments{}, + }, + { + name: "missing custom queries file path", + args: Arguments{ + DisableDefaultMetrics: true, + }, + expectedErr: "custom_queries_config_path must be set when disable_default_metrics is true", + }, + { + name: "disabled default metrics with enabled collectors", + args: Arguments{ + DisableDefaultMetrics: true, + CustomQueriesConfigPath: "/tmp/queries.yaml", + EnabledCollectors: []string{"collector1"}, + }, + expectedErr: "enabled_collectors cannot be set when disable_default_metrics is true", + }, + } + + for _, tt := range tc { + t.Run(tt.name, func(t *testing.T) { + err := tt.args.Validate() + if tt.expectedErr == "" { + require.NoError(t, err) + } else { + require.ErrorContains(t, err, tt.expectedErr) + } + }) + } +} + func TestParsePostgresURL(t *testing.T) { dsn := "postgresql://linus:42secret@localhost:5432/postgres?sslmode=disable" expected := map[string]string{ diff --git a/internal/static/integrations/postgres_exporter/postgres_exporter.go b/internal/static/integrations/postgres_exporter/postgres_exporter.go index aa375be8c41f..d1384cc46633 100644 --- a/internal/static/integrations/postgres_exporter/postgres_exporter.go +++ b/internal/static/integrations/postgres_exporter/postgres_exporter.go @@ -6,14 +6,14 @@ import ( "os" "strings" - config_util "github.com/prometheus/common/config" - "github.com/go-kit/log" "github.com/grafana/agent/internal/static/integrations" integrations_v2 
"github.com/grafana/agent/internal/static/integrations/v2" "github.com/grafana/agent/internal/static/integrations/v2/metricsutils" "github.com/lib/pq" - "github.com/prometheus-community/postgres_exporter/exporter" + "github.com/prometheus-community/postgres_exporter/cmd/postgres_exporter" + "github.com/prometheus-community/postgres_exporter/collector" + config_util "github.com/prometheus/common/config" ) // Config controls the postgres_exporter integration. @@ -27,6 +27,16 @@ type Config struct { IncludeDatabases []string `yaml:"include_databases,omitempty"` DisableDefaultMetrics bool `yaml:"disable_default_metrics,omitempty"` QueryPath string `yaml:"query_path,omitempty"` + + //-- The fields below are only used in flow mode and not (yet) exposed in the static mode.-- + + // Instance is used by the flow mode to specify the instance name manually. This is only used when there are multiple + // DSNs provided. + Instance string + // EnabledCollectors is a list of additional collectors to enable. NOTE: Due to limitations of the postgres_exporter, + // this is only used for the first DSN provided and only some collectors can be enabled/disabled this way. See the + // user-facing docs for more information. + EnabledCollectors []string } // Name returns the name of the integration this config is for. @@ -47,6 +57,10 @@ func (c *Config) InstanceKey(_ string) (string, error) { return "", err } if len(dsn) != 1 { + if c.Instance != "" { + return c.Instance, nil + } + // This should not be possible in the flow mode, because `c.Instance` is always set. return "", fmt.Errorf("can't automatically determine a value for `instance` with %d DSN. either use 1 DSN or manually assign a value for `instance` in the integration config", len(dsn)) } @@ -128,23 +142,37 @@ func init() { // New creates a new postgres_exporter integration. The integration scrapes // metrics from a postgres process. 
-func New(log log.Logger, c *Config) (integrations.Integration, error) { - dsn, err := c.getDataSourceNames() +func New(log log.Logger, cfg *Config) (integrations.Integration, error) { + dsns, err := cfg.getDataSourceNames() if err != nil { return nil, err } - e := exporter.NewExporter( - dsn, - log, - exporter.DisableDefaultMetrics(c.DisableDefaultMetrics), - exporter.WithUserQueriesPath(c.QueryPath), - exporter.DisableSettingsMetrics(c.DisableSettingsMetrics), - exporter.AutoDiscoverDatabases(c.AutodiscoverDatabases), - exporter.ExcludeDatabases(strings.Join(c.ExcludeDatabases, ",")), - exporter.IncludeDatabases(strings.Join(c.IncludeDatabases, ",")), - exporter.MetricPrefix("pg"), + e := postgres_exporter.NewExporter( + dsns, + postgres_exporter.DisableDefaultMetrics(cfg.DisableDefaultMetrics), + postgres_exporter.WithUserQueriesPath(cfg.QueryPath), + postgres_exporter.DisableSettingsMetrics(cfg.DisableSettingsMetrics), + postgres_exporter.AutoDiscoverDatabases(cfg.AutodiscoverDatabases), + postgres_exporter.ExcludeDatabases(cfg.ExcludeDatabases), + postgres_exporter.IncludeDatabases(strings.Join(cfg.IncludeDatabases, ",")), + postgres_exporter.WithLogger(log), + postgres_exporter.WithMetricPrefix("pg"), ) - return integrations.NewCollectorIntegration(c.Name(), integrations.WithCollectors(e)), nil + if cfg.DisableDefaultMetrics { + // Don't include the collector metrics if the default metrics are disabled. + return integrations.NewCollectorIntegration(cfg.Name(), integrations.WithCollectors(e)), nil + } + + // On top of the exporter's metrics, the postgres exporter also has metrics exposed via collector package. + // However, these can only work for the first DSN provided. This matches the current implementation of the exporter. + // TODO: Once https://github.com/prometheus-community/postgres_exporter/issues/999 is addressed, update the exporter + // and change this. 
+ c, err := collector.NewPostgresCollector(log, cfg.ExcludeDatabases, dsns[0], cfg.EnabledCollectors) + if err != nil { + return nil, fmt.Errorf("failed to create postgres_exporter collector: %w", err) + } + + return integrations.NewCollectorIntegration(cfg.Name(), integrations.WithCollectors(e, c)), nil } From 53a13ba5c3c5ec416263818300943c4b13234f1c Mon Sep 17 00:00:00 2001 From: kgeckhart Date: Wed, 3 Apr 2024 06:58:03 -0400 Subject: [PATCH 26/83] azure_exporter: Fix bug which prevents subscription scope from working with only a single region (#6719) --- CHANGELOG.md | 2 ++ internal/static/integrations/azure_exporter/azure_exporter.go | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a99df03268d8..268c15b4bca0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -63,6 +63,8 @@ Main (unreleased) - Fix an issue where JSON string array elements were not parsed correctly in `loki.source.cloudflare`. (@thampiotr) +- Fix an issue where the azure exporter was not correctly gathering subscription scoped metrics when only one region was configured (@kgeckhart) + - Update gcp_exporter to a newer version with a patch for incorrect delta histograms (@kgeckhart) - Fix an issue where the default values of some component's arguments change diff --git a/internal/static/integrations/azure_exporter/azure_exporter.go b/internal/static/integrations/azure_exporter/azure_exporter.go index dd8e6b267790..1f18e240e744 100644 --- a/internal/static/integrations/azure_exporter/azure_exporter.go +++ b/internal/static/integrations/azure_exporter/azure_exporter.go @@ -81,7 +81,7 @@ func (e Exporter) MetricsHandler() (http.Handler, error) { // "RunOnSubscriptionScope" uses a different API, https://github.com/Azure/azure-rest-api-specs/blob/main/specification/monitor/resource-manager/Microsoft.Insights/stable/2021-05-01/metrics_API.json#L40, // which can get metric data for all resources in a single API call reducing overhead/likelihood of 
being rate limited. // Limiting to specific resources requires 1 API call per resource to get metrics which can easily lead to rate limiting - if len(settings.Regions) > 1 { + if len(settings.Regions) > 0 { prober.RunOnSubscriptionScope() } else { err = prober.ServiceDiscovery.FindResourceGraph(ctx, settings.Subscriptions, settings.ResourceType, settings.Filter) From 6428b69c66638e9793ace8665a4f41ede17a2a9b Mon Sep 17 00:00:00 2001 From: Robert Fratto Date: Wed, 3 Apr 2024 08:44:42 -0400 Subject: [PATCH 27/83] flowmode: unexpose stability level flag (#6815) This removes the stability level flag, but keeps all the code for stability in place for when the flag is added back. --- CHANGELOG.md | 15 ++++++--------- internal/flowmode/cmd_run.go | 1 - 2 files changed, 6 insertions(+), 10 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 268c15b4bca0..ec8f42a47ae2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -54,9 +54,6 @@ Main (unreleased) ### Features -- Added a new CLI flag `--stability.level` which defines the minimum stability - level required for the features that the agent is allowed to use. Default is `experimental`. (@thampiotr) - - A new `loki.rules.kubernetes` component that discovers `PrometheusRule` Kubernetes resources and loads them into a Loki Ruler instance. (@EStork09) ### Bugfixes @@ -73,7 +70,7 @@ Main (unreleased) - Fix a bug where a panic could occur when reloading custom components. (@wildum) -- The `import.git` config block did not work with branches or tags this now fixes that behavior. (@mattdurham) +- The `import.git` config block did not work with branches or tags this now fixes that behavior. (@mattdurham) - Fixed an issue where creating a `prometheus.exporter.postgres` component with multiple `data_source_names` would result in an error. 
(@thampiotr) @@ -100,9 +97,9 @@ v0.40.3 (2024-03-14) - Upgrade to Go 1.22.1 (@thampiotr) - Upgrade from OpenTelemetry Collector v0.87.0 to v0.96.0: - * [ottl]: Fix bug where named parameters needed a space after the equal sign (`=`) + * [ottl]: Fix bug where named parameters needed a space after the equal sign (`=`) https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/28511 - * [exporters] Additional enqueue_failed metrics + * [exporters] Additional enqueue_failed metrics https://github.com/open-telemetry/opentelemetry-collector/issues/8673 * [otelcol.receiver.kafka]: Fix issue where counting number of logs emitted could cause panic * [otelcol.processor.k8sattributes]: The time format of k8s.pod.start_time attribute value migrated to RFC3339: @@ -115,9 +112,9 @@ https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/26115 https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/27451 * [otelcol.connector.spanmetrics] A new `max_per_data_point` argument for exemplar generation. * https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/29242 - * [ottl] Add IsBool Converter + * [ottl] Add IsBool Converter https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/27897 - * [otelcol.processor.tail_sampling] Optimize memory performance of tailsamplingprocessor + * [otelcol.processor.tail_sampling] Optimize memory performance of tailsamplingprocessor https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/27889 * [otelcol.connector.servicegraph] Add a `metrics_flush_interval` argument. 
https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/27679 @@ -137,7 +134,7 @@ https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/30162 https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/30274 * [ottl] Add Hour converter https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/29468 - * [otelcol.connector.spanmetrics] A new `resource_metrics_key_attributes` argument to fix broken spanmetrics counters + * [otelcol.connector.spanmetrics] A new `resource_metrics_key_attributes` argument to fix broken spanmetrics counters after a span producing service restart, when resource attributes contain dynamic/ephemeral values (e.g. process id). https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/29711 * [ottl] Issue with the hash value of a match group in the replace_pattern editors diff --git a/internal/flowmode/cmd_run.go b/internal/flowmode/cmd_run.go index 3bbd9889a32c..15ed6384503c 100644 --- a/internal/flowmode/cmd_run.go +++ b/internal/flowmode/cmd_run.go @@ -136,7 +136,6 @@ depending on the nature of the reload error. cmd.Flags(). BoolVar(&r.disableReporting, "disable-reporting", r.disableReporting, "Disable reporting of enabled components to Grafana.") cmd.Flags().StringVar(&r.storagePath, "storage.path", r.storagePath, "Base directory where components can store data") - cmd.Flags().Var(&r.minStability, "stability.level", fmt.Sprintf("Minimum stability level of features to enable. 
Supported values: %s", strings.Join(featuregate.AllowedValues(), ", "))) return cmd } From 633ad9d4125b8d227e44a3a020a1dd8187456541 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C4=90=E1=BB=97=20Tr=E1=BB=8Dng=20H=E1=BA=A3i?= <41283691+hainenber@users.noreply.github.com> Date: Wed, 3 Apr 2024 21:08:39 +0700 Subject: [PATCH 28/83] fix(faro/receiver): not download source map if configure `download=false` (#6686) Signed-off-by: hainenber Co-authored-by: Paschalis Tsilias --- CHANGELOG.md | 3 + .../component/faro/receiver/sourcemaps.go | 6 +- .../faro/receiver/sourcemaps_test.go | 82 +++++++++++++++++++ 3 files changed, 87 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ec8f42a47ae2..55c75ad9fdf7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -60,6 +60,9 @@ Main (unreleased) - Fix an issue where JSON string array elements were not parsed correctly in `loki.source.cloudflare`. (@thampiotr) + +- Fix SSRF vulnerability in `faro.receiver` by disabling source map download. (@hainenber) + - Fix an issue where the azure exporter was not correctly gathering subscription scoped metrics when only one region was configured (@kgeckhart) - Update gcp_exporter to a newer version with a patch for incorrect delta histograms (@kgeckhart) diff --git a/internal/component/faro/receiver/sourcemaps.go b/internal/component/faro/receiver/sourcemaps.go index 5dc1e7643f32..c2c099d5c7cb 100644 --- a/internal/component/faro/receiver/sourcemaps.go +++ b/internal/component/faro/receiver/sourcemaps.go @@ -172,10 +172,8 @@ func (store *sourceMapsStoreImpl) getSourceMapContent(sourceURL string, release } } - // Attempt to download the sourcemap. - // - // TODO(rfratto): check if downloading is enabled. - if strings.HasPrefix(sourceURL, "http") && urlMatchesOrigins(sourceURL, store.args.DownloadFromOrigins) { + // Attempt to download the sourcemap if enabled. 
+ if strings.HasPrefix(sourceURL, "http") && urlMatchesOrigins(sourceURL, store.args.DownloadFromOrigins) && store.args.Download { return store.downloadSourceMapContent(sourceURL) } return nil, "", nil diff --git a/internal/component/faro/receiver/sourcemaps_test.go b/internal/component/faro/receiver/sourcemaps_test.go index 6ad1f7fcdd33..b154820bfe67 100644 --- a/internal/component/faro/receiver/sourcemaps_test.go +++ b/internal/component/faro/receiver/sourcemaps_test.go @@ -349,6 +349,88 @@ func Test_sourceMapsStoreImpl_ReadFromFileSystemAndDownload(t *testing.T) { require.Equal(t, expect, actual) } +func Test_sourceMapsStoreImpl_ReadFromFileSystemAndNotDownloadIfDisabled(t *testing.T) { + var ( + logger = util.TestLogger(t) + + httpClient = &mockHTTPClient{ + responses: []struct { + *http.Response + error + }{ + {newResponseFromTestData(t, "foo.js"), nil}, + {newResponseFromTestData(t, "foo.js.map"), nil}, + }, + } + + fileService = &mockFileService{ + files: map[string][]byte{ + filepath.FromSlash("/var/build/latest/foo.js.map"): loadTestData(t, "foo.js.map"), + }, + } + + store = newSourceMapsStore( + logger, + SourceMapsArguments{ + Download: false, + DownloadFromOrigins: []string{"*"}, + Locations: []LocationArguments{ + { + MinifiedPathPrefix: "http://foo.com/", + Path: filepath.FromSlash("/var/build/latest/"), + }, + }, + }, + newSourceMapMetrics(prometheus.NewRegistry()), + httpClient, + fileService, + ) + ) + + expect := &payload.Exception{ + Stacktrace: &payload.Stacktrace{ + Frames: []payload.Frame{ + { + Colno: 37, + Filename: "/__parcel_source_root/demo/src/actions.ts", + Function: "?", + Lineno: 6, + }, + { + Colno: 5, + Filename: "http://bar.com/foo.js", + Function: "callUndefined", + Lineno: 6, + }, + }, + }, + } + + actual := transformException(logger, store, &payload.Exception{ + Stacktrace: &payload.Stacktrace{ + Frames: []payload.Frame{ + { + Colno: 6, + Filename: "http://foo.com/foo.js", + Function: "eval", + Lineno: 5, + }, + { + Colno: 5, + 
Filename: "http://bar.com/foo.js", + Function: "callUndefined", + Lineno: 6, + }, + }, + }, + }, "123") + + require.Equal(t, []string{filepath.FromSlash("/var/build/latest/foo.js.map")}, fileService.stats) + require.Equal(t, []string{filepath.FromSlash("/var/build/latest/foo.js.map")}, fileService.reads) + require.Nil(t, httpClient.requests) + require.Equal(t, expect, actual) +} + func Test_sourceMapsStoreImpl_FilepathSanitized(t *testing.T) { var ( logger = util.TestLogger(t) From 9646f5c2a1fec98be9dc6a344fcc2f10e2809c91 Mon Sep 17 00:00:00 2001 From: mattdurham Date: Wed, 3 Apr 2024 10:55:10 -0400 Subject: [PATCH 29/83] Seperate windows and non windows tests. (#6816) --- .../promtailconvert/promtailconvert_test.go | 2 ++ .../promtailconvert_windows_test.go | 15 +++++++++++++++ .../windowsevents.river | 0 .../windowsevents.yaml | 0 .../windowsevents_relabel.river | 0 .../windowsevents_relabel.yaml | 0 6 files changed, 17 insertions(+) create mode 100644 internal/converter/internal/promtailconvert/promtailconvert_windows_test.go rename internal/converter/internal/promtailconvert/{testdata => testdatawindows}/windowsevents.river (100%) rename internal/converter/internal/promtailconvert/{testdata => testdatawindows}/windowsevents.yaml (100%) rename internal/converter/internal/promtailconvert/{testdata => testdatawindows}/windowsevents_relabel.river (100%) rename internal/converter/internal/promtailconvert/{testdata => testdatawindows}/windowsevents_relabel.yaml (100%) diff --git a/internal/converter/internal/promtailconvert/promtailconvert_test.go b/internal/converter/internal/promtailconvert/promtailconvert_test.go index 63ea75afbe1c..0932e62eaeea 100644 --- a/internal/converter/internal/promtailconvert/promtailconvert_test.go +++ b/internal/converter/internal/promtailconvert/promtailconvert_test.go @@ -1,3 +1,5 @@ +//go:build linux + package promtailconvert_test import ( diff --git a/internal/converter/internal/promtailconvert/promtailconvert_windows_test.go 
b/internal/converter/internal/promtailconvert/promtailconvert_windows_test.go new file mode 100644 index 000000000000..82d85b7e13aa --- /dev/null +++ b/internal/converter/internal/promtailconvert/promtailconvert_windows_test.go @@ -0,0 +1,15 @@ +//go:build windows + +package promtailconvert_test + +import ( + "testing" + + "github.com/grafana/agent/internal/converter/internal/promtailconvert" + "github.com/grafana/agent/internal/converter/internal/test_common" + _ "github.com/grafana/agent/internal/static/metrics/instance" // Imported to override default values via the init function. +) + +func TestConvert(t *testing.T) { + test_common.TestDirectory(t, "testdatawindows", ".yaml", true, []string{}, promtailconvert.Convert) +} diff --git a/internal/converter/internal/promtailconvert/testdata/windowsevents.river b/internal/converter/internal/promtailconvert/testdatawindows/windowsevents.river similarity index 100% rename from internal/converter/internal/promtailconvert/testdata/windowsevents.river rename to internal/converter/internal/promtailconvert/testdatawindows/windowsevents.river diff --git a/internal/converter/internal/promtailconvert/testdata/windowsevents.yaml b/internal/converter/internal/promtailconvert/testdatawindows/windowsevents.yaml similarity index 100% rename from internal/converter/internal/promtailconvert/testdata/windowsevents.yaml rename to internal/converter/internal/promtailconvert/testdatawindows/windowsevents.yaml diff --git a/internal/converter/internal/promtailconvert/testdata/windowsevents_relabel.river b/internal/converter/internal/promtailconvert/testdatawindows/windowsevents_relabel.river similarity index 100% rename from internal/converter/internal/promtailconvert/testdata/windowsevents_relabel.river rename to internal/converter/internal/promtailconvert/testdatawindows/windowsevents_relabel.river diff --git a/internal/converter/internal/promtailconvert/testdata/windowsevents_relabel.yaml 
b/internal/converter/internal/promtailconvert/testdatawindows/windowsevents_relabel.yaml similarity index 100% rename from internal/converter/internal/promtailconvert/testdata/windowsevents_relabel.yaml rename to internal/converter/internal/promtailconvert/testdatawindows/windowsevents_relabel.yaml From 7393c83716dbb2b39146d9e1c312ccde8fa1b0e9 Mon Sep 17 00:00:00 2001 From: Craig Peterson <192540+captncraig@users.noreply.github.com> Date: Wed, 3 Apr 2024 14:04:56 -0400 Subject: [PATCH 30/83] set bind permissions on the executable in dockerfile (#6817) * set bind capacity in docker file * add test for nonroot * newline --- cmd/grafana-agent/Dockerfile | 3 +- .../grafana-agent/ci/nonroot-values.yaml | 7 ++ .../grafana-agent/templates/configmap.yaml | 42 +++++++ .../templates/controllers/daemonset.yaml | 81 ++++++++++++ .../nonroot/grafana-agent/templates/rbac.yaml | 117 ++++++++++++++++++ .../grafana-agent/templates/service.yaml | 23 ++++ .../templates/serviceaccount.yaml | 13 ++ 7 files changed, 285 insertions(+), 1 deletion(-) create mode 100644 operations/helm/charts/grafana-agent/ci/nonroot-values.yaml create mode 100644 operations/helm/tests/nonroot/grafana-agent/templates/configmap.yaml create mode 100644 operations/helm/tests/nonroot/grafana-agent/templates/controllers/daemonset.yaml create mode 100644 operations/helm/tests/nonroot/grafana-agent/templates/rbac.yaml create mode 100644 operations/helm/tests/nonroot/grafana-agent/templates/service.yaml create mode 100644 operations/helm/tests/nonroot/grafana-agent/templates/serviceaccount.yaml diff --git a/cmd/grafana-agent/Dockerfile b/cmd/grafana-agent/Dockerfile index 09b38ea7d6e9..d855e2f54d8e 100644 --- a/cmd/grafana-agent/Dockerfile +++ b/cmd/grafana-agent/Dockerfile @@ -41,7 +41,7 @@ LABEL org.opencontainers.image.source="https://github.com/grafana/agent" # Install dependencies needed at runtime. 
RUN < Date: Wed, 3 Apr 2024 17:29:07 -0400 Subject: [PATCH 31/83] converting spanmetrics from static traces to flow (#6806) * convert spanmetrics from static traces to flow Signed-off-by: erikbaranowski <39704712+erikbaranowski@users.noreply.github.com> --------- Signed-off-by: erikbaranowski <39704712+erikbaranowski@users.noreply.github.com> --- .../internal/otelcolconvert/otelcolconvert.go | 29 +++-- .../internal/build/builder_traces.go | 98 +++++++++++++++++ .../build/converter_discoveryprocessor.go | 3 +- .../build/converter_remotewriteexporter.go | 102 ++++++++++++++++++ .../internal/build/self_exporter.go | 8 +- .../staticconvert/testdata/traces.river | 75 ++++++++++++- .../staticconvert/testdata/traces.yaml | 19 ++-- .../staticconvert/testdata/unsupported.diags | 1 + .../staticconvert/testdata/unsupported.yaml | 2 + 9 files changed, 313 insertions(+), 24 deletions(-) create mode 100644 internal/converter/internal/staticconvert/internal/build/converter_remotewriteexporter.go diff --git a/internal/converter/internal/otelcolconvert/otelcolconvert.go b/internal/converter/internal/otelcolconvert/otelcolconvert.go index d24e072b94f1..d06fc78882c2 100644 --- a/internal/converter/internal/otelcolconvert/otelcolconvert.go +++ b/internal/converter/internal/otelcolconvert/otelcolconvert.go @@ -291,28 +291,39 @@ func validateNoDuplicateReceivers(groups []pipelineGroup, connectorIDs []compone func buildConverterTable(extraConverters []ComponentConverter) map[converterKey]ComponentConverter { table := make(map[converterKey]ComponentConverter) - allConverters := append(converters, extraConverters...) + + // Ordering is critical here because conflicting converters are resolved with + // the first one in the list winning. + allConverters := append(extraConverters, converters...) 
for _, conv := range allConverters { fact := conv.Factory() - + var kinds []component.Kind switch fact.(type) { case receiver.Factory: - table[converterKey{Kind: component.KindReceiver, Type: fact.Type()}] = conv + kinds = append(kinds, component.KindReceiver) case processor.Factory: - table[converterKey{Kind: component.KindProcessor, Type: fact.Type()}] = conv + kinds = append(kinds, component.KindProcessor) case exporter.Factory: - table[converterKey{Kind: component.KindExporter, Type: fact.Type()}] = conv + kinds = append(kinds, component.KindExporter) case connector.Factory: - table[converterKey{Kind: component.KindConnector, Type: fact.Type()}] = conv + kinds = append(kinds, component.KindConnector) // We need this so the connector is available as a destination for state.Next - table[converterKey{Kind: component.KindExporter, Type: fact.Type()}] = conv + kinds = append(kinds, component.KindExporter) // Technically, this isn't required to be here since the entry // won't be required to look up a destination for state.Next, but // adding to reinforce the idea of how connectors are used. - table[converterKey{Kind: component.KindReceiver, Type: fact.Type()}] = conv + kinds = append(kinds, component.KindReceiver) case extension.Factory: - table[converterKey{Kind: component.KindExtension, Type: fact.Type()}] = conv + kinds = append(kinds, component.KindExtension) + } + + for _, kind := range kinds { + // If a converter for this kind and type already exists, skip it. 
+ if _, ok := table[converterKey{Kind: kind, Type: fact.Type()}]; ok { + continue + } + table[converterKey{Kind: kind, Type: fact.Type()}] = conv } } diff --git a/internal/converter/internal/staticconvert/internal/build/builder_traces.go b/internal/converter/internal/staticconvert/internal/build/builder_traces.go index da2411b9eaa3..1122ae9cec1b 100644 --- a/internal/converter/internal/staticconvert/internal/build/builder_traces.go +++ b/internal/converter/internal/staticconvert/internal/build/builder_traces.go @@ -7,9 +7,11 @@ import ( "github.com/grafana/agent/internal/converter/diag" "github.com/grafana/agent/internal/converter/internal/otelcolconvert" "github.com/grafana/agent/internal/static/traces" + "github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector" otel_component "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/exporter/loggingexporter" "go.opentelemetry.io/collector/otelcol" + "go.opentelemetry.io/collector/service/pipelines" ) // List of component converters. 
This slice is appended to by init functions in @@ -38,6 +40,7 @@ func (b *ConfigBuilder) appendTraces() { removeReceiver(otelCfg, "traces", "push_receiver") b.translateAutomaticLogging(otelCfg, cfg) + b.translateSpanMetrics(otelCfg, cfg) b.diags.AddAll(otelcolconvert.AppendConfig(b.f, otelCfg, labelPrefix, converters)) } @@ -68,6 +71,74 @@ func (b *ConfigBuilder) translateAutomaticLogging(otelCfg *otelcol.Config, cfg t removeProcessor(otelCfg, "traces", "automatic_logging") } +func (b *ConfigBuilder) translateSpanMetrics(otelCfg *otelcol.Config, cfg traces.InstanceConfig) { + if _, ok := otelCfg.Processors[otel_component.NewID("spanmetrics")]; !ok { + return + } + + // Remove the custom otel components and delete the custom metrics pipeline + removeProcessor(otelCfg, "traces", "spanmetrics") + removeReceiver(otelCfg, "metrics", "noop") + removeExporter(otelCfg, "metrics", "prometheus") + removePipeline(otelCfg, "metrics", "spanmetrics") + + // If the spanmetrics configuration includes a handler_endpoint, we cannot convert it. + // This is intentionally after the section above which removes the custom spanmetrics processor + // so that the rest of the configuration can optionally be converted with the error. + if cfg.SpanMetrics.HandlerEndpoint != "" { + b.diags.Add(diag.SeverityLevelError, "Cannot convert using configuration including spanmetrics handler_endpoint. "+ + "No equivalent exists for exposing a known /metrics endpoint. You can use metrics_instance instead to enabled conversion.") + return + } + + // Add the spanmetrics connector to the otel config with the converted configuration + if otelCfg.Connectors == nil { + otelCfg.Connectors = map[otel_component.ID]otel_component.Config{} + } + otelCfg.Connectors[otel_component.NewID("spanmetrics")] = toSpanmetricsConnector(cfg.SpanMetrics) + + // Add the spanmetrics connector to each traces pipelines as an exporter and create metrics pipelines. 
+ // The processing ordering for the span metrics connector differs from the static pipelines since tail sampling + // in static mode processes after the custom span metrics processor. This is ok because the tail sampling + // processor is not processing metrics. + spanmetricsID := otel_component.NewID("spanmetrics") + remoteWriteID := otel_component.NewID("remote_write") + for ix, pipeline := range otelCfg.Service.Pipelines { + if ix.Type() == "traces" { + pipeline.Exporters = append(pipeline.Exporters, spanmetricsID) + + metricsId := otel_component.NewIDWithName("metrics", ix.Name()) + otelCfg.Service.Pipelines[metricsId] = &pipelines.PipelineConfig{} + otelCfg.Service.Pipelines[metricsId].Receivers = append(otelCfg.Service.Pipelines[metricsId].Receivers, spanmetricsID) + otelCfg.Service.Pipelines[metricsId].Exporters = append(otelCfg.Service.Pipelines[metricsId].Exporters, remoteWriteID) + } + } +} + +func toSpanmetricsConnector(cfg *traces.SpanMetricsConfig) *spanmetricsconnector.Config { + smc := spanmetricsconnector.NewFactory().CreateDefaultConfig().(*spanmetricsconnector.Config) + for _, dim := range cfg.Dimensions { + smc.Dimensions = append(smc.Dimensions, spanmetricsconnector.Dimension{Name: dim.Name, Default: dim.Default}) + } + if cfg.DimensionsCacheSize != 0 { + smc.DimensionsCacheSize = cfg.DimensionsCacheSize + } + if cfg.AggregationTemporality != "" { + smc.AggregationTemporality = cfg.AggregationTemporality + } + if len(cfg.LatencyHistogramBuckets) != 0 { + smc.Histogram.Explicit = &spanmetricsconnector.ExplicitHistogramConfig{Buckets: cfg.LatencyHistogramBuckets} + } + if cfg.MetricsFlushInterval != 0 { + smc.MetricsFlushInterval = cfg.MetricsFlushInterval + } + if cfg.Namespace != "" { + smc.Namespace = cfg.Namespace + } + + return smc +} + // removeReceiver removes a receiver from the otel config for a specific pipeline type. 
func removeReceiver(otelCfg *otelcol.Config, pipelineType otel_component.Type, receiverType otel_component.Type) { if _, ok := otelCfg.Receivers[otel_component.NewID(receiverType)]; !ok { @@ -111,3 +182,30 @@ func removeProcessor(otelCfg *otelcol.Config, pipelineType otel_component.Type, otelCfg.Service.Pipelines[ix].Processors = spr } } + +// removeExporter removes an exporter from the otel config for a specific pipeline type. +func removeExporter(otelCfg *otelcol.Config, pipelineType otel_component.Type, exporterType otel_component.Type) { + if _, ok := otelCfg.Exporters[otel_component.NewID(exporterType)]; !ok { + return + } + + delete(otelCfg.Exporters, otel_component.NewID(exporterType)) + for ix, p := range otelCfg.Service.Pipelines { + if ix.Type() != pipelineType { + continue + } + + spr := make([]otel_component.ID, 0) + for _, r := range p.Exporters { + if r.Type() != exporterType { + spr = append(spr, r) + } + } + otelCfg.Service.Pipelines[ix].Exporters = spr + } +} + +// removePipeline removes a pipeline from the otel config for a specific pipeline type. 
+func removePipeline(otelCfg *otelcol.Config, pipelineType otel_component.Type, pipelineName string) { + delete(otelCfg.Service.Pipelines, otel_component.NewIDWithName(pipelineType, pipelineName)) +} diff --git a/internal/converter/internal/staticconvert/internal/build/converter_discoveryprocessor.go b/internal/converter/internal/staticconvert/internal/build/converter_discoveryprocessor.go index 42142dc610e6..6739d13479f5 100644 --- a/internal/converter/internal/staticconvert/internal/build/converter_discoveryprocessor.go +++ b/internal/converter/internal/staticconvert/internal/build/converter_discoveryprocessor.go @@ -13,7 +13,6 @@ import ( "github.com/grafana/agent/internal/converter/internal/prometheusconvert/build" prometheus_component "github.com/grafana/agent/internal/converter/internal/prometheusconvert/component" "github.com/grafana/agent/internal/static/traces/promsdprocessor" - "github.com/grafana/river/scanner" prom_config "github.com/prometheus/prometheus/config" "go.opentelemetry.io/collector/component" "gopkg.in/yaml.v3" @@ -81,7 +80,7 @@ func toDiscoveryProcessor(state *otelcolconvert.State, id component.InstanceID, if label != "" { labelConcat = label + "_" + scrapeConfig.JobName } - label, _ := scanner.SanitizeIdentifier(labelConcat) + label := common.SanitizeIdentifierPanics(labelConcat) scrapeTargets := prometheusconvert.AppendServiceDiscoveryConfigs(pb, scrapeConfig.ServiceDiscoveryConfigs, label) promDiscoveryRelabelExports := prometheus_component.AppendDiscoveryRelabel(pb, scrapeConfig.RelabelConfigs, scrapeTargets, label) if promDiscoveryRelabelExports != nil { diff --git a/internal/converter/internal/staticconvert/internal/build/converter_remotewriteexporter.go b/internal/converter/internal/staticconvert/internal/build/converter_remotewriteexporter.go new file mode 100644 index 000000000000..7d17bcfa1d1c --- /dev/null +++ 
b/internal/converter/internal/staticconvert/internal/build/converter_remotewriteexporter.go @@ -0,0 +1,102 @@ +package build + +import ( + "fmt" + "sort" + + flow_relabel "github.com/grafana/agent/internal/component/common/relabel" + "github.com/grafana/agent/internal/component/otelcol/exporter/prometheus" + "github.com/grafana/agent/internal/component/prometheus/relabel" + "github.com/grafana/agent/internal/converter/diag" + "github.com/grafana/agent/internal/converter/internal/common" + "github.com/grafana/agent/internal/converter/internal/otelcolconvert" + "github.com/grafana/agent/internal/converter/internal/prometheusconvert/build" + prometheus_component "github.com/grafana/agent/internal/converter/internal/prometheusconvert/component" + "github.com/grafana/agent/internal/static/traces/remotewriteexporter" + prom_relabel "github.com/prometheus/prometheus/model/relabel" + "github.com/prometheus/prometheus/storage" + "go.opentelemetry.io/collector/component" +) + +func init() { + converters = append(converters, remoteWriteExporterConverter{}) +} + +type remoteWriteExporterConverter struct{} + +func (remoteWriteExporterConverter) Factory() component.Factory { + return remotewriteexporter.NewFactory() +} + +func (remoteWriteExporterConverter) InputComponentName() string { + return "otelcol.exporter.prometheus" +} + +func (remoteWriteExporterConverter) ConvertAndAppend(state *otelcolconvert.State, id component.InstanceID, cfg component.Config) diag.Diagnostics { + label := state.FlowComponentLabel() + + // We overloaded the ServerConfig.Endpoint field to be the prometheus.remote_write label + rwLabel := "metrics_" + cfg.(*remotewriteexporter.Config).PromInstance + forwardTo := []storage.Appendable{common.ConvertAppendable{Expr: fmt.Sprintf("prometheus.remote_write.%s.receiver", rwLabel)}} + if len(cfg.(*remotewriteexporter.Config).ConstLabels) > 0 { + exports := 
includeRelabelConfig(label, cfg, state, forwardTo) + forwardTo = []storage.Appendable{exports.Receiver} + } + + args := toremotewriteexporterConfig(cfg.(*remotewriteexporter.Config), forwardTo) + block := common.NewBlockWithOverride([]string{"otelcol", "exporter", "prometheus"}, label, args) + + var diags diag.Diagnostics + diags.Add( + diag.SeverityLevelInfo, + fmt.Sprintf("Converted %s into %s", otelcolconvert.StringifyInstanceID(id), otelcolconvert.StringifyBlock(block)), + ) + + state.Body().AppendBlock(block) + return diags +} + +func includeRelabelConfig(label string, cfg component.Config, state *otelcolconvert.State, forwardTo []storage.Appendable) *relabel.Exports { + pb := build.NewPrometheusBlocks() + + defaultRelabelConfigs := &flow_relabel.Config{} + defaultRelabelConfigs.SetToDefault() + relabelConfigs := []*prom_relabel.Config{} + + // sort they keys for consistency in map iteration + keys := make([]string, 0, len(cfg.(*remotewriteexporter.Config).ConstLabels)) + for label := range cfg.(*remotewriteexporter.Config).ConstLabels { + keys = append(keys, label) + } + sort.Strings(keys) + + for _, label := range keys { + relabelConfigs = append(relabelConfigs, &prom_relabel.Config{ + Separator: defaultRelabelConfigs.Separator, + Regex: prom_relabel.Regexp(defaultRelabelConfigs.Regex), + Modulus: defaultRelabelConfigs.Modulus, + TargetLabel: label, + Replacement: cfg.(*remotewriteexporter.Config).ConstLabels[label], + Action: prom_relabel.Action(defaultRelabelConfigs.Action), + }) + } + + exports := prometheus_component.AppendPrometheusRelabel(pb, relabelConfigs, forwardTo, label) + pb.AppendToBody(state.Body()) + return exports +} + +func toremotewriteexporterConfig(cfg *remotewriteexporter.Config, forwardTo []storage.Appendable) *prometheus.Arguments { + defaultArgs := &prometheus.Arguments{} + defaultArgs.SetToDefault() + + return &prometheus.Arguments{ + IncludeTargetInfo: defaultArgs.IncludeTargetInfo, + IncludeScopeInfo: defaultArgs.IncludeScopeInfo, 
+ IncludeScopeLabels: defaultArgs.IncludeScopeLabels, + GCFrequency: cfg.StaleTime, + ForwardTo: forwardTo, + AddMetricSuffixes: defaultArgs.AddMetricSuffixes, + ResourceToTelemetryConversion: defaultArgs.ResourceToTelemetryConversion, + } +} diff --git a/internal/converter/internal/staticconvert/internal/build/self_exporter.go b/internal/converter/internal/staticconvert/internal/build/self_exporter.go index 31e7b50551e7..51c1049349d5 100644 --- a/internal/converter/internal/staticconvert/internal/build/self_exporter.go +++ b/internal/converter/internal/staticconvert/internal/build/self_exporter.go @@ -8,19 +8,19 @@ import ( ) func (b *ConfigBuilder) appendAgentExporter(config *agent_exporter.Config) discovery.Exports { - args := toAgentExporter(config) + args := toAgentExporter() return b.appendExporterBlock(args, config.Name(), nil, "self") } -func toAgentExporter(config *agent_exporter.Config) *self.Arguments { +func toAgentExporter() *self.Arguments { return &self.Arguments{} } func (b *ConfigBuilder) appendAgentExporterV2(config *agent_exporter_v2.Config) discovery.Exports { - args := toAgentExporterV2(config) + args := toAgentExporterV2() return b.appendExporterBlock(args, config.Name(), config.Common.InstanceKey, "self") } -func toAgentExporterV2(config *agent_exporter_v2.Config) *self.Arguments { +func toAgentExporterV2() *self.Arguments { return &self.Arguments{} } diff --git a/internal/converter/internal/staticconvert/testdata/traces.river b/internal/converter/internal/staticconvert/testdata/traces.river index f8d8a4e8f584..0e1684d924b1 100644 --- a/internal/converter/internal/staticconvert/testdata/traces.river +++ b/internal/converter/internal/staticconvert/testdata/traces.river @@ -1,3 +1,14 @@ +prometheus.remote_write "metrics_remote_write_name" { + endpoint { + name = "remote_write_name-149bbd" + url = "http://localhost:9009/api/prom/push" + + queue_config { } + + metadata_config { } + } +} + otelcol.extension.jaeger_remote_sampling "default_0" { 
grpc { } @@ -88,10 +99,29 @@ otelcol.processor.attributes "_0_default" { output { metrics = [] logs = [] - traces = [otelcol.exporter.loadbalancing._0_default.input, otelcol.exporter.logging._0_default.input] + traces = [otelcol.exporter.loadbalancing._0_default.input, otelcol.exporter.logging._0_default.input, otelcol.connector.spanmetrics._0_default.input] } } +prometheus.relabel "_0_default" { + forward_to = [prometheus.remote_write.metrics_remote_write_name.receiver] + + rule { + target_label = "fizz" + replacement = "buzz" + } + + rule { + target_label = "foo" + replacement = "bar" + } +} + +otelcol.exporter.prometheus "_0_default" { + gc_frequency = "0s" + forward_to = [prometheus.relabel._0_default.receiver] +} + otelcol.exporter.loadbalancing "_0_default" { protocol { otlp { @@ -114,6 +144,17 @@ otelcol.exporter.loadbalancing "_0_default" { otelcol.exporter.logging "_0_default" { } +otelcol.connector.spanmetrics "_0_default" { + histogram { + explicit { } + } + namespace = "metrics_prefix" + + output { + metrics = [otelcol.exporter.prometheus._0_default.input] + } +} + otelcol.receiver.otlp "_1_lb" { grpc { endpoint = "0.0.0.0:4318" @@ -146,10 +187,29 @@ otelcol.processor.batch "_1_default" { output { metrics = [] logs = [] - traces = [otelcol.exporter.otlp._1_0.input, otelcol.exporter.logging._1_default.input] + traces = [otelcol.exporter.otlp._1_0.input, otelcol.exporter.logging._1_default.input, otelcol.connector.spanmetrics._1_default.input] + } +} + +prometheus.relabel "_1_default" { + forward_to = [prometheus.remote_write.metrics_remote_write_name.receiver] + + rule { + target_label = "fizz" + replacement = "buzz" + } + + rule { + target_label = "foo" + replacement = "bar" } } +otelcol.exporter.prometheus "_1_default" { + gc_frequency = "0s" + forward_to = [prometheus.relabel._1_default.receiver] +} + otelcol.exporter.otlp "_1_0" { retry_on_failure { max_elapsed_time = "1m0s" @@ -165,3 +225,14 @@ otelcol.exporter.otlp "_1_0" { } 
otelcol.exporter.logging "_1_default" { } + +otelcol.connector.spanmetrics "_1_default" { + histogram { + explicit { } + } + namespace = "metrics_prefix" + + output { + metrics = [otelcol.exporter.prometheus._1_default.input] + } +} diff --git a/internal/converter/internal/staticconvert/testdata/traces.yaml b/internal/converter/internal/staticconvert/testdata/traces.yaml index 5a4cb2dfd332..a97262722fc5 100644 --- a/internal/converter/internal/staticconvert/testdata/traces.yaml +++ b/internal/converter/internal/staticconvert/testdata/traces.yaml @@ -40,9 +40,12 @@ traces: prom_sd_pod_associations: - ip - net.host.ip - # spanmetrics: - # namespace: testing - # metrics_instance: default + spanmetrics: + metrics_instance: remote_write_name + namespace: metrics_prefix + const_labels: + foo: bar + fizz: buzz tail_sampling: policies: [ @@ -67,7 +70,9 @@ traces: # This metrics config is needed when we enable spanmetrics for traces # -# metrics: -# global: -# remote_write: -# - url: http://localhost:9009/api/prom/push +metrics: + global: + remote_write: + - url: http://localhost:9009/api/prom/push + configs: + - name: remote_write_name \ No newline at end of file diff --git a/internal/converter/internal/staticconvert/testdata/unsupported.diags b/internal/converter/internal/staticconvert/testdata/unsupported.diags index 0958f0e79e69..c2b745cc05a2 100644 --- a/internal/converter/internal/staticconvert/testdata/unsupported.diags +++ b/internal/converter/internal/staticconvert/testdata/unsupported.diags @@ -1,6 +1,7 @@ (Error) The converter does not support handling integrations which are not being scraped: mssql. (Error) mapping_config is not supported in statsd_exporter integrations config (Error) automatic_logging for traces has no direct flow equivalent. A best effort translation can be made which only outputs to stdout and not directly to loki by bypassing errors. +(Error) Cannot convert using configuration including spanmetrics handler_endpoint. 
No equivalent exists for exposing a known /metrics endpoint. You can use metrics_instance instead to enabled conversion. (Warning) Please review your agent command line flags and ensure they are set in your Flow mode config file where necessary. (Error) The converter does not support converting the provided grpc_tls_config server config: flow mode does not have a gRPC server to configure. (Error) The converter does not support converting the provided prefer_server_cipher_suites server config. diff --git a/internal/converter/internal/staticconvert/testdata/unsupported.yaml b/internal/converter/internal/staticconvert/testdata/unsupported.yaml index 8dd14ab12531..d43a369fadfc 100644 --- a/internal/converter/internal/staticconvert/testdata/unsupported.yaml +++ b/internal/converter/internal/staticconvert/testdata/unsupported.yaml @@ -64,6 +64,8 @@ traces: - endpoint: http://localhost:1234/write automatic_logging: backend: "something else" + spanmetrics: + handler_endpoint: http://localhost:1234/write agent_management: host: host_name From 68820116e99fa45329a944a6d0ce86daf9b8171a Mon Sep 17 00:00:00 2001 From: Erik Baranowski <39704712+erikbaranowski@users.noreply.github.com> Date: Wed, 3 Apr 2024 19:21:38 -0400 Subject: [PATCH 32/83] Added support for static configuration conversion of the traces subsystem (#6820) Signed-off-by: erikbaranowski <39704712+erikbaranowski@users.noreply.github.com> --- CHANGELOG.md | 2 ++ docs/sources/flow/tasks/migrate/from-static.md | 2 +- .../staticconvert/internal/build/builder_traces.go | 9 +++++++++ .../internal/staticconvert/testdata/traces.yaml | 2 -- .../internal/staticconvert/testdata/unsupported.diags | 1 + .../internal/staticconvert/testdata/unsupported.yaml | 2 ++ 6 files changed, 15 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 55c75ad9fdf7..b54c0b1d69ed 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -50,6 +50,8 @@ Main (unreleased) - Added support for `otelcol` configuration conversion in 
`grafana-agent convert` and `grafana-agent run` commands. (@rfratto, @erikbaranowski, @tpaschalis, @hainenber) +- Added support for `static` configuration conversion of the `traces` subsystem. (@erikbaranowski, @wildum) + - Add automatic conversion for `legacy_positions_file` in component `loki.source.file`. (@mattdurham) ### Features diff --git a/docs/sources/flow/tasks/migrate/from-static.md b/docs/sources/flow/tasks/migrate/from-static.md index 5d1b73626f60..eb035223ce80 100644 --- a/docs/sources/flow/tasks/migrate/from-static.md +++ b/docs/sources/flow/tasks/migrate/from-static.md @@ -339,7 +339,7 @@ After the configuration is converted, review the {{< param "PRODUCT_NAME" >}} co The following list is specific to the convert command and not {{< param "PRODUCT_NAME" >}}: -* The [Traces][] and [Agent Management][] configuration options can't be automatically converted to {{< param "PRODUCT_NAME" >}}. However, traces are fully supported in {{< param "PRODUCT_NAME" >}} and you can build your configuration manually. +* The [Agent Management][] configuration options can't be automatically converted to {{< param "PRODUCT_NAME" >}}. Any additional unsupported features are returned as errors during conversion. * There is no gRPC server to configure for {{< param "PRODUCT_NAME" >}}, as any non-default configuration will show as unsupported during the conversion. * Check if you are using any extra command line arguments with Static that aren't present in your configuration file. For example, `-server.http.address`. 
diff --git a/internal/converter/internal/staticconvert/internal/build/builder_traces.go b/internal/converter/internal/staticconvert/internal/build/builder_traces.go index 1122ae9cec1b..d13a06e27ae0 100644 --- a/internal/converter/internal/staticconvert/internal/build/builder_traces.go +++ b/internal/converter/internal/staticconvert/internal/build/builder_traces.go @@ -39,6 +39,15 @@ func (b *ConfigBuilder) appendTraces() { // Remove the push receiver which is an implementation detail for static mode and unnecessary for the otel config. removeReceiver(otelCfg, "traces", "push_receiver") + // Remove the service_graphs processor which is an implementation detail for static mode and unnecessary for the otel config. + if _, ok := otelCfg.Processors[otel_component.NewID("service_graphs")]; ok { + removeProcessor(otelCfg, "traces", "service_graphs") + b.diags.Add(diag.SeverityLevelError, "The service_graphs processor for traces has no direct flow equivalent. "+ + "This configuration appends metrics to the /metrics endpoint of the agent which is not possible in flow. 
"+ + "Alternatively, you can use the otelcol.connector.servicegraph component to build a pipeline which generates "+ + "and forwards service graph metrics.") + } + b.translateAutomaticLogging(otelCfg, cfg) b.translateSpanMetrics(otelCfg, cfg) diff --git a/internal/converter/internal/staticconvert/testdata/traces.yaml b/internal/converter/internal/staticconvert/testdata/traces.yaml index a97262722fc5..c6e295294130 100644 --- a/internal/converter/internal/staticconvert/testdata/traces.yaml +++ b/internal/converter/internal/staticconvert/testdata/traces.yaml @@ -60,8 +60,6 @@ traces: hostnames: - tempo1.example.com - tempo2.example.com - # service_graphs: - # enabled: true jaeger_remote_sampling: - source: reload_interval: 30s diff --git a/internal/converter/internal/staticconvert/testdata/unsupported.diags b/internal/converter/internal/staticconvert/testdata/unsupported.diags index c2b745cc05a2..5687193a097d 100644 --- a/internal/converter/internal/staticconvert/testdata/unsupported.diags +++ b/internal/converter/internal/staticconvert/testdata/unsupported.diags @@ -1,5 +1,6 @@ (Error) The converter does not support handling integrations which are not being scraped: mssql. (Error) mapping_config is not supported in statsd_exporter integrations config +(Error) The service_graphs processor for traces has no direct flow equivalent. This configuration appends metrics to the /metrics endpoint of the agent which is not possible in flow. Alternatively, you can use the otelcol.connector.servicegraph component to build a pipeline which generates and forwards service graph metrics. (Error) automatic_logging for traces has no direct flow equivalent. A best effort translation can be made which only outputs to stdout and not directly to loki by bypassing errors. (Error) Cannot convert using configuration including spanmetrics handler_endpoint. No equivalent exists for exposing a known /metrics endpoint. You can use metrics_instance instead to enabled conversion. 
(Warning) Please review your agent command line flags and ensure they are set in your Flow mode config file where necessary. diff --git a/internal/converter/internal/staticconvert/testdata/unsupported.yaml b/internal/converter/internal/staticconvert/testdata/unsupported.yaml index d43a369fadfc..bb34aaac4c8a 100644 --- a/internal/converter/internal/staticconvert/testdata/unsupported.yaml +++ b/internal/converter/internal/staticconvert/testdata/unsupported.yaml @@ -66,6 +66,8 @@ traces: backend: "something else" spanmetrics: handler_endpoint: http://localhost:1234/write + service_graphs: + enabled: true agent_management: host: host_name From 077d70b3394791740485f0f46e5669d1e669c16c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C4=90=E1=BB=97=20Tr=E1=BB=8Dng=20H=E1=BA=A3i?= <41283691+hainenber@users.noreply.github.com> Date: Thu, 4 Apr 2024 15:52:15 +0700 Subject: [PATCH 33/83] chore(converter/otelcol): remove deep pretty printer clause added during implementation debugging (#6737) Signed-off-by: hainenber --- go.mod | 2 +- .../internal/otelcolconvert/converter_kafkareceiver.go | 3 --- .../otelcolconvert/converter_tailsamplingprocessor.go | 4 ---- 3 files changed, 1 insertion(+), 8 deletions(-) diff --git a/go.mod b/go.mod index 0ab4536248dc..17f9c5f8f50a 100644 --- a/go.mod +++ b/go.mod @@ -320,7 +320,7 @@ require ( github.com/cpuguy83/dockercfg v0.3.1 // indirect github.com/cyphar/filepath-securejoin v0.2.4 // indirect github.com/danieljoos/wincred v1.2.0 // indirect - github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dennwc/btrfs v0.0.0-20230312211831-a1f570bd01a1 // indirect github.com/dennwc/ioctl v1.0.0 // indirect github.com/dennwc/varint v1.0.0 // indirect diff --git a/internal/converter/internal/otelcolconvert/converter_kafkareceiver.go 
b/internal/converter/internal/otelcolconvert/converter_kafkareceiver.go index 7f46b5157b4b..b7532432dbd9 100644 --- a/internal/converter/internal/otelcolconvert/converter_kafkareceiver.go +++ b/internal/converter/internal/otelcolconvert/converter_kafkareceiver.go @@ -3,7 +3,6 @@ package otelcolconvert import ( "fmt" - "github.com/davecgh/go-spew/spew" "github.com/grafana/agent/internal/component/otelcol" "github.com/grafana/agent/internal/component/otelcol/receiver/kafka" "github.com/grafana/agent/internal/converter/diag" @@ -78,8 +77,6 @@ func toKafkaReceiver(state *State, id component.InstanceID, cfg *kafkareceiver.C } func toKafkaAuthentication(cfg map[string]any) kafka.AuthenticationArguments { - spew.Dump(cfg) - return kafka.AuthenticationArguments{ Plaintext: toKafkaPlaintext(encodeMapstruct(cfg["plain_text"])), SASL: toKafkaSASL(encodeMapstruct(cfg["sasl"])), diff --git a/internal/converter/internal/otelcolconvert/converter_tailsamplingprocessor.go b/internal/converter/internal/otelcolconvert/converter_tailsamplingprocessor.go index 439a84f8572a..7eee487b00ab 100644 --- a/internal/converter/internal/otelcolconvert/converter_tailsamplingprocessor.go +++ b/internal/converter/internal/otelcolconvert/converter_tailsamplingprocessor.go @@ -3,7 +3,6 @@ package otelcolconvert import ( "fmt" - "github.com/davecgh/go-spew/spew" "github.com/grafana/agent/internal/component/otelcol" "github.com/grafana/agent/internal/component/otelcol/processor/tail_sampling" "github.com/grafana/agent/internal/converter/diag" @@ -48,9 +47,6 @@ func toTailSamplingProcessor(state *State, id component.InstanceID, cfg *tailsam nextTraces = state.Next(id, component.DataTypeTraces) ) - testEncode := encodeMapstruct(cfg.PolicyCfgs[0]) - spew.Dump(testEncode) - return &tail_sampling.Arguments{ PolicyCfgs: toPolicyCfgs(cfg.PolicyCfgs), DecisionWait: cfg.DecisionWait, From bc294b1baede2335df52f55aefe80e83d7417f66 Mon Sep 17 00:00:00 
2001 From: =?UTF-8?q?=C4=90=E1=BB=97=20Tr=E1=BB=8Dng=20H=E1=BA=A3i?= <41283691+hainenber@users.noreply.github.com> Date: Thu, 4 Apr 2024 15:52:27 +0700 Subject: [PATCH 34/83] fix(otelcol/fanoutconsumer): fix panic when fanning out to invalid receivers (#6759) Signed-off-by: hainenber Co-authored-by: Paschalis Tsilias --- CHANGELOG.md | 2 ++ internal/component/otelcol/internal/fanoutconsumer/metrics.go | 3 +++ internal/component/otelcol/internal/fanoutconsumer/traces.go | 3 +++ 3 files changed, 8 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b54c0b1d69ed..2ca991d172a2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -73,6 +73,8 @@ Main (unreleased) whenever that argument is explicitly configured. This issue only affected a small subset of arguments across 15 components. (@erikbaranowski, @rfratto) +- Fix panic when fanning out to invalid receivers. (@hainenber) + - Fix a bug where a panic could occur when reloading custom components. (@wildum) - The `import.git` config block did not work with branches or tags this now fixes that behavior. (@mattdurham) diff --git a/internal/component/otelcol/internal/fanoutconsumer/metrics.go b/internal/component/otelcol/internal/fanoutconsumer/metrics.go index 4ace8e35a4b0..bceaaaa78234 100644 --- a/internal/component/otelcol/internal/fanoutconsumer/metrics.go +++ b/internal/component/otelcol/internal/fanoutconsumer/metrics.go @@ -28,6 +28,9 @@ func Metrics(in []otelcol.Consumer) otelconsumer.Metrics { // Iterate through all the consumers besides the last. 
for i := 0; i < len(in)-1; i++ { consumer := in[i] + if consumer == nil { + continue + } if consumer.Capabilities().MutatesData { clone = append(clone, consumer) diff --git a/internal/component/otelcol/internal/fanoutconsumer/traces.go b/internal/component/otelcol/internal/fanoutconsumer/traces.go index 51403b6a04f8..9f024c1b1720 100644 --- a/internal/component/otelcol/internal/fanoutconsumer/traces.go +++ b/internal/component/otelcol/internal/fanoutconsumer/traces.go @@ -28,6 +28,9 @@ func Traces(in []otelcol.Consumer) otelconsumer.Traces { // Iterate through all the consumers besides the last. for i := 0; i < len(in)-1; i++ { consumer := in[i] + if consumer == nil { + continue + } if consumer.Capabilities().MutatesData { clone = append(clone, consumer) From 196ea058806a42c8b24723fde82e8dcfc5f3e919 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marc=20Tudur=C3=AD?= Date: Thu, 4 Apr 2024 13:15:19 +0200 Subject: [PATCH 35/83] Add Grafana Beyla Flow component (#6789) Co-authored-by: Piotr Gwizdala <17101802+thampiotr@users.noreply.github.com> Co-authored-by: Clayton Cornell <131809008+clayton-cornell@users.noreply.github.com> --- CHANGELOG.md | 2 + CODEOWNERS | 3 +- .../flow/reference/compatibility/_index.md | 8 + .../flow/reference/components/beyla.ebpf.md | 276 +++++++++++++++++ go.mod | 26 +- go.sum | 51 +++- internal/component/all/all.go | 1 + internal/component/beyla/ebpf/args.go | 48 +++ internal/component/beyla/ebpf/beyla_linux.go | 287 ++++++++++++++++++ .../component/beyla/ebpf/beyla_linux_test.go | 200 ++++++++++++ .../component/beyla/ebpf/beyla_placeholder.go | 43 +++ 11 files changed, 925 insertions(+), 20 deletions(-) create mode 100644 docs/sources/flow/reference/components/beyla.ebpf.md create mode 100644 internal/component/beyla/ebpf/args.go create mode 100644 internal/component/beyla/ebpf/beyla_linux.go create mode 100644 internal/component/beyla/ebpf/beyla_linux_test.go create mode 100644 internal/component/beyla/ebpf/beyla_placeholder.go diff --git 
a/CHANGELOG.md b/CHANGELOG.md index 2ca991d172a2..d74e2c156e4b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -58,6 +58,8 @@ Main (unreleased) - A new `loki.rules.kubernetes` component that discovers `PrometheusRule` Kubernetes resources and loads them into a Loki Ruler instance. (@EStork09) +- Add `beyla.ebpf` component to automatically instrument services with eBPF. (@marctc) + ### Bugfixes - Fix an issue where JSON string array elements were not parsed correctly in `loki.source.cloudflare`. (@thampiotr) diff --git a/CODEOWNERS b/CODEOWNERS index 4f1541f12dbd..b48a1816109b 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -18,4 +18,5 @@ /docs/sources/ @clayton-cornell # Components: -/component/pyroscope/ @grafana/grafana-agent-profiling-maintainers +/internal/component/pyroscope/ @grafana/grafana-agent-profiling-maintainers +/internal/component/beyla/ @marctc \ No newline at end of file diff --git a/docs/sources/flow/reference/compatibility/_index.md b/docs/sources/flow/reference/compatibility/_index.md index 61775bcf26b5..30825a901f4b 100644 --- a/docs/sources/flow/reference/compatibility/_index.md +++ b/docs/sources/flow/reference/compatibility/_index.md @@ -44,6 +44,10 @@ The following components, grouped by namespace, _export_ Targets. 
+{{< collapse title="beyla" >}} +- [beyla.ebpf](../components/beyla.ebpf) +{{< /collapse >}} + {{< collapse title="discovery" >}} - [discovery.azure](../components/discovery.azure) - [discovery.consul](../components/discovery.consul) @@ -315,6 +319,10 @@ The following components, grouped by namespace, _consume_ OpenTelemetry `otelcol +{{< collapse title="beyla" >}} +- [beyla.ebpf](../components/beyla.ebpf) +{{< /collapse >}} + {{< collapse title="faro" >}} - [faro.receiver](../components/faro.receiver) {{< /collapse >}} diff --git a/docs/sources/flow/reference/components/beyla.ebpf.md b/docs/sources/flow/reference/components/beyla.ebpf.md new file mode 100644 index 000000000000..ad35b9ccba45 --- /dev/null +++ b/docs/sources/flow/reference/components/beyla.ebpf.md @@ -0,0 +1,276 @@ +--- +canonical: https://grafana.com/docs/agent/latest/flow/reference/components/beyla.ebpf/ +description: Learn about beyla.ebpf +title: beyla.ebpf +labels: + stage: beta +--- + +# beyla.ebpf + +{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}} + +The `beyla.ebpf` component is used as a wrapper for [Grafana Beyla][] which uses [eBPF][] to automatically inspect application executables and the OS networking layer, and capture trace spans related to web transactions and Rate Errors Duration (RED) metrics for Linux HTTP/S and gRPC services. +You can configure the component to collect telemetry data from a specific port or executable path, and other criteria from Kubernetes metadata. +The component exposes metrics that can be collected by a Prometheus scrape component, and traces that can be forwarded to an OTEL exporter component. + +{{< admonition type="note" >}} +To run this component, {{< param "PRODUCT_NAME" >}} requires administrative (`sudo`) privileges, or at least it needs to be granted the `CAP_SYS_ADMIN` and `CAP_SYS_PTRACE` capability. 
In Kubernetes environments, app armour must be disabled for the Deployment or DaemonSet running {{< param "PRODUCT_NAME" >}}. +{{< /admonition >}} + +[Grafana Beyla]: https://github.com/grafana/beyla +[eBPF]: https://ebpf.io/ + +## Usage + +```river +beyla.ebpf "