diff --git a/.chloggen/routing-by-metrics.yaml b/.chloggen/routing-by-metrics.yaml new file mode 100644 index 000000000000..070210e19a55 --- /dev/null +++ b/.chloggen/routing-by-metrics.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: routingconnector + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Add ability to route by metric context + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [36236] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [] diff --git a/CHANGELOG.md b/CHANGELOG.md index 225fe8e15ace..8e9d1664c999 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -368,7 +368,7 @@ arrow.waiter_limit -> admission.waiter_limit https://github.com/open-telemetry/opentelemetry-collector/pull/6250 - `spanmetricsconnector`: Improve consistency between metrics generated by spanmetricsconnector. Added traces.span.metrics as default namespace (#33227, #32818) - Default namespace for the generated metrics is traces.span.metrics now. | The deprecated metrics are: calls, duration and events. | The feature flag connector.spanmetrics.legacyLatencyMetricNames was added to revert the behavior. + Default namespace for the generated metrics is traces.span.metrics now. | The deprecated metrics are: calls, duration and events. | The feature flag connector.spanmetrics.legacyMetricNames was added to revert the behavior. - `servicegraphconnector`: Fix histogram metrics miss unit (#34511) All metrics will remove the suffix `_seconds`. It will not introduce breaking change if users use | `prometheusexporter` or `prometheusremotewriteexporter` to exporter metrics in pipeline. | In some cases, like using `clickhouseexporter`(save data in native OTLP format), it will be a breaking change. | Users can use `transformprocessor` to add back this suffix. - `gitproviderreceiver`: The Git Provider Receiver has been renamed to GitHub Receiver. 
(#34731) diff --git a/connector/countconnector/factory.go b/connector/countconnector/factory.go index e9a44d3111a4..ce4d35524fe5 100644 --- a/connector/countconnector/factory.go +++ b/connector/countconnector/factory.go @@ -13,7 +13,6 @@ import ( "go.opentelemetry.io/collector/consumer" "github.com/open-telemetry/opentelemetry-collector-contrib/connector/countconnector/internal/metadata" - "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/expr" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottldatapoint" @@ -156,7 +155,7 @@ func createLogsToMetrics( } type metricDef[K any] struct { - condition expr.BoolExpr[K] + condition *ottl.ConditionSequence[K] desc string attrs []AttributeConfig } diff --git a/connector/routingconnector/README.md b/connector/routingconnector/README.md index 6a45f230fb20..02ad40317832 100644 --- a/connector/routingconnector/README.md +++ b/connector/routingconnector/README.md @@ -33,7 +33,7 @@ If you are not already familiar with connectors, you may find it helpful to firs The following settings are available: - `table (required)`: the routing table for this connector. -- `table.context (optional, default: resource)`: the [OTTL Context] in which the statement will be evaluated. Currently, only `resource`, `log`, and `request` are supported. +- `table.context (optional, default: resource)`: the [OTTL Context] in which the statement will be evaluated. Currently, only `resource`, `metric`, `log`, and `request` are supported. - `table.statement`: the routing condition provided as the [OTTL] statement. Required if `table.condition` is not provided. May not be used for `request` context. - `table.condition`: the routing condition provided as the [OTTL] condition. Required if `table.statement` is not provided. Required for `request` context. - `table.pipelines (required)`: the list of pipelines to use when the routing condition is met. @@ -43,7 +43,7 @@ The following settings are available: ### Limitations -- The `match_once` setting is only supported when using the `resource` context. If any routes use `log` or `request` context, `match_once` must be set to `true`. +- The `match_once` setting is only supported when using the `resource` context. If any routes use `metric`, `log` or `request` context, `match_once` must be set to `true`. - The `request` context requires use of the `condition` setting, and relies on a very limited grammar. Conditions must be in the form of `request["key"] == "value"` or `request["key"] != "value"`. (In the future, this grammar may be expanded to support more complex conditions.) 
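For reference, a minimal configuration sketch using the new `metric` context, based only on the settings and limitations documented above. The `route()` statement form and the `name == "..."` condition mirror the test cases added in this change; the pipeline names (`metrics/default`, `metrics/special`) are hypothetical, and `default_pipelines` is assumed from the connector's existing settings.

```yaml
connectors:
  routing:
    # The metric context is only supported with match_once: true (see Limitations above).
    match_once: true
    default_pipelines: [metrics/default]   # hypothetical pipeline name
    table:
      - context: metric
        statement: route() where name == "metricE"   # route a specific metric by name
        pipelines: [metrics/special]                  # hypothetical pipeline name
```

Metrics matching the statement are routed to the listed pipelines; non-matching metrics fall through to the default pipelines, with their enclosing resource and scope copied only as needed (see `MoveMetricsWithContextIf` later in this diff).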
### Supported [OTTL] functions diff --git a/connector/routingconnector/config.go b/connector/routingconnector/config.go index f526ec460ab9..fb2f838474c7 100644 --- a/connector/routingconnector/config.go +++ b/connector/routingconnector/config.go @@ -77,7 +77,7 @@ func (c *Config) Validate() error { return err } fallthrough - case "log": // ok + case "metric", "log": // ok if !c.MatchOnce { return fmt.Errorf(`%q context is not supported with "match_once: false"`, item.Context) } diff --git a/connector/routingconnector/config_test.go b/connector/routingconnector/config_test.go index 0cd0456ec8af..b79eb4ee1bf3 100644 --- a/connector/routingconnector/config_test.go +++ b/connector/routingconnector/config_test.go @@ -218,6 +218,22 @@ func TestValidateConfig(t *testing.T) { }, error: "invalid context: invalid", }, + { + name: "metric context with match_once false", + config: &Config{ + MatchOnce: false, + Table: []RoutingTableItem{ + { + Context: "metric", + Statement: `route() where attributes["attr"] == "acme"`, + Pipelines: []pipeline.ID{ + pipeline.NewIDWithName(pipeline.SignalTraces, "otlp"), + }, + }, + }, + }, + error: `"metric" context is not supported with "match_once: false"`, + }, { name: "log context with match_once false", config: &Config{ diff --git a/connector/routingconnector/internal/pmetricutil/metrics.go b/connector/routingconnector/internal/pmetricutil/metrics.go index 1ca6d23b1ad7..58199dc02fe8 100644 --- a/connector/routingconnector/internal/pmetricutil/metrics.go +++ b/connector/routingconnector/internal/pmetricutil/metrics.go @@ -16,3 +16,46 @@ func MoveResourcesIf(from, to pmetric.Metrics, f func(pmetric.ResourceMetrics) b return true }) } + +// MoveMetricsWithContextIf calls f sequentially for each Metric present in the first pmetric.Metrics. +// If f returns true, the element is removed from the first pmetric.Metrics and added to the second pmetric.Metrics. +// Notably, the Resource and Scope associated with the Metric are created in the second pmetric.Metrics only once. +// Resources or Scopes are removed from the original if they become empty. All ordering is preserved. 
+func MoveMetricsWithContextIf(from, to pmetric.Metrics, f func(pmetric.ResourceMetrics, pmetric.ScopeMetrics, pmetric.Metric) bool) { + rms := from.ResourceMetrics() + for i := 0; i < rms.Len(); i++ { + rm := rms.At(i) + sms := rm.ScopeMetrics() + var rmCopy *pmetric.ResourceMetrics + for j := 0; j < sms.Len(); j++ { + sm := sms.At(j) + ms := sm.Metrics() + var smCopy *pmetric.ScopeMetrics + ms.RemoveIf(func(m pmetric.Metric) bool { + if !f(rm, sm, m) { + return false + } + if rmCopy == nil { + rmc := to.ResourceMetrics().AppendEmpty() + rmCopy = &rmc + rm.Resource().CopyTo(rmCopy.Resource()) + rmCopy.SetSchemaUrl(rm.SchemaUrl()) + } + if smCopy == nil { + smc := rmCopy.ScopeMetrics().AppendEmpty() + smCopy = &smc + sm.Scope().CopyTo(smCopy.Scope()) + smCopy.SetSchemaUrl(sm.SchemaUrl()) + } + m.CopyTo(smCopy.Metrics().AppendEmpty()) + return true + }) + } + sms.RemoveIf(func(sm pmetric.ScopeMetrics) bool { + return sm.Metrics().Len() == 0 + }) + } + rms.RemoveIf(func(rm pmetric.ResourceMetrics) bool { + return rm.ScopeMetrics().Len() == 0 + }) +} diff --git a/connector/routingconnector/internal/pmetricutil/metrics_test.go b/connector/routingconnector/internal/pmetricutil/metrics_test.go index 5b3d751c6826..8c23b4232246 100644 --- a/connector/routingconnector/internal/pmetricutil/metrics_test.go +++ b/connector/routingconnector/internal/pmetricutil/metrics_test.go @@ -80,3 +80,147 @@ func TestMoveResourcesIf(t *testing.T) { }) } } + +func TestMoveMetricsWithContextIf(t *testing.T) { + testCases := []struct { + name string + moveIf func(pmetric.ResourceMetrics, pmetric.ScopeMetrics, pmetric.Metric) bool + from pmetric.Metrics + to pmetric.Metrics + expectFrom pmetric.Metrics + expectTo pmetric.Metrics + }{ + { + name: "move_none", + moveIf: func(_ pmetric.ResourceMetrics, _ pmetric.ScopeMetrics, _ pmetric.Metric) bool { + return false + }, + from: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + to: pmetric.NewMetrics(), + expectFrom: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + expectTo: pmetric.NewMetrics(), + }, + { + name: "move_all", + moveIf: func(_ pmetric.ResourceMetrics, _ pmetric.ScopeMetrics, _ pmetric.Metric) bool { + return true + }, + from: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + to: pmetric.NewMetrics(), + expectFrom: pmetric.NewMetrics(), + expectTo: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + }, + { + name: "move_all_from_one_resource", + moveIf: func(rl pmetric.ResourceMetrics, _ pmetric.ScopeMetrics, _ pmetric.Metric) bool { + rname, ok := rl.Resource().Attributes().Get("resourceName") + return ok && rname.AsString() == "resourceB" + }, + from: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + to: pmetric.NewMetrics(), + expectFrom: pmetricutiltest.NewMetrics("A", "CD", "EF", "GH"), + expectTo: pmetricutiltest.NewMetrics("B", "CD", "EF", "GH"), + }, + { + name: "move_all_from_one_scope", + moveIf: func(rl pmetric.ResourceMetrics, sl pmetric.ScopeMetrics, _ pmetric.Metric) bool { + rname, ok := rl.Resource().Attributes().Get("resourceName") + return ok && rname.AsString() == "resourceB" && sl.Scope().Name() == "scopeC" + }, + from: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + to: pmetric.NewMetrics(), + expectFrom: pmetricutiltest.NewMetricsFromOpts( + pmetricutiltest.WithResource('A', + pmetricutiltest.WithScope('C', pmetricutiltest.WithMetric('E', "GH"), pmetricutiltest.WithMetric('F', "GH")), + pmetricutiltest.WithScope('D', pmetricutiltest.WithMetric('E', "GH"), pmetricutiltest.WithMetric('F', "GH")), + ), + 
pmetricutiltest.WithResource('B', + pmetricutiltest.WithScope('D', pmetricutiltest.WithMetric('E', "GH"), pmetricutiltest.WithMetric('F', "GH")), + ), + ), + expectTo: pmetricutiltest.NewMetrics("B", "C", "EF", "GH"), + }, + { + name: "move_all_from_one_scope_in_each_resource", + moveIf: func(_ pmetric.ResourceMetrics, sl pmetric.ScopeMetrics, _ pmetric.Metric) bool { + return sl.Scope().Name() == "scopeD" + }, + from: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + to: pmetric.NewMetrics(), + expectFrom: pmetricutiltest.NewMetrics("AB", "C", "EF", "GH"), + expectTo: pmetricutiltest.NewMetrics("AB", "D", "EF", "GH"), + }, + { + name: "move_one", + moveIf: func(rl pmetric.ResourceMetrics, sl pmetric.ScopeMetrics, m pmetric.Metric) bool { + rname, ok := rl.Resource().Attributes().Get("resourceName") + return ok && rname.AsString() == "resourceA" && sl.Scope().Name() == "scopeD" && m.Name() == "metricF" + }, + from: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + to: pmetric.NewMetrics(), + expectFrom: pmetricutiltest.NewMetricsFromOpts( + pmetricutiltest.WithResource('A', + pmetricutiltest.WithScope('C', pmetricutiltest.WithMetric('E', "GH"), pmetricutiltest.WithMetric('F', "GH")), + pmetricutiltest.WithScope('D', pmetricutiltest.WithMetric('E', "GH")), + ), + pmetricutiltest.WithResource('B', + pmetricutiltest.WithScope('C', pmetricutiltest.WithMetric('E', "GH"), pmetricutiltest.WithMetric('F', "GH")), + pmetricutiltest.WithScope('D', pmetricutiltest.WithMetric('E', "GH"), pmetricutiltest.WithMetric('F', "GH")), + ), + ), + expectTo: pmetricutiltest.NewMetrics("A", "D", "F", "GH"), + }, + { + name: "move_one_from_each_scope", + moveIf: func(_ pmetric.ResourceMetrics, _ pmetric.ScopeMetrics, m pmetric.Metric) bool { + return m.Name() == "metricE" + }, + from: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + to: pmetric.NewMetrics(), + expectFrom: pmetricutiltest.NewMetrics("AB", "CD", "F", "GH"), + expectTo: pmetricutiltest.NewMetrics("AB", "CD", "E", "GH"), + }, + { + name: "move_one_from_each_scope_in_one_resource", + moveIf: func(rl pmetric.ResourceMetrics, _ pmetric.ScopeMetrics, m pmetric.Metric) bool { + rname, ok := rl.Resource().Attributes().Get("resourceName") + return ok && rname.AsString() == "resourceB" && m.Name() == "metricE" + }, + from: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + to: pmetric.NewMetrics(), + expectFrom: pmetricutiltest.NewMetricsFromOpts( + pmetricutiltest.WithResource('A', + pmetricutiltest.WithScope('C', pmetricutiltest.WithMetric('E', "GH"), pmetricutiltest.WithMetric('F', "GH")), + pmetricutiltest.WithScope('D', pmetricutiltest.WithMetric('E', "GH"), pmetricutiltest.WithMetric('F', "GH")), + ), + pmetricutiltest.WithResource('B', + pmetricutiltest.WithScope('C', pmetricutiltest.WithMetric('F', "GH")), + pmetricutiltest.WithScope('D', pmetricutiltest.WithMetric('F', "GH")), + ), + ), + expectTo: pmetricutiltest.NewMetrics("B", "CD", "E", "GH"), + }, + { + name: "move_some_to_preexisting", + moveIf: func(_ pmetric.ResourceMetrics, sl pmetric.ScopeMetrics, _ pmetric.Metric) bool { + return sl.Scope().Name() == "scopeD" + }, + from: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + to: pmetricutiltest.NewMetrics("1", "2", "3", "4"), + expectFrom: pmetricutiltest.NewMetrics("AB", "C", "EF", "GH"), + expectTo: pmetricutiltest.NewMetricsFromOpts( + pmetricutiltest.WithResource('1', pmetricutiltest.WithScope('2', pmetricutiltest.WithMetric('3', "4"))), + pmetricutiltest.WithResource('A', pmetricutiltest.WithScope('D', 
pmetricutiltest.WithMetric('E', "GH"), pmetricutiltest.WithMetric('F', "GH"))), + pmetricutiltest.WithResource('B', pmetricutiltest.WithScope('D', pmetricutiltest.WithMetric('E', "GH"), pmetricutiltest.WithMetric('F', "GH"))), + ), + }, + } + + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + pmetricutil.MoveMetricsWithContextIf(tt.from, tt.to, tt.moveIf) + assert.NoError(t, pmetrictest.CompareMetrics(tt.expectFrom, tt.from), "from not modified as expected") + assert.NoError(t, pmetrictest.CompareMetrics(tt.expectTo, tt.to), "to not as expected") + }) + } +} diff --git a/connector/routingconnector/internal/pmetricutiltest/metrics.go b/connector/routingconnector/internal/pmetricutiltest/metrics.go index ce8b2cb06d5e..a908e1638e63 100644 --- a/connector/routingconnector/internal/pmetricutiltest/metrics.go +++ b/connector/routingconnector/internal/pmetricutiltest/metrics.go @@ -43,3 +43,59 @@ func NewMetrics(resourceIDs, scopeIDs, metricIDs, dataPointIDs string) pmetric.M } return md } + +type Resource struct { + id byte + scopes []Scope +} + +type Scope struct { + id byte + metrics []Metric +} + +type Metric struct { + id byte + dataPoints string +} + +func WithResource(id byte, scopes ...Scope) Resource { + r := Resource{id: id} + r.scopes = append(r.scopes, scopes...) + return r +} + +func WithScope(id byte, metrics ...Metric) Scope { + s := Scope{id: id} + s.metrics = append(s.metrics, metrics...) + return s +} + +func WithMetric(id byte, dataPoints string) Metric { + return Metric{id: id, dataPoints: dataPoints} +} + +// NewMetricsFromOpts creates a pmetric.Metrics with the specified resources, scopes, metrics, +// and data points. The general idea is the same as NewMetrics, but this function allows for +// more flexibility in creating non-uniform structures. 
+func NewMetricsFromOpts(resources ...Resource) pmetric.Metrics { + md := pmetric.NewMetrics() + for _, resource := range resources { + r := md.ResourceMetrics().AppendEmpty() + r.Resource().Attributes().PutStr("resourceName", "resource"+string(resource.id)) + for _, scope := range resource.scopes { + s := r.ScopeMetrics().AppendEmpty() + s.Scope().SetName("scope" + string(scope.id)) + for _, metric := range scope.metrics { + m := s.Metrics().AppendEmpty() + m.SetName("metric" + string(metric.id)) + dps := m.SetEmptyGauge().DataPoints() + for i := 0; i < len(metric.dataPoints); i++ { + dp := dps.AppendEmpty() + dp.Attributes().PutStr("dpName", "dp"+string(metric.dataPoints[i])) + } + } + } + } + return md +} diff --git a/connector/routingconnector/internal/pmetricutiltest/metrics_test.go b/connector/routingconnector/internal/pmetricutiltest/metrics_test.go index 069a27f8282c..3be7405a1e14 100644 --- a/connector/routingconnector/internal/pmetricutiltest/metrics_test.go +++ b/connector/routingconnector/internal/pmetricutiltest/metrics_test.go @@ -18,6 +18,7 @@ func TestNewMetrics(t *testing.T) { t.Run("empty", func(t *testing.T) { expected := pmetric.NewMetrics() assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewMetrics("", "", "", ""))) + assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewMetricsFromOpts())) }) t.Run("simple", func(t *testing.T) { @@ -34,7 +35,15 @@ func TestNewMetrics(t *testing.T) { dp.Attributes().PutStr("dpName", "dpD") // resourceA.scopeB.metricC.dpD return md }() + fromOpts := pmetricutiltest.NewMetricsFromOpts( + pmetricutiltest.WithResource('A', + pmetricutiltest.WithScope('B', + pmetricutiltest.WithMetric('C', "D"), + ), + ), + ) assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewMetrics("A", "B", "C", "D"))) + assert.NoError(t, pmetrictest.CompareMetrics(expected, fromOpts)) }) t.Run("two_resources", func(t *testing.T) { @@ -60,7 +69,20 @@ func TestNewMetrics(t *testing.T) { dp.Attributes().PutStr("dpName", "dpE") // resourceB.scopeC.metricD.dpE return md }() + fromOpts := pmetricutiltest.NewMetricsFromOpts( + pmetricutiltest.WithResource('A', + pmetricutiltest.WithScope('C', + pmetricutiltest.WithMetric('D', "E"), + ), + ), + pmetricutiltest.WithResource('B', + pmetricutiltest.WithScope('C', + pmetricutiltest.WithMetric('D', "E"), + ), + ), + ) assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewMetrics("AB", "C", "D", "E"))) + assert.NoError(t, pmetrictest.CompareMetrics(expected, fromOpts)) }) t.Run("two_scopes", func(t *testing.T) { @@ -84,7 +106,18 @@ func TestNewMetrics(t *testing.T) { dp.Attributes().PutStr("dpName", "dpE") // resourceA.scopeC.metricD.dpE return md }() + fromOpts := pmetricutiltest.NewMetricsFromOpts( + pmetricutiltest.WithResource('A', + pmetricutiltest.WithScope('B', + pmetricutiltest.WithMetric('D', "E"), + ), + pmetricutiltest.WithScope('C', + pmetricutiltest.WithMetric('D', "E"), + ), + ), + ) assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewMetrics("A", "BC", "D", "E"))) + assert.NoError(t, pmetrictest.CompareMetrics(expected, fromOpts)) }) t.Run("two_metrics", func(t *testing.T) { @@ -106,7 +139,16 @@ func TestNewMetrics(t *testing.T) { dp.Attributes().PutStr("dpName", "dpE") // resourceA.scopeB.metricD.dpE return md }() + fromOpts := pmetricutiltest.NewMetricsFromOpts( + pmetricutiltest.WithResource('A', + pmetricutiltest.WithScope('B', + pmetricutiltest.WithMetric('C', "E"), + pmetricutiltest.WithMetric('D', "E"), + 
), + ), + ) assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewMetrics("A", "B", "CD", "E"))) + assert.NoError(t, pmetrictest.CompareMetrics(expected, fromOpts)) }) t.Run("two_datapoints", func(t *testing.T) { @@ -125,6 +167,14 @@ func TestNewMetrics(t *testing.T) { dp.Attributes().PutStr("dpName", "dpE") // resourceA.scopeB.metricC.dpE return md }() + fromOpts := pmetricutiltest.NewMetricsFromOpts( + pmetricutiltest.WithResource('A', + pmetricutiltest.WithScope('B', + pmetricutiltest.WithMetric('C', "DE"), + ), + ), + ) assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewMetrics("A", "B", "C", "DE"))) + assert.NoError(t, pmetrictest.CompareMetrics(expected, fromOpts)) }) } diff --git a/connector/routingconnector/logs_test.go b/connector/routingconnector/logs_test.go index 24747154c213..c0198fe16523 100644 --- a/connector/routingconnector/logs_test.go +++ b/connector/routingconnector/logs_test.go @@ -475,22 +475,20 @@ func TestLogsConnectorDetailed(t *testing.T) { isAcme := `request["X-Tenant"] == "acme"` - isAnyResource := `attributes["resourceName"] != nil` isResourceA := `attributes["resourceName"] == "resourceA"` isResourceB := `attributes["resourceName"] == "resourceB"` isResourceX := `attributes["resourceName"] == "resourceX"` isResourceY := `attributes["resourceName"] == "resourceY"` - isScopeC := `instrumentation_scope.name == "scopeC"` - isScopeD := `instrumentation_scope.name == "scopeD"` - - isAnyLog := `body != nil` isLogE := `body == "logE"` isLogF := `body == "logF"` isLogX := `body == "logX"` isLogY := `body == "logY"` - and, or := " and ", " or " + isScopeCFromLowerContext := `instrumentation_scope.name == "scopeC"` + isScopeDFromLowerContext := `instrumentation_scope.name == "scopeD"` + + isResourceBFromLowerContext := `resource.attributes["resourceName"] == "resourceB"` testCases := []struct { name string @@ -594,7 +592,7 @@ func TestLogsConnectorDetailed(t *testing.T) { { name: "resource/all_match_first_only", cfg: testConfig( - withRoute("resource", isAnyResource, idSink0), + withRoute("resource", "true", idSink0), withRoute("resource", isResourceY, idSink1), withDefault(idSinkD), ), @@ -607,7 +605,7 @@ func TestLogsConnectorDetailed(t *testing.T) { name: "resource/all_match_last_only", cfg: testConfig( withRoute("resource", isResourceX, idSink0), - withRoute("resource", isAnyResource, idSink1), + withRoute("resource", "true", idSink1), withDefault(idSinkD), ), input: plogutiltest.NewLogs("AB", "CD", "EF"), @@ -618,8 +616,8 @@ func TestLogsConnectorDetailed(t *testing.T) { { name: "resource/all_match_only_once", cfg: testConfig( - withRoute("resource", isAnyResource, idSink0), - withRoute("resource", isResourceA+or+isResourceB, idSink1), + withRoute("resource", "true", idSink0), + withRoute("resource", isResourceA+" or "+isResourceB, idSink1), withDefault(idSinkD), ), input: plogutiltest.NewLogs("AB", "CD", "EF"), @@ -688,7 +686,7 @@ func TestLogsConnectorDetailed(t *testing.T) { { name: "log/all_match_first_only", cfg: testConfig( - withRoute("log", isAnyLog, idSink0), + withRoute("log", "true", idSink0), withRoute("log", isLogY, idSink1), withDefault(idSinkD), ), @@ -701,7 +699,7 @@ func TestLogsConnectorDetailed(t *testing.T) { name: "log/all_match_last_only", cfg: testConfig( withRoute("log", isLogX, idSink0), - withRoute("log", isAnyLog, idSink1), + withRoute("log", "true", idSink1), withDefault(idSinkD), ), input: plogutiltest.NewLogs("AB", "CD", "EF"), @@ -712,8 +710,8 @@ func TestLogsConnectorDetailed(t *testing.T) { 
{ name: "log/all_match_only_once", cfg: testConfig( - withRoute("log", isAnyLog, idSink0), - withRoute("log", isLogE+or+isLogF, idSink1), + withRoute("log", "true", idSink0), + withRoute("log", isLogE+" or "+isLogF, idSink1), withDefault(idSinkD), ), input: plogutiltest.NewLogs("AB", "CD", "EF"), @@ -782,7 +780,7 @@ func TestLogsConnectorDetailed(t *testing.T) { { name: "log/with_resource_condition", cfg: testConfig( - withRoute("log", "resource."+isResourceB+and+isAnyLog, idSink0), + withRoute("log", isResourceBFromLowerContext, idSink0), withRoute("log", isLogY, idSink1), withDefault(idSinkD), ), @@ -794,7 +792,7 @@ func TestLogsConnectorDetailed(t *testing.T) { { name: "log/with_scope_condition", cfg: testConfig( - withRoute("log", isScopeC+and+isAnyLog, idSink0), + withRoute("log", isScopeCFromLowerContext, idSink0), withRoute("log", isLogY, idSink1), withDefault(idSinkD), ), @@ -806,7 +804,7 @@ func TestLogsConnectorDetailed(t *testing.T) { { name: "log/with_resource_and_scope_conditions", cfg: testConfig( - withRoute("log", "resource."+isResourceB+and+isScopeD+and+isAnyLog, idSink0), + withRoute("log", isResourceBFromLowerContext+" and "+isScopeDFromLowerContext, idSink0), withRoute("log", isLogY, idSink1), withDefault(idSinkD), ), diff --git a/connector/routingconnector/metrics.go b/connector/routingconnector/metrics.go index 8f25c586bf71..874d8c2d9887 100644 --- a/connector/routingconnector/metrics.go +++ b/connector/routingconnector/metrics.go @@ -15,6 +15,7 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/connector/routingconnector/internal/pmetricutil" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlmetric" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlresource" ) @@ -88,6 +89,15 @@ func (c *metricsConnector) switchMetrics(ctx context.Context, md pmetric.Metrics return isMatch }, ) + case "metric": + pmetricutil.MoveMetricsWithContextIf(md, matchedMetrics, + func(rm pmetric.ResourceMetrics, sm pmetric.ScopeMetrics, m pmetric.Metric) bool { + mtx := ottlmetric.NewTransformContext(m, sm.Metrics(), sm.Scope(), rm.Resource(), sm, rm) + _, isMatch, err := route.metricStatement.Execute(ctx, mtx) + errs = errors.Join(errs, err) + return isMatch + }, + ) } if errs != nil { if c.config.ErrorMode == ottl.PropagateError { diff --git a/connector/routingconnector/metrics_test.go b/connector/routingconnector/metrics_test.go index 0fba4eabc748..f87a15ff613c 100644 --- a/connector/routingconnector/metrics_test.go +++ b/connector/routingconnector/metrics_test.go @@ -505,12 +505,21 @@ func TestMetricsConnectorDetailed(t *testing.T) { isAcme := `request["X-Tenant"] == "acme"` - isAnyResource := `attributes["resourceName"] != nil` isResourceA := `attributes["resourceName"] == "resourceA"` isResourceB := `attributes["resourceName"] == "resourceB"` isResourceX := `attributes["resourceName"] == "resourceX"` isResourceY := `attributes["resourceName"] == "resourceY"` + isMetricE := `name == "metricE"` + isMetricF := `name == "metricF"` + isMetricX := `name == "metricX"` + isMetricY := `name == "metricY"` + + isScopeCFromLowerContext := `instrumentation_scope.name == "scopeC"` + isScopeDFromLowerContext := `instrumentation_scope.name == "scopeD"` + + isResourceBFromLowerContext := `resource.attributes["resourceName"] == "resourceB"` + testCases := []struct { name string cfg *Config @@ -613,7 +622,7 @@ func 
TestMetricsConnectorDetailed(t *testing.T) { { name: "resource/all_match_first_only", cfg: testConfig( - withRoute("resource", isAnyResource, idSink0), + withRoute("resource", "true", idSink0), withRoute("resource", isResourceY, idSink1), withDefault(idSinkD), ), @@ -626,7 +635,7 @@ func TestMetricsConnectorDetailed(t *testing.T) { name: "resource/all_match_last_only", cfg: testConfig( withRoute("resource", isResourceX, idSink0), - withRoute("resource", isAnyResource, idSink1), + withRoute("resource", "true", idSink1), withDefault(idSinkD), ), input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), @@ -637,7 +646,7 @@ func TestMetricsConnectorDetailed(t *testing.T) { { name: "resource/all_match_only_once", cfg: testConfig( - withRoute("resource", isAnyResource, idSink0), + withRoute("resource", "true", idSink0), withRoute("resource", isResourceA+" or "+isResourceB, idSink1), withDefault(idSinkD), ), @@ -704,6 +713,168 @@ func TestMetricsConnectorDetailed(t *testing.T) { expectSink1: pmetric.Metrics{}, expectSinkD: pmetric.Metrics{}, }, + { + name: "metric/all_match_first_only", + cfg: testConfig( + withRoute("metric", "true", idSink0), + withRoute("metric", isMetricY, idSink1), + withDefault(idSinkD), + ), + input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + expectSink0: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + expectSink1: pmetric.Metrics{}, + expectSinkD: pmetric.Metrics{}, + }, + { + name: "metric/all_match_last_only", + cfg: testConfig( + withRoute("metric", isMetricX, idSink0), + withRoute("metric", "true", idSink1), + withDefault(idSinkD), + ), + input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + expectSink0: pmetric.Metrics{}, + expectSink1: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + expectSinkD: pmetric.Metrics{}, + }, + { + name: "metric/all_match_only_once", + cfg: testConfig( + withRoute("metric", "true", idSink0), + withRoute("metric", isMetricE+" or "+isMetricF, idSink1), + withDefault(idSinkD), + ), + input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + expectSink0: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + expectSink1: pmetric.Metrics{}, + expectSinkD: pmetric.Metrics{}, + }, + { + name: "metric/each_matches_one", + cfg: testConfig( + withRoute("metric", isMetricE, idSink0), + withRoute("metric", isMetricF, idSink1), + withDefault(idSinkD), + ), + input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + expectSink0: pmetricutiltest.NewMetrics("AB", "CD", "E", "GH"), + expectSink1: pmetricutiltest.NewMetrics("AB", "CD", "F", "GH"), + expectSinkD: pmetric.Metrics{}, + }, + { + name: "metric/some_match_with_default", + cfg: testConfig( + withRoute("metric", isMetricX, idSink0), + withRoute("metric", isMetricF, idSink1), + withDefault(idSinkD), + ), + input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + expectSink0: pmetric.Metrics{}, + expectSink1: pmetricutiltest.NewMetrics("AB", "CD", "F", "GH"), + expectSinkD: pmetricutiltest.NewMetrics("AB", "CD", "E", "GH"), + }, + { + name: "metric/some_match_without_default", + cfg: testConfig( + withRoute("metric", isMetricX, idSink0), + withRoute("metric", isMetricF, idSink1), + ), + input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + expectSink0: pmetric.Metrics{}, + expectSink1: pmetricutiltest.NewMetrics("AB", "CD", "F", "GH"), + expectSinkD: pmetric.Metrics{}, + }, + { + name: "metric/match_none_with_default", + cfg: testConfig( + withRoute("metric", isMetricX, idSink0), + withRoute("metric", isMetricY, idSink1), + withDefault(idSinkD), + ), + 
input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + expectSink0: pmetric.Metrics{}, + expectSink1: pmetric.Metrics{}, + expectSinkD: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + }, + { + name: "metric/match_none_without_default", + cfg: testConfig( + withRoute("metric", isMetricX, idSink0), + withRoute("metric", isMetricY, idSink1), + ), + input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + expectSink0: pmetric.Metrics{}, + expectSink1: pmetric.Metrics{}, + expectSinkD: pmetric.Metrics{}, + }, + { + name: "metric/with_resource_condition", + cfg: testConfig( + withRoute("metric", isResourceBFromLowerContext, idSink0), + withRoute("metric", isMetricY, idSink1), + withDefault(idSinkD), + ), + input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + expectSink0: pmetricutiltest.NewMetrics("B", "CD", "EF", "GH"), + expectSink1: pmetric.Metrics{}, + expectSinkD: pmetricutiltest.NewMetrics("A", "CD", "EF", "GH"), + }, + { + name: "metric/with_scope_condition", + cfg: testConfig( + withRoute("metric", isScopeCFromLowerContext, idSink0), + withRoute("metric", isMetricY, idSink1), + withDefault(idSinkD), + ), + input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + expectSink0: pmetricutiltest.NewMetrics("AB", "C", "EF", "GH"), + expectSink1: pmetric.Metrics{}, + expectSinkD: pmetricutiltest.NewMetrics("AB", "D", "EF", "GH"), + }, + { + name: "metric/with_resource_and_scope_conditions", + cfg: testConfig( + withRoute("metric", isResourceBFromLowerContext+" and "+isScopeDFromLowerContext, idSink0), + withRoute("metric", isMetricY, idSink1), + withDefault(idSinkD), + ), + input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + expectSink0: pmetricutiltest.NewMetrics("B", "D", "EF", "GH"), + expectSink1: pmetric.Metrics{}, + expectSinkD: pmetricutiltest.NewMetricsFromOpts( + pmetricutiltest.WithResource('A', + pmetricutiltest.WithScope('C', pmetricutiltest.WithMetric('E', "GH"), pmetricutiltest.WithMetric('F', "GH")), + pmetricutiltest.WithScope('D', pmetricutiltest.WithMetric('E', "GH"), pmetricutiltest.WithMetric('F', "GH")), + ), + pmetricutiltest.WithResource('B', + pmetricutiltest.WithScope('C', pmetricutiltest.WithMetric('E', "GH"), pmetricutiltest.WithMetric('F', "GH")), + ), + ), + }, + { + name: "mixed/match_resource_then_metrics", + cfg: testConfig( + withRoute("resource", isResourceA, idSink0), + withRoute("metric", isMetricE, idSink1), + withDefault(idSinkD), + ), + input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + expectSink0: pmetricutiltest.NewMetrics("A", "CD", "EF", "GH"), + expectSink1: pmetricutiltest.NewMetrics("B", "CD", "E", "GH"), + expectSinkD: pmetricutiltest.NewMetrics("B", "CD", "F", "GH"), + }, + { + name: "mixed/match_metrics_then_resource", + cfg: testConfig( + withRoute("metric", isMetricE, idSink0), + withRoute("resource", isResourceB, idSink1), + withDefault(idSinkD), + ), + input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + expectSink0: pmetricutiltest.NewMetrics("AB", "CD", "E", "GH"), + expectSink1: pmetricutiltest.NewMetrics("B", "CD", "F", "GH"), + expectSinkD: pmetricutiltest.NewMetrics("A", "CD", "F", "GH"), + }, { name: "mixed/match_resource_then_grpc_request", cfg: testConfig( @@ -717,6 +888,19 @@ func TestMetricsConnectorDetailed(t *testing.T) { expectSink1: pmetricutiltest.NewMetrics("B", "CD", "EF", "GH"), expectSinkD: pmetric.Metrics{}, }, + { + name: "mixed/match_metrics_then_grpc_request", + cfg: testConfig( + withRoute("metric", isMetricF, idSink0), + withRoute("request", isAcme, idSink1), + 
withDefault(idSinkD), + ), + ctx: withGRPCMetadata(context.Background(), map[string]string{"X-Tenant": "acme"}), + input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + expectSink0: pmetricutiltest.NewMetrics("AB", "CD", "F", "GH"), + expectSink1: pmetricutiltest.NewMetrics("AB", "CD", "E", "GH"), + expectSinkD: pmetric.Metrics{}, + }, { name: "mixed/match_resource_then_http_request", cfg: testConfig( @@ -730,6 +914,19 @@ func TestMetricsConnectorDetailed(t *testing.T) { expectSink1: pmetricutiltest.NewMetrics("B", "CD", "EF", "GH"), expectSinkD: pmetric.Metrics{}, }, + { + name: "mixed/match_metrics_then_http_request", + cfg: testConfig( + withRoute("metric", isMetricF, idSink0), + withRoute("request", isAcme, idSink1), + withDefault(idSinkD), + ), + ctx: withHTTPMetadata(context.Background(), map[string][]string{"X-Tenant": {"acme"}}), + input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + expectSink0: pmetricutiltest.NewMetrics("AB", "CD", "F", "GH"), + expectSink1: pmetricutiltest.NewMetrics("AB", "CD", "E", "GH"), + expectSinkD: pmetric.Metrics{}, + }, } for _, tt := range testCases { diff --git a/connector/routingconnector/router.go b/connector/routingconnector/router.go index 9114695bab67..01dd13143261 100644 --- a/connector/routingconnector/router.go +++ b/connector/routingconnector/router.go @@ -15,6 +15,7 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/connector/routingconnector/internal/common" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottllog" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlmetric" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlresource" ) @@ -31,6 +32,7 @@ type consumerProvider[C any] func(...pipeline.ID) (C, error) type router[C any] struct { logger *zap.Logger resourceParser ottl.Parser[ottlresource.TransformContext] + metricParser ottl.Parser[ottlmetric.TransformContext] logParser ottl.Parser[ottllog.TransformContext] table []RoutingTableItem @@ -72,15 +74,18 @@ type routingItem[C any] struct { statementContext string requestCondition *requestCondition resourceStatement *ottl.Statement[ottlresource.TransformContext] + metricStatement *ottl.Statement[ottlmetric.TransformContext] logStatement *ottl.Statement[ottllog.TransformContext] } func (r *router[C]) buildParsers(table []RoutingTableItem, settings component.TelemetrySettings) error { - var buildResource, buildLog bool + var buildResource, buildMetric, buildLog bool for _, item := range table { switch item.Context { case "", "resource": buildResource = true + case "metric": + buildMetric = true case "log": buildLog = true } @@ -98,6 +103,17 @@ func (r *router[C]) buildParsers(table []RoutingTableItem, settings component.Te errs = errors.Join(errs, err) } } + if buildMetric { + parser, err := ottlmetric.NewParser( + common.Functions[ottlmetric.TransformContext](), + settings, + ) + if err == nil { + r.metricParser = parser + } else { + errs = errors.Join(errs, err) + } + } if buildLog { parser, err := ottllog.NewParser( common.Functions[ottllog.TransformContext](), @@ -174,6 +190,12 @@ func (r *router[C]) registerRouteConsumers() (err error) { return err } route.resourceStatement = statement + case "metric": + statement, err := r.metricParser.ParseStatement(item.Statement) + if err != nil { + return err + } + route.metricStatement = statement case "log": 
statement, err := r.logParser.ParseStatement(item.Statement) if err != nil { diff --git a/connector/sumconnector/factory.go b/connector/sumconnector/factory.go index 122484c9a843..7ebf2b263a4d 100644 --- a/connector/sumconnector/factory.go +++ b/connector/sumconnector/factory.go @@ -13,7 +13,6 @@ import ( "go.opentelemetry.io/collector/consumer" "github.com/open-telemetry/opentelemetry-collector-contrib/connector/sumconnector/internal/metadata" - "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/expr" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottldatapoint" @@ -161,7 +160,7 @@ func createLogsToMetrics( } type metricDef[K any] struct { - condition expr.BoolExpr[K] + condition *ottl.ConditionSequence[K] desc string attrs []AttributeConfig sourceAttr string diff --git a/exporter/honeycombmarkerexporter/logs_exporter.go b/exporter/honeycombmarkerexporter/logs_exporter.go index 31aa476e0508..0e9bf4f7bc0b 100644 --- a/exporter/honeycombmarkerexporter/logs_exporter.go +++ b/exporter/honeycombmarkerexporter/logs_exporter.go @@ -19,7 +19,6 @@ import ( "go.opentelemetry.io/collector/exporter" "go.opentelemetry.io/collector/pdata/plog" - "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/expr" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottllog" @@ -34,7 +33,7 @@ const ( type marker struct { Marker - logBoolExpr expr.BoolExpr[ottllog.TransformContext] + logBoolExpr *ottl.ConditionSequence[ottllog.TransformContext] } type honeycombLogsExporter struct { diff --git a/internal/filter/filterottl/filter.go b/internal/filter/filterottl/filter.go index e4dad6ee9359..705b2acf5a4e 100644 --- a/internal/filter/filterottl/filter.go +++ b/internal/filter/filterottl/filter.go @@ -6,7 +6,6 @@ package filterottl // import "github.com/open-telemetry/opentelemetry-collector- import ( "go.opentelemetry.io/collector/component" - "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/expr" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottldatapoint" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottllog" @@ -20,7 +19,7 @@ import ( // NewBoolExprForSpan creates a BoolExpr[ottlspan.TransformContext] that will return true if any of the given OTTL conditions evaluate to true. // The passed in functions should use the ottlspan.TransformContext. 
// If a function named `match` is not present in the function map it will be added automatically so that parsing works as expected -func NewBoolExprForSpan(conditions []string, functions map[string]ottl.Factory[ottlspan.TransformContext], errorMode ottl.ErrorMode, set component.TelemetrySettings) (expr.BoolExpr[ottlspan.TransformContext], error) { +func NewBoolExprForSpan(conditions []string, functions map[string]ottl.Factory[ottlspan.TransformContext], errorMode ottl.ErrorMode, set component.TelemetrySettings) (*ottl.ConditionSequence[ottlspan.TransformContext], error) { parser, err := ottlspan.NewParser(functions, set) if err != nil { return nil, err @@ -36,7 +35,7 @@ func NewBoolExprForSpan(conditions []string, functions map[string]ottl.Factory[o // NewBoolExprForSpanEvent creates a BoolExpr[ottlspanevent.TransformContext] that will return true if any of the given OTTL conditions evaluate to true. // The passed in functions should use the ottlspanevent.TransformContext. // If a function named `match` is not present in the function map it will be added automatically so that parsing works as expected -func NewBoolExprForSpanEvent(conditions []string, functions map[string]ottl.Factory[ottlspanevent.TransformContext], errorMode ottl.ErrorMode, set component.TelemetrySettings) (expr.BoolExpr[ottlspanevent.TransformContext], error) { +func NewBoolExprForSpanEvent(conditions []string, functions map[string]ottl.Factory[ottlspanevent.TransformContext], errorMode ottl.ErrorMode, set component.TelemetrySettings) (*ottl.ConditionSequence[ottlspanevent.TransformContext], error) { parser, err := ottlspanevent.NewParser(functions, set) if err != nil { return nil, err @@ -52,7 +51,7 @@ func NewBoolExprForSpanEvent(conditions []string, functions map[string]ottl.Fact // NewBoolExprForMetric creates a BoolExpr[ottlmetric.TransformContext] that will return true if any of the given OTTL conditions evaluate to true. // The passed in functions should use the ottlmetric.TransformContext. // If a function named `match` is not present in the function map it will be added automatically so that parsing works as expected -func NewBoolExprForMetric(conditions []string, functions map[string]ottl.Factory[ottlmetric.TransformContext], errorMode ottl.ErrorMode, set component.TelemetrySettings) (expr.BoolExpr[ottlmetric.TransformContext], error) { +func NewBoolExprForMetric(conditions []string, functions map[string]ottl.Factory[ottlmetric.TransformContext], errorMode ottl.ErrorMode, set component.TelemetrySettings) (*ottl.ConditionSequence[ottlmetric.TransformContext], error) { parser, err := ottlmetric.NewParser(functions, set) if err != nil { return nil, err @@ -68,7 +67,7 @@ func NewBoolExprForMetric(conditions []string, functions map[string]ottl.Factory // NewBoolExprForDataPoint creates a BoolExpr[ottldatapoint.TransformContext] that will return true if any of the given OTTL conditions evaluate to true. // The passed in functions should use the ottldatapoint.TransformContext. 
// If a function named `match` is not present in the function map it will be added automatically so that parsing works as expected -func NewBoolExprForDataPoint(conditions []string, functions map[string]ottl.Factory[ottldatapoint.TransformContext], errorMode ottl.ErrorMode, set component.TelemetrySettings) (expr.BoolExpr[ottldatapoint.TransformContext], error) { +func NewBoolExprForDataPoint(conditions []string, functions map[string]ottl.Factory[ottldatapoint.TransformContext], errorMode ottl.ErrorMode, set component.TelemetrySettings) (*ottl.ConditionSequence[ottldatapoint.TransformContext], error) { parser, err := ottldatapoint.NewParser(functions, set) if err != nil { return nil, err @@ -84,7 +83,7 @@ func NewBoolExprForDataPoint(conditions []string, functions map[string]ottl.Fact // NewBoolExprForLog creates a BoolExpr[ottllog.TransformContext] that will return true if any of the given OTTL conditions evaluate to true. // The passed in functions should use the ottllog.TransformContext. // If a function named `match` is not present in the function map it will be added automatically so that parsing works as expected -func NewBoolExprForLog(conditions []string, functions map[string]ottl.Factory[ottllog.TransformContext], errorMode ottl.ErrorMode, set component.TelemetrySettings) (expr.BoolExpr[ottllog.TransformContext], error) { +func NewBoolExprForLog(conditions []string, functions map[string]ottl.Factory[ottllog.TransformContext], errorMode ottl.ErrorMode, set component.TelemetrySettings) (*ottl.ConditionSequence[ottllog.TransformContext], error) { parser, err := ottllog.NewParser(functions, set) if err != nil { return nil, err @@ -100,7 +99,7 @@ func NewBoolExprForLog(conditions []string, functions map[string]ottl.Factory[ot // NewBoolExprForResource creates a BoolExpr[ottlresource.TransformContext] that will return true if any of the given OTTL conditions evaluate to true. // The passed in functions should use the ottlresource.TransformContext. // If a function named `match` is not present in the function map it will be added automatically so that parsing works as expected -func NewBoolExprForResource(conditions []string, functions map[string]ottl.Factory[ottlresource.TransformContext], errorMode ottl.ErrorMode, set component.TelemetrySettings) (expr.BoolExpr[ottlresource.TransformContext], error) { +func NewBoolExprForResource(conditions []string, functions map[string]ottl.Factory[ottlresource.TransformContext], errorMode ottl.ErrorMode, set component.TelemetrySettings) (*ottl.ConditionSequence[ottlresource.TransformContext], error) { parser, err := ottlresource.NewParser(functions, set) if err != nil { return nil, err @@ -116,7 +115,7 @@ func NewBoolExprForResource(conditions []string, functions map[string]ottl.Facto // NewBoolExprForScope creates a BoolExpr[ottlscope.TransformContext] that will return true if any of the given OTTL conditions evaluate to true. // The passed in functions should use the ottlresource.TransformContext. 
// If a function named `match` is not present in the function map it will be added automatically so that parsing works as expected -func NewBoolExprForScope(conditions []string, functions map[string]ottl.Factory[ottlscope.TransformContext], errorMode ottl.ErrorMode, set component.TelemetrySettings) (expr.BoolExpr[ottlscope.TransformContext], error) { +func NewBoolExprForScope(conditions []string, functions map[string]ottl.Factory[ottlscope.TransformContext], errorMode ottl.ErrorMode, set component.TelemetrySettings) (*ottl.ConditionSequence[ottlscope.TransformContext], error) { parser, err := ottlscope.NewParser(functions, set) if err != nil { return nil, err diff --git a/processor/logdedupprocessor/processor.go b/processor/logdedupprocessor/processor.go index 804c312bff0d..efd5095a2eaf 100644 --- a/processor/logdedupprocessor/processor.go +++ b/processor/logdedupprocessor/processor.go @@ -16,7 +16,7 @@ import ( "go.opentelemetry.io/collector/processor" "go.uber.org/zap" - "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/expr" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottllog" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/logdedupprocessor/internal/metadata" ) @@ -24,7 +24,7 @@ import ( // logDedupProcessor is a logDedupProcessor that counts duplicate instances of logs. type logDedupProcessor struct { emitInterval time.Duration - conditions expr.BoolExpr[ottllog.TransformContext] + conditions *ottl.ConditionSequence[ottllog.TransformContext] aggregator *logAggregator remover *fieldRemover nextConsumer consumer.Logs diff --git a/processor/tailsamplingprocessor/internal/sampling/ottl.go b/processor/tailsamplingprocessor/internal/sampling/ottl.go index 4e50358b002e..7d5f520ece50 100644 --- a/processor/tailsamplingprocessor/internal/sampling/ottl.go +++ b/processor/tailsamplingprocessor/internal/sampling/ottl.go @@ -11,7 +11,6 @@ import ( "go.opentelemetry.io/collector/pdata/pcommon" "go.uber.org/zap" - "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/expr" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspan" @@ -19,8 +18,8 @@ import ( ) type ottlConditionFilter struct { - sampleSpanExpr expr.BoolExpr[ottlspan.TransformContext] - sampleSpanEventExpr expr.BoolExpr[ottlspanevent.TransformContext] + sampleSpanExpr *ottl.ConditionSequence[ottlspan.TransformContext] + sampleSpanEventExpr *ottl.ConditionSequence[ottlspanevent.TransformContext] errorMode ottl.ErrorMode logger *zap.Logger } diff --git a/processor/transformprocessor/internal/common/processor.go b/processor/transformprocessor/internal/common/processor.go index 137cac8ffeac..dee7d24e7ba9 100644 --- a/processor/transformprocessor/internal/common/processor.go +++ b/processor/transformprocessor/internal/common/processor.go @@ -212,7 +212,7 @@ func (pc parserCollection) parseCommonContextStatements(contextStatement Context } func parseGlobalExpr[K any]( - boolExprFunc func([]string, map[string]ottl.Factory[K], ottl.ErrorMode, component.TelemetrySettings) (expr.BoolExpr[K], error), + boolExprFunc func([]string, map[string]ottl.Factory[K], ottl.ErrorMode, component.TelemetrySettings) (*ottl.ConditionSequence[K], 
error), conditions []string, pc parserCollection, standardFuncs map[string]ottl.Factory[K]) (expr.BoolExpr[K], error) { diff --git a/receiver/awsecscontainermetricsreceiver/internal/awsecscontainermetrics/accumulator.go b/receiver/awsecscontainermetricsreceiver/internal/awsecscontainermetrics/accumulator.go index a8da66e22996..898f77757d1e 100644 --- a/receiver/awsecscontainermetricsreceiver/internal/awsecscontainermetrics/accumulator.go +++ b/receiver/awsecscontainermetricsreceiver/internal/awsecscontainermetrics/accumulator.go @@ -38,7 +38,6 @@ func (acc *metricDataAccumulator) getMetricsData(containerStatsMap map[string]*C acc.accumulate(convertToOTLPMetrics(containerPrefix, containerMetrics, containerResource, timestamp)) aggregateTaskMetrics(&taskMetrics, containerMetrics) } else if containerMetadata.FinishedAt != "" && containerMetadata.StartedAt != "" { - duration, err := calculateDuration(containerMetadata.StartedAt, containerMetadata.FinishedAt) if err != nil { diff --git a/receiver/awsxrayreceiver/internal/translator/aws.go b/receiver/awsxrayreceiver/internal/translator/aws.go index c7b65d765d6a..d77cb5185d22 100644 --- a/receiver/awsxrayreceiver/internal/translator/aws.go +++ b/receiver/awsxrayreceiver/internal/translator/aws.go @@ -7,7 +7,7 @@ import ( "strconv" "go.opentelemetry.io/collector/pdata/pcommon" - conventions "go.opentelemetry.io/collector/semconv/v1.6.1" + conventions "go.opentelemetry.io/collector/semconv/v1.18.0" awsxray "github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray" ) diff --git a/receiver/awsxrayreceiver/internal/translator/aws_test.go b/receiver/awsxrayreceiver/internal/translator/aws_test.go index f72b83012329..593b24738610 100644 --- a/receiver/awsxrayreceiver/internal/translator/aws_test.go +++ b/receiver/awsxrayreceiver/internal/translator/aws_test.go @@ -9,7 +9,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/stretchr/testify/assert" "go.opentelemetry.io/collector/pdata/pcommon" - conventions "go.opentelemetry.io/collector/semconv/v1.6.1" + conventions "go.opentelemetry.io/collector/semconv/v1.18.0" awsxray "github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray" ) diff --git a/receiver/awsxrayreceiver/internal/translator/cause.go b/receiver/awsxrayreceiver/internal/translator/cause.go index de8154452181..99eed7d80afe 100644 --- a/receiver/awsxrayreceiver/internal/translator/cause.go +++ b/receiver/awsxrayreceiver/internal/translator/cause.go @@ -8,7 +8,7 @@ import ( "strings" "go.opentelemetry.io/collector/pdata/ptrace" - conventions "go.opentelemetry.io/collector/semconv/v1.6.1" + conventions "go.opentelemetry.io/collector/semconv/v1.18.0" awsxray "github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray" ) diff --git a/receiver/awsxrayreceiver/internal/translator/http.go b/receiver/awsxrayreceiver/internal/translator/http.go index bac348352e62..79a66b394cef 100644 --- a/receiver/awsxrayreceiver/internal/translator/http.go +++ b/receiver/awsxrayreceiver/internal/translator/http.go @@ -5,7 +5,7 @@ package translator // import "github.com/open-telemetry/opentelemetry-collector- import ( "go.opentelemetry.io/collector/pdata/ptrace" - conventions "go.opentelemetry.io/collector/semconv/v1.6.1" + conventions "go.opentelemetry.io/collector/semconv/v1.18.0" awsxray "github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray" 
"github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/tracetranslator" diff --git a/receiver/awsxrayreceiver/internal/translator/sdk.go b/receiver/awsxrayreceiver/internal/translator/sdk.go index 97cdc53d10f2..8d55e3dab457 100644 --- a/receiver/awsxrayreceiver/internal/translator/sdk.go +++ b/receiver/awsxrayreceiver/internal/translator/sdk.go @@ -7,7 +7,7 @@ import ( "strings" "go.opentelemetry.io/collector/pdata/pcommon" - conventions "go.opentelemetry.io/collector/semconv/v1.6.1" + conventions "go.opentelemetry.io/collector/semconv/v1.18.0" awsxray "github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray" ) diff --git a/receiver/awsxrayreceiver/internal/translator/sql.go b/receiver/awsxrayreceiver/internal/translator/sql.go index 7659fd7e45a8..6a88511d6fdd 100644 --- a/receiver/awsxrayreceiver/internal/translator/sql.go +++ b/receiver/awsxrayreceiver/internal/translator/sql.go @@ -8,7 +8,7 @@ import ( "regexp" "go.opentelemetry.io/collector/pdata/pcommon" - conventions "go.opentelemetry.io/collector/semconv/v1.6.1" + conventions "go.opentelemetry.io/collector/semconv/v1.18.0" awsxray "github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray" ) diff --git a/receiver/awsxrayreceiver/internal/translator/translator.go b/receiver/awsxrayreceiver/internal/translator/translator.go index e3a05d40d560..331195a5fe8c 100644 --- a/receiver/awsxrayreceiver/internal/translator/translator.go +++ b/receiver/awsxrayreceiver/internal/translator/translator.go @@ -10,7 +10,7 @@ import ( "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/ptrace" - conventions "go.opentelemetry.io/collector/semconv/v1.6.1" + conventions "go.opentelemetry.io/collector/semconv/v1.18.0" awsxray "github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray/telemetry" diff --git a/receiver/awsxrayreceiver/internal/translator/translator_test.go b/receiver/awsxrayreceiver/internal/translator/translator_test.go index c0f32a852a79..68231cd57d8e 100644 --- a/receiver/awsxrayreceiver/internal/translator/translator_test.go +++ b/receiver/awsxrayreceiver/internal/translator/translator_test.go @@ -16,7 +16,7 @@ import ( "github.com/stretchr/testify/assert" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/ptrace" - conventions "go.opentelemetry.io/collector/semconv/v1.6.1" + conventions "go.opentelemetry.io/collector/semconv/v1.18.0" awsxray "github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray/telemetry" diff --git a/receiver/k8sobjectsreceiver/config.go b/receiver/k8sobjectsreceiver/config.go index d341edc555d3..62c303748e35 100644 --- a/receiver/k8sobjectsreceiver/config.go +++ b/receiver/k8sobjectsreceiver/config.go @@ -57,7 +57,6 @@ type Config struct { } func (c *Config) Validate() error { - validObjects, err := c.getValidObjects() if err != nil { return err @@ -149,7 +148,6 @@ func (c *Config) getValidObjects() (map[string][]*schema.GroupVersionResource, e Resource: resource.Name, }) } - } return validObjects, nil } diff --git a/receiver/k8sobjectsreceiver/mock_dynamic_client_test.go b/receiver/k8sobjectsreceiver/mock_dynamic_client_test.go index 267559d0aedc..a075baa8a297 100644 --- a/receiver/k8sobjectsreceiver/mock_dynamic_client_test.go +++ 
b/receiver/k8sobjectsreceiver/mock_dynamic_client_test.go @@ -30,7 +30,6 @@ func newMockDynamicClient() mockDynamicClient { return mockDynamicClient{ client: fakeClient, } - } func (c mockDynamicClient) getMockDynamicClient() (dynamic.Interface, error) { diff --git a/receiver/k8sobjectsreceiver/receiver.go b/receiver/k8sobjectsreceiver/receiver.go index f10754c4f628..d69df14a19e6 100644 --- a/receiver/k8sobjectsreceiver/receiver.go +++ b/receiver/k8sobjectsreceiver/receiver.go @@ -154,9 +154,7 @@ func (kr *k8sobjectsreceiver) startPull(ctx context.Context, config *K8sObjectsC case <-stopperChan: return } - } - } func (kr *k8sobjectsreceiver) startWatch(ctx context.Context, config *K8sObjectsConfig, resource dynamic.ResourceInterface) { diff --git a/receiver/k8sobjectsreceiver/unstructured_to_logdata_test.go b/receiver/k8sobjectsreceiver/unstructured_to_logdata_test.go index 8315452a260b..da71680ba5b1 100644 --- a/receiver/k8sobjectsreceiver/unstructured_to_logdata_test.go +++ b/receiver/k8sobjectsreceiver/unstructured_to_logdata_test.go @@ -89,7 +89,6 @@ func TestUnstructuredListToLogData(t *testing.T) { assert.False(t, ok) assert.Equal(t, 1, rl.ScopeLogs().Len()) assert.Equal(t, 3, logRecords.Len()) - }) t.Run("Test event.name in watch events", func(t *testing.T) { @@ -129,7 +128,6 @@ func TestUnstructuredListToLogData(t *testing.T) { eventName, ok := attrs.Get("event.name") require.True(t, ok) assert.EqualValues(t, "generic-name", eventName.AsRaw()) - }) t.Run("Test event observed timestamp is present", func(t *testing.T) { @@ -168,5 +166,4 @@ func TestUnstructuredListToLogData(t *testing.T) { assert.Positive(t, logRecords.At(0).ObservedTimestamp().AsTime().Unix()) assert.Equal(t, logRecords.At(0).ObservedTimestamp().AsTime().Unix(), observedAt.Unix()) }) - } diff --git a/receiver/kafkametricsreceiver/broker_scraper.go b/receiver/kafkametricsreceiver/broker_scraper.go index 6aed30726d2d..07aea6821634 100644 --- a/receiver/kafkametricsreceiver/broker_scraper.go +++ b/receiver/kafkametricsreceiver/broker_scraper.go @@ -47,7 +47,6 @@ func (s *brokerScraper) shutdown(context.Context) error { } func (s *brokerScraper) scrape(context.Context) (pmetric.Metrics, error) { - var scrapeErrors = scrapererror.ScrapeErrors{} if s.client == nil { diff --git a/receiver/kafkareceiver/header_extraction_test.go b/receiver/kafkareceiver/header_extraction_test.go index c2dacfff103f..76f72b8595ce 100644 --- a/receiver/kafkareceiver/header_extraction_test.go +++ b/receiver/kafkareceiver/header_extraction_test.go @@ -85,7 +85,6 @@ func TestHeaderExtractionTraces(t *testing.T) { } cancelFunc() wg.Wait() - } func TestHeaderExtractionLogs(t *testing.T) { @@ -147,7 +146,6 @@ func TestHeaderExtractionLogs(t *testing.T) { } cancelFunc() wg.Wait() - } func TestHeaderExtractionMetrics(t *testing.T) { @@ -210,7 +208,6 @@ func TestHeaderExtractionMetrics(t *testing.T) { } cancelFunc() wg.Wait() - } func validateHeader(t *testing.T, rs pcommon.Resource, headerKey string, headerValue string) { diff --git a/receiver/kubeletstatsreceiver/config_test.go b/receiver/kubeletstatsreceiver/config_test.go index f3baf8a2fb8f..03290d362cd5 100644 --- a/receiver/kubeletstatsreceiver/config_test.go +++ b/receiver/kubeletstatsreceiver/config_test.go @@ -303,7 +303,6 @@ func TestLoadConfig(t *testing.T) { assert.NoError(t, err) assert.Equal(t, tt.expected, cfg) } - }) } } diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/metadata.go b/receiver/kubeletstatsreceiver/internal/kubelet/metadata.go index 954a01f03cd3..55f539044a70 
100644 --- a/receiver/kubeletstatsreceiver/internal/kubelet/metadata.go +++ b/receiver/kubeletstatsreceiver/internal/kubelet/metadata.go @@ -207,7 +207,6 @@ func (m *Metadata) getContainerID(podUID string, containerName string) (string, return stripContainerID(containerStatus.ContainerID), nil } } - } } diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/metadata_test.go b/receiver/kubeletstatsreceiver/internal/kubelet/metadata_test.go index 7ba5e4eabd44..fa35e320c2d0 100644 --- a/receiver/kubeletstatsreceiver/internal/kubelet/metadata_test.go +++ b/receiver/kubeletstatsreceiver/internal/kubelet/metadata_test.go @@ -390,7 +390,6 @@ func TestSetExtraLabelsForVolumeTypes(t *testing.T) { // Test happy paths for volume type metadata. func TestCpuAndMemoryGetters(t *testing.T) { - tests := []struct { name string metadata Metadata diff --git a/receiver/kubeletstatsreceiver/scraper_test.go b/receiver/kubeletstatsreceiver/scraper_test.go index 649766adff75..e089046240e6 100644 --- a/receiver/kubeletstatsreceiver/scraper_test.go +++ b/receiver/kubeletstatsreceiver/scraper_test.go @@ -303,7 +303,6 @@ func TestScraperWithMetadata(t *testing.T) { pmetrictest.IgnoreMetricDataPointsOrder(), pmetrictest.IgnoreTimestamp(), pmetrictest.IgnoreMetricsOrder())) - }) } } diff --git a/receiver/lokireceiver/factory.go b/receiver/lokireceiver/factory.go index 08e0c9aa0539..8faa7d0d128c 100644 --- a/receiver/lokireceiver/factory.go +++ b/receiver/lokireceiver/factory.go @@ -56,7 +56,6 @@ func createLogsReceiver( cfg component.Config, consumer consumer.Logs, ) (receiver.Logs, error) { - rCfg := cfg.(*Config) return newLokiReceiver(rCfg, consumer, settings) } diff --git a/receiver/lokireceiver/loki_test.go b/receiver/lokireceiver/loki_test.go index 00087b11d26c..85e0af28499a 100644 --- a/receiver/lokireceiver/loki_test.go +++ b/receiver/lokireceiver/loki_test.go @@ -365,7 +365,6 @@ func TestSendingPushRequestToGRPCEndpoint(t *testing.T) { } func TestExpectedStatus(t *testing.T) { - testcases := []struct { name string err error diff --git a/receiver/mongodbatlasreceiver/alerts.go b/receiver/mongodbatlasreceiver/alerts.go index 982970214537..2be154637407 100644 --- a/receiver/mongodbatlasreceiver/alerts.go +++ b/receiver/mongodbatlasreceiver/alerts.go @@ -498,7 +498,6 @@ func payloadToLogs(now time.Time, payload []byte) (plog.Logs, error) { attrs.PutStr("net.peer.name", host) attrs.PutInt("net.peer.port", port) - } return logs, nil diff --git a/receiver/mongodbatlasreceiver/alerts_test.go b/receiver/mongodbatlasreceiver/alerts_test.go index bb65b257d603..77e176e71a0a 100644 --- a/receiver/mongodbatlasreceiver/alerts_test.go +++ b/receiver/mongodbatlasreceiver/alerts_test.go @@ -243,7 +243,6 @@ func TestVerifyHMACSignature(t *testing.T) { } else { require.NoError(t, err) } - }) } } diff --git a/receiver/mongodbatlasreceiver/events.go b/receiver/mongodbatlasreceiver/events.go index 2dd9787ab02a..7b9ed58557ce 100644 --- a/receiver/mongodbatlasreceiver/events.go +++ b/receiver/mongodbatlasreceiver/events.go @@ -239,7 +239,6 @@ func (er *eventsReceiver) transformOrgEvents(now pcommon.Timestamp, events []*mo func (er *eventsReceiver) transformEvents(now pcommon.Timestamp, events []*mongodbatlas.Event, resourceLogs *plog.ResourceLogs) { for _, event := range events { - logRecord := resourceLogs.ScopeLogs().AppendEmpty().LogRecords().AppendEmpty() bodyBytes, err := json.Marshal(event) if err != nil { diff --git a/receiver/mongodbatlasreceiver/factory.go b/receiver/mongodbatlasreceiver/factory.go index 
899d8b519ba6..0f094a53e283 100644 --- a/receiver/mongodbatlasreceiver/factory.go +++ b/receiver/mongodbatlasreceiver/factory.go @@ -31,7 +31,6 @@ func NewFactory() receiver.Factory { createDefaultConfig, receiver.WithMetrics(createMetricsReceiver, metadata.MetricsStability), receiver.WithLogs(createCombinedLogReceiver, metadata.LogsStability)) - } func createMetricsReceiver( diff --git a/receiver/mongodbatlasreceiver/internal/mongodb_atlas_client.go b/receiver/mongodbatlasreceiver/internal/mongodb_atlas_client.go index 8ba8f2acbb5a..3f1e1bbcab2d 100644 --- a/receiver/mongodbatlasreceiver/internal/mongodb_atlas_client.go +++ b/receiver/mongodbatlasreceiver/internal/mongodb_atlas_client.go @@ -217,7 +217,6 @@ func (s *MongoDBAtlasClient) GetOrganization(ctx context.Context, orgID string) return nil, fmt.Errorf("error retrieving project page: %w", err) } return org, nil - } // Projects returns a list of projects accessible within the provided organization @@ -719,7 +718,6 @@ type GetAccessLogsOptions struct { // GetAccessLogs returns the access logs specified for the cluster requested func (s *MongoDBAtlasClient) GetAccessLogs(ctx context.Context, groupID string, clusterName string, opts *GetAccessLogsOptions) (ret []*mongodbatlas.AccessLogs, err error) { - options := mongodbatlas.AccessLogOptions{ // Earliest Timestamp in epoch milliseconds from when Atlas should access log results Start: fmt.Sprintf("%d", opts.MinDate.UTC().UnixMilli()), diff --git a/receiver/mongodbatlasreceiver/logs_test.go b/receiver/mongodbatlasreceiver/logs_test.go index 7a7c721715e9..03cc74f40132 100644 --- a/receiver/mongodbatlasreceiver/logs_test.go +++ b/receiver/mongodbatlasreceiver/logs_test.go @@ -41,7 +41,6 @@ func TestFilterClusters(t *testing.T) { ic, err := filterClusters(clusters, includeProject) require.NoError(t, err) require.Equal(t, []mongodbatlas.Cluster{{Name: "cluster1", ID: "1"}, {Name: "cluster3", ID: "3"}}, ic) - } func TestDefaultLoggingConfig(t *testing.T) { diff --git a/receiver/mongodbatlasreceiver/receiver.go b/receiver/mongodbatlasreceiver/receiver.go index bd97b1edad15..67ffe30f503d 100644 --- a/receiver/mongodbatlasreceiver/receiver.go +++ b/receiver/mongodbatlasreceiver/receiver.go @@ -211,7 +211,6 @@ func (s *mongodbatlasreceiver) getNodeClusterNameMap( // Remove the port from the node n, _, _ := strings.Cut(node, ":") clusterMap[n] = cluster.Name - } providerMap[cluster.Name] = providerValues{ diff --git a/receiver/mongodbreceiver/client_test.go b/receiver/mongodbreceiver/client_test.go index f6ed99ebfcc2..b4657ef87fc3 100644 --- a/receiver/mongodbreceiver/client_test.go +++ b/receiver/mongodbreceiver/client_test.go @@ -92,7 +92,6 @@ func TestListDatabaseNames(t *testing.T) { require.NoError(t, err) require.Equal(t, "admin", dbNames[0]) }) - } type commandString = string @@ -232,7 +231,6 @@ func TestGetVersionFailures(t *testing.T) { require.ErrorContains(t, err, tc.partialError) }) } - } func loadDBStats() (bson.D, error) { diff --git a/receiver/mysqlreceiver/scraper_test.go b/receiver/mysqlreceiver/scraper_test.go index 8753752e948a..f2a1ef0ce5ba 100644 --- a/receiver/mysqlreceiver/scraper_test.go +++ b/receiver/mysqlreceiver/scraper_test.go @@ -123,7 +123,6 @@ func TestScrape(t *testing.T) { // and the other failure comes from a row that fails to parse as a number require.Equal(t, 5, partialError.Failed, "Expected partial error count to be 5") }) - } var _ client = (*mockClient)(nil) @@ -194,7 +193,6 @@ func (c *mockClient) getTableStats() ([]TableStats, error) { stats = 
append(stats, s) } return stats, nil - } func (c *mockClient) getTableIoWaitsStats() ([]TableIoWaitsStats, error) { diff --git a/receiver/opencensusreceiver/internal/octrace/opencensus.go b/receiver/opencensusreceiver/internal/octrace/opencensus.go index 68190af59528..9977d9fa6700 100644 --- a/receiver/opencensusreceiver/internal/octrace/opencensus.go +++ b/receiver/opencensusreceiver/internal/octrace/opencensus.go @@ -33,7 +33,6 @@ type Receiver struct { // New creates a new opencensus.Receiver reference. func New(nextConsumer consumer.Traces, set receiver.Settings) (*Receiver, error) { - obsrecv, err := receiverhelper.NewObsReport(receiverhelper.ObsReportSettings{ ReceiverID: set.ID, Transport: receiverTransport, diff --git a/receiver/opencensusreceiver/opencensus.go b/receiver/opencensusreceiver/opencensus.go index 3ea3c2f4a2bd..ee2de0d033af 100644 --- a/receiver/opencensusreceiver/opencensus.go +++ b/receiver/opencensusreceiver/opencensus.go @@ -189,7 +189,6 @@ func (ocr *ocReceiver) Start(ctx context.Context, host component.Host) error { // Shutdown is a method to turn off receiving. func (ocr *ocReceiver) Shutdown(context.Context) error { - if ocr.cancel != nil { ocr.cancel() } diff --git a/receiver/opencensusreceiver/opencensus_test.go b/receiver/opencensusreceiver/opencensus_test.go index 80002b99aaed..668df1bd9438 100644 --- a/receiver/opencensusreceiver/opencensus_test.go +++ b/receiver/opencensusreceiver/opencensus_test.go @@ -303,7 +303,6 @@ func TestStartWithoutConsumersShouldFail(t *testing.T) { } func TestStartListenerClosed(t *testing.T) { - addr := testutil.GetAvailableLocalAddress(t) // Set the buffer count to 1 to make it flush the test span immediately. @@ -503,7 +502,6 @@ func TestOCReceiverTrace_HandleNextConsumerResponse(t *testing.T) { t *testing.T, cc *grpc.ClientConn, msg *agenttracepb.ExportTraceServiceRequest) error { - acc := agenttracepb.NewTraceServiceClient(cc) stream, err := acc.Export(context.Background()) require.NoError(t, err) @@ -661,7 +659,6 @@ func TestOCReceiverMetrics_HandleNextConsumerResponse(t *testing.T) { t *testing.T, cc *grpc.ClientConn, msg *agentmetricspb.ExportMetricsServiceRequest) error { - acc := agentmetricspb.NewMetricsServiceClient(cc) stream, err := acc.Export(context.Background()) require.NoError(t, err) diff --git a/receiver/oracledbreceiver/config.go b/receiver/oracledbreceiver/config.go index c14cd60beac8..70c21c2e9264 100644 --- a/receiver/oracledbreceiver/config.go +++ b/receiver/oracledbreceiver/config.go @@ -41,7 +41,6 @@ func (c Config) Validate() error { // If DataSource is defined it takes precedence over the rest of the connection options. 
if c.DataSource == "" { - if c.Endpoint == "" { allErrs = multierr.Append(allErrs, errEmptyEndpoint) } diff --git a/receiver/oracledbreceiver/scraper_test.go b/receiver/oracledbreceiver/scraper_test.go index 228023c390a7..608cf1facd47 100644 --- a/receiver/oracledbreceiver/scraper_test.go +++ b/receiver/oracledbreceiver/scraper_test.go @@ -40,7 +40,6 @@ var queryResponses = map[string][]metricRow{ } func TestScraper_Scrape(t *testing.T) { - tests := []struct { name string dbclientFn func(db *sql.DB, s string, logger *zap.Logger) dbClient @@ -165,5 +164,4 @@ func TestScraper_Scrape(t *testing.T) { assert.Equal(t, int64(78944), found.Sum().DataPoints().At(0).IntValue()) }) } - } diff --git a/receiver/otelarrowreceiver/config_test.go b/receiver/otelarrowreceiver/config_test.go index 60edaf00cf61..29a1e8a08ad5 100644 --- a/receiver/otelarrowreceiver/config_test.go +++ b/receiver/otelarrowreceiver/config_test.go @@ -85,7 +85,6 @@ func TestUnmarshalConfig(t *testing.T) { WaiterLimit: 100, }, }, cfg) - } // Tests that a deprecated config validation sets RequestLimitMiB and WaiterLimit in the correct config block. diff --git a/receiver/otelarrowreceiver/internal/arrow/arrow.go b/receiver/otelarrowreceiver/internal/arrow/arrow.go index 13fdcd2395f8..50653b2e9a3a 100644 --- a/receiver/otelarrowreceiver/internal/arrow/arrow.go +++ b/receiver/otelarrowreceiver/internal/arrow/arrow.go @@ -545,7 +545,6 @@ func (id *inFlightData) anyDone(ctx context.Context) { // tracks everything that needs to be used by instrumention when the // batch finishes. func (r *receiverStream) recvOne(streamCtx context.Context, serverStream anyStreamServer, hrcv *headerReceiver, pendingCh chan<- batchResp, method string, ac arrowRecord.ConsumerAPI) (retErr error) { - // Receive a batch corresponding with one ptrace.Traces, pmetric.Metrics, // or plog.Logs item. req, recvErr := serverStream.Recv() @@ -565,12 +564,10 @@ func (r *receiverStream) recvOne(streamCtx context.Context, serverStream anyStre if recvErr != nil { if errors.Is(recvErr, io.EOF) { return recvErr - } else if errors.Is(recvErr, context.Canceled) { // This is a special case to avoid introducing a span error // for a canceled operation. return io.EOF - } else if status, ok := status.FromError(recvErr); ok && status.Code() == codes.Canceled { // This is a special case to avoid introducing a span error // for a canceled operation. @@ -773,7 +770,6 @@ func (r *receiverStream) srvSendLoop(ctx context.Context, serverStream anyStream // slice of pdata objects of the corresponding data type as `any`. // along with the number of items and true uncompressed size. 
func (r *Receiver) consumeBatch(arrowConsumer arrowRecord.ConsumerAPI, records *arrowpb.BatchArrowRecords) (retData any, numItems int, uncompSize int64, retErr error) { - payloads := records.GetArrowPayloads() if len(payloads) == 0 { return nil, 0, 0, nil diff --git a/receiver/otelarrowreceiver/internal/arrow/arrow_test.go b/receiver/otelarrowreceiver/internal/arrow/arrow_test.go index dcbe0f8546d3..ec35cb2ef992 100644 --- a/receiver/otelarrowreceiver/internal/arrow/arrow_test.go +++ b/receiver/otelarrowreceiver/internal/arrow/arrow_test.go @@ -1275,7 +1275,6 @@ func testReceiverAuthHeaders(t *testing.T, includeMeta bool, dataAuth bool) { batch = copyBatch(batch) if len(md) != 0 { - hpb.Reset() for key, vals := range md { for _, val := range vals { diff --git a/receiver/podmanreceiver/libpod_client_test.go b/receiver/podmanreceiver/libpod_client_test.go index 65d511c5aa85..80d50ffd265f 100644 --- a/receiver/podmanreceiver/libpod_client_test.go +++ b/receiver/podmanreceiver/libpod_client_test.go @@ -238,7 +238,6 @@ func TestEvents(t *testing.T) { loop: for { - select { case err := <-errs: if err != nil && !errors.Is(err, io.EOF) { @@ -252,5 +251,4 @@ loop: } assert.Equal(t, expectedEvents, actualEvents) - } diff --git a/receiver/podmanreceiver/podman.go b/receiver/podmanreceiver/podman.go index 6aa8d0fd8c6f..277720f87a6c 100644 --- a/receiver/podmanreceiver/podman.go +++ b/receiver/podmanreceiver/podman.go @@ -98,7 +98,6 @@ EVENT_LOOP: for { eventCh, errCh := pc.events(ctx, filters) for { - select { case <-ctx.Done(): return @@ -132,7 +131,6 @@ EVENT_LOOP: } } } - } } } diff --git a/receiver/prometheusreceiver/config.go b/receiver/prometheusreceiver/config.go index 672f0e437b2c..556dd4244518 100644 --- a/receiver/prometheusreceiver/config.go +++ b/receiver/prometheusreceiver/config.go @@ -144,7 +144,6 @@ func validateHTTPClientConfig(cfg *commonconfig.HTTPClientConfig) error { return err } return nil - } func checkFile(fn string) error { diff --git a/receiver/prometheusreceiver/internal/transaction_test.go b/receiver/prometheusreceiver/internal/transaction_test.go index f15b06e402cb..b47b8ce83b96 100644 --- a/receiver/prometheusreceiver/internal/transaction_test.go +++ b/receiver/prometheusreceiver/internal/transaction_test.go @@ -1742,7 +1742,6 @@ func TestMetricBuilderSummary(t *testing.T) { }) } } - } func TestMetricBuilderNativeHistogram(t *testing.T) { @@ -2003,5 +2002,4 @@ func assertEquivalentMetrics(t *testing.T, want, got pmetric.Metrics) { assert.EqualValues(t, wmap, gmap) } } - } diff --git a/receiver/prometheusreceiver/metrics_receiver_helper_test.go b/receiver/prometheusreceiver/metrics_receiver_helper_test.go index 0755cf0c7e9a..cea8d429af86 100644 --- a/receiver/prometheusreceiver/metrics_receiver_helper_test.go +++ b/receiver/prometheusreceiver/metrics_receiver_helper_test.go @@ -185,7 +185,6 @@ func waitForScrapeResults(t *testing.T, targets []*testData, cms *consumertest.M // only count target pages that are not 404, matching mock ServerHTTP func response logic want++ } - } if len(scrapes) < want { // If we don't have enough scrapes yet lets return false and wait for another tick diff --git a/receiver/prometheusreceiver/metrics_receiver_labels_test.go b/receiver/prometheusreceiver/metrics_receiver_labels_test.go index f7eb6289c1cd..b120df3d3341 100644 --- a/receiver/prometheusreceiver/metrics_receiver_labels_test.go +++ b/receiver/prometheusreceiver/metrics_receiver_labels_test.go @@ -722,7 +722,6 @@ func verifyRelabelJobInstance(t *testing.T, td *testData, rms 
[]pmetric.Resource }, }, })(t, rms[0]) - } const targetResourceAttsInTargetInfo = ` diff --git a/receiver/prometheusreceiver/metrics_receiver_open_metrics_test.go b/receiver/prometheusreceiver/metrics_receiver_open_metrics_test.go index ae22d4389aee..1eec3fbe16c6 100644 --- a/receiver/prometheusreceiver/metrics_receiver_open_metrics_test.go +++ b/receiver/prometheusreceiver/metrics_receiver_open_metrics_test.go @@ -92,7 +92,6 @@ func verifyFailTarget(t *testing.T, td *testData, mds []pmetric.ResourceMetrics) // Test open metrics negative test cases func TestOpenMetricsFail(t *testing.T) { - targetsMap := getOpenMetricsFailTestData() var targets []*testData for k, v := range targetsMap { @@ -127,7 +126,6 @@ func verifyInvalidTarget(t *testing.T, td *testData, mds []pmetric.ResourceMetri } func TestOpenMetricsInvalid(t *testing.T) { - targetsMap := getOpenMetricsInvalidTestData() var targets []*testData for k, v := range targetsMap { @@ -229,7 +227,6 @@ func TestInfoStatesetMetrics(t *testing.T) { } testComponent(t, targets, nil) - } func verifyInfoStatesetMetrics(t *testing.T, td *testData, resourceMetrics []pmetric.ResourceMetrics) { diff --git a/receiver/prometheusreceiver/metrics_reciever_metric_rename_test.go b/receiver/prometheusreceiver/metrics_reciever_metric_rename_test.go index ecf7ab8b7ba1..adc0cadcb310 100644 --- a/receiver/prometheusreceiver/metrics_reciever_metric_rename_test.go +++ b/receiver/prometheusreceiver/metrics_reciever_metric_rename_test.go @@ -102,7 +102,6 @@ func TestMetricRenamingKeepAction(t *testing.T) { } } }) - } func verifyRenameMetric(t *testing.T, td *testData, resourceMetrics []pmetric.ResourceMetrics) { @@ -266,7 +265,6 @@ func TestLabelRenaming(t *testing.T) { } } }) - } func verifyRenameLabel(t *testing.T, td *testData, resourceMetrics []pmetric.ResourceMetrics) { @@ -373,7 +371,6 @@ func TestLabelRenamingKeepAction(t *testing.T) { } } }) - } func verifyRenameLabelKeepAction(t *testing.T, td *testData, resourceMetrics []pmetric.ResourceMetrics) { diff --git a/receiver/prometheusreceiver/targetallocator/config.go b/receiver/prometheusreceiver/targetallocator/config.go index 07fc1d579a83..5cd9d719574b 100644 --- a/receiver/prometheusreceiver/targetallocator/config.go +++ b/receiver/prometheusreceiver/targetallocator/config.go @@ -87,7 +87,6 @@ func validateHTTPClientConfig(cfg *commonconfig.HTTPClientConfig) error { return err } return nil - } func checkFile(fn string) error { diff --git a/receiver/pulsarreceiver/factory.go b/receiver/pulsarreceiver/factory.go index 8814484af3c2..aa15ed799f58 100644 --- a/receiver/pulsarreceiver/factory.go +++ b/receiver/pulsarreceiver/factory.go @@ -55,7 +55,6 @@ func withLogsUnmarshalers(logsUnmarshalers ...LogsUnmarshaler) FactoryOption { // NewFactory creates Pulsar receiver factory. 
func NewFactory(options ...FactoryOption) receiver.Factory { - f := &pulsarReceiverFactory{ tracesUnmarshalers: defaultTracesUnmarshalers(), metricsUnmarshalers: defaultMetricsUnmarshalers(), diff --git a/receiver/rabbitmqreceiver/config_test.go b/receiver/rabbitmqreceiver/config_test.go index f95175d341b2..dfcdfefe5cda 100644 --- a/receiver/rabbitmqreceiver/config_test.go +++ b/receiver/rabbitmqreceiver/config_test.go @@ -92,7 +92,6 @@ func TestValidate(t *testing.T) { } else { require.NoError(t, actualErr) } - }) } } diff --git a/receiver/rabbitmqreceiver/scraper.go b/receiver/rabbitmqreceiver/scraper.go index 6b488e9e463e..fd0ad5d5f529 100644 --- a/receiver/rabbitmqreceiver/scraper.go +++ b/receiver/rabbitmqreceiver/scraper.go @@ -78,7 +78,6 @@ func (r *rabbitmqScraper) scrape(ctx context.Context) (pmetric.Metrics, error) { // Collect metrics for each queue for _, queue := range queues { - r.collectQueue(queue, now) } diff --git a/receiver/rabbitmqreceiver/scraper_test.go b/receiver/rabbitmqreceiver/scraper_test.go index 7c97bb737ca0..112941b136d9 100644 --- a/receiver/rabbitmqreceiver/scraper_test.go +++ b/receiver/rabbitmqreceiver/scraper_test.go @@ -101,7 +101,6 @@ func TestScaperScrape(t *testing.T) { return &mockClient }, expectedMetricGen: func(*testing.T) pmetric.Metrics { - return pmetric.NewMetrics() }, expectedErr: errors.New("some api error"), diff --git a/receiver/receivercreator/observerhandler_test.go b/receiver/receivercreator/observerhandler_test.go index 3968402f58e9..14cd5e7a7c97 100644 --- a/receiver/receivercreator/observerhandler_test.go +++ b/receiver/receivercreator/observerhandler_test.go @@ -321,7 +321,6 @@ func TestOnAddForTraces(t *testing.T) { t.Fatalf("unexpected startedComponent: %T", v) } require.Equal(t, test.expectedReceiverConfig, actualConfig) - }) } } diff --git a/receiver/riakreceiver/config_test.go b/receiver/riakreceiver/config_test.go index a503e4d05e86..97eaf508d23c 100644 --- a/receiver/riakreceiver/config_test.go +++ b/receiver/riakreceiver/config_test.go @@ -93,7 +93,6 @@ func TestValidate(t *testing.T) { } else { require.NoError(t, actualErr) } - }) } } diff --git a/receiver/saphanareceiver/config_test.go b/receiver/saphanareceiver/config_test.go index 0a53bcf7b428..104a6659b7d3 100644 --- a/receiver/saphanareceiver/config_test.go +++ b/receiver/saphanareceiver/config_test.go @@ -93,5 +93,4 @@ func TestLoadConfig(t *testing.T) { if diff := cmp.Diff(expected, cfg, cmpopts.IgnoreUnexported(metadata.MetricConfig{}), cmpopts.IgnoreUnexported(metadata.ResourceAttributeConfig{})); diff != "" { t.Errorf("Config mismatch (-expected +actual):\n%s", diff) } - } diff --git a/receiver/signalfxreceiver/receiver.go b/receiver/signalfxreceiver/receiver.go index ecadab4bd51e..587072e65e44 100644 --- a/receiver/signalfxreceiver/receiver.go +++ b/receiver/signalfxreceiver/receiver.go @@ -123,7 +123,6 @@ func (r *sfxReceiver) RegisterLogsConsumer(lc consumer.Logs) { // By convention the consumer of the received data is set when the receiver // instance is created. 
func (r *sfxReceiver) Start(ctx context.Context, host component.Host) error { - if r.server != nil { return nil } diff --git a/receiver/skywalkingreceiver/factory.go b/receiver/skywalkingreceiver/factory.go index c29e9f7abf68..458bed44ad23 100644 --- a/receiver/skywalkingreceiver/factory.go +++ b/receiver/skywalkingreceiver/factory.go @@ -66,7 +66,6 @@ func createTracesReceiver( cfg component.Config, nextConsumer consumer.Traces, ) (receiver.Traces, error) { - // Convert settings in the source c to configuration struct // that Skywalking receiver understands. rCfg := cfg.(*Config) @@ -94,7 +93,6 @@ func createMetricsReceiver( cfg component.Config, nextConsumer consumer.Metrics, ) (receiver.Metrics, error) { - // Convert settings in the source c to configuration struct // that Skywalking receiver understands. rCfg := cfg.(*Config) diff --git a/receiver/skywalkingreceiver/factory_test.go b/receiver/skywalkingreceiver/factory_test.go index 6fd8d870d358..05d8a2a2fde1 100644 --- a/receiver/skywalkingreceiver/factory_test.go +++ b/receiver/skywalkingreceiver/factory_test.go @@ -57,7 +57,6 @@ func TestCreateReceiver(t *testing.T) { mReceiver, err := factory.CreateMetrics(context.Background(), set, cfg, metricSink) assert.NoError(t, err, "metric receiver creation failed") assert.NotNil(t, mReceiver, "metric receiver creation failed") - } func TestCreateReceiverGeneralConfig(t *testing.T) { diff --git a/receiver/skywalkingreceiver/internal/metrics/metric_report_service.go b/receiver/skywalkingreceiver/internal/metrics/metric_report_service.go index e65afbe3e742..db9e34ada43d 100644 --- a/receiver/skywalkingreceiver/internal/metrics/metric_report_service.go +++ b/receiver/skywalkingreceiver/internal/metrics/metric_report_service.go @@ -65,5 +65,4 @@ func consumeMetrics(ctx context.Context, collection *agent.JVMMetricCollection, } pmd := SwMetricsToMetrics(collection) return nextConsumer.ConsumeMetrics(ctx, pmd) - } diff --git a/receiver/skywalkingreceiver/internal/metrics/skywalkingproto_to_metrics.go b/receiver/skywalkingreceiver/internal/metrics/skywalkingproto_to_metrics.go index 3d784b5c2b31..733865ebdb2c 100644 --- a/receiver/skywalkingreceiver/internal/metrics/skywalkingproto_to_metrics.go +++ b/receiver/skywalkingreceiver/internal/metrics/skywalkingproto_to_metrics.go @@ -96,7 +96,6 @@ func memoryPoolMetricToMetrics(timestamp int64, memoryPools []*agent.MemoryPool, fillNumberDataPointIntValue(timestamp, memoryPool.Used, dpsMp[MemoryPoolUsedName].AppendEmpty(), attrs) fillNumberDataPointIntValue(timestamp, memoryPool.Committed, dpsMp[MemoryPoolCommittedName].AppendEmpty(), attrs) } - } func buildMemoryPoolAttrs(pool *agent.MemoryPool) pcommon.Map { @@ -110,7 +109,6 @@ func buildMemoryPoolAttrs(pool *agent.MemoryPool) pcommon.Map { case agent.PoolType_NEWGEN_USAGE, agent.PoolType_OLDGEN_USAGE, agent.PoolType_SURVIVOR_USAGE: memoryType = "heap" default: - } attrs.PutStr("jvm.memory.type", memoryType) return attrs diff --git a/receiver/skywalkingreceiver/skywalking_receiver_test.go b/receiver/skywalkingreceiver/skywalking_receiver_test.go index 28d336cc9cd4..02218321a97d 100644 --- a/receiver/skywalkingreceiver/skywalking_receiver_test.go +++ b/receiver/skywalkingreceiver/skywalking_receiver_test.go @@ -83,7 +83,6 @@ func TestStartAndShutdown(t *testing.T) { require.NoError(t, err) require.NoError(t, sr.Start(context.Background(), componenttest.NewNopHost())) t.Cleanup(func() { require.NoError(t, sr.Shutdown(context.Background())) }) - } func TestGRPCReception(t *testing.T) { @@ -151,7 +150,6 @@ 
func TestHttpReception(t *testing.T) { // verify assert.NoError(t, err, "send skywalking segment successful.") assert.NotNil(t, response) - } func mockGrpcTraceSegment(sequence int) *agent.SegmentObject { diff --git a/receiver/snmpreceiver/config.go b/receiver/snmpreceiver/config.go index 2e6200ec1ae8..08ae73a29034 100644 --- a/receiver/snmpreceiver/config.go +++ b/receiver/snmpreceiver/config.go @@ -500,7 +500,6 @@ func validateScalarOID(metricName string, scalarOID ScalarOID, cfg *Config) erro combinedErr = errors.Join(combinedErr, fmt.Errorf(errMsgScalarMetricHasIndexedResourceAttribute, metricName, name)) continue } - } if len(scalarOID.Attributes) == 0 { @@ -593,7 +592,6 @@ func validateResourceAttributeConfigs(cfg *Config) error { // Make sure each Resource Attribute has exactly one of OID or ScalarOID or IndexedValuePrefix, and check that scalar and column OIDs end in the right digit for attrName, attrCfg := range resourceAttributes { - hasOID := attrCfg.OID != "" hasScalarOID := attrCfg.ScalarOID != "" hasIVP := attrCfg.IndexedValuePrefix != "" diff --git a/receiver/snmpreceiver/scraper_test.go b/receiver/snmpreceiver/scraper_test.go index b5c3100c4388..1076011ae9a7 100644 --- a/receiver/snmpreceiver/scraper_test.go +++ b/receiver/snmpreceiver/scraper_test.go @@ -42,7 +42,6 @@ func (_m *MockClient) Close() error { // Connect provides a mock function with given fields: func (_m *MockClient) Connect() error { - ret := _m.Called() var r0 error if rf, ok := ret.Get(0).(func() error); ok { @@ -101,7 +100,6 @@ func TestStart(t *testing.T) { { desc: "Valid Config", testFunc: func(t *testing.T) { - scraper := &snmpScraper{ cfg: createDefaultConfig().(*Config), settings: receivertest.NewNopSettings(), diff --git a/receiver/solacereceiver/messaging_service.go b/receiver/solacereceiver/messaging_service.go index b139f7d782b2..d1903e3e5031 100644 --- a/receiver/solacereceiver/messaging_service.go +++ b/receiver/solacereceiver/messaging_service.go @@ -69,7 +69,6 @@ func newAMQPMessagingServiceFactory(cfg *Config, logger *zap.Logger) (messagingS logger: logger, } }, nil - } type amqpConnectConfig struct { diff --git a/receiver/solacereceiver/receiver.go b/receiver/solacereceiver/receiver.go index a50f138e703c..4398008982c4 100644 --- a/receiver/solacereceiver/receiver.go +++ b/receiver/solacereceiver/receiver.go @@ -69,7 +69,6 @@ type solaceTracesReceiver struct { // newTracesReceiver creates a new solaceTraceReceiver as a receiver.Traces func newTracesReceiver(config *Config, set receiver.Settings, nextConsumer consumer.Traces) (receiver.Traces, error) { - factory, err := newAMQPMessagingServiceFactory(config, set.Logger) if err != nil { set.Logger.Warn("Error validating messaging service configuration", zap.Any("error", err)) @@ -222,7 +221,6 @@ func (s *solaceTracesReceiver) receiveMessages(ctx context.Context, service mess return err } } - } // receiveMessage is the heart of the receiver's control flow. It will receive messages, unmarshal the message and forward the trace. 
diff --git a/receiver/solacereceiver/unmarshaller_move.go b/receiver/solacereceiver/unmarshaller_move.go index 5027c27f26ed..aa220c277b1c 100644 --- a/receiver/solacereceiver/unmarshaller_move.go +++ b/receiver/solacereceiver/unmarshaller_move.go @@ -70,7 +70,6 @@ func (u *brokerTraceMoveUnmarshallerV1) mapResourceSpanAttributes(spanData *move } func (u *brokerTraceMoveUnmarshallerV1) mapMoveSpanTracingInfo(spanData *move_v1.SpanData, span ptrace.Span) { - // hard coded to internal span // SPAN_KIND_CONSUMER == 1 span.SetKind(ptrace.SpanKindInternal) diff --git a/receiver/splunkenterprisereceiver/scraper.go b/receiver/splunkenterprisereceiver/scraper.go index 4e78e709908a..54af1eae90da 100644 --- a/receiver/splunkenterprisereceiver/scraper.go +++ b/receiver/splunkenterprisereceiver/scraper.go @@ -426,7 +426,6 @@ func (s *splunkScraper) scrapeIndexerPipelineQueues(ctx context.Context, now pco errs <- errMaxSearchWaitTimeExceeded return } - } // Record the results var host string @@ -1665,7 +1664,6 @@ func (s *splunkScraper) scrapeSearchArtifacts(ctx context.Context, now pcommon.T } for _, f := range da.Entries { - if s.conf.MetricsBuilderConfig.Metrics.SplunkServerSearchartifactsAdhoc.Enabled { adhocCount, err := strconv.ParseInt(f.Content.AdhocCount, 10, 64) if err != nil { diff --git a/receiver/splunkhecreceiver/receiver.go b/receiver/splunkhecreceiver/receiver.go index 26043b1dfc7e..6d095ef61101 100644 --- a/receiver/splunkhecreceiver/receiver.go +++ b/receiver/splunkhecreceiver/receiver.go @@ -455,7 +455,6 @@ func (r *splunkReceiver) handleReq(resp http.ResponseWriter, req *http.Request) } events = append(events, &msg) } - } resourceCustomizer := r.createResourceCustomizer(req) if r.logsConsumer != nil && len(events) > 0 { diff --git a/receiver/splunkhecreceiver/receiver_test.go b/receiver/splunkhecreceiver/receiver_test.go index 85a1662ca9bf..e09e2abfaead 100644 --- a/receiver/splunkhecreceiver/receiver_test.go +++ b/receiver/splunkhecreceiver/receiver_test.go @@ -628,7 +628,6 @@ func Test_splunkhecReceiver_AccessTokenPassthrough(t *testing.T) { case <-time.After(5 * time.Second): assert.Fail(t, "Timeout") } - }) } } @@ -1786,7 +1785,6 @@ func Test_splunkhecreceiver_handle_nested_fields(t *testing.T) { assert.Equal(t, http.StatusBadRequest, w.Code) assert.JSONEq(t, fmt.Sprintf(responseErrHandlingIndexedFields, 0), w.Body.String()) } - }) } } diff --git a/receiver/splunkhecreceiver/splunk_to_logdata.go b/receiver/splunkhecreceiver/splunk_to_logdata.go index bb5b07b897f7..7b76755ae8b7 100644 --- a/receiver/splunkhecreceiver/splunk_to_logdata.go +++ b/receiver/splunkhecreceiver/splunk_to_logdata.go @@ -140,7 +140,6 @@ func convertToValue(logger *zap.Logger, src any, dest pcommon.Value) error { default: logger.Debug("Unsupported value conversion", zap.Any("value", src)) return errCannotConvertValue - } return nil } diff --git a/receiver/splunkhecreceiver/splunk_to_logdata_test.go b/receiver/splunkhecreceiver/splunk_to_logdata_test.go index f99d05b35d3d..15e8bc95df80 100644 --- a/receiver/splunkhecreceiver/splunk_to_logdata_test.go +++ b/receiver/splunkhecreceiver/splunk_to_logdata_test.go @@ -29,7 +29,6 @@ var defaultTestingHecConfig = &Config{ } func Test_SplunkHecToLogData(t *testing.T) { - time := 0.123 nanoseconds := 123000000 diff --git a/receiver/splunkhecreceiver/splunkhec_to_metricdata.go b/receiver/splunkhecreceiver/splunkhec_to_metricdata.go index d148576c0fac..568bc35cafbc 100644 --- a/receiver/splunkhecreceiver/splunkhec_to_metricdata.go +++ 
b/receiver/splunkhecreceiver/splunkhec_to_metricdata.go @@ -127,7 +127,6 @@ func buildAttributes(dimensions map[string]any) pcommon.Map { attributes := pcommon.NewMap() attributes.EnsureCapacity(len(dimensions)) for key, val := range dimensions { - if strings.HasPrefix(key, "metric_name") || key == "_value" { continue } diff --git a/receiver/splunkhecreceiver/splunkhec_to_metricdata_test.go b/receiver/splunkhecreceiver/splunkhec_to_metricdata_test.go index a218bbd83a88..9801b2350abd 100644 --- a/receiver/splunkhecreceiver/splunkhec_to_metricdata_test.go +++ b/receiver/splunkhecreceiver/splunkhec_to_metricdata_test.go @@ -62,7 +62,6 @@ func Test_splunkV2ToMetricsData(t *testing.T) { pt.Fields["metric_name"] = "single" pt.Fields["_value"] = int64Ptr(13) return pt - }(), wantMetricsData: buildDefaultMetricsData(nanos), hecConfig: defaultTestingHecConfig, diff --git a/receiver/sqlqueryreceiver/logs_receiver.go b/receiver/sqlqueryreceiver/logs_receiver.go index f6d68978487c..06ae82fc3fbc 100644 --- a/receiver/sqlqueryreceiver/logs_receiver.go +++ b/receiver/sqlqueryreceiver/logs_receiver.go @@ -48,7 +48,6 @@ func newLogsReceiver( createClient sqlquery.ClientProviderFunc, nextConsumer consumer.Logs, ) (*logsReceiver, error) { - obsr, err := receiverhelper.NewObsReport(receiverhelper.ObsReportSettings{ ReceiverID: settings.ID, ReceiverCreateSettings: settings, @@ -268,7 +267,6 @@ func (queryReceiver *logsQueryReceiver) retrieveTrackingValue(ctx context.Contex } return string(storedTrackingValueBytes) - } func (queryReceiver *logsQueryReceiver) collect(ctx context.Context) (plog.Logs, error) { diff --git a/receiver/sqlserverreceiver/factory.go b/receiver/sqlserverreceiver/factory.go index 5b374edb232f..00c235170b71 100644 --- a/receiver/sqlserverreceiver/factory.go +++ b/receiver/sqlserverreceiver/factory.go @@ -52,7 +52,6 @@ func setupQueries(cfg *Config) []string { cfg.MetricsBuilderConfig.Metrics.SqlserverBatchSQLRecompilationRate.Enabled || cfg.MetricsBuilderConfig.Metrics.SqlserverBatchSQLCompilationRate.Enabled || cfg.MetricsBuilderConfig.Metrics.SqlserverUserConnectionCount.Enabled { - queries = append(queries, getSQLServerPerformanceCounterQuery(cfg.InstanceName)) } diff --git a/receiver/sqlserverreceiver/queries_test.go b/receiver/sqlserverreceiver/queries_test.go index 65e3cb050094..f45bf5742fdd 100644 --- a/receiver/sqlserverreceiver/queries_test.go +++ b/receiver/sqlserverreceiver/queries_test.go @@ -68,5 +68,4 @@ func TestQueryContents(t *testing.T) { require.Equal(t, expected, actual) }) } - } diff --git a/receiver/sqlserverreceiver/scraper.go b/receiver/sqlserverreceiver/scraper.go index 487ba5176c63..a373719cb0ed 100644 --- a/receiver/sqlserverreceiver/scraper.go +++ b/receiver/sqlserverreceiver/scraper.go @@ -51,7 +51,6 @@ func newSQLServerScraper(id component.ID, dbProviderFunc sqlquery.DbProviderFunc, clientProviderFunc sqlquery.ClientProviderFunc, mb *metadata.MetricsBuilder) *sqlServerScraperHelper { - return &sqlServerScraperHelper{ id: id, sqlQuery: query, diff --git a/receiver/sqlserverreceiver/scraper_test.go b/receiver/sqlserverreceiver/scraper_test.go index fb52efe620d4..dd1e4d654809 100644 --- a/receiver/sqlserverreceiver/scraper_test.go +++ b/receiver/sqlserverreceiver/scraper_test.go @@ -165,7 +165,6 @@ func readFile(fname string) ([]sqlquery.StringMap, error) { } return metrics, nil - } func (mc mockClient) QueryRows(context.Context, ...any) ([]sqlquery.StringMap, error) { diff --git a/receiver/sshcheckreceiver/factory.go b/receiver/sshcheckreceiver/factory.go index 
003fb710a045..15ae2fbb0496 100644 --- a/receiver/sshcheckreceiver/factory.go +++ b/receiver/sshcheckreceiver/factory.go @@ -38,7 +38,6 @@ func createDefaultConfig() component.Config { } func createMetricsReceiver(_ context.Context, params receiver.Settings, rConf component.Config, consumer consumer.Metrics) (receiver.Metrics, error) { - cfg, ok := rConf.(*Config) if !ok { return nil, errConfigNotSSHCheck diff --git a/receiver/sshcheckreceiver/scraper_test.go b/receiver/sshcheckreceiver/scraper_test.go index 592a4a854350..899d89bce5a9 100644 --- a/receiver/sshcheckreceiver/scraper_test.go +++ b/receiver/sshcheckreceiver/scraper_test.go @@ -314,7 +314,6 @@ func TestCancellation(t *testing.T) { _, err := scrpr.scrape(ctx) require.Error(t, err, "should have returned error on canceled context") require.EqualValues(t, err.Error(), ctx.Err().Error(), "scrape should return context's error") - } // issue # 18193 diff --git a/receiver/statsdreceiver/config.go b/receiver/statsdreceiver/config.go index 2cb36e8837f7..92a6cc653b14 100644 --- a/receiver/statsdreceiver/config.go +++ b/receiver/statsdreceiver/config.go @@ -34,7 +34,6 @@ func (c *Config) Validate() error { var TimerHistogramMappingMissingObjectName bool for _, eachMap := range c.TimerHistogramMapping { - if eachMap.StatsdType == "" { TimerHistogramMappingMissingObjectName = true break diff --git a/receiver/statsdreceiver/internal/protocol/metric_translator_test.go b/receiver/statsdreceiver/internal/protocol/metric_translator_test.go index a4609d502643..95f18b16f01f 100644 --- a/receiver/statsdreceiver/internal/protocol/metric_translator_test.go +++ b/receiver/statsdreceiver/internal/protocol/metric_translator_test.go @@ -63,7 +63,6 @@ func TestSetTimestampsForCounterMetric(t *testing.T) { metric.Metrics().At(0).Sum().DataPoints().At(0).Timestamp(), expectedMetrics.Metrics().At(0).Sum().DataPoints().At(0).Timestamp(), ) - } func TestBuildGaugeMetric(t *testing.T) { @@ -272,5 +271,4 @@ func TestBuildHistogramMetric(t *testing.T) { require.Equal(t, "myvalue", val.Str()) val, _ = datapoint.Attributes().Get("mykey2") require.Equal(t, "myvalue2", val.Str()) - } diff --git a/receiver/statsdreceiver/internal/protocol/statsd_parser_test.go b/receiver/statsdreceiver/internal/protocol/statsd_parser_test.go index c908e6991aa4..bfdab7167128 100644 --- a/receiver/statsdreceiver/internal/protocol/statsd_parser_test.go +++ b/receiver/statsdreceiver/internal/protocol/statsd_parser_test.go @@ -1636,7 +1636,6 @@ func TestStatsDParser_Mappings(t *testing.T) { } func TestStatsDParser_ScopeIsIncluded(t *testing.T) { - const devVersion = "dev-0.0.1" p := &StatsDParser{ @@ -1671,7 +1670,6 @@ func TestStatsDParser_ScopeIsIncluded(t *testing.T) { assert.Equal(t, receiverName, scope.Name()) assert.Equal(t, devVersion, scope.Version()) } - } func TestTimeNowFunc(t *testing.T) { @@ -1963,5 +1961,4 @@ func TestStatsDParser_IPOnlyAggregation(t *testing.T) { Metrics().At(0).Sum().DataPoints().At(0).IntValue() assert.Equal(t, int64(4), value) - } diff --git a/receiver/statsdreceiver/receiver.go b/receiver/statsdreceiver/receiver.go index d22c5a5be10b..81e11a67b1e5 100644 --- a/receiver/statsdreceiver/receiver.go +++ b/receiver/statsdreceiver/receiver.go @@ -46,7 +46,6 @@ func newReceiver( config Config, nextConsumer consumer.Metrics, ) (receiver.Metrics, error) { - if config.NetAddr.Endpoint == "" { config.NetAddr.Endpoint = "localhost:8125" } diff --git a/receiver/tlscheckreceiver/config_test.go b/receiver/tlscheckreceiver/config_test.go index 54e1748352c9..2aa2c47cd4bc 
100644 --- a/receiver/tlscheckreceiver/config_test.go +++ b/receiver/tlscheckreceiver/config_test.go @@ -89,7 +89,6 @@ func TestValidate(t *testing.T) { } else { require.NoError(t, actualErr) } - }) } } diff --git a/receiver/vcenterreceiver/config_test.go b/receiver/vcenterreceiver/config_test.go index e4ce36c3e617..1d714f4f2348 100644 --- a/receiver/vcenterreceiver/config_test.go +++ b/receiver/vcenterreceiver/config_test.go @@ -110,5 +110,4 @@ func TestLoadConfig(t *testing.T) { if diff := cmp.Diff(expected, cfg, cmpopts.IgnoreUnexported(metadata.MetricConfig{}), cmpopts.IgnoreUnexported(metadata.ResourceAttributeConfig{})); diff != "" { t.Errorf("Config mismatch (-expected +actual):\n%s", diff) } - } diff --git a/receiver/vcenterreceiver/metrics.go b/receiver/vcenterreceiver/metrics.go index 863d1d3ee8e8..ab3bd4927940 100644 --- a/receiver/vcenterreceiver/metrics.go +++ b/receiver/vcenterreceiver/metrics.go @@ -73,7 +73,6 @@ func (v *vcenterMetricScraper) recordDatacenterStats( v.mb.RecordVcenterDatacenterDiskSpaceDataPoint(ts, dcStat.DiskFree, metadata.AttributeDiskStateAvailable) v.mb.RecordVcenterDatacenterCPULimitDataPoint(ts, dcStat.CPULimit) v.mb.RecordVcenterDatacenterMemoryLimitDataPoint(ts, dcStat.MemoryLimit) - } func getEntityStatusAttribute(status types.ManagedEntityStatus) (metadata.AttributeEntityStatus, bool) { @@ -200,7 +199,6 @@ func (v *vcenterMetricScraper) recordResourcePoolStats( v.mb.RecordVcenterResourcePoolCPUSharesDataPoint(ts, int64(s.Config.CpuAllocation.Shares.Shares)) v.mb.RecordVcenterResourcePoolMemorySharesDataPoint(ts, int64(s.Config.MemoryAllocation.Shares.Shares)) - } // recordClusterStats records stat metrics for a vSphere Host @@ -312,7 +310,6 @@ func (v *vcenterMetricScraper) recordVMStats( cpuReadiness := vm.Summary.QuickStats.OverallCpuReadiness v.mb.RecordVcenterVMCPUReadinessDataPoint(ts, int64(cpuReadiness)) - } var hostPerfMetricList = []string{ diff --git a/receiver/webhookeventreceiver/req_to_log.go b/receiver/webhookeventreceiver/req_to_log.go index ea29a57e3e32..5a8ceb13ddeb 100644 --- a/receiver/webhookeventreceiver/req_to_log.go +++ b/receiver/webhookeventreceiver/req_to_log.go @@ -58,5 +58,4 @@ func appendMetadata(resourceLog plog.ResourceLogs, query url.Values) { resourceLog.Resource().Attributes().PutStr(k, query.Get(k)) } } - } diff --git a/receiver/zipkinreceiver/trace_receiver.go b/receiver/zipkinreceiver/trace_receiver.go index 0918f254a2b9..23c77a186d4b 100644 --- a/receiver/zipkinreceiver/trace_receiver.go +++ b/receiver/zipkinreceiver/trace_receiver.go @@ -254,7 +254,6 @@ func (zr *zipkinReceiver) ServeHTTP(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusInternalServerError) _, _ = w.Write(errNextConsumerRespBody) } - } func transportType(r *http.Request, asZipkinv1 bool) string { diff --git a/receiver/zookeeperreceiver/metrics.go b/receiver/zookeeperreceiver/metrics.go index 80a8d49a12f1..86ae2eeb0049 100644 --- a/receiver/zookeeperreceiver/metrics.go +++ b/receiver/zookeeperreceiver/metrics.go @@ -111,7 +111,6 @@ func (m *metricCreator) generateComputedMetrics(logger *zap.Logger, ts pcommon.T if err := m.computeNotSyncedFollowersMetric(ts); err != nil { logger.Debug("metric computation failed", zap.Error(err)) } - } func (m *metricCreator) computeNotSyncedFollowersMetric(ts pcommon.Timestamp) error { diff --git a/testbed/correctnesstests/connectors/correctness_test.go b/testbed/correctnesstests/connectors/correctness_test.go index 40a41d63c3c5..903e329e5a4b 100644 --- 
a/testbed/correctnesstests/connectors/correctness_test.go +++ b/testbed/correctnesstests/connectors/correctness_test.go @@ -43,7 +43,6 @@ func TestGoldenData(t *testing.T) { t.Run(sampleTest.TestName, func(t *testing.T) { testWithGoldenDataset(t, sampleTest.DataSender, sampleTest.DataReceiver, sampleTest.ResourceSpec, sampleTest.DataConnector, processors) }) - } func testWithGoldenDataset( @@ -96,5 +95,4 @@ func testWithGoldenDataset( 3*time.Second, "all data items received") tc.StopAgent() - } diff --git a/testbed/correctnesstests/utils.go b/testbed/correctnesstests/utils.go index 38dbf813d00d..8ffd41ad0330 100644 --- a/testbed/correctnesstests/utils.go +++ b/testbed/correctnesstests/utils.go @@ -33,7 +33,6 @@ func CreateConfigYaml( connector testbed.DataConnector, processors []ProcessorNameAndConfigBody, ) string { - // Prepare extra processor config section and comma-separated list of extra processor // names to use in corresponding "processors" settings. processorsSections := "" diff --git a/testbed/datasenders/jaeger.go b/testbed/datasenders/jaeger.go index 52adcebee9ae..eb1cb488238a 100644 --- a/testbed/datasenders/jaeger.go +++ b/testbed/datasenders/jaeger.go @@ -145,7 +145,6 @@ func (s *protoGRPCSender) pushTraces( ctx context.Context, td ptrace.Traces, ) error { - batches := jaeger.ProtoFromTraces(td) if s.metadata.Len() > 0 { diff --git a/testbed/datasenders/syslog.go b/testbed/datasenders/syslog.go index 24cec672a6c7..04f6c1140b33 100644 --- a/testbed/datasenders/syslog.go +++ b/testbed/datasenders/syslog.go @@ -122,7 +122,6 @@ func (f *SyslogWriter) SendCheck() error { if err != nil { return nil } - } return nil } diff --git a/testbed/datasenders/tcpudp.go b/testbed/datasenders/tcpudp.go index 7ed2af93a50f..cdf973142b41 100644 --- a/testbed/datasenders/tcpudp.go +++ b/testbed/datasenders/tcpudp.go @@ -115,7 +115,6 @@ func (f *TCPUDPWriter) SendCheck() error { if err != nil { return nil } - } return nil } diff --git a/testbed/testbed/child_process_collector.go b/testbed/testbed/child_process_collector.go index 61114c5b95b5..6661f35595dd 100644 --- a/testbed/testbed/child_process_collector.go +++ b/testbed/testbed/child_process_collector.go @@ -176,7 +176,6 @@ func expandExeFileName(exeName string) string { // the process to. // cmdArgs is the command line arguments to pass to the process. func (cp *childProcessCollector) Start(params StartParams) error { - cp.name = params.Name cp.doneSignal = make(chan struct{}) cp.resourceSpec = params.resourceSpec @@ -249,7 +248,6 @@ func (cp *childProcessCollector) Stop() (stopped bool, err error) { return false, nil } cp.stopOnce.Do(func() { - if !cp.isStarted { // Process wasn't started, nothing to stop. return diff --git a/testbed/testbed/data_providers.go b/testbed/testbed/data_providers.go index ea0393e53dde..a5808a2aa81a 100644 --- a/testbed/testbed/data_providers.go +++ b/testbed/testbed/data_providers.go @@ -60,7 +60,6 @@ func (dp *perfTestDataProvider) GenerateTraces() (ptrace.Traces, bool) { traceID := dp.traceIDSequence.Add(1) for i := 0; i < dp.options.ItemsPerBatch; i++ { - startTime := time.Now().Add(time.Duration(i+int(traceID)*1000) * time.Second) endTime := startTime.Add(time.Millisecond) diff --git a/testbed/testbed/mock_backend.go b/testbed/testbed/mock_backend.go index 982e1c63cdca..9bca45545bc1 100644 --- a/testbed/testbed/mock_backend.go +++ b/testbed/testbed/mock_backend.go @@ -218,7 +218,6 @@ func (tc *MockTraceConsumer) ConsumeTraces(_ context.Context, td ptrace.Traces) // Ignore the seqnums for now. 
We will use them later. _ = spanSeqnum _ = traceSeqnum - } } } diff --git a/testbed/testbed/test_case.go b/testbed/testbed/test_case.go index 749527cf0c0b..aaca07c86d0a 100644 --- a/testbed/testbed/test_case.go +++ b/testbed/testbed/test_case.go @@ -345,5 +345,4 @@ func (tc *TestCase) AgentLogsContains(text string) bool { res, _ := grep.Output() return string(res) != "" - } diff --git a/testbed/testbed/validator.go b/testbed/testbed/validator.go index 6fed95ee857b..671d3503408f 100644 --- a/testbed/testbed/validator.go +++ b/testbed/testbed/validator.go @@ -48,7 +48,6 @@ func (v *LogPresentValidator) Validate(tc *TestCase) { } func (v *LogPresentValidator) RecordResults(tc *TestCase) { - var result string if tc.t.Failed() { result = "FAIL" @@ -418,7 +417,6 @@ func (v *CorrectnessTestValidator) diffSpanLinks(sentSpan ptrace.Span, recdSpan } v.assertionFailures = append(v.assertionFailures, af) } - } } if sentSpan.DroppedLinksCount() != recdSpan.DroppedLinksCount() { diff --git a/testbed/tests/e2e_test.go b/testbed/tests/e2e_test.go index 6d1b6800dcc3..4173a78978df 100644 --- a/testbed/tests/e2e_test.go +++ b/testbed/tests/e2e_test.go @@ -17,7 +17,6 @@ import ( ) func TestIdleMode(t *testing.T) { - options := testbed.LoadOptions{DataItemsPerSecond: 10_000, ItemsPerBatch: 10} dataProvider := testbed.NewPerfTestDataProvider(options) diff --git a/testbed/tests/log_test.go b/testbed/tests/log_test.go index 8b44f83f670a..41cb83ed0797 100644 --- a/testbed/tests/log_test.go +++ b/testbed/tests/log_test.go @@ -241,7 +241,6 @@ func TestLogOtlpSendingQueue(t *testing.T) { nil, nil) }) - } func TestLogLargeFiles(t *testing.T) { diff --git a/testbed/tests/metric_test.go b/testbed/tests/metric_test.go index 03dd0ccc26ab..6eb8b7fd9829 100644 --- a/testbed/tests/metric_test.go +++ b/testbed/tests/metric_test.go @@ -91,7 +91,6 @@ func TestMetric10kDPS(t *testing.T) { ) }) } - } func TestMetricsFromFile(t *testing.T) { diff --git a/testbed/tests/resource_processor_test.go b/testbed/tests/resource_processor_test.go index ebe91c449a2d..0e720f7ff076 100644 --- a/testbed/tests/resource_processor_test.go +++ b/testbed/tests/resource_processor_test.go @@ -49,7 +49,6 @@ type resourceProcessorTestCase struct { } func getResourceProcessorTestCases() []resourceProcessorTestCase { - tests := []resourceProcessorTestCase{ { name: "update_and_rename_existing_attributes", diff --git a/testbed/tests/scenarios.go b/testbed/tests/scenarios.go index 85973f89b29a..2b1d7c185a45 100644 --- a/testbed/tests/scenarios.go +++ b/testbed/tests/scenarios.go @@ -44,7 +44,6 @@ func createConfigYaml( processors []ProcessorNameAndConfigBody, extensions map[string]string, ) string { - // Create a config. Note that our DataSender is used to generate a config for Collector's // receiver and our DataReceiver is used to generate a config for Collector's exporter. 
// This is because our DataSender sends to Collector's receiver and our DataReceiver @@ -271,7 +270,6 @@ func Scenario1kSPSWithAttrs(t *testing.T, args []string, tests []TestCase, proce test := tests[i] t.Run(fmt.Sprintf("%d*%dbytes", test.attrCount, test.attrSizeByte), func(t *testing.T) { - options := constructLoadOptions(test) agentProc := testbed.NewChildProcessCollector(testbed.WithEnvVar("GOMAXPROCS", "2")) diff --git a/testbed/tests/trace_test.go b/testbed/tests/trace_test.go index 4cb95241e7a9..b0a84e209649 100644 --- a/testbed/tests/trace_test.go +++ b/testbed/tests/trace_test.go @@ -180,7 +180,6 @@ func TestTrace10kSPSJaegerGRPC(t *testing.T) { } func TestTraceNoBackend10kSPS(t *testing.T) { - limitProcessors := []ProcessorNameAndConfigBody{ { Name: "memory_limiter", @@ -277,7 +276,6 @@ func verifySingleSpan( spanName string, verifyReceived func(span ptrace.Span), ) { - // Clear previously received traces. tc.MockBackend.ClearReceivedItems() startCounter := tc.MockBackend.DataItemsReceived()