diff --git a/.chloggen/add-decode-function.yaml b/.chloggen/add-decode-function.yaml new file mode 100644 index 000000000000..8bddaea308c8 --- /dev/null +++ b/.chloggen/add-decode-function.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: pkg/ottl + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Added Decode() converter function + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [32493] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [] diff --git a/.chloggen/deltatocumulative-apitest.yaml b/.chloggen/deltatocumulative-apitest.yaml new file mode 100644 index 000000000000..cc5f5fa95774 --- /dev/null +++ b/.chloggen/deltatocumulative-apitest.yaml @@ -0,0 +1,28 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: bug_fix + +# The name of the component, or a single word describing the area of concern, (e.g. 
filelogreceiver) +component: deltatocumulative + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: drop bad samples + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [34979] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: | + removes bad (rejected) samples from output. previously identified and metric-tracked those as such, but didn't actually drop them. + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [user] diff --git a/.chloggen/elasticsearchexporter_encode-double-as-double.yaml b/.chloggen/elasticsearchexporter_encode-double-as-double.yaml new file mode 100644 index 000000000000..a897e7850763 --- /dev/null +++ b/.chloggen/elasticsearchexporter_encode-double-as-double.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: bug_fix + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: elasticsearchexporter + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Fix dynamic mapping for double values storing integers + +# Mandatory: One or more tracking issues related to the change. 
You can use the PR number here if no issue exists. +issues: [34680] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [user] diff --git a/.chloggen/elasticsearchexporter_exponential-histogram.yaml b/.chloggen/elasticsearchexporter_exponential-histogram.yaml new file mode 100644 index 000000000000..31bb58e2c7fc --- /dev/null +++ b/.chloggen/elasticsearchexporter_exponential-histogram.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: elasticsearchexporter + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Add exponential histogram support + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [34813] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. 
+subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [] diff --git a/.chloggen/elasticsearchexporter_otel-mode-traces-span-events.yaml b/.chloggen/elasticsearchexporter_otel-mode-traces-span-events.yaml new file mode 100644 index 000000000000..73e7e06cd4e5 --- /dev/null +++ b/.chloggen/elasticsearchexporter_otel-mode-traces-span-events.yaml @@ -0,0 +1,29 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: elasticsearchexporter + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Add span event support to traces OTel mapping mode + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [34831] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + Span events are now supported in OTel mapping mode. + They will be routed to `logs-${data_stream.dataset}-${data_stream.namespace}` if `traces_dynamic_index::enabled` is `true`. + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. 
+# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [user] diff --git a/.chloggen/fix-hisgogram-metrics-miss-unit.yaml b/.chloggen/fix-hisgogram-metrics-miss-unit.yaml new file mode 100644 index 000000000000..9a0e8117c85d --- /dev/null +++ b/.chloggen/fix-hisgogram-metrics-miss-unit.yaml @@ -0,0 +1,32 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: breaking + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: servicegraphconnector + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Fix histogram metrics miss unit + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [34511] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + All metrics will remove the suffix `_seconds`. It will not introduce breaking change if users use + | `prometheusexporter` or `prometheusremotewriteexporter` to exporter metrics in pipeline. + | In some cases, like using `clickhouseexporter`(save data in native OTLP format), it will be a breaking change. + | Users can use `transformprocessor` to add back this suffix. + + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. 
'[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [] diff --git a/.chloggen/fix_set_data_race.yaml b/.chloggen/fix_set_data_race.yaml new file mode 100644 index 000000000000..1186fe0955d2 --- /dev/null +++ b/.chloggen/fix_set_data_race.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: 'bug_fix' + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: geoipprocessor + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Avoid using internal empty attribute.Set pointer + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [34882] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [] diff --git a/.chloggen/otelarrow-exporttimeout.yaml b/.chloggen/otelarrow-exporttimeout.yaml new file mode 100644 index 000000000000..ea97db44729e --- /dev/null +++ b/.chloggen/otelarrow-exporttimeout.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. 
+ +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: otelarrowexporter + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Add gRPC timeout propagation. + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [34733] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [user] diff --git a/.chloggen/ottl_sort_func.yaml b/.chloggen/ottl_sort_func.yaml new file mode 100644 index 000000000000..7b9d32749d9c --- /dev/null +++ b/.chloggen/ottl_sort_func.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: pkg/ottl + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Add `Sort` function to sort array to ascending order or descending order + +# Mandatory: One or more tracking issues related to the change. 
You can use the PR number here if no issue exists. +issues: [34200] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [user] diff --git a/.chloggen/supervisor-healthcheck-port-configurable.yaml b/.chloggen/supervisor-healthcheck-port-configurable.yaml new file mode 100644 index 000000000000..e0e137835d0a --- /dev/null +++ b/.chloggen/supervisor-healthcheck-port-configurable.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: opampsupervisor + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: "Add new config parameter `agent.health_check_port` to allow configuring the port used by the agent healthcheck extension." + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [34643] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. 
+subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [] diff --git a/.golangci.yml b/.golangci.yml index 9fa06db90729..edf02ba1dcdd 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -134,8 +134,6 @@ linters-settings: testifylint: disable: - - error-is-as - - expected-actual - float-compare - formatter - go-require diff --git a/Makefile.Common b/Makefile.Common index 7f5ce2dff66e..53584bf57e14 100644 --- a/Makefile.Common +++ b/Makefile.Common @@ -75,7 +75,7 @@ GOTESTSUM := $(TOOLS_BIN_DIR)/gotestsum TESTIFYLINT := $(TOOLS_BIN_DIR)/testifylint GOTESTSUM_OPT?= --rerun-fails=1 -TESTIFYLINT_OPT?= --enable-all --disable=error-is-as,expected-actual,float-compare,formatter,go-require,negative-positive,require-error,suite-dont-use-pkg,suite-subtest-run,useless-assert +TESTIFYLINT_OPT?= --enable-all --disable=float-compare,formatter,go-require,negative-positive,require-error,suite-dont-use-pkg,suite-subtest-run,useless-assert # BUILD_TYPE should be one of (dev, release). BUILD_TYPE?=release diff --git a/cmd/opampsupervisor/e2e_test.go b/cmd/opampsupervisor/e2e_test.go index 22a1bcb755bd..9316e8ffde8a 100644 --- a/cmd/opampsupervisor/e2e_test.go +++ b/cmd/opampsupervisor/e2e_test.go @@ -337,8 +337,9 @@ func TestSupervisorStartsWithNoOpAMPServer(t *testing.T) { // The supervisor is started without a running OpAMP server. // The supervisor should start successfully, even if the OpAMP server is stopped. 
- s := newSupervisor(t, "basic", map[string]string{ - "url": server.addr, + s := newSupervisor(t, "healthcheck_port", map[string]string{ + "url": server.addr, + "healthcheck_port": "12345", }) require.Nil(t, s.Start()) @@ -346,9 +347,9 @@ func TestSupervisorStartsWithNoOpAMPServer(t *testing.T) { // Verify the collector is running by checking the metrics endpoint require.Eventually(t, func() bool { - resp, err := http.DefaultClient.Get("http://localhost:8888/metrics") + resp, err := http.DefaultClient.Get("http://localhost:12345") if err != nil { - t.Logf("Failed check for prometheus metrics: %s", err) + t.Logf("Failed agent healthcheck request: %s", err) return false } require.NoError(t, resp.Body.Close()) diff --git a/cmd/opampsupervisor/supervisor/config/config.go b/cmd/opampsupervisor/supervisor/config/config.go index 60244e9d9c9e..7e8d2124c356 100644 --- a/cmd/opampsupervisor/supervisor/config/config.go +++ b/cmd/opampsupervisor/supervisor/config/config.go @@ -121,6 +121,7 @@ type Agent struct { Executable string OrphanDetectionInterval time.Duration `mapstructure:"orphan_detection_interval"` Description AgentDescription `mapstructure:"description"` + HealthCheckPort int `mapstructure:"health_check_port"` } func (a Agent) Validate() error { @@ -128,6 +129,10 @@ func (a Agent) Validate() error { return errors.New("agent::orphan_detection_interval must be positive") } + if a.HealthCheckPort < 0 || a.HealthCheckPort > 65535 { + return errors.New("agent::health_check_port must be a valid port number") + } + if a.Executable == "" { return errors.New("agent::executable must be specified") } diff --git a/cmd/opampsupervisor/supervisor/config/config_test.go b/cmd/opampsupervisor/supervisor/config/config_test.go index afc3e9c0f462..776523ab0646 100644 --- a/cmd/opampsupervisor/supervisor/config/config_test.go +++ b/cmd/opampsupervisor/supervisor/config/config_test.go @@ -223,6 +223,82 @@ func TestValidate(t *testing.T) { }, expectedError: 
"agent::orphan_detection_interval must be positive", }, + { + name: "Invalid port number", + config: Supervisor{ + Server: OpAMPServer{ + Endpoint: "wss://localhost:9090/opamp", + Headers: http.Header{ + "Header1": []string{"HeaderValue"}, + }, + TLSSetting: configtls.ClientConfig{ + Insecure: true, + }, + }, + Agent: Agent{ + Executable: "${file_path}", + OrphanDetectionInterval: 5 * time.Second, + HealthCheckPort: 65536, + }, + Capabilities: Capabilities{ + AcceptsRemoteConfig: true, + }, + Storage: Storage{ + Directory: "/etc/opamp-supervisor/storage", + }, + }, + expectedError: "agent::health_check_port must be a valid port number", + }, + { + name: "Zero value port number", + config: Supervisor{ + Server: OpAMPServer{ + Endpoint: "wss://localhost:9090/opamp", + Headers: http.Header{ + "Header1": []string{"HeaderValue"}, + }, + TLSSetting: configtls.ClientConfig{ + Insecure: true, + }, + }, + Agent: Agent{ + Executable: "${file_path}", + OrphanDetectionInterval: 5 * time.Second, + HealthCheckPort: 0, + }, + Capabilities: Capabilities{ + AcceptsRemoteConfig: true, + }, + Storage: Storage{ + Directory: "/etc/opamp-supervisor/storage", + }, + }, + }, + { + name: "Normal port number", + config: Supervisor{ + Server: OpAMPServer{ + Endpoint: "wss://localhost:9090/opamp", + Headers: http.Header{ + "Header1": []string{"HeaderValue"}, + }, + TLSSetting: configtls.ClientConfig{ + Insecure: true, + }, + }, + Agent: Agent{ + Executable: "${file_path}", + OrphanDetectionInterval: 5 * time.Second, + HealthCheckPort: 29848, + }, + Capabilities: Capabilities{ + AcceptsRemoteConfig: true, + }, + Storage: Storage{ + Directory: "/etc/opamp-supervisor/storage", + }, + }, + }, } // create some fake files for validating agent config diff --git a/cmd/opampsupervisor/supervisor/supervisor.go b/cmd/opampsupervisor/supervisor/supervisor.go index 804face7d1ae..2521a413825c 100644 --- a/cmd/opampsupervisor/supervisor/supervisor.go +++ b/cmd/opampsupervisor/supervisor/supervisor.go @@ 
-179,10 +179,13 @@ func (s *Supervisor) Start() error { return fmt.Errorf("could not get bootstrap info from the Collector: %w", err) } - healthCheckPort, err := s.findRandomPort() + healthCheckPort := s.config.Agent.HealthCheckPort + if healthCheckPort == 0 { + healthCheckPort, err = s.findRandomPort() - if err != nil { - return fmt.Errorf("could not find port for health check: %w", err) + if err != nil { + return fmt.Errorf("could not find port for health check: %w", err) + } } s.agentHealthCheckEndpoint = fmt.Sprintf("localhost:%d", healthCheckPort) diff --git a/cmd/opampsupervisor/testdata/supervisor/supervisor_healthcheck_port.yaml b/cmd/opampsupervisor/testdata/supervisor/supervisor_healthcheck_port.yaml new file mode 100644 index 000000000000..10ba3976615e --- /dev/null +++ b/cmd/opampsupervisor/testdata/supervisor/supervisor_healthcheck_port.yaml @@ -0,0 +1,19 @@ +server: + endpoint: ws://{{.url}}/v1/opamp + tls: + insecure: true + +capabilities: + reports_effective_config: true + reports_own_metrics: true + reports_health: true + accepts_remote_config: true + reports_remote_config: true + accepts_restart_command: true + +storage: + directory: "{{.storage_dir}}" + +agent: + executable: ../../bin/otelcontribcol_{{.goos}}_{{.goarch}}{{.extension}} + health_check_port: "{{ .healthcheck_port }}" diff --git a/cmd/otelcontribcol/builder-config.yaml b/cmd/otelcontribcol/builder-config.yaml index cdabdd632afe..c97896352518 100644 --- a/cmd/otelcontribcol/builder-config.yaml +++ b/cmd/otelcontribcol/builder-config.yaml @@ -499,3 +499,5 @@ replaces: - github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/cfgardenobserver => ../../extension/observer/cfgardenobserver - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/rabbitmqexporter => ../../exporter/rabbitmqexporter - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/githubreceiver => ../../receiver/githubreceiver + - 
github.com/open-telemetry/opentelemetry-collector-contrib/internal/grpcutil => ../../internal/grpcutil + diff --git a/cmd/otelcontribcol/go.mod b/cmd/otelcontribcol/go.mod index 2ba453c67adc..8f4d6e81bb74 100644 --- a/cmd/otelcontribcol/go.mod +++ b/cmd/otelcontribcol/go.mod @@ -250,13 +250,13 @@ require ( cloud.google.com/go/auth v0.9.1 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect cloud.google.com/go/compute/metadata v0.5.0 // indirect - cloud.google.com/go/iam v1.1.12 // indirect + cloud.google.com/go/iam v1.1.13 // indirect cloud.google.com/go/logging v1.11.0 // indirect - cloud.google.com/go/longrunning v0.5.11 // indirect - cloud.google.com/go/monitoring v1.20.3 // indirect + cloud.google.com/go/longrunning v0.5.12 // indirect + cloud.google.com/go/monitoring v1.20.4 // indirect cloud.google.com/go/pubsub v1.42.0 // indirect cloud.google.com/go/spanner v1.67.0 // indirect - cloud.google.com/go/trace v1.10.11 // indirect + cloud.google.com/go/trace v1.10.12 // indirect code.cloudfoundry.org/clock v0.0.0-20180518195852-02e53af36e6c // indirect code.cloudfoundry.org/go-diodes v0.0.0-20211115184647-b584dd5df32c // indirect code.cloudfoundry.org/go-loggregator v7.4.0+incompatible // indirect @@ -409,7 +409,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.18 // indirect github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.16 // indirect github.com/aws/aws-sdk-go-v2/service/kinesis v1.29.5 // indirect - github.com/aws/aws-sdk-go-v2/service/s3 v1.60.1 // indirect + github.com/aws/aws-sdk-go-v2/service/s3 v1.61.0 // indirect github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.32.6 // indirect github.com/aws/aws-sdk-go-v2/service/servicediscovery v1.31.5 // indirect github.com/aws/aws-sdk-go-v2/service/sso v1.22.5 // indirect @@ -512,7 +512,7 @@ require ( github.com/google/flatbuffers v24.3.25+incompatible // indirect 
github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-cmp v0.6.0 // indirect - github.com/google/go-github/v63 v63.0.0 // indirect + github.com/google/go-github/v64 v64.0.0 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/s2a-go v0.1.8 // indirect @@ -641,6 +641,7 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/internal/docker v0.108.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.108.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.108.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/grpcutil v0.108.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.108.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka v0.108.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/internal/kubelet v0.108.0 // indirect @@ -839,8 +840,8 @@ require ( golang.org/x/tools v0.24.0 // indirect golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 // indirect gonum.org/v1/gonum v0.15.1 // indirect - google.golang.org/api v0.194.0 // indirect - google.golang.org/genproto v0.0.0-20240814211410-ddb44dafa142 // indirect + google.golang.org/api v0.195.0 // indirect + google.golang.org/genproto v0.0.0-20240823204242-4ba0660f739c // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240822170219-fc7c04adadcd // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240823204242-4ba0660f739c // indirect google.golang.org/grpc v1.66.0 // indirect @@ -1371,3 +1372,5 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/extension/obse replace 
github.com/open-telemetry/opentelemetry-collector-contrib/exporter/rabbitmqexporter => ../../exporter/rabbitmqexporter replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/githubreceiver => ../../receiver/githubreceiver + +replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/grpcutil => ../../internal/grpcutil diff --git a/cmd/otelcontribcol/go.sum b/cmd/otelcontribcol/go.sum index 90ea0a31600e..d71f2ac01bae 100644 --- a/cmd/otelcontribcol/go.sum +++ b/cmd/otelcontribcol/go.sum @@ -321,8 +321,8 @@ cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGE cloud.google.com/go/iam v0.11.0/go.mod h1:9PiLDanza5D+oWFZiH1uG+RnRCfEGKoyl6yo4cgWZGY= cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY= cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= -cloud.google.com/go/iam v1.1.12 h1:JixGLimRrNGcxvJEQ8+clfLxPlbeZA6MuRJ+qJNQ5Xw= -cloud.google.com/go/iam v1.1.12/go.mod h1:9LDX8J7dN5YRyzVHxwQzrQs9opFFqn0Mxs9nAeB+Hhg= +cloud.google.com/go/iam v1.1.13 h1:7zWBXG9ERbMLrzQBRhFliAV+kjcRToDTgQT3CTwYyv4= +cloud.google.com/go/iam v1.1.13/go.mod h1:K8mY0uSXwEXS30KrnVb+j54LB/ntfZu1dr+4zFMNbus= cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= cloud.google.com/go/iap v1.6.0/go.mod h1:NSuvI9C/j7UdjGjIde7t7HBz+QTwBcapPE07+sSRcLk= @@ -357,8 +357,8 @@ cloud.google.com/go/logging v1.11.0/go.mod h1:5LDiJC/RxTt+fHc1LAt20R9TKiUTReDg6R cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= -cloud.google.com/go/longrunning v0.5.11 h1:Havn1kGjz3whCfoD8dxMLP73Ph5w+ODyZB9RUsDxtGk= 
-cloud.google.com/go/longrunning v0.5.11/go.mod h1:rDn7//lmlfWV1Dx6IB4RatCPenTwwmqXuiP0/RgoEO4= +cloud.google.com/go/longrunning v0.5.12 h1:5LqSIdERr71CqfUsFlJdBpOkBH8FBCFD7P1nTWy3TYE= +cloud.google.com/go/longrunning v0.5.12/go.mod h1:S5hMV8CDJ6r50t2ubVJSKQVv5u0rmik5//KgLO3k4lU= cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= @@ -382,8 +382,8 @@ cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhI cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= cloud.google.com/go/monitoring v1.12.0/go.mod h1:yx8Jj2fZNEkL/GYZyTLS4ZtZEZN8WtDEiEqG4kLK50w= cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= -cloud.google.com/go/monitoring v1.20.3 h1:v/7MXFxYrhXLEZ9sSfwXdlTLLB/xrU7xTyYjY5acynQ= -cloud.google.com/go/monitoring v1.20.3/go.mod h1:GPIVIdNznIdGqEjtRKQWTLcUeRnPjZW85szouimiczU= +cloud.google.com/go/monitoring v1.20.4 h1:zwcViK7mT9SV0kzKqLOI3spRadvsmvw/R9z1MHNeC0E= +cloud.google.com/go/monitoring v1.20.4/go.mod h1:v7F/UcLRw15EX7xq565N7Ae5tnYEE28+Cl717aTXG4c= cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= @@ -570,8 +570,8 @@ cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y= cloud.google.com/go/trace v1.8.0/go.mod h1:zH7vcsbAhklH8hWFig58HvxcxyQbaIqMarMg9hn5ECA= cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= 
-cloud.google.com/go/trace v1.10.11 h1:+Y1emOgcyGy6OdJ2KQbT4t2oecPp49GtJn8j3GM1pWo= -cloud.google.com/go/trace v1.10.11/go.mod h1:fUr5L3wSXerNfT0f1bBg08W4axS2VbHGgYcfH4KuTXU= +cloud.google.com/go/trace v1.10.12 h1:GoGZv1iAXEa73HgSGNjRl2vKqp5/f2AeKqErRFXA2kg= +cloud.google.com/go/trace v1.10.12/go.mod h1:tYkAIta/gxgbBZ/PIzFxSH5blajgX4D00RpQqCG/GZs= cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= cloud.google.com/go/translate v1.5.0/go.mod h1:29YDSYveqqpA1CQFD7NQuP49xymq17RXNaUDdc0mNu0= @@ -1044,8 +1044,8 @@ github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.16 h1:jg16PhLPUiHIj github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.16/go.mod h1:Uyk1zE1VVdsHSU7096h/rwnXDzOzYQVl+FNPhPw7ShY= github.com/aws/aws-sdk-go-v2/service/kinesis v1.29.5 h1:iirGMva2IXw4kcqsvuF+uc8ARweuVqoQJjzRZGaiV1E= github.com/aws/aws-sdk-go-v2/service/kinesis v1.29.5/go.mod h1:pKTvEQz1PcNd+gKArVyeHpVM63AWnFqYyg07WAQQANQ= -github.com/aws/aws-sdk-go-v2/service/s3 v1.60.1 h1:mx2ucgtv+MWzJesJY9Ig/8AFHgoE5FwLXwUVgW/FGdI= -github.com/aws/aws-sdk-go-v2/service/s3 v1.60.1/go.mod h1:BSPI0EfnYUuNHPS0uqIo5VrRwzie+Fp+YhQOUs16sKI= +github.com/aws/aws-sdk-go-v2/service/s3 v1.61.0 h1:Wb544Wh+xfSXqJ/j3R4aX9wrKUoZsJNmilBYZb3mKQ4= +github.com/aws/aws-sdk-go-v2/service/s3 v1.61.0/go.mod h1:BSPI0EfnYUuNHPS0uqIo5VrRwzie+Fp+YhQOUs16sKI= github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.32.6 h1:3TZlWvCC813uhS1Z4fVTmBhg41OYUrgSlvXqIDDkurw= github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.32.6/go.mod h1:5NPkI3RsTOhwz1CuG7VVSgJCm3CINKkoIaUbUZWQ67w= github.com/aws/aws-sdk-go-v2/service/servicediscovery v1.31.5 h1:z7nPig/pFU+TAAKouI51pCVQPEeQHZC2mZXSK+g0Av8= @@ -1483,8 +1483,8 @@ github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN 
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-github/v63 v63.0.0 h1:13xwK/wk9alSokujB9lJkuzdmQuVn2QCPeck76wR3nE= -github.com/google/go-github/v63 v63.0.0/go.mod h1:IqbcrgUmIcEaioWrGYei/09o+ge5vhffGOcxrO0AfmA= +github.com/google/go-github/v64 v64.0.0 h1:4G61sozmY3eiPAjjoOHponXDBONm+utovTKbyUb2Qdg= +github.com/google/go-github/v64 v64.0.0/go.mod h1:xB3vqMQNdHzilXBiO2I+M7iEFtHf+DP/omBOv6tQzVo= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -2258,8 +2258,12 @@ github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae h1:4hwBBUfQCFe3C github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= github.com/vmihailenco/msgpack/v4 v4.3.13 h1:A2wsiTbvp63ilDaWmsk2wjx6xZdxQOvpiNlKBGKKXKI= github.com/vmihailenco/msgpack/v4 v4.3.13/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= +github.com/vmihailenco/msgpack/v5 v5.4.1 h1:cQriyiUvjTwOHg8QZaPihLWeRAAVoCpE00IUPn0Bjt8= +github.com/vmihailenco/msgpack/v5 v5.4.1/go.mod h1:GaZTsDaehaPpQVyxrf5mtQlH+pc21PIudVV/E3rRQok= github.com/vmihailenco/tagparser v0.1.2 h1:gnjoVuB/kljJ5wICEEOpx98oXMWPLj22G67Vbd1qPqc= github.com/vmihailenco/tagparser v0.1.2/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= +github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= +github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod 
h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= github.com/vmware/go-vmware-nsxt v0.0.0-20230223012718-d31b8a1ca05e h1:Vu41Q0Pv3yMdd+tcDW6QeEUIK2L+9ZrPrq8NAMrKSLc= github.com/vmware/go-vmware-nsxt v0.0.0-20230223012718-d31b8a1ca05e/go.mod h1:aRq5pxwgdJpAuP97SCjX1+Db32z/b0dggQ07FDF+fqE= github.com/vmware/govmomi v0.42.0 h1:MbvAlVfjNBE1mHMaQ7yOSop1KLB0/93x6VAGuCtjqtI= @@ -3049,8 +3053,8 @@ google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/ google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI= google.golang.org/api v0.111.0/go.mod h1:qtFHvU9mhgTJegR31csQ+rwxyUTHOKFqCKWp1J0fdw0= google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= -google.golang.org/api v0.194.0 h1:dztZKG9HgtIpbI35FhfuSNR/zmaMVdxNlntHj1sIS4s= -google.golang.org/api v0.194.0/go.mod h1:AgvUFdojGANh3vI+P7EVnxj3AISHllxGCJSFmggmnd0= +google.golang.org/api v0.195.0 h1:Ude4N8FvTKnnQJHU48RFI40jOBgIrL8Zqr3/QeST6yU= +google.golang.org/api v0.195.0/go.mod h1:DOGRWuv3P8TU8Lnz7uQc4hyNqrBpMtD9ppW3wBJurgc= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -3193,8 +3197,8 @@ google.golang.org/genproto v0.0.0-20230323212658-478b75c54725/go.mod h1:UUQDJDOl google.golang.org/genproto v0.0.0-20230330154414-c0448cd141ea/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= -google.golang.org/genproto v0.0.0-20240814211410-ddb44dafa142 h1:oLiyxGgE+rt22duwci1+TG7bg2/L1LQsXwfjPlmuJA0= -google.golang.org/genproto v0.0.0-20240814211410-ddb44dafa142/go.mod 
h1:G11eXq53iI5Q+kyNOmCvnzBaxEA2Q/Ik5Tj7nqBE8j4= +google.golang.org/genproto v0.0.0-20240823204242-4ba0660f739c h1:TYOEhrQMrNDTAd2rX9m+WgGr8Ku6YNuj1D7OX6rWSok= +google.golang.org/genproto v0.0.0-20240823204242-4ba0660f739c/go.mod h1:2rC5OendXvZ8wGEo/cSLheztrZDZaSoHanUcd1xtZnw= google.golang.org/genproto/googleapis/api v0.0.0-20240822170219-fc7c04adadcd h1:BBOTEWLuuEGQy9n1y9MhVJ9Qt0BDu21X8qZs71/uPZo= google.golang.org/genproto/googleapis/api v0.0.0-20240822170219-fc7c04adadcd/go.mod h1:fO8wJzT2zbQbAjbIoos1285VfEIYKDDY+Dt+WpTkh6g= google.golang.org/genproto/googleapis/rpc v0.0.0-20240823204242-4ba0660f739c h1:Kqjm4WpoWvwhMPcrAczoTyMySQmYa9Wy2iL6Con4zn8= diff --git a/cmd/telemetrygen/internal/logs/worker_test.go b/cmd/telemetrygen/internal/logs/worker_test.go index ccfd5728f6fd..3e759b606154 100644 --- a/cmd/telemetrygen/internal/logs/worker_test.go +++ b/cmd/telemetrygen/internal/logs/worker_test.go @@ -177,7 +177,7 @@ func TestLogsWithOneTelemetryAttributes(t *testing.T) { l.WalkAttributes(func(attr log.KeyValue) bool { if attr.Key == telemetryAttrKeyOne { - assert.EqualValues(t, attr.Value.AsString(), telemetryAttrValueOne) + assert.EqualValues(t, telemetryAttrValueOne, attr.Value.AsString()) } return true }) diff --git a/cmd/telemetrygen/internal/metrics/worker_test.go b/cmd/telemetrygen/internal/metrics/worker_test.go index 6e450db04758..67539d5f009a 100644 --- a/cmd/telemetrygen/internal/metrics/worker_test.go +++ b/cmd/telemetrygen/internal/metrics/worker_test.go @@ -202,7 +202,7 @@ func TestSumSingleTelemetryAttr(t *testing.T) { attr := ms.Data.(metricdata.Sum[int64]).DataPoints[0].Attributes assert.Equal(t, 1, attr.Len(), "it must have a single attribute here") actualValue, _ := attr.Value(telemetryAttrKeyOne) - assert.Equal(t, actualValue.AsString(), telemetryAttrValueOne, "it should be "+telemetryAttrValueOne) + assert.Equal(t, telemetryAttrValueOne, actualValue.AsString(), "it should be "+telemetryAttrValueOne) } } @@ -232,7 +232,7 @@ func 
TestGaugeSingleTelemetryAttr(t *testing.T) { attr := ms.Data.(metricdata.Gauge[int64]).DataPoints[0].Attributes assert.Equal(t, 1, attr.Len(), "it must have a single attribute here") actualValue, _ := attr.Value(telemetryAttrKeyOne) - assert.Equal(t, actualValue.AsString(), telemetryAttrValueOne, "it should be "+telemetryAttrValueOne) + assert.Equal(t, telemetryAttrValueOne, actualValue.AsString(), "it should be "+telemetryAttrValueOne) } } diff --git a/cmd/telemetrygen/internal/traces/worker_test.go b/cmd/telemetrygen/internal/traces/worker_test.go index c3aff22a8803..33c921febfe5 100644 --- a/cmd/telemetrygen/internal/traces/worker_test.go +++ b/cmd/telemetrygen/internal/traces/worker_test.go @@ -185,7 +185,7 @@ func TestSpanKind(t *testing.T) { // verify that the default Span Kind is being overridden for _, span := range syncer.spans { - assert.NotEqual(t, span.SpanKind(), trace.SpanKindInternal) + assert.NotEqual(t, trace.SpanKindInternal, span.SpanKind()) } } diff --git a/confmap/provider/s3provider/go.mod b/confmap/provider/s3provider/go.mod index 66d058884a61..6e1aa745eeee 100644 --- a/confmap/provider/s3provider/go.mod +++ b/confmap/provider/s3provider/go.mod @@ -5,7 +5,7 @@ go 1.22.0 require ( github.com/aws/aws-sdk-go-v2 v1.30.4 github.com/aws/aws-sdk-go-v2/config v1.27.31 - github.com/aws/aws-sdk-go-v2/service/s3 v1.60.1 + github.com/aws/aws-sdk-go-v2/service/s3 v1.61.0 github.com/stretchr/testify v1.9.0 go.opentelemetry.io/collector/confmap v1.14.2-0.20240904075637-48b11ba1c5f8 go.uber.org/goleak v1.3.0 diff --git a/confmap/provider/s3provider/go.sum b/confmap/provider/s3provider/go.sum index 2df3c9ab78f8..ef3506b3fe12 100644 --- a/confmap/provider/s3provider/go.sum +++ b/confmap/provider/s3provider/go.sum @@ -24,8 +24,8 @@ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.18 h1:tJ5RnkHC github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.18/go.mod 
h1:++NHzT+nAF7ZPrHPsA+ENvsXkOO8wEu+C6RXltAG4/c= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.16 h1:jg16PhLPUiHIj8zYIW6bqzeQSuHVEiWnGA0Brz5Xv2I= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.16/go.mod h1:Uyk1zE1VVdsHSU7096h/rwnXDzOzYQVl+FNPhPw7ShY= -github.com/aws/aws-sdk-go-v2/service/s3 v1.60.1 h1:mx2ucgtv+MWzJesJY9Ig/8AFHgoE5FwLXwUVgW/FGdI= -github.com/aws/aws-sdk-go-v2/service/s3 v1.60.1/go.mod h1:BSPI0EfnYUuNHPS0uqIo5VrRwzie+Fp+YhQOUs16sKI= +github.com/aws/aws-sdk-go-v2/service/s3 v1.61.0 h1:Wb544Wh+xfSXqJ/j3R4aX9wrKUoZsJNmilBYZb3mKQ4= +github.com/aws/aws-sdk-go-v2/service/s3 v1.61.0/go.mod h1:BSPI0EfnYUuNHPS0uqIo5VrRwzie+Fp+YhQOUs16sKI= github.com/aws/aws-sdk-go-v2/service/sso v1.22.5 h1:zCsFCKvbj25i7p1u94imVoO447I/sFv8qq+lGJhRN0c= github.com/aws/aws-sdk-go-v2/service/sso v1.22.5/go.mod h1:ZeDX1SnKsVlejeuz41GiajjZpRSWR7/42q/EyA/QEiM= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.5 h1:SKvPgvdvmiTWoi0GAJ7AsJfOz3ngVkD/ERbs5pUnHNI= diff --git a/connector/failoverconnector/failover_test.go b/connector/failoverconnector/failover_test.go index 9b21d58ccec3..ddee5aaaf653 100644 --- a/connector/failoverconnector/failover_test.go +++ b/connector/failoverconnector/failover_test.go @@ -58,7 +58,7 @@ func TestFailoverRecovery(t *testing.T) { require.NoError(t, conn.ConsumeTraces(context.Background(), tr)) idx := failoverConnector.failover.pS.TestStableIndex() - require.Equal(t, idx, 1) + require.Equal(t, 1, idx) failoverConnector.failover.ModifyConsumerAtIndex(0, &sinkFirst) @@ -75,7 +75,7 @@ func TestFailoverRecovery(t *testing.T) { require.NoError(t, conn.ConsumeTraces(context.Background(), tr)) idx := failoverConnector.failover.pS.TestStableIndex() - require.Equal(t, idx, 1) + require.Equal(t, 1, idx) failoverConnector.failover.ModifyConsumerAtIndex(0, &sinkFirst) @@ -97,7 +97,7 @@ func TestFailoverRecovery(t *testing.T) { require.NoError(t, 
conn.ConsumeTraces(context.Background(), tr)) idx := failoverConnector.failover.pS.TestStableIndex() - require.Equal(t, idx, 2) + require.Equal(t, 2, idx) // Simulate recovery of exporter failoverConnector.failover.ModifyConsumerAtIndex(1, &sinkSecond) diff --git a/connector/failoverconnector/internal/state/pipeline_selector_test.go b/connector/failoverconnector/internal/state/pipeline_selector_test.go index 7123b3b70496..13e7d06f5364 100644 --- a/connector/failoverconnector/internal/state/pipeline_selector_test.go +++ b/connector/failoverconnector/internal/state/pipeline_selector_test.go @@ -22,8 +22,8 @@ func TestSelectPipeline(t *testing.T) { idx, ch := pS.SelectedPipeline() - require.Equal(t, idx, 0) - require.Equal(t, pS.ChannelIndex(ch), 0) + require.Equal(t, 0, idx) + require.Equal(t, 0, pS.ChannelIndex(ch)) } func TestHandlePipelineError(t *testing.T) { @@ -44,7 +44,7 @@ func TestHandlePipelineError(t *testing.T) { }() idx, ch := pS.SelectedPipeline() - require.Equal(t, idx, 0) + require.Equal(t, 0, idx) ch <- false require.Eventually(t, func() bool { diff --git a/connector/routingconnector/logs_test.go b/connector/routingconnector/logs_test.go index f303bf65ccd8..6b291c7d6005 100644 --- a/connector/routingconnector/logs_test.go +++ b/connector/routingconnector/logs_test.go @@ -177,8 +177,8 @@ func TestLogsAreCorrectlySplitPerResourceAttributeWithOTTL(t *testing.T) { assert.Len(t, sink0.AllLogs(), 1) assert.Len(t, sink1.AllLogs(), 1) - assert.Equal(t, sink0.AllLogs()[0].LogRecordCount(), 2) - assert.Equal(t, sink1.AllLogs()[0].LogRecordCount(), 2) + assert.Equal(t, 2, sink0.AllLogs()[0].LogRecordCount()) + assert.Equal(t, 2, sink1.AllLogs()[0].LogRecordCount()) assert.Equal(t, sink0.AllLogs(), sink1.AllLogs()) }) @@ -206,7 +206,7 @@ func TestLogsAreCorrectlySplitPerResourceAttributeWithOTTL(t *testing.T) { rlog := defaultSink.AllLogs()[0].ResourceLogs().At(0) attr, ok := rlog.Resource().Attributes().Get("X-Tenant") assert.True(t, ok, "routing attribute must 
exists") - assert.Equal(t, attr.AsString(), "something-else") + assert.Equal(t, "something-else", attr.AsString()) }) t.Run("logs matched by one expression, multiple pipelines", func(t *testing.T) { @@ -224,8 +224,8 @@ func TestLogsAreCorrectlySplitPerResourceAttributeWithOTTL(t *testing.T) { assert.Len(t, sink0.AllLogs(), 1) assert.Empty(t, sink1.AllLogs()) - assert.Equal(t, defaultSink.AllLogs()[0].LogRecordCount(), 1) - assert.Equal(t, sink0.AllLogs()[0].LogRecordCount(), 1) + assert.Equal(t, 1, defaultSink.AllLogs()[0].LogRecordCount()) + assert.Equal(t, 1, sink0.AllLogs()[0].LogRecordCount()) assert.Equal(t, defaultSink.AllLogs(), sink0.AllLogs()) }) } @@ -333,7 +333,7 @@ func TestLogsAreCorrectlyMatchOnceWithOTTL(t *testing.T) { assert.Len(t, sink0.AllLogs(), 1) assert.Empty(t, sink1.AllLogs()) - assert.Equal(t, sink0.AllLogs()[0].LogRecordCount(), 2) + assert.Equal(t, 2, sink0.AllLogs()[0].LogRecordCount()) }) t.Run("one log matched by multiple expressions, other matched none", func(t *testing.T) { @@ -358,7 +358,7 @@ func TestLogsAreCorrectlyMatchOnceWithOTTL(t *testing.T) { rlog := defaultSink.AllLogs()[0].ResourceLogs().At(0) attr, ok := rlog.Resource().Attributes().Get("X-Tenant") assert.True(t, ok, "routing attribute must exists") - assert.Equal(t, attr.AsString(), "something-else") + assert.Equal(t, "something-else", attr.AsString()) }) t.Run("logs matched by one expression, multiple pipelines", func(t *testing.T) { @@ -376,8 +376,8 @@ func TestLogsAreCorrectlyMatchOnceWithOTTL(t *testing.T) { assert.Len(t, sink0.AllLogs(), 1) assert.Empty(t, sink1.AllLogs()) - assert.Equal(t, defaultSink.AllLogs()[0].LogRecordCount(), 1) - assert.Equal(t, sink0.AllLogs()[0].LogRecordCount(), 1) + assert.Equal(t, 1, defaultSink.AllLogs()[0].LogRecordCount()) + assert.Equal(t, 1, sink0.AllLogs()[0].LogRecordCount()) assert.Equal(t, defaultSink.AllLogs(), sink0.AllLogs()) }) } diff --git a/connector/routingconnector/metrics_test.go 
b/connector/routingconnector/metrics_test.go index e1add4559fb3..82fe74855bc9 100644 --- a/connector/routingconnector/metrics_test.go +++ b/connector/routingconnector/metrics_test.go @@ -186,8 +186,8 @@ func TestMetricsAreCorrectlySplitPerResourceAttributeWithOTTL(t *testing.T) { assert.Len(t, sink0.AllMetrics(), 1) assert.Len(t, sink1.AllMetrics(), 1) - assert.Equal(t, sink0.AllMetrics()[0].MetricCount(), 2) - assert.Equal(t, sink1.AllMetrics()[0].MetricCount(), 2) + assert.Equal(t, 2, sink0.AllMetrics()[0].MetricCount()) + assert.Equal(t, 2, sink1.AllMetrics()[0].MetricCount()) assert.Equal(t, sink0.AllMetrics(), sink1.AllMetrics()) }) @@ -239,8 +239,8 @@ func TestMetricsAreCorrectlySplitPerResourceAttributeWithOTTL(t *testing.T) { assert.Len(t, sink0.AllMetrics(), 1) assert.Empty(t, sink1.AllMetrics()) - assert.Equal(t, defaultSink.AllMetrics()[0].MetricCount(), 1) - assert.Equal(t, sink0.AllMetrics()[0].MetricCount(), 1) + assert.Equal(t, 1, defaultSink.AllMetrics()[0].MetricCount()) + assert.Equal(t, 1, sink0.AllMetrics()[0].MetricCount()) assert.Equal(t, defaultSink.AllMetrics(), sink0.AllMetrics()) }) } @@ -357,7 +357,7 @@ func TestMetricsAreCorrectlyMatchOnceWithOTTL(t *testing.T) { assert.Len(t, sink0.AllMetrics(), 1) assert.Empty(t, sink1.AllMetrics()) - assert.Equal(t, sink0.AllMetrics()[0].MetricCount(), 2) + assert.Equal(t, 2, sink0.AllMetrics()[0].MetricCount()) }) t.Run("one metric matched by 2 expressions, others matched by none", func(t *testing.T) { @@ -406,8 +406,8 @@ func TestMetricsAreCorrectlyMatchOnceWithOTTL(t *testing.T) { assert.Len(t, sink0.AllMetrics(), 1) assert.Empty(t, sink1.AllMetrics()) - assert.Equal(t, defaultSink.AllMetrics()[0].MetricCount(), 1) - assert.Equal(t, sink0.AllMetrics()[0].MetricCount(), 1) + assert.Equal(t, 1, defaultSink.AllMetrics()[0].MetricCount()) + assert.Equal(t, 1, sink0.AllMetrics()[0].MetricCount()) assert.Equal(t, defaultSink.AllMetrics(), sink0.AllMetrics()) }) } diff --git 
a/connector/routingconnector/traces_test.go b/connector/routingconnector/traces_test.go index 61f515461d4f..33f39cd270c1 100644 --- a/connector/routingconnector/traces_test.go +++ b/connector/routingconnector/traces_test.go @@ -179,8 +179,8 @@ func TestTracesCorrectlySplitPerResourceAttributeWithOTTL(t *testing.T) { assert.Len(t, sink0.AllTraces(), 1) assert.Len(t, sink1.AllTraces(), 1) - assert.Equal(t, sink0.AllTraces()[0].SpanCount(), 2) - assert.Equal(t, sink1.AllTraces()[0].SpanCount(), 2) + assert.Equal(t, 2, sink0.AllTraces()[0].SpanCount()) + assert.Equal(t, 2, sink1.AllTraces()[0].SpanCount()) assert.Equal(t, sink0.AllTraces(), sink1.AllTraces()) }) @@ -199,8 +199,8 @@ func TestTracesCorrectlySplitPerResourceAttributeWithOTTL(t *testing.T) { assert.Len(t, sink0.AllTraces(), 1) assert.Empty(t, sink1.AllTraces()) - assert.Equal(t, defaultSink.AllTraces()[0].SpanCount(), 1) - assert.Equal(t, sink0.AllTraces()[0].SpanCount(), 1) + assert.Equal(t, 1, defaultSink.AllTraces()[0].SpanCount()) + assert.Equal(t, 1, sink0.AllTraces()[0].SpanCount()) assert.Equal(t, defaultSink.AllTraces(), sink0.AllTraces()) }) } @@ -310,7 +310,7 @@ func TestTracesCorrectlyMatchOnceWithOTTL(t *testing.T) { assert.Len(t, sink0.AllTraces(), 1) assert.Empty(t, sink1.AllTraces()) - assert.Equal(t, sink0.AllTraces()[0].SpanCount(), 2) + assert.Equal(t, 2, sink0.AllTraces()[0].SpanCount()) }) t.Run("span matched by one expression, multiple pipelines", func(t *testing.T) { @@ -328,8 +328,8 @@ func TestTracesCorrectlyMatchOnceWithOTTL(t *testing.T) { assert.Len(t, sink0.AllTraces(), 1) assert.Empty(t, sink1.AllTraces()) - assert.Equal(t, defaultSink.AllTraces()[0].SpanCount(), 1) - assert.Equal(t, sink0.AllTraces()[0].SpanCount(), 1) + assert.Equal(t, 1, defaultSink.AllTraces()[0].SpanCount()) + assert.Equal(t, 1, sink0.AllTraces()[0].SpanCount()) assert.Equal(t, defaultSink.AllTraces(), sink0.AllTraces()) }) } diff --git a/connector/servicegraphconnector/connector.go 
b/connector/servicegraphconnector/connector.go index f3d3909a2074..587daa1e6e2e 100644 --- a/connector/servicegraphconnector/connector.go +++ b/connector/servicegraphconnector/connector.go @@ -31,6 +31,8 @@ const ( clientKind = "client" serverKind = "server" virtualNodeLabel = "virtual_node" + millisecondsUnit = "ms" + secondsUnit = "s" ) var ( @@ -522,10 +524,10 @@ func (p *serviceGraphConnector) collectCountMetrics(ilm pmetric.ScopeMetrics) er func (p *serviceGraphConnector) collectLatencyMetrics(ilm pmetric.ScopeMetrics) error { // TODO: Remove this once legacy metric names are removed if legacyMetricNamesFeatureGate.IsEnabled() { - return p.collectServerLatencyMetrics(ilm, "traces_service_graph_request_duration_seconds") + return p.collectServerLatencyMetrics(ilm, "traces_service_graph_request_duration") } - if err := p.collectServerLatencyMetrics(ilm, "traces_service_graph_request_server_seconds"); err != nil { + if err := p.collectServerLatencyMetrics(ilm, "traces_service_graph_request_server"); err != nil { return err } @@ -535,7 +537,11 @@ func (p *serviceGraphConnector) collectLatencyMetrics(ilm pmetric.ScopeMetrics) func (p *serviceGraphConnector) collectClientLatencyMetrics(ilm pmetric.ScopeMetrics) error { if len(p.reqServerDurationSecondsCount) > 0 { mDuration := ilm.Metrics().AppendEmpty() - mDuration.SetName("traces_service_graph_request_client_seconds") + mDuration.SetName("traces_service_graph_request_client") + mDuration.SetUnit(secondsUnit) + if legacyLatencyUnitMsFeatureGate.IsEnabled() { + mDuration.SetUnit(millisecondsUnit) + } // TODO: Support other aggregation temporalities mDuration.SetEmptyHistogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) timestamp := pcommon.NewTimestampFromTime(time.Now()) @@ -565,6 +571,10 @@ func (p *serviceGraphConnector) collectServerLatencyMetrics(ilm pmetric.ScopeMet if len(p.reqServerDurationSecondsCount) > 0 { mDuration := ilm.Metrics().AppendEmpty() mDuration.SetName(mName) + 
mDuration.SetUnit(secondsUnit) + if legacyLatencyUnitMsFeatureGate.IsEnabled() { + mDuration.SetUnit(millisecondsUnit) + } // TODO: Support other aggregation temporalities mDuration.SetEmptyHistogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) timestamp := pcommon.NewTimestampFromTime(time.Now()) diff --git a/connector/servicegraphconnector/connector_test.go b/connector/servicegraphconnector/connector_test.go index f014cf5fd655..4c52d2733788 100644 --- a/connector/servicegraphconnector/connector_test.go +++ b/connector/servicegraphconnector/connector_test.go @@ -163,7 +163,7 @@ func TestConnectorConsume(t *testing.T) { }, sampleTraces: buildSampleTrace(t, "val"), gates: []*featuregate.Gate{legacyLatencyUnitMsFeatureGate}, - verifyMetrics: verifyHappyCaseMetricsWithDuration(2000, 1000), + verifyMetrics: verifyHappyCaseLatencyMetrics(), }, } { t.Run(tc.name, func(t *testing.T) { @@ -226,15 +226,22 @@ func verifyHappyCaseMetricsWithDuration(serverDurationSum, clientDurationSum flo verifyCount(t, mCount) mServerDuration := ms.At(1) - assert.Equal(t, "traces_service_graph_request_server_seconds", mServerDuration.Name()) + assert.Equal(t, "traces_service_graph_request_server", mServerDuration.Name()) verifyDuration(t, mServerDuration, serverDurationSum, []uint64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0}) mClientDuration := ms.At(2) - assert.Equal(t, "traces_service_graph_request_client_seconds", mClientDuration.Name()) + assert.Equal(t, "traces_service_graph_request_client", mClientDuration.Name()) verifyDuration(t, mClientDuration, clientDurationSum, []uint64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0}) } } +func verifyHappyCaseLatencyMetrics() func(t *testing.T, md pmetric.Metrics) { + return func(t *testing.T, md pmetric.Metrics) { + verifyHappyCaseMetricsWithDuration(2000, 1000)(t, md) + verifyUnit(t, md.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(1).Unit(), millisecondsUnit) + } +} + func verifyCount(t 
*testing.T, m pmetric.Metric) { assert.Equal(t, "traces_service_graph_request_total", m.Name()) @@ -281,6 +288,10 @@ func verifyAttr(t *testing.T, attrs pcommon.Map, k, expected string) { assert.Equal(t, expected, v.AsString()) } +func verifyUnit(t *testing.T, expected, actual string) { + assert.Equal(t, expected, actual) +} + func buildSampleTrace(t *testing.T, attrValue string) ptrace.Traces { tStart := time.Date(2022, 1, 2, 3, 4, 5, 6, time.UTC) // client: 1s diff --git a/connector/servicegraphconnector/testdata/extra-dimensions-queue-db-expected-metrics.yaml b/connector/servicegraphconnector/testdata/extra-dimensions-queue-db-expected-metrics.yaml index 7d02666be8f1..cf030bca2ddb 100644 --- a/connector/servicegraphconnector/testdata/extra-dimensions-queue-db-expected-metrics.yaml +++ b/connector/servicegraphconnector/testdata/extra-dimensions-queue-db-expected-metrics.yaml @@ -11,6 +11,9 @@ resourceMetrics: - key: client value: stringValue: foo-server + - key: client_messaging.system + value: + stringValue: kafka - key: connection_type value: stringValue: "" @@ -20,9 +23,6 @@ resourceMetrics: - key: server value: stringValue: bar-requester - - key: client_messaging.system - value: - stringValue: kafka - key: server_db.system value: stringValue: postgresql @@ -36,6 +36,9 @@ resourceMetrics: - key: client value: stringValue: foo-server + - key: client_messaging.system + value: + stringValue: kafka - key: connection_type value: stringValue: "" @@ -45,9 +48,6 @@ resourceMetrics: - key: server value: stringValue: bar-requester - - key: client_messaging.system - value: - stringValue: kafka - key: server_db.system value: stringValue: postgresql @@ -62,9 +62,10 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.000001 + sum: 1e-06 timeUnixNano: "2000000" - name: traces_service_graph_request_server_seconds + name: traces_service_graph_request_server + unit: s - histogram: aggregationTemporality: 2 dataPoints: @@ -72,6 +73,9 @@ resourceMetrics: - key: 
client value: stringValue: foo-server + - key: client_messaging.system + value: + stringValue: kafka - key: connection_type value: stringValue: "" @@ -81,9 +85,6 @@ resourceMetrics: - key: server value: stringValue: bar-requester - - key: client_messaging.system - value: - stringValue: kafka - key: server_db.system value: stringValue: postgresql @@ -98,8 +99,9 @@ resourceMetrics: - 1 - 10 startTimeUnixNano: "1000000" - sum: 0.000001 + sum: 1e-06 timeUnixNano: "2000000" - name: traces_service_graph_request_client_seconds + name: traces_service_graph_request_client + unit: s scope: name: traces_service_graph diff --git a/connector/servicegraphconnector/testdata/failed-label-not-work-expect-metrics.yaml b/connector/servicegraphconnector/testdata/failed-label-not-work-expect-metrics.yaml index fe258bac5b50..7012b3020524 100644 --- a/connector/servicegraphconnector/testdata/failed-label-not-work-expect-metrics.yaml +++ b/connector/servicegraphconnector/testdata/failed-label-not-work-expect-metrics.yaml @@ -72,7 +72,7 @@ resourceMetrics: stringValue: "" - key: failed value: - boolValue: true + boolValue: false - key: server value: stringValue: bar @@ -87,14 +87,14 @@ resourceMetrics: - "0" - "0" - "0" - - "1" + - "2" - "0" - "0" - "0" - "0" - "0" - "0" - count: "1" + count: "2" explicitBounds: - 0.002 - 0.004 @@ -113,7 +113,7 @@ resourceMetrics: - 10 - 15 startTimeUnixNano: "1000000" - sum: 1 + sum: 2 timeUnixNano: "2000000" - attributes: - key: client @@ -124,7 +124,7 @@ resourceMetrics: stringValue: "" - key: failed value: - boolValue: false + boolValue: true - key: server value: stringValue: bar @@ -139,14 +139,14 @@ resourceMetrics: - "0" - "0" - "0" - - "2" + - "1" - "0" - "0" - "0" - "0" - "0" - "0" - count: "2" + count: "1" explicitBounds: - 0.002 - 0.004 @@ -165,9 +165,10 @@ resourceMetrics: - 10 - 15 startTimeUnixNano: "1000000" - sum: 2 + sum: 1 timeUnixNano: "2000000" - name: traces_service_graph_request_server_seconds + name: 
traces_service_graph_request_server + unit: s - histogram: aggregationTemporality: 2 dataPoints: @@ -180,7 +181,7 @@ resourceMetrics: stringValue: "" - key: failed value: - boolValue: true + boolValue: false - key: server value: stringValue: bar @@ -195,14 +196,14 @@ resourceMetrics: - "0" - "0" - "0" - - "1" + - "2" - "0" - "0" - "0" - "0" - "0" - "0" - count: "1" + count: "2" explicitBounds: - 0.002 - 0.004 @@ -221,7 +222,7 @@ resourceMetrics: - 10 - 15 startTimeUnixNano: "1000000" - sum: 1 + sum: 2 timeUnixNano: "2000000" - attributes: - key: client @@ -232,7 +233,7 @@ resourceMetrics: stringValue: "" - key: failed value: - boolValue: false + boolValue: true - key: server value: stringValue: bar @@ -247,14 +248,14 @@ resourceMetrics: - "0" - "0" - "0" - - "2" + - "1" - "0" - "0" - "0" - "0" - "0" - "0" - count: "2" + count: "1" explicitBounds: - 0.002 - 0.004 @@ -273,8 +274,9 @@ resourceMetrics: - 10 - 15 startTimeUnixNano: "1000000" - sum: 2 + sum: 1 timeUnixNano: "2000000" - name: traces_service_graph_request_client_seconds + name: traces_service_graph_request_client + unit: s scope: name: traces_service_graph diff --git a/connector/servicegraphconnector/testdata/virtual-node-label-client-expected-metrics.yaml b/connector/servicegraphconnector/testdata/virtual-node-label-client-expected-metrics.yaml index 36511e580846..43d39a40b5cf 100644 --- a/connector/servicegraphconnector/testdata/virtual-node-label-client-expected-metrics.yaml +++ b/connector/servicegraphconnector/testdata/virtual-node-label-client-expected-metrics.yaml @@ -64,7 +64,8 @@ resourceMetrics: startTimeUnixNano: "1000000" sum: 1e-06 timeUnixNano: "2000000" - name: traces_service_graph_request_server_seconds + name: traces_service_graph_request_server + unit: s - histogram: aggregationTemporality: 2 dataPoints: @@ -100,6 +101,7 @@ resourceMetrics: startTimeUnixNano: "1000000" sum: 0 timeUnixNano: "2000000" - name: traces_service_graph_request_client_seconds + name: 
traces_service_graph_request_client + unit: s scope: name: traces_service_graph diff --git a/connector/servicegraphconnector/testdata/virtual-node-label-server-expected-metrics.yaml b/connector/servicegraphconnector/testdata/virtual-node-label-server-expected-metrics.yaml index 362898084b2c..f3b152ff0f04 100644 --- a/connector/servicegraphconnector/testdata/virtual-node-label-server-expected-metrics.yaml +++ b/connector/servicegraphconnector/testdata/virtual-node-label-server-expected-metrics.yaml @@ -58,7 +58,8 @@ resourceMetrics: startTimeUnixNano: "1000000" sum: 0 timeUnixNano: "2000000" - name: traces_service_graph_request_server_seconds + name: traces_service_graph_request_server + unit: s - histogram: aggregationTemporality: 2 dataPoints: @@ -91,6 +92,7 @@ resourceMetrics: startTimeUnixNano: "1000000" sum: 1e-06 timeUnixNano: "2000000" - name: traces_service_graph_request_client_seconds + name: traces_service_graph_request_client + unit: s scope: name: traces_service_graph diff --git a/connector/spanmetricsconnector/connector_test.go b/connector/spanmetricsconnector/connector_test.go index 47758797235f..2a2cf445455d 100644 --- a/connector/spanmetricsconnector/connector_test.go +++ b/connector/spanmetricsconnector/connector_test.go @@ -1647,7 +1647,7 @@ func assertDataPointsHaveExactlyOneExemplarForTrace(t *testing.T, metrics pmetri assert.Greater(t, dps.Len(), 0) for dpi := 0; dpi < dps.Len(); dpi++ { dp := dps.At(dpi) - assert.Equal(t, dp.Exemplars().Len(), 1) + assert.Equal(t, 1, dp.Exemplars().Len()) assert.Equal(t, dp.Exemplars().At(0).TraceID(), traceID) } case pmetric.MetricTypeHistogram: @@ -1655,7 +1655,7 @@ func assertDataPointsHaveExactlyOneExemplarForTrace(t *testing.T, metrics pmetri assert.Greater(t, dps.Len(), 0) for dpi := 0; dpi < dps.Len(); dpi++ { dp := dps.At(dpi) - assert.Equal(t, dp.Exemplars().Len(), 1) + assert.Equal(t, 1, dp.Exemplars().Len()) assert.Equal(t, dp.Exemplars().At(0).TraceID(), traceID) } case 
pmetric.MetricTypeExponentialHistogram: @@ -1663,7 +1663,7 @@ func assertDataPointsHaveExactlyOneExemplarForTrace(t *testing.T, metrics pmetri assert.Greater(t, dps.Len(), 0) for dpi := 0; dpi < dps.Len(); dpi++ { dp := dps.At(dpi) - assert.Equal(t, dp.Exemplars().Len(), 1) + assert.Equal(t, 1, dp.Exemplars().Len()) assert.Equal(t, dp.Exemplars().At(0).TraceID(), traceID) } default: diff --git a/connector/spanmetricsconnector/internal/cache/cache_test.go b/connector/spanmetricsconnector/internal/cache/cache_test.go index 374e8e2f9ba8..83e9b16333d1 100644 --- a/connector/spanmetricsconnector/internal/cache/cache_test.go +++ b/connector/spanmetricsconnector/internal/cache/cache_test.go @@ -127,7 +127,7 @@ func TestCache_Get(t *testing.T) { t.Parallel() c := tt.lruCache() gotValue, gotOk := c.Get(tt.key) - if !assert.Equal(t, gotValue, tt.wantValue) { + if !assert.Equal(t, tt.wantValue, gotValue) { t.Errorf("Get() gotValue = %v, want %v", gotValue, tt.wantValue) } if gotOk != tt.wantOk { diff --git a/exporter/alibabacloudlogserviceexporter/metricsdata_to_logservice_test.go b/exporter/alibabacloudlogserviceexporter/metricsdata_to_logservice_test.go index 400cd5736949..981e7be2dc59 100644 --- a/exporter/alibabacloudlogserviceexporter/metricsdata_to_logservice_test.go +++ b/exporter/alibabacloudlogserviceexporter/metricsdata_to_logservice_test.go @@ -130,12 +130,12 @@ func TestMetricDataToLogService(t *testing.T) { } func TestMetricCornerCases(t *testing.T) { - assert.Equal(t, min(1, 2), 1) - assert.Equal(t, min(2, 1), 1) - assert.Equal(t, min(1, 1), 1) + assert.Equal(t, 1, min(1, 2)) + assert.Equal(t, 1, min(2, 1)) + assert.Equal(t, 1, min(1, 1)) var label KeyValues label.Append("a", "b") - assert.Equal(t, label.String(), "a#$#b") + assert.Equal(t, "a#$#b", label.String()) } func TestMetricLabelSanitize(t *testing.T) { @@ -144,7 +144,7 @@ func TestMetricLabelSanitize(t *testing.T) { label.Append("0test", "key_0test") label.Append("test_normal", "test_normal") 
label.Append("0test", "key_0test") - assert.Equal(t, label.String(), "key_test#$#key_test|key_0test#$#key_0test|test_normal#$#test_normal|key_0test#$#key_0test") + assert.Equal(t, "key_test#$#key_test|key_0test#$#key_0test|test_normal#$#test_normal|key_0test#$#key_0test", label.String()) label.Sort() - assert.Equal(t, label.String(), "key_0test#$#key_0test|key_0test#$#key_0test|key_test#$#key_test|test_normal#$#test_normal") + assert.Equal(t, "key_0test#$#key_0test|key_0test#$#key_0test|key_test#$#key_test|test_normal#$#test_normal", label.String()) } diff --git a/exporter/alibabacloudlogserviceexporter/tracedata_to_logservice_test.go b/exporter/alibabacloudlogserviceexporter/tracedata_to_logservice_test.go index 8cf82da88c4a..276cee9252cc 100644 --- a/exporter/alibabacloudlogserviceexporter/tracedata_to_logservice_test.go +++ b/exporter/alibabacloudlogserviceexporter/tracedata_to_logservice_test.go @@ -176,16 +176,16 @@ func newSegmentID() pcommon.SpanID { } func TestSpanKindToShortString(t *testing.T) { - assert.Equal(t, spanKindToShortString(ptrace.SpanKindConsumer), "consumer") - assert.Equal(t, spanKindToShortString(ptrace.SpanKindProducer), "producer") - assert.Equal(t, spanKindToShortString(ptrace.SpanKindClient), "client") - assert.Equal(t, spanKindToShortString(ptrace.SpanKindServer), "server") - assert.Equal(t, spanKindToShortString(ptrace.SpanKindInternal), "internal") - assert.Equal(t, spanKindToShortString(ptrace.SpanKindUnspecified), "") + assert.Equal(t, "consumer", spanKindToShortString(ptrace.SpanKindConsumer)) + assert.Equal(t, "producer", spanKindToShortString(ptrace.SpanKindProducer)) + assert.Equal(t, "client", spanKindToShortString(ptrace.SpanKindClient)) + assert.Equal(t, "server", spanKindToShortString(ptrace.SpanKindServer)) + assert.Equal(t, "internal", spanKindToShortString(ptrace.SpanKindInternal)) + assert.Equal(t, "", spanKindToShortString(ptrace.SpanKindUnspecified)) } func TestStatusCodeToShortString(t *testing.T) { - assert.Equal(t, 
statusCodeToShortString(ptrace.StatusCodeOk), "OK") - assert.Equal(t, statusCodeToShortString(ptrace.StatusCodeError), "ERROR") - assert.Equal(t, statusCodeToShortString(ptrace.StatusCodeUnset), "UNSET") + assert.Equal(t, "OK", statusCodeToShortString(ptrace.StatusCodeOk)) + assert.Equal(t, "ERROR", statusCodeToShortString(ptrace.StatusCodeError)) + assert.Equal(t, "UNSET", statusCodeToShortString(ptrace.StatusCodeUnset)) } diff --git a/exporter/awsemfexporter/config_test.go b/exporter/awsemfexporter/config_test.go index b160ccc8e2d6..249e86ee1d80 100644 --- a/exporter/awsemfexporter/config_test.go +++ b/exporter/awsemfexporter/config_test.go @@ -267,7 +267,7 @@ func TestNoDimensionRollupFeatureGate(t *testing.T) { require.NoError(t, err) cfg := createDefaultConfig() - assert.Equal(t, cfg.(*Config).DimensionRollupOption, "NoDimensionRollup") + assert.Equal(t, "NoDimensionRollup", cfg.(*Config).DimensionRollupOption) _ = featuregate.GlobalRegistry().Set("awsemf.nodimrollupdefault", false) } @@ -320,7 +320,7 @@ func TestIsApplicationSignalsEnabled(t *testing.T) { cfg.LogGroupName = tc.logGroupName } - assert.Equal(t, cfg.isAppSignalsEnabled(), tc.expectedResult) + assert.Equal(t, tc.expectedResult, cfg.isAppSignalsEnabled()) }) } } diff --git a/exporter/awsemfexporter/emf_exporter_test.go b/exporter/awsemfexporter/emf_exporter_test.go index 6ed83acd3ddb..b27c756ef8d0 100644 --- a/exporter/awsemfexporter/emf_exporter_test.go +++ b/exporter/awsemfexporter/emf_exporter_test.go @@ -329,7 +329,7 @@ func TestNewExporterWithoutConfig(t *testing.T) { exp, err := newEmfExporter(expCfg, settings) assert.Error(t, err) assert.Nil(t, exp) - assert.Equal(t, settings.Logger, expCfg.logger) + assert.Equal(t, expCfg.logger, settings.Logger) } func TestNewExporterWithMetricDeclarations(t *testing.T) { @@ -421,5 +421,5 @@ func TestNewEmfExporterWithoutConfig(t *testing.T) { exp, err := newEmfExporter(expCfg, settings) assert.Error(t, err) assert.Nil(t, exp) - assert.Equal(t, 
settings.Logger, expCfg.logger) + assert.Equal(t, expCfg.logger, settings.Logger) } diff --git a/exporter/awskinesisexporter/exporter_test.go b/exporter/awskinesisexporter/exporter_test.go index 59d2f5fe4084..ffab5b0c58c3 100644 --- a/exporter/awskinesisexporter/exporter_test.go +++ b/exporter/awskinesisexporter/exporter_test.go @@ -35,7 +35,7 @@ func TestCreatingExporter(t *testing.T) { }), validateNew: func(tb testing.TB) func(conf aws.Config, opts ...func(*kinesis.Options)) *kinesis.Client { return func(conf aws.Config, opts ...func(*kinesis.Options)) *kinesis.Client { - assert.Equal(tb, conf.Region, "us-west-2", "Must match the expected region") + assert.Equal(tb, "us-west-2", conf.Region, "Must match the expected region") k := kinesis.NewFromConfig(conf, opts...) return k } @@ -50,7 +50,7 @@ func TestCreatingExporter(t *testing.T) { }), validateNew: func(tb testing.TB) func(conf aws.Config, opts ...func(*kinesis.Options)) *kinesis.Client { return func(conf aws.Config, opts ...func(*kinesis.Options)) *kinesis.Client { - assert.Equal(tb, conf.Region, "us-east-1", "Must match the expected region") + assert.Equal(tb, "us-east-1", conf.Region, "Must match the expected region") k := kinesis.NewFromConfig(conf, opts...) 
return k } diff --git a/exporter/awss3exporter/config_test.go b/exporter/awss3exporter/config_test.go index 11ba3298ce2c..3fe0561772b6 100644 --- a/exporter/awss3exporter/config_test.go +++ b/exporter/awss3exporter/config_test.go @@ -32,17 +32,16 @@ func TestLoadConfig(t *testing.T) { e := cfg.Exporters[component.MustNewID("awss3")].(*Config) encoding := component.MustNewIDWithName("foo", "bar") - assert.Equal(t, e, - &Config{ - Encoding: &encoding, - EncodingFileExtension: "baz", - S3Uploader: S3UploaderConfig{ - Region: "us-east-1", - S3Bucket: "foo", - S3Partition: "minute", - }, - MarshalerName: "otlp_json", + assert.Equal(t, &Config{ + Encoding: &encoding, + EncodingFileExtension: "baz", + S3Uploader: S3UploaderConfig{ + Region: "us-east-1", + S3Bucket: "foo", + S3Partition: "minute", }, + MarshalerName: "otlp_json", + }, e, ) } @@ -62,17 +61,16 @@ func TestConfig(t *testing.T) { e := cfg.Exporters[component.MustNewID("awss3")].(*Config) - assert.Equal(t, e, - &Config{ - S3Uploader: S3UploaderConfig{ - Region: "us-east-1", - S3Bucket: "foo", - S3Prefix: "bar", - S3Partition: "minute", - Endpoint: "http://endpoint.com", - }, - MarshalerName: "otlp_json", + assert.Equal(t, &Config{ + S3Uploader: S3UploaderConfig{ + Region: "us-east-1", + S3Bucket: "foo", + S3Prefix: "bar", + S3Partition: "minute", + Endpoint: "http://endpoint.com", }, + MarshalerName: "otlp_json", + }, e, ) } @@ -92,19 +90,18 @@ func TestConfigForS3CompatibleSystems(t *testing.T) { e := cfg.Exporters[component.MustNewID("awss3")].(*Config) - assert.Equal(t, e, - &Config{ - S3Uploader: S3UploaderConfig{ - Region: "us-east-1", - S3Bucket: "foo", - S3Prefix: "bar", - S3Partition: "minute", - Endpoint: "alternative-s3-system.example.com", - S3ForcePathStyle: true, - DisableSSL: true, - }, - MarshalerName: "otlp_json", + assert.Equal(t, &Config{ + S3Uploader: S3UploaderConfig{ + Region: "us-east-1", + S3Bucket: "foo", + S3Prefix: "bar", + S3Partition: "minute", + Endpoint: 
"alternative-s3-system.example.com", + S3ForcePathStyle: true, + DisableSSL: true, }, + MarshalerName: "otlp_json", + }, e, ) } @@ -205,28 +202,26 @@ func TestMarshallerName(t *testing.T) { e := cfg.Exporters[component.MustNewID("awss3")].(*Config) - assert.Equal(t, e, - &Config{ - S3Uploader: S3UploaderConfig{ - Region: "us-east-1", - S3Bucket: "foo", - S3Partition: "minute", - }, - MarshalerName: "sumo_ic", + assert.Equal(t, &Config{ + S3Uploader: S3UploaderConfig{ + Region: "us-east-1", + S3Bucket: "foo", + S3Partition: "minute", }, + MarshalerName: "sumo_ic", + }, e, ) e = cfg.Exporters[component.MustNewIDWithName("awss3", "proto")].(*Config) - assert.Equal(t, e, - &Config{ - S3Uploader: S3UploaderConfig{ - Region: "us-east-1", - S3Bucket: "bar", - S3Partition: "minute", - }, - MarshalerName: "otlp_proto", + assert.Equal(t, &Config{ + S3Uploader: S3UploaderConfig{ + Region: "us-east-1", + S3Bucket: "bar", + S3Partition: "minute", }, + MarshalerName: "otlp_proto", + }, e, ) } @@ -247,30 +242,28 @@ func TestCompressionName(t *testing.T) { e := cfg.Exporters[component.MustNewID("awss3")].(*Config) - assert.Equal(t, e, - &Config{ - S3Uploader: S3UploaderConfig{ - Region: "us-east-1", - S3Bucket: "foo", - S3Partition: "minute", - Compression: "gzip", - }, - MarshalerName: "otlp_json", + assert.Equal(t, &Config{ + S3Uploader: S3UploaderConfig{ + Region: "us-east-1", + S3Bucket: "foo", + S3Partition: "minute", + Compression: "gzip", }, + MarshalerName: "otlp_json", + }, e, ) e = cfg.Exporters[component.MustNewIDWithName("awss3", "proto")].(*Config) - assert.Equal(t, e, - &Config{ - S3Uploader: S3UploaderConfig{ - Region: "us-east-1", - S3Bucket: "bar", - S3Partition: "minute", - Compression: "none", - }, - MarshalerName: "otlp_proto", + assert.Equal(t, &Config{ + S3Uploader: S3UploaderConfig{ + Region: "us-east-1", + S3Bucket: "bar", + S3Partition: "minute", + Compression: "none", }, + MarshalerName: "otlp_proto", + }, e, ) } diff --git 
a/exporter/awss3exporter/marshaler_test.go b/exporter/awss3exporter/marshaler_test.go index 9a56d83d1f6f..bfa39d6914c3 100644 --- a/exporter/awss3exporter/marshaler_test.go +++ b/exporter/awss3exporter/marshaler_test.go @@ -19,19 +19,19 @@ func TestMarshaler(t *testing.T) { m, err := newMarshaler("otlp_json", zap.NewNop()) assert.NoError(t, err) require.NotNil(t, m) - assert.Equal(t, m.format(), "json") + assert.Equal(t, "json", m.format()) } { m, err := newMarshaler("otlp_proto", zap.NewNop()) assert.NoError(t, err) require.NotNil(t, m) - assert.Equal(t, m.format(), "binpb") + assert.Equal(t, "binpb", m.format()) } { m, err := newMarshaler("sumo_ic", zap.NewNop()) assert.NoError(t, err) require.NotNil(t, m) - assert.Equal(t, m.format(), "json.gz") + assert.Equal(t, "json.gz", m.format()) } { m, err := newMarshaler("unknown", zap.NewNop()) @@ -42,7 +42,7 @@ func TestMarshaler(t *testing.T) { m, err := newMarshaler("body", zap.NewNop()) assert.NoError(t, err) require.NotNil(t, m) - assert.Equal(t, m.format(), "txt") + assert.Equal(t, "txt", m.format()) } } diff --git a/exporter/awss3exporter/s3_writer_test.go b/exporter/awss3exporter/s3_writer_test.go index 350e3284f7d9..cdd5e1f025e3 100644 --- a/exporter/awss3exporter/s3_writer_test.go +++ b/exporter/awss3exporter/s3_writer_test.go @@ -126,7 +126,7 @@ func TestGetSessionConfigWithRoleArn(t *testing.T) { assert.NoError(t, err) assert.Equal(t, sessionConfig.Region, aws.String(region)) - assert.Equal(t, creds.ProviderName, "AssumeRoleProvider") + assert.Equal(t, "AssumeRoleProvider", creds.ProviderName) } func TestGetSessionConfigWithoutRoleArn(t *testing.T) { @@ -144,5 +144,5 @@ func TestGetSessionConfigWithoutRoleArn(t *testing.T) { assert.NoError(t, err) assert.Equal(t, sessionConfig.Region, aws.String(region)) - assert.NotEqual(t, creds.ProviderName, "AssumeRoleProvider") + assert.NotEqual(t, "AssumeRoleProvider", creds.ProviderName) } diff --git a/exporter/awsxrayexporter/factory_test.go 
b/exporter/awsxrayexporter/factory_test.go index 9ed5509c9b19..a80873e5ac85 100644 --- a/exporter/awsxrayexporter/factory_test.go +++ b/exporter/awsxrayexporter/factory_test.go @@ -23,7 +23,7 @@ import ( func TestCreateDefaultConfig(t *testing.T) { factory := NewFactory() cfg := factory.CreateDefaultConfig() - assert.Equal(t, cfg, &Config{ + assert.Equal(t, &Config{ AWSSessionSettings: awsutil.AWSSessionSettings{ NumberOfWorkers: 8, Endpoint: "", @@ -37,7 +37,7 @@ func TestCreateDefaultConfig(t *testing.T) { RoleARN: "", }, skipTimestampValidation: true, - }, "failed to create default config") + }, cfg, "failed to create default config") assert.NoError(t, componenttest.CheckConfigStruct(cfg)) } @@ -48,7 +48,7 @@ func TestCreateDefaultConfigWithSkipTimestampValidation(t *testing.T) { assert.NoError(t, err) cfg := factory.CreateDefaultConfig() - assert.Equal(t, cfg, &Config{ + assert.Equal(t, &Config{ AWSSessionSettings: awsutil.AWSSessionSettings{ NumberOfWorkers: 8, Endpoint: "", @@ -62,7 +62,7 @@ func TestCreateDefaultConfigWithSkipTimestampValidation(t *testing.T) { RoleARN: "", }, skipTimestampValidation: true, - }, "failed to create default config") + }, cfg, "failed to create default config") assert.NoError(t, componenttest.CheckConfigStruct(cfg)) err = featuregate.GlobalRegistry().Set("exporter.awsxray.skiptimestampvalidation", false) diff --git a/exporter/azuremonitorexporter/metricexporter_test.go b/exporter/azuremonitorexporter/metricexporter_test.go index 0be8c6c86a73..520f3e627aef 100644 --- a/exporter/azuremonitorexporter/metricexporter_test.go +++ b/exporter/azuremonitorexporter/metricexporter_test.go @@ -35,74 +35,74 @@ func TestDoubleGaugeEnvelopes(t *testing.T) { gaugeMetric := getDoubleTestGaugeMetric() dataPoint := getDataPoint(t, gaugeMetric) - assert.Equal(t, dataPoint.Name, "Gauge") - assert.Equal(t, dataPoint.Value, float64(1)) - assert.Equal(t, dataPoint.Count, 1) - assert.Equal(t, dataPoint.Kind, contracts.Measurement) + assert.Equal(t, 
"Gauge", dataPoint.Name) + assert.Equal(t, float64(1), dataPoint.Value) + assert.Equal(t, 1, dataPoint.Count) + assert.Equal(t, contracts.Measurement, dataPoint.Kind) } func TestIntGaugeEnvelopes(t *testing.T) { gaugeMetric := getIntTestGaugeMetric() dataPoint := getDataPoint(t, gaugeMetric) - assert.Equal(t, dataPoint.Name, "Gauge") - assert.Equal(t, dataPoint.Value, float64(1)) - assert.Equal(t, dataPoint.Count, 1) - assert.Equal(t, dataPoint.Kind, contracts.Measurement) + assert.Equal(t, "Gauge", dataPoint.Name) + assert.Equal(t, float64(1), dataPoint.Value) + assert.Equal(t, 1, dataPoint.Count) + assert.Equal(t, contracts.Measurement, dataPoint.Kind) } func TestDoubleSumEnvelopes(t *testing.T) { sumMetric := getDoubleTestSumMetric() dataPoint := getDataPoint(t, sumMetric) - assert.Equal(t, dataPoint.Name, "Sum") - assert.Equal(t, dataPoint.Value, float64(2)) - assert.Equal(t, dataPoint.Count, 1) - assert.Equal(t, dataPoint.Kind, contracts.Measurement) + assert.Equal(t, "Sum", dataPoint.Name) + assert.Equal(t, float64(2), dataPoint.Value) + assert.Equal(t, 1, dataPoint.Count) + assert.Equal(t, contracts.Measurement, dataPoint.Kind) } func TestIntSumEnvelopes(t *testing.T) { sumMetric := getIntTestSumMetric() dataPoint := getDataPoint(t, sumMetric) - assert.Equal(t, dataPoint.Name, "Sum") - assert.Equal(t, dataPoint.Value, float64(2)) - assert.Equal(t, dataPoint.Count, 1) - assert.Equal(t, dataPoint.Kind, contracts.Measurement) + assert.Equal(t, "Sum", dataPoint.Name) + assert.Equal(t, float64(2), dataPoint.Value) + assert.Equal(t, 1, dataPoint.Count) + assert.Equal(t, contracts.Measurement, dataPoint.Kind) } func TestHistogramEnvelopes(t *testing.T) { histogramMetric := getTestHistogramMetric() dataPoint := getDataPoint(t, histogramMetric) - assert.Equal(t, dataPoint.Name, "Histogram") - assert.Equal(t, dataPoint.Value, float64(3)) - assert.Equal(t, dataPoint.Count, 3) - assert.Equal(t, dataPoint.Min, float64(0)) - assert.Equal(t, dataPoint.Max, float64(2)) - 
assert.Equal(t, dataPoint.Kind, contracts.Aggregation) + assert.Equal(t, "Histogram", dataPoint.Name) + assert.Equal(t, float64(3), dataPoint.Value) + assert.Equal(t, 3, dataPoint.Count) + assert.Equal(t, float64(0), dataPoint.Min) + assert.Equal(t, float64(2), dataPoint.Max) + assert.Equal(t, contracts.Aggregation, dataPoint.Kind) } func TestExponentialHistogramEnvelopes(t *testing.T) { exponentialHistogramMetric := getTestExponentialHistogramMetric() dataPoint := getDataPoint(t, exponentialHistogramMetric) - assert.Equal(t, dataPoint.Name, "ExponentialHistogram") - assert.Equal(t, dataPoint.Value, float64(4)) - assert.Equal(t, dataPoint.Count, 4) - assert.Equal(t, dataPoint.Min, float64(1)) - assert.Equal(t, dataPoint.Max, float64(3)) - assert.Equal(t, dataPoint.Kind, contracts.Aggregation) + assert.Equal(t, "ExponentialHistogram", dataPoint.Name) + assert.Equal(t, float64(4), dataPoint.Value) + assert.Equal(t, 4, dataPoint.Count) + assert.Equal(t, float64(1), dataPoint.Min) + assert.Equal(t, float64(3), dataPoint.Max) + assert.Equal(t, contracts.Aggregation, dataPoint.Kind) } func TestSummaryEnvelopes(t *testing.T) { summaryMetric := getTestSummaryMetric() dataPoint := getDataPoint(t, summaryMetric) - assert.Equal(t, dataPoint.Name, "Summary") - assert.Equal(t, dataPoint.Value, float64(5)) - assert.Equal(t, dataPoint.Count, 5) - assert.Equal(t, dataPoint.Kind, contracts.Aggregation) + assert.Equal(t, "Summary", dataPoint.Name) + assert.Equal(t, float64(5), dataPoint.Value) + assert.Equal(t, 5, dataPoint.Count) + assert.Equal(t, contracts.Aggregation, dataPoint.Kind) } func getDataPoint(t testing.TB, metric pmetric.Metric) *contracts.DataPoint { @@ -117,7 +117,7 @@ func getDataPoint(t testing.TB, metric pmetric.Metric) *contracts.DataPoint { require.NotNil(t, envelope.Data) envelopeData := envelope.Data.(*contracts.Data) - assert.Equal(t, envelopeData.BaseType, "MetricData") + assert.Equal(t, "MetricData", envelopeData.BaseType) require.NotNil(t, 
envelopeData.BaseData) diff --git a/exporter/coralogixexporter/factory_test.go b/exporter/coralogixexporter/factory_test.go index 9bc2f9e85506..66966c68bcaf 100644 --- a/exporter/coralogixexporter/factory_test.go +++ b/exporter/coralogixexporter/factory_test.go @@ -215,7 +215,7 @@ func TestCreateTracesExporter(t *testing.T) { if err != nil { // Since the endpoint of OTLP exporter doesn't actually exist, // exporter may already stop because it cannot connect. - assert.Equal(t, err.Error(), "rpc error: code = Canceled desc = grpc: the client connection is closing") + assert.Equal(t, "rpc error: code = Canceled desc = grpc: the client connection is closing", err.Error()) } }) } @@ -240,7 +240,7 @@ func TestCreateLogsExporterWithDomainAndEndpoint(t *testing.T) { if err != nil { // Since the endpoint of OTLP exporter doesn't actually exist, // exporter may already stop because it cannot connect. - assert.Equal(t, err.Error(), "rpc error: code = Canceled desc = grpc: the client connection is closing") + assert.Equal(t, "rpc error: code = Canceled desc = grpc: the client connection is closing", err.Error()) } } diff --git a/exporter/datadogexporter/examples_test.go b/exporter/datadogexporter/examples_test.go index e13f354406f7..64484e953f78 100644 --- a/exporter/datadogexporter/examples_test.go +++ b/exporter/datadogexporter/examples_test.go @@ -74,7 +74,7 @@ func TestExamples(t *testing.T) { require.NoError(t, err) err = yaml.Unmarshal(slurp, &out) require.NoError(t, err) - require.Equal(t, out.Kind, "ConfigMap") + require.Equal(t, "ConfigMap", out.Kind) require.NotEmpty(t, out.Data.YAML) data := []byte(out.Data.YAML) diff --git a/exporter/datadogexporter/factory_test.go b/exporter/datadogexporter/factory_test.go index c0fa7d9f2251..29da32051e91 100644 --- a/exporter/datadogexporter/factory_test.go +++ b/exporter/datadogexporter/factory_test.go @@ -685,7 +685,7 @@ func TestOnlyMetadata(t *testing.T) { require.NoError(t, err) recvMetadata := <-server.MetadataChan - 
assert.Equal(t, recvMetadata.InternalHostname, "custom-hostname") + assert.Equal(t, "custom-hostname", recvMetadata.InternalHostname) } func TestStopExporters(t *testing.T) { diff --git a/exporter/datadogexporter/integrationtest/integration_test.go b/exporter/datadogexporter/integrationtest/integration_test.go index d80ef5f51b28..b7b48eebfcfe 100644 --- a/exporter/datadogexporter/integrationtest/integration_test.go +++ b/exporter/datadogexporter/integrationtest/integration_test.go @@ -509,12 +509,12 @@ func TestIntegrationLogs(t *testing.T) { if s.Metric == "otelcol_receiver_accepted_log_records" { numAcceptedLogRecords++ assert.Len(t, s.Points, 1) - assert.Equal(t, s.Points[0].Value, 5.0) + assert.Equal(t, 5.0, s.Points[0].Value) } if s.Metric == "otelcol_exporter_sent_log_records" { numSentLogRecords++ assert.Len(t, s.Points, 1) - assert.Equal(t, s.Points[0].Value, 5.0) + assert.Equal(t, 5.0, s.Points[0].Value) } } assert.Equal(t, 2, numAcceptedLogRecords) diff --git a/exporter/datadogexporter/internal/clientutil/http_test.go b/exporter/datadogexporter/internal/clientutil/http_test.go index 6a09dc2ba1b0..73420c1f1863 100644 --- a/exporter/datadogexporter/internal/clientutil/http_test.go +++ b/exporter/datadogexporter/internal/clientutil/http_test.go @@ -161,7 +161,7 @@ func TestNewHTTPClient(t *testing.T) { func TestUserAgent(t *testing.T) { - assert.Equal(t, UserAgent(buildInfo), "otelcontribcol/1.0") + assert.Equal(t, "otelcontribcol/1.0", UserAgent(buildInfo)) } func TestDDHeaders(t *testing.T) { @@ -169,6 +169,6 @@ func TestDDHeaders(t *testing.T) { apiKey := "apikey" SetDDHeaders(header, buildInfo, apiKey) assert.Equal(t, header.Get("DD-Api-Key"), apiKey) - assert.Equal(t, header.Get("USer-Agent"), "otelcontribcol/1.0") + assert.Equal(t, "otelcontribcol/1.0", header.Get("USer-Agent")) } diff --git a/exporter/datadogexporter/internal/clientutil/retrier_test.go b/exporter/datadogexporter/internal/clientutil/retrier_test.go index a52ec759d56b..c919e4553245 
100644 --- a/exporter/datadogexporter/internal/clientutil/retrier_test.go +++ b/exporter/datadogexporter/internal/clientutil/retrier_test.go @@ -26,7 +26,7 @@ func TestDoWithRetries(t *testing.T) { retryNum, err := retrier.DoWithRetries(ctx, func(context.Context) error { return nil }) require.NoError(t, err) - assert.Equal(t, retryNum, int64(0)) + assert.Equal(t, int64(0), retryNum) retrier = NewRetrier(zap.NewNop(), configretry.BackOffConfig{ @@ -52,5 +52,5 @@ func TestNoRetriesOnPermanentError(t *testing.T) { return WrapError(fmt.Errorf("test"), &respNonRetriable) }) require.Error(t, err) - assert.Equal(t, retryNum, int64(0)) + assert.Equal(t, int64(0), retryNum) } diff --git a/exporter/datadogexporter/internal/hostmetadata/host_test.go b/exporter/datadogexporter/internal/hostmetadata/host_test.go index f4a45c947124..f58b2fbea4b7 100644 --- a/exporter/datadogexporter/internal/hostmetadata/host_test.go +++ b/exporter/datadogexporter/internal/hostmetadata/host_test.go @@ -18,5 +18,5 @@ func TestHost(t *testing.T) { require.NoError(t, err) src, err := p.Source(context.Background()) require.NoError(t, err) - assert.Equal(t, src.Identifier, "test-host") + assert.Equal(t, "test-host", src.Identifier) } diff --git a/exporter/datadogexporter/internal/hostmetadata/internal/system/host_test.go b/exporter/datadogexporter/internal/hostmetadata/internal/system/host_test.go index 9939324eb5d4..9e1dceae16d1 100644 --- a/exporter/datadogexporter/internal/hostmetadata/internal/system/host_test.go +++ b/exporter/datadogexporter/internal/hostmetadata/internal/system/host_test.go @@ -30,17 +30,17 @@ func TestGetHostname(t *testing.T) { FQDN: "fqdn", OS: "os", } - assert.Equal(t, hostInfoAll.GetHostname(logger), "fqdn") + assert.Equal(t, "fqdn", hostInfoAll.GetHostname(logger)) hostInfoInvalid := &HostInfo{ FQDN: "fqdn_invalid", OS: "os", } - assert.Equal(t, hostInfoInvalid.GetHostname(logger), "os") + assert.Equal(t, "os", hostInfoInvalid.GetHostname(logger)) hostInfoMissingFQDN := 
&HostInfo{ OS: "os", } - assert.Equal(t, hostInfoMissingFQDN.GetHostname(logger), "os") + assert.Equal(t, "os", hostInfoMissingFQDN.GetHostname(logger)) } diff --git a/exporter/datadogexporter/internal/hostmetadata/metadata_test.go b/exporter/datadogexporter/internal/hostmetadata/metadata_test.go index c2588fabd818..4d2100046e80 100644 --- a/exporter/datadogexporter/internal/hostmetadata/metadata_test.go +++ b/exporter/datadogexporter/internal/hostmetadata/metadata_test.go @@ -71,10 +71,10 @@ func TestFillHostMetadata(t *testing.T) { metadata := payload.NewEmpty() fillHostMetadata(params, pcfg, hostProvider, &metadata) - assert.Equal(t, metadata.InternalHostname, "hostname") - assert.Equal(t, metadata.Flavor, "otelcontribcol") - assert.Equal(t, metadata.Version, "1.0") - assert.Equal(t, metadata.Meta.Hostname, "hostname") + assert.Equal(t, "hostname", metadata.InternalHostname) + assert.Equal(t, "otelcontribcol", metadata.Flavor) + assert.Equal(t, "1.0", metadata.Version) + assert.Equal(t, "hostname", metadata.Meta.Hostname) assert.ElementsMatch(t, metadata.Tags.OTel, []string{"key1:tag1", "key2:tag2", "env:prod"}) metadataWithVals := payload.HostMetadata{ @@ -84,10 +84,10 @@ func TestFillHostMetadata(t *testing.T) { } fillHostMetadata(params, pcfg, hostProvider, &metadataWithVals) - assert.Equal(t, metadataWithVals.InternalHostname, "my-custom-hostname") - assert.Equal(t, metadataWithVals.Flavor, "otelcontribcol") - assert.Equal(t, metadataWithVals.Version, "1.0") - assert.Equal(t, metadataWithVals.Meta.Hostname, "my-custom-hostname") + assert.Equal(t, "my-custom-hostname", metadataWithVals.InternalHostname) + assert.Equal(t, "otelcontribcol", metadataWithVals.Flavor) + assert.Equal(t, "1.0", metadataWithVals.Version) + assert.Equal(t, "my-custom-hostname", metadataWithVals.Meta.Hostname) assert.ElementsMatch(t, metadataWithVals.Tags.OTel, []string{"key1:tag1", "key2:tag2", "env:prod"}) } @@ -187,8 +187,8 @@ func TestPushMetadata(t *testing.T) { handler := 
http.NewServeMux() handler.HandleFunc("/intake", func(_ http.ResponseWriter, r *http.Request) { - assert.Equal(t, r.Header.Get("DD-Api-Key"), "apikey") - assert.Equal(t, r.Header.Get("User-Agent"), "otelcontribcol/1.0") + assert.Equal(t, "apikey", r.Header.Get("DD-Api-Key")) + assert.Equal(t, "otelcontribcol/1.0", r.Header.Get("User-Agent")) reader, err := gzip.NewReader(r.Body) require.NoError(t, err) body, err := io.ReadAll(reader) @@ -253,7 +253,7 @@ func TestPusher(t *testing.T) { go RunPusher(ctx, params, pcfg, hostProvider, attrs, reporter) recvMetadata := <-server.MetadataChan - assert.Equal(t, recvMetadata.InternalHostname, "datadog-hostname") + assert.Equal(t, "datadog-hostname", recvMetadata.InternalHostname) assert.Equal(t, recvMetadata.Version, mockBuildInfo.Version) assert.Equal(t, recvMetadata.Flavor, mockBuildInfo.Command) require.NotNil(t, recvMetadata.Meta) diff --git a/exporter/datadogexporter/internal/metrics/series_test.go b/exporter/datadogexporter/internal/metrics/series_test.go index e195fc271bfc..7df7e9b258d1 100644 --- a/exporter/datadogexporter/internal/metrics/series_test.go +++ b/exporter/datadogexporter/internal/metrics/series_test.go @@ -35,10 +35,10 @@ func TestNewType(t *testing.T) { tags := []string{"tag:value"} gauge := NewGauge(name, ts, value, tags) - assert.Equal(t, gauge.GetType(), datadogV2.METRICINTAKETYPE_GAUGE) + assert.Equal(t, datadogV2.METRICINTAKETYPE_GAUGE, gauge.GetType()) count := NewCount(name, ts, value, tags) - assert.Equal(t, count.GetType(), datadogV2.METRICINTAKETYPE_COUNT) + assert.Equal(t, datadogV2.METRICINTAKETYPE_COUNT, count.GetType()) } func TestDefaultMetrics(t *testing.T) { diff --git a/exporter/datadogexporter/metrics_exporter_test.go b/exporter/datadogexporter/metrics_exporter_test.go index 891da576c415..f92293240261 100644 --- a/exporter/datadogexporter/metrics_exporter_test.go +++ b/exporter/datadogexporter/metrics_exporter_test.go @@ -81,7 +81,7 @@ func TestNewExporter(t *testing.T) { err = 
exp.ConsumeMetrics(context.Background(), testMetrics) require.NoError(t, err) recvMetadata := <-server.MetadataChan - assert.Equal(t, recvMetadata.InternalHostname, "custom-hostname") + assert.Equal(t, "custom-hostname", recvMetadata.InternalHostname) } func Test_metricsExporter_PushMetricsData(t *testing.T) { @@ -402,7 +402,7 @@ func TestNewExporter_Zorkian(t *testing.T) { err = exp.ConsumeMetrics(context.Background(), testMetrics) require.NoError(t, err) recvMetadata := <-server.MetadataChan - assert.Equal(t, recvMetadata.InternalHostname, "custom-hostname") + assert.Equal(t, "custom-hostname", recvMetadata.InternalHostname) } func Test_metricsExporter_PushMetricsData_Zorkian(t *testing.T) { diff --git a/exporter/datadogexporter/traces_exporter_test.go b/exporter/datadogexporter/traces_exporter_test.go index fa5174155ec1..64ae87375141 100644 --- a/exporter/datadogexporter/traces_exporter_test.go +++ b/exporter/datadogexporter/traces_exporter_test.go @@ -328,7 +328,7 @@ func TestPushTraceData(t *testing.T) { assert.NoError(t, err) recvMetadata := <-server.MetadataChan - assert.Equal(t, recvMetadata.InternalHostname, "custom-hostname") + assert.Equal(t, "custom-hostname", recvMetadata.InternalHostname) } func simpleTraces() ptrace.Traces { diff --git a/exporter/datasetexporter/logs_exporter_stress_test.go b/exporter/datasetexporter/logs_exporter_stress_test.go index 5f1a37f693a5..4f8a66e07ac3 100644 --- a/exporter/datasetexporter/logs_exporter_stress_test.go +++ b/exporter/datasetexporter/logs_exporter_stress_test.go @@ -144,7 +144,7 @@ func TestConsumeLogsManyLogsShouldSucceed(t *testing.T) { assert.True(t, wasSuccessful.Load()) - assert.Equal(t, seenKeys, expectedKeys) + assert.Equal(t, expectedKeys, seenKeys) assert.Equal(t, expectedLogs, processedEvents.Load(), "processed items") assert.Equal(t, expectedLogs, uint64(len(seenKeys)), "unique items") } diff --git a/exporter/datasetexporter/logs_exporter_test.go b/exporter/datasetexporter/logs_exporter_test.go 
index f350f070eeae..2a75855b10a2 100644 --- a/exporter/datasetexporter/logs_exporter_test.go +++ b/exporter/datasetexporter/logs_exporter_test.go @@ -712,7 +712,7 @@ func TestBuildEventFromLogEventWithoutTimestampWithOutObservedTimestampUseCurren now = func() time.Time { return time.Unix(123456789, 0) } currentTime := now() assert.Equal(t, currentTime, time.Unix(123456789, 0)) - assert.Equal(t, strconv.FormatInt(currentTime.UnixNano(), 10), "123456789000000000") + assert.Equal(t, "123456789000000000", strconv.FormatInt(currentTime.UnixNano(), 10)) lr := testdata.GenerateLogsOneLogRecord() ld := lr.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0) diff --git a/exporter/elasticsearchexporter/README.md b/exporter/elasticsearchexporter/README.md index 4b3af781306a..73b72bec82a2 100644 --- a/exporter/elasticsearchexporter/README.md +++ b/exporter/elasticsearchexporter/README.md @@ -135,7 +135,7 @@ This can be customised through the following settings: - `traces_dynamic_index` (optional): uses resource, scope, or span attributes to dynamically construct index name. - `enabled`(default=false): Enable/Disable dynamic index for trace spans. If `data_stream.dataset` or `data_stream.namespace` exist in attributes (precedence: span attribute > scope attribute > resource attribute), they will be used to dynamically construct index name in the form `traces-${data_stream.dataset}-${data_stream.namespace}`. Otherwise, if - `elasticsearch.index.prefix` or `elasticsearch.index.suffix` exist in attributes (precedence: resource attribute > scope attribute > span attribute), they will be used to dynamically construct index name in the form `${elasticsearch.index.prefix}${traces_index}${elasticsearch.index.suffix}`. Otherwise, the index name falls back to `traces-generic-default`, and `traces_index` config will be ignored. Except for prefix/suffix attribute presence, the resulting docs will contain the corresponding `data_stream.*` fields. 
+ `elasticsearch.index.prefix` or `elasticsearch.index.suffix` exist in attributes (precedence: resource attribute > scope attribute > span attribute), they will be used to dynamically construct index name in the form `${elasticsearch.index.prefix}${traces_index}${elasticsearch.index.suffix}`. Otherwise, the index name falls back to `traces-generic-default`, and `traces_index` config will be ignored. Except for prefix/suffix attribute presence, the resulting docs will contain the corresponding `data_stream.*` fields. There is an exception for span events under OTel mapping mode (`mapping::mode: otel`), where span event attributes instead of span attributes are considered, and `data_stream.type` is always `logs` instead of `traces` such that documents are routed to `logs-${data_stream.dataset}-${data_stream.namespace}`. - `logstash_format` (optional): Logstash format compatibility. Logs, metrics and traces can be written into an index in Logstash format. - `enabled`(default=false): Enable/disable Logstash format compatibility. When `logstash_format.enabled` is `true`, the index name is composed using `(logs|metrics|traces)_index` or `(logs|metrics|traces)_dynamic_index` as prefix and the date as suffix, @@ -155,8 +155,10 @@ behaviours, which may be configured through the following settings: - `none`: Use original fields and event structure from the OTLP event. - `ecs`: Try to map fields to [Elastic Common Schema (ECS)][ECS] - `otel`: Elastic's preferred "OTel-native" mapping mode. Uses original fields and event structure from the OTLP event. - :warning: This mode's behavior is unstable, it is currently is experimental and undergoing changes. - There's a special treatment for the following attributes: `data_stream.type`, `data_stream.dataset`, `data_stream.namespace`. 
Instead of serializing these values under the `*attributes.*` namespace, they're put at the root of the document, to conform with the conventions of the data stream naming scheme that maps these as `constant_keyword` fields. + - :warning: This mode's behavior is unstable, it is currently experimental and undergoing changes. + - There's a special treatment for the following attributes: `data_stream.type`, `data_stream.dataset`, `data_stream.namespace`. Instead of serializing these values under the `*attributes.*` namespace, they're put at the root of the document, to conform with the conventions of the data stream naming scheme that maps these as `constant_keyword` fields. + - `data_stream.dataset` will always be appended with `.otel`. It is recommended to use with `*_dynamic_index.enabled: true` to route documents to data stream `${data_stream.type}-${data_stream.dataset}-${data_stream.namespace}`. + - Span events are stored in separate documents. They will be routed with `data_stream.type` set to `logs` if `traces_dynamic_index::enabled` is `true`. - `raw`: Omit the `Attributes.` string prefixed to field names for log and span attributes as well as omit the `Events.` string prefixed to
- [confighttp]: https://github.com/open-telemetry/opentelemetry-collector/tree/main/config/confighttp/README.md#http-configuration-settings [configtls]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configtls/README.md#tls-configuration-settings [configauth]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configauth/README.md#authentication-configuration diff --git a/exporter/elasticsearchexporter/data_stream_router.go b/exporter/elasticsearchexporter/data_stream_router.go index 028fd183aa2d..851bb92d9756 100644 --- a/exporter/elasticsearchexporter/data_stream_router.go +++ b/exporter/elasticsearchexporter/data_stream_router.go @@ -11,7 +11,7 @@ import ( "go.opentelemetry.io/collector/pdata/ptrace" ) -func routeWithDefaults(defaultDSType, defaultDSDataset, defaultDSNamespace string) func( +func routeWithDefaults(defaultDSType string) func( pcommon.Map, pcommon.Map, pcommon.Map, @@ -29,8 +29,8 @@ func routeWithDefaults(defaultDSType, defaultDSDataset, defaultDSNamespace strin // 1. read data_stream.* from attributes // 2. read elasticsearch.index.* from attributes // 3. 
use default hardcoded data_stream.* - dataset, datasetExists := getFromAttributes(dataStreamDataset, defaultDSDataset, recordAttr, scopeAttr, resourceAttr) - namespace, namespaceExists := getFromAttributes(dataStreamNamespace, defaultDSNamespace, recordAttr, scopeAttr, resourceAttr) + dataset, datasetExists := getFromAttributes(dataStreamDataset, defaultDataStreamDataset, recordAttr, scopeAttr, resourceAttr) + namespace, namespaceExists := getFromAttributes(dataStreamNamespace, defaultDataStreamNamespace, recordAttr, scopeAttr, resourceAttr) dataStreamMode := datasetExists || namespaceExists if !dataStreamMode { prefix, prefixExists := getFromAttributes(indexPrefix, "", resourceAttr, scopeAttr, recordAttr) @@ -62,7 +62,7 @@ func routeLogRecord( fIndex string, otel bool, ) string { - route := routeWithDefaults(defaultDataStreamTypeLogs, defaultDataStreamDataset, defaultDataStreamNamespace) + route := routeWithDefaults(defaultDataStreamTypeLogs) return route(record.Attributes(), scope.Attributes(), resource.Attributes(), fIndex, otel) } @@ -75,7 +75,7 @@ func routeDataPoint( fIndex string, otel bool, ) string { - route := routeWithDefaults(defaultDataStreamTypeMetrics, defaultDataStreamDataset, defaultDataStreamNamespace) + route := routeWithDefaults(defaultDataStreamTypeMetrics) return route(dataPoint.Attributes(), scope.Attributes(), resource.Attributes(), fIndex, otel) } @@ -88,6 +88,20 @@ func routeSpan( fIndex string, otel bool, ) string { - route := routeWithDefaults(defaultDataStreamTypeTraces, defaultDataStreamDataset, defaultDataStreamNamespace) + route := routeWithDefaults(defaultDataStreamTypeTraces) return route(span.Attributes(), scope.Attributes(), resource.Attributes(), fIndex, otel) } + +// routeSpanEvent returns the name of the index to send the span event to according to data stream routing attributes. +// This function may mutate record attributes. 
+func routeSpanEvent( + spanEvent ptrace.SpanEvent, + scope pcommon.InstrumentationScope, + resource pcommon.Resource, + fIndex string, + otel bool, +) string { + // span events are sent to logs-*, not traces-* + route := routeWithDefaults(defaultDataStreamTypeLogs) + return route(spanEvent.Attributes(), scope.Attributes(), resource.Attributes(), fIndex, otel) +} diff --git a/exporter/elasticsearchexporter/exporter.go b/exporter/elasticsearchexporter/exporter.go index 339c7c637623..2bf4c0250fa4 100644 --- a/exporter/elasticsearchexporter/exporter.go +++ b/exporter/elasticsearchexporter/exporter.go @@ -222,7 +222,6 @@ func (e *elasticsearchExporter) pushMetricsData( return nil } - // TODO: support exponential histogram switch metric.Type() { case pmetric.MetricTypeSum: dps := metric.Sum().DataPoints() @@ -252,6 +251,16 @@ func (e *elasticsearchExporter) pushMetricsData( continue } } + case pmetric.MetricTypeExponentialHistogram: + dps := metric.ExponentialHistogram().DataPoints() + for l := 0; l < dps.Len(); l++ { + dp := dps.At(l) + val := exponentialHistogramToValue(dp) + if err := upsertDataPoint(dp, val); err != nil { + errs = append(errs, err) + continue + } + } case pmetric.MetricTypeHistogram: dps := metric.Histogram().DataPoints() for l := 0; l < dps.Len(); l++ { @@ -361,6 +370,12 @@ func (e *elasticsearchExporter) pushTraceData( } errs = append(errs, err) } + for ii := 0; ii < span.Events().Len(); ii++ { + spanEvent := span.Events().At(ii) + if err := e.pushSpanEvent(ctx, resource, il.SchemaUrl(), span, spanEvent, scope, scopeSpan.SchemaUrl(), session); err != nil { + errs = append(errs, err) + } + } } } } @@ -402,3 +417,37 @@ func (e *elasticsearchExporter) pushTraceRecord( } return bulkIndexerSession.Add(ctx, fIndex, bytes.NewReader(document), nil) } + +func (e *elasticsearchExporter) pushSpanEvent( + ctx context.Context, + resource pcommon.Resource, + resourceSchemaURL string, + span ptrace.Span, + spanEvent ptrace.SpanEvent, + scope 
pcommon.InstrumentationScope, + scopeSchemaURL string, + bulkIndexerSession bulkIndexerSession, +) error { + fIndex := e.index + if e.dynamicIndex { + fIndex = routeSpanEvent(spanEvent, scope, resource, fIndex, e.otel) + } + + if e.logstashFormat.Enabled { + formattedIndex, err := generateIndexWithLogstashFormat(fIndex, &e.logstashFormat, time.Now()) + if err != nil { + return err + } + fIndex = formattedIndex + } + + document := e.model.encodeSpanEvent(resource, resourceSchemaURL, span, spanEvent, scope, scopeSchemaURL) + if document == nil { + return nil + } + docBytes, err := e.model.encodeDocument(*document) + if err != nil { + return err + } + return bulkIndexerSession.Add(ctx, fIndex, bytes.NewReader(docBytes), nil) +} diff --git a/exporter/elasticsearchexporter/exporter_test.go b/exporter/elasticsearchexporter/exporter_test.go index 7c12cee62960..c53f97a872b4 100644 --- a/exporter/elasticsearchexporter/exporter_test.go +++ b/exporter/elasticsearchexporter/exporter_test.go @@ -639,35 +639,35 @@ func TestExporterMetrics(t *testing.T) { expected := []itemRequest{ { Action: []byte(`{"create":{"_index":"metrics-generic-bar"}}`), - Document: []byte(`{"@timestamp":"1970-01-01T00:00:00.000000000Z","data_stream":{"dataset":"generic","namespace":"bar","type":"metrics"},"dp":{"attribute":"dp.attribute.value"},"metric":{"bar":1}}`), + Document: []byte(`{"@timestamp":"1970-01-01T00:00:00.000000000Z","data_stream":{"dataset":"generic","namespace":"bar","type":"metrics"},"dp":{"attribute":"dp.attribute.value"},"metric":{"bar":1.0}}`), }, { Action: []byte(`{"create":{"_index":"metrics-generic-resource.namespace"}}`), - Document: []byte(`{"@timestamp":"1970-01-01T00:00:00.000000000Z","data_stream":{"dataset":"generic","namespace":"resource.namespace","type":"metrics"},"dp":{"attribute":"dp.attribute.value"},"metric":{"bar":1,"foo":1}}`), + Document: 
[]byte(`{"@timestamp":"1970-01-01T00:00:00.000000000Z","data_stream":{"dataset":"generic","namespace":"resource.namespace","type":"metrics"},"dp":{"attribute":"dp.attribute.value"},"metric":{"bar":1.0,"foo":1.0}}`), }, { Action: []byte(`{"create":{"_index":"metrics-generic-resource.namespace"}}`), - Document: []byte(`{"@timestamp":"1970-01-01T00:00:00.000000000Z","data_stream":{"dataset":"generic","namespace":"resource.namespace","type":"metrics"},"metric":{"bar":1,"foo":1}}`), + Document: []byte(`{"@timestamp":"1970-01-01T00:00:00.000000000Z","data_stream":{"dataset":"generic","namespace":"resource.namespace","type":"metrics"},"metric":{"bar":1.0,"foo":1}}`), }, { Action: []byte(`{"create":{"_index":"metrics-generic-resource.namespace"}}`), - Document: []byte(`{"@timestamp":"1970-01-01T01:00:00.000000000Z","data_stream":{"dataset":"generic","namespace":"resource.namespace","type":"metrics"},"metric":{"baz":1}}`), + Document: []byte(`{"@timestamp":"1970-01-01T01:00:00.000000000Z","data_stream":{"dataset":"generic","namespace":"resource.namespace","type":"metrics"},"metric":{"baz":1.0}}`), }, { Action: []byte(`{"create":{"_index":"metrics-scope.b-bar"}}`), - Document: []byte(`{"@timestamp":"1970-01-01T00:00:00.000000000Z","data_stream":{"dataset":"scope.b","namespace":"bar","type":"metrics"},"dp":{"attribute":"dp.attribute.value"},"metric":{"bar":1}}`), + Document: []byte(`{"@timestamp":"1970-01-01T00:00:00.000000000Z","data_stream":{"dataset":"scope.b","namespace":"bar","type":"metrics"},"dp":{"attribute":"dp.attribute.value"},"metric":{"bar":1.0}}`), }, { Action: []byte(`{"create":{"_index":"metrics-scope.b-resource.namespace"}}`), - Document: []byte(`{"@timestamp":"1970-01-01T00:00:00.000000000Z","data_stream":{"dataset":"scope.b","namespace":"resource.namespace","type":"metrics"},"dp":{"attribute":"dp.attribute.value"},"metric":{"bar":1,"foo":1}}`), + Document: 
[]byte(`{"@timestamp":"1970-01-01T00:00:00.000000000Z","data_stream":{"dataset":"scope.b","namespace":"resource.namespace","type":"metrics"},"dp":{"attribute":"dp.attribute.value"},"metric":{"bar":1.0,"foo":1.0}}`), }, { Action: []byte(`{"create":{"_index":"metrics-scope.b-resource.namespace"}}`), - Document: []byte(`{"@timestamp":"1970-01-01T00:00:00.000000000Z","data_stream":{"dataset":"scope.b","namespace":"resource.namespace","type":"metrics"},"metric":{"bar":1,"foo":1}}`), + Document: []byte(`{"@timestamp":"1970-01-01T00:00:00.000000000Z","data_stream":{"dataset":"scope.b","namespace":"resource.namespace","type":"metrics"},"metric":{"bar":1.0,"foo":1}}`), }, { Action: []byte(`{"create":{"_index":"metrics-scope.b-resource.namespace"}}`), - Document: []byte(`{"@timestamp":"1970-01-01T01:00:00.000000000Z","data_stream":{"dataset":"scope.b","namespace":"resource.namespace","type":"metrics"},"metric":{"baz":1}}`), + Document: []byte(`{"@timestamp":"1970-01-01T01:00:00.000000000Z","data_stream":{"dataset":"scope.b","namespace":"resource.namespace","type":"metrics"},"metric":{"baz":1.0}}`), }, } @@ -707,11 +707,51 @@ func TestExporterMetrics(t *testing.T) { expected := []itemRequest{ { Action: []byte(`{"create":{"_index":"metrics-generic-default"}}`), - Document: []byte(`{"@timestamp":"1970-01-01T00:00:00.000000000Z","data_stream":{"dataset":"generic","namespace":"default","type":"metrics"},"metric":{"foo":{"counts":[1,2,3,4],"values":[0.5,1.5,2.5,3]}}}`), + Document: []byte(`{"@timestamp":"1970-01-01T00:00:00.000000000Z","data_stream":{"dataset":"generic","namespace":"default","type":"metrics"},"metric":{"foo":{"counts":[1,2,3,4],"values":[0.5,1.5,2.5,3.0]}}}`), }, { Action: []byte(`{"create":{"_index":"metrics-generic-default"}}`), - Document: []byte(`{"@timestamp":"1970-01-01T01:00:00.000000000Z","data_stream":{"dataset":"generic","namespace":"default","type":"metrics"},"metric":{"foo":{"counts":[4,5,6,7],"values":[2,4.5,5.5,6]}}}`), + Document: 
[]byte(`{"@timestamp":"1970-01-01T01:00:00.000000000Z","data_stream":{"dataset":"generic","namespace":"default","type":"metrics"},"metric":{"foo":{"counts":[4,5,6,7],"values":[2.0,4.5,5.5,6.0]}}}`), + }, + } + + assertItemsEqual(t, expected, rec.Items(), false) + }) + + t.Run("publish exponential histogram", func(t *testing.T) { + rec := newBulkRecorder() + server := newESTestServer(t, func(docs []itemRequest) ([]itemResponse, error) { + rec.Record(docs) + return itemsAllOK(docs) + }) + + exporter := newTestMetricsExporter(t, server.URL, func(cfg *Config) { + cfg.Mapping.Mode = "ecs" + }) + + metrics := pmetric.NewMetrics() + resourceMetrics := metrics.ResourceMetrics().AppendEmpty() + scopeA := resourceMetrics.ScopeMetrics().AppendEmpty() + metricSlice := scopeA.Metrics() + fooMetric := metricSlice.AppendEmpty() + fooMetric.SetName("metric.foo") + fooDps := fooMetric.SetEmptyExponentialHistogram().DataPoints() + fooDp := fooDps.AppendEmpty() + fooDp.SetZeroCount(2) + fooDp.Positive().SetOffset(1) + fooDp.Positive().BucketCounts().FromRaw([]uint64{0, 1, 1, 0}) + + fooDp.Negative().SetOffset(1) + fooDp.Negative().BucketCounts().FromRaw([]uint64{1, 0, 0, 1}) + + mustSendMetrics(t, exporter, metrics) + + rec.WaitItems(1) + + expected := []itemRequest{ + { + Action: []byte(`{"create":{"_index":"metrics-generic-default"}}`), + Document: []byte(`{"@timestamp":"1970-01-01T00:00:00.000000000Z","data_stream":{"dataset":"generic","namespace":"default","type":"metrics"},"metric":{"foo":{"counts":[1,1,2,1,1],"values":[-24.0,-3.0,0.0,6.0,12.0]}}}`), }, } @@ -760,11 +800,11 @@ func TestExporterMetrics(t *testing.T) { expected := []itemRequest{ { Action: []byte(`{"create":{"_index":"metrics-generic-default"}}`), - Document: []byte(`{"@timestamp":"1970-01-01T00:00:00.000000000Z","data_stream":{"dataset":"generic","namespace":"default","type":"metrics"},"metric":{"bar":1}}`), + Document: 
[]byte(`{"@timestamp":"1970-01-01T00:00:00.000000000Z","data_stream":{"dataset":"generic","namespace":"default","type":"metrics"},"metric":{"bar":1.0}}`), }, { Action: []byte(`{"create":{"_index":"metrics-generic-default"}}`), - Document: []byte(`{"@timestamp":"1970-01-01T01:00:00.000000000Z","data_stream":{"dataset":"generic","namespace":"default","type":"metrics"},"metric":{"foo":{"counts":[4,5,6,7],"values":[2,4.5,5.5,6]}}}`), + Document: []byte(`{"@timestamp":"1970-01-01T01:00:00.000000000Z","data_stream":{"dataset":"generic","namespace":"default","type":"metrics"},"metric":{"foo":{"counts":[4,5,6,7],"values":[2.0,4.5,5.5,6.0]}}}`), }, } @@ -821,11 +861,11 @@ func TestExporterMetrics(t *testing.T) { expected := []itemRequest{ { Action: []byte(`{"create":{"_index":"metrics-generic.otel-default","dynamic_templates":{"metrics.metric.foo":"histogram"}}}`), - Document: []byte(`{"@timestamp":"1970-01-01T00:00:00.000000000Z","data_stream":{"dataset":"generic.otel","namespace":"default","type":"metrics"},"metrics":{"metric.foo":{"counts":[1,2,3,4],"values":[0.5,1.5,2.5,3]}},"resource":{"dropped_attributes_count":0},"scope":{"dropped_attributes_count":0}}`), + Document: []byte(`{"@timestamp":"1970-01-01T00:00:00.000000000Z","data_stream":{"dataset":"generic.otel","namespace":"default","type":"metrics"},"metrics":{"metric.foo":{"counts":[1,2,3,4],"values":[0.5,1.5,2.5,3.0]}},"resource":{"dropped_attributes_count":0},"scope":{"dropped_attributes_count":0}}`), }, { Action: []byte(`{"create":{"_index":"metrics-generic.otel-default","dynamic_templates":{"metrics.metric.foo":"histogram"}}}`), - Document: []byte(`{"@timestamp":"1970-01-01T01:00:00.000000000Z","data_stream":{"dataset":"generic.otel","namespace":"default","type":"metrics"},"metrics":{"metric.foo":{"counts":[4,5,6,7],"values":[2,4.5,5.5,6]}},"resource":{"dropped_attributes_count":0},"scope":{"dropped_attributes_count":0}}`), + Document: 
[]byte(`{"@timestamp":"1970-01-01T01:00:00.000000000Z","data_stream":{"dataset":"generic.otel","namespace":"default","type":"metrics"},"metrics":{"metric.foo":{"counts":[4,5,6,7],"values":[2.0,4.5,5.5,6.0]}},"resource":{"dropped_attributes_count":0},"scope":{"dropped_attributes_count":0}}`), }, { Action: []byte(`{"create":{"_index":"metrics-generic.otel-default","dynamic_templates":{"metrics.metric.sum":"gauge_double"}}}`), @@ -877,7 +917,7 @@ func TestExporterMetrics(t *testing.T) { }, { Action: []byte(`{"create":{"_index":"metrics-generic-default"}}`), - Document: []byte(`{"@timestamp":"1970-01-01T01:00:00.000000000Z","data_stream":{"dataset":"generic","namespace":"default","type":"metrics"},"metric":{"foo":{"sum":2,"value_count":3}}}`), + Document: []byte(`{"@timestamp":"1970-01-01T01:00:00.000000000Z","data_stream":{"dataset":"generic","namespace":"default","type":"metrics"},"metric":{"foo":{"sum":2.0,"value_count":3}}}`), }, } @@ -1060,6 +1100,11 @@ func TestExporterTraces(t *testing.T) { span.SetStartTimestamp(pcommon.NewTimestampFromTime(time.Unix(3600, 0))) span.SetEndTimestamp(pcommon.NewTimestampFromTime(time.Unix(7200, 0))) + event := span.Events().AppendEmpty() + event.SetName("exception") + event.Attributes().PutStr("event.attr.foo", "event.attr.bar") + event.SetDroppedAttributesCount(1) + scopeAttr := span.Attributes() fillResourceAttributeMap(scopeAttr, map[string]string{ "attr.foo": "attr.bar", @@ -1082,13 +1127,17 @@ func TestExporterTraces(t *testing.T) { mustSendTraces(t, exporter, traces) - rec.WaitItems(1) + rec.WaitItems(2) expected := []itemRequest{ { Action: []byte(`{"create":{"_index":"traces-generic.otel-default"}}`), Document: 
[]byte(`{"@timestamp":"1970-01-01T01:00:00.000000000Z","attributes":{"attr.foo":"attr.bar"},"data_stream":{"dataset":"generic.otel","namespace":"default","type":"traces"},"dropped_attributes_count":2,"dropped_events_count":3,"dropped_links_count":4,"duration":3600000000000,"kind":"Unspecified","links":[{"attributes":{"link.attr.foo":"link.attr.bar"},"dropped_attributes_count":11,"span_id":"","trace_id":"","trace_state":"bar"}],"name":"name","resource":{"attributes":{"resource.foo":"resource.bar"},"dropped_attributes_count":0},"scope":{"dropped_attributes_count":0},"status":{"code":"Unset"},"trace_state":"foo"}`), }, + { + Action: []byte(`{"create":{"_index":"logs-generic.otel-default"}}`), + Document: []byte(`{"@timestamp":"1970-01-01T00:00:00.000000000Z","attributes":{"event.attr.foo":"event.attr.bar","event.name":"exception"},"data_stream":{"dataset":"generic.otel","namespace":"default","type":"logs"},"dropped_attributes_count":1,"resource":{"attributes":{"resource.foo":"resource.bar"},"dropped_attributes_count":0},"scope":{"dropped_attributes_count":0}}`), + }, } assertItemsEqual(t, expected, rec.Items(), false) diff --git a/exporter/elasticsearchexporter/internal/exphistogram/exphistogram.go b/exporter/elasticsearchexporter/internal/exphistogram/exphistogram.go new file mode 100644 index 000000000000..255328f38f1d --- /dev/null +++ b/exporter/elasticsearchexporter/internal/exphistogram/exphistogram.go @@ -0,0 +1,66 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package exphistogram contains utility functions for exponential histogram conversions. +package exphistogram // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/elasticsearchexporter/internal/exphistogram" + +import ( + "math" + + "go.opentelemetry.io/collector/pdata/pmetric" +) + +// LowerBoundary calculates the lower boundary given index and scale. 
+// Adopted from https://opentelemetry.io/docs/specs/otel/metrics/data-model/#producer-expectations +func LowerBoundary(index, scale int) float64 { + if scale <= 0 { + return LowerBoundaryNegativeScale(index, scale) + } + // Use this form in case the equation above computes +Inf + // as the lower boundary of a valid bucket. + inverseFactor := math.Ldexp(math.Ln2, -scale) + return 2.0 * math.Exp(float64(index-(1<= 0; i-- { + count := bucketCounts.At(i) + if count == 0 { + continue + } + lb := -LowerBoundary(offset+i+1, scale) + ub := -LowerBoundary(offset+i, scale) + counts = append(counts, int64(count)) + values = append(values, lb+(ub-lb)/2) + } + + if zeroCount := dp.ZeroCount(); zeroCount != 0 { + counts = append(counts, int64(zeroCount)) + values = append(values, 0) + } + + offset = int(dp.Positive().Offset()) + bucketCounts = dp.Positive().BucketCounts() + for i := 0; i < bucketCounts.Len(); i++ { + count := bucketCounts.At(i) + if count == 0 { + continue + } + lb := LowerBoundary(offset+i, scale) + ub := LowerBoundary(offset+i+1, scale) + counts = append(counts, int64(count)) + values = append(values, lb+(ub-lb)/2) + } + return +} diff --git a/exporter/elasticsearchexporter/internal/exphistogram/exphistogram_test.go b/exporter/elasticsearchexporter/internal/exphistogram/exphistogram_test.go new file mode 100644 index 000000000000..654b765eab1a --- /dev/null +++ b/exporter/elasticsearchexporter/internal/exphistogram/exphistogram_test.go @@ -0,0 +1,135 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package exphistogram + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "go.opentelemetry.io/collector/pdata/pmetric" +) + +func TestToTDigest(t *testing.T) { + for _, tc := range []struct { + name string + scale int32 + zeroCount uint64 + positiveOffset int32 + positiveBuckets []uint64 + negativeOffset int32 + negativeBuckets []uint64 + + expectedCounts []int64 + expectedValues []float64 + }{ + { + name: 
"empty", + scale: 0, + expectedCounts: nil, + expectedValues: nil, + }, + { + name: "empty, scale=1", + scale: 1, + expectedCounts: nil, + expectedValues: nil, + }, + { + name: "empty, scale=-1", + scale: -1, + expectedCounts: nil, + expectedValues: nil, + }, + { + name: "zeros", + scale: 0, + zeroCount: 1, + expectedCounts: []int64{1}, + expectedValues: []float64{0}, + }, + { + name: "scale=0", + scale: 0, + zeroCount: 1, + positiveBuckets: []uint64{1, 1}, + negativeBuckets: []uint64{1, 1}, + expectedCounts: []int64{1, 1, 1, 1, 1}, + expectedValues: []float64{-3, -1.5, 0, 1.5, 3}, + }, + { + name: "scale=0, no zeros", + scale: 0, + zeroCount: 0, + positiveBuckets: []uint64{1, 1}, + negativeBuckets: []uint64{1, 1}, + expectedCounts: []int64{1, 1, 1, 1}, + expectedValues: []float64{-3, -1.5, 1.5, 3}, + }, + { + name: "scale=0, offset=1", + scale: 0, + zeroCount: 1, + positiveOffset: 1, + positiveBuckets: []uint64{1, 1}, + negativeOffset: 1, + negativeBuckets: []uint64{1, 1}, + expectedCounts: []int64{1, 1, 1, 1, 1}, + expectedValues: []float64{-6, -3, 0, 3, 6}, + }, + { + name: "scale=0, offset=-1", + scale: 0, + zeroCount: 1, + positiveOffset: -1, + positiveBuckets: []uint64{1, 1}, + negativeOffset: -1, + negativeBuckets: []uint64{1, 1}, + expectedCounts: []int64{1, 1, 1, 1, 1}, + expectedValues: []float64{-1.5, -0.75, 0, 0.75, 1.5}, + }, + { + name: "scale=0, different offsets", + scale: 0, + zeroCount: 1, + positiveOffset: -1, + positiveBuckets: []uint64{1, 1}, + negativeOffset: 1, + negativeBuckets: []uint64{1, 1}, + expectedCounts: []int64{1, 1, 1, 1, 1}, + expectedValues: []float64{-6, -3, 0, 0.75, 1.5}, + }, + { + name: "scale=-1", + scale: -1, + zeroCount: 1, + positiveBuckets: []uint64{1, 1}, + negativeBuckets: []uint64{1, 1}, + expectedCounts: []int64{1, 1, 1, 1, 1}, + expectedValues: []float64{-10, -2.5, 0, 2.5, 10}, + }, + { + name: "scale=1", + scale: 1, + zeroCount: 1, + positiveBuckets: []uint64{1, 1}, + negativeBuckets: []uint64{1, 1}, + 
expectedCounts: []int64{1, 1, 1, 1, 1}, + expectedValues: []float64{-1.7071067811865475, -1.2071067811865475, 0, 1.2071067811865475, 1.7071067811865475}, + }, + } { + t.Run(tc.name, func(t *testing.T) { + dp := pmetric.NewExponentialHistogramDataPoint() + dp.SetScale(tc.scale) + dp.SetZeroCount(tc.zeroCount) + dp.Positive().SetOffset(tc.positiveOffset) + dp.Positive().BucketCounts().FromRaw(tc.positiveBuckets) + dp.Negative().SetOffset(tc.negativeOffset) + dp.Negative().BucketCounts().FromRaw(tc.negativeBuckets) + + counts, values := ToTDigest(dp) + assert.Equal(t, tc.expectedCounts, counts) + assert.Equal(t, tc.expectedValues, values) + }) + } +} diff --git a/exporter/elasticsearchexporter/internal/objmodel/objmodel.go b/exporter/elasticsearchexporter/internal/objmodel/objmodel.go index 0907f2fe2223..f20f9b1d213b 100644 --- a/exporter/elasticsearchexporter/internal/objmodel/objmodel.go +++ b/exporter/elasticsearchexporter/internal/objmodel/objmodel.go @@ -255,11 +255,19 @@ func (doc *Document) Dedup() { } } +func newJSONVisitor(w io.Writer) *json.Visitor { + v := json.NewVisitor(w) + // Enable ExplicitRadixPoint such that 1.0 is encoded as 1.0 instead of 1. + // This is required to generate the correct dynamic mapping in ES. + v.SetExplicitRadixPoint(true) + return v +} + // Serialize writes the document to the given writer. The serializer will create nested objects if dedot is true. // // NOTE: The documented MUST be sorted if dedot is true. 
func (doc *Document) Serialize(w io.Writer, dedot bool, otel bool) error { - v := json.NewVisitor(w) + v := newJSONVisitor(w) return doc.iterJSON(v, dedot, otel) } diff --git a/exporter/elasticsearchexporter/internal/objmodel/objmodel_test.go b/exporter/elasticsearchexporter/internal/objmodel/objmodel_test.go index b0f791700567..1961f716db05 100644 --- a/exporter/elasticsearchexporter/internal/objmodel/objmodel_test.go +++ b/exporter/elasticsearchexporter/internal/objmodel/objmodel_test.go @@ -9,7 +9,6 @@ import ( "testing" "time" - "github.com/elastic/go-structform/json" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/pdata/pcommon" @@ -358,14 +357,15 @@ func TestValue_Serialize(t *testing.T) { value Value want string }{ - "nil value": {value: nilValue, want: "null"}, - "bool value: true": {value: BoolValue(true), want: "true"}, - "bool value: false": {value: BoolValue(false), want: "false"}, - "int value": {value: IntValue(42), want: "42"}, - "double value": {value: DoubleValue(3.14), want: "3.14"}, - "NaN is undefined": {value: DoubleValue(math.NaN()), want: "null"}, - "Inf is undefined": {value: DoubleValue(math.Inf(0)), want: "null"}, - "string value": {value: StringValue("Hello World!"), want: `"Hello World!"`}, + "nil value": {value: nilValue, want: "null"}, + "bool value: true": {value: BoolValue(true), want: "true"}, + "bool value: false": {value: BoolValue(false), want: "false"}, + "int value": {value: IntValue(42), want: "42"}, + "double value: 3.14": {value: DoubleValue(3.14), want: "3.14"}, + "double value: 1.0": {value: DoubleValue(1.0), want: "1.0"}, + "NaN is undefined": {value: DoubleValue(math.NaN()), want: "null"}, + "Inf is undefined": {value: DoubleValue(math.Inf(0)), want: "null"}, + "string value": {value: StringValue("Hello World!"), want: `"Hello World!"`}, "timestamp": { value: TimestampValue(dijkstra), want: `"1930-05-11T16:33:11.123456789Z"`, @@ -391,7 
+391,7 @@ func TestValue_Serialize(t *testing.T) { for name, test := range tests { t.Run(name, func(t *testing.T) { var buf strings.Builder - err := test.value.iterJSON(json.NewVisitor(&buf), false, false) + err := test.value.iterJSON(newJSONVisitor(&buf), false, false) require.NoError(t, err) assert.Equal(t, test.want, buf.String()) }) diff --git a/exporter/elasticsearchexporter/model.go b/exporter/elasticsearchexporter/model.go index bdf030bfc282..55af4eb45db9 100644 --- a/exporter/elasticsearchexporter/model.go +++ b/exporter/elasticsearchexporter/model.go @@ -20,6 +20,7 @@ import ( "go.opentelemetry.io/collector/pdata/ptrace" semconv "go.opentelemetry.io/collector/semconv/v1.22.0" + "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/elasticsearchexporter/internal/exphistogram" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/elasticsearchexporter/internal/objmodel" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/traceutil" ) @@ -66,6 +67,7 @@ var resourceAttrsToPreserve = map[string]bool{ type mappingModel interface { encodeLog(pcommon.Resource, string, plog.LogRecord, pcommon.InstrumentationScope, string) ([]byte, error) encodeSpan(pcommon.Resource, string, ptrace.Span, pcommon.InstrumentationScope, string) ([]byte, error) + encodeSpanEvent(resource pcommon.Resource, resourceSchemaURL string, span ptrace.Span, spanEvent ptrace.SpanEvent, scope pcommon.InstrumentationScope, scopeSchemaURL string) *objmodel.Document upsertMetricDataPointValue(map[uint32]objmodel.Document, pcommon.Resource, string, pcommon.InstrumentationScope, string, pmetric.Metric, dataPoint, pcommon.Value) error encodeDocument(objmodel.Document) ([]byte, error) } @@ -353,6 +355,25 @@ func summaryToValue(dp pmetric.SummaryDataPoint) pcommon.Value { return vm } +func exponentialHistogramToValue(dp pmetric.ExponentialHistogramDataPoint) pcommon.Value { + counts, values := exphistogram.ToTDigest(dp) + + 
vm := pcommon.NewValueMap() + m := vm.Map() + vmCounts := m.PutEmptySlice("counts") + vmCounts.EnsureCapacity(len(counts)) + for _, c := range counts { + vmCounts.AppendEmpty().SetInt(c) + } + vmValues := m.PutEmptySlice("values") + vmValues.EnsureCapacity(len(values)) + for _, v := range values { + vmValues.AppendEmpty().SetDouble(v) + } + + return vm +} + func histogramToValue(dp pmetric.HistogramDataPoint) (pcommon.Value, error) { // Histogram conversion function is from // https://github.com/elastic/apm-data/blob/3b28495c3cbdc0902983134276eb114231730249/input/otlp/metrics.go#L277 @@ -463,7 +484,9 @@ func (m *encodeModel) encodeScopeOTelMode(document *objmodel.Document, scope pco } func (m *encodeModel) encodeAttributesOTelMode(document *objmodel.Document, attributeMap pcommon.Map) { - attributeMap.RemoveIf(func(key string, val pcommon.Value) bool { + attrsCopy := pcommon.NewMap() // Copy to avoid mutating original map + attributeMap.CopyTo(attrsCopy) + attrsCopy.RemoveIf(func(key string, val pcommon.Value) bool { switch key { case dataStreamType, dataStreamDataset, dataStreamNamespace: // At this point the data_stream attributes are expected to be in the record attributes, @@ -474,7 +497,7 @@ func (m *encodeModel) encodeAttributesOTelMode(document *objmodel.Document, attr } return false }) - document.AddAttributes("attributes", attributeMap) + document.AddAttributes("attributes", attrsCopy) } func (m *encodeModel) encodeSpan(resource pcommon.Resource, resourceSchemaURL string, span ptrace.Span, scope pcommon.InstrumentationScope, scopeSchemaURL string) ([]byte, error) { @@ -529,8 +552,6 @@ func (m *encodeModel) encodeSpanOTelMode(resource pcommon.Resource, resourceSche m.encodeResourceOTelMode(&document, resource, resourceSchemaURL) m.encodeScopeOTelMode(&document, scope, scopeSchemaURL) - // TODO: add span events to log data streams - return document } @@ -554,6 +575,26 @@ func (m *encodeModel) encodeSpanDefaultMode(resource pcommon.Resource, span ptra 
return document } +func (m *encodeModel) encodeSpanEvent(resource pcommon.Resource, resourceSchemaURL string, span ptrace.Span, spanEvent ptrace.SpanEvent, scope pcommon.InstrumentationScope, scopeSchemaURL string) *objmodel.Document { + if m.mode != MappingOTel { + // Currently span events are stored separately only in OTel mapping mode. + // In other modes, they are stored within the span document. + return nil + } + var document objmodel.Document + document.AddTimestamp("@timestamp", spanEvent.Timestamp()) + document.AddString("attributes.event.name", spanEvent.Name()) + document.AddSpanID("span_id", span.SpanID()) + document.AddTraceID("trace_id", span.TraceID()) + document.AddInt("dropped_attributes_count", int64(spanEvent.DroppedAttributesCount())) + + m.encodeAttributesOTelMode(&document, spanEvent.Attributes()) + m.encodeResourceOTelMode(&document, resource, resourceSchemaURL) + m.encodeScopeOTelMode(&document, scope, scopeSchemaURL) + + return &document +} + func (m *encodeModel) encodeAttributes(document *objmodel.Document, attributes pcommon.Map) { key := "Attributes" if m.mode == MappingRaw { diff --git a/exporter/elasticsearchexporter/model_test.go b/exporter/elasticsearchexporter/model_test.go index eb9aa1b5453c..0433ee561845 100644 --- a/exporter/elasticsearchexporter/model_test.go +++ b/exporter/elasticsearchexporter/model_test.go @@ -32,18 +32,18 @@ var expectedSpanBody = `{"@timestamp":"2023-04-19T03:04:05.000000006Z","Attribut var expectedLogBody = `{"@timestamp":"2023-04-19T03:04:05.000000006Z","Attributes.log-attr1":"value1","Body":"log-body","Resource.key1":"value1","Scope.name":"","Scope.version":"","SeverityNumber":0,"TraceFlags":0}` var expectedMetricsEncoded = `{"@timestamp":"2024-06-12T10:20:16.419290690Z","cpu":"cpu0","host":{"hostname":"my-host","name":"my-host","os":{"platform":"linux"}},"state":"idle","system":{"cpu":{"time":440.23}}} 
-{"@timestamp":"2024-06-12T10:20:16.419290690Z","cpu":"cpu0","host":{"hostname":"my-host","name":"my-host","os":{"platform":"linux"}},"state":"interrupt","system":{"cpu":{"time":0}}} +{"@timestamp":"2024-06-12T10:20:16.419290690Z","cpu":"cpu0","host":{"hostname":"my-host","name":"my-host","os":{"platform":"linux"}},"state":"interrupt","system":{"cpu":{"time":0.0}}} {"@timestamp":"2024-06-12T10:20:16.419290690Z","cpu":"cpu0","host":{"hostname":"my-host","name":"my-host","os":{"platform":"linux"}},"state":"nice","system":{"cpu":{"time":0.14}}} {"@timestamp":"2024-06-12T10:20:16.419290690Z","cpu":"cpu0","host":{"hostname":"my-host","name":"my-host","os":{"platform":"linux"}},"state":"softirq","system":{"cpu":{"time":0.77}}} -{"@timestamp":"2024-06-12T10:20:16.419290690Z","cpu":"cpu0","host":{"hostname":"my-host","name":"my-host","os":{"platform":"linux"}},"state":"steal","system":{"cpu":{"time":0}}} +{"@timestamp":"2024-06-12T10:20:16.419290690Z","cpu":"cpu0","host":{"hostname":"my-host","name":"my-host","os":{"platform":"linux"}},"state":"steal","system":{"cpu":{"time":0.0}}} {"@timestamp":"2024-06-12T10:20:16.419290690Z","cpu":"cpu0","host":{"hostname":"my-host","name":"my-host","os":{"platform":"linux"}},"state":"system","system":{"cpu":{"time":24.8}}} {"@timestamp":"2024-06-12T10:20:16.419290690Z","cpu":"cpu0","host":{"hostname":"my-host","name":"my-host","os":{"platform":"linux"}},"state":"user","system":{"cpu":{"time":64.78}}} {"@timestamp":"2024-06-12T10:20:16.419290690Z","cpu":"cpu0","host":{"hostname":"my-host","name":"my-host","os":{"platform":"linux"}},"state":"wait","system":{"cpu":{"time":1.65}}} {"@timestamp":"2024-06-12T10:20:16.419290690Z","cpu":"cpu1","host":{"hostname":"my-host","name":"my-host","os":{"platform":"linux"}},"state":"idle","system":{"cpu":{"time":475.69}}} -{"@timestamp":"2024-06-12T10:20:16.419290690Z","cpu":"cpu1","host":{"hostname":"my-host","name":"my-host","os":{"platform":"linux"}},"state":"interrupt","system":{"cpu":{"time":0}}} 
+{"@timestamp":"2024-06-12T10:20:16.419290690Z","cpu":"cpu1","host":{"hostname":"my-host","name":"my-host","os":{"platform":"linux"}},"state":"interrupt","system":{"cpu":{"time":0.0}}} {"@timestamp":"2024-06-12T10:20:16.419290690Z","cpu":"cpu1","host":{"hostname":"my-host","name":"my-host","os":{"platform":"linux"}},"state":"nice","system":{"cpu":{"time":0.1}}} {"@timestamp":"2024-06-12T10:20:16.419290690Z","cpu":"cpu1","host":{"hostname":"my-host","name":"my-host","os":{"platform":"linux"}},"state":"softirq","system":{"cpu":{"time":0.57}}} -{"@timestamp":"2024-06-12T10:20:16.419290690Z","cpu":"cpu1","host":{"hostname":"my-host","name":"my-host","os":{"platform":"linux"}},"state":"steal","system":{"cpu":{"time":0}}} +{"@timestamp":"2024-06-12T10:20:16.419290690Z","cpu":"cpu1","host":{"hostname":"my-host","name":"my-host","os":{"platform":"linux"}},"state":"steal","system":{"cpu":{"time":0.0}}} {"@timestamp":"2024-06-12T10:20:16.419290690Z","cpu":"cpu1","host":{"hostname":"my-host","name":"my-host","os":{"platform":"linux"}},"state":"system","system":{"cpu":{"time":15.88}}} {"@timestamp":"2024-06-12T10:20:16.419290690Z","cpu":"cpu1","host":{"hostname":"my-host","name":"my-host","os":{"platform":"linux"}},"state":"user","system":{"cpu":{"time":50.09}}} {"@timestamp":"2024-06-12T10:20:16.419290690Z","cpu":"cpu1","host":{"hostname":"my-host","name":"my-host","os":{"platform":"linux"}},"state":"wait","system":{"cpu":{"time":0.95}}}` diff --git a/exporter/elasticsearchexporter/utils_test.go b/exporter/elasticsearchexporter/utils_test.go index ca73aaddf844..09403a24271b 100644 --- a/exporter/elasticsearchexporter/utils_test.go +++ b/exporter/elasticsearchexporter/utils_test.go @@ -300,21 +300,21 @@ func TestGetSuffixTime(t *testing.T) { testTime := time.Date(2023, 12, 2, 10, 10, 10, 1, time.UTC) index, err := generateIndexWithLogstashFormat(defaultCfg.LogsIndex, &defaultCfg.LogstashFormat, testTime) assert.NoError(t, err) - assert.Equal(t, index, 
"logs-generic-default-2023.12.02") + assert.Equal(t, "logs-generic-default-2023.12.02", index) defaultCfg.LogsIndex = "logstash" defaultCfg.LogstashFormat.PrefixSeparator = "." otelLogsIndex, err := generateIndexWithLogstashFormat(defaultCfg.LogsIndex, &defaultCfg.LogstashFormat, testTime) assert.NoError(t, err) - assert.Equal(t, otelLogsIndex, "logstash.2023.12.02") + assert.Equal(t, "logstash.2023.12.02", otelLogsIndex) defaultCfg.LogstashFormat.DateFormat = "%Y-%m-%d" newOtelLogsIndex, err := generateIndexWithLogstashFormat(defaultCfg.LogsIndex, &defaultCfg.LogstashFormat, testTime) assert.NoError(t, err) - assert.Equal(t, newOtelLogsIndex, "logstash.2023-12-02") + assert.Equal(t, "logstash.2023-12-02", newOtelLogsIndex) defaultCfg.LogstashFormat.DateFormat = "%d/%m/%Y" newOtelLogsIndexWithSpecDataFormat, err := generateIndexWithLogstashFormat(defaultCfg.LogsIndex, &defaultCfg.LogstashFormat, testTime) assert.NoError(t, err) - assert.Equal(t, newOtelLogsIndexWithSpecDataFormat, "logstash.02/12/2023") + assert.Equal(t, "logstash.02/12/2023", newOtelLogsIndexWithSpecDataFormat) } diff --git a/exporter/googlecloudpubsubexporter/go.mod b/exporter/googlecloudpubsubexporter/go.mod index 69cdb0a27019..3c305d6c2a85 100644 --- a/exporter/googlecloudpubsubexporter/go.mod +++ b/exporter/googlecloudpubsubexporter/go.mod @@ -13,7 +13,7 @@ require ( go.opentelemetry.io/collector/exporter v0.108.2-0.20240904075637-48b11ba1c5f8 go.opentelemetry.io/collector/pdata v1.14.2-0.20240904075637-48b11ba1c5f8 go.uber.org/zap v1.27.0 - google.golang.org/api v0.194.0 + google.golang.org/api v0.195.0 google.golang.org/grpc v1.66.0 ) @@ -22,7 +22,7 @@ require ( cloud.google.com/go/auth v0.9.1 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect cloud.google.com/go/compute/metadata v0.5.0 // indirect - cloud.google.com/go/iam v1.1.12 // indirect + cloud.google.com/go/iam v1.1.13 // indirect github.com/beorn7/perks v1.0.1 // indirect 
github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect @@ -80,9 +80,9 @@ require ( golang.org/x/sys v0.24.0 // indirect golang.org/x/text v0.17.0 // indirect golang.org/x/time v0.6.0 // indirect - google.golang.org/genproto v0.0.0-20240814211410-ddb44dafa142 // indirect + google.golang.org/genproto v0.0.0-20240823204242-4ba0660f739c // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240822170219-fc7c04adadcd // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240823204242-4ba0660f739c // indirect google.golang.org/protobuf v1.34.2 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/exporter/googlecloudpubsubexporter/go.sum b/exporter/googlecloudpubsubexporter/go.sum index a93ff6872bfa..56242959cb0f 100644 --- a/exporter/googlecloudpubsubexporter/go.sum +++ b/exporter/googlecloudpubsubexporter/go.sum @@ -7,8 +7,8 @@ cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc= cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY= cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= -cloud.google.com/go/iam v1.1.12 h1:JixGLimRrNGcxvJEQ8+clfLxPlbeZA6MuRJ+qJNQ5Xw= -cloud.google.com/go/iam v1.1.12/go.mod h1:9LDX8J7dN5YRyzVHxwQzrQs9opFFqn0Mxs9nAeB+Hhg= +cloud.google.com/go/iam v1.1.13 h1:7zWBXG9ERbMLrzQBRhFliAV+kjcRToDTgQT3CTwYyv4= +cloud.google.com/go/iam v1.1.13/go.mod h1:K8mY0uSXwEXS30KrnVb+j54LB/ntfZu1dr+4zFMNbus= cloud.google.com/go/pubsub v1.42.0 h1:PVTbzorLryFL5ue8esTS2BfehUs0ahyNOY9qcd+HMOs= cloud.google.com/go/pubsub v1.42.0/go.mod h1:KADJ6s4MbTwhXmse/50SebEhE4SmUwHi48z3/dHar1Y= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= @@ 
-244,19 +244,19 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.194.0 h1:dztZKG9HgtIpbI35FhfuSNR/zmaMVdxNlntHj1sIS4s= -google.golang.org/api v0.194.0/go.mod h1:AgvUFdojGANh3vI+P7EVnxj3AISHllxGCJSFmggmnd0= +google.golang.org/api v0.195.0 h1:Ude4N8FvTKnnQJHU48RFI40jOBgIrL8Zqr3/QeST6yU= +google.golang.org/api v0.195.0/go.mod h1:DOGRWuv3P8TU8Lnz7uQc4hyNqrBpMtD9ppW3wBJurgc= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20240814211410-ddb44dafa142 h1:oLiyxGgE+rt22duwci1+TG7bg2/L1LQsXwfjPlmuJA0= -google.golang.org/genproto v0.0.0-20240814211410-ddb44dafa142/go.mod h1:G11eXq53iI5Q+kyNOmCvnzBaxEA2Q/Ik5Tj7nqBE8j4= +google.golang.org/genproto v0.0.0-20240823204242-4ba0660f739c h1:TYOEhrQMrNDTAd2rX9m+WgGr8Ku6YNuj1D7OX6rWSok= +google.golang.org/genproto v0.0.0-20240823204242-4ba0660f739c/go.mod h1:2rC5OendXvZ8wGEo/cSLheztrZDZaSoHanUcd1xtZnw= google.golang.org/genproto/googleapis/api v0.0.0-20240822170219-fc7c04adadcd h1:BBOTEWLuuEGQy9n1y9MhVJ9Qt0BDu21X8qZs71/uPZo= google.golang.org/genproto/googleapis/api v0.0.0-20240822170219-fc7c04adadcd/go.mod 
h1:fO8wJzT2zbQbAjbIoos1285VfEIYKDDY+Dt+WpTkh6g= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd h1:6TEm2ZxXoQmFWFlt1vNxvVOa1Q0dXFQD1m/rYjXmS0E= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240823204242-4ba0660f739c h1:Kqjm4WpoWvwhMPcrAczoTyMySQmYa9Wy2iL6Con4zn8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240823204242-4ba0660f739c/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= diff --git a/exporter/googlemanagedprometheusexporter/config_test.go b/exporter/googlemanagedprometheusexporter/config_test.go index 5f9c118eed5c..70c1a405961d 100644 --- a/exporter/googlemanagedprometheusexporter/config_test.go +++ b/exporter/googlemanagedprometheusexporter/config_test.go @@ -38,52 +38,51 @@ func TestLoadConfig(t *testing.T) { assert.Equal(t, r0, factory.CreateDefaultConfig().(*Config)) r1 := cfg.Exporters[component.NewIDWithName(metadata.Type, "customname")].(*Config) - assert.Equal(t, r1, - &Config{ - TimeoutSettings: exporterhelper.TimeoutSettings{ - Timeout: 20 * time.Second, - }, - GMPConfig: GMPConfig{ - ProjectID: "my-project", - UserAgent: "opentelemetry-collector-contrib {{version}}", - MetricConfig: MetricConfig{ - Config: googlemanagedprometheus.Config{ - AddMetricSuffixes: false, - ExtraMetricsConfig: googlemanagedprometheus.ExtraMetricsConfig{ - EnableTargetInfo: false, - EnableScopeInfo: false, - }, + assert.Equal(t, &Config{ + TimeoutSettings: exporterhelper.TimeoutSettings{ + Timeout: 20 * time.Second, + }, + GMPConfig: GMPConfig{ + ProjectID: "my-project", + UserAgent: "opentelemetry-collector-contrib {{version}}", + 
MetricConfig: MetricConfig{ + Config: googlemanagedprometheus.Config{ + AddMetricSuffixes: false, + ExtraMetricsConfig: googlemanagedprometheus.ExtraMetricsConfig{ + EnableTargetInfo: false, + EnableScopeInfo: false, + }, + }, + Prefix: "my-metric-domain.com", + ResourceFilters: []collector.ResourceFilter{ + { + Prefix: "cloud", + }, + { + Prefix: "k8s", + }, + { + Prefix: "faas", }, - Prefix: "my-metric-domain.com", - ResourceFilters: []collector.ResourceFilter{ - { - Prefix: "cloud", - }, - { - Prefix: "k8s", - }, - { - Prefix: "faas", - }, - { - Regex: "container.id", - }, - { - Regex: "process.pid", - }, - { - Regex: "host.name", - }, - { - Regex: "host.id", - }, + { + Regex: "container.id", + }, + { + Regex: "process.pid", + }, + { + Regex: "host.name", + }, + { + Regex: "host.id", }, }, }, - QueueSettings: exporterhelper.QueueSettings{ - Enabled: true, - NumConsumers: 2, - QueueSize: 10, - }, - }) + }, + QueueSettings: exporterhelper.QueueSettings{ + Enabled: true, + NumConsumers: 2, + QueueSize: 10, + }, + }, r1) } diff --git a/exporter/kafkaexporter/config_test.go b/exporter/kafkaexporter/config_test.go index b3542d236438..da2cebf8e808 100644 --- a/exporter/kafkaexporter/config_test.go +++ b/exporter/kafkaexporter/config_test.go @@ -335,8 +335,8 @@ func Test_saramaProducerCompressionCodec(t *testing.T) { for name, test := range tests { t.Run(name, func(t *testing.T) { c, err := saramaProducerCompressionCodec(test.compression) - assert.Equal(t, c, test.expectedCompression) - assert.Equal(t, err, test.expectedError) + assert.Equal(t, test.expectedCompression, c) + assert.Equal(t, test.expectedError, err) }) } } diff --git a/exporter/kafkaexporter/factory_test.go b/exporter/kafkaexporter/factory_test.go index cc0df18074e5..55dce9c8505d 100644 --- a/exporter/kafkaexporter/factory_test.go +++ b/exporter/kafkaexporter/factory_test.go @@ -39,7 +39,7 @@ func TestCreateMetricExporter(t *testing.T) { name string conf *Config marshalers []MetricsMarshaler - err error 
+ err *net.DNSError }{ { name: "valid config (no validating broker)", @@ -104,7 +104,7 @@ func TestCreateLogExporter(t *testing.T) { name string conf *Config marshalers []LogsMarshaler - err error + err *net.DNSError }{ { name: "valid config (no validating broker)", @@ -169,7 +169,7 @@ func TestCreateTraceExporter(t *testing.T) { name string conf *Config marshalers []TracesMarshaler - err error + err *net.DNSError }{ { name: "valid config (no validating brokers)", diff --git a/exporter/loadbalancingexporter/resolver_k8s_test.go b/exporter/loadbalancingexporter/resolver_k8s_test.go index 3225f11fe535..b382d5624d37 100644 --- a/exporter/loadbalancingexporter/resolver_k8s_test.go +++ b/exporter/loadbalancingexporter/resolver_k8s_test.go @@ -245,7 +245,7 @@ func Test_newK8sResolver(t *testing.T) { _, tb := getTelemetryAssets(t) got, err := newK8sResolver(fake.NewSimpleClientset(), tt.args.logger, tt.args.service, tt.args.ports, defaultListWatchTimeout, tb) if tt.wantErr != nil { - require.Error(t, err, tt.wantErr) + require.ErrorIs(t, err, tt.wantErr) } else { require.NoError(t, err) require.Equal(t, tt.wantNil, got == nil) diff --git a/exporter/loadbalancingexporter/trace_exporter_test.go b/exporter/loadbalancingexporter/trace_exporter_test.go index 49a9cff2048d..370b0a5ddb72 100644 --- a/exporter/loadbalancingexporter/trace_exporter_test.go +++ b/exporter/loadbalancingexporter/trace_exporter_test.go @@ -131,7 +131,7 @@ func TestConsumeTraces(t *testing.T) { p, err := newTracesExporter(ts, simpleConfig()) require.NotNil(t, p) require.NoError(t, err) - assert.Equal(t, p.routingKey, traceIDRouting) + assert.Equal(t, traceIDRouting, p.routingKey) // pre-load an exporter here, so that we don't use the actual OTLP exporter lb.addMissingExporters(context.Background(), []string{"endpoint-1"}) @@ -179,7 +179,7 @@ func TestConsumeTraces_ConcurrentResolverChange(t *testing.T) { p, err := newTracesExporter(ts, simpleConfig()) require.NotNil(t, p) require.NoError(t, err) - 
assert.Equal(t, p.routingKey, traceIDRouting) + assert.Equal(t, traceIDRouting, p.routingKey) endpoints := []string{"endpoint-1"} lb.res = &mockResolver{ @@ -222,7 +222,7 @@ func TestConsumeTracesServiceBased(t *testing.T) { p, err := newTracesExporter(ts, serviceBasedRoutingConfig()) require.NotNil(t, p) require.NoError(t, err) - assert.Equal(t, p.routingKey, svcRouting) + assert.Equal(t, svcRouting, p.routingKey) // pre-load an exporter here, so that we don't use the actual OTLP exporter lb.addMissingExporters(context.Background(), []string{"endpoint-1"}) @@ -407,7 +407,7 @@ func TestBatchWithTwoTraces(t *testing.T) { // verify assert.NoError(t, err) assert.Len(t, sink.AllTraces(), 1) - assert.Equal(t, sink.AllTraces()[0].SpanCount(), 2) + assert.Equal(t, 2, sink.AllTraces()[0].SpanCount()) } func TestNoTracesInBatch(t *testing.T) { diff --git a/exporter/logzioexporter/factory_test.go b/exporter/logzioexporter/factory_test.go index bbf1b0d1ca44..bdfd78833d2a 100644 --- a/exporter/logzioexporter/factory_test.go +++ b/exporter/logzioexporter/factory_test.go @@ -92,6 +92,6 @@ func TestGetListenerURL(t *testing.T) { } for _, test := range getListenerURLTests { output := getListenerURL(test.arg1) - require.Equal(t, output, test.expected) + require.Equal(t, test.expected, output) } } diff --git a/exporter/logzioexporter/jsonlog_test.go b/exporter/logzioexporter/jsonlog_test.go index 8241553dcfd3..a3643a5dc81a 100644 --- a/exporter/logzioexporter/jsonlog_test.go +++ b/exporter/logzioexporter/jsonlog_test.go @@ -71,7 +71,7 @@ func TestConvertLogRecordToJSON(t *testing.T) { } for _, test := range convertLogRecordToJSONTests { output := convertLogRecordToJSON(test.log, test.log.Attributes()) - require.Equal(t, output, test.expected) + require.Equal(t, test.expected, output) } } diff --git a/exporter/logzioexporter/logger_test.go b/exporter/logzioexporter/logger_test.go index 8821f7f76c0b..3c74dbf60723 100644 --- a/exporter/logzioexporter/logger_test.go +++ 
b/exporter/logzioexporter/logger_test.go @@ -17,7 +17,7 @@ func TestLoggerConfigs(tester *testing.T) { name: loggerName, } - assert.Equal(tester, exporterLogger.Name(), loggerName) + assert.Equal(tester, loggerName, exporterLogger.Name()) assert.NotNil(tester, exporterLogger.Named("logger")) assert.NotNil(tester, exporterLogger.With("key", "val")) assert.NotNil(tester, exporterLogger.ResetNamed(loggerName)) diff --git a/exporter/mezmoexporter/exporter_test.go b/exporter/mezmoexporter/exporter_test.go index 5d90592f471b..a6299ec0f689 100644 --- a/exporter/mezmoexporter/exporter_test.go +++ b/exporter/mezmoexporter/exporter_test.go @@ -213,9 +213,9 @@ func TestAddsRequiredAttributes(t *testing.T) { lines := body.Lines for _, line := range lines { assert.Greater(t, line.Timestamp, int64(0)) - assert.Equal(t, line.Level, "info") - assert.Equal(t, line.App, "") - assert.Equal(t, line.Line, "minimal attribute log") + assert.Equal(t, "info", line.Level) + assert.Equal(t, "", line.App) + assert.Equal(t, "minimal attribute log", line.Line) } return http.StatusOK, "" @@ -256,17 +256,17 @@ func Test404IngestError(t *testing.T) { err := exporter.pushLogData(context.Background(), logs) require.NoError(t, err) - assert.Equal(t, logObserver.Len(), 2) + assert.Equal(t, 2, logObserver.Len()) logLine := logObserver.All()[0] - assert.Equal(t, logLine.Message, "got http status (/foobar): 404 Not Found") - assert.Equal(t, logLine.Level, zapcore.ErrorLevel) + assert.Equal(t, "got http status (/foobar): 404 Not Found", logLine.Message) + assert.Equal(t, zapcore.ErrorLevel, logLine.Level) logLine = logObserver.All()[1] - assert.Equal(t, logLine.Message, "http response") - assert.Equal(t, logLine.Level, zapcore.DebugLevel) + assert.Equal(t, "http response", logLine.Message) + assert.Equal(t, zapcore.DebugLevel, logLine.Level) responseField := logLine.Context[0] - assert.Equal(t, responseField.Key, "response") - assert.Equal(t, responseField.String, `{"foo":"bar"}`) + assert.Equal(t, 
"response", responseField.Key) + assert.Equal(t, `{"foo":"bar"}`, responseField.String) } diff --git a/exporter/mezmoexporter/factory_test.go b/exporter/mezmoexporter/factory_test.go index b97a978e4dec..2d86751b4831 100644 --- a/exporter/mezmoexporter/factory_test.go +++ b/exporter/mezmoexporter/factory_test.go @@ -28,7 +28,7 @@ func TestCreateDefaultConfig(t *testing.T) { factory := NewFactory() cfg := factory.CreateDefaultConfig() - assert.Equal(t, cfg, &Config{ + assert.Equal(t, &Config{ IngestURL: defaultIngestURL, IngestKey: "", @@ -37,7 +37,7 @@ func TestCreateDefaultConfig(t *testing.T) { }, BackOffConfig: configretry.NewDefaultBackOffConfig(), QueueSettings: exporterhelper.NewDefaultQueueSettings(), - }) + }, cfg) assert.NoError(t, componenttest.CheckConfigStruct(cfg)) } diff --git a/exporter/mezmoexporter/utils_test.go b/exporter/mezmoexporter/utils_test.go index c8441c2e43f8..c8d6b860334f 100644 --- a/exporter/mezmoexporter/utils_test.go +++ b/exporter/mezmoexporter/utils_test.go @@ -20,21 +20,21 @@ func TestTruncateString(t *testing.T) { t.Run("Test shorter string", func(t *testing.T) { s := truncateString("short", 10) require.Len(t, s, 5) - require.Equal(t, s, "short") + require.Equal(t, "short", s) }) // Test string is equal to the maximum length t.Run("Test equal string", func(t *testing.T) { s := truncateString("short", 5) require.Len(t, s, 5) - require.Equal(t, s, "short") + require.Equal(t, "short", s) }) // Test string is longer than the maximum length t.Run("Test longer string", func(t *testing.T) { s := truncateString("longstring", 4) require.Len(t, s, 4) - require.Equal(t, s, "long") + require.Equal(t, "long", s) }) } diff --git a/exporter/opencensusexporter/go.mod b/exporter/opencensusexporter/go.mod index 3c70a4651dba..b33b0cf0dbae 100644 --- a/exporter/opencensusexporter/go.mod +++ b/exporter/opencensusexporter/go.mod @@ -58,7 +58,7 @@ require ( github.com/prometheus/client_model v0.6.1 // indirect 
github.com/prometheus/common v0.57.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect - github.com/rs/cors v1.11.0 // indirect + github.com/rs/cors v1.11.1 // indirect github.com/soheilhy/cmux v0.1.5 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/collector v0.108.2-0.20240904075637-48b11ba1c5f8 // indirect diff --git a/exporter/opencensusexporter/go.sum b/exporter/opencensusexporter/go.sum index 3418461910ad..2bfab3cc711c 100644 --- a/exporter/opencensusexporter/go.sum +++ b/exporter/opencensusexporter/go.sum @@ -107,8 +107,8 @@ github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0leargg github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= -github.com/rs/cors v1.11.0 h1:0B9GE/r9Bc2UxRMMtymBkHTenPkHDv0CW4Y98GBY+po= -github.com/rs/cors v1.11.0/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= +github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA= +github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= diff --git a/exporter/otelarrowexporter/factory_test.go b/exporter/otelarrowexporter/factory_test.go index 9d0e212090fc..66ece922ef06 100644 --- a/exporter/otelarrowexporter/factory_test.go +++ b/exporter/otelarrowexporter/factory_test.go @@ -36,15 +36,15 @@ func TestCreateDefaultConfig(t *testing.T) { assert.Equal(t, ocfg.RetryConfig, 
configretry.NewDefaultBackOffConfig()) assert.Equal(t, ocfg.QueueSettings, exporterhelper.NewDefaultQueueSettings()) assert.Equal(t, ocfg.TimeoutSettings, exporterhelper.NewDefaultTimeoutSettings()) - assert.Equal(t, ocfg.Compression, configcompression.TypeZstd) - assert.Equal(t, ocfg.Arrow, ArrowConfig{ + assert.Equal(t, configcompression.TypeZstd, ocfg.Compression) + assert.Equal(t, ArrowConfig{ Disabled: false, NumStreams: runtime.NumCPU(), MaxStreamLifetime: time.Hour, PayloadCompression: "", Zstd: zstd.DefaultEncoderConfig(), Prioritizer: arrow.DefaultPrioritizer, - }) + }, ocfg.Arrow) } func TestCreateMetricsExporter(t *testing.T) { @@ -207,7 +207,7 @@ func TestCreateTracesExporter(t *testing.T) { if err != nil { // Since the endpoint of OTLP exporter doesn't actually exist, // exporter may already stop because it cannot connect. - assert.Equal(t, err.Error(), "rpc error: code = Canceled desc = grpc: the client connection is closing") + assert.Equal(t, "rpc error: code = Canceled desc = grpc: the client connection is closing", err.Error()) } }) } diff --git a/exporter/otelarrowexporter/go.mod b/exporter/otelarrowexporter/go.mod index b342b39f77af..3fbcf9990075 100644 --- a/exporter/otelarrowexporter/go.mod +++ b/exporter/otelarrowexporter/go.mod @@ -4,6 +4,7 @@ go 1.22.0 require ( github.com/apache/arrow/go/v16 v16.1.0 + github.com/open-telemetry/opentelemetry-collector-contrib/internal/grpcutil v0.108.0 github.com/open-telemetry/opentelemetry-collector-contrib/internal/otelarrow v0.108.0 github.com/open-telemetry/otel-arrow v0.25.0 github.com/stretchr/testify v1.9.0 @@ -105,3 +106,5 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/otela replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/otelarrowreceiver => ../../receiver/otelarrowreceiver replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent => 
../../internal/sharedcomponent + +replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/grpcutil => ../../internal/grpcutil diff --git a/exporter/otelarrowexporter/internal/arrow/exporter.go b/exporter/otelarrowexporter/internal/arrow/exporter.go index 8903e707a549..e42205af197a 100644 --- a/exporter/otelarrowexporter/internal/arrow/exporter.go +++ b/exporter/otelarrowexporter/internal/arrow/exporter.go @@ -23,6 +23,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/status" + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/grpcutil" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/otelarrow/netstats" ) @@ -310,6 +311,10 @@ func (e *Exporter) SendAndWait(ctx context.Context, data any) (bool, error) { } md["otlp-pdata-size"] = strconv.Itoa(uncompSize) + if dead, ok := ctx.Deadline(); ok { + md["grpc-timeout"] = grpcutil.EncodeTimeout(time.Until(dead)) + } + wri := writeItem{ records: data, md: md, diff --git a/exporter/otelarrowexporter/internal/arrow/exporter_test.go b/exporter/otelarrowexporter/internal/arrow/exporter_test.go index 4f488af53e87..fc749a7f961d 100644 --- a/exporter/otelarrowexporter/internal/arrow/exporter_test.go +++ b/exporter/otelarrowexporter/internal/arrow/exporter_test.go @@ -32,6 +32,7 @@ import ( "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/grpcutil" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/otelarrow/netstats" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/otelarrow/testdata" ) @@ -318,7 +319,7 @@ func TestArrowExporterStreamConnectError(t *testing.T) { require.NoError(t, tc.exporter.Shutdown(bg)) require.NotEmpty(t, tc.observedLogs.All(), "should have at least one log: %v", tc.observedLogs.All()) - require.Equal(t, tc.observedLogs.All()[0].Message, "cannot start arrow 
stream") + require.Equal(t, "cannot start arrow stream", tc.observedLogs.All()[0].Message) }) } } @@ -344,7 +345,7 @@ func TestArrowExporterDowngrade(t *testing.T) { require.NoError(t, tc.exporter.Shutdown(bg)) require.Less(t, 1, len(tc.observedLogs.All()), "should have at least two logs: %v", tc.observedLogs.All()) - require.Equal(t, tc.observedLogs.All()[0].Message, "arrow is not supported") + require.Equal(t, "arrow is not supported", tc.observedLogs.All()[0].Message) require.Contains(t, tc.observedLogs.All()[1].Message, "downgrading") }) } @@ -393,7 +394,7 @@ func TestArrowExporterDisableDowngrade(t *testing.T) { require.NoError(t, tc.exporter.Shutdown(bg)) require.Less(t, 1, len(tc.observedLogs.All()), "should have at least two logs: %v", tc.observedLogs.All()) - require.Equal(t, tc.observedLogs.All()[0].Message, "arrow is not supported") + require.Equal(t, "arrow is not supported", tc.observedLogs.All()[0].Message) require.NotContains(t, tc.observedLogs.All()[1].Message, "downgrading") }) } @@ -576,65 +577,94 @@ func TestArrowExporterStreaming(t *testing.T) { // TestArrowExporterHeaders tests a mix of outgoing context headers. 
func TestArrowExporterHeaders(t *testing.T) { - tc := newSingleStreamMetadataTestCase(t) - channel := newHealthyTestChannel() + for _, withDeadline := range []bool{true, false} { + t.Run(fmt.Sprint("with_deadline=", withDeadline), func(t *testing.T) { - tc.traceCall.AnyTimes().DoAndReturn(tc.returnNewStream(channel)) + tc := newSingleStreamMetadataTestCase(t) + channel := newHealthyTestChannel() - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - require.NoError(t, tc.exporter.Start(ctx)) + tc.traceCall.AnyTimes().DoAndReturn(tc.returnNewStream(channel)) - var expectOutput []metadata.MD - var actualOutput []metadata.MD + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - var wg sync.WaitGroup - wg.Add(1) - go func() { - defer wg.Done() - md := metadata.MD{} - hpd := hpack.NewDecoder(4096, func(f hpack.HeaderField) { - md[f.Name] = append(md[f.Name], f.Value) - }) - for data := range channel.sendChannel() { - if len(data.Headers) == 0 { - actualOutput = append(actualOutput, nil) - } else { - _, err := hpd.Write(data.Headers) + require.NoError(t, tc.exporter.Start(ctx)) + + var expectOutput []metadata.MD + var actualOutput []metadata.MD + + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + md := metadata.MD{} + hpd := hpack.NewDecoder(4096, func(f hpack.HeaderField) { + md[f.Name] = append(md[f.Name], f.Value) + }) + for data := range channel.sendChannel() { + if len(data.Headers) == 0 { + actualOutput = append(actualOutput, nil) + } else { + _, err := hpd.Write(data.Headers) + require.NoError(t, err) + actualOutput = append(actualOutput, md) + md = metadata.MD{} + } + channel.recv <- statusOKFor(data.BatchId) + } + }() + + for times := 0; times < 10; times++ { + input := testdata.GenerateTraces(2) + + if times%2 == 1 { + md := metadata.MD{ + "expected1": []string{"metadata1"}, + "expected2": []string{fmt.Sprint(times)}, + "otlp-pdata-size": []string{"329"}, + } + expectOutput = append(expectOutput, 
md) + } else { + expectOutput = append(expectOutput, metadata.MD{ + "otlp-pdata-size": []string{"329"}, + }) + } + + sendCtx := ctx + if withDeadline { + var sendCancel context.CancelFunc + sendCtx, sendCancel = context.WithTimeout(sendCtx, time.Second) + defer sendCancel() + } + + sent, err := tc.exporter.SendAndWait(sendCtx, input) require.NoError(t, err) - actualOutput = append(actualOutput, md) - md = metadata.MD{} + require.True(t, sent) } - channel.recv <- statusOKFor(data.BatchId) - } - }() - - for times := 0; times < 10; times++ { - input := testdata.GenerateTraces(2) + // Stop the test conduit started above. + cancel() + wg.Wait() - if times%2 == 1 { - md := metadata.MD{ - "expected1": []string{"metadata1"}, - "expected2": []string{fmt.Sprint(times)}, - "otlp-pdata-size": []string{"329"}, + // Manual check for proper deadline propagation. Since the test + // is timed we don't expect an exact match. + if withDeadline { + for _, out := range actualOutput { + dead := out.Get("grpc-timeout") + require.Len(t, dead, 1) + require.NotEmpty(t, dead[0]) + to, err := grpcutil.DecodeTimeout(dead[0]) + require.NoError(t, err) + // Allow the test to lapse for 0.5s. + require.Less(t, time.Second/2, to) + require.GreaterOrEqual(t, time.Second, to) + out.Delete("grpc-timeout") + } } - expectOutput = append(expectOutput, md) - } else { - expectOutput = append(expectOutput, metadata.MD{ - "otlp-pdata-size": []string{"329"}, - }) - } - sent, err := tc.exporter.SendAndWait(context.Background(), input) - require.NoError(t, err) - require.True(t, sent) + require.Equal(t, expectOutput, actualOutput) + require.NoError(t, tc.exporter.Shutdown(ctx)) + }) } - // Stop the test conduit started above. 
- cancel() - wg.Wait() - - require.Equal(t, expectOutput, actualOutput) - require.NoError(t, tc.exporter.Shutdown(ctx)) } // TestArrowExporterIsTraced tests whether trace and span ID are diff --git a/exporter/otelarrowexporter/internal/arrow/stream_test.go b/exporter/otelarrowexporter/internal/arrow/stream_test.go index 100e6f131c9f..9b39d4d9c644 100644 --- a/exporter/otelarrowexporter/internal/arrow/stream_test.go +++ b/exporter/otelarrowexporter/internal/arrow/stream_test.go @@ -5,7 +5,6 @@ package arrow import ( "context" - "errors" "fmt" "sync" "testing" @@ -216,7 +215,7 @@ func TestStreamUnknownBatchError(t *testing.T) { // sender should get ErrStreamRestarting err := tc.mustSendAndWait() require.Error(t, err) - require.True(t, errors.Is(err, ErrStreamRestarting)) + require.ErrorIs(t, err, ErrStreamRestarting) }) } } @@ -322,7 +321,7 @@ func TestStreamUnsupported(t *testing.T) { tc.waitForShutdown() require.NotEmpty(t, tc.observedLogs.All(), "should have at least one log: %v", tc.observedLogs.All()) - require.Equal(t, tc.observedLogs.All()[0].Message, "arrow is not supported") + require.Equal(t, "arrow is not supported", tc.observedLogs.All()[0].Message) }) } } @@ -347,7 +346,7 @@ func TestStreamSendError(t *testing.T) { // sender should get ErrStreamRestarting err := tc.mustSendAndWait() require.Error(t, err) - require.True(t, errors.Is(err, ErrStreamRestarting)) + require.ErrorIs(t, err, ErrStreamRestarting) }) } } diff --git a/exporter/otelarrowexporter/otelarrow_test.go b/exporter/otelarrowexporter/otelarrow_test.go index 1be964b98401..dfa73f7417cc 100644 --- a/exporter/otelarrowexporter/otelarrow_test.go +++ b/exporter/otelarrowexporter/otelarrow_test.go @@ -566,7 +566,7 @@ func TestSendMetrics(t *testing.T) { assert.EqualValues(t, md, rcv.getLastRequest()) mdata := rcv.getMetadata() - require.EqualValues(t, mdata.Get("header"), expectedHeader) + require.EqualValues(t, expectedHeader, mdata.Get("header")) require.Len(t, mdata.Get("User-Agent"), 1) 
require.Contains(t, mdata.Get("User-Agent")[0], "Collector/1.2.3test") diff --git a/exporter/prometheusexporter/accumulator_test.go b/exporter/prometheusexporter/accumulator_test.go index 49b39c4412bb..d8858c569c9c 100644 --- a/exporter/prometheusexporter/accumulator_test.go +++ b/exporter/prometheusexporter/accumulator_test.go @@ -248,7 +248,7 @@ func TestAccumulateMetrics(t *testing.T) { v := m.(*accumulatedValue) vLabels, vTS, vValue, vTemporality, vIsMonotonic := getMetricProperties(ilm2.Metrics().At(0)) - require.Equal(t, v.scope.Name(), "test") + require.Equal(t, "test", v.scope.Name()) require.Equal(t, v.value.Type(), ilm2.Metrics().At(0).Type()) vLabels.Range(func(k string, v pcommon.Value) bool { r, _ := m2Labels.Get(k) @@ -360,7 +360,7 @@ func TestAccumulateDeltaToCumulative(t *testing.T) { v := m.(*accumulatedValue) vLabels, vTS, vValue, vTemporality, vIsMonotonic := getMetricProperties(v.value) - require.Equal(t, v.scope.Name(), "test") + require.Equal(t, "test", v.scope.Name()) require.Equal(t, v.value.Type(), ilm.Metrics().At(0).Type()) require.Equal(t, v.value.Type(), ilm.Metrics().At(1).Type()) diff --git a/exporter/prometheusremotewriteexporter/README.md b/exporter/prometheusremotewriteexporter/README.md index 64413927fb52..806ee037e46c 100644 --- a/exporter/prometheusremotewriteexporter/README.md +++ b/exporter/prometheusremotewriteexporter/README.md @@ -54,7 +54,7 @@ The following settings can be optionally configured: - `remote_write_queue`: fine tuning for queueing and sending of the outgoing remote writes. - `enabled`: enable the sending queue (default: `true`) - `queue_size`: number of OTLP metrics that can be queued. Ignored if `enabled` is `false` (default: `10000`) - - `num_consumers`: minimum number of workers to use to fan out the outgoing requests. (default: `5`) + - `num_consumers`: minimum number of workers to use to fan out the outgoing requests. 
(default: `5`) **WARNING:** Currently, num_consumers doesn't have any effect due to incompatibility with Prometheus remote write API. The value will be ignored. Please see https://github.com/open-telemetry/opentelemetry-collector/issues/2949 for more information. - `resource_to_telemetry_conversion` - `enabled` (default = false): If `enabled` is `true`, all the resource attributes will be converted to metric labels by default. - `target_info`: customize `target_info` metric diff --git a/exporter/prometheusremotewriteexporter/factory.go b/exporter/prometheusremotewriteexporter/factory.go index e23b0ed0fc9e..151ad5a81e5c 100644 --- a/exporter/prometheusremotewriteexporter/factory.go +++ b/exporter/prometheusremotewriteexporter/factory.go @@ -43,6 +43,10 @@ func createMetricsExporter(ctx context.Context, set exporter.Settings, return nil, errors.New("invalid configuration") } + if prwCfg.RemoteWriteQueue.NumConsumers != 0 { + set.Logger.Warn("Currently, remote_write_queue.num_consumers doesn't have any effect due to incompatibility with Prometheus remote write API. The value will be ignored. Please see https://github.com/open-telemetry/opentelemetry-collector/issues/2949 for more information.") + } + prwe, err := newPRWExporter(prwCfg, set) if err != nil { return nil, err diff --git a/exporter/prometheusremotewriteexporter/helper_test.go b/exporter/prometheusremotewriteexporter/helper_test.go index d0454d4cb98b..f464c25071b0 100644 --- a/exporter/prometheusremotewriteexporter/helper_test.go +++ b/exporter/prometheusremotewriteexporter/helper_test.go @@ -233,7 +233,7 @@ func TestEnsureTimeseriesPointsAreSortedByTimestamp(t *testing.T) { }, }, } - assert.Equal(t, got, want) + assert.Equal(t, want, got) // For a full sanity/logical check, assert that EVERY // Sample has a Timestamp bigger than its prior values. 
diff --git a/exporter/pulsarexporter/factory_test.go b/exporter/pulsarexporter/factory_test.go index 1cd6cc0f6432..05338c7d5823 100644 --- a/exporter/pulsarexporter/factory_test.go +++ b/exporter/pulsarexporter/factory_test.go @@ -18,19 +18,19 @@ import ( func Test_createDefaultConfig(t *testing.T) { cfg := createDefaultConfig() - assert.Equal(t, cfg, &Config{ + assert.Equal(t, &Config{ TimeoutSettings: exporterhelper.NewDefaultTimeoutSettings(), BackOffConfig: configretry.NewDefaultBackOffConfig(), QueueSettings: exporterhelper.NewDefaultQueueSettings(), Endpoint: defaultBroker, - // using an empty topic to track when it has not been set by user, default is based on traces or metrics. + Topic: "", Encoding: defaultEncoding, Authentication: Authentication{}, MaxConnectionsPerBroker: 1, ConnectionTimeout: 5 * time.Second, OperationTimeout: 30 * time.Second, - }) + }, cfg) } func TestWithTracesMarshalers_err(t *testing.T) { diff --git a/exporter/sentryexporter/sentry_exporter_test.go b/exporter/sentryexporter/sentry_exporter_test.go index 7e1a8f95ed90..8b44d63bccf8 100644 --- a/exporter/sentryexporter/sentry_exporter_test.go +++ b/exporter/sentryexporter/sentry_exporter_test.go @@ -461,13 +461,13 @@ func TestGenerateTagsFromAttributes(t *testing.T) { tags := generateTagsFromAttributes(attrs) stringVal := tags["string-key"] - assert.Equal(t, stringVal, "string-value") + assert.Equal(t, "string-value", stringVal) boolVal := tags["bool-key"] - assert.Equal(t, boolVal, "true") + assert.Equal(t, "true", boolVal) doubleVal := tags["double-key"] - assert.Equal(t, doubleVal, "123.123") + assert.Equal(t, "123.123", doubleVal) intVal := tags["int-key"] - assert.Equal(t, intVal, "321") + assert.Equal(t, "321", intVal) } type SpanStatusCase struct { diff --git a/exporter/signalfxexporter/exporter_test.go b/exporter/signalfxexporter/exporter_test.go index ef55bea36865..88ec35ba2904 100644 --- a/exporter/signalfxexporter/exporter_test.go +++ 
b/exporter/signalfxexporter/exporter_test.go @@ -807,7 +807,7 @@ func TestConsumeLogsDataWithAccessTokenPassthrough(t *testing.T) { defer receivedTokens.Unlock() return len(receivedTokens.tokens) == 1 }, 1*time.Second, 10*time.Millisecond) - assert.Equal(t, receivedTokens.tokens[0], tt.expectedToken) + assert.Equal(t, tt.expectedToken, receivedTokens.tokens[0]) }) } } diff --git a/exporter/signalfxexporter/internal/correlation/logshims_test.go b/exporter/signalfxexporter/internal/correlation/logshims_test.go index bc421d0bd59c..3e337dc0dcd9 100644 --- a/exporter/signalfxexporter/internal/correlation/logshims_test.go +++ b/exporter/signalfxexporter/internal/correlation/logshims_test.go @@ -102,5 +102,5 @@ func TestZapShim_Fields(t *testing.T) { c := e.Context[0] assert.Equal(t, "field", c.Key) require.Equal(t, zapcore.StringType, c.Type) - assert.Equal(t, c.String, "field value") + assert.Equal(t, "field value", c.String) } diff --git a/exporter/signalfxexporter/internal/dimensions/dimclient_test.go b/exporter/signalfxexporter/internal/dimensions/dimclient_test.go index 17b618a1fe6f..12b180e8e1fb 100644 --- a/exporter/signalfxexporter/internal/dimensions/dimclient_test.go +++ b/exporter/signalfxexporter/internal/dimensions/dimclient_test.go @@ -136,7 +136,7 @@ func TestDimensionClient(t *testing.T) { })) dims := waitForDims(dimCh, 1, 3) - require.Equal(t, dims, []dim{ + require.Equal(t, []dim{ { Key: "host", Value: "test-box", @@ -148,7 +148,7 @@ func TestDimensionClient(t *testing.T) { Tags: []string{"active"}, TagsToRemove: []string{"terminated"}, }, - }) + }, dims) }) t.Run("same dimension with different values", func(t *testing.T) { @@ -164,7 +164,7 @@ func TestDimensionClient(t *testing.T) { })) dims := waitForDims(dimCh, 1, 3) - require.Equal(t, dims, []dim{ + require.Equal(t, []dim{ { Key: "host", Value: "test-box", @@ -173,7 +173,7 @@ func TestDimensionClient(t *testing.T) { }, TagsToRemove: []string{"active"}, }, - }) + }, dims) }) t.Run("send a distinct 
prop/tag set for existing dim with server error", func(t *testing.T) { @@ -197,7 +197,7 @@ func TestDimensionClient(t *testing.T) { dims = waitForDims(dimCh, 1, 3) // After the server recovers the dim should be resent. - require.Equal(t, dims, []dim{ + require.Equal(t, []dim{ { Key: "AWSUniqueID", Value: "abcd", @@ -206,7 +206,7 @@ func TestDimensionClient(t *testing.T) { }, Tags: []string{"running"}, }, - }) + }, dims) }) t.Run("does not retry 4xx responses", func(t *testing.T) { @@ -245,7 +245,7 @@ func TestDimensionClient(t *testing.T) { forcedResp.Store(200) dims = waitForDims(dimCh, 1, 3) - require.Equal(t, dims, []dim{ + require.Equal(t, []dim{ { Key: "AWSUniqueID", Value: "id404", @@ -253,7 +253,7 @@ func TestDimensionClient(t *testing.T) { "z": newString("x"), }, }, - }) + }, dims) }) t.Run("send successive quick updates to same dim", func(t *testing.T) { @@ -294,7 +294,7 @@ func TestDimensionClient(t *testing.T) { dims := waitForDims(dimCh, 1, 3) - require.Equal(t, dims, []dim{ + require.Equal(t, []dim{ { Key: "AWSUniqueID", Value: "abcd", @@ -305,7 +305,7 @@ func TestDimensionClient(t *testing.T) { Tags: []string{"dev"}, TagsToRemove: []string{"running"}, }, - }) + }, dims) }) } diff --git a/exporter/splunkhecexporter/client_test.go b/exporter/splunkhecexporter/client_test.go index 767e9fbd004e..595eb89bf9c7 100644 --- a/exporter/splunkhecexporter/client_test.go +++ b/exporter/splunkhecexporter/client_test.go @@ -1509,7 +1509,7 @@ func Test_pushLogData_nil_Logs(t *testing.T) { return logs }(), requires: func(t *testing.T, logs plog.Logs) { - require.Equal(t, logs.ResourceLogs().Len(), 1) + require.Equal(t, 1, logs.ResourceLogs().Len()) require.Zero(t, logs.ResourceLogs().At(0).ScopeLogs().Len()) }, }, @@ -1523,8 +1523,8 @@ func Test_pushLogData_nil_Logs(t *testing.T) { return logs }(), requires: func(t *testing.T, logs plog.Logs) { - require.Equal(t, logs.ResourceLogs().Len(), 1) - require.Equal(t, logs.ResourceLogs().At(0).ScopeLogs().Len(), 1) + 
require.Equal(t, 1, logs.ResourceLogs().Len()) + require.Equal(t, 1, logs.ResourceLogs().At(0).ScopeLogs().Len()) require.Zero(t, logs.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().Len()) }, }, diff --git a/exporter/sumologicexporter/exporter_test.go b/exporter/sumologicexporter/exporter_test.go index 421bc7839c1a..df93e5ce201c 100644 --- a/exporter/sumologicexporter/exporter_test.go +++ b/exporter/sumologicexporter/exporter_test.go @@ -5,7 +5,6 @@ package sumologicexporter import ( "context" - "errors" "net/http" "net/http/httptest" "sync" @@ -192,7 +191,7 @@ func TestAllFailed(t *testing.T) { assert.EqualError(t, err, "failed sending data: status: 500 Internal Server Error") var partial consumererror.Logs - require.True(t, errors.As(err, &partial)) + require.ErrorAs(t, err, &partial) assert.Equal(t, logsExpected, partial.Data()) } @@ -231,7 +230,7 @@ func TestPartiallyFailed(t *testing.T) { assert.EqualError(t, err, "failed sending data: status: 500 Internal Server Error") var partial consumererror.Logs - require.True(t, errors.As(err, &partial)) + require.ErrorAs(t, err, &partial) assert.Equal(t, logsExpected, partial.Data()) } @@ -462,7 +461,7 @@ gauge_metric_name{foo="bar",remote_name="156955",url="http://another_url"} 245 1 assert.EqualError(t, err, tc.expectedError) var partial consumererror.Metrics - require.True(t, errors.As(err, &partial)) + require.ErrorAs(t, err, &partial) // TODO fix // assert.Equal(t, metrics, partial.GetMetrics()) }) diff --git a/exporter/sumologicexporter/factory_test.go b/exporter/sumologicexporter/factory_test.go index 53bfac1b4241..ff646f3561e3 100644 --- a/exporter/sumologicexporter/factory_test.go +++ b/exporter/sumologicexporter/factory_test.go @@ -29,7 +29,7 @@ func TestCreateDefaultConfig(t *testing.T) { qs := exporterhelper.NewDefaultQueueSettings() qs.Enabled = false - assert.Equal(t, cfg, &Config{ + assert.Equal(t, &Config{ MaxRequestBodySize: 1_048_576, LogFormat: "otlp", MetricFormat: "otlp", @@ -44,7 +44,7 @@ 
func TestCreateDefaultConfig(t *testing.T) { }, BackOffConfig: configretry.NewDefaultBackOffConfig(), QueueSettings: qs, - }) + }, cfg) assert.NoError(t, component.ValidateConfig(cfg)) } diff --git a/exporter/syslogexporter/exporter_test.go b/exporter/syslogexporter/exporter_test.go index 212ea99d687c..db643caf4bb4 100644 --- a/exporter/syslogexporter/exporter_test.go +++ b/exporter/syslogexporter/exporter_test.go @@ -157,7 +157,7 @@ func TestSyslogExportSuccess(t *testing.T) { defer conn.Close() b, err := io.ReadAll(conn) require.NoError(t, err, "could not read all") - assert.Equal(t, string(b), expectedForm) + assert.Equal(t, expectedForm, string(b)) } func TestSyslogExportFail(t *testing.T) { diff --git a/exporter/syslogexporter/factory_test.go b/exporter/syslogexporter/factory_test.go index 47cfc42a35d2..a4cebc3bcf7a 100644 --- a/exporter/syslogexporter/factory_test.go +++ b/exporter/syslogexporter/factory_test.go @@ -24,7 +24,7 @@ func TestType(t *testing.T) { func TestCreateDefaultConfig(t *testing.T) { cfg := createDefaultConfig() - assert.Equal(t, cfg, &Config{ + assert.Equal(t, &Config{ Port: 514, Network: "tcp", Protocol: "rfc5424", @@ -44,5 +44,5 @@ func TestCreateDefaultConfig(t *testing.T) { TimeoutSettings: exporterhelper.TimeoutSettings{ Timeout: 5 * time.Second, }, - }) + }, cfg) } diff --git a/extension/basicauthextension/extension_test.go b/extension/basicauthextension/extension_test.go index 265689d0b6c6..d68cb069f4bd 100644 --- a/extension/basicauthextension/extension_test.go +++ b/extension/basicauthextension/extension_test.go @@ -172,7 +172,7 @@ func TestBasicAuth_HtpasswdInlinePrecedence(t *testing.T) { auth = base64.StdEncoding.EncodeToString([]byte("username:fromfile")) _, err = ext.Authenticate(context.Background(), map[string][]string{"authorization": {"Basic " + auth}}) - assert.Error(t, errInvalidCredentials, err) + assert.ErrorIs(t, errInvalidCredentials, err) } func TestBasicAuth_SupportedHeaders(t *testing.T) { @@ -265,7 +265,7 @@ 
func TestBasicAuth_ClientValid(t *testing.T) { expectedMd := map[string]string{ "authorization": fmt.Sprintf("Basic %s", authCreds), } - assert.Equal(t, md, expectedMd) + assert.Equal(t, expectedMd, md) assert.NoError(t, err) assert.True(t, credential.RequireTransportSecurity()) diff --git a/extension/bearertokenauthextension/bearertokenauth_test.go b/extension/bearertokenauthextension/bearertokenauth_test.go index 2d921e117ab3..4bdec8b461e6 100644 --- a/extension/bearertokenauthextension/bearertokenauth_test.go +++ b/extension/bearertokenauthextension/bearertokenauth_test.go @@ -82,7 +82,7 @@ func TestBearerAuthenticator(t *testing.T) { expectedMd := map[string]string{ "authorization": fmt.Sprintf("Bearer %s", string(cfg.BearerToken)), } - assert.Equal(t, md, expectedMd) + assert.Equal(t, expectedMd, md) assert.NoError(t, err) assert.True(t, credential.RequireTransportSecurity()) @@ -123,7 +123,7 @@ func TestBearerStartWatchStop(t *testing.T) { expectedMd := map[string]string{ "authorization": tokenStr, } - assert.Equal(t, md, expectedMd) + assert.Equal(t, expectedMd, md) assert.NoError(t, err) assert.True(t, credential.RequireTransportSecurity()) @@ -133,7 +133,7 @@ func TestBearerStartWatchStop(t *testing.T) { credential, _ = bauth.PerRPCCredentials() md, err = credential.GetRequestMetadata(context.Background()) expectedMd["authorization"] = tokenStr + "test" - assert.Equal(t, md, expectedMd) + assert.Equal(t, expectedMd, md) assert.NoError(t, err) // change file content back @@ -143,7 +143,7 @@ func TestBearerStartWatchStop(t *testing.T) { md, err = credential.GetRequestMetadata(context.Background()) expectedMd["authorization"] = tokenStr time.Sleep(5 * time.Second) - assert.Equal(t, md, expectedMd) + assert.Equal(t, expectedMd, md) assert.NoError(t, err) assert.NoError(t, bauth.Shutdown(context.Background())) diff --git a/extension/encoding/jsonlogencodingextension/json_test.go b/extension/encoding/jsonlogencodingextension/json_test.go index 
ba2a9800182b..2fcfa3d13129 100644 --- a/extension/encoding/jsonlogencodingextension/json_test.go +++ b/extension/encoding/jsonlogencodingextension/json_test.go @@ -59,7 +59,7 @@ func TestPrettyLogProcessor(t *testing.T) { lp, err := j.logProcessor(sampleLog()) assert.NoError(t, err) assert.NotNil(t, lp) - assert.Equal(t, string(lp), `[{"body":{"log":"test"},"logAttributes":{"foo":"bar"},"resourceAttributes":{"test":"logs-test"}},{"body":"log testing","resourceAttributes":{"test":"logs-test"}}]`) + assert.Equal(t, `[{"body":{"log":"test"},"logAttributes":{"foo":"bar"},"resourceAttributes":{"test":"logs-test"}},{"body":"log testing","resourceAttributes":{"test":"logs-test"}}]`, string(lp)) } func sampleLog() plog.Logs { diff --git a/extension/headerssetterextension/config_test.go b/extension/headerssetterextension/config_test.go index 9fab6b3d0327..11ae3b04ab79 100644 --- a/extension/headerssetterextension/config_test.go +++ b/extension/headerssetterextension/config_test.go @@ -69,7 +69,7 @@ func TestLoadConfig(t *testing.T) { require.NoError(t, sub.Unmarshal(cfg)) if tt.expectedError != nil { - assert.Error(t, component.ValidateConfig(cfg), tt.expectedError) + assert.ErrorIs(t, component.ValidateConfig(cfg), tt.expectedError) return } assert.NoError(t, component.ValidateConfig(cfg)) diff --git a/extension/healthcheckv2extension/README.md b/extension/healthcheckv2extension/README.md index 07e40e40177a..20f6242c1974 100644 --- a/extension/healthcheckv2extension/README.md +++ b/extension/healthcheckv2extension/README.md @@ -1,15 +1,12 @@ -# Health Check Extension - -> ⚠️⚠️⚠️ **Warning** ⚠️⚠️⚠️ -> This extension is not ready for use. The code is written, but the -[original PR](https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/30673) -is being sliced up into smaller PRs that are being reviewed and merged -incrementally. 
- -## Forward Looking README - -The remainder of this README is forward looking and serves as a reference for -the future functionality that will be provided and how it will be configured. +# Health Check Extension V2 + +This is an experimental extension that is intended to replace the existing +health check extension. As the stability level is currently development, users +wishing to experiment with this extension will have to build a custom collector +binary using the [OpenTelemetry Collector Builder](https://github.com/open-telemetry/opentelemetry-collector/tree/main/cmd/builder). +Health check extension V2 has new functionality that can be opted-in to, and +also supports original healthcheck extension functionality with the exception +of the `check_collector_pipeline` feature. See the warning below. > ⚠️⚠️⚠️ **Warning** ⚠️⚠️⚠️ > diff --git a/extension/healthcheckv2extension/extension_test.go b/extension/healthcheckv2extension/extension_test.go index 662a7d3576ba..bb15441a1aae 100644 --- a/extension/healthcheckv2extension/extension_test.go +++ b/extension/healthcheckv2extension/extension_test.go @@ -35,7 +35,7 @@ func TestComponentStatus(t *testing.T) { // Status before Start will be StatusNone st, ok := ext.aggregator.AggregateStatus(status.ScopeAll, status.Concise) require.True(t, ok) - assert.Equal(t, st.Status(), componentstatus.StatusNone) + assert.Equal(t, componentstatus.StatusNone, st.Status()) require.NoError(t, ext.Start(context.Background(), componenttest.NewNopHost())) diff --git a/extension/healthcheckv2extension/internal/http/responders_test.go b/extension/healthcheckv2extension/internal/http/responders_test.go index b01624fb78ed..c2bef14a1706 100644 --- a/extension/healthcheckv2extension/internal/http/responders_test.go +++ b/extension/healthcheckv2extension/internal/http/responders_test.go @@ -29,7 +29,7 @@ func TestRespondWithJSON(t *testing.T) { require.NoError(t, respondWithJSON(http.StatusOK, content, w)) resp := w.Result() 
assert.Equal(t, http.StatusOK, resp.StatusCode) - assert.Equal(t, resp.Header.Get("Content-Type"), "application/json") + assert.Equal(t, "application/json", resp.Header.Get("Content-Type")) body, err := io.ReadAll(resp.Body) require.NoError(t, err) diff --git a/extension/oauth2clientauthextension/extension_test.go b/extension/oauth2clientauthextension/extension_test.go index 915edf1ae653..c22561fb5c0e 100644 --- a/extension/oauth2clientauthextension/extension_test.go +++ b/extension/oauth2clientauthextension/extension_test.go @@ -116,7 +116,7 @@ func TestOAuthClientSettingsCredsConfig(t *testing.T) { settings *Config expectedClientConfig *clientcredentials.Config shouldError bool - expectedError *error + expectedError error }{ { name: "client_id_file", @@ -151,7 +151,7 @@ func TestOAuthClientSettingsCredsConfig(t *testing.T) { ClientSecret: "testsecret", }, shouldError: true, - expectedError: &errNoClientIDProvided, + expectedError: errNoClientIDProvided, }, { name: "missing_client_creds_file", @@ -160,7 +160,7 @@ func TestOAuthClientSettingsCredsConfig(t *testing.T) { ClientSecretFile: testCredsMissingFile, }, shouldError: true, - expectedError: &errNoClientSecretProvided, + expectedError: errNoClientSecretProvided, }, } @@ -170,7 +170,7 @@ func TestOAuthClientSettingsCredsConfig(t *testing.T) { cfg, err := rc.clientCredentials.createConfig() if test.shouldError { assert.Error(t, err) - assert.ErrorAs(t, err, test.expectedError) + assert.ErrorIs(t, err, test.expectedError) return } assert.NoError(t, err) diff --git a/extension/observer/ecsobserver/exporter_test.go b/extension/observer/ecsobserver/exporter_test.go index 8ba905c85db9..eb767eaebfda 100644 --- a/extension/observer/ecsobserver/exporter_test.go +++ b/extension/observer/ecsobserver/exporter_test.go @@ -4,7 +4,6 @@ package ecsobserver import ( - "errors" "testing" "github.com/aws/aws-sdk-go/aws" @@ -27,7 +26,7 @@ func TestTaskExporter(t *testing.T) { }) assert.Error(t, err) v := 
&errPrivateIPNotFound{} - assert.True(t, errors.As(err, &v)) + assert.ErrorAs(t, err, &v) }) awsVpcTask := &ecs.Task{ @@ -118,7 +117,7 @@ func TestTaskExporter(t *testing.T) { merr := multierr.Errors(err) require.Len(t, merr, 1) v := &errMappedPortNotFound{} - assert.True(t, errors.As(merr[0], &v)) + assert.ErrorAs(t, merr[0], &v) assert.Len(t, targets, 2) }) diff --git a/extension/observer/ecsobserver/internal/ecsmock/service_test.go b/extension/observer/ecsobserver/internal/ecsmock/service_test.go index 0bc09b6fbcd5..8105596b008e 100644 --- a/extension/observer/ecsobserver/internal/ecsmock/service_test.go +++ b/extension/observer/ecsobserver/internal/ecsmock/service_test.go @@ -5,7 +5,6 @@ package ecsmock import ( "context" - "errors" "fmt" "testing" @@ -28,7 +27,7 @@ func TestCluster_ListTasksWithContext(t *testing.T) { _, err := c.ListTasksWithContext(ctx, req) require.Error(t, err) var aerr awserr.Error - assert.True(t, errors.As(err, &aerr)) + assert.ErrorAs(t, err, &aerr) assert.Equal(t, ecs.ErrCodeClusterNotFoundException, aerr.Code()) assert.Equal(t, "code "+ecs.ErrCodeClusterNotFoundException+" message "+aerr.Message(), aerr.Error()) assert.NoError(t, aerr.OrigErr()) diff --git a/extension/observer/ecsobserver/matcher_test.go b/extension/observer/ecsobserver/matcher_test.go index bcfdeed717f5..11ab05d56401 100644 --- a/extension/observer/ecsobserver/matcher_test.go +++ b/extension/observer/ecsobserver/matcher_test.go @@ -92,7 +92,7 @@ func TestMatchedContainer_MergeTargets(t *testing.T) { } m.MergeTargets(newTargets) assert.Len(t, m.Targets, 4) - assert.Equal(t, m.Targets[3].MetricsPath, "/m1") // order is append + assert.Equal(t, "/m1", m.Targets[3].MetricsPath) // order is append }) t.Run("respect existing targets", func(t *testing.T) { diff --git a/extension/observer/k8sobserver/factory_test.go b/extension/observer/k8sobserver/factory_test.go index 02f59947d7fe..1c1ee208d152 100644 --- a/extension/observer/k8sobserver/factory_test.go +++ 
b/extension/observer/k8sobserver/factory_test.go @@ -18,7 +18,7 @@ import ( func TestFactory_CreateDefaultConfig(t *testing.T) { factory := NewFactory() cfg := factory.CreateDefaultConfig().(*Config) - assert.Equal(t, cfg.APIConfig, k8sconfig.APIConfig{AuthType: k8sconfig.AuthTypeServiceAccount}) + assert.Equal(t, k8sconfig.APIConfig{AuthType: k8sconfig.AuthTypeServiceAccount}, cfg.APIConfig) assert.NoError(t, componenttest.CheckConfigStruct(cfg)) } diff --git a/extension/sigv4authextension/signingroundtripper_test.go b/extension/sigv4authextension/signingroundtripper_test.go index 4195faa297e3..fb1f074b1a1f 100644 --- a/extension/sigv4authextension/signingroundtripper_test.go +++ b/extension/sigv4authextension/signingroundtripper_test.go @@ -89,7 +89,7 @@ func TestRoundTrip(t *testing.T) { return } assert.NoError(t, err) - assert.Equal(t, res.StatusCode, 200) + assert.Equal(t, 200, res.StatusCode) }) } } diff --git a/extension/solarwindsapmsettingsextension/factory_test.go b/extension/solarwindsapmsettingsextension/factory_test.go index d30f14fbaa44..d328af08ef4d 100644 --- a/extension/solarwindsapmsettingsextension/factory_test.go +++ b/extension/solarwindsapmsettingsextension/factory_test.go @@ -17,7 +17,7 @@ func TestCreateDefaultConfig(t *testing.T) { assert.NoError(t, componenttest.CheckConfigStruct(cfg)) ocfg, ok := factory.CreateDefaultConfig().(*Config) assert.True(t, ok) - assert.Equal(t, ocfg.ClientConfig.Endpoint, DefaultEndpoint, "Wrong default endpoint") + assert.Equal(t, DefaultEndpoint, ocfg.ClientConfig.Endpoint, "Wrong default endpoint") assert.Empty(t, ocfg.Key, "There is no default key") - assert.Equal(t, ocfg.Interval, DefaultInterval, "Wrong default interval") + assert.Equal(t, DefaultInterval, ocfg.Interval, "Wrong default interval") } diff --git a/extension/sumologicextension/credentials/credentialsstore_localfs_test.go b/extension/sumologicextension/credentials/credentialsstore_localfs_test.go index c64599df7591..e367b9a0586b 100644 --- 
a/extension/sumologicextension/credentials/credentialsstore_localfs_test.go +++ b/extension/sumologicextension/credentials/credentialsstore_localfs_test.go @@ -62,7 +62,7 @@ func TestCredentialsStoreLocalFs(t *testing.T) { }, ), ) - require.EqualValues(t, fileCounter, 0) + require.EqualValues(t, 0, fileCounter) } func TestCredentialsStoreValidate(t *testing.T) { diff --git a/extension/sumologicextension/extension_test.go b/extension/sumologicextension/extension_test.go index 2e22649d5f7d..fd09a1b64f0b 100644 --- a/extension/sumologicextension/extension_test.go +++ b/extension/sumologicextension/extension_test.go @@ -387,7 +387,7 @@ func TestStoreCredentials_PreexistingCredentialsAreUsed(t *testing.T) { require.NoError(t, se.Shutdown(context.Background())) require.FileExists(t, credsPath) - require.EqualValues(t, atomic.LoadInt32(&reqCount), 2) + require.EqualValues(t, 2, atomic.LoadInt32(&reqCount)) } func TestLocalFSCredentialsStore_WorkCorrectlyForMultipleExtensions(t *testing.T) { @@ -1476,10 +1476,10 @@ func TestWatchCredentialKey(t *testing.T) { ctxc, cancel := context.WithCancel(ctx) cancel() v := se.WatchCredentialKey(ctxc, "") - require.Equal(t, v, "") + require.Equal(t, "", v) v = se.WatchCredentialKey(context.Background(), "foobar") - require.Equal(t, v, "") + require.Equal(t, "", v) go func() { time.Sleep(time.Millisecond * 100) @@ -1490,7 +1490,7 @@ func TestWatchCredentialKey(t *testing.T) { }() v = se.WatchCredentialKey(context.Background(), "") - require.Equal(t, v, "test-credential-key") + require.Equal(t, "test-credential-key", v) } func TestCreateCredentialsHeader(t *testing.T) { @@ -1535,11 +1535,11 @@ func TestUpdateMetadataRequestPayload(t *testing.T) { // @sumo-drosiek: It happened to be empty OsVersion on my machine // require.NotEmpty(t, reqPayload.HostDetails.OsVersion) require.NotEmpty(t, reqPayload.NetworkDetails.HostIPAddress) - require.EqualValues(t, reqPayload.HostDetails.Environment, "EKS-1.20.2") - require.EqualValues(t, 
reqPayload.CollectorDetails.RunningVersion, "1.0.0") - require.EqualValues(t, reqPayload.TagDetails["team"], "A") - require.EqualValues(t, reqPayload.TagDetails["app"], "linux") - require.EqualValues(t, reqPayload.TagDetails["sumo.disco.enabled"], "true") + require.EqualValues(t, "EKS-1.20.2", reqPayload.HostDetails.Environment) + require.EqualValues(t, "1.0.0", reqPayload.CollectorDetails.RunningVersion) + require.EqualValues(t, "A", reqPayload.TagDetails["team"]) + require.EqualValues(t, "linux", reqPayload.TagDetails["app"]) + require.EqualValues(t, "true", reqPayload.TagDetails["sumo.disco.enabled"]) _, err := w.Write([]byte(``)) diff --git a/internal/aws/awsutil/conn_test.go b/internal/aws/awsutil/conn_test.go index 56d6228e38d4..363b5ac5ec49 100644 --- a/internal/aws/awsutil/conn_test.go +++ b/internal/aws/awsutil/conn_test.go @@ -46,7 +46,7 @@ func TestEC2Session(t *testing.T) { expectedSession, _ = session.NewSession() m.sn = expectedSession cfg, s, err := GetAWSConfigSession(logger, m, &sessionCfg) - assert.Equal(t, s, expectedSession, "Expect the session object is not overridden") + assert.Equal(t, expectedSession, s, "Expect the session object is not overridden") assert.Equal(t, *cfg.Region, ec2Region, "Region value fetched from ec2-metadata service") assert.NoError(t, err) } @@ -63,7 +63,7 @@ func TestRegionEnv(t *testing.T) { expectedSession, _ = session.NewSession() m.sn = expectedSession cfg, s, err := GetAWSConfigSession(logger, m, &sessionCfg) - assert.Equal(t, s, expectedSession, "Expect the session object is not overridden") + assert.Equal(t, expectedSession, s, "Expect the session object is not overridden") assert.Equal(t, *cfg.Region, region, "Region value fetched from environment") assert.NoError(t, err) } diff --git a/internal/aws/k8s/k8sclient/node_test.go b/internal/aws/k8s/k8sclient/node_test.go index 6ffbaa363992..b10a32e32184 100644 --- a/internal/aws/k8s/k8sclient/node_test.go +++ b/internal/aws/k8s/k8sclient/node_test.go @@ -302,8 
+302,8 @@ func TestNodeClient(t *testing.T) { clusterFailedNodeCount := client.ClusterFailedNodeCount() log.Printf("clusterNodeCount: %v, clusterFailedNodeCount: %v", clusterNodeCount, clusterFailedNodeCount) - assert.Equal(t, clusterNodeCount, expectedClusterNodeCount) - assert.Equal(t, clusterFailedNodeCount, expectedClusterFailedNodeCount) + assert.Equal(t, expectedClusterNodeCount, clusterNodeCount) + assert.Equal(t, expectedClusterFailedNodeCount, clusterFailedNodeCount) client.shutdown() assert.True(t, client.stopped) } diff --git a/internal/aws/proxy/conn_test.go b/internal/aws/proxy/conn_test.go index fec2f9328573..77b3ddc8d0c8 100644 --- a/internal/aws/proxy/conn_test.go +++ b/internal/aws/proxy/conn_test.go @@ -174,7 +174,7 @@ func TestRegionFromEC2(t *testing.T) { logs := recordedLogs.All() lastEntry := logs[len(logs)-1] assert.Contains(t, lastEntry.Message, "Fetched region from EC2 metadata", "expected log message") - assert.Equal(t, lastEntry.Context[0].Key, "region", "expected log key") + assert.Equal(t, "region", lastEntry.Context[0].Key, "expected log key") assert.Equal(t, lastEntry.Context[0].String, ec2Region) } @@ -356,8 +356,8 @@ func TestGetSTSCredsFromPrimaryRegionEndpoint(t *testing.T) { fake := &stsCalls{ log: zap.NewNop(), getSTSCredsFromRegionEndpoint: func(_ *zap.Logger, _ *session.Session, region, roleArn string) *credentials.Credentials { - assert.Equal(t, region, endpoints.UsEast1RegionID, "expected region differs") - assert.Equal(t, roleArn, expectedRoleARN, "expected role ARN differs") + assert.Equal(t, endpoints.UsEast1RegionID, region, "expected region differs") + assert.Equal(t, expectedRoleARN, roleArn, "expected role ARN differs") called = true return nil }, @@ -368,8 +368,8 @@ func TestGetSTSCredsFromPrimaryRegionEndpoint(t *testing.T) { called = false fake.getSTSCredsFromRegionEndpoint = func(_ *zap.Logger, _ *session.Session, region, roleArn string) *credentials.Credentials { - assert.Equal(t, region, 
endpoints.CnNorth1RegionID, "expected region differs") - assert.Equal(t, roleArn, expectedRoleARN, "expected role ARN differs") + assert.Equal(t, endpoints.CnNorth1RegionID, region, "expected region differs") + assert.Equal(t, expectedRoleARN, roleArn, "expected role ARN differs") called = true return nil } @@ -379,8 +379,8 @@ func TestGetSTSCredsFromPrimaryRegionEndpoint(t *testing.T) { called = false fake.getSTSCredsFromRegionEndpoint = func(_ *zap.Logger, _ *session.Session, region, roleArn string) *credentials.Credentials { - assert.Equal(t, region, endpoints.UsGovWest1RegionID, "expected region differs") - assert.Equal(t, roleArn, expectedRoleARN, "expected role ARN differs") + assert.Equal(t, endpoints.UsGovWest1RegionID, region, "expected region differs") + assert.Equal(t, expectedRoleARN, roleArn, "expected role ARN differs") called = true return nil } @@ -461,8 +461,7 @@ func TestSTSRegionalEndpointDisabled(t *testing.T) { "STS regional endpoint disabled. Credentials for provided RoleARN will be fetched from STS primary region endpoint instead", "expected log message") assert.Equal(t, - lastEntry.Context[0].String, - expectedRegion, "expected error") + expectedRegion, lastEntry.Context[0].String, "expected error") assert.EqualError(t, lastEntry.Context[1].Interface.(error), expectedErr.Error(), "expected error") diff --git a/internal/common/ttlmap/ttl_map_test.go b/internal/common/ttlmap/ttl_map_test.go index c5bc890e5a46..d8a98db8a0a5 100644 --- a/internal/common/ttlmap/ttl_map_test.go +++ b/internal/common/ttlmap/ttl_map_test.go @@ -22,8 +22,8 @@ func TestTTLMapData(t *testing.T) { func TestTTLMapSimple(t *testing.T) { m := New(5, 10, make(chan struct{})) - require.EqualValues(t, m.sweepInterval, 5) - require.EqualValues(t, m.md.maxAge, 10) + require.EqualValues(t, 5, m.sweepInterval) + require.EqualValues(t, 10, m.md.maxAge) m.Put("foo", "bar") s := m.Get("foo").(string) require.Equal(t, "bar", s) diff --git 
a/internal/filter/filterspan/filterspan_test.go b/internal/filter/filterspan/filterspan_test.go index 5a71facf3a17..4b407eb77d71 100644 --- a/internal/filter/filterspan/filterspan_test.go +++ b/internal/filter/filterspan/filterspan_test.go @@ -298,12 +298,12 @@ func TestSpan_Matching_True(t *testing.T) { func TestServiceNameForResource(t *testing.T) { td := testdata.GenerateTracesOneSpanNoResource() name := serviceNameForResource(td.ResourceSpans().At(0).Resource()) - require.Equal(t, name, "") + require.Equal(t, "", name) td = testdata.GenerateTracesOneSpan() resource := td.ResourceSpans().At(0).Resource() name = serviceNameForResource(resource) - require.Equal(t, name, "") + require.Equal(t, "", name) } diff --git a/internal/kubelet/client_test.go b/internal/kubelet/client_test.go index 5c7aa63d6681..938bad01a176 100644 --- a/internal/kubelet/client_test.go +++ b/internal/kubelet/client_test.go @@ -105,7 +105,7 @@ func TestDefaultTLSClient(t *testing.T) { func TestSvcAcctClient(t *testing.T) { server := httptest.NewUnstartedServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { // Check if call is authenticated using token from test file - require.Equal(t, req.Header.Get("Authorization"), "Bearer s3cr3t") + require.Equal(t, "Bearer s3cr3t", req.Header.Get("Authorization")) _, err := rw.Write([]byte(`OK`)) require.NoError(t, err) })) @@ -174,7 +174,7 @@ func TestNewKubeConfigClient(t *testing.T) { t.Run(tt.name, func(t *testing.T) { server := httptest.NewUnstartedServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { // Check if call is authenticated using provided kubeconfig - require.Equal(t, req.Header.Get("Authorization"), "Bearer my-token") + require.Equal(t, "Bearer my-token", req.Header.Get("Authorization")) require.Equal(t, "/api/v1/nodes/nodename/proxy/", req.URL.EscapedPath()) // Send response to be tested _, err := rw.Write([]byte(`OK`)) diff --git a/internal/otelarrow/go.mod b/internal/otelarrow/go.mod index 
d449b9df33d5..8c6792439ffc 100644 --- a/internal/otelarrow/go.mod +++ b/internal/otelarrow/go.mod @@ -64,6 +64,7 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/mostynb/go-grpc-compression v1.2.3 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/grpcutil v0.108.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.108.0 // indirect github.com/pierrec/lz4/v4 v4.1.21 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect @@ -110,3 +111,5 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/otela replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/otelarrowexporter => ../../exporter/otelarrowexporter replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent => ../sharedcomponent + +replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/grpcutil => ../grpcutil diff --git a/internal/sqlquery/db_client_test.go b/internal/sqlquery/db_client_test.go index 9b149979a943..a95521f73da7 100644 --- a/internal/sqlquery/db_client_test.go +++ b/internal/sqlquery/db_client_test.go @@ -70,7 +70,7 @@ func TestDBSQLClient_Nulls(t *testing.T) { } rows, err := cl.QueryRows(context.Background()) assert.Error(t, err) - assert.True(t, errors.Is(err, ErrNullValueWarning)) + assert.ErrorIs(t, err, ErrNullValueWarning) assert.Len(t, rows, 1) assert.EqualValues(t, map[string]string{ "col_0": "42", @@ -96,7 +96,7 @@ func TestDBSQLClient_Nulls_MultiRow(t *testing.T) { assert.Len(t, uw, 2) for _, err := range uw { - assert.True(t, errors.Is(err, ErrNullValueWarning)) + assert.ErrorIs(t, err, ErrNullValueWarning) } } assert.Len(t, rows, 2) diff --git a/pkg/ottl/contexts/ottldatapoint/datapoint_test.go 
b/pkg/ottl/contexts/ottldatapoint/datapoint_test.go index e8d837fddaa5..73e7d697b03a 100644 --- a/pkg/ottl/contexts/ottldatapoint/datapoint_test.go +++ b/pkg/ottl/contexts/ottldatapoint/datapoint_test.go @@ -2130,7 +2130,7 @@ func Test_ParseEnum(t *testing.T) { t.Run(tt.name, func(t *testing.T) { actual, err := parseEnum((*ottl.EnumSymbol)(ottltest.Strp(tt.name))) assert.NoError(t, err) - assert.Equal(t, *actual, tt.want) + assert.Equal(t, tt.want, *actual) }) } } diff --git a/pkg/ottl/contexts/ottllog/log_test.go b/pkg/ottl/contexts/ottllog/log_test.go index e84cf74d21dd..5beda9fe137f 100644 --- a/pkg/ottl/contexts/ottllog/log_test.go +++ b/pkg/ottl/contexts/ottllog/log_test.go @@ -855,7 +855,7 @@ func Test_ParseEnum(t *testing.T) { t.Run(tt.name, func(t *testing.T) { actual, err := parseEnum((*ottl.EnumSymbol)(ottltest.Strp(tt.name))) assert.NoError(t, err) - assert.Equal(t, *actual, tt.want) + assert.Equal(t, tt.want, *actual) }) } } diff --git a/pkg/ottl/contexts/ottlmetric/metrics_test.go b/pkg/ottl/contexts/ottlmetric/metrics_test.go index 3ab79f61e295..d81458f76e1f 100644 --- a/pkg/ottl/contexts/ottlmetric/metrics_test.go +++ b/pkg/ottl/contexts/ottlmetric/metrics_test.go @@ -224,7 +224,7 @@ func Test_ParseEnum(t *testing.T) { t.Run(tt.name, func(t *testing.T) { actual, err := parseEnum((*ottl.EnumSymbol)(ottltest.Strp(tt.name))) assert.NoError(t, err) - assert.Equal(t, *actual, tt.want) + assert.Equal(t, tt.want, *actual) }) } } diff --git a/pkg/ottl/contexts/ottlspan/span_test.go b/pkg/ottl/contexts/ottlspan/span_test.go index 041d64f80a36..05782cb543ba 100644 --- a/pkg/ottl/contexts/ottlspan/span_test.go +++ b/pkg/ottl/contexts/ottlspan/span_test.go @@ -832,7 +832,7 @@ func Test_ParseEnum(t *testing.T) { t.Run(tt.name, func(t *testing.T) { actual, err := parseEnum((*ottl.EnumSymbol)(ottltest.Strp(tt.name))) assert.NoError(t, err) - assert.Equal(t, *actual, tt.want) + assert.Equal(t, tt.want, *actual) }) } } diff --git 
a/pkg/ottl/contexts/ottlspanevent/span_events_test.go b/pkg/ottl/contexts/ottlspanevent/span_events_test.go index 57e87ced6636..15b53aed0ad1 100644 --- a/pkg/ottl/contexts/ottlspanevent/span_events_test.go +++ b/pkg/ottl/contexts/ottlspanevent/span_events_test.go @@ -571,7 +571,7 @@ func Test_ParseEnum(t *testing.T) { t.Run(tt.name, func(t *testing.T) { actual, err := parseEnum((*ottl.EnumSymbol)(ottltest.Strp(tt.name))) assert.NoError(t, err) - assert.Equal(t, *actual, tt.want) + assert.Equal(t, tt.want, *actual) }) } } diff --git a/pkg/ottl/e2e/e2e_test.go b/pkg/ottl/e2e/e2e_test.go index 97eda4f67e8c..878ed6384507 100644 --- a/pkg/ottl/e2e/e2e_test.go +++ b/pkg/ottl/e2e/e2e_test.go @@ -311,6 +311,12 @@ func Test_e2e_converters(t *testing.T) { tCtx.GetLogRecord().Attributes().PutStr("test", "pass") }, }, + { + statement: `set(attributes["test"], Decode("cGFzcw==", "base64"))`, + want: func(tCtx ottllog.TransformContext) { + tCtx.GetLogRecord().Attributes().PutStr("test", "pass") + }, + }, { statement: `set(attributes["test"], Concat(["A","B"], ":"))`, want: func(tCtx ottllog.TransformContext) { @@ -643,6 +649,62 @@ func Test_e2e_converters(t *testing.T) { tCtx.GetLogRecord().Attributes().PutStr("test", "5b722b307fce6c944905d132691d5e4a2214b7fe92b738920eb3fce3a90420a19511c3010a0e7712b054daef5b57bad59ecbd93b3280f210578f547f4aed4d25") }, }, + { + statement: `set(attributes["test"], Sort(Split(attributes["flags"], "|"), "desc"))`, + want: func(tCtx ottllog.TransformContext) { + s := tCtx.GetLogRecord().Attributes().PutEmptySlice("test") + s.AppendEmpty().SetStr("C") + s.AppendEmpty().SetStr("B") + s.AppendEmpty().SetStr("A") + }, + }, + { + statement: `set(attributes["test"], Sort([true, false, false]))`, + want: func(tCtx ottllog.TransformContext) { + s := tCtx.GetLogRecord().Attributes().PutEmptySlice("test") + s.AppendEmpty().SetBool(false) + s.AppendEmpty().SetBool(false) + s.AppendEmpty().SetBool(true) + }, + }, + { + statement: `set(attributes["test"], Sort([3, 
6, 9], "desc"))`, + want: func(tCtx ottllog.TransformContext) { + s := tCtx.GetLogRecord().Attributes().PutEmptySlice("test") + s.AppendEmpty().SetInt(9) + s.AppendEmpty().SetInt(6) + s.AppendEmpty().SetInt(3) + }, + }, + { + statement: `set(attributes["test"], Sort([Double(1.5), Double(10.2), Double(2.3), Double(0.5)]))`, + want: func(tCtx ottllog.TransformContext) { + s := tCtx.GetLogRecord().Attributes().PutEmptySlice("test") + s.AppendEmpty().SetDouble(0.5) + s.AppendEmpty().SetDouble(1.5) + s.AppendEmpty().SetDouble(2.3) + s.AppendEmpty().SetDouble(10.2) + }, + }, + { + statement: `set(attributes["test"], Sort([Int(11), Double(2.2), Double(-1)]))`, + want: func(tCtx ottllog.TransformContext) { + s := tCtx.GetLogRecord().Attributes().PutEmptySlice("test") + s.AppendEmpty().SetDouble(-1) + s.AppendEmpty().SetDouble(2.2) + s.AppendEmpty().SetInt(11) + }, + }, + { + statement: `set(attributes["test"], Sort([false, Int(11), Double(2.2), "three"]))`, + want: func(tCtx ottllog.TransformContext) { + s := tCtx.GetLogRecord().Attributes().PutEmptySlice("test") + s.AppendEmpty().SetInt(11) + s.AppendEmpty().SetDouble(2.2) + s.AppendEmpty().SetBool(false) + s.AppendEmpty().SetStr("three") + }, + }, { statement: `set(span_id, SpanID(0x0000000000000000))`, want: func(tCtx ottllog.TransformContext) { diff --git a/pkg/ottl/go.mod b/pkg/ottl/go.mod index 74ac2cdc7171..583a660f1d76 100644 --- a/pkg/ottl/go.mod +++ b/pkg/ottl/go.mod @@ -21,6 +21,7 @@ require ( go.uber.org/zap v1.27.0 golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 golang.org/x/net v0.28.0 + golang.org/x/text v0.17.0 ) require ( @@ -50,7 +51,6 @@ require ( go.opentelemetry.io/otel/sdk/metric v1.29.0 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/sys v0.24.0 // indirect - golang.org/x/text v0.17.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd // indirect google.golang.org/grpc v1.66.0 // indirect google.golang.org/protobuf v1.34.2 // indirect diff 
--git a/pkg/ottl/ottlfuncs/README.md b/pkg/ottl/ottlfuncs/README.md index 129104fe65fb..7c21b4cd6dcc 100644 --- a/pkg/ottl/ottlfuncs/README.md +++ b/pkg/ottl/ottlfuncs/README.md @@ -410,6 +410,7 @@ Unlike functions, they do not modify any input telemetry and always return a val Available Converters: - [Base64Decode](#base64decode) +- [Decode](#decode) - [Concat](#concat) - [ConvertCase](#convertcase) - [Day](#day) @@ -449,6 +450,7 @@ Available Converters: - [SHA1](#sha1) - [SHA256](#sha256) - [SHA512](#sha512) +- [Sort](#sort) - [SpanID](#spanid) - [Split](#split) - [String](#string) @@ -465,7 +467,9 @@ Available Converters: - [UUID](#UUID) - [Year](#year) -### Base64Decode +### Base64Decode (Deprecated) + +*This function has been deprecated. Please use the [Decode](#decode) function instead.* `Base64Decode(value)` @@ -480,6 +484,22 @@ Examples: - `Base64Decode(attributes["encoded field"])` +### Decode + +`Decode(value, encoding)` + +The `Decode` Converter takes a string or byte array encoded with the specified encoding and returns the decoded string. + +`value` is a valid encoded string or byte array. +`encoding` is a valid encoding name included in the [IANA encoding index](https://www.iana.org/assignments/character-sets/character-sets.xhtml). + +Examples: + +- `Decode("aGVsbG8gd29ybGQ=", "base64")` + + +- `Decode(attributes["encoded field"], "us-ascii")` + ### Concat `Concat(values[], delimiter)` @@ -1318,7 +1338,6 @@ Examples: - `SHA256(attributes["device.name"])` - - `SHA256("name")` ### SHA512 @@ -1338,6 +1357,34 @@ Examples: - `SHA512("name")` +### Sort + +`Sort(target, Optional[order])` + +The `Sort` Converter sorts the `target` array in either ascending or descending order. + +`target` is an array or `pcommon.Slice` typed field containing the elements to be sorted. + +`order` is a string specifying the sort order. Must be either `asc` or `desc`. The default value is `asc`. 
+ +The Sort Converter preserves the data type of the original elements while sorting. +The behavior varies based on the types of elements in the target slice: + +| Element Types | Sorting Behavior | Return Value | +|---------------|-------------------------------------|--------------| +| Integers | Sorts as integers | Sorted array of integers | +| Doubles | Sorts as doubles | Sorted array of doubles | +| Integers and doubles | Converts all to doubles, then sorts | Sorted array of integers and doubles | +| Strings | Sorts as strings | Sorted array of strings | +| Booleans | Converts all to strings, then sorts | Sorted array of booleans | +| Mix of integers, doubles, booleans, and strings | Converts all to strings, then sorts | Sorted array of mixed types | +| Any other types | N/A | Returns an error | + +Examples: + +- `Sort(attributes["device.tags"])` +- `Sort(attributes["device.tags"], "desc")` + ### SpanID `SpanID(bytes)` diff --git a/pkg/ottl/ottlfuncs/func_decode.go b/pkg/ottl/ottlfuncs/func_decode.go new file mode 100644 index 000000000000..374bb897de79 --- /dev/null +++ b/pkg/ottl/ottlfuncs/func_decode.go @@ -0,0 +1,114 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs" + +import ( + "context" + "encoding/base64" + "fmt" + "strings" + + "go.opentelemetry.io/collector/pdata/pcommon" + "golang.org/x/text/encoding" + "golang.org/x/text/encoding/ianaindex" + "golang.org/x/text/encoding/unicode" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" +) + +type DecodeArguments[K any] struct { + Target ottl.Getter[K] + Encoding string +} + +func NewDecodeFactory[K any]() ottl.Factory[K] { + return ottl.NewFactory("Decode", &DecodeArguments[K]{}, createDecodeFunction[K]) +} + +func createDecodeFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { + args, ok 
:= oArgs.(*DecodeArguments[K]) + if !ok { + return nil, fmt.Errorf("DecodeFactory args must be of type *DecodeArguments[K]") + } + + return Decode(args.Target, args.Encoding) +} + +func Decode[K any](target ottl.Getter[K], encoding string) (ottl.ExprFunc[K], error) { + return func(ctx context.Context, tCtx K) (any, error) { + val, err := target.Get(ctx, tCtx) + if err != nil { + return nil, err + } + var stringValue string + + switch v := val.(type) { + case []byte: + stringValue = string(v) + case *string: + stringValue = *v + case string: + stringValue = v + case pcommon.ByteSlice: + stringValue = string(v.AsRaw()) + case *pcommon.ByteSlice: + stringValue = string(v.AsRaw()) + case pcommon.Value: + stringValue = v.AsString() + case *pcommon.Value: + stringValue = v.AsString() + default: + return nil, fmt.Errorf("unsupported type provided to Decode function: %T", v) + } + + switch encoding { + case "base64": + // base64 is not in IANA index, so we have to deal with this encoding separately + decodedBytes, err := base64.StdEncoding.DecodeString(stringValue) + if err != nil { + return nil, fmt.Errorf("could not decode: %w", err) + } + return string(decodedBytes), nil + default: + e, err := getEncoding(encoding) + if err != nil { + return nil, err + } + + decodedString, err := e.NewDecoder().String(stringValue) + if err != nil { + return nil, fmt.Errorf("could not decode: %w", err) + } + + return decodedString, nil + } + }, nil +} + +func getEncoding(encoding string) (encoding.Encoding, error) { + if e, ok := encodingOverrides[strings.ToLower(encoding)]; ok { + return e, nil + } + e, err := ianaindex.IANA.Encoding(encoding) + if err != nil { + return nil, fmt.Errorf("could not get encoding for %s: %w", encoding, err) + } + if e == nil { + // for some encodings a nil error and a nil encoding is returned, so we need to double check + // if the encoding is actually set here + return nil, fmt.Errorf("no decoder available for encoding: %s", encoding) + } + return e, nil 
+} + +var encodingOverrides = map[string]encoding.Encoding{ + "": unicode.UTF8, + "nop": encoding.Nop, + "ascii": unicode.UTF8, + "us-ascii": unicode.UTF8, + "utf8": unicode.UTF8, + "utf-8": unicode.UTF8, + "utf16": unicode.UTF16(unicode.LittleEndian, unicode.IgnoreBOM), + "utf-16": unicode.UTF16(unicode.LittleEndian, unicode.IgnoreBOM), +} diff --git a/pkg/ottl/ottlfuncs/func_decode_test.go b/pkg/ottl/ottlfuncs/func_decode_test.go new file mode 100644 index 000000000000..e4ef6bea27fe --- /dev/null +++ b/pkg/ottl/ottlfuncs/func_decode_test.go @@ -0,0 +1,199 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ottlfuncs + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/pdata/pcommon" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" +) + +func TestDecode(t *testing.T) { + + testByteSlice := pcommon.NewByteSlice() + testByteSlice.FromRaw([]byte("test string")) + testByteSliceB64 := pcommon.NewByteSlice() + testByteSliceB64.FromRaw([]byte("aGVsbG8gd29ybGQ=")) + + testValue := pcommon.NewValueEmpty() + _ = testValue.FromRaw("test string") + testValueB64 := pcommon.NewValueEmpty() + _ = testValueB64.FromRaw("aGVsbG8gd29ybGQ=") + + type testCase struct { + name string + value any + encoding string + want any + expectedError string + } + tests := []testCase{ + { + name: "convert base64 byte array", + value: []byte("dGVzdAo="), + encoding: "base64", + want: "test\n", + }, + { + name: "convert base64 string", + value: "aGVsbG8gd29ybGQ=", + encoding: "base64", + want: "hello world", + }, + { + name: "convert base64 ByteSlice", + value: testByteSliceB64, + encoding: "base64", + want: "hello world", + }, + { + name: "convert base64 Value", + value: testValueB64, + encoding: "base64", + want: "hello world", + }, + { + name: "convert base64 ByteSlice pointer", + value: &testByteSliceB64, + encoding: "base64", + want: "hello world", + }, + { 
+ name: "convert base64 Value pointer", + value: &testValueB64, + encoding: "base64", + want: "hello world", + }, + { + name: "decode us-ascii encoded string", + value: "test string", + encoding: "us-ascii", + want: "test string", + }, + { + name: "decode us-ascii encoded byte array", + value: []byte("test string"), + encoding: "us-ascii", + want: "test string", + }, + { + name: "decode us-ascii encoded byte slice", + value: testByteSlice, + encoding: "us-ascii", + want: "test string", + }, + { + name: "decode us-ascii encoded Value", + value: testValue, + encoding: "us-ascii", + want: "test string", + }, + { + name: "decode us-ascii encoded byte slice pointer", + value: &testByteSlice, + encoding: "us-ascii", + want: "test string", + }, + { + name: "decode us-ascii encoded Value pointer", + value: &testValue, + encoding: "us-ascii", + want: "test string", + }, + { + name: "decode ISO-8859-1 encoded string", + value: "test string", + encoding: "ISO-8859-1", + want: "test string", + }, + { + name: "decode WINDOWS-1251 encoded string", + value: "test string", + encoding: "WINDOWS-1251", + want: "test string", + }, + { + name: "decode WINDOWS-1252 encoded string", + value: "test string", + encoding: "WINDOWS-1252", + want: "test string", + }, + { + name: "decode UTF-8 encoded string", + value: "test string", + encoding: "UTF-8", + want: "test string", + }, + { + name: "decode UTF-16 encoded string 1", + value: []byte{116, 0, 101, 0, 115, 0, 116, 0, 32, 0, 115, 0, 116, 0, 114, 0, 105, 0, 110, 0, 103, 0}, + encoding: "UTF-16", + want: "test string", + }, + { + name: "decode UTF-16 encoded string 2", + value: []byte{116, 0, 101, 0, 115, 0, 116, 0, 32, 0, 115, 0, 116, 0, 114, 0, 105, 0, 110, 0, 103, 0}, + encoding: "UTF16", + want: "test string", + }, + { + name: "decode GB2312 encoded string; no decoder available", + value: "test string", + encoding: "GB2312", + want: nil, + expectedError: "no decoder available for encoding: GB2312", + }, + { + name: "non-string", + 
value: 10, + encoding: "base64", + expectedError: "unsupported type provided to Decode function: int", + }, + { + name: "nil", + value: nil, + encoding: "base64", + expectedError: "unsupported type provided to Decode function: ", + }, + { + name: "not-base64-string", + value: "!@#$%^&*()_+", + encoding: "base64", + expectedError: "illegal base64 data at input byte", + }, + { + name: "missing-base64-padding", + value: "cmVtb3ZlZCBwYWRkaW5nCg", + encoding: "base64", + expectedError: "illegal base64 data at input byte", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + expressionFunc, err := createDecodeFunction[any](ottl.FunctionContext{}, &DecodeArguments[any]{ + Target: &ottl.StandardGetSetter[any]{ + Getter: func(context.Context, any) (any, error) { + return tt.value, nil + }, + }, + Encoding: tt.encoding, + }) + + require.NoError(t, err) + + result, err := expressionFunc(nil, nil) + if tt.expectedError != "" { + require.ErrorContains(t, err, tt.expectedError) + return + } + + require.NoError(t, err) + require.Equal(t, tt.want, result) + }) + } +} diff --git a/pkg/ottl/ottlfuncs/func_sort.go b/pkg/ottl/ottlfuncs/func_sort.go new file mode 100644 index 000000000000..4c9f56c820ce --- /dev/null +++ b/pkg/ottl/ottlfuncs/func_sort.go @@ -0,0 +1,253 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs" + +import ( + "cmp" + "context" + "fmt" + "slices" + "strconv" + + "go.opentelemetry.io/collector/pdata/pcommon" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" +) + +const ( + sortAsc = "asc" + sortDesc = "desc" +) + +type SortArguments[K any] struct { + Target ottl.Getter[K] + Order ottl.Optional[string] +} + +func NewSortFactory[K any]() ottl.Factory[K] { + return ottl.NewFactory("Sort", &SortArguments[K]{}, createSortFunction[K]) +} + +func createSortFunction[K 
any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { + args, ok := oArgs.(*SortArguments[K]) + + if !ok { + return nil, fmt.Errorf("SortFactory args must be of type *SortArguments[K]") + } + + order := sortAsc + if !args.Order.IsEmpty() { + o := args.Order.Get() + switch o { + case sortAsc, sortDesc: + order = o + default: + return nil, fmt.Errorf("invalid arguments: %s. Order should be either \"%s\" or \"%s\"", o, sortAsc, sortDesc) + } + } + + return sort(args.Target, order), nil +} + +func sort[K any](target ottl.Getter[K], order string) ottl.ExprFunc[K] { + return func(ctx context.Context, tCtx K) (any, error) { + val, err := target.Get(ctx, tCtx) + if err != nil { + return nil, err + } + + switch v := val.(type) { + case pcommon.Slice: + return sortSlice(v, order) + case pcommon.Value: + if v.Type() == pcommon.ValueTypeSlice { + return sortSlice(v.Slice(), order) + } + return nil, fmt.Errorf("sort with unsupported type: '%s'. Target is not a list", v.Type().String()) + case []any: + // handle Sort([1,2,3]) + slice := pcommon.NewValueSlice().SetEmptySlice() + if err := slice.FromRaw(v); err != nil { + return nil, fmt.Errorf("sort with unsupported type: '%T'. Target is not a list of primitive types; %w", v, err) + } + return sortSlice(slice, order) + case []string: + dup := makeCopy(v) + return sortTypedSlice(dup, order), nil + case []int64: + dup := makeCopy(v) + return sortTypedSlice(dup, order), nil + case []float64: + dup := makeCopy(v) + return sortTypedSlice(dup, order), nil + case []bool: + var strings []string + for _, b := range v { + strings = append(strings, strconv.FormatBool(b)) + } + + sortTypedSlice(strings, order) + + bools := make([]bool, len(strings)) + for i, s := range strings { + boolValue, _ := strconv.ParseBool(s) + bools[i] = boolValue + } + return bools, nil + default: + return nil, fmt.Errorf("sort with unsupported type: '%T'. 
Target is not a list", v) + } + } +} + +// sortSlice sorts a pcommon.Slice based on the specified order. +// It gets the common type for all elements in the slice and converts all elements to this common type, creating a new copy +// Parameters: +// - slice: The pcommon.Slice to be sorted +// - order: The sort order. "asc" for ascending, "desc" for descending +// +// Returns: +// - A sorted slice as []any or the original pcommon.Slice +// - An error if an unsupported type is encountered +func sortSlice(slice pcommon.Slice, order string) (any, error) { + length := slice.Len() + if length == 0 { + return slice, nil + } + + commonType, ok := findCommonValueType(slice) + if !ok { + return slice, nil + } + + switch commonType { + case pcommon.ValueTypeInt: + arr := makeConvertedCopy(slice, func(idx int) int64 { + return slice.At(idx).Int() + }) + return sortConvertedSlice(arr, order), nil + case pcommon.ValueTypeDouble: + arr := makeConvertedCopy(slice, func(idx int) float64 { + s := slice.At(idx) + if s.Type() == pcommon.ValueTypeInt { + return float64(s.Int()) + } + + return s.Double() + }) + return sortConvertedSlice(arr, order), nil + case pcommon.ValueTypeStr: + arr := makeConvertedCopy(slice, func(idx int) string { + return slice.At(idx).AsString() + }) + return sortConvertedSlice(arr, order), nil + default: + return nil, fmt.Errorf("sort with unsupported type: '%T'", commonType) + } +} + +type targetType interface { + ~int64 | ~float64 | ~string +} + +// findCommonValueType determines the most appropriate common type for all elements in a pcommon.Slice. +// It returns two values: +// - A pcommon.ValueType representing the desired common type for all elements. +// Mixed Numeric types return ValueTypeDouble. Integer type returns ValueTypeInt. Double type returns ValueTypeDouble. +// String, Bool, Empty and mixed of the mentioned types return ValueTypeStr, as they require string conversion for comparison. 
+// - A boolean indicating whether a common type could be determined (true) or not (false). +// returns false for ValueTypeMap, ValueTypeSlice and ValueTypeBytes. They are unsupported types for sort. +func findCommonValueType(slice pcommon.Slice) (pcommon.ValueType, bool) { + length := slice.Len() + if length == 0 { + return pcommon.ValueTypeEmpty, false + } + + wantType := slice.At(0).Type() + wantStr := false + wantDouble := false + + for i := 0; i < length; i++ { + value := slice.At(i) + currType := value.Type() + + switch currType { + case pcommon.ValueTypeInt: + if wantType == pcommon.ValueTypeDouble { + wantDouble = true + } + case pcommon.ValueTypeDouble: + if wantType == pcommon.ValueTypeInt { + wantDouble = true + } + case pcommon.ValueTypeStr, pcommon.ValueTypeBool, pcommon.ValueTypeEmpty: + wantStr = true + default: + return pcommon.ValueTypeEmpty, false + } + } + + if wantStr { + wantType = pcommon.ValueTypeStr + } else if wantDouble { + wantType = pcommon.ValueTypeDouble + } + + return wantType, true +} + +func makeCopy[T targetType](src []T) []T { + dup := make([]T, len(src)) + copy(dup, src) + return dup +} + +func sortTypedSlice[T targetType](arr []T, order string) []T { + if len(arr) == 0 { + return arr + } + + slices.SortFunc(arr, func(a, b T) int { + if order == sortDesc { + return cmp.Compare(b, a) + } + return cmp.Compare(a, b) + }) + + return arr +} + +type convertedValue[T targetType] struct { + value T + originalValue any +} + +func makeConvertedCopy[T targetType](slice pcommon.Slice, converter func(idx int) T) []convertedValue[T] { + length := slice.Len() + var out []convertedValue[T] + for i := 0; i < length; i++ { + cv := convertedValue[T]{ + value: converter(i), + originalValue: slice.At(i).AsRaw(), + } + out = append(out, cv) + } + return out +} + +func sortConvertedSlice[T targetType](cvs []convertedValue[T], order string) []any { + slices.SortFunc(cvs, func(a, b convertedValue[T]) int { + if order == sortDesc { + return 
cmp.Compare(b.value, a.value) + } + return cmp.Compare(a.value, b.value) + }) + + var out []any + for _, cv := range cvs { + out = append(out, cv.originalValue) + } + + return out +} diff --git a/pkg/ottl/ottlfuncs/func_sort_test.go b/pkg/ottl/ottlfuncs/func_sort_test.go new file mode 100644 index 000000000000..48dede0a2fa9 --- /dev/null +++ b/pkg/ottl/ottlfuncs/func_sort_test.go @@ -0,0 +1,280 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ottlfuncs + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "go.opentelemetry.io/collector/pdata/pcommon" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" +) + +func Test_Sort(t *testing.T) { + + pMap := pcommon.NewValueMap().SetEmptyMap() + pMap.PutStr("k", "v") + emptySlice := pcommon.NewValueSlice().SetEmptySlice() + + tests := []struct { + name string + getter ottl.Getter[any] + order string + expected any + err bool + }{ + { + name: "int slice", + getter: ottl.StandardGetSetter[any]{ + Getter: func(_ context.Context, _ any) (any, error) { + s := pcommon.NewValueSlice().SetEmptySlice() + _ = s.FromRaw([]any{9, 6, 3}) + return s, nil + }, + }, + order: sortAsc, + expected: []any{int64(3), int64(6), int64(9)}, + }, + { + name: "int slice desc", + getter: ottl.StandardGetSetter[any]{ + Getter: func(_ context.Context, _ any) (any, error) { + s := pcommon.NewValueSlice().SetEmptySlice() + _ = s.FromRaw([]any{3, 6, 9}) + return s, nil + }, + }, + order: sortDesc, + expected: []any{int64(9), int64(6), int64(3)}, + }, + { + name: "string slice", + getter: ottl.StandardGetSetter[any]{ + Getter: func(_ context.Context, _ any) (any, error) { + s := pcommon.NewValueSlice().SetEmptySlice() + _ = s.FromRaw([]any{"i", "am", "awesome", "slice"}) + return s, nil + }, + }, + order: sortAsc, + expected: []any{"am", "awesome", "i", "slice"}, + }, + { + name: "double slice", + getter: ottl.StandardGetSetter[any]{ + Getter: func(_ 
context.Context, _ any) (any, error) { + s := pcommon.NewValueSlice().SetEmptySlice() + _ = s.FromRaw([]any{1.5, 10.2, 2.3, 0.5}) + return s, nil + }, + }, + order: sortAsc, + expected: []any{0.5, 1.5, 2.3, 10.2}, + }, + { + name: "empty slice", + getter: ottl.StandardGetSetter[any]{ + Getter: func(_ context.Context, _ any) (any, error) { + s := pcommon.NewValueSlice().SetEmptySlice() + return s, nil + }, + }, + order: sortAsc, + expected: emptySlice, + }, + { + name: "bool slice compares as string", + getter: ottl.StandardGetSetter[any]{ + Getter: func(_ context.Context, _ any) (any, error) { + s := pcommon.NewValueSlice().SetEmptySlice() + _ = s.FromRaw([]any{true, false, true, false}) + return s, nil + }, + }, + order: sortAsc, + expected: []any{false, false, true, true}, + }, + { + name: "mixed types slice compares as string", + getter: ottl.StandardGetSetter[any]{ + Getter: func(_ context.Context, _ any) (any, error) { + s := pcommon.NewValueSlice().SetEmptySlice() + _ = s.FromRaw([]any{1, "two", 3.33, false}) + return s, nil + }, + }, + order: sortAsc, + expected: []any{int64(1), 3.33, false, "two"}, + }, + { + name: "double and string slice compares as string", + getter: ottl.StandardGetSetter[any]{ + Getter: func(_ context.Context, _ any) (any, error) { + s := pcommon.NewValueSlice().SetEmptySlice() + _ = s.FromRaw([]any{1.5, "10.2", 2.3, 0.5}) + return s, nil + }, + }, + order: sortAsc, + expected: []any{0.5, 1.5, "10.2", 2.3}, + }, + { + name: "mixed numeric types slice compares as double", + getter: ottl.StandardGetSetter[any]{ + Getter: func(_ context.Context, _ any) (any, error) { + s := pcommon.NewValueSlice().SetEmptySlice() + _ = s.FromRaw([]any{0, 2, 3.33, 0}) + return s, nil + }, + }, + order: sortAsc, + expected: []any{int64(0), int64(0), int64(2), 3.33}, + }, + { + name: "mixed numeric types slice compares as double desc", + getter: ottl.StandardGetSetter[any]{ + Getter: func(_ context.Context, _ any) (any, error) { + s := 
pcommon.NewValueSlice().SetEmptySlice() + _ = s.FromRaw([]any{3.14, 2, 3.33, 0}) + return s, nil + }, + }, + order: sortDesc, + expected: []any{3.33, 3.14, int64(2), int64(0)}, + }, + { + name: "[]any compares as string", + getter: ottl.StandardGetSetter[any]{ + Getter: func(_ context.Context, _ any) (any, error) { + return []any{1, "two", 3.33, false}, nil + }, + }, + order: sortAsc, + expected: []any{int64(1), 3.33, false, "two"}, + }, + { + name: "[]string", + getter: ottl.StandardGetSetter[any]{ + Getter: func(_ context.Context, _ any) (any, error) { + return []string{"A", "a", "aa"}, nil + }, + }, + order: sortAsc, + expected: []string{"A", "a", "aa"}, + }, + { + name: "[]bool compares as string", + getter: ottl.StandardGetSetter[any]{ + Getter: func(_ context.Context, _ any) (any, error) { + return []bool{true, false}, nil + }, + }, + order: sortAsc, + expected: []bool{false, true}, + }, + { + name: "[]int64", + getter: ottl.StandardGetSetter[any]{ + Getter: func(_ context.Context, _ any) (any, error) { + return []int64{6, 3, 9}, nil + }, + }, + order: sortAsc, + expected: []int64{3, 6, 9}, + }, + { + name: "[]float64", + getter: ottl.StandardGetSetter[any]{ + Getter: func(_ context.Context, _ any) (any, error) { + return []float64{1.5, 10.2, 2.3, 0.5}, nil + }, + }, + order: sortAsc, + expected: []float64{0.5, 1.5, 2.3, 10.2}, + }, + { + name: "pcommon.Value is a slice", + getter: ottl.StandardGetSetter[any]{ + Getter: func(_ context.Context, _ any) (any, error) { + pv := pcommon.NewValueEmpty() + s := pv.SetEmptySlice() + _ = s.FromRaw([]any{"a", "slice", "a"}) + return pv, nil + }, + }, + order: sortAsc, + expected: []any{"a", "a", "slice"}, + }, + { + name: "pcommon.Value is empty", + getter: ottl.StandardGetSetter[any]{ + Getter: func(_ context.Context, _ any) (any, error) { + pv := pcommon.NewValueEmpty() + return pv, nil + }, + }, + order: sortAsc, + expected: nil, + err: true, + }, + { + name: "unsupported ValueTypeMap", + getter: 
ottl.StandardGetSetter[any]{ + Getter: func(_ context.Context, _ any) (any, error) { + return pMap, nil + }, + }, + order: sortAsc, + expected: nil, + err: true, + }, + { + name: "unsupported bytes", + getter: ottl.StandardGetSetter[any]{ + Getter: func(_ context.Context, _ any) (any, error) { + return []byte("still fine"), nil + }, + }, + order: sortAsc, + expected: nil, + err: true, + }, + { + name: "unsupported string", + getter: ottl.StandardGetSetter[any]{ + Getter: func(_ context.Context, _ any) (any, error) { + return "no change", nil + }, + }, + order: sortAsc, + expected: nil, + err: true, + }, + { + name: "[]any with a map", + getter: ottl.StandardGetSetter[any]{ + Getter: func(_ context.Context, _ any) (any, error) { + return []any{map[string]string{"some": "invalid kv"}}, nil + }, + }, + order: sortAsc, + expected: nil, + err: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + exprFunc := sort(tt.getter, tt.order) + result, err := exprFunc(nil, nil) + if tt.err { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + assert.Equal(t, tt.expected, result) + }) + } +} diff --git a/pkg/ottl/ottlfuncs/functions.go b/pkg/ottl/ottlfuncs/functions.go index 1f0dfd68644a..fa38402aec3e 100644 --- a/pkg/ottl/ottlfuncs/functions.go +++ b/pkg/ottl/ottlfuncs/functions.go @@ -37,6 +37,7 @@ func converters[K any]() []ottl.Factory[K] { return []ottl.Factory[K]{ // Converters NewBase64DecodeFactory[K](), + NewDecodeFactory[K](), NewConcatFactory[K](), NewConvertCaseFactory[K](), NewDayFactory[K](), @@ -73,6 +74,7 @@ func converters[K any]() []ottl.Factory[K] { NewSHA1Factory[K](), NewSHA256Factory[K](), NewSHA512Factory[K](), + NewSortFactory[K](), NewSpanIDFactory[K](), NewSplitFactory[K](), NewFormatFactory[K](), diff --git a/pkg/sampling/encoding_test.go b/pkg/sampling/encoding_test.go index 2d4bbcd86fab..7a0fc3defc02 100644 --- a/pkg/sampling/encoding_test.go +++ b/pkg/sampling/encoding_test.go @@ -5,7 +5,6 @@ package 
sampling import ( "encoding/binary" - "errors" "fmt" "math/rand" "strconv" @@ -187,7 +186,7 @@ func TestRValueSyntax(t *testing.T) { rnd, err := RValueToRandomness(test.in) if test.expectErr != nil { - require.True(t, errors.Is(err, test.expectErr), + require.ErrorIs(t, err, test.expectErr, "%q: not expecting %v wanted %v", test.in, err, test.expectErr, ) require.Equal(t, must(RValueToRandomness("00000000000000")), rnd) @@ -241,7 +240,7 @@ func TestTValueSyntax(t *testing.T) { _, err := TValueToThreshold(test.in) if test.expectErr != nil { - require.True(t, errors.Is(err, test.expectErr), + require.ErrorIs(t, err, test.expectErr, "%q: not expecting %v wanted %v", test.in, err, test.expectErr, ) } else { diff --git a/pkg/sampling/oteltracestate_test.go b/pkg/sampling/oteltracestate_test.go index 4330c01466ab..6763e6d9e177 100644 --- a/pkg/sampling/oteltracestate_test.go +++ b/pkg/sampling/oteltracestate_test.go @@ -4,7 +4,6 @@ package sampling import ( - "errors" "fmt" "strconv" "strings" @@ -233,7 +232,7 @@ func TestParseOpenTelemetryTraceState(t *testing.T) { otts, err := NewOpenTelemetryTraceState(test.in) if test.expectErr != nil { - require.True(t, errors.Is(err, test.expectErr), "%q: not expecting %v wanted %v", test.in, err, test.expectErr) + require.ErrorIs(t, err, test.expectErr, "%q: not expecting %v wanted %v", test.in, err, test.expectErr) } else { require.NoError(t, err) } diff --git a/pkg/sampling/w3ctracestate_test.go b/pkg/sampling/w3ctracestate_test.go index 02eccf35c01b..b97e4b246a3d 100644 --- a/pkg/sampling/w3ctracestate_test.go +++ b/pkg/sampling/w3ctracestate_test.go @@ -4,7 +4,6 @@ package sampling import ( - "errors" "fmt" "strconv" "strings" @@ -116,7 +115,7 @@ func TestParseW3CTraceState(t *testing.T) { w3c, err := NewW3CTraceState(test.in) if test.expectErr != nil { - require.True(t, errors.Is(err, test.expectErr), + require.ErrorIs(t, err, test.expectErr, "%q: not expecting %v wanted %v", test.in, err, test.expectErr, ) } else { diff 
--git a/pkg/stanza/entry/nil_field_test.go b/pkg/stanza/entry/nil_field_test.go index 754af8d97d49..1193b9d3410f 100644 --- a/pkg/stanza/entry/nil_field_test.go +++ b/pkg/stanza/entry/nil_field_test.go @@ -22,7 +22,7 @@ func TestNilFieldSet(t *testing.T) { nilField := NewNilField() err := nilField.Set(entry, "value") require.NoError(t, err) - require.Equal(t, *entry, Entry{}) + require.Equal(t, Entry{}, *entry) } func TestNilFieldDelete(t *testing.T) { @@ -31,7 +31,7 @@ func TestNilFieldDelete(t *testing.T) { value, ok := nilField.Delete(entry) require.True(t, ok) require.Nil(t, value) - require.Equal(t, *entry, Entry{}) + require.Equal(t, Entry{}, *entry) } func TestNilFieldString(t *testing.T) { diff --git a/pkg/stanza/errors/error_test.go b/pkg/stanza/errors/error_test.go index b8b270d7e3c8..650303dbe874 100644 --- a/pkg/stanza/errors/error_test.go +++ b/pkg/stanza/errors/error_test.go @@ -18,26 +18,26 @@ func TestWithDetails(t *testing.T) { err := NewError("Test error", "") err2 := WithDetails(err, "foo", "bar") - require.Equal(t, err2.Details, ErrorDetails{"foo": "bar"}) + require.Equal(t, ErrorDetails{"foo": "bar"}, err2.Details) }) t.Run("AgentErrorWithExistingDetails", func(t *testing.T) { err := NewError("Test error", "", "foo1", "bar1") err2 := WithDetails(err, "foo2", "bar2") - require.Equal(t, err2.Details, ErrorDetails{"foo1": "bar1", "foo2": "bar2"}) + require.Equal(t, ErrorDetails{"foo1": "bar1", "foo2": "bar2"}, err2.Details) }) t.Run("StandardError", func(t *testing.T) { err := fmt.Errorf("Test error") err2 := WithDetails(err, "foo", "bar") - require.Equal(t, err2.Details, ErrorDetails{"foo": "bar"}) + require.Equal(t, ErrorDetails{"foo": "bar"}, err2.Details) }) t.Run("AgentMethod", func(t *testing.T) { err := NewError("Test error", "").WithDetails("foo", "bar") - require.Equal(t, err.Details, ErrorDetails{"foo": "bar"}) + require.Equal(t, ErrorDetails{"foo": "bar"}, err.Details) }) } diff --git a/pkg/stanza/fileconsumer/config_test.go 
b/pkg/stanza/fileconsumer/config_test.go index eeb43c67cd83..145c43491266 100644 --- a/pkg/stanza/fileconsumer/config_test.go +++ b/pkg/stanza/fileconsumer/config_test.go @@ -456,7 +456,7 @@ func TestBuild(t *testing.T) { func(_ *Config) {}, require.NoError, func(t *testing.T, m *Manager) { - require.Equal(t, m.pollInterval, 10*time.Millisecond) + require.Equal(t, 10*time.Millisecond, m.pollInterval) }, }, { @@ -665,7 +665,7 @@ func TestBuildWithSplitFunc(t *testing.T) { func(_ *Config) {}, require.NoError, func(t *testing.T, m *Manager) { - require.Equal(t, m.pollInterval, 10*time.Millisecond) + require.Equal(t, 10*time.Millisecond, m.pollInterval) }, }, { diff --git a/pkg/stanza/fileconsumer/internal/fileset/fileset_test.go b/pkg/stanza/fileconsumer/internal/fileset/fileset_test.go index 0d83ef8d5db6..0c1de5c8a8e6 100644 --- a/pkg/stanza/fileconsumer/internal/fileset/fileset_test.go +++ b/pkg/stanza/fileconsumer/internal/fileset/fileset_test.go @@ -36,7 +36,7 @@ func pop[T Matchable](expectedErr error, expectedElemet T) func(t *testing.T, fi el, err := fileset.Pop() if expectedErr == nil { require.NoError(t, err) - require.Equal(t, el, expectedElemet) + require.Equal(t, expectedElemet, el) require.Equal(t, pr-1, fileset.Len()) } else { require.ErrorIs(t, err, expectedErr) diff --git a/pkg/stanza/operator/helper/input_test.go b/pkg/stanza/operator/helper/input_test.go index bf077071e1f3..8d3e16a704c7 100644 --- a/pkg/stanza/operator/helper/input_test.go +++ b/pkg/stanza/operator/helper/input_test.go @@ -89,7 +89,7 @@ func TestInputOperatorProcess(t *testing.T) { ctx := context.Background() err := input.Process(ctx, entry) require.Error(t, err) - require.Equal(t, err.Error(), "Operator can not process logs.") + require.Equal(t, "Operator can not process logs.", err.Error()) } func TestInputOperatorNewEntry(t *testing.T) { diff --git a/pkg/stanza/operator/parser/container/parser_test.go b/pkg/stanza/operator/parser/container/parser_test.go index 
2a204ae82ca7..6e7f410ef388 100644 --- a/pkg/stanza/operator/parser/container/parser_test.go +++ b/pkg/stanza/operator/parser/container/parser_test.go @@ -91,7 +91,7 @@ func TestInternalRecombineCfg(t *testing.T) { expected.CombineWith = "" expected.SourceIdentifier = entry.NewAttributeField("log.file.path") expected.MaxLogSize = 102400 - require.Equal(t, cfg, expected) + require.Equal(t, expected, cfg) } func TestProcess(t *testing.T) { diff --git a/pkg/stanza/operator/parser/regex/parser_test.go b/pkg/stanza/operator/parser/regex/parser_test.go index b6eb97079bc9..8a44342e69ec 100644 --- a/pkg/stanza/operator/parser/regex/parser_test.go +++ b/pkg/stanza/operator/parser/regex/parser_test.go @@ -70,7 +70,7 @@ func TestParserCache(t *testing.T) { require.Error(t, err) require.Contains(t, err.Error(), "type '[]int' cannot be parsed as regex") require.NotNil(t, parser.cache, "expected cache to be configured") - require.Equal(t, parser.cache.maxSize(), uint16(200)) + require.Equal(t, uint16(200), parser.cache.maxSize()) } func TestParserRegex(t *testing.T) { diff --git a/pkg/stanza/pipeline/config_test.go b/pkg/stanza/pipeline/config_test.go index 64f10b92e171..b2d916680e7a 100644 --- a/pkg/stanza/pipeline/config_test.go +++ b/pkg/stanza/pipeline/config_test.go @@ -243,7 +243,7 @@ func TestDeduplicateIDs(t *testing.T) { t.Run("Deduplicate/"+tc.name, func(t *testing.T) { ops := tc.ops() dedeplucateIDs(ops) - require.Equal(t, ops, tc.expectedOps) + require.Equal(t, tc.expectedOps, ops) }) } } diff --git a/pkg/translator/jaeger/jaegerproto_to_traces_test.go b/pkg/translator/jaeger/jaegerproto_to_traces_test.go index 56d3b2d95550..96eb84d64a54 100644 --- a/pkg/translator/jaeger/jaegerproto_to_traces_test.go +++ b/pkg/translator/jaeger/jaegerproto_to_traces_test.go @@ -363,8 +363,8 @@ func TestProtoBatchToInternalTracesWithTwoLibraries(t *testing.T) { actual, err := ProtoToTraces([]*model.Batch{jb}) assert.NoError(t, err) - assert.Equal(t, actual.ResourceSpans().Len(), 1) - 
assert.Equal(t, actual.ResourceSpans().At(0).ScopeSpans().Len(), 2) + assert.Equal(t, 1, actual.ResourceSpans().Len()) + assert.Equal(t, 2, actual.ResourceSpans().At(0).ScopeSpans().Len()) ils0 := actual.ResourceSpans().At(0).ScopeSpans().At(0) ils1 := actual.ResourceSpans().At(0).ScopeSpans().At(1) diff --git a/pkg/translator/prometheusremotewrite/helper_test.go b/pkg/translator/prometheusremotewrite/helper_test.go index 6c1942af0282..8894c0f7a27d 100644 --- a/pkg/translator/prometheusremotewrite/helper_test.go +++ b/pkg/translator/prometheusremotewrite/helper_test.go @@ -1050,7 +1050,7 @@ func TestCreateLabels(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { lbls := createLabels(tc.metricName, tc.baseLabels, tc.extras...) - assert.Equal(t, lbls, tc.expected) + assert.Equal(t, tc.expected, lbls) }) } } diff --git a/pkg/translator/signalfx/to_metrics_test.go b/pkg/translator/signalfx/to_metrics_test.go index 8c644e1b293a..f6f61cfb332d 100644 --- a/pkg/translator/signalfx/to_metrics_test.go +++ b/pkg/translator/signalfx/to_metrics_test.go @@ -19,7 +19,7 @@ import ( func TestNumMetricTypes(t *testing.T) { // Assert that all values for the metric types are less than numMetricTypes. 
- assert.Equal(t, len(sfxpb.MetricType_value), numMetricTypes) + assert.Len(t, sfxpb.MetricType_value, numMetricTypes) for _, v := range sfxpb.MetricType_value { assert.Less(t, v, int32(numMetricTypes)) } diff --git a/processor/attributesprocessor/factory_test.go b/processor/attributesprocessor/factory_test.go index 3c10a03bc88a..ea4e3dd3b756 100644 --- a/processor/attributesprocessor/factory_test.go +++ b/processor/attributesprocessor/factory_test.go @@ -26,7 +26,7 @@ func TestFactory_Type(t *testing.T) { func TestFactory_CreateDefaultConfig(t *testing.T) { factory := NewFactory() cfg := factory.CreateDefaultConfig() - assert.Equal(t, cfg, &Config{}) + assert.Equal(t, &Config{}, cfg) assert.NoError(t, componenttest.CheckConfigStruct(cfg)) } diff --git a/processor/cumulativetodeltaprocessor/factory_test.go b/processor/cumulativetodeltaprocessor/factory_test.go index bb3b4358edc5..6e25b2cc35ed 100644 --- a/processor/cumulativetodeltaprocessor/factory_test.go +++ b/processor/cumulativetodeltaprocessor/factory_test.go @@ -27,7 +27,7 @@ func TestType(t *testing.T) { func TestCreateDefaultConfig(t *testing.T) { factory := NewFactory() cfg := factory.CreateDefaultConfig() - assert.Equal(t, cfg, &Config{}) + assert.Equal(t, &Config{}, cfg) assert.NoError(t, componenttest.CheckConfigStruct(cfg)) } diff --git a/processor/deltatocumulativeprocessor/internal/data/datatest/compare/compare.go b/processor/deltatocumulativeprocessor/internal/data/datatest/compare/compare.go index 91f58ff8b0f0..eb8c0f11174a 100644 --- a/processor/deltatocumulativeprocessor/internal/data/datatest/compare/compare.go +++ b/processor/deltatocumulativeprocessor/internal/data/datatest/compare/compare.go @@ -14,14 +14,14 @@ import ( var Opts = []cmp.Option{ cmpopts.EquateApprox(0, 1e-9), cmp.Exporter(func(ty reflect.Type) bool { - return strings.HasPrefix(ty.PkgPath(), "go.opentelemetry.io/collector/pdata") + return strings.HasPrefix(ty.PkgPath(), "go.opentelemetry.io/collector/pdata") || 
strings.HasPrefix(ty.PkgPath(), "github.com/open-telemetry/opentelemetry-collector-contrib") }), } -func Equal[T any](a, b T) bool { - return cmp.Equal(a, b, Opts...) +func Equal[T any](a, b T, opts ...cmp.Option) bool { + return cmp.Equal(a, b, append(Opts, opts...)...) } -func Diff[T any](a, b T) string { - return cmp.Diff(a, b, Opts...) +func Diff[T any](a, b T, opts ...cmp.Option) string { + return cmp.Diff(a, b, append(Opts, opts...)...) } diff --git a/processor/deltatocumulativeprocessor/internal/metrics/iter.go b/processor/deltatocumulativeprocessor/internal/metrics/iter.go new file mode 100644 index 000000000000..9902d22a2eec --- /dev/null +++ b/processor/deltatocumulativeprocessor/internal/metrics/iter.go @@ -0,0 +1,38 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metrics // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/metrics" + +import ( + "go.opentelemetry.io/collector/pdata/pmetric" + + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/putil/pslice" +) + +func All(md pmetric.Metrics) func(func(Metric) bool) { + return func(yield func(Metric) bool) { + var ok bool + pslice.All(md.ResourceMetrics())(func(rm pmetric.ResourceMetrics) bool { + pslice.All(rm.ScopeMetrics())(func(sm pmetric.ScopeMetrics) bool { + pslice.All(sm.Metrics())(func(m pmetric.Metric) bool { + ok = yield(From(rm.Resource(), sm.Scope(), m)) + return ok + }) + return ok + }) + return ok + }) + } +} + +func Filter(md pmetric.Metrics, keep func(Metric) bool) { + md.ResourceMetrics().RemoveIf(func(rm pmetric.ResourceMetrics) bool { + rm.ScopeMetrics().RemoveIf(func(sm pmetric.ScopeMetrics) bool { + sm.Metrics().RemoveIf(func(m pmetric.Metric) bool { + return !keep(From(rm.Resource(), sm.Scope(), m)) + }) + return sm.Metrics().Len() == 0 + }) + return rm.ScopeMetrics().Len() == 0 + }) +} diff --git 
a/processor/deltatocumulativeprocessor/internal/metrics/metrics.go b/processor/deltatocumulativeprocessor/internal/metrics/metrics.go index 6b705f5a7d24..50c802c70e1d 100644 --- a/processor/deltatocumulativeprocessor/internal/metrics/metrics.go +++ b/processor/deltatocumulativeprocessor/internal/metrics/metrics.go @@ -22,6 +22,14 @@ func (m *Metric) Ident() Ident { return identity.OfResourceMetric(m.res, m.scope, m.Metric) } +func (m *Metric) Resource() pcommon.Resource { + return m.res +} + +func (m *Metric) Scope() pcommon.InstrumentationScope { + return m.scope +} + func From(res pcommon.Resource, scope pcommon.InstrumentationScope, metric pmetric.Metric) Metric { return Metric{res: res, scope: scope, Metric: metric} } diff --git a/processor/deltatocumulativeprocessor/internal/metrics/util.go b/processor/deltatocumulativeprocessor/internal/metrics/util.go deleted file mode 100644 index 985716b3cc0f..000000000000 --- a/processor/deltatocumulativeprocessor/internal/metrics/util.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package metrics // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/metrics" - -import "go.opentelemetry.io/collector/pdata/pmetric" - -func Filter(metrics pmetric.Metrics, fn func(m Metric) bool) { - metrics.ResourceMetrics().RemoveIf(func(rm pmetric.ResourceMetrics) bool { - rm.ScopeMetrics().RemoveIf(func(sm pmetric.ScopeMetrics) bool { - sm.Metrics().RemoveIf(func(m pmetric.Metric) bool { - return !fn(From(rm.Resource(), sm.Scope(), m)) - }) - return false - }) - return false - }) -} - -func Each(metrics pmetric.Metrics, fn func(m Metric)) { - Filter(metrics, func(m Metric) bool { - fn(m) - return true - }) -} diff --git a/processor/deltatocumulativeprocessor/internal/putil/pslice/pslice.go b/processor/deltatocumulativeprocessor/internal/putil/pslice/pslice.go index 5a0c2b64d863..6cc97af04132 100644 
--- a/processor/deltatocumulativeprocessor/internal/putil/pslice/pslice.go +++ b/processor/deltatocumulativeprocessor/internal/putil/pslice/pslice.go @@ -19,3 +19,13 @@ func Equal[E comparable, S Slice[E]](a, b S) bool { } return true } + +func All[E any, S Slice[E]](slice S) func(func(E) bool) { + return func(yield func(E) bool) { + for i := 0; i < slice.Len(); i++ { + if !yield(slice.At(i)) { + break + } + } + } +} diff --git a/processor/deltatocumulativeprocessor/internal/streams/data.go b/processor/deltatocumulativeprocessor/internal/streams/data.go index 0c54be543c45..532b4b8289e1 100644 --- a/processor/deltatocumulativeprocessor/internal/streams/data.go +++ b/processor/deltatocumulativeprocessor/internal/streams/data.go @@ -9,21 +9,16 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/identity" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/metrics" + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/putil/pslice" ) -// Samples returns an Iterator over each sample of all streams in the metric -func Samples[D data.Point[D]](m metrics.Data[D]) Seq[D] { - mid := m.Ident() - - return func(yield func(Ident, D) bool) bool { - for i := 0; i < m.Len(); i++ { - dp := m.At(i) +func Datapoints[P data.Point[P], List metrics.Data[P]](dps List) func(func(identity.Stream, P) bool) { + return func(yield func(identity.Stream, P) bool) { + mid := dps.Ident() + pslice.All(dps)(func(dp P) bool { id := identity.OfStream(mid, dp) - if !yield(id, dp) { - break - } - } - return false + return yield(id, dp) + }) } } diff --git a/processor/deltatocumulativeprocessor/internal/streams/data_test.go b/processor/deltatocumulativeprocessor/internal/streams/data_test.go index 
f8180713f86f..76ae72ee1ec5 100644 --- a/processor/deltatocumulativeprocessor/internal/streams/data_test.go +++ b/processor/deltatocumulativeprocessor/internal/streams/data_test.go @@ -24,7 +24,7 @@ func BenchmarkSamples(b *testing.B) { dps := generate(b.N) b.ResetTimer() - streams.Samples(dps)(func(id streams.Ident, dp data.Number) bool { + streams.Datapoints(dps)(func(id streams.Ident, dp data.Number) bool { rdp = dp rid = id return true diff --git a/processor/deltatocumulativeprocessor/internal/streams/errors.go b/processor/deltatocumulativeprocessor/internal/streams/errors.go index e69827a6212c..c0638e091502 100644 --- a/processor/deltatocumulativeprocessor/internal/streams/errors.go +++ b/processor/deltatocumulativeprocessor/internal/streams/errors.go @@ -19,3 +19,7 @@ type StreamErr struct { func (e StreamErr) Error() string { return fmt.Sprintf("%s: %s", e.Ident, e.Err) } + +func (e StreamErr) Unwrap() error { + return e.Err +} diff --git a/processor/deltatocumulativeprocessor/internal/telemetry/faults_test.go b/processor/deltatocumulativeprocessor/internal/telemetry/faults_test.go index 6e9540f829c8..f159ba11dc83 100644 --- a/processor/deltatocumulativeprocessor/internal/telemetry/faults_test.go +++ b/processor/deltatocumulativeprocessor/internal/telemetry/faults_test.go @@ -27,9 +27,13 @@ func TestFaults(t *testing.T) { type Case struct { Name string Map Map - Pre func(Map, identity.Stream, data.Number) error - Bad func(Map, identity.Stream, data.Number) error - Err error + // data preparation, etc + Pre func(Map, identity.Stream, data.Number) error + // cause an error + Bad func(Map, identity.Stream, data.Number) error + // expected error that was caused + Err error + // expected return above error was converted into Want error } @@ -49,7 +53,8 @@ func TestFaults(t *testing.T) { dp.SetTimestamp(ts(40)) return dps.Store(id, dp) }, - Err: delta.ErrOlderStart{Start: ts(20), Sample: ts(10)}, + Err: delta.ErrOlderStart{Start: ts(20), Sample: ts(10)}, + Want: 
streams.Drop, }, { Name: "out-of-order", @@ -61,7 +66,8 @@ func TestFaults(t *testing.T) { dp.SetTimestamp(ts(10)) return dps.Store(id, dp) }, - Err: delta.ErrOutOfOrder{Last: ts(20), Sample: ts(10)}, + Err: delta.ErrOutOfOrder{Last: ts(20), Sample: ts(10)}, + Want: streams.Drop, }, { Name: "gap", @@ -75,7 +81,8 @@ func TestFaults(t *testing.T) { dp.SetTimestamp(ts(40)) return dps.Store(id, dp) }, - Err: delta.ErrGap{From: ts(20), To: ts(30)}, + Err: delta.ErrGap{From: ts(20), To: ts(30)}, + Want: nil, }, { Name: "limit", @@ -109,7 +116,8 @@ func TestFaults(t *testing.T) { dp.SetTimestamp(ts(20)) return dps.Store(id, dp) }, - Err: streams.ErrEvicted{Ident: evid, ErrLimit: streams.ErrLimit(1)}, + Err: streams.ErrEvicted{Ident: evid, ErrLimit: streams.ErrLimit(1)}, + Want: nil, }, } @@ -125,17 +133,17 @@ func TestFaults(t *testing.T) { if dps == nil { dps = delta.New[data.Number]() } - onf := telemetry.ObserveNonFatal(dps, &tel.Metrics) + var realErr error + dps = errGrab[data.Number]{Map: dps, err: &realErr} + dps = telemetry.ObserveNonFatal(dps, &tel.Metrics) if c.Pre != nil { - err := c.Pre(onf, id, dp.Clone()) + err := c.Pre(dps, id, dp.Clone()) require.NoError(t, err) } err := c.Bad(dps, id, dp.Clone()) - require.Equal(t, c.Err, err) - - err = c.Bad(onf, id, dp.Clone()) + require.Equal(t, c.Err, realErr) require.Equal(t, c.Want, err) }) } @@ -154,3 +162,14 @@ func (e HeadEvictor[T]) Evict() (evicted identity.Stream, ok bool) { }) return evicted, true } + +// errGrab stores any error that happens on Store() for later inspection +type errGrab[T any] struct { + streams.Map[T] + err *error +} + +func (e errGrab[T]) Store(id identity.Stream, dp T) error { + *e.err = e.Map.Store(id, dp) + return *e.err +} diff --git a/processor/deltatocumulativeprocessor/internal/telemetry/metrics.go b/processor/deltatocumulativeprocessor/internal/telemetry/metrics.go index cbf52c09ff94..8062fc8388a8 100644 --- a/processor/deltatocumulativeprocessor/internal/telemetry/metrics.go +++ 
b/processor/deltatocumulativeprocessor/internal/telemetry/metrics.go @@ -119,8 +119,10 @@ func (f Faults[T]) Store(id streams.Ident, v T) error { return err case errors.As(err, &olderStart): inc(f.dps.dropped, reason("older-start")) + return streams.Drop case errors.As(err, &outOfOrder): inc(f.dps.dropped, reason("out-of-order")) + return streams.Drop case errors.As(err, &limit): inc(f.dps.dropped, reason("stream-limit")) // no space to store stream, drop it instead of failing silently diff --git a/processor/deltatocumulativeprocessor/processor.go b/processor/deltatocumulativeprocessor/processor.go index cc63f2c90e40..e0448b350c32 100644 --- a/processor/deltatocumulativeprocessor/processor.go +++ b/processor/deltatocumulativeprocessor/processor.go @@ -136,7 +136,8 @@ func (p *Processor) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) erro defer p.mtx.Unlock() var errs error - metrics.Each(md, func(m metrics.Metric) { + metrics.Filter(md, func(m metrics.Metric) bool { + var n int switch m.Type() { case pmetric.MetricTypeSum: sum := m.Sum() @@ -145,6 +146,7 @@ func (p *Processor) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) erro errs = errors.Join(errs, err) sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) } + n = sum.DataPoints().Len() case pmetric.MetricTypeHistogram: hist := m.Histogram() if hist.AggregationTemporality() == pmetric.AggregationTemporalityDelta { @@ -152,6 +154,7 @@ func (p *Processor) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) erro errs = errors.Join(errs, err) hist.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) } + n = hist.DataPoints().Len() case pmetric.MetricTypeExponentialHistogram: expo := m.ExponentialHistogram() if expo.AggregationTemporality() == pmetric.AggregationTemporalityDelta { @@ -159,11 +162,16 @@ func (p *Processor) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) erro errs = errors.Join(errs, err) 
expo.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) } + n = expo.DataPoints().Len() } + return n > 0 }) if errs != nil { return errs } + if md.MetricCount() == 0 { + return nil + } return p.next.ConsumeMetrics(ctx, md) } diff --git a/processor/deltatocumulativeprocessor/processor_test.go b/processor/deltatocumulativeprocessor/processor_test.go new file mode 100644 index 000000000000..9b95e615fea5 --- /dev/null +++ b/processor/deltatocumulativeprocessor/processor_test.go @@ -0,0 +1,284 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package deltatocumulativeprocessor_test + +import ( + "context" + "math" + "math/rand" + "strconv" + "testing" + "time" + + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/processor" + "go.opentelemetry.io/collector/processor/processortest" + + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics/identity" + self "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor" + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data" + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/data/datatest/compare" + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/metrics" + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/streams" + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor/internal/testdata/random" +) + +func setup(t *testing.T, cfg *self.Config) (processor.Metrics, 
*consumertest.MetricsSink) { + t.Helper() + + next := &consumertest.MetricsSink{} + if cfg == nil { + cfg = &self.Config{MaxStale: 0, MaxStreams: math.MaxInt} + } + + proc, err := self.NewFactory().CreateMetricsProcessor( + context.Background(), + processortest.NewNopSettings(), + cfg, + next, + ) + require.NoError(t, err) + + return proc, next +} + +// TestAccumulation verifies stream identification works correctly by writing +// 100 random dps spread across 10 different streams. +// Processor output is compared against a manual aggregation on a per-stream basis. +// +// Uses Sum datatype for testing, as we are not testing actual aggregation (see +// internal/data for tests), but proper stream separation +func TestAccumulation(t *testing.T) { + proc, sink := setup(t, nil) + + sum := random.Sum() + + // create 10 distinct streams + const N = 10 + sbs := make([]SumBuilder, N) + for i := range sbs { + _, base := sum.Stream() + sbs[i] = SumBuilder{Metric: sum, base: base} + } + + // init manual aggregation state + want := make(map[identity.Stream]data.Number) + for _, s := range sbs { + id := s.id(pmetric.AggregationTemporalityCumulative) + want[id] = s.point(0, 0, 0) + } + + for i := 0; i < 100; i++ { + s := sbs[rand.Intn(N)] + + v := int64(rand.Intn(255)) + ts := pcommon.Timestamp(i) + + // write to processor + in := s.delta(s.point(0, ts, v)) + rms := s.resourceMetrics(in) + err := proc.ConsumeMetrics(context.Background(), rms) + require.NoError(t, err) + + // aggregate manually + wantv := want[s.id(pmetric.AggregationTemporalityCumulative)] + wantv.SetIntValue(wantv.IntValue() + v) + wantv.SetTimestamp(ts) + } + + // get the final processor output for each stream + got := make(map[identity.Stream]data.Number) + for _, md := range sink.AllMetrics() { + metrics.All(md)(func(m metrics.Metric) bool { + sum := metrics.Sum(m) + streams.Datapoints(sum)(func(id identity.Stream, dp data.Number) bool { + got[id] = dp + return true + }) + return true + }) + } + + sort := 
cmpopts.SortMaps(func(a, b identity.Stream) bool { + return a.Hash().Sum64() < b.Hash().Sum64() + }) + if diff := compare.Diff(want, got, sort); diff != "" { + t.Fatal(diff) + } +} + +// TestTimestamps verifies timestamp handling, most notably: +// - Timestamp() keeps getting advanced +// - StartTimestamp() stays the same +func TestTimestamps(t *testing.T) { + proc, sink := setup(t, nil) + + sb := stream() + point := func(start, last pcommon.Timestamp) data.Number { + return sb.point(start, last, 0) + } + + cases := []struct { + in data.Number + out data.Number + drop bool + }{{ + // first: take as-is + in: point(1000, 1100), + out: point(1000, 1100), + }, { + // subsequent: take, but keep start-ts + in: point(1100, 1200), + out: point(1000, 1200), + }, { + // gap: take + in: point(1300, 1400), + out: point(1000, 1400), + }, { + // out of order + in: point(1200, 1300), + drop: true, + }, { + // older start + in: point(500, 550), + drop: true, + }} + + for i, cs := range cases { + t.Run(strconv.Itoa(i), func(t *testing.T) { + sink.Reset() + + in := sb.resourceMetrics(sb.delta(cs.in)) + want := make([]pmetric.Metrics, 0) + if !cs.drop { + want = []pmetric.Metrics{sb.resourceMetrics(sb.cumul(cs.out))} + } + + err := proc.ConsumeMetrics(context.Background(), in) + require.NoError(t, err) + + out := sink.AllMetrics() + if diff := compare.Diff(want, out); diff != "" { + t.Fatal(diff) + } + }) + } +} + +func TestStreamLimit(t *testing.T) { + proc, sink := setup(t, &self.Config{MaxStale: 5 * time.Minute, MaxStreams: 10}) + + good := make([]SumBuilder, 10) + for i := range good { + good[i] = stream() + } + bad := stream() + _ = bad + + diff := func(want, got []pmetric.Metrics) { + t.Helper() + if diff := compare.Diff(want, got); diff != "" { + t.Fatal(diff) + } + } + + writeGood := func(ts pcommon.Timestamp) { + for i, sb := range good { + in := sb.resourceMetrics(sb.delta(sb.point(0, ts+pcommon.Timestamp(i), 0))) + want := sb.resourceMetrics(sb.cumul(sb.point(0, 
ts+pcommon.Timestamp(i), 0))) + + err := proc.ConsumeMetrics(context.Background(), in) + require.NoError(t, err) + + diff([]pmetric.Metrics{want}, sink.AllMetrics()) + sink.Reset() + } + } + + // write up to limit must work + writeGood(0) + + // extra stream must be dropped, nothing written + in := bad.resourceMetrics(bad.delta(bad.point(0, 0, 0))) + err := proc.ConsumeMetrics(context.Background(), in) + require.NoError(t, err) + diff([]pmetric.Metrics{}, sink.AllMetrics()) + sink.Reset() + + // writing existing streams must still work + writeGood(100) +} + +type copyable interface { + CopyTo(pmetric.Metric) +} + +func (s SumBuilder) resourceMetrics(metrics ...copyable) pmetric.Metrics { + md := pmetric.NewMetrics() + + rm := md.ResourceMetrics().AppendEmpty() + s.Resource().CopyTo(rm.Resource()) + + sm := rm.ScopeMetrics().AppendEmpty() + s.Scope().CopyTo(sm.Scope()) + + for _, m := range metrics { + m.CopyTo(sm.Metrics().AppendEmpty()) + } + return md +} + +type SumBuilder struct { + random.Metric[data.Number] + base data.Number +} + +func (s SumBuilder) with(dps ...data.Number) pmetric.Metric { + m := pmetric.NewMetric() + s.Metric.CopyTo(m) + + for _, dp := range dps { + dp.NumberDataPoint.CopyTo(m.Sum().DataPoints().AppendEmpty()) + } + + return m +} + +func (s SumBuilder) delta(dps ...data.Number) pmetric.Metric { + m := s.with(dps...) + m.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + return m +} + +func (s SumBuilder) cumul(dps ...data.Number) pmetric.Metric { + m := s.with(dps...) 
+ m.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + return m +} + +func (s SumBuilder) id(temp pmetric.AggregationTemporality) identity.Stream { + m := s.with(s.base) + m.Sum().SetAggregationTemporality(temp) + + mid := identity.OfMetric(s.Ident().Scope(), m) + return identity.OfStream(mid, s.base) +} + +func (s SumBuilder) point(start, ts pcommon.Timestamp, value int64) data.Number { + dp := s.base.Clone() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(value) + return dp +} + +func stream() SumBuilder { + sum := random.Sum() + _, base := sum.Stream() + return SumBuilder{Metric: sum, base: base} +} diff --git a/processor/deltatorateprocessor/factory_test.go b/processor/deltatorateprocessor/factory_test.go index b566f8da17ee..073f14743467 100644 --- a/processor/deltatorateprocessor/factory_test.go +++ b/processor/deltatorateprocessor/factory_test.go @@ -27,7 +27,7 @@ func TestType(t *testing.T) { func TestCreateDefaultConfig(t *testing.T) { factory := NewFactory() cfg := factory.CreateDefaultConfig() - assert.Equal(t, cfg, &Config{}) + assert.Equal(t, &Config{}, cfg) assert.NoError(t, componenttest.CheckConfigStruct(cfg)) } diff --git a/processor/filterprocessor/factory_test.go b/processor/filterprocessor/factory_test.go index b0772fbfba45..516cd7e42eaf 100644 --- a/processor/filterprocessor/factory_test.go +++ b/processor/filterprocessor/factory_test.go @@ -30,9 +30,9 @@ func TestType(t *testing.T) { func TestCreateDefaultConfig(t *testing.T) { factory := NewFactory() cfg := factory.CreateDefaultConfig() - assert.Equal(t, cfg, &Config{ + assert.Equal(t, &Config{ ErrorMode: ottl.PropagateError, - }) + }, cfg) assert.NoError(t, componenttest.CheckConfigStruct(cfg)) } diff --git a/processor/geoipprocessor/geoip_processor.go b/processor/geoipprocessor/geoip_processor.go index e1716b4a3862..f358cfabcaf8 100644 --- a/processor/geoipprocessor/geoip_processor.go +++ b/processor/geoipprocessor/geoip_processor.go @@ -69,7 
+69,7 @@ func ipFromAttributes(attributes []attribute.Key, resource pcommon.Map) (net.IP, // geoLocation fetches geolocation information for the given IP address using the configured providers. // It returns a set of attributes containing the geolocation data, or an error if the location could not be determined. func (g *geoIPProcessor) geoLocation(ctx context.Context, ip net.IP) (attribute.Set, error) { - allAttributes := attribute.EmptySet() + allAttributes := &attribute.Set{} for _, provider := range g.providers { geoAttributes, err := provider.Location(ctx, ip) if err != nil { diff --git a/processor/groupbyattrsprocessor/processor_test.go b/processor/groupbyattrsprocessor/processor_test.go index a6722ddb6f97..9f9fb1e09c87 100644 --- a/processor/groupbyattrsprocessor/processor_test.go +++ b/processor/groupbyattrsprocessor/processor_test.go @@ -366,7 +366,7 @@ func TestComplexAttributeGrouping(t *testing.T) { metrics := rm.ScopeMetrics().At(j).Metrics() for k := 0; k < metrics.Len(); k++ { metric := metrics.At(k) - assert.Equal(t, metric.Histogram().AggregationTemporality(), pmetric.AggregationTemporalityCumulative) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, metric.Histogram().AggregationTemporality()) for l := 0; l < metric.Histogram().DataPoints().Len(); l++ { assert.EqualValues(t, outputRecordAttrs, metric.Histogram().DataPoints().At(l).Attributes()) } @@ -1109,7 +1109,7 @@ func Test_GetMetricInInstrumentationLibrary(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - require.Equal(t, getMetricInInstrumentationLibrary(tt.ilm, tt.searched), tt.want) + require.Equal(t, tt.want, getMetricInInstrumentationLibrary(tt.ilm, tt.searched)) }) } } diff --git a/processor/groupbytraceprocessor/event_test.go b/processor/groupbytraceprocessor/event_test.go index b820508e470c..eb7fd5065745 100644 --- a/processor/groupbytraceprocessor/event_test.go +++ b/processor/groupbytraceprocessor/event_test.go @@ -541,5 +541,5 @@ func 
assertGaugeNotCreated(t *testing.T, name string, tt componentTestTelemetry) var md metricdata.ResourceMetrics require.NoError(t, tt.reader.Collect(context.Background(), &md)) got := tt.getMetric(name, md) - assert.Equal(t, got, metricdata.Metrics{}, "gauge exists already but shouldn't") + assert.Equal(t, metricdata.Metrics{}, got, "gauge exists already but shouldn't") } diff --git a/processor/groupbytraceprocessor/factory_test.go b/processor/groupbytraceprocessor/factory_test.go index 7ca4bb54c643..1680e8eb4d0b 100644 --- a/processor/groupbytraceprocessor/factory_test.go +++ b/processor/groupbytraceprocessor/factory_test.go @@ -60,7 +60,7 @@ func TestCreateTestProcessorWithNotImplementedOptions(t *testing.T) { p, err := f.CreateTracesProcessor(context.Background(), processortest.NewNopSettings(), tt.config, consumertest.NewNop()) // verify - assert.Error(t, tt.expectedErr, err) + assert.ErrorIs(t, err, tt.expectedErr) assert.Nil(t, p) } } diff --git a/processor/groupbytraceprocessor/processor_test.go b/processor/groupbytraceprocessor/processor_test.go index 1a9056eadf0c..77864d1bc4c4 100644 --- a/processor/groupbytraceprocessor/processor_test.go +++ b/processor/groupbytraceprocessor/processor_test.go @@ -253,7 +253,7 @@ func TestTraceErrorFromStorageWhileReleasing(t *testing.T) { err = p.markAsReleased(traceID, p.eventMachine.workers[workerIndexForTraceID(traceID, config.NumWorkers)].fire) // verify - assert.True(t, errors.Is(err, expectedError)) + assert.ErrorIs(t, err, expectedError) } func TestTraceErrorFromStorageWhileProcessingTrace(t *testing.T) { @@ -290,7 +290,7 @@ func TestTraceErrorFromStorageWhileProcessingTrace(t *testing.T) { err := p.onTraceReceived(tracesWithID{id: traceID, td: batch[0]}, p.eventMachine.workers[0]) // verify - assert.True(t, errors.Is(err, expectedError)) + assert.ErrorIs(t, err, expectedError) } func TestAddSpansToExistingTrace(t *testing.T) { @@ -385,7 +385,7 @@ func TestTraceErrorFromStorageWhileProcessingSecondTrace(t *testing.T) 
{ ) // verify - assert.True(t, errors.Is(err, expectedError)) + assert.ErrorIs(t, err, expectedError) } func TestErrorFromStorageWhileRemovingTrace(t *testing.T) { @@ -412,7 +412,7 @@ func TestErrorFromStorageWhileRemovingTrace(t *testing.T) { err := p.onTraceRemoved(traceID) // verify - assert.True(t, errors.Is(err, expectedError)) + assert.ErrorIs(t, err, expectedError) } func TestTraceNotFoundWhileRemovingTrace(t *testing.T) { diff --git a/processor/k8sattributesprocessor/internal/kube/client_test.go b/processor/k8sattributesprocessor/internal/kube/client_test.go index 4fc4802aa35e..97b0cdc06b16 100644 --- a/processor/k8sattributesprocessor/internal/kube/client_test.go +++ b/processor/k8sattributesprocessor/internal/kube/client_test.go @@ -258,13 +258,13 @@ func TestReplicaSetHandler(t *testing.T) { c.handleReplicaSetAdd(replicaset) assert.Len(t, c.ReplicaSets, 1) got := c.ReplicaSets[string(replicaset.UID)] - assert.Equal(t, got.Name, "deployment-aaa") - assert.Equal(t, got.Namespace, "namespaceA") - assert.Equal(t, got.UID, "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee") - assert.Equal(t, got.Deployment, Deployment{ + assert.Equal(t, "deployment-aaa", got.Name) + assert.Equal(t, "namespaceA", got.Namespace) + assert.Equal(t, "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee", got.UID) + assert.Equal(t, Deployment{ Name: "deployment", UID: "ffffffff-gggg-hhhh-iiii-jjjjjjjjjjj", - }) + }, got.Deployment) // test update replicaset updatedReplicaset := replicaset @@ -272,13 +272,13 @@ func TestReplicaSetHandler(t *testing.T) { c.handleReplicaSetUpdate(replicaset, updatedReplicaset) assert.Len(t, c.ReplicaSets, 1) got = c.ReplicaSets[string(replicaset.UID)] - assert.Equal(t, got.Name, "deployment-aaa") - assert.Equal(t, got.Namespace, "namespaceA") - assert.Equal(t, got.UID, "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee") - assert.Equal(t, got.Deployment, Deployment{ + assert.Equal(t, "deployment-aaa", got.Name) + assert.Equal(t, "namespaceA", got.Namespace) + assert.Equal(t, 
"aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee", got.UID) + assert.Equal(t, Deployment{ Name: "deployment", UID: "ffffffff-gggg-hhhh-iiii-jjjjjjjjjjj", - }) + }, got.Deployment) // test delete replicaset c.handleReplicaSetDelete(updatedReplicaset) diff --git a/processor/metricsgenerationprocessor/factory_test.go b/processor/metricsgenerationprocessor/factory_test.go index 65b6e5a876e9..891920dbd192 100644 --- a/processor/metricsgenerationprocessor/factory_test.go +++ b/processor/metricsgenerationprocessor/factory_test.go @@ -27,7 +27,7 @@ func TestType(t *testing.T) { func TestCreateDefaultConfig(t *testing.T) { factory := NewFactory() cfg := factory.CreateDefaultConfig() - assert.Equal(t, cfg, &Config{}) + assert.Equal(t, &Config{}, cfg) assert.NoError(t, componenttest.CheckConfigStruct(cfg)) } diff --git a/processor/metricstransformprocessor/factory_test.go b/processor/metricstransformprocessor/factory_test.go index fddfb0984bf1..1ae4f3116406 100644 --- a/processor/metricstransformprocessor/factory_test.go +++ b/processor/metricstransformprocessor/factory_test.go @@ -29,7 +29,7 @@ func TestType(t *testing.T) { func TestCreateDefaultConfig(t *testing.T) { factory := NewFactory() cfg := factory.CreateDefaultConfig() - assert.Equal(t, cfg, &Config{}) + assert.Equal(t, &Config{}, cfg) assert.NoError(t, componenttest.CheckConfigStruct(cfg)) } diff --git a/processor/resourcedetectionprocessor/config_test.go b/processor/resourcedetectionprocessor/config_test.go index 0140ebfc3914..499474e81d17 100644 --- a/processor/resourcedetectionprocessor/config_test.go +++ b/processor/resourcedetectionprocessor/config_test.go @@ -212,7 +212,7 @@ func TestGetConfigFromType(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { output := tt.inputDetectorConfig.GetConfigFromType(tt.detectorType) - assert.Equal(t, output, tt.expectedConfig) + assert.Equal(t, tt.expectedConfig, output) }) } } diff --git a/processor/resourcedetectionprocessor/internal/aws/ec2/ec2_test.go 
b/processor/resourcedetectionprocessor/internal/aws/ec2/ec2_test.go index bf0385758595..1a8c29266151 100644 --- a/processor/resourcedetectionprocessor/internal/aws/ec2/ec2_test.go +++ b/processor/resourcedetectionprocessor/internal/aws/ec2/ec2_test.go @@ -353,7 +353,7 @@ func TestEC2Tags(t *testing.T) { return } assert.NoError(t, err) - assert.Equal(t, output, tt.expectedOutput) + assert.Equal(t, tt.expectedOutput, output) }) } } diff --git a/processor/resourcedetectionprocessor/internal/resourcedetection_test.go b/processor/resourcedetectionprocessor/internal/resourcedetection_test.go index 67e49ca4f620..5817b6ad3e7a 100644 --- a/processor/resourcedetectionprocessor/internal/resourcedetection_test.go +++ b/processor/resourcedetectionprocessor/internal/resourcedetection_test.go @@ -281,7 +281,7 @@ func TestFilterAttributes_NoMatch(t *testing.T) { _, ok = attr.Get("host.id") assert.False(t, ok) - assert.EqualValues(t, droppedAttributes, []string{"host.name", "host.id"}) + assert.EqualValues(t, []string{"host.name", "host.id"}, droppedAttributes) } func TestFilterAttributes_NilAttributes(t *testing.T) { diff --git a/processor/routingprocessor/logs_test.go b/processor/routingprocessor/logs_test.go index ead511b87fac..e6572f2f43ee 100644 --- a/processor/routingprocessor/logs_test.go +++ b/processor/routingprocessor/logs_test.go @@ -365,8 +365,8 @@ func TestLogsAreCorrectlySplitPerResourceAttributeWithOTTL(t *testing.T) { assert.Len(t, firstExp.AllLogs(), 1) assert.Len(t, secondExp.AllLogs(), 1) - assert.Equal(t, firstExp.AllLogs()[0].LogRecordCount(), 2) - assert.Equal(t, secondExp.AllLogs()[0].LogRecordCount(), 2) + assert.Equal(t, 2, firstExp.AllLogs()[0].LogRecordCount()) + assert.Equal(t, 2, secondExp.AllLogs()[0].LogRecordCount()) assert.Equal(t, firstExp.AllLogs(), secondExp.AllLogs()) }) @@ -396,7 +396,7 @@ func TestLogsAreCorrectlySplitPerResourceAttributeWithOTTL(t *testing.T) { rspan := defaultExp.AllLogs()[0].ResourceLogs().At(0) attr, ok := 
rspan.Resource().Attributes().Get("X-Tenant") assert.True(t, ok, "routing attribute must exists") - assert.Equal(t, attr.AsString(), "something-else") + assert.Equal(t, "something-else", attr.AsString()) }) } diff --git a/processor/routingprocessor/metrics_test.go b/processor/routingprocessor/metrics_test.go index 70e706a03f87..87e6e1614eb4 100644 --- a/processor/routingprocessor/metrics_test.go +++ b/processor/routingprocessor/metrics_test.go @@ -433,8 +433,8 @@ func TestMetricsAreCorrectlySplitPerResourceAttributeRoutingWithOTTL(t *testing. assert.Len(t, firstExp.AllMetrics(), 1) assert.Len(t, secondExp.AllMetrics(), 1) - assert.Equal(t, firstExp.AllMetrics()[0].MetricCount(), 2) - assert.Equal(t, secondExp.AllMetrics()[0].MetricCount(), 2) + assert.Equal(t, 2, firstExp.AllMetrics()[0].MetricCount()) + assert.Equal(t, 2, secondExp.AllMetrics()[0].MetricCount()) assert.Equal(t, firstExp.AllMetrics(), secondExp.AllMetrics()) }) diff --git a/processor/routingprocessor/traces_test.go b/processor/routingprocessor/traces_test.go index 6d4e584aa4b2..55b5de17146d 100644 --- a/processor/routingprocessor/traces_test.go +++ b/processor/routingprocessor/traces_test.go @@ -421,8 +421,8 @@ func TestTracesAreCorrectlySplitPerResourceAttributeWithOTTL(t *testing.T) { assert.Len(t, firstExp.AllTraces(), 1) assert.Len(t, secondExp.AllTraces(), 1) - assert.Equal(t, firstExp.AllTraces()[0].SpanCount(), 2) - assert.Equal(t, secondExp.AllTraces()[0].SpanCount(), 2) + assert.Equal(t, 2, firstExp.AllTraces()[0].SpanCount()) + assert.Equal(t, 2, secondExp.AllTraces()[0].SpanCount()) assert.Equal(t, firstExp.AllTraces(), secondExp.AllTraces()) }) diff --git a/processor/sumologicprocessor/aggregate_attributes_processor_test.go b/processor/sumologicprocessor/aggregate_attributes_processor_test.go index 53aa738d00a7..b6a8deb097cd 100644 --- a/processor/sumologicprocessor/aggregate_attributes_processor_test.go +++ b/processor/sumologicprocessor/aggregate_attributes_processor_test.go @@ -133,7 
+133,7 @@ func TestMetrics(t *testing.T) { name: "empty", createMetric: pmetric.NewMetric, test: func(m pmetric.Metric) { - require.Equal(t, m.Type(), pmetric.MetricTypeEmpty) + require.Equal(t, pmetric.MetricTypeEmpty, m.Type()) }, }, { @@ -149,7 +149,7 @@ func TestMetrics(t *testing.T) { s := pmetric.NewMetric().SetEmptySum() s.DataPoints().AppendEmpty().Attributes().PutEmptyMap("a").PutStr("c", "x") - require.Equal(t, m.Type(), pmetric.MetricTypeSum) + require.Equal(t, pmetric.MetricTypeSum, m.Type()) require.Equal(t, s.DataPoints().At(0).Attributes().AsRaw(), m.Sum().DataPoints().At(0).Attributes().AsRaw()) }, }, @@ -166,7 +166,7 @@ func TestMetrics(t *testing.T) { s := pmetric.NewMetric().SetEmptyGauge() s.DataPoints().AppendEmpty().Attributes().PutEmptyMap("a").PutStr("c", "x") - require.Equal(t, m.Type(), pmetric.MetricTypeGauge) + require.Equal(t, pmetric.MetricTypeGauge, m.Type()) require.Equal(t, s.DataPoints().At(0).Attributes().AsRaw(), m.Gauge().DataPoints().At(0).Attributes().AsRaw()) }, }, @@ -183,7 +183,7 @@ func TestMetrics(t *testing.T) { s := pmetric.NewMetric().SetEmptyHistogram() s.DataPoints().AppendEmpty().Attributes().PutEmptyMap("a").PutStr("c", "x") - require.Equal(t, m.Type(), pmetric.MetricTypeHistogram) + require.Equal(t, pmetric.MetricTypeHistogram, m.Type()) require.Equal(t, s.DataPoints().At(0).Attributes().AsRaw(), m.Histogram().DataPoints().At(0).Attributes().AsRaw()) }, }, @@ -200,7 +200,7 @@ func TestMetrics(t *testing.T) { s := pmetric.NewMetric().SetEmptyExponentialHistogram() s.DataPoints().AppendEmpty().Attributes().PutEmptyMap("a").PutStr("c", "x") - require.Equal(t, m.Type(), pmetric.MetricTypeExponentialHistogram) + require.Equal(t, pmetric.MetricTypeExponentialHistogram, m.Type()) require.Equal(t, s.DataPoints().At(0).Attributes().AsRaw(), m.ExponentialHistogram().DataPoints().At(0).Attributes().AsRaw()) }, }, @@ -217,7 +217,7 @@ func TestMetrics(t *testing.T) { s := pmetric.NewMetric().SetEmptySummary() 
s.DataPoints().AppendEmpty().Attributes().PutEmptyMap("a").PutStr("c", "x") - require.Equal(t, m.Type(), pmetric.MetricTypeSummary) + require.Equal(t, pmetric.MetricTypeSummary, m.Type()) require.Equal(t, s.DataPoints().At(0).Attributes().AsRaw(), m.Summary().DataPoints().At(0).Attributes().AsRaw()) }, }, diff --git a/processor/sumologicprocessor/translate_docker_metrics_processor_test.go b/processor/sumologicprocessor/translate_docker_metrics_processor_test.go index d452fd3a2375..a6dd12f78ae6 100644 --- a/processor/sumologicprocessor/translate_docker_metrics_processor_test.go +++ b/processor/sumologicprocessor/translate_docker_metrics_processor_test.go @@ -95,7 +95,7 @@ func TestTranslateDockerMetric_ResourceAttrbutesAreTranslatedCorrectly(t *testin res, ok := actual.Get(tc.nameOut) assert.True(t, ok) - assert.Equal(t, res.AsString(), "a") + assert.Equal(t, "a", res.AsString()) }) } } diff --git a/processor/tailsamplingprocessor/config_test.go b/processor/tailsamplingprocessor/config_test.go index 164aa318013d..c94b3fc6b12e 100644 --- a/processor/tailsamplingprocessor/config_test.go +++ b/processor/tailsamplingprocessor/config_test.go @@ -31,7 +31,6 @@ func TestLoadConfig(t *testing.T) { require.NoError(t, sub.Unmarshal(cfg)) assert.Equal(t, - cfg, &Config{ DecisionWait: 10 * time.Second, NumTraces: 100, @@ -185,5 +184,5 @@ func TestLoadConfig(t *testing.T) { }, }, }, - }) + }, cfg) } diff --git a/processor/tailsamplingprocessor/internal/sampling/always_sample_test.go b/processor/tailsamplingprocessor/internal/sampling/always_sample_test.go index f05fd3be3ccf..2be833dc655c 100644 --- a/processor/tailsamplingprocessor/internal/sampling/always_sample_test.go +++ b/processor/tailsamplingprocessor/internal/sampling/always_sample_test.go @@ -17,5 +17,5 @@ func TestEvaluate_AlwaysSample(t *testing.T) { decision, err := filter.Evaluate(context.Background(), pcommon.TraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}), newTraceStringAttrs(nil, 
"example", "value")) assert.NoError(t, err) - assert.Equal(t, decision, Sampled) + assert.Equal(t, Sampled, decision) } diff --git a/processor/tailsamplingprocessor/internal/sampling/and_test.go b/processor/tailsamplingprocessor/internal/sampling/and_test.go index 6c68279cf183..0094768f7590 100644 --- a/processor/tailsamplingprocessor/internal/sampling/and_test.go +++ b/processor/tailsamplingprocessor/internal/sampling/and_test.go @@ -35,7 +35,7 @@ func TestAndEvaluatorNotSampled(t *testing.T) { } decision, err := and.Evaluate(context.Background(), traceID, trace) require.NoError(t, err, "Failed to evaluate and policy: %v", err) - assert.Equal(t, decision, NotSampled) + assert.Equal(t, NotSampled, decision) } @@ -61,7 +61,7 @@ func TestAndEvaluatorSampled(t *testing.T) { } decision, err := and.Evaluate(context.Background(), traceID, trace) require.NoError(t, err, "Failed to evaluate and policy: %v", err) - assert.Equal(t, decision, Sampled) + assert.Equal(t, Sampled, decision) } @@ -87,7 +87,7 @@ func TestAndEvaluatorStringInvertSampled(t *testing.T) { } decision, err := and.Evaluate(context.Background(), traceID, trace) require.NoError(t, err, "Failed to evaluate and policy: %v", err) - assert.Equal(t, decision, Sampled) + assert.Equal(t, Sampled, decision) } @@ -113,6 +113,6 @@ func TestAndEvaluatorStringInvertNotSampled(t *testing.T) { } decision, err := and.Evaluate(context.Background(), traceID, trace) require.NoError(t, err, "Failed to evaluate and policy: %v", err) - assert.Equal(t, decision, InvertNotSampled) + assert.Equal(t, InvertNotSampled, decision) } diff --git a/processor/tailsamplingprocessor/internal/sampling/composite_test.go b/processor/tailsamplingprocessor/internal/sampling/composite_test.go index d7ef82fd7333..66a7d1606c34 100644 --- a/processor/tailsamplingprocessor/internal/sampling/composite_test.go +++ b/processor/tailsamplingprocessor/internal/sampling/composite_test.go @@ -71,7 +71,7 @@ func TestCompositeEvaluatorNotSampled(t *testing.T) 
{ // None of the numeric filters should match since input trace data does not contain // the "tag", so the decision should be NotSampled. expected := NotSampled - assert.Equal(t, decision, expected) + assert.Equal(t, expected, decision) } func TestCompositeEvaluatorSampled(t *testing.T) { @@ -88,7 +88,7 @@ func TestCompositeEvaluatorSampled(t *testing.T) { // The second policy is AlwaysSample, so the decision should be Sampled. expected := Sampled - assert.Equal(t, decision, expected) + assert.Equal(t, expected, decision) } func TestCompositeEvaluator_OverflowAlwaysSampled(t *testing.T) { @@ -107,7 +107,7 @@ func TestCompositeEvaluator_OverflowAlwaysSampled(t *testing.T) { // The first policy is NewNumericAttributeFilter and trace tag matches criteria, so the decision should be Sampled. expected := Sampled - assert.Equal(t, decision, expected) + assert.Equal(t, expected, decision) trace = newTraceWithKV(traceID, "tag", int64(11)) @@ -116,7 +116,7 @@ func TestCompositeEvaluator_OverflowAlwaysSampled(t *testing.T) { // The first policy is NewNumericAttributeFilter and trace tag matches criteria, so the decision should be Sampled. expected = NotSampled - assert.Equal(t, decision, expected) + assert.Equal(t, expected, decision) trace = newTraceWithKV(traceID, "tag", int64(1001)) decision, err = c.Evaluate(context.Background(), traceID, trace) @@ -124,7 +124,7 @@ func TestCompositeEvaluator_OverflowAlwaysSampled(t *testing.T) { // The first policy fails as the tag value is higher than the range set where as the second policy is AlwaysSample, so the decision should be Sampled. expected = Sampled - assert.Equal(t, decision, expected) + assert.Equal(t, expected, decision) } func TestCompositeEvaluatorSampled_AlwaysSampled(t *testing.T) { @@ -142,7 +142,7 @@ func TestCompositeEvaluatorSampled_AlwaysSampled(t *testing.T) { // The second policy is AlwaysSample, so the decision should be Sampled. 
expected := Sampled - assert.Equal(t, decision, expected) + assert.Equal(t, expected, decision) } } @@ -161,7 +161,7 @@ func TestCompositeEvaluatorInverseSampled_AlwaysSampled(t *testing.T) { // The second policy is AlwaysSample, so the decision should be Sampled. expected := Sampled - assert.Equal(t, decision, expected) + assert.Equal(t, expected, decision) } } @@ -181,7 +181,7 @@ func TestCompositeEvaluatorThrottling(t *testing.T) { require.NoError(t, err, "Failed to evaluate composite policy: %v", err) expected := Sampled - assert.Equal(t, decision, expected) + assert.Equal(t, expected, decision) } // Now we hit the rate limit, so subsequent evaluations should result in 100% NotSampled @@ -190,7 +190,7 @@ func TestCompositeEvaluatorThrottling(t *testing.T) { require.NoError(t, err, "Failed to evaluate composite policy: %v", err) expected := NotSampled - assert.Equal(t, decision, expected) + assert.Equal(t, expected, decision) } // Let the time advance by one second. @@ -202,7 +202,7 @@ func TestCompositeEvaluatorThrottling(t *testing.T) { require.NoError(t, err, "Failed to evaluate composite policy: %v", err) expected := Sampled - assert.Equal(t, decision, expected) + assert.Equal(t, expected, decision) } } @@ -249,7 +249,7 @@ func TestCompositeEvaluator2SubpolicyThrottling(t *testing.T) { require.NoError(t, err, "Failed to evaluate composite policy: %v", err) expected := Sampled - assert.Equal(t, decision, expected) + assert.Equal(t, expected, decision) } // Now let's hit the hard limit and exceed the total by a factor of 2 @@ -258,7 +258,7 @@ func TestCompositeEvaluator2SubpolicyThrottling(t *testing.T) { require.NoError(t, err, "Failed to evaluate composite policy: %v", err) expected := NotSampled - assert.Equal(t, decision, expected) + assert.Equal(t, expected, decision) } // Let the time advance by one second. 
@@ -270,6 +270,6 @@ func TestCompositeEvaluator2SubpolicyThrottling(t *testing.T) { require.NoError(t, err, "Failed to evaluate composite policy: %v", err) expected := Sampled - assert.Equal(t, decision, expected) + assert.Equal(t, expected, decision) } } diff --git a/processor/tailsamplingprocessor/internal/sampling/rate_limiting_test.go b/processor/tailsamplingprocessor/internal/sampling/rate_limiting_test.go index 84918bb66eed..afa58ca78484 100644 --- a/processor/tailsamplingprocessor/internal/sampling/rate_limiting_test.go +++ b/processor/tailsamplingprocessor/internal/sampling/rate_limiting_test.go @@ -24,7 +24,7 @@ func TestRateLimiter(t *testing.T) { trace.SpanCount = traceSpanCount decision, err := rateLimiter.Evaluate(context.Background(), traceID, trace) assert.NoError(t, err) - assert.Equal(t, decision, NotSampled) + assert.Equal(t, NotSampled, decision) // Trace span count equal to spans per second traceSpanCount = &atomic.Int64{} @@ -32,7 +32,7 @@ func TestRateLimiter(t *testing.T) { trace.SpanCount = traceSpanCount decision, err = rateLimiter.Evaluate(context.Background(), traceID, trace) assert.NoError(t, err) - assert.Equal(t, decision, NotSampled) + assert.Equal(t, NotSampled, decision) // Trace span count less than spans per second traceSpanCount = &atomic.Int64{} @@ -40,12 +40,12 @@ func TestRateLimiter(t *testing.T) { trace.SpanCount = traceSpanCount decision, err = rateLimiter.Evaluate(context.Background(), traceID, trace) assert.NoError(t, err) - assert.Equal(t, decision, Sampled) + assert.Equal(t, Sampled, decision) // Trace span count less than spans per second traceSpanCount = &atomic.Int64{} trace.SpanCount = traceSpanCount decision, err = rateLimiter.Evaluate(context.Background(), traceID, trace) assert.NoError(t, err) - assert.Equal(t, decision, Sampled) + assert.Equal(t, Sampled, decision) } diff --git a/processor/transformprocessor/README.md b/processor/transformprocessor/README.md index 8189ef104896..abed1de6ac36 100644 --- 
a/processor/transformprocessor/README.md +++ b/processor/transformprocessor/README.md @@ -130,12 +130,12 @@ transform: - context: metric statements: - set(description, "Sum") where type == "Sum" + - convert_sum_to_gauge() where name == "system.processes.count" + - convert_gauge_to_sum("cumulative", false) where name == "prometheus_metric" - context: datapoint statements: - limit(attributes, 100, ["host.name"]) - truncate_all(attributes, 4096) - - convert_sum_to_gauge() where metric.name == "system.processes.count" - - convert_gauge_to_sum("cumulative", false) where metric.name == "prometheus_metric" log_statements: - context: resource diff --git a/processor/transformprocessor/factory_test.go b/processor/transformprocessor/factory_test.go index f3e7a8c7e52a..97a459a096ba 100644 --- a/processor/transformprocessor/factory_test.go +++ b/processor/transformprocessor/factory_test.go @@ -29,12 +29,12 @@ func TestFactory_Type(t *testing.T) { func TestFactory_CreateDefaultConfig(t *testing.T) { factory := NewFactory() cfg := factory.CreateDefaultConfig() - assert.Equal(t, cfg, &Config{ + assert.Equal(t, &Config{ ErrorMode: ottl.PropagateError, TraceStatements: []common.ContextStatements{}, MetricStatements: []common.ContextStatements{}, LogStatements: []common.ContextStatements{}, - }) + }, cfg) assert.NoError(t, componenttest.CheckConfigStruct(cfg)) } diff --git a/receiver/awscloudwatchreceiver/config_test.go b/receiver/awscloudwatchreceiver/config_test.go index 4da170bc895a..2f85c8cd01b4 100644 --- a/receiver/awscloudwatchreceiver/config_test.go +++ b/receiver/awscloudwatchreceiver/config_test.go @@ -260,7 +260,7 @@ func TestLoadConfig(t *testing.T) { loaded, err := cm.Sub(component.NewIDWithName(metadata.Type, tc.name).String()) require.NoError(t, err) require.NoError(t, loaded.Unmarshal(cfg)) - require.Equal(t, cfg, tc.expectedConfig) + require.Equal(t, tc.expectedConfig, cfg) require.NoError(t, component.ValidateConfig(cfg)) }) } diff --git 
a/receiver/awscloudwatchreceiver/logs_test.go b/receiver/awscloudwatchreceiver/logs_test.go index 6861abe2280c..e1558eba9316 100644 --- a/receiver/awscloudwatchreceiver/logs_test.go +++ b/receiver/awscloudwatchreceiver/logs_test.go @@ -95,7 +95,7 @@ func TestPrefixedNamedStreamsConfig(t *testing.T) { groupRequests := alertRcvr.groupRequests require.Len(t, groupRequests, 1) - require.Equal(t, groupRequests[0].groupName(), "test-log-group-name") + require.Equal(t, "test-log-group-name", groupRequests[0].groupName()) err = alertRcvr.Shutdown(context.Background()) require.NoError(t, err) @@ -129,7 +129,7 @@ func TestNamedConfigNoStreamFilter(t *testing.T) { groupRequests := alertRcvr.groupRequests require.Len(t, groupRequests, 1) - require.Equal(t, groupRequests[0].groupName(), "test-log-group-name") + require.Equal(t, "test-log-group-name", groupRequests[0].groupName()) err = alertRcvr.Shutdown(context.Background()) require.NoError(t, err) diff --git a/receiver/awscontainerinsightreceiver/internal/stores/podstore_test.go b/receiver/awscontainerinsightreceiver/internal/stores/podstore_test.go index d40fce2cb845..cefbe4f56c8a 100644 --- a/receiver/awscontainerinsightreceiver/internal/stores/podstore_test.go +++ b/receiver/awscontainerinsightreceiver/internal/stores/podstore_test.go @@ -351,7 +351,7 @@ func TestPodStore_addContainerID(t *testing.T) { expected := map[string]any{} expected["docker"] = map[string]string{"container_id": "637631e2634ea92c0c1aa5d24734cfe794f09c57933026592c12acafbaf6972c"} assert.Equal(t, expected, kubernetesBlob) - assert.Equal(t, metric.GetTag(ci.ContainerNamekey), "ubuntu") + assert.Equal(t, "ubuntu", metric.GetTag(ci.ContainerNamekey)) tags = map[string]string{ci.ContainerNamekey: "notUbuntu", ci.ContainerIDkey: "123"} kubernetesBlob = map[string]any{} @@ -361,7 +361,7 @@ func TestPodStore_addContainerID(t *testing.T) { expected = map[string]any{} expected["container_id"] = "123" assert.Equal(t, expected, kubernetesBlob) - assert.Equal(t, 
metric.GetTag(ci.ContainerNamekey), "notUbuntu") + assert.Equal(t, "notUbuntu", metric.GetTag(ci.ContainerNamekey)) } func TestPodStore_addLabel(t *testing.T) { diff --git a/receiver/awss3receiver/go.mod b/receiver/awss3receiver/go.mod index 849fbc55ed43..8681295c3104 100644 --- a/receiver/awss3receiver/go.mod +++ b/receiver/awss3receiver/go.mod @@ -5,8 +5,8 @@ go 1.22.0 require ( github.com/aws/aws-sdk-go-v2 v1.30.4 github.com/aws/aws-sdk-go-v2/config v1.27.31 - github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.15 - github.com/aws/aws-sdk-go-v2/service/s3 v1.60.1 + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.16 + github.com/aws/aws-sdk-go-v2/service/s3 v1.61.0 github.com/stretchr/testify v1.9.0 go.opentelemetry.io/collector/component v0.108.2-0.20240904075637-48b11ba1c5f8 go.opentelemetry.io/collector/confmap v1.14.2-0.20240904075637-48b11ba1c5f8 diff --git a/receiver/awss3receiver/go.sum b/receiver/awss3receiver/go.sum index ad6d534f6f37..acdb886b2d49 100644 --- a/receiver/awss3receiver/go.sum +++ b/receiver/awss3receiver/go.sum @@ -8,8 +8,8 @@ github.com/aws/aws-sdk-go-v2/credentials v1.17.30 h1:aau/oYFtibVovr2rDt8FHlU17BT github.com/aws/aws-sdk-go-v2/credentials v1.17.30/go.mod h1:BPJ/yXV92ZVq6G8uYvbU0gSl8q94UB63nMT5ctNO38g= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.12 h1:yjwoSyDZF8Jth+mUk5lSPJCkMC0lMy6FaCD51jm6ayE= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.12/go.mod h1:fuR57fAgMk7ot3WcNQfb6rSEn+SUffl7ri+aa8uKysI= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.15 h1:ijB7hr56MngOiELJe0C5aQRaBQ11LveNgWFyG02AUto= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.15/go.mod h1:0QEmQSSWMVfiAk93l1/ayR9DQ9+jwni7gHS2NARZXB0= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.16 h1:1FWqcOnvnO0lRsv0kLACwwQquoZIoS5tD0MtfoNdnkk= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.16/go.mod 
h1:+E8OuB446P/5Swajo40TqenLMzm6aYDEEz6FZDn/u1E= github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16 h1:TNyt/+X43KJ9IJJMjKfa3bNTiZbUP7DeCxfbTROESwY= github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16/go.mod h1:2DwJF39FlNAUiX5pAc0UNeiz16lK2t7IaFcm0LFHEgc= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.16 h1:jYfy8UPmd+6kJW5YhY0L1/KftReOGxI/4NtVSTh9O/I= @@ -26,8 +26,8 @@ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.18 h1:tJ5RnkHC github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.18/go.mod h1:++NHzT+nAF7ZPrHPsA+ENvsXkOO8wEu+C6RXltAG4/c= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.16 h1:jg16PhLPUiHIj8zYIW6bqzeQSuHVEiWnGA0Brz5Xv2I= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.16/go.mod h1:Uyk1zE1VVdsHSU7096h/rwnXDzOzYQVl+FNPhPw7ShY= -github.com/aws/aws-sdk-go-v2/service/s3 v1.60.1 h1:mx2ucgtv+MWzJesJY9Ig/8AFHgoE5FwLXwUVgW/FGdI= -github.com/aws/aws-sdk-go-v2/service/s3 v1.60.1/go.mod h1:BSPI0EfnYUuNHPS0uqIo5VrRwzie+Fp+YhQOUs16sKI= +github.com/aws/aws-sdk-go-v2/service/s3 v1.61.0 h1:Wb544Wh+xfSXqJ/j3R4aX9wrKUoZsJNmilBYZb3mKQ4= +github.com/aws/aws-sdk-go-v2/service/s3 v1.61.0/go.mod h1:BSPI0EfnYUuNHPS0uqIo5VrRwzie+Fp+YhQOUs16sKI= github.com/aws/aws-sdk-go-v2/service/sso v1.22.5 h1:zCsFCKvbj25i7p1u94imVoO447I/sFv8qq+lGJhRN0c= github.com/aws/aws-sdk-go-v2/service/sso v1.22.5/go.mod h1:ZeDX1SnKsVlejeuz41GiajjZpRSWR7/42q/EyA/QEiM= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.5 h1:SKvPgvdvmiTWoi0GAJ7AsJfOz3ngVkD/ERbs5pUnHNI= diff --git a/receiver/awsxrayreceiver/internal/tracesegment/util_test.go b/receiver/awsxrayreceiver/internal/tracesegment/util_test.go index d2cdeed55440..e59ee4d7ab2e 100644 --- a/receiver/awsxrayreceiver/internal/tracesegment/util_test.go +++ b/receiver/awsxrayreceiver/internal/tracesegment/util_test.go @@ -3,7 +3,6 @@ package tracesegment import 
( - "errors" "fmt" "testing" @@ -31,7 +30,7 @@ func TestSplitHeaderBodyWithSeparatorDoesNotExist(t *testing.T) { _, _, err := SplitHeaderBody(buf) var errRecv *recvErr.ErrRecoverable - assert.True(t, errors.As(err, &errRecv), "should return recoverable error") + assert.ErrorAs(t, err, &errRecv, "should return recoverable error") assert.EqualError(t, err, fmt.Sprintf("unable to split incoming data as header and segment, incoming bytes: %v", buf), "expected error messages") @@ -41,7 +40,7 @@ func TestSplitHeaderBodyNilBuf(t *testing.T) { _, _, err := SplitHeaderBody(nil) var errRecv *recvErr.ErrRecoverable - assert.True(t, errors.As(err, &errRecv), "should return recoverable error") + assert.ErrorAs(t, err, &errRecv, "should return recoverable error") assert.EqualError(t, err, "buffer to split is nil", "expected error messages") } @@ -52,7 +51,7 @@ func TestSplitHeaderBodyNonJsonHeader(t *testing.T) { _, _, err := SplitHeaderBody(buf) var errRecv *recvErr.ErrRecoverable - assert.True(t, errors.As(err, &errRecv), "should return recoverable error") + assert.ErrorAs(t, err, &errRecv, "should return recoverable error") assert.Contains(t, err.Error(), "invalid character 'o'") } @@ -76,7 +75,7 @@ func TestSplitHeaderBodyInvalidJsonHeader(t *testing.T) { assert.Error(t, err, "should fail because version is invalid") var errRecv *recvErr.ErrRecoverable - assert.True(t, errors.As(err, &errRecv), "should return recoverable error") + assert.ErrorAs(t, err, &errRecv, "should return recoverable error") assert.Contains(t, err.Error(), fmt.Sprintf("invalid header %+v", Header{ Format: "json", diff --git a/receiver/awsxrayreceiver/internal/translator/cause_test.go b/receiver/awsxrayreceiver/internal/translator/cause_test.go index a87549c1bc03..5e6aee268c7b 100644 --- a/receiver/awsxrayreceiver/internal/translator/cause_test.go +++ b/receiver/awsxrayreceiver/internal/translator/cause_test.go @@ -30,7 +30,7 @@ func TestConvertStackFramesToStackTraceStr(t *testing.T) { }, } actual := 
convertStackFramesToStackTraceStr(excp) - assert.Equal(t, actual, "exceptionType: exceptionMessage\n\tat label0(path0: 10)\n\tat label1(path1: 11)\n") + assert.Equal(t, "exceptionType: exceptionMessage\n\tat label0(path0: 10)\n\tat label1(path1: 11)\n", actual) } func TestConvertStackFramesToStackTraceStrNoPath(t *testing.T) { @@ -50,7 +50,7 @@ func TestConvertStackFramesToStackTraceStrNoPath(t *testing.T) { }, } actual := convertStackFramesToStackTraceStr(excp) - assert.Equal(t, actual, "exceptionType: exceptionMessage\n\tat label0(path0: 10)\n\tat label1(: 11)\n") + assert.Equal(t, "exceptionType: exceptionMessage\n\tat label0(path0: 10)\n\tat label1(: 11)\n", actual) } func TestConvertStackFramesToStackTraceStrNoLine(t *testing.T) { @@ -70,7 +70,7 @@ func TestConvertStackFramesToStackTraceStrNoLine(t *testing.T) { }, } actual := convertStackFramesToStackTraceStr(excp) - assert.Equal(t, actual, "exceptionType: exceptionMessage\n\tat label0(path0: 10)\n\tat label1(path1: )\n") + assert.Equal(t, "exceptionType: exceptionMessage\n\tat label0(path0: 10)\n\tat label1(path1: )\n", actual) } func TestConvertStackFramesToStackTraceStrNoLabel(t *testing.T) { @@ -90,7 +90,7 @@ func TestConvertStackFramesToStackTraceStrNoLabel(t *testing.T) { }, } actual := convertStackFramesToStackTraceStr(excp) - assert.Equal(t, actual, "exceptionType: exceptionMessage\n\tat label0(path0: 10)\n\tat (path1: 11)\n") + assert.Equal(t, "exceptionType: exceptionMessage\n\tat label0(path0: 10)\n\tat (path1: 11)\n", actual) } func TestConvertStackFramesToStackTraceStrNoErrorMessage(t *testing.T) { @@ -108,5 +108,5 @@ func TestConvertStackFramesToStackTraceStrNoErrorMessage(t *testing.T) { }, } actual := convertStackFramesToStackTraceStr(excp) - assert.Equal(t, actual, ": \n\tat label0(path0: 10)\n\tat (path1: 11)\n") + assert.Equal(t, ": \n\tat label0(path0: 10)\n\tat (path1: 11)\n", actual) } diff --git a/receiver/awsxrayreceiver/internal/translator/translator_test.go 
b/receiver/awsxrayreceiver/internal/translator/translator_test.go index 2fa7015fad27..f241cca3ef34 100644 --- a/receiver/awsxrayreceiver/internal/translator/translator_test.go +++ b/receiver/awsxrayreceiver/internal/translator/translator_test.go @@ -1092,7 +1092,7 @@ func TestDecodeXRayTraceID(t *testing.T) { traceIDBytes, err := decodeXRayTraceID(&traceID) expectedTraceIDBytes := [16]byte{0x5f, 0x84, 0xc7, 0xa1, 0xe7, 0xd1, 0x85, 0x2d, 0xb8, 0xc4, 0xfd, 0x35, 0xd8, 0x8b, 0xf4, 0x9a} if assert.NoError(t, err) { - assert.Equal(t, traceIDBytes, expectedTraceIDBytes) + assert.Equal(t, expectedTraceIDBytes, traceIDBytes) } // invalid format @@ -1111,7 +1111,7 @@ func TestDecodeXRaySpanID(t *testing.T) { spanIDBytes, err := decodeXRaySpanID(&spanID) expectedSpanIDBytes := [8]byte{0xde, 0xfd, 0xfd, 0x99, 0x12, 0xdc, 0x5a, 0x56} if assert.NoError(t, err) { - assert.Equal(t, spanIDBytes, expectedSpanIDBytes) + assert.Equal(t, expectedSpanIDBytes, spanIDBytes) } // invalid format diff --git a/receiver/azureblobreceiver/receiver_test.go b/receiver/azureblobreceiver/receiver_test.go index 614850d47b23..8670ebea1508 100644 --- a/receiver/azureblobreceiver/receiver_test.go +++ b/receiver/azureblobreceiver/receiver_test.go @@ -38,7 +38,7 @@ func TestConsumeLogsJSON(t *testing.T) { err := logsConsumer.consumeLogsJSON(context.Background(), logsJSON) require.NoError(t, err) - assert.Equal(t, logsSink.LogRecordCount(), 1) + assert.Equal(t, 1, logsSink.LogRecordCount()) } func TestConsumeTracesJSON(t *testing.T) { @@ -52,7 +52,7 @@ func TestConsumeTracesJSON(t *testing.T) { err := tracesConsumer.consumeTracesJSON(context.Background(), tracesJSON) require.NoError(t, err) - assert.Equal(t, tracesSink.SpanCount(), 2) + assert.Equal(t, 2, tracesSink.SpanCount()) } func getBlobReceiver(t *testing.T) (component.Component, error) { diff --git a/receiver/azuremonitorreceiver/internal/metadata/metrics_test.go b/receiver/azuremonitorreceiver/internal/metadata/metrics_test.go index 
1f953fe270a4..8b09ba23407f 100644 --- a/receiver/azuremonitorreceiver/internal/metadata/metrics_test.go +++ b/receiver/azuremonitorreceiver/internal/metadata/metrics_test.go @@ -84,7 +84,7 @@ func TestMetricsBuilder(t *testing.T) { assert.EqualValues(t, "attr-val", attrVal.Str()) } assert.Equal(t, enabledAttrCount, rm.Resource().Attributes().Len()) - assert.Equal(t, attrCount, 2) + assert.Equal(t, 2, attrCount) assert.Equal(t, 1, rm.ScopeMetrics().Len()) ms := rm.ScopeMetrics().At(0).Metrics() diff --git a/receiver/couchdbreceiver/scraper_test.go b/receiver/couchdbreceiver/scraper_test.go index a4e64ae77bbc..6528a43ad696 100644 --- a/receiver/couchdbreceiver/scraper_test.go +++ b/receiver/couchdbreceiver/scraper_test.go @@ -81,7 +81,7 @@ func TestScrape(t *testing.T) { assert.Equal(t, 0, metrics.DataPointCount(), "Expected 0 datapoints to be collected") var partialScrapeErr scrapererror.PartialScrapeError - require.True(t, errors.As(err, &partialScrapeErr), "returned error was not PartialScrapeError") + require.ErrorAs(t, err, &partialScrapeErr, "returned error was not PartialScrapeError") require.Greater(t, partialScrapeErr.Failed, 0, "Expected scrape failures, but none were recorded!") }) @@ -172,7 +172,7 @@ func TestMetricSettings(t *testing.T) { require.NoError(t, pmetrictest.CompareMetrics(expected, metrics, pmetrictest.IgnoreMetricDataPointsOrder(), pmetrictest.IgnoreStartTimestamp(), pmetrictest.IgnoreTimestamp())) - require.Equal(t, metrics.MetricCount(), 1) + require.Equal(t, 1, metrics.MetricCount()) } func getStats(filename string) (map[string]any, error) { diff --git a/receiver/datadogreceiver/go.mod b/receiver/datadogreceiver/go.mod index 8ce4634b074b..dfefa9acfaf0 100644 --- a/receiver/datadogreceiver/go.mod +++ b/receiver/datadogreceiver/go.mod @@ -13,7 +13,7 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.108.0 github.com/stretchr/testify v1.9.0 github.com/tinylib/msgp v1.2.0 
- github.com/vmihailenco/msgpack/v4 v4.3.13 + github.com/vmihailenco/msgpack/v5 v5.4.1 go.opentelemetry.io/collector/component v0.108.2-0.20240904075637-48b11ba1c5f8 go.opentelemetry.io/collector/component/componentstatus v0.108.2-0.20240904075637-48b11ba1c5f8 go.opentelemetry.io/collector/config/confighttp v0.108.2-0.20240904075637-48b11ba1c5f8 @@ -80,7 +80,8 @@ require ( github.com/shoenig/go-m1cpu v0.1.6 // indirect github.com/tklauser/go-sysconf v0.3.12 // indirect github.com/tklauser/numcpus v0.6.1 // indirect - github.com/vmihailenco/tagparser v0.1.2 // indirect + github.com/vmihailenco/msgpack/v4 v4.3.13 // indirect + github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect go.opentelemetry.io/collector v0.108.2-0.20240904075637-48b11ba1c5f8 // indirect go.opentelemetry.io/collector/client v1.14.2-0.20240904075637-48b11ba1c5f8 // indirect @@ -111,7 +112,6 @@ require ( golang.org/x/sys v0.24.0 // indirect golang.org/x/text v0.17.0 // indirect golang.org/x/tools v0.22.0 // indirect - google.golang.org/appengine v1.6.8 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd // indirect google.golang.org/grpc v1.66.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect diff --git a/receiver/datadogreceiver/go.sum b/receiver/datadogreceiver/go.sum index f13b6a638869..ce8a2b43c55c 100644 --- a/receiver/datadogreceiver/go.sum +++ b/receiver/datadogreceiver/go.sum @@ -59,13 +59,10 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69 github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.5.0/go.mod 
h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= @@ -166,13 +163,16 @@ github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+F github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= github.com/vmihailenco/msgpack/v4 v4.3.13 h1:A2wsiTbvp63ilDaWmsk2wjx6xZdxQOvpiNlKBGKKXKI= github.com/vmihailenco/msgpack/v4 v4.3.13/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= +github.com/vmihailenco/msgpack/v5 v5.4.1 h1:cQriyiUvjTwOHg8QZaPihLWeRAAVoCpE00IUPn0Bjt8= +github.com/vmihailenco/msgpack/v5 v5.4.1/go.mod h1:GaZTsDaehaPpQVyxrf5mtQlH+pc21PIudVV/E3rRQok= github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= github.com/vmihailenco/tagparser v0.1.2 h1:gnjoVuB/kljJ5wICEEOpx98oXMWPLj22G67Vbd1qPqc= github.com/vmihailenco/tagparser v0.1.2/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= +github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= +github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod 
h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= go.opentelemetry.io/collector v0.108.2-0.20240904075637-48b11ba1c5f8 h1:qGTe/9zMGoWc9OVx++BTErlSMNURVzSUEtkXKm66u2M= @@ -249,13 +249,11 @@ go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0= golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net 
v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -264,9 +262,7 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= @@ -275,7 +271,6 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -288,22 +283,16 @@ golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys 
v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= @@ -313,7 +302,6 @@ 
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA= golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -327,8 +315,6 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd h1: google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.66.0 h1:DibZuoBznOxbDQxRINckZcUvnCEvrW9pcWIE2yF9r1c= google.golang.org/grpc v1.66.0/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/receiver/datadogreceiver/internal/translator/traces_translator_test.go b/receiver/datadogreceiver/internal/translator/traces_translator_test.go index 60cb51029bd8..c197d5bb6d74 100644 --- a/receiver/datadogreceiver/internal/translator/traces_translator_test.go +++ b/receiver/datadogreceiver/internal/translator/traces_translator_test.go @@ -13,7 +13,7 @@ import ( pb 
"github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - vmsgp "github.com/vmihailenco/msgpack/v4" + vmsgp "github.com/vmihailenco/msgpack/v5" "go.opentelemetry.io/collector/pdata/pcommon" semconv "go.opentelemetry.io/collector/semconv/v1.16.0" "google.golang.org/protobuf/proto" diff --git a/receiver/datadogreceiver/receiver_test.go b/receiver/datadogreceiver/receiver_test.go index 526b8967fccc..e172735c3bcf 100644 --- a/receiver/datadogreceiver/receiver_test.go +++ b/receiver/datadogreceiver/receiver_test.go @@ -295,7 +295,7 @@ func TestDatadogMetricsV1_EndToEnd(t *testing.T) { body, err := io.ReadAll(resp.Body) require.NoError(t, multierr.Combine(err, resp.Body.Close()), "Must not error when reading body") - require.Equal(t, string(body), "OK", "Expected response to be 'OK', got %s", string(body)) + require.Equal(t, "OK", string(body), "Expected response to be 'OK', got %s", string(body)) require.Equal(t, http.StatusAccepted, resp.StatusCode) mds := sink.AllMetrics() @@ -373,7 +373,7 @@ func TestDatadogMetricsV2_EndToEnd(t *testing.T) { body, err := io.ReadAll(resp.Body) require.NoError(t, multierr.Combine(err, resp.Body.Close()), "Must not error when reading body") - require.Equal(t, string(body), "OK", "Expected response to be 'OK', got %s", string(body)) + require.Equal(t, "OK", string(body), "Expected response to be 'OK', got %s", string(body)) require.Equal(t, http.StatusAccepted, resp.StatusCode) mds := sink.AllMetrics() @@ -464,7 +464,7 @@ func TestStats_EndToEnd(t *testing.T) { body, err := io.ReadAll(resp.Body) require.NoError(t, multierr.Combine(err, resp.Body.Close()), "Must not error when reading body") - require.Equal(t, string(body), "OK", "Expected response to be 'OK', got %s", string(body)) + require.Equal(t, "OK", string(body), "Expected response to be 'OK', got %s", string(body)) require.Equal(t, http.StatusOK, resp.StatusCode) mds 
:= sink.AllMetrics() diff --git a/receiver/elasticsearchreceiver/scraper_test.go b/receiver/elasticsearchreceiver/scraper_test.go index 72e99092d9ba..c34601544200 100644 --- a/receiver/elasticsearchreceiver/scraper_test.go +++ b/receiver/elasticsearchreceiver/scraper_test.go @@ -314,7 +314,7 @@ func TestScrapingError(t *testing.T) { require.Contains(t, err.Error(), err404.Error()) require.Contains(t, err.Error(), err500.Error()) - require.Equal(t, m.DataPointCount(), 0) + require.Equal(t, 0, m.DataPointCount()) }, }, { @@ -369,7 +369,7 @@ func TestScrapingError(t *testing.T) { require.Contains(t, err.Error(), err404.Error()) require.Contains(t, err.Error(), err500.Error()) - require.Equal(t, m.DataPointCount(), 0) + require.Equal(t, 0, m.DataPointCount()) }, }, { diff --git a/receiver/flinkmetricsreceiver/client_test.go b/receiver/flinkmetricsreceiver/client_test.go index 019b8d6c701f..d11d9b018d94 100644 --- a/receiver/flinkmetricsreceiver/client_test.go +++ b/receiver/flinkmetricsreceiver/client_test.go @@ -544,7 +544,7 @@ func TestGetSubtasksMetrics(t *testing.T) { var e *models.JobsResponse _ = json.Unmarshal(jobsData, &e) - require.EqualValues(t, e.Jobs[0].ID, "54a5c6e527e00e1bb861272a39fe13e4") + require.EqualValues(t, "54a5c6e527e00e1bb861272a39fe13e4", e.Jobs[0].ID) // Load the valid data into a struct to compare var expected *models.MetricsResponse diff --git a/receiver/githubreceiver/go.mod b/receiver/githubreceiver/go.mod index ec7b0440ea88..379b39814aed 100644 --- a/receiver/githubreceiver/go.mod +++ b/receiver/githubreceiver/go.mod @@ -5,7 +5,7 @@ go 1.22.0 require ( github.com/Khan/genqlient v0.7.0 github.com/google/go-cmp v0.6.0 - github.com/google/go-github/v63 v63.0.0 + github.com/google/go-github/v64 v64.0.0 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.108.0 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.108.0 github.com/stretchr/testify 
v1.9.0 diff --git a/receiver/githubreceiver/go.sum b/receiver/githubreceiver/go.sum index e0d1df56722d..570c59cce9a3 100644 --- a/receiver/githubreceiver/go.sum +++ b/receiver/githubreceiver/go.sum @@ -33,8 +33,8 @@ github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEW github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-github/v63 v63.0.0 h1:13xwK/wk9alSokujB9lJkuzdmQuVn2QCPeck76wR3nE= -github.com/google/go-github/v63 v63.0.0/go.mod h1:IqbcrgUmIcEaioWrGYei/09o+ge5vhffGOcxrO0AfmA= +github.com/google/go-github/v64 v64.0.0 h1:4G61sozmY3eiPAjjoOHponXDBONm+utovTKbyUb2Qdg= +github.com/google/go-github/v64 v64.0.0/go.mod h1:xB3vqMQNdHzilXBiO2I+M7iEFtHf+DP/omBOv6tQzVo= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= diff --git a/receiver/githubreceiver/internal/scraper/githubscraper/github_scraper_test.go b/receiver/githubreceiver/internal/scraper/githubscraper/github_scraper_test.go index e5def344c713..56fb777911b7 100644 --- a/receiver/githubreceiver/internal/scraper/githubscraper/github_scraper_test.go +++ b/receiver/githubreceiver/internal/scraper/githubscraper/github_scraper_test.go @@ -11,7 +11,7 @@ import ( "testing" "time" - "github.com/google/go-github/v63/github" + "github.com/google/go-github/v64/github" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component/componenttest" diff --git 
a/receiver/githubreceiver/internal/scraper/githubscraper/helpers.go b/receiver/githubreceiver/internal/scraper/githubscraper/helpers.go index e88a26f6bab0..e49a43ce1f6f 100644 --- a/receiver/githubreceiver/internal/scraper/githubscraper/helpers.go +++ b/receiver/githubreceiver/internal/scraper/githubscraper/helpers.go @@ -12,7 +12,7 @@ import ( "time" "github.com/Khan/genqlient/graphql" - "github.com/google/go-github/v63/github" + "github.com/google/go-github/v64/github" ) const ( diff --git a/receiver/githubreceiver/internal/scraper/githubscraper/helpers_test.go b/receiver/githubreceiver/internal/scraper/githubscraper/helpers_test.go index 4007da15dfa8..1edaec0bc111 100644 --- a/receiver/githubreceiver/internal/scraper/githubscraper/helpers_test.go +++ b/receiver/githubreceiver/internal/scraper/githubscraper/helpers_test.go @@ -15,7 +15,7 @@ import ( "time" "github.com/Khan/genqlient/graphql" - "github.com/google/go-github/v63/github" + "github.com/google/go-github/v64/github" "github.com/stretchr/testify/assert" "go.opentelemetry.io/collector/receiver/receivertest" ) diff --git a/receiver/googlecloudpubsubreceiver/go.mod b/receiver/googlecloudpubsubreceiver/go.mod index 87bb1c110b9c..4ead14012076 100644 --- a/receiver/googlecloudpubsubreceiver/go.mod +++ b/receiver/googlecloudpubsubreceiver/go.mod @@ -19,8 +19,8 @@ require ( go.uber.org/goleak v1.3.0 go.uber.org/multierr v1.11.0 go.uber.org/zap v1.27.0 - google.golang.org/api v0.194.0 - google.golang.org/genproto v0.0.0-20240814211410-ddb44dafa142 + google.golang.org/api v0.195.0 + google.golang.org/genproto v0.0.0-20240823204242-4ba0660f739c google.golang.org/genproto/googleapis/api v0.0.0-20240822170219-fc7c04adadcd google.golang.org/grpc v1.66.0 google.golang.org/protobuf v1.34.2 @@ -31,8 +31,8 @@ require ( cloud.google.com/go/auth v0.9.1 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect cloud.google.com/go/compute/metadata v0.5.0 // 
indirect - cloud.google.com/go/iam v1.1.12 // indirect - cloud.google.com/go/longrunning v0.5.11 // indirect + cloud.google.com/go/iam v1.1.13 // indirect + cloud.google.com/go/longrunning v0.5.12 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect @@ -86,7 +86,7 @@ require ( golang.org/x/sys v0.24.0 // indirect golang.org/x/text v0.17.0 // indirect golang.org/x/time v0.6.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240823204242-4ba0660f739c // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/receiver/googlecloudpubsubreceiver/go.sum b/receiver/googlecloudpubsubreceiver/go.sum index 5b75d343babb..e83380d95432 100644 --- a/receiver/googlecloudpubsubreceiver/go.sum +++ b/receiver/googlecloudpubsubreceiver/go.sum @@ -7,12 +7,12 @@ cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc= cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY= cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= -cloud.google.com/go/iam v1.1.12 h1:JixGLimRrNGcxvJEQ8+clfLxPlbeZA6MuRJ+qJNQ5Xw= -cloud.google.com/go/iam v1.1.12/go.mod h1:9LDX8J7dN5YRyzVHxwQzrQs9opFFqn0Mxs9nAeB+Hhg= +cloud.google.com/go/iam v1.1.13 h1:7zWBXG9ERbMLrzQBRhFliAV+kjcRToDTgQT3CTwYyv4= +cloud.google.com/go/iam v1.1.13/go.mod h1:K8mY0uSXwEXS30KrnVb+j54LB/ntfZu1dr+4zFMNbus= cloud.google.com/go/logging v1.11.0 h1:v3ktVzXMV7CwHq1MBF65wcqLMA7i+z3YxbUsoK7mOKs= cloud.google.com/go/logging v1.11.0/go.mod h1:5LDiJC/RxTt+fHc1LAt20R9TKiUTReDg6RuuFOZ67+A= -cloud.google.com/go/longrunning v0.5.11 h1:Havn1kGjz3whCfoD8dxMLP73Ph5w+ODyZB9RUsDxtGk= -cloud.google.com/go/longrunning 
v0.5.11/go.mod h1:rDn7//lmlfWV1Dx6IB4RatCPenTwwmqXuiP0/RgoEO4= +cloud.google.com/go/longrunning v0.5.12 h1:5LqSIdERr71CqfUsFlJdBpOkBH8FBCFD7P1nTWy3TYE= +cloud.google.com/go/longrunning v0.5.12/go.mod h1:S5hMV8CDJ6r50t2ubVJSKQVv5u0rmik5//KgLO3k4lU= cloud.google.com/go/pubsub v1.42.0 h1:PVTbzorLryFL5ue8esTS2BfehUs0ahyNOY9qcd+HMOs= cloud.google.com/go/pubsub v1.42.0/go.mod h1:KADJ6s4MbTwhXmse/50SebEhE4SmUwHi48z3/dHar1Y= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= @@ -250,19 +250,19 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.194.0 h1:dztZKG9HgtIpbI35FhfuSNR/zmaMVdxNlntHj1sIS4s= -google.golang.org/api v0.194.0/go.mod h1:AgvUFdojGANh3vI+P7EVnxj3AISHllxGCJSFmggmnd0= +google.golang.org/api v0.195.0 h1:Ude4N8FvTKnnQJHU48RFI40jOBgIrL8Zqr3/QeST6yU= +google.golang.org/api v0.195.0/go.mod h1:DOGRWuv3P8TU8Lnz7uQc4hyNqrBpMtD9ppW3wBJurgc= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20240814211410-ddb44dafa142 h1:oLiyxGgE+rt22duwci1+TG7bg2/L1LQsXwfjPlmuJA0= -google.golang.org/genproto v0.0.0-20240814211410-ddb44dafa142/go.mod 
h1:G11eXq53iI5Q+kyNOmCvnzBaxEA2Q/Ik5Tj7nqBE8j4= +google.golang.org/genproto v0.0.0-20240823204242-4ba0660f739c h1:TYOEhrQMrNDTAd2rX9m+WgGr8Ku6YNuj1D7OX6rWSok= +google.golang.org/genproto v0.0.0-20240823204242-4ba0660f739c/go.mod h1:2rC5OendXvZ8wGEo/cSLheztrZDZaSoHanUcd1xtZnw= google.golang.org/genproto/googleapis/api v0.0.0-20240822170219-fc7c04adadcd h1:BBOTEWLuuEGQy9n1y9MhVJ9Qt0BDu21X8qZs71/uPZo= google.golang.org/genproto/googleapis/api v0.0.0-20240822170219-fc7c04adadcd/go.mod h1:fO8wJzT2zbQbAjbIoos1285VfEIYKDDY+Dt+WpTkh6g= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd h1:6TEm2ZxXoQmFWFlt1vNxvVOa1Q0dXFQD1m/rYjXmS0E= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240823204242-4ba0660f739c h1:Kqjm4WpoWvwhMPcrAczoTyMySQmYa9Wy2iL6Con4zn8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240823204242-4ba0660f739c/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= diff --git a/receiver/googlecloudspannerreceiver/go.mod b/receiver/googlecloudspannerreceiver/go.mod index 063e016a9649..e4141cea3feb 100644 --- a/receiver/googlecloudspannerreceiver/go.mod +++ b/receiver/googlecloudspannerreceiver/go.mod @@ -16,7 +16,7 @@ require ( go.uber.org/goleak v1.3.0 go.uber.org/multierr v1.11.0 go.uber.org/zap v1.27.0 - google.golang.org/api v0.194.0 + google.golang.org/api v0.195.0 google.golang.org/grpc v1.66.0 gopkg.in/yaml.v3 v3.0.1 ) @@ -27,8 +27,8 @@ require ( cloud.google.com/go/auth v0.9.1 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect cloud.google.com/go/compute/metadata v0.5.0 // indirect - cloud.google.com/go/iam v1.1.12 // 
indirect - cloud.google.com/go/longrunning v0.5.11 // indirect + cloud.google.com/go/iam v1.1.13 // indirect + cloud.google.com/go/longrunning v0.5.12 // indirect github.com/GoogleCloudPlatform/grpc-gcp-go/grpcgcp v1.5.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect @@ -86,9 +86,9 @@ require ( golang.org/x/text v0.17.0 // indirect golang.org/x/time v0.6.0 // indirect golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 // indirect - google.golang.org/genproto v0.0.0-20240814211410-ddb44dafa142 // indirect + google.golang.org/genproto v0.0.0-20240823204242-4ba0660f739c // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240822170219-fc7c04adadcd // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240823204242-4ba0660f739c // indirect google.golang.org/protobuf v1.34.2 // indirect ) diff --git a/receiver/googlecloudspannerreceiver/go.sum b/receiver/googlecloudspannerreceiver/go.sum index 2b099226eb3e..471cdcd05f6b 100644 --- a/receiver/googlecloudspannerreceiver/go.sum +++ b/receiver/googlecloudspannerreceiver/go.sum @@ -319,8 +319,8 @@ cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGE cloud.google.com/go/iam v0.11.0/go.mod h1:9PiLDanza5D+oWFZiH1uG+RnRCfEGKoyl6yo4cgWZGY= cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY= cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= -cloud.google.com/go/iam v1.1.12 h1:JixGLimRrNGcxvJEQ8+clfLxPlbeZA6MuRJ+qJNQ5Xw= -cloud.google.com/go/iam v1.1.12/go.mod h1:9LDX8J7dN5YRyzVHxwQzrQs9opFFqn0Mxs9nAeB+Hhg= +cloud.google.com/go/iam v1.1.13 h1:7zWBXG9ERbMLrzQBRhFliAV+kjcRToDTgQT3CTwYyv4= +cloud.google.com/go/iam v1.1.13/go.mod h1:K8mY0uSXwEXS30KrnVb+j54LB/ntfZu1dr+4zFMNbus= cloud.google.com/go/iap v1.4.0/go.mod 
h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= cloud.google.com/go/iap v1.6.0/go.mod h1:NSuvI9C/j7UdjGjIde7t7HBz+QTwBcapPE07+sSRcLk= @@ -353,8 +353,8 @@ cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeN cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= -cloud.google.com/go/longrunning v0.5.11 h1:Havn1kGjz3whCfoD8dxMLP73Ph5w+ODyZB9RUsDxtGk= -cloud.google.com/go/longrunning v0.5.11/go.mod h1:rDn7//lmlfWV1Dx6IB4RatCPenTwwmqXuiP0/RgoEO4= +cloud.google.com/go/longrunning v0.5.12 h1:5LqSIdERr71CqfUsFlJdBpOkBH8FBCFD7P1nTWy3TYE= +cloud.google.com/go/longrunning v0.5.12/go.mod h1:S5hMV8CDJ6r50t2ubVJSKQVv5u0rmik5//KgLO3k4lU= cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= @@ -1435,8 +1435,8 @@ google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/ google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI= google.golang.org/api v0.111.0/go.mod h1:qtFHvU9mhgTJegR31csQ+rwxyUTHOKFqCKWp1J0fdw0= google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= -google.golang.org/api v0.194.0 h1:dztZKG9HgtIpbI35FhfuSNR/zmaMVdxNlntHj1sIS4s= -google.golang.org/api v0.194.0/go.mod h1:AgvUFdojGANh3vI+P7EVnxj3AISHllxGCJSFmggmnd0= +google.golang.org/api v0.195.0 h1:Ude4N8FvTKnnQJHU48RFI40jOBgIrL8Zqr3/QeST6yU= +google.golang.org/api v0.195.0/go.mod h1:DOGRWuv3P8TU8Lnz7uQc4hyNqrBpMtD9ppW3wBJurgc= google.golang.org/appengine 
v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1576,12 +1576,12 @@ google.golang.org/genproto v0.0.0-20230323212658-478b75c54725/go.mod h1:UUQDJDOl google.golang.org/genproto v0.0.0-20230330154414-c0448cd141ea/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= -google.golang.org/genproto v0.0.0-20240814211410-ddb44dafa142 h1:oLiyxGgE+rt22duwci1+TG7bg2/L1LQsXwfjPlmuJA0= -google.golang.org/genproto v0.0.0-20240814211410-ddb44dafa142/go.mod h1:G11eXq53iI5Q+kyNOmCvnzBaxEA2Q/Ik5Tj7nqBE8j4= +google.golang.org/genproto v0.0.0-20240823204242-4ba0660f739c h1:TYOEhrQMrNDTAd2rX9m+WgGr8Ku6YNuj1D7OX6rWSok= +google.golang.org/genproto v0.0.0-20240823204242-4ba0660f739c/go.mod h1:2rC5OendXvZ8wGEo/cSLheztrZDZaSoHanUcd1xtZnw= google.golang.org/genproto/googleapis/api v0.0.0-20240822170219-fc7c04adadcd h1:BBOTEWLuuEGQy9n1y9MhVJ9Qt0BDu21X8qZs71/uPZo= google.golang.org/genproto/googleapis/api v0.0.0-20240822170219-fc7c04adadcd/go.mod h1:fO8wJzT2zbQbAjbIoos1285VfEIYKDDY+Dt+WpTkh6g= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd h1:6TEm2ZxXoQmFWFlt1vNxvVOa1Q0dXFQD1m/rYjXmS0E= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240823204242-4ba0660f739c h1:Kqjm4WpoWvwhMPcrAczoTyMySQmYa9Wy2iL6Con4zn8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240823204242-4ba0660f739c/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.19.0/go.mod 
h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= diff --git a/receiver/googlecloudspannerreceiver/internal/metadata/metricdatatype_test.go b/receiver/googlecloudspannerreceiver/internal/metadata/metricdatatype_test.go index 54e1347d23d6..6c6f13aabc7a 100644 --- a/receiver/googlecloudspannerreceiver/internal/metadata/metricdatatype_test.go +++ b/receiver/googlecloudspannerreceiver/internal/metadata/metricdatatype_test.go @@ -15,21 +15,21 @@ func TestNewMetricType(t *testing.T) { metricDataType := NewMetricType(pmetric.MetricTypeGauge, pmetric.AggregationTemporalityDelta, true) require.NotNil(t, metricDataType) - assert.Equal(t, metricDataType.MetricType(), pmetric.MetricTypeGauge) - assert.Equal(t, metricDataType.AggregationTemporality(), pmetric.AggregationTemporalityDelta) + assert.Equal(t, pmetric.MetricTypeGauge, metricDataType.MetricType()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, metricDataType.AggregationTemporality()) assert.True(t, metricDataType.IsMonotonic()) } func TestMetricValueDataType_MetricType(t *testing.T) { valueDataType := metricValueDataType{dataType: pmetric.MetricTypeGauge} - assert.Equal(t, valueDataType.MetricType(), pmetric.MetricTypeGauge) + assert.Equal(t, pmetric.MetricTypeGauge, valueDataType.MetricType()) } func TestMetricValueDataType_AggregationTemporality(t *testing.T) { valueDataType := metricValueDataType{aggregationTemporality: pmetric.AggregationTemporalityDelta} - assert.Equal(t, valueDataType.AggregationTemporality(), pmetric.AggregationTemporalityDelta) + assert.Equal(t, pmetric.AggregationTemporalityDelta, valueDataType.AggregationTemporality()) } func TestMetricValueDataType_IsMonotonic(t *testing.T) { diff --git a/receiver/googlecloudspannerreceiver/internal/metadata/metricsdatapoint_test.go 
b/receiver/googlecloudspannerreceiver/internal/metadata/metricsdatapoint_test.go index 6167a5e37f1c..b837b0f6e04a 100644 --- a/receiver/googlecloudspannerreceiver/internal/metadata/metricsdatapoint_test.go +++ b/receiver/googlecloudspannerreceiver/internal/metadata/metricsdatapoint_test.go @@ -169,7 +169,7 @@ func TestMetricsDataPoint_TruncateQueryText(t *testing.T) { metricsDataPoint.TruncateQueryText(6) assert.Len(t, metricsDataPoint.labelValues, 1) - assert.Equal(t, metricsDataPoint.labelValues[0].Value(), "SELECT") + assert.Equal(t, "SELECT", metricsDataPoint.labelValues[0].Value()) } func allPossibleLabelValues() []LabelValue { diff --git a/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_scraper_test.go b/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_scraper_test.go index 783a7e774f2a..c4204a574e67 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_scraper_test.go +++ b/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_scraper_test.go @@ -227,7 +227,7 @@ func assertNetworkIOMetricValid(t *testing.T, metric pmetric.Metric, expectedNam } func assertNetworkConnectionsMetricValid(t *testing.T, metric pmetric.Metric) { - assert.Equal(t, metric.Name(), "system.network.connections") + assert.Equal(t, "system.network.connections", metric.Name()) internal.AssertSumMetricHasAttributeValue(t, metric, 0, "protocol", pcommon.NewValueStr(metadata.AttributeProtocolTcp.String())) internal.AssertSumMetricHasAttribute(t, metric, 0, "state") diff --git a/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/pagefile_linux_test.go b/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/pagefile_linux_test.go index 7afe4c0b34ce..40f7213706dd 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/pagefile_linux_test.go +++ b/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/pagefile_linux_test.go @@ -27,19 +27,19 @@ func 
TestGetPageFileStats_ValidFile(t *testing.T) { stats, err := parseSwapsFile(strings.NewReader(validFile)) assert.NoError(err) - assert.Equal(*stats[0], pageFileStats{ + assert.Equal(pageFileStats{ deviceName: "/dev/dm-2", usedBytes: 502566912, freeBytes: 68128825344, totalBytes: 68631392256, - }) + }, *stats[0]) - assert.Equal(*stats[1], pageFileStats{ + assert.Equal(pageFileStats{ deviceName: "/swapfile", usedBytes: 1024, freeBytes: 1024, totalBytes: 2048, - }) + }, *stats[1]) } func TestGetPageFileStats_InvalidFile(t *testing.T) { diff --git a/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_test.go b/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_test.go index 53f90293f21e..3c853a817d46 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_test.go +++ b/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_test.go @@ -203,7 +203,7 @@ func validateFakeData(t *testing.T, metrics pmetric.MetricSlice) { attrs[val.Str()] = point.IntValue() } - assert.Equal(t, attrs, map[string]int64{ + assert.Equal(t, map[string]int64{ metadata.AttributeStatusBlocked.String(): 3, metadata.AttributeStatusPaging.String(): 1, metadata.AttributeStatusRunning.String(): 2, @@ -211,7 +211,7 @@ func validateFakeData(t *testing.T, metrics pmetric.MetricSlice) { metadata.AttributeStatusStopped.String(): 5, metadata.AttributeStatusUnknown.String(): 9, metadata.AttributeStatusZombies.String(): 6, - }) + }, attrs) } if expectProcessesCreatedMetric { diff --git a/receiver/hostmetricsreceiver/internal/scraper/processscraper/internal/handlecount/handles_windows_test.go b/receiver/hostmetricsreceiver/internal/scraper/processscraper/internal/handlecount/handles_windows_test.go index 8827b4ce3043..9ce32bf51e51 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/processscraper/internal/handlecount/handles_windows_test.go +++ 
b/receiver/hostmetricsreceiver/internal/scraper/processscraper/internal/handlecount/handles_windows_test.go @@ -25,11 +25,11 @@ func TestHandleCountManager(t *testing.T) { count, err := m.GetProcessHandleCount(1) assert.NoError(t, err) - assert.Equal(t, count, uint32(3)) + assert.Equal(t, uint32(3), count) count, err = m.GetProcessHandleCount(2) assert.NoError(t, err) - assert.Equal(t, count, uint32(5)) + assert.Equal(t, uint32(5), count) _, err = m.GetProcessHandleCount(3) assert.ErrorIs(t, errors.Unwrap(err), ErrNoHandleCountForProcess) diff --git a/receiver/iisreceiver/scraper_test.go b/receiver/iisreceiver/scraper_test.go index a66eb27123d9..632be6f11305 100644 --- a/receiver/iisreceiver/scraper_test.go +++ b/receiver/iisreceiver/scraper_test.go @@ -87,7 +87,7 @@ func TestScrapeFailure(t *testing.T) { require.Equal(t, 1, obs.Len()) log := obs.All()[0] - require.Equal(t, log.Level, zapcore.WarnLevel) + require.Equal(t, zapcore.WarnLevel, log.Level) require.Equal(t, "error", log.Context[0].Key) require.EqualError(t, log.Context[0].Interface.(error), expectedError) } @@ -121,7 +121,7 @@ func TestMaxQueueItemAgeScrapeFailure(t *testing.T) { require.Equal(t, 1, obs.Len()) log := obs.All()[0] - require.Equal(t, log.Level, zapcore.WarnLevel) + require.Equal(t, zapcore.WarnLevel, log.Level) require.Equal(t, "error", log.Context[0].Key) require.EqualError(t, log.Context[0].Interface.(error), expectedError) } diff --git a/receiver/jmxreceiver/internal/subprocess/subprocess_test.go b/receiver/jmxreceiver/internal/subprocess/subprocess_test.go index f5ed2a274007..7ef92c5593a5 100644 --- a/receiver/jmxreceiver/internal/subprocess/subprocess_test.go +++ b/receiver/jmxreceiver/internal/subprocess/subprocess_test.go @@ -21,8 +21,8 @@ func TestSubprocessAndConfig(t *testing.T) { require.Same(t, logger, subprocess.logger) require.NotNil(t, subprocess.Stdout) - require.Equal(t, *config.ShutdownTimeout, 5*time.Second) - require.Equal(t, *config.RestartDelay, 5*time.Second) + 
require.Equal(t, 5*time.Second, *config.ShutdownTimeout) + require.Equal(t, 5*time.Second, *config.RestartDelay) } func TestConfigDurations(t *testing.T) { diff --git a/receiver/k8seventsreceiver/k8s_event_to_logdata_test.go b/receiver/k8seventsreceiver/k8s_event_to_logdata_test.go index 7b0a84a7d163..a603a40bc67e 100644 --- a/receiver/k8seventsreceiver/k8s_event_to_logdata_test.go +++ b/receiver/k8seventsreceiver/k8s_event_to_logdata_test.go @@ -19,14 +19,14 @@ func TestK8sEventToLogData(t *testing.T) { resourceAttrs := rl.Resource().Attributes() lr := rl.ScopeLogs().At(0) attrs := lr.LogRecords().At(0).Attributes() - assert.Equal(t, ld.ResourceLogs().Len(), 1) - assert.Equal(t, resourceAttrs.Len(), 7) - assert.Equal(t, attrs.Len(), 7) + assert.Equal(t, 1, ld.ResourceLogs().Len()) + assert.Equal(t, 7, resourceAttrs.Len()) + assert.Equal(t, 7, attrs.Len()) // Count attribute will not be present in the LogData k8sEvent.Count = 0 ld = k8sEventToLogData(zap.NewNop(), k8sEvent) - assert.Equal(t, ld.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Attributes().Len(), 6) + assert.Equal(t, 6, ld.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Attributes().Len()) } func TestK8sEventToLogDataWithApiAndResourceVersion(t *testing.T) { @@ -59,6 +59,6 @@ func TestUnknownSeverity(t *testing.T) { rl := ld.ResourceLogs().At(0) logEntry := rl.ScopeLogs().At(0).LogRecords().At(0) - assert.Equal(t, logEntry.SeverityNumber(), plog.SeverityNumberUnspecified) - assert.Equal(t, logEntry.SeverityText(), "") + assert.Equal(t, plog.SeverityNumberUnspecified, logEntry.SeverityNumber()) + assert.Equal(t, "", logEntry.SeverityText()) } diff --git a/receiver/k8seventsreceiver/receiver_test.go b/receiver/k8seventsreceiver/receiver_test.go index 88d00b204e89..ee39b2ed113f 100644 --- a/receiver/k8seventsreceiver/receiver_test.go +++ b/receiver/k8seventsreceiver/receiver_test.go @@ -66,7 +66,7 @@ func TestHandleEvent(t *testing.T) { k8sEvent := getEvent() 
recv.handleEvent(k8sEvent) - assert.Equal(t, sink.LogRecordCount(), 1) + assert.Equal(t, 1, sink.LogRecordCount()) } func TestDropEventsOlderThanStartupTime(t *testing.T) { @@ -85,7 +85,7 @@ func TestDropEventsOlderThanStartupTime(t *testing.T) { k8sEvent.FirstTimestamp = v1.Time{Time: time.Now().Add(-time.Hour)} recv.handleEvent(k8sEvent) - assert.Equal(t, sink.LogRecordCount(), 0) + assert.Equal(t, 0, sink.LogRecordCount()) } func TestGetEventTimestamp(t *testing.T) { diff --git a/receiver/k8sobjectsreceiver/unstructured_to_logdata_test.go b/receiver/k8sobjectsreceiver/unstructured_to_logdata_test.go index 56623eecadd8..61620fa940b7 100644 --- a/receiver/k8sobjectsreceiver/unstructured_to_logdata_test.go +++ b/receiver/k8sobjectsreceiver/unstructured_to_logdata_test.go @@ -41,10 +41,10 @@ func TestUnstructuredListToLogData(t *testing.T) { } logs := pullObjectsToLogData(&objects, time.Now(), config) - assert.Equal(t, logs.LogRecordCount(), 4) + assert.Equal(t, 4, logs.LogRecordCount()) resourceLogs := logs.ResourceLogs() - assert.Equal(t, resourceLogs.Len(), 2) + assert.Equal(t, 2, resourceLogs.Len()) namespaces = []string{"ns1", "ns2"} for i, namespace := range namespaces { @@ -52,8 +52,8 @@ func TestUnstructuredListToLogData(t *testing.T) { resourceAttributes := rl.Resource().Attributes() ns, _ := resourceAttributes.Get(semconv.AttributeK8SNamespaceName) assert.Equal(t, ns.AsString(), namespace) - assert.Equal(t, rl.ScopeLogs().Len(), 1) - assert.Equal(t, rl.ScopeLogs().At(0).LogRecords().Len(), 2) + assert.Equal(t, 1, rl.ScopeLogs().Len()) + assert.Equal(t, 2, rl.ScopeLogs().At(0).LogRecords().Len()) } }) @@ -78,17 +78,17 @@ func TestUnstructuredListToLogData(t *testing.T) { logs := pullObjectsToLogData(&objects, time.Now(), config) - assert.Equal(t, logs.LogRecordCount(), 3) + assert.Equal(t, 3, logs.LogRecordCount()) resourceLogs := logs.ResourceLogs() - assert.Equal(t, resourceLogs.Len(), 1) + assert.Equal(t, 1, resourceLogs.Len()) rl := resourceLogs.At(0) 
resourceAttributes := rl.Resource().Attributes() logRecords := rl.ScopeLogs().At(0).LogRecords() _, ok := resourceAttributes.Get(semconv.AttributeK8SNamespaceName) assert.False(t, ok) - assert.Equal(t, rl.ScopeLogs().Len(), 1) - assert.Equal(t, logRecords.Len(), 3) + assert.Equal(t, 1, rl.ScopeLogs().Len()) + assert.Equal(t, 3, logRecords.Len()) }) @@ -116,14 +116,14 @@ func TestUnstructuredListToLogData(t *testing.T) { logs, err := watchObjectsToLogData(event, time.Now(), config) assert.NoError(t, err) - assert.Equal(t, logs.LogRecordCount(), 1) + assert.Equal(t, 1, logs.LogRecordCount()) resourceLogs := logs.ResourceLogs() - assert.Equal(t, resourceLogs.Len(), 1) + assert.Equal(t, 1, resourceLogs.Len()) rl := resourceLogs.At(0) logRecords := rl.ScopeLogs().At(0).LogRecords() - assert.Equal(t, rl.ScopeLogs().Len(), 1) - assert.Equal(t, logRecords.Len(), 1) + assert.Equal(t, 1, rl.ScopeLogs().Len()) + assert.Equal(t, 1, logRecords.Len()) attrs := logRecords.At(0).Attributes() eventName, ok := attrs.Get("event.name") @@ -157,14 +157,14 @@ func TestUnstructuredListToLogData(t *testing.T) { logs, err := watchObjectsToLogData(event, observedAt, config) assert.NoError(t, err) - assert.Equal(t, logs.LogRecordCount(), 1) + assert.Equal(t, 1, logs.LogRecordCount()) resourceLogs := logs.ResourceLogs() - assert.Equal(t, resourceLogs.Len(), 1) + assert.Equal(t, 1, resourceLogs.Len()) rl := resourceLogs.At(0) logRecords := rl.ScopeLogs().At(0).LogRecords() - assert.Equal(t, rl.ScopeLogs().Len(), 1) - assert.Equal(t, logRecords.Len(), 1) + assert.Equal(t, 1, rl.ScopeLogs().Len()) + assert.Equal(t, 1, logRecords.Len()) assert.Greater(t, logRecords.At(0).ObservedTimestamp().AsTime().Unix(), int64(0)) assert.Equal(t, logRecords.At(0).ObservedTimestamp().AsTime().Unix(), observedAt.Unix()) }) diff --git a/receiver/kafkametricsreceiver/broker_scraper_test.go b/receiver/kafkametricsreceiver/broker_scraper_test.go index 590454bb8b80..eabcbb1fc3e2 100644 --- 
a/receiver/kafkametricsreceiver/broker_scraper_test.go +++ b/receiver/kafkametricsreceiver/broker_scraper_test.go @@ -49,7 +49,7 @@ func TestBrokerShutdown_closed(t *testing.T) { func TestBrokerScraper_Name(t *testing.T) { s := brokerScraper{} - assert.Equal(t, s.Name(), brokersScraperName) + assert.Equal(t, brokersScraperName, s.Name()) } func TestBrokerScraper_createBrokerScraper(t *testing.T) { diff --git a/receiver/kafkametricsreceiver/consumer_scraper_test.go b/receiver/kafkametricsreceiver/consumer_scraper_test.go index 06b2f6f0cc51..f01217066922 100644 --- a/receiver/kafkametricsreceiver/consumer_scraper_test.go +++ b/receiver/kafkametricsreceiver/consumer_scraper_test.go @@ -44,7 +44,7 @@ func TestConsumerShutdown_closed(t *testing.T) { func TestConsumerScraper_Name(t *testing.T) { s := consumerScraper{} - assert.Equal(t, s.Name(), consumersScraperName) + assert.Equal(t, consumersScraperName, s.Name()) } func TestConsumerScraper_createConsumerScraper(t *testing.T) { diff --git a/receiver/kafkametricsreceiver/topic_scraper_test.go b/receiver/kafkametricsreceiver/topic_scraper_test.go index af04850e34b6..71c96ef7ee3c 100644 --- a/receiver/kafkametricsreceiver/topic_scraper_test.go +++ b/receiver/kafkametricsreceiver/topic_scraper_test.go @@ -48,7 +48,7 @@ func TestTopicShutdown_closed(t *testing.T) { func TestTopicScraper_Name(t *testing.T) { s := topicScraper{} - assert.Equal(t, s.Name(), topicsScraperName) + assert.Equal(t, topicsScraperName, s.Name()) } func TestTopicScraper_createsScraper(t *testing.T) { diff --git a/receiver/kafkareceiver/kafka_receiver_test.go b/receiver/kafkareceiver/kafka_receiver_test.go index 9bea18c48ecd..755fe2ea351a 100644 --- a/receiver/kafkareceiver/kafka_receiver_test.go +++ b/receiver/kafkareceiver/kafka_receiver_test.go @@ -1080,7 +1080,7 @@ func TestLogsConsumerGroupHandler_unmarshal_text(t *testing.T) { groupClaim.messageChan <- &sarama.ConsumerMessage{Value: encoded} close(groupClaim.messageChan) wg.Wait() - 
require.Equal(t, sink.LogRecordCount(), 1) + require.Equal(t, 1, sink.LogRecordCount()) log := sink.AllLogs()[0].ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0) assert.Equal(t, log.Body().Str(), test.text) assert.LessOrEqual(t, t1, log.ObservedTimestamp().AsTime()) diff --git a/receiver/mongodbatlasreceiver/alerts_integration_test.go b/receiver/mongodbatlasreceiver/alerts_integration_test.go index 5d59c01a686d..268e999e0beb 100644 --- a/receiver/mongodbatlasreceiver/alerts_integration_test.go +++ b/receiver/mongodbatlasreceiver/alerts_integration_test.go @@ -93,7 +93,7 @@ func TestAlertsReceiver(t *testing.T) { defer resp.Body.Close() - require.Equal(t, resp.StatusCode, http.StatusOK) + require.Equal(t, http.StatusOK, resp.StatusCode) require.Eventually(t, func() bool { return sink.LogRecordCount() > 0 @@ -167,7 +167,7 @@ func TestAlertsReceiverTLS(t *testing.T) { defer resp.Body.Close() - require.Equal(t, resp.StatusCode, http.StatusOK) + require.Equal(t, http.StatusOK, resp.StatusCode) require.Eventually(t, func() bool { return sink.LogRecordCount() > 0 diff --git a/receiver/mongodbatlasreceiver/alerts_test.go b/receiver/mongodbatlasreceiver/alerts_test.go index 098b3f6cda48..95736392f955 100644 --- a/receiver/mongodbatlasreceiver/alerts_test.go +++ b/receiver/mongodbatlasreceiver/alerts_test.go @@ -515,7 +515,7 @@ func TestAlertsRetrieval(t *testing.T) { return testClient() }, validateEntries: func(t *testing.T, logs plog.Logs) { - require.Equal(t, logs.LogRecordCount(), 1) + require.Equal(t, 1, logs.LogRecordCount()) }, }, { @@ -572,7 +572,7 @@ func TestAlertsRetrieval(t *testing.T) { return tc }, validateEntries: func(t *testing.T, l plog.Logs) { - require.Equal(t, l.LogRecordCount(), 1) + require.Equal(t, 1, l.LogRecordCount()) rl := l.ResourceLogs().At(0) sl := rl.ScopeLogs().At(0) lr := sl.LogRecords().At(0) diff --git a/receiver/mongodbatlasreceiver/mongodb_event_to_logdata_test.go b/receiver/mongodbatlasreceiver/mongodb_event_to_logdata_test.go 
index 99a426122417..2d7bf692184b 100644 --- a/receiver/mongodbatlasreceiver/mongodb_event_to_logdata_test.go +++ b/receiver/mongodbatlasreceiver/mongodb_event_to_logdata_test.go @@ -126,8 +126,8 @@ func TestUnknownSeverity(t *testing.T) { rl := ld.ResourceLogs().At(0) logEntry := rl.ScopeLogs().At(0).LogRecords().At(0) - assert.Equal(t, logEntry.SeverityNumber(), plog.SeverityNumberUnspecified) - assert.Equal(t, logEntry.SeverityText(), "") + assert.Equal(t, plog.SeverityNumberUnspecified, logEntry.SeverityNumber()) + assert.Equal(t, "", logEntry.SeverityText()) } func TestMongoEventToAuditLogData5_0(t *testing.T) { @@ -152,8 +152,8 @@ func TestMongoEventToAuditLogData5_0(t *testing.T) { lr := sl.LogRecords().At(0) attrs := lr.Attributes() - assert.Equal(t, ld.ResourceLogs().Len(), 1) - assert.Equal(t, resourceAttrs.Len(), 6) + assert.Equal(t, 1, ld.ResourceLogs().Len()) + assert.Equal(t, 6, resourceAttrs.Len()) assertString(t, resourceAttrs, "mongodb_atlas.org", "Org") assertString(t, resourceAttrs, "mongodb_atlas.project", "Project") assertString(t, resourceAttrs, "mongodb_atlas.cluster", "clusterName") @@ -176,13 +176,13 @@ func TestMongoEventToAuditLogData5_0(t *testing.T) { roles, ok := attrs.Get("roles") require.True(t, ok, "roles key does not exist") - require.Equal(t, roles.Slice().Len(), 1) + require.Equal(t, 1, roles.Slice().Len()) assertString(t, roles.Slice().At(0).Map(), "role", "test_role") assertString(t, roles.Slice().At(0).Map(), "db", "test_db") users, ok := attrs.Get("users") require.True(t, ok, "users key does not exist") - require.Equal(t, users.Slice().Len(), 1) + require.Equal(t, 1, users.Slice().Len()) assertString(t, users.Slice().At(0).Map(), "user", "mongo_user") assertString(t, users.Slice().At(0).Map(), "db", "my_db") @@ -218,8 +218,8 @@ func TestMongoEventToAuditLogData4_2(t *testing.T) { lr := sl.LogRecords().At(0) attrs := lr.Attributes() - assert.Equal(t, ld.ResourceLogs().Len(), 1) - assert.Equal(t, resourceAttrs.Len(), 6) + 
assert.Equal(t, 1, ld.ResourceLogs().Len()) + assert.Equal(t, 6, resourceAttrs.Len()) assertString(t, resourceAttrs, "mongodb_atlas.org", "Org") assertString(t, resourceAttrs, "mongodb_atlas.project", "Project") assertString(t, resourceAttrs, "mongodb_atlas.cluster", "clusterName") @@ -239,13 +239,13 @@ func TestMongoEventToAuditLogData4_2(t *testing.T) { roles, ok := attrs.Get("roles") require.True(t, ok, "roles key does not exist") - require.Equal(t, roles.Slice().Len(), 1) + require.Equal(t, 1, roles.Slice().Len()) assertString(t, roles.Slice().At(0).Map(), "role", "test_role") assertString(t, roles.Slice().At(0).Map(), "db", "test_db") users, ok := attrs.Get("users") require.True(t, ok, "users key does not exist") - require.Equal(t, users.Slice().Len(), 1) + require.Equal(t, 1, users.Slice().Len()) assertString(t, users.Slice().At(0).Map(), "user", "mongo_user") assertString(t, users.Slice().At(0).Map(), "db", "my_db") diff --git a/receiver/mongodbatlasreceiver/receiver_test.go b/receiver/mongodbatlasreceiver/receiver_test.go index 89631c861573..9d1c4a9eaf22 100644 --- a/receiver/mongodbatlasreceiver/receiver_test.go +++ b/receiver/mongodbatlasreceiver/receiver_test.go @@ -17,7 +17,7 @@ import ( func TestDefaultConfig(t *testing.T) { factory := NewFactory() cfg := factory.CreateDefaultConfig() - require.Equal(t, cfg.(*Config).ControllerConfig.CollectionInterval, 3*time.Minute) + require.Equal(t, 3*time.Minute, cfg.(*Config).ControllerConfig.CollectionInterval) ctx, cancel := context.WithCancel(context.Background()) defer cancel() diff --git a/receiver/mongodbreceiver/client_test.go b/receiver/mongodbreceiver/client_test.go index a2808b3f1332..5f4fdc24cd7d 100644 --- a/receiver/mongodbreceiver/client_test.go +++ b/receiver/mongodbreceiver/client_test.go @@ -90,7 +90,7 @@ func TestListDatabaseNames(t *testing.T) { } dbNames, err := client.ListDatabaseNames(context.Background(), bson.D{}) require.NoError(t, err) - require.Equal(t, dbNames[0], "admin") + 
require.Equal(t, "admin", dbNames[0]) }) } diff --git a/receiver/mysqlreceiver/scraper_test.go b/receiver/mysqlreceiver/scraper_test.go index f68cd3692bfd..01dc4dd11840 100644 --- a/receiver/mysqlreceiver/scraper_test.go +++ b/receiver/mysqlreceiver/scraper_test.go @@ -7,7 +7,6 @@ import ( "bufio" "context" "database/sql" - "errors" "os" "path/filepath" "strings" @@ -118,10 +117,10 @@ func TestScrape(t *testing.T) { pmetrictest.IgnoreTimestamp())) var partialError scrapererror.PartialScrapeError - require.True(t, errors.As(scrapeErr, &partialError), "returned error was not PartialScrapeError") + require.ErrorAs(t, scrapeErr, &partialError, "returned error was not PartialScrapeError") // 5 comes from 4 failed "must-have" metrics that aren't present, // and the other failure comes from a row that fails to parse as a number - require.Equal(t, partialError.Failed, 5, "Expected partial error count to be 5") + require.Equal(t, 5, partialError.Failed, "Expected partial error count to be 5") }) } diff --git a/receiver/opencensusreceiver/go.mod b/receiver/opencensusreceiver/go.mod index 630da01c2094..f544a40ff63d 100644 --- a/receiver/opencensusreceiver/go.mod +++ b/receiver/opencensusreceiver/go.mod @@ -8,7 +8,7 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.108.0 github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.108.0 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus v0.108.0 - github.com/rs/cors v1.11.0 + github.com/rs/cors v1.11.1 github.com/soheilhy/cmux v0.1.5 github.com/stretchr/testify v1.9.0 go.opentelemetry.io/collector/component v0.108.2-0.20240904075637-48b11ba1c5f8 diff --git a/receiver/opencensusreceiver/go.sum b/receiver/opencensusreceiver/go.sum index b3bdce9aa541..3e402d946594 100644 --- a/receiver/opencensusreceiver/go.sum +++ b/receiver/opencensusreceiver/go.sum @@ -105,8 +105,8 @@ 
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0leargg github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= -github.com/rs/cors v1.11.0 h1:0B9GE/r9Bc2UxRMMtymBkHTenPkHDv0CW4Y98GBY+po= -github.com/rs/cors v1.11.0/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= +github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA= +github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= diff --git a/receiver/oracledbreceiver/factory_test.go b/receiver/oracledbreceiver/factory_test.go index c7f1bc440435..a8abbdebd8c7 100644 --- a/receiver/oracledbreceiver/factory_test.go +++ b/receiver/oracledbreceiver/factory_test.go @@ -93,7 +93,7 @@ func TestGetDataSource(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { dataSource := getDataSource(*tc.config) - require.Equal(t, dataSource, tc.expected) + require.Equal(t, tc.expected, dataSource) _, err := url.PathUnescape(dataSource) require.NoError(t, err) }) diff --git a/receiver/otelarrowreceiver/go.mod b/receiver/otelarrowreceiver/go.mod index ed374f7926ac..748d9bdb7ba0 100644 --- a/receiver/otelarrowreceiver/go.mod +++ b/receiver/otelarrowreceiver/go.mod @@ -106,3 +106,5 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/otela replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent => 
../../internal/sharedcomponent replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/otelarrowexporter => ../../exporter/otelarrowexporter + +replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/grpcutil => ../../internal/grpcutil diff --git a/receiver/podmanreceiver/record_metrics_test.go b/receiver/podmanreceiver/record_metrics_test.go index 922a2d1f83f4..ac949613a488 100644 --- a/receiver/podmanreceiver/record_metrics_test.go +++ b/receiver/podmanreceiver/record_metrics_test.go @@ -20,7 +20,7 @@ type point struct { } func assertStatsEqualToMetrics(t *testing.T, podmanStats *containerStats, md pmetric.Metrics) { - assert.Equal(t, md.ResourceMetrics().Len(), 1) + assert.Equal(t, 1, md.ResourceMetrics().Len()) rsm := md.ResourceMetrics().At(0) resourceAttrs := map[string]string{ @@ -35,10 +35,10 @@ func assertStatsEqualToMetrics(t *testing.T, podmanStats *containerStats, md pme assert.Equal(t, v, attr.Str()) } - assert.Equal(t, rsm.ScopeMetrics().Len(), 1) + assert.Equal(t, 1, rsm.ScopeMetrics().Len()) metrics := rsm.ScopeMetrics().At(0).Metrics() - assert.Equal(t, metrics.Len(), 11) + assert.Equal(t, 11, metrics.Len()) for i := 0; i < metrics.Len(); i++ { m := metrics.At(i) @@ -103,11 +103,11 @@ func assertPoints(t *testing.T, dpts pmetric.NumberDataPointSlice, pts []point) for i, expected := range pts { got := dpts.At(i) assert.Equal(t, got.IntValue(), int64(expected.intVal)) - assert.Equal(t, got.DoubleValue(), expected.doubleVal) + assert.Equal(t, expected.doubleVal, got.DoubleValue()) for k, expectedV := range expected.attributes { gotV, exists := got.Attributes().Get(k) assert.True(t, exists) - assert.Equal(t, gotV.Str(), expectedV) + assert.Equal(t, expectedV, gotV.Str()) } } } diff --git a/receiver/prometheusreceiver/config_test.go b/receiver/prometheusreceiver/config_test.go index 2903a940f972..02b72f9eb989 100644 --- a/receiver/prometheusreceiver/config_test.go +++ 
b/receiver/prometheusreceiver/config_test.go @@ -41,11 +41,11 @@ func TestLoadConfig(t *testing.T) { require.NoError(t, sub.Unmarshal(cfg)) r1 := cfg.(*Config) - assert.Equal(t, r1.PrometheusConfig.ScrapeConfigs[0].JobName, "demo") - assert.Equal(t, time.Duration(r1.PrometheusConfig.ScrapeConfigs[0].ScrapeInterval), 5*time.Second) + assert.Equal(t, "demo", r1.PrometheusConfig.ScrapeConfigs[0].JobName) + assert.Equal(t, 5*time.Second, time.Duration(r1.PrometheusConfig.ScrapeConfigs[0].ScrapeInterval)) assert.True(t, r1.UseStartTimeMetric) assert.True(t, r1.TrimMetricSuffixes) - assert.Equal(t, r1.StartTimeMetricRegex, "^(.+_)*process_start_time_seconds$") + assert.Equal(t, "^(.+_)*process_start_time_seconds$", r1.StartTimeMetricRegex) assert.True(t, r1.ReportExtraScrapeMetrics) assert.Equal(t, "http://my-targetallocator-service", r1.TargetAllocator.Endpoint) diff --git a/receiver/prometheusreceiver/internal/util_test.go b/receiver/prometheusreceiver/internal/util_test.go index 3e9e121f5ee3..755f531730cc 100644 --- a/receiver/prometheusreceiver/internal/util_test.go +++ b/receiver/prometheusreceiver/internal/util_test.go @@ -111,7 +111,7 @@ func TestConvToMetricType(t *testing.T) { t.Run(tt.name, func(t *testing.T) { got, monotonic := convToMetricType(tt.mtype) require.Equal(t, got.String(), tt.want.String()) - require.Equal(t, monotonic, tt.wantMonotonic) + require.Equal(t, tt.wantMonotonic, monotonic) }) } } @@ -172,7 +172,7 @@ func TestGetBoundary(t *testing.T) { } assert.NoError(t, err) - assert.Equal(t, value, tt.wantValue) + assert.Equal(t, tt.wantValue, value) }) } } diff --git a/receiver/prometheusreceiver/metrics_receiver_helper_test.go b/receiver/prometheusreceiver/metrics_receiver_helper_test.go index b4297dfb7040..0ab15d8c885b 100644 --- a/receiver/prometheusreceiver/metrics_receiver_helper_test.go +++ b/receiver/prometheusreceiver/metrics_receiver_helper_test.go @@ -626,7 +626,7 @@ func compareDoubleValue(doubleVal float64) numberPointComparator { func 
assertNormalNan() numberPointComparator { return func(t *testing.T, numberDataPoint pmetric.NumberDataPoint) { - assert.Equal(t, math.Float64bits(numberDataPoint.DoubleValue()), value.NormalNaN, + assert.Equal(t, value.NormalNaN, math.Float64bits(numberDataPoint.DoubleValue()), "Metric double value is not normalNaN as expected") } } @@ -663,7 +663,7 @@ func compareSummary(count uint64, sum float64, quantiles [][]float64) summaryPoi assert.Equal(t, quantiles[i][0], summaryDataPoint.QuantileValues().At(i).Quantile(), "Summary quantile do not match") if math.IsNaN(quantiles[i][1]) { - assert.Equal(t, math.Float64bits(summaryDataPoint.QuantileValues().At(i).Value()), value.NormalNaN, + assert.Equal(t, value.NormalNaN, math.Float64bits(summaryDataPoint.QuantileValues().At(i).Value()), "Summary quantile value is not normalNaN as expected") } else { assert.Equal(t, quantiles[i][1], summaryDataPoint.QuantileValues().At(i).Value(), diff --git a/receiver/prometheusreceiver/metrics_receiver_labels_test.go b/receiver/prometheusreceiver/metrics_receiver_labels_test.go index af253535cdc3..ebc4744bbbca 100644 --- a/receiver/prometheusreceiver/metrics_receiver_labels_test.go +++ b/receiver/prometheusreceiver/metrics_receiver_labels_test.go @@ -803,19 +803,19 @@ func verifyMultipleScopes(t *testing.T, td *testData, rms []pmetric.ResourceMetr require.NotEmpty(t, rms, "At least one resource metric should be present") sms := rms[0].ScopeMetrics() - require.Equal(t, sms.Len(), 3, "Three scope metrics should be present") + require.Equal(t, 3, sms.Len(), "Three scope metrics should be present") sms.Sort(func(a, b pmetric.ScopeMetrics) bool { return a.Scope().Name() < b.Scope().Name() }) - require.Equal(t, sms.At(0).Scope().Name(), "fake.scope.name") - require.Equal(t, sms.At(0).Scope().Version(), "v0.1.0") - require.Equal(t, sms.At(0).Scope().Attributes().Len(), 0) - require.Equal(t, sms.At(1).Scope().Name(), 
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver") - require.Equal(t, sms.At(1).Scope().Attributes().Len(), 0) - require.Equal(t, sms.At(2).Scope().Name(), "scope.with.attributes") - require.Equal(t, sms.At(2).Scope().Version(), "v1.5.0") - require.Equal(t, sms.At(2).Scope().Attributes().Len(), 1) + require.Equal(t, "fake.scope.name", sms.At(0).Scope().Name()) + require.Equal(t, "v0.1.0", sms.At(0).Scope().Version()) + require.Equal(t, 0, sms.At(0).Scope().Attributes().Len()) + require.Equal(t, "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver", sms.At(1).Scope().Name()) + require.Equal(t, 0, sms.At(1).Scope().Attributes().Len()) + require.Equal(t, "scope.with.attributes", sms.At(2).Scope().Name()) + require.Equal(t, "v1.5.0", sms.At(2).Scope().Version()) + require.Equal(t, 1, sms.At(2).Scope().Attributes().Len()) scopeAttrVal, found := sms.At(2).Scope().Attributes().Get("animal") require.True(t, found) - require.Equal(t, scopeAttrVal.Str(), "bear") + require.Equal(t, "bear", scopeAttrVal.Str()) } diff --git a/receiver/redisreceiver/latencystats_test.go b/receiver/redisreceiver/latencystats_test.go index 9f1c6e6d1eae..06cf32a672ec 100644 --- a/receiver/redisreceiver/latencystats_test.go +++ b/receiver/redisreceiver/latencystats_test.go @@ -12,10 +12,10 @@ import ( func TestParseLatencyStats(t *testing.T) { ls, err := parseLatencyStats("p50=181.247,p55=182.271,p99=309.247,p99.9=1023.999") require.NoError(t, err) - require.Equal(t, ls["p50"], 181.247) - require.Equal(t, ls["p55"], 182.271) - require.Equal(t, ls["p99"], 309.247) - require.Equal(t, ls["p99.9"], 1023.999) + require.Equal(t, 181.247, ls["p50"]) + require.Equal(t, 182.271, ls["p55"]) + require.Equal(t, 309.247, ls["p99"]) + require.Equal(t, 1023.999, ls["p99.9"]) } func TestParseMalformedLatencyStats(t *testing.T) { diff --git a/receiver/skywalkingreceiver/README.md b/receiver/skywalkingreceiver/README.md index 
1cffd714d9bf..f7f22ffb3016 100644 --- a/receiver/skywalkingreceiver/README.md +++ b/receiver/skywalkingreceiver/README.md @@ -18,6 +18,10 @@ Receives trace data and metric data in [Skywalking](https://skywalking.apache.or Note: The current metrics receiver only supports receiving JVM data. +## Prerequisites + +This receiver supports [Apache Skywalking-Java Agent](https://github.com/apache/skywalking-java) version 8.9.0+ + ## Getting Started By default, the Skywalking receiver will not serve any protocol. A protocol must be diff --git a/receiver/snmpreceiver/scraper_test.go b/receiver/snmpreceiver/scraper_test.go index 50a2275852e7..b5c3100c4388 100644 --- a/receiver/snmpreceiver/scraper_test.go +++ b/receiver/snmpreceiver/scraper_test.go @@ -137,7 +137,7 @@ func TestScrape(t *testing.T) { } metrics, err := scraper.scrape(context.Background()) require.NoError(t, err) - require.Equal(t, metrics.MetricCount(), 0) + require.Equal(t, 0, metrics.MetricCount()) }, }, { @@ -172,7 +172,7 @@ func TestScrape(t *testing.T) { } metrics, err := scraper.scrape(context.Background()) require.EqualError(t, err, clientErr.Error()) - require.Equal(t, metrics.MetricCount(), 0) + require.Equal(t, 0, metrics.MetricCount()) }, }, { @@ -208,7 +208,7 @@ func TestScrape(t *testing.T) { } metrics, err := scraper.scrape(context.Background()) require.EqualError(t, err, expectedScrapeErr.Error()) - require.Equal(t, metrics.MetricCount(), 0) + require.Equal(t, 0, metrics.MetricCount()) }, }, { @@ -639,7 +639,7 @@ func TestScrape(t *testing.T) { } metrics, err := scraper.scrape(context.Background()) require.EqualError(t, err, clientErr.Error()) - require.Equal(t, metrics.MetricCount(), 0) + require.Equal(t, 0, metrics.MetricCount()) }, }, { @@ -687,7 +687,7 @@ func TestScrape(t *testing.T) { } metrics, err := scraper.scrape(context.Background()) require.EqualError(t, err, expectedScrapeErrMsg) - require.Equal(t, metrics.MetricCount(), 0) + require.Equal(t, 0, metrics.MetricCount()) }, }, { 
@@ -1289,7 +1289,7 @@ func TestScrape(t *testing.T) { metrics, err := scraper.scrape(context.Background()) require.EqualError(t, err, fmt.Sprintf("%s; %s; %s", clientErr, expectedErr1, expectedErr2)) - require.Equal(t, metrics.MetricCount(), 0) + require.Equal(t, 0, metrics.MetricCount()) }, }, { @@ -1373,7 +1373,7 @@ func TestScrape(t *testing.T) { metrics, err := scraper.scrape(context.Background()) require.EqualError(t, err, fmt.Sprintf("%s; %s; %s; %s", expectedErr1, expectedErr2, expectedErr3, expectedErr4)) - require.Equal(t, metrics.MetricCount(), 0) + require.Equal(t, 0, metrics.MetricCount()) }, }, { @@ -1838,7 +1838,7 @@ func TestScrape(t *testing.T) { metrics, err := scraper.scrape(context.Background()) require.EqualError(t, err, fmt.Sprintf("%s; %s; %s", clientErr, expectedErr1, expectedErr2)) - require.Equal(t, metrics.MetricCount(), 0) + require.Equal(t, 0, metrics.MetricCount()) }, }, { @@ -1918,7 +1918,7 @@ func TestScrape(t *testing.T) { metrics, err := scraper.scrape(context.Background()) require.EqualError(t, err, fmt.Sprintf("%s; %s; %s; %s", expectedErr1, expectedErr2, expectedErr3, expectedErr4)) - require.Equal(t, metrics.MetricCount(), 0) + require.Equal(t, 0, metrics.MetricCount()) }, }, { diff --git a/receiver/splunkhecreceiver/receiver_test.go b/receiver/splunkhecreceiver/receiver_test.go index b222a6befcf3..e2f2590c3590 100644 --- a/receiver/splunkhecreceiver/receiver_test.go +++ b/receiver/splunkhecreceiver/receiver_test.go @@ -1768,7 +1768,7 @@ func Test_splunkhecreceiver_handleHealthPath(t *testing.T) { respBytes, err := io.ReadAll(resp.Body) assert.NoError(t, err) defer resp.Body.Close() - assert.Equal(t, string(respBytes), responseHecHealthy) + assert.Equal(t, responseHecHealthy, string(respBytes)) assert.Equal(t, 200, resp.StatusCode) } diff --git a/receiver/statsdreceiver/internal/protocol/metric_translator_test.go b/receiver/statsdreceiver/internal/protocol/metric_translator_test.go index e880ad976372..a4609d502643 100644 --- 
a/receiver/statsdreceiver/internal/protocol/metric_translator_test.go +++ b/receiver/statsdreceiver/internal/protocol/metric_translator_test.go @@ -37,7 +37,7 @@ func TestBuildCounterMetric(t *testing.T) { dp := expectedMetric.Sum().DataPoints().AppendEmpty() dp.SetIntValue(32) dp.Attributes().PutStr("mykey", "myvalue") - assert.Equal(t, metric, expectedMetrics) + assert.Equal(t, expectedMetrics, metric) } func TestSetTimestampsForCounterMetric(t *testing.T) { @@ -90,7 +90,7 @@ func TestBuildGaugeMetric(t *testing.T) { dp.SetTimestamp(pcommon.NewTimestampFromTime(timeNow)) dp.Attributes().PutStr("mykey", "myvalue") dp.Attributes().PutStr("mykey2", "myvalue2") - assert.Equal(t, metric, expectedMetrics) + assert.Equal(t, expectedMetrics, metric) } func TestBuildSummaryMetricUnsampled(t *testing.T) { diff --git a/receiver/vcenterreceiver/scraper_test.go b/receiver/vcenterreceiver/scraper_test.go index c550f4d441f4..ce703c1e04b5 100644 --- a/receiver/vcenterreceiver/scraper_test.go +++ b/receiver/vcenterreceiver/scraper_test.go @@ -75,7 +75,7 @@ func testScrape(ctx context.Context, t *testing.T, cfg *Config, fileName string) metrics, err := scraper.scrape(ctx) require.NoError(t, err) - require.NotEqual(t, metrics.MetricCount(), 0) + require.NotEqual(t, 0, metrics.MetricCount()) goldenPath := filepath.Join("testdata", "metrics", fileName) expectedMetrics, err := golden.ReadMetrics(goldenPath) @@ -119,7 +119,7 @@ func TestScrape_NoClient(t *testing.T) { } metrics, err := scraper.scrape(ctx) require.ErrorContains(t, err, "unable to connect to vSphere SDK") - require.Equal(t, metrics.MetricCount(), 0) + require.Equal(t, 0, metrics.MetricCount()) require.NoError(t, scraper.Shutdown(ctx)) } diff --git a/receiver/windowsperfcountersreceiver/windowsperfcounters_scraper_test.go b/receiver/windowsperfcountersreceiver/windowsperfcounters_scraper_test.go index cc26a90bbd82..b7e2ce005235 100644 --- a/receiver/windowsperfcountersreceiver/windowsperfcounters_scraper_test.go +++ 
b/receiver/windowsperfcountersreceiver/windowsperfcounters_scraper_test.go @@ -219,7 +219,7 @@ func Test_WindowsPerfCounterScraper(t *testing.T) { } else { require.Equal(t, 1, obs.Len()) log := obs.All()[0] - assert.Equal(t, log.Level, zapcore.WarnLevel) + assert.Equal(t, zapcore.WarnLevel, log.Level) assert.Equal(t, test.startMessage, log.Message) assert.Equal(t, "error", log.Context[0].Key) assert.EqualError(t, log.Context[0].Interface.(error), test.startErr) diff --git a/receiver/zipkinreceiver/proto_parse_test.go b/receiver/zipkinreceiver/proto_parse_test.go index 2dbdbd7a2200..105e3b200b21 100644 --- a/receiver/zipkinreceiver/proto_parse_test.go +++ b/receiver/zipkinreceiver/proto_parse_test.go @@ -95,7 +95,7 @@ func TestConvertSpansToTraceSpans_protobuf(t *testing.T) { // 3. Get that payload converted to OpenCensus proto spans. reqs, err := zi.v2ToTraceSpans(protoBlob, hdr) require.NoError(t, err, "Failed to parse convert Zipkin spans in Protobuf to Trace spans: %v", err) - require.Equal(t, reqs.ResourceSpans().Len(), 2, "Expecting exactly 2 requests since spans have different node/localEndpoint: %v", reqs.ResourceSpans().Len()) + require.Equal(t, 2, reqs.ResourceSpans().Len(), "Expecting exactly 2 requests since spans have different node/localEndpoint: %v", reqs.ResourceSpans().Len()) want := ptrace.NewTraces() want.ResourceSpans().EnsureCapacity(2) diff --git a/testbed/go.sum b/testbed/go.sum index 28ea407b789b..4c6e67ac2a2a 100644 --- a/testbed/go.sum +++ b/testbed/go.sum @@ -703,9 +703,13 @@ github.com/valyala/fastjson v1.6.4/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLr github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= github.com/vmihailenco/msgpack/v4 v4.3.13 h1:A2wsiTbvp63ilDaWmsk2wjx6xZdxQOvpiNlKBGKKXKI= github.com/vmihailenco/msgpack/v4 v4.3.13/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= +github.com/vmihailenco/msgpack/v5 v5.4.1 
h1:cQriyiUvjTwOHg8QZaPihLWeRAAVoCpE00IUPn0Bjt8= +github.com/vmihailenco/msgpack/v5 v5.4.1/go.mod h1:GaZTsDaehaPpQVyxrf5mtQlH+pc21PIudVV/E3rRQok= github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= github.com/vmihailenco/tagparser v0.1.2 h1:gnjoVuB/kljJ5wICEEOpx98oXMWPLj22G67Vbd1qPqc= github.com/vmihailenco/tagparser v0.1.2/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= +github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= +github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= github.com/vultr/govultr/v2 v2.17.2 h1:gej/rwr91Puc/tgh+j33p/BLR16UrIPnSr+AIwYWZQs= github.com/vultr/govultr/v2 v2.17.2/go.mod h1:ZFOKGWmgjytfyjeyAdhQlSWwTjh2ig+X49cAp50dzXI= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= diff --git a/testbed/tests/syslog_integration_test.go b/testbed/tests/syslog_integration_test.go index 97dd08583cb1..bd7576a923e3 100644 --- a/testbed/tests/syslog_integration_test.go +++ b/testbed/tests/syslog_integration_test.go @@ -183,8 +183,8 @@ service: } require.Len(t, backend.ReceivedLogs, 1) - require.Equal(t, backend.ReceivedLogs[0].ResourceLogs().Len(), 1) - require.Equal(t, backend.ReceivedLogs[0].ResourceLogs().At(0).ScopeLogs().Len(), 1) + require.Equal(t, 1, backend.ReceivedLogs[0].ResourceLogs().Len()) + require.Equal(t, 1, backend.ReceivedLogs[0].ResourceLogs().At(0).ScopeLogs().Len()) require.Len(t, expectedData, backend.ReceivedLogs[0].ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().Len()) // Clean received logs diff --git a/testbed/tests/trace_test.go b/testbed/tests/trace_test.go index aef4184b8773..3e713e2330f7 100644 --- a/testbed/tests/trace_test.go +++ b/testbed/tests/trace_test.go @@ -382,7 +382,7 @@ func TestTraceAttributesProcessor(t *testing.T) { // verifySpan verifies that attributes was added to the 
internal data span. verifySpan := func(span ptrace.Span) { require.NotNil(t, span) - require.Equal(t, span.Attributes().Len(), 1) + require.Equal(t, 1, span.Attributes().Len()) attrVal, ok := span.Attributes().Get("new_attr") assert.True(t, ok) assert.EqualValues(t, "string value", attrVal.Str()) @@ -395,14 +395,14 @@ func TestTraceAttributesProcessor(t *testing.T) { verifySingleSpan(t, tc, nodeToExclude, spanToInclude, func(span ptrace.Span) { // Verify attributes was not added to the new internal data span. - assert.Equal(t, span.Attributes().Len(), 0) + assert.Equal(t, 0, span.Attributes().Len()) }) // Create another span that does not match "include" filter. spanToExclude := "span-not-to-add-attr" verifySingleSpan(t, tc, nodeToInclude, spanToExclude, func(span ptrace.Span) { // Verify attributes was not added to the new internal data span. - assert.Equal(t, span.Attributes().Len(), 0) + assert.Equal(t, 0, span.Attributes().Len()) }) }) } @@ -470,7 +470,7 @@ func TestTraceAttributesProcessorJaegerGRPC(t *testing.T) { // verifySpan verifies that attributes was added to the internal data span. verifySpan := func(span ptrace.Span) { require.NotNil(t, span) - require.Equal(t, span.Attributes().Len(), 1) + require.Equal(t, 1, span.Attributes().Len()) attrVal, ok := span.Attributes().Get("new_attr") assert.True(t, ok) assert.EqualValues(t, "string value", attrVal.Str()) @@ -483,13 +483,13 @@ func TestTraceAttributesProcessorJaegerGRPC(t *testing.T) { verifySingleSpan(t, tc, nodeToExclude, spanToInclude, func(span ptrace.Span) { // Verify attributes was not added to the new internal data span. - assert.Equal(t, span.Attributes().Len(), 0) + assert.Equal(t, 0, span.Attributes().Len()) }) // Create another span that does not match "include" filter. spanToExclude := "span-not-to-add-attr" verifySingleSpan(t, tc, nodeToInclude, spanToExclude, func(span ptrace.Span) { // Verify attributes was not added to the new internal data span. 
- assert.Equal(t, span.Attributes().Len(), 0) + assert.Equal(t, 0, span.Attributes().Len()) }) }