From 2ad68301cf66c66399c6df62acb589f34c7f0de1 Mon Sep 17 00:00:00 2001 From: Dmitry Date: Tue, 30 Aug 2022 11:40:16 -0700 Subject: [PATCH] Replace usages of Map.Insert* This change replaces usages of Map.Insert* with Map.Upsert* at call sites where there is no existing data under the key, so the two calls behave identically. This covers only part of the usages; the remaining ones will be migrated in follow-up PRs. --- cmd/mdatagen/metrics.tmpl | 10 +- .../logs_exporter_test.go | 8 +- .../logsdata_to_logservice_test.go | 36 ++- .../metricsdata_to_logservice_test.go | 18 +- .../tracedata_to_logservice_test.go | 28 +-- .../exporter_test.go | 22 +- exporter/awsxrayexporter/awsxray_test.go | 19 +- .../internal/translator/cause_test.go | 24 +- .../trace_to_envelope_test.go | 2 +- .../internal/metrics/consumer_test.go | 6 +- .../datadogexporter/traces_exporter_test.go | 5 +- .../internal/objmodel/objmodel_test.go | 25 +-- .../humioexporter/traces_exporter_test.go | 37 ++- .../trace_exporter_test.go | 2 +- exporter/logzioexporter/exporter_test.go | 14 +- exporter/lokiexporter/exporter_test.go | 5 +- exporter/mezmoexporter/exporter_test.go | 8 +- exporter/prometheusexporter/collector_test.go | 44 ++-- exporter/sapmexporter/exporter_test.go | 10 +- .../internal/correlation/spanshims_test.go | 8 +- .../logrecord_to_logdata_test.go | 40 ++-- exporter/splunkhecexporter/exporter_test.go | 14 +- .../logdata_to_splunk_test.go | 108 ++++----- .../tracedata_to_splunk_test.go | 31 ++- .../graphite_formatter_test.go | 40 ++-- exporter/sumologicexporter/sender_test.go | 38 ++-- .../exporter_test.go | 24 +- .../transformer_test.go | 7 +- .../logs_exporter_test.go | 8 +- .../logsdata_to_logservice_test.go | 50 ++--- .../processor/filterexpr/matcher_test.go | 4 +- .../processor/filterlog/filterlog_test.go | 2 +- .../filtermatcher/filtermatcher_test.go | 4 +- .../processor/filterspan/filterspan_test.go | 12 +- internal/coreinternal/testdata/log.go | 8 +- pkg/stanza/adapter/frompdataconverter_test.go | 56 ++--- .../tqlotel/func_delete_matching_keys_test.go | 14 +- .../functions/tqlotel/func_limit_test.go | 20 +- pkg/translator/opencensus/oc_to_metrics.go | 2 +- .../attributes_metric_test.go | 2 +- .../attribute_groups_test.go | 6 +- .../internal/aws/ecs/ecs.go | 22 +- .../internal/aws/eks/detector.go | 4 +- .../aws/elasticbeanstalk/elasticbeanstalk.go | 10 +- .../internal/metadata/generated_metrics.go | 20 +- .../internal/metadata/generated_metrics.go | 16 +- .../internal/metadata/generated_metrics.go | 34 +-- .../internal/metadata/generated_metrics.go | 10 +- receiver/cloudfoundryreceiver/converter.go | 6 +- .../internal/metadata/generated_metrics.go | 8 +- .../internal/metadata/generated_metrics.go | 210 +++++++++--------- .../internal/metadata/generated_metrics.go | 60 ++--- .../internal/metadata/generated_metrics.go | 14 +- .../internal/metadata/generated_metrics.go | 8 +- .../internal/metadata/generated_metrics.go | 38 ++-- .../internal/metadata/generated_metrics.go | 28 +-- .../internal/metadata/generated_metrics.go | 4 +- .../internal/metadata/generated_metrics.go | 36 +-- .../internal/metadata/generated_metrics.go | 18 +- .../internal/metadata/generated_metrics.go | 2 +- .../internal/metadata/generated_metrics.go | 4 +- .../internal/metadata/generated_metrics.go | 6 +- .../internal/metadata/generated_metrics.go | 40 ++-- .../internal/metadata/generated_metrics.go | 32 +-- .../internal/metadata/generated_metrics.go | 12 +- .../internal/metadata/generated_metrics.go | 108 ++++----- .../internal/metadata/generated_metrics.go | 36 +-- .../internal/metadata/generated_metrics.go | 30 
+-- .../internal/metadata/generated_metrics.go | 2 +- .../internal/metadata/generated_metrics.go | 10 +- .../internal/metadata/custom.go | 6 +- .../internal/metadata/generated_metrics.go | 38 ++-- .../internal/metadata/generated_metrics.go | 2 +- .../internal/metadata/generated_metrics.go | 14 +- .../internal/metadata/generated_metrics.go | 8 +- .../internal/metadata/generated_metrics.go | 134 +++++------ receiver/sapmreceiver/trace_receiver_test.go | 8 +- .../signalfxv2_event_to_logdata.go | 30 ++- .../internal/metadata/generated_metrics.go | 2 +- .../protocol/metric_translator.go | 4 +- .../protocol/metric_translator_test.go | 10 +- .../internal/metadata/generated_metrics.go | 30 +-- .../internal/metadata/generated_metrics.go | 4 +- testbed/testbed/data_providers.go | 4 +- testbed/tests/trace_test.go | 2 +- 85 files changed, 934 insertions(+), 1011 deletions(-) diff --git a/cmd/mdatagen/metrics.tmpl b/cmd/mdatagen/metrics.tmpl index abbdb3c7ab8d..68157ad0b7f1 100644 --- a/cmd/mdatagen/metrics.tmpl +++ b/cmd/mdatagen/metrics.tmpl @@ -107,15 +107,15 @@ func (m *metric{{ $name.Render }}) recordDataPoint(start pcommon.Timestamp, ts p dp.Set{{ $metric.Data.MetricValueType }}Val(val) {{- range $metric.Attributes }} {{- if eq (attributeInfo .).Type.Primitive "bool" }} - dp.Attributes().InsertBool("{{ attributeKey .}}", {{ .RenderUnexported }}AttributeValue) + dp.Attributes().UpsertBool("{{ attributeKey .}}", {{ .RenderUnexported }}AttributeValue) {{- else if eq (attributeInfo .).Type.Primitive "int64" }} - dp.Attributes().InsertInt("{{ attributeKey .}}", {{ .RenderUnexported }}AttributeValue) + dp.Attributes().UpsertInt("{{ attributeKey .}}", {{ .RenderUnexported }}AttributeValue) {{- else if eq (attributeInfo .).Type.Primitive "float64" }} - dp.Attributes().InsertDouble("{{ attributeKey .}}", {{ .RenderUnexported }}AttributeValue) + dp.Attributes().UpsertDouble("{{ attributeKey .}}", {{ .RenderUnexported }}AttributeValue) {{- else if eq (attributeInfo .).Type.Primitive "[]byte" }} - dp.Attributes().InsertBytes("{{ attributeKey .}}", pcommon.NewImmutableByteSlice({{ .RenderUnexported }}AttributeValue)) + dp.Attributes().UpsertBytes("{{ attributeKey .}}", pcommon.NewImmutableByteSlice({{ .RenderUnexported }}AttributeValue)) {{- else }} - dp.Attributes().InsertString("{{ attributeKey .}}", {{ .RenderUnexported }}AttributeValue) + dp.Attributes().UpsertString("{{ attributeKey .}}", {{ .RenderUnexported }}AttributeValue) {{- end }} {{- end }} } diff --git a/exporter/alibabacloudlogserviceexporter/logs_exporter_test.go b/exporter/alibabacloudlogserviceexporter/logs_exporter_test.go index d88e1b39b355..a50fe610570f 100644 --- a/exporter/alibabacloudlogserviceexporter/logs_exporter_test.go +++ b/exporter/alibabacloudlogserviceexporter/logs_exporter_test.go @@ -40,10 +40,10 @@ func createSimpleLogData(numberOfLogs int) plog.Logs { ts := pcommon.Timestamp(int64(i) * time.Millisecond.Nanoseconds()) logRecord := sl.LogRecords().AppendEmpty() logRecord.Body().SetStringVal("mylog") - logRecord.Attributes().InsertString(conventions.AttributeServiceName, "myapp") - logRecord.Attributes().InsertString("my-label", "myapp-type") - logRecord.Attributes().InsertString(conventions.AttributeHostName, "myhost") - logRecord.Attributes().InsertString("custom", "custom") + logRecord.Attributes().UpsertString(conventions.AttributeServiceName, "myapp") + logRecord.Attributes().UpsertString("my-label", "myapp-type") + logRecord.Attributes().UpsertString(conventions.AttributeHostName, "myhost") + 
logRecord.Attributes().UpsertString("custom", "custom") logRecord.SetTimestamp(ts) } sl.LogRecords().AppendEmpty() diff --git a/exporter/alibabacloudlogserviceexporter/logsdata_to_logservice_test.go b/exporter/alibabacloudlogserviceexporter/logsdata_to_logservice_test.go index 097830200b65..bfc7ae5839eb 100644 --- a/exporter/alibabacloudlogserviceexporter/logsdata_to_logservice_test.go +++ b/exporter/alibabacloudlogserviceexporter/logsdata_to_logservice_test.go @@ -26,33 +26,24 @@ import ( conventions "go.opentelemetry.io/collector/semconv/v1.6.1" ) -func getComplexAttributeValueMap() pcommon.Value { - mapVal := pcommon.NewValueMap() - mapValReal := mapVal.MapVal() - mapValReal.InsertBool("result", true) - mapValReal.InsertString("status", "ok") - mapValReal.InsertDouble("value", 1.3) - mapValReal.InsertInt("code", 200) - mapValReal.Insert("null", pcommon.NewValueEmpty()) - arrayVal := pcommon.NewValueSlice() - arrayVal.SliceVal().AppendEmpty().SetStringVal("array") - mapValReal.Insert("array", arrayVal) - - subMapVal := pcommon.NewValueMap() - subMapVal.MapVal().InsertString("data", "hello world") - mapValReal.Insert("map", subMapVal) - - mapValReal.InsertString("status", "ok") - return mapVal +func fillComplexAttributeValueMap(m pcommon.Map) { + m.UpsertBool("result", true) + m.UpsertString("status", "ok") + m.UpsertDouble("value", 1.3) + m.UpsertInt("code", 200) + m.UpsertEmpty("null") + m.UpsertEmptySlice("array").AppendEmpty().SetStringVal("array") + m.UpsertEmptyMap("map").UpsertString("data", "hello world") + m.UpsertString("status", "ok") } func createLogData(numberOfLogs int) plog.Logs { logs := plog.NewLogs() logs.ResourceLogs().AppendEmpty() // Add an empty ResourceLogs rl := logs.ResourceLogs().AppendEmpty() - rl.Resource().Attributes().InsertString("resouceKey", "resourceValue") - rl.Resource().Attributes().InsertString(conventions.AttributeServiceName, "test-log-service-exporter") - rl.Resource().Attributes().InsertString(conventions.AttributeHostName, "test-host") + rl.Resource().Attributes().UpsertString("resouceKey", "resourceValue") + rl.Resource().Attributes().UpsertString(conventions.AttributeServiceName, "test-log-service-exporter") + rl.Resource().Attributes().UpsertString(conventions.AttributeHostName, "test-host") sl := rl.ScopeLogs().AppendEmpty() sl.Scope().SetName("collector") sl.Scope().SetVersion("v0.1.0") @@ -72,8 +63,7 @@ func createLogData(numberOfLogs int) plog.Logs { case 4: logRecord.Body().SetStringVal("4") case 5: - - logRecord.Attributes().Insert("map-value", getComplexAttributeValueMap()) + fillComplexAttributeValueMap(logRecord.Attributes().UpsertEmptyMap("map-value")) logRecord.Body().SetStringVal("log contents") case 6: arrayVal := pcommon.NewValueSlice() diff --git a/exporter/alibabacloudlogserviceexporter/metricsdata_to_logservice_test.go b/exporter/alibabacloudlogserviceexporter/metricsdata_to_logservice_test.go index 58182f85ddfc..1f116d5cb1e7 100644 --- a/exporter/alibabacloudlogserviceexporter/metricsdata_to_logservice_test.go +++ b/exporter/alibabacloudlogserviceexporter/metricsdata_to_logservice_test.go @@ -32,9 +32,9 @@ func TestMetricDataToLogService(t *testing.T) { md.ResourceMetrics().AppendEmpty() // Add an empty ResourceMetrics rm := md.ResourceMetrics().AppendEmpty() - rm.Resource().Attributes().InsertString("labelB", "valueB") - rm.Resource().Attributes().InsertString("labelA", "valueA") - rm.Resource().Attributes().InsertString("a", "b") + rm.Resource().Attributes().UpsertString("labelB", "valueB") + 
rm.Resource().Attributes().UpsertString("labelA", "valueA") + rm.Resource().Attributes().UpsertString("a", "b") sms := rm.ScopeMetrics() sms.AppendEmpty() // Add an empty ScopeMetrics sm := sms.AppendEmpty() @@ -53,7 +53,7 @@ func TestMetricDataToLogService(t *testing.T) { intGauge := intGaugeMetric.Gauge() intGaugeDataPoints := intGauge.DataPoints() intGaugeDataPoint := intGaugeDataPoints.AppendEmpty() - intGaugeDataPoint.Attributes().InsertString("innerLabel", "innerValue") + intGaugeDataPoint.Attributes().UpsertString("innerLabel", "innerValue") intGaugeDataPoint.SetIntVal(10) intGaugeDataPoint.SetTimestamp(pcommon.Timestamp(100_000_000)) @@ -63,7 +63,7 @@ func TestMetricDataToLogService(t *testing.T) { doubleGauge := doubleGaugeMetric.Gauge() doubleGaugeDataPoints := doubleGauge.DataPoints() doubleGaugeDataPoint := doubleGaugeDataPoints.AppendEmpty() - doubleGaugeDataPoint.Attributes().InsertString("innerLabel", "innerValue") + doubleGaugeDataPoint.Attributes().UpsertString("innerLabel", "innerValue") doubleGaugeDataPoint.SetDoubleVal(10.1) doubleGaugeDataPoint.SetTimestamp(pcommon.Timestamp(100_000_000)) @@ -73,7 +73,7 @@ func TestMetricDataToLogService(t *testing.T) { intSum := intSumMetric.Sum() intSumDataPoints := intSum.DataPoints() intSumDataPoint := intSumDataPoints.AppendEmpty() - intSumDataPoint.Attributes().InsertString("innerLabel", "innerValue") + intSumDataPoint.Attributes().UpsertString("innerLabel", "innerValue") intSumDataPoint.SetIntVal(11) intSumDataPoint.SetTimestamp(pcommon.Timestamp(100_000_000)) @@ -83,7 +83,7 @@ func TestMetricDataToLogService(t *testing.T) { doubleSum := doubleSumMetric.Sum() doubleSumDataPoints := doubleSum.DataPoints() doubleSumDataPoint := doubleSumDataPoints.AppendEmpty() - doubleSumDataPoint.Attributes().InsertString("innerLabel", "innerValue") + doubleSumDataPoint.Attributes().UpsertString("innerLabel", "innerValue") doubleSumDataPoint.SetDoubleVal(10.1) doubleSumDataPoint.SetTimestamp(pcommon.Timestamp(100_000_000)) @@ -93,7 +93,7 @@ func TestMetricDataToLogService(t *testing.T) { doubleHistogram := doubleHistogramMetric.Histogram() doubleHistogramDataPoints := doubleHistogram.DataPoints() doubleHistogramDataPoint := doubleHistogramDataPoints.AppendEmpty() - doubleHistogramDataPoint.Attributes().InsertString("innerLabel", "innerValue") + doubleHistogramDataPoint.Attributes().UpsertString("innerLabel", "innerValue") doubleHistogramDataPoint.SetCount(2) doubleHistogramDataPoint.SetSum(10.1) doubleHistogramDataPoint.SetTimestamp(pcommon.Timestamp(100_000_000)) @@ -109,7 +109,7 @@ func TestMetricDataToLogService(t *testing.T) { doubleSummaryDataPoint.SetCount(2) doubleSummaryDataPoint.SetSum(10.1) doubleSummaryDataPoint.SetTimestamp(pcommon.Timestamp(100_000_000)) - doubleSummaryDataPoint.Attributes().InsertString("innerLabel", "innerValue") + doubleSummaryDataPoint.Attributes().UpsertString("innerLabel", "innerValue") quantileVal := doubleSummaryDataPoint.QuantileValues().AppendEmpty() quantileVal.SetValue(10.2) quantileVal.SetQuantile(0.9) diff --git a/exporter/alibabacloudlogserviceexporter/tracedata_to_logservice_test.go b/exporter/alibabacloudlogserviceexporter/tracedata_to_logservice_test.go index 5dc7b7df9bf6..94ec44e4513d 100644 --- a/exporter/alibabacloudlogserviceexporter/tracedata_to_logservice_test.go +++ b/exporter/alibabacloudlogserviceexporter/tracedata_to_logservice_test.go @@ -99,15 +99,15 @@ func constructSpanData() ptrace.Traces { func fillResource(resource pcommon.Resource) { attrs := resource.Attributes() - 
attrs.InsertString(conventions.AttributeServiceName, "signup_aggregator") - attrs.InsertString(conventions.AttributeHostName, "xxx.et15") - attrs.InsertString(conventions.AttributeContainerName, "signup_aggregator") - attrs.InsertString(conventions.AttributeContainerImageName, "otel/signupaggregator") - attrs.InsertString(conventions.AttributeContainerImageTag, "v1") - attrs.InsertString(conventions.AttributeCloudProvider, conventions.AttributeCloudProviderAWS) - attrs.InsertString(conventions.AttributeCloudAccountID, "999999998") - attrs.InsertString(conventions.AttributeCloudRegion, "us-west-2") - attrs.InsertString(conventions.AttributeCloudAvailabilityZone, "us-west-1b") + attrs.UpsertString(conventions.AttributeServiceName, "signup_aggregator") + attrs.UpsertString(conventions.AttributeHostName, "xxx.et15") + attrs.UpsertString(conventions.AttributeContainerName, "signup_aggregator") + attrs.UpsertString(conventions.AttributeContainerImageName, "otel/signupaggregator") + attrs.UpsertString(conventions.AttributeContainerImageTag, "v1") + attrs.UpsertString(conventions.AttributeCloudProvider, conventions.AttributeCloudProviderAWS) + attrs.UpsertString(conventions.AttributeCloudAccountID, "999999998") + attrs.UpsertString(conventions.AttributeCloudRegion, "us-west-2") + attrs.UpsertString(conventions.AttributeCloudAvailabilityZone, "us-west-1b") } func fillHTTPClientSpan(span ptrace.Span) { @@ -131,11 +131,11 @@ func fillHTTPClientSpan(span ptrace.Span) { event := span.Events().AppendEmpty() event.SetName("event") event.SetTimestamp(1024) - event.Attributes().InsertString("key", "value") + event.Attributes().UpsertString("key", "value") link := span.Links().AppendEmpty() link.SetTraceState("link:state") - link.Attributes().InsertString("link", "true") + link.Attributes().UpsertString("link", "true") status := span.Status() status.SetCode(1) @@ -169,11 +169,11 @@ func constructSpanAttributes(attributes map[string]interface{}) pcommon.Map { attrs := pcommon.NewMap() for key, value := range attributes { if cast, ok := value.(int); ok { - attrs.InsertInt(key, int64(cast)) + attrs.UpsertInt(key, int64(cast)) } else if cast, ok := value.(int64); ok { - attrs.InsertInt(key, cast) + attrs.UpsertInt(key, cast) } else { - attrs.InsertString(key, fmt.Sprintf("%v", value)) + attrs.UpsertString(key, fmt.Sprintf("%v", value)) } } return attrs diff --git a/exporter/awscloudwatchlogsexporter/exporter_test.go b/exporter/awscloudwatchlogsexporter/exporter_test.go index 92d7b9702c75..bcc0a2a2e902 100644 --- a/exporter/awscloudwatchlogsexporter/exporter_test.go +++ b/exporter/awscloudwatchlogsexporter/exporter_test.go @@ -119,8 +119,8 @@ func BenchmarkLogToCWLog(b *testing.B) { func testResource() pcommon.Resource { resource := pcommon.NewResource() - resource.Attributes().InsertString("host", "abc123") - resource.Attributes().InsertInt("node", 5) + resource.Attributes().UpsertString("host", "abc123") + resource.Attributes().UpsertInt("node", 5) return resource } @@ -130,8 +130,8 @@ func testLogRecord() plog.LogRecord { record.SetSeverityText("debug") record.SetDroppedAttributesCount(4) record.Body().SetStringVal("hello world") - record.Attributes().InsertInt("key1", 1) - record.Attributes().InsertString("key2", "attr2") + record.Attributes().UpsertInt("key1", 1) + record.Attributes().UpsertString("key2", "attr2") record.SetTraceID(pcommon.NewTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16})) record.SetSpanID(pcommon.NewSpanID([8]byte{1, 2, 3, 4, 5, 6, 7, 8})) 
record.FlagsStruct().SetIsSampled(true) @@ -145,8 +145,8 @@ func testLogRecordWithoutTrace() plog.LogRecord { record.SetSeverityText("debug") record.SetDroppedAttributesCount(4) record.Body().SetStringVal("hello world") - record.Attributes().InsertInt("key1", 1) - record.Attributes().InsertString("key2", "attr2") + record.Attributes().UpsertInt("key1", 1) + record.Attributes().UpsertString("key2", "attr2") record.SetTimestamp(1609719139000000) return record } @@ -182,11 +182,11 @@ func TestAttrValue(t *testing.T) { value: func() pcommon.Value { mAttr := pcommon.NewValueMap() m := mAttr.MapVal() - m.InsertString("key1", "value1") - m.Insert("key2", pcommon.NewValueEmpty()) - m.InsertBool("key3", true) - m.InsertInt("key4", 4) - m.InsertDouble("key5", 5.6) + m.UpsertString("key1", "value1") + m.Upsert("key2", pcommon.NewValueEmpty()) + m.UpsertBool("key3", true) + m.UpsertInt("key4", 4) + m.UpsertDouble("key5", 5.6) return mAttr }(), want: map[string]interface{}{ diff --git a/exporter/awsxrayexporter/awsxray_test.go b/exporter/awsxrayexporter/awsxray_test.go index 3a8c2db4d708..97971b2606c6 100644 --- a/exporter/awsxrayexporter/awsxray_test.go +++ b/exporter/awsxrayexporter/awsxray_test.go @@ -152,16 +152,15 @@ func constructW3CFormatTraceSpanData(ispans ptrace.ScopeSpans) { func constructResource() pcommon.Resource { resource := pcommon.NewResource() - attrs := pcommon.NewMap() - attrs.InsertString(conventions.AttributeServiceName, "signup_aggregator") - attrs.InsertString(conventions.AttributeContainerName, "signup_aggregator") - attrs.InsertString(conventions.AttributeContainerImageName, "otel/signupaggregator") - attrs.InsertString(conventions.AttributeContainerImageTag, "v1") - attrs.InsertString(conventions.AttributeCloudProvider, conventions.AttributeCloudProviderAWS) - attrs.InsertString(conventions.AttributeCloudAccountID, "999999998") - attrs.InsertString(conventions.AttributeCloudRegion, "us-west-2") - attrs.InsertString(conventions.AttributeCloudAvailabilityZone, "us-west-1b") - attrs.CopyTo(resource.Attributes()) + attrs := resource.Attributes() + attrs.UpsertString(conventions.AttributeServiceName, "signup_aggregator") + attrs.UpsertString(conventions.AttributeContainerName, "signup_aggregator") + attrs.UpsertString(conventions.AttributeContainerImageName, "otel/signupaggregator") + attrs.UpsertString(conventions.AttributeContainerImageTag, "v1") + attrs.UpsertString(conventions.AttributeCloudProvider, conventions.AttributeCloudProviderAWS) + attrs.UpsertString(conventions.AttributeCloudAccountID, "999999998") + attrs.UpsertString(conventions.AttributeCloudRegion, "us-west-2") + attrs.UpsertString(conventions.AttributeCloudAvailabilityZone, "us-west-1b") return resource } diff --git a/exporter/awsxrayexporter/internal/translator/cause_test.go b/exporter/awsxrayexporter/internal/translator/cause_test.go index b31e65c35e47..8725787fa5ae 100644 --- a/exporter/awsxrayexporter/internal/translator/cause_test.go +++ b/exporter/awsxrayexporter/internal/translator/cause_test.go @@ -34,26 +34,22 @@ func TestCauseWithExceptions(t *testing.T) { event1 := span.Events().AppendEmpty() event1.SetName(ExceptionEventName) - attributes := pcommon.NewMap() - attributes.InsertString(conventions.AttributeExceptionType, "java.lang.IllegalStateException") - attributes.InsertString(conventions.AttributeExceptionMessage, "bad state") - attributes.InsertString(conventions.AttributeExceptionStacktrace, `java.lang.IllegalStateException: state is not legal + 
event1.Attributes().UpsertString(conventions.AttributeExceptionType, "java.lang.IllegalStateException") + event1.Attributes().UpsertString(conventions.AttributeExceptionMessage, "bad state") + event1.Attributes().UpsertString(conventions.AttributeExceptionStacktrace, `java.lang.IllegalStateException: state is not legal at io.opentelemetry.sdk.trace.RecordEventsReadableSpanTest.recordException(RecordEventsReadableSpanTest.java:626) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) Caused by: java.lang.IllegalArgumentException: bad argument`) - attributes.CopyTo(event1.Attributes()) event2 := span.Events().AppendEmpty() event2.SetName(ExceptionEventName) - attributes = pcommon.NewMap() - attributes.InsertString(conventions.AttributeExceptionType, "EmptyError") - attributes.CopyTo(event2.Attributes()) + event2.Attributes().UpsertString(conventions.AttributeExceptionType, "EmptyError") filtered, _ := makeHTTP(span) res := pcommon.NewResource() - res.Attributes().InsertString(conventions.AttributeTelemetrySDKLanguage, "java") + res.Attributes().UpsertString(conventions.AttributeTelemetrySDKLanguage, "java") isError, isFault, isThrottle, filteredResult, cause := makeCause(span, filtered, res) assert.True(t, isFault) @@ -97,16 +93,14 @@ Caused by: java.lang.IllegalArgumentException: bad argument` event1 := span.Events().AppendEmpty() event1.SetName(ExceptionEventName) - attributes := pcommon.NewMap() - attributes.InsertString(conventions.AttributeExceptionType, "java.lang.IllegalStateException") - attributes.InsertString(conventions.AttributeExceptionMessage, "bad state") - attributes.InsertString(conventions.AttributeExceptionStacktrace, exceptionStack) - attributes.CopyTo(event1.Attributes()) + event1.Attributes().UpsertString(conventions.AttributeExceptionType, "java.lang.IllegalStateException") + event1.Attributes().UpsertString(conventions.AttributeExceptionMessage, "bad state") + event1.Attributes().UpsertString(conventions.AttributeExceptionStacktrace, exceptionStack) filtered, _ := makeHTTP(span) res := pcommon.NewResource() - res.Attributes().InsertString(conventions.AttributeTelemetrySDKLanguage, "java") + res.Attributes().UpsertString(conventions.AttributeTelemetrySDKLanguage, "java") isError, isFault, isThrottle, filteredResult, cause := makeCause(span, filtered, res) assert.False(t, isFault) diff --git a/exporter/azuremonitorexporter/trace_to_envelope_test.go b/exporter/azuremonitorexporter/trace_to_envelope_test.go index 647d415fd62c..71d3efdbd156 100644 --- a/exporter/azuremonitorexporter/trace_to_envelope_test.go +++ b/exporter/azuremonitorexporter/trace_to_envelope_test.go @@ -409,7 +409,7 @@ func TestRPCClientSpanToRemoteDependencyData(t *testing.T) { // test RPC error using the new rpc.grpc.status_code attribute span.Status().SetCode(ptrace.StatusCodeError) span.Status().SetMessage("Resource exhausted") - spanAttributes.InsertInt(attributeRPCGRPCStatusCode, 8) + spanAttributes.UpsertInt(attributeRPCGRPCStatusCode, 8) envelope, _ = spanToEnvelope(defaultResource, defaultInstrumentationLibrary, span, zap.NewNop()) data = envelope.Data.(*contracts.Data).BaseData.(*contracts.RemoteDependencyData) diff --git a/exporter/datadogexporter/internal/metrics/consumer_test.go b/exporter/datadogexporter/internal/metrics/consumer_test.go index bb33673fe2d0..7f437a672e75 100644 --- a/exporter/datadogexporter/internal/metrics/consumer_test.go +++ 
b/exporter/datadogexporter/internal/metrics/consumer_test.go @@ -99,15 +99,15 @@ func TestTagsMetrics(t *testing.T) { conventions.AttributeAWSECSLaunchtype: conventions.AttributeAWSECSLaunchtypeFargate, }) baseAttrs.CopyTo(rm.Resource().Attributes()) - rm.Resource().Attributes().InsertString(conventions.AttributeAWSECSTaskARN, "task-arn-1") + rm.Resource().Attributes().UpsertString(conventions.AttributeAWSECSTaskARN, "task-arn-1") rm = rms.AppendEmpty() baseAttrs.CopyTo(rm.Resource().Attributes()) - rm.Resource().Attributes().InsertString(conventions.AttributeAWSECSTaskARN, "task-arn-2") + rm.Resource().Attributes().UpsertString(conventions.AttributeAWSECSTaskARN, "task-arn-2") rm = rms.AppendEmpty() baseAttrs.CopyTo(rm.Resource().Attributes()) - rm.Resource().Attributes().InsertString(conventions.AttributeAWSECSTaskARN, "task-arn-3") + rm.Resource().Attributes().UpsertString(conventions.AttributeAWSECSTaskARN, "task-arn-3") logger, _ := zap.NewProduction() tr := newTranslator(t, logger) diff --git a/exporter/datadogexporter/traces_exporter_test.go b/exporter/datadogexporter/traces_exporter_test.go index 89c7e25a4ad0..8ebe5b60b3a3 100644 --- a/exporter/datadogexporter/traces_exporter_test.go +++ b/exporter/datadogexporter/traces_exporter_test.go @@ -343,9 +343,6 @@ func genTraces(traceID pcommon.TraceID, attrs map[string]interface{}) ptrace.Tra if attrs == nil { return traces } - pcommon.NewMapFromRaw(attrs).Range(func(k string, v pcommon.Value) bool { - rspans.Resource().Attributes().Insert(k, v) - return true - }) + pcommon.NewMapFromRaw(attrs).CopyTo(rspans.Resource().Attributes()) return traces } diff --git a/exporter/elasticsearchexporter/internal/objmodel/objmodel_test.go b/exporter/elasticsearchexporter/internal/objmodel/objmodel_test.go index 743755643d92..8b0ed741d47c 100644 --- a/exporter/elasticsearchexporter/internal/objmodel/objmodel_test.go +++ b/exporter/elasticsearchexporter/internal/objmodel/objmodel_test.go @@ -79,8 +79,8 @@ func TestObjectModel_CreateMap(t *testing.T) { build: func() (doc Document) { mapVal := pcommon.NewValueMap() m := mapVal.MapVal() - m.InsertInt("i", 42) - m.InsertString("str", "test") + m.UpsertInt("i", 42) + m.UpsertString("str", "test") doc.AddAttribute("prefix", mapVal) return doc }, @@ -155,26 +155,20 @@ func TestObjectModel_Dedup(t *testing.T) { }, "duplicate after flattening from map: namespace object at end": { build: func() Document { - namespace := pcommon.NewValueMap() - namespace.MapVal().InsertInt("a", 23) - am := pcommon.NewMap() - am.InsertInt("namespace.a", 42) - am.InsertString("toplevel", "test") - am.Insert("namespace", namespace) + am.UpsertInt("namespace.a", 42) + am.UpsertString("toplevel", "test") + am.UpsertEmptyMap("namespace").UpsertInt("a", 23) return DocumentFromAttributes(am) }, want: Document{[]field{{"namespace.a", ignoreValue}, {"namespace.a", IntValue(23)}, {"toplevel", StringValue("test")}}}, }, "duplicate after flattening from map: namespace object at beginning": { build: func() Document { - namespace := pcommon.NewValueMap() - namespace.MapVal().InsertInt("a", 23) - am := pcommon.NewMap() - am.Insert("namespace", namespace) - am.InsertInt("namespace.a", 42) - am.InsertString("toplevel", "test") + am.UpsertEmptyMap("namespace").UpsertInt("a", 23) + am.UpsertInt("namespace.a", 42) + am.UpsertString("toplevel", "test") return DocumentFromAttributes(am) }, want: Document{[]field{{"namespace.a", ignoreValue}, {"namespace.a", IntValue(42)}, {"toplevel", StringValue("test")}}}, @@ -269,8 +263,7 @@ func 
TestValue_FromAttribute(t *testing.T) { "non-empty map": { in: func() pcommon.Value { v := pcommon.NewValueMap() - m := v.MapVal() - m.InsertInt("a", 1) + v.MapVal().UpsertInt("a", 1) return v }(), want: Value{kind: KindObject, doc: Document{[]field{{"a", IntValue(1)}}}}, diff --git a/exporter/humioexporter/traces_exporter_test.go b/exporter/humioexporter/traces_exporter_test.go index 08d4aa74c575..5bfd1d5c50f6 100644 --- a/exporter/humioexporter/traces_exporter_test.go +++ b/exporter/humioexporter/traces_exporter_test.go @@ -157,7 +157,7 @@ func TestPushTraceData_TransientOnPartialFailure(t *testing.T) { traces := ptrace.NewTraces() traces.ResourceSpans().EnsureCapacity(2) rspan := traces.ResourceSpans().AppendEmpty() - rspan.Resource().Attributes().InsertString(conventions.AttributeServiceName, "service1") + rspan.Resource().Attributes().UpsertString(conventions.AttributeServiceName, "service1") rspan.ScopeSpans().AppendEmpty().Spans().AppendEmpty() // ...and one without (partial failure) @@ -195,17 +195,17 @@ func TestTracesToHumioEvents_OrganizedByTags(t *testing.T) { // Three spans for the same trace across two different resources, as // well a span from a separate trace res1 := traces.ResourceSpans().AppendEmpty() - res1.Resource().Attributes().InsertString(conventions.AttributeServiceName, "service-A") + res1.Resource().Attributes().UpsertString(conventions.AttributeServiceName, "service-A") ils1 := res1.ScopeSpans().AppendEmpty() ils1.Spans().AppendEmpty().SetTraceID(pcommon.NewTraceID(createTraceID("10000000000000000000000000000000"))) ils1.Spans().AppendEmpty().SetTraceID(pcommon.NewTraceID(createTraceID("10000000000000000000000000000000"))) res2 := traces.ResourceSpans().AppendEmpty() - res2.Resource().Attributes().InsertString(conventions.AttributeServiceName, "service-B") + res2.Resource().Attributes().UpsertString(conventions.AttributeServiceName, "service-B") res2.ScopeSpans().AppendEmpty().Spans().AppendEmpty().SetTraceID(pcommon.NewTraceID(createTraceID("10000000000000000000000000000000"))) res3 := traces.ResourceSpans().AppendEmpty() - res3.Resource().Attributes().InsertString(conventions.AttributeServiceName, "service-C") + res3.Resource().Attributes().UpsertString(conventions.AttributeServiceName, "service-C") res3.ScopeSpans().AppendEmpty().Spans().AppendEmpty().SetTraceID(pcommon.NewTraceID(createTraceID("20000000000000000000000000000000"))) // Organize by trace id @@ -252,14 +252,14 @@ func TestSpanToHumioEvent(t *testing.T) { )) span.Status().SetCode(ptrace.StatusCodeOk) span.Status().SetMessage("done") - span.Attributes().InsertString("key", "val") + span.Attributes().UpsertString("key", "val") inst := pcommon.NewInstrumentationScope() inst.SetName("otel-test") inst.SetVersion("1.0.0") res := pcommon.NewResource() - res.Attributes().InsertString("service.name", "myapp") + res.Attributes().UpsertString("service.name", "myapp") expected := &HumioStructuredEvent{ Timestamp: time.Date(2020, 1, 1, 12, 0, 0, 0, time.UTC), @@ -374,10 +374,10 @@ func TestToHumioAttributes(t *testing.T) { desc: "Simple types", attr: func() pcommon.Map { attrMap := pcommon.NewMap() - attrMap.InsertString("string", "val") - attrMap.InsertInt("integer", 42) - attrMap.InsertDouble("double", 4.2) - attrMap.InsertBool("bool", false) + attrMap.UpsertString("string", "val") + attrMap.UpsertInt("integer", 42) + attrMap.UpsertDouble("double", 4.2) + attrMap.UpsertBool("bool", false) return attrMap }, expected: map[string]interface{}{ @@ -391,7 +391,7 @@ func TestToHumioAttributes(t *testing.T) { 
desc: "Nil element", attr: func() pcommon.Map { attrMap := pcommon.NewMap() - attrMap.Insert("key", pcommon.NewValueEmpty()) + attrMap.UpsertEmpty("key") return attrMap }, expected: map[string]interface{}{ @@ -402,11 +402,10 @@ func TestToHumioAttributes(t *testing.T) { desc: "Array element", attr: func() pcommon.Map { attrMap := pcommon.NewMap() - arr := pcommon.NewValueSlice() - arr.SliceVal().AppendEmpty().SetStringVal("a") - arr.SliceVal().AppendEmpty().SetStringVal("b") - arr.SliceVal().AppendEmpty().SetIntVal(4) - attrMap.Insert("array", arr) + arr := attrMap.UpsertEmptySlice("array") + arr.AppendEmpty().SetStringVal("a") + arr.AppendEmpty().SetStringVal("b") + arr.AppendEmpty().SetIntVal(4) return attrMap }, expected: map[string]interface{}{ @@ -420,7 +419,7 @@ func TestToHumioAttributes(t *testing.T) { attr: func() pcommon.Map { attrMap := pcommon.NewMap() nested := pcommon.NewValueMap() - nested.MapVal().InsertString("key", "val") + nested.MapVal().UpsertString("key", "val") attrMap.Insert("nested", nested) attrMap.InsertBool("active", true) return attrMap @@ -447,12 +446,12 @@ func TestToHumioAttributes(t *testing.T) { func TestToHumioAttributesShaded(t *testing.T) { // Arrange attrMapA := pcommon.NewMap() - attrMapA.InsertString("string", "val") + attrMapA.UpsertString("string", "val") attrMapA.InsertInt("integer", 42) attrMapB := pcommon.NewMap() attrMapB.InsertInt("integer", 0) - attrMapB.InsertString("key", "val") + attrMapB.UpsertString("key", "val") expected := map[string]interface{}{ "string": "val", diff --git a/exporter/loadbalancingexporter/trace_exporter_test.go b/exporter/loadbalancingexporter/trace_exporter_test.go index e7c72429fbe7..9f193d0db19d 100644 --- a/exporter/loadbalancingexporter/trace_exporter_test.go +++ b/exporter/loadbalancingexporter/trace_exporter_test.go @@ -627,7 +627,7 @@ func simpleTraceWithServiceName(id pcommon.TraceID) ptrace.Traces { func fillResource(resource pcommon.Resource, svc string) { attrs := resource.Attributes() - attrs.InsertString("service.name", svc) + attrs.UpsertString("service.name", svc) } func simpleConfig() *Config { diff --git a/exporter/logzioexporter/exporter_test.go b/exporter/logzioexporter/exporter_test.go index 2521acd5a198..04fffdbb5c88 100644 --- a/exporter/logzioexporter/exporter_test.go +++ b/exporter/logzioexporter/exporter_test.go @@ -73,20 +73,20 @@ func fillLogOne(log plog.LogRecord) { log.SetTraceID(pcommon.NewTraceID([16]byte{0x08, 0x04, 0x02, 0x01})) attrs := log.Attributes() - attrs.InsertString("app", "server") - attrs.InsertDouble("instance_num", 1) + attrs.UpsertString("app", "server") + attrs.UpsertDouble("instance_num", 1) // nested body map attVal := pcommon.NewValueMap() attNestedVal := pcommon.NewValueMap() attMap := attVal.MapVal() - attMap.InsertDouble("23", 45) - attMap.InsertString("foo", "bar") - attMap.InsertString("message", "hello there") + attMap.UpsertDouble("23", 45) + attMap.UpsertString("foo", "bar") + attMap.UpsertString("message", "hello there") attNestedMap := attNestedVal.MapVal() - attNestedMap.InsertString("string", "v1") - attNestedMap.InsertDouble("number", 499) + attNestedMap.UpsertString("string", "v1") + attNestedMap.UpsertDouble("number", 499) attMap.Insert("nested", attNestedVal) attVal.CopyTo(log.Body()) diff --git a/exporter/lokiexporter/exporter_test.go b/exporter/lokiexporter/exporter_test.go index 9de0a382f756..4701793ada4f 100644 --- a/exporter/lokiexporter/exporter_test.go +++ b/exporter/lokiexporter/exporter_test.go @@ -65,10 +65,7 @@ func 
createLogData(numberOfLogs int, attributes pcommon.Map) plog.Logs { ts := pcommon.Timestamp(int64(i) * time.Millisecond.Nanoseconds()) logRecord := sl.LogRecords().AppendEmpty() logRecord.Body().SetStringVal("mylog") - attributes.Range(func(k string, v pcommon.Value) bool { - logRecord.Attributes().Insert(k, v) - return true - }) + attributes.CopyTo(logRecord.Attributes()) logRecord.SetTimestamp(ts) } diff --git a/exporter/mezmoexporter/exporter_test.go b/exporter/mezmoexporter/exporter_test.go index 81870978663d..8609be42d3ab 100644 --- a/exporter/mezmoexporter/exporter_test.go +++ b/exporter/mezmoexporter/exporter_test.go @@ -52,10 +52,10 @@ func createSimpleLogData(numberOfLogs int) plog.Logs { ts := pcommon.Timestamp(int64(i) * time.Millisecond.Nanoseconds()) logRecord := sl.LogRecords().AppendEmpty() logRecord.Body().SetStringVal("10byteslog") - logRecord.Attributes().InsertString(conventions.AttributeServiceName, "myapp") - logRecord.Attributes().InsertString("my-label", "myapp-type") - logRecord.Attributes().InsertString(conventions.AttributeHostName, "myhost") - logRecord.Attributes().InsertString("custom", "custom") + logRecord.Attributes().UpsertString(conventions.AttributeServiceName, "myapp") + logRecord.Attributes().UpsertString("my-label", "myapp-type") + logRecord.Attributes().UpsertString(conventions.AttributeHostName, "myhost") + logRecord.Attributes().UpsertString("custom", "custom") logRecord.SetTimestamp(ts) } diff --git a/exporter/prometheusexporter/collector_test.go b/exporter/prometheusexporter/collector_test.go index f342d2a30f20..067f82877cb0 100644 --- a/exporter/prometheusexporter/collector_test.go +++ b/exporter/prometheusexporter/collector_test.go @@ -142,7 +142,7 @@ func TestConvertDoubleHistogramExemplar(t *testing.T) { pde := hd.Exemplars().AppendEmpty() pde.SetDoubleVal(e.Value) for k, v := range e.Labels { - pde.FilteredAttributes().InsertString(k, v) + pde.FilteredAttributes().UpsertString(k, v) } pde.SetTimestamp(pcommon.NewTimestampFromTime(e.Timestamp)) } @@ -219,8 +219,8 @@ func TestCollectMetricsLabelSanitize(t *testing.T) { metric.SetDescription("test description") dp := metric.Gauge().DataPoints().AppendEmpty() dp.SetIntVal(42) - dp.Attributes().InsertString("label.1", "1") - dp.Attributes().InsertString("label/2", "2") + dp.Attributes().UpsertString("label.1", "1") + dp.Attributes().UpsertString("label/2", "2") dp.SetTimestamp(pcommon.NewTimestampFromTime(time.Now())) loggerCore := errorCheckCore{} @@ -274,8 +274,8 @@ func TestCollectMetrics(t *testing.T) { metric.SetDescription("test description") dp := metric.Gauge().DataPoints().AppendEmpty() dp.SetIntVal(42) - dp.Attributes().InsertString("label_1", "1") - dp.Attributes().InsertString("label_2", "2") + dp.Attributes().UpsertString("label_1", "1") + dp.Attributes().UpsertString("label_2", "2") dp.SetTimestamp(pcommon.NewTimestampFromTime(ts)) return @@ -292,8 +292,8 @@ func TestCollectMetrics(t *testing.T) { metric.SetDescription("test description") dp := metric.Gauge().DataPoints().AppendEmpty() dp.SetDoubleVal(42.42) - dp.Attributes().InsertString("label_1", "1") - dp.Attributes().InsertString("label_2", "2") + dp.Attributes().UpsertString("label_1", "1") + dp.Attributes().UpsertString("label_2", "2") dp.SetTimestamp(pcommon.NewTimestampFromTime(ts)) return @@ -312,8 +312,8 @@ func TestCollectMetrics(t *testing.T) { metric.SetDescription("test description") dp := metric.Sum().DataPoints().AppendEmpty() dp.SetIntVal(42) - dp.Attributes().InsertString("label_1", "1") - 
dp.Attributes().InsertString("label_2", "2") + dp.Attributes().UpsertString("label_1", "1") + dp.Attributes().UpsertString("label_2", "2") dp.SetTimestamp(pcommon.NewTimestampFromTime(ts)) return @@ -332,8 +332,8 @@ func TestCollectMetrics(t *testing.T) { metric.SetDescription("test description") dp := metric.Sum().DataPoints().AppendEmpty() dp.SetDoubleVal(42.42) - dp.Attributes().InsertString("label_1", "1") - dp.Attributes().InsertString("label_2", "2") + dp.Attributes().UpsertString("label_1", "1") + dp.Attributes().UpsertString("label_2", "2") dp.SetTimestamp(pcommon.NewTimestampFromTime(ts)) return @@ -352,8 +352,8 @@ func TestCollectMetrics(t *testing.T) { metric.SetDescription("test description") dp := metric.Sum().DataPoints().AppendEmpty() dp.SetIntVal(42) - dp.Attributes().InsertString("label_1", "1") - dp.Attributes().InsertString("label_2", "2") + dp.Attributes().UpsertString("label_1", "1") + dp.Attributes().UpsertString("label_2", "2") dp.SetTimestamp(pcommon.NewTimestampFromTime(ts)) return @@ -372,8 +372,8 @@ func TestCollectMetrics(t *testing.T) { metric.SetDescription("test description") dp := metric.Sum().DataPoints().AppendEmpty() dp.SetDoubleVal(42.42) - dp.Attributes().InsertString("label_1", "1") - dp.Attributes().InsertString("label_2", "2") + dp.Attributes().UpsertString("label_1", "1") + dp.Attributes().UpsertString("label_2", "2") dp.SetTimestamp(pcommon.NewTimestampFromTime(ts)) return @@ -389,9 +389,9 @@ func TestCollectMetrics(t *testing.T) { } rAttrs := pcommon.NewMap() - rAttrs.InsertString(conventions.AttributeServiceInstanceID, "localhost:9090") - rAttrs.InsertString(conventions.AttributeServiceName, "testapp") - rAttrs.InsertString(conventions.AttributeServiceNamespace, "prod") + rAttrs.UpsertString(conventions.AttributeServiceInstanceID, "localhost:9090") + rAttrs.UpsertString(conventions.AttributeServiceName, "testapp") + rAttrs.UpsertString(conventions.AttributeServiceNamespace, "prod") t.Run(name, func(t *testing.T) { ts := time.Now() @@ -492,8 +492,8 @@ func TestAccumulateHistograms(t *testing.T) { dp.SetCount(7) dp.SetExplicitBounds(pcommon.NewImmutableFloat64Slice([]float64{3.5, 10.0})) dp.SetSum(42.42) - dp.Attributes().InsertString("label_1", "1") - dp.Attributes().InsertString("label_2", "2") + dp.Attributes().UpsertString("label_1", "1") + dp.Attributes().UpsertString("label_2", "2") dp.SetTimestamp(pcommon.NewTimestampFromTime(ts)) return }, @@ -591,8 +591,8 @@ func TestAccumulateSummary(t *testing.T) { sp.SetCount(10) sp.SetSum(0.012) sp.SetCount(10) - sp.Attributes().InsertString("label_1", "1") - sp.Attributes().InsertString("label_2", "2") + sp.Attributes().UpsertString("label_1", "1") + sp.Attributes().UpsertString("label_2", "2") sp.SetTimestamp(pcommon.NewTimestampFromTime(ts)) fillQuantileValue(0.50, 190, sp.QuantileValues().AppendEmpty()) diff --git a/exporter/sapmexporter/exporter_test.go b/exporter/sapmexporter/exporter_test.go index 3b74284b7074..3afa3654af73 100644 --- a/exporter/sapmexporter/exporter_test.go +++ b/exporter/sapmexporter/exporter_test.go @@ -72,15 +72,15 @@ func buildTestTraces(setTokenLabel bool) (traces ptrace.Traces) { for i := 0; i < 20; i++ { rs := rss.AppendEmpty() resource := rs.Resource() - resource.Attributes().InsertString("key1", "value1") + resource.Attributes().UpsertString("key1", "value1") if setTokenLabel && i%2 == 1 { tokenLabel := fmt.Sprintf("MyToken%d", i/5) - resource.Attributes().InsertString("com.splunk.signalfx.access_token", tokenLabel) - 
resource.Attributes().InsertString("com.splunk.signalfx.access_token", tokenLabel) + resource.Attributes().UpsertString("com.splunk.signalfx.access_token", tokenLabel) + resource.Attributes().UpsertString("com.splunk.signalfx.access_token", tokenLabel) } // Add one last element every 3rd resource, this way we have cases with token last or not. if i%3 == 1 { - resource.Attributes().InsertString("key2", "value2") + resource.Attributes().UpsertString("key2", "value2") } span := rs.ScopeSpans().AppendEmpty().Spans().AppendEmpty() @@ -140,7 +140,7 @@ func buildTestTrace() ptrace.Traces { for i := 0; i < 2; i++ { rs := trace.ResourceSpans().AppendEmpty() resource := rs.Resource() - resource.Attributes().InsertString("com.splunk.signalfx.access_token", fmt.Sprintf("TraceAccessToken%v", i)) + resource.Attributes().UpsertString("com.splunk.signalfx.access_token", fmt.Sprintf("TraceAccessToken%v", i)) span := rs.ScopeSpans().AppendEmpty().Spans().AppendEmpty() span.SetName("MySpan") diff --git a/exporter/signalfxexporter/internal/correlation/spanshims_test.go b/exporter/signalfxexporter/internal/correlation/spanshims_test.go index 90c41470c6e0..01fbfc5ea93f 100644 --- a/exporter/signalfxexporter/internal/correlation/spanshims_test.go +++ b/exporter/signalfxexporter/internal/correlation/spanshims_test.go @@ -43,7 +43,7 @@ func TestSpanShim_Service(t *testing.T) { span := ptrace.NewResourceSpans() res := span.Resource() attr := res.Attributes() - attr.InsertString("service.name", "shopping-cart") + attr.UpsertString("service.name", "shopping-cart") wrapped := spanWrap{span} @@ -57,7 +57,7 @@ func TestSpanShim_Environment(t *testing.T) { span := ptrace.NewResourceSpans() res := span.Resource() attr := res.Attributes() - attr.InsertString("deployment.environment", "prod") + attr.UpsertString("deployment.environment", "prod") wrapped := spanWrap{span} @@ -71,7 +71,7 @@ func TestSpanShim_SignalfxEnvironment(t *testing.T) { span := ptrace.NewResourceSpans() res := span.Resource() attr := res.Attributes() - attr.InsertString("environment", "prod") + attr.UpsertString("environment", "prod") wrapped := spanWrap{span} @@ -110,7 +110,7 @@ func TestSpanShim_Tags(t *testing.T) { span := ptrace.NewResourceSpans() res := span.Resource() attr := res.Attributes() - attr.InsertString("tag1", "tag1val") + attr.UpsertString("tag1", "tag1val") wrapped := spanWrap{span} diff --git a/exporter/skywalkingexporter/logrecord_to_logdata_test.go b/exporter/skywalkingexporter/logrecord_to_logdata_test.go index e603c020100a..305be7056a7b 100644 --- a/exporter/skywalkingexporter/logrecord_to_logdata_test.go +++ b/exporter/skywalkingexporter/logrecord_to_logdata_test.go @@ -25,34 +25,25 @@ import ( logpb "skywalking.apache.org/repo/goapi/collect/logging/v3" ) -func getComplexAttributeValueMap() pcommon.Value { - mapVal := pcommon.NewValueMap() - mapValReal := mapVal.MapVal() - mapValReal.InsertBool("result", true) - mapValReal.InsertString("status", "ok") - mapValReal.InsertDouble("value", 1.3) - mapValReal.InsertInt("code", 200) - mapValReal.Insert("null", pcommon.NewValueEmpty()) - arrayVal := pcommon.NewValueSlice() - arrayVal.SliceVal().AppendEmpty().SetStringVal("array") - mapValReal.Insert("array", arrayVal) - - subMapVal := pcommon.NewValueMap() - subMapVal.MapVal().InsertString("data", "hello world") - mapValReal.Insert("map", subMapVal) - - mapValReal.InsertString("status", "ok") - return mapVal +func fillComplexAttributeValueMap(m pcommon.Map) { + m.UpsertBool("result", true) + m.UpsertString("status", "ok") + 
m.UpsertDouble("value", 1.3) + m.UpsertInt("code", 200) + m.UpsertEmpty("null") + m.UpsertEmptySlice("array").AppendEmpty().SetStringVal("array") + m.UpsertEmptyMap("map").UpsertString("data", "hello world") + m.UpsertString("status", "ok") } func createLogData(numberOfLogs int) plog.Logs { logs := plog.NewLogs() logs.ResourceLogs().AppendEmpty() rl := logs.ResourceLogs().AppendEmpty() - rl.Resource().Attributes().InsertString("resourceKey", "resourceValue") - rl.Resource().Attributes().InsertString(conventions.AttributeServiceName, "test-service") - rl.Resource().Attributes().InsertString(conventions.AttributeHostName, "test-host") - rl.Resource().Attributes().InsertString(conventions.AttributeServiceInstanceID, "test-instance") + rl.Resource().Attributes().UpsertString("resourceKey", "resourceValue") + rl.Resource().Attributes().UpsertString(conventions.AttributeServiceName, "test-service") + rl.Resource().Attributes().UpsertString(conventions.AttributeHostName, "test-host") + rl.Resource().Attributes().UpsertString(conventions.AttributeServiceInstanceID, "test-instance") sl := rl.ScopeLogs().AppendEmpty() sl.Scope().SetName("collector") sl.Scope().SetVersion("v0.1.0") @@ -77,8 +68,7 @@ func createLogData(numberOfLogs int) plog.Logs { case 4: logRecord.Body().SetStringVal("4") case 5: - - logRecord.Attributes().Insert("map-value", getComplexAttributeValueMap()) + fillComplexAttributeValueMap(logRecord.Attributes().UpsertEmptyMap("map-value")) logRecord.Body().SetStringVal("log contents") case 6: arrayVal := pcommon.NewValueSlice() @@ -88,7 +78,7 @@ func createLogData(numberOfLogs int) plog.Logs { default: logRecord.Body().SetStringVal("log contents") } - logRecord.Attributes().InsertString("custom", "custom") + logRecord.Attributes().UpsertString("custom", "custom") } return logs diff --git a/exporter/splunkhecexporter/exporter_test.go b/exporter/splunkhecexporter/exporter_test.go index e08b967549de..341e2b9b10a8 100644 --- a/exporter/splunkhecexporter/exporter_test.go +++ b/exporter/splunkhecexporter/exporter_test.go @@ -226,11 +226,11 @@ func generateLargeLogsBatch() plog.Logs { for i := 0; i < 65000; i++ { logRecord := sl.LogRecords().AppendEmpty() logRecord.Body().SetStringVal("mylog") - logRecord.Attributes().InsertString(splunk.DefaultSourceLabel, "myapp") - logRecord.Attributes().InsertString(splunk.DefaultSourceTypeLabel, "myapp-type") - logRecord.Attributes().InsertString(splunk.DefaultIndexLabel, "myindex") - logRecord.Attributes().InsertString(conventions.AttributeHostName, "myhost") - logRecord.Attributes().InsertString("custom", "custom") + logRecord.Attributes().UpsertString(splunk.DefaultSourceLabel, "myapp") + logRecord.Attributes().UpsertString(splunk.DefaultSourceTypeLabel, "myapp-type") + logRecord.Attributes().UpsertString(splunk.DefaultIndexLabel, "myindex") + logRecord.Attributes().UpsertString(conventions.AttributeHostName, "myhost") + logRecord.Attributes().UpsertString("custom", "custom") logRecord.SetTimestamp(ts) } @@ -241,8 +241,8 @@ func TestConsumeLogsData(t *testing.T) { smallBatch := plog.NewLogs() logRecord := smallBatch.ResourceLogs().AppendEmpty().ScopeLogs().AppendEmpty().LogRecords().AppendEmpty() logRecord.Body().SetStringVal("mylog") - logRecord.Attributes().InsertString(conventions.AttributeHostName, "myhost") - logRecord.Attributes().InsertString("custom", "custom") + logRecord.Attributes().UpsertString(conventions.AttributeHostName, "myhost") + logRecord.Attributes().UpsertString("custom", "custom") logRecord.SetTimestamp(123) tests := []struct { 
name string diff --git a/exporter/splunkhecexporter/logdata_to_splunk_test.go b/exporter/splunkhecexporter/logdata_to_splunk_test.go index 134adefb2862..aa0d1c2fc0db 100644 --- a/exporter/splunkhecexporter/logdata_to_splunk_test.go +++ b/exporter/splunkhecexporter/logdata_to_splunk_test.go @@ -42,10 +42,10 @@ func Test_mapLogRecordToSplunkEvent(t *testing.T) { logRecordFn: func() plog.LogRecord { logRecord := plog.NewLogRecord() logRecord.Body().SetStringVal("mylog") - logRecord.Attributes().InsertString(splunk.DefaultSourceLabel, "myapp") - logRecord.Attributes().InsertString(splunk.DefaultSourceTypeLabel, "myapp-type") - logRecord.Attributes().InsertString(conventions.AttributeHostName, "myhost") - logRecord.Attributes().InsertString("custom", "custom") + logRecord.Attributes().UpsertString(splunk.DefaultSourceLabel, "myapp") + logRecord.Attributes().UpsertString(splunk.DefaultSourceTypeLabel, "myapp-type") + logRecord.Attributes().UpsertString(conventions.AttributeHostName, "myhost") + logRecord.Attributes().UpsertString("custom", "custom") logRecord.SetTimestamp(ts) return logRecord }, @@ -66,10 +66,10 @@ func Test_mapLogRecordToSplunkEvent(t *testing.T) { logRecordFn: func() plog.LogRecord { logRecord := plog.NewLogRecord() logRecord.Body().SetStringVal("mylog") - logRecord.Attributes().InsertString(splunk.DefaultSourceLabel, "myapp") - logRecord.Attributes().InsertString(splunk.DefaultSourceTypeLabel, "myapp-type") - logRecord.Attributes().InsertString(conventions.AttributeHostName, "myhost") - logRecord.Attributes().InsertString("custom", "custom") + logRecord.Attributes().UpsertString(splunk.DefaultSourceLabel, "myapp") + logRecord.Attributes().UpsertString(splunk.DefaultSourceTypeLabel, "myapp-type") + logRecord.Attributes().UpsertString(conventions.AttributeHostName, "myhost") + logRecord.Attributes().UpsertString("custom", "custom") logRecord.SetTimestamp(ts) return logRecord }, @@ -90,7 +90,7 @@ func Test_mapLogRecordToSplunkEvent(t *testing.T) { logRecordFn: func() plog.LogRecord { logRecord := plog.NewLogRecord() logRecord.Body().SetStringVal("mylog") - logRecord.Attributes().InsertString(splunk.HecTokenLabel, "mytoken") + logRecord.Attributes().UpsertString(splunk.HecTokenLabel, "mytoken") logRecord.SetTimestamp(ts) return logRecord }, @@ -111,10 +111,10 @@ func Test_mapLogRecordToSplunkEvent(t *testing.T) { logRecordFn: func() plog.LogRecord { logRecord := plog.NewLogRecord() logRecord.Body().SetStringVal("mylog") - logRecord.Attributes().InsertString(splunk.DefaultSourceLabel, "myapp") - logRecord.Attributes().InsertString(splunk.DefaultSourceTypeLabel, "myapp-type") - logRecord.Attributes().InsertString(conventions.AttributeHostName, "myhost") - logRecord.Attributes().InsertDouble("foo", 123) + logRecord.Attributes().UpsertString(splunk.DefaultSourceLabel, "myapp") + logRecord.Attributes().UpsertString(splunk.DefaultSourceTypeLabel, "myapp-type") + logRecord.Attributes().UpsertString(conventions.AttributeHostName, "myhost") + logRecord.Attributes().UpsertDouble("foo", 123) logRecord.SetTimestamp(ts) return logRecord }, @@ -134,7 +134,7 @@ func Test_mapLogRecordToSplunkEvent(t *testing.T) { logRecordFn: func() plog.LogRecord { logRecord := plog.NewLogRecord() logRecord.Body().SetStringVal("mylog") - logRecord.Attributes().InsertString("custom", "custom") + logRecord.Attributes().UpsertString("custom", "custom") logRecord.SetTimestamp(ts) return logRecord }, @@ -154,11 +154,11 @@ func Test_mapLogRecordToSplunkEvent(t *testing.T) { logRecordFn: func() plog.LogRecord { 
logRecord := plog.NewLogRecord() logRecord.Body().SetStringVal("mylog") - logRecord.Attributes().InsertString("custom", "custom") - logRecord.Attributes().InsertString("mysource", "mysource") - logRecord.Attributes().InsertString("mysourcetype", "mysourcetype") - logRecord.Attributes().InsertString("myindex", "myindex") - logRecord.Attributes().InsertString("myhost", "myhost") + logRecord.Attributes().UpsertString("custom", "custom") + logRecord.Attributes().UpsertString("mysource", "mysource") + logRecord.Attributes().UpsertString("mysourcetype", "mysourcetype") + logRecord.Attributes().UpsertString("myindex", "myindex") + logRecord.Attributes().UpsertString("myhost", "myhost") logRecord.SetSeverityText("DEBUG") logRecord.SetSeverityNumber(plog.SeverityNumberDebug) logRecord.SetTimestamp(ts) @@ -231,10 +231,10 @@ func Test_mapLogRecordToSplunkEvent(t *testing.T) { logRecordFn: func() plog.LogRecord { logRecord := plog.NewLogRecord() logRecord.Body().SetDoubleVal(42) - logRecord.Attributes().InsertString(splunk.DefaultSourceLabel, "myapp") - logRecord.Attributes().InsertString(splunk.DefaultSourceTypeLabel, "myapp-type") - logRecord.Attributes().InsertString(conventions.AttributeHostName, "myhost") - logRecord.Attributes().InsertString("custom", "custom") + logRecord.Attributes().UpsertString(splunk.DefaultSourceLabel, "myapp") + logRecord.Attributes().UpsertString(splunk.DefaultSourceTypeLabel, "myapp-type") + logRecord.Attributes().UpsertString(conventions.AttributeHostName, "myhost") + logRecord.Attributes().UpsertString("custom", "custom") logRecord.SetTimestamp(ts) return logRecord }, @@ -254,10 +254,10 @@ func Test_mapLogRecordToSplunkEvent(t *testing.T) { logRecordFn: func() plog.LogRecord { logRecord := plog.NewLogRecord() logRecord.Body().SetIntVal(42) - logRecord.Attributes().InsertString(splunk.DefaultSourceLabel, "myapp") - logRecord.Attributes().InsertString(splunk.DefaultSourceTypeLabel, "myapp-type") - logRecord.Attributes().InsertString(conventions.AttributeHostName, "myhost") - logRecord.Attributes().InsertString("custom", "custom") + logRecord.Attributes().UpsertString(splunk.DefaultSourceLabel, "myapp") + logRecord.Attributes().UpsertString(splunk.DefaultSourceTypeLabel, "myapp-type") + logRecord.Attributes().UpsertString(conventions.AttributeHostName, "myhost") + logRecord.Attributes().UpsertString("custom", "custom") logRecord.SetTimestamp(ts) return logRecord }, @@ -277,10 +277,10 @@ func Test_mapLogRecordToSplunkEvent(t *testing.T) { logRecordFn: func() plog.LogRecord { logRecord := plog.NewLogRecord() logRecord.Body().SetBoolVal(true) - logRecord.Attributes().InsertString(splunk.DefaultSourceLabel, "myapp") - logRecord.Attributes().InsertString(splunk.DefaultSourceTypeLabel, "myapp-type") - logRecord.Attributes().InsertString(conventions.AttributeHostName, "myhost") - logRecord.Attributes().InsertString("custom", "custom") + logRecord.Attributes().UpsertString(splunk.DefaultSourceLabel, "myapp") + logRecord.Attributes().UpsertString(splunk.DefaultSourceTypeLabel, "myapp-type") + logRecord.Attributes().UpsertString(conventions.AttributeHostName, "myhost") + logRecord.Attributes().UpsertString("custom", "custom") logRecord.SetTimestamp(ts) return logRecord }, @@ -301,13 +301,13 @@ func Test_mapLogRecordToSplunkEvent(t *testing.T) { logRecord := plog.NewLogRecord() attVal := pcommon.NewValueMap() attMap := attVal.MapVal() - attMap.InsertDouble("23", 45) - attMap.InsertString("foo", "bar") + attMap.UpsertDouble("23", 45) + attMap.UpsertString("foo", "bar") 
attVal.CopyTo(logRecord.Body()) - logRecord.Attributes().InsertString(splunk.DefaultSourceLabel, "myapp") - logRecord.Attributes().InsertString(splunk.DefaultSourceTypeLabel, "myapp-type") - logRecord.Attributes().InsertString(conventions.AttributeHostName, "myhost") - logRecord.Attributes().InsertString("custom", "custom") + logRecord.Attributes().UpsertString(splunk.DefaultSourceLabel, "myapp") + logRecord.Attributes().UpsertString(splunk.DefaultSourceTypeLabel, "myapp-type") + logRecord.Attributes().UpsertString(conventions.AttributeHostName, "myhost") + logRecord.Attributes().UpsertString("custom", "custom") logRecord.SetTimestamp(ts) return logRecord }, @@ -328,10 +328,10 @@ func Test_mapLogRecordToSplunkEvent(t *testing.T) { name: "with nil body", logRecordFn: func() plog.LogRecord { logRecord := plog.NewLogRecord() - logRecord.Attributes().InsertString(splunk.DefaultSourceLabel, "myapp") - logRecord.Attributes().InsertString(splunk.DefaultSourceTypeLabel, "myapp-type") - logRecord.Attributes().InsertString(conventions.AttributeHostName, "myhost") - logRecord.Attributes().InsertString("custom", "custom") + logRecord.Attributes().UpsertString(splunk.DefaultSourceLabel, "myapp") + logRecord.Attributes().UpsertString(splunk.DefaultSourceTypeLabel, "myapp-type") + logRecord.Attributes().UpsertString(conventions.AttributeHostName, "myhost") + logRecord.Attributes().UpsertString("custom", "custom") logRecord.SetTimestamp(ts) return logRecord }, @@ -355,10 +355,10 @@ func Test_mapLogRecordToSplunkEvent(t *testing.T) { attArray := attVal.SliceVal() attArray.AppendEmpty().SetStringVal("foo") attVal.CopyTo(logRecord.Body()) - logRecord.Attributes().InsertString(splunk.DefaultSourceLabel, "myapp") - logRecord.Attributes().InsertString(splunk.DefaultSourceTypeLabel, "myapp-type") - logRecord.Attributes().InsertString(conventions.AttributeHostName, "myhost") - logRecord.Attributes().InsertString("custom", "custom") + logRecord.Attributes().UpsertString(splunk.DefaultSourceLabel, "myapp") + logRecord.Attributes().UpsertString(splunk.DefaultSourceTypeLabel, "myapp-type") + logRecord.Attributes().UpsertString(conventions.AttributeHostName, "myhost") + logRecord.Attributes().UpsertString("custom", "custom") logRecord.SetTimestamp(ts) return logRecord }, @@ -384,11 +384,11 @@ func Test_mapLogRecordToSplunkEvent(t *testing.T) { }, logResourceFn: func() pcommon.Resource { resource := pcommon.NewResource() - resource.Attributes().InsertString("resourceAttr1", "some_string") - resource.Attributes().InsertString(splunk.DefaultSourceTypeLabel, "myapp-type-from-resource-attr") - resource.Attributes().InsertString(splunk.DefaultIndexLabel, "index-resource") - resource.Attributes().InsertString(splunk.DefaultSourceLabel, "myapp-resource") - resource.Attributes().InsertString(conventions.AttributeHostName, "myhost-resource") + resource.Attributes().UpsertString("resourceAttr1", "some_string") + resource.Attributes().UpsertString(splunk.DefaultSourceTypeLabel, "myapp-type-from-resource-attr") + resource.Attributes().UpsertString(splunk.DefaultIndexLabel, "index-resource") + resource.Attributes().UpsertString(splunk.DefaultSourceLabel, "myapp-resource") + resource.Attributes().UpsertString(conventions.AttributeHostName, "myhost-resource") return resource }, configDataFn: func() *Config { @@ -409,10 +409,10 @@ func Test_mapLogRecordToSplunkEvent(t *testing.T) { logRecordFn: func() plog.LogRecord { logRecord := plog.NewLogRecord() logRecord.Body().SetStringVal("mylog") - 
logRecord.Attributes().InsertString(splunk.DefaultSourceLabel, "myapp") - logRecord.Attributes().InsertString(splunk.DefaultSourceTypeLabel, "myapp-type") - logRecord.Attributes().InsertString(conventions.AttributeHostName, "myhost") - logRecord.Attributes().InsertString("custom", "custom") + logRecord.Attributes().UpsertString(splunk.DefaultSourceLabel, "myapp") + logRecord.Attributes().UpsertString(splunk.DefaultSourceTypeLabel, "myapp-type") + logRecord.Attributes().UpsertString(conventions.AttributeHostName, "myhost") + logRecord.Attributes().UpsertString("custom", "custom") logRecord.SetSeverityText("DEBUG") logRecord.SetSeverityNumber(plog.SeverityNumberDebug) logRecord.SetTimestamp(ts) diff --git a/exporter/splunkhecexporter/tracedata_to_splunk_test.go b/exporter/splunkhecexporter/tracedata_to_splunk_test.go index 2ec26ccea042..fe053027d0a7 100644 --- a/exporter/splunkhecexporter/tracedata_to_splunk_test.go +++ b/exporter/splunkhecexporter/tracedata_to_splunk_test.go @@ -42,10 +42,10 @@ func Test_traceDataToSplunk(t *testing.T) { traceDataFn: func() ptrace.Traces { traces := ptrace.NewTraces() rs := traces.ResourceSpans().AppendEmpty() - rs.Resource().Attributes().InsertString("com.splunk.source", "myservice") - rs.Resource().Attributes().InsertString("host.name", "myhost") - rs.Resource().Attributes().InsertString("com.splunk.sourcetype", "mysourcetype") - rs.Resource().Attributes().InsertString("com.splunk.index", "myindex") + rs.Resource().Attributes().UpsertString("com.splunk.source", "myservice") + rs.Resource().Attributes().UpsertString("host.name", "myhost") + rs.Resource().Attributes().UpsertString("com.splunk.sourcetype", "mysourcetype") + rs.Resource().Attributes().UpsertString("com.splunk.index", "myindex") ils := rs.ScopeSpans().AppendEmpty() initSpan("myspan", &ts, ils.Spans().AppendEmpty()) return traces @@ -60,10 +60,10 @@ func Test_traceDataToSplunk(t *testing.T) { traceDataFn: func() ptrace.Traces { traces := ptrace.NewTraces() rs := traces.ResourceSpans().AppendEmpty() - rs.Resource().Attributes().InsertString("mysource", "myservice") - rs.Resource().Attributes().InsertString("myhost", "myhost") - rs.Resource().Attributes().InsertString("mysourcetype", "othersourcetype") - rs.Resource().Attributes().InsertString("myindex", "mysourcetype") + rs.Resource().Attributes().UpsertString("mysource", "myservice") + rs.Resource().Attributes().UpsertString("myhost", "myhost") + rs.Resource().Attributes().UpsertString("mysourcetype", "othersourcetype") + rs.Resource().Attributes().UpsertString("myindex", "mysourcetype") ils := rs.ScopeSpans().AppendEmpty() initSpan("myspan", &ts, ils.Spans().AppendEmpty()) return traces @@ -100,7 +100,7 @@ func Test_traceDataToSplunk(t *testing.T) { } func initSpan(name string, ts *pcommon.Timestamp, span ptrace.Span) { - span.Attributes().InsertString("foo", "bar") + span.Attributes().UpsertString("foo", "bar") span.SetName(name) if ts != nil { span.SetStartTimestamp(*ts) @@ -115,15 +115,14 @@ func initSpan(name string, ts *pcommon.Timestamp, span ptrace.Span) { var spanID [8]byte copy(spanID[:], bytes) spanLink.SetSpanID(pcommon.NewSpanID(spanID)) - spanLink.Attributes().InsertInt("foo", 1) - spanLink.Attributes().InsertBool("bar", false) - foobarContents := pcommon.NewValueSlice() - foobarContents.SliceVal().AppendEmpty().SetStringVal("a") - foobarContents.SliceVal().AppendEmpty().SetStringVal("b") - spanLink.Attributes().Insert("foobar", foobarContents) + spanLink.Attributes().UpsertInt("foo", 1) + spanLink.Attributes().UpsertBool("bar", 
false) + foobarContents := spanLink.Attributes().UpsertEmptySlice("foobar") + foobarContents.AppendEmpty().SetStringVal("a") + foobarContents.AppendEmpty().SetStringVal("b") spanEvent := span.Events().AppendEmpty() - spanEvent.Attributes().InsertString("foo", "bar") + spanEvent.Attributes().UpsertString("foo", "bar") spanEvent.SetName("myEvent") if ts != nil { spanEvent.SetTimestamp(*ts + 3) diff --git a/exporter/sumologicexporter/graphite_formatter_test.go b/exporter/sumologicexporter/graphite_formatter_test.go index 242af42a6c0b..0732ee2550b3 100644 --- a/exporter/sumologicexporter/graphite_formatter_test.go +++ b/exporter/sumologicexporter/graphite_formatter_test.go @@ -48,9 +48,9 @@ func TestGraphiteMetricDataTypeIntGauge(t *testing.T) { gf := newGraphiteFormatter("%{cluster}.%{namespace}.%{pod}.%{_metric_}") metric := exampleIntGaugeMetric() - metric.attributes.InsertString("cluster", "my_cluster") - metric.attributes.InsertString("namespace", "default") - metric.attributes.InsertString("pod", "some pod") + metric.attributes.UpsertString("cluster", "my_cluster") + metric.attributes.UpsertString("namespace", "default") + metric.attributes.UpsertString("pod", "some pod") result := gf.metric2String(metric) expected := `my_cluster.default.some_pod.gauge_metric_name 124 1608124661 @@ -62,9 +62,9 @@ func TestGraphiteMetricDataTypeDoubleGauge(t *testing.T) { gf := newGraphiteFormatter("%{cluster}.%{namespace}.%{pod}.%{_metric_}") metric := exampleDoubleGaugeMetric() - metric.attributes.InsertString("cluster", "my_cluster") - metric.attributes.InsertString("namespace", "default") - metric.attributes.InsertString("pod", "some pod") + metric.attributes.UpsertString("cluster", "my_cluster") + metric.attributes.UpsertString("namespace", "default") + metric.attributes.UpsertString("pod", "some pod") result := gf.metric2String(metric) expected := `my_cluster.default.some_pod.gauge_metric_name_double_test 33.4 1608124661 @@ -76,8 +76,8 @@ func TestGraphiteNoattribute(t *testing.T) { gf := newGraphiteFormatter("%{cluster}.%{namespace}.%{pod}.%{_metric_}") metric := exampleDoubleGaugeMetric() - metric.attributes.InsertString("cluster", "my_cluster") - metric.attributes.InsertString("pod", "some pod") + metric.attributes.UpsertString("cluster", "my_cluster") + metric.attributes.UpsertString("pod", "some pod") result := gf.metric2String(metric) expected := `my_cluster..some_pod.gauge_metric_name_double_test 33.4 1608124661 @@ -89,9 +89,9 @@ func TestGraphiteMetricDataTypeIntSum(t *testing.T) { gf := newGraphiteFormatter("%{cluster}.%{namespace}.%{pod}.%{_metric_}") metric := exampleIntSumMetric() - metric.attributes.InsertString("cluster", "my_cluster") - metric.attributes.InsertString("namespace", "default") - metric.attributes.InsertString("pod", "some pod") + metric.attributes.UpsertString("cluster", "my_cluster") + metric.attributes.UpsertString("namespace", "default") + metric.attributes.UpsertString("pod", "some pod") result := gf.metric2String(metric) expected := `my_cluster.default.some_pod.sum_metric_int_test 45 1608124444 @@ -103,9 +103,9 @@ func TestGraphiteMetricDataTypeDoubleSum(t *testing.T) { gf := newGraphiteFormatter("%{cluster}.%{namespace}.%{pod}.%{_metric_}") metric := exampleDoubleSumMetric() - metric.attributes.InsertString("cluster", "my_cluster") - metric.attributes.InsertString("namespace", "default") - metric.attributes.InsertString("pod", "some pod") + metric.attributes.UpsertString("cluster", "my_cluster") + metric.attributes.UpsertString("namespace", "default") + 
metric.attributes.UpsertString("pod", "some pod") result := gf.metric2String(metric) expected := `my_cluster.default.some_pod.sum_metric_double_test 45.6 1618124444 @@ -117,9 +117,9 @@ func TestGraphiteMetricDataTypeSummary(t *testing.T) { gf := newGraphiteFormatter("%{cluster}.%{namespace}.%{pod}.%{_metric_}") metric := exampleSummaryMetric() - metric.attributes.InsertString("cluster", "my_cluster") - metric.attributes.InsertString("namespace", "default") - metric.attributes.InsertString("pod", "some pod") + metric.attributes.UpsertString("cluster", "my_cluster") + metric.attributes.UpsertString("namespace", "default") + metric.attributes.UpsertString("pod", "some pod") result := gf.metric2String(metric) expected := `` @@ -134,9 +134,9 @@ func TestGraphiteMetricDataTypeHistogram(t *testing.T) { gf := newGraphiteFormatter("%{cluster}.%{namespace}.%{pod}.%{_metric_}") metric := exampleHistogramMetric() - metric.attributes.InsertString("cluster", "my_cluster") - metric.attributes.InsertString("namespace", "default") - metric.attributes.InsertString("pod", "some pod") + metric.attributes.UpsertString("cluster", "my_cluster") + metric.attributes.UpsertString("namespace", "default") + metric.attributes.UpsertString("pod", "some pod") result := gf.metric2String(metric) expected := `` diff --git a/exporter/sumologicexporter/sender_test.go b/exporter/sumologicexporter/sender_test.go index 27ebfaab3ce1..ba971b51bf72 100644 --- a/exporter/sumologicexporter/sender_test.go +++ b/exporter/sumologicexporter/sender_test.go @@ -118,12 +118,12 @@ func exampleTwoLogs() []plog.LogRecord { buffer := make([]plog.LogRecord, 2) buffer[0] = plog.NewLogRecord() buffer[0].Body().SetStringVal("Example log") - buffer[0].Attributes().InsertString("key1", "value1") - buffer[0].Attributes().InsertString("key2", "value2") + buffer[0].Attributes().UpsertString("key1", "value1") + buffer[0].Attributes().UpsertString("key2", "value2") buffer[1] = plog.NewLogRecord() buffer[1].Body().SetStringVal("Another example log") - buffer[1].Attributes().InsertString("key1", "value1") - buffer[1].Attributes().InsertString("key2", "value2") + buffer[1].Attributes().UpsertString("key1", "value1") + buffer[1].Attributes().UpsertString("key2", "value2") return buffer } @@ -132,12 +132,12 @@ func exampleTwoDifferentLogs() []plog.LogRecord { buffer := make([]plog.LogRecord, 2) buffer[0] = plog.NewLogRecord() buffer[0].Body().SetStringVal("Example log") - buffer[0].Attributes().InsertString("key1", "value1") - buffer[0].Attributes().InsertString("key2", "value2") + buffer[0].Attributes().UpsertString("key1", "value1") + buffer[0].Attributes().UpsertString("key2", "value2") buffer[1] = plog.NewLogRecord() buffer[1].Body().SetStringVal("Another example log") - buffer[1].Attributes().InsertString("key3", "value3") - buffer[1].Attributes().InsertString("key4", "value4") + buffer[1].Attributes().UpsertString("key3", "value3") + buffer[1].Attributes().UpsertString("key4", "value4") return buffer } @@ -147,14 +147,14 @@ func exampleMultitypeLogs() []plog.LogRecord { attVal := pcommon.NewValueMap() attMap := attVal.MapVal() - attMap.InsertString("lk1", "lv1") - attMap.InsertInt("lk2", 13) + attMap.UpsertString("lk1", "lv1") + attMap.UpsertInt("lk2", 13) buffer[0] = plog.NewLogRecord() attVal.CopyTo(buffer[0].Body()) - buffer[0].Attributes().InsertString("key1", "value1") - buffer[0].Attributes().InsertString("key2", "value2") + buffer[0].Attributes().UpsertString("key1", "value1") + buffer[0].Attributes().UpsertString("key2", "value2") buffer[1] = 
plog.NewLogRecord() @@ -171,8 +171,8 @@ func exampleMultitypeLogs() []plog.LogRecord { intVal.CopyTo(intTgt) attVal.CopyTo(buffer[1].Body()) - buffer[1].Attributes().InsertString("key1", "value1") - buffer[1].Attributes().InsertString("key2", "value2") + buffer[1].Attributes().UpsertString("key1", "value1") + buffer[1].Attributes().UpsertString("key2", "value2") return buffer } @@ -818,9 +818,9 @@ foo=bar metric=gauge_metric_name 245 1608124662` "key2": "value2", }) - test.s.metricBuffer[0].attributes.InsertString("unit", "m/s") - test.s.metricBuffer[0].attributes.InsertString("escape me", "=invalid\n") - test.s.metricBuffer[0].attributes.InsertBool("metric", true) + test.s.metricBuffer[0].attributes.UpsertString("unit", "m/s") + test.s.metricBuffer[0].attributes.UpsertString("escape me", "=invalid\n") + test.s.metricBuffer[0].attributes.UpsertBool("metric", true) _, err := test.s.sendMetrics(context.Background(), flds) assert.NoError(t, err) @@ -854,8 +854,8 @@ gauge_metric_name.. 245 1608124662` "key2": "value2", }) - test.s.metricBuffer[0].attributes.InsertString("unit", "m/s") - test.s.metricBuffer[0].attributes.InsertBool("metric", true) + test.s.metricBuffer[0].attributes.UpsertString("unit", "m/s") + test.s.metricBuffer[0].attributes.UpsertBool("metric", true) _, err := test.s.sendMetrics(context.Background(), flds) assert.NoError(t, err) diff --git a/exporter/tanzuobservabilityexporter/exporter_test.go b/exporter/tanzuobservabilityexporter/exporter_test.go index 38cc25c46a64..e7875e09ea09 100644 --- a/exporter/tanzuobservabilityexporter/exporter_test.go +++ b/exporter/tanzuobservabilityexporter/exporter_test.go @@ -95,9 +95,7 @@ func TestExportTraceDataFullTrace(t *testing.T) { status.SetMessage("an error event occurred") status.CopyTo(clientSpan.Status()) - clientAttrs := pcommon.NewMap() - clientAttrs.InsertString(labelApplication, "test-app") - clientAttrs.CopyTo(clientSpan.Attributes()) + clientSpan.Attributes().UpsertString(labelApplication, "test-app") serverSpan := createSpan( "server", @@ -107,19 +105,17 @@ func TestExportTraceDataFullTrace(t *testing.T) { ) serverSpan.SetKind(ptrace.SpanKindServer) serverSpan.SetTraceState("key=val") - serverAttrs := pcommon.NewMap() - serverAttrs.InsertString(conventions.AttributeServiceName, "the-server") - serverAttrs.InsertString(conventions.AttributeHTTPMethod, "POST") - serverAttrs.InsertInt(conventions.AttributeHTTPStatusCode, 403) - serverAttrs.InsertString(labelSource, "test_source") - serverAttrs.CopyTo(serverSpan.Attributes()) + serverAttrs := serverSpan.Attributes() + serverAttrs.UpsertString(conventions.AttributeServiceName, "the-server") + serverAttrs.UpsertString(conventions.AttributeHTTPMethod, "POST") + serverAttrs.UpsertInt(conventions.AttributeHTTPStatusCode, 403) + serverAttrs.UpsertString(labelSource, "test_source") traces := constructTraces([]ptrace.Span{rootSpan, clientSpan, serverSpan}) - resourceAttrs := pcommon.NewMap() - resourceAttrs.InsertString("resource", "R1") - resourceAttrs.InsertString(conventions.AttributeServiceName, "test-service") - resourceAttrs.InsertString(labelSource, "test-source") - resourceAttrs.CopyTo(traces.ResourceSpans().At(0).Resource().Attributes()) + resourceAttrs := traces.ResourceSpans().At(0).Resource().Attributes() + resourceAttrs.UpsertString("resource", "R1") + resourceAttrs.UpsertString(conventions.AttributeServiceName, "test-service") + resourceAttrs.UpsertString(labelSource, "test-source") expected := []*span{ { diff --git 
a/exporter/tanzuobservabilityexporter/transformer_test.go b/exporter/tanzuobservabilityexporter/transformer_test.go index e912abe9f067..fc009acac29f 100644 --- a/exporter/tanzuobservabilityexporter/transformer_test.go +++ b/exporter/tanzuobservabilityexporter/transformer_test.go @@ -148,13 +148,10 @@ func TestSpanEventsAreTranslatedToSpanLogs(t *testing.T) { span := ptrace.NewSpan() span.SetSpanID(pcommon.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 1})) span.SetTraceID(pcommon.NewTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) - event := ptrace.NewSpanEvent() + event := span.Events().AppendEmpty() event.SetName("eventName") event.SetTimestamp(pcommon.NewTimestampFromTime(now)) - eventAttrs := pcommon.NewMap() - eventAttrs.InsertString("attrKey", "attrVal") - eventAttrs.CopyTo(event.Attributes()) - event.CopyTo(span.Events().AppendEmpty()) + event.Attributes().UpsertString("attrKey", "attrVal") result, err := transform.Span(span) require.NoError(t, err, "transforming span to wavefront format") diff --git a/exporter/tencentcloudlogserviceexporter/logs_exporter_test.go b/exporter/tencentcloudlogserviceexporter/logs_exporter_test.go index 765648dd37db..5d62ae4aea23 100644 --- a/exporter/tencentcloudlogserviceexporter/logs_exporter_test.go +++ b/exporter/tencentcloudlogserviceexporter/logs_exporter_test.go @@ -39,10 +39,10 @@ func createSimpleLogData(numberOfLogs int) plog.Logs { ts := pcommon.Timestamp(int64(i) * time.Millisecond.Nanoseconds()) logRecord := sl.LogRecords().AppendEmpty() logRecord.Body().SetStringVal("mylog") - logRecord.Attributes().InsertString(conventions.AttributeServiceName, "myapp") - logRecord.Attributes().InsertString("my-label", "myapp-type") - logRecord.Attributes().InsertString(conventions.AttributeHostName, "myhost") - logRecord.Attributes().InsertString("custom", "custom") + logRecord.Attributes().UpsertString(conventions.AttributeServiceName, "myapp") + logRecord.Attributes().UpsertString("my-label", "myapp-type") + logRecord.Attributes().UpsertString(conventions.AttributeHostName, "myhost") + logRecord.Attributes().UpsertString("custom", "custom") logRecord.SetTimestamp(ts) } sl.LogRecords().AppendEmpty() diff --git a/exporter/tencentcloudlogserviceexporter/logsdata_to_logservice_test.go b/exporter/tencentcloudlogserviceexporter/logsdata_to_logservice_test.go index 04c2fe6b08dc..8ebbef670a32 100644 --- a/exporter/tencentcloudlogserviceexporter/logsdata_to_logservice_test.go +++ b/exporter/tencentcloudlogserviceexporter/logsdata_to_logservice_test.go @@ -39,33 +39,24 @@ func (kv logKeyValuePairs) Len() int { return len(kv) } func (kv logKeyValuePairs) Swap(i, j int) { kv[i], kv[j] = kv[j], kv[i] } func (kv logKeyValuePairs) Less(i, j int) bool { return kv[i].Key < kv[j].Key } -func getComplexAttributeValueMap() pcommon.Value { - mapVal := pcommon.NewValueMap() - mapValReal := mapVal.MapVal() - mapValReal.InsertBool("result", true) - mapValReal.InsertString("status", "ok") - mapValReal.InsertDouble("value", 1.3) - mapValReal.InsertInt("code", 200) - mapValReal.Insert("null", pcommon.NewValueEmpty()) - arrayVal := pcommon.NewValueSlice() - arrayVal.SliceVal().AppendEmpty().SetStringVal("array") - mapValReal.Insert("array", arrayVal) - - subMapVal := pcommon.NewValueMap() - subMapVal.MapVal().InsertString("data", "hello world") - mapValReal.Insert("map", subMapVal) - - mapValReal.InsertString("status", "ok") - return mapVal +func fillComplexAttributeValueMap(m pcommon.Map) { + m.UpsertBool("result", true) + m.UpsertString("status", "ok") + 
m.UpsertDouble("value", 1.3) + m.UpsertInt("code", 200) + m.UpsertEmpty("null") + m.UpsertEmptySlice("array").AppendEmpty().SetStringVal("array") + m.UpsertEmptyMap("map").UpsertString("data", "hello world") + m.UpsertString("status", "ok") } func createLogData(numberOfLogs int) plog.Logs { logs := plog.NewLogs() logs.ResourceLogs().AppendEmpty() // Add an empty ResourceLogs rl := logs.ResourceLogs().AppendEmpty() - rl.Resource().Attributes().InsertString("resouceKey", "resourceValue") - rl.Resource().Attributes().InsertString(conventions.AttributeServiceName, "test-log-service-exporter") - rl.Resource().Attributes().InsertString(conventions.AttributeHostName, "test-host") + rl.Resource().Attributes().UpsertString("resouceKey", "resourceValue") + rl.Resource().Attributes().UpsertString(conventions.AttributeServiceName, "test-log-service-exporter") + rl.Resource().Attributes().UpsertString(conventions.AttributeHostName, "test-host") sl := rl.ScopeLogs().AppendEmpty() sl.Scope().SetName("collector") sl.Scope().SetVersion("v0.1.0") @@ -85,22 +76,19 @@ func createLogData(numberOfLogs int) plog.Logs { case 4: logRecord.Body().SetStringVal("4") case 5: - - logRecord.Attributes().Insert("map-value", getComplexAttributeValueMap()) + fillComplexAttributeValueMap(logRecord.Attributes().UpsertEmptyMap("map-value")) logRecord.Body().SetStringVal("log contents") case 6: - arrayVal := pcommon.NewValueSlice() - arrayVal.SliceVal().AppendEmpty().SetStringVal("array") - logRecord.Attributes().Insert("array-value", arrayVal) + logRecord.Attributes().UpsertEmptySlice("array-value").AppendEmpty().SetStringVal("array") logRecord.Body().SetStringVal("log contents") default: logRecord.Body().SetStringVal("log contents") } - logRecord.Attributes().InsertString(conventions.AttributeServiceName, "myapp") - logRecord.Attributes().InsertString("my-label", "myapp-type") - logRecord.Attributes().InsertString(conventions.AttributeHostName, "myhost") - logRecord.Attributes().InsertString("custom", "custom") - logRecord.Attributes().Insert("null-value", pcommon.NewValueEmpty()) + logRecord.Attributes().UpsertString(conventions.AttributeServiceName, "myapp") + logRecord.Attributes().UpsertString("my-label", "myapp-type") + logRecord.Attributes().UpsertString(conventions.AttributeHostName, "myhost") + logRecord.Attributes().UpsertString("custom", "custom") + logRecord.Attributes().UpsertEmpty("null-value") logRecord.SetTimestamp(ts) } diff --git a/internal/coreinternal/processor/filterexpr/matcher_test.go b/internal/coreinternal/processor/filterexpr/matcher_test.go index 4829ab0017ff..3ab1d609ec92 100644 --- a/internal/coreinternal/processor/filterexpr/matcher_test.go +++ b/internal/coreinternal/processor/filterexpr/matcher_test.go @@ -115,8 +115,8 @@ func TestMatchIntGaugeDataPointByMetricAndSecondPointLabelValue(t *testing.T) { m.SetDataType(pmetric.MetricDataTypeGauge) dps := m.Gauge().DataPoints() - dps.AppendEmpty().Attributes().InsertString("foo", "bar") - dps.AppendEmpty().Attributes().InsertString("baz", "glarch") + dps.AppendEmpty().Attributes().UpsertString("foo", "bar") + dps.AppendEmpty().Attributes().UpsertString("baz", "glarch") matched, err := matcher.MatchMetric(m) assert.NoError(t, err) diff --git a/internal/coreinternal/processor/filterlog/filterlog_test.go b/internal/coreinternal/processor/filterlog/filterlog_test.go index 897fc4b65cb8..8f18f5c2d502 100644 --- a/internal/coreinternal/processor/filterlog/filterlog_test.go +++ b/internal/coreinternal/processor/filterlog/filterlog_test.go @@ -195,7 +195,7 @@ 
func TestLogRecord_Matching_True(t *testing.T) { } lr := plog.NewLogRecord() - lr.Attributes().InsertString("abc", "def") + lr.Attributes().UpsertString("abc", "def") lr.Body().SetStringVal("AUTHENTICATION FAILED") lr.SetSeverityText("debug") lr.SetSeverityNumber(plog.SeverityNumberDebug) diff --git a/internal/coreinternal/processor/filtermatcher/filtermatcher_test.go b/internal/coreinternal/processor/filtermatcher/filtermatcher_test.go index 203a88329e1a..f29b69a31bbe 100644 --- a/internal/coreinternal/processor/filtermatcher/filtermatcher_test.go +++ b/internal/coreinternal/processor/filtermatcher/filtermatcher_test.go @@ -367,8 +367,8 @@ func Test_Matching_True(t *testing.T) { }) resource := pcommon.NewResource() - resource.Attributes().InsertString(conventions.AttributeServiceName, "svcA") - resource.Attributes().InsertString("resString", "arithmetic") + resource.Attributes().UpsertString(conventions.AttributeServiceName, "svcA") + resource.Attributes().UpsertString("resString", "arithmetic") library := pcommon.NewInstrumentationScope() library.SetName("lib") diff --git a/internal/coreinternal/processor/filterspan/filterspan_test.go b/internal/coreinternal/processor/filterspan/filterspan_test.go index aec2ee298a83..3f9aca72a866 100644 --- a/internal/coreinternal/processor/filterspan/filterspan_test.go +++ b/internal/coreinternal/processor/filterspan/filterspan_test.go @@ -222,15 +222,15 @@ func TestSpan_Matching_True(t *testing.T) { span := ptrace.NewSpan() span.SetName("spanName") - span.Attributes().InsertString("keyString", "arithmetic") - span.Attributes().InsertInt("keyInt", 123) - span.Attributes().InsertDouble("keyDouble", 3245.6) - span.Attributes().InsertBool("keyBool", true) - span.Attributes().InsertString("keyExists", "present") + span.Attributes().UpsertString("keyString", "arithmetic") + span.Attributes().UpsertInt("keyInt", 123) + span.Attributes().UpsertDouble("keyDouble", 3245.6) + span.Attributes().UpsertBool("keyBool", true) + span.Attributes().UpsertString("keyExists", "present") assert.NotNil(t, span) resource := pcommon.NewResource() - resource.Attributes().InsertString(conventions.AttributeServiceName, "svcA") + resource.Attributes().UpsertString(conventions.AttributeServiceName, "svcA") library := pcommon.NewInstrumentationScope() diff --git a/internal/coreinternal/testdata/log.go b/internal/coreinternal/testdata/log.go index 5937c295d942..11a8f73c6840 100644 --- a/internal/coreinternal/testdata/log.go +++ b/internal/coreinternal/testdata/log.go @@ -87,8 +87,8 @@ func fillLogOne(log plog.LogRecord) { log.SetTraceID(pcommon.NewTraceID([16]byte{0x08, 0x04, 0x02, 0x01})) attrs := log.Attributes() - attrs.InsertString("app", "server") - attrs.InsertInt("instance_num", 1) + attrs.UpsertString("app", "server") + attrs.UpsertInt("instance_num", 1) log.Body().SetStringVal("This is a log message") } @@ -100,8 +100,8 @@ func fillLogTwo(log plog.LogRecord) { log.SetSeverityText("Info") attrs := log.Attributes() - attrs.InsertString("customer", "acme") - attrs.InsertString("env", "dev") + attrs.UpsertString("customer", "acme") + attrs.UpsertString("env", "dev") log.Body().SetStringVal("something happened") } diff --git a/pkg/stanza/adapter/frompdataconverter_test.go b/pkg/stanza/adapter/frompdataconverter_test.go index 73d12f9d97a8..5b3cff3484d9 100644 --- a/pkg/stanza/adapter/frompdataconverter_test.go +++ b/pkg/stanza/adapter/frompdataconverter_test.go @@ -47,25 +47,15 @@ func BenchmarkConvertFromPdataComplex(b *testing.B) { } } -func baseMap() pcommon.Map { - obj := 
pcommon.NewMap() - arr := pcommon.NewValueSlice() - arr.SliceVal().AppendEmpty().SetStringVal("666") - arr.SliceVal().AppendEmpty().SetStringVal("777") - obj.Insert("slice", arr) - obj.InsertBool("bool", true) - obj.InsertInt("int", 123) - obj.InsertDouble("double", 12.34) - obj.InsertString("string", "hello") - obj.InsertBytes("bytes", pcommon.NewImmutableByteSlice([]byte{0xa1, 0xf0, 0x02, 0xff})) - return obj -} - -func baseMapValue() pcommon.Value { - v := pcommon.NewValueMap() - baseMap := baseMap() - baseMap.CopyTo(v.MapVal()) - return v +func fillBaseMap(m pcommon.Map) { + arr := m.UpsertEmptySlice("slice") + arr.AppendEmpty().SetStringVal("666") + arr.AppendEmpty().SetStringVal("777") + m.UpsertBool("bool", true) + m.UpsertInt("int", 123) + m.UpsertDouble("double", 12.34) + m.UpsertString("string", "hello") + m.UpsertBytes("bytes", pcommon.NewImmutableByteSlice([]byte{0xa1, 0xf0, 0x02, 0xff})) } func complexPdataForNDifferentHosts(count int, n int) plog.Logs { @@ -76,10 +66,9 @@ func complexPdataForNDifferentHosts(count int, n int) plog.Logs { rls := logs.AppendEmpty() resource := rls.Resource() - attr := baseMap() - attr.Insert("object", baseMapValue()) - attr.InsertString("host", fmt.Sprintf("host-%d", i%n)) - attr.CopyTo(resource.Attributes()) + fillBaseMap(resource.Attributes()) + fillBaseMap(resource.Attributes().UpsertEmptyMap("object")) + resource.Attributes().UpsertString("host", fmt.Sprintf("host-%d", i%n)) scopeLog := rls.ScopeLogs().AppendEmpty() scopeLog.Scope().SetName("myScope") @@ -96,17 +85,16 @@ func complexPdataForNDifferentHosts(count int, n int) plog.Logs { lr.SetTimestamp(pcommon.NewTimestampFromTime(t)) lr.SetObservedTimestamp(pcommon.NewTimestampFromTime(t)) - attr.Remove("double") - attr.Remove("host") - attr.CopyTo(lr.Attributes()) - - body := baseMapValue() - level2 := baseMapValue() - level2.MapVal().Remove("bytes") - level1 := baseMapValue() - level1.MapVal().Insert("object", level2) - body.MapVal().Insert("object", level1) - body.CopyTo(lr.Body()) + resource.Attributes().CopyTo(lr.Attributes()) + lr.Attributes().Remove("double") + lr.Attributes().Remove("host") + + fillBaseMap(lr.Body().SetEmptyMapVal()) + level1 := lr.Body().MapVal().UpsertEmptyMap("object") + fillBaseMap(level1) + level2 := level1.UpsertEmptyMap("object") + fillBaseMap(level2) + level2.Remove("bytes") } return pLogs } diff --git a/pkg/telemetryquerylanguage/functions/tqlotel/func_delete_matching_keys_test.go b/pkg/telemetryquerylanguage/functions/tqlotel/func_delete_matching_keys_test.go index af8ef8d87fed..6c0570712e92 100644 --- a/pkg/telemetryquerylanguage/functions/tqlotel/func_delete_matching_keys_test.go +++ b/pkg/telemetryquerylanguage/functions/tqlotel/func_delete_matching_keys_test.go @@ -26,9 +26,9 @@ import ( func Test_deleteMatchingKeys(t *testing.T) { input := pcommon.NewMap() - input.InsertString("test", "hello world") - input.InsertInt("test2", 3) - input.InsertBool("test3", true) + input.UpsertString("test", "hello world") + input.UpsertInt("test2", 3) + input.UpsertBool("test3", true) target := &tql.StandardGetSetter{ Getter: func(ctx tql.TransformContext) interface{} { @@ -57,7 +57,7 @@ func Test_deleteMatchingKeys(t *testing.T) { pattern: "\\d$", want: func(expectedMap pcommon.Map) { expectedMap.Clear() - expectedMap.InsertString("test", "hello world") + expectedMap.UpsertString("test", "hello world") }, }, { @@ -66,9 +66,9 @@ func Test_deleteMatchingKeys(t *testing.T) { pattern: "not a matching pattern", want: func(expectedMap pcommon.Map) { expectedMap.Clear() - 
expectedMap.InsertString("test", "hello world") - expectedMap.InsertInt("test2", 3) - expectedMap.InsertBool("test3", true) + expectedMap.UpsertString("test", "hello world") + expectedMap.UpsertInt("test2", 3) + expectedMap.UpsertBool("test3", true) }, }, } diff --git a/pkg/telemetryquerylanguage/functions/tqlotel/func_limit_test.go b/pkg/telemetryquerylanguage/functions/tqlotel/func_limit_test.go index 208d897e5ab4..6fb597813578 100644 --- a/pkg/telemetryquerylanguage/functions/tqlotel/func_limit_test.go +++ b/pkg/telemetryquerylanguage/functions/tqlotel/func_limit_test.go @@ -26,9 +26,9 @@ import ( func Test_limit(t *testing.T) { input := pcommon.NewMap() - input.InsertString("test", "hello world") - input.InsertInt("test2", 3) - input.InsertBool("test3", true) + input.UpsertString("test", "hello world") + input.UpsertInt("test2", 3) + input.UpsertBool("test3", true) target := &tql.StandardGetSetter{ Getter: func(ctx tql.TransformContext) interface{} { @@ -52,7 +52,7 @@ func Test_limit(t *testing.T) { limit: int64(1), want: func(expectedMap pcommon.Map) { expectedMap.Clear() - expectedMap.InsertString("test", "hello world") + expectedMap.UpsertString("test", "hello world") }, }, { @@ -69,9 +69,9 @@ func Test_limit(t *testing.T) { limit: int64(100), want: func(expectedMap pcommon.Map) { expectedMap.Clear() - expectedMap.InsertString("test", "hello world") - expectedMap.InsertInt("test2", 3) - expectedMap.InsertBool("test3", true) + expectedMap.UpsertString("test", "hello world") + expectedMap.UpsertInt("test2", 3) + expectedMap.UpsertBool("test3", true) }, }, { @@ -80,9 +80,9 @@ func Test_limit(t *testing.T) { limit: int64(3), want: func(expectedMap pcommon.Map) { expectedMap.Clear() - expectedMap.InsertString("test", "hello world") - expectedMap.InsertInt("test2", 3) - expectedMap.InsertBool("test3", true) + expectedMap.UpsertString("test", "hello world") + expectedMap.UpsertInt("test2", 3) + expectedMap.UpsertBool("test3", true) }, }, } diff --git a/pkg/translator/opencensus/oc_to_metrics.go b/pkg/translator/opencensus/oc_to_metrics.go index ede0a7cc5d50..60586d82e8ae 100644 --- a/pkg/translator/opencensus/oc_to_metrics.go +++ b/pkg/translator/opencensus/oc_to_metrics.go @@ -221,7 +221,7 @@ func fillAttributesMap(ocLabelsKeys []*ocmetrics.LabelKey, ocLabelValues []*ocme if !ocLabelValues[i].GetHasValue() { continue } - attributesMap.InsertString(ocLabelsKeys[i].Key, ocLabelValues[i].Value) + attributesMap.UpsertString(ocLabelsKeys[i].Key, ocLabelValues[i].Value) } } diff --git a/processor/attributesprocessor/attributes_metric_test.go b/processor/attributesprocessor/attributes_metric_test.go index f9c805743327..f86a3280e201 100644 --- a/processor/attributesprocessor/attributes_metric_test.go +++ b/processor/attributesprocessor/attributes_metric_test.go @@ -53,7 +53,7 @@ func runIndividualMetricTestCase(t *testing.T, mt metricTestCase, mp component.M func generateMetricData(resourceName string, attrs map[string]interface{}) pmetric.Metrics { md := pmetric.NewMetrics() res := md.ResourceMetrics().AppendEmpty() - res.Resource().Attributes().InsertString("name", resourceName) + res.Resource().Attributes().UpsertString("name", resourceName) sl := res.ScopeMetrics().AppendEmpty() m := sl.Metrics().AppendEmpty() diff --git a/processor/groupbyattrsprocessor/attribute_groups_test.go b/processor/groupbyattrsprocessor/attribute_groups_test.go index 199a5b371381..f06855885cf2 100644 --- a/processor/groupbyattrsprocessor/attribute_groups_test.go +++ 
b/processor/groupbyattrsprocessor/attribute_groups_test.go @@ -74,7 +74,7 @@ func TestResourceAttributeScenarios(t *testing.T) { name: "When the same key is present at Resource and Record level, the latter value should be used", baseResource: simpleResource(), fillRecordAttributesFun: func(attributeMap pcommon.Map) { - attributeMap.InsertString("somekey1", "replaced-value") + attributeMap.UpsertString("somekey1", "replaced-value") }, fillExpectedResourceFun: func(baseResource pcommon.Resource, expectedResource pcommon.Resource) { baseResource.CopyTo(expectedResource) @@ -91,10 +91,10 @@ func TestResourceAttributeScenarios(t *testing.T) { name: "Empty Resource", baseResource: pcommon.NewResource(), fillRecordAttributesFun: func(attributeMap pcommon.Map) { - attributeMap.InsertString("somekey1", "some-value") + attributeMap.UpsertString("somekey1", "some-value") }, fillExpectedResourceFun: func(_ pcommon.Resource, expectedResource pcommon.Resource) { - expectedResource.Attributes().InsertString("somekey1", "some-value") + expectedResource.Attributes().UpsertString("somekey1", "some-value") }, }, { diff --git a/processor/resourcedetectionprocessor/internal/aws/ecs/ecs.go b/processor/resourcedetectionprocessor/internal/aws/ecs/ecs.go index 08eda53fb7f2..6575cc557664 100644 --- a/processor/resourcedetectionprocessor/internal/aws/ecs/ecs.go +++ b/processor/resourcedetectionprocessor/internal/aws/ecs/ecs.go @@ -71,36 +71,36 @@ func (d *Detector) Detect(context.Context) (resource pcommon.Resource, schemaURL } attr := res.Attributes() - attr.InsertString(conventions.AttributeCloudProvider, conventions.AttributeCloudProviderAWS) - attr.InsertString(conventions.AttributeCloudPlatform, conventions.AttributeCloudPlatformAWSECS) - attr.InsertString(conventions.AttributeAWSECSTaskARN, tmdeResp.TaskARN) - attr.InsertString(conventions.AttributeAWSECSTaskFamily, tmdeResp.Family) - attr.InsertString(conventions.AttributeAWSECSTaskRevision, tmdeResp.Revision) + attr.UpsertString(conventions.AttributeCloudProvider, conventions.AttributeCloudProviderAWS) + attr.UpsertString(conventions.AttributeCloudPlatform, conventions.AttributeCloudPlatformAWSECS) + attr.UpsertString(conventions.AttributeAWSECSTaskARN, tmdeResp.TaskARN) + attr.UpsertString(conventions.AttributeAWSECSTaskFamily, tmdeResp.Family) + attr.UpsertString(conventions.AttributeAWSECSTaskRevision, tmdeResp.Revision) region, account := parseRegionAndAccount(tmdeResp.TaskARN) if account != "" { - attr.InsertString(conventions.AttributeCloudAccountID, account) + attr.UpsertString(conventions.AttributeCloudAccountID, account) } if region != "" { - attr.InsertString(conventions.AttributeCloudRegion, region) + attr.UpsertString(conventions.AttributeCloudRegion, region) } // TMDE returns the the cluster short name or ARN, so we need to construct the ARN if necessary - attr.InsertString(conventions.AttributeAWSECSClusterARN, constructClusterArn(tmdeResp.Cluster, region, account)) + attr.UpsertString(conventions.AttributeAWSECSClusterARN, constructClusterArn(tmdeResp.Cluster, region, account)) // The Availability Zone is not available in all Fargate runtimes if tmdeResp.AvailabilityZone != "" { - attr.InsertString(conventions.AttributeCloudAvailabilityZone, tmdeResp.AvailabilityZone) + attr.UpsertString(conventions.AttributeCloudAvailabilityZone, tmdeResp.AvailabilityZone) } // The launch type and log data attributes are only available in TMDE v4 switch lt := strings.ToLower(tmdeResp.LaunchType); lt { case "ec2": - 
attr.InsertString(conventions.AttributeAWSECSLaunchtype, "ec2") + attr.UpsertString(conventions.AttributeAWSECSLaunchtype, "ec2") case "fargate": - attr.InsertString(conventions.AttributeAWSECSLaunchtype, "fargate") + attr.UpsertString(conventions.AttributeAWSECSLaunchtype, "fargate") } selfMetaData, err := d.provider.FetchContainerMetadata() diff --git a/processor/resourcedetectionprocessor/internal/aws/eks/detector.go b/processor/resourcedetectionprocessor/internal/aws/eks/detector.go index b1782a5b4610..d2b86d4e2dca 100644 --- a/processor/resourcedetectionprocessor/internal/aws/eks/detector.go +++ b/processor/resourcedetectionprocessor/internal/aws/eks/detector.go @@ -77,8 +77,8 @@ func (detector *detector) Detect(ctx context.Context) (resource pcommon.Resource } attr := res.Attributes() - attr.InsertString(conventions.AttributeCloudProvider, conventions.AttributeCloudProviderAWS) - attr.InsertString(conventions.AttributeCloudPlatform, conventions.AttributeCloudPlatformAWSEKS) + attr.UpsertString(conventions.AttributeCloudProvider, conventions.AttributeCloudProviderAWS) + attr.UpsertString(conventions.AttributeCloudPlatform, conventions.AttributeCloudPlatformAWSEKS) return res, conventions.SchemaURL, nil } diff --git a/processor/resourcedetectionprocessor/internal/aws/elasticbeanstalk/elasticbeanstalk.go b/processor/resourcedetectionprocessor/internal/aws/elasticbeanstalk/elasticbeanstalk.go index 8a522b92f141..be7c43a1d490 100644 --- a/processor/resourcedetectionprocessor/internal/aws/elasticbeanstalk/elasticbeanstalk.go +++ b/processor/resourcedetectionprocessor/internal/aws/elasticbeanstalk/elasticbeanstalk.go @@ -77,10 +77,10 @@ func (d Detector) Detect(context.Context) (resource pcommon.Resource, schemaURL } attr := res.Attributes() - attr.InsertString(conventions.AttributeCloudProvider, conventions.AttributeCloudProviderAWS) - attr.InsertString(conventions.AttributeCloudPlatform, conventions.AttributeCloudPlatformAWSElasticBeanstalk) - attr.InsertString(conventions.AttributeServiceInstanceID, strconv.Itoa(ebmd.DeploymentID)) - attr.InsertString(conventions.AttributeDeploymentEnvironment, ebmd.EnvironmentName) - attr.InsertString(conventions.AttributeServiceVersion, ebmd.VersionLabel) + attr.UpsertString(conventions.AttributeCloudProvider, conventions.AttributeCloudProviderAWS) + attr.UpsertString(conventions.AttributeCloudPlatform, conventions.AttributeCloudPlatformAWSElasticBeanstalk) + attr.UpsertString(conventions.AttributeServiceInstanceID, strconv.Itoa(ebmd.DeploymentID)) + attr.UpsertString(conventions.AttributeDeploymentEnvironment, ebmd.EnvironmentName) + attr.UpsertString(conventions.AttributeServiceVersion, ebmd.VersionLabel) return res, conventions.SchemaURL, nil } diff --git a/receiver/activedirectorydsreceiver/internal/metadata/generated_metrics.go b/receiver/activedirectorydsreceiver/internal/metadata/generated_metrics.go index c14a05fa2595..40b9a1783a97 100644 --- a/receiver/activedirectorydsreceiver/internal/metadata/generated_metrics.go +++ b/receiver/activedirectorydsreceiver/internal/metadata/generated_metrics.go @@ -311,7 +311,7 @@ func (m *metricActiveDirectoryDsBindRate) recordDataPoint(start pcommon.Timestam dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().InsertString("type", bindTypeAttributeValue) + dp.Attributes().UpsertString("type", bindTypeAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. 
@@ -666,7 +666,7 @@ func (m *metricActiveDirectoryDsOperationRate) recordDataPoint(start pcommon.Tim dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().InsertString("type", operationTypeAttributeValue) + dp.Attributes().UpsertString("type", operationTypeAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -719,8 +719,8 @@ func (m *metricActiveDirectoryDsReplicationNetworkIo) recordDataPoint(start pcom dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("direction", directionAttributeValue) - dp.Attributes().InsertString("type", networkDataTypeAttributeValue) + dp.Attributes().UpsertString("direction", directionAttributeValue) + dp.Attributes().UpsertString("type", networkDataTypeAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -773,7 +773,7 @@ func (m *metricActiveDirectoryDsReplicationObjectRate) recordDataPoint(start pco dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().InsertString("direction", directionAttributeValue) + dp.Attributes().UpsertString("direction", directionAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -877,7 +877,7 @@ func (m *metricActiveDirectoryDsReplicationPropertyRate) recordDataPoint(start p dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().InsertString("direction", directionAttributeValue) + dp.Attributes().UpsertString("direction", directionAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -981,7 +981,7 @@ func (m *metricActiveDirectoryDsReplicationSyncRequestCount) recordDataPoint(sta dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("result", syncResultAttributeValue) + dp.Attributes().UpsertString("result", syncResultAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -1034,8 +1034,8 @@ func (m *metricActiveDirectoryDsReplicationValueRate) recordDataPoint(start pcom dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().InsertString("direction", directionAttributeValue) - dp.Attributes().InsertString("type", valueTypeAttributeValue) + dp.Attributes().UpsertString("direction", directionAttributeValue) + dp.Attributes().UpsertString("type", valueTypeAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -1139,7 +1139,7 @@ func (m *metricActiveDirectoryDsSuboperationRate) recordDataPoint(start pcommon. dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().InsertString("type", suboperationTypeAttributeValue) + dp.Attributes().UpsertString("type", suboperationTypeAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. 
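Note on the semantics at play in the generated code above and below: Insert* writes only when the key is absent, while Upsert* always writes, overwriting any existing value. Since every recordDataPoint sets attributes on a freshly appended data point, the two coincide there. A minimal standalone sketch (not part of the diff) illustrating the difference:

    package main

    import (
    	"fmt"

    	"go.opentelemetry.io/collector/pdata/pcommon"
    )

    func main() {
    	m := pcommon.NewMap()
    	m.UpsertString("key", "first")  // key absent: adds it
    	m.UpsertString("key", "second") // key present: overwrites
    	// The old m.InsertString("key", "second") would have been a
    	// silent no-op here, leaving "first" in place.
    	v, _ := m.Get("key")
    	fmt.Println(v.StringVal()) // prints "second"
    }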
diff --git a/receiver/apachereceiver/internal/metadata/generated_metrics.go b/receiver/apachereceiver/internal/metadata/generated_metrics.go index 114db8bbae21..b1d5af0f9180 100644 --- a/receiver/apachereceiver/internal/metadata/generated_metrics.go +++ b/receiver/apachereceiver/internal/metadata/generated_metrics.go @@ -167,7 +167,7 @@ func (m *metricApacheCurrentConnections) recordDataPoint(start pcommon.Timestamp dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("server_name", serverNameAttributeValue) + dp.Attributes().UpsertString("server_name", serverNameAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -220,7 +220,7 @@ func (m *metricApacheRequests) recordDataPoint(start pcommon.Timestamp, ts pcomm dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("server_name", serverNameAttributeValue) + dp.Attributes().UpsertString("server_name", serverNameAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -273,8 +273,8 @@ func (m *metricApacheScoreboard) recordDataPoint(start pcommon.Timestamp, ts pco dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("server_name", serverNameAttributeValue) - dp.Attributes().InsertString("state", scoreboardStateAttributeValue) + dp.Attributes().UpsertString("server_name", serverNameAttributeValue) + dp.Attributes().UpsertString("state", scoreboardStateAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -327,7 +327,7 @@ func (m *metricApacheTraffic) recordDataPoint(start pcommon.Timestamp, ts pcommo dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("server_name", serverNameAttributeValue) + dp.Attributes().UpsertString("server_name", serverNameAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -380,7 +380,7 @@ func (m *metricApacheUptime) recordDataPoint(start pcommon.Timestamp, ts pcommon dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("server_name", serverNameAttributeValue) + dp.Attributes().UpsertString("server_name", serverNameAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -433,8 +433,8 @@ func (m *metricApacheWorkers) recordDataPoint(start pcommon.Timestamp, ts pcommo dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("server_name", serverNameAttributeValue) - dp.Attributes().InsertString("state", workersStateAttributeValue) + dp.Attributes().UpsertString("server_name", serverNameAttributeValue) + dp.Attributes().UpsertString("state", workersStateAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. 
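The recordDataPoint methods in these generated files all follow the same shape, which is why the swap is safe: the attribute map belongs to a data point appended moments earlier and has no keys yet. A hand-written sketch of that shape (the function name and attribute key are hypothetical, not taken from the diff):

    package metadata

    import (
    	"go.opentelemetry.io/collector/pdata/pcommon"
    	"go.opentelemetry.io/collector/pdata/pmetric"
    )

    func recordExampleDataPoint(dps pmetric.NumberDataPointSlice, start, ts pcommon.Timestamp, val int64, serverName string) {
    	dp := dps.AppendEmpty() // fresh point, empty attribute map
    	dp.SetStartTimestamp(start)
    	dp.SetTimestamp(ts)
    	dp.SetIntVal(val)
    	// Upsert and Insert coincide on an empty map; Upsert is the
    	// surviving API.
    	dp.Attributes().UpsertString("server_name", serverName)
    }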
diff --git a/receiver/bigipreceiver/internal/metadata/generated_metrics.go b/receiver/bigipreceiver/internal/metadata/generated_metrics.go index ba2980888ef9..2212ad10edcb 100644 --- a/receiver/bigipreceiver/internal/metadata/generated_metrics.go +++ b/receiver/bigipreceiver/internal/metadata/generated_metrics.go @@ -263,7 +263,7 @@ func (m *metricBigipNodeAvailability) recordDataPoint(start pcommon.Timestamp, t dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("status", availabilityStatusAttributeValue) + dp.Attributes().UpsertString("status", availabilityStatusAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -367,7 +367,7 @@ func (m *metricBigipNodeDataTransmitted) recordDataPoint(start pcommon.Timestamp dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("direction", directionAttributeValue) + dp.Attributes().UpsertString("direction", directionAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -418,7 +418,7 @@ func (m *metricBigipNodeEnabled) recordDataPoint(start pcommon.Timestamp, ts pco dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("status", enabledStatusAttributeValue) + dp.Attributes().UpsertString("status", enabledStatusAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -471,7 +471,7 @@ func (m *metricBigipNodePacketCount) recordDataPoint(start pcommon.Timestamp, ts dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("direction", directionAttributeValue) + dp.Attributes().UpsertString("direction", directionAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -624,7 +624,7 @@ func (m *metricBigipPoolAvailability) recordDataPoint(start pcommon.Timestamp, t dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("status", availabilityStatusAttributeValue) + dp.Attributes().UpsertString("status", availabilityStatusAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -728,7 +728,7 @@ func (m *metricBigipPoolDataTransmitted) recordDataPoint(start pcommon.Timestamp dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("direction", directionAttributeValue) + dp.Attributes().UpsertString("direction", directionAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -779,7 +779,7 @@ func (m *metricBigipPoolEnabled) recordDataPoint(start pcommon.Timestamp, ts pco dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("status", enabledStatusAttributeValue) + dp.Attributes().UpsertString("status", enabledStatusAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. 
@@ -832,7 +832,7 @@ func (m *metricBigipPoolMemberCount) recordDataPoint(start pcommon.Timestamp, ts dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("status", activeStatusAttributeValue) + dp.Attributes().UpsertString("status", activeStatusAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -885,7 +885,7 @@ func (m *metricBigipPoolPacketCount) recordDataPoint(start pcommon.Timestamp, ts dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("direction", directionAttributeValue) + dp.Attributes().UpsertString("direction", directionAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -987,7 +987,7 @@ func (m *metricBigipPoolMemberAvailability) recordDataPoint(start pcommon.Timest dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("status", availabilityStatusAttributeValue) + dp.Attributes().UpsertString("status", availabilityStatusAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -1091,7 +1091,7 @@ func (m *metricBigipPoolMemberDataTransmitted) recordDataPoint(start pcommon.Tim dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("direction", directionAttributeValue) + dp.Attributes().UpsertString("direction", directionAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -1142,7 +1142,7 @@ func (m *metricBigipPoolMemberEnabled) recordDataPoint(start pcommon.Timestamp, dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("status", enabledStatusAttributeValue) + dp.Attributes().UpsertString("status", enabledStatusAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -1195,7 +1195,7 @@ func (m *metricBigipPoolMemberPacketCount) recordDataPoint(start pcommon.Timesta dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("direction", directionAttributeValue) + dp.Attributes().UpsertString("direction", directionAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -1348,7 +1348,7 @@ func (m *metricBigipVirtualServerAvailability) recordDataPoint(start pcommon.Tim dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("status", availabilityStatusAttributeValue) + dp.Attributes().UpsertString("status", availabilityStatusAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -1452,7 +1452,7 @@ func (m *metricBigipVirtualServerDataTransmitted) recordDataPoint(start pcommon. dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("direction", directionAttributeValue) + dp.Attributes().UpsertString("direction", directionAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. 
@@ -1503,7 +1503,7 @@ func (m *metricBigipVirtualServerEnabled) recordDataPoint(start pcommon.Timestam dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("status", enabledStatusAttributeValue) + dp.Attributes().UpsertString("status", enabledStatusAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -1556,7 +1556,7 @@ func (m *metricBigipVirtualServerPacketCount) recordDataPoint(start pcommon.Time dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("direction", directionAttributeValue) + dp.Attributes().UpsertString("direction", directionAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. diff --git a/receiver/chronyreceiver/internal/metadata/generated_metrics.go b/receiver/chronyreceiver/internal/metadata/generated_metrics.go index c9880002618f..cbddf6cbaba2 100644 --- a/receiver/chronyreceiver/internal/metadata/generated_metrics.go +++ b/receiver/chronyreceiver/internal/metadata/generated_metrics.go @@ -109,7 +109,7 @@ func (m *metricNtpFrequencyOffset) recordDataPoint(start pcommon.Timestamp, ts p dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().InsertString("leap.status", leapStatusAttributeValue) + dp.Attributes().UpsertString("leap.status", leapStatusAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -258,7 +258,7 @@ func (m *metricNtpTimeCorrection) recordDataPoint(start pcommon.Timestamp, ts pc dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().InsertString("leap.status", leapStatusAttributeValue) + dp.Attributes().UpsertString("leap.status", leapStatusAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -309,7 +309,7 @@ func (m *metricNtpTimeLastOffset) recordDataPoint(start pcommon.Timestamp, ts pc dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().InsertString("leap.status", leapStatusAttributeValue) + dp.Attributes().UpsertString("leap.status", leapStatusAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -360,7 +360,7 @@ func (m *metricNtpTimeRmsOffset) recordDataPoint(start pcommon.Timestamp, ts pco dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().InsertString("leap.status", leapStatusAttributeValue) + dp.Attributes().UpsertString("leap.status", leapStatusAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -411,7 +411,7 @@ func (m *metricNtpTimeRootDelay) recordDataPoint(start pcommon.Timestamp, ts pco dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().InsertString("leap.status", leapStatusAttributeValue) + dp.Attributes().UpsertString("leap.status", leapStatusAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. 
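Several test refactors earlier in this patch also replace the build-a-detached-Value-then-Insert pattern with the in-place constructors UpsertEmptySlice, UpsertEmptyMap, and UpsertEmpty, which return a live view into the parent map instead of requiring a copy. A small standalone sketch of that pattern, using placeholder keys:

    package main

    import "go.opentelemetry.io/collector/pdata/pcommon"

    func main() {
    	attrs := pcommon.NewMap()
    	arr := attrs.UpsertEmptySlice("array") // live Slice, no copy
    	arr.AppendEmpty().SetStringVal("a")
    	nested := attrs.UpsertEmptyMap("map") // live Map, no copy
    	nested.UpsertString("data", "hello world")
    	attrs.UpsertEmpty("null") // key mapped to an empty Value
    }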
diff --git a/receiver/cloudfoundryreceiver/converter.go b/receiver/cloudfoundryreceiver/converter.go index c16d7b89a223..9d35275ecd98 100644 --- a/receiver/cloudfoundryreceiver/converter.go +++ b/receiver/cloudfoundryreceiver/converter.go @@ -56,14 +56,14 @@ func convertEnvelopeToMetrics(envelope *loggregator_v2.Envelope, metricSlice pme func copyEnvelopeAttributes(attributes pcommon.Map, envelope *loggregator_v2.Envelope) { for key, value := range envelope.Tags { - attributes.InsertString(attributeNamePrefix+key, value) + attributes.UpsertString(attributeNamePrefix+key, value) } if envelope.SourceId != "" { - attributes.InsertString(attributeNamePrefix+"source_id", envelope.SourceId) + attributes.UpsertString(attributeNamePrefix+"source_id", envelope.SourceId) } if envelope.InstanceId != "" { - attributes.InsertString(attributeNamePrefix+"instance_id", envelope.InstanceId) + attributes.UpsertString(attributeNamePrefix+"instance_id", envelope.InstanceId) } } diff --git a/receiver/couchdbreceiver/internal/metadata/generated_metrics.go b/receiver/couchdbreceiver/internal/metadata/generated_metrics.go index f548c867a047..de3cdc38cd77 100644 --- a/receiver/couchdbreceiver/internal/metadata/generated_metrics.go +++ b/receiver/couchdbreceiver/internal/metadata/generated_metrics.go @@ -279,7 +279,7 @@ func (m *metricCouchdbDatabaseOperations) recordDataPoint(start pcommon.Timestam dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("operation", operationAttributeValue) + dp.Attributes().UpsertString("operation", operationAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -434,7 +434,7 @@ func (m *metricCouchdbHttpdRequests) recordDataPoint(start pcommon.Timestamp, ts dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("http.method", httpMethodAttributeValue) + dp.Attributes().UpsertString("http.method", httpMethodAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -487,7 +487,7 @@ func (m *metricCouchdbHttpdResponses) recordDataPoint(start pcommon.Timestamp, t dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("http.status_code", httpStatusCodeAttributeValue) + dp.Attributes().UpsertString("http.status_code", httpStatusCodeAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -540,7 +540,7 @@ func (m *metricCouchdbHttpdViews) recordDataPoint(start pcommon.Timestamp, ts pc dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("view", viewAttributeValue) + dp.Attributes().UpsertString("view", viewAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. 
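One spot above where the swap is more than cosmetic: in copyEnvelopeAttributes, tags are written first and source_id/instance_id afterwards, so if an envelope ever carried a tag under the same prefixed key, Upsert now lets the envelope's SourceId or InstanceId overwrite the tag value, where Insert would have silently kept the tag. A standalone sketch (the "org.cloudfoundry." prefix is an assumption about attributeNamePrefix, not taken from the diff):

    package main

    import (
    	"fmt"

    	"go.opentelemetry.io/collector/pdata/pcommon"
    )

    func main() {
    	attrs := pcommon.NewMap()
    	// Assumed prefix; the real value lives in attributeNamePrefix.
    	attrs.UpsertString("org.cloudfoundry.source_id", "from-tag")
    	attrs.UpsertString("org.cloudfoundry.source_id", "from-envelope") // now wins
    	v, _ := attrs.Get("org.cloudfoundry.source_id")
    	fmt.Println(v.StringVal()) // prints "from-envelope"
    }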
diff --git a/receiver/dockerstatsreceiver/internal/metadata/generated_metrics.go b/receiver/dockerstatsreceiver/internal/metadata/generated_metrics.go
index cd91b648858b..e159d5c5617c 100644
--- a/receiver/dockerstatsreceiver/internal/metadata/generated_metrics.go
+++ b/receiver/dockerstatsreceiver/internal/metadata/generated_metrics.go
@@ -462,8 +462,8 @@ func (m *metricContainerBlockioIoMergedRecursiveAsync) recordDataPoint(start pco
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("device_major", deviceMajorAttributeValue)
- dp.Attributes().InsertString("device_minor", deviceMinorAttributeValue)
+ dp.Attributes().UpsertString("device_major", deviceMajorAttributeValue)
+ dp.Attributes().UpsertString("device_minor", deviceMinorAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -516,8 +516,8 @@ func (m *metricContainerBlockioIoMergedRecursiveDiscard) recordDataPoint(start p
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("device_major", deviceMajorAttributeValue)
- dp.Attributes().InsertString("device_minor", deviceMinorAttributeValue)
+ dp.Attributes().UpsertString("device_major", deviceMajorAttributeValue)
+ dp.Attributes().UpsertString("device_minor", deviceMinorAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -570,8 +570,8 @@ func (m *metricContainerBlockioIoMergedRecursiveRead) recordDataPoint(start pcom
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("device_major", deviceMajorAttributeValue)
- dp.Attributes().InsertString("device_minor", deviceMinorAttributeValue)
+ dp.Attributes().UpsertString("device_major", deviceMajorAttributeValue)
+ dp.Attributes().UpsertString("device_minor", deviceMinorAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -624,8 +624,8 @@ func (m *metricContainerBlockioIoMergedRecursiveSync) recordDataPoint(start pcom
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("device_major", deviceMajorAttributeValue)
- dp.Attributes().InsertString("device_minor", deviceMinorAttributeValue)
+ dp.Attributes().UpsertString("device_major", deviceMajorAttributeValue)
+ dp.Attributes().UpsertString("device_minor", deviceMinorAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -678,8 +678,8 @@ func (m *metricContainerBlockioIoMergedRecursiveTotal) recordDataPoint(start pco
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("device_major", deviceMajorAttributeValue)
- dp.Attributes().InsertString("device_minor", deviceMinorAttributeValue)
+ dp.Attributes().UpsertString("device_major", deviceMajorAttributeValue)
+ dp.Attributes().UpsertString("device_minor", deviceMinorAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -732,8 +732,8 @@ func (m *metricContainerBlockioIoMergedRecursiveWrite) recordDataPoint(start pco
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("device_major", deviceMajorAttributeValue)
- dp.Attributes().InsertString("device_minor", deviceMinorAttributeValue)
+ dp.Attributes().UpsertString("device_major", deviceMajorAttributeValue)
+ dp.Attributes().UpsertString("device_minor", deviceMinorAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -786,8 +786,8 @@ func (m *metricContainerBlockioIoQueuedRecursiveAsync) recordDataPoint(start pco
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("device_major", deviceMajorAttributeValue)
- dp.Attributes().InsertString("device_minor", deviceMinorAttributeValue)
+ dp.Attributes().UpsertString("device_major", deviceMajorAttributeValue)
+ dp.Attributes().UpsertString("device_minor", deviceMinorAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -840,8 +840,8 @@ func (m *metricContainerBlockioIoQueuedRecursiveDiscard) recordDataPoint(start p
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("device_major", deviceMajorAttributeValue)
- dp.Attributes().InsertString("device_minor", deviceMinorAttributeValue)
+ dp.Attributes().UpsertString("device_major", deviceMajorAttributeValue)
+ dp.Attributes().UpsertString("device_minor", deviceMinorAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -894,8 +894,8 @@ func (m *metricContainerBlockioIoQueuedRecursiveRead) recordDataPoint(start pcom
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("device_major", deviceMajorAttributeValue)
- dp.Attributes().InsertString("device_minor", deviceMinorAttributeValue)
+ dp.Attributes().UpsertString("device_major", deviceMajorAttributeValue)
+ dp.Attributes().UpsertString("device_minor", deviceMinorAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -948,8 +948,8 @@ func (m *metricContainerBlockioIoQueuedRecursiveSync) recordDataPoint(start pcom
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("device_major", deviceMajorAttributeValue)
- dp.Attributes().InsertString("device_minor", deviceMinorAttributeValue)
+ dp.Attributes().UpsertString("device_major", deviceMajorAttributeValue)
+ dp.Attributes().UpsertString("device_minor", deviceMinorAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1002,8 +1002,8 @@ func (m *metricContainerBlockioIoQueuedRecursiveTotal) recordDataPoint(start pco
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("device_major", deviceMajorAttributeValue)
- dp.Attributes().InsertString("device_minor", deviceMinorAttributeValue)
+ dp.Attributes().UpsertString("device_major", deviceMajorAttributeValue)
+ dp.Attributes().UpsertString("device_minor", deviceMinorAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1056,8 +1056,8 @@ func (m *metricContainerBlockioIoQueuedRecursiveWrite) recordDataPoint(start pco
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("device_major", deviceMajorAttributeValue)
- dp.Attributes().InsertString("device_minor", deviceMinorAttributeValue)
+ dp.Attributes().UpsertString("device_major", deviceMajorAttributeValue)
+ dp.Attributes().UpsertString("device_minor", deviceMinorAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1110,8 +1110,8 @@ func (m *metricContainerBlockioIoServiceBytesRecursiveAsync) recordDataPoint(sta
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("device_major", deviceMajorAttributeValue)
- dp.Attributes().InsertString("device_minor", deviceMinorAttributeValue)
+ dp.Attributes().UpsertString("device_major", deviceMajorAttributeValue)
+ dp.Attributes().UpsertString("device_minor", deviceMinorAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1164,8 +1164,8 @@ func (m *metricContainerBlockioIoServiceBytesRecursiveDiscard) recordDataPoint(s
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("device_major", deviceMajorAttributeValue)
- dp.Attributes().InsertString("device_minor", deviceMinorAttributeValue)
+ dp.Attributes().UpsertString("device_major", deviceMajorAttributeValue)
+ dp.Attributes().UpsertString("device_minor", deviceMinorAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1218,8 +1218,8 @@ func (m *metricContainerBlockioIoServiceBytesRecursiveRead) recordDataPoint(star
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("device_major", deviceMajorAttributeValue)
- dp.Attributes().InsertString("device_minor", deviceMinorAttributeValue)
+ dp.Attributes().UpsertString("device_major", deviceMajorAttributeValue)
+ dp.Attributes().UpsertString("device_minor", deviceMinorAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1272,8 +1272,8 @@ func (m *metricContainerBlockioIoServiceBytesRecursiveSync) recordDataPoint(star
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("device_major", deviceMajorAttributeValue)
- dp.Attributes().InsertString("device_minor", deviceMinorAttributeValue)
+ dp.Attributes().UpsertString("device_major", deviceMajorAttributeValue)
+ dp.Attributes().UpsertString("device_minor", deviceMinorAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1326,8 +1326,8 @@ func (m *metricContainerBlockioIoServiceBytesRecursiveTotal) recordDataPoint(sta
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("device_major", deviceMajorAttributeValue)
- dp.Attributes().InsertString("device_minor", deviceMinorAttributeValue)
+ dp.Attributes().UpsertString("device_major", deviceMajorAttributeValue)
+ dp.Attributes().UpsertString("device_minor", deviceMinorAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1380,8 +1380,8 @@ func (m *metricContainerBlockioIoServiceBytesRecursiveWrite) recordDataPoint(sta
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("device_major", deviceMajorAttributeValue)
- dp.Attributes().InsertString("device_minor", deviceMinorAttributeValue)
+ dp.Attributes().UpsertString("device_major", deviceMajorAttributeValue)
+ dp.Attributes().UpsertString("device_minor", deviceMinorAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1434,8 +1434,8 @@ func (m *metricContainerBlockioIoServiceTimeRecursiveAsync) recordDataPoint(star
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("device_major", deviceMajorAttributeValue)
- dp.Attributes().InsertString("device_minor", deviceMinorAttributeValue)
+ dp.Attributes().UpsertString("device_major", deviceMajorAttributeValue)
+ dp.Attributes().UpsertString("device_minor", deviceMinorAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1488,8 +1488,8 @@ func (m *metricContainerBlockioIoServiceTimeRecursiveDiscard) recordDataPoint(st
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("device_major", deviceMajorAttributeValue)
- dp.Attributes().InsertString("device_minor", deviceMinorAttributeValue)
+ dp.Attributes().UpsertString("device_major", deviceMajorAttributeValue)
+ dp.Attributes().UpsertString("device_minor", deviceMinorAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1542,8 +1542,8 @@ func (m *metricContainerBlockioIoServiceTimeRecursiveRead) recordDataPoint(start
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("device_major", deviceMajorAttributeValue)
- dp.Attributes().InsertString("device_minor", deviceMinorAttributeValue)
+ dp.Attributes().UpsertString("device_major", deviceMajorAttributeValue)
+ dp.Attributes().UpsertString("device_minor", deviceMinorAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1596,8 +1596,8 @@ func (m *metricContainerBlockioIoServiceTimeRecursiveSync) recordDataPoint(start
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("device_major", deviceMajorAttributeValue)
- dp.Attributes().InsertString("device_minor", deviceMinorAttributeValue)
+ dp.Attributes().UpsertString("device_major", deviceMajorAttributeValue)
+ dp.Attributes().UpsertString("device_minor", deviceMinorAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1650,8 +1650,8 @@ func (m *metricContainerBlockioIoServiceTimeRecursiveTotal) recordDataPoint(star
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("device_major", deviceMajorAttributeValue)
- dp.Attributes().InsertString("device_minor", deviceMinorAttributeValue)
+ dp.Attributes().UpsertString("device_major", deviceMajorAttributeValue)
+ dp.Attributes().UpsertString("device_minor", deviceMinorAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1704,8 +1704,8 @@ func (m *metricContainerBlockioIoServiceTimeRecursiveWrite) recordDataPoint(star
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("device_major", deviceMajorAttributeValue)
- dp.Attributes().InsertString("device_minor", deviceMinorAttributeValue)
+ dp.Attributes().UpsertString("device_major", deviceMajorAttributeValue)
+ dp.Attributes().UpsertString("device_minor", deviceMinorAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1758,8 +1758,8 @@ func (m *metricContainerBlockioIoServicedRecursiveAsync) recordDataPoint(start p
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("device_major", deviceMajorAttributeValue)
- dp.Attributes().InsertString("device_minor", deviceMinorAttributeValue)
+ dp.Attributes().UpsertString("device_major", deviceMajorAttributeValue)
+ dp.Attributes().UpsertString("device_minor", deviceMinorAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1812,8 +1812,8 @@ func (m *metricContainerBlockioIoServicedRecursiveDiscard) recordDataPoint(start
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("device_major", deviceMajorAttributeValue)
- dp.Attributes().InsertString("device_minor", deviceMinorAttributeValue)
+ dp.Attributes().UpsertString("device_major", deviceMajorAttributeValue)
+ dp.Attributes().UpsertString("device_minor", deviceMinorAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1866,8 +1866,8 @@ func (m *metricContainerBlockioIoServicedRecursiveRead) recordDataPoint(start pc
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("device_major", deviceMajorAttributeValue)
- dp.Attributes().InsertString("device_minor", deviceMinorAttributeValue)
+ dp.Attributes().UpsertString("device_major", deviceMajorAttributeValue)
+ dp.Attributes().UpsertString("device_minor", deviceMinorAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1920,8 +1920,8 @@ func (m *metricContainerBlockioIoServicedRecursiveSync) recordDataPoint(start pc
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("device_major", deviceMajorAttributeValue)
- dp.Attributes().InsertString("device_minor", deviceMinorAttributeValue)
+ dp.Attributes().UpsertString("device_major", deviceMajorAttributeValue)
+ dp.Attributes().UpsertString("device_minor", deviceMinorAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1974,8 +1974,8 @@ func (m *metricContainerBlockioIoServicedRecursiveTotal) recordDataPoint(start p
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("device_major", deviceMajorAttributeValue)
- dp.Attributes().InsertString("device_minor", deviceMinorAttributeValue)
+ dp.Attributes().UpsertString("device_major", deviceMajorAttributeValue)
+ dp.Attributes().UpsertString("device_minor", deviceMinorAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -2028,8 +2028,8 @@ func (m *metricContainerBlockioIoServicedRecursiveWrite) recordDataPoint(start p
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("device_major", deviceMajorAttributeValue)
- dp.Attributes().InsertString("device_minor", deviceMinorAttributeValue)
+ dp.Attributes().UpsertString("device_major", deviceMajorAttributeValue)
+ dp.Attributes().UpsertString("device_minor", deviceMinorAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -2082,8 +2082,8 @@ func (m *metricContainerBlockioIoTimeRecursiveAsync) recordDataPoint(start pcomm
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("device_major", deviceMajorAttributeValue)
- dp.Attributes().InsertString("device_minor", deviceMinorAttributeValue)
+ dp.Attributes().UpsertString("device_major", deviceMajorAttributeValue)
+ dp.Attributes().UpsertString("device_minor", deviceMinorAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -2136,8 +2136,8 @@ func (m *metricContainerBlockioIoTimeRecursiveDiscard) recordDataPoint(start pco
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("device_major", deviceMajorAttributeValue)
- dp.Attributes().InsertString("device_minor", deviceMinorAttributeValue)
+ dp.Attributes().UpsertString("device_major", deviceMajorAttributeValue)
+ dp.Attributes().UpsertString("device_minor", deviceMinorAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -2190,8 +2190,8 @@ func (m *metricContainerBlockioIoTimeRecursiveRead) recordDataPoint(start pcommo
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("device_major", deviceMajorAttributeValue)
- dp.Attributes().InsertString("device_minor", deviceMinorAttributeValue)
+ dp.Attributes().UpsertString("device_major", deviceMajorAttributeValue)
+ dp.Attributes().UpsertString("device_minor", deviceMinorAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -2244,8 +2244,8 @@ func (m *metricContainerBlockioIoTimeRecursiveSync) recordDataPoint(start pcommo
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("device_major", deviceMajorAttributeValue)
- dp.Attributes().InsertString("device_minor", deviceMinorAttributeValue)
+ dp.Attributes().UpsertString("device_major", deviceMajorAttributeValue)
+ dp.Attributes().UpsertString("device_minor", deviceMinorAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -2298,8 +2298,8 @@ func (m *metricContainerBlockioIoTimeRecursiveTotal) recordDataPoint(start pcomm
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("device_major", deviceMajorAttributeValue)
- dp.Attributes().InsertString("device_minor", deviceMinorAttributeValue)
+ dp.Attributes().UpsertString("device_major", deviceMajorAttributeValue)
+ dp.Attributes().UpsertString("device_minor", deviceMinorAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -2352,8 +2352,8 @@ func (m *metricContainerBlockioIoTimeRecursiveWrite) recordDataPoint(start pcomm
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("device_major", deviceMajorAttributeValue)
- dp.Attributes().InsertString("device_minor", deviceMinorAttributeValue)
+ dp.Attributes().UpsertString("device_major", deviceMajorAttributeValue)
+ dp.Attributes().UpsertString("device_minor", deviceMinorAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -2406,8 +2406,8 @@ func (m *metricContainerBlockioIoWaitTimeRecursiveAsync) recordDataPoint(start p
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("device_major", deviceMajorAttributeValue)
- dp.Attributes().InsertString("device_minor", deviceMinorAttributeValue)
+ dp.Attributes().UpsertString("device_major", deviceMajorAttributeValue)
+ dp.Attributes().UpsertString("device_minor", deviceMinorAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -2460,8 +2460,8 @@ func (m *metricContainerBlockioIoWaitTimeRecursiveDiscard) recordDataPoint(start
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("device_major", deviceMajorAttributeValue)
- dp.Attributes().InsertString("device_minor", deviceMinorAttributeValue)
+ dp.Attributes().UpsertString("device_major", deviceMajorAttributeValue)
+ dp.Attributes().UpsertString("device_minor", deviceMinorAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -2514,8 +2514,8 @@ func (m *metricContainerBlockioIoWaitTimeRecursiveRead) recordDataPoint(start pc
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("device_major", deviceMajorAttributeValue)
- dp.Attributes().InsertString("device_minor", deviceMinorAttributeValue)
+ dp.Attributes().UpsertString("device_major", deviceMajorAttributeValue)
+ dp.Attributes().UpsertString("device_minor", deviceMinorAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -2568,8 +2568,8 @@ func (m *metricContainerBlockioIoWaitTimeRecursiveSync) recordDataPoint(start pc
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("device_major", deviceMajorAttributeValue)
- dp.Attributes().InsertString("device_minor", deviceMinorAttributeValue)
+ dp.Attributes().UpsertString("device_major", deviceMajorAttributeValue)
+ dp.Attributes().UpsertString("device_minor", deviceMinorAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -2622,8 +2622,8 @@ func (m *metricContainerBlockioIoWaitTimeRecursiveTotal) recordDataPoint(start p
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("device_major", deviceMajorAttributeValue)
- dp.Attributes().InsertString("device_minor", deviceMinorAttributeValue)
+ dp.Attributes().UpsertString("device_major", deviceMajorAttributeValue)
+ dp.Attributes().UpsertString("device_minor", deviceMinorAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -2676,8 +2676,8 @@ func (m *metricContainerBlockioIoWaitTimeRecursiveWrite) recordDataPoint(start p
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("device_major", deviceMajorAttributeValue)
- dp.Attributes().InsertString("device_minor", deviceMinorAttributeValue)
+ dp.Attributes().UpsertString("device_major", deviceMajorAttributeValue)
+ dp.Attributes().UpsertString("device_minor", deviceMinorAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -2730,8 +2730,8 @@ func (m *metricContainerBlockioSectorsRecursiveAsync) recordDataPoint(start pcom
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("device_major", deviceMajorAttributeValue)
- dp.Attributes().InsertString("device_minor", deviceMinorAttributeValue)
+ dp.Attributes().UpsertString("device_major", deviceMajorAttributeValue)
+ dp.Attributes().UpsertString("device_minor", deviceMinorAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -2784,8 +2784,8 @@ func (m *metricContainerBlockioSectorsRecursiveDiscard) recordDataPoint(start pc
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("device_major", deviceMajorAttributeValue)
- dp.Attributes().InsertString("device_minor", deviceMinorAttributeValue)
+ dp.Attributes().UpsertString("device_major", deviceMajorAttributeValue)
+ dp.Attributes().UpsertString("device_minor", deviceMinorAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -2838,8 +2838,8 @@ func (m *metricContainerBlockioSectorsRecursiveRead) recordDataPoint(start pcomm
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("device_major", deviceMajorAttributeValue)
- dp.Attributes().InsertString("device_minor", deviceMinorAttributeValue)
+ dp.Attributes().UpsertString("device_major", deviceMajorAttributeValue)
+ dp.Attributes().UpsertString("device_minor", deviceMinorAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -2892,8 +2892,8 @@ func (m *metricContainerBlockioSectorsRecursiveSync) recordDataPoint(start pcomm
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("device_major", deviceMajorAttributeValue)
- dp.Attributes().InsertString("device_minor", deviceMinorAttributeValue)
+ dp.Attributes().UpsertString("device_major", deviceMajorAttributeValue)
+ dp.Attributes().UpsertString("device_minor", deviceMinorAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -2946,8 +2946,8 @@ func (m *metricContainerBlockioSectorsRecursiveTotal) recordDataPoint(start pcom
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("device_major", deviceMajorAttributeValue)
- dp.Attributes().InsertString("device_minor", deviceMinorAttributeValue)
+ dp.Attributes().UpsertString("device_major", deviceMajorAttributeValue)
+ dp.Attributes().UpsertString("device_minor", deviceMinorAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -3000,8 +3000,8 @@ func (m *metricContainerBlockioSectorsRecursiveWrite) recordDataPoint(start pcom
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("device_major", deviceMajorAttributeValue)
- dp.Attributes().InsertString("device_minor", deviceMinorAttributeValue)
+ dp.Attributes().UpsertString("device_major", deviceMajorAttributeValue)
+ dp.Attributes().UpsertString("device_minor", deviceMinorAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -3307,7 +3307,7 @@ func (m *metricContainerCPUUsagePercpu) recordDataPoint(start pcommon.Timestamp,
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("core", coreAttributeValue)
+ dp.Attributes().UpsertString("core", coreAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -5449,7 +5449,7 @@ func (m *metricContainerNetworkIoUsageRxBytes) recordDataPoint(start pcommon.Tim
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("interface", interfaceAttributeValue)
+ dp.Attributes().UpsertString("interface", interfaceAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -5502,7 +5502,7 @@ func (m *metricContainerNetworkIoUsageRxDropped) recordDataPoint(start pcommon.T
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("interface", interfaceAttributeValue)
+ dp.Attributes().UpsertString("interface", interfaceAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -5555,7 +5555,7 @@ func (m *metricContainerNetworkIoUsageRxErrors) recordDataPoint(start pcommon.Ti
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("interface", interfaceAttributeValue)
+ dp.Attributes().UpsertString("interface", interfaceAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -5608,7 +5608,7 @@ func (m *metricContainerNetworkIoUsageRxPackets) recordDataPoint(start pcommon.T
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("interface", interfaceAttributeValue)
+ dp.Attributes().UpsertString("interface", interfaceAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -5661,7 +5661,7 @@ func (m *metricContainerNetworkIoUsageTxBytes) recordDataPoint(start pcommon.Tim
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("interface", interfaceAttributeValue)
+ dp.Attributes().UpsertString("interface", interfaceAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -5714,7 +5714,7 @@ func (m *metricContainerNetworkIoUsageTxDropped) recordDataPoint(start pcommon.T
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("interface", interfaceAttributeValue)
+ dp.Attributes().UpsertString("interface", interfaceAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -5767,7 +5767,7 @@ func (m *metricContainerNetworkIoUsageTxErrors) recordDataPoint(start pcommon.Ti
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("interface", interfaceAttributeValue)
+ dp.Attributes().UpsertString("interface", interfaceAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -5820,7 +5820,7 @@ func (m *metricContainerNetworkIoUsageTxPackets) recordDataPoint(start pcommon.T
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("interface", interfaceAttributeValue)
+ dp.Attributes().UpsertString("interface", interfaceAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
diff --git a/receiver/elasticsearchreceiver/internal/metadata/generated_metrics.go b/receiver/elasticsearchreceiver/internal/metadata/generated_metrics.go
index 6e98c223206e..3d5d50c8f4e3 100644
--- a/receiver/elasticsearchreceiver/internal/metadata/generated_metrics.go
+++ b/receiver/elasticsearchreceiver/internal/metadata/generated_metrics.go
@@ -761,7 +761,7 @@ func (m *metricElasticsearchBreakerMemoryEstimated) recordDataPoint(start pcommo
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("name", circuitBreakerNameAttributeValue)
+ dp.Attributes().UpsertString("name", circuitBreakerNameAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -814,7 +814,7 @@ func (m *metricElasticsearchBreakerMemoryLimit) recordDataPoint(start pcommon.Ti
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("name", circuitBreakerNameAttributeValue)
+ dp.Attributes().UpsertString("name", circuitBreakerNameAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -867,7 +867,7 @@ func (m *metricElasticsearchBreakerTripped) recordDataPoint(start pcommon.Timest
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("name", circuitBreakerNameAttributeValue)
+ dp.Attributes().UpsertString("name", circuitBreakerNameAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -971,7 +971,7 @@ func (m *metricElasticsearchClusterHealth) recordDataPoint(start pcommon.Timesta
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("status", healthStatusAttributeValue)
+ dp.Attributes().UpsertString("status", healthStatusAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1075,7 +1075,7 @@ func (m *metricElasticsearchClusterPublishedStatesDifferences) recordDataPoint(s
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("state", clusterPublishedDifferenceStateAttributeValue)
+ dp.Attributes().UpsertString("state", clusterPublishedDifferenceStateAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1179,7 +1179,7 @@ func (m *metricElasticsearchClusterShards) recordDataPoint(start pcommon.Timesta
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("state", shardStateAttributeValue)
+ dp.Attributes().UpsertString("state", shardStateAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1232,7 +1232,7 @@ func (m *metricElasticsearchClusterStateQueue) recordDataPoint(start pcommon.Tim
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("state", clusterStateQueueStateAttributeValue)
+ dp.Attributes().UpsertString("state", clusterStateQueueStateAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1285,7 +1285,7 @@ func (m *metricElasticsearchClusterStateUpdateCount) recordDataPoint(start pcomm
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("state", clusterStateUpdateStateAttributeValue)
+ dp.Attributes().UpsertString("state", clusterStateUpdateStateAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1338,8 +1338,8 @@ func (m *metricElasticsearchClusterStateUpdateTime) recordDataPoint(start pcommo
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("state", clusterStateUpdateStateAttributeValue)
- dp.Attributes().InsertString("type", clusterStateUpdateTypeAttributeValue)
+ dp.Attributes().UpsertString("state", clusterStateUpdateStateAttributeValue)
+ dp.Attributes().UpsertString("type", clusterStateUpdateTypeAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1543,7 +1543,7 @@ func (m *metricElasticsearchMemoryIndexingPressure) recordDataPoint(start pcommo
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("stage", indexingPressureStageAttributeValue)
+ dp.Attributes().UpsertString("stage", indexingPressureStageAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1596,7 +1596,7 @@ func (m *metricElasticsearchNodeCacheEvictions) recordDataPoint(start pcommon.Ti
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("cache_name", cacheNameAttributeValue)
+ dp.Attributes().UpsertString("cache_name", cacheNameAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1649,7 +1649,7 @@ func (m *metricElasticsearchNodeCacheMemoryUsage) recordDataPoint(start pcommon.
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("cache_name", cacheNameAttributeValue)
+ dp.Attributes().UpsertString("cache_name", cacheNameAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1753,7 +1753,7 @@ func (m *metricElasticsearchNodeClusterIo) recordDataPoint(start pcommon.Timesta
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("direction", directionAttributeValue)
+ dp.Attributes().UpsertString("direction", directionAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -2010,7 +2010,7 @@ func (m *metricElasticsearchNodeDocuments) recordDataPoint(start pcommon.Timesta
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("state", documentStateAttributeValue)
+ dp.Attributes().UpsertString("state", documentStateAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -2369,7 +2369,7 @@ func (m *metricElasticsearchNodeOperationsCompleted) recordDataPoint(start pcomm
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("operation", operationAttributeValue)
+ dp.Attributes().UpsertString("operation", operationAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -2422,7 +2422,7 @@ func (m *metricElasticsearchNodeOperationsTime) recordDataPoint(start pcommon.Ti
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("operation", operationAttributeValue)
+ dp.Attributes().UpsertString("operation", operationAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -2475,7 +2475,7 @@ func (m *metricElasticsearchNodePipelineIngestDocumentsCurrent) recordDataPoint(
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("name", ingestPipelineNameAttributeValue)
+ dp.Attributes().UpsertString("name", ingestPipelineNameAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -2528,7 +2528,7 @@ func (m *metricElasticsearchNodePipelineIngestDocumentsPreprocessed) recordDataP
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("name", ingestPipelineNameAttributeValue)
+ dp.Attributes().UpsertString("name", ingestPipelineNameAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -2581,7 +2581,7 @@ func (m *metricElasticsearchNodePipelineIngestOperationsFailed) recordDataPoint(
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("name", ingestPipelineNameAttributeValue)
+ dp.Attributes().UpsertString("name", ingestPipelineNameAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -2940,8 +2940,8 @@ func (m *metricElasticsearchNodeThreadPoolTasksFinished) recordDataPoint(start p
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("thread_pool_name", threadPoolNameAttributeValue)
- dp.Attributes().InsertString("state", taskStateAttributeValue)
+ dp.Attributes().UpsertString("thread_pool_name", threadPoolNameAttributeValue)
+ dp.Attributes().UpsertString("state", taskStateAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -2994,7 +2994,7 @@ func (m *metricElasticsearchNodeThreadPoolTasksQueued) recordDataPoint(start pco
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("thread_pool_name", threadPoolNameAttributeValue)
+ dp.Attributes().UpsertString("thread_pool_name", threadPoolNameAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -3047,8 +3047,8 @@ func (m *metricElasticsearchNodeThreadPoolThreads) recordDataPoint(start pcommon
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("thread_pool_name", threadPoolNameAttributeValue)
- dp.Attributes().InsertString("state", threadStateAttributeValue)
+ dp.Attributes().UpsertString("thread_pool_name", threadPoolNameAttributeValue)
+ dp.Attributes().UpsertString("state", threadStateAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -3448,7 +3448,7 @@ func (m *metricElasticsearchOsMemory) recordDataPoint(start pcommon.Timestamp, t
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("state", memoryStateAttributeValue)
+ dp.Attributes().UpsertString("state", memoryStateAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -3550,7 +3550,7 @@ func (m *metricJvmGcCollectionsCount) recordDataPoint(start pcommon.Timestamp, t
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("name", collectorNameAttributeValue)
+ dp.Attributes().UpsertString("name", collectorNameAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -3603,7 +3603,7 @@ func (m *metricJvmGcCollectionsElapsed) recordDataPoint(start pcommon.Timestamp,
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("name", collectorNameAttributeValue)
+ dp.Attributes().UpsertString("name", collectorNameAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -3899,7 +3899,7 @@ func (m *metricJvmMemoryPoolMax) recordDataPoint(start pcommon.Timestamp, ts pco
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("name", memoryPoolNameAttributeValue)
+ dp.Attributes().UpsertString("name", memoryPoolNameAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -3950,7 +3950,7 @@ func (m *metricJvmMemoryPoolUsed) recordDataPoint(start pcommon.Timestamp, ts pc
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("name", memoryPoolNameAttributeValue)
+ dp.Attributes().UpsertString("name", memoryPoolNameAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
diff --git a/receiver/flinkmetricsreceiver/internal/metadata/generated_metrics.go b/receiver/flinkmetricsreceiver/internal/metadata/generated_metrics.go
index b537790423f4..40422136a352 100644
--- a/receiver/flinkmetricsreceiver/internal/metadata/generated_metrics.go
+++ b/receiver/flinkmetricsreceiver/internal/metadata/generated_metrics.go
@@ -257,7 +257,7 @@ func (m *metricFlinkJobCheckpointCount) recordDataPoint(start pcommon.Timestamp,
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("checkpoint", checkpointAttributeValue)
+ dp.Attributes().UpsertString("checkpoint", checkpointAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -663,7 +663,7 @@ func (m *metricFlinkJvmGcCollectionsCount) recordDataPoint(start pcommon.Timesta
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("name", garbageCollectorNameAttributeValue)
+ dp.Attributes().UpsertString("name", garbageCollectorNameAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -716,7 +716,7 @@ func (m *metricFlinkJvmGcCollectionsTime) recordDataPoint(start pcommon.Timestam
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("name", garbageCollectorNameAttributeValue)
+ dp.Attributes().UpsertString("name", garbageCollectorNameAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1585,8 +1585,8 @@ func (m *metricFlinkOperatorRecordCount) recordDataPoint(start pcommon.Timestamp
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("name", operatorNameAttributeValue)
- dp.Attributes().InsertString("record", recordAttributeValue)
+ dp.Attributes().UpsertString("name", operatorNameAttributeValue)
+ dp.Attributes().UpsertString("record", recordAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1639,7 +1639,7 @@ func (m *metricFlinkOperatorWatermarkOutput) recordDataPoint(start pcommon.Times
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("name", operatorNameAttributeValue)
+ dp.Attributes().UpsertString("name", operatorNameAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1692,7 +1692,7 @@ func (m *metricFlinkTaskRecordCount) recordDataPoint(start pcommon.Timestamp, ts
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("record", recordAttributeValue)
+ dp.Attributes().UpsertString("record", recordAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
diff --git a/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/internal/metadata/generated_metrics.go b/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/internal/metadata/generated_metrics.go
index e32b4c5d20d2..8c3e38e62554 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/internal/metadata/generated_metrics.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/internal/metadata/generated_metrics.go
@@ -108,8 +108,8 @@ func (m *metricSystemCPUTime) recordDataPoint(start pcommon.Timestamp, ts pcommo
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetDoubleVal(val)
- dp.Attributes().InsertString("cpu", cpuAttributeValue)
- dp.Attributes().InsertString("state", stateAttributeValue)
+ dp.Attributes().UpsertString("cpu", cpuAttributeValue)
+ dp.Attributes().UpsertString("state", stateAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -160,8 +160,8 @@ func (m *metricSystemCPUUtilization) recordDataPoint(start pcommon.Timestamp, ts
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetDoubleVal(val)
- dp.Attributes().InsertString("cpu", cpuAttributeValue)
- dp.Attributes().InsertString("state", stateAttributeValue)
+ dp.Attributes().UpsertString("cpu", cpuAttributeValue)
+ dp.Attributes().UpsertString("state", stateAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
diff --git a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/internal/metadata/generated_metrics.go b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/internal/metadata/generated_metrics.go
index 99b4270e5e21..59075fecfc76 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/internal/metadata/generated_metrics.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/internal/metadata/generated_metrics.go
@@ -136,8 +136,8 @@ func (m *metricSystemDiskIo) recordDataPoint(start pcommon.Timestamp, ts pcommon
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("device", deviceAttributeValue)
- dp.Attributes().InsertString("direction", directionAttributeValue)
+ dp.Attributes().UpsertString("device", deviceAttributeValue)
+ dp.Attributes().UpsertString("direction", directionAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -190,7 +190,7 @@ func (m *metricSystemDiskIoRead) recordDataPoint(start pcommon.Timestamp, ts pco
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("device", deviceAttributeValue)
+ dp.Attributes().UpsertString("device", deviceAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -243,7 +243,7 @@ func (m *metricSystemDiskIoWrite) recordDataPoint(start pcommon.Timestamp, ts pc
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("device", deviceAttributeValue)
+ dp.Attributes().UpsertString("device", deviceAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -296,7 +296,7 @@ func (m *metricSystemDiskIoTime) recordDataPoint(start pcommon.Timestamp, ts pco
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetDoubleVal(val)
- dp.Attributes().InsertString("device", deviceAttributeValue)
+ dp.Attributes().UpsertString("device", deviceAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -349,8 +349,8 @@ func (m *metricSystemDiskMerged) recordDataPoint(start pcommon.Timestamp, ts pco
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("device", deviceAttributeValue)
- dp.Attributes().InsertString("direction", directionAttributeValue)
+ dp.Attributes().UpsertString("device", deviceAttributeValue)
+ dp.Attributes().UpsertString("direction", directionAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -403,7 +403,7 @@ func (m *metricSystemDiskMergedRead) recordDataPoint(start pcommon.Timestamp, ts
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("device", deviceAttributeValue)
+ dp.Attributes().UpsertString("device", deviceAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -456,7 +456,7 @@ func (m *metricSystemDiskMergedWrite) recordDataPoint(start pcommon.Timestamp, t
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("device", deviceAttributeValue)
+ dp.Attributes().UpsertString("device", deviceAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -509,8 +509,8 @@ func (m *metricSystemDiskOperationTime) recordDataPoint(start pcommon.Timestamp,
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetDoubleVal(val)
- dp.Attributes().InsertString("device", deviceAttributeValue)
- dp.Attributes().InsertString("direction", directionAttributeValue)
+ dp.Attributes().UpsertString("device", deviceAttributeValue)
+ dp.Attributes().UpsertString("direction", directionAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -563,7 +563,7 @@ func (m *metricSystemDiskOperationTimeRead) recordDataPoint(start pcommon.Timest
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetDoubleVal(val)
- dp.Attributes().InsertString("device", deviceAttributeValue)
+ dp.Attributes().UpsertString("device", deviceAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -616,7 +616,7 @@ func (m *metricSystemDiskOperationTimeWrite) recordDataPoint(start pcommon.Times
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetDoubleVal(val)
- dp.Attributes().InsertString("device", deviceAttributeValue)
+ dp.Attributes().UpsertString("device", deviceAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -669,8 +669,8 @@ func (m *metricSystemDiskOperations) recordDataPoint(start pcommon.Timestamp, ts
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("device", deviceAttributeValue)
- dp.Attributes().InsertString("direction", directionAttributeValue)
+ dp.Attributes().UpsertString("device", deviceAttributeValue)
+ dp.Attributes().UpsertString("direction", directionAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -723,7 +723,7 @@ func (m *metricSystemDiskOperationsRead) recordDataPoint(start pcommon.Timestamp
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("device", deviceAttributeValue)
+ dp.Attributes().UpsertString("device", deviceAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -776,7 +776,7 @@ func (m *metricSystemDiskOperationsWrite) recordDataPoint(start pcommon.Timestam
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("device", deviceAttributeValue)
+ dp.Attributes().UpsertString("device", deviceAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -829,7 +829,7 @@ func (m *metricSystemDiskPendingOperations) recordDataPoint(start pcommon.Timest
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("device", deviceAttributeValue)
+ dp.Attributes().UpsertString("device", deviceAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -882,7 +882,7 @@ func (m *metricSystemDiskWeightedIoTime) recordDataPoint(start pcommon.Timestamp
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetDoubleVal(val)
- dp.Attributes().InsertString("device", deviceAttributeValue)
+ dp.Attributes().UpsertString("device", deviceAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
diff --git a/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/internal/metadata/generated_metrics.go b/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/internal/metadata/generated_metrics.go
index 5fafdf00f153..7f85d8ff3e23 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/internal/metadata/generated_metrics.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/internal/metadata/generated_metrics.go
@@ -92,11 +92,11 @@ func (m *metricSystemFilesystemInodesUsage) recordDataPoint(start pcommon.Timest
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("device", deviceAttributeValue)
- dp.Attributes().InsertString("mode", modeAttributeValue)
- dp.Attributes().InsertString("mountpoint", mountpointAttributeValue)
- dp.Attributes().InsertString("type", typeAttributeValue)
- dp.Attributes().InsertString("state", stateAttributeValue)
+ dp.Attributes().UpsertString("device", deviceAttributeValue)
+ dp.Attributes().UpsertString("mode", modeAttributeValue)
+ dp.Attributes().UpsertString("mountpoint", mountpointAttributeValue)
+ dp.Attributes().UpsertString("type", typeAttributeValue)
+ dp.Attributes().UpsertString("state", stateAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -149,11 +149,11 @@ func (m *metricSystemFilesystemUsage) recordDataPoint(start pcommon.Timestamp, t
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("device", deviceAttributeValue)
- dp.Attributes().InsertString("mode", modeAttributeValue)
- dp.Attributes().InsertString("mountpoint", mountpointAttributeValue)
- dp.Attributes().InsertString("type", typeAttributeValue)
- dp.Attributes().InsertString("state", stateAttributeValue)
+ dp.Attributes().UpsertString("device", deviceAttributeValue)
+ dp.Attributes().UpsertString("mode", modeAttributeValue)
+ dp.Attributes().UpsertString("mountpoint", mountpointAttributeValue)
+ dp.Attributes().UpsertString("type", typeAttributeValue)
+ dp.Attributes().UpsertString("state", stateAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -204,10 +204,10 @@ func (m *metricSystemFilesystemUtilization) recordDataPoint(start pcommon.Timest
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetDoubleVal(val)
- dp.Attributes().InsertString("device", deviceAttributeValue)
- dp.Attributes().InsertString("mode", modeAttributeValue)
- dp.Attributes().InsertString("mountpoint", mountpointAttributeValue)
- dp.Attributes().InsertString("type", typeAttributeValue)
+ dp.Attributes().UpsertString("device", deviceAttributeValue)
+ dp.Attributes().UpsertString("mode", modeAttributeValue)
+ dp.Attributes().UpsertString("mountpoint", mountpointAttributeValue)
+ dp.Attributes().UpsertString("type", typeAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
diff --git a/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/internal/metadata/generated_metrics.go b/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/internal/metadata/generated_metrics.go
index b72859363688..d5f098dcaa6d 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/internal/metadata/generated_metrics.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/internal/metadata/generated_metrics.go
@@ -104,7 +104,7 @@ func (m *metricSystemMemoryUsage) recordDataPoint(start pcommon.Timestamp, ts pc
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("state", stateAttributeValue)
+ dp.Attributes().UpsertString("state", stateAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -155,7 +155,7 @@ func (m *metricSystemMemoryUtilization) recordDataPoint(start pcommon.Timestamp,
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetDoubleVal(val)
- dp.Attributes().InsertString("state", stateAttributeValue)
+ dp.Attributes().UpsertString("state", stateAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
diff --git a/receiver/hostmetricsreceiver/internal/scraper/networkscraper/internal/metadata/generated_metrics.go b/receiver/hostmetricsreceiver/internal/scraper/networkscraper/internal/metadata/generated_metrics.go
index 4d893eec67f7..0fb46e1541f9 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/networkscraper/internal/metadata/generated_metrics.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/networkscraper/internal/metadata/generated_metrics.go
@@ -158,8 +158,8 @@ func (m *metricSystemNetworkConnections) recordDataPoint(start pcommon.Timestamp
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("protocol", protocolAttributeValue)
- dp.Attributes().InsertString("state", stateAttributeValue)
+ dp.Attributes().UpsertString("protocol", protocolAttributeValue)
+ dp.Attributes().UpsertString("state", stateAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -314,8 +314,8 @@ func (m *metricSystemNetworkDropped) recordDataPoint(start pcommon.Timestamp, ts
 dp.SetStartTimestamp(start)
 dp.SetTimestamp(ts)
 dp.SetIntVal(val)
- dp.Attributes().InsertString("device", deviceAttributeValue)
- dp.Attributes().InsertString("direction", directionAttributeValue)
+ dp.Attributes().UpsertString("device", deviceAttributeValue)
+ dp.Attributes().UpsertString("direction", directionAttributeValue)
 }

 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -368,7 +368,7 @@ func (m *metricSystemNetworkDroppedReceive) recordDataPoint(start pcommon.Timest
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("device", deviceAttributeValue)
+	dp.Attributes().UpsertString("device", deviceAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -421,7 +421,7 @@ func (m *metricSystemNetworkDroppedTransmit) recordDataPoint(start pcommon.Times
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("device", deviceAttributeValue)
+	dp.Attributes().UpsertString("device", deviceAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -474,8 +474,8 @@ func (m *metricSystemNetworkErrors) recordDataPoint(start pcommon.Timestamp, ts
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("device", deviceAttributeValue)
-	dp.Attributes().InsertString("direction", directionAttributeValue)
+	dp.Attributes().UpsertString("device", deviceAttributeValue)
+	dp.Attributes().UpsertString("direction", directionAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -528,7 +528,7 @@ func (m *metricSystemNetworkErrorsReceive) recordDataPoint(start pcommon.Timesta
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("device", deviceAttributeValue)
+	dp.Attributes().UpsertString("device", deviceAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -581,7 +581,7 @@ func (m *metricSystemNetworkErrorsTransmit) recordDataPoint(start pcommon.Timest
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("device", deviceAttributeValue)
+	dp.Attributes().UpsertString("device", deviceAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -634,8 +634,8 @@ func (m *metricSystemNetworkIo) recordDataPoint(start pcommon.Timestamp, ts pcom
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("device", deviceAttributeValue)
-	dp.Attributes().InsertString("direction", directionAttributeValue)
+	dp.Attributes().UpsertString("device", deviceAttributeValue)
+	dp.Attributes().UpsertString("direction", directionAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -688,7 +688,7 @@ func (m *metricSystemNetworkIoReceive) recordDataPoint(start pcommon.Timestamp,
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("device", deviceAttributeValue)
+	dp.Attributes().UpsertString("device", deviceAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -741,7 +741,7 @@ func (m *metricSystemNetworkIoTransmit) recordDataPoint(start pcommon.Timestamp,
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("device", deviceAttributeValue)
+	dp.Attributes().UpsertString("device", deviceAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -794,8 +794,8 @@ func (m *metricSystemNetworkPackets) recordDataPoint(start pcommon.Timestamp, ts
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("device", deviceAttributeValue)
-	dp.Attributes().InsertString("direction", directionAttributeValue)
+	dp.Attributes().UpsertString("device", deviceAttributeValue)
+	dp.Attributes().UpsertString("direction", directionAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -848,7 +848,7 @@ func (m *metricSystemNetworkPacketsReceive) recordDataPoint(start pcommon.Timest
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("device", deviceAttributeValue)
+	dp.Attributes().UpsertString("device", deviceAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -901,7 +901,7 @@ func (m *metricSystemNetworkPacketsTransmit) recordDataPoint(start pcommon.Times
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("device", deviceAttributeValue)
+	dp.Attributes().UpsertString("device", deviceAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
diff --git a/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/internal/metadata/generated_metrics.go b/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/internal/metadata/generated_metrics.go
index 58bd4bb4b52c..416d7ebd23d2 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/internal/metadata/generated_metrics.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/internal/metadata/generated_metrics.go
@@ -156,7 +156,7 @@ func (m *metricSystemPagingFaults) recordDataPoint(start pcommon.Timestamp, ts p
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("type", typeAttributeValue)
+	dp.Attributes().UpsertString("type", typeAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -209,8 +209,8 @@ func (m *metricSystemPagingOperations) recordDataPoint(start pcommon.Timestamp,
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("direction", directionAttributeValue)
-	dp.Attributes().InsertString("type", typeAttributeValue)
+	dp.Attributes().UpsertString("direction", directionAttributeValue)
+	dp.Attributes().UpsertString("type", typeAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -263,7 +263,7 @@ func (m *metricSystemPagingOperationsPageIn) recordDataPoint(start pcommon.Times
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("type", typeAttributeValue)
+	dp.Attributes().UpsertString("type", typeAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -316,7 +316,7 @@ func (m *metricSystemPagingOperationsPageOut) recordDataPoint(start pcommon.Time
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("type", typeAttributeValue)
+	dp.Attributes().UpsertString("type", typeAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -369,8 +369,8 @@ func (m *metricSystemPagingUsage) recordDataPoint(start pcommon.Timestamp, ts pc
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("device", deviceAttributeValue)
-	dp.Attributes().InsertString("state", stateAttributeValue)
+	dp.Attributes().UpsertString("device", deviceAttributeValue)
+	dp.Attributes().UpsertString("state", stateAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -421,8 +421,8 @@ func (m *metricSystemPagingUtilization) recordDataPoint(start pcommon.Timestamp,
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetDoubleVal(val)
-	dp.Attributes().InsertString("device", deviceAttributeValue)
-	dp.Attributes().InsertString("state", stateAttributeValue)
+	dp.Attributes().UpsertString("device", deviceAttributeValue)
+	dp.Attributes().UpsertString("state", stateAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
diff --git a/receiver/hostmetricsreceiver/internal/scraper/processesscraper/internal/metadata/generated_metrics.go b/receiver/hostmetricsreceiver/internal/scraper/processesscraper/internal/metadata/generated_metrics.go
index 9a72b87c0604..ee0b56929b37 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/processesscraper/internal/metadata/generated_metrics.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/processesscraper/internal/metadata/generated_metrics.go
@@ -128,7 +128,7 @@ func (m *metricSystemProcessesCount) recordDataPoint(start pcommon.Timestamp, ts
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("status", statusAttributeValue)
+	dp.Attributes().UpsertString("status", statusAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
diff --git a/receiver/hostmetricsreceiver/internal/scraper/processscraper/internal/metadata/generated_metrics.go b/receiver/hostmetricsreceiver/internal/scraper/processscraper/internal/metadata/generated_metrics.go
index 0a1afa395e6c..0acee14b5477 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/processscraper/internal/metadata/generated_metrics.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/processscraper/internal/metadata/generated_metrics.go
@@ -134,7 +134,7 @@ func (m *metricProcessCPUTime) recordDataPoint(start pcommon.Timestamp, ts pcomm
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetDoubleVal(val)
-	dp.Attributes().InsertString("state", stateAttributeValue)
+	dp.Attributes().UpsertString("state", stateAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -187,7 +187,7 @@ func (m *metricProcessDiskIo) recordDataPoint(start pcommon.Timestamp, ts pcommo
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("direction", directionAttributeValue)
+	dp.Attributes().UpsertString("direction", directionAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
diff --git a/receiver/iisreceiver/internal/metadata/generated_metrics.go b/receiver/iisreceiver/internal/metadata/generated_metrics.go
index 6066e15ad729..c8ecaa798027 100644
--- a/receiver/iisreceiver/internal/metadata/generated_metrics.go
+++ b/receiver/iisreceiver/internal/metadata/generated_metrics.go
@@ -373,7 +373,7 @@ func (m *metricIisNetworkFileCount) recordDataPoint(start pcommon.Timestamp, ts
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("direction", directionAttributeValue)
+	dp.Attributes().UpsertString("direction", directionAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -426,7 +426,7 @@ func (m *metricIisNetworkIo) recordDataPoint(start pcommon.Timestamp, ts pcommon
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("direction", directionAttributeValue)
+	dp.Attributes().UpsertString("direction", directionAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -479,7 +479,7 @@ func (m *metricIisRequestCount) recordDataPoint(start pcommon.Timestamp, ts pcom
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("request", requestAttributeValue)
+	dp.Attributes().UpsertString("request", requestAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
diff --git a/receiver/kafkametricsreceiver/internal/metadata/generated_metrics.go b/receiver/kafkametricsreceiver/internal/metadata/generated_metrics.go
index 4ab70a6b6999..89a9bc705322 100644
--- a/receiver/kafkametricsreceiver/internal/metadata/generated_metrics.go
+++ b/receiver/kafkametricsreceiver/internal/metadata/generated_metrics.go
@@ -140,9 +140,9 @@ func (m *metricKafkaConsumerGroupLag) recordDataPoint(start pcommon.Timestamp, t
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("group", groupAttributeValue)
-	dp.Attributes().InsertString("topic", topicAttributeValue)
-	dp.Attributes().InsertInt("partition", partitionAttributeValue)
+	dp.Attributes().UpsertString("group", groupAttributeValue)
+	dp.Attributes().UpsertString("topic", topicAttributeValue)
+	dp.Attributes().UpsertInt("partition", partitionAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -193,8 +193,8 @@ func (m *metricKafkaConsumerGroupLagSum) recordDataPoint(start pcommon.Timestamp
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("group", groupAttributeValue)
-	dp.Attributes().InsertString("topic", topicAttributeValue)
+	dp.Attributes().UpsertString("group", groupAttributeValue)
+	dp.Attributes().UpsertString("topic", topicAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -245,7 +245,7 @@ func (m *metricKafkaConsumerGroupMembers) recordDataPoint(start pcommon.Timestam
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("group", groupAttributeValue)
+	dp.Attributes().UpsertString("group", groupAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -296,9 +296,9 @@ func (m *metricKafkaConsumerGroupOffset) recordDataPoint(start pcommon.Timestamp
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("group", groupAttributeValue)
-	dp.Attributes().InsertString("topic", topicAttributeValue)
-	dp.Attributes().InsertInt("partition", partitionAttributeValue)
+	dp.Attributes().UpsertString("group", groupAttributeValue)
+	dp.Attributes().UpsertString("topic", topicAttributeValue)
+	dp.Attributes().UpsertInt("partition", partitionAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -349,8 +349,8 @@ func (m *metricKafkaConsumerGroupOffsetSum) recordDataPoint(start pcommon.Timest
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("group", groupAttributeValue)
-	dp.Attributes().InsertString("topic", topicAttributeValue)
+	dp.Attributes().UpsertString("group", groupAttributeValue)
+	dp.Attributes().UpsertString("topic", topicAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -401,8 +401,8 @@ func (m *metricKafkaPartitionCurrentOffset) recordDataPoint(start pcommon.Timest
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("topic", topicAttributeValue)
-	dp.Attributes().InsertInt("partition", partitionAttributeValue)
+	dp.Attributes().UpsertString("topic", topicAttributeValue)
+	dp.Attributes().UpsertInt("partition", partitionAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -453,8 +453,8 @@ func (m *metricKafkaPartitionOldestOffset) recordDataPoint(start pcommon.Timesta
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("topic", topicAttributeValue)
-	dp.Attributes().InsertInt("partition", partitionAttributeValue)
+	dp.Attributes().UpsertString("topic", topicAttributeValue)
+	dp.Attributes().UpsertInt("partition", partitionAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -505,8 +505,8 @@ func (m *metricKafkaPartitionReplicas) recordDataPoint(start pcommon.Timestamp,
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("topic", topicAttributeValue)
-	dp.Attributes().InsertInt("partition", partitionAttributeValue)
+	dp.Attributes().UpsertString("topic", topicAttributeValue)
+	dp.Attributes().UpsertInt("partition", partitionAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -557,8 +557,8 @@ func (m *metricKafkaPartitionReplicasInSync) recordDataPoint(start pcommon.Times
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("topic", topicAttributeValue)
-	dp.Attributes().InsertInt("partition", partitionAttributeValue)
+	dp.Attributes().UpsertString("topic", topicAttributeValue)
+	dp.Attributes().UpsertInt("partition", partitionAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -609,7 +609,7 @@ func (m *metricKafkaTopicPartitions) recordDataPoint(start pcommon.Timestamp, ts
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("topic", topicAttributeValue)
+	dp.Attributes().UpsertString("topic", topicAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
diff --git a/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics.go b/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics.go
index 32d49ad9579f..e4e2122ba6fe 100644
--- a/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics.go
+++ b/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics.go
@@ -1357,8 +1357,8 @@ func (m *metricK8sNodeNetworkErrors) recordDataPoint(start pcommon.Timestamp, ts
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("interface", interfaceAttributeValue)
-	dp.Attributes().InsertString("direction", directionAttributeValue)
+	dp.Attributes().UpsertString("interface", interfaceAttributeValue)
+	dp.Attributes().UpsertString("direction", directionAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1411,7 +1411,7 @@ func (m *metricK8sNodeNetworkErrorsReceive) recordDataPoint(start pcommon.Timest
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("interface", interfaceAttributeValue)
+	dp.Attributes().UpsertString("interface", interfaceAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1464,7 +1464,7 @@ func (m *metricK8sNodeNetworkErrorsTransmit) recordDataPoint(start pcommon.Times
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("interface", interfaceAttributeValue)
+	dp.Attributes().UpsertString("interface", interfaceAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1517,8 +1517,8 @@ func (m *metricK8sNodeNetworkIo) recordDataPoint(start pcommon.Timestamp, ts pco
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("interface", interfaceAttributeValue)
-	dp.Attributes().InsertString("direction", directionAttributeValue)
+	dp.Attributes().UpsertString("interface", interfaceAttributeValue)
+	dp.Attributes().UpsertString("direction", directionAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1571,7 +1571,7 @@ func (m *metricK8sNodeNetworkIoReceive) recordDataPoint(start pcommon.Timestamp,
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("interface", interfaceAttributeValue)
+	dp.Attributes().UpsertString("interface", interfaceAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1624,7 +1624,7 @@ func (m *metricK8sNodeNetworkIoTransmit) recordDataPoint(start pcommon.Timestamp
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("interface", interfaceAttributeValue)
+	dp.Attributes().UpsertString("interface", interfaceAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -2218,8 +2218,8 @@ func (m *metricK8sPodNetworkErrors) recordDataPoint(start pcommon.Timestamp, ts
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("interface", interfaceAttributeValue)
-	dp.Attributes().InsertString("direction", directionAttributeValue)
+	dp.Attributes().UpsertString("interface", interfaceAttributeValue)
+	dp.Attributes().UpsertString("direction", directionAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -2272,7 +2272,7 @@ func (m *metricK8sPodNetworkErrorsReceive) recordDataPoint(start pcommon.Timesta
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("interface", interfaceAttributeValue)
+	dp.Attributes().UpsertString("interface", interfaceAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -2325,7 +2325,7 @@ func (m *metricK8sPodNetworkErrorsTransmit) recordDataPoint(start pcommon.Timest
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("interface", interfaceAttributeValue)
+	dp.Attributes().UpsertString("interface", interfaceAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -2378,8 +2378,8 @@ func (m *metricK8sPodNetworkIo) recordDataPoint(start pcommon.Timestamp, ts pcom
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("interface", interfaceAttributeValue)
-	dp.Attributes().InsertString("direction", directionAttributeValue)
+	dp.Attributes().UpsertString("interface", interfaceAttributeValue)
+	dp.Attributes().UpsertString("direction", directionAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -2432,7 +2432,7 @@ func (m *metricK8sPodNetworkIoReceive) recordDataPoint(start pcommon.Timestamp,
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("interface", interfaceAttributeValue)
+	dp.Attributes().UpsertString("interface", interfaceAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -2485,7 +2485,7 @@ func (m *metricK8sPodNetworkIoTransmit) recordDataPoint(start pcommon.Timestamp,
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("interface", interfaceAttributeValue)
+	dp.Attributes().UpsertString("interface", interfaceAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
diff --git a/receiver/memcachedreceiver/internal/metadata/generated_metrics.go b/receiver/memcachedreceiver/internal/metadata/generated_metrics.go
index 69bd9888d0ff..dd455cd16418 100644
--- a/receiver/memcachedreceiver/internal/metadata/generated_metrics.go
+++ b/receiver/memcachedreceiver/internal/metadata/generated_metrics.go
@@ -292,7 +292,7 @@ func (m *metricMemcachedCommands) recordDataPoint(start pcommon.Timestamp, ts pc
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("command", commandAttributeValue)
+	dp.Attributes().UpsertString("command", commandAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -447,7 +447,7 @@ func (m *metricMemcachedCPUUsage) recordDataPoint(start pcommon.Timestamp, ts pc
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetDoubleVal(val)
-	dp.Attributes().InsertString("state", stateAttributeValue)
+	dp.Attributes().UpsertString("state", stateAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -602,7 +602,7 @@ func (m *metricMemcachedNetwork) recordDataPoint(start pcommon.Timestamp, ts pco
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("direction", directionAttributeValue)
+	dp.Attributes().UpsertString("direction", directionAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -755,7 +755,7 @@ func (m *metricMemcachedOperationHitRatio) recordDataPoint(start pcommon.Timesta
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetDoubleVal(val)
-	dp.Attributes().InsertString("operation", operationAttributeValue)
+	dp.Attributes().UpsertString("operation", operationAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -808,8 +808,8 @@ func (m *metricMemcachedOperations) recordDataPoint(start pcommon.Timestamp, ts
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("type", typeAttributeValue)
-	dp.Attributes().InsertString("operation", operationAttributeValue)
+	dp.Attributes().UpsertString("type", typeAttributeValue)
+	dp.Attributes().UpsertString("operation", operationAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
diff --git a/receiver/mongodbatlasreceiver/internal/metadata/generated_metrics.go b/receiver/mongodbatlasreceiver/internal/metadata/generated_metrics.go
index 7ab7c0c248fb..5558b372e32f 100644
--- a/receiver/mongodbatlasreceiver/internal/metadata/generated_metrics.go
+++ b/receiver/mongodbatlasreceiver/internal/metadata/generated_metrics.go
@@ -1019,7 +1019,7 @@ func (m *metricMongodbatlasDbCounts) recordDataPoint(start pcommon.Timestamp, ts
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetDoubleVal(val)
-	dp.Attributes().InsertString("object_type", objectTypeAttributeValue)
+	dp.Attributes().UpsertString("object_type", objectTypeAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1070,7 +1070,7 @@ func (m *metricMongodbatlasDbSize) recordDataPoint(start pcommon.Timestamp, ts p
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetDoubleVal(val)
-	dp.Attributes().InsertString("object_type", objectTypeAttributeValue)
+	dp.Attributes().UpsertString("object_type", objectTypeAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1121,7 +1121,7 @@ func (m *metricMongodbatlasDiskPartitionIopsAverage) recordDataPoint(start pcomm
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetDoubleVal(val)
-	dp.Attributes().InsertString("disk_direction", diskDirectionAttributeValue)
+	dp.Attributes().UpsertString("disk_direction", diskDirectionAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1172,7 +1172,7 @@ func (m *metricMongodbatlasDiskPartitionIopsMax) recordDataPoint(start pcommon.T
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetDoubleVal(val)
-	dp.Attributes().InsertString("disk_direction", diskDirectionAttributeValue)
+	dp.Attributes().UpsertString("disk_direction", diskDirectionAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1223,7 +1223,7 @@ func (m *metricMongodbatlasDiskPartitionLatencyAverage) recordDataPoint(start pc
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetDoubleVal(val)
-	dp.Attributes().InsertString("disk_direction", diskDirectionAttributeValue)
+	dp.Attributes().UpsertString("disk_direction", diskDirectionAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1274,7 +1274,7 @@ func (m *metricMongodbatlasDiskPartitionLatencyMax) recordDataPoint(start pcommo
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetDoubleVal(val)
-	dp.Attributes().InsertString("disk_direction", diskDirectionAttributeValue)
+	dp.Attributes().UpsertString("disk_direction", diskDirectionAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1325,7 +1325,7 @@ func (m *metricMongodbatlasDiskPartitionSpaceAverage) recordDataPoint(start pcom
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetDoubleVal(val)
-	dp.Attributes().InsertString("disk_status", diskStatusAttributeValue)
+	dp.Attributes().UpsertString("disk_status", diskStatusAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1376,7 +1376,7 @@ func (m *metricMongodbatlasDiskPartitionSpaceMax) recordDataPoint(start pcommon.
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetDoubleVal(val)
-	dp.Attributes().InsertString("disk_status", diskStatusAttributeValue)
+	dp.Attributes().UpsertString("disk_status", diskStatusAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1427,7 +1427,7 @@ func (m *metricMongodbatlasDiskPartitionUsageAverage) recordDataPoint(start pcom
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetDoubleVal(val)
-	dp.Attributes().InsertString("disk_status", diskStatusAttributeValue)
+	dp.Attributes().UpsertString("disk_status", diskStatusAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1478,7 +1478,7 @@ func (m *metricMongodbatlasDiskPartitionUsageMax) recordDataPoint(start pcommon.
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetDoubleVal(val)
-	dp.Attributes().InsertString("disk_status", diskStatusAttributeValue)
+	dp.Attributes().UpsertString("disk_status", diskStatusAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1529,7 +1529,7 @@ func (m *metricMongodbatlasDiskPartitionUtilizationAverage) recordDataPoint(star
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetDoubleVal(val)
-	dp.Attributes().InsertString("disk_status", diskStatusAttributeValue)
+	dp.Attributes().UpsertString("disk_status", diskStatusAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1580,7 +1580,7 @@ func (m *metricMongodbatlasDiskPartitionUtilizationMax) recordDataPoint(start pc
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetDoubleVal(val)
-	dp.Attributes().InsertString("disk_status", diskStatusAttributeValue)
+	dp.Attributes().UpsertString("disk_status", diskStatusAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1631,7 +1631,7 @@ func (m *metricMongodbatlasProcessAsserts) recordDataPoint(start pcommon.Timesta
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetDoubleVal(val)
-	dp.Attributes().InsertString("assert_type", assertTypeAttributeValue)
+	dp.Attributes().UpsertString("assert_type", assertTypeAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1731,7 +1731,7 @@ func (m *metricMongodbatlasProcessCacheIo) recordDataPoint(start pcommon.Timesta
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetDoubleVal(val)
-	dp.Attributes().InsertString("cache_direction", cacheDirectionAttributeValue)
+	dp.Attributes().UpsertString("cache_direction", cacheDirectionAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1784,7 +1784,7 @@ func (m *metricMongodbatlasProcessCacheSize) recordDataPoint(start pcommon.Times
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetDoubleVal(val)
-	dp.Attributes().InsertString("cache_status", cacheStatusAttributeValue)
+	dp.Attributes().UpsertString("cache_status", cacheStatusAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1886,7 +1886,7 @@ func (m *metricMongodbatlasProcessCPUChildrenNormalizedUsageAverage) recordDataP
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetDoubleVal(val)
-	dp.Attributes().InsertString("cpu_state", cpuStateAttributeValue)
+	dp.Attributes().UpsertString("cpu_state", cpuStateAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1937,7 +1937,7 @@ func (m *metricMongodbatlasProcessCPUChildrenNormalizedUsageMax) recordDataPoint
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetDoubleVal(val)
-	dp.Attributes().InsertString("cpu_state", cpuStateAttributeValue)
+	dp.Attributes().UpsertString("cpu_state", cpuStateAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1988,7 +1988,7 @@ func (m *metricMongodbatlasProcessCPUChildrenUsageAverage) recordDataPoint(start
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetDoubleVal(val)
-	dp.Attributes().InsertString("cpu_state", cpuStateAttributeValue)
+	dp.Attributes().UpsertString("cpu_state", cpuStateAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -2039,7 +2039,7 @@ func (m *metricMongodbatlasProcessCPUChildrenUsageMax) recordDataPoint(start pco
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetDoubleVal(val)
-	dp.Attributes().InsertString("cpu_state", cpuStateAttributeValue)
+	dp.Attributes().UpsertString("cpu_state", cpuStateAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -2090,7 +2090,7 @@ func (m *metricMongodbatlasProcessCPUNormalizedUsageAverage) recordDataPoint(sta
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetDoubleVal(val)
-	dp.Attributes().InsertString("cpu_state", cpuStateAttributeValue)
+	dp.Attributes().UpsertString("cpu_state", cpuStateAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -2141,7 +2141,7 @@ func (m *metricMongodbatlasProcessCPUNormalizedUsageMax) recordDataPoint(start p
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetDoubleVal(val)
-	dp.Attributes().InsertString("cpu_state", cpuStateAttributeValue)
+	dp.Attributes().UpsertString("cpu_state", cpuStateAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -2192,7 +2192,7 @@ func (m *metricMongodbatlasProcessCPUUsageAverage) recordDataPoint(start pcommon
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetDoubleVal(val)
-	dp.Attributes().InsertString("cpu_state", cpuStateAttributeValue)
+	dp.Attributes().UpsertString("cpu_state", cpuStateAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -2243,7 +2243,7 @@ func (m *metricMongodbatlasProcessCPUUsageMax) recordDataPoint(start pcommon.Tim
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetDoubleVal(val)
-	dp.Attributes().InsertString("cpu_state", cpuStateAttributeValue)
+	dp.Attributes().UpsertString("cpu_state", cpuStateAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -2294,7 +2294,7 @@ func (m *metricMongodbatlasProcessCursors) recordDataPoint(start pcommon.Timesta
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetDoubleVal(val)
-	dp.Attributes().InsertString("cursor_state", cursorStateAttributeValue)
+	dp.Attributes().UpsertString("cursor_state", cursorStateAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -2345,7 +2345,7 @@ func (m *metricMongodbatlasProcessDbDocumentRate) recordDataPoint(start pcommon.
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetDoubleVal(val)
-	dp.Attributes().InsertString("document_status", documentStatusAttributeValue)
+	dp.Attributes().UpsertString("document_status", documentStatusAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -2396,8 +2396,8 @@ func (m *metricMongodbatlasProcessDbOperationsRate) recordDataPoint(start pcommo
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetDoubleVal(val)
-	dp.Attributes().InsertString("operation", operationAttributeValue)
-	dp.Attributes().InsertString("cluster_role", clusterRoleAttributeValue)
+	dp.Attributes().UpsertString("operation", operationAttributeValue)
+	dp.Attributes().UpsertString("cluster_role", clusterRoleAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -2450,7 +2450,7 @@ func (m *metricMongodbatlasProcessDbOperationsTime) recordDataPoint(start pcommo
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetDoubleVal(val)
-	dp.Attributes().InsertString("execution_type", executionTypeAttributeValue)
+	dp.Attributes().UpsertString("execution_type", executionTypeAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -2501,7 +2501,7 @@ func (m *metricMongodbatlasProcessDbQueryExecutorScanned) recordDataPoint(start
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetDoubleVal(val)
-	dp.Attributes().InsertString("scanned_type", scannedTypeAttributeValue)
+	dp.Attributes().UpsertString("scanned_type", scannedTypeAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -2552,7 +2552,7 @@ func (m *metricMongodbatlasProcessDbQueryTargetingScannedPerReturned) recordData
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetDoubleVal(val)
-	dp.Attributes().InsertString("scanned_type", scannedTypeAttributeValue)
+	dp.Attributes().UpsertString("scanned_type", scannedTypeAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -2603,7 +2603,7 @@ func (m *metricMongodbatlasProcessDbStorage) recordDataPoint(start pcommon.Times
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetDoubleVal(val)
-	dp.Attributes().InsertString("storage_status", storageStatusAttributeValue)
+	dp.Attributes().UpsertString("storage_status", storageStatusAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -2654,7 +2654,7 @@ func (m *metricMongodbatlasProcessFtsCPUUsage) recordDataPoint(start pcommon.Tim
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetDoubleVal(val)
-	dp.Attributes().InsertString("cpu_state", cpuStateAttributeValue)
+	dp.Attributes().UpsertString("cpu_state", cpuStateAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -2705,7 +2705,7 @@ func (m *metricMongodbatlasProcessGlobalLock) recordDataPoint(start pcommon.Time
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetDoubleVal(val)
-	dp.Attributes().InsertString("global_lock_state", globalLockStateAttributeValue)
+	dp.Attributes().UpsertString("global_lock_state", globalLockStateAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -2805,7 +2805,7 @@ func (m *metricMongodbatlasProcessIndexCounters) recordDataPoint(start pcommon.T
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetDoubleVal(val)
-	dp.Attributes().InsertString("btree_counter_type", btreeCounterTypeAttributeValue)
+	dp.Attributes().UpsertString("btree_counter_type", btreeCounterTypeAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -3003,7 +3003,7 @@ func (m *metricMongodbatlasProcessMemoryUsage) recordDataPoint(start pcommon.Tim
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetDoubleVal(val)
-	dp.Attributes().InsertString("memory_state", memoryStateAttributeValue)
+	dp.Attributes().UpsertString("memory_state", memoryStateAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -3054,7 +3054,7 @@ func (m *metricMongodbatlasProcessNetworkIo) recordDataPoint(start pcommon.Times
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetDoubleVal(val)
-	dp.Attributes().InsertString("direction", directionAttributeValue)
+	dp.Attributes().UpsertString("direction", directionAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -3205,7 +3205,7 @@ func (m *metricMongodbatlasProcessOplogTime) recordDataPoint(start pcommon.Times
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetDoubleVal(val)
-	dp.Attributes().InsertString("oplog_type", oplogTypeAttributeValue)
+	dp.Attributes().UpsertString("oplog_type", oplogTypeAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -3256,7 +3256,7 @@ func (m *metricMongodbatlasProcessPageFaults) recordDataPoint(start pcommon.Time
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetDoubleVal(val)
-	dp.Attributes().InsertString("memory_issue_type", memoryIssueTypeAttributeValue)
+	dp.Attributes().UpsertString("memory_issue_type", memoryIssueTypeAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -3356,7 +3356,7 @@ func (m *metricMongodbatlasProcessTickets) recordDataPoint(start pcommon.Timesta
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetDoubleVal(val)
-	dp.Attributes().InsertString("ticket_type", ticketTypeAttributeValue)
+	dp.Attributes().UpsertString("ticket_type", ticketTypeAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -3407,7 +3407,7 @@ func (m *metricMongodbatlasSystemCPUNormalizedUsageAverage) recordDataPoint(star
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetDoubleVal(val)
-	dp.Attributes().InsertString("cpu_state", cpuStateAttributeValue)
+	dp.Attributes().UpsertString("cpu_state", cpuStateAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -3458,7 +3458,7 @@ func (m *metricMongodbatlasSystemCPUNormalizedUsageMax) recordDataPoint(start pc
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetDoubleVal(val)
-	dp.Attributes().InsertString("cpu_state", cpuStateAttributeValue)
+	dp.Attributes().UpsertString("cpu_state", cpuStateAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -3509,7 +3509,7 @@ func (m *metricMongodbatlasSystemCPUUsageAverage) recordDataPoint(start pcommon.
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetDoubleVal(val)
-	dp.Attributes().InsertString("cpu_state", cpuStateAttributeValue)
+	dp.Attributes().UpsertString("cpu_state", cpuStateAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -3560,7 +3560,7 @@ func (m *metricMongodbatlasSystemCPUUsageMax) recordDataPoint(start pcommon.Time
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetDoubleVal(val)
-	dp.Attributes().InsertString("cpu_state", cpuStateAttributeValue)
+	dp.Attributes().UpsertString("cpu_state", cpuStateAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -3611,7 +3611,7 @@ func (m *metricMongodbatlasSystemFtsCPUNormalizedUsage) recordDataPoint(start pc
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetDoubleVal(val)
-	dp.Attributes().InsertString("cpu_state", cpuStateAttributeValue)
+	dp.Attributes().UpsertString("cpu_state", cpuStateAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -3662,7 +3662,7 @@ func (m *metricMongodbatlasSystemFtsCPUUsage) recordDataPoint(start pcommon.Time
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetDoubleVal(val)
-	dp.Attributes().InsertString("cpu_state", cpuStateAttributeValue)
+	dp.Attributes().UpsertString("cpu_state", cpuStateAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -3764,7 +3764,7 @@ func (m *metricMongodbatlasSystemFtsMemoryUsage) recordDataPoint(start pcommon.T
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetDoubleVal(val)
-	dp.Attributes().InsertString("memory_state", memoryStateAttributeValue)
+	dp.Attributes().UpsertString("memory_state", memoryStateAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -3815,7 +3815,7 @@ func (m *metricMongodbatlasSystemMemoryUsageAverage) recordDataPoint(start pcomm
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetDoubleVal(val)
-	dp.Attributes().InsertString("memory_status", memoryStatusAttributeValue)
+	dp.Attributes().UpsertString("memory_status", memoryStatusAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -3866,7 +3866,7 @@ func (m *metricMongodbatlasSystemMemoryUsageMax) recordDataPoint(start pcommon.T
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetDoubleVal(val)
-	dp.Attributes().InsertString("memory_status", memoryStatusAttributeValue)
+	dp.Attributes().UpsertString("memory_status", memoryStatusAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -3917,7 +3917,7 @@ func (m *metricMongodbatlasSystemNetworkIoAverage) recordDataPoint(start pcommon
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetDoubleVal(val)
-	dp.Attributes().InsertString("direction", directionAttributeValue)
+	dp.Attributes().UpsertString("direction", directionAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -3968,7 +3968,7 @@ func (m *metricMongodbatlasSystemNetworkIoMax) recordDataPoint(start pcommon.Tim
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetDoubleVal(val)
-	dp.Attributes().InsertString("direction", directionAttributeValue)
+	dp.Attributes().UpsertString("direction", directionAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -4019,7 +4019,7 @@ func (m *metricMongodbatlasSystemPagingIoAverage) recordDataPoint(start pcommon.
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetDoubleVal(val)
-	dp.Attributes().InsertString("direction", directionAttributeValue)
+	dp.Attributes().UpsertString("direction", directionAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -4070,7 +4070,7 @@ func (m *metricMongodbatlasSystemPagingIoMax) recordDataPoint(start pcommon.Time
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetDoubleVal(val)
-	dp.Attributes().InsertString("direction", directionAttributeValue)
+	dp.Attributes().UpsertString("direction", directionAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -4121,7 +4121,7 @@ func (m *metricMongodbatlasSystemPagingUsageAverage) recordDataPoint(start pcomm
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetDoubleVal(val)
-	dp.Attributes().InsertString("memory_state", memoryStateAttributeValue)
+	dp.Attributes().UpsertString("memory_state", memoryStateAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -4172,7 +4172,7 @@ func (m *metricMongodbatlasSystemPagingUsageMax) recordDataPoint(start pcommon.T
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetDoubleVal(val)
-	dp.Attributes().InsertString("memory_state", memoryStateAttributeValue)
+	dp.Attributes().UpsertString("memory_state", memoryStateAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
diff --git a/receiver/mongodbreceiver/internal/metadata/generated_metrics.go b/receiver/mongodbreceiver/internal/metadata/generated_metrics.go
index 7566f04d75bc..ae4117142df9 100644
--- a/receiver/mongodbreceiver/internal/metadata/generated_metrics.go
+++ b/receiver/mongodbreceiver/internal/metadata/generated_metrics.go
@@ -261,7 +261,7 @@ func (m *metricMongodbCacheOperations) recordDataPoint(start pcommon.Timestamp,
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("type", typeAttributeValue)
+	dp.Attributes().UpsertString("type", typeAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -314,7 +314,7 @@ func (m *metricMongodbCollectionCount) recordDataPoint(start pcommon.Timestamp,
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("database", databaseAttributeValue)
+	dp.Attributes().UpsertString("database", databaseAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -367,8 +367,8 @@ func (m *metricMongodbConnectionCount) recordDataPoint(start pcommon.Timestamp,
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("database", databaseAttributeValue)
-	dp.Attributes().InsertString("type", connectionTypeAttributeValue)
+	dp.Attributes().UpsertString("database", databaseAttributeValue)
+	dp.Attributes().UpsertString("type", connectionTypeAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -523,7 +523,7 @@ func (m *metricMongodbDataSize) recordDataPoint(start pcommon.Timestamp, ts pcom
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("database", databaseAttributeValue)
+	dp.Attributes().UpsertString("database", databaseAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -627,8 +627,8 @@ func (m *metricMongodbDocumentOperationCount) recordDataPoint(start pcommon.Time
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("database", databaseAttributeValue)
-	dp.Attributes().InsertString("operation", operationAttributeValue)
+	dp.Attributes().UpsertString("database", databaseAttributeValue)
+	dp.Attributes().UpsertString("operation", operationAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -681,7 +681,7 @@ func (m *metricMongodbExtentCount) recordDataPoint(start pcommon.Timestamp, ts p
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("database", databaseAttributeValue)
+	dp.Attributes().UpsertString("database", databaseAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -785,8 +785,8 @@ func (m *metricMongodbIndexAccessCount) recordDataPoint(start pcommon.Timestamp,
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("database", databaseAttributeValue)
-	dp.Attributes().InsertString("collection", collectionAttributeValue)
+	dp.Attributes().UpsertString("database", databaseAttributeValue)
+	dp.Attributes().UpsertString("collection", collectionAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -839,7 +839,7 @@ func (m *metricMongodbIndexCount) recordDataPoint(start pcommon.Timestamp, ts pc
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("database", databaseAttributeValue)
+	dp.Attributes().UpsertString("database", databaseAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -892,7 +892,7 @@ func (m *metricMongodbIndexSize) recordDataPoint(start pcommon.Timestamp, ts pco
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("database", databaseAttributeValue)
+	dp.Attributes().UpsertString("database", databaseAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -945,8 +945,8 @@ func (m *metricMongodbMemoryUsage) recordDataPoint(start pcommon.Timestamp, ts p
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("database", databaseAttributeValue)
-	dp.Attributes().InsertString("type", memoryTypeAttributeValue)
+	dp.Attributes().UpsertString("database", databaseAttributeValue)
+	dp.Attributes().UpsertString("type", memoryTypeAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1152,7 +1152,7 @@ func (m *metricMongodbObjectCount) recordDataPoint(start pcommon.Timestamp, ts p
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("database", databaseAttributeValue)
+	dp.Attributes().UpsertString("database", databaseAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1205,7 +1205,7 @@ func (m *metricMongodbOperationCount) recordDataPoint(start pcommon.Timestamp, t
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("operation", operationAttributeValue)
+	dp.Attributes().UpsertString("operation", operationAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1258,7 +1258,7 @@ func (m *metricMongodbOperationTime) recordDataPoint(start pcommon.Timestamp, ts
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("operation", operationAttributeValue)
+	dp.Attributes().UpsertString("operation", operationAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1362,7 +1362,7 @@ func (m *metricMongodbStorageSize) recordDataPoint(start pcommon.Timestamp, ts p
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("database", databaseAttributeValue)
+	dp.Attributes().UpsertString("database", databaseAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
diff --git a/receiver/mysqlreceiver/internal/metadata/generated_metrics.go b/receiver/mysqlreceiver/internal/metadata/generated_metrics.go
index afdd072888eb..befd79a427df 100644
--- a/receiver/mysqlreceiver/internal/metadata/generated_metrics.go
+++ b/receiver/mysqlreceiver/internal/metadata/generated_metrics.go
@@ -623,7 +623,7 @@ func (m *metricMysqlBufferPoolDataPages) recordDataPoint(start pcommon.Timestamp
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("status", bufferPoolDataAttributeValue)
+	dp.Attributes().UpsertString("status", bufferPoolDataAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -727,7 +727,7 @@ func (m *metricMysqlBufferPoolOperations) recordDataPoint(start pcommon.Timestam
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("operation", bufferPoolOperationsAttributeValue)
+	dp.Attributes().UpsertString("operation", bufferPoolOperationsAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -831,7 +831,7 @@ func (m *metricMysqlBufferPoolPages) recordDataPoint(start pcommon.Timestamp, ts
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("kind", bufferPoolPagesAttributeValue)
+	dp.Attributes().UpsertString("kind", bufferPoolPagesAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -884,7 +884,7 @@ func (m *metricMysqlBufferPoolUsage) recordDataPoint(start pcommon.Timestamp, ts
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("status", bufferPoolDataAttributeValue)
+	dp.Attributes().UpsertString("status", bufferPoolDataAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -937,7 +937,7 @@ func (m *metricMysqlCommands) recordDataPoint(start pcommon.Timestamp, ts pcommo
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("command", commandAttributeValue)
+	dp.Attributes().UpsertString("command", commandAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -990,7 +990,7 @@ func (m *metricMysqlDoubleWrites) recordDataPoint(start pcommon.Timestamp, ts pc
 	dp.SetStartTimestamp(start)
 	dp.SetTimestamp(ts)
 	dp.SetIntVal(val)
-	dp.Attributes().InsertString("kind", doubleWritesAttributeValue)
+	dp.Attributes().UpsertString("kind", doubleWritesAttributeValue)
 }
 
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -1043,7 +1043,7 @@ func (m *metricMysqlHandlers) recordDataPoint(start pcommon.Timestamp, ts pcommo dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("kind", handlerAttributeValue) + dp.Attributes().UpsertString("kind", handlerAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -1096,7 +1096,7 @@ func (m *metricMysqlLocks) recordDataPoint(start pcommon.Timestamp, ts pcommon.T dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("kind", locksAttributeValue) + dp.Attributes().UpsertString("kind", locksAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -1149,7 +1149,7 @@ func (m *metricMysqlLogOperations) recordDataPoint(start pcommon.Timestamp, ts p dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("operation", logOperationsAttributeValue) + dp.Attributes().UpsertString("operation", logOperationsAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -1202,7 +1202,7 @@ func (m *metricMysqlOperations) recordDataPoint(start pcommon.Timestamp, ts pcom dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("operation", operationsAttributeValue) + dp.Attributes().UpsertString("operation", operationsAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -1255,7 +1255,7 @@ func (m *metricMysqlPageOperations) recordDataPoint(start pcommon.Timestamp, ts dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("operation", pageOperationsAttributeValue) + dp.Attributes().UpsertString("operation", pageOperationsAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -1308,7 +1308,7 @@ func (m *metricMysqlRowLocks) recordDataPoint(start pcommon.Timestamp, ts pcommo dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("kind", rowLocksAttributeValue) + dp.Attributes().UpsertString("kind", rowLocksAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -1361,7 +1361,7 @@ func (m *metricMysqlRowOperations) recordDataPoint(start pcommon.Timestamp, ts p dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("operation", rowOperationsAttributeValue) + dp.Attributes().UpsertString("operation", rowOperationsAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -1414,7 +1414,7 @@ func (m *metricMysqlSorts) recordDataPoint(start pcommon.Timestamp, ts pcommon.T dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("kind", sortsAttributeValue) + dp.Attributes().UpsertString("kind", sortsAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. 
@@ -1467,7 +1467,7 @@ func (m *metricMysqlThreads) recordDataPoint(start pcommon.Timestamp, ts pcommon dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("kind", threadsAttributeValue) + dp.Attributes().UpsertString("kind", threadsAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. diff --git a/receiver/nginxreceiver/internal/metadata/generated_metrics.go b/receiver/nginxreceiver/internal/metadata/generated_metrics.go index 16f57ac3a036..99d20b8b1138 100644 --- a/receiver/nginxreceiver/internal/metadata/generated_metrics.go +++ b/receiver/nginxreceiver/internal/metadata/generated_metrics.go @@ -148,7 +148,7 @@ func (m *metricNginxConnectionsCurrent) recordDataPoint(start pcommon.Timestamp, dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("state", stateAttributeValue) + dp.Attributes().UpsertString("state", stateAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. diff --git a/receiver/nsxtreceiver/internal/metadata/generated_metrics.go b/receiver/nsxtreceiver/internal/metadata/generated_metrics.go index 5c3703d30ca2..a1e906ffc822 100644 --- a/receiver/nsxtreceiver/internal/metadata/generated_metrics.go +++ b/receiver/nsxtreceiver/internal/metadata/generated_metrics.go @@ -183,7 +183,7 @@ func (m *metricNsxtNodeCPUUtilization) recordDataPoint(start pcommon.Timestamp, dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().InsertString("class", classAttributeValue) + dp.Attributes().UpsertString("class", classAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -236,7 +236,7 @@ func (m *metricNsxtNodeFilesystemUsage) recordDataPoint(start pcommon.Timestamp, dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("state", diskStateAttributeValue) + dp.Attributes().UpsertString("state", diskStateAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -440,7 +440,7 @@ func (m *metricNsxtNodeNetworkIo) recordDataPoint(start pcommon.Timestamp, ts pc dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("direction", directionAttributeValue) + dp.Attributes().UpsertString("direction", directionAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -493,8 +493,8 @@ func (m *metricNsxtNodeNetworkPacketCount) recordDataPoint(start pcommon.Timesta dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("direction", directionAttributeValue) - dp.Attributes().InsertString("type", packetTypeAttributeValue) + dp.Attributes().UpsertString("direction", directionAttributeValue) + dp.Attributes().UpsertString("type", packetTypeAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. 
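The generated_metrics.go hunks above and below are behavior-preserving: each recordDataPoint appends a fresh data point whose attribute map starts empty, so there is never an existing key for Upsert* to overwrite. A hedged sketch of that pattern (standalone; the metric value and attribute are illustrative):

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	dps := pmetric.NewNumberDataPointSlice()
	dp := dps.AppendEmpty() // fresh data point, empty attribute map
	dp.SetIntVal(42)
	dp.Attributes().UpsertString("database", "admin") // identical outcome to InsertString here
	fmt.Println(dp.Attributes().Len()) // prints 1
}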
diff --git a/receiver/postgresqlreceiver/internal/metadata/custom.go b/receiver/postgresqlreceiver/internal/metadata/custom.go index 423fdf536621..bdac6869c009 100644 --- a/receiver/postgresqlreceiver/internal/metadata/custom.go +++ b/receiver/postgresqlreceiver/internal/metadata/custom.go @@ -59,7 +59,7 @@ func (m *metricPostgresqlBlocksRead) recordDatapointWithoutDatabaseAndTable(star dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("source", sourceAttributeValue) + dp.Attributes().UpsertString("source", sourceAttributeValue) } // RecordPostgresqlCommitsDataPointWithoutDatabase adds a data point to postgresql.commits metric without the database metric attribute @@ -105,7 +105,7 @@ func (m *metricPostgresqlRows) recordDatapointWithoutDatabaseAndTable(start pcom dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("state", stateAttributeValue) + dp.Attributes().UpsertString("state", stateAttributeValue) } // RecordPostgresqlOperationsDataPointWithoutDatabaseAndTable adds a data point to postgresql.operations metric without the database or table metric attribute @@ -121,5 +121,5 @@ func (m *metricPostgresqlOperations) recordDatapointWithoutDatabaseAndTable(star dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("operation", operationAttributeValue) + dp.Attributes().UpsertString("operation", operationAttributeValue) } diff --git a/receiver/postgresqlreceiver/internal/metadata/generated_metrics.go b/receiver/postgresqlreceiver/internal/metadata/generated_metrics.go index 392902f85592..b241246816e0 100644 --- a/receiver/postgresqlreceiver/internal/metadata/generated_metrics.go +++ b/receiver/postgresqlreceiver/internal/metadata/generated_metrics.go @@ -363,7 +363,7 @@ func (m *metricPostgresqlBackends) recordDataPoint(start pcommon.Timestamp, ts p dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("database", databaseAttributeValue) + dp.Attributes().UpsertString("database", databaseAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -467,7 +467,7 @@ func (m *metricPostgresqlBgwriterBuffersWrites) recordDataPoint(start pcommon.Ti dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("source", bgBufferSourceAttributeValue) + dp.Attributes().UpsertString("source", bgBufferSourceAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -520,7 +520,7 @@ func (m *metricPostgresqlBgwriterCheckpointCount) recordDataPoint(start pcommon. dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("type", bgCheckpointTypeAttributeValue) + dp.Attributes().UpsertString("type", bgCheckpointTypeAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -573,7 +573,7 @@ func (m *metricPostgresqlBgwriterDuration) recordDataPoint(start pcommon.Timesta dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("type", bgDurationTypeAttributeValue) + dp.Attributes().UpsertString("type", bgDurationTypeAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. 
@@ -677,9 +677,9 @@ func (m *metricPostgresqlBlocksRead) recordDataPoint(start pcommon.Timestamp, ts dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("database", databaseAttributeValue) - dp.Attributes().InsertString("table", tableAttributeValue) - dp.Attributes().InsertString("source", sourceAttributeValue) + dp.Attributes().UpsertString("database", databaseAttributeValue) + dp.Attributes().UpsertString("table", tableAttributeValue) + dp.Attributes().UpsertString("source", sourceAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -732,7 +732,7 @@ func (m *metricPostgresqlCommits) recordDataPoint(start pcommon.Timestamp, ts pc dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("database", databaseAttributeValue) + dp.Attributes().UpsertString("database", databaseAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -885,7 +885,7 @@ func (m *metricPostgresqlDbSize) recordDataPoint(start pcommon.Timestamp, ts pco dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("database", databaseAttributeValue) + dp.Attributes().UpsertString("database", databaseAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -1038,9 +1038,9 @@ func (m *metricPostgresqlOperations) recordDataPoint(start pcommon.Timestamp, ts dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("database", databaseAttributeValue) - dp.Attributes().InsertString("table", tableAttributeValue) - dp.Attributes().InsertString("operation", operationAttributeValue) + dp.Attributes().UpsertString("database", databaseAttributeValue) + dp.Attributes().UpsertString("table", tableAttributeValue) + dp.Attributes().UpsertString("operation", operationAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -1091,7 +1091,7 @@ func (m *metricPostgresqlReplicationDataDelay) recordDataPoint(start pcommon.Tim dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("replication_client", replicationClientAttributeValue) + dp.Attributes().UpsertString("replication_client", replicationClientAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -1144,7 +1144,7 @@ func (m *metricPostgresqlRollbacks) recordDataPoint(start pcommon.Timestamp, ts dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("database", databaseAttributeValue) + dp.Attributes().UpsertString("database", databaseAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. 
@@ -1197,9 +1197,9 @@ func (m *metricPostgresqlRows) recordDataPoint(start pcommon.Timestamp, ts pcomm dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("database", databaseAttributeValue) - dp.Attributes().InsertString("table", tableAttributeValue) - dp.Attributes().InsertString("state", stateAttributeValue) + dp.Attributes().UpsertString("database", databaseAttributeValue) + dp.Attributes().UpsertString("table", tableAttributeValue) + dp.Attributes().UpsertString("state", stateAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -1452,8 +1452,8 @@ func (m *metricPostgresqlWalLag) recordDataPoint(start pcommon.Timestamp, ts pco dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("operation", walOperationLagAttributeValue) - dp.Attributes().InsertString("replication_client", replicationClientAttributeValue) + dp.Attributes().UpsertString("operation", walOperationLagAttributeValue) + dp.Attributes().UpsertString("replication_client", replicationClientAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. diff --git a/receiver/rabbitmqreceiver/internal/metadata/generated_metrics.go b/receiver/rabbitmqreceiver/internal/metadata/generated_metrics.go index bdac75fc19e1..892125e8a56c 100644 --- a/receiver/rabbitmqreceiver/internal/metadata/generated_metrics.go +++ b/receiver/rabbitmqreceiver/internal/metadata/generated_metrics.go @@ -201,7 +201,7 @@ func (m *metricRabbitmqMessageCurrent) recordDataPoint(start pcommon.Timestamp, dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("state", messageStateAttributeValue) + dp.Attributes().UpsertString("state", messageStateAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. diff --git a/receiver/redisreceiver/internal/metadata/generated_metrics.go b/receiver/redisreceiver/internal/metadata/generated_metrics.go index 836c11db90f0..9b49bffe70cb 100644 --- a/receiver/redisreceiver/internal/metadata/generated_metrics.go +++ b/receiver/redisreceiver/internal/metadata/generated_metrics.go @@ -407,7 +407,7 @@ func (m *metricRedisCmdCalls) recordDataPoint(start pcommon.Timestamp, ts pcommo dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("cmd", cmdAttributeValue) + dp.Attributes().UpsertString("cmd", cmdAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -460,7 +460,7 @@ func (m *metricRedisCmdUsec) recordDataPoint(start pcommon.Timestamp, ts pcommon dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("cmd", cmdAttributeValue) + dp.Attributes().UpsertString("cmd", cmdAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -715,7 +715,7 @@ func (m *metricRedisCPUTime) recordDataPoint(start pcommon.Timestamp, ts pcommon dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().InsertString("state", stateAttributeValue) + dp.Attributes().UpsertString("state", stateAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. 
@@ -766,7 +766,7 @@ func (m *metricRedisDbAvgTTL) recordDataPoint(start pcommon.Timestamp, ts pcommo dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("db", dbAttributeValue) + dp.Attributes().UpsertString("db", dbAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -817,7 +817,7 @@ func (m *metricRedisDbExpires) recordDataPoint(start pcommon.Timestamp, ts pcomm dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("db", dbAttributeValue) + dp.Attributes().UpsertString("db", dbAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -868,7 +868,7 @@ func (m *metricRedisDbKeys) recordDataPoint(start pcommon.Timestamp, ts pcommon. dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("db", dbAttributeValue) + dp.Attributes().UpsertString("db", dbAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -1719,7 +1719,7 @@ func (m *metricRedisRole) recordDataPoint(start pcommon.Timestamp, ts pcommon.Ti dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("role", roleAttributeValue) + dp.Attributes().UpsertString("role", roleAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. diff --git a/receiver/riakreceiver/internal/metadata/generated_metrics.go b/receiver/riakreceiver/internal/metadata/generated_metrics.go index 830a1aaff0a8..cfb0d7e4fa45 100644 --- a/receiver/riakreceiver/internal/metadata/generated_metrics.go +++ b/receiver/riakreceiver/internal/metadata/generated_metrics.go @@ -180,7 +180,7 @@ func (m *metricRiakNodeOperationCount) recordDataPoint(start pcommon.Timestamp, dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("request", requestAttributeValue) + dp.Attributes().UpsertString("request", requestAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -231,7 +231,7 @@ func (m *metricRiakNodeOperationTimeMean) recordDataPoint(start pcommon.Timestam dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("request", requestAttributeValue) + dp.Attributes().UpsertString("request", requestAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -335,7 +335,7 @@ func (m *metricRiakVnodeIndexOperationCount) recordDataPoint(start pcommon.Times dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("operation", operationAttributeValue) + dp.Attributes().UpsertString("operation", operationAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -388,7 +388,7 @@ func (m *metricRiakVnodeOperationCount) recordDataPoint(start pcommon.Timestamp, dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("request", requestAttributeValue) + dp.Attributes().UpsertString("request", requestAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. 
diff --git a/receiver/saphanareceiver/internal/metadata/generated_metrics.go b/receiver/saphanareceiver/internal/metadata/generated_metrics.go index aa56efc0f332..b3dc1d4ccdba 100644 --- a/receiver/saphanareceiver/internal/metadata/generated_metrics.go +++ b/receiver/saphanareceiver/internal/metadata/generated_metrics.go @@ -743,7 +743,7 @@ func (m *metricSaphanaAlertCount) recordDataPoint(start pcommon.Timestamp, ts pc dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("rating", alertRatingAttributeValue) + dp.Attributes().UpsertString("rating", alertRatingAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -845,8 +845,8 @@ func (m *metricSaphanaColumnMemoryUsed) recordDataPoint(start pcommon.Timestamp, dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("type", columnMemoryTypeAttributeValue) - dp.Attributes().InsertString("subtype", columnMemorySubtypeAttributeValue) + dp.Attributes().UpsertString("type", columnMemoryTypeAttributeValue) + dp.Attributes().UpsertString("subtype", columnMemorySubtypeAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -899,7 +899,7 @@ func (m *metricSaphanaComponentMemoryUsed) recordDataPoint(start pcommon.Timesta dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("component", componentAttributeValue) + dp.Attributes().UpsertString("component", componentAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -952,7 +952,7 @@ func (m *metricSaphanaConnectionCount) recordDataPoint(start pcommon.Timestamp, dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("status", connectionStatusAttributeValue) + dp.Attributes().UpsertString("status", connectionStatusAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -1005,7 +1005,7 @@ func (m *metricSaphanaCPUUsed) recordDataPoint(start pcommon.Timestamp, ts pcomm dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("type", cpuTypeAttributeValue) + dp.Attributes().UpsertString("type", cpuTypeAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -1058,9 +1058,9 @@ func (m *metricSaphanaDiskSizeCurrent) recordDataPoint(start pcommon.Timestamp, dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("path", pathAttributeValue) - dp.Attributes().InsertString("usage_type", diskUsageTypeAttributeValue) - dp.Attributes().InsertString("state", diskStateUsedFreeAttributeValue) + dp.Attributes().UpsertString("path", pathAttributeValue) + dp.Attributes().UpsertString("usage_type", diskUsageTypeAttributeValue) + dp.Attributes().UpsertString("state", diskStateUsedFreeAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. 
@@ -1113,7 +1113,7 @@ func (m *metricSaphanaHostMemoryCurrent) recordDataPoint(start pcommon.Timestamp dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("state", memoryStateUsedFreeAttributeValue) + dp.Attributes().UpsertString("state", memoryStateUsedFreeAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -1166,7 +1166,7 @@ func (m *metricSaphanaHostSwapCurrent) recordDataPoint(start pcommon.Timestamp, dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("state", hostSwapStateAttributeValue) + dp.Attributes().UpsertString("state", hostSwapStateAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -1270,7 +1270,7 @@ func (m *metricSaphanaInstanceMemoryCurrent) recordDataPoint(start pcommon.Times dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("state", memoryStateUsedFreeAttributeValue) + dp.Attributes().UpsertString("state", memoryStateUsedFreeAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -1423,8 +1423,8 @@ func (m *metricSaphanaLicenseExpirationTime) recordDataPoint(start pcommon.Times dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("system", systemAttributeValue) - dp.Attributes().InsertString("product", productAttributeValue) + dp.Attributes().UpsertString("system", systemAttributeValue) + dp.Attributes().UpsertString("product", productAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -1477,8 +1477,8 @@ func (m *metricSaphanaLicenseLimit) recordDataPoint(start pcommon.Timestamp, ts dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("system", systemAttributeValue) - dp.Attributes().InsertString("product", productAttributeValue) + dp.Attributes().UpsertString("system", systemAttributeValue) + dp.Attributes().UpsertString("product", productAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -1531,8 +1531,8 @@ func (m *metricSaphanaLicensePeak) recordDataPoint(start pcommon.Timestamp, ts p dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("system", systemAttributeValue) - dp.Attributes().InsertString("product", productAttributeValue) + dp.Attributes().UpsertString("system", systemAttributeValue) + dp.Attributes().UpsertString("product", productAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -1634,7 +1634,7 @@ func (m *metricSaphanaNetworkRequestCount) recordDataPoint(start pcommon.Timesta dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("state", activePendingRequestStateAttributeValue) + dp.Attributes().UpsertString("state", activePendingRequestStateAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. 
@@ -1687,7 +1687,7 @@ func (m *metricSaphanaNetworkRequestFinishedCount) recordDataPoint(start pcommon dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("type", internalExternalRequestTypeAttributeValue) + dp.Attributes().UpsertString("type", internalExternalRequestTypeAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -1738,10 +1738,10 @@ func (m *metricSaphanaReplicationAverageTime) recordDataPoint(start pcommon.Time dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().InsertString("primary", primaryHostAttributeValue) - dp.Attributes().InsertString("secondary", secondaryHostAttributeValue) - dp.Attributes().InsertString("port", portAttributeValue) - dp.Attributes().InsertString("mode", replicationModeAttributeValue) + dp.Attributes().UpsertString("primary", primaryHostAttributeValue) + dp.Attributes().UpsertString("secondary", secondaryHostAttributeValue) + dp.Attributes().UpsertString("port", portAttributeValue) + dp.Attributes().UpsertString("mode", replicationModeAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -1794,10 +1794,10 @@ func (m *metricSaphanaReplicationBacklogSize) recordDataPoint(start pcommon.Time dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("primary", primaryHostAttributeValue) - dp.Attributes().InsertString("secondary", secondaryHostAttributeValue) - dp.Attributes().InsertString("port", portAttributeValue) - dp.Attributes().InsertString("mode", replicationModeAttributeValue) + dp.Attributes().UpsertString("primary", primaryHostAttributeValue) + dp.Attributes().UpsertString("secondary", secondaryHostAttributeValue) + dp.Attributes().UpsertString("port", portAttributeValue) + dp.Attributes().UpsertString("mode", replicationModeAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -1850,10 +1850,10 @@ func (m *metricSaphanaReplicationBacklogTime) recordDataPoint(start pcommon.Time dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("primary", primaryHostAttributeValue) - dp.Attributes().InsertString("secondary", secondaryHostAttributeValue) - dp.Attributes().InsertString("port", portAttributeValue) - dp.Attributes().InsertString("mode", replicationModeAttributeValue) + dp.Attributes().UpsertString("primary", primaryHostAttributeValue) + dp.Attributes().UpsertString("secondary", secondaryHostAttributeValue) + dp.Attributes().UpsertString("port", portAttributeValue) + dp.Attributes().UpsertString("mode", replicationModeAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -1906,7 +1906,7 @@ func (m *metricSaphanaRowStoreMemoryUsed) recordDataPoint(start pcommon.Timestam dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("type", rowMemoryTypeAttributeValue) + dp.Attributes().UpsertString("type", rowMemoryTypeAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. 
@@ -1959,8 +1959,8 @@ func (m *metricSaphanaSchemaMemoryUsedCurrent) recordDataPoint(start pcommon.Tim dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("schema", schemaAttributeValue) - dp.Attributes().InsertString("type", schemaMemoryTypeAttributeValue) + dp.Attributes().UpsertString("schema", schemaAttributeValue) + dp.Attributes().UpsertString("type", schemaMemoryTypeAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -2013,7 +2013,7 @@ func (m *metricSaphanaSchemaMemoryUsedMax) recordDataPoint(start pcommon.Timesta dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("schema", schemaAttributeValue) + dp.Attributes().UpsertString("schema", schemaAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -2066,8 +2066,8 @@ func (m *metricSaphanaSchemaOperationCount) recordDataPoint(start pcommon.Timest dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("schema", schemaAttributeValue) - dp.Attributes().InsertString("type", schemaOperationTypeAttributeValue) + dp.Attributes().UpsertString("schema", schemaAttributeValue) + dp.Attributes().UpsertString("type", schemaOperationTypeAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -2120,7 +2120,7 @@ func (m *metricSaphanaSchemaRecordCompressedCount) recordDataPoint(start pcommon dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("schema", schemaAttributeValue) + dp.Attributes().UpsertString("schema", schemaAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -2173,8 +2173,8 @@ func (m *metricSaphanaSchemaRecordCount) recordDataPoint(start pcommon.Timestamp dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("schema", schemaAttributeValue) - dp.Attributes().InsertString("type", schemaRecordTypeAttributeValue) + dp.Attributes().UpsertString("schema", schemaAttributeValue) + dp.Attributes().UpsertString("type", schemaRecordTypeAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -2227,7 +2227,7 @@ func (m *metricSaphanaServiceCodeSize) recordDataPoint(start pcommon.Timestamp, dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("service", serviceAttributeValue) + dp.Attributes().UpsertString("service", serviceAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -2280,7 +2280,7 @@ func (m *metricSaphanaServiceCount) recordDataPoint(start pcommon.Timestamp, ts dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("status", serviceStatusAttributeValue) + dp.Attributes().UpsertString("status", serviceStatusAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. 
@@ -2333,7 +2333,7 @@ func (m *metricSaphanaServiceMemoryCompactorsAllocated) recordDataPoint(start pc dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("service", serviceAttributeValue) + dp.Attributes().UpsertString("service", serviceAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -2386,7 +2386,7 @@ func (m *metricSaphanaServiceMemoryCompactorsFreeable) recordDataPoint(start pco dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("service", serviceAttributeValue) + dp.Attributes().UpsertString("service", serviceAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -2439,7 +2439,7 @@ func (m *metricSaphanaServiceMemoryEffectiveLimit) recordDataPoint(start pcommon dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("service", serviceAttributeValue) + dp.Attributes().UpsertString("service", serviceAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -2492,8 +2492,8 @@ func (m *metricSaphanaServiceMemoryHeapCurrent) recordDataPoint(start pcommon.Ti dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("service", serviceAttributeValue) - dp.Attributes().InsertString("state", memoryStateUsedFreeAttributeValue) + dp.Attributes().UpsertString("service", serviceAttributeValue) + dp.Attributes().UpsertString("state", memoryStateUsedFreeAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -2546,7 +2546,7 @@ func (m *metricSaphanaServiceMemoryLimit) recordDataPoint(start pcommon.Timestam dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("service", serviceAttributeValue) + dp.Attributes().UpsertString("service", serviceAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -2599,8 +2599,8 @@ func (m *metricSaphanaServiceMemorySharedCurrent) recordDataPoint(start pcommon. dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("service", serviceAttributeValue) - dp.Attributes().InsertString("state", memoryStateUsedFreeAttributeValue) + dp.Attributes().UpsertString("service", serviceAttributeValue) + dp.Attributes().UpsertString("state", memoryStateUsedFreeAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -2653,8 +2653,8 @@ func (m *metricSaphanaServiceMemoryUsed) recordDataPoint(start pcommon.Timestamp dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("service", serviceAttributeValue) - dp.Attributes().InsertString("type", serviceMemoryUsedTypeAttributeValue) + dp.Attributes().UpsertString("service", serviceAttributeValue) + dp.Attributes().UpsertString("type", serviceMemoryUsedTypeAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. 
@@ -2707,7 +2707,7 @@ func (m *metricSaphanaServiceStackSize) recordDataPoint(start pcommon.Timestamp, dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("service", serviceAttributeValue) + dp.Attributes().UpsertString("service", serviceAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -2760,7 +2760,7 @@ func (m *metricSaphanaServiceThreadCount) recordDataPoint(start pcommon.Timestam dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("status", threadStatusAttributeValue) + dp.Attributes().UpsertString("status", threadStatusAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -2864,7 +2864,7 @@ func (m *metricSaphanaTransactionCount) recordDataPoint(start pcommon.Timestamp, dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("type", transactionTypeAttributeValue) + dp.Attributes().UpsertString("type", transactionTypeAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -2917,8 +2917,8 @@ func (m *metricSaphanaUptime) recordDataPoint(start pcommon.Timestamp, ts pcommo dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("system", systemAttributeValue) - dp.Attributes().InsertString("database", databaseAttributeValue) + dp.Attributes().UpsertString("system", systemAttributeValue) + dp.Attributes().UpsertString("database", databaseAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -2971,9 +2971,9 @@ func (m *metricSaphanaVolumeOperationCount) recordDataPoint(start pcommon.Timest dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("path", pathAttributeValue) - dp.Attributes().InsertString("usage_type", diskUsageTypeAttributeValue) - dp.Attributes().InsertString("type", volumeOperationTypeAttributeValue) + dp.Attributes().UpsertString("path", pathAttributeValue) + dp.Attributes().UpsertString("usage_type", diskUsageTypeAttributeValue) + dp.Attributes().UpsertString("type", volumeOperationTypeAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -3026,9 +3026,9 @@ func (m *metricSaphanaVolumeOperationSize) recordDataPoint(start pcommon.Timesta dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("path", pathAttributeValue) - dp.Attributes().InsertString("usage_type", diskUsageTypeAttributeValue) - dp.Attributes().InsertString("type", volumeOperationTypeAttributeValue) + dp.Attributes().UpsertString("path", pathAttributeValue) + dp.Attributes().UpsertString("usage_type", diskUsageTypeAttributeValue) + dp.Attributes().UpsertString("type", volumeOperationTypeAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. 
@@ -3081,9 +3081,9 @@ func (m *metricSaphanaVolumeOperationTime) recordDataPoint(start pcommon.Timesta dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("path", pathAttributeValue) - dp.Attributes().InsertString("usage_type", diskUsageTypeAttributeValue) - dp.Attributes().InsertString("type", volumeOperationTypeAttributeValue) + dp.Attributes().UpsertString("path", pathAttributeValue) + dp.Attributes().UpsertString("usage_type", diskUsageTypeAttributeValue) + dp.Attributes().UpsertString("type", volumeOperationTypeAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. diff --git a/receiver/sapmreceiver/trace_receiver_test.go b/receiver/sapmreceiver/trace_receiver_test.go index aa2da78b6e2b..20e5ede72487 100644 --- a/receiver/sapmreceiver/trace_receiver_test.go +++ b/receiver/sapmreceiver/trace_receiver_test.go @@ -50,10 +50,10 @@ func expectedTraceData(t1, t2, t3 time.Time) ptrace.Traces { traces := ptrace.NewTraces() rs := traces.ResourceSpans().AppendEmpty() - rs.Resource().Attributes().InsertString(conventions.AttributeServiceName, "issaTest") - rs.Resource().Attributes().InsertBool("bool", true) - rs.Resource().Attributes().InsertString("string", "yes") - rs.Resource().Attributes().InsertInt("int64", 10000000) + rs.Resource().Attributes().UpsertString(conventions.AttributeServiceName, "issaTest") + rs.Resource().Attributes().UpsertBool("bool", true) + rs.Resource().Attributes().UpsertString("string", "yes") + rs.Resource().Attributes().UpsertInt("int64", 10000000) spans := rs.ScopeSpans().AppendEmpty().Spans() span0 := spans.AppendEmpty() diff --git a/receiver/signalfxreceiver/signalfxv2_event_to_logdata.go b/receiver/signalfxreceiver/signalfxv2_event_to_logdata.go index 71fba6e94f08..b64e1b30eeee 100644 --- a/receiver/signalfxreceiver/signalfxv2_event_to_logdata.go +++ b/receiver/signalfxreceiver/signalfxv2_event_to_logdata.go @@ -35,55 +35,51 @@ func signalFxV2EventsToLogRecords(events []*sfxpb.Event, lrs plog.LogRecordSlice attrs.Clear() attrs.EnsureCapacity(2 + len(event.Dimensions) + len(event.Properties)) + for _, dim := range event.Dimensions { + attrs.UpsertString(dim.Key, dim.Value) + } + // The EventType field is stored as an attribute. eventType := event.EventType if eventType == "" { eventType = "unknown" } - attrs.InsertString(splunk.SFxEventType, eventType) + attrs.UpsertString(splunk.SFxEventType, eventType) // SignalFx timestamps are in millis so convert to nanos by multiplying // by 1 million. lr.SetTimestamp(pcommon.Timestamp(event.Timestamp * 1e6)) if event.Category != nil { - attrs.InsertInt(splunk.SFxEventCategoryKey, int64(*event.Category)) + attrs.UpsertInt(splunk.SFxEventCategoryKey, int64(*event.Category)) } else { // This gives us an unambiguous way of determining that a log record // represents a SignalFx event, even if category is missing from the // event. - attrs.Insert(splunk.SFxEventCategoryKey, pcommon.NewValueEmpty()) - } - - for _, dim := range event.Dimensions { - attrs.InsertString(dim.Key, dim.Value) + attrs.UpsertEmpty(splunk.SFxEventCategoryKey) } if len(event.Properties) > 0 { - propMapVal := pcommon.NewValueMap() - propMap := propMapVal.MapVal() + propMap := attrs.UpsertEmptyMap(splunk.SFxEventPropertiesKey) propMap.EnsureCapacity(len(event.Properties)) - for _, prop := range event.Properties { // No way to tell what value type is without testing each // individually. 
switch { case prop.Value.StrValue != nil: - propMap.InsertString(prop.Key, prop.Value.GetStrValue()) + propMap.UpsertString(prop.Key, prop.Value.GetStrValue()) case prop.Value.IntValue != nil: - propMap.InsertInt(prop.Key, prop.Value.GetIntValue()) + propMap.UpsertInt(prop.Key, prop.Value.GetIntValue()) case prop.Value.DoubleValue != nil: - propMap.InsertDouble(prop.Key, prop.Value.GetDoubleValue()) + propMap.UpsertDouble(prop.Key, prop.Value.GetDoubleValue()) case prop.Value.BoolValue != nil: - propMap.InsertBool(prop.Key, prop.Value.GetBoolValue()) + propMap.UpsertBool(prop.Key, prop.Value.GetBoolValue()) default: // If there is no property value, just insert a null to // record that the key was present. - propMap.Insert(prop.Key, pcommon.NewValueEmpty()) + propMap.UpsertEmpty(prop.Key) } } - - attrs.Insert(splunk.SFxEventPropertiesKey, propMapVal) } } } diff --git a/receiver/sqlserverreceiver/internal/metadata/generated_metrics.go b/receiver/sqlserverreceiver/internal/metadata/generated_metrics.go index 46a4a07dc8da..25cd3c1dd903 100644 --- a/receiver/sqlserverreceiver/internal/metadata/generated_metrics.go +++ b/receiver/sqlserverreceiver/internal/metadata/generated_metrics.go @@ -594,7 +594,7 @@ func (m *metricSqlserverPageOperationRate) recordDataPoint(start pcommon.Timesta dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().InsertString("type", pageOperationsAttributeValue) + dp.Attributes().UpsertString("type", pageOperationsAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. diff --git a/receiver/statsdreceiver/protocol/metric_translator.go b/receiver/statsdreceiver/protocol/metric_translator.go index 8f5e6b06f1d3..6312a86c71a0 100644 --- a/receiver/statsdreceiver/protocol/metric_translator.go +++ b/receiver/statsdreceiver/protocol/metric_translator.go @@ -44,7 +44,7 @@ func buildCounterMetric(parsedMetric statsDMetric, isMonotonicCounter bool, time dp.SetStartTimestamp(pcommon.NewTimestampFromTime(lastIntervalTime)) dp.SetTimestamp(pcommon.NewTimestampFromTime(timeNow)) for i := parsedMetric.description.attrs.Iter(); i.Next(); { - dp.Attributes().InsertString(string(i.Attribute().Key), i.Attribute().Value.AsString()) + dp.Attributes().UpsertString(string(i.Attribute().Key), i.Attribute().Value.AsString()) } return ilm @@ -62,7 +62,7 @@ func buildGaugeMetric(parsedMetric statsDMetric, timeNow time.Time) pmetric.Scop dp.SetDoubleVal(parsedMetric.gaugeValue()) dp.SetTimestamp(pcommon.NewTimestampFromTime(timeNow)) for i := parsedMetric.description.attrs.Iter(); i.Next(); { - dp.Attributes().InsertString(string(i.Attribute().Key), i.Attribute().Value.AsString()) + dp.Attributes().UpsertString(string(i.Attribute().Key), i.Attribute().Value.AsString()) } return ilm diff --git a/receiver/statsdreceiver/protocol/metric_translator_test.go b/receiver/statsdreceiver/protocol/metric_translator_test.go index fce1297e0f20..bb2e84b6249d 100644 --- a/receiver/statsdreceiver/protocol/metric_translator_test.go +++ b/receiver/statsdreceiver/protocol/metric_translator_test.go @@ -50,7 +50,7 @@ func TestBuildCounterMetric(t *testing.T) { dp.SetIntVal(32) dp.SetStartTimestamp(pcommon.NewTimestampFromTime(lastUpdateInterval)) dp.SetTimestamp(pcommon.NewTimestampFromTime(timeNow)) - dp.Attributes().InsertString("mykey", "myvalue") + dp.Attributes().UpsertString("mykey", "myvalue") assert.Equal(t, metric, expectedMetrics) } @@ -77,8 +77,8 @@ func TestBuildGaugeMetric(t *testing.T) { dp := 
expectedMetric.Gauge().DataPoints().AppendEmpty() dp.SetDoubleVal(32.3) dp.SetTimestamp(pcommon.NewTimestampFromTime(timeNow)) - dp.Attributes().InsertString("mykey", "myvalue") - dp.Attributes().InsertString("mykey2", "myvalue2") + dp.Attributes().UpsertString("mykey", "myvalue") + dp.Attributes().UpsertString("mykey2", "myvalue2") assert.Equal(t, metric, expectedMetrics) } @@ -114,7 +114,7 @@ func TestBuildSummaryMetricUnsampled(t *testing.T) { dp.SetStartTimestamp(pcommon.NewTimestampFromTime(timeNow.Add(-time.Minute))) dp.SetTimestamp(pcommon.NewTimestampFromTime(timeNow)) for _, kv := range desc.attrs.ToSlice() { - dp.Attributes().InsertString(string(kv.Key), kv.Value.AsString()) + dp.Attributes().UpsertString(string(kv.Key), kv.Value.AsString()) } quantile := []float64{0, 10, 50, 90, 95, 100} value := []float64{1, 1, 3, 6, 6, 6} @@ -197,7 +197,7 @@ func TestBuildSummaryMetricSampled(t *testing.T) { dp.SetStartTimestamp(pcommon.NewTimestampFromTime(timeNow.Add(-time.Minute))) dp.SetTimestamp(pcommon.NewTimestampFromTime(timeNow)) for _, kv := range desc.attrs.ToSlice() { - dp.Attributes().InsertString(string(kv.Key), kv.Value.AsString()) + dp.Attributes().UpsertString(string(kv.Key), kv.Value.AsString()) } for i := range test.percentiles { eachQuantile := dp.QuantileValues().AppendEmpty() diff --git a/receiver/vcenterreceiver/internal/metadata/generated_metrics.go b/receiver/vcenterreceiver/internal/metadata/generated_metrics.go index fd2ed62b6c0c..94560c5ea70f 100644 --- a/receiver/vcenterreceiver/internal/metadata/generated_metrics.go +++ b/receiver/vcenterreceiver/internal/metadata/generated_metrics.go @@ -507,7 +507,7 @@ func (m *metricVcenterClusterHostCount) recordDataPoint(start pcommon.Timestamp, dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertBool("effective", hostEffectiveAttributeValue) + dp.Attributes().UpsertBool("effective", hostEffectiveAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -713,7 +713,7 @@ func (m *metricVcenterClusterVMCount) recordDataPoint(start pcommon.Timestamp, t dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("power_state", vmCountPowerStateAttributeValue) + dp.Attributes().UpsertString("power_state", vmCountPowerStateAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -766,7 +766,7 @@ func (m *metricVcenterDatastoreDiskUsage) recordDataPoint(start pcommon.Timestam dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("disk_state", diskStateAttributeValue) + dp.Attributes().UpsertString("disk_state", diskStateAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -966,7 +966,7 @@ func (m *metricVcenterHostDiskLatencyAvg) recordDataPoint(start pcommon.Timestam dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("direction", diskDirectionAttributeValue) + dp.Attributes().UpsertString("direction", diskDirectionAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. 
@@ -1166,7 +1166,7 @@ func (m *metricVcenterHostDiskThroughput) recordDataPoint(start pcommon.Timestam dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("direction", diskDirectionAttributeValue) + dp.Attributes().UpsertString("direction", diskDirectionAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -1421,7 +1421,7 @@ func (m *metricVcenterHostNetworkPacketCount) recordDataPoint(start pcommon.Time dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("direction", throughputDirectionAttributeValue) + dp.Attributes().UpsertString("direction", throughputDirectionAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -1576,7 +1576,7 @@ func (m *metricVcenterHostNetworkPacketErrors) recordDataPoint(start pcommon.Tim dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("direction", throughputDirectionAttributeValue) + dp.Attributes().UpsertString("direction", throughputDirectionAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -1731,7 +1731,7 @@ func (m *metricVcenterHostNetworkThroughput) recordDataPoint(start pcommon.Times dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("direction", throughputDirectionAttributeValue) + dp.Attributes().UpsertString("direction", throughputDirectionAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -2139,8 +2139,8 @@ func (m *metricVcenterVMDiskLatencyAvg) recordDataPoint(start pcommon.Timestamp, dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("direction", diskDirectionAttributeValue) - dp.Attributes().InsertString("disk_type", diskTypeAttributeValue) + dp.Attributes().UpsertString("direction", diskDirectionAttributeValue) + dp.Attributes().UpsertString("disk_type", diskTypeAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -2191,7 +2191,7 @@ func (m *metricVcenterVMDiskLatencyAvgRead) recordDataPoint(start pcommon.Timest dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("disk_type", diskTypeAttributeValue) + dp.Attributes().UpsertString("disk_type", diskTypeAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -2242,7 +2242,7 @@ func (m *metricVcenterVMDiskLatencyAvgWrite) recordDataPoint(start pcommon.Times dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("disk_type", diskTypeAttributeValue) + dp.Attributes().UpsertString("disk_type", diskTypeAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -2395,7 +2395,7 @@ func (m *metricVcenterVMDiskUsage) recordDataPoint(start pcommon.Timestamp, ts p dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("disk_state", diskStateAttributeValue) + dp.Attributes().UpsertString("disk_state", diskStateAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. 
@@ -2599,7 +2599,7 @@ func (m *metricVcenterVMNetworkPacketCount) recordDataPoint(start pcommon.Timest dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("direction", throughputDirectionAttributeValue) + dp.Attributes().UpsertString("direction", throughputDirectionAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -2754,7 +2754,7 @@ func (m *metricVcenterVMNetworkThroughput) recordDataPoint(start pcommon.Timesta dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("direction", throughputDirectionAttributeValue) + dp.Attributes().UpsertString("direction", throughputDirectionAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. diff --git a/receiver/zookeeperreceiver/internal/metadata/generated_metrics.go b/receiver/zookeeperreceiver/internal/metadata/generated_metrics.go index 4251947e169f..8386cbb8700e 100644 --- a/receiver/zookeeperreceiver/internal/metadata/generated_metrics.go +++ b/receiver/zookeeperreceiver/internal/metadata/generated_metrics.go @@ -422,7 +422,7 @@ func (m *metricZookeeperFollowerCount) recordDataPoint(start pcommon.Timestamp, dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("state", stateAttributeValue) + dp.Attributes().UpsertString("state", stateAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -673,7 +673,7 @@ func (m *metricZookeeperPacketCount) recordDataPoint(start pcommon.Timestamp, ts dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().InsertString("direction", directionAttributeValue) + dp.Attributes().UpsertString("direction", directionAttributeValue) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. diff --git a/testbed/testbed/data_providers.go b/testbed/testbed/data_providers.go index 3a66e3727f09..fcba178af40c 100644 --- a/testbed/testbed/data_providers.go +++ b/testbed/testbed/data_providers.go @@ -131,8 +131,8 @@ func (dp *perfTestDataProvider) GenerateMetrics() (pmetric.Metrics, bool) { dataPoint.SetStartTimestamp(pcommon.NewTimestampFromTime(time.Now())) value := dp.dataItemsGenerated.Inc() dataPoint.SetIntVal(int64(value)) - dataPoint.Attributes().InsertString("item_index", "item_"+strconv.Itoa(j)) - dataPoint.Attributes().InsertString("batch_index", "batch_"+strconv.Itoa(int(batchIndex))) + dataPoint.Attributes().UpsertString("item_index", "item_"+strconv.Itoa(j)) + dataPoint.Attributes().UpsertString("batch_index", "batch_"+strconv.Itoa(int(batchIndex))) } } return md, false diff --git a/testbed/tests/trace_test.go b/testbed/tests/trace_test.go index 35809ebbf796..3f9e168cf973 100644 --- a/testbed/tests/trace_test.go +++ b/testbed/tests/trace_test.go @@ -345,7 +345,7 @@ func verifySingleSpan( // Send one span. td := ptrace.NewTraces() rs := td.ResourceSpans().AppendEmpty() - rs.Resource().Attributes().InsertString(conventions.AttributeServiceName, serviceName) + rs.Resource().Attributes().UpsertString(conventions.AttributeServiceName, serviceName) span := rs.ScopeSpans().AppendEmpty().Spans().AppendEmpty() span.SetTraceID(idutils.UInt64ToTraceID(0, 1)) span.SetSpanID(idutils.UInt64ToSpanID(1))
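A closing note on the signalfxreceiver hunk earlier in this patch: beyond the rename, it replaces attrs.Insert(key, pcommon.NewValueEmpty()) with UpsertEmpty, and builds the properties map in place with UpsertEmptyMap, which returns the nested map directly rather than requiring a separately constructed pcommon.NewValueMap() to be inserted after it is filled. A minimal sketch, using illustrative string keys in place of the splunk.SFxEvent* constants:

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pcommon"
)

func main() {
	attrs := pcommon.NewMap()

	// An empty value still records that the key was present.
	attrs.UpsertEmpty("event_category") // the patch uses splunk.SFxEventCategoryKey

	// UpsertEmptyMap hands back the nested map, so it can be populated
	// in place; no separate value needs to be built and inserted later.
	props := attrs.UpsertEmptyMap("event_properties") // patch: splunk.SFxEventPropertiesKey
	props.UpsertBool("isActive", true)

	fmt.Println(attrs.Len()) // prints 2
}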