diff --git a/consumer/pdata/metric.go b/consumer/pdata/metric.go index 0cc2d26ecda..8c737e0cf26 100644 --- a/consumer/pdata/metric.go +++ b/consumer/pdata/metric.go @@ -76,12 +76,9 @@ func (mdt MetricDataType) String() string { return "" } -// Type returns the type of the data for this Metric. +// DataType returns the type of the data for this Metric. // Calling this function on zero-initialized Metric will cause a panic. func (ms Metric) DataType() MetricDataType { - if *ms.orig == nil || (*ms.orig).Data == nil { - return MetricDataTypeNone - } switch (*ms.orig).Data.(type) { case *otlpmetrics.Metric_IntGauge: return MetricDataTypeIntGauge @@ -99,6 +96,25 @@ func (ms Metric) DataType() MetricDataType { return MetricDataTypeNone } +// SetDataType clears any existing data and initializes it with empty data of the given type. +// Calling this function on zero-initialized Metric will cause a panic. +func (ms Metric) SetDataType(ty MetricDataType) { + switch ty { + case MetricDataTypeIntGauge: + (*ms.orig).Data = &otlpmetrics.Metric_IntGauge{} + case MetricDataTypeDoubleGauge: + (*ms.orig).Data = &otlpmetrics.Metric_DoubleGauge{} + case MetricDataTypeIntSum: + (*ms.orig).Data = &otlpmetrics.Metric_IntSum{} + case MetricDataTypeDoubleSum: + (*ms.orig).Data = &otlpmetrics.Metric_DoubleSum{} + case MetricDataTypeIntHistogram: + (*ms.orig).Data = &otlpmetrics.Metric_IntHistogram{} + case MetricDataTypeDoubleHistogram: + (*ms.orig).Data = &otlpmetrics.Metric_DoubleHistogram{} + } +} + // IntGauge returns the data as IntGauge. This should be called iff DataType() == MetricDataTypeIntGauge. // Calling this function on zero-initialized Metric will cause a panic. func (ms Metric) IntGauge() IntGauge { diff --git a/receiver/hostmetricsreceiver/hostmetrics_receiver.go b/receiver/hostmetricsreceiver/hostmetrics_receiver.go index e7cbf64e8ca..30642432a24 100644 --- a/receiver/hostmetricsreceiver/hostmetrics_receiver.go +++ b/receiver/hostmetricsreceiver/hostmetrics_receiver.go @@ -26,7 +26,7 @@ import ( "go.opentelemetry.io/collector/component/componenterror" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/pdatautil" - "go.opentelemetry.io/collector/internal/dataold" + "go.opentelemetry.io/collector/internal/data" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" ) @@ -162,7 +162,7 @@ func (hmr *receiver) scrapeMetrics(ctx context.Context) { defer span.End() var errors []error - metricData := dataold.NewMetricData() + metricData := data.NewMetricData() if err := hmr.scrapeAndAppendHostMetrics(ctx, metricData); err != nil { errors = append(errors, err) @@ -176,13 +176,13 @@ func (hmr *receiver) scrapeMetrics(ctx context.Context) { span.SetStatus(trace.Status{Code: trace.StatusCodeDataLoss, Message: fmt.Sprintf("Error(s) when scraping metrics: %v", componenterror.CombineErrors(errors))}) } - if err := hmr.consumer.ConsumeMetrics(ctx, pdatautil.MetricsFromOldInternalMetrics(metricData)); err != nil { + if err := hmr.consumer.ConsumeMetrics(ctx, pdatautil.MetricsFromInternalMetrics(metricData)); err != nil { span.SetStatus(trace.Status{Code: trace.StatusCodeDataLoss, Message: fmt.Sprintf("Unable to process metrics: %v", err)}) return } } -func (hmr *receiver) scrapeAndAppendHostMetrics(ctx context.Context, metricData dataold.MetricData) error { +func (hmr *receiver) scrapeAndAppendHostMetrics(ctx context.Context, metricData data.MetricData) error { if len(hmr.hostMetricScrapers) == 0 { return nil } @@ -202,7 +202,7 @@ func (hmr *receiver) 
scrapeAndAppendHostMetrics(ctx context.Context, metricData return componenterror.CombineErrors(errors) } -func (hmr *receiver) scrapeAndAppendResourceMetrics(ctx context.Context, metricData dataold.MetricData) error { +func (hmr *receiver) scrapeAndAppendResourceMetrics(ctx context.Context, metricData data.MetricData) error { if len(hmr.resourceMetricScrapers) == 0 { return nil } diff --git a/receiver/hostmetricsreceiver/hostmetrics_receiver_test.go b/receiver/hostmetricsreceiver/hostmetrics_receiver_test.go index 1a43adb8268..e58b91ad75d 100644 --- a/receiver/hostmetricsreceiver/hostmetrics_receiver_test.go +++ b/receiver/hostmetricsreceiver/hostmetrics_receiver_test.go @@ -30,7 +30,6 @@ import ( "go.opentelemetry.io/collector/consumer/pdata" "go.opentelemetry.io/collector/consumer/pdatautil" "go.opentelemetry.io/collector/exporter/exportertest" - "go.opentelemetry.io/collector/internal/dataold" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/scraper/cpuscraper" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/scraper/diskscraper" @@ -141,7 +140,7 @@ func TestGatherMetrics_EndToEnd(t *testing.T) { } func assertIncludesStandardMetrics(t *testing.T, got pdata.Metrics) { - md := pdatautil.MetricsToOldInternalMetrics(got) + md := pdatautil.MetricsToInternalMetrics(got) // get the first ResourceMetrics object rms := md.ResourceMetrics() @@ -165,7 +164,7 @@ func assertIncludesResourceMetrics(t *testing.T, got pdata.Metrics) { return } - md := pdatautil.MetricsToOldInternalMetrics(got) + md := pdatautil.MetricsToInternalMetrics(got) // get the superset of metrics returned by all resource metrics (excluding the first) returnedMetrics := make(map[string]struct{}) @@ -183,16 +182,16 @@ func assertIncludesResourceMetrics(t *testing.T, got pdata.Metrics) { } } -func getMetricSlice(t *testing.T, rm dataold.ResourceMetrics) dataold.MetricSlice { +func getMetricSlice(t *testing.T, rm pdata.ResourceMetrics) pdata.MetricSlice { ilms := rm.InstrumentationLibraryMetrics() require.Equal(t, 1, ilms.Len()) return ilms.At(0).Metrics() } -func getReturnedMetricNames(metrics dataold.MetricSlice) map[string]struct{} { +func getReturnedMetricNames(metrics pdata.MetricSlice) map[string]struct{} { metricNames := make(map[string]struct{}) for i := 0; i < metrics.Len(); i++ { - metricNames[metrics.At(i).MetricDescriptor().Name()] = struct{}{} + metricNames[metrics.At(i).Name()] = struct{}{} } return metricNames } @@ -212,30 +211,30 @@ type mockFactory struct{ mock.Mock } type mockScraper struct{ mock.Mock } func (m *mockFactory) CreateDefaultConfig() internal.Config { return &mockConfig{} } -func (m *mockFactory) CreateMetricsScraper(ctx context.Context, logger *zap.Logger, cfg internal.Config) (internal.Scraper, error) { +func (m *mockFactory) CreateMetricsScraper(context.Context, *zap.Logger, internal.Config) (internal.Scraper, error) { args := m.MethodCalled("CreateMetricsScraper") return args.Get(0).(internal.Scraper), args.Error(1) } -func (m *mockScraper) Initialize(ctx context.Context) error { return nil } -func (m *mockScraper) Close(ctx context.Context) error { return nil } -func (m *mockScraper) ScrapeMetrics(ctx context.Context) (dataold.MetricSlice, error) { - return dataold.NewMetricSlice(), errors.New("err1") +func (m *mockScraper) Initialize(context.Context) error { return nil } +func (m *mockScraper) Close(context.Context) error { return nil } +func (m *mockScraper) 
ScrapeMetrics(context.Context) (pdata.MetricSlice, error) { + return pdata.NewMetricSlice(), errors.New("err1") } type mockResourceFactory struct{ mock.Mock } type mockResourceScraper struct{ mock.Mock } func (m *mockResourceFactory) CreateDefaultConfig() internal.Config { return &mockConfig{} } -func (m *mockResourceFactory) CreateMetricsScraper(ctx context.Context, logger *zap.Logger, cfg internal.Config) (internal.ResourceScraper, error) { +func (m *mockResourceFactory) CreateMetricsScraper(context.Context, *zap.Logger, internal.Config) (internal.ResourceScraper, error) { args := m.MethodCalled("CreateMetricsScraper") return args.Get(0).(internal.ResourceScraper), args.Error(1) } -func (m *mockResourceScraper) Initialize(ctx context.Context) error { return nil } -func (m *mockResourceScraper) Close(ctx context.Context) error { return nil } -func (m *mockResourceScraper) ScrapeMetrics(ctx context.Context) (dataold.ResourceMetricsSlice, error) { - return dataold.NewResourceMetricsSlice(), errors.New("err2") +func (m *mockResourceScraper) Initialize(context.Context) error { return nil } +func (m *mockResourceScraper) Close(context.Context) error { return nil } +func (m *mockResourceScraper) ScrapeMetrics(context.Context) (pdata.ResourceMetricsSlice, error) { + return pdata.NewResourceMetricsSlice(), errors.New("err2") } func TestGatherMetrics_ScraperKeyConfigError(t *testing.T) { @@ -301,7 +300,7 @@ func TestGatherMetrics_Error(t *testing.T) { // expect to get one empty resource metrics entry require.Equal(t, 1, len(got)) - rm := pdatautil.MetricsToOldInternalMetrics(got[0]).ResourceMetrics() + rm := pdatautil.MetricsToInternalMetrics(got[0]).ResourceMetrics() require.Equal(t, 1, rm.Len()) ilm := rm.At(0).InstrumentationLibraryMetrics() require.Equal(t, 1, ilm.Len()) diff --git a/receiver/hostmetricsreceiver/internal/scraper.go b/receiver/hostmetricsreceiver/internal/scraper.go index 42fc4e101f9..c7cd62b9936 100644 --- a/receiver/hostmetricsreceiver/internal/scraper.go +++ b/receiver/hostmetricsreceiver/internal/scraper.go @@ -19,7 +19,7 @@ import ( "go.uber.org/zap" - "go.opentelemetry.io/collector/internal/dataold" + "go.opentelemetry.io/collector/consumer/pdata" ) // BaseScraper gathers metrics from the host machine. @@ -45,7 +45,7 @@ type Scraper interface { // ScrapeMetrics returns relevant scraped metrics. If errors occur // scraping some metrics, an error should be returned, but any // metrics that were successfully scraped should still be returned. - ScrapeMetrics(ctx context.Context) (dataold.MetricSlice, error) + ScrapeMetrics(ctx context.Context) (pdata.MetricSlice, error) } // ScraperFactory can create a MetricScraper. @@ -66,7 +66,7 @@ type ResourceScraper interface { // If errors occur scraping some metrics, an error should be // returned, but any metrics that were successfully scraped // should still be returned. - ScrapeMetrics(ctx context.Context) (dataold.ResourceMetricsSlice, error) + ScrapeMetrics(ctx context.Context) (pdata.ResourceMetricsSlice, error) } // ResourceScraperFactory can create a ResourceScraper. 
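For reference, the pattern the scrapers below follow under the new pdata model looks roughly like this: a metric is typed once with SetDataType, the typed sub-message (for example IntSum) is initialized and configured, and data points are appended through the typed DataPoints slice instead of the old MetricDescriptor/Int64DataPoints pair. This is a hedged, illustrative sketch only — the examplescraper package, exampleScraper type, metric name, label key, and value are hypothetical and not part of this change.

// Hypothetical sketch, assuming the pdata API used elsewhere in this diff:
// build one cumulative, monotonic IntSum metric with a single labeled data point.
package examplescraper

import (
	"context"
	"time"

	"go.opentelemetry.io/collector/consumer/pdata"
)

type exampleScraper struct{}

func (s *exampleScraper) Initialize(context.Context) error { return nil }
func (s *exampleScraper) Close(context.Context) error      { return nil }

func (s *exampleScraper) ScrapeMetrics(_ context.Context) (pdata.MetricSlice, error) {
	metrics := pdata.NewMetricSlice()
	metrics.Resize(1)

	// Type the metric first; the typed sub-message starts empty and must be
	// initialized before it can be configured or given data points.
	metric := metrics.At(0)
	metric.SetName("example.operations")      // hypothetical metric name
	metric.SetDescription("Hypothetical operation count.")
	metric.SetUnit("1")
	metric.SetDataType(pdata.MetricDataTypeIntSum)

	sum := metric.IntSum()
	sum.InitEmpty()
	sum.SetIsMonotonic(true)
	sum.SetAggregationTemporality(pdata.AggregationTemporalityCumulative)

	// Data points now hang off the typed sum rather than metric.Int64DataPoints().
	idps := sum.DataPoints()
	idps.Resize(1)
	dp := idps.At(0)
	dp.LabelsMap().Insert("state", "used") // hypothetical label
	dp.SetTimestamp(pdata.TimestampUnixNano(uint64(time.Now().UnixNano())))
	dp.SetValue(42)

	return metrics, nil
}

The *_metadata.go files that follow avoid repeating this boilerplate on every scrape by keeping one prototype metric per descriptor and copying it into each slice element with CopyTo.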
diff --git a/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_metadata.go b/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_metadata.go index e9c83786986..003c2a6b50f 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_metadata.go +++ b/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_metadata.go @@ -15,7 +15,7 @@ package cpuscraper import ( - "go.opentelemetry.io/collector/internal/dataold" + "go.opentelemetry.io/collector/consumer/pdata" ) // labels @@ -40,12 +40,16 @@ const ( // descriptors -var cpuTimeDescriptor = func() dataold.MetricDescriptor { - descriptor := dataold.NewMetricDescriptor() - descriptor.InitEmpty() - descriptor.SetName("system.cpu.time") - descriptor.SetDescription("Total CPU seconds broken down by different states.") - descriptor.SetUnit("s") - descriptor.SetType(dataold.MetricTypeMonotonicDouble) - return descriptor +var cpuTimeDescriptor = func() pdata.Metric { + metric := pdata.NewMetric() + metric.InitEmpty() + metric.SetName("system.cpu.time") + metric.SetDescription("Total CPU seconds broken down by different states.") + metric.SetUnit("s") + metric.SetDataType(pdata.MetricDataTypeDoubleSum) + sum := metric.DoubleSum() + sum.InitEmpty() + sum.SetIsMonotonic(true) + sum.SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + return metric }() diff --git a/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper.go b/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper.go index 6c74aa7a29b..3e38ade22c5 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper.go +++ b/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper.go @@ -22,7 +22,6 @@ import ( "github.com/shirou/gopsutil/host" "go.opentelemetry.io/collector/consumer/pdata" - "go.opentelemetry.io/collector/internal/dataold" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" ) @@ -58,8 +57,8 @@ func (s *scraper) Close(_ context.Context) error { } // ScrapeMetrics -func (s *scraper) ScrapeMetrics(_ context.Context) (dataold.MetricSlice, error) { - metrics := dataold.NewMetricSlice() +func (s *scraper) ScrapeMetrics(_ context.Context) (pdata.MetricSlice, error) { + metrics := pdata.NewMetricSlice() now := internal.TimeToUnixNano(time.Now()) cpuTimes, err := s.times( /*percpu=*/ true) @@ -72,10 +71,10 @@ func (s *scraper) ScrapeMetrics(_ context.Context) (dataold.MetricSlice, error) return metrics, nil } -func initializeCPUTimeMetric(metric dataold.Metric, startTime, now pdata.TimestampUnixNano, cpuTimes []cpu.TimesStat) { - cpuTimeDescriptor.CopyTo(metric.MetricDescriptor()) +func initializeCPUTimeMetric(metric pdata.Metric, startTime, now pdata.TimestampUnixNano, cpuTimes []cpu.TimesStat) { + cpuTimeDescriptor.CopyTo(metric) - ddps := metric.DoubleDataPoints() + ddps := metric.DoubleSum().DataPoints() ddps.Resize(len(cpuTimes) * cpuStatesLen) for i, cpuTime := range cpuTimes { appendCPUTimeStateDataPoints(ddps, i*cpuStatesLen, startTime, now, cpuTime) @@ -84,7 +83,7 @@ func initializeCPUTimeMetric(metric dataold.Metric, startTime, now pdata.Timesta const gopsCPUTotal string = "cpu-total" -func initializeCPUTimeDataPoint(dataPoint dataold.DoubleDataPoint, startTime, now pdata.TimestampUnixNano, cpuLabel string, stateLabel string, value float64) { +func initializeCPUTimeDataPoint(dataPoint pdata.DoubleDataPoint, startTime, now pdata.TimestampUnixNano, cpuLabel string, stateLabel string, value float64) { labelsMap := dataPoint.LabelsMap() // ignore 
cpu label if reporting "total" cpu usage if cpuLabel != gopsCPUTotal { diff --git a/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper_linux.go b/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper_linux.go index 8ae3591435c..33352128765 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper_linux.go +++ b/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper_linux.go @@ -20,12 +20,11 @@ import ( "github.com/shirou/gopsutil/cpu" "go.opentelemetry.io/collector/consumer/pdata" - "go.opentelemetry.io/collector/internal/dataold" ) const cpuStatesLen = 8 -func appendCPUTimeStateDataPoints(ddps dataold.DoubleDataPointSlice, startIdx int, startTime, now pdata.TimestampUnixNano, cpuTime cpu.TimesStat) { +func appendCPUTimeStateDataPoints(ddps pdata.DoubleDataPointSlice, startIdx int, startTime, now pdata.TimestampUnixNano, cpuTime cpu.TimesStat) { initializeCPUTimeDataPoint(ddps.At(startIdx+0), startTime, now, cpuTime.CPU, userStateLabelValue, cpuTime.User) initializeCPUTimeDataPoint(ddps.At(startIdx+1), startTime, now, cpuTime.CPU, systemStateLabelValue, cpuTime.System) initializeCPUTimeDataPoint(ddps.At(startIdx+2), startTime, now, cpuTime.CPU, idleStateLabelValue, cpuTime.Idle) diff --git a/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper_others.go b/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper_others.go index bf03555cf5e..804d825dd5b 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper_others.go +++ b/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper_others.go @@ -20,12 +20,11 @@ import ( "github.com/shirou/gopsutil/cpu" "go.opentelemetry.io/collector/consumer/pdata" - "go.opentelemetry.io/collector/internal/dataold" ) const cpuStatesLen = 4 -func appendCPUTimeStateDataPoints(ddps dataold.DoubleDataPointSlice, startIdx int, startTime, now pdata.TimestampUnixNano, cpuTime cpu.TimesStat) { +func appendCPUTimeStateDataPoints(ddps pdata.DoubleDataPointSlice, startIdx int, startTime, now pdata.TimestampUnixNano, cpuTime cpu.TimesStat) { initializeCPUTimeDataPoint(ddps.At(startIdx+0), startTime, now, cpuTime.CPU, userStateLabelValue, cpuTime.User) initializeCPUTimeDataPoint(ddps.At(startIdx+1), startTime, now, cpuTime.CPU, systemStateLabelValue, cpuTime.System) initializeCPUTimeDataPoint(ddps.At(startIdx+2), startTime, now, cpuTime.CPU, idleStateLabelValue, cpuTime.Idle) diff --git a/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper_test.go b/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper_test.go index d25e8c22755..e6ac4a6878f 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper_test.go +++ b/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper_test.go @@ -25,7 +25,6 @@ import ( "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/consumer/pdata" - "go.opentelemetry.io/collector/internal/dataold" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" ) @@ -98,22 +97,22 @@ func TestScrapeMetrics(t *testing.T) { } } -func assertCPUMetricValid(t *testing.T, metric dataold.Metric, descriptor dataold.MetricDescriptor, startTime pdata.TimestampUnixNano) { - internal.AssertDescriptorEqual(t, descriptor, metric.MetricDescriptor()) +func assertCPUMetricValid(t *testing.T, metric pdata.Metric, descriptor pdata.Metric, startTime pdata.TimestampUnixNano) { + internal.AssertDescriptorEqual(t, descriptor, 
metric) if startTime != 0 { - internal.AssertDoubleMetricStartTimeEquals(t, metric, startTime) + internal.AssertDoubleSumMetricStartTimeEquals(t, metric, startTime) } - assert.GreaterOrEqual(t, metric.DoubleDataPoints().Len(), 4*runtime.NumCPU()) - internal.AssertDoubleMetricLabelExists(t, metric, 0, cpuLabelName) - internal.AssertDoubleMetricLabelHasValue(t, metric, 0, stateLabelName, userStateLabelValue) - internal.AssertDoubleMetricLabelHasValue(t, metric, 1, stateLabelName, systemStateLabelValue) - internal.AssertDoubleMetricLabelHasValue(t, metric, 2, stateLabelName, idleStateLabelValue) - internal.AssertDoubleMetricLabelHasValue(t, metric, 3, stateLabelName, interruptStateLabelValue) + assert.GreaterOrEqual(t, metric.DoubleSum().DataPoints().Len(), 4*runtime.NumCPU()) + internal.AssertDoubleSumMetricLabelExists(t, metric, 0, cpuLabelName) + internal.AssertDoubleSumMetricLabelHasValue(t, metric, 0, stateLabelName, userStateLabelValue) + internal.AssertDoubleSumMetricLabelHasValue(t, metric, 1, stateLabelName, systemStateLabelValue) + internal.AssertDoubleSumMetricLabelHasValue(t, metric, 2, stateLabelName, idleStateLabelValue) + internal.AssertDoubleSumMetricLabelHasValue(t, metric, 3, stateLabelName, interruptStateLabelValue) } -func assertCPUMetricHasLinuxSpecificStateLabels(t *testing.T, metric dataold.Metric) { - internal.AssertDoubleMetricLabelHasValue(t, metric, 4, stateLabelName, niceStateLabelValue) - internal.AssertDoubleMetricLabelHasValue(t, metric, 5, stateLabelName, softIRQStateLabelValue) - internal.AssertDoubleMetricLabelHasValue(t, metric, 6, stateLabelName, stealStateLabelValue) - internal.AssertDoubleMetricLabelHasValue(t, metric, 7, stateLabelName, waitStateLabelValue) +func assertCPUMetricHasLinuxSpecificStateLabels(t *testing.T, metric pdata.Metric) { + internal.AssertDoubleSumMetricLabelHasValue(t, metric, 4, stateLabelName, niceStateLabelValue) + internal.AssertDoubleSumMetricLabelHasValue(t, metric, 5, stateLabelName, softIRQStateLabelValue) + internal.AssertDoubleSumMetricLabelHasValue(t, metric, 6, stateLabelName, stealStateLabelValue) + internal.AssertDoubleSumMetricLabelHasValue(t, metric, 7, stateLabelName, waitStateLabelValue) } diff --git a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_metadata.go b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_metadata.go index ff98fd1556b..52105bd80d5 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_metadata.go +++ b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_metadata.go @@ -15,7 +15,7 @@ package diskscraper import ( - "go.opentelemetry.io/collector/internal/dataold" + "go.opentelemetry.io/collector/consumer/pdata" ) // labels @@ -34,52 +34,72 @@ const ( // descriptors -var diskIODescriptor = func() dataold.MetricDescriptor { - descriptor := dataold.NewMetricDescriptor() - descriptor.InitEmpty() - descriptor.SetName("system.disk.io") - descriptor.SetDescription("Disk bytes transferred.") - descriptor.SetUnit("bytes") - descriptor.SetType(dataold.MetricTypeMonotonicInt64) - return descriptor +var diskIODescriptor = func() pdata.Metric { + metric := pdata.NewMetric() + metric.InitEmpty() + metric.SetName("system.disk.io") + metric.SetDescription("Disk bytes transferred.") + metric.SetUnit("bytes") + metric.SetDataType(pdata.MetricDataTypeIntSum) + sum := metric.IntSum() + sum.InitEmpty() + sum.SetIsMonotonic(true) + sum.SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + return metric }() -var diskOpsDescriptor = func() 
dataold.MetricDescriptor { - descriptor := dataold.NewMetricDescriptor() - descriptor.InitEmpty() - descriptor.SetName("system.disk.ops") - descriptor.SetDescription("Disk operations count.") - descriptor.SetUnit("1") - descriptor.SetType(dataold.MetricTypeMonotonicInt64) - return descriptor +var diskOpsDescriptor = func() pdata.Metric { + metric := pdata.NewMetric() + metric.InitEmpty() + metric.SetName("system.disk.ops") + metric.SetDescription("Disk operations count.") + metric.SetUnit("1") + metric.SetDataType(pdata.MetricDataTypeIntSum) + sum := metric.IntSum() + sum.InitEmpty() + sum.SetIsMonotonic(true) + sum.SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + return metric }() -var diskTimeDescriptor = func() dataold.MetricDescriptor { - descriptor := dataold.NewMetricDescriptor() - descriptor.InitEmpty() - descriptor.SetName("system.disk.time") - descriptor.SetDescription("Time spent in disk operations.") - descriptor.SetUnit("s") - descriptor.SetType(dataold.MetricTypeMonotonicDouble) - return descriptor +var diskTimeDescriptor = func() pdata.Metric { + metric := pdata.NewMetric() + metric.InitEmpty() + metric.SetName("system.disk.time") + metric.SetDescription("Time spent in disk operations.") + metric.SetUnit("s") + metric.SetDataType(pdata.MetricDataTypeDoubleSum) + sum := metric.DoubleSum() + sum.InitEmpty() + sum.SetIsMonotonic(true) + sum.SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + return metric }() -var diskPendingOperationsDescriptor = func() dataold.MetricDescriptor { - descriptor := dataold.NewMetricDescriptor() - descriptor.InitEmpty() - descriptor.SetName("system.disk.pending_operations") - descriptor.SetDescription("The queue size of pending I/O operations.") - descriptor.SetUnit("1") - descriptor.SetType(dataold.MetricTypeInt64) - return descriptor +var diskPendingOperationsDescriptor = func() pdata.Metric { + metric := pdata.NewMetric() + metric.InitEmpty() + metric.SetName("system.disk.pending_operations") + metric.SetDescription("The queue size of pending I/O operations.") + metric.SetUnit("1") + metric.SetDataType(pdata.MetricDataTypeIntSum) + sum := metric.IntSum() + sum.InitEmpty() + sum.SetIsMonotonic(false) + sum.SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + return metric }() -var diskMergedDescriptor = func() dataold.MetricDescriptor { - descriptor := dataold.NewMetricDescriptor() - descriptor.InitEmpty() - descriptor.SetName("system.disk.merged") - descriptor.SetDescription("The number of disk reads merged into single physical disk access operations.") - descriptor.SetUnit("1") - descriptor.SetType(dataold.MetricTypeMonotonicInt64) - return descriptor +var diskMergedDescriptor = func() pdata.Metric { + metric := pdata.NewMetric() + metric.InitEmpty() + metric.SetName("system.disk.merged") + metric.SetDescription("The number of disk reads merged into single physical disk access operations.") + metric.SetUnit("1") + metric.SetDataType(pdata.MetricDataTypeIntSum) + sum := metric.IntSum() + sum.InitEmpty() + sum.SetIsMonotonic(true) + sum.SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + return metric }() diff --git a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others.go b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others.go index 536c60133db..51b3a82eac8 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others.go +++ 
b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others.go @@ -25,7 +25,6 @@ import ( "github.com/shirou/gopsutil/host" "go.opentelemetry.io/collector/consumer/pdata" - "go.opentelemetry.io/collector/internal/dataold" "go.opentelemetry.io/collector/internal/processor/filterset" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" ) @@ -82,8 +81,8 @@ func (s *scraper) Close(_ context.Context) error { } // ScrapeMetrics -func (s *scraper) ScrapeMetrics(_ context.Context) (dataold.MetricSlice, error) { - metrics := dataold.NewMetricSlice() +func (s *scraper) ScrapeMetrics(_ context.Context) (pdata.MetricSlice, error) { + metrics := pdata.NewMetricSlice() now := internal.TimeToUnixNano(time.Now()) ioCounters, err := s.ioCounters() @@ -106,10 +105,10 @@ func (s *scraper) ScrapeMetrics(_ context.Context) (dataold.MetricSlice, error) return metrics, nil } -func initializeDiskIOMetric(metric dataold.Metric, startTime, now pdata.TimestampUnixNano, ioCounters map[string]disk.IOCountersStat) { - diskIODescriptor.CopyTo(metric.MetricDescriptor()) +func initializeDiskIOMetric(metric pdata.Metric, startTime, now pdata.TimestampUnixNano, ioCounters map[string]disk.IOCountersStat) { + diskIODescriptor.CopyTo(metric) - idps := metric.Int64DataPoints() + idps := metric.IntSum().DataPoints() idps.Resize(2 * len(ioCounters)) idx := 0 @@ -120,10 +119,10 @@ func initializeDiskIOMetric(metric dataold.Metric, startTime, now pdata.Timestam } } -func initializeDiskOpsMetric(metric dataold.Metric, startTime, now pdata.TimestampUnixNano, ioCounters map[string]disk.IOCountersStat) { - diskOpsDescriptor.CopyTo(metric.MetricDescriptor()) +func initializeDiskOpsMetric(metric pdata.Metric, startTime, now pdata.TimestampUnixNano, ioCounters map[string]disk.IOCountersStat) { + diskOpsDescriptor.CopyTo(metric) - idps := metric.Int64DataPoints() + idps := metric.IntSum().DataPoints() idps.Resize(2 * len(ioCounters)) idx := 0 @@ -134,10 +133,10 @@ func initializeDiskOpsMetric(metric dataold.Metric, startTime, now pdata.Timesta } } -func initializeDiskTimeMetric(metric dataold.Metric, startTime, now pdata.TimestampUnixNano, ioCounters map[string]disk.IOCountersStat) { - diskTimeDescriptor.CopyTo(metric.MetricDescriptor()) +func initializeDiskTimeMetric(metric pdata.Metric, startTime, now pdata.TimestampUnixNano, ioCounters map[string]disk.IOCountersStat) { + diskTimeDescriptor.CopyTo(metric) - ddps := metric.DoubleDataPoints() + ddps := metric.DoubleSum().DataPoints() ddps.Resize(2 * len(ioCounters)) idx := 0 @@ -148,10 +147,10 @@ func initializeDiskTimeMetric(metric dataold.Metric, startTime, now pdata.Timest } } -func initializeDiskPendingOperationsMetric(metric dataold.Metric, now pdata.TimestampUnixNano, ioCounters map[string]disk.IOCountersStat) { - diskPendingOperationsDescriptor.CopyTo(metric.MetricDescriptor()) +func initializeDiskPendingOperationsMetric(metric pdata.Metric, now pdata.TimestampUnixNano, ioCounters map[string]disk.IOCountersStat) { + diskPendingOperationsDescriptor.CopyTo(metric) - idps := metric.Int64DataPoints() + idps := metric.IntSum().DataPoints() idps.Resize(len(ioCounters)) idx := 0 @@ -161,7 +160,7 @@ func initializeDiskPendingOperationsMetric(metric dataold.Metric, now pdata.Time } } -func initializeInt64DataPoint(dataPoint dataold.Int64DataPoint, startTime, now pdata.TimestampUnixNano, deviceLabel string, directionLabel string, value int64) { +func initializeInt64DataPoint(dataPoint pdata.IntDataPoint, startTime, now pdata.TimestampUnixNano, 
deviceLabel string, directionLabel string, value int64) { labelsMap := dataPoint.LabelsMap() labelsMap.Insert(deviceLabelName, deviceLabel) labelsMap.Insert(directionLabelName, directionLabel) @@ -170,7 +169,7 @@ func initializeInt64DataPoint(dataPoint dataold.Int64DataPoint, startTime, now p dataPoint.SetValue(value) } -func initializeDoubleDataPoint(dataPoint dataold.DoubleDataPoint, startTime, now pdata.TimestampUnixNano, deviceLabel string, directionLabel string, value float64) { +func initializeDoubleDataPoint(dataPoint pdata.DoubleDataPoint, startTime, now pdata.TimestampUnixNano, deviceLabel string, directionLabel string, value float64) { labelsMap := dataPoint.LabelsMap() labelsMap.Insert(deviceLabelName, deviceLabel) labelsMap.Insert(directionLabelName, directionLabel) @@ -179,7 +178,7 @@ func initializeDoubleDataPoint(dataPoint dataold.DoubleDataPoint, startTime, now dataPoint.SetValue(value) } -func initializeDiskPendingDataPoint(dataPoint dataold.Int64DataPoint, now pdata.TimestampUnixNano, deviceLabel string, value int64) { +func initializeDiskPendingDataPoint(dataPoint pdata.IntDataPoint, now pdata.TimestampUnixNano, deviceLabel string, value int64) { labelsMap := dataPoint.LabelsMap() labelsMap.Insert(deviceLabelName, deviceLabel) dataPoint.SetTimestamp(now) diff --git a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others_fallback.go b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others_fallback.go index 5d6571874be..5c169a874b4 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others_fallback.go +++ b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others_fallback.go @@ -20,10 +20,9 @@ import ( "github.com/shirou/gopsutil/disk" "go.opentelemetry.io/collector/consumer/pdata" - "go.opentelemetry.io/collector/internal/dataold" ) const systemSpecificMetricsLen = 0 -func appendSystemSpecificMetrics(metrics dataold.MetricSlice, startIdx int, startTime, now pdata.TimestampUnixNano, ioCounters map[string]disk.IOCountersStat) { +func appendSystemSpecificMetrics(metrics pdata.MetricSlice, startIdx int, startTime, now pdata.TimestampUnixNano, ioCounters map[string]disk.IOCountersStat) { } diff --git a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others_linux.go b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others_linux.go index 92b9f1cce8c..541a28bfdd9 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others_linux.go +++ b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others_linux.go @@ -20,16 +20,15 @@ import ( "github.com/shirou/gopsutil/disk" "go.opentelemetry.io/collector/consumer/pdata" - "go.opentelemetry.io/collector/internal/dataold" ) const systemSpecificMetricsLen = 1 -func appendSystemSpecificMetrics(metrics dataold.MetricSlice, startIdx int, startTime, now pdata.TimestampUnixNano, ioCounters map[string]disk.IOCountersStat) { +func appendSystemSpecificMetrics(metrics pdata.MetricSlice, startIdx int, startTime, now pdata.TimestampUnixNano, ioCounters map[string]disk.IOCountersStat) { metric := metrics.At(startIdx) - diskMergedDescriptor.CopyTo(metric.MetricDescriptor()) + diskMergedDescriptor.CopyTo(metric) - idps := metric.Int64DataPoints() + idps := metric.IntSum().DataPoints() idps.Resize(2 * len(ioCounters)) idx := 0 diff --git a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_test.go 
b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_test.go index 1d064c6899c..f6dbebdde93 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_test.go +++ b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_test.go @@ -23,7 +23,6 @@ import ( "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/consumer/pdata" - "go.opentelemetry.io/collector/internal/dataold" "go.opentelemetry.io/collector/internal/processor/filterset" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" ) @@ -96,30 +95,30 @@ func TestScrapeMetrics(t *testing.T) { } } -func assertInt64DiskMetricValid(t *testing.T, metric dataold.Metric, expectedDescriptor dataold.MetricDescriptor, startTime pdata.TimestampUnixNano) { - internal.AssertDescriptorEqual(t, expectedDescriptor, metric.MetricDescriptor()) +func assertInt64DiskMetricValid(t *testing.T, metric pdata.Metric, expectedDescriptor pdata.Metric, startTime pdata.TimestampUnixNano) { + internal.AssertDescriptorEqual(t, expectedDescriptor, metric) if startTime != 0 { - internal.AssertInt64MetricStartTimeEquals(t, metric, startTime) + internal.AssertIntSumMetricStartTimeEquals(t, metric, startTime) } - assert.GreaterOrEqual(t, metric.Int64DataPoints().Len(), 2) - internal.AssertInt64MetricLabelExists(t, metric, 0, deviceLabelName) - internal.AssertInt64MetricLabelHasValue(t, metric, 0, directionLabelName, readDirectionLabelValue) - internal.AssertInt64MetricLabelHasValue(t, metric, 1, directionLabelName, writeDirectionLabelValue) + assert.GreaterOrEqual(t, metric.IntSum().DataPoints().Len(), 2) + internal.AssertIntSumMetricLabelExists(t, metric, 0, deviceLabelName) + internal.AssertIntSumMetricLabelHasValue(t, metric, 0, directionLabelName, readDirectionLabelValue) + internal.AssertIntSumMetricLabelHasValue(t, metric, 1, directionLabelName, writeDirectionLabelValue) } -func assertDoubleDiskMetricValid(t *testing.T, metric dataold.Metric, expectedDescriptor dataold.MetricDescriptor, startTime pdata.TimestampUnixNano) { - internal.AssertDescriptorEqual(t, expectedDescriptor, metric.MetricDescriptor()) +func assertDoubleDiskMetricValid(t *testing.T, metric pdata.Metric, expectedDescriptor pdata.Metric, startTime pdata.TimestampUnixNano) { + internal.AssertDescriptorEqual(t, expectedDescriptor, metric) if startTime != 0 { - internal.AssertInt64MetricStartTimeEquals(t, metric, startTime) + internal.AssertDoubleSumMetricStartTimeEquals(t, metric, startTime) } - assert.GreaterOrEqual(t, metric.DoubleDataPoints().Len(), 2) - internal.AssertDoubleMetricLabelExists(t, metric, 0, deviceLabelName) - internal.AssertDoubleMetricLabelHasValue(t, metric, 0, directionLabelName, readDirectionLabelValue) - internal.AssertDoubleMetricLabelHasValue(t, metric, metric.DoubleDataPoints().Len()-1, directionLabelName, writeDirectionLabelValue) + assert.GreaterOrEqual(t, metric.DoubleSum().DataPoints().Len(), 2) + internal.AssertDoubleSumMetricLabelExists(t, metric, 0, deviceLabelName) + internal.AssertDoubleSumMetricLabelHasValue(t, metric, 0, directionLabelName, readDirectionLabelValue) + internal.AssertDoubleSumMetricLabelHasValue(t, metric, metric.DoubleSum().DataPoints().Len()-1, directionLabelName, writeDirectionLabelValue) } -func assertDiskPendingOperationsMetricValid(t *testing.T, metric dataold.Metric) { - internal.AssertDescriptorEqual(t, diskPendingOperationsDescriptor, metric.MetricDescriptor()) - assert.GreaterOrEqual(t, metric.Int64DataPoints().Len(), 1) - 
internal.AssertInt64MetricLabelExists(t, metric, 0, deviceLabelName) +func assertDiskPendingOperationsMetricValid(t *testing.T, metric pdata.Metric) { + internal.AssertDescriptorEqual(t, diskPendingOperationsDescriptor, metric) + assert.GreaterOrEqual(t, metric.IntSum().DataPoints().Len(), 1) + internal.AssertIntSumMetricLabelExists(t, metric, 0, deviceLabelName) } diff --git a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_windows.go b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_windows.go index ff2bdf75c34..09a68de9124 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_windows.go +++ b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_windows.go @@ -22,7 +22,6 @@ import ( "go.opentelemetry.io/collector/component/componenterror" "go.opentelemetry.io/collector/consumer/pdata" - "go.opentelemetry.io/collector/internal/dataold" "go.opentelemetry.io/collector/internal/processor/filterset" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/third_party/telegraf/win_perf_counters" @@ -203,13 +202,13 @@ func (s *scraper) Close(_ context.Context) error { } // ScrapeMetrics -func (s *scraper) ScrapeMetrics(_ context.Context) (dataold.MetricSlice, error) { +func (s *scraper) ScrapeMetrics(_ context.Context) (pdata.MetricSlice, error) { now := time.Now() durationSinceLastScraped := now.Sub(s.prevScrapeTime).Seconds() s.prevScrapeTime = now nowUnixTime := pdata.TimestampUnixNano(uint64(now.UnixNano())) - metrics := dataold.NewMetricSlice() + metrics := pdata.NewMetricSlice() var errors []error @@ -231,7 +230,7 @@ func (s *scraper) ScrapeMetrics(_ context.Context) (dataold.MetricSlice, error) return metrics, componenterror.CombineErrors(errors) } -func (s *scraper) scrapeAndAppendDiskIOMetric(metrics dataold.MetricSlice, now pdata.TimestampUnixNano, durationSinceLastScraped float64) error { +func (s *scraper) scrapeAndAppendDiskIOMetric(metrics pdata.MetricSlice, now pdata.TimestampUnixNano, durationSinceLastScraped float64) error { diskReadBytesPerSecValues, err := s.diskReadBytesPerSecCounter.ScrapeData() if err != nil { return err @@ -264,7 +263,7 @@ func (s *scraper) scrapeAndAppendDiskIOMetric(metrics dataold.MetricSlice, now p return nil } -func (s *scraper) scrapeAndAppendDiskOpsMetric(metrics dataold.MetricSlice, now pdata.TimestampUnixNano, durationSinceLastScraped float64) error { +func (s *scraper) scrapeAndAppendDiskOpsMetric(metrics pdata.MetricSlice, now pdata.TimestampUnixNano, durationSinceLastScraped float64) error { diskReadsPerSecValues, err := s.diskReadsPerSecCounter.ScrapeData() if err != nil { return err @@ -330,7 +329,7 @@ func (s *scraper) scrapeAndAppendDiskOpsMetric(metrics dataold.MetricSlice, now return nil } -func (s *scraper) scrapeAndAppendDiskPendingOperationsMetric(metrics dataold.MetricSlice, now pdata.TimestampUnixNano) error { +func (s *scraper) scrapeAndAppendDiskPendingOperationsMetric(metrics pdata.MetricSlice, now pdata.TimestampUnixNano) error { diskQueueLengthValues, err := s.diskQueueLengthCounter.ScrapeData() if err != nil { return err @@ -347,10 +346,10 @@ func (s *scraper) scrapeAndAppendDiskPendingOperationsMetric(metrics dataold.Met return nil } -func initializeDiskInt64Metric(metric dataold.Metric, descriptor dataold.MetricDescriptor, startTime, now pdata.TimestampUnixNano, ops cumulativeDiskValues) { - descriptor.CopyTo(metric.MetricDescriptor()) +func 
initializeDiskInt64Metric(metric pdata.Metric, descriptor pdata.Metric, startTime, now pdata.TimestampUnixNano, ops cumulativeDiskValues) { + descriptor.CopyTo(metric) - idps := metric.Int64DataPoints() + idps := metric.IntSum().DataPoints() idps.Resize(2 * len(ops)) idx := 0 @@ -361,10 +360,10 @@ func initializeDiskInt64Metric(metric dataold.Metric, descriptor dataold.MetricD } } -func initializeDiskDoubleMetric(metric dataold.Metric, descriptor dataold.MetricDescriptor, startTime, now pdata.TimestampUnixNano, ops cumulativeDiskValues) { - descriptor.CopyTo(metric.MetricDescriptor()) +func initializeDiskDoubleMetric(metric pdata.Metric, descriptor pdata.Metric, startTime, now pdata.TimestampUnixNano, ops cumulativeDiskValues) { + descriptor.CopyTo(metric) - ddps := metric.DoubleDataPoints() + ddps := metric.DoubleSum().DataPoints() ddps.Resize(2 * len(ops)) idx := 0 @@ -375,10 +374,10 @@ func initializeDiskDoubleMetric(metric dataold.Metric, descriptor dataold.Metric } } -func initializeDiskPendingOperationsMetric(metric dataold.Metric, now pdata.TimestampUnixNano, avgDiskQueueLengthValues []win_perf_counters.CounterValue) { - diskPendingOperationsDescriptor.CopyTo(metric.MetricDescriptor()) +func initializeDiskPendingOperationsMetric(metric pdata.Metric, now pdata.TimestampUnixNano, avgDiskQueueLengthValues []win_perf_counters.CounterValue) { + diskPendingOperationsDescriptor.CopyTo(metric) - idps := metric.Int64DataPoints() + idps := metric.IntSum().DataPoints() idps.Resize(len(avgDiskQueueLengthValues)) for idx, avgDiskQueueLengthValue := range avgDiskQueueLengthValues { @@ -386,7 +385,7 @@ func initializeDiskPendingOperationsMetric(metric dataold.Metric, now pdata.Time } } -func initializeInt64DataPoint(dataPoint dataold.Int64DataPoint, startTime, now pdata.TimestampUnixNano, deviceLabel string, directionLabel string, value int64) { +func initializeInt64DataPoint(dataPoint pdata.IntDataPoint, startTime, now pdata.TimestampUnixNano, deviceLabel string, directionLabel string, value int64) { labelsMap := dataPoint.LabelsMap() labelsMap.Insert(deviceLabelName, deviceLabel) labelsMap.Insert(directionLabelName, directionLabel) @@ -395,7 +394,7 @@ func initializeInt64DataPoint(dataPoint dataold.Int64DataPoint, startTime, now p dataPoint.SetValue(value) } -func initializeDoubleDataPoint(dataPoint dataold.DoubleDataPoint, startTime, now pdata.TimestampUnixNano, deviceLabel string, directionLabel string, value float64) { +func initializeDoubleDataPoint(dataPoint pdata.DoubleDataPoint, startTime, now pdata.TimestampUnixNano, deviceLabel string, directionLabel string, value float64) { labelsMap := dataPoint.LabelsMap() labelsMap.Insert(deviceLabelName, deviceLabel) labelsMap.Insert(directionLabelName, directionLabel) @@ -404,7 +403,7 @@ func initializeDoubleDataPoint(dataPoint dataold.DoubleDataPoint, startTime, now dataPoint.SetValue(value) } -func initializeDiskPendingDataPoint(dataPoint dataold.Int64DataPoint, now pdata.TimestampUnixNano, deviceLabel string, value int64) { +func initializeDiskPendingDataPoint(dataPoint pdata.IntDataPoint, now pdata.TimestampUnixNano, deviceLabel string, value int64) { labelsMap := dataPoint.LabelsMap() labelsMap.Insert(deviceLabelName, deviceLabel) dataPoint.SetTimestamp(now) diff --git a/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_metadata.go b/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_metadata.go index 4b4ae67716c..d7b32e4e778 100644 --- 
a/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_metadata.go +++ b/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_metadata.go @@ -15,7 +15,7 @@ package filesystemscraper import ( - "go.opentelemetry.io/collector/internal/dataold" + "go.opentelemetry.io/collector/consumer/pdata" ) // labels @@ -35,22 +35,30 @@ const ( // descriptors -var fileSystemUsageDescriptor = func() dataold.MetricDescriptor { - descriptor := dataold.NewMetricDescriptor() - descriptor.InitEmpty() - descriptor.SetName("system.filesystem.usage") - descriptor.SetDescription("Filesystem bytes used.") - descriptor.SetUnit("bytes") - descriptor.SetType(dataold.MetricTypeInt64) - return descriptor +var fileSystemUsageDescriptor = func() pdata.Metric { + metric := pdata.NewMetric() + metric.InitEmpty() + metric.SetName("system.filesystem.usage") + metric.SetDescription("Filesystem bytes used.") + metric.SetUnit("bytes") + metric.SetDataType(pdata.MetricDataTypeIntSum) + sum := metric.IntSum() + sum.InitEmpty() + sum.SetIsMonotonic(false) + sum.SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + return metric }() -var fileSystemINodesUsageDescriptor = func() dataold.MetricDescriptor { - descriptor := dataold.NewMetricDescriptor() - descriptor.InitEmpty() - descriptor.SetName("system.filesystem.inodes.usage") - descriptor.SetDescription("FileSystem operations count.") - descriptor.SetUnit("1") - descriptor.SetType(dataold.MetricTypeInt64) - return descriptor +var fileSystemINodesUsageDescriptor = func() pdata.Metric { + metric := pdata.NewMetric() + metric.InitEmpty() + metric.SetName("system.filesystem.inodes.usage") + metric.SetDescription("FileSystem iNodes used.") + metric.SetUnit("1") + metric.SetDataType(pdata.MetricDataTypeIntSum) + sum := metric.IntSum() + sum.InitEmpty() + sum.SetIsMonotonic(false) + sum.SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + return metric }() diff --git a/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper.go b/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper.go index afa9b5ecf9d..bff133ac5cb 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper.go +++ b/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper.go @@ -23,7 +23,6 @@ import ( "go.opentelemetry.io/collector/component/componenterror" "go.opentelemetry.io/collector/consumer/pdata" - "go.opentelemetry.io/collector/internal/dataold" "go.opentelemetry.io/collector/internal/processor/filterset" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" ) @@ -78,8 +77,8 @@ func (s *scraper) Close(_ context.Context) error { } // ScrapeMetrics -func (s *scraper) ScrapeMetrics(_ context.Context) (dataold.MetricSlice, error) { - metrics := dataold.NewMetricSlice() +func (s *scraper) ScrapeMetrics(_ context.Context) (pdata.MetricSlice, error) { + metrics := pdata.NewMetricSlice() now := internal.TimeToUnixNano(time.Now()) @@ -128,17 +127,17 @@ func deviceUsageAlreadySet(device string, usages []*deviceUsage) bool { return false } -func initializeFileSystemUsageMetric(metric dataold.Metric, now pdata.TimestampUnixNano, deviceUsages []*deviceUsage) { - fileSystemUsageDescriptor.CopyTo(metric.MetricDescriptor()) +func initializeFileSystemUsageMetric(metric pdata.Metric, now pdata.TimestampUnixNano, deviceUsages []*deviceUsage) { + fileSystemUsageDescriptor.CopyTo(metric) - idps := metric.Int64DataPoints() + idps := 
metric.IntSum().DataPoints() idps.Resize(fileSystemStatesLen * len(deviceUsages)) for i, deviceUsage := range deviceUsages { appendFileSystemUsageStateDataPoints(idps, i*fileSystemStatesLen, now, deviceUsage) } } -func initializeFileSystemUsageDataPoint(dataPoint dataold.Int64DataPoint, now pdata.TimestampUnixNano, deviceLabel string, stateLabel string, value int64) { +func initializeFileSystemUsageDataPoint(dataPoint pdata.IntDataPoint, now pdata.TimestampUnixNano, deviceLabel string, stateLabel string, value int64) { labelsMap := dataPoint.LabelsMap() labelsMap.Insert(deviceLabelName, deviceLabel) labelsMap.Insert(stateLabelName, stateLabel) diff --git a/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_others.go b/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_others.go index b7844106eef..e2f6067be14 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_others.go +++ b/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_others.go @@ -18,17 +18,16 @@ package filesystemscraper import ( "go.opentelemetry.io/collector/consumer/pdata" - "go.opentelemetry.io/collector/internal/dataold" ) const fileSystemStatesLen = 2 -func appendFileSystemUsageStateDataPoints(idps dataold.Int64DataPointSlice, startIdx int, now pdata.TimestampUnixNano, deviceUsage *deviceUsage) { +func appendFileSystemUsageStateDataPoints(idps pdata.IntDataPointSlice, startIdx int, now pdata.TimestampUnixNano, deviceUsage *deviceUsage) { initializeFileSystemUsageDataPoint(idps.At(startIdx+0), now, deviceUsage.deviceName, usedLabelValue, int64(deviceUsage.usage.Used)) initializeFileSystemUsageDataPoint(idps.At(startIdx+1), now, deviceUsage.deviceName, freeLabelValue, int64(deviceUsage.usage.Free)) } const systemSpecificMetricsLen = 0 -func appendSystemSpecificMetrics(metrics dataold.MetricSlice, startIdx int, now pdata.TimestampUnixNano, deviceUsages []*deviceUsage) { +func appendSystemSpecificMetrics(metrics pdata.MetricSlice, startIdx int, now pdata.TimestampUnixNano, deviceUsages []*deviceUsage) { } diff --git a/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_test.go b/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_test.go index 279b70c5ba8..d0070d588c8 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_test.go +++ b/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_test.go @@ -24,7 +24,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/internal/dataold" + "go.opentelemetry.io/collector/consumer/pdata" "go.opentelemetry.io/collector/internal/processor/filterset" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" ) @@ -146,19 +146,19 @@ func TestScrapeMetrics(t *testing.T) { } } -func assertFileSystemUsageMetricValid(t *testing.T, metric dataold.Metric, descriptor dataold.MetricDescriptor, expectedDeviceDataPoints int) { - internal.AssertDescriptorEqual(t, descriptor, metric.MetricDescriptor()) +func assertFileSystemUsageMetricValid(t *testing.T, metric pdata.Metric, descriptor pdata.Metric, expectedDeviceDataPoints int) { + internal.AssertDescriptorEqual(t, descriptor, metric) if expectedDeviceDataPoints > 0 { - assert.Equal(t, expectedDeviceDataPoints, metric.Int64DataPoints().Len()) + assert.Equal(t, expectedDeviceDataPoints, 
metric.IntSum().DataPoints().Len()) } else { - assert.GreaterOrEqual(t, metric.Int64DataPoints().Len(), fileSystemStatesLen) + assert.GreaterOrEqual(t, metric.IntSum().DataPoints().Len(), fileSystemStatesLen) } - internal.AssertInt64MetricLabelHasValue(t, metric, 0, stateLabelName, usedLabelValue) - internal.AssertInt64MetricLabelHasValue(t, metric, 1, stateLabelName, freeLabelValue) + internal.AssertIntSumMetricLabelHasValue(t, metric, 0, stateLabelName, usedLabelValue) + internal.AssertIntSumMetricLabelHasValue(t, metric, 1, stateLabelName, freeLabelValue) } -func assertFileSystemUsageMetricHasUnixSpecificStateLabels(t *testing.T, metric dataold.Metric) { - internal.AssertInt64MetricLabelHasValue(t, metric, 2, stateLabelName, reservedLabelValue) +func assertFileSystemUsageMetricHasUnixSpecificStateLabels(t *testing.T, metric pdata.Metric) { + internal.AssertIntSumMetricLabelHasValue(t, metric, 2, stateLabelName, reservedLabelValue) } func isUnix() bool { diff --git a/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_unix.go b/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_unix.go index e854ab88646..ec251a1f702 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_unix.go +++ b/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_unix.go @@ -18,12 +18,11 @@ package filesystemscraper import ( "go.opentelemetry.io/collector/consumer/pdata" - "go.opentelemetry.io/collector/internal/dataold" ) const fileSystemStatesLen = 3 -func appendFileSystemUsageStateDataPoints(idps dataold.Int64DataPointSlice, startIdx int, now pdata.TimestampUnixNano, deviceUsage *deviceUsage) { +func appendFileSystemUsageStateDataPoints(idps pdata.IntDataPointSlice, startIdx int, now pdata.TimestampUnixNano, deviceUsage *deviceUsage) { initializeFileSystemUsageDataPoint(idps.At(startIdx+0), now, deviceUsage.deviceName, usedLabelValue, int64(deviceUsage.usage.Used)) initializeFileSystemUsageDataPoint(idps.At(startIdx+1), now, deviceUsage.deviceName, freeLabelValue, int64(deviceUsage.usage.Free)) initializeFileSystemUsageDataPoint(idps.At(startIdx+2), now, deviceUsage.deviceName, reservedLabelValue, int64(deviceUsage.usage.Total-deviceUsage.usage.Used-deviceUsage.usage.Free)) @@ -31,11 +30,11 @@ func appendFileSystemUsageStateDataPoints(idps dataold.Int64DataPointSlice, star const systemSpecificMetricsLen = 1 -func appendSystemSpecificMetrics(metrics dataold.MetricSlice, startIdx int, now pdata.TimestampUnixNano, deviceUsages []*deviceUsage) { +func appendSystemSpecificMetrics(metrics pdata.MetricSlice, startIdx int, now pdata.TimestampUnixNano, deviceUsages []*deviceUsage) { metric := metrics.At(startIdx) - fileSystemINodesUsageDescriptor.CopyTo(metric.MetricDescriptor()) + fileSystemINodesUsageDescriptor.CopyTo(metric) - idps := metric.Int64DataPoints() + idps := metric.IntSum().DataPoints() idps.Resize(2 * len(deviceUsages)) for idx, deviceUsage := range deviceUsages { startIndex := 2 * idx diff --git a/receiver/hostmetricsreceiver/internal/scraper/loadscraper/load_metadata.go b/receiver/hostmetricsreceiver/internal/scraper/loadscraper/load_metadata.go index 2cd5d3b1bc7..6e1b182609b 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/loadscraper/load_metadata.go +++ b/receiver/hostmetricsreceiver/internal/scraper/loadscraper/load_metadata.go @@ -15,37 +15,40 @@ package loadscraper import ( - "go.opentelemetry.io/collector/internal/dataold" + 
"go.opentelemetry.io/collector/consumer/pdata" ) // descriptors -var loadAvg1MDescriptor = func() dataold.MetricDescriptor { - descriptor := dataold.NewMetricDescriptor() - descriptor.InitEmpty() - descriptor.SetName("system.cpu.load_average.1m") - descriptor.SetDescription("Average CPU Load over 1 minute.") - descriptor.SetUnit("1") - descriptor.SetType(dataold.MetricTypeDouble) - return descriptor +var loadAvg1MDescriptor = func() pdata.Metric { + metric := pdata.NewMetric() + metric.InitEmpty() + metric.SetName("system.cpu.load_average.1m") + metric.SetDescription("Average CPU Load over 1 minute.") + metric.SetUnit("1") + metric.SetDataType(pdata.MetricDataTypeDoubleGauge) + metric.DoubleGauge().InitEmpty() + return metric }() -var loadAvg5mDescriptor = func() dataold.MetricDescriptor { - descriptor := dataold.NewMetricDescriptor() - descriptor.InitEmpty() - descriptor.SetName("system.cpu.load_average.5m") - descriptor.SetDescription("Average CPU Load over 5 minutes.") - descriptor.SetUnit("1") - descriptor.SetType(dataold.MetricTypeDouble) - return descriptor +var loadAvg5mDescriptor = func() pdata.Metric { + metric := pdata.NewMetric() + metric.InitEmpty() + metric.SetName("system.cpu.load_average.5m") + metric.SetDescription("Average CPU Load over 5 minutes.") + metric.SetUnit("1") + metric.SetDataType(pdata.MetricDataTypeDoubleGauge) + metric.DoubleGauge().InitEmpty() + return metric }() -var loadAvg15mDescriptor = func() dataold.MetricDescriptor { - descriptor := dataold.NewMetricDescriptor() - descriptor.InitEmpty() - descriptor.SetName("system.cpu.load_average.15m") - descriptor.SetDescription("Average CPU Load over 15 minutes.") - descriptor.SetUnit("1") - descriptor.SetType(dataold.MetricTypeDouble) - return descriptor +var loadAvg15mDescriptor = func() pdata.Metric { + metric := pdata.NewMetric() + metric.InitEmpty() + metric.SetName("system.cpu.load_average.15m") + metric.SetDescription("Average CPU Load over 15 minutes.") + metric.SetUnit("1") + metric.SetDataType(pdata.MetricDataTypeDoubleGauge) + metric.DoubleGauge().InitEmpty() + return metric }() diff --git a/receiver/hostmetricsreceiver/internal/scraper/loadscraper/load_scraper.go b/receiver/hostmetricsreceiver/internal/scraper/loadscraper/load_scraper.go index 34ba9ca515d..4fbff8c87f5 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/loadscraper/load_scraper.go +++ b/receiver/hostmetricsreceiver/internal/scraper/loadscraper/load_scraper.go @@ -22,7 +22,6 @@ import ( "go.uber.org/zap" "go.opentelemetry.io/collector/consumer/pdata" - "go.opentelemetry.io/collector/internal/dataold" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" ) @@ -51,8 +50,8 @@ func (s *scraper) Close(ctx context.Context) error { } // ScrapeMetrics -func (s *scraper) ScrapeMetrics(_ context.Context) (dataold.MetricSlice, error) { - metrics := dataold.NewMetricSlice() +func (s *scraper) ScrapeMetrics(_ context.Context) (pdata.MetricSlice, error) { + metrics := pdata.NewMetricSlice() now := internal.TimeToUnixNano(time.Now()) avgLoadValues, err := s.load() @@ -67,10 +66,10 @@ func (s *scraper) ScrapeMetrics(_ context.Context) (dataold.MetricSlice, error) return metrics, nil } -func initializeLoadMetric(metric dataold.Metric, metricDescriptor dataold.MetricDescriptor, now pdata.TimestampUnixNano, value float64) { - metricDescriptor.CopyTo(metric.MetricDescriptor()) +func initializeLoadMetric(metric pdata.Metric, metricDescriptor pdata.Metric, now pdata.TimestampUnixNano, value float64) { + metricDescriptor.CopyTo(metric) - 
idps := metric.DoubleDataPoints() + idps := metric.DoubleGauge().DataPoints() idps.Resize(1) dp := idps.At(0) dp.SetTimestamp(now) diff --git a/receiver/hostmetricsreceiver/internal/scraper/loadscraper/load_scraper_test.go b/receiver/hostmetricsreceiver/internal/scraper/loadscraper/load_scraper_test.go index 2c051cd97d2..05f9c1eea6c 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/loadscraper/load_scraper_test.go +++ b/receiver/hostmetricsreceiver/internal/scraper/loadscraper/load_scraper_test.go @@ -24,7 +24,7 @@ import ( "github.com/stretchr/testify/require" "go.uber.org/zap" - "go.opentelemetry.io/collector/internal/dataold" + "go.opentelemetry.io/collector/consumer/pdata" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" ) @@ -77,7 +77,7 @@ func TestScrapeMetrics(t *testing.T) { } } -func assertMetricHasSingleDatapoint(t *testing.T, metric dataold.Metric, descriptor dataold.MetricDescriptor) { - internal.AssertDescriptorEqual(t, descriptor, metric.MetricDescriptor()) - assert.Equal(t, 1, metric.DoubleDataPoints().Len()) +func assertMetricHasSingleDatapoint(t *testing.T, metric pdata.Metric, descriptor pdata.Metric) { + internal.AssertDescriptorEqual(t, descriptor, metric) + assert.Equal(t, 1, metric.DoubleGauge().DataPoints().Len()) } diff --git a/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_metadata.go b/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_metadata.go index e1efa5f14fd..a7a3fb1d350 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_metadata.go +++ b/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_metadata.go @@ -15,7 +15,7 @@ package memoryscraper import ( - "go.opentelemetry.io/collector/internal/dataold" + "go.opentelemetry.io/collector/consumer/pdata" ) // labels @@ -36,12 +36,16 @@ const ( // descriptors -var memoryUsageDescriptor = func() dataold.MetricDescriptor { - descriptor := dataold.NewMetricDescriptor() - descriptor.InitEmpty() - descriptor.SetName("system.memory.usage") - descriptor.SetDescription("Bytes of memory in use.") - descriptor.SetUnit("bytes") - descriptor.SetType(dataold.MetricTypeInt64) - return descriptor +var memoryUsageDescriptor = func() pdata.Metric { + metric := pdata.NewMetric() + metric.InitEmpty() + metric.SetName("system.memory.usage") + metric.SetDescription("Bytes of memory in use.") + metric.SetUnit("bytes") + metric.SetDataType(pdata.MetricDataTypeIntSum) + sum := metric.IntSum() + sum.InitEmpty() + sum.SetIsMonotonic(false) + sum.SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + return metric }() diff --git a/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper.go b/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper.go index 8f42bb3d3b1..ed7b76f59f3 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper.go +++ b/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper.go @@ -21,7 +21,6 @@ import ( "github.com/shirou/gopsutil/mem" "go.opentelemetry.io/collector/consumer/pdata" - "go.opentelemetry.io/collector/internal/dataold" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" ) @@ -49,8 +48,8 @@ func (s *scraper) Close(_ context.Context) error { } // ScrapeMetrics -func (s *scraper) ScrapeMetrics(_ context.Context) (dataold.MetricSlice, error) { - metrics := dataold.NewMetricSlice() +func (s *scraper) ScrapeMetrics(_ context.Context) (pdata.MetricSlice, error) { + 
metrics := pdata.NewMetricSlice() now := internal.TimeToUnixNano(time.Now()) memInfo, err := s.virtualMemory() @@ -63,15 +62,15 @@ func (s *scraper) ScrapeMetrics(_ context.Context) (dataold.MetricSlice, error) return metrics, nil } -func initializeMemoryUsageMetric(metric dataold.Metric, now pdata.TimestampUnixNano, memInfo *mem.VirtualMemoryStat) { - memoryUsageDescriptor.CopyTo(metric.MetricDescriptor()) +func initializeMemoryUsageMetric(metric pdata.Metric, now pdata.TimestampUnixNano, memInfo *mem.VirtualMemoryStat) { + memoryUsageDescriptor.CopyTo(metric) - idps := metric.Int64DataPoints() + idps := metric.IntSum().DataPoints() idps.Resize(memStatesLen) appendMemoryUsageStateDataPoints(idps, now, memInfo) } -func initializeMemoryUsageDataPoint(dataPoint dataold.Int64DataPoint, now pdata.TimestampUnixNano, stateLabel string, value int64) { +func initializeMemoryUsageDataPoint(dataPoint pdata.IntDataPoint, now pdata.TimestampUnixNano, stateLabel string, value int64) { labelsMap := dataPoint.LabelsMap() labelsMap.Insert(stateLabelName, stateLabel) dataPoint.SetTimestamp(now) diff --git a/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_linux.go b/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_linux.go index 869808fab7d..823419ab149 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_linux.go +++ b/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_linux.go @@ -20,12 +20,11 @@ import ( "github.com/shirou/gopsutil/mem" "go.opentelemetry.io/collector/consumer/pdata" - "go.opentelemetry.io/collector/internal/dataold" ) const memStatesLen = 6 -func appendMemoryUsageStateDataPoints(idps dataold.Int64DataPointSlice, now pdata.TimestampUnixNano, memInfo *mem.VirtualMemoryStat) { +func appendMemoryUsageStateDataPoints(idps pdata.IntDataPointSlice, now pdata.TimestampUnixNano, memInfo *mem.VirtualMemoryStat) { initializeMemoryUsageDataPoint(idps.At(0), now, usedStateLabelValue, int64(memInfo.Used)) initializeMemoryUsageDataPoint(idps.At(1), now, freeStateLabelValue, int64(memInfo.Free)) initializeMemoryUsageDataPoint(idps.At(2), now, bufferedStateLabelValue, int64(memInfo.Buffers)) diff --git a/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_others.go b/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_others.go index ead86ffe692..9939f5c2ed5 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_others.go +++ b/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_others.go @@ -20,12 +20,11 @@ import ( "github.com/shirou/gopsutil/mem" "go.opentelemetry.io/collector/consumer/pdata" - "go.opentelemetry.io/collector/internal/dataold" ) const memStatesLen = 3 -func appendMemoryUsageStateDataPoints(idps dataold.Int64DataPointSlice, now pdata.TimestampUnixNano, memInfo *mem.VirtualMemoryStat) { +func appendMemoryUsageStateDataPoints(idps pdata.IntDataPointSlice, now pdata.TimestampUnixNano, memInfo *mem.VirtualMemoryStat) { initializeMemoryUsageDataPoint(idps.At(0), now, usedStateLabelValue, int64(memInfo.Used)) initializeMemoryUsageDataPoint(idps.At(1), now, freeStateLabelValue, int64(memInfo.Free)) initializeMemoryUsageDataPoint(idps.At(2), now, inactiveStateLabelValue, int64(memInfo.Inactive)) diff --git a/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_test.go 
b/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_test.go index 23267362708..28c2b1de485 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_test.go +++ b/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_test.go @@ -24,7 +24,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/internal/dataold" + "go.opentelemetry.io/collector/consumer/pdata" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" ) @@ -71,7 +71,7 @@ func TestScrapeMetrics(t *testing.T) { if runtime.GOOS == "linux" { assertMemoryUsageMetricHasLinuxSpecificStateLabels(t, metrics.At(0)) } else if runtime.GOOS != "windows" { - internal.AssertInt64MetricLabelHasValue(t, metrics.At(0), 2, stateLabelName, inactiveStateLabelValue) + internal.AssertIntSumMetricLabelHasValue(t, metrics.At(0), 2, stateLabelName, inactiveStateLabelValue) } internal.AssertSameTimeStampForAllMetrics(t, metrics) @@ -79,16 +79,16 @@ func TestScrapeMetrics(t *testing.T) { } } -func assertMemoryUsageMetricValid(t *testing.T, metric dataold.Metric, descriptor dataold.MetricDescriptor) { - internal.AssertDescriptorEqual(t, descriptor, metric.MetricDescriptor()) - assert.GreaterOrEqual(t, metric.Int64DataPoints().Len(), 2) - internal.AssertInt64MetricLabelHasValue(t, metric, 0, stateLabelName, usedStateLabelValue) - internal.AssertInt64MetricLabelHasValue(t, metric, 1, stateLabelName, freeStateLabelValue) +func assertMemoryUsageMetricValid(t *testing.T, metric pdata.Metric, descriptor pdata.Metric) { + internal.AssertDescriptorEqual(t, descriptor, metric) + assert.GreaterOrEqual(t, metric.IntSum().DataPoints().Len(), 2) + internal.AssertIntSumMetricLabelHasValue(t, metric, 0, stateLabelName, usedStateLabelValue) + internal.AssertIntSumMetricLabelHasValue(t, metric, 1, stateLabelName, freeStateLabelValue) } -func assertMemoryUsageMetricHasLinuxSpecificStateLabels(t *testing.T, metric dataold.Metric) { - internal.AssertInt64MetricLabelHasValue(t, metric, 2, stateLabelName, bufferedStateLabelValue) - internal.AssertInt64MetricLabelHasValue(t, metric, 3, stateLabelName, cachedStateLabelValue) - internal.AssertInt64MetricLabelHasValue(t, metric, 4, stateLabelName, slabReclaimableStateLabelValue) - internal.AssertInt64MetricLabelHasValue(t, metric, 5, stateLabelName, slabUnreclaimableStateLabelValue) +func assertMemoryUsageMetricHasLinuxSpecificStateLabels(t *testing.T, metric pdata.Metric) { + internal.AssertIntSumMetricLabelHasValue(t, metric, 2, stateLabelName, bufferedStateLabelValue) + internal.AssertIntSumMetricLabelHasValue(t, metric, 3, stateLabelName, cachedStateLabelValue) + internal.AssertIntSumMetricLabelHasValue(t, metric, 4, stateLabelName, slabReclaimableStateLabelValue) + internal.AssertIntSumMetricLabelHasValue(t, metric, 5, stateLabelName, slabUnreclaimableStateLabelValue) } diff --git a/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_windows.go b/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_windows.go index f8777cbb30a..96b6697d679 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_windows.go +++ b/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_windows.go @@ -20,12 +20,11 @@ import ( "github.com/shirou/gopsutil/mem" "go.opentelemetry.io/collector/consumer/pdata" - "go.opentelemetry.io/collector/internal/dataold" ) const memStatesLen = 
2 -func appendMemoryUsageStateDataPoints(idps dataold.Int64DataPointSlice, now pdata.TimestampUnixNano, memInfo *mem.VirtualMemoryStat) { +func appendMemoryUsageStateDataPoints(idps pdata.IntDataPointSlice, now pdata.TimestampUnixNano, memInfo *mem.VirtualMemoryStat) { initializeMemoryUsageDataPoint(idps.At(0), now, usedStateLabelValue, int64(memInfo.Used)) initializeMemoryUsageDataPoint(idps.At(1), now, freeStateLabelValue, int64(memInfo.Available)) } diff --git a/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_metadata.go b/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_metadata.go index d510deadf15..352a1a90c97 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_metadata.go +++ b/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_metadata.go @@ -15,7 +15,7 @@ package networkscraper import ( - "go.opentelemetry.io/collector/internal/dataold" + "go.opentelemetry.io/collector/consumer/pdata" ) // network metric constants @@ -35,52 +35,72 @@ const ( // descriptors -var networkPacketsDescriptor = func() dataold.MetricDescriptor { - descriptor := dataold.NewMetricDescriptor() - descriptor.InitEmpty() - descriptor.SetName("system.network.packets") - descriptor.SetDescription("The number of packets transferred.") - descriptor.SetUnit("1") - descriptor.SetType(dataold.MetricTypeMonotonicInt64) - return descriptor +var networkPacketsDescriptor = func() pdata.Metric { + metric := pdata.NewMetric() + metric.InitEmpty() + metric.SetName("system.network.packets") + metric.SetDescription("The number of packets transferred.") + metric.SetUnit("1") + metric.SetDataType(pdata.MetricDataTypeIntSum) + sum := metric.IntSum() + sum.InitEmpty() + sum.SetIsMonotonic(true) + sum.SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + return metric }() -var networkDroppedPacketsDescriptor = func() dataold.MetricDescriptor { - descriptor := dataold.NewMetricDescriptor() - descriptor.InitEmpty() - descriptor.SetName("system.network.dropped_packets") - descriptor.SetDescription("The number of packets dropped.") - descriptor.SetUnit("1") - descriptor.SetType(dataold.MetricTypeMonotonicInt64) - return descriptor +var networkDroppedPacketsDescriptor = func() pdata.Metric { + metric := pdata.NewMetric() + metric.InitEmpty() + metric.SetName("system.network.dropped_packets") + metric.SetDescription("The number of packets dropped.") + metric.SetUnit("1") + metric.SetDataType(pdata.MetricDataTypeIntSum) + sum := metric.IntSum() + sum.InitEmpty() + sum.SetIsMonotonic(true) + sum.SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + return metric }() -var networkErrorsDescriptor = func() dataold.MetricDescriptor { - descriptor := dataold.NewMetricDescriptor() - descriptor.InitEmpty() - descriptor.SetName("system.network.errors") - descriptor.SetDescription("The number of errors encountered") - descriptor.SetUnit("1") - descriptor.SetType(dataold.MetricTypeMonotonicInt64) - return descriptor +var networkErrorsDescriptor = func() pdata.Metric { + metric := pdata.NewMetric() + metric.InitEmpty() + metric.SetName("system.network.errors") + metric.SetDescription("The number of errors encountered") + metric.SetUnit("1") + metric.SetDataType(pdata.MetricDataTypeIntSum) + sum := metric.IntSum() + sum.InitEmpty() + sum.SetIsMonotonic(true) + sum.SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + return metric }() -var networkIODescriptor = func() dataold.MetricDescriptor { - descriptor := 
dataold.NewMetricDescriptor() - descriptor.InitEmpty() - descriptor.SetName("system.network.io") - descriptor.SetDescription("The number of bytes transmitted and received") - descriptor.SetUnit("bytes") - descriptor.SetType(dataold.MetricTypeMonotonicInt64) - return descriptor +var networkIODescriptor = func() pdata.Metric { + metric := pdata.NewMetric() + metric.InitEmpty() + metric.SetName("system.network.io") + metric.SetDescription("The number of bytes transmitted and received") + metric.SetUnit("bytes") + metric.SetDataType(pdata.MetricDataTypeIntSum) + sum := metric.IntSum() + sum.InitEmpty() + sum.SetIsMonotonic(true) + sum.SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + return metric }() -var networkTCPConnectionsDescriptor = func() dataold.MetricDescriptor { - descriptor := dataold.NewMetricDescriptor() - descriptor.InitEmpty() - descriptor.SetName("system.network.tcp_connections") - descriptor.SetDescription("The number of tcp connections") - descriptor.SetUnit("bytes") - descriptor.SetType(dataold.MetricTypeInt64) - return descriptor +var networkTCPConnectionsDescriptor = func() pdata.Metric { + metric := pdata.NewMetric() + metric.InitEmpty() + metric.SetName("system.network.tcp_connections") + metric.SetDescription("The number of tcp connections") + metric.SetUnit("bytes") + metric.SetDataType(pdata.MetricDataTypeIntSum) + sum := metric.IntSum() + sum.InitEmpty() + sum.SetIsMonotonic(false) + sum.SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + return metric }() diff --git a/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_scraper.go b/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_scraper.go index ff1b947854a..900016e4fb6 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_scraper.go +++ b/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_scraper.go @@ -24,7 +24,6 @@ import ( "go.opentelemetry.io/collector/component/componenterror" "go.opentelemetry.io/collector/consumer/pdata" - "go.opentelemetry.io/collector/internal/dataold" "go.opentelemetry.io/collector/internal/processor/filterset" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" ) @@ -82,8 +81,8 @@ func (s *scraper) Close(_ context.Context) error { } // ScrapeMetrics -func (s *scraper) ScrapeMetrics(_ context.Context) (dataold.MetricSlice, error) { - metrics := dataold.NewMetricSlice() +func (s *scraper) ScrapeMetrics(_ context.Context) (pdata.MetricSlice, error) { + metrics := pdata.NewMetricSlice() var errors []error @@ -100,7 +99,7 @@ func (s *scraper) ScrapeMetrics(_ context.Context) (dataold.MetricSlice, error) return metrics, componenterror.CombineErrors(errors) } -func (s *scraper) scrapeAndAppendNetworkCounterMetrics(metrics dataold.MetricSlice, startTime pdata.TimestampUnixNano) error { +func (s *scraper) scrapeAndAppendNetworkCounterMetrics(metrics pdata.MetricSlice, startTime pdata.TimestampUnixNano) error { now := internal.TimeToUnixNano(time.Now()) // get total stats only @@ -124,10 +123,10 @@ func (s *scraper) scrapeAndAppendNetworkCounterMetrics(metrics dataold.MetricSli return nil } -func initializeNetworkPacketsMetric(metric dataold.Metric, metricDescriptor dataold.MetricDescriptor, startTime, now pdata.TimestampUnixNano, ioCountersSlice []net.IOCountersStat) { - metricDescriptor.CopyTo(metric.MetricDescriptor()) +func initializeNetworkPacketsMetric(metric pdata.Metric, metricDescriptor pdata.Metric, startTime, now pdata.TimestampUnixNano, 
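The network counter descriptors above all map the old dataold.MetricTypeMonotonicInt64 / MetricTypeInt64 types onto an IntSum that states monotonicity and aggregation temporality explicitly. A hedged sketch of that mapping as a helper (the helper itself is hypothetical; the setter calls are the ones used in this change):

package networkscraper // illustrative placement, not part of this change

import "go.opentelemetry.io/collector/consumer/pdata"

// newCumulativeIntSumMetric is a hypothetical helper mirroring the descriptors above.
func newCumulativeIntSumMetric(name, description, unit string, monotonic bool) pdata.Metric {
	metric := pdata.NewMetric()
	metric.InitEmpty()
	metric.SetName(name)
	metric.SetDescription(description)
	metric.SetUnit(unit)
	metric.SetDataType(pdata.MetricDataTypeIntSum)
	sum := metric.IntSum()
	sum.InitEmpty()
	// Counters such as system.network.packets are monotonic; point-in-time sums such as
	// system.network.tcp_connections are not. Every descriptor in this change is cumulative.
	sum.SetIsMonotonic(monotonic)
	sum.SetAggregationTemporality(pdata.AggregationTemporalityCumulative)
	return metric
}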
ioCountersSlice []net.IOCountersStat) { + metricDescriptor.CopyTo(metric) - idps := metric.Int64DataPoints() + idps := metric.IntSum().DataPoints() idps.Resize(2 * len(ioCountersSlice)) for idx, ioCounters := range ioCountersSlice { initializeNetworkDataPoint(idps.At(2*idx+0), startTime, now, ioCounters.Name, transmitDirectionLabelValue, int64(ioCounters.PacketsSent)) @@ -135,10 +134,10 @@ func initializeNetworkPacketsMetric(metric dataold.Metric, metricDescriptor data } } -func initializeNetworkDroppedPacketsMetric(metric dataold.Metric, metricDescriptor dataold.MetricDescriptor, startTime, now pdata.TimestampUnixNano, ioCountersSlice []net.IOCountersStat) { - metricDescriptor.CopyTo(metric.MetricDescriptor()) +func initializeNetworkDroppedPacketsMetric(metric pdata.Metric, metricDescriptor pdata.Metric, startTime, now pdata.TimestampUnixNano, ioCountersSlice []net.IOCountersStat) { + metricDescriptor.CopyTo(metric) - idps := metric.Int64DataPoints() + idps := metric.IntSum().DataPoints() idps.Resize(2 * len(ioCountersSlice)) for idx, ioCounters := range ioCountersSlice { initializeNetworkDataPoint(idps.At(2*idx+0), startTime, now, ioCounters.Name, transmitDirectionLabelValue, int64(ioCounters.Dropout)) @@ -146,10 +145,10 @@ func initializeNetworkDroppedPacketsMetric(metric dataold.Metric, metricDescript } } -func initializeNetworkErrorsMetric(metric dataold.Metric, metricDescriptor dataold.MetricDescriptor, startTime, now pdata.TimestampUnixNano, ioCountersSlice []net.IOCountersStat) { - metricDescriptor.CopyTo(metric.MetricDescriptor()) +func initializeNetworkErrorsMetric(metric pdata.Metric, metricDescriptor pdata.Metric, startTime, now pdata.TimestampUnixNano, ioCountersSlice []net.IOCountersStat) { + metricDescriptor.CopyTo(metric) - idps := metric.Int64DataPoints() + idps := metric.IntSum().DataPoints() idps.Resize(2 * len(ioCountersSlice)) for idx, ioCounters := range ioCountersSlice { initializeNetworkDataPoint(idps.At(2*idx+0), startTime, now, ioCounters.Name, transmitDirectionLabelValue, int64(ioCounters.Errout)) @@ -157,10 +156,10 @@ func initializeNetworkErrorsMetric(metric dataold.Metric, metricDescriptor datao } } -func initializeNetworkIOMetric(metric dataold.Metric, metricDescriptor dataold.MetricDescriptor, startTime, now pdata.TimestampUnixNano, ioCountersSlice []net.IOCountersStat) { - metricDescriptor.CopyTo(metric.MetricDescriptor()) +func initializeNetworkIOMetric(metric pdata.Metric, metricDescriptor pdata.Metric, startTime, now pdata.TimestampUnixNano, ioCountersSlice []net.IOCountersStat) { + metricDescriptor.CopyTo(metric) - idps := metric.Int64DataPoints() + idps := metric.IntSum().DataPoints() idps.Resize(2 * len(ioCountersSlice)) for idx, ioCounters := range ioCountersSlice { initializeNetworkDataPoint(idps.At(2*idx+0), startTime, now, ioCounters.Name, transmitDirectionLabelValue, int64(ioCounters.BytesSent)) @@ -168,7 +167,7 @@ func initializeNetworkIOMetric(metric dataold.Metric, metricDescriptor dataold.M } } -func initializeNetworkDataPoint(dataPoint dataold.Int64DataPoint, startTime, now pdata.TimestampUnixNano, interfaceLabel, directionLabel string, value int64) { +func initializeNetworkDataPoint(dataPoint pdata.IntDataPoint, startTime, now pdata.TimestampUnixNano, interfaceLabel, directionLabel string, value int64) { labelsMap := dataPoint.LabelsMap() labelsMap.Insert(interfaceLabelName, interfaceLabel) labelsMap.Insert(directionLabelName, directionLabel) @@ -177,7 +176,7 @@ func initializeNetworkDataPoint(dataPoint dataold.Int64DataPoint, startTime, 
now dataPoint.SetValue(value) } -func (s *scraper) scrapeAndAppendNetworkTCPConnectionsMetric(metrics dataold.MetricSlice) error { +func (s *scraper) scrapeAndAppendNetworkTCPConnectionsMetric(metrics pdata.MetricSlice) error { now := internal.TimeToUnixNano(time.Now()) connections, err := s.connections("tcp") @@ -215,10 +214,10 @@ func getTCPConnectionStatusCounts(connections []net.ConnectionStat) map[string]i return tcpStatuses } -func initializeNetworkTCPConnectionsMetric(metric dataold.Metric, now pdata.TimestampUnixNano, connectionStateCounts map[string]int64) { - networkTCPConnectionsDescriptor.CopyTo(metric.MetricDescriptor()) +func initializeNetworkTCPConnectionsMetric(metric pdata.Metric, now pdata.TimestampUnixNano, connectionStateCounts map[string]int64) { + networkTCPConnectionsDescriptor.CopyTo(metric) - idps := metric.Int64DataPoints() + idps := metric.IntSum().DataPoints() idps.Resize(len(connectionStateCounts)) i := 0 @@ -228,7 +227,7 @@ func initializeNetworkTCPConnectionsMetric(metric dataold.Metric, now pdata.Time } } -func initializeNetworkTCPConnectionsDataPoint(dataPoint dataold.Int64DataPoint, now pdata.TimestampUnixNano, stateLabel string, value int64) { +func initializeNetworkTCPConnectionsDataPoint(dataPoint pdata.IntDataPoint, now pdata.TimestampUnixNano, stateLabel string, value int64) { labelsMap := dataPoint.LabelsMap() labelsMap.Insert(stateLabelName, stateLabel) dataPoint.SetTimestamp(now) diff --git a/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_scraper_test.go b/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_scraper_test.go index 08e747c7d5b..b2a54d2bb7f 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_scraper_test.go +++ b/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_scraper_test.go @@ -24,7 +24,6 @@ import ( "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/consumer/pdata" - "go.opentelemetry.io/collector/internal/dataold" "go.opentelemetry.io/collector/internal/processor/filterset" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" ) @@ -143,19 +142,19 @@ func TestScrapeMetrics(t *testing.T) { } } -func assertNetworkIOMetricValid(t *testing.T, metric dataold.Metric, descriptor dataold.MetricDescriptor, startTime pdata.TimestampUnixNano) { - internal.AssertDescriptorEqual(t, descriptor, metric.MetricDescriptor()) +func assertNetworkIOMetricValid(t *testing.T, metric pdata.Metric, descriptor pdata.Metric, startTime pdata.TimestampUnixNano) { + internal.AssertDescriptorEqual(t, descriptor, metric) if startTime != 0 { - internal.AssertInt64MetricStartTimeEquals(t, metric, startTime) + internal.AssertIntSumMetricStartTimeEquals(t, metric, startTime) } - assert.GreaterOrEqual(t, metric.Int64DataPoints().Len(), 2) - internal.AssertInt64MetricLabelExists(t, metric, 0, interfaceLabelName) - internal.AssertInt64MetricLabelHasValue(t, metric, 0, directionLabelName, transmitDirectionLabelValue) - internal.AssertInt64MetricLabelHasValue(t, metric, 1, directionLabelName, receiveDirectionLabelValue) + assert.GreaterOrEqual(t, metric.IntSum().DataPoints().Len(), 2) + internal.AssertIntSumMetricLabelExists(t, metric, 0, interfaceLabelName) + internal.AssertIntSumMetricLabelHasValue(t, metric, 0, directionLabelName, transmitDirectionLabelValue) + internal.AssertIntSumMetricLabelHasValue(t, metric, 1, directionLabelName, receiveDirectionLabelValue) } -func assertNetworkTCPConnectionsMetricValid(t *testing.T, metric 
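For the TCP-connections metric above, only the data-point type changes; the per-state count map is still flattened into one data point per connection state. A sketch of that flattening against the new API (the helper name and the literal label key are illustrative; the change itself uses stateLabelName and a metric prepared via networkTCPConnectionsDescriptor.CopyTo):

package networkscraper // illustrative placement, not part of this change

import "go.opentelemetry.io/collector/consumer/pdata"

// appendConnectionStateCounts is a hypothetical helper in the spirit of
// initializeNetworkTCPConnectionsMetric above; metric is assumed to already
// carry the IntSum descriptor copied from networkTCPConnectionsDescriptor.
func appendConnectionStateCounts(metric pdata.Metric, now pdata.TimestampUnixNano, stateCounts map[string]int64) {
	idps := metric.IntSum().DataPoints()
	idps.Resize(len(stateCounts))

	i := 0
	for state, count := range stateCounts {
		dp := idps.At(i)
		dp.LabelsMap().Insert("state", state)
		dp.SetTimestamp(now)
		dp.SetValue(count)
		i++
	}
}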
dataold.Metric) { - internal.AssertDescriptorEqual(t, networkTCPConnectionsDescriptor, metric.MetricDescriptor()) - internal.AssertInt64MetricLabelExists(t, metric, 0, stateLabelName) - assert.Equal(t, 12, metric.Int64DataPoints().Len()) +func assertNetworkTCPConnectionsMetricValid(t *testing.T, metric pdata.Metric) { + internal.AssertDescriptorEqual(t, networkTCPConnectionsDescriptor, metric) + internal.AssertIntSumMetricLabelExists(t, metric, 0, stateLabelName) + assert.Equal(t, 12, metric.IntSum().DataPoints().Len()) } diff --git a/receiver/hostmetricsreceiver/internal/scraper/obsreportscraper/obsreportresourcescraper.go b/receiver/hostmetricsreceiver/internal/scraper/obsreportscraper/obsreportresourcescraper.go index 82fdfab64ee..ad0741e0928 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/obsreportscraper/obsreportresourcescraper.go +++ b/receiver/hostmetricsreceiver/internal/scraper/obsreportscraper/obsreportresourcescraper.go @@ -19,7 +19,7 @@ import ( "go.opencensus.io/trace" - "go.opentelemetry.io/collector/internal/dataold" + "go.opentelemetry.io/collector/consumer/pdata" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" ) @@ -42,7 +42,7 @@ func (s *resourceScraper) Close(ctx context.Context) error { } // ScrapeMetrics -func (s *resourceScraper) ScrapeMetrics(ctx context.Context) (dataold.ResourceMetricsSlice, error) { +func (s *resourceScraper) ScrapeMetrics(ctx context.Context) (pdata.ResourceMetricsSlice, error) { // TODO: Add metrics. ctx, span := trace.StartSpan(ctx, s.scrapeMetricsSpanName) defer span.End() diff --git a/receiver/hostmetricsreceiver/internal/scraper/obsreportscraper/obsreportresourcescraper_test.go b/receiver/hostmetricsreceiver/internal/scraper/obsreportscraper/obsreportresourcescraper_test.go index fda6a1cb5fa..51b703b4e5d 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/obsreportscraper/obsreportresourcescraper_test.go +++ b/receiver/hostmetricsreceiver/internal/scraper/obsreportscraper/obsreportresourcescraper_test.go @@ -22,8 +22,8 @@ import ( "github.com/stretchr/testify/assert" "go.opencensus.io/trace" - "go.opentelemetry.io/collector/internal/dataold" - "go.opentelemetry.io/collector/internal/dataold/testdataold" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/internal/data/testdata" ) func TestWrapResourceScraper(t *testing.T) { @@ -66,11 +66,11 @@ func (s *testResourceScraper) Close(_ context.Context) error { } // ScrapeMetrics -func (s *testResourceScraper) ScrapeMetrics(ctx context.Context) (dataold.ResourceMetricsSlice, error) { +func (s *testResourceScraper) ScrapeMetrics(ctx context.Context) (pdata.ResourceMetricsSlice, error) { assert.NotNil(s.t, trace.FromContext(ctx)) return generateResourceMetricsSlice(), s.err } -func generateResourceMetricsSlice() dataold.ResourceMetricsSlice { - return testdataold.GenerateMetricDataOneMetric().ResourceMetrics() +func generateResourceMetricsSlice() pdata.ResourceMetricsSlice { + return testdata.GenerateMetricsOneMetric().ResourceMetrics() } diff --git a/receiver/hostmetricsreceiver/internal/scraper/obsreportscraper/obsreportscraper.go b/receiver/hostmetricsreceiver/internal/scraper/obsreportscraper/obsreportscraper.go index 60dba7147f4..520b692f1c1 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/obsreportscraper/obsreportscraper.go +++ b/receiver/hostmetricsreceiver/internal/scraper/obsreportscraper/obsreportscraper.go @@ -19,7 +19,7 @@ import ( "go.opencensus.io/trace" - 
"go.opentelemetry.io/collector/internal/dataold" + "go.opentelemetry.io/collector/consumer/pdata" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" ) @@ -42,7 +42,7 @@ func (s *scraper) Close(ctx context.Context) error { } // ScrapeMetrics -func (s *scraper) ScrapeMetrics(ctx context.Context) (dataold.MetricSlice, error) { +func (s *scraper) ScrapeMetrics(ctx context.Context) (pdata.MetricSlice, error) { // TODO: Add metrics. ctx, span := trace.StartSpan(ctx, s.scrapeMetricsSpanName) defer span.End() diff --git a/receiver/hostmetricsreceiver/internal/scraper/obsreportscraper/obsreportscraper_test.go b/receiver/hostmetricsreceiver/internal/scraper/obsreportscraper/obsreportscraper_test.go index 2e227d021ba..7ae5b54fb4c 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/obsreportscraper/obsreportscraper_test.go +++ b/receiver/hostmetricsreceiver/internal/scraper/obsreportscraper/obsreportscraper_test.go @@ -22,8 +22,8 @@ import ( "github.com/stretchr/testify/assert" "go.opencensus.io/trace" - "go.opentelemetry.io/collector/internal/dataold" - "go.opentelemetry.io/collector/internal/dataold/testdataold" + "go.opentelemetry.io/collector/consumer/pdata" + "go.opentelemetry.io/collector/internal/data/testdata" ) func TestWrapScraper(t *testing.T) { @@ -66,11 +66,11 @@ func (s *testScraper) Close(_ context.Context) error { } // ScrapeMetrics -func (s *testScraper) ScrapeMetrics(ctx context.Context) (dataold.MetricSlice, error) { +func (s *testScraper) ScrapeMetrics(ctx context.Context) (pdata.MetricSlice, error) { assert.NotNil(s.t, trace.FromContext(ctx)) return generateMetricsSlice(), s.err } -func generateMetricsSlice() dataold.MetricSlice { - return testdataold.GenerateMetricDataOneMetric().ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0).Metrics() +func generateMetricsSlice() pdata.MetricSlice { + return testdata.GenerateMetricsOneMetric().ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0).Metrics() } diff --git a/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_metadata.go b/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_metadata.go index e69da387856..faaef4899bb 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_metadata.go +++ b/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_metadata.go @@ -15,27 +15,35 @@ package processesscraper import ( - "go.opentelemetry.io/collector/internal/dataold" + "go.opentelemetry.io/collector/consumer/pdata" ) // descriptors -var processesRunningDescriptor = func() dataold.MetricDescriptor { - descriptor := dataold.NewMetricDescriptor() - descriptor.InitEmpty() - descriptor.SetName("system.processes.running") - descriptor.SetDescription("Total number of running processes.") - descriptor.SetUnit("1") - descriptor.SetType(dataold.MetricTypeInt64) - return descriptor +var processesRunningDescriptor = func() pdata.Metric { + metric := pdata.NewMetric() + metric.InitEmpty() + metric.SetName("system.processes.running") + metric.SetDescription("Total number of running processes.") + metric.SetUnit("1") + metric.SetDataType(pdata.MetricDataTypeIntSum) + sum := metric.IntSum() + sum.InitEmpty() + sum.SetIsMonotonic(false) + sum.SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + return metric }() -var processesBlockedDescriptor = func() dataold.MetricDescriptor { - descriptor := dataold.NewMetricDescriptor() - descriptor.InitEmpty() - descriptor.SetName("system.processes.blocked") - 
descriptor.SetDescription("Total number of blocked processes.") - descriptor.SetUnit("1") - descriptor.SetType(dataold.MetricTypeInt64) - return descriptor +var processesBlockedDescriptor = func() pdata.Metric { + metric := pdata.NewMetric() + metric.InitEmpty() + metric.SetName("system.processes.blocked") + metric.SetDescription("Total number of blocked processes.") + metric.SetUnit("1") + metric.SetDataType(pdata.MetricDataTypeIntSum) + sum := metric.IntSum() + sum.InitEmpty() + sum.SetIsMonotonic(false) + sum.SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + return metric }() diff --git a/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper.go b/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper.go index 66a884aac79..8a8424783fc 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper.go +++ b/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper.go @@ -21,7 +21,6 @@ import ( "github.com/shirou/gopsutil/load" "go.opentelemetry.io/collector/consumer/pdata" - "go.opentelemetry.io/collector/internal/dataold" ) // scraper for Processes Metrics @@ -57,8 +56,8 @@ func (s *scraper) Close(_ context.Context) error { } // ScrapeMetrics -func (s *scraper) ScrapeMetrics(_ context.Context) (dataold.MetricSlice, error) { - metrics := dataold.NewMetricSlice() +func (s *scraper) ScrapeMetrics(_ context.Context) (pdata.MetricSlice, error) { + metrics := pdata.NewMetricSlice() err := appendSystemSpecificProcessesMetrics(metrics, 0, s.misc) return metrics, err } diff --git a/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_fallback.go b/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_fallback.go index b71d075cae5..265aa89fa86 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_fallback.go +++ b/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_fallback.go @@ -16,10 +16,8 @@ package processesscraper -import ( - "go.opentelemetry.io/collector/internal/dataold" -) +import "go.opentelemetry.io/collector/consumer/pdata" -func appendSystemSpecificProcessesMetrics(metrics dataold.MetricSlice, startIndex int, miscFunc getMiscStats) error { +func appendSystemSpecificProcessesMetrics(metrics pdata.MetricSlice, startIndex int, miscFunc getMiscStats) error { return nil } diff --git a/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_test.go b/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_test.go index 0c7365a33e4..4a30e6e96f8 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_test.go +++ b/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_test.go @@ -24,11 +24,11 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/internal/dataold" + "go.opentelemetry.io/collector/consumer/pdata" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" ) -var systemSpecificMetrics = map[string][]dataold.MetricDescriptor{ +var systemSpecificMetrics = map[string][]pdata.Metric{ "linux": {processesRunningDescriptor, processesBlockedDescriptor}, "darwin": {processesRunningDescriptor, processesBlockedDescriptor}, "freebsd": {processesRunningDescriptor, processesBlockedDescriptor}, @@ -83,8 +83,8 @@ func TestScrapeMetrics(t 
*testing.T) { } } -func assertProcessesMetricValid(t *testing.T, metric dataold.Metric, descriptor dataold.MetricDescriptor) { - internal.AssertDescriptorEqual(t, descriptor, metric.MetricDescriptor()) - assert.Equal(t, metric.Int64DataPoints().Len(), 1) - assert.Equal(t, metric.Int64DataPoints().At(0).LabelsMap().Len(), 0) +func assertProcessesMetricValid(t *testing.T, metric pdata.Metric, descriptor pdata.Metric) { + internal.AssertDescriptorEqual(t, descriptor, metric) + assert.Equal(t, metric.IntSum().DataPoints().Len(), 1) + assert.Equal(t, metric.IntSum().DataPoints().At(0).LabelsMap().Len(), 0) } diff --git a/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_unix.go b/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_unix.go index 8b431fed1c8..4f1013b93c2 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_unix.go +++ b/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_unix.go @@ -20,11 +20,10 @@ import ( "time" "go.opentelemetry.io/collector/consumer/pdata" - "go.opentelemetry.io/collector/internal/dataold" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" ) -func appendSystemSpecificProcessesMetrics(metrics dataold.MetricSlice, startIndex int, miscFunc getMiscStats) error { +func appendSystemSpecificProcessesMetrics(metrics pdata.MetricSlice, startIndex int, miscFunc getMiscStats) error { now := internal.TimeToUnixNano(time.Now()) misc, err := miscFunc() if err != nil { @@ -37,10 +36,10 @@ func appendSystemSpecificProcessesMetrics(metrics dataold.MetricSlice, startInde return nil } -func initializeProcessesMetric(metric dataold.Metric, descriptor dataold.MetricDescriptor, now pdata.TimestampUnixNano, value int64) { - descriptor.CopyTo(metric.MetricDescriptor()) +func initializeProcessesMetric(metric pdata.Metric, descriptor pdata.Metric, now pdata.TimestampUnixNano, value int64) { + descriptor.CopyTo(metric) - ddps := metric.Int64DataPoints() + ddps := metric.IntSum().DataPoints() ddps.Resize(1) ddps.At(0).SetTimestamp(now) ddps.At(0).SetValue(value) diff --git a/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_metadata.go b/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_metadata.go index f95d8954450..fc02c106de4 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_metadata.go +++ b/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_metadata.go @@ -15,7 +15,7 @@ package processscraper import ( - "go.opentelemetry.io/collector/internal/dataold" + "go.opentelemetry.io/collector/consumer/pdata" ) // labels @@ -42,42 +42,58 @@ const ( // descriptors -var cpuTimeDescriptor = func() dataold.MetricDescriptor { - descriptor := dataold.NewMetricDescriptor() - descriptor.InitEmpty() - descriptor.SetName("process.cpu.time") - descriptor.SetDescription("Total CPU seconds broken down by different states.") - descriptor.SetUnit("s") - descriptor.SetType(dataold.MetricTypeMonotonicDouble) - return descriptor +var cpuTimeDescriptor = func() pdata.Metric { + metric := pdata.NewMetric() + metric.InitEmpty() + metric.SetName("process.cpu.time") + metric.SetDescription("Total CPU seconds broken down by different states.") + metric.SetUnit("s") + metric.SetDataType(pdata.MetricDataTypeDoubleSum) + sum := metric.DoubleSum() + sum.InitEmpty() + sum.SetIsMonotonic(true) + sum.SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + return metric }() -var 
physicalMemoryUsageDescriptor = func() dataold.MetricDescriptor { - descriptor := dataold.NewMetricDescriptor() - descriptor.InitEmpty() - descriptor.SetName("process.memory.physical_usage") - descriptor.SetDescription("The amount of physical memory in use.") - descriptor.SetUnit("bytes") - descriptor.SetType(dataold.MetricTypeInt64) - return descriptor +var physicalMemoryUsageDescriptor = func() pdata.Metric { + metric := pdata.NewMetric() + metric.InitEmpty() + metric.SetName("process.memory.physical_usage") + metric.SetDescription("The amount of physical memory in use.") + metric.SetUnit("bytes") + metric.SetDataType(pdata.MetricDataTypeIntSum) + sum := metric.IntSum() + sum.InitEmpty() + sum.SetIsMonotonic(false) + sum.SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + return metric }() -var virtualMemoryUsageDescriptor = func() dataold.MetricDescriptor { - descriptor := dataold.NewMetricDescriptor() - descriptor.InitEmpty() - descriptor.SetName("process.memory.virtual_usage") - descriptor.SetDescription("Virtual memory size.") - descriptor.SetUnit("bytes") - descriptor.SetType(dataold.MetricTypeInt64) - return descriptor +var virtualMemoryUsageDescriptor = func() pdata.Metric { + metric := pdata.NewMetric() + metric.InitEmpty() + metric.SetName("process.memory.virtual_usage") + metric.SetDescription("Virtual memory size.") + metric.SetUnit("bytes") + metric.SetDataType(pdata.MetricDataTypeIntSum) + sum := metric.IntSum() + sum.InitEmpty() + sum.SetIsMonotonic(false) + sum.SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + return metric }() -var diskIODescriptor = func() dataold.MetricDescriptor { - descriptor := dataold.NewMetricDescriptor() - descriptor.InitEmpty() - descriptor.SetName("process.disk.io") - descriptor.SetDescription("Disk bytes transferred.") - descriptor.SetUnit("bytes") - descriptor.SetType(dataold.MetricTypeMonotonicInt64) - return descriptor +var diskIODescriptor = func() pdata.Metric { + metric := pdata.NewMetric() + metric.InitEmpty() + metric.SetName("process.disk.io") + metric.SetDescription("Disk bytes transferred.") + metric.SetUnit("bytes") + metric.SetDataType(pdata.MetricDataTypeIntSum) + sum := metric.IntSum() + sum.InitEmpty() + sum.SetIsMonotonic(true) + sum.SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + return metric }() diff --git a/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper.go b/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper.go index dbb3e765a41..dda292e760b 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper.go +++ b/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper.go @@ -25,7 +25,6 @@ import ( "go.opentelemetry.io/collector/component/componenterror" "go.opentelemetry.io/collector/consumer/pdata" - "go.opentelemetry.io/collector/internal/dataold" "go.opentelemetry.io/collector/internal/processor/filterset" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" ) @@ -82,7 +81,7 @@ func (s *scraper) Close(_ context.Context) error { } // ScrapeMetrics -func (s *scraper) ScrapeMetrics(_ context.Context) (dataold.ResourceMetricsSlice, error) { +func (s *scraper) ScrapeMetrics(_ context.Context) (pdata.ResourceMetricsSlice, error) { var errs []error metadata, err := s.getProcessMetadata() @@ -90,7 +89,7 @@ func (s *scraper) ScrapeMetrics(_ context.Context) (dataold.ResourceMetricsSlice errs = append(errs, err) } - rms := dataold.NewResourceMetricsSlice() + 
rms := pdata.NewResourceMetricsSlice() rms.Resize(len(metadata)) for i, md := range metadata { rm := rms.At(i) @@ -170,7 +169,7 @@ func (s *scraper) getProcessMetadata() ([]*processMetadata, error) { return metadata, componenterror.CombineErrors(errs) } -func scrapeAndAppendCPUTimeMetric(metrics dataold.MetricSlice, startTime, now pdata.TimestampUnixNano, handle processHandle) error { +func scrapeAndAppendCPUTimeMetric(metrics pdata.MetricSlice, startTime, now pdata.TimestampUnixNano, handle processHandle) error { times, err := handle.Times() if err != nil { return err @@ -182,15 +181,15 @@ func scrapeAndAppendCPUTimeMetric(metrics dataold.MetricSlice, startTime, now pd return nil } -func initializeCPUTimeMetric(metric dataold.Metric, startTime, now pdata.TimestampUnixNano, times *cpu.TimesStat) { - cpuTimeDescriptor.CopyTo(metric.MetricDescriptor()) +func initializeCPUTimeMetric(metric pdata.Metric, startTime, now pdata.TimestampUnixNano, times *cpu.TimesStat) { + cpuTimeDescriptor.CopyTo(metric) - ddps := metric.DoubleDataPoints() + ddps := metric.DoubleSum().DataPoints() ddps.Resize(cpuStatesLen) appendCPUTimeStateDataPoints(ddps, startTime, now, times) } -func scrapeAndAppendMemoryUsageMetrics(metrics dataold.MetricSlice, now pdata.TimestampUnixNano, handle processHandle) error { +func scrapeAndAppendMemoryUsageMetrics(metrics pdata.MetricSlice, now pdata.TimestampUnixNano, handle processHandle) error { mem, err := handle.MemoryInfo() if err != nil { return err @@ -203,20 +202,20 @@ func scrapeAndAppendMemoryUsageMetrics(metrics dataold.MetricSlice, now pdata.Ti return nil } -func initializeMemoryUsageMetric(metric dataold.Metric, descriptor dataold.MetricDescriptor, now pdata.TimestampUnixNano, usage int64) { - descriptor.CopyTo(metric.MetricDescriptor()) +func initializeMemoryUsageMetric(metric pdata.Metric, descriptor pdata.Metric, now pdata.TimestampUnixNano, usage int64) { + descriptor.CopyTo(metric) - idps := metric.Int64DataPoints() + idps := metric.IntSum().DataPoints() idps.Resize(1) initializeMemoryUsageDataPoint(idps.At(0), now, usage) } -func initializeMemoryUsageDataPoint(dataPoint dataold.Int64DataPoint, now pdata.TimestampUnixNano, usage int64) { +func initializeMemoryUsageDataPoint(dataPoint pdata.IntDataPoint, now pdata.TimestampUnixNano, usage int64) { dataPoint.SetTimestamp(now) dataPoint.SetValue(usage) } -func scrapeAndAppendDiskIOMetric(metrics dataold.MetricSlice, startTime, now pdata.TimestampUnixNano, handle processHandle) error { +func scrapeAndAppendDiskIOMetric(metrics pdata.MetricSlice, startTime, now pdata.TimestampUnixNano, handle processHandle) error { io, err := handle.IOCounters() if err != nil { return err @@ -228,16 +227,16 @@ func scrapeAndAppendDiskIOMetric(metrics dataold.MetricSlice, startTime, now pda return nil } -func initializeDiskIOMetric(metric dataold.Metric, startTime, now pdata.TimestampUnixNano, io *process.IOCountersStat) { - diskIODescriptor.CopyTo(metric.MetricDescriptor()) +func initializeDiskIOMetric(metric pdata.Metric, startTime, now pdata.TimestampUnixNano, io *process.IOCountersStat) { + diskIODescriptor.CopyTo(metric) - idps := metric.Int64DataPoints() + idps := metric.IntSum().DataPoints() idps.Resize(2) initializeDiskIODataPoint(idps.At(0), startTime, now, int64(io.ReadBytes), readDirectionLabelValue) initializeDiskIODataPoint(idps.At(1), startTime, now, int64(io.WriteBytes), writeDirectionLabelValue) } -func initializeDiskIODataPoint(dataPoint dataold.Int64DataPoint, startTime, now pdata.TimestampUnixNano, value int64, 
directionLabel string) { +func initializeDiskIODataPoint(dataPoint pdata.IntDataPoint, startTime, now pdata.TimestampUnixNano, value int64, directionLabel string) { labelsMap := dataPoint.LabelsMap() labelsMap.Insert(directionLabelName, directionLabel) dataPoint.SetStartTime(startTime) diff --git a/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_linux.go b/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_linux.go index 928242b9f9a..62dc40cde23 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_linux.go +++ b/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_linux.go @@ -20,18 +20,17 @@ import ( "github.com/shirou/gopsutil/cpu" "go.opentelemetry.io/collector/consumer/pdata" - "go.opentelemetry.io/collector/internal/dataold" ) const cpuStatesLen = 3 -func appendCPUTimeStateDataPoints(ddps dataold.DoubleDataPointSlice, startTime, now pdata.TimestampUnixNano, cpuTime *cpu.TimesStat) { +func appendCPUTimeStateDataPoints(ddps pdata.DoubleDataPointSlice, startTime, now pdata.TimestampUnixNano, cpuTime *cpu.TimesStat) { initializeCPUTimeDataPoint(ddps.At(0), startTime, now, cpuTime.User, userStateLabelValue) initializeCPUTimeDataPoint(ddps.At(1), startTime, now, cpuTime.System, systemStateLabelValue) initializeCPUTimeDataPoint(ddps.At(2), startTime, now, cpuTime.Iowait, waitStateLabelValue) } -func initializeCPUTimeDataPoint(dataPoint dataold.DoubleDataPoint, startTime, now pdata.TimestampUnixNano, value float64, stateLabel string) { +func initializeCPUTimeDataPoint(dataPoint pdata.DoubleDataPoint, startTime, now pdata.TimestampUnixNano, value float64, stateLabel string) { labelsMap := dataPoint.LabelsMap() labelsMap.Insert(stateLabelName, stateLabel) dataPoint.SetStartTime(startTime) diff --git a/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_others.go b/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_others.go index 8256dfebae8..b3056d8a933 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_others.go +++ b/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_others.go @@ -20,18 +20,17 @@ import ( "github.com/shirou/gopsutil/cpu" "go.opentelemetry.io/collector/consumer/pdata" - "go.opentelemetry.io/collector/internal/dataold" ) const cpuStatesLen = 0 -func appendCPUTimeStateDataPoints(ddps dataold.DoubleDataPointSlice, startTime, now pdata.TimestampUnixNano, cpuTime *cpu.TimesStat) { +func appendCPUTimeStateDataPoints(ddps pdata.DoubleDataPointSlice, startTime, now pdata.TimestampUnixNano, cpuTime *cpu.TimesStat) { } -func getProcessExecutable(proc processHandle) (*executableMetadata, error) { +func getProcessExecutable(processHandle) (*executableMetadata, error) { return nil, nil } -func getProcessCommand(proc processHandle) (*commandMetadata, error) { +func getProcessCommand(processHandle) (*commandMetadata, error) { return nil, nil } diff --git a/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_test.go b/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_test.go index ea29da5614a..8806d15fa87 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_test.go +++ b/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_test.go @@ -29,7 +29,6 @@ import ( "github.com/stretchr/testify/require" 
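With descriptors now being pdata.Metric prototypes, metric initialization throughout the process scraper reduces to prototype.CopyTo(metric) followed by filling the typed data points. A hedged sketch of that pattern (the helper and the literal label key are assumptions; the change itself uses directionLabelName and prototypes such as diskIODescriptor):

package processscraper // illustrative placement, not part of this change

import "go.opentelemetry.io/collector/consumer/pdata"

// fillIntSumDataPoint is a hypothetical helper showing the CopyTo-then-fill pattern.
func fillIntSumDataPoint(prototype, metric pdata.Metric, startTime, now pdata.TimestampUnixNano, value int64, direction string) {
	// prototype is assumed to be one of the IntSum descriptors above, e.g. diskIODescriptor.
	prototype.CopyTo(metric)

	idps := metric.IntSum().DataPoints()
	idps.Resize(1)
	dp := idps.At(0)
	dp.LabelsMap().Insert("direction", direction)
	dp.SetStartTime(startTime)
	dp.SetTimestamp(now)
	dp.SetValue(value)
}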
"go.opentelemetry.io/collector/consumer/pdata" - "go.opentelemetry.io/collector/internal/dataold" "go.opentelemetry.io/collector/internal/processor/filterset" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" "go.opentelemetry.io/collector/translator/conventions" @@ -78,7 +77,7 @@ func TestScrapeMetrics(t *testing.T) { assertSameTimeStampForAllMetricsWithinResource(t, resourceMetrics) } -func assertProcessResourceAttributesExist(t *testing.T, resourceMetrics dataold.ResourceMetricsSlice) { +func assertProcessResourceAttributesExist(t *testing.T, resourceMetrics pdata.ResourceMetricsSlice) { for i := 0; i < resourceMetrics.Len(); i++ { attr := resourceMetrics.At(0).Resource().Attributes() internal.AssertContainsAttribute(t, attr, conventions.AttributeProcessID) @@ -90,35 +89,35 @@ func assertProcessResourceAttributesExist(t *testing.T, resourceMetrics dataold. } } -func assertCPUTimeMetricValid(t *testing.T, resourceMetrics dataold.ResourceMetricsSlice, startTime pdata.TimestampUnixNano) { +func assertCPUTimeMetricValid(t *testing.T, resourceMetrics pdata.ResourceMetricsSlice, startTime pdata.TimestampUnixNano) { cpuTimeMetric := getMetric(t, cpuTimeDescriptor, resourceMetrics) - internal.AssertDescriptorEqual(t, cpuTimeDescriptor, cpuTimeMetric.MetricDescriptor()) + internal.AssertDescriptorEqual(t, cpuTimeDescriptor, cpuTimeMetric) if startTime != 0 { - internal.AssertDoubleMetricStartTimeEquals(t, cpuTimeMetric, startTime) + internal.AssertDoubleSumMetricStartTimeEquals(t, cpuTimeMetric, startTime) } - internal.AssertDoubleMetricLabelHasValue(t, cpuTimeMetric, 0, stateLabelName, userStateLabelValue) - internal.AssertDoubleMetricLabelHasValue(t, cpuTimeMetric, 1, stateLabelName, systemStateLabelValue) + internal.AssertDoubleSumMetricLabelHasValue(t, cpuTimeMetric, 0, stateLabelName, userStateLabelValue) + internal.AssertDoubleSumMetricLabelHasValue(t, cpuTimeMetric, 1, stateLabelName, systemStateLabelValue) if runtime.GOOS == "linux" { - internal.AssertDoubleMetricLabelHasValue(t, cpuTimeMetric, 2, stateLabelName, waitStateLabelValue) + internal.AssertDoubleSumMetricLabelHasValue(t, cpuTimeMetric, 2, stateLabelName, waitStateLabelValue) } } -func assertMemoryUsageMetricValid(t *testing.T, descriptor dataold.MetricDescriptor, resourceMetrics dataold.ResourceMetricsSlice) { +func assertMemoryUsageMetricValid(t *testing.T, descriptor pdata.Metric, resourceMetrics pdata.ResourceMetricsSlice) { memoryUsageMetric := getMetric(t, descriptor, resourceMetrics) - internal.AssertDescriptorEqual(t, descriptor, memoryUsageMetric.MetricDescriptor()) + internal.AssertDescriptorEqual(t, descriptor, memoryUsageMetric) } -func assertDiskIOMetricValid(t *testing.T, resourceMetrics dataold.ResourceMetricsSlice, startTime pdata.TimestampUnixNano) { +func assertDiskIOMetricValid(t *testing.T, resourceMetrics pdata.ResourceMetricsSlice, startTime pdata.TimestampUnixNano) { diskIOMetric := getMetric(t, diskIODescriptor, resourceMetrics) - internal.AssertDescriptorEqual(t, diskIODescriptor, diskIOMetric.MetricDescriptor()) + internal.AssertDescriptorEqual(t, diskIODescriptor, diskIOMetric) if startTime != 0 { - internal.AssertInt64MetricStartTimeEquals(t, diskIOMetric, startTime) + internal.AssertIntSumMetricStartTimeEquals(t, diskIOMetric, startTime) } - internal.AssertInt64MetricLabelHasValue(t, diskIOMetric, 0, directionLabelName, readDirectionLabelValue) - internal.AssertInt64MetricLabelHasValue(t, diskIOMetric, 1, directionLabelName, writeDirectionLabelValue) + 
internal.AssertIntSumMetricLabelHasValue(t, diskIOMetric, 0, directionLabelName, readDirectionLabelValue) + internal.AssertIntSumMetricLabelHasValue(t, diskIOMetric, 1, directionLabelName, writeDirectionLabelValue) } -func assertSameTimeStampForAllMetricsWithinResource(t *testing.T, resourceMetrics dataold.ResourceMetricsSlice) { +func assertSameTimeStampForAllMetricsWithinResource(t *testing.T, resourceMetrics pdata.ResourceMetricsSlice) { for i := 0; i < resourceMetrics.Len(); i++ { ilms := resourceMetrics.At(i).InstrumentationLibraryMetrics() for j := 0; j < ilms.Len(); j++ { @@ -127,22 +126,22 @@ func assertSameTimeStampForAllMetricsWithinResource(t *testing.T, resourceMetric } } -func getMetric(t *testing.T, descriptor dataold.MetricDescriptor, rms dataold.ResourceMetricsSlice) dataold.Metric { +func getMetric(t *testing.T, descriptor pdata.Metric, rms pdata.ResourceMetricsSlice) pdata.Metric { for i := 0; i < rms.Len(); i++ { metrics := getMetricSlice(t, rms.At(i)) for j := 0; j < metrics.Len(); j++ { metric := metrics.At(j) - if metric.MetricDescriptor().Name() == descriptor.Name() { + if metric.Name() == descriptor.Name() { return metric } } } require.Fail(t, fmt.Sprintf("no metric with name %s was returned", descriptor.Name())) - return dataold.NewMetric() + return pdata.NewMetric() } -func getMetricSlice(t *testing.T, rm dataold.ResourceMetrics) dataold.MetricSlice { +func getMetricSlice(t *testing.T, rm pdata.ResourceMetrics) pdata.MetricSlice { ilms := rm.InstrumentationLibraryMetrics() require.Equal(t, 1, ilms.Len()) return ilms.At(0).Metrics() diff --git a/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_windows.go b/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_windows.go index 263ade48407..45d0dfd345c 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_windows.go +++ b/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_windows.go @@ -23,17 +23,16 @@ import ( "github.com/shirou/gopsutil/cpu" "go.opentelemetry.io/collector/consumer/pdata" - "go.opentelemetry.io/collector/internal/dataold" ) const cpuStatesLen = 2 -func appendCPUTimeStateDataPoints(ddps dataold.DoubleDataPointSlice, startTime, now pdata.TimestampUnixNano, cpuTime *cpu.TimesStat) { +func appendCPUTimeStateDataPoints(ddps pdata.DoubleDataPointSlice, startTime, now pdata.TimestampUnixNano, cpuTime *cpu.TimesStat) { initializeCPUTimeDataPoint(ddps.At(0), startTime, now, cpuTime.User, userStateLabelValue) initializeCPUTimeDataPoint(ddps.At(1), startTime, now, cpuTime.System, systemStateLabelValue) } -func initializeCPUTimeDataPoint(dataPoint dataold.DoubleDataPoint, startTime, now pdata.TimestampUnixNano, value float64, stateLabel string) { +func initializeCPUTimeDataPoint(dataPoint pdata.DoubleDataPoint, startTime, now pdata.TimestampUnixNano, value float64, stateLabel string) { labelsMap := dataPoint.LabelsMap() labelsMap.Insert(stateLabelName, stateLabel) dataPoint.SetStartTime(startTime) diff --git a/receiver/hostmetricsreceiver/internal/scraper/swapscraper/swap_metadata.go b/receiver/hostmetricsreceiver/internal/scraper/swapscraper/swap_metadata.go index cbca2791e9f..cb7e9f0b8d5 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/swapscraper/swap_metadata.go +++ b/receiver/hostmetricsreceiver/internal/scraper/swapscraper/swap_metadata.go @@ -15,7 +15,7 @@ package swapscraper import ( - "go.opentelemetry.io/collector/internal/dataold" + 
"go.opentelemetry.io/collector/consumer/pdata" ) // labels @@ -49,32 +49,44 @@ const ( minorTypeLabelValue = "minor" ) -var swapUsageDescriptor = func() dataold.MetricDescriptor { - descriptor := dataold.NewMetricDescriptor() - descriptor.InitEmpty() - descriptor.SetName("system.swap.usage") - descriptor.SetDescription("Swap (unix) or pagefile (windows) usage.") - descriptor.SetUnit("pages") - descriptor.SetType(dataold.MetricTypeInt64) - return descriptor +var swapUsageDescriptor = func() pdata.Metric { + metric := pdata.NewMetric() + metric.InitEmpty() + metric.SetName("system.swap.usage") + metric.SetDescription("Swap (unix) or pagefile (windows) usage.") + metric.SetUnit("pages") + metric.SetDataType(pdata.MetricDataTypeIntSum) + sum := metric.IntSum() + sum.InitEmpty() + sum.SetIsMonotonic(false) + sum.SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + return metric }() -var swapPagingDescriptor = func() dataold.MetricDescriptor { - descriptor := dataold.NewMetricDescriptor() - descriptor.InitEmpty() - descriptor.SetName("system.swap.paging_ops") - descriptor.SetDescription("The number of paging operations.") - descriptor.SetUnit("1") - descriptor.SetType(dataold.MetricTypeMonotonicInt64) - return descriptor +var swapPagingDescriptor = func() pdata.Metric { + metric := pdata.NewMetric() + metric.InitEmpty() + metric.SetName("system.swap.paging_ops") + metric.SetDescription("The number of paging operations.") + metric.SetUnit("1") + metric.SetDataType(pdata.MetricDataTypeIntSum) + sum := metric.IntSum() + sum.InitEmpty() + sum.SetIsMonotonic(true) + sum.SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + return metric }() -var swapPageFaultsDescriptor = func() dataold.MetricDescriptor { - descriptor := dataold.NewMetricDescriptor() - descriptor.InitEmpty() - descriptor.SetName("system.swap.page_faults") - descriptor.SetDescription("The number of page faults.") - descriptor.SetUnit("1") - descriptor.SetType(dataold.MetricTypeMonotonicInt64) - return descriptor +var swapPageFaultsDescriptor = func() pdata.Metric { + metric := pdata.NewMetric() + metric.InitEmpty() + metric.SetName("system.swap.page_faults") + metric.SetDescription("The number of page faults.") + metric.SetUnit("1") + metric.SetDataType(pdata.MetricDataTypeIntSum) + sum := metric.IntSum() + sum.InitEmpty() + sum.SetIsMonotonic(true) + sum.SetAggregationTemporality(pdata.AggregationTemporalityCumulative) + return metric }() diff --git a/receiver/hostmetricsreceiver/internal/scraper/swapscraper/swap_scraper_others.go b/receiver/hostmetricsreceiver/internal/scraper/swapscraper/swap_scraper_others.go index 40230ff3437..8106df9603d 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/swapscraper/swap_scraper_others.go +++ b/receiver/hostmetricsreceiver/internal/scraper/swapscraper/swap_scraper_others.go @@ -25,7 +25,6 @@ import ( "go.opentelemetry.io/collector/component/componenterror" "go.opentelemetry.io/collector/consumer/pdata" - "go.opentelemetry.io/collector/internal/dataold" "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal" ) @@ -62,8 +61,8 @@ func (s *scraper) Close(_ context.Context) error { } // ScrapeMetrics -func (s *scraper) ScrapeMetrics(_ context.Context) (dataold.MetricSlice, error) { - metrics := dataold.NewMetricSlice() +func (s *scraper) ScrapeMetrics(_ context.Context) (pdata.MetricSlice, error) { + metrics := pdata.NewMetricSlice() var errors []error @@ -80,7 +79,7 @@ func (s *scraper) ScrapeMetrics(_ context.Context) (dataold.MetricSlice, error) 
 	return metrics, componenterror.CombineErrors(errors)
 }
 
-func (s *scraper) scrapeAndAppendSwapUsageMetric(metrics dataold.MetricSlice) error {
+func (s *scraper) scrapeAndAppendSwapUsageMetric(metrics pdata.MetricSlice) error {
 	now := internal.TimeToUnixNano(time.Now())
 	vmem, err := s.virtualMemory()
 	if err != nil {
@@ -93,24 +92,24 @@ func (s *scraper) scrapeAndAppendSwapUsageMetric(metrics dataold.MetricSlice) er
 	return nil
 }
 
-func initializeSwapUsageMetric(metric dataold.Metric, now pdata.TimestampUnixNano, vmem *mem.VirtualMemoryStat) {
-	swapUsageDescriptor.CopyTo(metric.MetricDescriptor())
+func initializeSwapUsageMetric(metric pdata.Metric, now pdata.TimestampUnixNano, vmem *mem.VirtualMemoryStat) {
+	swapUsageDescriptor.CopyTo(metric)
 
-	idps := metric.Int64DataPoints()
+	idps := metric.IntSum().DataPoints()
 	idps.Resize(3)
 	initializeSwapUsageDataPoint(idps.At(0), now, usedLabelValue, int64(vmem.SwapTotal-vmem.SwapFree-vmem.SwapCached))
 	initializeSwapUsageDataPoint(idps.At(1), now, freeLabelValue, int64(vmem.SwapFree))
 	initializeSwapUsageDataPoint(idps.At(2), now, cachedLabelValue, int64(vmem.SwapCached))
 }
 
-func initializeSwapUsageDataPoint(dataPoint dataold.Int64DataPoint, now pdata.TimestampUnixNano, stateLabel string, value int64) {
+func initializeSwapUsageDataPoint(dataPoint pdata.IntDataPoint, now pdata.TimestampUnixNano, stateLabel string, value int64) {
 	labelsMap := dataPoint.LabelsMap()
 	labelsMap.Insert(stateLabelName, stateLabel)
 	dataPoint.SetTimestamp(now)
 	dataPoint.SetValue(value)
 }
 
-func (s *scraper) scrapeAndAppendPagingMetrics(metrics dataold.MetricSlice) error {
+func (s *scraper) scrapeAndAppendPagingMetrics(metrics pdata.MetricSlice) error {
 	now := internal.TimeToUnixNano(time.Now())
 	swap, err := s.swapMemory()
 	if err != nil {
@@ -124,10 +123,10 @@ func (s *scraper) scrapeAndAppendPagingMetrics(metrics dataold.MetricSlice) erro
 	return nil
 }
 
-func initializePagingMetric(metric dataold.Metric, startTime, now pdata.TimestampUnixNano, swap *mem.SwapMemoryStat) {
-	swapPagingDescriptor.CopyTo(metric.MetricDescriptor())
+func initializePagingMetric(metric pdata.Metric, startTime, now pdata.TimestampUnixNano, swap *mem.SwapMemoryStat) {
+	swapPagingDescriptor.CopyTo(metric)
 
-	idps := metric.Int64DataPoints()
+	idps := metric.IntSum().DataPoints()
 	idps.Resize(4)
 	initializePagingDataPoint(idps.At(0), startTime, now, majorTypeLabelValue, inDirectionLabelValue, int64(swap.Sin))
 	initializePagingDataPoint(idps.At(1), startTime, now, majorTypeLabelValue, outDirectionLabelValue, int64(swap.Sout))
@@ -135,7 +134,7 @@ func initializePagingMetric(metric dataold.Metric, startTime, now pdata.Timestam
 	initializePagingDataPoint(idps.At(3), startTime, now, minorTypeLabelValue, outDirectionLabelValue, int64(swap.PgOut))
 }
 
-func initializePagingDataPoint(dataPoint dataold.Int64DataPoint, startTime, now pdata.TimestampUnixNano, typeLabel string, directionLabel string, value int64) {
+func initializePagingDataPoint(dataPoint pdata.IntDataPoint, startTime, now pdata.TimestampUnixNano, typeLabel string, directionLabel string, value int64) {
 	labelsMap := dataPoint.LabelsMap()
 	labelsMap.Insert(typeLabelName, typeLabel)
 	labelsMap.Insert(directionLabelName, directionLabel)
@@ -144,16 +143,16 @@ func initializePagingDataPoint(dataPoint dataold.Int64DataPoint, startTime, now
 	dataPoint.SetValue(value)
 }
 
-func initializePageFaultsMetric(metric dataold.Metric, startTime, now pdata.TimestampUnixNano, swap *mem.SwapMemoryStat) {
-	swapPageFaultsDescriptor.CopyTo(metric.MetricDescriptor())
+func 
+    swapPageFaultsDescriptor.CopyTo(metric)

-    idps := metric.Int64DataPoints()
+    idps := metric.IntSum().DataPoints()
     idps.Resize(1)
     initializePageFaultDataPoint(idps.At(0), startTime, now, minorTypeLabelValue, int64(swap.PgFault))
     // TODO add swap.PgMajFault once available in gopsutil
 }

-func initializePageFaultDataPoint(dataPoint dataold.Int64DataPoint, startTime, now pdata.TimestampUnixNano, typeLabel string, value int64) {
+func initializePageFaultDataPoint(dataPoint pdata.IntDataPoint, startTime, now pdata.TimestampUnixNano, typeLabel string, value int64) {
     dataPoint.LabelsMap().Insert(typeLabelName, typeLabel)
     dataPoint.SetStartTime(startTime)
     dataPoint.SetTimestamp(now)
diff --git a/receiver/hostmetricsreceiver/internal/scraper/swapscraper/swap_scraper_test.go b/receiver/hostmetricsreceiver/internal/scraper/swapscraper/swap_scraper_test.go
index 77ddcbfd0df..fdbb438b708 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/swapscraper/swap_scraper_test.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/swapscraper/swap_scraper_test.go
@@ -23,7 +23,6 @@ import (
     "github.com/stretchr/testify/require"

     "go.opentelemetry.io/collector/consumer/pdata"
-    "go.opentelemetry.io/collector/internal/dataold"
     "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal"
 )

@@ -53,11 +52,11 @@ func TestScrapeMetrics(t *testing.T) {
     internal.AssertSameTimeStampForMetrics(t, metrics, 1, metrics.Len())
 }

-func assertSwapUsageMetricValid(t *testing.T, hostSwapUsageMetric dataold.Metric) {
-    internal.AssertDescriptorEqual(t, swapUsageDescriptor, hostSwapUsageMetric.MetricDescriptor())
+func assertSwapUsageMetricValid(t *testing.T, hostSwapUsageMetric pdata.Metric) {
+    internal.AssertDescriptorEqual(t, swapUsageDescriptor, hostSwapUsageMetric)

     // it's valid for a system to have no swap space / paging file, so if no data points were returned, do no validation
-    if hostSwapUsageMetric.Int64DataPoints().Len() == 0 {
+    if hostSwapUsageMetric.IntSum().DataPoints().Len() == 0 {
         return
     }

@@ -68,24 +67,24 @@ func assertSwapUsageMetricValid(t *testing.T, hostSwapUsageMetric dataold.Metric
         expectedDataPoints = 2
     }

-    assert.GreaterOrEqual(t, hostSwapUsageMetric.Int64DataPoints().Len(), expectedDataPoints)
-    internal.AssertInt64MetricLabelHasValue(t, hostSwapUsageMetric, 0, stateLabelName, usedLabelValue)
-    internal.AssertInt64MetricLabelHasValue(t, hostSwapUsageMetric, 1, stateLabelName, freeLabelValue)
+    assert.GreaterOrEqual(t, hostSwapUsageMetric.IntSum().DataPoints().Len(), expectedDataPoints)
+    internal.AssertIntSumMetricLabelHasValue(t, hostSwapUsageMetric, 0, stateLabelName, usedLabelValue)
+    internal.AssertIntSumMetricLabelHasValue(t, hostSwapUsageMetric, 1, stateLabelName, freeLabelValue)

     // on non-windows, also expect a cached state label
     if runtime.GOOS != "windows" {
-        internal.AssertInt64MetricLabelHasValue(t, hostSwapUsageMetric, 2, stateLabelName, cachedLabelValue)
+        internal.AssertIntSumMetricLabelHasValue(t, hostSwapUsageMetric, 2, stateLabelName, cachedLabelValue)
     }

     // on windows, also expect the page file device name label
     if runtime.GOOS == "windows" {
-        internal.AssertInt64MetricLabelExists(t, hostSwapUsageMetric, 0, deviceLabelName)
-        internal.AssertInt64MetricLabelExists(t, hostSwapUsageMetric, 1, deviceLabelName)
+        internal.AssertIntSumMetricLabelExists(t, hostSwapUsageMetric, 0, deviceLabelName)
+        internal.AssertIntSumMetricLabelExists(t, hostSwapUsageMetric, 1, deviceLabelName)
     }
 }

-func assertPagingMetricValid(t *testing.T, pagingMetric dataold.Metric, startTime pdata.TimestampUnixNano) {
-    internal.AssertDescriptorEqual(t, swapPagingDescriptor, pagingMetric.MetricDescriptor())
+func assertPagingMetricValid(t *testing.T, pagingMetric pdata.Metric, startTime pdata.TimestampUnixNano) {
+    internal.AssertDescriptorEqual(t, swapPagingDescriptor, pagingMetric)

     if startTime != 0 {
-        internal.AssertInt64MetricStartTimeEquals(t, pagingMetric, startTime)
+        internal.AssertIntSumMetricStartTimeEquals(t, pagingMetric, startTime)
     }

     // expect an in & out datapoint, for both major and minor paging types (windows does not currently support minor paging data)
@@ -93,26 +92,26 @@ func assertPagingMetricValid(t *testing.T, pagingMetric dataold.Metric, startTim
     if runtime.GOOS == "windows" {
         expectedDataPoints = 2
     }
-    assert.Equal(t, expectedDataPoints, pagingMetric.Int64DataPoints().Len())
+    assert.Equal(t, expectedDataPoints, pagingMetric.IntSum().DataPoints().Len())

-    internal.AssertInt64MetricLabelHasValue(t, pagingMetric, 0, typeLabelName, majorTypeLabelValue)
-    internal.AssertInt64MetricLabelHasValue(t, pagingMetric, 0, directionLabelName, inDirectionLabelValue)
-    internal.AssertInt64MetricLabelHasValue(t, pagingMetric, 1, typeLabelName, majorTypeLabelValue)
-    internal.AssertInt64MetricLabelHasValue(t, pagingMetric, 1, directionLabelName, outDirectionLabelValue)
+    internal.AssertIntSumMetricLabelHasValue(t, pagingMetric, 0, typeLabelName, majorTypeLabelValue)
+    internal.AssertIntSumMetricLabelHasValue(t, pagingMetric, 0, directionLabelName, inDirectionLabelValue)
+    internal.AssertIntSumMetricLabelHasValue(t, pagingMetric, 1, typeLabelName, majorTypeLabelValue)
+    internal.AssertIntSumMetricLabelHasValue(t, pagingMetric, 1, directionLabelName, outDirectionLabelValue)

     if runtime.GOOS != "windows" {
-        internal.AssertInt64MetricLabelHasValue(t, pagingMetric, 2, typeLabelName, minorTypeLabelValue)
-        internal.AssertInt64MetricLabelHasValue(t, pagingMetric, 2, directionLabelName, inDirectionLabelValue)
-        internal.AssertInt64MetricLabelHasValue(t, pagingMetric, 3, typeLabelName, minorTypeLabelValue)
-        internal.AssertInt64MetricLabelHasValue(t, pagingMetric, 3, directionLabelName, outDirectionLabelValue)
+        internal.AssertIntSumMetricLabelHasValue(t, pagingMetric, 2, typeLabelName, minorTypeLabelValue)
+        internal.AssertIntSumMetricLabelHasValue(t, pagingMetric, 2, directionLabelName, inDirectionLabelValue)
+        internal.AssertIntSumMetricLabelHasValue(t, pagingMetric, 3, typeLabelName, minorTypeLabelValue)
+        internal.AssertIntSumMetricLabelHasValue(t, pagingMetric, 3, directionLabelName, outDirectionLabelValue)
     }
 }

-func assertPageFaultsMetricValid(t *testing.T, pageFaultsMetric dataold.Metric, startTime pdata.TimestampUnixNano) {
-    internal.AssertDescriptorEqual(t, swapPageFaultsDescriptor, pageFaultsMetric.MetricDescriptor())
+func assertPageFaultsMetricValid(t *testing.T, pageFaultsMetric pdata.Metric, startTime pdata.TimestampUnixNano) {
+    internal.AssertDescriptorEqual(t, swapPageFaultsDescriptor, pageFaultsMetric)

     if startTime != 0 {
-        internal.AssertInt64MetricStartTimeEquals(t, pageFaultsMetric, startTime)
+        internal.AssertIntSumMetricStartTimeEquals(t, pageFaultsMetric, startTime)
     }

-    assert.Equal(t, 1, pageFaultsMetric.Int64DataPoints().Len())
-    internal.AssertInt64MetricLabelHasValue(t, pageFaultsMetric, 0, typeLabelName, minorTypeLabelValue)
+    assert.Equal(t, 1, pageFaultsMetric.IntSum().DataPoints().Len())
+    internal.AssertIntSumMetricLabelHasValue(t, pageFaultsMetric, 0, typeLabelName, minorTypeLabelValue)
 }
diff --git a/receiver/hostmetricsreceiver/internal/scraper/swapscraper/swap_scraper_windows.go b/receiver/hostmetricsreceiver/internal/scraper/swapscraper/swap_scraper_windows.go
index 86b3594e407..88653367bc1 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/swapscraper/swap_scraper_windows.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/swapscraper/swap_scraper_windows.go
@@ -23,7 +23,6 @@ import (
     "go.opentelemetry.io/collector/component/componenterror"
     "go.opentelemetry.io/collector/consumer/pdata"
-    "go.opentelemetry.io/collector/internal/dataold"
     "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal"
     "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/windows/pdh"
 )

@@ -75,7 +74,7 @@ func (s *scraper) Initialize(_ context.Context) error {
 }

 // Close
-func (s *scraper) Close(_ context.Context) error {
+func (s *scraper) Close(context.Context) error {
     var errors []error

     err := s.pageReadsPerSecCounter.Close()
@@ -92,8 +91,8 @@ func (s *scraper) Close(_ context.Context) error {
 }

 // ScrapeMetrics
-func (s *scraper) ScrapeMetrics(_ context.Context) (dataold.MetricSlice, error) {
-    metrics := dataold.NewMetricSlice()
+func (s *scraper) ScrapeMetrics(context.Context) (pdata.MetricSlice, error) {
+    metrics := pdata.NewMetricSlice()

     var errors []error

@@ -110,7 +109,7 @@ func (s *scraper) ScrapeMetrics(_ context.Context) (dataold.MetricSlice, error)
     return metrics, componenterror.CombineErrors(errors)
 }

-func (s *scraper) scrapeAndAppendSwapUsageMetric(metrics dataold.MetricSlice) error {
+func (s *scraper) scrapeAndAppendSwapUsageMetric(metrics pdata.MetricSlice) error {
     now := internal.TimeToUnixNano(time.Now())
     pageFiles, err := s.pageFileStats()
     if err != nil {
@@ -123,10 +122,10 @@ func (s *scraper) scrapeAndAppendSwapUsageMetric(metrics dataold.MetricSlice) er
     return nil
 }

-func initializeSwapUsageMetric(metric dataold.Metric, now pdata.TimestampUnixNano, pageFiles []*pageFileData) {
-    swapUsageDescriptor.CopyTo(metric.MetricDescriptor())
+func initializeSwapUsageMetric(metric pdata.Metric, now pdata.TimestampUnixNano, pageFiles []*pageFileData) {
+    swapUsageDescriptor.CopyTo(metric)

-    idps := metric.Int64DataPoints()
+    idps := metric.IntSum().DataPoints()
     idps.Resize(2 * len(pageFiles))

     idx := 0
@@ -137,7 +136,7 @@ func initializeSwapUsageMetric(metric dataold.Metric, now pdata.TimestampUnixNan
     }
 }

-func initializeSwapUsageDataPoint(dataPoint dataold.Int64DataPoint, now pdata.TimestampUnixNano, deviceLabel string, stateLabel string, value int64) {
+func initializeSwapUsageDataPoint(dataPoint pdata.IntDataPoint, now pdata.TimestampUnixNano, deviceLabel string, stateLabel string, value int64) {
     labelsMap := dataPoint.LabelsMap()
     labelsMap.Insert(deviceLabelName, deviceLabel)
     labelsMap.Insert(stateLabelName, stateLabel)
@@ -145,7 +144,7 @@ func initializeSwapUsageDataPoint(dataPoint dataold.Int64DataPoint, now pdata.Ti
     dataPoint.SetValue(value)
 }

-func (s *scraper) scrapeAndAppendPagingMetric(metrics dataold.MetricSlice) error {
+func (s *scraper) scrapeAndAppendPagingMetric(metrics pdata.MetricSlice) error {
     now := time.Now()
     durationSinceLastScraped := now.Sub(s.prevPagingScrapeTime).Seconds()
     s.prevPagingScrapeTime = now
@@ -170,16 +169,16 @@ func (s *scraper) scrapeAndAppendPagingMetric(metrics dataold.MetricSlice) error
     return nil
 }

-func initializePagingMetric(metric dataold.Metric, startTime, now pdata.TimestampUnixNano, reads float64, writes float64) {
-    swapPagingDescriptor.CopyTo(metric.MetricDescriptor())
+func initializePagingMetric(metric pdata.Metric, startTime, now pdata.TimestampUnixNano, reads float64, writes float64) {
+    swapPagingDescriptor.CopyTo(metric)

-    idps := metric.Int64DataPoints()
+    idps := metric.IntSum().DataPoints()
     idps.Resize(2)
     initializePagingDataPoint(idps.At(0), startTime, now, inDirectionLabelValue, reads)
     initializePagingDataPoint(idps.At(1), startTime, now, outDirectionLabelValue, writes)
 }

-func initializePagingDataPoint(dataPoint dataold.Int64DataPoint, startTime, now pdata.TimestampUnixNano, directionLabel string, value float64) {
+func initializePagingDataPoint(dataPoint pdata.IntDataPoint, startTime, now pdata.TimestampUnixNano, directionLabel string, value float64) {
     labelsMap := dataPoint.LabelsMap()
     labelsMap.Insert(typeLabelName, majorTypeLabelValue)
     labelsMap.Insert(directionLabelName, directionLabel)
diff --git a/receiver/hostmetricsreceiver/internal/testutils.go b/receiver/hostmetricsreceiver/internal/testutils.go
index 61053ef256c..d57ae29f218 100644
--- a/receiver/hostmetricsreceiver/internal/testutils.go
+++ b/receiver/hostmetricsreceiver/internal/testutils.go
@@ -21,7 +21,6 @@ import (
     "github.com/stretchr/testify/require"

     "go.opentelemetry.io/collector/consumer/pdata"
-    "go.opentelemetry.io/collector/internal/dataold"
 )

 func AssertContainsAttribute(t *testing.T, attr pdata.AttributeMap, key string) {
@@ -29,72 +28,77 @@ func AssertContainsAttribute(t *testing.T, attr pdata.AttributeMap, key string)
     assert.True(t, ok)
 }

-func AssertDescriptorEqual(t *testing.T, expected dataold.MetricDescriptor, actual dataold.MetricDescriptor) {
+func AssertDescriptorEqual(t *testing.T, expected pdata.Metric, actual pdata.Metric) {
     assert.Equal(t, expected.Name(), actual.Name())
     assert.Equal(t, expected.Description(), actual.Description())
     assert.Equal(t, expected.Unit(), actual.Unit())
-    assert.Equal(t, expected.Type(), actual.Type())
+    assert.Equal(t, expected.DataType(), actual.DataType())
 }

-func AssertInt64MetricLabelHasValue(t *testing.T, metric dataold.Metric, index int, labelName string, expectedVal string) {
-    val, ok := metric.Int64DataPoints().At(index).LabelsMap().Get(labelName)
-    assert.Truef(t, ok, "Missing label %q in metric %q", labelName, metric.MetricDescriptor().Name())
+func AssertIntSumMetricLabelHasValue(t *testing.T, metric pdata.Metric, index int, labelName string, expectedVal string) {
+    val, ok := metric.IntSum().DataPoints().At(index).LabelsMap().Get(labelName)
+    assert.Truef(t, ok, "Missing label %q in metric %q", labelName, metric.Name())
     assert.Equal(t, expectedVal, val.Value())
 }

-func AssertDoubleMetricLabelHasValue(t *testing.T, metric dataold.Metric, index int, labelName string, expectedVal string) {
-    val, ok := metric.DoubleDataPoints().At(index).LabelsMap().Get(labelName)
-    assert.Truef(t, ok, "Missing label %q in metric %q", labelName, metric.MetricDescriptor().Name())
+func AssertDoubleSumMetricLabelHasValue(t *testing.T, metric pdata.Metric, index int, labelName string, expectedVal string) {
+    val, ok := metric.DoubleSum().DataPoints().At(index).LabelsMap().Get(labelName)
+    assert.Truef(t, ok, "Missing label %q in metric %q", labelName, metric.Name())
     assert.Equal(t, expectedVal, val.Value())
 }

-func AssertInt64MetricLabelExists(t *testing.T, metric dataold.Metric, index int, labelName string) {
-    _, ok := metric.Int64DataPoints().At(index).LabelsMap().Get(labelName)
-    assert.Truef(t, ok, "Missing label %q in metric %q", labelName, metric.MetricDescriptor().Name())
+func AssertIntSumMetricLabelExists(t *testing.T, metric pdata.Metric, index int, labelName string) {
+    _, ok := metric.IntSum().DataPoints().At(index).LabelsMap().Get(labelName)
+    assert.Truef(t, ok, "Missing label %q in metric %q", labelName, metric.Name())
 }

-func AssertDoubleMetricLabelExists(t *testing.T, metric dataold.Metric, index int, labelName string) {
-    _, ok := metric.DoubleDataPoints().At(index).LabelsMap().Get(labelName)
-    assert.Truef(t, ok, "Missing label %q in metric %q", labelName, metric.MetricDescriptor().Name())
+func AssertDoubleSumMetricLabelExists(t *testing.T, metric pdata.Metric, index int, labelName string) {
+    _, ok := metric.DoubleSum().DataPoints().At(index).LabelsMap().Get(labelName)
+    assert.Truef(t, ok, "Missing label %q in metric %q", labelName, metric.Name())
 }

-func AssertInt64MetricStartTimeEquals(t *testing.T, metric dataold.Metric, startTime pdata.TimestampUnixNano) {
-    idps := metric.Int64DataPoints()
+func AssertIntSumMetricStartTimeEquals(t *testing.T, metric pdata.Metric, startTime pdata.TimestampUnixNano) {
+    idps := metric.IntSum().DataPoints()
     for i := 0; i < idps.Len(); i++ {
         require.Equal(t, startTime, idps.At(i).StartTime())
     }
 }

-func AssertDoubleMetricStartTimeEquals(t *testing.T, metric dataold.Metric, startTime pdata.TimestampUnixNano) {
-    ddps := metric.DoubleDataPoints()
+func AssertDoubleSumMetricStartTimeEquals(t *testing.T, metric pdata.Metric, startTime pdata.TimestampUnixNano) {
+    ddps := metric.DoubleSum().DataPoints()
     for i := 0; i < ddps.Len(); i++ {
         require.Equal(t, startTime, ddps.At(i).StartTime())
     }
 }

-func AssertSameTimeStampForAllMetrics(t *testing.T, metrics dataold.MetricSlice) {
+func AssertSameTimeStampForAllMetrics(t *testing.T, metrics pdata.MetricSlice) {
     AssertSameTimeStampForMetrics(t, metrics, 0, metrics.Len())
 }

-func AssertSameTimeStampForMetrics(t *testing.T, metrics dataold.MetricSlice, startIdx, endIdx int) {
+func AssertSameTimeStampForMetrics(t *testing.T, metrics pdata.MetricSlice, startIdx, endIdx int) {
     var ts pdata.TimestampUnixNano
     for i := startIdx; i < endIdx; i++ {
         metric := metrics.At(i)
-        idps := metric.Int64DataPoints()
-        for j := 0; j < idps.Len(); j++ {
-            if ts == 0 {
-                ts = idps.At(j).Timestamp()
+        dt := metric.DataType()
+        if dt == pdata.MetricDataTypeIntSum {
+            idps := metric.IntSum().DataPoints()
+            for j := 0; j < idps.Len(); j++ {
+                if ts == 0 {
+                    ts = idps.At(j).Timestamp()
+                }
+                require.Equalf(t, ts, idps.At(j).Timestamp(), "metrics contained different end timestamp values")
             }
-            require.Equalf(t, ts, idps.At(j).Timestamp(), "metrics contained different end timestamp values")
         }
-        ddps := metric.DoubleDataPoints()
-        for j := 0; j < ddps.Len(); j++ {
-            if ts == 0 {
-                ts = ddps.At(j).Timestamp()
+        if dt == pdata.MetricDataTypeDoubleSum {
+            ddps := metric.DoubleSum().DataPoints()
+            for j := 0; j < ddps.Len(); j++ {
+                if ts == 0 {
+                    ts = ddps.At(j).Timestamp()
+                }
+                require.Equalf(t, ts, ddps.At(j).Timestamp(), "metrics contained different end timestamp values")
             }
-            require.Equalf(t, ts, ddps.At(j).Timestamp(), "metrics contained different end timestamp values")
         }
     }
 }
diff --git a/receiver/hostmetricsreceiver/internal/utils.go b/receiver/hostmetricsreceiver/internal/utils.go
index e834643c523..a563370fecb 100644
--- a/receiver/hostmetricsreceiver/internal/utils.go
+++ b/receiver/hostmetricsreceiver/internal/utils.go
@@ -18,11 +18,11 @@ import (
     "time"

     "go.opentelemetry.io/collector/consumer/pdata"
-    "go.opentelemetry.io/collector/internal/dataold"
+    "go.opentelemetry.io/collector/internal/data"
"go.opentelemetry.io/collector/internal/data" ) // Initializes a metric with a metric slice and returns it. -func InitializeMetricSlice(metricData dataold.MetricData) dataold.MetricSlice { +func InitializeMetricSlice(metricData data.MetricData) pdata.MetricSlice { rms := metricData.ResourceMetrics() rms.Resize(1) rm := rms.At(0) diff --git a/receiver/hostmetricsreceiver/internal/windows/pdh/performance_counter_utils.go b/receiver/hostmetricsreceiver/internal/windows/pdh/performance_counter_utils.go deleted file mode 100644 index e3a68c62dfa..00000000000 --- a/receiver/hostmetricsreceiver/internal/windows/pdh/performance_counter_utils.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build windows - -package pdh - -import ( - "time" - - "go.opentelemetry.io/collector/consumer/pdata" - "go.opentelemetry.io/collector/internal/dataold" - "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/third_party/telegraf/win_perf_counters" -) - -// InitializeMetric initializes the provided metric with -// datapoints from the specified Counter Values. -// -// The performance counters' "instance" will be recorded -// against the supplied label name -func InitializeMetric( - metric dataold.Metric, - vals []win_perf_counters.CounterValue, - instanceNameLabel string, -) dataold.Metric { - ddps := metric.DoubleDataPoints() - ddps.Resize(len(vals)) - - for i, val := range vals { - ddp := ddps.At(i) - - if len(vals) > 1 || (val.InstanceName != "" && val.InstanceName != totalInstanceName) { - labels := ddp.LabelsMap() - labels.Insert(instanceNameLabel, val.InstanceName) - } - - ddp.SetTimestamp(pdata.TimestampUnixNano(uint64(time.Now().UnixNano()))) - ddp.SetValue(val.Value) - } - - return metric -} diff --git a/receiver/hostmetricsreceiver/internal/windows/pdh/performance_counter_utils_test.go b/receiver/hostmetricsreceiver/internal/windows/pdh/performance_counter_utils_test.go deleted file mode 100644 index cc0eb0847d9..00000000000 --- a/receiver/hostmetricsreceiver/internal/windows/pdh/performance_counter_utils_test.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-
-// +build windows
-
-package pdh
-
-import (
-    "testing"
-
-    "github.com/stretchr/testify/assert"
-
-    "go.opentelemetry.io/collector/consumer/pdata"
-    "go.opentelemetry.io/collector/internal/dataold"
-    "go.opentelemetry.io/collector/receiver/hostmetricsreceiver/internal/third_party/telegraf/win_perf_counters"
-)
-
-func TestPerfCounter_InitializeMetric_NoLabels(t *testing.T) {
-    data := []win_perf_counters.CounterValue{{InstanceName: "_Total", Value: 100}}
-
-    metric := dataold.NewMetric()
-    metric.InitEmpty()
-    InitializeMetric(metric, data, "")
-
-    ddp := metric.DoubleDataPoints()
-    assert.Equal(t, 1, ddp.Len())
-    assert.Equal(t, 0, ddp.At(0).LabelsMap().Len())
-    assert.Equal(t, float64(100), ddp.At(0).Value())
-}
-
-func TestPerfCounter_InitializeMetric_Labels(t *testing.T) {
-    data := []win_perf_counters.CounterValue{{InstanceName: "label_value_1", Value: 20}, {InstanceName: "label_value_2", Value: 50}}
-
-    metric := dataold.NewMetric()
-    metric.InitEmpty()
-    InitializeMetric(metric, data, "label")
-
-    ddp := metric.DoubleDataPoints()
-    assert.Equal(t, 2, ddp.Len())
-    assert.Equal(t, pdata.NewStringMap().InitFromMap(map[string]string{"label": "label_value_1"}), ddp.At(0).LabelsMap().Sort())
-    assert.Equal(t, float64(20), ddp.At(0).Value())
-    assert.Equal(t, pdata.NewStringMap().InitFromMap(map[string]string{"label": "label_value_2"}), ddp.At(1).LabelsMap().Sort())
-    assert.Equal(t, float64(50), ddp.At(1).Value())
-}
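For anyone migrating a scraper of their own, the pattern this change applies can be condensed into a minimal sketch. It uses only calls that appear in the diff above (pdata.NewMetric, SetDataType, IntSum, DataPoints and the data point setters); the helper name, label key, and literal values are illustrative and are not part of the change.

// Sketch only (not part of the diff): building a cumulative IntSum metric
// with the pdata API that this change migrates to. buildSwapUsageExample is
// a hypothetical helper; the "state" label key and values are illustrative.
package main

import (
    "fmt"
    "time"

    "go.opentelemetry.io/collector/consumer/pdata"
)

func buildSwapUsageExample(used, free int64) pdata.Metric {
    metric := pdata.NewMetric()
    metric.InitEmpty()
    metric.SetName("system.swap.usage")
    metric.SetUnit("pages")
    // SetDataType initializes the metric with empty IntSum data.
    metric.SetDataType(pdata.MetricDataTypeIntSum)

    sum := metric.IntSum()
    sum.InitEmpty()
    sum.SetIsMonotonic(false)
    sum.SetAggregationTemporality(pdata.AggregationTemporalityCumulative)

    // Data points now hang off the typed data, not the metric itself.
    now := pdata.TimestampUnixNano(uint64(time.Now().UnixNano()))
    idps := sum.DataPoints()
    idps.Resize(2)
    idps.At(0).LabelsMap().Insert("state", "used")
    idps.At(0).SetTimestamp(now)
    idps.At(0).SetValue(used)
    idps.At(1).LabelsMap().Insert("state", "free")
    idps.At(1).SetTimestamp(now)
    idps.At(1).SetValue(free)
    return metric
}

func main() {
    m := buildSwapUsageExample(1024, 4096)
    fmt.Println(m.Name(), m.IntSum().DataPoints().Len())
}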