From 58c4fb23e879473867d47e59edf954acd5f2bdcc Mon Sep 17 00:00:00 2001 From: Dmitry Date: Thu, 16 Dec 2021 12:04:22 -0800 Subject: [PATCH] [receiver/hostmetrics] Migrate disk scraper to the new metrics builder --- receiver/hostmetricsreceiver/config_test.go | 2 +- .../hostmetrics_receiver_test.go | 2 +- .../internal/scraper/diskscraper/codegen.go | 2 +- .../internal/scraper/diskscraper/config.go | 4 + .../diskscraper/disk_scraper_others.go | 72 +-- .../disk_scraper_others_fallback.go | 2 +- .../diskscraper/disk_scraper_others_linux.go | 26 +- .../scraper/diskscraper/disk_scraper_test.go | 96 ++-- .../diskscraper/disk_scraper_windows.go | 65 +-- .../internal/scraper/diskscraper/factory.go | 5 +- .../internal/metadata/generated_metrics.go | 201 -------- .../internal/metadata/generated_metrics_v2.go | 435 ++++++++++++++++++ .../scraper/diskscraper/metadata.yaml | 7 + .../internal/scraper/diskscraper/utils.go | 43 -- 14 files changed, 569 insertions(+), 393 deletions(-) delete mode 100644 receiver/hostmetricsreceiver/internal/scraper/diskscraper/internal/metadata/generated_metrics.go create mode 100644 receiver/hostmetricsreceiver/internal/scraper/diskscraper/internal/metadata/generated_metrics_v2.go delete mode 100644 receiver/hostmetricsreceiver/internal/scraper/diskscraper/utils.go diff --git a/receiver/hostmetricsreceiver/config_test.go b/receiver/hostmetricsreceiver/config_test.go index 5adf0853c9ff..27487f2d981a 100644 --- a/receiver/hostmetricsreceiver/config_test.go +++ b/receiver/hostmetricsreceiver/config_test.go @@ -68,7 +68,7 @@ func TestLoadConfig(t *testing.T) { }, Scrapers: map[string]internal.Config{ cpuscraper.TypeStr: (&cpuscraper.Factory{}).CreateDefaultConfig(), - diskscraper.TypeStr: &diskscraper.Config{}, + diskscraper.TypeStr: (&diskscraper.Factory{}).CreateDefaultConfig(), loadscraper.TypeStr: &loadscraper.Config{}, filesystemscraper.TypeStr: &filesystemscraper.Config{}, memoryscraper.TypeStr: &memoryscraper.Config{}, diff --git a/receiver/hostmetricsreceiver/hostmetrics_receiver_test.go b/receiver/hostmetricsreceiver/hostmetrics_receiver_test.go index 6746cba1d3c1..92d9920d9912 100644 --- a/receiver/hostmetricsreceiver/hostmetrics_receiver_test.go +++ b/receiver/hostmetricsreceiver/hostmetrics_receiver_test.go @@ -106,7 +106,7 @@ func TestGatherMetrics_EndToEnd(t *testing.T) { }, Scrapers: map[string]internal.Config{ cpuscraper.TypeStr: scraperFactories[cpuscraper.TypeStr].CreateDefaultConfig(), - diskscraper.TypeStr: &diskscraper.Config{}, + diskscraper.TypeStr: scraperFactories[diskscraper.TypeStr].CreateDefaultConfig(), filesystemscraper.TypeStr: &filesystemscraper.Config{}, loadscraper.TypeStr: &loadscraper.Config{}, memoryscraper.TypeStr: &memoryscraper.Config{}, diff --git a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/codegen.go b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/codegen.go index b65d7c2b6d0d..cdad03fd21a1 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/codegen.go +++ b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/codegen.go @@ -15,6 +15,6 @@ //go:build !windows // +build !windows -//go:generate mdatagen metadata.yaml +//go:generate mdatagen --experimental-gen metadata.yaml package diskscraper // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/diskscraper" diff --git a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/config.go b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/config.go index 
3c9d6aee8380..2dd448c57547 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/config.go +++ b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/config.go @@ -17,12 +17,16 @@ package diskscraper // import "github.com/open-telemetry/opentelemetry-collector import ( "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/processor/filterset" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal" + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/diskscraper/internal/metadata" ) // Config relating to Disk Metric Scraper. type Config struct { internal.ConfigSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct + // Metrics allows customizing scraped metrics representation. + Metrics metadata.MetricsSettings `mapstructure:"metrics"` + // Include specifies a filter on the devices that should be included from the generated metrics. // Exclude specifies a filter on the devices that should be excluded from the generated metrics. // If neither `include` or `exclude` are set, metrics will be generated for all devices. diff --git a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others.go b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others.go index fe41caf1ff0b..0c5694ccba35 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others.go +++ b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others.go @@ -41,6 +41,7 @@ const ( type scraper struct { config *Config startTime pdata.Timestamp + mb *metadata.MetricsBuilder includeFS filterset.FilterSet excludeFS filterset.FilterSet @@ -79,6 +80,7 @@ func (s *scraper) start(context.Context, component.Host) error { } s.startTime = pdata.Timestamp(bootTime * 1e9) + s.mb = metadata.NewMetricsBuilder(s.config.Metrics, metadata.WithStartTime(s.startTime)) return nil } @@ -97,81 +99,51 @@ func (s *scraper) scrape(_ context.Context) (pdata.Metrics, error) { if len(ioCounters) > 0 { metrics.EnsureCapacity(metricsLen) - initializeDiskIOMetric(metrics.AppendEmpty(), s.startTime, now, ioCounters) - initializeDiskOperationsMetric(metrics.AppendEmpty(), s.startTime, now, ioCounters) - initializeDiskIOTimeMetric(metrics.AppendEmpty(), s.startTime, now, ioCounters) - initializeDiskOperationTimeMetric(metrics.AppendEmpty(), s.startTime, now, ioCounters) - initializeDiskPendingOperationsMetric(metrics.AppendEmpty(), now, ioCounters) - appendSystemSpecificMetrics(metrics, s.startTime, now, ioCounters) + s.recordDiskIOMetric(now, ioCounters) + s.recordDiskOperationsMetric(now, ioCounters) + s.recordDiskIOTimeMetric(now, ioCounters) + s.recordDiskOperationTimeMetric(now, ioCounters) + s.recordDiskPendingOperationsMetric(now, ioCounters) + s.recordSystemSpecificDataPoints(now, ioCounters) + s.mb.Emit(metrics) } return md, nil } -func initializeDiskIOMetric(metric pdata.Metric, startTime, now pdata.Timestamp, ioCounters map[string]disk.IOCountersStat) { - metadata.Metrics.SystemDiskIo.Init(metric) - - idps := metric.Sum().DataPoints() - idps.EnsureCapacity(2 * len(ioCounters)) - +func (s *scraper) recordDiskIOMetric(now pdata.Timestamp, ioCounters map[string]disk.IOCountersStat) { for device, ioCounter := range ioCounters { - initializeNumberDataPointAsInt(idps.AppendEmpty(), startTime, now, device, metadata.AttributeDirection.Read,
int64(ioCounter.ReadBytes)) - initializeNumberDataPointAsInt(idps.AppendEmpty(), startTime, now, device, metadata.AttributeDirection.Write, int64(ioCounter.WriteBytes)) + s.mb.RecordSystemDiskIoDataPoint(now, int64(ioCounter.ReadBytes), device, metadata.AttributeDirection.Read) + s.mb.RecordSystemDiskIoDataPoint(now, int64(ioCounter.WriteBytes), device, metadata.AttributeDirection.Write) } } -func initializeDiskOperationsMetric(metric pdata.Metric, startTime, now pdata.Timestamp, ioCounters map[string]disk.IOCountersStat) { - metadata.Metrics.SystemDiskOperations.Init(metric) - - idps := metric.Sum().DataPoints() - idps.EnsureCapacity(2 * len(ioCounters)) - +func (s *scraper) recordDiskOperationsMetric(now pdata.Timestamp, ioCounters map[string]disk.IOCountersStat) { for device, ioCounter := range ioCounters { - initializeNumberDataPointAsInt(idps.AppendEmpty(), startTime, now, device, metadata.AttributeDirection.Read, int64(ioCounter.ReadCount)) - initializeNumberDataPointAsInt(idps.AppendEmpty(), startTime, now, device, metadata.AttributeDirection.Write, int64(ioCounter.WriteCount)) + s.mb.RecordSystemDiskOperationsDataPoint(now, int64(ioCounter.ReadCount), device, metadata.AttributeDirection.Read) + s.mb.RecordSystemDiskOperationsDataPoint(now, int64(ioCounter.WriteCount), device, metadata.AttributeDirection.Write) } } -func initializeDiskIOTimeMetric(metric pdata.Metric, startTime, now pdata.Timestamp, ioCounters map[string]disk.IOCountersStat) { - metadata.Metrics.SystemDiskIoTime.Init(metric) - - ddps := metric.Sum().DataPoints() - ddps.EnsureCapacity(len(ioCounters)) - +func (s *scraper) recordDiskIOTimeMetric(now pdata.Timestamp, ioCounters map[string]disk.IOCountersStat) { for device, ioCounter := range ioCounters { - initializeNumberDataPointAsDouble(ddps.AppendEmpty(), startTime, now, device, "", float64(ioCounter.IoTime)/1e3) + s.mb.RecordSystemDiskIoTimeDataPoint(now, float64(ioCounter.IoTime)/1e3, device) } } -func initializeDiskOperationTimeMetric(metric pdata.Metric, startTime, now pdata.Timestamp, ioCounters map[string]disk.IOCountersStat) { - metadata.Metrics.SystemDiskOperationTime.Init(metric) - - ddps := metric.Sum().DataPoints() - ddps.EnsureCapacity(2 * len(ioCounters)) - +func (s *scraper) recordDiskOperationTimeMetric(now pdata.Timestamp, ioCounters map[string]disk.IOCountersStat) { for device, ioCounter := range ioCounters { - initializeNumberDataPointAsDouble(ddps.AppendEmpty(), startTime, now, device, metadata.AttributeDirection.Read, float64(ioCounter.ReadTime)/1e3) - initializeNumberDataPointAsDouble(ddps.AppendEmpty(), startTime, now, device, metadata.AttributeDirection.Write, float64(ioCounter.WriteTime)/1e3) + s.mb.RecordSystemDiskOperationTimeDataPoint(now, float64(ioCounter.ReadTime)/1e3, device, metadata.AttributeDirection.Read) + s.mb.RecordSystemDiskOperationTimeDataPoint(now, float64(ioCounter.WriteTime)/1e3, device, metadata.AttributeDirection.Write) } } -func initializeDiskPendingOperationsMetric(metric pdata.Metric, now pdata.Timestamp, ioCounters map[string]disk.IOCountersStat) { - metadata.Metrics.SystemDiskPendingOperations.Init(metric) - - idps := metric.Sum().DataPoints() - idps.EnsureCapacity(len(ioCounters)) - +func (s *scraper) recordDiskPendingOperationsMetric(now pdata.Timestamp, ioCounters map[string]disk.IOCountersStat) { for device, ioCounter := range ioCounters { - initializeDiskPendingDataPoint(idps.AppendEmpty(), now, device, int64(ioCounter.IopsInProgress)) + s.mb.RecordSystemDiskPendingOperationsDataPoint(now, 
int64(ioCounter.IopsInProgress), device) } } -func initializeDiskPendingDataPoint(dataPoint pdata.NumberDataPoint, now pdata.Timestamp, deviceLabel string, value int64) { - dataPoint.Attributes().InsertString(metadata.Attributes.Device, deviceLabel) - dataPoint.SetTimestamp(now) - dataPoint.SetIntVal(value) -} - func (s *scraper) filterByDevice(ioCounters map[string]disk.IOCountersStat) map[string]disk.IOCountersStat { if s.includeFS == nil && s.excludeFS == nil { return ioCounters diff --git a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others_fallback.go b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others_fallback.go index e1f6d92348e5..a248e2acfdc2 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others_fallback.go +++ b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others_fallback.go @@ -24,5 +24,5 @@ import ( const systemSpecificMetricsLen = 0 -func appendSystemSpecificMetrics(metrics pdata.MetricSlice, startTime, now pdata.Timestamp, ioCounters map[string]disk.IOCountersStat) { +func (s *scraper) recordSystemSpecificDataPoints(now pdata.Timestamp, ioCounters map[string]disk.IOCountersStat) { } diff --git a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others_linux.go b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others_linux.go index 9313c8c3f1a3..4aafedeaa716 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others_linux.go +++ b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others_linux.go @@ -26,30 +26,20 @@ import ( const systemSpecificMetricsLen = 2 -func appendSystemSpecificMetrics(metrics pdata.MetricSlice, startTime, now pdata.Timestamp, ioCounters map[string]disk.IOCountersStat) { - initializeDiskWeightedIOTimeMetric(metrics.AppendEmpty(), startTime, now, ioCounters) - initializeDiskMergedMetric(metrics.AppendEmpty(), startTime, now, ioCounters) +func (s *scraper) recordSystemSpecificDataPoints(now pdata.Timestamp, ioCounters map[string]disk.IOCountersStat) { + s.recordDiskWeightedIOTimeMetric(now, ioCounters) + s.recordDiskMergedMetric(now, ioCounters) } -func initializeDiskWeightedIOTimeMetric(metric pdata.Metric, startTime, now pdata.Timestamp, ioCounters map[string]disk.IOCountersStat) { - metadata.Metrics.SystemDiskWeightedIoTime.Init(metric) - - ddps := metric.Sum().DataPoints() - ddps.EnsureCapacity(len(ioCounters)) - +func (s *scraper) recordDiskWeightedIOTimeMetric(now pdata.Timestamp, ioCounters map[string]disk.IOCountersStat) { for device, ioCounter := range ioCounters { - initializeNumberDataPointAsDouble(ddps.AppendEmpty(), startTime, now, device, "", float64(ioCounter.WeightedIO)/1e3) + s.mb.RecordSystemDiskWeightedIoTimeDataPoint(now, float64(ioCounter.WeightedIO)/1e3, device) } } -func initializeDiskMergedMetric(metric pdata.Metric, startTime, now pdata.Timestamp, ioCounters map[string]disk.IOCountersStat) { - metadata.Metrics.SystemDiskMerged.Init(metric) - - idps := metric.Sum().DataPoints() - idps.EnsureCapacity(2 * len(ioCounters)) - +func (s *scraper) recordDiskMergedMetric(now pdata.Timestamp, ioCounters map[string]disk.IOCountersStat) { for device, ioCounter := range ioCounters { - initializeNumberDataPointAsInt(idps.AppendEmpty(), startTime, now, device, metadata.AttributeDirection.Read, int64(ioCounter.MergedReadCount)) - initializeNumberDataPointAsInt(idps.AppendEmpty(), startTime, now, device, 
metadata.AttributeDirection.Write, int64(ioCounter.MergedWriteCount)) + s.mb.RecordSystemDiskMergedDataPoint(now, int64(ioCounter.MergedReadCount), device, metadata.AttributeDirection.Read) + s.mb.RecordSystemDiskMergedDataPoint(now, int64(ioCounter.MergedWriteCount), device, metadata.AttributeDirection.Write) } } diff --git a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_test.go b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_test.go index a4672716d423..242006b2e350 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_test.go +++ b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_test.go @@ -17,7 +17,6 @@ package diskscraper import ( "context" "errors" - "runtime" "testing" "github.com/stretchr/testify/assert" @@ -37,41 +36,63 @@ func TestScrape(t *testing.T) { bootTimeFunc func() (uint64, error) newErrRegex string initializationErr string - expectMetrics bool + expectMetrics int expectedStartTime pdata.Timestamp } testCases := []testCase{ { name: "Standard", - expectMetrics: true, + config: Config{Metrics: metadata.DefaultMetricsSettings()}, + expectMetrics: metricsLen, }, { name: "Validate Start Time", + config: Config{Metrics: metadata.DefaultMetricsSettings()}, bootTimeFunc: func() (uint64, error) { return 100, nil }, - expectMetrics: true, + expectMetrics: metricsLen, expectedStartTime: 100 * 1e9, }, { name: "Boot Time Error", + config: Config{Metrics: metadata.DefaultMetricsSettings()}, bootTimeFunc: func() (uint64, error) { return 0, errors.New("err1") }, initializationErr: "err1", + expectMetrics: metricsLen, }, { - name: "Include Filter that matches nothing", - config: Config{Include: MatchConfig{filterset.Config{MatchType: "strict"}, []string{"@*^#&*$^#)"}}}, - expectMetrics: false, + name: "Include Filter that matches nothing", + config: Config{ + Metrics: metadata.DefaultMetricsSettings(), + Include: MatchConfig{filterset.Config{MatchType: "strict"}, []string{"@*^#&*$^#)"}}, + }, + expectMetrics: 0, }, { - name: "Invalid Include Filter", - config: Config{Include: MatchConfig{Devices: []string{"test"}}}, + name: "Invalid Include Filter", + config: Config{ + Metrics: metadata.DefaultMetricsSettings(), + Include: MatchConfig{Devices: []string{"test"}}, + }, newErrRegex: "^error creating device include filters:", }, { - name: "Invalid Exclude Filter", - config: Config{Exclude: MatchConfig{Devices: []string{"test"}}}, + name: "Invalid Exclude Filter", + config: Config{ + Metrics: metadata.DefaultMetricsSettings(), + Exclude: MatchConfig{Devices: []string{"test"}}, + }, newErrRegex: "^error creating device exclude filters:", }, + { + name: "Disable one metric", + config: (func() Config { + config := Config{Metrics: metadata.DefaultMetricsSettings()} + config.Metrics.SystemDiskIo.Enabled = false + return config + })(), + expectMetrics: metricsLen - 1, + }, } for _, test := range testCases { @@ -98,23 +119,37 @@ func TestScrape(t *testing.T) { md, err := scraper.scrape(context.Background()) require.NoError(t, err, "Failed to scrape metrics: %v", err) - if !test.expectMetrics { - assert.Equal(t, 0, md.MetricCount()) - return - } - + assert.Equal(t, test.expectMetrics, md.MetricCount()) metrics := md.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0).Metrics() - assert.Equal(t, metricsLen, metrics.Len()) - - assertInt64DiskMetricValid(t, metrics.At(0), metadata.Metrics.SystemDiskIo.New(), test.expectedStartTime) - assertInt64DiskMetricValid(t, metrics.At(1), 
metadata.Metrics.SystemDiskOperations.New(), test.expectedStartTime) - assertDoubleDiskMetricValid(t, metrics.At(2), metadata.Metrics.SystemDiskIoTime.New(), false, test.expectedStartTime) - assertDoubleDiskMetricValid(t, metrics.At(3), metadata.Metrics.SystemDiskOperationTime.New(), true, test.expectedStartTime) - assertDiskPendingOperationsMetricValid(t, metrics.At(4)) - - if runtime.GOOS == "linux" { - assertDoubleDiskMetricValid(t, metrics.At(5), metadata.Metrics.SystemDiskWeightedIoTime.New(), false, test.expectedStartTime) - assertInt64DiskMetricValid(t, metrics.At(6), metadata.Metrics.SystemDiskMerged.New(), test.expectedStartTime) + assert.Equal(t, test.expectMetrics, metrics.Len()) + + reportedMetricsCount := map[string]int{} + for i := 0; i < metrics.Len(); i++ { + metric := metrics.At(i) + reportedMetricsCount[metric.Name()]++ + switch metric.Name() { + case "system.disk.io": + assertInt64DiskMetricValid(t, metric, test.expectedStartTime) + case "system.disk.io_time": + assertDoubleDiskMetricValid(t, metric, false, test.expectedStartTime) + case "system.disk.operation_time": + assertDoubleDiskMetricValid(t, metric, true, test.expectedStartTime) + case "system.disk.operations": + assertInt64DiskMetricValid(t, metric, test.expectedStartTime) + case "system.disk.merged": + assertInt64DiskMetricValid(t, metric, test.expectedStartTime) + case "system.disk.pending_operations": + assertDiskPendingOperationsMetricValid(t, metric) + case "system.disk.weighted_io_time": + assertDoubleDiskMetricValid(t, metric, false, test.expectedStartTime) + default: + assert.Failf(t, "unexpected-metric", "metric %q is not expected", metric.Name()) + } + } + for m, c := range reportedMetricsCount { + assert.Equal(t, 1, c, "metric %q reported %d times", m, c) } internal.AssertSameTimeStampForAllMetrics(t, metrics) @@ -122,8 +157,7 @@ } } -func assertInt64DiskMetricValid(t *testing.T, metric pdata.Metric, expectedDescriptor pdata.Metric, startTime pdata.Timestamp) { - internal.AssertDescriptorEqual(t, expectedDescriptor, metric) +func assertInt64DiskMetricValid(t *testing.T, metric pdata.Metric, startTime pdata.Timestamp) { if startTime != 0 { internal.AssertSumMetricStartTimeEquals(t, metric, startTime) } @@ -135,8 +169,7 @@ func assertInt64DiskMetricValid(t *testing.T, metric pdata.Metric, expectedDescr internal.AssertSumMetricHasAttributeValue(t, metric, 1, "direction", pdata.NewAttributeValueString(metadata.AttributeDirection.Write)) } -func assertDoubleDiskMetricValid(t *testing.T, metric pdata.Metric, expectedDescriptor pdata.Metric, expectDirectionLabels bool, startTime pdata.Timestamp) { - internal.AssertDescriptorEqual(t, expectedDescriptor, metric) +func assertDoubleDiskMetricValid(t *testing.T, metric pdata.Metric, expectDirectionLabels bool, startTime pdata.Timestamp) { if startTime != 0 { internal.AssertSumMetricStartTimeEquals(t, metric, startTime) } @@ -155,7 +188,6 @@ } func assertDiskPendingOperationsMetricValid(t *testing.T, metric pdata.Metric) { - internal.AssertDescriptorEqual(t, metadata.Metrics.SystemDiskPendingOperations.New(), metric) assert.GreaterOrEqual(t, metric.Sum().DataPoints().Len(), 1) internal.AssertSumMetricHasAttribute(t, metric, 0, "device") } diff --git a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_windows.go
b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_windows.go index ee0ce6927015..167c31aa46b8 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_windows.go +++ b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_windows.go @@ -52,6 +52,7 @@ const ( type scraper struct { config *Config startTime pdata.Timestamp + mb *metadata.MetricsBuilder includeFS filterset.FilterSet excludeFS filterset.FilterSet @@ -91,6 +92,7 @@ func (s *scraper) start(context.Context, component.Host) error { } s.startTime = pdata.Timestamp(bootTime * 1e9) + s.mb = metadata.NewMetricsBuilder(s.config.Metrics, metadata.WithStartTime(s.startTime)) return s.perfCounterScraper.Initialize(logicalDisk) } @@ -121,72 +123,47 @@ func (s *scraper) scrape(ctx context.Context) (pdata.Metrics, error) { if len(logicalDiskCounterValues) > 0 { metrics.EnsureCapacity(metricsLen) - initializeDiskIOMetric(metrics.AppendEmpty(), s.startTime, now, logicalDiskCounterValues) - initializeDiskOperationsMetric(metrics.AppendEmpty(), s.startTime, now, logicalDiskCounterValues) - initializeDiskIOTimeMetric(metrics.AppendEmpty(), s.startTime, now, logicalDiskCounterValues) - initializeDiskOperationTimeMetric(metrics.AppendEmpty(), s.startTime, now, logicalDiskCounterValues) - initializeDiskPendingOperationsMetric(metrics.AppendEmpty(), now, logicalDiskCounterValues) + s.recordDiskIOMetric(now, logicalDiskCounterValues) + s.recordDiskOperationsMetric(now, logicalDiskCounterValues) + s.recordDiskIOTimeMetric(now, logicalDiskCounterValues) + s.recordDiskOperationTimeMetric(now, logicalDiskCounterValues) + s.recordDiskPendingOperationsMetric(now, logicalDiskCounterValues) + s.mb.Emit(metrics) } return md, nil } -func initializeDiskIOMetric(metric pdata.Metric, startTime, now pdata.Timestamp, logicalDiskCounterValues []*perfcounters.CounterValues) { - metadata.Metrics.SystemDiskIo.Init(metric) - - idps := metric.Sum().DataPoints() - idps.EnsureCapacity(2 * len(logicalDiskCounterValues)) +func (s *scraper) recordDiskIOMetric(now pdata.Timestamp, logicalDiskCounterValues []*perfcounters.CounterValues) { for _, logicalDiskCounter := range logicalDiskCounterValues { - initializeNumberDataPointAsInt(idps.AppendEmpty(), startTime, now, logicalDiskCounter.InstanceName, metadata.AttributeDirection.Read, logicalDiskCounter.Values[readBytesPerSec]) - initializeNumberDataPointAsInt(idps.AppendEmpty(), startTime, now, logicalDiskCounter.InstanceName, metadata.AttributeDirection.Write, logicalDiskCounter.Values[writeBytesPerSec]) + s.mb.RecordSystemDiskIoDataPoint(now, logicalDiskCounter.Values[readBytesPerSec], logicalDiskCounter.InstanceName, metadata.AttributeDirection.Read) + s.mb.RecordSystemDiskIoDataPoint(now, logicalDiskCounter.Values[writeBytesPerSec], logicalDiskCounter.InstanceName, metadata.AttributeDirection.Write) } } -func initializeDiskOperationsMetric(metric pdata.Metric, startTime, now pdata.Timestamp, logicalDiskCounterValues []*perfcounters.CounterValues) { - metadata.Metrics.SystemDiskOperations.Init(metric) - - idps := metric.Sum().DataPoints() - idps.EnsureCapacity(2 * len(logicalDiskCounterValues)) +func (s *scraper) recordDiskOperationsMetric(now pdata.Timestamp, logicalDiskCounterValues []*perfcounters.CounterValues) { for _, logicalDiskCounter := range logicalDiskCounterValues { - initializeNumberDataPointAsInt(idps.AppendEmpty(), startTime, now, logicalDiskCounter.InstanceName, metadata.AttributeDirection.Read, logicalDiskCounter.Values[readsPerSec]) - 
initializeNumberDataPointAsInt(idps.AppendEmpty(), startTime, now, logicalDiskCounter.InstanceName, metadata.AttributeDirection.Write, logicalDiskCounter.Values[writesPerSec]) + s.mb.RecordSystemDiskOperationsDataPoint(now, logicalDiskCounter.Values[readsPerSec], logicalDiskCounter.InstanceName, metadata.AttributeDirection.Read) + s.mb.RecordSystemDiskOperationsDataPoint(now, logicalDiskCounter.Values[writesPerSec], logicalDiskCounter.InstanceName, metadata.AttributeDirection.Write) } } -func initializeDiskIOTimeMetric(metric pdata.Metric, startTime, now pdata.Timestamp, logicalDiskCounterValues []*perfcounters.CounterValues) { - metadata.Metrics.SystemDiskIoTime.Init(metric) - - ddps := metric.Sum().DataPoints() - ddps.EnsureCapacity(len(logicalDiskCounterValues)) +func (s *scraper) recordDiskIOTimeMetric(now pdata.Timestamp, logicalDiskCounterValues []*perfcounters.CounterValues) { for _, logicalDiskCounter := range logicalDiskCounterValues { // disk active time = system boot time - disk idle time - initializeNumberDataPointAsDouble(ddps.AppendEmpty(), startTime, now, logicalDiskCounter.InstanceName, "", float64(now-startTime)/1e9-float64(logicalDiskCounter.Values[idleTime])/1e7) + s.mb.RecordSystemDiskIoTimeDataPoint(now, float64(now-s.startTime)/1e9-float64(logicalDiskCounter.Values[idleTime])/1e7, logicalDiskCounter.InstanceName) } } -func initializeDiskOperationTimeMetric(metric pdata.Metric, startTime, now pdata.Timestamp, logicalDiskCounterValues []*perfcounters.CounterValues) { - metadata.Metrics.SystemDiskOperationTime.Init(metric) - - ddps := metric.Sum().DataPoints() - ddps.EnsureCapacity(2 * len(logicalDiskCounterValues)) +func (s *scraper) recordDiskOperationTimeMetric(now pdata.Timestamp, logicalDiskCounterValues []*perfcounters.CounterValues) { for _, logicalDiskCounter := range logicalDiskCounterValues { - initializeNumberDataPointAsDouble(ddps.AppendEmpty(), startTime, now, logicalDiskCounter.InstanceName, metadata.AttributeDirection.Read, float64(logicalDiskCounter.Values[avgDiskSecsPerRead])/1e7) - initializeNumberDataPointAsDouble(ddps.AppendEmpty(), startTime, now, logicalDiskCounter.InstanceName, metadata.AttributeDirection.Write, float64(logicalDiskCounter.Values[avgDiskSecsPerWrite])/1e7) + s.mb.RecordSystemDiskOperationTimeDataPoint(now, float64(logicalDiskCounter.Values[avgDiskSecsPerRead])/1e7, logicalDiskCounter.InstanceName, metadata.AttributeDirection.Read) + s.mb.RecordSystemDiskOperationTimeDataPoint(now, float64(logicalDiskCounter.Values[avgDiskSecsPerWrite])/1e7, logicalDiskCounter.InstanceName, metadata.AttributeDirection.Write) } } -func initializeDiskPendingOperationsMetric(metric pdata.Metric, now pdata.Timestamp, logicalDiskCounterValues []*perfcounters.CounterValues) { - metadata.Metrics.SystemDiskPendingOperations.Init(metric) - - idps := metric.Sum().DataPoints() - idps.EnsureCapacity(len(logicalDiskCounterValues)) +func (s *scraper) recordDiskPendingOperationsMetric(now pdata.Timestamp, logicalDiskCounterValues []*perfcounters.CounterValues) { for _, logicalDiskCounter := range logicalDiskCounterValues { - initializeDiskPendingDataPoint(idps.AppendEmpty(), now, logicalDiskCounter.InstanceName, logicalDiskCounter.Values[queueLength]) + s.mb.RecordSystemDiskPendingOperationsDataPoint(now, logicalDiskCounter.Values[queueLength], logicalDiskCounter.InstanceName) } } - -func initializeDiskPendingDataPoint(dataPoint pdata.NumberDataPoint, now pdata.Timestamp, deviceLabel string, value int64) { - 
dataPoint.Attributes().InsertString(metadata.Attributes.Device, deviceLabel) - dataPoint.SetTimestamp(now) - dataPoint.SetIntVal(value) -} diff --git a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/factory.go b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/factory.go index 77ea0e3a4ddc..002534cfd971 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/factory.go +++ b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/factory.go @@ -21,6 +21,7 @@ import ( "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal" + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/diskscraper/internal/metadata" ) // This file implements Factory for Disk scraper. @@ -36,7 +37,9 @@ type Factory struct { // CreateDefaultConfig creates the default configuration for the Scraper. func (f *Factory) CreateDefaultConfig() internal.Config { - return &Config{} + return &Config{ + Metrics: metadata.DefaultMetricsSettings(), + } } // CreateMetricsScraper creates a scraper based on provided config. diff --git a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/internal/metadata/generated_metrics.go b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/internal/metadata/generated_metrics.go deleted file mode 100644 index 19301f66aae7..000000000000 --- a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/internal/metadata/generated_metrics.go +++ /dev/null @@ -1,201 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by mdatagen. DO NOT EDIT. - -package metadata - -import ( - "go.opentelemetry.io/collector/config" - "go.opentelemetry.io/collector/model/pdata" -) - -// Type is the component type name. -const Type config.Type = "disk" - -// MetricIntf is an interface to generically interact with generated metric. -type MetricIntf interface { - Name() string - New() pdata.Metric - Init(metric pdata.Metric) -} - -// Intentionally not exposing this so that it is opaque and can change freely. -type metricImpl struct { - name string - initFunc func(pdata.Metric) -} - -// Name returns the metric name. -func (m *metricImpl) Name() string { - return m.name -} - -// New creates a metric object preinitialized. -func (m *metricImpl) New() pdata.Metric { - metric := pdata.NewMetric() - m.Init(metric) - return metric -} - -// Init initializes the provided metric object. -func (m *metricImpl) Init(metric pdata.Metric) { - m.initFunc(metric) -} - -type metricStruct struct { - SystemDiskIo MetricIntf - SystemDiskIoTime MetricIntf - SystemDiskMerged MetricIntf - SystemDiskOperationTime MetricIntf - SystemDiskOperations MetricIntf - SystemDiskPendingOperations MetricIntf - SystemDiskWeightedIoTime MetricIntf -} - -// Names returns a list of all the metric name strings. 
-func (m *metricStruct) Names() []string { - return []string{ - "system.disk.io", - "system.disk.io_time", - "system.disk.merged", - "system.disk.operation_time", - "system.disk.operations", - "system.disk.pending_operations", - "system.disk.weighted_io_time", - } -} - -var metricsByName = map[string]MetricIntf{ - "system.disk.io": Metrics.SystemDiskIo, - "system.disk.io_time": Metrics.SystemDiskIoTime, - "system.disk.merged": Metrics.SystemDiskMerged, - "system.disk.operation_time": Metrics.SystemDiskOperationTime, - "system.disk.operations": Metrics.SystemDiskOperations, - "system.disk.pending_operations": Metrics.SystemDiskPendingOperations, - "system.disk.weighted_io_time": Metrics.SystemDiskWeightedIoTime, -} - -func (m *metricStruct) ByName(n string) MetricIntf { - return metricsByName[n] -} - -// Metrics contains a set of methods for each metric that help with -// manipulating those metrics. -var Metrics = &metricStruct{ - &metricImpl{ - "system.disk.io", - func(metric pdata.Metric) { - metric.SetName("system.disk.io") - metric.SetDescription("Disk bytes transferred.") - metric.SetUnit("By") - metric.SetDataType(pdata.MetricDataTypeSum) - metric.Sum().SetIsMonotonic(true) - metric.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) - }, - }, - &metricImpl{ - "system.disk.io_time", - func(metric pdata.Metric) { - metric.SetName("system.disk.io_time") - metric.SetDescription("Time disk spent activated. On Windows, this is calculated as the inverse of disk idle time.") - metric.SetUnit("s") - metric.SetDataType(pdata.MetricDataTypeSum) - metric.Sum().SetIsMonotonic(true) - metric.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) - }, - }, - &metricImpl{ - "system.disk.merged", - func(metric pdata.Metric) { - metric.SetName("system.disk.merged") - metric.SetDescription("The number of disk reads merged into single physical disk access operations.") - metric.SetUnit("{operations}") - metric.SetDataType(pdata.MetricDataTypeSum) - metric.Sum().SetIsMonotonic(true) - metric.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) - }, - }, - &metricImpl{ - "system.disk.operation_time", - func(metric pdata.Metric) { - metric.SetName("system.disk.operation_time") - metric.SetDescription("Time spent in disk operations.") - metric.SetUnit("s") - metric.SetDataType(pdata.MetricDataTypeSum) - metric.Sum().SetIsMonotonic(true) - metric.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) - }, - }, - &metricImpl{ - "system.disk.operations", - func(metric pdata.Metric) { - metric.SetName("system.disk.operations") - metric.SetDescription("Disk operations count.") - metric.SetUnit("{operations}") - metric.SetDataType(pdata.MetricDataTypeSum) - metric.Sum().SetIsMonotonic(true) - metric.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) - }, - }, - &metricImpl{ - "system.disk.pending_operations", - func(metric pdata.Metric) { - metric.SetName("system.disk.pending_operations") - metric.SetDescription("The queue size of pending I/O operations.") - metric.SetUnit("{operations}") - metric.SetDataType(pdata.MetricDataTypeSum) - metric.Sum().SetIsMonotonic(false) - metric.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) - }, - }, - &metricImpl{ - "system.disk.weighted_io_time", - func(metric pdata.Metric) { - metric.SetName("system.disk.weighted_io_time") - metric.SetDescription("Time disk spent activated multiplied by the queue length.") - 
metric.SetUnit("s") - metric.SetDataType(pdata.MetricDataTypeSum) - metric.Sum().SetIsMonotonic(true) - metric.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) - }, - }, -} - -// M contains a set of methods for each metric that help with -// manipulating those metrics. M is an alias for Metrics -var M = Metrics - -// Attributes contains the possible metric attributes that can be used. -var Attributes = struct { - // Device (Name of the disk.) - Device string - // Direction (Direction of flow of bytes/opertations (read or write).) - Direction string -}{ - "device", - "direction", -} - -// A is an alias for Attributes. -var A = Attributes - -// AttributeDirection are the possible values that the attribute "direction" can have. -var AttributeDirection = struct { - Read string - Write string -}{ - "read", - "write", -} diff --git a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/internal/metadata/generated_metrics_v2.go b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/internal/metadata/generated_metrics_v2.go new file mode 100644 index 000000000000..f29b8478e4fa --- /dev/null +++ b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/internal/metadata/generated_metrics_v2.go @@ -0,0 +1,435 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by mdatagen. DO NOT EDIT. + +package metadata + +import ( + "time" + + "go.opentelemetry.io/collector/model/pdata" +) + +// MetricSettings provides common settings for a particular metric. +type MetricSettings struct { + Enabled bool `mapstructure:"enabled"` +} + +// MetricsSettings provides settings for disk metrics. +type MetricsSettings struct { + SystemDiskIo MetricSettings `mapstructure:"system.disk.io"` + SystemDiskIoTime MetricSettings `mapstructure:"system.disk.io_time"` + SystemDiskMerged MetricSettings `mapstructure:"system.disk.merged"` + SystemDiskOperationTime MetricSettings `mapstructure:"system.disk.operation_time"` + SystemDiskOperations MetricSettings `mapstructure:"system.disk.operations"` + SystemDiskPendingOperations MetricSettings `mapstructure:"system.disk.pending_operations"` + SystemDiskWeightedIoTime MetricSettings `mapstructure:"system.disk.weighted_io_time"` +} + +func DefaultMetricsSettings() MetricsSettings { + return MetricsSettings{ + SystemDiskIo: MetricSettings{ + Enabled: true, + }, + SystemDiskIoTime: MetricSettings{ + Enabled: true, + }, + SystemDiskMerged: MetricSettings{ + Enabled: true, + }, + SystemDiskOperationTime: MetricSettings{ + Enabled: true, + }, + SystemDiskOperations: MetricSettings{ + Enabled: true, + }, + SystemDiskPendingOperations: MetricSettings{ + Enabled: true, + }, + SystemDiskWeightedIoTime: MetricSettings{ + Enabled: true, + }, + } +} + +// metric holds data for generated metric and keeps track of data points slice capacity. +type metric struct { + data pdata.Metric // data buffer for generated metric. + capacity int // max observed number of data points added to the metric. 
+} + +func (m *metric) updateCapacity(dpLen int) { + if dpLen > m.capacity { + m.capacity = dpLen + } +} + +func newMetric() metric { + return metric{data: pdata.NewMetric()} +} + +type metrics struct { + systemDiskIo metric + systemDiskIoTime metric + systemDiskMerged metric + systemDiskOperationTime metric + systemDiskOperations metric + systemDiskPendingOperations metric + systemDiskWeightedIoTime metric +} + +func newMetrics(config MetricsSettings) metrics { + ms := metrics{} + if config.SystemDiskIo.Enabled { + ms.systemDiskIo = newMetric() + } + if config.SystemDiskIoTime.Enabled { + ms.systemDiskIoTime = newMetric() + } + if config.SystemDiskMerged.Enabled { + ms.systemDiskMerged = newMetric() + } + if config.SystemDiskOperationTime.Enabled { + ms.systemDiskOperationTime = newMetric() + } + if config.SystemDiskOperations.Enabled { + ms.systemDiskOperations = newMetric() + } + if config.SystemDiskPendingOperations.Enabled { + ms.systemDiskPendingOperations = newMetric() + } + if config.SystemDiskWeightedIoTime.Enabled { + ms.systemDiskWeightedIoTime = newMetric() + } + return ms +} + +// MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations +// required to produce metric representation defined in metadata and user configuration. +type MetricsBuilder struct { + config MetricsSettings + startTime pdata.Timestamp + metrics metrics +} + +// metricBuilderOption applies changes to default metrics builder. +type metricBuilderOption func(*MetricsBuilder) + +// WithStartTime sets startTime on the metrics builder. +func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { + return func(mb *MetricsBuilder) { + mb.startTime = startTime + } +} + +func NewMetricsBuilder(config MetricsSettings, options ...metricBuilderOption) *MetricsBuilder { + mb := &MetricsBuilder{ + config: config, + startTime: pdata.NewTimestampFromTime(time.Now()), + metrics: newMetrics(config), + } + + for _, op := range options { + op(mb) + } + + mb.initMetrics() + return mb +} + +// Emit appends generated metrics to a pdata.MetricsSlice and updates the internal state to be ready for recording +// another set of data points. This function will be doing all transformations required to produce metric representation +// defined in metadata and user configuration, e.g. delta/cumulative translation. 
+func (mb *MetricsBuilder) Emit(metrics pdata.MetricSlice) { + if mb.config.SystemDiskIo.Enabled && mb.metrics.systemDiskIo.data.Sum().DataPoints().Len() > 0 { + mb.metrics.systemDiskIo.updateCapacity(mb.metrics.systemDiskIo.data.Sum().DataPoints().Len()) + mb.metrics.systemDiskIo.data.MoveTo(metrics.AppendEmpty()) + } + if mb.config.SystemDiskIoTime.Enabled && mb.metrics.systemDiskIoTime.data.Sum().DataPoints().Len() > 0 { + mb.metrics.systemDiskIoTime.updateCapacity(mb.metrics.systemDiskIoTime.data.Sum().DataPoints().Len()) + mb.metrics.systemDiskIoTime.data.MoveTo(metrics.AppendEmpty()) + } + if mb.config.SystemDiskMerged.Enabled && mb.metrics.systemDiskMerged.data.Sum().DataPoints().Len() > 0 { + mb.metrics.systemDiskMerged.updateCapacity(mb.metrics.systemDiskMerged.data.Sum().DataPoints().Len()) + mb.metrics.systemDiskMerged.data.MoveTo(metrics.AppendEmpty()) + } + if mb.config.SystemDiskOperationTime.Enabled && mb.metrics.systemDiskOperationTime.data.Sum().DataPoints().Len() > 0 { + mb.metrics.systemDiskOperationTime.updateCapacity(mb.metrics.systemDiskOperationTime.data.Sum().DataPoints().Len()) + mb.metrics.systemDiskOperationTime.data.MoveTo(metrics.AppendEmpty()) + } + if mb.config.SystemDiskOperations.Enabled && mb.metrics.systemDiskOperations.data.Sum().DataPoints().Len() > 0 { + mb.metrics.systemDiskOperations.updateCapacity(mb.metrics.systemDiskOperations.data.Sum().DataPoints().Len()) + mb.metrics.systemDiskOperations.data.MoveTo(metrics.AppendEmpty()) + } + if mb.config.SystemDiskPendingOperations.Enabled && mb.metrics.systemDiskPendingOperations.data.Sum().DataPoints().Len() > 0 { + mb.metrics.systemDiskPendingOperations.updateCapacity(mb.metrics.systemDiskPendingOperations.data.Sum().DataPoints().Len()) + mb.metrics.systemDiskPendingOperations.data.MoveTo(metrics.AppendEmpty()) + } + if mb.config.SystemDiskWeightedIoTime.Enabled && mb.metrics.systemDiskWeightedIoTime.data.Sum().DataPoints().Len() > 0 { + mb.metrics.systemDiskWeightedIoTime.updateCapacity(mb.metrics.systemDiskWeightedIoTime.data.Sum().DataPoints().Len()) + mb.metrics.systemDiskWeightedIoTime.data.MoveTo(metrics.AppendEmpty()) + } + + // Reset metric data points collection. + mb.initMetrics() +} + +// initSystemDiskIoMetric builds new system.disk.io metric. +func (mb *MetricsBuilder) initSystemDiskIoMetric() { + metric := mb.metrics.systemDiskIo + metric.data.SetName("system.disk.io") + metric.data.SetDescription("Disk bytes transferred.") + metric.data.SetUnit("By") + metric.data.SetDataType(pdata.MetricDataTypeSum) + metric.data.Sum().SetIsMonotonic(true) + metric.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + metric.data.Sum().DataPoints().EnsureCapacity(metric.capacity) +} + +// initSystemDiskIoTimeMetric builds new system.disk.io_time metric. +func (mb *MetricsBuilder) initSystemDiskIoTimeMetric() { + metric := mb.metrics.systemDiskIoTime + metric.data.SetName("system.disk.io_time") + metric.data.SetDescription("Time disk spent activated. On Windows, this is calculated as the inverse of disk idle time.") + metric.data.SetUnit("s") + metric.data.SetDataType(pdata.MetricDataTypeSum) + metric.data.Sum().SetIsMonotonic(true) + metric.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + metric.data.Sum().DataPoints().EnsureCapacity(metric.capacity) +} + +// initSystemDiskMergedMetric builds new system.disk.merged metric. 
+func (mb *MetricsBuilder) initSystemDiskMergedMetric() { + metric := mb.metrics.systemDiskMerged + metric.data.SetName("system.disk.merged") + metric.data.SetDescription("The number of disk reads merged into single physical disk access operations.") + metric.data.SetUnit("{operations}") + metric.data.SetDataType(pdata.MetricDataTypeSum) + metric.data.Sum().SetIsMonotonic(true) + metric.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + metric.data.Sum().DataPoints().EnsureCapacity(metric.capacity) +} + +// initSystemDiskOperationTimeMetric builds new system.disk.operation_time metric. +func (mb *MetricsBuilder) initSystemDiskOperationTimeMetric() { + metric := mb.metrics.systemDiskOperationTime + metric.data.SetName("system.disk.operation_time") + metric.data.SetDescription("Time spent in disk operations.") + metric.data.SetUnit("s") + metric.data.SetDataType(pdata.MetricDataTypeSum) + metric.data.Sum().SetIsMonotonic(true) + metric.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + metric.data.Sum().DataPoints().EnsureCapacity(metric.capacity) +} + +// initSystemDiskOperationsMetric builds new system.disk.operations metric. +func (mb *MetricsBuilder) initSystemDiskOperationsMetric() { + metric := mb.metrics.systemDiskOperations + metric.data.SetName("system.disk.operations") + metric.data.SetDescription("Disk operations count.") + metric.data.SetUnit("{operations}") + metric.data.SetDataType(pdata.MetricDataTypeSum) + metric.data.Sum().SetIsMonotonic(true) + metric.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + metric.data.Sum().DataPoints().EnsureCapacity(metric.capacity) +} + +// initSystemDiskPendingOperationsMetric builds new system.disk.pending_operations metric. +func (mb *MetricsBuilder) initSystemDiskPendingOperationsMetric() { + metric := mb.metrics.systemDiskPendingOperations + metric.data.SetName("system.disk.pending_operations") + metric.data.SetDescription("The queue size of pending I/O operations.") + metric.data.SetUnit("{operations}") + metric.data.SetDataType(pdata.MetricDataTypeSum) + metric.data.Sum().SetIsMonotonic(false) + metric.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + metric.data.Sum().DataPoints().EnsureCapacity(metric.capacity) +} + +// initSystemDiskWeightedIoTimeMetric builds new system.disk.weighted_io_time metric. +func (mb *MetricsBuilder) initSystemDiskWeightedIoTimeMetric() { + metric := mb.metrics.systemDiskWeightedIoTime + metric.data.SetName("system.disk.weighted_io_time") + metric.data.SetDescription("Time disk spent activated multiplied by the queue length.") + metric.data.SetUnit("s") + metric.data.SetDataType(pdata.MetricDataTypeSum) + metric.data.Sum().SetIsMonotonic(true) + metric.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + metric.data.Sum().DataPoints().EnsureCapacity(metric.capacity) +} + +// initMetrics initializes metrics. +func (mb *MetricsBuilder) initMetrics() { + if mb.config.SystemDiskIo.Enabled { + // TODO: Use metric.data.Sum().DataPoints().Clear() instead of rebuilding + // the metrics once the Clear method is available. + mb.initSystemDiskIoMetric() + } + if mb.config.SystemDiskIoTime.Enabled { + // TODO: Use metric.data.Sum().DataPoints().Clear() instead of rebuilding + // the metrics once the Clear method is available. 
+ mb.initSystemDiskIoTimeMetric() + } + if mb.config.SystemDiskMerged.Enabled { + // TODO: Use metric.data.Sum().DataPoints().Clear() instead of rebuilding + // the metrics once the Clear method is available. + mb.initSystemDiskMergedMetric() + } + if mb.config.SystemDiskOperationTime.Enabled { + // TODO: Use metric.data.Sum().DataPoints().Clear() instead of rebuilding + // the metrics once the Clear method is available. + mb.initSystemDiskOperationTimeMetric() + } + if mb.config.SystemDiskOperations.Enabled { + // TODO: Use metric.data.Sum().DataPoints().Clear() instead of rebuilding + // the metrics once the Clear method is available. + mb.initSystemDiskOperationsMetric() + } + if mb.config.SystemDiskPendingOperations.Enabled { + // TODO: Use metric.data.Sum().DataPoints().Clear() instead of rebuilding + // the metrics once the Clear method is available. + mb.initSystemDiskPendingOperationsMetric() + } + if mb.config.SystemDiskWeightedIoTime.Enabled { + // TODO: Use metric.data.Sum().DataPoints().Clear() instead of rebuilding + // the metrics once the Clear method is available. + mb.initSystemDiskWeightedIoTimeMetric() + } +} + +// RecordSystemDiskIoDataPoint adds a data point to system.disk.io metric. +// Any attribute of AttributeValueTypeEmpty type will be skipped. +func (mb *MetricsBuilder) RecordSystemDiskIoDataPoint(ts pdata.Timestamp, val int64, deviceAttributeValue string, directionAttributeValue string) { + if !mb.config.SystemDiskIo.Enabled { + return + } + + dp := mb.metrics.systemDiskIo.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(mb.startTime) + dp.SetTimestamp(ts) + dp.SetIntVal(val) + dp.Attributes().Insert(A.Device, pdata.NewAttributeValueString(deviceAttributeValue)) + dp.Attributes().Insert(A.Direction, pdata.NewAttributeValueString(directionAttributeValue)) +} + +// RecordSystemDiskIoTimeDataPoint adds a data point to system.disk.io_time metric. +// Any attribute of AttributeValueTypeEmpty type will be skipped. +func (mb *MetricsBuilder) RecordSystemDiskIoTimeDataPoint(ts pdata.Timestamp, val float64, deviceAttributeValue string) { + if !mb.config.SystemDiskIoTime.Enabled { + return + } + + dp := mb.metrics.systemDiskIoTime.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(mb.startTime) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) + dp.Attributes().Insert(A.Device, pdata.NewAttributeValueString(deviceAttributeValue)) +} + +// RecordSystemDiskMergedDataPoint adds a data point to system.disk.merged metric. +// Any attribute of AttributeValueTypeEmpty type will be skipped. +func (mb *MetricsBuilder) RecordSystemDiskMergedDataPoint(ts pdata.Timestamp, val int64, deviceAttributeValue string, directionAttributeValue string) { + if !mb.config.SystemDiskMerged.Enabled { + return + } + + dp := mb.metrics.systemDiskMerged.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(mb.startTime) + dp.SetTimestamp(ts) + dp.SetIntVal(val) + dp.Attributes().Insert(A.Device, pdata.NewAttributeValueString(deviceAttributeValue)) + dp.Attributes().Insert(A.Direction, pdata.NewAttributeValueString(directionAttributeValue)) +} + +// RecordSystemDiskOperationTimeDataPoint adds a data point to system.disk.operation_time metric. +// Any attribute of AttributeValueTypeEmpty type will be skipped. 
+func (mb *MetricsBuilder) RecordSystemDiskOperationTimeDataPoint(ts pdata.Timestamp, val float64, deviceAttributeValue string, directionAttributeValue string) { + if !mb.config.SystemDiskOperationTime.Enabled { + return + } + + dp := mb.metrics.systemDiskOperationTime.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(mb.startTime) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) + dp.Attributes().Insert(A.Device, pdata.NewAttributeValueString(deviceAttributeValue)) + dp.Attributes().Insert(A.Direction, pdata.NewAttributeValueString(directionAttributeValue)) +} + +// RecordSystemDiskOperationsDataPoint adds a data point to system.disk.operations metric. +// Any attribute of AttributeValueTypeEmpty type will be skipped. +func (mb *MetricsBuilder) RecordSystemDiskOperationsDataPoint(ts pdata.Timestamp, val int64, deviceAttributeValue string, directionAttributeValue string) { + if !mb.config.SystemDiskOperations.Enabled { + return + } + + dp := mb.metrics.systemDiskOperations.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(mb.startTime) + dp.SetTimestamp(ts) + dp.SetIntVal(val) + dp.Attributes().Insert(A.Device, pdata.NewAttributeValueString(deviceAttributeValue)) + dp.Attributes().Insert(A.Direction, pdata.NewAttributeValueString(directionAttributeValue)) +} + +// RecordSystemDiskPendingOperationsDataPoint adds a data point to system.disk.pending_operations metric. +// Any attribute of AttributeValueTypeEmpty type will be skipped. +func (mb *MetricsBuilder) RecordSystemDiskPendingOperationsDataPoint(ts pdata.Timestamp, val int64, deviceAttributeValue string) { + if !mb.config.SystemDiskPendingOperations.Enabled { + return + } + + dp := mb.metrics.systemDiskPendingOperations.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(mb.startTime) + dp.SetTimestamp(ts) + dp.SetIntVal(val) + dp.Attributes().Insert(A.Device, pdata.NewAttributeValueString(deviceAttributeValue)) +} + +// RecordSystemDiskWeightedIoTimeDataPoint adds a data point to system.disk.weighted_io_time metric. +// Any attribute of AttributeValueTypeEmpty type will be skipped. +func (mb *MetricsBuilder) RecordSystemDiskWeightedIoTimeDataPoint(ts pdata.Timestamp, val float64, deviceAttributeValue string) { + if !mb.config.SystemDiskWeightedIoTime.Enabled { + return + } + + dp := mb.metrics.systemDiskWeightedIoTime.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(mb.startTime) + dp.SetTimestamp(ts) + dp.SetDoubleVal(val) + dp.Attributes().Insert(A.Device, pdata.NewAttributeValueString(deviceAttributeValue)) +} + +// Attributes contains the possible metric attributes that can be used. +var Attributes = struct { + // Device (Name of the disk.) + Device string + // Direction (Direction of flow of bytes/opertations (read or write).) + Direction string +}{ + "device", + "direction", +} + +// A is an alias for Attributes. +var A = Attributes + +// AttributeDirection are the possible values that the attribute "direction" can have. +var AttributeDirection = struct { + Read string + Write string +}{ + "read", + "write", +} diff --git a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/metadata.yaml b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/metadata.yaml index a9eb0cb15a76..c89af3fb7e68 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/metadata.yaml +++ b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/metadata.yaml @@ -10,6 +10,7 @@ attributes: metrics: system.disk.io: + enabled: true description: Disk bytes transferred. 
unit: By sum: @@ -19,6 +20,7 @@ metrics: attributes: [device, direction] system.disk.operations: + enabled: true description: Disk operations count. unit: "{operations}" sum: @@ -28,6 +30,7 @@ metrics: attributes: [device, direction] system.disk.io_time: + enabled: true description: Time disk spent activated. On Windows, this is calculated as the inverse of disk idle time. unit: s sum: @@ -37,6 +40,7 @@ metrics: attributes: [device] system.disk.operation_time: + enabled: true description: Time spent in disk operations. unit: s sum: @@ -46,6 +50,7 @@ metrics: attributes: [device, direction] system.disk.weighted_io_time: + enabled: true description: Time disk spent activated multiplied by the queue length. unit: s sum: @@ -55,6 +60,7 @@ metrics: attributes: [device] system.disk.pending_operations: + enabled: true description: The queue size of pending I/O operations. unit: "{operations}" sum: @@ -64,6 +70,7 @@ metrics: attributes: [device] system.disk.merged: + enabled: true description: The number of disk reads merged into single physical disk access operations. unit: "{operations}" sum: diff --git a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/utils.go b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/utils.go deleted file mode 100644 index 05fe97724080..000000000000 --- a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/utils.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package diskscraper // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/diskscraper" - -import ( - "go.opentelemetry.io/collector/model/pdata" - - "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/diskscraper/internal/metadata" -) - -func initializeNumberDataPointAsInt(dataPoint pdata.NumberDataPoint, startTime, now pdata.Timestamp, deviceLabel string, directionLabel string, value int64) { - attributes := dataPoint.Attributes() - attributes.InsertString(metadata.Attributes.Device, deviceLabel) - if directionLabel != "" { - attributes.InsertString(metadata.Attributes.Direction, directionLabel) - } - dataPoint.SetStartTimestamp(startTime) - dataPoint.SetTimestamp(now) - dataPoint.SetIntVal(value) -} - -func initializeNumberDataPointAsDouble(dataPoint pdata.NumberDataPoint, startTime, now pdata.Timestamp, deviceLabel string, directionLabel string, value float64) { - attributes := dataPoint.Attributes() - attributes.InsertString(metadata.Attributes.Device, deviceLabel) - if directionLabel != "" { - attributes.InsertString(metadata.Attributes.Direction, directionLabel) - } - dataPoint.SetStartTimestamp(startTime) - dataPoint.SetTimestamp(now) - dataPoint.SetDoubleVal(value) -}
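
For reference, a minimal sketch (not part of the patch) of how a scraper is expected to use the MetricsBuilder generated above: defaults are taken from metadata.yaml, individual metrics can be disabled via the new `metrics` settings, and `Emit` flushes the recorded data points into a `pdata.MetricSlice`. The device name and values below are illustrative assumptions, and the `internal/metadata` import is only reachable from code inside the receiver module.

```go
package main

import (
	"time"

	"go.opentelemetry.io/collector/model/pdata"

	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/diskscraper/internal/metadata"
)

func main() {
	// Start from the generated defaults (every metric enabled) and turn one
	// metric off, mirroring the "Disable one metric" test case.
	settings := metadata.DefaultMetricsSettings()
	settings.SystemDiskIo.Enabled = false

	// The builder stamps every data point with the configured start time.
	mb := metadata.NewMetricsBuilder(settings, metadata.WithStartTime(pdata.NewTimestampFromTime(time.Now())))

	// Record data points; calls for disabled metrics are silently dropped.
	now := pdata.NewTimestampFromTime(time.Now())
	mb.RecordSystemDiskIoDataPoint(now, 1024, "sda", metadata.AttributeDirection.Read) // dropped: metric disabled above
	mb.RecordSystemDiskOperationsDataPoint(now, 42, "sda", metadata.AttributeDirection.Write)

	// Emit moves the accumulated data points into the target slice and resets
	// the builder for the next scrape cycle.
	md := pdata.NewMetrics()
	metrics := md.ResourceMetrics().AppendEmpty().InstrumentationLibraryMetrics().AppendEmpty().Metrics()
	mb.Emit(metrics)
}
```

This is the same flow the migrated scrapers follow: build the MetricsBuilder once in `start()` from `Config.Metrics`, call the `Record*DataPoint` helpers per device in `scrape()`, and finish with `Emit`.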