From 1f96387016b29948ea32fc8344821ff5c80319ea Mon Sep 17 00:00:00 2001 From: Tim Chan Date: Thu, 21 Nov 2024 16:59:42 -0800 Subject: [PATCH] Added blocks hit and blocks read metrics --- .chloggen/chan-tim_postgresMetrics.yaml | 27 ++++ receiver/postgresqlreceiver/client.go | 10 +- receiver/postgresqlreceiver/documentation.md | 19 ++- .../postgresqlreceiver/integration_test.go | 2 + .../internal/metadata/generated_config.go | 14 +- .../metadata/generated_config_test.go | 10 +- .../internal/metadata/generated_metrics.go | 133 ++++++++++++++++-- .../metadata/generated_metrics_test.go | 42 +++++- .../internal/metadata/testdata/config.yaml | 14 +- receiver/postgresqlreceiver/metadata.yaml | 18 ++- receiver/postgresqlreceiver/scraper.go | 10 +- receiver/postgresqlreceiver/scraper_test.go | 60 ++++++++ .../testdata/integration/expected_all_db.yaml | 20 +++ .../integration/expected_multi_db.yaml | 22 ++- .../integration/expected_single_db.yaml | 22 ++- .../testdata/scraper/multiple/expected.yaml | 62 +++++++- .../testdata/scraper/otel/expected.yaml | 62 +++++++- 17 files changed, 493 insertions(+), 54 deletions(-) create mode 100644 .chloggen/chan-tim_postgresMetrics.yaml diff --git a/.chloggen/chan-tim_postgresMetrics.yaml b/.chloggen/chan-tim_postgresMetrics.yaml new file mode 100644 index 000000000000..36cbbf6c1b49 --- /dev/null +++ b/.chloggen/chan-tim_postgresMetrics.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: postgresqlreceiver + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Added new postgresql metrics to achieve parity with Telegraf + +# Mandatory: One or more tracking issues related to the change. 
You can use the PR number here if no issue exists. +issues: [36528] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [] diff --git a/receiver/postgresqlreceiver/client.go b/receiver/postgresqlreceiver/client.go index 553fdf6be97c..dc0029873a10 100644 --- a/receiver/postgresqlreceiver/client.go +++ b/receiver/postgresqlreceiver/client.go @@ -139,11 +139,13 @@ type databaseStats struct { tupFetched int64 tupInserted int64 tupDeleted int64 + blksHit int64 + blksRead int64 } func (c *postgreSQLClient) getDatabaseStats(ctx context.Context, databases []string) (map[databaseName]databaseStats, error) { query := filterQueryByDatabases( - "SELECT datname, xact_commit, xact_rollback, deadlocks, temp_files, tup_updated, tup_returned, tup_fetched, tup_inserted, tup_deleted FROM pg_stat_database", + "SELECT datname, xact_commit, xact_rollback, deadlocks, temp_files, tup_updated, tup_returned, tup_fetched, tup_inserted, tup_deleted, blks_hit, blks_read FROM pg_stat_database", databases, false, ) @@ -158,8 +160,8 @@ func (c *postgreSQLClient) getDatabaseStats(ctx context.Context, databases []str for rows.Next() { var datname string - var transactionCommitted, transactionRollback, deadlocks, tempFiles, tupUpdated, tupReturned, tupFetched, tupInserted, tupDeleted int64 - err = rows.Scan(&datname, &transactionCommitted, &transactionRollback, &deadlocks, &tempFiles, 
&tupUpdated, &tupReturned, &tupFetched, &tupInserted, &tupDeleted) + var transactionCommitted, transactionRollback, deadlocks, tempFiles, tupUpdated, tupReturned, tupFetched, tupInserted, tupDeleted, blksHit, blksRead int64 + err = rows.Scan(&datname, &transactionCommitted, &transactionRollback, &deadlocks, &tempFiles, &tupUpdated, &tupReturned, &tupFetched, &tupInserted, &tupDeleted, &blksHit, &blksRead) if err != nil { errs = multierr.Append(errs, err) continue @@ -175,6 +177,8 @@ func (c *postgreSQLClient) getDatabaseStats(ctx context.Context, databases []str tupFetched: tupFetched, tupInserted: tupInserted, tupDeleted: tupDeleted, + blksHit: blksHit, + blksRead: blksRead, } } } diff --git a/receiver/postgresqlreceiver/documentation.md b/receiver/postgresqlreceiver/documentation.md index 3ff462ef66db..79d92e461829 100644 --- a/receiver/postgresqlreceiver/documentation.md +++ b/receiver/postgresqlreceiver/documentation.md @@ -78,6 +78,22 @@ Number of times the background writer stopped a cleaning scan because it had wri | ---- | ----------- | ---------- | ----------------------- | --------- | | 1 | Sum | Int | Cumulative | true | +### postgresql.blks_hit + +Number of times disk blocks were found already in the buffer cache. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {blks_hit} | Sum | Int | Cumulative | true | + +### postgresql.blks_read + +Number of disk blocks read in this database. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {blks_read} | Sum | Int | Cumulative | true | + ### postgresql.blocks_read The number of blocks read. @@ -214,8 +230,6 @@ Number of times a table has manually been vacuumed. 
| ---- | ----------- | ---------- | ----------------------- | --------- | | {vacuums} | Sum | Int | Cumulative | true | -<<<<<<< HEAD -======= ### postgresql.tup_deleted Number of rows deleted by queries in the database. @@ -248,7 +262,6 @@ Number of rows returned by queries in the database. | ---- | ----------- | ---------- | ----------------------- | --------- | | {tup_returned} | Sum | Int | Cumulative | true | ->>>>>>> a560b922dd (Added new postgresql metrics to acheive parity with Telegraf) ### postgresql.tup_updated Number of rows updated by queries in the database. diff --git a/receiver/postgresqlreceiver/integration_test.go b/receiver/postgresqlreceiver/integration_test.go index 85593abd5f0e..d73627615d46 100644 --- a/receiver/postgresqlreceiver/integration_test.go +++ b/receiver/postgresqlreceiver/integration_test.go @@ -84,6 +84,8 @@ func integrationTest(name string, databases []string) func(*testing.T) { rCfg.Metrics.PostgresqlTupFetched.Enabled = true rCfg.Metrics.PostgresqlTupInserted.Enabled = true rCfg.Metrics.PostgresqlTupDeleted.Enabled = true + rCfg.Metrics.PostgresqlBlksHit.Enabled = true + rCfg.Metrics.PostgresqlBlksRead.Enabled = true rCfg.Metrics.PostgresqlSequentialScans.Enabled = true rCfg.Metrics.PostgresqlDatabaseLocks.Enabled = true }), diff --git a/receiver/postgresqlreceiver/internal/metadata/generated_config.go b/receiver/postgresqlreceiver/internal/metadata/generated_config.go index d85766189309..22d30c93584a 100644 --- a/receiver/postgresqlreceiver/internal/metadata/generated_config.go +++ b/receiver/postgresqlreceiver/internal/metadata/generated_config.go @@ -34,6 +34,8 @@ type MetricsConfig struct { PostgresqlBgwriterCheckpointCount MetricConfig `mapstructure:"postgresql.bgwriter.checkpoint.count"` PostgresqlBgwriterDuration MetricConfig `mapstructure:"postgresql.bgwriter.duration"` PostgresqlBgwriterMaxwritten MetricConfig `mapstructure:"postgresql.bgwriter.maxwritten"` + PostgresqlBlksHit MetricConfig 
`mapstructure:"postgresql.blks_hit"` + PostgresqlBlksRead MetricConfig `mapstructure:"postgresql.blks_read"` PostgresqlBlocksRead MetricConfig `mapstructure:"postgresql.blocks_read"` PostgresqlCommits MetricConfig `mapstructure:"postgresql.commits"` PostgresqlConnectionMax MetricConfig `mapstructure:"postgresql.connection.max"` @@ -52,13 +54,10 @@ type MetricsConfig struct { PostgresqlTableSize MetricConfig `mapstructure:"postgresql.table.size"` PostgresqlTableVacuumCount MetricConfig `mapstructure:"postgresql.table.vacuum.count"` PostgresqlTempFiles MetricConfig `mapstructure:"postgresql.temp_files"` -<<<<<<< HEAD -======= PostgresqlTupDeleted MetricConfig `mapstructure:"postgresql.tup_deleted"` PostgresqlTupFetched MetricConfig `mapstructure:"postgresql.tup_fetched"` PostgresqlTupInserted MetricConfig `mapstructure:"postgresql.tup_inserted"` PostgresqlTupReturned MetricConfig `mapstructure:"postgresql.tup_returned"` ->>>>>>> a560b922dd (Added new postgresql metrics to acheive parity with Telegraf) PostgresqlTupUpdated MetricConfig `mapstructure:"postgresql.tup_updated"` PostgresqlWalAge MetricConfig `mapstructure:"postgresql.wal.age"` PostgresqlWalDelay MetricConfig `mapstructure:"postgresql.wal.delay"` @@ -85,6 +84,12 @@ func DefaultMetricsConfig() MetricsConfig { PostgresqlBgwriterMaxwritten: MetricConfig{ Enabled: true, }, + PostgresqlBlksHit: MetricConfig{ + Enabled: true, + }, + PostgresqlBlksRead: MetricConfig{ + Enabled: true, + }, PostgresqlBlocksRead: MetricConfig{ Enabled: true, }, @@ -139,8 +144,6 @@ func DefaultMetricsConfig() MetricsConfig { PostgresqlTempFiles: MetricConfig{ Enabled: false, }, -<<<<<<< HEAD -======= PostgresqlTupDeleted: MetricConfig{ Enabled: true, }, @@ -153,7 +156,6 @@ func DefaultMetricsConfig() MetricsConfig { PostgresqlTupReturned: MetricConfig{ Enabled: true, }, ->>>>>>> a560b922dd (Added new postgresql metrics to acheive parity with Telegraf) PostgresqlTupUpdated: MetricConfig{ Enabled: true, }, diff --git 
a/receiver/postgresqlreceiver/internal/metadata/generated_config_test.go b/receiver/postgresqlreceiver/internal/metadata/generated_config_test.go index 9a8dd6638e88..031e877ebe4d 100644 --- a/receiver/postgresqlreceiver/internal/metadata/generated_config_test.go +++ b/receiver/postgresqlreceiver/internal/metadata/generated_config_test.go @@ -31,6 +31,8 @@ func TestMetricsBuilderConfig(t *testing.T) { PostgresqlBgwriterCheckpointCount: MetricConfig{Enabled: true}, PostgresqlBgwriterDuration: MetricConfig{Enabled: true}, PostgresqlBgwriterMaxwritten: MetricConfig{Enabled: true}, + PostgresqlBlksHit: MetricConfig{Enabled: true}, + PostgresqlBlksRead: MetricConfig{Enabled: true}, PostgresqlBlocksRead: MetricConfig{Enabled: true}, PostgresqlCommits: MetricConfig{Enabled: true}, PostgresqlConnectionMax: MetricConfig{Enabled: true}, @@ -49,13 +51,10 @@ func TestMetricsBuilderConfig(t *testing.T) { PostgresqlTableSize: MetricConfig{Enabled: true}, PostgresqlTableVacuumCount: MetricConfig{Enabled: true}, PostgresqlTempFiles: MetricConfig{Enabled: true}, -<<<<<<< HEAD -======= PostgresqlTupDeleted: MetricConfig{Enabled: true}, PostgresqlTupFetched: MetricConfig{Enabled: true}, PostgresqlTupInserted: MetricConfig{Enabled: true}, PostgresqlTupReturned: MetricConfig{Enabled: true}, ->>>>>>> a560b922dd (Added new postgresql metrics to acheive parity with Telegraf) PostgresqlTupUpdated: MetricConfig{Enabled: true}, PostgresqlWalAge: MetricConfig{Enabled: true}, PostgresqlWalDelay: MetricConfig{Enabled: true}, @@ -79,6 +78,8 @@ func TestMetricsBuilderConfig(t *testing.T) { PostgresqlBgwriterCheckpointCount: MetricConfig{Enabled: false}, PostgresqlBgwriterDuration: MetricConfig{Enabled: false}, PostgresqlBgwriterMaxwritten: MetricConfig{Enabled: false}, + PostgresqlBlksHit: MetricConfig{Enabled: false}, + PostgresqlBlksRead: MetricConfig{Enabled: false}, PostgresqlBlocksRead: MetricConfig{Enabled: false}, PostgresqlCommits: MetricConfig{Enabled: false}, PostgresqlConnectionMax: 
MetricConfig{Enabled: false}, @@ -97,13 +98,10 @@ func TestMetricsBuilderConfig(t *testing.T) { PostgresqlTableSize: MetricConfig{Enabled: false}, PostgresqlTableVacuumCount: MetricConfig{Enabled: false}, PostgresqlTempFiles: MetricConfig{Enabled: false}, -<<<<<<< HEAD -======= PostgresqlTupDeleted: MetricConfig{Enabled: false}, PostgresqlTupFetched: MetricConfig{Enabled: false}, PostgresqlTupInserted: MetricConfig{Enabled: false}, PostgresqlTupReturned: MetricConfig{Enabled: false}, ->>>>>>> a560b922dd (Added new postgresql metrics to acheive parity with Telegraf) PostgresqlTupUpdated: MetricConfig{Enabled: false}, PostgresqlWalAge: MetricConfig{Enabled: false}, PostgresqlWalDelay: MetricConfig{Enabled: false}, diff --git a/receiver/postgresqlreceiver/internal/metadata/generated_metrics.go b/receiver/postgresqlreceiver/internal/metadata/generated_metrics.go index 8328c4313d7c..a45ef072aaa5 100644 --- a/receiver/postgresqlreceiver/internal/metadata/generated_metrics.go +++ b/receiver/postgresqlreceiver/internal/metadata/generated_metrics.go @@ -550,6 +550,108 @@ func newMetricPostgresqlBgwriterMaxwritten(cfg MetricConfig) metricPostgresqlBgw return m } +type metricPostgresqlBlksHit struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills postgresql.blks_hit metric with initial data. 
+func (m *metricPostgresqlBlksHit) init() { + m.data.SetName("postgresql.blks_hit") + m.data.SetDescription("Number of times disk blocks were found already in the buffer cache.") + m.data.SetUnit("{blks_hit}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricPostgresqlBlksHit) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricPostgresqlBlksHit) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricPostgresqlBlksHit) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricPostgresqlBlksHit(cfg MetricConfig) metricPostgresqlBlksHit { + m := metricPostgresqlBlksHit{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricPostgresqlBlksRead struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills postgresql.blks_read metric with initial data. 
+func (m *metricPostgresqlBlksRead) init() { + m.data.SetName("postgresql.blks_read") + m.data.SetDescription("Number of disk blocks read in this database.") + m.data.SetUnit("{blks_read}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricPostgresqlBlksRead) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricPostgresqlBlksRead) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricPostgresqlBlksRead) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricPostgresqlBlksRead(cfg MetricConfig) metricPostgresqlBlksRead { + m := metricPostgresqlBlksRead{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + type metricPostgresqlBlocksRead struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. @@ -1472,8 +1574,6 @@ func newMetricPostgresqlTempFiles(cfg MetricConfig) metricPostgresqlTempFiles { return m } -<<<<<<< HEAD -======= type metricPostgresqlTupDeleted struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. 
@@ -1678,7 +1778,6 @@ func newMetricPostgresqlTupReturned(cfg MetricConfig) metricPostgresqlTupReturne return m } ->>>>>>> a560b922dd (Added new postgresql metrics to acheive parity with Telegraf) type metricPostgresqlTupUpdated struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. @@ -1899,6 +1998,8 @@ type MetricsBuilder struct { metricPostgresqlBgwriterCheckpointCount metricPostgresqlBgwriterCheckpointCount metricPostgresqlBgwriterDuration metricPostgresqlBgwriterDuration metricPostgresqlBgwriterMaxwritten metricPostgresqlBgwriterMaxwritten + metricPostgresqlBlksHit metricPostgresqlBlksHit + metricPostgresqlBlksRead metricPostgresqlBlksRead metricPostgresqlBlocksRead metricPostgresqlBlocksRead metricPostgresqlCommits metricPostgresqlCommits metricPostgresqlConnectionMax metricPostgresqlConnectionMax @@ -1917,13 +2018,10 @@ type MetricsBuilder struct { metricPostgresqlTableSize metricPostgresqlTableSize metricPostgresqlTableVacuumCount metricPostgresqlTableVacuumCount metricPostgresqlTempFiles metricPostgresqlTempFiles -<<<<<<< HEAD -======= metricPostgresqlTupDeleted metricPostgresqlTupDeleted metricPostgresqlTupFetched metricPostgresqlTupFetched metricPostgresqlTupInserted metricPostgresqlTupInserted metricPostgresqlTupReturned metricPostgresqlTupReturned ->>>>>>> a560b922dd (Added new postgresql metrics to acheive parity with Telegraf) metricPostgresqlTupUpdated metricPostgresqlTupUpdated metricPostgresqlWalAge metricPostgresqlWalAge metricPostgresqlWalDelay metricPostgresqlWalDelay @@ -1960,6 +2058,8 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, opt metricPostgresqlBgwriterCheckpointCount: newMetricPostgresqlBgwriterCheckpointCount(mbc.Metrics.PostgresqlBgwriterCheckpointCount), metricPostgresqlBgwriterDuration: newMetricPostgresqlBgwriterDuration(mbc.Metrics.PostgresqlBgwriterDuration), metricPostgresqlBgwriterMaxwritten: 
newMetricPostgresqlBgwriterMaxwritten(mbc.Metrics.PostgresqlBgwriterMaxwritten), + metricPostgresqlBlksHit: newMetricPostgresqlBlksHit(mbc.Metrics.PostgresqlBlksHit), + metricPostgresqlBlksRead: newMetricPostgresqlBlksRead(mbc.Metrics.PostgresqlBlksRead), metricPostgresqlBlocksRead: newMetricPostgresqlBlocksRead(mbc.Metrics.PostgresqlBlocksRead), metricPostgresqlCommits: newMetricPostgresqlCommits(mbc.Metrics.PostgresqlCommits), metricPostgresqlConnectionMax: newMetricPostgresqlConnectionMax(mbc.Metrics.PostgresqlConnectionMax), @@ -1978,13 +2078,10 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, opt metricPostgresqlTableSize: newMetricPostgresqlTableSize(mbc.Metrics.PostgresqlTableSize), metricPostgresqlTableVacuumCount: newMetricPostgresqlTableVacuumCount(mbc.Metrics.PostgresqlTableVacuumCount), metricPostgresqlTempFiles: newMetricPostgresqlTempFiles(mbc.Metrics.PostgresqlTempFiles), -<<<<<<< HEAD -======= metricPostgresqlTupDeleted: newMetricPostgresqlTupDeleted(mbc.Metrics.PostgresqlTupDeleted), metricPostgresqlTupFetched: newMetricPostgresqlTupFetched(mbc.Metrics.PostgresqlTupFetched), metricPostgresqlTupInserted: newMetricPostgresqlTupInserted(mbc.Metrics.PostgresqlTupInserted), metricPostgresqlTupReturned: newMetricPostgresqlTupReturned(mbc.Metrics.PostgresqlTupReturned), ->>>>>>> a560b922dd (Added new postgresql metrics to acheive parity with Telegraf) metricPostgresqlTupUpdated: newMetricPostgresqlTupUpdated(mbc.Metrics.PostgresqlTupUpdated), metricPostgresqlWalAge: newMetricPostgresqlWalAge(mbc.Metrics.PostgresqlWalAge), metricPostgresqlWalDelay: newMetricPostgresqlWalDelay(mbc.Metrics.PostgresqlWalDelay), @@ -2091,6 +2188,8 @@ func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) { mb.metricPostgresqlBgwriterCheckpointCount.emit(ils.Metrics()) mb.metricPostgresqlBgwriterDuration.emit(ils.Metrics()) mb.metricPostgresqlBgwriterMaxwritten.emit(ils.Metrics()) + 
mb.metricPostgresqlBlksHit.emit(ils.Metrics()) + mb.metricPostgresqlBlksRead.emit(ils.Metrics()) mb.metricPostgresqlBlocksRead.emit(ils.Metrics()) mb.metricPostgresqlCommits.emit(ils.Metrics()) mb.metricPostgresqlConnectionMax.emit(ils.Metrics()) @@ -2109,13 +2208,10 @@ func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) { mb.metricPostgresqlTableSize.emit(ils.Metrics()) mb.metricPostgresqlTableVacuumCount.emit(ils.Metrics()) mb.metricPostgresqlTempFiles.emit(ils.Metrics()) -<<<<<<< HEAD -======= mb.metricPostgresqlTupDeleted.emit(ils.Metrics()) mb.metricPostgresqlTupFetched.emit(ils.Metrics()) mb.metricPostgresqlTupInserted.emit(ils.Metrics()) mb.metricPostgresqlTupReturned.emit(ils.Metrics()) ->>>>>>> a560b922dd (Added new postgresql metrics to acheive parity with Telegraf) mb.metricPostgresqlTupUpdated.emit(ils.Metrics()) mb.metricPostgresqlWalAge.emit(ils.Metrics()) mb.metricPostgresqlWalDelay.emit(ils.Metrics()) @@ -2181,6 +2277,16 @@ func (mb *MetricsBuilder) RecordPostgresqlBgwriterMaxwrittenDataPoint(ts pcommon mb.metricPostgresqlBgwriterMaxwritten.recordDataPoint(mb.startTime, ts, val) } +// RecordPostgresqlBlksHitDataPoint adds a data point to postgresql.blks_hit metric. +func (mb *MetricsBuilder) RecordPostgresqlBlksHitDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricPostgresqlBlksHit.recordDataPoint(mb.startTime, ts, val) +} + +// RecordPostgresqlBlksReadDataPoint adds a data point to postgresql.blks_read metric. +func (mb *MetricsBuilder) RecordPostgresqlBlksReadDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricPostgresqlBlksRead.recordDataPoint(mb.startTime, ts, val) +} + // RecordPostgresqlBlocksReadDataPoint adds a data point to postgresql.blocks_read metric. 
func (mb *MetricsBuilder) RecordPostgresqlBlocksReadDataPoint(ts pcommon.Timestamp, val int64, sourceAttributeValue AttributeSource) { mb.metricPostgresqlBlocksRead.recordDataPoint(mb.startTime, ts, val, sourceAttributeValue.String()) @@ -2271,8 +2377,6 @@ func (mb *MetricsBuilder) RecordPostgresqlTempFilesDataPoint(ts pcommon.Timestam mb.metricPostgresqlTempFiles.recordDataPoint(mb.startTime, ts, val) } -<<<<<<< HEAD -======= // RecordPostgresqlTupDeletedDataPoint adds a data point to postgresql.tup_deleted metric. func (mb *MetricsBuilder) RecordPostgresqlTupDeletedDataPoint(ts pcommon.Timestamp, val int64) { mb.metricPostgresqlTupDeleted.recordDataPoint(mb.startTime, ts, val) @@ -2293,7 +2397,6 @@ func (mb *MetricsBuilder) RecordPostgresqlTupReturnedDataPoint(ts pcommon.Timest mb.metricPostgresqlTupReturned.recordDataPoint(mb.startTime, ts, val) } ->>>>>>> a560b922dd (Added new postgresql metrics to acheive parity with Telegraf) // RecordPostgresqlTupUpdatedDataPoint adds a data point to postgresql.tup_updated metric. 
func (mb *MetricsBuilder) RecordPostgresqlTupUpdatedDataPoint(ts pcommon.Timestamp, val int64) { mb.metricPostgresqlTupUpdated.recordDataPoint(mb.startTime, ts, val) diff --git a/receiver/postgresqlreceiver/internal/metadata/generated_metrics_test.go b/receiver/postgresqlreceiver/internal/metadata/generated_metrics_test.go index c0e634c1e974..eceecb67e5f4 100644 --- a/receiver/postgresqlreceiver/internal/metadata/generated_metrics_test.go +++ b/receiver/postgresqlreceiver/internal/metadata/generated_metrics_test.go @@ -92,6 +92,14 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordPostgresqlBgwriterMaxwrittenDataPoint(ts, 1) + defaultMetricsCount++ + allMetricsCount++ + mb.RecordPostgresqlBlksHitDataPoint(ts, 1) + + defaultMetricsCount++ + allMetricsCount++ + mb.RecordPostgresqlBlksReadDataPoint(ts, 1) + defaultMetricsCount++ allMetricsCount++ mb.RecordPostgresqlBlocksReadDataPoint(ts, 1, AttributeSourceHeapRead) @@ -162,8 +170,6 @@ func TestMetricsBuilder(t *testing.T) { defaultMetricsCount++ allMetricsCount++ -<<<<<<< HEAD -======= mb.RecordPostgresqlTupDeletedDataPoint(ts, 1) defaultMetricsCount++ @@ -180,7 +186,6 @@ func TestMetricsBuilder(t *testing.T) { defaultMetricsCount++ allMetricsCount++ ->>>>>>> a560b922dd (Added new postgresql metrics to acheive parity with Telegraf) mb.RecordPostgresqlTupUpdatedDataPoint(ts, 1) defaultMetricsCount++ @@ -314,6 +319,34 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) assert.Equal(t, int64(1), dp.IntValue()) + case "postgresql.blks_hit": + assert.False(t, validatedMetrics["postgresql.blks_hit"], "Found a duplicate in the metrics slice: postgresql.blks_hit") + validatedMetrics["postgresql.blks_hit"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of times disk blocks were found already in the buffer cache.", 
ms.At(i).Description()) + assert.Equal(t, "{blks_hit}", ms.At(i).Unit()) + assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "postgresql.blks_read": + assert.False(t, validatedMetrics["postgresql.blks_read"], "Found a duplicate in the metrics slice: postgresql.blks_read") + validatedMetrics["postgresql.blks_read"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of disk blocks read in this database.", ms.At(i).Description()) + assert.Equal(t, "{blks_read}", ms.At(i).Unit()) + assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) case "postgresql.blocks_read": assert.False(t, validatedMetrics["postgresql.blocks_read"], "Found a duplicate in the metrics slice: postgresql.blocks_read") validatedMetrics["postgresql.blocks_read"] = true @@ -579,8 +612,6 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) assert.Equal(t, int64(1), dp.IntValue()) -<<<<<<< HEAD -======= case "postgresql.tup_deleted": assert.False(t, validatedMetrics["postgresql.tup_deleted"], "Found a duplicate in the metrics slice: postgresql.tup_deleted") validatedMetrics["postgresql.tup_deleted"] = true @@ -637,7 +668,6 @@ func TestMetricsBuilder(t 
*testing.T) { assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) assert.Equal(t, int64(1), dp.IntValue()) ->>>>>>> a560b922dd (Added new postgresql metrics to acheive parity with Telegraf) case "postgresql.tup_updated": assert.False(t, validatedMetrics["postgresql.tup_updated"], "Found a duplicate in the metrics slice: postgresql.tup_updated") validatedMetrics["postgresql.tup_updated"] = true diff --git a/receiver/postgresqlreceiver/internal/metadata/testdata/config.yaml b/receiver/postgresqlreceiver/internal/metadata/testdata/config.yaml index f219d395ba13..71b1192ade06 100644 --- a/receiver/postgresqlreceiver/internal/metadata/testdata/config.yaml +++ b/receiver/postgresqlreceiver/internal/metadata/testdata/config.yaml @@ -13,6 +13,10 @@ all_set: enabled: true postgresql.bgwriter.maxwritten: enabled: true + postgresql.blks_hit: + enabled: true + postgresql.blks_read: + enabled: true postgresql.blocks_read: enabled: true postgresql.commits: @@ -49,8 +53,6 @@ all_set: enabled: true postgresql.temp_files: enabled: true -<<<<<<< HEAD -======= postgresql.tup_deleted: enabled: true postgresql.tup_fetched: @@ -59,7 +61,6 @@ all_set: enabled: true postgresql.tup_returned: enabled: true ->>>>>>> a560b922dd (Added new postgresql metrics to acheive parity with Telegraf) postgresql.tup_updated: enabled: true postgresql.wal.age: @@ -91,6 +92,10 @@ none_set: enabled: false postgresql.bgwriter.maxwritten: enabled: false + postgresql.blks_hit: + enabled: false + postgresql.blks_read: + enabled: false postgresql.blocks_read: enabled: false postgresql.commits: @@ -127,8 +132,6 @@ none_set: enabled: false postgresql.temp_files: enabled: false -<<<<<<< HEAD -======= postgresql.tup_deleted: enabled: false postgresql.tup_fetched: @@ -137,7 +140,6 @@ none_set: enabled: false postgresql.tup_returned: enabled: false ->>>>>>> a560b922dd (Added new postgresql metrics to acheive parity with Telegraf) postgresql.tup_updated: enabled: 
false postgresql.wal.age: diff --git a/receiver/postgresqlreceiver/metadata.yaml b/receiver/postgresqlreceiver/metadata.yaml index 6d59d9cce4f2..b47e3cdd7e4c 100644 --- a/receiver/postgresqlreceiver/metadata.yaml +++ b/receiver/postgresqlreceiver/metadata.yaml @@ -349,6 +349,22 @@ metrics: sum: value_type: int monotonic: true - aggregation_temporality: cumulative + aggregation_temporality: cumulative + postgresql.blks_hit: + enabled: true + description: Number of times disk blocks were found already in the buffer cache. + unit: "{blks_hit}" + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative + postgresql.blks_read: + enabled: true + description: Number of disk blocks read in this database. + unit: "{blks_read}" + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative tests: config: diff --git a/receiver/postgresqlreceiver/scraper.go b/receiver/postgresqlreceiver/scraper.go index 9db1fff6e147..55d140716d22 100644 --- a/receiver/postgresqlreceiver/scraper.go +++ b/receiver/postgresqlreceiver/scraper.go @@ -202,10 +202,12 @@ func (p *postgreSQLScraper) recordDatabase(now pcommon.Timestamp, db string, r * p.mb.RecordPostgresqlDeadlocksDataPoint(now, stats.deadlocks) p.mb.RecordPostgresqlTempFilesDataPoint(now, stats.tempFiles) p.mb.RecordPostgresqlTupUpdatedDataPoint(now, stats.tupUpdated) - p.mb.RecordPostgresqlTupReturnedDataPoint(now, stats.tupUpdated) - p.mb.RecordPostgresqlTupFetchedDataPoint(now, stats.tupUpdated) - p.mb.RecordPostgresqlTupInsertedDataPoint(now, stats.tupUpdated) - p.mb.RecordPostgresqlTupDeletedDataPoint(now, stats.tupUpdated) + p.mb.RecordPostgresqlTupReturnedDataPoint(now, stats.tupReturned) + p.mb.RecordPostgresqlTupFetchedDataPoint(now, stats.tupFetched) + p.mb.RecordPostgresqlTupInsertedDataPoint(now, stats.tupInserted) + p.mb.RecordPostgresqlTupDeletedDataPoint(now, stats.tupDeleted) + p.mb.RecordPostgresqlBlksHitDataPoint(now, stats.blksHit) + p.mb.RecordPostgresqlBlksReadDataPoint(now, 
stats.blksRead) } rb := p.mb.NewResourceBuilder() rb.SetPostgresqlDatabaseName(db) diff --git a/receiver/postgresqlreceiver/scraper_test.go b/receiver/postgresqlreceiver/scraper_test.go index 6e63cddbf7fe..d880e7d6e7e6 100644 --- a/receiver/postgresqlreceiver/scraper_test.go +++ b/receiver/postgresqlreceiver/scraper_test.go @@ -50,6 +50,8 @@ func TestScraper(t *testing.T) { cfg.Metrics.PostgresqlTupFetched.Enabled = true cfg.Metrics.PostgresqlTupInserted.Enabled = true cfg.Metrics.PostgresqlTupDeleted.Enabled = true + cfg.Metrics.PostgresqlBlksHit.Enabled = true + cfg.Metrics.PostgresqlBlksRead.Enabled = true cfg.Metrics.PostgresqlSequentialScans.Enabled = true cfg.Metrics.PostgresqlDatabaseLocks.Enabled = true @@ -96,6 +98,10 @@ func TestScraperNoDatabaseSingle(t *testing.T) { cfg.Metrics.PostgresqlTupInserted.Enabled = true require.False(t, cfg.Metrics.PostgresqlTupDeleted.Enabled) cfg.Metrics.PostgresqlTupDeleted.Enabled = true + require.False(t, cfg.Metrics.PostgresqlBlksHit.Enabled) + cfg.Metrics.PostgresqlBlksHit.Enabled = true + require.False(t, cfg.Metrics.PostgresqlBlksRead.Enabled) + cfg.Metrics.PostgresqlBlksRead.Enabled = true require.False(t, cfg.Metrics.PostgresqlSequentialScans.Enabled) cfg.Metrics.PostgresqlSequentialScans.Enabled = true require.False(t, cfg.Metrics.PostgresqlDatabaseLocks.Enabled) @@ -116,6 +122,12 @@ func TestScraperNoDatabaseSingle(t *testing.T) { cfg.Metrics.PostgresqlDeadlocks.Enabled = false cfg.Metrics.PostgresqlTempFiles.Enabled = false cfg.Metrics.PostgresqlTupUpdated.Enabled = false + cfg.Metrics.PostgresqlTupReturned.Enabled = false + cfg.Metrics.PostgresqlTupFetched.Enabled = false + cfg.Metrics.PostgresqlTupInserted.Enabled = false + cfg.Metrics.PostgresqlTupDeleted.Enabled = false + cfg.Metrics.PostgresqlBlksHit.Enabled = false + cfg.Metrics.PostgresqlBlksRead.Enabled = false cfg.Metrics.PostgresqlSequentialScans.Enabled = false cfg.Metrics.PostgresqlDatabaseLocks.Enabled = false @@ -153,6 +165,18 @@ func 
TestScraperNoDatabaseMultipleWithoutPreciseLag(t *testing.T) { cfg.Metrics.PostgresqlTempFiles.Enabled = true require.False(t, cfg.Metrics.PostgresqlTupUpdated.Enabled) cfg.Metrics.PostgresqlTupUpdated.Enabled = true + require.False(t, cfg.Metrics.PostgresqlTupReturned.Enabled) + cfg.Metrics.PostgresqlTupReturned.Enabled = true + require.False(t, cfg.Metrics.PostgresqlTupFetched.Enabled) + cfg.Metrics.PostgresqlTupFetched.Enabled = true + require.False(t, cfg.Metrics.PostgresqlTupInserted.Enabled) + cfg.Metrics.PostgresqlTupInserted.Enabled = true + require.False(t, cfg.Metrics.PostgresqlTupDeleted.Enabled) + cfg.Metrics.PostgresqlTupDeleted.Enabled = true + require.False(t, cfg.Metrics.PostgresqlBlksHit.Enabled) + cfg.Metrics.PostgresqlBlksHit.Enabled = true + require.False(t, cfg.Metrics.PostgresqlBlksRead.Enabled) + cfg.Metrics.PostgresqlBlksRead.Enabled = true require.False(t, cfg.Metrics.PostgresqlSequentialScans.Enabled) cfg.Metrics.PostgresqlSequentialScans.Enabled = true require.False(t, cfg.Metrics.PostgresqlDatabaseLocks.Enabled) @@ -192,6 +216,18 @@ func TestScraperNoDatabaseMultiple(t *testing.T) { cfg.Metrics.PostgresqlTempFiles.Enabled = true require.False(t, cfg.Metrics.PostgresqlTupUpdated.Enabled) cfg.Metrics.PostgresqlTupUpdated.Enabled = true + require.False(t, cfg.Metrics.PostgresqlTupReturned.Enabled) + cfg.Metrics.PostgresqlTupReturned.Enabled = true + require.False(t, cfg.Metrics.PostgresqlTupFetched.Enabled) + cfg.Metrics.PostgresqlTupFetched.Enabled = true + require.False(t, cfg.Metrics.PostgresqlTupInserted.Enabled) + cfg.Metrics.PostgresqlTupInserted.Enabled = true + require.False(t, cfg.Metrics.PostgresqlTupDeleted.Enabled) + cfg.Metrics.PostgresqlTupDeleted.Enabled = true + require.False(t, cfg.Metrics.PostgresqlBlksHit.Enabled) + cfg.Metrics.PostgresqlBlksHit.Enabled = true + require.False(t, cfg.Metrics.PostgresqlBlksRead.Enabled) + cfg.Metrics.PostgresqlBlksRead.Enabled = true require.False(t, 
cfg.Metrics.PostgresqlSequentialScans.Enabled) cfg.Metrics.PostgresqlSequentialScans.Enabled = true require.False(t, cfg.Metrics.PostgresqlDatabaseLocks.Enabled) @@ -231,6 +267,18 @@ func TestScraperWithResourceAttributeFeatureGate(t *testing.T) { cfg.Metrics.PostgresqlTempFiles.Enabled = true require.False(t, cfg.Metrics.PostgresqlTupUpdated.Enabled) cfg.Metrics.PostgresqlTupUpdated.Enabled = true + require.False(t, cfg.Metrics.PostgresqlTupReturned.Enabled) + cfg.Metrics.PostgresqlTupReturned.Enabled = true + require.False(t, cfg.Metrics.PostgresqlTupFetched.Enabled) + cfg.Metrics.PostgresqlTupFetched.Enabled = true + require.False(t, cfg.Metrics.PostgresqlTupInserted.Enabled) + cfg.Metrics.PostgresqlTupInserted.Enabled = true + require.False(t, cfg.Metrics.PostgresqlTupDeleted.Enabled) + cfg.Metrics.PostgresqlTupDeleted.Enabled = true + require.False(t, cfg.Metrics.PostgresqlBlksHit.Enabled) + cfg.Metrics.PostgresqlBlksHit.Enabled = true + require.False(t, cfg.Metrics.PostgresqlBlksRead.Enabled) + cfg.Metrics.PostgresqlBlksRead.Enabled = true require.False(t, cfg.Metrics.PostgresqlSequentialScans.Enabled) cfg.Metrics.PostgresqlSequentialScans.Enabled = true require.False(t, cfg.Metrics.PostgresqlDatabaseLocks.Enabled) @@ -271,6 +319,18 @@ func TestScraperWithResourceAttributeFeatureGateSingle(t *testing.T) { cfg.Metrics.PostgresqlTempFiles.Enabled = true require.False(t, cfg.Metrics.PostgresqlTupUpdated.Enabled) cfg.Metrics.PostgresqlTupUpdated.Enabled = true + require.False(t, cfg.Metrics.PostgresqlTupReturned.Enabled) + cfg.Metrics.PostgresqlTupReturned.Enabled = true + require.False(t, cfg.Metrics.PostgresqlTupFetched.Enabled) + cfg.Metrics.PostgresqlTupFetched.Enabled = true + require.False(t, cfg.Metrics.PostgresqlTupInserted.Enabled) + cfg.Metrics.PostgresqlTupInserted.Enabled = true + require.False(t, cfg.Metrics.PostgresqlTupDeleted.Enabled) + cfg.Metrics.PostgresqlTupDeleted.Enabled = true + require.False(t, cfg.Metrics.PostgresqlBlksHit.Enabled) + 
cfg.Metrics.PostgresqlBlksHit.Enabled = true + require.False(t, cfg.Metrics.PostgresqlBlksRead.Enabled) + cfg.Metrics.PostgresqlBlksRead.Enabled = true require.False(t, cfg.Metrics.PostgresqlSequentialScans.Enabled) cfg.Metrics.PostgresqlSequentialScans.Enabled = true require.False(t, cfg.Metrics.PostgresqlDatabaseLocks.Enabled) diff --git a/receiver/postgresqlreceiver/testdata/integration/expected_all_db.yaml b/receiver/postgresqlreceiver/testdata/integration/expected_all_db.yaml index 9464320147ac..da18457343ad 100644 --- a/receiver/postgresqlreceiver/testdata/integration/expected_all_db.yaml +++ b/receiver/postgresqlreceiver/testdata/integration/expected_all_db.yaml @@ -304,6 +304,26 @@ resourceMetrics: timeUnixNano: "2000000" isMonotonic: true unit: '{tup_deleted}' + - description: Number of times disk blocks were found already in the buffer cache. + name: postgresql.blks_hit + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_hit}' + - description: Number of disk blocks read in this database. 
+ name: postgresql.blks_read + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_read}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver version: latest diff --git a/receiver/postgresqlreceiver/testdata/integration/expected_multi_db.yaml b/receiver/postgresqlreceiver/testdata/integration/expected_multi_db.yaml index b9d0c728021e..5d6c91c27eae 100644 --- a/receiver/postgresqlreceiver/testdata/integration/expected_multi_db.yaml +++ b/receiver/postgresqlreceiver/testdata/integration/expected_multi_db.yaml @@ -381,7 +381,27 @@ resourceMetrics: startTimeUnixNano: "1000000" timeUnixNano: "2000000" isMonotonic: true - unit: '{tup_deleted}' + unit: '{tup_deleted}' + - description: Number of times disk blocks were found already in the buffer cache. + name: postgresql.blks_hit + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_hit}' + - description: Number of disk blocks read in this database. 
+ name: postgresql.blks_read + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_read}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver version: latest diff --git a/receiver/postgresqlreceiver/testdata/integration/expected_single_db.yaml b/receiver/postgresqlreceiver/testdata/integration/expected_single_db.yaml index 04579405a977..806a5824d751 100644 --- a/receiver/postgresqlreceiver/testdata/integration/expected_single_db.yaml +++ b/receiver/postgresqlreceiver/testdata/integration/expected_single_db.yaml @@ -303,7 +303,27 @@ resourceMetrics: startTimeUnixNano: "1000000" timeUnixNano: "2000000" isMonotonic: true - unit: '{tup_deleted}' + unit: '{tup_deleted}' + - description: Number of times disk blocks were found already in the buffer cache. + name: postgresql.blks_hit + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_hit}' + - description: Number of disk blocks read in this database. 
+ name: postgresql.blks_read + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_read}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver version: latest diff --git a/receiver/postgresqlreceiver/testdata/scraper/multiple/expected.yaml b/receiver/postgresqlreceiver/testdata/scraper/multiple/expected.yaml index db44061d3715..d3766d9b31da 100644 --- a/receiver/postgresqlreceiver/testdata/scraper/multiple/expected.yaml +++ b/receiver/postgresqlreceiver/testdata/scraper/multiple/expected.yaml @@ -462,7 +462,67 @@ resourceMetrics: startTimeUnixNano: "1000000" timeUnixNano: "2000000" isMonotonic: true - unit: '{tup_updated}' + unit: '{tup_updated}' + - description: Number of rows returned by queries in the database. + name: postgresql.tup_returned + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_returned}' + - description: Number of rows fetched by queries in the database. + name: postgresql.tup_fetched + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_fetched}' + - description: Number of rows inserted by queries in the database. + name: postgresql.tup_inserted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_inserted}' + - description: Number of rows deleted by queries in the database. + name: postgresql.tup_deleted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_deleted}' + - description: Number of times disk blocks were found already in the buffer cache. 
+ name: postgresql.blks_hit + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_hit}' + - description: Number of disk blocks read in this database. + name: postgresql.blks_read + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_read}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver version: latest diff --git a/receiver/postgresqlreceiver/testdata/scraper/otel/expected.yaml b/receiver/postgresqlreceiver/testdata/scraper/otel/expected.yaml index cb679594de7e..725bfdd3fd3d 100644 --- a/receiver/postgresqlreceiver/testdata/scraper/otel/expected.yaml +++ b/receiver/postgresqlreceiver/testdata/scraper/otel/expected.yaml @@ -288,7 +288,67 @@ resourceMetrics: startTimeUnixNano: "1000000" timeUnixNano: "2000000" isMonotonic: true - unit: '{tup_updated}' + unit: '{tup_updated}' + - description: Number of rows returned by queries in the database. + name: postgresql.tup_returned + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_returned}' + - description: Number of rows fetched by queries in the database. + name: postgresql.tup_fetched + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_fetched}' + - description: Number of rows inserted by queries in the database. + name: postgresql.tup_inserted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_inserted}' + - description: Number of rows deleted by queries in the database. 
+ name: postgresql.tup_deleted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_deleted}' + - description: Number of times disk blocks were found already in the buffer cache. + name: postgresql.blks_hit + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_hit}' + - description: Number of disk blocks read in this database. + name: postgresql.blks_read + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_read}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver version: latest