From adbb96afd6f223bf446952152f881d2a680071fc Mon Sep 17 00:00:00 2001 From: Tim Date: Wed, 27 Nov 2024 15:03:52 -0800 Subject: [PATCH] [receiver/postgresqlreceiver] Added new postgresql metrics to achieve parity with Telegraf (#36528) --- .chloggen/chan-tim_postgresMetrics.yaml | 27 ++ receiver/postgresqlreceiver/client.go | 27 +- receiver/postgresqlreceiver/documentation.md | 56 +++ .../postgresqlreceiver/integration_test.go | 7 + .../internal/metadata/generated_config.go | 28 ++ .../metadata/generated_config_test.go | 14 + .../internal/metadata/generated_metrics.go | 413 ++++++++++++++++++ .../metadata/generated_metrics_test.go | 119 +++++ .../internal/metadata/testdata/config.yaml | 28 ++ receiver/postgresqlreceiver/metadata.yaml | 57 ++- receiver/postgresqlreceiver/scraper.go | 7 + receiver/postgresqlreceiver/scraper_test.go | 91 ++++ .../testdata/integration/expected_all_db.yaml | 210 +++++++++ .../integration/expected_all_db_connpool.yaml | 210 +++++++++ .../expected_all_db_schemaattr.yaml | 210 +++++++++ .../integration/expected_multi_db.yaml | 140 ++++++ .../expected_multi_db_connpool.yaml | 140 ++++++ .../expected_multi_db_schemaattr.yaml | 140 ++++++ .../integration/expected_single_db.yaml | 70 +++ .../expected_single_db_connpool.yaml | 70 +++ .../expected_single_db_schemaattr.yaml | 70 +++ .../testdata/scraper/multiple/expected.yaml | 210 +++++++++ .../multiple/expected_imprecise_lag.yaml | 210 +++++++++ .../expected_imprecise_lag_schemaattr.yaml | 210 +++++++++ .../scraper/multiple/expected_schemaattr.yaml | 210 +++++++++ .../testdata/scraper/otel/expected.yaml | 70 +++ .../scraper/otel/expected_schemaattr.yaml | 70 +++ 27 files changed, 3110 insertions(+), 4 deletions(-) create mode 100644 .chloggen/chan-tim_postgresMetrics.yaml diff --git a/.chloggen/chan-tim_postgresMetrics.yaml b/.chloggen/chan-tim_postgresMetrics.yaml new file mode 100644 index 000000000000..36cbbf6c1b49 --- /dev/null +++ b/.chloggen/chan-tim_postgresMetrics.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: postgresqlreceiver + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Added new postgresql metrics to achieve parity with Telegraf + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [36528] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. 
+# Default: '[user]' +change_logs: [] diff --git a/receiver/postgresqlreceiver/client.go b/receiver/postgresqlreceiver/client.go index 056167eea49c..dc0029873a10 100644 --- a/receiver/postgresqlreceiver/client.go +++ b/receiver/postgresqlreceiver/client.go @@ -134,20 +134,34 @@ type databaseStats struct { transactionRollback int64 deadlocks int64 tempFiles int64 + tupUpdated int64 + tupReturned int64 + tupFetched int64 + tupInserted int64 + tupDeleted int64 + blksHit int64 + blksRead int64 } func (c *postgreSQLClient) getDatabaseStats(ctx context.Context, databases []string) (map[databaseName]databaseStats, error) { - query := filterQueryByDatabases("SELECT datname, xact_commit, xact_rollback, deadlocks, temp_files FROM pg_stat_database", databases, false) + query := filterQueryByDatabases( + "SELECT datname, xact_commit, xact_rollback, deadlocks, temp_files, tup_updated, tup_returned, tup_fetched, tup_inserted, tup_deleted, blks_hit, blks_read FROM pg_stat_database", + databases, + false, + ) + rows, err := c.client.QueryContext(ctx, query) if err != nil { return nil, err } + var errs error dbStats := map[databaseName]databaseStats{} + for rows.Next() { var datname string - var transactionCommitted, transactionRollback, deadlocks, tempFiles int64 - err = rows.Scan(&datname, &transactionCommitted, &transactionRollback, &deadlocks, &tempFiles) + var transactionCommitted, transactionRollback, deadlocks, tempFiles, tupUpdated, tupReturned, tupFetched, tupInserted, tupDeleted, blksHit, blksRead int64 + err = rows.Scan(&datname, &transactionCommitted, &transactionRollback, &deadlocks, &tempFiles, &tupUpdated, &tupReturned, &tupFetched, &tupInserted, &tupDeleted, &blksHit, &blksRead) if err != nil { errs = multierr.Append(errs, err) continue @@ -158,6 +172,13 @@ func (c *postgreSQLClient) getDatabaseStats(ctx context.Context, databases []str transactionRollback: transactionRollback, deadlocks: deadlocks, tempFiles: tempFiles, + tupUpdated: tupUpdated, + tupReturned: tupReturned, + tupFetched: tupFetched, + tupInserted: tupInserted, + tupDeleted: tupDeleted, + blksHit: blksHit, + blksRead: blksRead, } } } diff --git a/receiver/postgresqlreceiver/documentation.md b/receiver/postgresqlreceiver/documentation.md index 0de5881a955a..ba9e0ce2548a 100644 --- a/receiver/postgresqlreceiver/documentation.md +++ b/receiver/postgresqlreceiver/documentation.md @@ -253,6 +253,22 @@ metrics: enabled: true ``` +### postgresql.blks_hit + +Number of times disk blocks were found already in the buffer cache. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {blks_hit} | Sum | Int | Cumulative | true | + +### postgresql.blks_read + +Number of disk blocks read in this database. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {blks_read} | Sum | Int | Cumulative | true | + ### postgresql.database.locks The number of database locks. @@ -293,6 +309,46 @@ The number of temp files. | ---- | ----------- | ---------- | ----------------------- | --------- | | {temp_file} | Sum | Int | Cumulative | true | +### postgresql.tup_deleted + +Number of rows deleted by queries in the database. 
+ +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {tup_deleted} | Sum | Int | Cumulative | true | + +### postgresql.tup_fetched + +Number of rows fetched by queries in the database. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {tup_fetched} | Sum | Int | Cumulative | true | + +### postgresql.tup_inserted + +Number of rows inserted by queries in the database. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {tup_inserted} | Sum | Int | Cumulative | true | + +### postgresql.tup_returned + +Number of rows returned by queries in the database. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {tup_returned} | Sum | Int | Cumulative | true | + +### postgresql.tup_updated + +Number of rows updated by queries in the database. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {tup_updated} | Sum | Int | Cumulative | true | + ### postgresql.wal.delay Time between flushing recent WAL locally and receiving notification that the standby server has completed an operation with it. diff --git a/receiver/postgresqlreceiver/integration_test.go b/receiver/postgresqlreceiver/integration_test.go index cadd953bb701..d73627615d46 100644 --- a/receiver/postgresqlreceiver/integration_test.go +++ b/receiver/postgresqlreceiver/integration_test.go @@ -79,6 +79,13 @@ func integrationTest(name string, databases []string) func(*testing.T) { rCfg.Metrics.PostgresqlWalDelay.Enabled = true rCfg.Metrics.PostgresqlDeadlocks.Enabled = true rCfg.Metrics.PostgresqlTempFiles.Enabled = true + rCfg.Metrics.PostgresqlTupUpdated.Enabled = true + rCfg.Metrics.PostgresqlTupReturned.Enabled = true + rCfg.Metrics.PostgresqlTupFetched.Enabled = true + rCfg.Metrics.PostgresqlTupInserted.Enabled = true + rCfg.Metrics.PostgresqlTupDeleted.Enabled = true + rCfg.Metrics.PostgresqlBlksHit.Enabled = true + rCfg.Metrics.PostgresqlBlksRead.Enabled = true rCfg.Metrics.PostgresqlSequentialScans.Enabled = true rCfg.Metrics.PostgresqlDatabaseLocks.Enabled = true }), diff --git a/receiver/postgresqlreceiver/internal/metadata/generated_config.go b/receiver/postgresqlreceiver/internal/metadata/generated_config.go index a0a53f803403..d63330f7e3f7 100644 --- a/receiver/postgresqlreceiver/internal/metadata/generated_config.go +++ b/receiver/postgresqlreceiver/internal/metadata/generated_config.go @@ -34,6 +34,8 @@ type MetricsConfig struct { PostgresqlBgwriterCheckpointCount MetricConfig `mapstructure:"postgresql.bgwriter.checkpoint.count"` PostgresqlBgwriterDuration MetricConfig `mapstructure:"postgresql.bgwriter.duration"` PostgresqlBgwriterMaxwritten MetricConfig `mapstructure:"postgresql.bgwriter.maxwritten"` + PostgresqlBlksHit MetricConfig `mapstructure:"postgresql.blks_hit"` + PostgresqlBlksRead MetricConfig `mapstructure:"postgresql.blks_read"` PostgresqlBlocksRead MetricConfig `mapstructure:"postgresql.blocks_read"` PostgresqlCommits MetricConfig `mapstructure:"postgresql.commits"` PostgresqlConnectionMax MetricConfig `mapstructure:"postgresql.connection.max"` @@ -52,6 +54,11 @@ type MetricsConfig struct { PostgresqlTableSize 
MetricConfig `mapstructure:"postgresql.table.size"` PostgresqlTableVacuumCount MetricConfig `mapstructure:"postgresql.table.vacuum.count"` PostgresqlTempFiles MetricConfig `mapstructure:"postgresql.temp_files"` + PostgresqlTupDeleted MetricConfig `mapstructure:"postgresql.tup_deleted"` + PostgresqlTupFetched MetricConfig `mapstructure:"postgresql.tup_fetched"` + PostgresqlTupInserted MetricConfig `mapstructure:"postgresql.tup_inserted"` + PostgresqlTupReturned MetricConfig `mapstructure:"postgresql.tup_returned"` + PostgresqlTupUpdated MetricConfig `mapstructure:"postgresql.tup_updated"` PostgresqlWalAge MetricConfig `mapstructure:"postgresql.wal.age"` PostgresqlWalDelay MetricConfig `mapstructure:"postgresql.wal.delay"` PostgresqlWalLag MetricConfig `mapstructure:"postgresql.wal.lag"` @@ -77,6 +84,12 @@ func DefaultMetricsConfig() MetricsConfig { PostgresqlBgwriterMaxwritten: MetricConfig{ Enabled: true, }, + PostgresqlBlksHit: MetricConfig{ + Enabled: false, + }, + PostgresqlBlksRead: MetricConfig{ + Enabled: false, + }, PostgresqlBlocksRead: MetricConfig{ Enabled: true, }, @@ -131,6 +144,21 @@ func DefaultMetricsConfig() MetricsConfig { PostgresqlTempFiles: MetricConfig{ Enabled: false, }, + PostgresqlTupDeleted: MetricConfig{ + Enabled: false, + }, + PostgresqlTupFetched: MetricConfig{ + Enabled: false, + }, + PostgresqlTupInserted: MetricConfig{ + Enabled: false, + }, + PostgresqlTupReturned: MetricConfig{ + Enabled: false, + }, + PostgresqlTupUpdated: MetricConfig{ + Enabled: false, + }, PostgresqlWalAge: MetricConfig{ Enabled: true, }, diff --git a/receiver/postgresqlreceiver/internal/metadata/generated_config_test.go b/receiver/postgresqlreceiver/internal/metadata/generated_config_test.go index e97658befe45..aa7fc527db16 100644 --- a/receiver/postgresqlreceiver/internal/metadata/generated_config_test.go +++ b/receiver/postgresqlreceiver/internal/metadata/generated_config_test.go @@ -31,6 +31,8 @@ func TestMetricsBuilderConfig(t *testing.T) { PostgresqlBgwriterCheckpointCount: MetricConfig{Enabled: true}, PostgresqlBgwriterDuration: MetricConfig{Enabled: true}, PostgresqlBgwriterMaxwritten: MetricConfig{Enabled: true}, + PostgresqlBlksHit: MetricConfig{Enabled: true}, + PostgresqlBlksRead: MetricConfig{Enabled: true}, PostgresqlBlocksRead: MetricConfig{Enabled: true}, PostgresqlCommits: MetricConfig{Enabled: true}, PostgresqlConnectionMax: MetricConfig{Enabled: true}, @@ -49,6 +51,11 @@ func TestMetricsBuilderConfig(t *testing.T) { PostgresqlTableSize: MetricConfig{Enabled: true}, PostgresqlTableVacuumCount: MetricConfig{Enabled: true}, PostgresqlTempFiles: MetricConfig{Enabled: true}, + PostgresqlTupDeleted: MetricConfig{Enabled: true}, + PostgresqlTupFetched: MetricConfig{Enabled: true}, + PostgresqlTupInserted: MetricConfig{Enabled: true}, + PostgresqlTupReturned: MetricConfig{Enabled: true}, + PostgresqlTupUpdated: MetricConfig{Enabled: true}, PostgresqlWalAge: MetricConfig{Enabled: true}, PostgresqlWalDelay: MetricConfig{Enabled: true}, PostgresqlWalLag: MetricConfig{Enabled: true}, @@ -71,6 +78,8 @@ func TestMetricsBuilderConfig(t *testing.T) { PostgresqlBgwriterCheckpointCount: MetricConfig{Enabled: false}, PostgresqlBgwriterDuration: MetricConfig{Enabled: false}, PostgresqlBgwriterMaxwritten: MetricConfig{Enabled: false}, + PostgresqlBlksHit: MetricConfig{Enabled: false}, + PostgresqlBlksRead: MetricConfig{Enabled: false}, PostgresqlBlocksRead: MetricConfig{Enabled: false}, PostgresqlCommits: MetricConfig{Enabled: false}, PostgresqlConnectionMax: MetricConfig{Enabled: 
false}, @@ -89,6 +98,11 @@ func TestMetricsBuilderConfig(t *testing.T) { PostgresqlTableSize: MetricConfig{Enabled: false}, PostgresqlTableVacuumCount: MetricConfig{Enabled: false}, PostgresqlTempFiles: MetricConfig{Enabled: false}, + PostgresqlTupDeleted: MetricConfig{Enabled: false}, + PostgresqlTupFetched: MetricConfig{Enabled: false}, + PostgresqlTupInserted: MetricConfig{Enabled: false}, + PostgresqlTupReturned: MetricConfig{Enabled: false}, + PostgresqlTupUpdated: MetricConfig{Enabled: false}, PostgresqlWalAge: MetricConfig{Enabled: false}, PostgresqlWalDelay: MetricConfig{Enabled: false}, PostgresqlWalLag: MetricConfig{Enabled: false}, diff --git a/receiver/postgresqlreceiver/internal/metadata/generated_metrics.go b/receiver/postgresqlreceiver/internal/metadata/generated_metrics.go index 0d5f0e2cae18..a45ef072aaa5 100644 --- a/receiver/postgresqlreceiver/internal/metadata/generated_metrics.go +++ b/receiver/postgresqlreceiver/internal/metadata/generated_metrics.go @@ -550,6 +550,108 @@ func newMetricPostgresqlBgwriterMaxwritten(cfg MetricConfig) metricPostgresqlBgw return m } +type metricPostgresqlBlksHit struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills postgresql.blks_hit metric with initial data. +func (m *metricPostgresqlBlksHit) init() { + m.data.SetName("postgresql.blks_hit") + m.data.SetDescription("Number of times disk blocks were found already in the buffer cache.") + m.data.SetUnit("{blks_hit}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricPostgresqlBlksHit) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricPostgresqlBlksHit) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricPostgresqlBlksHit) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricPostgresqlBlksHit(cfg MetricConfig) metricPostgresqlBlksHit { + m := metricPostgresqlBlksHit{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricPostgresqlBlksRead struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills postgresql.blks_read metric with initial data. 
+func (m *metricPostgresqlBlksRead) init() { + m.data.SetName("postgresql.blks_read") + m.data.SetDescription("Number of disk blocks read in this database.") + m.data.SetUnit("{blks_read}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricPostgresqlBlksRead) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricPostgresqlBlksRead) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricPostgresqlBlksRead) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricPostgresqlBlksRead(cfg MetricConfig) metricPostgresqlBlksRead { + m := metricPostgresqlBlksRead{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + type metricPostgresqlBlocksRead struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. @@ -1472,6 +1574,261 @@ func newMetricPostgresqlTempFiles(cfg MetricConfig) metricPostgresqlTempFiles { return m } +type metricPostgresqlTupDeleted struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills postgresql.tup_deleted metric with initial data. +func (m *metricPostgresqlTupDeleted) init() { + m.data.SetName("postgresql.tup_deleted") + m.data.SetDescription("Number of rows deleted by queries in the database.") + m.data.SetUnit("{tup_deleted}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricPostgresqlTupDeleted) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricPostgresqlTupDeleted) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricPostgresqlTupDeleted) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricPostgresqlTupDeleted(cfg MetricConfig) metricPostgresqlTupDeleted { + m := metricPostgresqlTupDeleted{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricPostgresqlTupFetched struct { + data pmetric.Metric // data buffer for generated metric. 
+ config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills postgresql.tup_fetched metric with initial data. +func (m *metricPostgresqlTupFetched) init() { + m.data.SetName("postgresql.tup_fetched") + m.data.SetDescription("Number of rows fetched by queries in the database.") + m.data.SetUnit("{tup_fetched}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricPostgresqlTupFetched) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricPostgresqlTupFetched) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricPostgresqlTupFetched) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricPostgresqlTupFetched(cfg MetricConfig) metricPostgresqlTupFetched { + m := metricPostgresqlTupFetched{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricPostgresqlTupInserted struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills postgresql.tup_inserted metric with initial data. +func (m *metricPostgresqlTupInserted) init() { + m.data.SetName("postgresql.tup_inserted") + m.data.SetDescription("Number of rows inserted by queries in the database.") + m.data.SetUnit("{tup_inserted}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricPostgresqlTupInserted) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricPostgresqlTupInserted) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricPostgresqlTupInserted) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricPostgresqlTupInserted(cfg MetricConfig) metricPostgresqlTupInserted { + m := metricPostgresqlTupInserted{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricPostgresqlTupReturned struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. 
+ capacity int // max observed number of data points added to the metric. +} + +// init fills postgresql.tup_returned metric with initial data. +func (m *metricPostgresqlTupReturned) init() { + m.data.SetName("postgresql.tup_returned") + m.data.SetDescription("Number of rows returned by queries in the database.") + m.data.SetUnit("{tup_returned}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricPostgresqlTupReturned) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricPostgresqlTupReturned) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricPostgresqlTupReturned) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricPostgresqlTupReturned(cfg MetricConfig) metricPostgresqlTupReturned { + m := metricPostgresqlTupReturned{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricPostgresqlTupUpdated struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills postgresql.tup_updated metric with initial data. +func (m *metricPostgresqlTupUpdated) init() { + m.data.SetName("postgresql.tup_updated") + m.data.SetDescription("Number of rows updated by queries in the database.") + m.data.SetUnit("{tup_updated}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricPostgresqlTupUpdated) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricPostgresqlTupUpdated) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricPostgresqlTupUpdated) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricPostgresqlTupUpdated(cfg MetricConfig) metricPostgresqlTupUpdated { + m := metricPostgresqlTupUpdated{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + type metricPostgresqlWalAge struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. 
@@ -1641,6 +1998,8 @@ type MetricsBuilder struct { metricPostgresqlBgwriterCheckpointCount metricPostgresqlBgwriterCheckpointCount metricPostgresqlBgwriterDuration metricPostgresqlBgwriterDuration metricPostgresqlBgwriterMaxwritten metricPostgresqlBgwriterMaxwritten + metricPostgresqlBlksHit metricPostgresqlBlksHit + metricPostgresqlBlksRead metricPostgresqlBlksRead metricPostgresqlBlocksRead metricPostgresqlBlocksRead metricPostgresqlCommits metricPostgresqlCommits metricPostgresqlConnectionMax metricPostgresqlConnectionMax @@ -1659,6 +2018,11 @@ type MetricsBuilder struct { metricPostgresqlTableSize metricPostgresqlTableSize metricPostgresqlTableVacuumCount metricPostgresqlTableVacuumCount metricPostgresqlTempFiles metricPostgresqlTempFiles + metricPostgresqlTupDeleted metricPostgresqlTupDeleted + metricPostgresqlTupFetched metricPostgresqlTupFetched + metricPostgresqlTupInserted metricPostgresqlTupInserted + metricPostgresqlTupReturned metricPostgresqlTupReturned + metricPostgresqlTupUpdated metricPostgresqlTupUpdated metricPostgresqlWalAge metricPostgresqlWalAge metricPostgresqlWalDelay metricPostgresqlWalDelay metricPostgresqlWalLag metricPostgresqlWalLag @@ -1694,6 +2058,8 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, opt metricPostgresqlBgwriterCheckpointCount: newMetricPostgresqlBgwriterCheckpointCount(mbc.Metrics.PostgresqlBgwriterCheckpointCount), metricPostgresqlBgwriterDuration: newMetricPostgresqlBgwriterDuration(mbc.Metrics.PostgresqlBgwriterDuration), metricPostgresqlBgwriterMaxwritten: newMetricPostgresqlBgwriterMaxwritten(mbc.Metrics.PostgresqlBgwriterMaxwritten), + metricPostgresqlBlksHit: newMetricPostgresqlBlksHit(mbc.Metrics.PostgresqlBlksHit), + metricPostgresqlBlksRead: newMetricPostgresqlBlksRead(mbc.Metrics.PostgresqlBlksRead), metricPostgresqlBlocksRead: newMetricPostgresqlBlocksRead(mbc.Metrics.PostgresqlBlocksRead), metricPostgresqlCommits: newMetricPostgresqlCommits(mbc.Metrics.PostgresqlCommits), metricPostgresqlConnectionMax: newMetricPostgresqlConnectionMax(mbc.Metrics.PostgresqlConnectionMax), @@ -1712,6 +2078,11 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, opt metricPostgresqlTableSize: newMetricPostgresqlTableSize(mbc.Metrics.PostgresqlTableSize), metricPostgresqlTableVacuumCount: newMetricPostgresqlTableVacuumCount(mbc.Metrics.PostgresqlTableVacuumCount), metricPostgresqlTempFiles: newMetricPostgresqlTempFiles(mbc.Metrics.PostgresqlTempFiles), + metricPostgresqlTupDeleted: newMetricPostgresqlTupDeleted(mbc.Metrics.PostgresqlTupDeleted), + metricPostgresqlTupFetched: newMetricPostgresqlTupFetched(mbc.Metrics.PostgresqlTupFetched), + metricPostgresqlTupInserted: newMetricPostgresqlTupInserted(mbc.Metrics.PostgresqlTupInserted), + metricPostgresqlTupReturned: newMetricPostgresqlTupReturned(mbc.Metrics.PostgresqlTupReturned), + metricPostgresqlTupUpdated: newMetricPostgresqlTupUpdated(mbc.Metrics.PostgresqlTupUpdated), metricPostgresqlWalAge: newMetricPostgresqlWalAge(mbc.Metrics.PostgresqlWalAge), metricPostgresqlWalDelay: newMetricPostgresqlWalDelay(mbc.Metrics.PostgresqlWalDelay), metricPostgresqlWalLag: newMetricPostgresqlWalLag(mbc.Metrics.PostgresqlWalLag), @@ -1817,6 +2188,8 @@ func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) { mb.metricPostgresqlBgwriterCheckpointCount.emit(ils.Metrics()) mb.metricPostgresqlBgwriterDuration.emit(ils.Metrics()) mb.metricPostgresqlBgwriterMaxwritten.emit(ils.Metrics()) + mb.metricPostgresqlBlksHit.emit(ils.Metrics()) + 
mb.metricPostgresqlBlksRead.emit(ils.Metrics()) mb.metricPostgresqlBlocksRead.emit(ils.Metrics()) mb.metricPostgresqlCommits.emit(ils.Metrics()) mb.metricPostgresqlConnectionMax.emit(ils.Metrics()) @@ -1835,6 +2208,11 @@ func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) { mb.metricPostgresqlTableSize.emit(ils.Metrics()) mb.metricPostgresqlTableVacuumCount.emit(ils.Metrics()) mb.metricPostgresqlTempFiles.emit(ils.Metrics()) + mb.metricPostgresqlTupDeleted.emit(ils.Metrics()) + mb.metricPostgresqlTupFetched.emit(ils.Metrics()) + mb.metricPostgresqlTupInserted.emit(ils.Metrics()) + mb.metricPostgresqlTupReturned.emit(ils.Metrics()) + mb.metricPostgresqlTupUpdated.emit(ils.Metrics()) mb.metricPostgresqlWalAge.emit(ils.Metrics()) mb.metricPostgresqlWalDelay.emit(ils.Metrics()) mb.metricPostgresqlWalLag.emit(ils.Metrics()) @@ -1899,6 +2277,16 @@ func (mb *MetricsBuilder) RecordPostgresqlBgwriterMaxwrittenDataPoint(ts pcommon mb.metricPostgresqlBgwriterMaxwritten.recordDataPoint(mb.startTime, ts, val) } +// RecordPostgresqlBlksHitDataPoint adds a data point to postgresql.blks_hit metric. +func (mb *MetricsBuilder) RecordPostgresqlBlksHitDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricPostgresqlBlksHit.recordDataPoint(mb.startTime, ts, val) +} + +// RecordPostgresqlBlksReadDataPoint adds a data point to postgresql.blks_read metric. +func (mb *MetricsBuilder) RecordPostgresqlBlksReadDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricPostgresqlBlksRead.recordDataPoint(mb.startTime, ts, val) +} + // RecordPostgresqlBlocksReadDataPoint adds a data point to postgresql.blocks_read metric. func (mb *MetricsBuilder) RecordPostgresqlBlocksReadDataPoint(ts pcommon.Timestamp, val int64, sourceAttributeValue AttributeSource) { mb.metricPostgresqlBlocksRead.recordDataPoint(mb.startTime, ts, val, sourceAttributeValue.String()) @@ -1989,6 +2377,31 @@ func (mb *MetricsBuilder) RecordPostgresqlTempFilesDataPoint(ts pcommon.Timestam mb.metricPostgresqlTempFiles.recordDataPoint(mb.startTime, ts, val) } +// RecordPostgresqlTupDeletedDataPoint adds a data point to postgresql.tup_deleted metric. +func (mb *MetricsBuilder) RecordPostgresqlTupDeletedDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricPostgresqlTupDeleted.recordDataPoint(mb.startTime, ts, val) +} + +// RecordPostgresqlTupFetchedDataPoint adds a data point to postgresql.tup_fetched metric. +func (mb *MetricsBuilder) RecordPostgresqlTupFetchedDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricPostgresqlTupFetched.recordDataPoint(mb.startTime, ts, val) +} + +// RecordPostgresqlTupInsertedDataPoint adds a data point to postgresql.tup_inserted metric. +func (mb *MetricsBuilder) RecordPostgresqlTupInsertedDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricPostgresqlTupInserted.recordDataPoint(mb.startTime, ts, val) +} + +// RecordPostgresqlTupReturnedDataPoint adds a data point to postgresql.tup_returned metric. +func (mb *MetricsBuilder) RecordPostgresqlTupReturnedDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricPostgresqlTupReturned.recordDataPoint(mb.startTime, ts, val) +} + +// RecordPostgresqlTupUpdatedDataPoint adds a data point to postgresql.tup_updated metric. +func (mb *MetricsBuilder) RecordPostgresqlTupUpdatedDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricPostgresqlTupUpdated.recordDataPoint(mb.startTime, ts, val) +} + // RecordPostgresqlWalAgeDataPoint adds a data point to postgresql.wal.age metric. 
func (mb *MetricsBuilder) RecordPostgresqlWalAgeDataPoint(ts pcommon.Timestamp, val int64) { mb.metricPostgresqlWalAge.recordDataPoint(mb.startTime, ts, val) diff --git a/receiver/postgresqlreceiver/internal/metadata/generated_metrics_test.go b/receiver/postgresqlreceiver/internal/metadata/generated_metrics_test.go index a6a100c5bea4..8cd326d46a27 100644 --- a/receiver/postgresqlreceiver/internal/metadata/generated_metrics_test.go +++ b/receiver/postgresqlreceiver/internal/metadata/generated_metrics_test.go @@ -92,6 +92,12 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordPostgresqlBgwriterMaxwrittenDataPoint(ts, 1) + allMetricsCount++ + mb.RecordPostgresqlBlksHitDataPoint(ts, 1) + + allMetricsCount++ + mb.RecordPostgresqlBlksReadDataPoint(ts, 1) + defaultMetricsCount++ allMetricsCount++ mb.RecordPostgresqlBlocksReadDataPoint(ts, 1, AttributeSourceHeapRead) @@ -160,6 +166,21 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordPostgresqlTempFilesDataPoint(ts, 1) + allMetricsCount++ + mb.RecordPostgresqlTupDeletedDataPoint(ts, 1) + + allMetricsCount++ + mb.RecordPostgresqlTupFetchedDataPoint(ts, 1) + + allMetricsCount++ + mb.RecordPostgresqlTupInsertedDataPoint(ts, 1) + + allMetricsCount++ + mb.RecordPostgresqlTupReturnedDataPoint(ts, 1) + + allMetricsCount++ + mb.RecordPostgresqlTupUpdatedDataPoint(ts, 1) + defaultMetricsCount++ allMetricsCount++ mb.RecordPostgresqlWalAgeDataPoint(ts, 1) @@ -291,6 +312,34 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) assert.Equal(t, int64(1), dp.IntValue()) + case "postgresql.blks_hit": + assert.False(t, validatedMetrics["postgresql.blks_hit"], "Found a duplicate in the metrics slice: postgresql.blks_hit") + validatedMetrics["postgresql.blks_hit"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of times disk blocks were found already in the buffer cache.", ms.At(i).Description()) + assert.Equal(t, "{blks_hit}", ms.At(i).Unit()) + assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "postgresql.blks_read": + assert.False(t, validatedMetrics["postgresql.blks_read"], "Found a duplicate in the metrics slice: postgresql.blks_read") + validatedMetrics["postgresql.blks_read"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of disk blocks read in this database.", ms.At(i).Description()) + assert.Equal(t, "{blks_read}", ms.At(i).Unit()) + assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) case "postgresql.blocks_read": assert.False(t, validatedMetrics["postgresql.blocks_read"], "Found a duplicate in the metrics slice: postgresql.blocks_read") 
validatedMetrics["postgresql.blocks_read"] = true @@ -556,6 +605,76 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) assert.Equal(t, int64(1), dp.IntValue()) + case "postgresql.tup_deleted": + assert.False(t, validatedMetrics["postgresql.tup_deleted"], "Found a duplicate in the metrics slice: postgresql.tup_deleted") + validatedMetrics["postgresql.tup_deleted"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of rows deleted by queries in the database.", ms.At(i).Description()) + assert.Equal(t, "{tup_deleted}", ms.At(i).Unit()) + assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "postgresql.tup_fetched": + assert.False(t, validatedMetrics["postgresql.tup_fetched"], "Found a duplicate in the metrics slice: postgresql.tup_fetched") + validatedMetrics["postgresql.tup_fetched"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of rows fetched by queries in the database.", ms.At(i).Description()) + assert.Equal(t, "{tup_fetched}", ms.At(i).Unit()) + assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "postgresql.tup_inserted": + assert.False(t, validatedMetrics["postgresql.tup_inserted"], "Found a duplicate in the metrics slice: postgresql.tup_inserted") + validatedMetrics["postgresql.tup_inserted"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of rows inserted by queries in the database.", ms.At(i).Description()) + assert.Equal(t, "{tup_inserted}", ms.At(i).Unit()) + assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "postgresql.tup_returned": + assert.False(t, validatedMetrics["postgresql.tup_returned"], "Found a duplicate in the metrics slice: postgresql.tup_returned") + validatedMetrics["postgresql.tup_returned"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of rows returned by queries in the database.", ms.At(i).Description()) + assert.Equal(t, "{tup_returned}", ms.At(i).Unit()) + assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, 
dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + case "postgresql.tup_updated": + assert.False(t, validatedMetrics["postgresql.tup_updated"], "Found a duplicate in the metrics slice: postgresql.tup_updated") + validatedMetrics["postgresql.tup_updated"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "Number of rows updated by queries in the database.", ms.At(i).Description()) + assert.Equal(t, "{tup_updated}", ms.At(i).Unit()) + assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) case "postgresql.wal.age": assert.False(t, validatedMetrics["postgresql.wal.age"], "Found a duplicate in the metrics slice: postgresql.wal.age") validatedMetrics["postgresql.wal.age"] = true diff --git a/receiver/postgresqlreceiver/internal/metadata/testdata/config.yaml b/receiver/postgresqlreceiver/internal/metadata/testdata/config.yaml index 8cf4613c3849..71b1192ade06 100644 --- a/receiver/postgresqlreceiver/internal/metadata/testdata/config.yaml +++ b/receiver/postgresqlreceiver/internal/metadata/testdata/config.yaml @@ -13,6 +13,10 @@ all_set: enabled: true postgresql.bgwriter.maxwritten: enabled: true + postgresql.blks_hit: + enabled: true + postgresql.blks_read: + enabled: true postgresql.blocks_read: enabled: true postgresql.commits: @@ -49,6 +53,16 @@ all_set: enabled: true postgresql.temp_files: enabled: true + postgresql.tup_deleted: + enabled: true + postgresql.tup_fetched: + enabled: true + postgresql.tup_inserted: + enabled: true + postgresql.tup_returned: + enabled: true + postgresql.tup_updated: + enabled: true postgresql.wal.age: enabled: true postgresql.wal.delay: @@ -78,6 +92,10 @@ none_set: enabled: false postgresql.bgwriter.maxwritten: enabled: false + postgresql.blks_hit: + enabled: false + postgresql.blks_read: + enabled: false postgresql.blocks_read: enabled: false postgresql.commits: @@ -114,6 +132,16 @@ none_set: enabled: false postgresql.temp_files: enabled: false + postgresql.tup_deleted: + enabled: false + postgresql.tup_fetched: + enabled: false + postgresql.tup_inserted: + enabled: false + postgresql.tup_returned: + enabled: false + postgresql.tup_updated: + enabled: false postgresql.wal.age: enabled: false postgresql.wal.delay: diff --git a/receiver/postgresqlreceiver/metadata.yaml b/receiver/postgresqlreceiver/metadata.yaml index 401d49947b3a..1745f17fba78 100644 --- a/receiver/postgresqlreceiver/metadata.yaml +++ b/receiver/postgresqlreceiver/metadata.yaml @@ -310,6 +310,61 @@ metrics: value_type: double extended_documentation: | This metric requires WAL to be enabled with at least one replica. - + postgresql.tup_updated: + enabled: false + description: Number of rows updated by queries in the database. + unit: "{tup_updated}" + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative + postgresql.tup_returned: + enabled: false + description: Number of rows returned by queries in the database. 
+ unit: "{tup_returned}" + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative + postgresql.tup_fetched: + enabled: false + description: Number of rows fetched by queries in the database. + unit: "{tup_fetched}" + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative + postgresql.tup_inserted: + enabled: false + description: Number of rows inserted by queries in the database. + unit: "{tup_inserted}" + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative + postgresql.tup_deleted: + enabled: false + description: Number of rows deleted by queries in the database. + unit: "{tup_deleted}" + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative + postgresql.blks_hit: + enabled: false + description: Number of times disk blocks were found already in the buffer cache. + unit: "{blks_hit}" + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative + postgresql.blks_read: + enabled: false + description: Number of disk blocks read in this database. + unit: "{blks_read}" + sum: + value_type: int + monotonic: true + aggregation_temporality: cumulative tests: config: diff --git a/receiver/postgresqlreceiver/scraper.go b/receiver/postgresqlreceiver/scraper.go index 1fe28994e68f..55d140716d22 100644 --- a/receiver/postgresqlreceiver/scraper.go +++ b/receiver/postgresqlreceiver/scraper.go @@ -201,6 +201,13 @@ func (p *postgreSQLScraper) recordDatabase(now pcommon.Timestamp, db string, r * p.mb.RecordPostgresqlRollbacksDataPoint(now, stats.transactionRollback) p.mb.RecordPostgresqlDeadlocksDataPoint(now, stats.deadlocks) p.mb.RecordPostgresqlTempFilesDataPoint(now, stats.tempFiles) + p.mb.RecordPostgresqlTupUpdatedDataPoint(now, stats.tupUpdated) + p.mb.RecordPostgresqlTupReturnedDataPoint(now, stats.tupReturned) + p.mb.RecordPostgresqlTupFetchedDataPoint(now, stats.tupFetched) + p.mb.RecordPostgresqlTupInsertedDataPoint(now, stats.tupInserted) + p.mb.RecordPostgresqlTupDeletedDataPoint(now, stats.tupDeleted) + p.mb.RecordPostgresqlBlksHitDataPoint(now, stats.blksHit) + p.mb.RecordPostgresqlBlksReadDataPoint(now, stats.blksRead) } rb := p.mb.NewResourceBuilder() rb.SetPostgresqlDatabaseName(db) diff --git a/receiver/postgresqlreceiver/scraper_test.go b/receiver/postgresqlreceiver/scraper_test.go index fad106672d75..86457712eb21 100644 --- a/receiver/postgresqlreceiver/scraper_test.go +++ b/receiver/postgresqlreceiver/scraper_test.go @@ -45,6 +45,13 @@ func TestScraper(t *testing.T) { cfg.Metrics.PostgresqlWalDelay.Enabled = true cfg.Metrics.PostgresqlDeadlocks.Enabled = true cfg.Metrics.PostgresqlTempFiles.Enabled = true + cfg.Metrics.PostgresqlTupUpdated.Enabled = true + cfg.Metrics.PostgresqlTupReturned.Enabled = true + cfg.Metrics.PostgresqlTupFetched.Enabled = true + cfg.Metrics.PostgresqlTupInserted.Enabled = true + cfg.Metrics.PostgresqlTupDeleted.Enabled = true + cfg.Metrics.PostgresqlBlksHit.Enabled = true + cfg.Metrics.PostgresqlBlksRead.Enabled = true cfg.Metrics.PostgresqlSequentialScans.Enabled = true cfg.Metrics.PostgresqlDatabaseLocks.Enabled = true @@ -81,6 +88,20 @@ func TestScraperNoDatabaseSingle(t *testing.T) { cfg.Metrics.PostgresqlDeadlocks.Enabled = true require.False(t, cfg.Metrics.PostgresqlTempFiles.Enabled) cfg.Metrics.PostgresqlTempFiles.Enabled = true + require.False(t, cfg.Metrics.PostgresqlTupUpdated.Enabled) + cfg.Metrics.PostgresqlTupUpdated.Enabled = true + require.False(t, cfg.Metrics.PostgresqlTupReturned.Enabled) + 
cfg.Metrics.PostgresqlTupReturned.Enabled = true + require.False(t, cfg.Metrics.PostgresqlTupFetched.Enabled) + cfg.Metrics.PostgresqlTupFetched.Enabled = true + require.False(t, cfg.Metrics.PostgresqlTupInserted.Enabled) + cfg.Metrics.PostgresqlTupInserted.Enabled = true + require.False(t, cfg.Metrics.PostgresqlTupDeleted.Enabled) + cfg.Metrics.PostgresqlTupDeleted.Enabled = true + require.False(t, cfg.Metrics.PostgresqlBlksHit.Enabled) + cfg.Metrics.PostgresqlBlksHit.Enabled = true + require.False(t, cfg.Metrics.PostgresqlBlksRead.Enabled) + cfg.Metrics.PostgresqlBlksRead.Enabled = true require.False(t, cfg.Metrics.PostgresqlSequentialScans.Enabled) cfg.Metrics.PostgresqlSequentialScans.Enabled = true require.False(t, cfg.Metrics.PostgresqlDatabaseLocks.Enabled) @@ -100,6 +121,13 @@ func TestScraperNoDatabaseSingle(t *testing.T) { cfg.Metrics.PostgresqlWalDelay.Enabled = false cfg.Metrics.PostgresqlDeadlocks.Enabled = false cfg.Metrics.PostgresqlTempFiles.Enabled = false + cfg.Metrics.PostgresqlTupUpdated.Enabled = false + cfg.Metrics.PostgresqlTupReturned.Enabled = false + cfg.Metrics.PostgresqlTupFetched.Enabled = false + cfg.Metrics.PostgresqlTupInserted.Enabled = false + cfg.Metrics.PostgresqlTupDeleted.Enabled = false + cfg.Metrics.PostgresqlBlksHit.Enabled = false + cfg.Metrics.PostgresqlBlksRead.Enabled = false cfg.Metrics.PostgresqlSequentialScans.Enabled = false cfg.Metrics.PostgresqlDatabaseLocks.Enabled = false @@ -135,6 +163,20 @@ func TestScraperNoDatabaseMultipleWithoutPreciseLag(t *testing.T) { cfg.Metrics.PostgresqlDeadlocks.Enabled = true require.False(t, cfg.Metrics.PostgresqlTempFiles.Enabled) cfg.Metrics.PostgresqlTempFiles.Enabled = true + require.False(t, cfg.Metrics.PostgresqlTupUpdated.Enabled) + cfg.Metrics.PostgresqlTupUpdated.Enabled = true + require.False(t, cfg.Metrics.PostgresqlTupReturned.Enabled) + cfg.Metrics.PostgresqlTupReturned.Enabled = true + require.False(t, cfg.Metrics.PostgresqlTupFetched.Enabled) + cfg.Metrics.PostgresqlTupFetched.Enabled = true + require.False(t, cfg.Metrics.PostgresqlTupInserted.Enabled) + cfg.Metrics.PostgresqlTupInserted.Enabled = true + require.False(t, cfg.Metrics.PostgresqlTupDeleted.Enabled) + cfg.Metrics.PostgresqlTupDeleted.Enabled = true + require.False(t, cfg.Metrics.PostgresqlBlksHit.Enabled) + cfg.Metrics.PostgresqlBlksHit.Enabled = true + require.False(t, cfg.Metrics.PostgresqlBlksRead.Enabled) + cfg.Metrics.PostgresqlBlksRead.Enabled = true require.False(t, cfg.Metrics.PostgresqlSequentialScans.Enabled) cfg.Metrics.PostgresqlSequentialScans.Enabled = true require.False(t, cfg.Metrics.PostgresqlDatabaseLocks.Enabled) @@ -172,6 +214,20 @@ func TestScraperNoDatabaseMultiple(t *testing.T) { cfg.Metrics.PostgresqlDeadlocks.Enabled = true require.False(t, cfg.Metrics.PostgresqlTempFiles.Enabled) cfg.Metrics.PostgresqlTempFiles.Enabled = true + require.False(t, cfg.Metrics.PostgresqlTupUpdated.Enabled) + cfg.Metrics.PostgresqlTupUpdated.Enabled = true + require.False(t, cfg.Metrics.PostgresqlTupReturned.Enabled) + cfg.Metrics.PostgresqlTupReturned.Enabled = true + require.False(t, cfg.Metrics.PostgresqlTupFetched.Enabled) + cfg.Metrics.PostgresqlTupFetched.Enabled = true + require.False(t, cfg.Metrics.PostgresqlTupInserted.Enabled) + cfg.Metrics.PostgresqlTupInserted.Enabled = true + require.False(t, cfg.Metrics.PostgresqlTupDeleted.Enabled) + cfg.Metrics.PostgresqlTupDeleted.Enabled = true + require.False(t, cfg.Metrics.PostgresqlBlksHit.Enabled) + cfg.Metrics.PostgresqlBlksHit.Enabled = true + require.False(t, 
cfg.Metrics.PostgresqlBlksRead.Enabled) + cfg.Metrics.PostgresqlBlksRead.Enabled = true require.False(t, cfg.Metrics.PostgresqlSequentialScans.Enabled) cfg.Metrics.PostgresqlSequentialScans.Enabled = true require.False(t, cfg.Metrics.PostgresqlDatabaseLocks.Enabled) @@ -209,6 +265,20 @@ func TestScraperWithResourceAttributeFeatureGate(t *testing.T) { cfg.Metrics.PostgresqlDeadlocks.Enabled = true require.False(t, cfg.Metrics.PostgresqlTempFiles.Enabled) cfg.Metrics.PostgresqlTempFiles.Enabled = true + require.False(t, cfg.Metrics.PostgresqlTupUpdated.Enabled) + cfg.Metrics.PostgresqlTupUpdated.Enabled = true + require.False(t, cfg.Metrics.PostgresqlTupReturned.Enabled) + cfg.Metrics.PostgresqlTupReturned.Enabled = true + require.False(t, cfg.Metrics.PostgresqlTupFetched.Enabled) + cfg.Metrics.PostgresqlTupFetched.Enabled = true + require.False(t, cfg.Metrics.PostgresqlTupInserted.Enabled) + cfg.Metrics.PostgresqlTupInserted.Enabled = true + require.False(t, cfg.Metrics.PostgresqlTupDeleted.Enabled) + cfg.Metrics.PostgresqlTupDeleted.Enabled = true + require.False(t, cfg.Metrics.PostgresqlBlksHit.Enabled) + cfg.Metrics.PostgresqlBlksHit.Enabled = true + require.False(t, cfg.Metrics.PostgresqlBlksRead.Enabled) + cfg.Metrics.PostgresqlBlksRead.Enabled = true require.False(t, cfg.Metrics.PostgresqlSequentialScans.Enabled) cfg.Metrics.PostgresqlSequentialScans.Enabled = true require.False(t, cfg.Metrics.PostgresqlDatabaseLocks.Enabled) @@ -247,6 +317,20 @@ func TestScraperWithResourceAttributeFeatureGateSingle(t *testing.T) { cfg.Metrics.PostgresqlDeadlocks.Enabled = true require.False(t, cfg.Metrics.PostgresqlTempFiles.Enabled) cfg.Metrics.PostgresqlTempFiles.Enabled = true + require.False(t, cfg.Metrics.PostgresqlTupUpdated.Enabled) + cfg.Metrics.PostgresqlTupUpdated.Enabled = true + require.False(t, cfg.Metrics.PostgresqlTupReturned.Enabled) + cfg.Metrics.PostgresqlTupReturned.Enabled = true + require.False(t, cfg.Metrics.PostgresqlTupFetched.Enabled) + cfg.Metrics.PostgresqlTupFetched.Enabled = true + require.False(t, cfg.Metrics.PostgresqlTupInserted.Enabled) + cfg.Metrics.PostgresqlTupInserted.Enabled = true + require.False(t, cfg.Metrics.PostgresqlTupDeleted.Enabled) + cfg.Metrics.PostgresqlTupDeleted.Enabled = true + require.False(t, cfg.Metrics.PostgresqlBlksHit.Enabled) + cfg.Metrics.PostgresqlBlksHit.Enabled = true + require.False(t, cfg.Metrics.PostgresqlBlksRead.Enabled) + cfg.Metrics.PostgresqlBlksRead.Enabled = true require.False(t, cfg.Metrics.PostgresqlSequentialScans.Enabled) cfg.Metrics.PostgresqlSequentialScans.Enabled = true require.False(t, cfg.Metrics.PostgresqlDatabaseLocks.Enabled) @@ -406,6 +490,13 @@ func (m *mockClient) initMocks(database string, schema string, databases []strin transactionRollback: int64(idx + 2), deadlocks: int64(idx + 3), tempFiles: int64(idx + 4), + tupUpdated: int64(idx + 5), + tupReturned: int64(idx + 6), + tupFetched: int64(idx + 7), + tupInserted: int64(idx + 8), + tupDeleted: int64(idx + 9), + blksHit: int64(idx + 10), + blksRead: int64(idx + 11), } dbSize[databaseName(db)] = int64(idx + 4) backends[databaseName(db)] = int64(idx + 3) diff --git a/receiver/postgresqlreceiver/testdata/integration/expected_all_db.yaml b/receiver/postgresqlreceiver/testdata/integration/expected_all_db.yaml index d1ada6328582..40e56d97b018 100644 --- a/receiver/postgresqlreceiver/testdata/integration/expected_all_db.yaml +++ b/receiver/postgresqlreceiver/testdata/integration/expected_all_db.yaml @@ -196,6 +196,26 @@ resourceMetrics: stringValue: otel 
scopeMetrics: - metrics: + - description: Number of times disk blocks were found already in the buffer cache. + name: postgresql.blks_hit + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_hit}' + - description: Number of disk blocks read in this database. + name: postgresql.blks_read + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_read}' - description: The number of commits. name: postgresql.commits sum: @@ -254,6 +274,56 @@ resourceMetrics: timeUnixNano: "2000000" isMonotonic: true unit: '{temp_file}' + - description: Number of rows deleted by queries in the database. + name: postgresql.tup_deleted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_deleted}' + - description: Number of rows fetched by queries in the database. + name: postgresql.tup_fetched + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_fetched}' + - description: Number of rows inserted by queries in the database. + name: postgresql.tup_inserted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_inserted}' + - description: Number of rows returned by queries in the database. + name: postgresql.tup_returned + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_returned}' + - description: Number of rows updated by queries in the database. + name: postgresql.tup_updated + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_updated}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver version: latest @@ -264,6 +334,26 @@ resourceMetrics: stringValue: otel2 scopeMetrics: - metrics: + - description: Number of times disk blocks were found already in the buffer cache. + name: postgresql.blks_hit + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_hit}' + - description: Number of disk blocks read in this database. + name: postgresql.blks_read + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_read}' - description: The number of commits. name: postgresql.commits sum: @@ -322,6 +412,56 @@ resourceMetrics: timeUnixNano: "2000000" isMonotonic: true unit: '{temp_file}' + - description: Number of rows deleted by queries in the database. + name: postgresql.tup_deleted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_deleted}' + - description: Number of rows fetched by queries in the database. + name: postgresql.tup_fetched + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_fetched}' + - description: Number of rows inserted by queries in the database. 
+ name: postgresql.tup_inserted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_inserted}' + - description: Number of rows returned by queries in the database. + name: postgresql.tup_returned + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_returned}' + - description: Number of rows updated by queries in the database. + name: postgresql.tup_updated + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_updated}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver version: latest @@ -341,6 +481,26 @@ resourceMetrics: startTimeUnixNano: "1000000" timeUnixNano: "2000000" unit: "1" + - description: Number of times disk blocks were found already in the buffer cache. + name: postgresql.blks_hit + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_hit}' + - description: Number of disk blocks read in this database. + name: postgresql.blks_read + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_read}' - description: The number of commits. name: postgresql.commits sum: @@ -399,6 +559,56 @@ resourceMetrics: timeUnixNano: "2000000" isMonotonic: true unit: '{temp_file}' + - description: Number of rows deleted by queries in the database. + name: postgresql.tup_deleted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_deleted}' + - description: Number of rows fetched by queries in the database. + name: postgresql.tup_fetched + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_fetched}' + - description: Number of rows inserted by queries in the database. + name: postgresql.tup_inserted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_inserted}' + - description: Number of rows returned by queries in the database. + name: postgresql.tup_returned + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_returned}' + - description: Number of rows updated by queries in the database. 
+ name: postgresql.tup_updated + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_updated}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver version: latest diff --git a/receiver/postgresqlreceiver/testdata/integration/expected_all_db_connpool.yaml b/receiver/postgresqlreceiver/testdata/integration/expected_all_db_connpool.yaml index 8aca138de78c..866b6e1a801a 100644 --- a/receiver/postgresqlreceiver/testdata/integration/expected_all_db_connpool.yaml +++ b/receiver/postgresqlreceiver/testdata/integration/expected_all_db_connpool.yaml @@ -15,6 +15,26 @@ resourceMetrics: startTimeUnixNano: "1706802467703361527" timeUnixNano: "1706802526712082422" unit: "1" + - description: Number of times disk blocks were found already in the buffer cache. + name: postgresql.blks_hit + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1706802467703361527" + timeUnixNano: "1706802526712082422" + isMonotonic: true + unit: '{blks_hit}' + - description: Number of disk blocks read in this database. + name: postgresql.blks_read + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1706802467703361527" + timeUnixNano: "1706802526712082422" + isMonotonic: true + unit: '{blks_read}' - description: The number of commits. name: postgresql.commits sum: @@ -73,6 +93,56 @@ resourceMetrics: timeUnixNano: "1706802526712082422" isMonotonic: true unit: '{temp_file}' + - description: Number of rows deleted by queries in the database. + name: postgresql.tup_deleted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1706802467703361527" + timeUnixNano: "1706802526712082422" + isMonotonic: true + unit: '{tup_deleted}' + - description: Number of rows fetched by queries in the database. + name: postgresql.tup_fetched + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1706802467703361527" + timeUnixNano: "1706802526712082422" + isMonotonic: true + unit: '{tup_fetched}' + - description: Number of rows inserted by queries in the database. + name: postgresql.tup_inserted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1706802467703361527" + timeUnixNano: "1706802526712082422" + isMonotonic: true + unit: '{tup_inserted}' + - description: Number of rows returned by queries in the database. + name: postgresql.tup_returned + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1706802467703361527" + timeUnixNano: "1706802526712082422" + isMonotonic: true + unit: '{tup_returned}' + - description: Number of rows updated by queries in the database. + name: postgresql.tup_updated + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1706802467703361527" + timeUnixNano: "1706802526712082422" + isMonotonic: true + unit: '{tup_updated}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver version: latest @@ -412,6 +482,26 @@ resourceMetrics: startTimeUnixNano: "1706802467703361527" timeUnixNano: "1706802526712082422" unit: "1" + - description: Number of times disk blocks were found already in the buffer cache. 
+ name: postgresql.blks_hit + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1706802467703361527" + timeUnixNano: "1706802526712082422" + isMonotonic: true + unit: '{blks_hit}' + - description: Number of disk blocks read in this database. + name: postgresql.blks_read + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1706802467703361527" + timeUnixNano: "1706802526712082422" + isMonotonic: true + unit: '{blks_read}' - description: The number of commits. name: postgresql.commits sum: @@ -470,6 +560,56 @@ resourceMetrics: timeUnixNano: "1706802526712082422" isMonotonic: true unit: '{temp_file}' + - description: Number of rows deleted by queries in the database. + name: postgresql.tup_deleted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1706802467703361527" + timeUnixNano: "1706802526712082422" + isMonotonic: true + unit: '{tup_deleted}' + - description: Number of rows fetched by queries in the database. + name: postgresql.tup_fetched + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1706802467703361527" + timeUnixNano: "1706802526712082422" + isMonotonic: true + unit: '{tup_fetched}' + - description: Number of rows inserted by queries in the database. + name: postgresql.tup_inserted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1706802467703361527" + timeUnixNano: "1706802526712082422" + isMonotonic: true + unit: '{tup_inserted}' + - description: Number of rows returned by queries in the database. + name: postgresql.tup_returned + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1706802467703361527" + timeUnixNano: "1706802526712082422" + isMonotonic: true + unit: '{tup_returned}' + - description: Number of rows updated by queries in the database. + name: postgresql.tup_updated + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1706802467703361527" + timeUnixNano: "1706802526712082422" + isMonotonic: true + unit: '{tup_updated}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver version: latest @@ -877,6 +1017,26 @@ resourceMetrics: startTimeUnixNano: "1706802467703361527" timeUnixNano: "1706802526712082422" unit: "1" + - description: Number of times disk blocks were found already in the buffer cache. + name: postgresql.blks_hit + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1706802467703361527" + timeUnixNano: "1706802526712082422" + isMonotonic: true + unit: '{blks_hit}' + - description: Number of disk blocks read in this database. + name: postgresql.blks_read + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1706802467703361527" + timeUnixNano: "1706802526712082422" + isMonotonic: true + unit: '{blks_read}' - description: The number of commits. name: postgresql.commits sum: @@ -935,6 +1095,56 @@ resourceMetrics: timeUnixNano: "1706802526712082422" isMonotonic: true unit: '{temp_file}' + - description: Number of rows deleted by queries in the database. + name: postgresql.tup_deleted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1706802467703361527" + timeUnixNano: "1706802526712082422" + isMonotonic: true + unit: '{tup_deleted}' + - description: Number of rows fetched by queries in the database. 
+ name: postgresql.tup_fetched + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1706802467703361527" + timeUnixNano: "1706802526712082422" + isMonotonic: true + unit: '{tup_fetched}' + - description: Number of rows inserted by queries in the database. + name: postgresql.tup_inserted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1706802467703361527" + timeUnixNano: "1706802526712082422" + isMonotonic: true + unit: '{tup_inserted}' + - description: Number of rows returned by queries in the database. + name: postgresql.tup_returned + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1706802467703361527" + timeUnixNano: "1706802526712082422" + isMonotonic: true + unit: '{tup_returned}' + - description: Number of rows updated by queries in the database. + name: postgresql.tup_updated + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1706802467703361527" + timeUnixNano: "1706802526712082422" + isMonotonic: true + unit: '{tup_updated}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver version: latest diff --git a/receiver/postgresqlreceiver/testdata/integration/expected_all_db_schemaattr.yaml b/receiver/postgresqlreceiver/testdata/integration/expected_all_db_schemaattr.yaml index a4c655f3c74b..90edac5efcdf 100644 --- a/receiver/postgresqlreceiver/testdata/integration/expected_all_db_schemaattr.yaml +++ b/receiver/postgresqlreceiver/testdata/integration/expected_all_db_schemaattr.yaml @@ -196,6 +196,26 @@ resourceMetrics: stringValue: otel scopeMetrics: - metrics: + - description: Number of times disk blocks were found already in the buffer cache. + name: postgresql.blks_hit + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_hit}' + - description: Number of disk blocks read in this database. + name: postgresql.blks_read + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_read}' - description: The number of commits. name: postgresql.commits sum: @@ -254,6 +274,56 @@ resourceMetrics: timeUnixNano: "2000000" isMonotonic: true unit: '{temp_file}' + - description: Number of rows deleted by queries in the database. + name: postgresql.tup_deleted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_deleted}' + - description: Number of rows fetched by queries in the database. + name: postgresql.tup_fetched + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_fetched}' + - description: Number of rows inserted by queries in the database. + name: postgresql.tup_inserted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_inserted}' + - description: Number of rows returned by queries in the database. + name: postgresql.tup_returned + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_returned}' + - description: Number of rows updated by queries in the database. 
+ name: postgresql.tup_updated + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_updated}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver version: latest @@ -264,6 +334,26 @@ resourceMetrics: stringValue: otel2 scopeMetrics: - metrics: + - description: Number of times disk blocks were found already in the buffer cache. + name: postgresql.blks_hit + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_hit}' + - description: Number of disk blocks read in this database. + name: postgresql.blks_read + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_read}' - description: The number of commits. name: postgresql.commits sum: @@ -322,6 +412,56 @@ resourceMetrics: timeUnixNano: "2000000" isMonotonic: true unit: '{temp_file}' + - description: Number of rows deleted by queries in the database. + name: postgresql.tup_deleted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_deleted}' + - description: Number of rows fetched by queries in the database. + name: postgresql.tup_fetched + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_fetched}' + - description: Number of rows inserted by queries in the database. + name: postgresql.tup_inserted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_inserted}' + - description: Number of rows returned by queries in the database. + name: postgresql.tup_returned + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_returned}' + - description: Number of rows updated by queries in the database. + name: postgresql.tup_updated + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_updated}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver version: latest @@ -341,6 +481,26 @@ resourceMetrics: startTimeUnixNano: "1000000" timeUnixNano: "2000000" unit: "1" + - description: Number of times disk blocks were found already in the buffer cache. + name: postgresql.blks_hit + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_hit}' + - description: Number of disk blocks read in this database. + name: postgresql.blks_read + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_read}' - description: The number of commits. name: postgresql.commits sum: @@ -399,6 +559,56 @@ resourceMetrics: timeUnixNano: "2000000" isMonotonic: true unit: '{temp_file}' + - description: Number of rows deleted by queries in the database. 
+ name: postgresql.tup_deleted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_deleted}' + - description: Number of rows fetched by queries in the database. + name: postgresql.tup_fetched + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_fetched}' + - description: Number of rows inserted by queries in the database. + name: postgresql.tup_inserted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_inserted}' + - description: Number of rows returned by queries in the database. + name: postgresql.tup_returned + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_returned}' + - description: Number of rows updated by queries in the database. + name: postgresql.tup_updated + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_updated}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver version: latest diff --git a/receiver/postgresqlreceiver/testdata/integration/expected_multi_db.yaml b/receiver/postgresqlreceiver/testdata/integration/expected_multi_db.yaml index b3bcba77057a..81167c9fc9f3 100644 --- a/receiver/postgresqlreceiver/testdata/integration/expected_multi_db.yaml +++ b/receiver/postgresqlreceiver/testdata/integration/expected_multi_db.yaml @@ -196,6 +196,26 @@ resourceMetrics: stringValue: otel scopeMetrics: - metrics: + - description: Number of times disk blocks were found already in the buffer cache. + name: postgresql.blks_hit + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_hit}' + - description: Number of disk blocks read in this database. + name: postgresql.blks_read + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_read}' - description: The number of commits. name: postgresql.commits sum: @@ -254,6 +274,56 @@ resourceMetrics: timeUnixNano: "2000000" isMonotonic: true unit: '{temp_file}' + - description: Number of rows deleted by queries in the database. + name: postgresql.tup_deleted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_deleted}' + - description: Number of rows fetched by queries in the database. + name: postgresql.tup_fetched + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_fetched}' + - description: Number of rows inserted by queries in the database. + name: postgresql.tup_inserted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_inserted}' + - description: Number of rows returned by queries in the database. 
+ name: postgresql.tup_returned + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_returned}' + - description: Number of rows updated by queries in the database. + name: postgresql.tup_updated + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_updated}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver version: latest @@ -264,6 +334,26 @@ resourceMetrics: stringValue: otel2 scopeMetrics: - metrics: + - description: Number of times disk blocks were found already in the buffer cache. + name: postgresql.blks_hit + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_hit}' + - description: Number of disk blocks read in this database. + name: postgresql.blks_read + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_read}' - description: The number of commits. name: postgresql.commits sum: @@ -322,6 +412,56 @@ resourceMetrics: timeUnixNano: "2000000" isMonotonic: true unit: '{temp_file}' + - description: Number of rows deleted by queries in the database. + name: postgresql.tup_deleted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_deleted}' + - description: Number of rows fetched by queries in the database. + name: postgresql.tup_fetched + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_fetched}' + - description: Number of rows inserted by queries in the database. + name: postgresql.tup_inserted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_inserted}' + - description: Number of rows returned by queries in the database. + name: postgresql.tup_returned + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_returned}' + - description: Number of rows updated by queries in the database. + name: postgresql.tup_updated + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_updated}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver version: latest diff --git a/receiver/postgresqlreceiver/testdata/integration/expected_multi_db_connpool.yaml b/receiver/postgresqlreceiver/testdata/integration/expected_multi_db_connpool.yaml index c8431659636a..7cb492575fd7 100644 --- a/receiver/postgresqlreceiver/testdata/integration/expected_multi_db_connpool.yaml +++ b/receiver/postgresqlreceiver/testdata/integration/expected_multi_db_connpool.yaml @@ -335,6 +335,26 @@ resourceMetrics: startTimeUnixNano: "1706802402706723341" timeUnixNano: "1706802461712893428" unit: "1" + - description: Number of times disk blocks were found already in the buffer cache. 
+ name: postgresql.blks_hit + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1706802402706723341" + timeUnixNano: "1706802461712893428" + isMonotonic: true + unit: '{blks_hit}' + - description: Number of disk blocks read in this database. + name: postgresql.blks_read + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1706802402706723341" + timeUnixNano: "1706802461712893428" + isMonotonic: true + unit: '{blks_read}' - description: The number of commits. name: postgresql.commits sum: @@ -393,6 +413,56 @@ resourceMetrics: timeUnixNano: "1706802461712893428" isMonotonic: true unit: '{temp_file}' + - description: Number of rows deleted by queries in the database. + name: postgresql.tup_deleted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1706802402706723341" + timeUnixNano: "1706802461712893428" + isMonotonic: true + unit: '{tup_deleted}' + - description: Number of rows fetched by queries in the database. + name: postgresql.tup_fetched + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1706802402706723341" + timeUnixNano: "1706802461712893428" + isMonotonic: true + unit: '{tup_fetched}' + - description: Number of rows inserted by queries in the database. + name: postgresql.tup_inserted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1706802402706723341" + timeUnixNano: "1706802461712893428" + isMonotonic: true + unit: '{tup_inserted}' + - description: Number of rows returned by queries in the database. + name: postgresql.tup_returned + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1706802402706723341" + timeUnixNano: "1706802461712893428" + isMonotonic: true + unit: '{tup_returned}' + - description: Number of rows updated by queries in the database. + name: postgresql.tup_updated + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1706802402706723341" + timeUnixNano: "1706802461712893428" + isMonotonic: true + unit: '{tup_updated}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver version: latest @@ -800,6 +870,26 @@ resourceMetrics: startTimeUnixNano: "1706802402706723341" timeUnixNano: "1706802461712893428" unit: "1" + - description: Number of times disk blocks were found already in the buffer cache. + name: postgresql.blks_hit + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_hit}' + - description: Number of disk blocks read in this database. + name: postgresql.blks_read + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_read}' - description: The number of commits. name: postgresql.commits sum: @@ -858,6 +948,56 @@ resourceMetrics: timeUnixNano: "1706802461712893428" isMonotonic: true unit: '{temp_file}' + - description: Number of rows deleted by queries in the database. + name: postgresql.tup_deleted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1706802402706723341" + timeUnixNano: "1706802461712893428" + isMonotonic: true + unit: '{tup_deleted}' + - description: Number of rows fetched by queries in the database. 
+ name: postgresql.tup_fetched + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1706802402706723341" + timeUnixNano: "1706802461712893428" + isMonotonic: true + unit: '{tup_fetched}' + - description: Number of rows inserted by queries in the database. + name: postgresql.tup_inserted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1706802402706723341" + timeUnixNano: "1706802461712893428" + isMonotonic: true + unit: '{tup_inserted}' + - description: Number of rows returned by queries in the database. + name: postgresql.tup_returned + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1706802402706723341" + timeUnixNano: "1706802461712893428" + isMonotonic: true + unit: '{tup_returned}' + - description: Number of rows updated by queries in the database. + name: postgresql.tup_updated + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1706802402706723341" + timeUnixNano: "1706802461712893428" + isMonotonic: true + unit: '{tup_updated}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver version: latest diff --git a/receiver/postgresqlreceiver/testdata/integration/expected_multi_db_schemaattr.yaml b/receiver/postgresqlreceiver/testdata/integration/expected_multi_db_schemaattr.yaml index 8bbeda6fe2b7..454ceb1e1341 100644 --- a/receiver/postgresqlreceiver/testdata/integration/expected_multi_db_schemaattr.yaml +++ b/receiver/postgresqlreceiver/testdata/integration/expected_multi_db_schemaattr.yaml @@ -196,6 +196,26 @@ resourceMetrics: stringValue: otel scopeMetrics: - metrics: + - description: Number of times disk blocks were found already in the buffer cache. + name: postgresql.blks_hit + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_hit}' + - description: Number of disk blocks read in this database. + name: postgresql.blks_read + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_read}' - description: The number of commits. name: postgresql.commits sum: @@ -254,6 +274,56 @@ resourceMetrics: timeUnixNano: "2000000" isMonotonic: true unit: '{temp_file}' + - description: Number of rows deleted by queries in the database. + name: postgresql.tup_deleted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_deleted}' + - description: Number of rows fetched by queries in the database. + name: postgresql.tup_fetched + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_fetched}' + - description: Number of rows inserted by queries in the database. + name: postgresql.tup_inserted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_inserted}' + - description: Number of rows returned by queries in the database. + name: postgresql.tup_returned + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_returned}' + - description: Number of rows updated by queries in the database. 
+ name: postgresql.tup_updated + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_updated}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver version: latest @@ -264,6 +334,26 @@ resourceMetrics: stringValue: otel2 scopeMetrics: - metrics: + - description: Number of times disk blocks were found already in the buffer cache. + name: postgresql.blks_hit + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_hit}' + - description: Number of disk blocks read in this database. + name: postgresql.blks_read + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_read}' - description: The number of commits. name: postgresql.commits sum: @@ -322,6 +412,56 @@ resourceMetrics: timeUnixNano: "2000000" isMonotonic: true unit: '{temp_file}' + - description: Number of rows deleted by queries in the database. + name: postgresql.tup_deleted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_deleted}' + - description: Number of rows fetched by queries in the database. + name: postgresql.tup_fetched + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_fetched}' + - description: Number of rows inserted by queries in the database. + name: postgresql.tup_inserted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_inserted}' + - description: Number of rows returned by queries in the database. + name: postgresql.tup_returned + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_returned}' + - description: Number of rows updated by queries in the database. + name: postgresql.tup_updated + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_updated}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver version: latest diff --git a/receiver/postgresqlreceiver/testdata/integration/expected_single_db.yaml b/receiver/postgresqlreceiver/testdata/integration/expected_single_db.yaml index 48ed6f008318..ff3ccfa944ee 100644 --- a/receiver/postgresqlreceiver/testdata/integration/expected_single_db.yaml +++ b/receiver/postgresqlreceiver/testdata/integration/expected_single_db.yaml @@ -196,6 +196,26 @@ resourceMetrics: stringValue: otel scopeMetrics: - metrics: + - description: Number of times disk blocks were found already in the buffer cache. + name: postgresql.blks_hit + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_hit}' + - description: Number of disk blocks read in this database. + name: postgresql.blks_read + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_read}' - description: The number of commits. 
name: postgresql.commits sum: @@ -254,6 +274,56 @@ resourceMetrics: timeUnixNano: "2000000" isMonotonic: true unit: '{temp_file}' + - description: Number of rows deleted by queries in the database. + name: postgresql.tup_deleted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_deleted}' + - description: Number of rows fetched by queries in the database. + name: postgresql.tup_fetched + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_fetched}' + - description: Number of rows inserted by queries in the database. + name: postgresql.tup_inserted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_inserted}' + - description: Number of rows returned by queries in the database. + name: postgresql.tup_returned + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_returned}' + - description: Number of rows updated by queries in the database. + name: postgresql.tup_updated + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_updated}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver version: latest diff --git a/receiver/postgresqlreceiver/testdata/integration/expected_single_db_connpool.yaml b/receiver/postgresqlreceiver/testdata/integration/expected_single_db_connpool.yaml index 512e38c76b89..eccc2d04d749 100644 --- a/receiver/postgresqlreceiver/testdata/integration/expected_single_db_connpool.yaml +++ b/receiver/postgresqlreceiver/testdata/integration/expected_single_db_connpool.yaml @@ -335,6 +335,26 @@ resourceMetrics: startTimeUnixNano: "1706802337738657906" timeUnixNano: "1706802396744882628" unit: "1" + - description: Number of times disk blocks were found already in the buffer cache. + name: postgresql.blks_hit + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1706802402706723341" + timeUnixNano: "1706802461712893428" + isMonotonic: true + unit: '{blks_hit}' + - description: Number of disk blocks read in this database. + name: postgresql.blks_read + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1706802402706723341" + timeUnixNano: "1706802461712893428" + isMonotonic: true + unit: '{blks_read}' - description: The number of commits. name: postgresql.commits sum: @@ -393,6 +413,56 @@ resourceMetrics: timeUnixNano: "1706802396744882628" isMonotonic: true unit: '{temp_file}' + - description: Number of rows deleted by queries in the database. + name: postgresql.tup_deleted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1706802402706723341" + timeUnixNano: "1706802461712893428" + isMonotonic: true + unit: '{tup_deleted}' + - description: Number of rows fetched by queries in the database. + name: postgresql.tup_fetched + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1706802402706723341" + timeUnixNano: "1706802461712893428" + isMonotonic: true + unit: '{tup_fetched}' + - description: Number of rows inserted by queries in the database. 
+ name: postgresql.tup_inserted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1706802402706723341" + timeUnixNano: "1706802461712893428" + isMonotonic: true + unit: '{tup_inserted}' + - description: Number of rows returned by queries in the database. + name: postgresql.tup_returned + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1706802402706723341" + timeUnixNano: "1706802461712893428" + isMonotonic: true + unit: '{tup_returned}' + - description: Number of rows updated by queries in the database. + name: postgresql.tup_updated + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1706802402706723341" + timeUnixNano: "1706802461712893428" + isMonotonic: true + unit: '{tup_updated}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver version: latest diff --git a/receiver/postgresqlreceiver/testdata/integration/expected_single_db_schemaattr.yaml b/receiver/postgresqlreceiver/testdata/integration/expected_single_db_schemaattr.yaml index fdcf6e524aec..07c2e86e9a0a 100644 --- a/receiver/postgresqlreceiver/testdata/integration/expected_single_db_schemaattr.yaml +++ b/receiver/postgresqlreceiver/testdata/integration/expected_single_db_schemaattr.yaml @@ -196,6 +196,26 @@ resourceMetrics: stringValue: otel scopeMetrics: - metrics: + - description: Number of times disk blocks were found already in the buffer cache. + name: postgresql.blks_hit + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_hit}' + - description: Number of disk blocks read in this database. + name: postgresql.blks_read + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_read}' - description: The number of commits. name: postgresql.commits sum: @@ -254,6 +274,56 @@ resourceMetrics: timeUnixNano: "2000000" isMonotonic: true unit: '{temp_file}' + - description: Number of rows deleted by queries in the database. + name: postgresql.tup_deleted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_deleted}' + - description: Number of rows fetched by queries in the database. + name: postgresql.tup_fetched + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_fetched}' + - description: Number of rows inserted by queries in the database. + name: postgresql.tup_inserted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_inserted}' + - description: Number of rows returned by queries in the database. + name: postgresql.tup_returned + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_returned}' + - description: Number of rows updated by queries in the database. 
+ name: postgresql.tup_updated + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_updated}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver version: latest diff --git a/receiver/postgresqlreceiver/testdata/scraper/multiple/expected.yaml b/receiver/postgresqlreceiver/testdata/scraper/multiple/expected.yaml index 7518c7be6df5..2fb0cfbeed3f 100644 --- a/receiver/postgresqlreceiver/testdata/scraper/multiple/expected.yaml +++ b/receiver/postgresqlreceiver/testdata/scraper/multiple/expected.yaml @@ -221,6 +221,26 @@ resourceMetrics: startTimeUnixNano: "1000000" timeUnixNano: "2000000" unit: "1" + - description: Number of times disk blocks were found already in the buffer cache. + name: postgresql.blks_hit + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "11" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_hit}' + - description: Number of disk blocks read in this database. + name: postgresql.blks_read + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "12" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_read}' - description: The number of commits. name: postgresql.commits sum: @@ -279,6 +299,56 @@ resourceMetrics: timeUnixNano: "2000000" isMonotonic: true unit: '{temp_file}' + - description: Number of rows deleted by queries in the database. + name: postgresql.tup_deleted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "10" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_deleted}' + - description: Number of rows fetched by queries in the database. + name: postgresql.tup_fetched + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "8" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_fetched}' + - description: Number of rows inserted by queries in the database. + name: postgresql.tup_inserted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "9" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_inserted}' + - description: Number of rows returned by queries in the database. + name: postgresql.tup_returned + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "7" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_returned}' + - description: Number of rows updated by queries in the database. + name: postgresql.tup_updated + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "6" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_updated}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver version: latest @@ -298,6 +368,26 @@ resourceMetrics: startTimeUnixNano: "1000000" timeUnixNano: "2000000" unit: "1" + - description: Number of times disk blocks were found already in the buffer cache. + name: postgresql.blks_hit + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "10" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_hit}' + - description: Number of disk blocks read in this database. 
+ name: postgresql.blks_read + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "11" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_read}' - description: The number of commits. name: postgresql.commits sum: @@ -356,6 +446,56 @@ resourceMetrics: timeUnixNano: "2000000" isMonotonic: true unit: '{temp_file}' + - description: Number of rows deleted by queries in the database. + name: postgresql.tup_deleted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "9" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_deleted}' + - description: Number of rows fetched by queries in the database. + name: postgresql.tup_fetched + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "7" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_fetched}' + - description: Number of rows inserted by queries in the database. + name: postgresql.tup_inserted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "8" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_inserted}' + - description: Number of rows returned by queries in the database. + name: postgresql.tup_returned + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "6" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_returned}' + - description: Number of rows updated by queries in the database. + name: postgresql.tup_updated + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "5" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_updated}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver version: latest @@ -375,6 +515,26 @@ resourceMetrics: startTimeUnixNano: "1000000" timeUnixNano: "2000000" unit: "1" + - description: Number of times disk blocks were found already in the buffer cache. + name: postgresql.blks_hit + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "12" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_hit}' + - description: Number of disk blocks read in this database. + name: postgresql.blks_read + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "13" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_read}' - description: The number of commits. name: postgresql.commits sum: @@ -433,6 +593,56 @@ resourceMetrics: timeUnixNano: "2000000" isMonotonic: true unit: '{temp_file}' + - description: Number of rows deleted by queries in the database. + name: postgresql.tup_deleted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "11" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_deleted}' + - description: Number of rows fetched by queries in the database. + name: postgresql.tup_fetched + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "9" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_fetched}' + - description: Number of rows inserted by queries in the database. + name: postgresql.tup_inserted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "10" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_inserted}' + - description: Number of rows returned by queries in the database. 
+ name: postgresql.tup_returned + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "8" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_returned}' + - description: Number of rows updated by queries in the database. + name: postgresql.tup_updated + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "7" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_updated}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver version: latest diff --git a/receiver/postgresqlreceiver/testdata/scraper/multiple/expected_imprecise_lag.yaml b/receiver/postgresqlreceiver/testdata/scraper/multiple/expected_imprecise_lag.yaml index c2d52cd2bec9..0eebeb9b25f2 100644 --- a/receiver/postgresqlreceiver/testdata/scraper/multiple/expected_imprecise_lag.yaml +++ b/receiver/postgresqlreceiver/testdata/scraper/multiple/expected_imprecise_lag.yaml @@ -221,6 +221,26 @@ resourceMetrics: startTimeUnixNano: "1000000" timeUnixNano: "2000000" unit: "1" + - description: Number of times disk blocks were found already in the buffer cache. + name: postgresql.blks_hit + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "11" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_hit}' + - description: Number of disk blocks read in this database. + name: postgresql.blks_read + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "12" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_read}' - description: The number of commits. name: postgresql.commits sum: @@ -279,6 +299,56 @@ resourceMetrics: timeUnixNano: "2000000" isMonotonic: true unit: '{temp_file}' + - description: Number of rows deleted by queries in the database. + name: postgresql.tup_deleted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "10" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_deleted}' + - description: Number of rows fetched by queries in the database. + name: postgresql.tup_fetched + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "8" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_fetched}' + - description: Number of rows inserted by queries in the database. + name: postgresql.tup_inserted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "9" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_inserted}' + - description: Number of rows returned by queries in the database. + name: postgresql.tup_returned + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "7" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_returned}' + - description: Number of rows updated by queries in the database. + name: postgresql.tup_updated + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "6" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_updated}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver version: latest @@ -298,6 +368,26 @@ resourceMetrics: startTimeUnixNano: "1000000" timeUnixNano: "2000000" unit: "1" + - description: Number of times disk blocks were found already in the buffer cache. 
+ name: postgresql.blks_hit + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "10" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_hit}' + - description: Number of disk blocks read in this database. + name: postgresql.blks_read + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "11" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_read}' - description: The number of commits. name: postgresql.commits sum: @@ -356,6 +446,56 @@ resourceMetrics: timeUnixNano: "2000000" isMonotonic: true unit: '{temp_file}' + - description: Number of rows deleted by queries in the database. + name: postgresql.tup_deleted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "9" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_deleted}' + - description: Number of rows fetched by queries in the database. + name: postgresql.tup_fetched + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "7" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_fetched}' + - description: Number of rows inserted by queries in the database. + name: postgresql.tup_inserted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "8" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_inserted}' + - description: Number of rows returned by queries in the database. + name: postgresql.tup_returned + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "6" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_returned}' + - description: Number of rows updated by queries in the database. + name: postgresql.tup_updated + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "5" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_updated}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver version: latest @@ -375,6 +515,26 @@ resourceMetrics: startTimeUnixNano: "1000000" timeUnixNano: "2000000" unit: "1" + - description: Number of times disk blocks were found already in the buffer cache. + name: postgresql.blks_hit + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "12" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_hit}' + - description: Number of disk blocks read in this database. + name: postgresql.blks_read + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "13" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_read}' - description: The number of commits. name: postgresql.commits sum: @@ -433,6 +593,56 @@ resourceMetrics: timeUnixNano: "2000000" isMonotonic: true unit: '{temp_file}' + - description: Number of rows deleted by queries in the database. + name: postgresql.tup_deleted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "11" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_deleted}' + - description: Number of rows fetched by queries in the database. + name: postgresql.tup_fetched + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "9" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_fetched}' + - description: Number of rows inserted by queries in the database. 
+ name: postgresql.tup_inserted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "10" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_inserted}' + - description: Number of rows returned by queries in the database. + name: postgresql.tup_returned + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "8" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_returned}' + - description: Number of rows updated by queries in the database. + name: postgresql.tup_updated + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "7" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_updated}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver version: latest diff --git a/receiver/postgresqlreceiver/testdata/scraper/multiple/expected_imprecise_lag_schemaattr.yaml b/receiver/postgresqlreceiver/testdata/scraper/multiple/expected_imprecise_lag_schemaattr.yaml index edc6795581e0..694dfef86fe3 100644 --- a/receiver/postgresqlreceiver/testdata/scraper/multiple/expected_imprecise_lag_schemaattr.yaml +++ b/receiver/postgresqlreceiver/testdata/scraper/multiple/expected_imprecise_lag_schemaattr.yaml @@ -221,6 +221,26 @@ resourceMetrics: startTimeUnixNano: "1000000" timeUnixNano: "2000000" unit: "1" + - description: Number of times disk blocks were found already in the buffer cache. + name: postgresql.blks_hit + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "11" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_hit}' + - description: Number of disk blocks read in this database. + name: postgresql.blks_read + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "12" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_read}' - description: The number of commits. name: postgresql.commits sum: @@ -279,6 +299,56 @@ resourceMetrics: timeUnixNano: "2000000" isMonotonic: true unit: '{temp_file}' + - description: Number of rows deleted by queries in the database. + name: postgresql.tup_deleted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "10" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_deleted}' + - description: Number of rows fetched by queries in the database. + name: postgresql.tup_fetched + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "8" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_fetched}' + - description: Number of rows inserted by queries in the database. + name: postgresql.tup_inserted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "9" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_inserted}' + - description: Number of rows returned by queries in the database. + name: postgresql.tup_returned + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "7" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_returned}' + - description: Number of rows updated by queries in the database. 
+ name: postgresql.tup_updated + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "6" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_updated}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver version: latest @@ -298,6 +368,26 @@ resourceMetrics: startTimeUnixNano: "1000000" timeUnixNano: "2000000" unit: "1" + - description: Number of times disk blocks were found already in the buffer cache. + name: postgresql.blks_hit + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "10" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_hit}' + - description: Number of disk blocks read in this database. + name: postgresql.blks_read + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "11" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_read}' - description: The number of commits. name: postgresql.commits sum: @@ -356,6 +446,56 @@ resourceMetrics: timeUnixNano: "2000000" isMonotonic: true unit: '{temp_file}' + - description: Number of rows deleted by queries in the database. + name: postgresql.tup_deleted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "9" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_deleted}' + - description: Number of rows fetched by queries in the database. + name: postgresql.tup_fetched + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "7" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_fetched}' + - description: Number of rows inserted by queries in the database. + name: postgresql.tup_inserted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "8" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_inserted}' + - description: Number of rows returned by queries in the database. + name: postgresql.tup_returned + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "6" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_returned}' + - description: Number of rows updated by queries in the database. + name: postgresql.tup_updated + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "5" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_updated}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver version: latest @@ -375,6 +515,26 @@ resourceMetrics: startTimeUnixNano: "1000000" timeUnixNano: "2000000" unit: "1" + - description: Number of times disk blocks were found already in the buffer cache. + name: postgresql.blks_hit + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "12" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_hit}' + - description: Number of disk blocks read in this database. + name: postgresql.blks_read + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "13" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_read}' - description: The number of commits. name: postgresql.commits sum: @@ -433,6 +593,56 @@ resourceMetrics: timeUnixNano: "2000000" isMonotonic: true unit: '{temp_file}' + - description: Number of rows deleted by queries in the database. 
+ name: postgresql.tup_deleted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "11" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_deleted}' + - description: Number of rows fetched by queries in the database. + name: postgresql.tup_fetched + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "9" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_fetched}' + - description: Number of rows inserted by queries in the database. + name: postgresql.tup_inserted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "10" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_inserted}' + - description: Number of rows returned by queries in the database. + name: postgresql.tup_returned + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "8" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_returned}' + - description: Number of rows updated by queries in the database. + name: postgresql.tup_updated + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "7" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_updated}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver version: latest diff --git a/receiver/postgresqlreceiver/testdata/scraper/multiple/expected_schemaattr.yaml b/receiver/postgresqlreceiver/testdata/scraper/multiple/expected_schemaattr.yaml index 785e977b5dd2..29f1db008b3c 100644 --- a/receiver/postgresqlreceiver/testdata/scraper/multiple/expected_schemaattr.yaml +++ b/receiver/postgresqlreceiver/testdata/scraper/multiple/expected_schemaattr.yaml @@ -221,6 +221,26 @@ resourceMetrics: startTimeUnixNano: "1000000" timeUnixNano: "2000000" unit: "1" + - description: Number of times disk blocks were found already in the buffer cache. + name: postgresql.blks_hit + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "11" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_hit}' + - description: Number of disk blocks read in this database. + name: postgresql.blks_read + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "12" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_read}' - description: The number of commits. name: postgresql.commits sum: @@ -279,6 +299,56 @@ resourceMetrics: timeUnixNano: "2000000" isMonotonic: true unit: '{temp_file}' + - description: Number of rows deleted by queries in the database. + name: postgresql.tup_deleted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "10" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_deleted}' + - description: Number of rows fetched by queries in the database. + name: postgresql.tup_fetched + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "8" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_fetched}' + - description: Number of rows inserted by queries in the database. + name: postgresql.tup_inserted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "9" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_inserted}' + - description: Number of rows returned by queries in the database. 
+ name: postgresql.tup_returned + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "7" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_returned}' + - description: Number of rows updated by queries in the database. + name: postgresql.tup_updated + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "6" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_updated}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver version: latest @@ -298,6 +368,26 @@ resourceMetrics: startTimeUnixNano: "1000000" timeUnixNano: "2000000" unit: "1" + - description: Number of times disk blocks were found already in the buffer cache. + name: postgresql.blks_hit + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "10" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_hit}' + - description: Number of disk blocks read in this database. + name: postgresql.blks_read + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "11" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_read}' - description: The number of commits. name: postgresql.commits sum: @@ -356,6 +446,56 @@ resourceMetrics: timeUnixNano: "2000000" isMonotonic: true unit: '{temp_file}' + - description: Number of rows deleted by queries in the database. + name: postgresql.tup_deleted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "9" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_deleted}' + - description: Number of rows fetched by queries in the database. + name: postgresql.tup_fetched + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "7" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_fetched}' + - description: Number of rows inserted by queries in the database. + name: postgresql.tup_inserted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "8" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_inserted}' + - description: Number of rows returned by queries in the database. + name: postgresql.tup_returned + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "6" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_returned}' + - description: Number of rows updated by queries in the database. + name: postgresql.tup_updated + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "5" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_updated}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver version: latest @@ -375,6 +515,26 @@ resourceMetrics: startTimeUnixNano: "1000000" timeUnixNano: "2000000" unit: "1" + - description: Number of times disk blocks were found already in the buffer cache. + name: postgresql.blks_hit + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "12" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_hit}' + - description: Number of disk blocks read in this database. + name: postgresql.blks_read + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "13" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_read}' - description: The number of commits. 
name: postgresql.commits sum: @@ -433,6 +593,56 @@ resourceMetrics: timeUnixNano: "2000000" isMonotonic: true unit: '{temp_file}' + - description: Number of rows deleted by queries in the database. + name: postgresql.tup_deleted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "11" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_deleted}' + - description: Number of rows fetched by queries in the database. + name: postgresql.tup_fetched + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "9" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_fetched}' + - description: Number of rows inserted by queries in the database. + name: postgresql.tup_inserted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "10" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_inserted}' + - description: Number of rows returned by queries in the database. + name: postgresql.tup_returned + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "8" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_returned}' + - description: Number of rows updated by queries in the database. + name: postgresql.tup_updated + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "7" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_updated}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver version: latest diff --git a/receiver/postgresqlreceiver/testdata/scraper/otel/expected.yaml b/receiver/postgresqlreceiver/testdata/scraper/otel/expected.yaml index ca8d32dfb96e..5b9b2fdcc84e 100644 --- a/receiver/postgresqlreceiver/testdata/scraper/otel/expected.yaml +++ b/receiver/postgresqlreceiver/testdata/scraper/otel/expected.yaml @@ -221,6 +221,26 @@ resourceMetrics: startTimeUnixNano: "1000000" timeUnixNano: "2000000" unit: "1" + - description: Number of times disk blocks were found already in the buffer cache. + name: postgresql.blks_hit + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "10" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_hit}' + - description: Number of disk blocks read in this database. + name: postgresql.blks_read + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "11" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_read}' - description: The number of commits. name: postgresql.commits sum: @@ -279,6 +299,56 @@ resourceMetrics: timeUnixNano: "2000000" isMonotonic: true unit: '{temp_file}' + - description: Number of rows deleted by queries in the database. + name: postgresql.tup_deleted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "9" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_deleted}' + - description: Number of rows fetched by queries in the database. + name: postgresql.tup_fetched + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "7" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_fetched}' + - description: Number of rows inserted by queries in the database. 
+ name: postgresql.tup_inserted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "8" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_inserted}' + - description: Number of rows returned by queries in the database. + name: postgresql.tup_returned + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "6" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_returned}' + - description: Number of rows updated by queries in the database. + name: postgresql.tup_updated + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "5" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_updated}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver version: latest diff --git a/receiver/postgresqlreceiver/testdata/scraper/otel/expected_schemaattr.yaml b/receiver/postgresqlreceiver/testdata/scraper/otel/expected_schemaattr.yaml index e335959a9871..c7908ab42202 100644 --- a/receiver/postgresqlreceiver/testdata/scraper/otel/expected_schemaattr.yaml +++ b/receiver/postgresqlreceiver/testdata/scraper/otel/expected_schemaattr.yaml @@ -221,6 +221,26 @@ resourceMetrics: startTimeUnixNano: "1000000" timeUnixNano: "2000000" unit: "1" + - description: Number of times disk blocks were found already in the buffer cache. + name: postgresql.blks_hit + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "10" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_hit}' + - description: Number of disk blocks read in this database. + name: postgresql.blks_read + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "11" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{blks_read}' - description: The number of commits. name: postgresql.commits sum: @@ -279,6 +299,56 @@ resourceMetrics: timeUnixNano: "2000000" isMonotonic: true unit: '{temp_file}' + - description: Number of rows deleted by queries in the database. + name: postgresql.tup_deleted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "9" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_deleted}' + - description: Number of rows fetched by queries in the database. + name: postgresql.tup_fetched + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "7" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_fetched}' + - description: Number of rows inserted by queries in the database. + name: postgresql.tup_inserted + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "8" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_inserted}' + - description: Number of rows returned by queries in the database. + name: postgresql.tup_returned + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "6" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_returned}' + - description: Number of rows updated by queries in the database. + name: postgresql.tup_updated + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "5" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{tup_updated}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver version: latest
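
To try the new counters against a live server while reviewing, a minimal collector configuration sketch is below. It is an assumption-laden example, not taken from this patch: the endpoint, credentials, and database name are placeholders, and the per-metric `enabled: true` toggles are included defensively in case some of the new metrics are not enabled by default (the metadata.yaml defaults are not shown here).

```yaml
receivers:
  postgresql:
    endpoint: localhost:5432        # placeholder host:port
    transport: tcp
    username: otelu                 # placeholder credentials
    password: otelp
    databases: [otel]               # placeholder database list
    collection_interval: 10s
    metrics:
      # Explicitly enable the counters added in this change, in case they
      # are not on by default.
      postgresql.blks_hit:
        enabled: true
      postgresql.blks_read:
        enabled: true
      postgresql.tup_updated:
        enabled: true
      postgresql.tup_returned:
        enabled: true
      postgresql.tup_fetched:
        enabled: true
      postgresql.tup_inserted:
        enabled: true
      postgresql.tup_deleted:
        enabled: true

exporters:
  debug:
    verbosity: detailed

service:
  pipelines:
    metrics:
      receivers: [postgresql]
      exporters: [debug]
```

With this pipeline the debug exporter prints the scraped `postgresql.blks_*` and `postgresql.tup_*` sums per database, which makes it easy to eyeball them against the shapes in the expected testdata above.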