diff --git a/postgres/datadog_checks/postgres/postgres.py b/postgres/datadog_checks/postgres/postgres.py
index 8069ebd6830b5..24ed04877dc18 100644
--- a/postgres/datadog_checks/postgres/postgres.py
+++ b/postgres/datadog_checks/postgres/postgres.py
@@ -16,7 +16,13 @@
 from datadog_checks.postgres import aws
 from datadog_checks.postgres.metadata import PostgresMetadata
 from datadog_checks.postgres.metrics_cache import PostgresMetricsCache
-from datadog_checks.postgres.relationsmanager import INDEX_BLOAT, RELATION_METRICS, TABLE_BLOAT, RelationsManager
+from datadog_checks.postgres.relationsmanager import (
+    DYNAMIC_RELATION_QUERIES,
+    INDEX_BLOAT,
+    RELATION_METRICS,
+    TABLE_BLOAT,
+    RelationsManager,
+)
 from datadog_checks.postgres.statement_samples import PostgresStatementSamples
 from datadog_checks.postgres.statements import PostgresStatementMetrics
@@ -85,7 +91,7 @@ def __init__(self, name, init_config, instances):
         self.statement_metrics = PostgresStatementMetrics(self, self._config, shutdown_callback=self._close_db_pool)
         self.statement_samples = PostgresStatementSamples(self, self._config, shutdown_callback=self._close_db_pool)
         self.metadata_samples = PostgresMetadata(self, self._config, shutdown_callback=self._close_db_pool)
-        self._relations_manager = RelationsManager(self._config.relations)
+        self._relations_manager = RelationsManager(self._config.relations, self._config.max_relations)
         self._clean_state()
         self.check_initializations.append(lambda: RelationsManager.validate_relations_config(self._config.relations))
         self.check_initializations.append(self.set_resolved_hostname_metadata)
@@ -191,6 +197,14 @@ def dynamic_queries(self):
             self.log.debug("no dynamic queries defined")
             return None
 
+        # Dynamic queries for relationsmanager
+        if self._config.relations:
+            for query in DYNAMIC_RELATION_QUERIES:
+                query = copy.copy(query)
+                formatted_query = self._relations_manager.filter_relation_query(query['query'], 'nspname')
+                query['query'] = formatted_query
+                queries.append(query)
+
         self._dynamic_queries = self._new_query_executor(queries)
         self._dynamic_queries.compile_queries()
         self.log.debug("initialized %s dynamic querie(s)", len(queries))
diff --git a/postgres/datadog_checks/postgres/relationsmanager.py b/postgres/datadog_checks/postgres/relationsmanager.py
index 5d31aa251ce48..64f902e93a14c 100644
--- a/postgres/datadog_checks/postgres/relationsmanager.py
+++ b/postgres/datadog_checks/postgres/relationsmanager.py
@@ -108,27 +108,66 @@
 # The catalog pg_class catalogs tables and most everything else that has columns or is otherwise similar to a table.
 # For this integration we are restricting the query to ordinary tables.
-SIZE_METRICS = {
-    'descriptors': [('nspname', 'schema'), ('relname', 'table')],
-    'metrics': {
-        'pg_table_size(C.oid) as table_size': ('postgresql.table_size', AgentCheck.gauge),
-        'pg_indexes_size(C.oid) as index_size': ('postgresql.index_size', AgentCheck.gauge),
-        'pg_total_relation_size(C.oid) as total_size': ('postgresql.total_size', AgentCheck.gauge),
-    },
-    'relation': True,
+#
+# Sizes: Calling pg_relation_size, pg_table_size, pg_indexes_size or pg_total_relation_size
+# can be expensive as the relation needs to be locked and stat syscalls are made under the hood.
+#
+# We want to limit those calls as much as possible at the cost of precision.
+# We also want to get toast size separated from the main table size.
+# We can't use pg_total_relation_size, which includes toast, index and table size.
+# Same for pg_table_size, which includes both toast and table size.
+#
+# We will mainly rely on pg_relation_size, which only gets the size of the main fork.
+# To keep postgresql.table_size's old behaviour, which was based on pg_table_size, we will
+# approximate table_size as (relation_size + toast_size). This will ignore FSM and VM size,
+# but their sizes are dwarfed by the relation's size and it's an acceptable trade-off
+# to ignore them to lower the number of stat calls.
+#
+# The previous version filtered on nspname !~ '^pg_toast'. Since the pg_toast namespace only
+# contains indexes and toast tables, the filter was redundant with relkind = 'r'.
+QUERY_PG_CLASS = {
+    'name': 'pg_class',
     'query': """
-SELECT
-    N.nspname,
-    relname,
-    {metrics_columns}
-FROM pg_class C
-LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace)
-WHERE nspname NOT IN ('pg_catalog', 'information_schema') AND
-    nspname !~ '^pg_toast' AND
-    relkind = 'r' AND
-    {relations}""",
+SELECT current_database(),
+       s.schemaname, s.table, s.partition_of,
+       s.relpages, s.reltuples, s.relallvisible,
+       s.relation_size + s.toast_size,
+       s.relation_size,
+       s.index_size,
+       s.toast_size,
+       s.relation_size + s.index_size + s.toast_size
+FROM
+  (SELECT
+      N.nspname as schemaname,
+      relname as table,
+      I.inhparent::regclass AS partition_of,
+      C.relpages, C.reltuples, C.relallvisible,
+      pg_relation_size(C.oid) as relation_size,
+      CASE WHEN C.relhasindex THEN pg_indexes_size(C.oid) ELSE 0 END as index_size,
+      CASE WHEN C.reltoastrelid > 0 THEN pg_relation_size(C.reltoastrelid) ELSE 0 END as toast_size
+  FROM pg_class C
+  LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace)
+  LEFT JOIN pg_inherits I ON (I.inhrelid = C.oid)
+  WHERE NOT (nspname = ANY('{{pg_catalog,information_schema}}')) AND
+      relkind = 'r' AND
+      {relations} {limits}) as s""",
+    'columns': [
+        {'name': 'db', 'type': 'tag'},
+        {'name': 'schema', 'type': 'tag'},
+        {'name': 'table', 'type': 'tag'},
+        {'name': 'partition_of', 'type': 'tag_not_null'},
+        {'name': 'postgresql.relation.pages', 'type': 'gauge'},
+        {'name': 'postgresql.relation.tuples', 'type': 'gauge'},
+        {'name': 'postgresql.relation.all_visible', 'type': 'gauge'},
+        {'name': 'postgresql.table_size', 'type': 'gauge'},
+        {'name': 'postgresql.relation_size', 'type': 'gauge'},
+        {'name': 'postgresql.index_size', 'type': 'gauge'},
+        {'name': 'postgresql.toast_size', 'type': 'gauge'},
+        {'name': 'postgresql.total_size', 'type': 'gauge'},
+    ],
 }
+
 
 # The pg_statio_all_tables view will contain one row for each table in the current database,
 # showing statistics about I/O on that specific table. The pg_statio_user_tables views contain the same information,
 # but filtered to only show user tables.
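Note on the approximation above: the derived columns in the outer SELECT are plain sums over the per-relation primitives, so the whole size picture costs at most two pg_relation_size calls plus one pg_indexes_size call per row. A standalone sketch (illustrative only; the byte values are made up):

```python
# Stand-ins for pg_relation_size(C.oid), pg_indexes_size(C.oid) and
# pg_relation_size(C.reltoastrelid) -- the only expensive calls the query makes.
page = 8192
relation_size = 3 * page  # main fork only
index_size = 8 * page     # 0 when relhasindex is false
toast_size = 1 * page     # 0 when the table has no toast table

# Derived columns computed in the SELECT list of QUERY_PG_CLASS:
table_size = relation_size + toast_size               # approximates pg_table_size, minus FSM/VM
total_size = relation_size + index_size + toast_size  # approximates pg_total_relation_size
print(table_size, total_size)  # 32768 98304
```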
@@ -259,16 +298,18 @@
     'relation': True,
 }
 
-RELATION_METRICS = [LOCK_METRICS, REL_METRICS, IDX_METRICS, SIZE_METRICS, STATIO_METRICS]
+RELATION_METRICS = [LOCK_METRICS, REL_METRICS, IDX_METRICS, STATIO_METRICS]
+DYNAMIC_RELATION_QUERIES = [QUERY_PG_CLASS]
 
 
 class RelationsManager(object):
     """Builds queries to collect metrics about relations"""
 
-    def __init__(self, yamlconfig):
-        # type: (List[Union[str, Dict]]) -> None
+    def __init__(self, yamlconfig, max_relations):
+        # type: (List[Union[str, Dict]], int) -> None
         self.log = get_check_logger()
         self.config = self._build_relations_config(yamlconfig)
+        self.max_relations = max_relations
         self.has_relations = len(self.config) > 0
 
     def filter_relation_query(self, query, schema_field):
@@ -295,8 +336,11 @@ def filter_relation_query(self, query, schema_field):
             relations_filter.append(' '.join(relation_filter))
         relations_filter = '(' + ' OR '.join(relations_filter) + ')'
 
-        self.log.debug("Running query: %s with relations matching: %s", str(query), relations_filter)
-        return query.format(relations=relations_filter)
+        limits_filter = 'LIMIT {}'.format(self.max_relations)
+        self.log.debug(
+            "Running query: %s with relations matching: %s, limits %s", str(query), relations_filter, self.max_relations
+        )
+        return query.format(relations=relations_filter, limits=limits_filter)
 
     @staticmethod
     def validate_relations_config(yamlconfig):
diff --git a/postgres/metadata.csv b/postgres/metadata.csv
index cf367b0c280ac..3fb36da4ef4ab 100644
--- a/postgres/metadata.csv
+++ b/postgres/metadata.csv
@@ -45,8 +45,10 @@ postgresql.autovacuumed,count,,,,The number of times this table has been vacuume
 postgresql.analyzed,count,,,,The number of times this table has been manually analyzed.,0,postgres,analyze,
 postgresql.autoanalyzed,count,,,,The number of times this table has been analyzed by the autovacuum daemon.,0,postgres,auto analyze,
 postgresql.index_rows_read,gauge,,row,second,The number of index entries returned by scans on this index.,0,postgres,idx rows read,
-postgresql.table_size,gauge,,byte,,"The total disk space used by the specified table. Includes TOAST, free space map, and visibility map. Excludes indexes.",0,postgres,tbl size,
+postgresql.table_size,gauge,,byte,,"The disk space used by the specified table with TOAST data. Free space map and visibility map are not included.",0,postgres,tbl size,
+postgresql.relation_size,gauge,,byte,,"The disk space used by the specified table. TOAST data, indexes, free space map and visibility map are not included.",0,postgres,relation size,
 postgresql.index_size,gauge,,byte,,The total disk space used by indexes attached to the specified table.,0,postgres,idx size,
+postgresql.toast_size,gauge,,byte,,The total disk space used by the toast table attached to the specified table.,0,postgres,toast size,
 postgresql.total_size,gauge,,byte,,"The total disk space used by the table, including indexes and TOAST data.",0,postgres,tot size,
 postgresql.individual_index_size,gauge,,byte,,The disk space used by a specified index.,0,postgres,idx size,
 postgresql.table.count,gauge,,table,,The number of user tables in this database.,0,postgres,tbl count,
@@ -137,3 +139,6 @@ postgresql.replication_slot.confirmed_flush_delay_bytes,gauge,,byte,,"The delay
 postgresql.pg_stat_statements.dealloc,count,,,,"The number of times pg_stat_statements had to evict least executed queries because pg_stat_statements.max was reached.",-1,postgres,pgss dealloc,
 postgresql.control.timeline_id,gauge,,,,"The current timeline id.",0,postgres,control tid,
 postgresql.control.checkpoint_delay,gauge,,second,,"The time since the last checkpoint.",0,postgres,control checkpoint,
+postgresql.relation.pages,gauge,,,,"Size of a table in pages (1 page == 8KB by default). This is only an estimation used by the planner and is updated by VACUUM or ANALYZE.",0,postgres,relation pages,
+postgresql.relation.tuples,gauge,,,,"Number of live rows in the table. This is only an estimation used by the planner and is updated by VACUUM or ANALYZE. If the table has never been vacuumed or analyzed, -1 will be reported.",0,postgres,relation tuples,
+postgresql.relation.all_visible,gauge,,,,"Number of pages that are marked as all visible in the table's visibility map. This is only an estimation used by the planner and is updated by VACUUM or ANALYZE.",0,postgres,relation all_visible,
diff --git a/postgres/pyproject.toml b/postgres/pyproject.toml
index cce2d3ac694e3..daef65d310138 100644
--- a/postgres/pyproject.toml
+++ b/postgres/pyproject.toml
@@ -30,7 +30,7 @@ classifiers = [
   "Private :: Do Not Upload",
 ]
 dependencies = [
-  "datadog-checks-base>=25.4.0",
+  "datadog-checks-base>=32.1.0",
 ]
 dynamic = [
   "version",
diff --git a/postgres/tests/common.py b/postgres/tests/common.py
index d35a65d603a58..3bc1dfbbc6028 100644
--- a/postgres/tests/common.py
+++ b/postgres/tests/common.py
@@ -99,9 +99,9 @@
 requires_static_version = pytest.mark.skipif(USING_LATEST, reason='Version `latest` is ever-changing, skipping')
 
 
-def _iterate_metric_name(columns):
-    for column in columns:
-        if column['type'] == 'tag':
+def _iterate_metric_name(query):
+    for column in query['columns']:
+        if column['type'].startswith('tag'):
             continue
         yield column['name']
 
@@ -148,8 +148,18 @@ def check_common_metrics(aggregator, expected_tags, count=1):
 def check_db_count(aggregator, expected_tags, count=1):
+    table_count = 5
+    # We create 2 additional partition tables when partitioning is available
+    if float(POSTGRES_VERSION) >= 11.0:
+        table_count = 7
+    # And PG >= 14 will also report the parent table
+    if float(POSTGRES_VERSION) >= 14.0:
+        table_count = 8
     aggregator.assert_metric(
-        'postgresql.table.count', value=5, count=count, tags=expected_tags + ['db:{}'.format(DB_NAME), 'schema:public']
+        'postgresql.table.count',
+        value=table_count,
+        count=count,
+        tags=expected_tags + ['db:{}'.format(DB_NAME), 'schema:public'],
     )
     aggregator.assert_metric('postgresql.db.count', value=106, count=1)
@@ -200,7 +210,7 @@ def check_wal_receiver_metrics(aggregator, expected_tags, count=1, connected=1):
             'postgresql.wal_receiver.connected', count=count, value=1, tags=expected_tags + ['status:disconnected']
         )
         return
-    for metric_name in _iterate_metric_name(QUERY_PG_STAT_WAL_RECEIVER['columns']):
+    for metric_name in _iterate_metric_name(QUERY_PG_STAT_WAL_RECEIVER):
         aggregator.assert_metric(metric_name, count=count, tags=expected_tags)
@@ -227,7 +237,7 @@ def check_logical_replication_slots(aggregator, expected_tags):
 def check_replication_slots(aggregator, expected_tags, count=1):
     if float(POSTGRES_VERSION) < 10.0:
         return
-    for metric_name in _iterate_metric_name(QUERY_PG_REPLICATION_SLOTS['columns']):
+    for metric_name in _iterate_metric_name(QUERY_PG_REPLICATION_SLOTS):
         if 'slot_type:physical' in expected_tags and metric_name in [
             'postgresql.replication_slot.confirmed_flush_delay_bytes',
         ]:
@@ -247,13 +257,13 @@ def check_replication_delay(aggregator, metrics_cache, expected_tags, count=1):
 
 def check_uptime_metrics(aggregator, expected_tags, count=1):
-    for column in QUERY_PG_UPTIME['columns']:
-        aggregator.assert_metric(column['name'], count=count, tags=expected_tags)
+    for metric_name in _iterate_metric_name(QUERY_PG_UPTIME):
+        aggregator.assert_metric(metric_name, count=count, tags=expected_tags)
 
 
 def check_control_metrics(aggregator, expected_tags, count=1):
-    for column in QUERY_PG_CONTROL_CHECKPOINT['columns']:
-        aggregator.assert_metric(column['name'], count=count, tags=expected_tags)
+    for metric_name in _iterate_metric_name(QUERY_PG_CONTROL_CHECKPOINT):
+        aggregator.assert_metric(metric_name, count=count, tags=expected_tags)
 
 
 def check_conflict_metrics(aggregator, expected_tags, count=1):
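The `_iterate_metric_name` change above is worth a quick illustration: the helper now takes the whole query dict and skips every column whose type starts with 'tag', so the new 'tag_not_null' columns (such as partition_of in QUERY_PG_CLASS) are excluded from metric assertions as well. A self-contained usage sketch (the sample dict is hypothetical; the helper body matches the diff):

```python
def _iterate_metric_name(query):
    # Yield only metric columns; 'tag' and 'tag_not_null' columns are tags, not metrics.
    for column in query['columns']:
        if column['type'].startswith('tag'):
            continue
        yield column['name']

sample_query = {
    'columns': [
        {'name': 'db', 'type': 'tag'},
        {'name': 'partition_of', 'type': 'tag_not_null'},
        {'name': 'postgresql.relation.pages', 'type': 'gauge'},
    ]
}
print(list(_iterate_metric_name(sample_query)))  # ['postgresql.relation.pages']
```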
@@ -285,7 +295,7 @@ def check_slru_metrics(aggregator, expected_tags, count=1):
 def check_snapshot_txid_metrics(aggregator, expected_tags, count=1):
-    for metric_name in _iterate_metric_name(SNAPSHOT_TXID_METRICS['columns']):
+    for metric_name in _iterate_metric_name(SNAPSHOT_TXID_METRICS):
         aggregator.assert_metric(metric_name, count=count, tags=expected_tags)
@@ -293,7 +303,7 @@ def check_file_wal_metrics(aggregator, expected_tags, count=1):
     if float(POSTGRES_VERSION) < 10:
         return
 
-    for metric_name in _iterate_metric_name(WAL_FILE_METRICS['columns']):
+    for metric_name in _iterate_metric_name(WAL_FILE_METRICS):
         aggregator.assert_metric(metric_name, count=count, tags=expected_tags)
@@ -301,5 +311,5 @@ def check_stat_wal_metrics(aggregator, expected_tags, count=1):
     if float(POSTGRES_VERSION) < 14.0:
         return
 
-    for metric_name in _iterate_metric_name(STAT_WAL_METRICS['columns']):
+    for metric_name in _iterate_metric_name(STAT_WAL_METRICS):
         aggregator.assert_metric(metric_name, count=count, tags=expected_tags)
diff --git a/postgres/tests/compose/resources/02_setup.sh b/postgres/tests/compose/resources/02_setup.sh
index dc3fe56451ff4..3ef07407e8b2c 100755
--- a/postgres/tests/compose/resources/02_setup.sh
+++ b/postgres/tests/compose/resources/02_setup.sh
@@ -93,6 +93,13 @@ psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" dogs_nofunc <<-'EOSQL'
     DROP FUNCTION datadog.explain_statement(l_query text, out explain JSON)
 EOSQL
 
+# Somehow, on old postgres versions (11 and 12), wal_level is incorrectly set despite
+# being present in postgresql.conf. Alter and restart to make sure we have the correct wal_level.
+psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" datadog_test <<-'EOSQL'
+    ALTER SYSTEM SET wal_level = logical;
+EOSQL
+pg_ctl -D /var/lib/postgresql/data -w restart
+
 psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" datadog_test <<-'EOSQL'
     SELECT * FROM pg_create_physical_replication_slot('replication_slot');
     SELECT * FROM pg_create_logical_replication_slot('logical_slot', 'test_decoding');
diff --git a/postgres/tests/compose/resources/03_load_data.sh b/postgres/tests/compose/resources/03_load_data.sh
index 424997f191853..aee6a034aec15 100755
--- a/postgres/tests/compose/resources/03_load_data.sh
+++ b/postgres/tests/compose/resources/03_load_data.sh
@@ -19,6 +19,18 @@ psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" datadog_test <<-EOSQL
     GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO blocking_bob;
 EOSQL
 
+if [[ !("$PG_MAJOR" == 9.*) && !("$PG_MAJOR" == 10) ]]; then
+psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" datadog_test <<-EOSQL
+    CREATE TABLE test_part (id SERIAL PRIMARY KEY, filler text) PARTITION BY RANGE (id);
+    CREATE TABLE test_part1 PARTITION OF test_part FOR VALUES FROM (MINVALUE) TO (500);
+    CREATE TABLE test_part2 PARTITION OF test_part FOR VALUES FROM (500) TO (MAXVALUE);
+    CREATE INDEX test_part_id ON test_part(id);
+    INSERT INTO test_part (filler) SELECT * FROM generate_series(1, 2000);
+    INSERT INTO test_part (filler) SELECT array_to_string(ARRAY(SELECT chr((65 + round(random() * 50)) :: integer) FROM generate_series(1,3000)), '');
+    VACUUM ANALYZE test_part;
+EOSQL
+fi
+
 for DBNAME in dogs dogs_noschema dogs_nofunc; do
 
 psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" "$DBNAME" <<-EOSQL
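For reference, the reltuples values asserted in test_partition_relation below follow directly from this fixture: the SERIAL column assigns ids 1 through 2001 (2000 generate_series rows plus one long filler row), and the range boundary at 500 puts ids 1-499 in test_part1 and everything else in test_part2. A back-of-the-envelope check (standalone sketch, not part of the test suite):

```python
# 2000 rows from generate_series(1, 2000) plus the single 3000-char filler row.
ids = range(1, 2002)
part1 = sum(1 for i in ids if i < 500)   # FOR VALUES FROM (MINVALUE) TO (500)
part2 = sum(1 for i in ids if i >= 500)  # FOR VALUES FROM (500) TO (MAXVALUE)
print(part1, part2)  # 499 1502
```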
diff --git a/postgres/tests/test_relations.py b/postgres/tests/test_relations.py
index c192db14bf41c..7d2590274bcfe 100644
--- a/postgres/tests/test_relations.py
+++ b/postgres/tests/test_relations.py
@@ -5,9 +5,10 @@
 import pytest
 
 from datadog_checks.base import ConfigurationError
-from datadog_checks.postgres.relationsmanager import RelationsManager
+from datadog_checks.postgres.relationsmanager import QUERY_PG_CLASS, RelationsManager
 
-from .common import DB_NAME, HOST, PORT
+from .common import DB_NAME, HOST, PORT, _get_expected_tags, _iterate_metric_name
+from .utils import requires_over_11
 
 RELATION_METRICS = [
     'postgresql.seq_scans',
@@ -30,8 +31,6 @@
     'postgresql.autoanalyzed',
 ]
 
-RELATION_SIZE_METRICS = ['postgresql.table_size', 'postgresql.total_size', 'postgresql.index_size']
-
 RELATION_INDEX_METRICS = [
     'postgresql.index_scans',
     'postgresql.index_rows_fetched',  # deprecated
@@ -48,35 +47,54 @@
 @pytest.mark.usefixtures('dd_environment')
 def test_relations_metrics(aggregator, integration_check, pg_instance):
     pg_instance['relations'] = ['persons']
+    check = integration_check(pg_instance)
+    check.check(pg_instance)
 
-    posgres_check = integration_check(pg_instance)
-    posgres_check.check(pg_instance)
-
-    expected_tags = pg_instance['tags'] + [
-        'port:{}'.format(pg_instance['port']),
-        'db:%s' % pg_instance['dbname'],
-        'table:persons',
-        'schema:public',
-        'dd.internal.resource:database_instance:{}'.format(posgres_check.resolved_hostname),
-    ]
-
-    expected_size_tags = pg_instance['tags'] + [
-        'port:{}'.format(pg_instance['port']),
-        'db:%s' % pg_instance['dbname'],
-        'table:persons',
-        'schema:public',
-        'dd.internal.resource:database_instance:{}'.format(posgres_check.resolved_hostname),
-    ]
+    expected_tags = _get_expected_tags(check, pg_instance, db=pg_instance['dbname'], table='persons', schema='public')
 
     for name in RELATION_METRICS:
         aggregator.assert_metric(name, count=1, tags=expected_tags)
 
-    # 'persons' db don't have any indexes
     for name in RELATION_INDEX_METRICS:
         aggregator.assert_metric(name, count=0, tags=expected_tags)
 
+    for name in _iterate_metric_name(QUERY_PG_CLASS):
+        aggregator.assert_metric(name, count=1, tags=expected_tags)
 
-    for name in RELATION_SIZE_METRICS:
-        aggregator.assert_metric(name, count=1, tags=expected_size_tags)
+
+@pytest.mark.integration
+@pytest.mark.usefixtures('dd_environment')
+@requires_over_11
+def test_partition_relation(aggregator, integration_check, pg_instance):
+    pg_instance['relations'] = [
+        {'relation_regex': 'test_.*'},
+    ]
+
+    check = integration_check(pg_instance)
+    check.check(pg_instance)
+
+    part_1_tags = _get_expected_tags(
+        check, pg_instance, db=pg_instance['dbname'], table='test_part1', partition_of='test_part', schema='public'
+    )
+    aggregator.assert_metric('postgresql.relation.pages', value=3, count=1, tags=part_1_tags)
+    aggregator.assert_metric('postgresql.relation.tuples', value=499, count=1, tags=part_1_tags)
+    aggregator.assert_metric('postgresql.relation.all_visible', value=3, count=1, tags=part_1_tags)
+    aggregator.assert_metric('postgresql.table_size', value=24576, count=1, tags=part_1_tags)
+    aggregator.assert_metric('postgresql.relation_size', value=24576, count=1, tags=part_1_tags)
+    aggregator.assert_metric('postgresql.index_size', value=65536, count=1, tags=part_1_tags)
+    aggregator.assert_metric('postgresql.toast_size', value=0, count=1, tags=part_1_tags)
+    aggregator.assert_metric('postgresql.total_size', value=90112, count=1, tags=part_1_tags)
+
+    part_2_tags = _get_expected_tags(
+        check, pg_instance, db=pg_instance['dbname'], table='test_part2', partition_of='test_part', schema='public'
+    )
+    aggregator.assert_metric('postgresql.relation.pages', value=8, count=1, tags=part_2_tags)
+    aggregator.assert_metric('postgresql.relation.tuples', value=1502, count=1, tags=part_2_tags)
+    aggregator.assert_metric('postgresql.relation.all_visible', value=8, count=1, tags=part_2_tags)
+    aggregator.assert_metric('postgresql.table_size', value=73728, count=1, tags=part_2_tags)
+    aggregator.assert_metric('postgresql.relation_size', value=65536, count=1, tags=part_2_tags)
+    aggregator.assert_metric('postgresql.index_size', value=98304, count=1, tags=part_2_tags)
+    aggregator.assert_metric('postgresql.toast_size', value=8192, count=1, tags=part_2_tags)
+    aggregator.assert_metric('postgresql.total_size', value=172032, count=1, tags=part_2_tags)
 
 
 @pytest.mark.integration
@@ -92,17 +110,10 @@ def test_bloat_metrics(aggregator, collect_bloat_metrics, expected_count, integr
     pg_instance['relations'] = ['pg_index']
     pg_instance['collect_bloat_metrics'] = collect_bloat_metrics
 
-    posgres_check = integration_check(pg_instance)
-    posgres_check.check(pg_instance)
-
-    base_tags = pg_instance['tags'] + [
-        'port:{}'.format(pg_instance['port']),
-        'db:%s' % pg_instance['dbname'],
-        'table:pg_index',
-        'schema:pg_catalog',
-        'dd.internal.resource:database_instance:{}'.format(posgres_check.resolved_hostname),
-    ]
+    check = integration_check(pg_instance)
+    check.check(pg_instance)
 
+    base_tags = _get_expected_tags(check, pg_instance, db=pg_instance['dbname'], table='pg_index', schema='pg_catalog')
     aggregator.assert_metric('postgresql.table_bloat', count=expected_count, tags=base_tags)
 
     indices = ['pg_index_indrelid_index', 'pg_index_indexrelid_index']
@@ -120,18 +131,14 @@ def test_relations_metrics_regex(aggregator, integration_check, pg_instance):
         {'relation_regex': r'[pP]ersons[-_]?(dup\d)?'},
     ]
     relations = ['persons', 'personsdup1', 'Personsdup2']
-    posgres_check = integration_check(pg_instance)
-    posgres_check.check(pg_instance)
+    check = integration_check(pg_instance)
+    check.check(pg_instance)
 
     expected_tags = {}
     for relation in relations:
-        expected_tags[relation] = pg_instance['tags'] + [
-            'port:{}'.format(pg_instance['port']),
-            'db:%s' % pg_instance['dbname'],
-            'table:{}'.format(relation.lower()),
-            'schema:public',
-            'dd.internal.resource:database_instance:{}'.format(posgres_check.resolved_hostname),
-        ]
+        expected_tags[relation] = _get_expected_tags(
+            check, pg_instance, db=pg_instance['dbname'], table=relation.lower(), schema='public'
+        )
 
     for relation in relations:
         for name in RELATION_METRICS:
@@ -141,16 +148,16 @@
         for name in RELATION_INDEX_METRICS:
             aggregator.assert_metric(name, count=0, tags=expected_tags[relation])
 
-        for name in RELATION_SIZE_METRICS:
-            aggregator.assert_metric(name, count=1, tags=expected_tags[relation])
+        for name in _iterate_metric_name(QUERY_PG_CLASS):
+            aggregator.assert_metric(name, count=1, tags=expected_tags[relation])
 
 
 @pytest.mark.integration
 @pytest.mark.usefixtures('dd_environment')
 def test_max_relations(aggregator, integration_check, pg_instance):
     pg_instance.update({'relations': [{'relation_regex': '.*'}], 'max_relations': 1})
-    posgres_check = integration_check(pg_instance)
-    posgres_check.check(pg_instance)
+    check = integration_check(pg_instance)
+    check.check(pg_instance)
 
     for name in RELATION_METRICS:
         relation_metrics = []
@@ -159,7 +166,7 @@
                 relation_metrics.append(m)
         assert len(relation_metrics) == 1
 
-    for name in RELATION_SIZE_METRICS:
+    for name in _iterate_metric_name(QUERY_PG_CLASS):
         relation_metrics = []
         for m in aggregator._metrics[name]:
             if any(['table:' in tag for tag in m.tags]):
@@ -173,18 +180,12 @@ def test_index_metrics(aggregator, integration_check, pg_instance):
     pg_instance['relations'] = ['breed']
     pg_instance['dbname'] = 'dogs'
-    posgres_check = integration_check(pg_instance)
-    posgres_check.check(pg_instance)
-
-    expected_tags = pg_instance['tags'] + [
-        'port:{}'.format(pg_instance['port']),
-        'db:dogs',
-        'table:breed',
-        'index:breed_names',
-        'schema:public',
-        'dd.internal.resource:database_instance:{}'.format(posgres_check.resolved_hostname),
-    ]
+    check = integration_check(pg_instance)
+    check.check(pg_instance)
 
+    expected_tags = _get_expected_tags(
+        check, pg_instance, db='dogs', table='breed', index='breed_names', schema='public'
+    )
     for name in IDX_METRICS:
         aggregator.assert_metric(name, count=1, tags=expected_tags)
diff --git a/postgres/tests/test_relationsmanager.py b/postgres/tests/test_relationsmanager.py
index 5b43206385ad0..ee48bbc94834b 100644
--- a/postgres/tests/test_relationsmanager.py
+++ b/postgres/tests/test_relationsmanager.py
@@ -3,11 +3,18 @@
 # Licensed under Simplified BSD License (see LICENSE)
 import pytest
 
-from datadog_checks.postgres.relationsmanager import ALL_SCHEMAS, IDX_METRICS, LOCK_METRICS, RelationsManager
+from datadog_checks.postgres.relationsmanager import (
+    ALL_SCHEMAS,
+    IDX_METRICS,
+    LOCK_METRICS,
+    QUERY_PG_CLASS,
+    RelationsManager,
+)
 
 from .common import SCHEMA_NAME
 
 pytestmark = pytest.mark.unit
+default_max_relations = 300
 
 
 @pytest.mark.parametrize(
@@ -47,7 +54,7 @@
 )
 def test_relations_cases(relations_config, expected_filter):
     query = '{relations}'
-    relations = RelationsManager(relations_config)
+    relations = RelationsManager(relations_config, default_max_relations)
     query_filter = relations.filter_relation_query(query, SCHEMA_NAME)
     assert query_filter == expected_filter
 
@@ -55,7 +62,7 @@ def test_relation_filter():
     query = "Select foo from bar where {relations}"
     relations_config = [{'relation_name': 'breed', 'schemas': ['public']}]
-    relations = RelationsManager(relations_config)
+    relations = RelationsManager(relations_config, default_max_relations)
     query_filter = relations.filter_relation_query(query, SCHEMA_NAME)
 
     assert (
@@ -67,7 +74,7 @@ def test_relation_filter_no_schemas():
     query = "Select foo from bar where {relations}"
     relations_config = [{'relation_name': 'persons', 'schemas': [ALL_SCHEMAS]}]
-    relations = RelationsManager(relations_config)
+    relations = RelationsManager(relations_config, default_max_relations)
     query_filter = relations.filter_relation_query(query, SCHEMA_NAME)
 
     assert query_filter == "Select foo from bar where (( relname = 'persons' ))"
 
@@ -76,7 +83,7 @@ def test_relation_filter_regex():
     query = "Select foo from bar where {relations}"
     relations_config = [{'relation_regex': 'b.*', 'schemas': [ALL_SCHEMAS]}]
-    relations = RelationsManager(relations_config)
+    relations = RelationsManager(relations_config, default_max_relations)
     query_filter = relations.filter_relation_query(query, SCHEMA_NAME)
 
     assert query_filter == "Select foo from bar where (( relname ~ 'b.*' ))"
 
@@ -85,16 +92,25 @@ def test_relation_filter_relkind():
     query = LOCK_METRICS['query'].replace('{metrics_columns}', 'foo')
     relations_config = [{'relation_regex': 'b.*', 'schemas': [ALL_SCHEMAS], 'relkind': ['r', 't']}]
-    relations = RelationsManager(relations_config)
+    relations = RelationsManager(relations_config, default_max_relations)
     query_filter = relations.filter_relation_query(query, SCHEMA_NAME)
     assert "AND relkind = ANY(array['r','t'])" in query_filter
 
 
+def test_relation_filter_limit():
+    query = QUERY_PG_CLASS['query']
+    relations_config = [{'relation_regex': '.*', 'schemas': [ALL_SCHEMAS]}]
+    relations = RelationsManager(relations_config, default_max_relations)
+
+    query_filter = relations.filter_relation_query(query, SCHEMA_NAME)
+    assert 'LIMIT 300' in query_filter
+
+
 def test_relkind_does_not_apply_to_index_metrics():
     query = IDX_METRICS['query'].replace('{metrics_columns}', 'foo')
     relations_config = [{'relation_regex': 'b.*', 'schemas': [ALL_SCHEMAS], 'relkind': ['r']}]
-    relations = RelationsManager(relations_config)
+    relations = RelationsManager(relations_config, default_max_relations)
     query_filter = relations.filter_relation_query(query, SCHEMA_NAME)
 
     assert 'relkind' not in query_filter
diff --git a/postgres/tests/utils.py b/postgres/tests/utils.py
index 224120933b738..7396db3227e7e 100644
--- a/postgres/tests/utils.py
+++ b/postgres/tests/utils.py
@@ -12,6 +12,10 @@
     POSTGRES_VERSION is None or float(POSTGRES_VERSION) < 10,
     reason='This test is for over 10 only (make sure POSTGRES_VERSION is set)',
 )
+requires_over_11 = pytest.mark.skipif(
+    POSTGRES_VERSION is None or float(POSTGRES_VERSION) < 11,
+    reason='This test is for over 11 only (make sure POSTGRES_VERSION is set)',
+)
 requires_over_14 = pytest.mark.skipif(
     POSTGRES_VERSION is None or float(POSTGRES_VERSION) < 14,
     reason='This test is for over 14 only (make sure POSTGRES_VERSION is set)',
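Putting the pieces together, here is a simplified sketch of what filter_relation_query now produces for a QUERY_PG_CLASS-style template (hypothetical stand-in: the real RelationsManager also handles relation_regex, schema lists and relkind, as the tests above show):

```python
def filter_relation_query(query, relation_names, max_relations=300):
    # Simplified: one OR'd group per configured relation, then the row cap.
    relations_filter = '(' + ' OR '.join(
        "( relname = '{}' )".format(name) for name in relation_names
    ) + ')'
    return query.format(relations=relations_filter, limits='LIMIT {}'.format(max_relations))

template = "SELECT relname FROM pg_class WHERE relkind = 'r' AND {relations} {limits}"
print(filter_relation_query(template, ['persons']))
# SELECT relname FROM pg_class WHERE relkind = 'r' AND (( relname = 'persons' )) LIMIT 300
```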