diff --git a/src/guc.h b/src/guc.h
index 54a341b7a3b..7691be19850 100644
--- a/src/guc.h
+++ b/src/guc.h
@@ -44,7 +44,7 @@ extern bool ts_guc_enable_cagg_reorder_groupby;
 extern TSDLLEXPORT int ts_guc_cagg_max_individual_materializations;
 extern bool ts_guc_enable_now_constify;
 extern bool ts_guc_enable_foreign_key_propagation;
-extern bool ts_guc_enable_osm_reads;
+extern TSDLLEXPORT bool ts_guc_enable_osm_reads;
 #if PG16_GE
 extern TSDLLEXPORT bool ts_guc_enable_cagg_sort_pushdown;
 #endif
diff --git a/tsl/src/bgw_policy/continuous_aggregate_api.c b/tsl/src/bgw_policy/continuous_aggregate_api.c
index 245c120060c..9c289cb7ce2 100644
--- a/tsl/src/bgw_policy/continuous_aggregate_api.c
+++ b/tsl/src/bgw_policy/continuous_aggregate_api.c
@@ -136,6 +136,17 @@ policy_refresh_cagg_get_refresh_end(const Dimension *dim, const Jsonb *config, b
 	return res;
 }
 
+/* Read the optional include_tiered_data key from the policy config; *isnull
+ * is set when the key is absent. */
+bool
+policy_refresh_cagg_get_include_tiered_data(const Jsonb *config, bool *isnull)
+{
+	bool found;
+	bool res = ts_jsonb_get_bool_field(config, POL_REFRESH_CONF_KEY_ENABLE_TIERED_READS, &found);
+
+	*isnull = !found;
+	return res;
+}
+
 /* returns false if a policy could not be found */
 bool
 policy_refresh_cagg_exists(int32 materialization_id)
diff --git a/tsl/src/bgw_policy/continuous_aggregate_api.h b/tsl/src/bgw_policy/continuous_aggregate_api.h
index 160d7bd67d4..abb154e6639 100644
--- a/tsl/src/bgw_policy/continuous_aggregate_api.h
+++ b/tsl/src/bgw_policy/continuous_aggregate_api.h
@@ -20,6 +20,7 @@ int64 policy_refresh_cagg_get_refresh_start(const ContinuousAgg *cagg, const Dim
 											const Jsonb *config, bool *start_isnull);
 int64 policy_refresh_cagg_get_refresh_end(const Dimension *dim, const Jsonb *config,
 										  bool *end_isnull);
+bool policy_refresh_cagg_get_include_tiered_data(const Jsonb *config, bool *isnull);
 bool policy_refresh_cagg_refresh_start_lt(int32 materialization_id, Oid cmp_type,
 										  Datum cmp_interval);
 bool policy_refresh_cagg_exists(int32 materialization_id);
diff --git a/tsl/src/bgw_policy/job.c b/tsl/src/bgw_policy/job.c
index f4a9dfdd966..dc4cb19f910 100644
--- a/tsl/src/bgw_policy/job.c
+++ b/tsl/src/bgw_policy/job.c
@@ -51,6 +51,7 @@
 #include "dimension_slice.h"
 #include "dimension_vector.h"
 #include "errors.h"
+#include "guc.h"
 #include "job.h"
 #include "reorder.h"
 #include "utils.h"
@@ -372,7 +373,17 @@ policy_refresh_cagg_execute(int32 job_id, Jsonb *config)
 {
 	PolicyContinuousAggData policy_data;
 	policy_refresh_cagg_read_and_validate_config(config, &policy_data);
+
+	/* Save the session value so it can be restored after the refresh */
+	bool enable_osm_reads_old = ts_guc_enable_osm_reads;
+
+	if (!policy_data.include_tiered_data_isnull)
+		SetConfigOption("timescaledb.enable_tiered_reads",
+						policy_data.include_tiered_data ? "on" : "off",
+						PGC_USERSET,
+						PGC_S_SESSION);
+
 	continuous_agg_refresh_internal(policy_data.cagg,
 									&policy_data.refresh_window,
 									CAGG_REFRESH_POLICY,
 									policy_data.start_is_null,
@@ -380,6 +391,12 @@ policy_refresh_cagg_execute(int32 job_id, Jsonb *config)
 									policy_data.end_is_null,
 									false);
+
+	if (!policy_data.include_tiered_data_isnull)
+		SetConfigOption("timescaledb.enable_tiered_reads",
+						enable_osm_reads_old ? "on" : "off",
+						PGC_USERSET,
+						PGC_S_SESSION);
 
 	return true;
 }
 
@@ -392,6 +409,7 @@ policy_refresh_cagg_read_and_validate_config(Jsonb *config, PolicyContinuousAggD
 	Oid dim_type;
 	int64 refresh_start, refresh_end;
 	bool start_isnull, end_isnull;
+	bool include_tiered_data, include_tiered_data_isnull;
 
 	materialization_id = policy_continuous_aggregate_get_mat_hypertable_id(config);
 	mat_ht = ts_hypertable_get_by_id(materialization_id);
@@ -418,6 +436,9 @@ policy_refresh_cagg_read_and_validate_config(Jsonb *config, PolicyContinuousAggD
 									 ts_internal_to_time_string(refresh_end, dim_type)),
 							 errhint("The start of the window must be before the end.")));
 
+	include_tiered_data =
+		policy_refresh_cagg_get_include_tiered_data(config, &include_tiered_data_isnull);
+
 	if (policy_data)
 	{
 		policy_data->refresh_window.type = dim_type;
@@ -426,6 +447,8 @@ policy_refresh_cagg_read_and_validate_config(Jsonb *config, PolicyContinuousAggD
 		policy_data->cagg = cagg;
 		policy_data->start_is_null = start_isnull;
 		policy_data->end_is_null = end_isnull;
+		policy_data->include_tiered_data = include_tiered_data;
+		policy_data->include_tiered_data_isnull = include_tiered_data_isnull;
 	}
 }
 
diff --git a/tsl/src/bgw_policy/job.h b/tsl/src/bgw_policy/job.h
index 70e488cd475..404d9764046 100644
--- a/tsl/src/bgw_policy/job.h
+++ b/tsl/src/bgw_policy/job.h
@@ -36,7 +36,8 @@ typedef struct PolicyContinuousAggData
 {
 	InternalTimeRange refresh_window;
 	ContinuousAgg *cagg;
-	bool start_is_null, end_is_null;
+	bool include_tiered_data;
+	bool start_is_null, end_is_null, include_tiered_data_isnull;
 } PolicyContinuousAggData;
 
 typedef struct PolicyCompressionData
diff --git a/tsl/src/bgw_policy/policies_v2.h b/tsl/src/bgw_policy/policies_v2.h
index 4a5eeb852d8..6dc64e957fd 100644
--- a/tsl/src/bgw_policy/policies_v2.h
+++ b/tsl/src/bgw_policy/policies_v2.h
@@ -19,6 +19,7 @@
 #define POL_REFRESH_CONF_KEY_MAT_HYPERTABLE_ID "mat_hypertable_id"
 #define POL_REFRESH_CONF_KEY_START_OFFSET "start_offset"
 #define POL_REFRESH_CONF_KEY_END_OFFSET "end_offset"
+#define POL_REFRESH_CONF_KEY_ENABLE_TIERED_READS "include_tiered_data"
 
 #define POLICY_COMPRESSION_PROC_NAME "policy_compression"
 #define POLICY_COMPRESSION_CHECK_NAME "policy_compression_check"
diff --git a/tsl/test/expected/chunk_utils_internal.out b/tsl/test/expected/chunk_utils_internal.out
index 520484a8651..73985381693 100644
--- a/tsl/test/expected/chunk_utils_internal.out
+++ b/tsl/test/expected/chunk_utils_internal.out
@@ -819,6 +819,70 @@ SELECT * FROM ht_try_weekly;
 DROP MATERIALIZED VIEW ht_try_weekly;
 NOTICE:  drop cascades to table _timescaledb_internal._hyper_6_12_chunk
+\c :TEST_DBNAME :ROLE_SUPERUSER
+CREATE USER MAPPING FOR :ROLE_SUPERUSER SERVER s3_server
+OPTIONS (user :'ROLE_SUPERUSER', password_required 'false');
+CREATE FUNCTION create_test_cagg(include_tiered_data JSONB)
+RETURNS INTEGER AS
+$$
+DECLARE
+    job_id INTEGER;
+BEGIN
+    CREATE MATERIALIZED VIEW ht_try_weekly
+    WITH (timescaledb.continuous) AS
+    SELECT time_bucket(interval '1 week', timec) AS ts_bucket, avg(value)
+    FROM ht_try
+    GROUP BY 1
+    WITH NO DATA;
+
+    job_id := add_continuous_aggregate_policy(
+        'ht_try_weekly',
+        start_offset => NULL,
+        end_offset => INTERVAL '1 hour',
+        schedule_interval => INTERVAL '1 hour'
+    );
+
+    UPDATE _timescaledb_config.bgw_job
+    SET config = jsonb_insert(config, '{include_tiered_data}'::TEXT[], include_tiered_data)
+    WHERE id = job_id;
+
+    RETURN job_id;
+END
+$$ LANGUAGE plpgsql;
+SELECT create_test_cagg('true') AS job_id \gset
+CALL run_job(:job_id);
+SELECT * FROM ht_try_weekly;
+          ts_bucket           |          avg          
+------------------------------+-----------------------
+ Sun Dec 29 16:00:00 2019 PST | 1000.0000000000000000
+ Sun May 01 17:00:00 2022 PDT |  222.0000000000000000
+(2 rows)
+
+DROP MATERIALIZED VIEW ht_try_weekly;
+NOTICE:  drop cascades to 2 other objects
+SELECT create_test_cagg('false') AS job_id \gset
+CALL run_job(:job_id);
+SELECT * FROM ht_try_weekly;
+          ts_bucket           |         avg          
+------------------------------+----------------------
+ Sun May 01 17:00:00 2022 PDT | 222.0000000000000000
+(1 row)
+
+DROP MATERIALIZED VIEW ht_try_weekly;
+NOTICE:  drop cascades to table _timescaledb_internal._hyper_8_15_chunk
+-- default behavior: use instance-wide GUC value
+SELECT create_test_cagg('null') AS job_id \gset
+CALL run_job(:job_id);
+SELECT * FROM ht_try_weekly;
+          ts_bucket           |          avg          
+------------------------------+-----------------------
+ Sun Dec 29 16:00:00 2019 PST | 1000.0000000000000000
+ Sun May 01 17:00:00 2022 PDT |  222.0000000000000000
+(2 rows)
+
+DROP MATERIALIZED VIEW ht_try_weekly;
+NOTICE:  drop cascades to 2 other objects
+\c :TEST_DBNAME :ROLE_4;
 -- This test verifies that a bugfix regarding the way `ROWID_VAR`s are adjusted
 -- in the chunks' targetlists on DELETE/UPDATE works (including partially
 -- compressed chunks)
 
@@ -831,7 +895,7 @@ SELECT compress_chunk(show_chunks('ht_try', newer_than => '2021-01-01'::timestam
              compress_chunk             
 -----------------------------------------
  _timescaledb_internal._hyper_5_10_chunk
- _timescaledb_internal._hyper_5_13_chunk
+ _timescaledb_internal._hyper_5_18_chunk
 (2 rows)
 
 INSERT INTO ht_try VALUES ('2021-06-05 01:00', 10, 222);
@@ -934,7 +998,7 @@ Indexes:
 Triggers:
     ts_insert_blocker BEFORE INSERT ON ht_try FOR EACH ROW EXECUTE FUNCTION _timescaledb_functions.insert_blocker()
 Child tables: _timescaledb_internal._hyper_5_10_chunk,
-              _timescaledb_internal._hyper_5_13_chunk
+              _timescaledb_internal._hyper_5_18_chunk
 
 -- verify that still can read from the table after catalog manipulations
 EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, SUMMARY OFF) SELECT * FROM ht_try;
@@ -942,10 +1006,10 @@ EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, SUMMARY OFF) SELECT * FROM ht_try;
                                    QUERY PLAN                                     
 ----------------------------------------------------------------------------------
  Append (actual rows=3 loops=1)
    ->  Custom Scan (DecompressChunk) on _hyper_5_10_chunk (actual rows=1 loops=1)
-         ->  Seq Scan on compress_hyper_7_14_chunk (actual rows=1 loops=1)
-   ->  Custom Scan (DecompressChunk) on _hyper_5_13_chunk (actual rows=1 loops=1)
-         ->  Seq Scan on compress_hyper_7_15_chunk (actual rows=1 loops=1)
-   ->  Seq Scan on _hyper_5_13_chunk (actual rows=1 loops=1)
+         ->  Seq Scan on compress_hyper_10_19_chunk (actual rows=1 loops=1)
+   ->  Custom Scan (DecompressChunk) on _hyper_5_18_chunk (actual rows=1 loops=1)
+         ->  Seq Scan on compress_hyper_10_20_chunk (actual rows=1 loops=1)
+   ->  Seq Scan on _hyper_5_18_chunk (actual rows=1 loops=1)
 (6 rows)
 ROLLBACK;
@@ -996,9 +1060,9 @@ RESTRICT
 ,CHECK ( temp > 10)
 );
 SELECT create_hypertable('hyper_constr', 'time', chunk_time_interval => 10);
-     create_hypertable     
----------------------------
- (8,public,hyper_constr,t)
+      create_hypertable      
+----------------------------
+ (11,public,hyper_constr,t)
 (1 row)
 
 INSERT INTO hyper_constr VALUES( 10, 200, 22, 1, 111, 44);
@@ -1031,7 +1095,7 @@ WHERE hypertable_id IN (SELECT id from _timescaledb_catalog.hypertable ORDER BY
 table_name;
     table_name      | status | osm_chunk 
 --------------------+--------+-----------
- _hyper_8_16_chunk  | 0      | f
+ _hyper_11_21_chunk | 0      | f
  child_hyper_constr | 0      | t
 (2 rows)
 
@@ -1119,15 +1183,15 @@ where hypertable_id = (Select id from _timescaledb_catalog.hypertable where tabl
 ORDER BY id;
  id |     table_name     
 ----+--------------------
- 16 | _hyper_8_16_chunk
- 17 | child_hyper_constr
+ 21 | _hyper_11_21_chunk
+ 22 | child_hyper_constr
 (2 rows)
 
 -- show_chunks will not show the OSM chunk which is visible via the above query
 SELECT show_chunks('hyper_constr');
-               show_chunks               
------------------------------------------
- _timescaledb_internal._hyper_8_16_chunk
+                show_chunks                
+------------------------------------------
+ _timescaledb_internal._hyper_11_21_chunk
 (1 row)
 
 ROLLBACK;
@@ -1157,9 +1221,9 @@ CREATE TABLE test1.copy_test (
     "value" double precision NOT NULL
 );
 SELECT create_hypertable('test1.copy_test', 'time', chunk_time_interval => interval '1 day');
-   create_hypertable    
------------------------
- (9,test1,copy_test,t)
+    create_hypertable    
+------------------------
+ (12,test1,copy_test,t)
 (1 row)
 
 COPY test1.copy_test FROM STDIN DELIMITER ',';
@@ -1178,15 +1242,15 @@ SELECT _timescaledb_functions.freeze_chunk( :'COPY_CHNAME');
 -- Check state
 SELECT table_name, status FROM _timescaledb_catalog.chunk WHERE table_name = :'COPY_CHUNK_NAME';
-    table_name     | status 
--------------------+--------
- _hyper_9_18_chunk | 4
+     table_name     | status 
+--------------------+--------
+ _hyper_12_23_chunk | 4
 (1 row)
 
 \set ON_ERROR_STOP 0
 -- Copy should fail because one of che chunks is frozen
 COPY test1.copy_test FROM STDIN DELIMITER ',';
-ERROR:  cannot INSERT into frozen chunk "_hyper_9_18_chunk"
+ERROR:  cannot INSERT into frozen chunk "_hyper_12_23_chunk"
 \set ON_ERROR_STOP 1
 -- Count existing rows
 SELECT COUNT(*) FROM test1.copy_test;
@@ -1198,15 +1262,15 @@ SELECT COUNT(*) FROM test1.copy_test;
 -- Check state
 SELECT table_name, status FROM _timescaledb_catalog.chunk WHERE table_name = :'COPY_CHUNK_NAME';
-    table_name     | status 
--------------------+--------
- _hyper_9_18_chunk | 4
+     table_name     | status 
+--------------------+--------
+ _hyper_12_23_chunk | 4
 (1 row)
 
 \set ON_ERROR_STOP 0
 -- Copy should fail because one of che chunks is frozen
 COPY test1.copy_test FROM STDIN DELIMITER ',';
-ERROR:  cannot INSERT into frozen chunk "_hyper_9_18_chunk"
+ERROR:  cannot INSERT into frozen chunk "_hyper_12_23_chunk"
 \set ON_ERROR_STOP 1
 -- Count existing rows
 SELECT COUNT(*) FROM test1.copy_test;
@@ -1225,9 +1289,9 @@ SELECT _timescaledb_functions.unfreeze_chunk( :'COPY_CHNAME');
 -- Check state
 SELECT table_name, status FROM _timescaledb_catalog.chunk WHERE table_name = :'COPY_CHUNK_NAME';
-    table_name     | status 
--------------------+--------
- _hyper_9_18_chunk | 0
+     table_name     | status 
+--------------------+--------
+ _hyper_12_23_chunk | 0
 (1 row)
 
 -- Copy should work now
@@ -1330,12 +1394,12 @@ WHERE ht.table_name LIKE 'osm%'
 ORDER BY 2,3;
 table_name | id | dimension_id |     range_start     |      range_end      
------------+----+--------------+---------------------+---------------------
- osm_int2   | 17 |            8 | 9223372036854775806 | 9223372036854775807
- osm_int4   | 18 |            9 | 9223372036854775806 | 9223372036854775807
- osm_int8   | 19 |           10 | 9223372036854775806 | 9223372036854775807
- osm_date   | 20 |           11 | 9223372036854775806 | 9223372036854775807
- osm_ts     | 21 |           12 | 9223372036854775806 | 9223372036854775807
- osm_tstz   | 22 |           13 | 9223372036854775806 | 9223372036854775807
+ osm_int2   | 22 |           11 | 9223372036854775806 | 9223372036854775807
+ osm_int4   | 23 |           12 | 9223372036854775806 | 9223372036854775807
+ osm_int8   | 24 |           13 | 9223372036854775806 | 9223372036854775807
+ osm_date   | 25 |           14 | 9223372036854775806 | 9223372036854775807
+ osm_ts     | 26 |           15 | 9223372036854775806 | 9223372036854775807
+ osm_tstz   | 27 |           16 | 9223372036854775806 | 9223372036854775807
 (6 rows)
 
 -- test that correct slice is found and updated for table with multiple chunk constraints
@@ -1348,8 +1412,8 @@ _timescaledb_catalog.chunk c, _timescaledb_catalog.chunk_constraint cc WHERE c.h
 AND c.id = cc.chunk_id;
 id | hypertable_id |      schema_name      |     table_name     | compressed_chunk_id | dropped | status | osm_chunk | chunk_id | dimension_slice_id |       constraint_name       | hypertable_constraint_name 
----+---------------+-----------------------+--------------------+---------------------+---------+--------+-----------+----------+--------------------+-----------------------------+----------------------------
- 26 |            16 | _timescaledb_internal | _hyper_16_26_chunk |                     | f       |      0 | f         |       26 |                    | 26_5_test_multicon_time_key | test_multicon_time_key
- 26 |            16 | _timescaledb_internal | _hyper_16_26_chunk |                     | f       |      0 | f         |       26 |                 23 | constraint_23               | 
+ 31 |            19 | _timescaledb_internal | _hyper_19_31_chunk |                     | f       |      0 | f         |       31 |                    | 31_5_test_multicon_time_key | test_multicon_time_key
+ 31 |            19 | _timescaledb_internal | _hyper_19_31_chunk |                     | f       |      0 | f         |       31 |                 28 | constraint_28               | 
 (2 rows)
 
 \c :TEST_DBNAME :ROLE_SUPERUSER ;
@@ -1367,7 +1431,7 @@ FROM _timescaledb_catalog.chunk c, _timescaledb_catalog.chunk_constraint cc, _ti
 WHERE c.hypertable_id = :htid AND cc.chunk_id = c.id AND ds.id = cc.dimension_slice_id;
 chunk_id |     table_name     | status | osm_chunk | dimension_slice_id |   range_start    |    range_end     
----------+--------------------+--------+-----------+--------------------+------------------+------------------
-      26 | _hyper_16_26_chunk |      0 | t         |                 23 | 1577955600000000 | 1578128400000000
+      31 | _hyper_19_31_chunk |      0 | t         |                 28 | 1577955600000000 | 1578128400000000
 (1 row)
 
 -- check that range was reset to default - infinity
@@ -1395,7 +1459,7 @@ FROM _timescaledb_catalog.chunk c, _timescaledb_catalog.chunk_constraint cc, _ti
 WHERE c.hypertable_id = :htid AND cc.chunk_id = c.id AND ds.id = cc.dimension_slice_id ORDER BY cc.chunk_id;
 chunk_id |     table_name     | status | osm_chunk | dimension_slice_id |     range_start     |      range_end      
----------+--------------------+--------+-----------+--------------------+---------------------+---------------------
-      26 | _hyper_16_26_chunk |      0 | t         |                 23 | 9223372036854775806 | 9223372036854775807
+      31 | _hyper_19_31_chunk |      0 | t         |                 28 | 9223372036854775806 | 9223372036854775807
 (1 row)
 
 -- TEST for orderedappend that depends on hypertable_osm_range_update functionality
@@ -1420,9 +1484,9 @@ FROM _timescaledb_catalog.chunk c, _timescaledb_catalog.chunk_constraint cc, _ti
 WHERE c.hypertable_id = :htid AND cc.chunk_id = c.id AND ds.id = cc.dimension_slice_id ORDER BY cc.chunk_id;
 chunk_id |       table_name        | status | osm_chunk | dimension_slice_id |     range_start     |      range_end      
----------+-------------------------+--------+-----------+--------------------+---------------------+---------------------
-      27 | _hyper_17_27_chunk      |      0 | f         |                 24 |    1577836800000000 |    1577923200000000
-      28 | _hyper_17_28_chunk      |      0 | f         |                 25 |    1577923200000000 |    1578009600000000
-      29 | test_chunkapp_fdw_child |      0 | t         |                 26 | 9223372036854775806 | 9223372036854775807
+      32 | _hyper_20_32_chunk      |      0 | f         |                 29 |    1577836800000000 |    1577923200000000
+      33 | _hyper_20_33_chunk      |      0 | f         |                 30 |    1577923200000000 |    1578009600000000
+      34 | test_chunkapp_fdw_child |      0 | t         |                 31 | 9223372036854775806 | 9223372036854775807
 (3 rows)
 
 -- attempt to update overlapping range, should fail
@@ -1443,9 +1507,9 @@ FROM _timescaledb_catalog.chunk c, _timescaledb_catalog.chunk_constraint cc, _ti
 WHERE c.hypertable_id = :htid AND cc.chunk_id = c.id AND ds.id = cc.dimension_slice_id ORDER BY cc.chunk_id;
 chunk_id |       table_name        | status | osm_chunk | dimension_slice_id |   range_start    |    range_end     
----------+-------------------------+--------+-----------+--------------------+------------------+------------------
-      27 | _hyper_17_27_chunk      |      0 | f         |                 24 | 1577836800000000 | 1577923200000000
-      28 | _hyper_17_28_chunk      |      0 | f         |                 25 | 1577923200000000 | 1578009600000000
-      29 | test_chunkapp_fdw_child |      0 | t         |                 26 | 1578038400000000 | 1578124800000000
+      32 | _hyper_20_32_chunk      |      0 | f         |                 29 | 1577836800000000 | 1577923200000000
+      33 | _hyper_20_33_chunk      |      0 | f         |                 30 | 1577923200000000 | 1578009600000000
+      34 | test_chunkapp_fdw_child |      0 | t         |                 31 | 1578038400000000 | 1578124800000000
 (3 rows)
 
 -- ordered append should be possible as ranges do not overlap
@@ -1454,8 +1518,8 @@ WHERE c.hypertable_id = :htid AND cc.chunk_id = c.id AND ds.id = cc.dimension_sl
 -------------------------------------------------------------------------------------------------
  Custom Scan (ChunkAppend) on test_chunkapp
    Order: test_chunkapp."time"
-   ->  Index Scan Backward using _hyper_17_27_chunk_test_chunkapp_time_idx on _hyper_17_27_chunk
-   ->  Index Scan Backward using _hyper_17_28_chunk_test_chunkapp_time_idx on _hyper_17_28_chunk
+   ->  Index Scan Backward using _hyper_20_32_chunk_test_chunkapp_time_idx on _hyper_20_32_chunk
+   ->  Index Scan Backward using _hyper_20_33_chunk_test_chunkapp_time_idx on _hyper_20_33_chunk
    ->  Foreign Scan on test_chunkapp_fdw_child
 (5 rows)
 
@@ -1496,9 +1560,9 @@ SELECT _timescaledb_functions.hypertable_osm_range_update('test_chunkapp',empty:
                                            QUERY PLAN                                            
 -------------------------------------------------------------------------------------------------
  Merge Append
-   Sort Key: _hyper_17_27_chunk."time"
-   ->  Index Scan Backward using _hyper_17_27_chunk_test_chunkapp_time_idx on _hyper_17_27_chunk
-   ->  Index Scan Backward using _hyper_17_28_chunk_test_chunkapp_time_idx on _hyper_17_28_chunk
+   Sort Key: _hyper_20_32_chunk."time"
+   ->  Index Scan Backward using _hyper_20_32_chunk_test_chunkapp_time_idx on _hyper_20_32_chunk
+   ->  Index Scan Backward using _hyper_20_33_chunk_test_chunkapp_time_idx on _hyper_20_33_chunk
    ->  Foreign Scan on test_chunkapp_fdw_child
 (5 rows)
 
@@ -1515,9 +1579,9 @@ FROM _timescaledb_catalog.chunk c, _timescaledb_catalog.chunk_constraint cc, _ti
 WHERE c.hypertable_id = :htid AND cc.chunk_id = c.id AND ds.id = cc.dimension_slice_id ORDER BY cc.chunk_id;
 chunk_id |       table_name        | status | osm_chunk | dimension_slice_id |     range_start     |      range_end      
----------+-------------------------+--------+-----------+--------------------+---------------------+---------------------
-      27 | _hyper_17_27_chunk      |      0 | f         |                 24 |    1577836800000000 |    1577923200000000
-      28 | _hyper_17_28_chunk      |      0 | f         |                 25 |    1577923200000000 |    1578009600000000
-      29 | test_chunkapp_fdw_child |      0 | t         |                 26 | 9223372036854775806 | 9223372036854775807
+      32 | _hyper_20_32_chunk      |      0 | f         |                 29 |    1577836800000000 |    1577923200000000
+      33 | _hyper_20_33_chunk      |      0 | f         |                 30 |    1577923200000000 |    1578009600000000
+      34 | test_chunkapp_fdw_child |      0 | t         |                 31 | 9223372036854775806 | 9223372036854775807
 (3 rows)
 
 -- but also, OSM chunk should be included in the scan, since range is invalid and chunk is not empty
@@ -1525,10 +1589,10 @@ WHERE c.hypertable_id = :htid AND cc.chunk_id = c.id AND ds.id = cc.dimension_sl
                                            QUERY PLAN                                            
 -------------------------------------------------------------------------------------------------
  Merge Append
-   Sort Key: _hyper_17_27_chunk."time"
-   ->  Index Scan Backward using _hyper_17_27_chunk_test_chunkapp_time_idx on _hyper_17_27_chunk
+   Sort Key: _hyper_20_32_chunk."time"
+   ->  Index Scan Backward using _hyper_20_32_chunk_test_chunkapp_time_idx on _hyper_20_32_chunk
         Index Cond: ("time" < 'Sun Jan 01 00:00:00 2023 PST'::timestamp with time zone)
-   ->  Index Scan Backward using _hyper_17_28_chunk_test_chunkapp_time_idx on _hyper_17_28_chunk
+   ->  Index Scan Backward using _hyper_20_33_chunk_test_chunkapp_time_idx on _hyper_20_33_chunk
         Index Cond: ("time" < 'Sun Jan 01 00:00:00 2023 PST'::timestamp with time zone)
    ->  Foreign Scan on test_chunkapp_fdw_child
 (7 rows)
 
@@ -1556,8 +1620,8 @@ SELECT _timescaledb_functions.hypertable_osm_range_update('test_chunkapp', NULL:
 -------------------------------------------------------------------------------------------------
  Custom Scan (ChunkAppend) on test_chunkapp
    Order: test_chunkapp."time"
-   ->  Index Scan Backward using _hyper_17_27_chunk_test_chunkapp_time_idx on _hyper_17_27_chunk
-   ->  Index Scan Backward using _hyper_17_28_chunk_test_chunkapp_time_idx on _hyper_17_28_chunk
+   ->  Index Scan Backward using _hyper_20_32_chunk_test_chunkapp_time_idx on _hyper_20_32_chunk
+   ->  Index Scan Backward using _hyper_20_33_chunk_test_chunkapp_time_idx on _hyper_20_33_chunk
    ->  Foreign Scan on test_chunkapp_fdw_child
 (5 rows)
 
@@ -1574,9 +1638,9 @@ SELECT * FROM test_chunkapp ORDER BY 1;
 -------------------------------------------------------------------------------------------------
  Custom Scan (ChunkAppend) on test_chunkapp
    Order: test_chunkapp."time"
-   ->  Index Scan Backward using _hyper_17_27_chunk_test_chunkapp_time_idx on _hyper_17_27_chunk
+   ->  Index Scan Backward using _hyper_20_32_chunk_test_chunkapp_time_idx on _hyper_20_32_chunk
         Index Cond: ("time" < 'Sun Jan 01 00:00:00 2023 PST'::timestamp with time zone)
-   ->  Index Scan Backward using _hyper_17_28_chunk_test_chunkapp_time_idx on _hyper_17_28_chunk
+   ->  Index Scan Backward using _hyper_20_33_chunk_test_chunkapp_time_idx on _hyper_20_33_chunk
         Index Cond: ("time" < 'Sun Jan 01 00:00:00 2023 PST'::timestamp with time zone)
 (6 rows)
 
@@ -1613,7 +1677,7 @@ CREATE TABLE test2(time timestamptz not null, a int);
 SELECT create_hypertable('test2', 'time');
  create_hypertable  
 --------------------
- (18,public,test2,t)
+ (21,public,test2,t)
 (1 row)
 
 INSERT INTO test2 VALUES ('2020-01-01'::timestamptz, 1);
@@ -1624,7 +1688,7 @@ psql:include/chunk_utils_internal_orderedappend.sql:138: NOTICE:  default order
 SELECT compress_chunk(show_chunks('test2'));
               compress_chunk              
 ------------------------------------------
- _timescaledb_internal._hyper_18_30_chunk
+ _timescaledb_internal._hyper_21_35_chunk
 (1 row)
 
 -- find internal compression table, call API function on it
@@ -1633,7 +1697,7 @@ FROM _timescaledb_catalog.hypertable ht, _timescaledb_catalog.hypertable cht
 WHERE ht.table_name = 'test2' and cht.id = ht.compressed_hypertable_id \gset
 \set ON_ERROR_STOP 0
 SELECT _timescaledb_functions.hypertable_osm_range_update(:'COMPRESSION_TBLNM'::regclass, '2020-01-01'::timestamptz);
-psql:include/chunk_utils_internal_orderedappend.sql:145: ERROR:  could not find time dimension for hypertable _timescaledb_internal._compressed_hypertable_19
+psql:include/chunk_utils_internal_orderedappend.sql:145: ERROR:  could not find time dimension for hypertable _timescaledb_internal._compressed_hypertable_22
 \set ON_ERROR_STOP 1
 -- test wrong/incompatible data types with hypertable time dimension
 -- update range of int2 with int4
diff --git a/tsl/test/sql/chunk_utils_internal.sql b/tsl/test/sql/chunk_utils_internal.sql
index feeb83bc54b..fcd6d4efa60 100644
--- a/tsl/test/sql/chunk_utils_internal.sql
+++ b/tsl/test/sql/chunk_utils_internal.sql
@@ -440,6 +440,56 @@ CALL refresh_continuous_aggregate('ht_try_weekly', '2019-12-29', '2020-01-10', f
 SELECT * FROM ht_try_weekly;
 DROP MATERIALIZED VIEW ht_try_weekly;
 
+\c :TEST_DBNAME :ROLE_SUPERUSER
+CREATE USER MAPPING FOR :ROLE_SUPERUSER SERVER s3_server
+OPTIONS (user :'ROLE_SUPERUSER', password_required 'false');
+
+CREATE FUNCTION create_test_cagg(include_tiered_data JSONB)
+RETURNS INTEGER AS
+$$
+DECLARE
+    job_id INTEGER;
+BEGIN
+    CREATE MATERIALIZED VIEW ht_try_weekly
+    WITH (timescaledb.continuous) AS
+    SELECT time_bucket(interval '1 week', timec) AS ts_bucket, avg(value)
+    FROM ht_try
+    GROUP BY 1
+    WITH NO DATA;
+
+    job_id := add_continuous_aggregate_policy(
+        'ht_try_weekly',
+        start_offset => NULL,
+        end_offset => INTERVAL '1 hour',
+        schedule_interval => INTERVAL '1 hour'
+    );
+
+    UPDATE _timescaledb_config.bgw_job
+    SET config = jsonb_insert(config, '{include_tiered_data}'::TEXT[], include_tiered_data)
+    WHERE id = job_id;
+
+    RETURN job_id;
+END
+$$ LANGUAGE plpgsql;
+
+SELECT create_test_cagg('true') AS job_id \gset
+CALL run_job(:job_id);
+SELECT * FROM ht_try_weekly;
+DROP MATERIALIZED VIEW ht_try_weekly;
+
+SELECT create_test_cagg('false') AS job_id \gset
+CALL run_job(:job_id);
+SELECT * FROM ht_try_weekly;
+DROP MATERIALIZED VIEW ht_try_weekly;
+
+-- default behavior: use instance-wide GUC value
+SELECT create_test_cagg('null') AS job_id \gset
+CALL run_job(:job_id);
+SELECT * FROM ht_try_weekly;
+DROP MATERIALIZED VIEW ht_try_weekly;
+
+\c :TEST_DBNAME :ROLE_4;
+
 -- This test verifies that a bugfix regarding the way `ROWID_VAR`s are adjusted
 -- in the chunks' targetlists on DELETE/UPDATE works (including partially
 -- compressed chunks)
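
Usage sketch (not part of the diff): the refresh policy now reads an optional
boolean key, include_tiered_data, from its job config. When the key is present,
policy_refresh_cagg_execute() flips timescaledb.enable_tiered_reads on or off
for the duration of the refresh and restores the previous session value
afterwards; when the key is absent, the instance-wide GUC is left untouched.
This diff does not add a user-facing parameter to
add_continuous_aggregate_policy(), so — as the tests above do with
jsonb_insert — the key can be set on an existing policy's config directly
(assuming a policy job id is available as :job_id):

    -- force the refresh to see tiered (OSM) chunks, regardless of the GUC default
    UPDATE _timescaledb_config.bgw_job
    SET config = jsonb_set(config, '{include_tiered_data}', 'true')
    WHERE id = :job_id;
    CALL run_job(:job_id);

    -- remove the key again: fall back to the timescaledb.enable_tiered_reads setting
    UPDATE _timescaledb_config.bgw_job
    SET config = config - 'include_tiered_data'
    WHERE id = :job_id;

Saving ts_guc_enable_osm_reads before the refresh and restoring it afterwards
keeps the per-policy override from leaking into other jobs that reuse the same
background worker session.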