This release contains performance improvements and bug fixes since the 2.16.1 release. We recommend that you upgrade at the next available opportunity.

**Features**
* #6882: Allow DELETE on compressed chunks without decompression.
* #7033: Use a MERGE statement for CAgg refresh.
* #7126: Add functions to show compression information.
* #7147: Vectorize partial aggregation for `sum`.
* #7200: Vectorize common aggregate functions like `min`, `max`, `sum`, `avg`, `stddev`, and `variance` for compressed columns of arithmetic types, when grouping on segmentby columns or with no grouping (see the sketch after these release notes).
* #7204: Track additional extensions in telemetry.
* #7207: Refactor the `decompress_batches_scan` functions for easier maintenance.
* #7209: Add a function to drop the `osm` chunk.
* #7275: Add support for the RETURNING clause in MERGE.
* #7295: Support `ALTER TABLE ... SET ACCESS METHOD` on hypertables.

**Bugfixes**
* #7187: Fix the string literal length for the `compressed_data_info` function.
* #7191: Fix creating default indexes on chunks when migrating the data.
* #7195: Fix the `segment by` and `order by` checks when dropping a column from a compressed hypertable.
* #7201: Use the generic extension description when building `apt` and `rpm` loader packages.
* #7227: Add an index to the `compression_chunk_size` catalog table.
* #7229: Fix the foreign key constraints where the index and the constraint column order are different.
* #7230: Do not propagate the foreign key constraints to the `osm` chunk.
* #7234: Release the cache after accessing the cache entry.
* #7258: Force English in the `pg_config` command executed by CMake to avoid unexpected build errors.
* #7270: Fix a memory leak in compressed DML batch filtering.
* #7286: Fix the index column check when searching for an index.
* #7290: Add a check for a NULL offset for CAggs built on top of CAggs.
* #7301: Make foreign key behaviour for hypertables consistent.
* #7318: Fix chunk-skipping range filtering.
* #7320: Set a license-specific extension comment in the install script.

**Thanks**
* @MiguelTubio for reporting and fixing a Windows build error.
* @posuch for reporting the misleading extension description in the generic loader packages.
* @snyrkill for discovering and reporting the issue.
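As a rough, hypothetical illustration of the vectorized aggregation work in #7200: on a compressed hypertable segmented by a column such as `device_id`, aggregates over arithmetic columns can use the vectorized paths when grouping on segmentby columns. The `metrics` table and its columns below are placeholders, not part of this release:

```sql
-- Hypothetical schema: a compressed hypertable "metrics" with
-- segmentby column "device_id" and numeric column "temperature".
-- Aggregates like these can now be computed with vectorized execution
-- over compressed data when grouping on a segmentby column.
SELECT device_id,
       min(temperature)    AS min_temp,
       max(temperature)    AS max_temp,
       avg(temperature)    AS avg_temp,
       stddev(temperature) AS temp_stddev
FROM metrics
GROUP BY device_id;
```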
@@ -0,0 +1,11 @@
```sql
CREATE FUNCTION _timescaledb_functions.compressed_data_info(_timescaledb_internal.compressed_data)
RETURNS TABLE (algorithm name, has_nulls bool)
AS '@MODULE_PATHNAME@', 'ts_update_placeholder'
LANGUAGE C STRICT IMMUTABLE SET search_path = pg_catalog, pg_temp;

CREATE INDEX compression_chunk_size_idx ON _timescaledb_catalog.compression_chunk_size (compressed_chunk_id);

CREATE FUNCTION _timescaledb_functions.drop_osm_chunk(hypertable REGCLASS)
RETURNS BOOL
AS '@MODULE_PATHNAME@', 'ts_update_placeholder'
LANGUAGE C VOLATILE;
```
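For orientation, a minimal sketch of how these additions might be used after the upgrade. The compressed chunk name `_timescaledb_internal.compress_hyper_1_2_chunk`, the compressed column `temperature`, and the hypertable name `metrics` are hypothetical placeholders that depend on your schema:

```sql
-- Inspect the compression algorithm and null flag of one compressed value
-- (chunk and column names are placeholders for illustration only).
SELECT info.algorithm, info.has_nulls
FROM _timescaledb_internal.compress_hyper_1_2_chunk c,
     LATERAL _timescaledb_functions.compressed_data_info(c.temperature) AS info
LIMIT 1;

-- Drop the OSM chunk of a hypertable, if present
-- ('metrics' is a hypothetical hypertable name).
SELECT _timescaledb_functions.drop_osm_chunk('metrics');
```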
@@ -0,0 +1,119 @@
```sql
-- check whether we can safely downgrade the existing compression setup
CREATE OR REPLACE FUNCTION _timescaledb_functions.add_sequence_number_metadata_column(
  comp_ch_schema_name text,
  comp_ch_table_name text
)
RETURNS BOOL LANGUAGE PLPGSQL AS
$BODY$
DECLARE
  chunk_schema_name text;
  chunk_table_name text;
  index_name text;
  segmentby_columns text;
BEGIN
  SELECT ch.schema_name, ch.table_name INTO STRICT chunk_schema_name, chunk_table_name
  FROM _timescaledb_catalog.chunk ch
  INNER JOIN _timescaledb_catalog.chunk comp_ch
    ON ch.compressed_chunk_id = comp_ch.id
  WHERE comp_ch.schema_name = comp_ch_schema_name
    AND comp_ch.table_name = comp_ch_table_name;

  IF NOT FOUND THEN
    RAISE USING
      ERRCODE = 'feature_not_supported',
      MESSAGE = 'Cannot migrate compressed chunk to version 2.16.1, chunk not found';
  END IF;

  -- Add sequence number column to compressed chunk
  EXECUTE format('ALTER TABLE %s.%s ADD COLUMN _ts_meta_sequence_num INT DEFAULT NULL', comp_ch_schema_name, comp_ch_table_name);

  -- Remove all indexes from compressed chunk
  FOR index_name IN
    SELECT format('%s.%s', i.schemaname, i.indexname)
    FROM pg_indexes i
    WHERE i.schemaname = comp_ch_schema_name
      AND i.tablename = comp_ch_table_name
  LOOP
    EXECUTE format('DROP INDEX %s;', index_name);
  END LOOP;

  -- Fetch the segmentby columns from compression settings
  SELECT string_agg(cs.segmentby_column, ',') INTO segmentby_columns
  FROM (
    SELECT unnest(segmentby)
    FROM _timescaledb_catalog.compression_settings
    WHERE relid = format('%s.%s', comp_ch_schema_name, comp_ch_table_name)::regclass::oid
      AND segmentby IS NOT NULL
  ) AS cs(segmentby_column);

  -- Create compressed chunk index based on sequence num metadata column
  -- If there are no segmentby columns, we can skip creating the index
  IF FOUND AND segmentby_columns IS NOT NULL THEN
    EXECUTE format('CREATE INDEX ON %s.%s (%s, _ts_meta_sequence_num);', comp_ch_schema_name, comp_ch_table_name, segmentby_columns);
  END IF;

  -- Mark compressed chunk as unordered
  -- Setting the chunk status bit (2) marks it as unordered
  -- and disables some optimizations. In order to re-enable
  -- them, you need to recompress these chunks.
  UPDATE _timescaledb_catalog.chunk
  SET status = status | 2 -- set unordered bit
  WHERE schema_name = chunk_schema_name
    AND table_name = chunk_table_name;

  RETURN true;
END
$BODY$ SET search_path TO pg_catalog, pg_temp;

DO $$
DECLARE
  chunk_count int;
  chunk_record record;
BEGIN
  -- If we find chunks whose compressed chunk lacks the sequence number
  -- metadata column, we need to stop the downgrade and have the user run
  -- a migration script to re-add the missing columns.
  SELECT count(*) INTO STRICT chunk_count
  FROM _timescaledb_catalog.chunk ch
  INNER JOIN _timescaledb_catalog.chunk uncomp_ch
    ON uncomp_ch.compressed_chunk_id = ch.id
  WHERE NOT EXISTS (
      SELECT
      FROM pg_attribute att
      WHERE attrelid = format('%I.%I', ch.schema_name, ch.table_name)::regclass
        AND attname = '_ts_meta_sequence_num')
    AND NOT uncomp_ch.dropped;

  -- Do the migration only if we find 10 or fewer chunks that need to be migrated
  IF chunk_count > 10 THEN
    RAISE USING
      ERRCODE = 'feature_not_supported',
      MESSAGE = 'Cannot downgrade compressed hypertables with chunks that do not contain sequence numbers. Run timescaledb--2.17-2.16.1.sql migration script before downgrading.',
      DETAIL = 'Number of chunks that need to be migrated: ' || chunk_count::text;
  ELSIF chunk_count > 0 THEN
    FOR chunk_record IN
      SELECT comp_ch.*
      FROM _timescaledb_catalog.chunk ch
      INNER JOIN _timescaledb_catalog.chunk comp_ch
        ON ch.compressed_chunk_id = comp_ch.id
      WHERE NOT EXISTS (
          SELECT
          FROM pg_attribute att
          WHERE attrelid = format('%I.%I', comp_ch.schema_name, comp_ch.table_name)::regclass
            AND attname = '_ts_meta_sequence_num')
        AND NOT ch.dropped
    LOOP
      PERFORM _timescaledb_functions.add_sequence_number_metadata_column(chunk_record.schema_name, chunk_record.table_name);
      RAISE LOG 'Migrated compressed chunk %.% to version 2.16.1', chunk_record.schema_name, chunk_record.table_name;
    END LOOP;

    RAISE LOG 'Migration successful!';
  END IF;
END
$$;

DROP FUNCTION _timescaledb_functions.add_sequence_number_metadata_column(text, text);

DROP FUNCTION _timescaledb_functions.compressed_data_info(_timescaledb_internal.compressed_data);
DROP INDEX _timescaledb_catalog.compression_chunk_size_idx;
DROP FUNCTION IF EXISTS _timescaledb_functions.drop_osm_chunk(REGCLASS);
```
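For reference, a small catalog query (assuming the status-bit semantics described in the comments above, where bit 2 means unordered) to list the chunks that the downgrade has flagged and that would need recompression to regain the disabled optimizations:

```sql
-- List chunks whose unordered bit (2) is set in the catalog status column.
SELECT schema_name, table_name
FROM _timescaledb_catalog.chunk
WHERE status & 2 = 2
  AND NOT dropped;
```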
@@ -1,11 +0,0 @@ (deleted file; contents identical to the 11-line script added above)
@@ -1,119 +0,0 @@ (deleted file; contents identical to the 119-line downgrade script added above)
```diff
@@ -1,3 +1,3 @@
-version = 2.17.0-dev
+version = 2.17.0
 update_from_version = 2.16.1
 downgrade_to_version = 2.16.1
```
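With the package version bumped from 2.17.0-dev to 2.17.0 and `update_from_version` pointing at 2.16.1, an existing installation would typically pick up this release through the standard extension update path. The command below is illustrative; run it in each database that has TimescaleDB installed, after the new binaries are in place:

```sql
-- Standard PostgreSQL extension update; assumes the 2.17.0 packages are installed.
ALTER EXTENSION timescaledb UPDATE TO '2.17.0';
```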