diff --git a/.unreleased/bugfix_5990 b/.unreleased/bugfix_5990
new file mode 100644
index 00000000000..39513242393
--- /dev/null
+++ b/.unreleased/bugfix_5990
@@ -0,0 +1 @@
+Fixes: #5990 Place data in first/last function in correct mctx
diff --git a/src/agg_bookend.c b/src/agg_bookend.c
index ae163c646f5..90cd4e556aa 100644
--- a/src/agg_bookend.c
+++ b/src/agg_bookend.c
@@ -137,18 +137,17 @@ polydatum_deserialize_type(StringInfo buf)
  *
  */
 static PolyDatum *
-polydatum_deserialize(PolyDatum *result, StringInfo buf, PolyDatumIOState *state,
-					  FunctionCallInfo fcinfo)
+polydatum_deserialize(MemoryContext mem_ctx, PolyDatum *result, StringInfo buf,
+					  PolyDatumIOState *state, FunctionCallInfo fcinfo)
 {
 	int itemlen;
 	StringInfoData item_buf;
 	StringInfo bufptr;
 	char csave;
 
-	if (result == NULL)
-	{
-		result = palloc(sizeof(PolyDatum));
-	}
+	Assert(result != NULL);
+
+	MemoryContext old_context = MemoryContextSwitchTo(mem_ctx);
 
 	result->type_oid = polydatum_deserialize_type(buf);
 
@@ -212,6 +211,9 @@ polydatum_deserialize(PolyDatum *result, StringInfo buf, PolyDatumIOState *state
 
 		buf->data[buf->cursor] = csave;
 	}
+
+	MemoryContextSwitchTo(old_context);
+
 	return result;
 }
 
@@ -511,12 +513,13 @@ ts_bookend_serializefunc(PG_FUNCTION_ARGS)
 Datum
 ts_bookend_deserializefunc(PG_FUNCTION_ARGS)
 {
+	MemoryContext aggcontext;
 	bytea *sstate;
 	StringInfoData buf;
 	InternalCmpAggStore *result;
 	InternalCmpAggStoreIOState *my_extra;
 
-	if (!AggCheckCallContext(fcinfo, NULL))
+	if (!AggCheckCallContext(fcinfo, &aggcontext))
 		elog(ERROR, "aggregate function called in non-aggregate context");
 
 	sstate = PG_GETARG_BYTEA_P(0);
@@ -536,9 +539,9 @@ ts_bookend_deserializefunc(PG_FUNCTION_ARGS)
 		my_extra = (InternalCmpAggStoreIOState *) fcinfo->flinfo->fn_extra;
 	}
 
-	result = palloc(sizeof(InternalCmpAggStore));
-	polydatum_deserialize(&result->value, &buf, &my_extra->value, fcinfo);
-	polydatum_deserialize(&result->cmp, &buf, &my_extra->cmp, fcinfo);
+	result = MemoryContextAllocZero(aggcontext, sizeof(InternalCmpAggStore));
+	polydatum_deserialize(aggcontext, &result->value, &buf, &my_extra->value, fcinfo);
+	polydatum_deserialize(aggcontext, &result->cmp, &buf, &my_extra->cmp, fcinfo);
 
 	PG_RETURN_POINTER(result);
 }
diff --git a/test/expected/agg_bookends.out b/test/expected/agg_bookends.out
index 1c9868e146e..38811b89227 100644
--- a/test/expected/agg_bookends.out
+++ b/test/expected/agg_bookends.out
@@ -1500,3 +1500,42 @@ ROLLBACK;
 (1 row)
 
  time | gp | temp 
+-- Test partial aggregation
+CREATE TABLE partial_aggregation (time timestamptz NOT NULL, quantity numeric, longvalue text);
+SELECT schema_name, table_name, created FROM create_hypertable('partial_aggregation', 'time');
+ schema_name |     table_name      | created 
+-------------+---------------------+---------
+ public      | partial_aggregation | t
+(1 row)
+
+INSERT INTO partial_aggregation VALUES('2018-01-20T09:00:43', NULL, NULL);
+INSERT INTO partial_aggregation VALUES('2018-01-20T09:00:43', NULL, NULL);
+INSERT INTO partial_aggregation VALUES('2019-01-20T09:00:43', 1, 'Hello');
+INSERT INTO partial_aggregation VALUES('2019-01-20T09:00:43', 2, 'World');
+-- Use enable_partitionwise_aggregate to create partial aggregates per chunk
+SET enable_partitionwise_aggregate = ON;
+SELECT first(time, quantity) FROM partial_aggregation;
+            first             
+------------------------------
+ Sun Jan 20 09:00:43 2019 PST
+(1 row)
+
+SELECT last(time, quantity) FROM partial_aggregation;
+             last             
+------------------------------
+ Sun Jan 20 09:00:43 2019 PST
+(1 row)
+
+SELECT first(longvalue, quantity) FROM partial_aggregation;
+ first 
+-------
+ Hello
+(1 row)
+
+SELECT last(longvalue, quantity) FROM partial_aggregation;
+ last  
+-------
+ World
+(1 row)
+
+SET enable_partitionwise_aggregate = OFF;
diff --git a/test/sql/agg_bookends.sql b/test/sql/agg_bookends.sql
index 3724874580f..a0ac7c4b576 100644
--- a/test/sql/agg_bookends.sql
+++ b/test/sql/agg_bookends.sql
@@ -31,3 +31,21 @@ SET timescaledb.enable_optimizations TO true;
 
 \o
 :DIFF_CMD
+
+-- Test partial aggregation
+CREATE TABLE partial_aggregation (time timestamptz NOT NULL, quantity numeric, longvalue text);
+SELECT schema_name, table_name, created FROM create_hypertable('partial_aggregation', 'time');
+
+INSERT INTO partial_aggregation VALUES('2018-01-20T09:00:43', NULL, NULL);
+INSERT INTO partial_aggregation VALUES('2018-01-20T09:00:43', NULL, NULL);
+INSERT INTO partial_aggregation VALUES('2019-01-20T09:00:43', 1, 'Hello');
+INSERT INTO partial_aggregation VALUES('2019-01-20T09:00:43', 2, 'World');
+
+-- Use enable_partitionwise_aggregate to create partial aggregates per chunk
+SET enable_partitionwise_aggregate = ON;
+SELECT first(time, quantity) FROM partial_aggregation;
+SELECT last(time, quantity) FROM partial_aggregation;
+SELECT first(longvalue, quantity) FROM partial_aggregation;
+SELECT last(longvalue, quantity) FROM partial_aggregation;
+SET enable_partitionwise_aggregate = OFF;
+
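
Note (illustrative sketch, not part of the patch): the change above follows the usual PostgreSQL rule that state returned from an aggregate support function must live in the aggregate's memory context, obtained via AggCheckCallContext, and that nested allocations made while filling that state must happen under a MemoryContextSwitchTo to the same context; otherwise the data is freed with the short-lived per-call context, which is the failure the partial-aggregation test above exercises. The sketch below shows that pattern in isolation; demo_deserializefunc and DemoState are hypothetical names, while the memory-context calls are the same ones the fix uses.

/*
 * Minimal sketch of the aggcontext allocation pattern, under the
 * assumptions stated above. Not TimescaleDB code.
 */
#include "postgres.h"
#include "fmgr.h"

PG_MODULE_MAGIC;

PG_FUNCTION_INFO_V1(demo_deserializefunc);

typedef struct DemoState
{
	int64		value;
} DemoState;

Datum
demo_deserializefunc(PG_FUNCTION_ARGS)
{
	MemoryContext aggcontext;
	MemoryContext oldcontext;
	DemoState  *state;

	/* Aggregate support functions may only run in an aggregate context. */
	if (!AggCheckCallContext(fcinfo, &aggcontext))
		elog(ERROR, "aggregate function called in non-aggregate context");

	/* The returned state must outlive this call: allocate it in aggcontext. */
	state = (DemoState *) MemoryContextAllocZero(aggcontext, sizeof(DemoState));

	/* Nested allocations made while filling the state must land there too. */
	oldcontext = MemoryContextSwitchTo(aggcontext);
	state->value = 0;			/* deserialize the actual payload here */
	MemoryContextSwitchTo(oldcontext);

	PG_RETURN_POINTER(state);
}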