Place data in first/last function in correct mctx
So far, the ts_bookend_deserializefunc() function has allocated the
deserialized data in the current memory context. This data could be
freed before the aggregation is finished. This patch moves the data
into the aggregation memory context.
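For illustration, here is a minimal sketch of the allocation pattern the fix relies on. This is not code from this repository; the DemoAggState struct and demo_deserializefunc name are made up. The point is that state returned by an aggregate support function must be placed in the aggregate memory context reported by AggCheckCallContext(), because the per-call context it would otherwise land in can be reset before the combine/final step runs.

#include "postgres.h"
#include "fmgr.h"

/* Illustrative partial-aggregate state; not a TimescaleDB type. */
typedef struct DemoAggState
{
	int64 count;
} DemoAggState;

PG_FUNCTION_INFO_V1(demo_deserializefunc);

Datum
demo_deserializefunc(PG_FUNCTION_ARGS)
{
	MemoryContext aggcontext;
	DemoAggState *state;

	/* Also fills aggcontext with the aggregate's memory context. */
	if (!AggCheckCallContext(fcinfo, &aggcontext))
		elog(ERROR, "aggregate function called in non-aggregate context");

	/*
	 * Wrong: plain palloc() would allocate in CurrentMemoryContext, which
	 * may be freed before the aggregation finishes:
	 *
	 *     state = palloc(sizeof(DemoAggState));
	 *
	 * Right: allocate in the long-lived aggregate context instead.
	 * (Deserialization of the bytea argument is omitted from this sketch.)
	 */
	state = MemoryContextAllocZero(aggcontext, sizeof(DemoAggState));
	state->count = 0;

	PG_RETURN_POINTER(state);
}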
jnidzwetzki committed Aug 21, 2023
1 parent 0a66bdb commit d45706a
Showing 4 changed files with 71 additions and 10 deletions.
1 change: 1 addition & 0 deletions .unreleased/bugfix_5990
@@ -0,0 +1 @@
Fixes: #5990 Place data in first/last function in correct mctx
23 changes: 13 additions & 10 deletions src/agg_bookend.c
@@ -137,18 +137,17 @@ polydatum_deserialize_type(StringInfo buf)
  *
  */
 static PolyDatum *
-polydatum_deserialize(PolyDatum *result, StringInfo buf, PolyDatumIOState *state,
-					  FunctionCallInfo fcinfo)
+polydatum_deserialize(MemoryContext mem_ctx, PolyDatum *result, StringInfo buf,
+					  PolyDatumIOState *state, FunctionCallInfo fcinfo)
 {
 	int itemlen;
 	StringInfoData item_buf;
 	StringInfo bufptr;
 	char csave;

-	if (result == NULL)
-	{
-		result = palloc(sizeof(PolyDatum));
-	}
+	Assert(result != NULL);
+
+	MemoryContext old_context = MemoryContextSwitchTo(mem_ctx);

 	result->type_oid = polydatum_deserialize_type(buf);

@@ -212,6 +211,9 @@ polydatum_deserialize(PolyDatum *result, StringInfo buf, PolyDatumIOState *state

 		buf->data[buf->cursor] = csave;
 	}
+
+	MemoryContextSwitchTo(old_context);
+
 	return result;
 }
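The mem_ctx parameter added above follows the usual save/switch/restore idiom around MemoryContextSwitchTo(): everything palloc'd between the switch and the restore lands in the caller-supplied context. A minimal sketch of that idiom with a hypothetical helper (not the actual polydatum_deserialize body), assuming the same PostgreSQL headers as the sketch above:

static char *
copy_string_into_context(MemoryContext mem_ctx, const char *src)
{
	MemoryContext old_context = MemoryContextSwitchTo(mem_ctx);
	char *copy = pstrdup(src);	/* allocated in mem_ctx, not CurrentMemoryContext */

	MemoryContextSwitchTo(old_context);
	return copy;
}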

@@ -511,12 +513,13 @@ ts_bookend_serializefunc(PG_FUNCTION_ARGS)
 Datum
 ts_bookend_deserializefunc(PG_FUNCTION_ARGS)
 {
+	MemoryContext aggcontext;
 	bytea *sstate;
 	StringInfoData buf;
 	InternalCmpAggStore *result;
 	InternalCmpAggStoreIOState *my_extra;

-	if (!AggCheckCallContext(fcinfo, NULL))
+	if (!AggCheckCallContext(fcinfo, &aggcontext))
 		elog(ERROR, "aggregate function called in non-aggregate context");

 	sstate = PG_GETARG_BYTEA_P(0);
@@ -536,9 +539,9 @@ ts_bookend_deserializefunc(PG_FUNCTION_ARGS)
 		my_extra = (InternalCmpAggStoreIOState *) fcinfo->flinfo->fn_extra;
 	}

-	result = palloc(sizeof(InternalCmpAggStore));
-	polydatum_deserialize(&result->value, &buf, &my_extra->value, fcinfo);
-	polydatum_deserialize(&result->cmp, &buf, &my_extra->cmp, fcinfo);
+	result = MemoryContextAllocZero(aggcontext, sizeof(InternalCmpAggStore));
+	polydatum_deserialize(aggcontext, &result->value, &buf, &my_extra->value, fcinfo);
+	polydatum_deserialize(aggcontext, &result->cmp, &buf, &my_extra->cmp, fcinfo);
 	PG_RETURN_POINTER(result);
 }

39 changes: 39 additions & 0 deletions test/expected/agg_bookends.out
@@ -1500,3 +1500,42 @@ ROLLBACK;
(1 row)

time | gp | temp
-- Test partial aggregation
CREATE TABLE partial_aggregation (time timestamptz NOT NULL, quantity numeric, longvalue text);
SELECT schema_name, table_name, created FROM create_hypertable('partial_aggregation', 'time');
schema_name | table_name | created
-------------+---------------------+---------
public | partial_aggregation | t
(1 row)

INSERT INTO partial_aggregation VALUES('2018-01-20T09:00:43', NULL, NULL);
INSERT INTO partial_aggregation VALUES('2018-01-20T09:00:43', NULL, NULL);
INSERT INTO partial_aggregation VALUES('2019-01-20T09:00:43', 1, 'Hello');
INSERT INTO partial_aggregation VALUES('2019-01-20T09:00:43', 2, 'World');
-- Use enable_partitionwise_aggregate to create partial aggregates per chunk
SET enable_partitionwise_aggregate = ON;
SELECT first(time, quantity) FROM partial_aggregation;
first
------------------------------
Sun Jan 20 09:00:43 2019 PST
(1 row)

SELECT last(time, quantity) FROM partial_aggregation;
last
------------------------------
Sun Jan 20 09:00:43 2019 PST
(1 row)

SELECT first(longvalue, quantity) FROM partial_aggregation;
first
-------
Hello
(1 row)

SELECT last(longvalue, quantity) FROM partial_aggregation;
last
-------
World
(1 row)

SET enable_partitionwise_aggregate = OFF;
18 changes: 18 additions & 0 deletions test/sql/agg_bookends.sql
@@ -31,3 +31,21 @@ SET timescaledb.enable_optimizations TO true;
\o

:DIFF_CMD

-- Test partial aggregation
CREATE TABLE partial_aggregation (time timestamptz NOT NULL, quantity numeric, longvalue text);
SELECT schema_name, table_name, created FROM create_hypertable('partial_aggregation', 'time');

INSERT INTO partial_aggregation VALUES('2018-01-20T09:00:43', NULL, NULL);
INSERT INTO partial_aggregation VALUES('2018-01-20T09:00:43', NULL, NULL);
INSERT INTO partial_aggregation VALUES('2019-01-20T09:00:43', 1, 'Hello');
INSERT INTO partial_aggregation VALUES('2019-01-20T09:00:43', 2, 'World');

-- Use enable_partitionwise_aggregate to create partial aggregates per chunk
SET enable_partitionwise_aggregate = ON;
SELECT first(time, quantity) FROM partial_aggregation;
SELECT last(time, quantity) FROM partial_aggregation;
SELECT first(longvalue, quantity) FROM partial_aggregation;
SELECT last(longvalue, quantity) FROM partial_aggregation;
SET enable_partitionwise_aggregate = OFF;
