Place data in first/last function in correct mctx #5990

Merged (1 commit) on Aug 22, 2023
1 change: 1 addition & 0 deletions .unreleased/bugfix_5990
@@ -0,0 +1 @@
Fixes: #5990 Place data in first/last function in correct mctx
23 changes: 13 additions & 10 deletions src/agg_bookend.c
@@ -137,18 +137,17 @@ polydatum_deserialize_type(StringInfo buf)
  *
  */
 static PolyDatum *
-polydatum_deserialize(PolyDatum *result, StringInfo buf, PolyDatumIOState *state,
-					  FunctionCallInfo fcinfo)
+polydatum_deserialize(MemoryContext mem_ctx, PolyDatum *result, StringInfo buf,
+					  PolyDatumIOState *state, FunctionCallInfo fcinfo)
 {
 	int itemlen;
 	StringInfoData item_buf;
 	StringInfo bufptr;
 	char csave;
 
-	if (result == NULL)
-	{
-		result = palloc(sizeof(PolyDatum));
-	}
+	Assert(result != NULL);
 
+	MemoryContext old_context = MemoryContextSwitchTo(mem_ctx);
+
 	result->type_oid = polydatum_deserialize_type(buf);
 
@@ -212,6 +211,9 @@ polydatum_deserialize(PolyDatum *result, StringInfo buf, PolyDatumIOState *state
 
 		buf->data[buf->cursor] = csave;
 	}
+
+	MemoryContextSwitchTo(old_context);
+
 	return result;
 }
 
@@ -511,12 +513,13 @@ ts_bookend_serializefunc(PG_FUNCTION_ARGS)
 Datum
 ts_bookend_deserializefunc(PG_FUNCTION_ARGS)
 {
+	MemoryContext aggcontext;
 	bytea *sstate;
 	StringInfoData buf;
 	InternalCmpAggStore *result;
 	InternalCmpAggStoreIOState *my_extra;
 
-	if (!AggCheckCallContext(fcinfo, NULL))
+	if (!AggCheckCallContext(fcinfo, &aggcontext))
 		elog(ERROR, "aggregate function called in non-aggregate context");
 
 	sstate = PG_GETARG_BYTEA_P(0);
@@ -536,9 +539,9 @@ ts_bookend_deserializefunc(PG_FUNCTION_ARGS)
 		my_extra = (InternalCmpAggStoreIOState *) fcinfo->flinfo->fn_extra;
 	}
 
-	result = palloc(sizeof(InternalCmpAggStore));
-	polydatum_deserialize(&result->value, &buf, &my_extra->value, fcinfo);
-	polydatum_deserialize(&result->cmp, &buf, &my_extra->cmp, fcinfo);
+	result = MemoryContextAllocZero(aggcontext, sizeof(InternalCmpAggStore));
+	polydatum_deserialize(aggcontext, &result->value, &buf, &my_extra->value, fcinfo);
+	polydatum_deserialize(aggcontext, &result->cmp, &buf, &my_extra->cmp, fcinfo);
 	PG_RETURN_POINTER(result);
 }
 
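For context: state returned by an aggregate deserialization function has to outlive the short-lived memory context it happens to be called in, because that state is later handed to the combine function. The fix therefore allocates the result in the context reported by AggCheckCallContext and passes that context down into polydatum_deserialize. Below is a minimal, generic sketch of this pattern; the function name my_deserializefunc and the MyState struct are hypothetical placeholders, not the TimescaleDB code above.

#include "postgres.h"

#include "fmgr.h"

/* Hypothetical partial-aggregate state; stands in for InternalCmpAggStore. */
typedef struct MyState
{
	int64 value;
} MyState;

PG_FUNCTION_INFO_V1(my_deserializefunc);

/*
 * Sketch of an aggregate deserialization function that allocates its result
 * in the aggregate memory context instead of the current (short-lived) one.
 */
Datum
my_deserializefunc(PG_FUNCTION_ARGS)
{
	MemoryContext aggcontext;
	MemoryContext oldcontext;
	bytea *sstate = PG_GETARG_BYTEA_P(0);
	MyState *result;

	/* Ask the executor for the memory context that holds aggregate state. */
	if (!AggCheckCallContext(fcinfo, &aggcontext))
		elog(ERROR, "aggregate function called in non-aggregate context");

	/* Allocate in aggcontext so the state is still valid when the combine
	 * function reads it later. */
	oldcontext = MemoryContextSwitchTo(aggcontext);
	result = (MyState *) palloc0(sizeof(MyState));
	/* ... decode the serialized bytes in sstate into *result here ... */
	(void) sstate;
	MemoryContextSwitchTo(oldcontext);

	PG_RETURN_POINTER(result);
}

The diff above applies the same idea to ts_bookend_deserializefunc and, in addition, threads the aggregate context through polydatum_deserialize so the per-value copies land there as well.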
39 changes: 39 additions & 0 deletions test/expected/agg_bookends.out
@@ -1500,3 +1500,42 @@ ROLLBACK;
(1 row)

time | gp | temp
-- Test partial aggregation
CREATE TABLE partial_aggregation (time timestamptz NOT NULL, quantity numeric, longvalue text);
SELECT schema_name, table_name, created FROM create_hypertable('partial_aggregation', 'time');
schema_name | table_name | created
-------------+---------------------+---------
public | partial_aggregation | t
(1 row)

INSERT INTO partial_aggregation VALUES('2018-01-20T09:00:43', NULL, NULL);
INSERT INTO partial_aggregation VALUES('2018-01-20T09:00:43', NULL, NULL);
INSERT INTO partial_aggregation VALUES('2019-01-20T09:00:43', 1, 'Hello');
INSERT INTO partial_aggregation VALUES('2019-01-20T09:00:43', 2, 'World');
-- Use enable_partitionwise_aggregate to create partial aggregates per chunk
SET enable_partitionwise_aggregate = ON;
SELECT first(time, quantity) FROM partial_aggregation;
first
------------------------------
Sun Jan 20 09:00:43 2019 PST
(1 row)

SELECT last(time, quantity) FROM partial_aggregation;
last
------------------------------
Sun Jan 20 09:00:43 2019 PST
(1 row)

SELECT first(longvalue, quantity) FROM partial_aggregation;
first
-------
Hello
(1 row)

SELECT last(longvalue, quantity) FROM partial_aggregation;
last
-------
World
(1 row)

SET enable_partitionwise_aggregate = OFF;
18 changes: 18 additions & 0 deletions test/sql/agg_bookends.sql
@@ -31,3 +31,21 @@ SET timescaledb.enable_optimizations TO true;
\o

:DIFF_CMD

-- Test partial aggregation
CREATE TABLE partial_aggregation (time timestamptz NOT NULL, quantity numeric, longvalue text);
SELECT schema_name, table_name, created FROM create_hypertable('partial_aggregation', 'time');

INSERT INTO partial_aggregation VALUES('2018-01-20T09:00:43', NULL, NULL);
INSERT INTO partial_aggregation VALUES('2018-01-20T09:00:43', NULL, NULL);
INSERT INTO partial_aggregation VALUES('2019-01-20T09:00:43', 1, 'Hello');
INSERT INTO partial_aggregation VALUES('2019-01-20T09:00:43', 2, 'World');

-- Use enable_partitionwise_aggregate to create partial aggregates per chunk
SET enable_partitionwise_aggregate = ON;
@jnidzwetzki (Contributor, Author) commented on Aug 21, 2023:

This setting will be replaced by our own partial pushdown GUC soon, but use enable_partitionwise_aggregate for now to test the functions properly.

SELECT first(time, quantity) FROM partial_aggregation;
SELECT last(time, quantity) FROM partial_aggregation;
SELECT first(longvalue, quantity) FROM partial_aggregation;
SELECT last(longvalue, quantity) FROM partial_aggregation;
SET enable_partitionwise_aggregate = OFF;