about summary refs log tree commit diff
path: root/src/backend/executor/nodeSetOp.c
diff options
context:
space:
mode:
author: Jeff Davis <jdavis@postgresql.org> 2025-01-10 17:14:37 -0800
committer: Jeff Davis <jdavis@postgresql.org> 2025-01-10 17:14:37 -0800
commite0ece2a981ee9068f50c4423e303836c2585eb02 (patch)
treeb5a61a16ff16405c3c3bcbb4f44089648e166d12 /src/backend/executor/nodeSetOp.c
parent34c6e652425fde42c2746f749e31d196fc0d5538 (diff)
downloadpostgresql-e0ece2a981ee9068f50c4423e303836c2585eb02.tar.gz
postgresql-e0ece2a981ee9068f50c4423e303836c2585eb02.zip
TupleHashTable: store additional data along with tuple.
Previously, the caller needed to allocate the memory and the TupleHashTable would store a pointer to it. That wastes space for the palloc overhead as well as the size of the pointer itself.

Now, the TupleHashTable relies on the caller to correctly specify the additionalsize, and allocates that amount of space. The caller can then request a pointer into that space.

Discussion: https://postgr.es/m/b9cbf0219a9859dc8d240311643ff4362fd9602c.camel@j-davis.com
Reviewed-by: Heikki Linnakangas
Diffstat (limited to 'src/backend/executor/nodeSetOp.c')
-rw-r--r-- src/backend/executor/nodeSetOp.c | 17
1 file changed, 9 insertions(+), 8 deletions(-)
diff --git a/src/backend/executor/nodeSetOp.c b/src/backend/executor/nodeSetOp.c
index 5b7ff9c3748..2a50d56fc87 100644
--- a/src/backend/executor/nodeSetOp.c
+++ b/src/backend/executor/nodeSetOp.c
@@ -425,6 +425,7 @@ setop_fill_hash_table(SetOpState *setopstate)
{
TupleTableSlot *outerslot;
TupleHashEntryData *entry;
+ SetOpStatePerGroup pergroup;
bool isnew;
outerslot = ExecProcNode(outerPlan);
@@ -437,16 +438,16 @@ setop_fill_hash_table(SetOpState *setopstate)
outerslot,
&isnew, NULL);
+ pergroup = TupleHashEntryGetAdditional(entry);
/* If new tuple group, initialize counts to zero */
if (isnew)
{
- entry->additional = (SetOpStatePerGroup)
- MemoryContextAllocZero(setopstate->hashtable->tablecxt,
- sizeof(SetOpStatePerGroupData));
+ pergroup->numLeft = 0;
+ pergroup->numRight = 0;
}
/* Advance the counts */
- ((SetOpStatePerGroup) entry->additional)->numLeft++;
+ pergroup->numLeft++;
/* Must reset expression context after each hashtable lookup */
ResetExprContext(econtext);
@@ -478,7 +479,7 @@ setop_fill_hash_table(SetOpState *setopstate)
/* Advance the counts if entry is already present */
if (entry)
- ((SetOpStatePerGroup) entry->additional)->numRight++;
+ ((SetOpStatePerGroup) TupleHashEntryGetAdditional(entry))->numRight++;
/* Must reset expression context after each hashtable lookup */
ResetExprContext(econtext);
@@ -496,7 +497,7 @@ setop_fill_hash_table(SetOpState *setopstate)
static TupleTableSlot *
setop_retrieve_hash_table(SetOpState *setopstate)
{
- TupleHashEntryData *entry;
+ TupleHashEntry entry;
TupleTableSlot *resultTupleSlot;
/*
@@ -526,12 +527,12 @@ setop_retrieve_hash_table(SetOpState *setopstate)
* See if we should emit any copies of this tuple, and if so return
* the first copy.
*/
- set_output_count(setopstate, (SetOpStatePerGroup) entry->additional);
+ set_output_count(setopstate, (SetOpStatePerGroup) TupleHashEntryGetAdditional(entry));
if (setopstate->numOutput > 0)
{
setopstate->numOutput--;
- return ExecStoreMinimalTuple(entry->firstTuple,
+ return ExecStoreMinimalTuple(TupleHashEntryGetTuple(entry),
resultTupleSlot,
false);
}