author     Thomas Munro <tmunro@postgresql.org>  2020-07-17 14:57:50 +1200
committer  Thomas Munro <tmunro@postgresql.org>  2020-07-17 15:04:16 +1200
commit     cdc7169509113018cc389da740e950c587b5751f (patch)
tree       b62eba135c229de78eae334af0c8b64f169f36e8  /src/backend/executor/nodeGather.c
parent     d2bddc2500fb74d56e5bc53a1cfa269e2e846510 (diff)
download   postgresql-cdc7169509113018cc389da740e950c587b5751f.tar.gz
           postgresql-cdc7169509113018cc389da740e950c587b5751f.zip
Use MinimalTuple for tuple queues.
This representation saves 8 bytes per tuple compared to HeapTuple, and
avoids the need to allocate, copy and free on the receiving side.  Gather
can emit the returned MinimalTuple directly, but GatherMerge now needs to
make an explicit copy because it buffers multiple tuples at a time.  That
should be no worse than before.

Reviewed-by: Soumyadeep Chakraborty <soumyadeep2007@gmail.com>
Discussion: https://postgr.es/m/CA%2BhUKG%2B8T_ggoUTAE-U%3DA%2BOcPc4%3DB0nPPHcSfffuQhvXXjML6w%40mail.gmail.com
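In practical terms, the receiving side can now hand the queue's MinimalTuple
straight to a slot initialized with &TTSOpsMinimalTuple.  The sketch below is
a hypothetical helper (not the tree's actual gather_getnext()/gather_readnext()
code) that assumes the backend's executor/tqueue.h and executor/tuptable.h and
only compiles inside the PostgreSQL tree; it shows the pattern: the tuple
memory is owned by the queue machinery, so it is stored with shouldFree = false
rather than being copied into a freshly palloc'd HeapTuple.

/*
 * Hypothetical sketch of the receiving-side pattern after this commit.
 * Illustrative only; assumes PostgreSQL backend headers.
 */
#include "postgres.h"

#include "executor/tqueue.h"
#include "executor/tuptable.h"

static TupleTableSlot *
store_tuple_from_queue(TupleQueueReader *reader, TupleTableSlot *slot)
{
	MinimalTuple tup;
	bool		readerdone;

	/* The returned tuple points into memory owned by the queue machinery. */
	tup = TupleQueueReaderNext(reader, true, &readerdone);
	if (tup == NULL)
		return NULL;			/* no tuple available, or worker is done */

	/*
	 * Store the tuple without copying it.  shouldFree is false because this
	 * code did not palloc the tuple and must not pfree it.
	 */
	return ExecStoreMinimalTuple(tup, slot, false);
}

GatherMerge cannot take this shortcut because it buffers several tuples at
once, so on that path the minimal tuple is explicitly copied before the queue
memory can be reused.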
Diffstat (limited to 'src/backend/executor/nodeGather.c')
-rw-r--r--  src/backend/executor/nodeGather.c  16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/src/backend/executor/nodeGather.c b/src/backend/executor/nodeGather.c
index 6b8ed867d59..a01b46af148 100644
--- a/src/backend/executor/nodeGather.c
+++ b/src/backend/executor/nodeGather.c
@@ -46,7 +46,7 @@
static TupleTableSlot *ExecGather(PlanState *pstate);
static TupleTableSlot *gather_getnext(GatherState *gatherstate);
-static HeapTuple gather_readnext(GatherState *gatherstate);
+static MinimalTuple gather_readnext(GatherState *gatherstate);
static void ExecShutdownGatherWorkers(GatherState *node);
@@ -120,7 +120,7 @@ ExecInitGather(Gather *node, EState *estate, int eflags)
* Initialize funnel slot to same tuple descriptor as outer plan.
*/
gatherstate->funnel_slot = ExecInitExtraTupleSlot(estate, tupDesc,
- &TTSOpsHeapTuple);
+ &TTSOpsMinimalTuple);
/*
* Gather doesn't support checking a qual (it's always more efficient to
@@ -266,7 +266,7 @@ gather_getnext(GatherState *gatherstate)
PlanState *outerPlan = outerPlanState(gatherstate);
TupleTableSlot *outerTupleSlot;
TupleTableSlot *fslot = gatherstate->funnel_slot;
- HeapTuple tup;
+ MinimalTuple tup;
while (gatherstate->nreaders > 0 || gatherstate->need_to_scan_locally)
{
@@ -278,9 +278,9 @@ gather_getnext(GatherState *gatherstate)
if (HeapTupleIsValid(tup))
{
- ExecStoreHeapTuple(tup, /* tuple to store */
- fslot, /* slot to store the tuple */
- true); /* pfree tuple when done with it */
+ ExecStoreMinimalTuple(tup, /* tuple to store */
+ fslot, /* slot to store the tuple */
+ false); /* don't pfree tuple */
return fslot;
}
}
@@ -308,7 +308,7 @@ gather_getnext(GatherState *gatherstate)
/*
* Attempt to read a tuple from one of our parallel workers.
*/
-static HeapTuple
+static MinimalTuple
gather_readnext(GatherState *gatherstate)
{
int nvisited = 0;
@@ -316,7 +316,7 @@ gather_readnext(GatherState *gatherstate)
for (;;)
{
TupleQueueReader *reader;
- HeapTuple tup;
+ MinimalTuple tup;
bool readerdone;
/* Check for async events, particularly messages from workers. */