Diffstat (limited to 'src/backend/executor')
-rw-r--r--  src/backend/executor/execExpr.c            |   2
-rw-r--r--  src/backend/executor/execExprInterp.c      |  80
-rw-r--r--  src/backend/executor/execGrouping.c        |  14
-rw-r--r--  src/backend/executor/execSRF.c             |   6
-rw-r--r--  src/backend/executor/execUtils.c           |   6
-rw-r--r--  src/backend/executor/nodeAgg.c             | 323
-rw-r--r--  src/backend/executor/nodeBitmapHeapscan.c  |   4
-rw-r--r--  src/backend/executor/nodeIncrementalSort.c |  71
-rw-r--r--  src/backend/executor/nodeTidscan.c         |   2
9 files changed, 256 insertions(+), 252 deletions(-)
diff --git a/src/backend/executor/execExpr.c b/src/backend/executor/execExpr.c
index c6a77bd66fa..236413f62aa 100644
--- a/src/backend/executor/execExpr.c
+++ b/src/backend/executor/execExpr.c
@@ -3238,7 +3238,7 @@ ExecBuildAggTransCall(ExprState *state, AggState *aggstate,
bool nullcheck)
{
ExprContext *aggcontext;
- int adjust_jumpnull = -1;
+ int         adjust_jumpnull = -1;
if (ishash)
aggcontext = aggstate->hashcontext;
diff --git a/src/backend/executor/execExprInterp.c b/src/backend/executor/execExprInterp.c
index 113ed1547cb..b812bbaceef 100644
--- a/src/backend/executor/execExprInterp.c
+++ b/src/backend/executor/execExprInterp.c
@@ -167,15 +167,16 @@ static Datum ExecJustAssignOuterVarVirt(ExprState *state, ExprContext *econtext,
static Datum ExecJustAssignScanVarVirt(ExprState *state, ExprContext *econtext, bool *isnull);
/* execution helper functions */
-static pg_attribute_always_inline void
-ExecAggPlainTransByVal(AggState *aggstate, AggStatePerTrans pertrans,
- AggStatePerGroup pergroup,
- ExprContext *aggcontext, int setno);
-
-static pg_attribute_always_inline void
-ExecAggPlainTransByRef(AggState *aggstate, AggStatePerTrans pertrans,
- AggStatePerGroup pergroup,
- ExprContext *aggcontext, int setno);
+static pg_attribute_always_inline void ExecAggPlainTransByVal(AggState *aggstate,
+ AggStatePerTrans pertrans,
+ AggStatePerGroup pergroup,
+ ExprContext *aggcontext,
+ int setno);
+static pg_attribute_always_inline void ExecAggPlainTransByRef(AggState *aggstate,
+ AggStatePerTrans pertrans,
+ AggStatePerGroup pergroup,
+ ExprContext *aggcontext,
+ int setno);
/*
* Prepare ExprState for interpreted execution.
@@ -1611,8 +1612,8 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull)
EEO_CASE(EEOP_AGG_PLAIN_PERGROUP_NULLCHECK)
{
AggState *aggstate = castNode(AggState, state->parent);
- AggStatePerGroup pergroup_allaggs = aggstate->all_pergroups
- [op->d.agg_plain_pergroup_nullcheck.setoff];
+ AggStatePerGroup pergroup_allaggs =
+ aggstate->all_pergroups[op->d.agg_plain_pergroup_nullcheck.setoff];
if (pergroup_allaggs == NULL)
EEO_JUMP(op->d.agg_plain_pergroup_nullcheck.jumpnull);
@@ -1636,9 +1637,8 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull)
{
AggState *aggstate = castNode(AggState, state->parent);
AggStatePerTrans pertrans = op->d.agg_trans.pertrans;
- AggStatePerGroup pergroup = &aggstate->all_pergroups
- [op->d.agg_trans.setoff]
- [op->d.agg_trans.transno];
+ AggStatePerGroup pergroup =
+ &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
Assert(pertrans->transtypeByVal);
@@ -1665,9 +1665,8 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull)
{
AggState *aggstate = castNode(AggState, state->parent);
AggStatePerTrans pertrans = op->d.agg_trans.pertrans;
- AggStatePerGroup pergroup = &aggstate->all_pergroups
- [op->d.agg_trans.setoff]
- [op->d.agg_trans.transno];
+ AggStatePerGroup pergroup =
+ &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
Assert(pertrans->transtypeByVal);
@@ -1684,9 +1683,8 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull)
{
AggState *aggstate = castNode(AggState, state->parent);
AggStatePerTrans pertrans = op->d.agg_trans.pertrans;
- AggStatePerGroup pergroup = &aggstate->all_pergroups
- [op->d.agg_trans.setoff]
- [op->d.agg_trans.transno];
+ AggStatePerGroup pergroup =
+ &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
Assert(pertrans->transtypeByVal);
@@ -1702,9 +1700,8 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull)
{
AggState *aggstate = castNode(AggState, state->parent);
AggStatePerTrans pertrans = op->d.agg_trans.pertrans;
- AggStatePerGroup pergroup = &aggstate->all_pergroups
- [op->d.agg_trans.setoff]
- [op->d.agg_trans.transno];
+ AggStatePerGroup pergroup =
+ &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
Assert(!pertrans->transtypeByVal);
@@ -1724,9 +1721,8 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull)
{
AggState *aggstate = castNode(AggState, state->parent);
AggStatePerTrans pertrans = op->d.agg_trans.pertrans;
- AggStatePerGroup pergroup = &aggstate->all_pergroups
- [op->d.agg_trans.setoff]
- [op->d.agg_trans.transno];
+ AggStatePerGroup pergroup =
+ &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
Assert(!pertrans->transtypeByVal);
@@ -1742,9 +1738,8 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull)
{
AggState *aggstate = castNode(AggState, state->parent);
AggStatePerTrans pertrans = op->d.agg_trans.pertrans;
- AggStatePerGroup pergroup = &aggstate->all_pergroups
- [op->d.agg_trans.setoff]
- [op->d.agg_trans.transno];
+ AggStatePerGroup pergroup =
+ &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
Assert(!pertrans->transtypeByVal);
@@ -4302,21 +4297,20 @@ ExecAggPlainTransByRef(AggState *aggstate, AggStatePerTrans pertrans,
newVal = FunctionCallInvoke(fcinfo);
/*
- * For pass-by-ref datatype, must copy the new value into
- * aggcontext and free the prior transValue. But if transfn
- * returned a pointer to its first input, we don't need to do
- * anything. Also, if transfn returned a pointer to a R/W
- * expanded object that is already a child of the aggcontext,
- * assume we can adopt that value without copying it.
+ * For pass-by-ref datatype, must copy the new value into aggcontext and
+ * free the prior transValue. But if transfn returned a pointer to its
+ * first input, we don't need to do anything. Also, if transfn returned a
+ * pointer to a R/W expanded object that is already a child of the
+ * aggcontext, assume we can adopt that value without copying it.
*
- * It's safe to compare newVal with pergroup->transValue without
- * regard for either being NULL, because ExecAggTransReparent()
- * takes care to set transValue to 0 when NULL. Otherwise we could
- * end up accidentally not reparenting, when the transValue has
- * the same numerical value as newValue, despite being NULL. This
- * is a somewhat hot path, making it undesirable to instead solve
- * this with another branch for the common case of the transition
- * function returning its (modified) input argument.
+ * It's safe to compare newVal with pergroup->transValue without regard
+ * for either being NULL, because ExecAggTransReparent() takes care to set
+ * transValue to 0 when NULL. Otherwise we could end up accidentally not
+ * reparenting, when the transValue has the same numerical value as
+ * newValue, despite being NULL. This is a somewhat hot path, making it
+ * undesirable to instead solve this with another branch for the common
+ * case of the transition function returning its (modified) input
+ * argument.
*/
if (DatumGetPointer(newVal) != DatumGetPointer(pergroup->transValue))
newVal = ExecAggTransReparent(aggstate, pertrans,
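The comment above relies on ExecAggTransReparent() storing NULL transition values as 0. A minimal standalone sketch (not PostgreSQL code; Datum is approximated with uintptr_t and the variable names are only illustrative) of the hazard the comment warns about, where a stale pointer in a NULL transValue could spuriously match a new transfn result:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uintptr_t Datum;        /* rough stand-in for PostgreSQL's Datum */

    int
    main(void)
    {
        int         scratch = 42;

        /* a NULL transition value stored as a stale pointer, not as 0 ... */
        Datum       transValue = (Datum) &scratch;
        bool        transValueIsNull = true;

        /* ... can coincide with a new transfn result at the same address, */
        Datum       newVal = (Datum) &scratch;

        /* so the bare pointer test would wrongly skip reparenting */
        if (newVal != transValue)
            printf("reparent newVal into aggcontext\n");
        else
            printf("skipped: the false match the comment warns about\n");

        (void) transValueIsNull;
        return 0;
    }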
diff --git a/src/backend/executor/execGrouping.c b/src/backend/executor/execGrouping.c
index 009d27b9a80..8be36ca7634 100644
--- a/src/backend/executor/execGrouping.c
+++ b/src/backend/executor/execGrouping.c
@@ -300,9 +300,9 @@ TupleHashEntry
LookupTupleHashEntry(TupleHashTable hashtable, TupleTableSlot *slot,
bool *isnew)
{
- TupleHashEntry entry;
- MemoryContext oldContext;
- uint32 hash;
+ TupleHashEntry entry;
+ MemoryContext oldContext;
+ uint32      hash;
/* Need to run the hash functions in short-lived context */
oldContext = MemoryContextSwitchTo(hashtable->tempcxt);
@@ -326,8 +326,8 @@ LookupTupleHashEntry(TupleHashTable hashtable, TupleTableSlot *slot,
uint32
TupleHashTableHash(TupleHashTable hashtable, TupleTableSlot *slot)
{
- MemoryContext oldContext;
- uint32 hash;
+ MemoryContext oldContext;
+ uint32      hash;
hashtable->inputslot = slot;
hashtable->in_hash_funcs = hashtable->tab_hash_funcs;
@@ -350,8 +350,8 @@ TupleHashEntry
LookupTupleHashEntryHash(TupleHashTable hashtable, TupleTableSlot *slot,
bool *isnew, uint32 hash)
{
- TupleHashEntry entry;
- MemoryContext oldContext;
+ TupleHashEntry entry;
+ MemoryContext oldContext;
/* Need to run the hash functions in short-lived context */
oldContext = MemoryContextSwitchTo(hashtable->tempcxt);
diff --git a/src/backend/executor/execSRF.c b/src/backend/executor/execSRF.c
index 461c8601b4f..b0ea72de685 100644
--- a/src/backend/executor/execSRF.c
+++ b/src/backend/executor/execSRF.c
@@ -259,7 +259,7 @@ ExecMakeTableFunctionResult(SetExprState *setexpr,
if (first_time)
{
MemoryContext oldcontext =
-     MemoryContextSwitchTo(econtext->ecxt_per_query_memory);
+ MemoryContextSwitchTo(econtext->ecxt_per_query_memory);
tupstore = tuplestore_begin_heap(randomAccess, false, work_mem);
rsinfo.setResult = tupstore;
@@ -289,7 +289,7 @@ ExecMakeTableFunctionResult(SetExprState *setexpr,
if (tupdesc == NULL)
{
MemoryContext oldcontext =
-     MemoryContextSwitchTo(econtext->ecxt_per_query_memory);
+ MemoryContextSwitchTo(econtext->ecxt_per_query_memory);
/*
* This is the first non-NULL result from the
@@ -384,7 +384,7 @@ no_function_result:
if (rsinfo.setResult == NULL)
{
MemoryContext oldcontext =
-     MemoryContextSwitchTo(econtext->ecxt_per_query_memory);
+ MemoryContextSwitchTo(econtext->ecxt_per_query_memory);
tupstore = tuplestore_begin_heap(randomAccess, false, work_mem);
rsinfo.setResult = tupstore;
diff --git a/src/backend/executor/execUtils.c b/src/backend/executor/execUtils.c
index ca973882d01..d0e65b86473 100644
--- a/src/backend/executor/execUtils.c
+++ b/src/backend/executor/execUtils.c
@@ -320,9 +320,9 @@ CreateExprContext(EState *estate)
ExprContext *
CreateWorkExprContext(EState *estate)
{
- Size minContextSize = ALLOCSET_DEFAULT_MINSIZE;
- Size initBlockSize = ALLOCSET_DEFAULT_INITSIZE;
- Size maxBlockSize = ALLOCSET_DEFAULT_MAXSIZE;
+ Size        minContextSize = ALLOCSET_DEFAULT_MINSIZE;
+ Size        initBlockSize = ALLOCSET_DEFAULT_INITSIZE;
+ Size        maxBlockSize = ALLOCSET_DEFAULT_MAXSIZE;
/* choose the maxBlockSize to be no larger than 1/16 of work_mem */
while (16 * maxBlockSize > work_mem * 1024L)
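The loop shown here caps the allocator's maximum block size at 1/16 of work_mem, which PostgreSQL tracks in kilobytes. A self-contained sketch of that sizing rule, with an invented work_mem value and the 8MB figure standing in for ALLOCSET_DEFAULT_MAXSIZE:

    #include <stdio.h>

    int
    main(void)
    {
        long        work_mem = 4096;            /* kB, like the GUC */
        long        maxBlockSize = 8 * 1024 * 1024; /* assumed 8MB default */

        /* halve until the block size is at most 1/16 of work_mem */
        while (16 * maxBlockSize > work_mem * 1024L)
            maxBlockSize >>= 1;

        printf("maxBlockSize = %ld bytes\n", maxBlockSize); /* 262144 */
        return 0;
    }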
diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c
index 9f4229de600..8553db0dd07 100644
--- a/src/backend/executor/nodeAgg.c
+++ b/src/backend/executor/nodeAgg.c
@@ -317,11 +317,11 @@
*/
typedef struct HashTapeInfo
{
- LogicalTapeSet *tapeset;
- int ntapes;
- int *freetapes;
- int nfreetapes;
- int freetapes_alloc;
+ LogicalTapeSet *tapeset;
+ int         ntapes;
+ int        *freetapes;
+ int         nfreetapes;
+ int         freetapes_alloc;
} HashTapeInfo;
/*
@@ -336,11 +336,11 @@ typedef struct HashTapeInfo
typedef struct HashAggSpill
{
LogicalTapeSet *tapeset; /* borrowed reference to tape set */
- int npartitions; /* number of partitions */
- int *partitions; /* spill partition tape numbers */
- int64 *ntuples; /* number of tuples in each partition */
- uint32 mask; /* mask to find partition from hash value */
- int shift; /* after masking, shift by this amount */
+ int         npartitions;    /* number of partitions */
+ int        *partitions;     /* spill partition tape numbers */
+ int64      *ntuples;        /* number of tuples in each partition */
+ uint32      mask;           /* mask to find partition from hash value */
+ int         shift;          /* after masking, shift by this amount */
} HashAggSpill;
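The mask and shift fields describe how a spilled tuple's partition is derived from the top bits of its hash. A hedged sketch consistent with the comments above (the constants are made up; the real computation lives in hashagg_spill_init() and hashagg_spill_tuple()):

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        int         used_bits = 0;      /* hash bits consumed so far */
        int         partition_bits = 2; /* 4 partitions */
        uint32_t    hash = 0xC0FFEE00;

        /* take partition_bits from the top of the remaining hash bits */
        int         shift = 32 - used_bits - partition_bits;
        uint32_t    mask = ((1u << partition_bits) - 1) << shift;

        /* "mask to find partition", then "after masking, shift" */
        uint32_t    partition = (hash & mask) >> shift;

        printf("partition = %u\n", (unsigned) partition);  /* top two bits: 3 */
        return 0;
    }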
/*
@@ -354,11 +354,11 @@ typedef struct HashAggSpill
*/
typedef struct HashAggBatch
{
- int setno; /* grouping set */
- int used_bits; /* number of bits of hash already used */
- LogicalTapeSet *tapeset; /* borrowed reference to tape set */
- int input_tapenum; /* input partition tape */
- int64 input_tuples; /* number of tuples in this batch */
+ int         setno;          /* grouping set */
+ int         used_bits;      /* number of bits of hash already used */
+ LogicalTapeSet *tapeset;    /* borrowed reference to tape set */
+ int         input_tapenum;  /* input partition tape */
+ int64       input_tuples;   /* number of tuples in this batch */
} HashAggBatch;
static void select_current_set(AggState *aggstate, int setno, bool is_hash);
@@ -402,10 +402,10 @@ static void hashagg_recompile_expressions(AggState *aggstate, bool minslot,
static long hash_choose_num_buckets(double hashentrysize,
long estimated_nbuckets,
Size memory);
-static int hash_choose_num_partitions(uint64 input_groups,
- double hashentrysize,
- int used_bits,
- int *log2_npartittions);
+static int  hash_choose_num_partitions(uint64 input_groups,
+                                       double hashentrysize,
+                                       int used_bits,
+                                       int *log2_npartittions);
static AggStatePerGroup lookup_hash_entry(AggState *aggstate, uint32 hash,
bool *in_hash_table);
static void lookup_hash_entries(AggState *aggstate);
@@ -786,14 +786,14 @@ advance_transition_function(AggState *aggstate,
* pointer to a R/W expanded object that is already a child of the
* aggcontext, assume we can adopt that value without copying it.
*
- * It's safe to compare newVal with pergroup->transValue without
- * regard for either being NULL, because ExecAggTransReparent()
- * takes care to set transValue to 0 when NULL. Otherwise we could
- * end up accidentally not reparenting, when the transValue has
- * the same numerical value as newValue, despite being NULL. This
- * is a somewhat hot path, making it undesirable to instead solve
- * this with another branch for the common case of the transition
- * function returning its (modified) input argument.
+ * It's safe to compare newVal with pergroup->transValue without regard
+ * for either being NULL, because ExecAggTransReparent() takes care to set
+ * transValue to 0 when NULL. Otherwise we could end up accidentally not
+ * reparenting, when the transValue has the same numerical value as
+ * newValue, despite being NULL. This is a somewhat hot path, making it
+ * undesirable to instead solve this with another branch for the common
+ * case of the transition function returning its (modified) input
+ * argument.
*/
if (!pertrans->transtypeByVal &&
DatumGetPointer(newVal) != DatumGetPointer(pergroupstate->transValue))
@@ -1206,7 +1206,7 @@ prepare_hash_slot(AggState *aggstate)
TupleTableSlot *inputslot = aggstate->tmpcontext->ecxt_outertuple;
AggStatePerHash perhash = &aggstate->perhash[aggstate->current_set];
TupleTableSlot *hashslot = perhash->hashslot;
- int i;
+ int         i;
/* transfer just the needed columns into hashslot */
slot_getsomeattrs(inputslot, perhash->largestGrpColIdx);
@@ -1438,13 +1438,13 @@ find_unaggregated_cols_walker(Node *node, Bitmapset **colnos)
static void
build_hash_tables(AggState *aggstate)
{
- int setno;
+ int         setno;
for (setno = 0; setno < aggstate->num_hashes; ++setno)
{
AggStatePerHash perhash = &aggstate->perhash[setno];
- long nbuckets;
- Size memory;
+ long        nbuckets;
+ Size        memory;
if (perhash->hashtable != NULL)
{
@@ -1457,8 +1457,9 @@ build_hash_tables(AggState *aggstate)
memory = aggstate->hash_mem_limit / aggstate->num_hashes;
/* choose reasonable number of buckets per hashtable */
- nbuckets = hash_choose_num_buckets(
- aggstate->hashentrysize, perhash->aggnode->numGroups, memory);
+ nbuckets = hash_choose_num_buckets(aggstate->hashentrysize,
+ perhash->aggnode->numGroups,
+ memory);
build_hash_table(aggstate, setno, nbuckets);
}
@@ -1473,10 +1474,10 @@ static void
build_hash_table(AggState *aggstate, int setno, long nbuckets)
{
AggStatePerHash perhash = &aggstate->perhash[setno];
- MemoryContext metacxt = aggstate->hash_metacxt;
- MemoryContext hashcxt = aggstate->hashcontext->ecxt_per_tuple_memory;
- MemoryContext tmpcxt = aggstate->tmpcontext->ecxt_per_tuple_memory;
- Size additionalsize;
+ MemoryContext metacxt = aggstate->hash_metacxt;
+ MemoryContext hashcxt = aggstate->hashcontext->ecxt_per_tuple_memory;
+ MemoryContext tmpcxt = aggstate->tmpcontext->ecxt_per_tuple_memory;
+ Size        additionalsize;
Assert(aggstate->aggstrategy == AGG_HASHED ||
aggstate->aggstrategy == AGG_MIXED);
@@ -1489,20 +1490,19 @@ build_hash_table(AggState *aggstate, int setno, long nbuckets)
*/
additionalsize = aggstate->numtrans * sizeof(AggStatePerGroupData);
- perhash->hashtable = BuildTupleHashTableExt(
- &aggstate->ss.ps,
- perhash->hashslot->tts_tupleDescriptor,
- perhash->numCols,
- perhash->hashGrpColIdxHash,
- perhash->eqfuncoids,
- perhash->hashfunctions,
- perhash->aggnode->grpCollations,
- nbuckets,
- additionalsize,
- metacxt,
- hashcxt,
- tmpcxt,
- DO_AGGSPLIT_SKIPFINAL(aggstate->aggsplit));
+ perhash->hashtable = BuildTupleHashTableExt(&aggstate->ss.ps,
+ perhash->hashslot->tts_tupleDescriptor,
+ perhash->numCols,
+ perhash->hashGrpColIdxHash,
+ perhash->eqfuncoids,
+ perhash->hashfunctions,
+ perhash->aggnode->grpCollations,
+ nbuckets,
+ additionalsize,
+ metacxt,
+ hashcxt,
+ tmpcxt,
+ DO_AGGSPLIT_SKIPFINAL(aggstate->aggsplit));
}
/*
@@ -1648,12 +1648,12 @@ find_hash_columns(AggState *aggstate)
Size
hash_agg_entry_size(int numTrans, Size tupleWidth, Size transitionSpace)
{
- Size tupleChunkSize;
- Size pergroupChunkSize;
- Size transitionChunkSize;
- Size tupleSize = (MAXALIGN(SizeofMinimalTupleHeader) +
- tupleWidth);
- Size pergroupSize = numTrans * sizeof(AggStatePerGroupData);
+ Size        tupleChunkSize;
+ Size        pergroupChunkSize;
+ Size        transitionChunkSize;
+ Size        tupleSize = (MAXALIGN(SizeofMinimalTupleHeader) +
+                          tupleWidth);
+ Size        pergroupSize = numTrans * sizeof(AggStatePerGroupData);
tupleChunkSize = CHUNKHDRSZ + tupleSize;
@@ -1695,24 +1695,24 @@ hash_agg_entry_size(int numTrans, Size tupleWidth, Size transitionSpace)
static void
hashagg_recompile_expressions(AggState *aggstate, bool minslot, bool nullcheck)
{
- AggStatePerPhase phase;
- int i = minslot ? 1 : 0;
- int j = nullcheck ? 1 : 0;
+ AggStatePerPhase phase;
+ int         i = minslot ? 1 : 0;
+ int         j = nullcheck ? 1 : 0;
Assert(aggstate->aggstrategy == AGG_HASHED ||
aggstate->aggstrategy == AGG_MIXED);
if (aggstate->aggstrategy == AGG_HASHED)
phase = &aggstate->phases[0];
- else /* AGG_MIXED */
+ else                        /* AGG_MIXED */
phase = &aggstate->phases[1];
if (phase->evaltrans_cache[i][j] == NULL)
{
- const TupleTableSlotOps *outerops = aggstate->ss.ps.outerops;
- bool outerfixed = aggstate->ss.ps.outeropsfixed;
- bool dohash = true;
- bool dosort;
+ const TupleTableSlotOps *outerops = aggstate->ss.ps.outerops;
+ bool        outerfixed = aggstate->ss.ps.outeropsfixed;
+ bool        dohash = true;
+ bool        dosort;
dosort = aggstate->aggstrategy == AGG_MIXED ? true : false;
@@ -1723,8 +1723,9 @@ hashagg_recompile_expressions(AggState *aggstate, bool minslot, bool nullcheck)
aggstate->ss.ps.outeropsfixed = true;
}
- phase->evaltrans_cache[i][j] = ExecBuildAggTrans(
- aggstate, phase, dosort, dohash, nullcheck);
+ phase->evaltrans_cache[i][j] = ExecBuildAggTrans(aggstate, phase,
+ dosort, dohash,
+ nullcheck);
/* change back */
aggstate->ss.ps.outerops = outerops;
@@ -1747,8 +1748,8 @@ hash_agg_set_limits(double hashentrysize, uint64 input_groups, int used_bits,
Size *mem_limit, uint64 *ngroups_limit,
int *num_partitions)
{
- int npartitions;
- Size partition_mem;
+ int         npartitions;
+ Size        partition_mem;
/* if not expected to spill, use all of work_mem */
if (input_groups * hashentrysize < work_mem * 1024L)
@@ -1762,9 +1763,8 @@ hash_agg_set_limits(double hashentrysize, uint64 input_groups, int used_bits,
/*
* Calculate expected memory requirements for spilling, which is the size
- * of the buffers needed for all the tapes that need to be open at
- * once. Then, subtract that from the memory available for holding hash
- * tables.
+ * of the buffers needed for all the tapes that need to be open at once.
+ * Then, subtract that from the memory available for holding hash tables.
*/
npartitions = hash_choose_num_partitions(input_groups,
hashentrysize,
@@ -1803,11 +1803,11 @@ hash_agg_set_limits(double hashentrysize, uint64 input_groups, int used_bits,
static void
hash_agg_check_limits(AggState *aggstate)
{
- uint64 ngroups = aggstate->hash_ngroups_current;
- Size meta_mem = MemoryContextMemAllocated(
- aggstate->hash_metacxt, true);
- Size hash_mem = MemoryContextMemAllocated(
- aggstate->hashcontext->ecxt_per_tuple_memory, true);
+ uint64 ngroups = aggstate->hash_ngroups_current;
+ Size meta_mem = MemoryContextMemAllocated(aggstate->hash_metacxt,
+ true);
+ Size hash_mem = MemoryContextMemAllocated(aggstate->hashcontext->ecxt_per_tuple_memory,
+ true);
/*
* Don't spill unless there's at least one group in the hash table so we
@@ -1841,13 +1841,12 @@ hash_agg_enter_spill_mode(AggState *aggstate)
hashagg_tapeinfo_init(aggstate);
- aggstate->hash_spills = palloc(
- sizeof(HashAggSpill) * aggstate->num_hashes);
+ aggstate->hash_spills = palloc(sizeof(HashAggSpill) * aggstate->num_hashes);
for (int setno = 0; setno < aggstate->num_hashes; setno++)
{
- AggStatePerHash perhash = &aggstate->perhash[setno];
- HashAggSpill *spill = &aggstate->hash_spills[setno];
+ AggStatePerHash perhash = &aggstate->perhash[setno];
+ HashAggSpill *spill = &aggstate->hash_spills[setno];
hashagg_spill_init(spill, aggstate->hash_tapeinfo, 0,
perhash->aggnode->numGroups,
@@ -1865,10 +1864,10 @@ hash_agg_enter_spill_mode(AggState *aggstate)
static void
hash_agg_update_metrics(AggState *aggstate, bool from_tape, int npartitions)
{
- Size meta_mem;
- Size hash_mem;
- Size buffer_mem;
- Size total_mem;
+ Size        meta_mem;
+ Size        hash_mem;
+ Size        buffer_mem;
+ Size        total_mem;
if (aggstate->aggstrategy != AGG_MIXED &&
aggstate->aggstrategy != AGG_HASHED)
@@ -1878,8 +1877,7 @@ hash_agg_update_metrics(AggState *aggstate, bool from_tape, int npartitions)
meta_mem = MemoryContextMemAllocated(aggstate->hash_metacxt, true);
/* memory for the group keys and transition states */
- hash_mem = MemoryContextMemAllocated(
- aggstate->hashcontext->ecxt_per_tuple_memory, true);
+ hash_mem = MemoryContextMemAllocated(aggstate->hashcontext->ecxt_per_tuple_memory, true);
/* memory for read/write tape buffers, if spilled */
buffer_mem = npartitions * HASHAGG_WRITE_BUFFER_SIZE;
@@ -1894,8 +1892,7 @@ hash_agg_update_metrics(AggState *aggstate, bool from_tape, int npartitions)
/* update disk usage */
if (aggstate->hash_tapeinfo != NULL)
{
- uint64 disk_used = LogicalTapeSetBlocks(
- aggstate->hash_tapeinfo->tapeset) * (BLCKSZ / 1024);
+ uint64 disk_used = LogicalTapeSetBlocks(aggstate->hash_tapeinfo->tapeset) * (BLCKSZ / 1024);
if (aggstate->hash_disk_used < disk_used)
aggstate->hash_disk_used = disk_used;
@@ -1906,7 +1903,7 @@ hash_agg_update_metrics(AggState *aggstate, bool from_tape, int npartitions)
{
aggstate->hashentrysize =
sizeof(TupleHashEntryData) +
- (hash_mem / (double)aggstate->hash_ngroups_current);
+ (hash_mem / (double) aggstate->hash_ngroups_current);
}
}
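The assignment just above re-estimates the average entry footprint from observed memory once some groups exist, as sizeof(TupleHashEntryData) plus hash_mem divided by the current group count. For illustration only, with invented figures:

    #include <stdio.h>

    int
    main(void)
    {
        double      entry_header = 64;      /* pretend sizeof(TupleHashEntryData) */
        double      hash_mem = 1048576;     /* 1MB of keys and transition states */
        double      ngroups = 8192;

        /* fixed per-entry overhead plus average group payload */
        double      hashentrysize = entry_header + hash_mem / ngroups;

        printf("hashentrysize = %.0f bytes\n", hashentrysize);  /* 192 */
        return 0;
    }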
@@ -1916,8 +1913,8 @@ hash_agg_update_metrics(AggState *aggstate, bool from_tape, int npartitions)
static long
hash_choose_num_buckets(double hashentrysize, long ngroups, Size memory)
{
- long max_nbuckets;
- long nbuckets = ngroups;
+ long        max_nbuckets;
+ long        nbuckets = ngroups;
max_nbuckets = memory / hashentrysize;
@@ -1943,10 +1940,10 @@ static int
hash_choose_num_partitions(uint64 input_groups, double hashentrysize,
int used_bits, int *log2_npartitions)
{
- Size mem_wanted;
- int partition_limit;
- int npartitions;
- int partition_bits;
+ Size        mem_wanted;
+ int         partition_limit;
+ int         npartitions;
+ int         partition_bits;
/*
* Avoid creating so many partitions that the memory requirements of the
@@ -2005,8 +2002,8 @@ lookup_hash_entry(AggState *aggstate, uint32 hash, bool *in_hash_table)
AggStatePerHash perhash = &aggstate->perhash[aggstate->current_set];
TupleTableSlot *hashslot = perhash->hashslot;
TupleHashEntryData *entry;
- bool isnew = false;
- bool *p_isnew;
+ bool        isnew = false;
+ bool       *p_isnew;
/* if hash table already spilled, don't create new entries */
p_isnew = aggstate->hash_spill_mode ? NULL : &isnew;
@@ -2025,8 +2022,8 @@ lookup_hash_entry(AggState *aggstate, uint32 hash, bool *in_hash_table)
if (isnew)
{
- AggStatePerGroup pergroup;
- int transno;
+ AggStatePerGroup pergroup;
+ int         transno;
aggstate->hash_ngroups_current++;
hash_agg_check_limits(aggstate);
@@ -2083,9 +2080,9 @@ lookup_hash_entries(AggState *aggstate)
for (setno = 0; setno < aggstate->num_hashes; setno++)
{
- AggStatePerHash perhash = &aggstate->perhash[setno];
- uint32 hash;
- bool in_hash_table;
+ AggStatePerHash perhash = &aggstate->perhash[setno];
+ uint32      hash;
+ bool        in_hash_table;
select_current_set(aggstate, setno, true);
prepare_hash_slot(aggstate);
@@ -2095,8 +2092,8 @@ lookup_hash_entries(AggState *aggstate)
/* check to see if we need to spill the tuple for this grouping set */
if (!in_hash_table)
{
- HashAggSpill *spill = &aggstate->hash_spills[setno];
- TupleTableSlot *slot = aggstate->tmpcontext->ecxt_outertuple;
+ HashAggSpill *spill = &aggstate->hash_spills[setno];
+ TupleTableSlot *slot = aggstate->tmpcontext->ecxt_outertuple;
if (spill->partitions == NULL)
hashagg_spill_init(spill, aggstate->hash_tapeinfo, 0,
@@ -2560,11 +2557,11 @@ agg_fill_hash_table(AggState *aggstate)
static bool
agg_refill_hash_table(AggState *aggstate)
{
- HashAggBatch *batch;
- HashAggSpill spill;
- HashTapeInfo *tapeinfo = aggstate->hash_tapeinfo;
- uint64 ngroups_estimate;
- bool spill_initialized = false;
+ HashAggBatch *batch;
+ HashAggSpill spill;
+ HashTapeInfo *tapeinfo = aggstate->hash_tapeinfo;
+ uint64      ngroups_estimate;
+ bool        spill_initialized = false;
if (aggstate->hash_batches == NIL)
return false;
@@ -2623,11 +2620,12 @@ agg_refill_hash_table(AggState *aggstate)
LogicalTapeRewindForRead(tapeinfo->tapeset, batch->input_tapenum,
HASHAGG_READ_BUFFER_SIZE);
- for (;;) {
- TupleTableSlot *slot = aggstate->hash_spill_slot;
- MinimalTuple tuple;
- uint32 hash;
- bool in_hash_table;
+ for (;;)
+ {
+ TupleTableSlot *slot = aggstate->hash_spill_slot;
+ MinimalTuple tuple;
+ uint32 hash;
+ bool in_hash_table;
CHECK_FOR_INTERRUPTS();
@@ -2639,8 +2637,8 @@ agg_refill_hash_table(AggState *aggstate)
aggstate->tmpcontext->ecxt_outertuple = slot;
prepare_hash_slot(aggstate);
- aggstate->hash_pergroup[batch->setno] = lookup_hash_entry(
- aggstate, hash, &in_hash_table);
+ aggstate->hash_pergroup[batch->setno] =
+ lookup_hash_entry(aggstate, hash, &in_hash_table);
if (in_hash_table)
{
@@ -2657,7 +2655,7 @@ agg_refill_hash_table(AggState *aggstate)
*/
spill_initialized = true;
hashagg_spill_init(&spill, tapeinfo, batch->used_bits,
- ngroups_estimate, aggstate->hashentrysize);
+                    ngroups_estimate, aggstate->hashentrysize);
}
/* no memory for a new group, spill */
hashagg_spill_tuple(&spill, slot, hash);
@@ -2851,8 +2849,8 @@ agg_retrieve_hash_table_in_memory(AggState *aggstate)
static void
hashagg_tapeinfo_init(AggState *aggstate)
{
- HashTapeInfo *tapeinfo = palloc(sizeof(HashTapeInfo));
- int init_tapes = 16; /* expanded dynamically */
+ HashTapeInfo *tapeinfo = palloc(sizeof(HashTapeInfo));
+ int         init_tapes = 16;    /* expanded dynamically */
tapeinfo->tapeset = LogicalTapeSetCreate(init_tapes, NULL, NULL, -1);
tapeinfo->ntapes = init_tapes;
@@ -2873,7 +2871,7 @@ static void
hashagg_tapeinfo_assign(HashTapeInfo *tapeinfo, int *partitions,
int npartitions)
{
- int partidx = 0;
+ int         partidx = 0;
/* use free tapes if available */
while (partidx < npartitions && tapeinfo->nfreetapes > 0)
@@ -2899,8 +2897,8 @@ hashagg_tapeinfo_release(HashTapeInfo *tapeinfo, int tapenum)
if (tapeinfo->freetapes_alloc == tapeinfo->nfreetapes)
{
tapeinfo->freetapes_alloc <<= 1;
- tapeinfo->freetapes = repalloc(
- tapeinfo->freetapes, tapeinfo->freetapes_alloc * sizeof(int));
+ tapeinfo->freetapes = repalloc(tapeinfo->freetapes,
+ tapeinfo->freetapes_alloc * sizeof(int));
}
tapeinfo->freetapes[tapeinfo->nfreetapes++] = tapenum;
}
@@ -2915,11 +2913,11 @@ static void
hashagg_spill_init(HashAggSpill *spill, HashTapeInfo *tapeinfo, int used_bits,
uint64 input_groups, double hashentrysize)
{
- int npartitions;
- int partition_bits;
+ int         npartitions;
+ int         partition_bits;
- npartitions = hash_choose_num_partitions(
- input_groups, hashentrysize, used_bits, &partition_bits);
+ npartitions = hash_choose_num_partitions(input_groups, hashentrysize,
+ used_bits, &partition_bits);
spill->partitions = palloc0(sizeof(int) * npartitions);
spill->ntuples = palloc0(sizeof(int64) * npartitions);
@@ -2941,12 +2939,12 @@ hashagg_spill_init(HashAggSpill *spill, HashTapeInfo *tapeinfo, int used_bits,
static Size
hashagg_spill_tuple(HashAggSpill *spill, TupleTableSlot *slot, uint32 hash)
{
- LogicalTapeSet *tapeset = spill->tapeset;
- int partition;
- MinimalTuple tuple;
- int tapenum;
- int total_written = 0;
- bool shouldFree;
+ LogicalTapeSet *tapeset = spill->tapeset;
+ int         partition;
+ MinimalTuple tuple;
+ int         tapenum;
+ int         total_written = 0;
+ bool        shouldFree;
Assert(spill->partitions != NULL);
@@ -2999,11 +2997,11 @@ static MinimalTuple
hashagg_batch_read(HashAggBatch *batch, uint32 *hashp)
{
LogicalTapeSet *tapeset = batch->tapeset;
- int tapenum = batch->input_tapenum;
- MinimalTuple tuple;
- uint32 t_len;
- size_t nread;
- uint32 hash;
+ int         tapenum = batch->input_tapenum;
+ MinimalTuple tuple;
+ uint32      t_len;
+ size_t      nread;
+ uint32      hash;
nread = LogicalTapeRead(tapeset, tapenum, &hash, sizeof(uint32));
if (nread == 0)
@@ -3027,7 +3025,7 @@ hashagg_batch_read(HashAggBatch *batch, uint32 *hashp)
tuple->t_len = t_len;
nread = LogicalTapeRead(tapeset, tapenum,
- (void *)((char *)tuple + sizeof(uint32)),
+ (void *) ((char *) tuple + sizeof(uint32)),
t_len - sizeof(uint32));
if (nread != t_len - sizeof(uint32))
ereport(ERROR,
@@ -3048,14 +3046,15 @@ hashagg_batch_read(HashAggBatch *batch, uint32 *hashp)
static void
hashagg_finish_initial_spills(AggState *aggstate)
{
- int setno;
- int total_npartitions = 0;
+ int         setno;
+ int         total_npartitions = 0;
if (aggstate->hash_spills != NULL)
{
for (setno = 0; setno < aggstate->num_hashes; setno++)
{
HashAggSpill *spill = &aggstate->hash_spills[setno];
+
total_npartitions += spill->npartitions;
hashagg_spill_finish(aggstate, spill, setno);
}
@@ -3081,16 +3080,16 @@ hashagg_finish_initial_spills(AggState *aggstate)
static void
hashagg_spill_finish(AggState *aggstate, HashAggSpill *spill, int setno)
{
- int i;
- int used_bits = 32 - spill->shift;
+ int         i;
+ int         used_bits = 32 - spill->shift;
if (spill->npartitions == 0)
- return; /* didn't spill */
+ return;                     /* didn't spill */
for (i = 0; i < spill->npartitions; i++)
{
- int tapenum = spill->partitions[i];
- HashAggBatch *new_batch;
+ int         tapenum = spill->partitions[i];
+ HashAggBatch *new_batch;
/* if the partition is empty, don't create a new batch of work */
if (spill->ntuples[i] == 0)
@@ -3113,16 +3112,17 @@ hashagg_spill_finish(AggState *aggstate, HashAggSpill *spill, int setno)
static void
hashagg_reset_spill_state(AggState *aggstate)
{
- ListCell *lc;
+ ListCell   *lc;
/* free spills from initial pass */
if (aggstate->hash_spills != NULL)
{
- int setno;
+ int         setno;
for (setno = 0; setno < aggstate->num_hashes; setno++)
{
HashAggSpill *spill = &aggstate->hash_spills[setno];
+
pfree(spill->ntuples);
pfree(spill->partitions);
}
@@ -3133,7 +3133,8 @@ hashagg_reset_spill_state(AggState *aggstate)
/* free batches */
foreach(lc, aggstate->hash_batches)
{
- HashAggBatch *batch = (HashAggBatch*) lfirst(lc);
+ HashAggBatch *batch = (HashAggBatch *) lfirst(lc);
+
pfree(batch);
}
list_free(aggstate->hash_batches);
@@ -3142,7 +3143,7 @@ hashagg_reset_spill_state(AggState *aggstate)
/* close tape set */
if (aggstate->hash_tapeinfo != NULL)
{
- HashTapeInfo *tapeinfo = aggstate->hash_tapeinfo;
+ HashTapeInfo *tapeinfo = aggstate->hash_tapeinfo;
LogicalTapeSetClose(tapeinfo->tapeset);
pfree(tapeinfo->freetapes);
@@ -3558,22 +3559,22 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
*/
if (use_hashing)
{
- Plan *outerplan = outerPlan(node);
- uint64 totalGroups = 0;
- int i;
+ Plan       *outerplan = outerPlan(node);
+ uint64      totalGroups = 0;
+ int         i;
- aggstate->hash_metacxt = AllocSetContextCreate(
- aggstate->ss.ps.state->es_query_cxt,
- "HashAgg meta context",
- ALLOCSET_DEFAULT_SIZES);
- aggstate->hash_spill_slot = ExecInitExtraTupleSlot(
- estate, scanDesc, &TTSOpsMinimalTuple);
+ aggstate->hash_metacxt = AllocSetContextCreate(aggstate->ss.ps.state->es_query_cxt,
+ "HashAgg meta context",
+ ALLOCSET_DEFAULT_SIZES);
+ aggstate->hash_spill_slot = ExecInitExtraTupleSlot(estate, scanDesc,
+ &TTSOpsMinimalTuple);
/* this is an array of pointers, not structures */
aggstate->hash_pergroup = pergroups;
- aggstate->hashentrysize = hash_agg_entry_size(
- aggstate->numtrans, outerplan->plan_width, node->transitionSpace);
+ aggstate->hashentrysize = hash_agg_entry_size(aggstate->numtrans,
+ outerplan->plan_width,
+ node->transitionSpace);
/*
* Consider all of the grouping sets together when setting the limits
diff --git a/src/backend/executor/nodeBitmapHeapscan.c b/src/backend/executor/nodeBitmapHeapscan.c
index 726d3a2d9a4..5a5c410106a 100644
--- a/src/backend/executor/nodeBitmapHeapscan.c
+++ b/src/backend/executor/nodeBitmapHeapscan.c
@@ -791,8 +791,8 @@ ExecInitBitmapHeapScan(BitmapHeapScan *node, EState *estate, int eflags)
ExecInitQual(node->bitmapqualorig, (PlanState *) scanstate);
/*
- * Maximum number of prefetches for the tablespace if configured, otherwise
- * the current value of the effective_io_concurrency GUC.
+ * Maximum number of prefetches for the tablespace if configured,
+ * otherwise the current value of the effective_io_concurrency GUC.
*/
scanstate->prefetch_maximum =
get_tablespace_io_concurrency(currentRelation->rd_rel->reltablespace);
diff --git a/src/backend/executor/nodeIncrementalSort.c b/src/backend/executor/nodeIncrementalSort.c
index 7af6a12a1e3..e056469448c 100644
--- a/src/backend/executor/nodeIncrementalSort.c
+++ b/src/backend/executor/nodeIncrementalSort.c
@@ -97,17 +97,24 @@
* - groupName: the token fullsort or prefixsort
*/
#define INSTRUMENT_SORT_GROUP(node, groupName) \
- if (node->ss.ps.instrument != NULL) \
- { \
- if (node->shared_info && node->am_worker) \
+ do { \
+ if ((node)->ss.ps.instrument != NULL) \
{ \
- Assert(IsParallelWorker()); \
- Assert(ParallelWorkerNumber <= node->shared_info->num_workers); \
- instrumentSortedGroup(&node->shared_info->sinfo[ParallelWorkerNumber].groupName##GroupInfo, node->groupName##_state); \
- } else { \
- instrumentSortedGroup(&node->incsort_info.groupName##GroupInfo, node->groupName##_state); \
+ if ((node)->shared_info && (node)->am_worker) \
+ { \
+ Assert(IsParallelWorker()); \
+ Assert(ParallelWorkerNumber <= (node)->shared_info->num_workers); \
+ instrumentSortedGroup(&(node)->shared_info->sinfo[ParallelWorkerNumber].groupName##GroupInfo, \
+ (node)->groupName##_state); \
+ } \
+ else \
+ { \
+ instrumentSortedGroup(&(node)->incsort_info.groupName##GroupInfo, \
+ (node)->groupName##_state); \
+ } \
} \
- }
+ } while (0)
+
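Wrapping the macro body in do { ... } while (0), with call sites now supplying the trailing semicolon (see the INSTRUMENT_SORT_GROUP(node, ...); hunks below), is the standard guard that lets a multi-statement macro behave as a single statement under if/else. A minimal illustration with hypothetical macro names:

    #include <stdio.h>

    /* brace-only grouping breaks under if/else; do/while (0) does not */
    #define COUNT_BAD(x)  { (x)++; printf("%d\n", (x)); }
    #define COUNT_GOOD(x) do { (x)++; printf("%d\n", (x)); } while (0)

    int
    main(void)
    {
        int         n = 0;

        if (n >= 0)
            COUNT_GOOD(n);      /* the trailing ';' ends the statement */
        else
            printf("negative\n");

        /*
         * With COUNT_BAD here, the '}' would close the if statement early
         * and the ';' before 'else' would leave the else dangling: a
         * compile error.
         */
        return 0;
    }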
/* ----------------------------------------------------------------
* instrumentSortedGroup
@@ -122,6 +129,7 @@ instrumentSortedGroup(IncrementalSortGroupInfo *groupInfo,
Tuplesortstate *sortState)
{
TuplesortInstrumentation sort_instr;
+
groupInfo->groupCount++;
tuplesort_get_stats(sortState, &sort_instr);
@@ -444,7 +452,7 @@ switchToPresortedPrefixMode(PlanState *pstate)
SO1_printf("Sorting presorted prefix tuplesort with %ld tuples\n", nTuples);
tuplesort_performsort(node->prefixsort_state);
- INSTRUMENT_SORT_GROUP(node, prefixsort)
+ INSTRUMENT_SORT_GROUP(node, prefixsort);
if (node->bounded)
{
@@ -702,7 +710,7 @@ ExecIncrementalSort(PlanState *pstate)
SO1_printf("Sorting fullsort with %ld tuples\n", nTuples);
tuplesort_performsort(fullsort_state);
- INSTRUMENT_SORT_GROUP(node, fullsort)
+ INSTRUMENT_SORT_GROUP(node, fullsort);
SO_printf("Setting execution_status to INCSORT_READFULLSORT (final tuple)\n");
node->execution_status = INCSORT_READFULLSORT;
@@ -783,7 +791,7 @@ ExecIncrementalSort(PlanState *pstate)
nTuples);
tuplesort_performsort(fullsort_state);
- INSTRUMENT_SORT_GROUP(node, fullsort)
+ INSTRUMENT_SORT_GROUP(node, fullsort);
SO_printf("Setting execution_status to INCSORT_READFULLSORT (found end of group)\n");
node->execution_status = INCSORT_READFULLSORT;
@@ -792,8 +800,8 @@ ExecIncrementalSort(PlanState *pstate)
}
/*
- * Unless we've already transitioned modes to reading from the full
- * sort state, then we assume that having read at least
+ * Unless we've already transitioned modes to reading from the
+ * full sort state, then we assume that having read at least
* DEFAULT_MAX_FULL_SORT_GROUP_SIZE tuples means it's likely we're
* processing a large group of tuples all having equal prefix keys
* (but haven't yet found the final tuple in that prefix key
@@ -823,7 +831,7 @@ ExecIncrementalSort(PlanState *pstate)
SO1_printf("Sorting fullsort tuplesort with %ld tuples\n", nTuples);
tuplesort_performsort(fullsort_state);
- INSTRUMENT_SORT_GROUP(node, fullsort)
+ INSTRUMENT_SORT_GROUP(node, fullsort);
/*
* If the full sort tuplesort happened to switch into top-n
@@ -849,8 +857,9 @@ ExecIncrementalSort(PlanState *pstate)
/*
* We might have multiple prefix key groups in the full sort
- * state, so the mode transition function needs to know that it
- * needs to move from the fullsort to presorted prefix sort.
+ * state, so the mode transition function needs to know that
+ * it needs to move from the fullsort to presorted prefix
+ * sort.
*/
node->n_fullsort_remaining = nTuples;
@@ -936,7 +945,7 @@ ExecIncrementalSort(PlanState *pstate)
SO1_printf("Sorting presorted prefix tuplesort with >= %ld tuples\n", nTuples);
tuplesort_performsort(node->prefixsort_state);
- INSTRUMENT_SORT_GROUP(node, prefixsort)
+ INSTRUMENT_SORT_GROUP(node, prefixsort);
SO_printf("Setting execution_status to INCSORT_READPREFIXSORT (found end of group)\n");
node->execution_status = INCSORT_READPREFIXSORT;
@@ -986,9 +995,9 @@ ExecInitIncrementalSort(IncrementalSort *node, EState *estate, int eflags)
SO_printf("ExecInitIncrementalSort: initializing sort node\n");
/*
- * Incremental sort can't be used with EXEC_FLAG_BACKWARD or EXEC_FLAG_MARK,
- * because the current sort state contains only one sort batch rather than
- * the full result set.
+ * Incremental sort can't be used with EXEC_FLAG_BACKWARD or
+ * EXEC_FLAG_MARK, because the current sort state contains only one sort
+ * batch rather than the full result set.
*/
Assert((eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)) == 0);
@@ -1041,8 +1050,8 @@ ExecInitIncrementalSort(IncrementalSort *node, EState *estate, int eflags)
* Initialize child nodes.
*
* Incremental sort does not support backwards scans and mark/restore, so
- * we don't bother removing the flags from eflags here. We allow passing
- * a REWIND flag, because although incremental sort can't use it, the child
+ * we don't bother removing the flags from eflags here. We allow passing a
+ * REWIND flag, because although incremental sort can't use it, the child
* nodes may be able to do something more useful.
*/
outerPlanState(incrsortstate) = ExecInitNode(outerPlan(node), estate, eflags);
@@ -1128,10 +1137,10 @@ ExecReScanIncrementalSort(IncrementalSortState *node)
* re-execute the sort along with the child node. Incremental sort itself
* can't do anything smarter, but maybe the child nodes can.
*
- * In theory if we've only filled the full sort with one batch (and haven't
- * reset it for a new batch yet) then we could efficiently rewind, but
- * that seems a narrow enough case that it's not worth handling specially
- * at this time.
+ * In theory if we've only filled the full sort with one batch (and
+ * haven't reset it for a new batch yet) then we could efficiently rewind,
+ * but that seems a narrow enough case that it's not worth handling
+ * specially at this time.
*/
/* must drop pointer to sort result tuple */
@@ -1152,10 +1161,10 @@ ExecReScanIncrementalSort(IncrementalSortState *node)
/*
* If we've set up either of the sort states yet, we need to reset them.
* We could end them and null out the pointers, but there's no reason to
- * repay the setup cost, and because ExecIncrementalSort guards
- * presorted column functions by checking to see if the full sort state
- * has been initialized yet, setting the sort states to null here might
- * actually cause a leak.
+ * repay the setup cost, and because ExecIncrementalSort guards presorted
+ * column functions by checking to see if the full sort state has been
+ * initialized yet, setting the sort states to null here might actually
+ * cause a leak.
*/
if (node->fullsort_state != NULL)
{
diff --git a/src/backend/executor/nodeTidscan.c b/src/backend/executor/nodeTidscan.c
index b53a6bbe1d6..8049fdc64ea 100644
--- a/src/backend/executor/nodeTidscan.c
+++ b/src/backend/executor/nodeTidscan.c
@@ -144,7 +144,7 @@ TidListEval(TidScanState *tidstate)
if (tidstate->ss.ss_currentScanDesc == NULL)
tidstate->ss.ss_currentScanDesc =
table_beginscan_tid(tidstate->ss.ss_currentRelation,
- tidstate->ss.ps.state->es_snapshot);
+                     tidstate->ss.ps.state->es_snapshot);
scan = tidstate->ss.ss_currentScanDesc;
/*