author		Robert Haas <rhaas@postgresql.org>	2016-06-09 18:02:36 -0400
committer	Robert Haas <rhaas@postgresql.org>	2016-06-09 18:02:36 -0400
commit		4bc424b968058c7f0aa685821d7039e86faac99c (patch)
tree		a4e245ae67bd11edb3926ff5fb3b0223438ac283 /src/backend/executor
parent		9164deea2f4ac90ee5e008ff41fc5ad4423887b2 (diff)
pgindent run for 9.6
Diffstat (limited to 'src/backend/executor')
-rw-r--r--	src/backend/executor/execAmi.c		 7
-rw-r--r--	src/backend/executor/execIndexing.c	 2
-rw-r--r--	src/backend/executor/execMain.c		16
-rw-r--r--	src/backend/executor/execParallel.c	46
-rw-r--r--	src/backend/executor/functions.c	 4
-rw-r--r--	src/backend/executor/nodeAgg.c		60
-rw-r--r--	src/backend/executor/nodeForeignscan.c	 4
-rw-r--r--	src/backend/executor/nodeGather.c	26
-rw-r--r--	src/backend/executor/nodeModifyTable.c	10
-rw-r--r--	src/backend/executor/nodeSeqscan.c	14
-rw-r--r--	src/backend/executor/nodeWindowAgg.c	 4
-rw-r--r--	src/backend/executor/tqueue.c		32
12 files changed, 113 insertions, 112 deletions
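
For context, the layout rules that produce all of the churn below can be seen in
the hunks themselves: pointer declarators bind to the variable name rather than
floating after the type name (e.g. the "TQueueDestReceiver * tqueue" fixes in
tqueue.c), local variable names are aligned in a tab column, and comment blocks
are re-flowed so lines fill out to the standard width. The following stand-alone
C sketch is illustrative only — it is not part of this commit — and simply shows
code already written in the post-pgindent style:

/*
 * Illustrative sketch of the conventions pgindent enforces; a compilable
 * demonstration, not code from the PostgreSQL tree.
 */
#include <stdio.h>

typedef struct DemoState
{
	int			nnodes;		/* declarator names aligned in a tab column */
	const char *label;		/* '*' binds to the name, not the type */
} DemoState;

/*
 * Comment blocks are re-wrapped so that each line fills out to the
 * standard width instead of breaking early, which accounts for most of
 * the comment churn in this diff.
 */
static int
demo_sum(DemoState *state, int upto)	/* not "DemoState * state" */
{
	int			i;
	int			total = 0;

	for (i = 0; i < upto; i++)
		total += i;
	return total + state->nnodes;
}

int
main(void)
{
	DemoState	state = {3, "demo"};

	printf("%s: %d\n", state.label, demo_sum(&state, 5));
	return 0;
}

In the PostgreSQL tree the tool itself lives under src/tools/pgindent; the diff
below is purely the result of re-running it for the 9.6 cycle, with no
behavioral changes.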
diff --git a/src/backend/executor/execAmi.c b/src/backend/executor/execAmi.c
index 0c8e9399052..4a978adea71 100644
--- a/src/backend/executor/execAmi.c
+++ b/src/backend/executor/execAmi.c
@@ -444,10 +444,9 @@ ExecSupportsBackwardScan(Plan *node)
return false;
/*
- * Parallel-aware nodes return a subset of the tuples in each worker,
- * and in general we can't expect to have enough bookkeeping state to
- * know which ones we returned in this worker as opposed to some other
- * worker.
+ * Parallel-aware nodes return a subset of the tuples in each worker, and
+ * in general we can't expect to have enough bookkeeping state to know
+ * which ones we returned in this worker as opposed to some other worker.
*/
if (node->parallel_aware)
return false;
diff --git a/src/backend/executor/execIndexing.c b/src/backend/executor/execIndexing.c
index a2eeeb6f6cd..c819d19db42 100644
--- a/src/backend/executor/execIndexing.c
+++ b/src/backend/executor/execIndexing.c
@@ -725,7 +725,7 @@ retry:
{
TransactionId xwait;
ItemPointerData ctid_wait;
- XLTW_Oper reason_wait;
+ XLTW_Oper reason_wait;
Datum existing_values[INDEX_MAX_KEYS];
bool existing_isnull[INDEX_MAX_KEYS];
char *error_new;
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index b5ced388d20..32bb3f92054 100644
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -1851,25 +1851,25 @@ ExecWithCheckOptions(WCOKind kind, ResultRelInfo *resultRelInfo,
if (wco->polname != NULL)
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("new row violates row-level security policy \"%s\" for table \"%s\"",
- wco->polname, wco->relname)));
+ errmsg("new row violates row-level security policy \"%s\" for table \"%s\"",
+ wco->polname, wco->relname)));
else
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("new row violates row-level security policy for table \"%s\"",
- wco->relname)));
+ errmsg("new row violates row-level security policy for table \"%s\"",
+ wco->relname)));
break;
case WCO_RLS_CONFLICT_CHECK:
if (wco->polname != NULL)
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("new row violates row-level security policy \"%s\" (USING expression) for table \"%s\"",
- wco->polname, wco->relname)));
+ errmsg("new row violates row-level security policy \"%s\" (USING expression) for table \"%s\"",
+ wco->polname, wco->relname)));
else
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("new row violates row-level security policy (USING expression) for table \"%s\"",
- wco->relname)));
+ errmsg("new row violates row-level security policy (USING expression) for table \"%s\"",
+ wco->relname)));
break;
default:
elog(ERROR, "unrecognized WCO kind: %u", wco->kind);
diff --git a/src/backend/executor/execParallel.c b/src/backend/executor/execParallel.c
index f03cd9b07b3..6de90705e48 100644
--- a/src/backend/executor/execParallel.c
+++ b/src/backend/executor/execParallel.c
@@ -83,7 +83,7 @@ struct SharedExecutorInstrumentation
typedef struct ExecParallelEstimateContext
{
ParallelContext *pcxt;
- int nnodes;
+ int nnodes;
} ExecParallelEstimateContext;
/* Context object for ExecParallelInitializeDSM. */
@@ -91,7 +91,7 @@ typedef struct ExecParallelInitializeDSMContext
{
ParallelContext *pcxt;
SharedExecutorInstrumentation *instrumentation;
- int nnodes;
+ int nnodes;
} ExecParallelInitializeDSMContext;
/* Helper functions that run in the parallel leader. */
@@ -99,11 +99,11 @@ static char *ExecSerializePlan(Plan *plan, EState *estate);
static bool ExecParallelEstimate(PlanState *node,
ExecParallelEstimateContext *e);
static bool ExecParallelInitializeDSM(PlanState *node,
- ExecParallelInitializeDSMContext *d);
+ ExecParallelInitializeDSMContext *d);
static shm_mq_handle **ExecParallelSetupTupleQueues(ParallelContext *pcxt,
bool reinitialize);
static bool ExecParallelRetrieveInstrumentation(PlanState *planstate,
- SharedExecutorInstrumentation *instrumentation);
+ SharedExecutorInstrumentation *instrumentation);
/* Helper functions that run in the parallel worker. */
static void ParallelQueryMain(dsm_segment *seg, shm_toc *toc);
@@ -387,12 +387,12 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers)
/* Estimate space for tuple queues. */
shm_toc_estimate_chunk(&pcxt->estimator,
- mul_size(PARALLEL_TUPLE_QUEUE_SIZE, pcxt->nworkers));
+ mul_size(PARALLEL_TUPLE_QUEUE_SIZE, pcxt->nworkers));
shm_toc_estimate_keys(&pcxt->estimator, 1);
/*
- * Give parallel-aware nodes a chance to add to the estimates, and get
- * a count of how many PlanState nodes there are.
+ * Give parallel-aware nodes a chance to add to the estimates, and get a
+ * count of how many PlanState nodes there are.
*/
e.pcxt = pcxt;
e.nnodes = 0;
@@ -444,14 +444,14 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers)
pei->tqueue = ExecParallelSetupTupleQueues(pcxt, false);
/*
- * If instrumentation options were supplied, allocate space for the
- * data. It only gets partially initialized here; the rest happens
- * during ExecParallelInitializeDSM.
+ * If instrumentation options were supplied, allocate space for the data.
+ * It only gets partially initialized here; the rest happens during
+ * ExecParallelInitializeDSM.
*/
if (estate->es_instrument)
{
Instrumentation *instrument;
- int i;
+ int i;
instrumentation = shm_toc_allocate(pcxt->toc, instrumentation_len);
instrumentation->instrument_options = estate->es_instrument;
@@ -493,13 +493,13 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers)
*/
static bool
ExecParallelRetrieveInstrumentation(PlanState *planstate,
- SharedExecutorInstrumentation *instrumentation)
+ SharedExecutorInstrumentation *instrumentation)
{
Instrumentation *instrument;
- int i;
- int n;
- int ibytes;
- int plan_node_id = planstate->plan->plan_node_id;
+ int i;
+ int n;
+ int ibytes;
+ int plan_node_id = planstate->plan->plan_node_id;
/* Find the instrumentation for this node. */
for (i = 0; i < instrumentation->num_plan_nodes; ++i)
@@ -532,7 +532,7 @@ ExecParallelRetrieveInstrumentation(PlanState *planstate,
void
ExecParallelFinish(ParallelExecutorInfo *pei)
{
- int i;
+ int i;
if (pei->finished)
return;
@@ -626,19 +626,19 @@ ExecParallelGetQueryDesc(shm_toc *toc, DestReceiver *receiver,
*/
static bool
ExecParallelReportInstrumentation(PlanState *planstate,
- SharedExecutorInstrumentation *instrumentation)
+ SharedExecutorInstrumentation *instrumentation)
{
- int i;
- int plan_node_id = planstate->plan->plan_node_id;
+ int i;
+ int plan_node_id = planstate->plan->plan_node_id;
Instrumentation *instrument;
InstrEndLoop(planstate->instrument);
/*
* If we shuffled the plan_node_id values in ps_instrument into sorted
- * order, we could use binary search here. This might matter someday
- * if we're pushing down sufficiently large plan trees. For now, do it
- * the slow, dumb way.
+ * order, we could use binary search here. This might matter someday if
+ * we're pushing down sufficiently large plan trees. For now, do it the
+ * slow, dumb way.
*/
for (i = 0; i < instrumentation->num_plan_nodes; ++i)
if (instrumentation->plan_node_id[i] == plan_node_id)
diff --git a/src/backend/executor/functions.c b/src/backend/executor/functions.c
index cd93c045dcb..e02fba52329 100644
--- a/src/backend/executor/functions.c
+++ b/src/backend/executor/functions.c
@@ -497,8 +497,8 @@ init_execution_state(List *queryTree_list,
stmt = queryTree->utilityStmt;
else
stmt = (Node *) pg_plan_query(queryTree,
- fcache->readonly_func ? CURSOR_OPT_PARALLEL_OK : 0,
- NULL);
+ fcache->readonly_func ? CURSOR_OPT_PARALLEL_OK : 0,
+ NULL);
/* Precheck all commands for validity in a function */
if (IsA(stmt, TransactionStmt))
diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c
index 0c1e4a3cb6e..c3a04ef7daa 100644
--- a/src/backend/executor/nodeAgg.c
+++ b/src/backend/executor/nodeAgg.c
@@ -491,9 +491,9 @@ static void finalize_aggregate(AggState *aggstate,
AggStatePerGroup pergroupstate,
Datum *resultVal, bool *resultIsNull);
static void finalize_partialaggregate(AggState *aggstate,
- AggStatePerAgg peragg,
- AggStatePerGroup pergroupstate,
- Datum *resultVal, bool *resultIsNull);
+ AggStatePerAgg peragg,
+ AggStatePerGroup pergroupstate,
+ Datum *resultVal, bool *resultIsNull);
static void prepare_projection_slot(AggState *aggstate,
TupleTableSlot *slot,
int currentSet);
@@ -981,17 +981,18 @@ combine_aggregates(AggState *aggstate, AggStatePerGroup pergroup)
if (OidIsValid(pertrans->deserialfn_oid))
{
/*
- * Don't call a strict deserialization function with NULL input.
- * A strict deserialization function and a null value means we skip
- * calling the combine function for this state. We assume that this
- * would be a waste of time and effort anyway so just skip it.
+ * Don't call a strict deserialization function with NULL input. A
+ * strict deserialization function and a null value mean we skip
+ * calling the combine function for this state. We assume that
+ * this would be a waste of time and effort anyway, so just skip
+ * it.
*/
if (pertrans->deserialfn.fn_strict && slot->tts_isnull[0])
continue;
else
{
- FunctionCallInfo dsinfo = &pertrans->deserialfn_fcinfo;
- MemoryContext oldContext;
+ FunctionCallInfo dsinfo = &pertrans->deserialfn_fcinfo;
+ MemoryContext oldContext;
dsinfo->arg[0] = slot->tts_values[0];
dsinfo->argnull[0] = slot->tts_isnull[0];
@@ -1423,14 +1424,14 @@ finalize_partialaggregate(AggState *aggstate,
AggStatePerGroup pergroupstate,
Datum *resultVal, bool *resultIsNull)
{
- AggStatePerTrans pertrans = &aggstate->pertrans[peragg->transno];
- MemoryContext oldContext;
+ AggStatePerTrans pertrans = &aggstate->pertrans[peragg->transno];
+ MemoryContext oldContext;
oldContext = MemoryContextSwitchTo(aggstate->ss.ps.ps_ExprContext->ecxt_per_tuple_memory);
/*
- * serialfn_oid will be set if we must serialize the input state
- * before calling the combine function on the state.
+ * serialfn_oid will be set if we must serialize the input state before
+ * calling the combine function on the state.
*/
if (OidIsValid(pertrans->serialfn_oid))
{
@@ -1443,6 +1444,7 @@ finalize_partialaggregate(AggState *aggstate,
else
{
FunctionCallInfo fcinfo = &pertrans->serialfn_fcinfo;
+
fcinfo->arg[0] = pergroupstate->transValue;
fcinfo->argnull[0] = pergroupstate->transValueIsNull;
@@ -1459,7 +1461,7 @@ finalize_partialaggregate(AggState *aggstate,
/* If result is pass-by-ref, make sure it is in the right context. */
if (!peragg->resulttypeByVal && !*resultIsNull &&
!MemoryContextContains(CurrentMemoryContext,
- DatumGetPointer(*resultVal)))
+ DatumGetPointer(*resultVal)))
*resultVal = datumCopy(*resultVal,
peragg->resulttypeByVal,
peragg->resulttypeLen);
@@ -2627,21 +2629,21 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
*
* 1. An aggregate function appears more than once in query:
*
- * SELECT SUM(x) FROM ... HAVING SUM(x) > 0
+ * SELECT SUM(x) FROM ... HAVING SUM(x) > 0
*
- * Since the aggregates are the identical, we only need to calculate
- * the calculate it once. Both aggregates will share the same 'aggno'
- * value.
+ * Since the aggregates are identical, we only need to calculate
+ * the value once. Both aggregates will share the same 'aggno'
+ * value.
*
* 2. Two different aggregate functions appear in the query, but the
- * aggregates have the same transition function and initial value, but
- * different final function:
+ * aggregates have the same transition function and initial value, but
+ * different final function:
*
- * SELECT SUM(x), AVG(x) FROM ...
+ * SELECT SUM(x), AVG(x) FROM ...
*
- * In this case we must create a new peragg for the varying aggregate,
- * and need to call the final functions separately, but can share the
- * same transition state.
+ * In this case we must create a new peragg for the varying aggregate,
+ * and need to call the final functions separately, but can share the
+ * same transition state.
*
* For either of these optimizations to be valid, the aggregate's
* arguments must be the same, including any modifiers such as ORDER BY,
@@ -2889,8 +2891,8 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
*/
existing_transno = find_compatible_pertrans(aggstate, aggref,
transfn_oid, aggtranstype,
- serialfn_oid, deserialfn_oid,
- initValue, initValueIsNull,
+ serialfn_oid, deserialfn_oid,
+ initValue, initValueIsNull,
same_input_transnos);
if (existing_transno != -1)
{
@@ -3366,9 +3368,9 @@ find_compatible_pertrans(AggState *aggstate, Aggref *newagg,
/*
* The serialization and deserialization functions must match, if
* present, as we're unable to share the trans state for aggregates
- * which will serialize or deserialize into different formats. Remember
- * that these will be InvalidOid if they're not required for this agg
- * node.
+ * which will serialize or deserialize into different formats.
+ * Remember that these will be InvalidOid if they're not required for
+ * this agg node.
*/
if (aggserialfn != pertrans->serialfn_oid ||
aggdeserialfn != pertrans->deserialfn_oid)
diff --git a/src/backend/executor/nodeForeignscan.c b/src/backend/executor/nodeForeignscan.c
index 300f947d431..d886aaf64d6 100644
--- a/src/backend/executor/nodeForeignscan.c
+++ b/src/backend/executor/nodeForeignscan.c
@@ -285,8 +285,8 @@ ExecReScanForeignScan(ForeignScanState *node)
/*
* If chgParam of subnode is not null then plan will be re-scanned by
- * first ExecProcNode. outerPlan may also be NULL, in which case there
- * is nothing to rescan at all.
+ * first ExecProcNode. outerPlan may also be NULL, in which case there is
+ * nothing to rescan at all.
*/
if (outerPlan != NULL && outerPlan->chgParam == NULL)
ExecReScan(outerPlan);
diff --git a/src/backend/executor/nodeGather.c b/src/backend/executor/nodeGather.c
index 3834ed678cb..313b2344540 100644
--- a/src/backend/executor/nodeGather.c
+++ b/src/backend/executor/nodeGather.c
@@ -138,8 +138,8 @@ ExecGather(GatherState *node)
/*
* Initialize the parallel context and workers on first execution. We do
* this on first execution rather than during node initialization, as it
- * needs to allocate large dynamic segment, so it is better to do if it
- * is really needed.
+ * needs to allocate a large dynamic shared memory segment, so it is
+ * better to do so only if it is really needed.
*/
if (!node->initialized)
{
@@ -147,8 +147,8 @@ ExecGather(GatherState *node)
Gather *gather = (Gather *) node->ps.plan;
/*
- * Sometimes we might have to run without parallelism; but if
- * parallel mode is active then we can try to fire up some workers.
+ * Sometimes we might have to run without parallelism; but if parallel
+ * mode is active then we can try to fire up some workers.
*/
if (gather->num_workers > 0 && IsInParallelMode())
{
@@ -186,7 +186,7 @@ ExecGather(GatherState *node)
}
else
{
- /* No workers? Then never mind. */
+ /* No workers? Then never mind. */
ExecShutdownGatherWorkers(node);
}
}
@@ -314,7 +314,7 @@ gather_getnext(GatherState *gatherstate)
static HeapTuple
gather_readnext(GatherState *gatherstate)
{
- int waitpos = gatherstate->nextreader;
+ int waitpos = gatherstate->nextreader;
for (;;)
{
@@ -330,8 +330,8 @@ gather_readnext(GatherState *gatherstate)
tup = TupleQueueReaderNext(reader, true, &readerdone);
/*
- * If this reader is done, remove it. If all readers are done,
- * clean up remaining worker state.
+ * If this reader is done, remove it. If all readers are done, clean
+ * up remaining worker state.
*/
if (readerdone)
{
@@ -402,7 +402,7 @@ ExecShutdownGatherWorkers(GatherState *node)
/* Shut down tuple queue readers before shutting down workers. */
if (node->reader != NULL)
{
- int i;
+ int i;
for (i = 0; i < node->nreaders; ++i)
DestroyTupleQueueReader(node->reader[i]);
@@ -452,10 +452,10 @@ void
ExecReScanGather(GatherState *node)
{
/*
- * Re-initialize the parallel workers to perform rescan of relation.
- * We want to gracefully shutdown all the workers so that they
- * should be able to propagate any error or other information to master
- * backend before dying. Parallel context will be reused for rescan.
+ * Re-initialize the parallel workers to perform a rescan of the
+ * relation. We want to shut down all the workers gracefully so that
+ * they can propagate any error or other information to the master
+ * backend before dying. The parallel context will be reused for rescan.
*/
ExecShutdownGatherWorkers(node);
diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c
index e62c8aad657..af7b26c0ef0 100644
--- a/src/backend/executor/nodeModifyTable.c
+++ b/src/backend/executor/nodeModifyTable.c
@@ -1221,10 +1221,10 @@ ExecOnConflictUpdate(ModifyTableState *mtstate,
/*
* Note that it is possible that the target tuple has been modified in
* this session, after the above heap_lock_tuple. We choose to not error
- * out in that case, in line with ExecUpdate's treatment of similar
- * cases. This can happen if an UPDATE is triggered from within
- * ExecQual(), ExecWithCheckOptions() or ExecProject() above, e.g. by
- * selecting from a wCTE in the ON CONFLICT's SET.
+ * out in that case, in line with ExecUpdate's treatment of similar cases.
+ * This can happen if an UPDATE is triggered from within ExecQual(),
+ * ExecWithCheckOptions() or ExecProject() above, e.g. by selecting from a
+ * wCTE in the ON CONFLICT's SET.
*/
/* Execute UPDATE with projection */
@@ -1595,7 +1595,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
/* Initialize the usesFdwDirectModify flag */
resultRelInfo->ri_usesFdwDirectModify = bms_is_member(i,
- node->fdwDirectModifyPlans);
+ node->fdwDirectModifyPlans);
/*
* Verify result relation is a valid target for the current operation
diff --git a/src/backend/executor/nodeSeqscan.c b/src/backend/executor/nodeSeqscan.c
index f12921d1889..00bf3a58b1a 100644
--- a/src/backend/executor/nodeSeqscan.c
+++ b/src/backend/executor/nodeSeqscan.c
@@ -65,8 +65,8 @@ SeqNext(SeqScanState *node)
if (scandesc == NULL)
{
/*
- * We reach here if the scan is not parallel, or if we're executing
- * a scan that was intended to be parallel serially.
+ * We reach here if the scan is not parallel, or if we're serially
+ * executing a scan that was intended to be parallel.
*/
scandesc = heap_beginscan(node->ss.ss_currentRelation,
estate->es_snapshot,
@@ -145,7 +145,7 @@ InitScanRelation(SeqScanState *node, EState *estate, int eflags)
* open that relation and acquire appropriate lock on it.
*/
currentRelation = ExecOpenScanRelation(estate,
- ((SeqScan *) node->ss.ps.plan)->scanrelid,
+ ((SeqScan *) node->ss.ps.plan)->scanrelid,
eflags);
node->ss.ss_currentRelation = currentRelation;
@@ -277,8 +277,8 @@ ExecReScanSeqScan(SeqScanState *node)
scan = node->ss.ss_currentScanDesc;
if (scan != NULL)
- heap_rescan(scan, /* scan desc */
- NULL); /* new scan keys */
+ heap_rescan(scan, /* scan desc */
+ NULL); /* new scan keys */
ExecScanReScan((ScanState *) node);
}
@@ -316,7 +316,7 @@ ExecSeqScanInitializeDSM(SeqScanState *node,
ParallelContext *pcxt)
{
EState *estate = node->ss.ps.state;
- ParallelHeapScanDesc pscan;
+ ParallelHeapScanDesc pscan;
pscan = shm_toc_allocate(pcxt->toc, node->pscan_len);
heap_parallelscan_initialize(pscan,
@@ -336,7 +336,7 @@ ExecSeqScanInitializeDSM(SeqScanState *node,
void
ExecSeqScanInitializeWorker(SeqScanState *node, shm_toc *toc)
{
- ParallelHeapScanDesc pscan;
+ ParallelHeapScanDesc pscan;
pscan = shm_toc_lookup(toc, node->ss.ps.plan->plan_node_id);
node->ss.ss_currentScanDesc =
diff --git a/src/backend/executor/nodeWindowAgg.c b/src/backend/executor/nodeWindowAgg.c
index f06eebee0cd..d4c88a1f0ef 100644
--- a/src/backend/executor/nodeWindowAgg.c
+++ b/src/backend/executor/nodeWindowAgg.c
@@ -2220,8 +2220,8 @@ initialize_peragg(WindowAggState *winstate, WindowFunc *wfunc,
/* build expression trees using actual argument & result types */
build_aggregate_transfn_expr(inputTypes,
numArguments,
- 0, /* no ordered-set window functions yet */
- false, /* no variadic window functions yet */
+ 0, /* no ordered-set window functions yet */
+ false, /* no variadic window functions yet */
wfunc->wintype,
wfunc->inputcollid,
transfn_oid,
diff --git a/src/backend/executor/tqueue.c b/src/backend/executor/tqueue.c
index 8abb1f16e45..a729372c740 100644
--- a/src/backend/executor/tqueue.c
+++ b/src/backend/executor/tqueue.c
@@ -44,13 +44,13 @@ typedef enum
TQUEUE_REMAP_ARRAY, /* array */
TQUEUE_REMAP_RANGE, /* range */
TQUEUE_REMAP_RECORD /* composite type, named or anonymous */
-} RemapClass;
+} RemapClass;
typedef struct
{
int natts;
RemapClass mapping[FLEXIBLE_ARRAY_MEMBER];
-} RemapInfo;
+} RemapInfo;
typedef struct
{
@@ -61,13 +61,13 @@ typedef struct
char mode;
TupleDesc tupledesc;
RemapInfo *remapinfo;
-} TQueueDestReceiver;
+} TQueueDestReceiver;
typedef struct RecordTypemodMap
{
int remotetypmod;
int localtypmod;
-} RecordTypemodMap;
+} RecordTypemodMap;
struct TupleQueueReader
{
@@ -81,19 +81,19 @@ struct TupleQueueReader
#define TUPLE_QUEUE_MODE_CONTROL 'c'
#define TUPLE_QUEUE_MODE_DATA 'd'
-static void tqueueWalk(TQueueDestReceiver * tqueue, RemapClass walktype,
+static void tqueueWalk(TQueueDestReceiver *tqueue, RemapClass walktype,
Datum value);
-static void tqueueWalkRecord(TQueueDestReceiver * tqueue, Datum value);
-static void tqueueWalkArray(TQueueDestReceiver * tqueue, Datum value);
-static void tqueueWalkRange(TQueueDestReceiver * tqueue, Datum value);
-static void tqueueSendTypmodInfo(TQueueDestReceiver * tqueue, int typmod,
+static void tqueueWalkRecord(TQueueDestReceiver *tqueue, Datum value);
+static void tqueueWalkArray(TQueueDestReceiver *tqueue, Datum value);
+static void tqueueWalkRange(TQueueDestReceiver *tqueue, Datum value);
+static void tqueueSendTypmodInfo(TQueueDestReceiver *tqueue, int typmod,
TupleDesc tupledesc);
static void TupleQueueHandleControlMessage(TupleQueueReader *reader,
Size nbytes, char *data);
static HeapTuple TupleQueueHandleDataMessage(TupleQueueReader *reader,
Size nbytes, HeapTupleHeader data);
static HeapTuple TupleQueueRemapTuple(TupleQueueReader *reader,
- TupleDesc tupledesc, RemapInfo * remapinfo,
+ TupleDesc tupledesc, RemapInfo *remapinfo,
HeapTuple tuple);
static Datum TupleQueueRemap(TupleQueueReader *reader, RemapClass remapclass,
Datum value);
@@ -212,7 +212,7 @@ tqueueReceiveSlot(TupleTableSlot *slot, DestReceiver *self)
* Invoke the appropriate walker function based on the given RemapClass.
*/
static void
-tqueueWalk(TQueueDestReceiver * tqueue, RemapClass walktype, Datum value)
+tqueueWalk(TQueueDestReceiver *tqueue, RemapClass walktype, Datum value)
{
check_stack_depth();
@@ -237,7 +237,7 @@ tqueueWalk(TQueueDestReceiver * tqueue, RemapClass walktype, Datum value)
* contained therein.
*/
static void
-tqueueWalkRecord(TQueueDestReceiver * tqueue, Datum value)
+tqueueWalkRecord(TQueueDestReceiver *tqueue, Datum value)
{
HeapTupleHeader tup;
Oid typeid;
@@ -304,7 +304,7 @@ tqueueWalkRecord(TQueueDestReceiver * tqueue, Datum value)
* contained therein.
*/
static void
-tqueueWalkArray(TQueueDestReceiver * tqueue, Datum value)
+tqueueWalkArray(TQueueDestReceiver *tqueue, Datum value)
{
ArrayType *arr = DatumGetArrayTypeP(value);
Oid typeid = ARR_ELEMTYPE(arr);
@@ -342,7 +342,7 @@ tqueueWalkArray(TQueueDestReceiver * tqueue, Datum value)
* contained therein.
*/
static void
-tqueueWalkRange(TQueueDestReceiver * tqueue, Datum value)
+tqueueWalkRange(TQueueDestReceiver *tqueue, Datum value)
{
RangeType *range = DatumGetRangeType(value);
Oid typeid = RangeTypeGetOid(range);
@@ -386,7 +386,7 @@ tqueueWalkRange(TQueueDestReceiver * tqueue, Datum value)
* already done so previously.
*/
static void
-tqueueSendTypmodInfo(TQueueDestReceiver * tqueue, int typmod,
+tqueueSendTypmodInfo(TQueueDestReceiver *tqueue, int typmod,
TupleDesc tupledesc)
{
StringInfoData buf;
@@ -613,7 +613,7 @@ TupleQueueHandleDataMessage(TupleQueueReader *reader,
*/
static HeapTuple
TupleQueueRemapTuple(TupleQueueReader *reader, TupleDesc tupledesc,
- RemapInfo * remapinfo, HeapTuple tuple)
+ RemapInfo *remapinfo, HeapTuple tuple)
{
Datum *values;
bool *isnull;