Diffstat (limited to 'src/backend/executor/nodeGatherMerge.c')
-rw-r--r--  src/backend/executor/nodeGatherMerge.c | 29
1 file changed, 15 insertions(+), 14 deletions(-)
diff --git a/src/backend/executor/nodeGatherMerge.c b/src/backend/executor/nodeGatherMerge.c
index f50841699c4..0bd5da38b4a 100644
--- a/src/backend/executor/nodeGatherMerge.c
+++ b/src/backend/executor/nodeGatherMerge.c
@@ -187,12 +187,15 @@ ExecGatherMerge(PlanState *pstate)
{
ParallelContext *pcxt;
- /* Initialize data structures for workers. */
+ /* Initialize, or re-initialize, shared state needed by workers. */
if (!node->pei)
node->pei = ExecInitParallelPlan(node->ps.lefttree,
estate,
gm->num_workers,
node->tuples_needed);
+ else
+ ExecParallelReinitialize(node->ps.lefttree,
+ node->pei);
/* Try to launch workers. */
pcxt = node->pei->pcxt;
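For context, the hunk above sits inside the branch of ExecGatherMerge that fires up workers when the node has not yet been initialized for the current scan. The condensed sketch below shows where the changed lines land; it assumes the usual launch sequence that follows in the unmodified code (LaunchParallelWorkers and the nworkers_launched bookkeeping) and is illustrative rather than a copy of the surrounding source.

    if (gm->num_workers > 0 && estate->es_use_parallel_mode)
    {
        ParallelContext *pcxt;

        /* Build the shared parallel state once, or refresh it for a rescan. */
        if (!node->pei)
            node->pei = ExecInitParallelPlan(node->ps.lefttree, estate,
                                             gm->num_workers,
                                             node->tuples_needed);
        else
            ExecParallelReinitialize(node->ps.lefttree, node->pei);

        /* Launch workers against the (re)initialized shared state. */
        pcxt = node->pei->pcxt;
        LaunchParallelWorkers(pcxt);
        node->nworkers_launched = pcxt->nworkers_launched;
    }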
@@ -321,7 +324,7 @@ ExecShutdownGatherMergeWorkers(GatherMergeState *node)
/* ----------------------------------------------------------------
* ExecReScanGatherMerge
*
- * Re-initialize the workers and rescans a relation via them.
+ * Prepare to re-scan the result of a GatherMerge.
* ----------------------------------------------------------------
*/
void
@@ -330,20 +333,13 @@ ExecReScanGatherMerge(GatherMergeState *node)
GatherMerge *gm = (GatherMerge *) node->ps.plan;
PlanState *outerPlan = outerPlanState(node);
- /*
- * Re-initialize the parallel workers to perform rescan of relation. We
- * want to gracefully shutdown all the workers so that they should be able
- * to propagate any error or other information to master backend before
- * dying. Parallel context will be reused for rescan.
- */
+ /* Make sure any existing workers are gracefully shut down */
ExecShutdownGatherMergeWorkers(node);
+ /* Mark node so that shared state will be rebuilt at next call */
node->initialized = false;
node->gm_initialized = false;
- if (node->pei)
- ExecParallelReinitialize(node->pei);
-
/*
* Set child node's chgParam to tell it that the next scan might deliver a
* different set of rows within the leader process. (The overall rowset
@@ -355,10 +351,15 @@ ExecReScanGatherMerge(GatherMergeState *node)
outerPlan->chgParam = bms_add_member(outerPlan->chgParam,
gm->rescan_param);
-
/*
- * if chgParam of subnode is not null then plan will be re-scanned by
- * first ExecProcNode.
+ * If chgParam of subnode is not null then plan will be re-scanned by
+ * first ExecProcNode. Note: because this does nothing if we have a
+ * rescan_param, it's currently guaranteed that parallel-aware child nodes
+ * will not see a ReScan call until after they get a ReInitializeDSM call.
+ * That ordering might not be something to rely on, though. A good rule
+ * of thumb is that ReInitializeDSM should reset only shared state, ReScan
+ * should reset only local state, and anything that depends on both of
+ * those steps being finished must wait until the first ExecProcNode call.
*/
if (outerPlan->chgParam == NULL)
ExecReScan(outerPlan);
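The rule of thumb in the new comment splits rescan work between two callbacks. The sketch below shows how a hypothetical parallel-aware scan node (called "Foo" here; the type, field, and function names are illustrative and not part of this patch or of any existing executor node) might follow that division.

    #include "postgres.h"
    #include "access/parallel.h"

    /* Shared descriptor, allocated in the DSM segment and seen by all workers. */
    typedef struct ParallelFooScanDescData
    {
        uint32      phs_next_item;      /* next item for any participant to claim */
    } ParallelFooScanDescData;

    /* Backend-private executor state for the hypothetical Foo node. */
    typedef struct FooScanState
    {
        ParallelFooScanDescData *pscan; /* points into shared memory, or NULL */
        bool        foo_done;           /* local progress flag */
    } FooScanState;

    /* ReInitializeDSM callback: reset only state that lives in shared memory. */
    void
    ExecFooReInitializeDSM(FooScanState *node, ParallelContext *pcxt)
    {
        node->pscan->phs_next_item = 0;
    }

    /* ReScan callback: reset only backend-local state; leave the DSM alone,
     * since the leader may not have reinitialized it yet. */
    void
    ExecReScanFoo(FooScanState *node)
    {
        node->foo_done = false;
    }

    /* Anything that needs both resets to have happened (for example,
     * re-attaching to the shared descriptor) waits for the node's first
     * ExecProcNode call. */

Under that division, ExecReScanGatherMerge above only clears local flags and defers the shared-state refresh to the next ExecGatherMerge call, which is the ordering the comment describes.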