aboutsummaryrefslogtreecommitdiff
path: root/src/backend/executor/execMain.c
diff options
context:
space:
mode:
author: Robert Haas <rhaas@postgresql.org> 2015-10-16 11:56:02 -0400
committer: Robert Haas <rhaas@postgresql.org> 2015-10-16 11:56:02 -0400
commit: bfc78d7196eb28cd4e3d6c24f7e607bacecf1129 (patch)
tree: e13e9937fcfc2666def5dfad1aa8cdd6038fdd78 /src/backend/executor/execMain.c
parent: 816e336f12ecabdc834d4cc31bcf966b2dd323dc (diff)
download: postgresql-bfc78d7196eb28cd4e3d6c24f7e607bacecf1129.tar.gz
download: postgresql-bfc78d7196eb28cd4e3d6c24f7e607bacecf1129.zip
Rewrite interaction of parallel mode with parallel executor support.
In the previous coding, before returning from ExecutorRun, we'd shut down all parallel workers. This was dead wrong if ExecutorRun was called with a non-zero tuple count; it had the effect of truncating the query output. To fix, give ExecutePlan control over whether to enter parallel mode, and have it refuse to do so if the tuple count is non-zero. Rewrite the Gather logic so that it can cope with being called outside parallel mode. Commit 7aea8e4f2daa4b39ca9d1309a0c4aadb0f7ed81b is largely to blame for this problem, though this patch modifies some subsequently-committed code which relied on the guarantees it purported to make.
Diffstat (limited to 'src/backend/executor/execMain.c')
-rw-r--r--  src/backend/executor/execMain.c  37
1 file changed, 24 insertions(+), 13 deletions(-)
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index 37b7bbd413b..a55022e0a80 100644
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -76,6 +76,7 @@ static void CheckValidRowMarkRel(Relation rel, RowMarkType markType);
static void ExecPostprocessPlan(EState *estate);
static void ExecEndPlan(PlanState *planstate, EState *estate);
static void ExecutePlan(EState *estate, PlanState *planstate,
+ bool use_parallel_mode,
CmdType operation,
bool sendTuples,
long numberTuples,
@@ -243,11 +244,6 @@ standard_ExecutorStart(QueryDesc *queryDesc, int eflags)
if (!(eflags & (EXEC_FLAG_SKIP_TRIGGERS | EXEC_FLAG_EXPLAIN_ONLY)))
AfterTriggerBeginQuery();
- /* Enter parallel mode, if required by the query. */
- if (queryDesc->plannedstmt->parallelModeNeeded &&
- !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
- EnterParallelMode();
-
MemoryContextSwitchTo(oldcontext);
}
@@ -341,15 +337,13 @@ standard_ExecutorRun(QueryDesc *queryDesc,
if (!ScanDirectionIsNoMovement(direction))
ExecutePlan(estate,
queryDesc->planstate,
+ queryDesc->plannedstmt->parallelModeNeeded,
operation,
sendTuples,
count,
direction,
dest);
- /* Allow nodes to release or shut down resources. */
- (void) ExecShutdownNode(queryDesc->planstate);
-
/*
* shutdown tuple receiver, if we started it
*/
@@ -482,11 +476,6 @@ standard_ExecutorEnd(QueryDesc *queryDesc)
*/
MemoryContextSwitchTo(oldcontext);
- /* Exit parallel mode, if it was required by the query. */
- if (queryDesc->plannedstmt->parallelModeNeeded &&
- !(estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY))
- ExitParallelMode();
-
/*
* Release EState and per-query memory context. This should release
* everything the executor has allocated.
@@ -1529,6 +1518,7 @@ ExecEndPlan(PlanState *planstate, EState *estate)
static void
ExecutePlan(EState *estate,
PlanState *planstate,
+ bool use_parallel_mode,
CmdType operation,
bool sendTuples,
long numberTuples,
@@ -1549,6 +1539,20 @@ ExecutePlan(EState *estate,
estate->es_direction = direction;
/*
+ * If a tuple count was supplied, we must force the plan to run without
+ * parallelism, because we might exit early.
+ */
+ if (numberTuples != 0)
+ use_parallel_mode = false;
+
+ /*
+ * Enter parallel mode, if required by the plan.  (This must be done
+ * after the tuple-count check above, which may have cleared
+ * use_parallel_mode.)
+ */
+ if (use_parallel_mode)
+ EnterParallelMode();
+
+ /*
* Loop until we've processed the proper number of tuples from the plan.
*/
for (;;)
@@ -1566,7 +1570,11 @@ ExecutePlan(EState *estate,
* process so we just end the loop...
*/
if (TupIsNull(slot))
+ {
+ /* Allow nodes to release or shut down resources. */
+ (void) ExecShutdownNode(planstate);
break;
+ }
/*
* If we have a junk filter, then project a new tuple with the junk
@@ -1603,6 +1611,9 @@ ExecutePlan(EState *estate,
if (numberTuples && numberTuples == current_tuple_count)
break;
}
+
+ if (use_parallel_mode)
+ ExitParallelMode();
}