-rw-r--r--  src/backend/access/transam/parallel.c  | 19
-rw-r--r--  src/backend/executor/nodeHashjoin.c    |  9
-rw-r--r--  src/backend/optimizer/plan/planner.c   |  6
3 files changed, 23 insertions(+), 11 deletions(-)
diff --git a/src/backend/access/transam/parallel.c b/src/backend/access/transam/parallel.c
index 4a2e352d579..a10bf02ccff 100644
--- a/src/backend/access/transam/parallel.c
+++ b/src/backend/access/transam/parallel.c
@@ -231,6 +231,15 @@ InitializeParallelDSM(ParallelContext *pcxt)
shm_toc_estimate_keys(&pcxt->estimator, 1);
/*
+ * If we manage to reach here while non-interruptible, it's unsafe to
+ * launch any workers: we would fail to process interrupts sent by them.
+ * We can deal with that edge case by pretending no workers were
+ * requested.
+ */
+ if (!INTERRUPTS_CAN_BE_PROCESSED())
+ pcxt->nworkers = 0;
+
+ /*
* Normally, the user will have requested at least one worker process, but
* if by chance they have not, we can skip a bunch of things here.
*/
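For context, INTERRUPTS_CAN_BE_PROCESSED() (from miscadmin.h) reports whether a CHECK_FOR_INTERRUPTS() at this point could actually service pending interrupts, i.e. whether we are outside any interrupt hold-off or critical section. A rough paraphrase, not the literal definition:

    /*
     * Paraphrase of the miscadmin.h macro: interrupts are serviceable only
     * when no HOLD_INTERRUPTS() or critical section is in effect.
     */
    #define INTERRUPTS_CAN_BE_PROCESSED() \
        (InterruptHoldoffCount == 0 && CritSectionCount == 0)

If that condition is false, a worker's notification could sit unserviced indefinitely, which is why launching workers here would be unsafe.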
@@ -476,6 +485,9 @@ InitializeParallelDSM(ParallelContext *pcxt)
shm_toc_insert(pcxt->toc, PARALLEL_KEY_ENTRYPOINT, entrypointstate);
}
+ /* Update nworkers_to_launch, in case we changed nworkers above. */
+ pcxt->nworkers_to_launch = pcxt->nworkers;
+
/* Restore previous memory context. */
MemoryContextSwitchTo(oldcontext);
}
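This assignment matters because LaunchParallelWorkers() launches pcxt->nworkers_to_launch workers, not pcxt->nworkers. A condensed sketch of the usual caller flow ("worker_main" is a placeholder entry point, not a real function):

    /* Hypothetical caller flow, condensed from the usual parallel-query pattern. */
    ParallelContext *pcxt = CreateParallelContext("postgres", "worker_main", nworkers);
    InitializeParallelDSM(pcxt);          /* may force nworkers (and nworkers_to_launch) to 0 */
    LaunchParallelWorkers(pcxt);          /* starts pcxt->nworkers_to_launch workers */
    WaitForParallelWorkersToFinish(pcxt);
    DestroyParallelContext(pcxt);

Without this update, nworkers_to_launch could retain a stale nonzero value from CreateParallelContext after nworkers was forced to zero above.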
@@ -539,10 +551,11 @@ ReinitializeParallelWorkers(ParallelContext *pcxt, int nworkers_to_launch)
{
/*
* The number of workers that need to be launched must be less than the
- * number of workers with which the parallel context is initialized.
+ * number of workers with which the parallel context is initialized. But
+ * the caller might not know that InitializeParallelDSM reduced nworkers,
+ * so just silently trim the request.
*/
- Assert(pcxt->nworkers >= nworkers_to_launch);
- pcxt->nworkers_to_launch = nworkers_to_launch;
+ pcxt->nworkers_to_launch = Min(pcxt->nworkers, nworkers_to_launch);
}
/*
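The practical effect of the Min(): a caller such as Gather's rescan path may ask for the worker count it planned with, while the context was initialized with fewer (possibly zero) workers. A minimal illustration of the new clamping behavior:

    /* Suppose InitializeParallelDSM forced pcxt->nworkers to 0. */
    ReinitializeParallelWorkers(pcxt, 4);
    /*
     * Before this patch, Assert(pcxt->nworkers >= nworkers_to_launch) would
     * have failed here; now pcxt->nworkers_to_launch == Min(0, 4) == 0.
     */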
diff --git a/src/backend/executor/nodeHashjoin.c b/src/backend/executor/nodeHashjoin.c
index 2f7170604d6..6c3009fba0f 100644
--- a/src/backend/executor/nodeHashjoin.c
+++ b/src/backend/executor/nodeHashjoin.c
@@ -1713,8 +1713,13 @@ void
ExecHashJoinReInitializeDSM(HashJoinState *state, ParallelContext *pcxt)
{
int plan_node_id = state->js.ps.plan->plan_node_id;
- ParallelHashJoinState *pstate =
- shm_toc_lookup(pcxt->toc, plan_node_id, false);
+ ParallelHashJoinState *pstate;
+
+ /* Nothing to do if we failed to create a DSM segment. */
+ if (pcxt->seg == NULL)
+ return;
+
+ pstate = shm_toc_lookup(pcxt->toc, plan_node_id, false);
/*
* It would be possible to reuse the shared hash table in single-batch
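For reference, pcxt->seg can legitimately be NULL here: InitializeParallelDSM creates a DSM segment only when workers are requested, and falls back to backend-private memory otherwise or when dsm_create() cannot allocate a segment. Roughly (paraphrased from parallel.c; details elided):

    /* Paraphrased fallback in InitializeParallelDSM, not the literal code. */
    if (pcxt->nworkers > 0)
        pcxt->seg = dsm_create(segsize, DSM_CREATE_NULL_IF_MAXSEGMENTS);
    if (pcxt->seg == NULL)
    {
        /* backend-private fallback; pcxt->seg stays NULL */
        pcxt->private_memory = MemoryContextAlloc(TopMemoryContext, segsize);
        pcxt->toc = shm_toc_create(PARALLEL_MAGIC, pcxt->private_memory, segsize);
    }

With nworkers now potentially forced to zero by the parallel.c change above, a rescan can reach this function with no segment; the early return mirrors the corresponding guard in ExecHashJoinInitializeDSM, which skips shared hash-table setup when there is no real segment (and hence no DSA area).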
diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c
index 3e3f0d486a2..1f78dc3d530 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -342,11 +342,6 @@ standard_planner(Query *parse, const char *query_string, int cursorOptions,
* we want to allow parallel inserts in general; updates and deletes have
* additional problems especially around combo CIDs.)
*
- * We don't try to use parallel mode unless interruptible. The leader
- * expects ProcessInterrupts() calls to reach HandleParallelMessages().
- * Even if we called HandleParallelMessages() another way, starting a
- * parallel worker is too delay-prone to be prudent when uncancellable.
- *
* For now, we don't try to use parallel mode if we're running inside a
* parallel worker. We might eventually be able to relax this
* restriction, but for now it seems best not to have parallel workers
@@ -357,7 +352,6 @@ standard_planner(Query *parse, const char *query_string, int cursorOptions,
parse->commandType == CMD_SELECT &&
!parse->hasModifyingCTE &&
max_parallel_workers_per_gather > 0 &&
- INTERRUPTS_CAN_BE_PROCESSED() &&
!IsParallelWorker())
{
/* all the cheap tests pass, so scan the query tree */