diff --git a/src/backend/access/transam/parallel.c b/src/backend/access/transam/parallel.c
index 8ce95ab190..f8af9d3863 100644
--- a/src/backend/access/transam/parallel.c
+++ b/src/backend/access/transam/parallel.c
@@ -226,6 +226,15 @@ InitializeParallelDSM(ParallelContext *pcxt)
 	shm_toc_estimate_chunk(&pcxt->estimator, sizeof(FixedParallelState));
 	shm_toc_estimate_keys(&pcxt->estimator, 1);
 
+	/*
+	 * If we manage to reach here while non-interruptible, it's unsafe to
+	 * launch any workers: we would fail to process interrupts sent by them.
+	 * We can deal with that edge case by pretending no workers were
+	 * requested.
+	 */
+	if (!INTERRUPTS_CAN_BE_PROCESSED())
+		pcxt->nworkers = 0;
+
 	/*
 	 * Normally, the user will have requested at least one worker process, but
 	 * if by chance they have not, we can skip a bunch of things here.
@@ -463,6 +472,9 @@ InitializeParallelDSM(ParallelContext *pcxt)
 		shm_toc_insert(pcxt->toc, PARALLEL_KEY_ENTRYPOINT, entrypointstate);
 	}
 
+	/* Update nworkers_to_launch, in case we changed nworkers above. */
+	pcxt->nworkers_to_launch = pcxt->nworkers;
+
 	/* Restore previous memory context. */
 	MemoryContextSwitchTo(oldcontext);
 }
@@ -526,10 +538,11 @@ ReinitializeParallelWorkers(ParallelContext *pcxt, int nworkers_to_launch)
 {
 	/*
 	 * The number of workers that need to be launched must be less than the
-	 * number of workers with which the parallel context is initialized.
+	 * number of workers with which the parallel context is initialized.  But
+	 * the caller might not know that InitializeParallelDSM reduced nworkers,
+	 * so just silently trim the request.
 	 */
-	Assert(pcxt->nworkers >= nworkers_to_launch);
-	pcxt->nworkers_to_launch = nworkers_to_launch;
+	pcxt->nworkers_to_launch = Min(pcxt->nworkers, nworkers_to_launch);
 }
 
 /*
diff --git a/src/backend/executor/nodeHashjoin.c b/src/backend/executor/nodeHashjoin.c
index b2f1cf7fa7..43569fc58b 100644
--- a/src/backend/executor/nodeHashjoin.c
+++ b/src/backend/executor/nodeHashjoin.c
@@ -1524,8 +1524,13 @@ void
 ExecHashJoinReInitializeDSM(HashJoinState *state, ParallelContext *cxt)
 {
 	int			plan_node_id = state->js.ps.plan->plan_node_id;
-	ParallelHashJoinState *pstate =
-		shm_toc_lookup(cxt->toc, plan_node_id, false);
+	ParallelHashJoinState *pstate;
+
+	/* Nothing to do if we failed to create a DSM segment. */
+	if (cxt->seg == NULL)
+		return;
+
+	pstate = shm_toc_lookup(cxt->toc, plan_node_id, false);
 
 	/*
 	 * It would be possible to reuse the shared hash table in single-batch
diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c
index ba91f0062c..4979f17a91 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -327,11 +327,6 @@ standard_planner(Query *parse, const char *query_string, int cursorOptions,
 	 * if we want to allow parallel inserts in general; updates and deletes
 	 * have additional problems especially around combo CIDs.)
 	 *
-	 * We don't try to use parallel mode unless interruptible. The leader
-	 * expects ProcessInterrupts() calls to reach HandleParallelMessages().
-	 * Even if we called HandleParallelMessages() another way, starting a
-	 * parallel worker is too delay-prone to be prudent when uncancellable.
-	 *
 	 * For now, we don't try to use parallel mode if we're running inside a
 	 * parallel worker.  We might eventually be able to relax this
 	 * restriction, but for now it seems best not to have parallel workers
@@ -342,7 +337,6 @@ standard_planner(Query *parse, const char *query_string, int cursorOptions,
 		parse->commandType == CMD_SELECT &&
 		!parse->hasModifyingCTE &&
 		max_parallel_workers_per_gather > 0 &&
-		INTERRUPTS_CAN_BE_PROCESSED() &&
 		!IsParallelWorker())
 	{
 		/* all the cheap tests pass, so scan the query tree */
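
For illustration only, not part of the patch: a minimal standalone C sketch of the
behavior the parallel.c hunks combine to produce.  The names
ParallelContextSketch, interrupts_can_be_processed(), and the local Min() are
stand-ins invented here; in PostgreSQL the real pieces are ParallelContext,
INTERRUPTS_CAN_BE_PROCESSED(), and the Min() macro from c.h.

/*
 * Standalone sketch (not PostgreSQL source): if initialization reduced
 * nworkers to 0 because interrupts could not be processed, a later relaunch
 * request is silently capped rather than tripping an assertion.
 */
#include <stdio.h>
#include <stdbool.h>

#define Min(x, y) ((x) < (y) ? (x) : (y))

typedef struct ParallelContextSketch
{
	int			nworkers;			/* workers the context was set up for */
	int			nworkers_to_launch; /* workers the next launch will start */
} ParallelContextSketch;

/* Stand-in for INTERRUPTS_CAN_BE_PROCESSED(); pretend we're holding off. */
static bool
interrupts_can_be_processed(void)
{
	return false;
}

static void
initialize_parallel_dsm_sketch(ParallelContextSketch *pcxt)
{
	/* The patched edge case: non-interruptible, so pretend no workers. */
	if (!interrupts_can_be_processed())
		pcxt->nworkers = 0;
	/* The patch also syncs nworkers_to_launch after any reduction. */
	pcxt->nworkers_to_launch = pcxt->nworkers;
}

static void
reinitialize_parallel_workers_sketch(ParallelContextSketch *pcxt,
									 int nworkers_to_launch)
{
	/* Trim silently instead of Assert(pcxt->nworkers >= nworkers_to_launch). */
	pcxt->nworkers_to_launch = Min(pcxt->nworkers, nworkers_to_launch);
}

int
main(void)
{
	ParallelContextSketch pcxt = {.nworkers = 4, .nworkers_to_launch = 4};

	initialize_parallel_dsm_sketch(&pcxt);
	/* The caller still believes 4 workers exist and asks for them again. */
	reinitialize_parallel_workers_sketch(&pcxt, 4);
	printf("workers to launch: %d\n", pcxt.nworkers_to_launch);	/* prints 0 */
	return 0;
}

Compiled and run, this prints "workers to launch: 0": the relaunch request for
four workers is capped by the nworkers that initialization reduced to zero,
which is why the old Assert had to become a Min().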