diff options
author | Thomas Munro <tmunro@postgresql.org> | 2021-03-17 18:24:45 +1300 |
---|---|---|
committer | Thomas Munro <tmunro@postgresql.org> | 2021-03-17 18:43:04 +1300 |
commit | 378802e3713c6c0fce31d2390c134cd5d7c30157 (patch) | |
tree | 450729ca825574a00ee8513fff49f987ea0a8e59 /src/backend/executor/nodeHash.c | |
parent | 3b8981b6e1a2aea0f18384c803e21e9391de669a (diff) | |
download | postgresql-378802e3713c6c0fce31d2390c134cd5d7c30157.tar.gz postgresql-378802e3713c6c0fce31d2390c134cd5d7c30157.zip |
Update the names of Parallel Hash Join phases.
Commit 3048898e dropped -ING from some wait event names that correspond
to barrier phases. Update the phases' names to match.
While we're here making cosmetic changes, also rename "DONE" to "FREE".
That pairs better with "ALLOCATE", and describes the activity that
actually happens in that phase (as we do for the other phases) rather
than describing a state. The distinction is clearer after bugfix commit
3b8981b6 split the phase into two. As for the growth barriers, rename
their "ALLOCATE" phase to "REALLOCATE", which is probably a better
description of what happens then. Also improve the comments about
the phases a bit.
Discussion: https://postgr.es/m/CA%2BhUKG%2BMDpwF2Eo2LAvzd%3DpOh81wUTsrwU1uAwR-v6OGBB6%2B7g%40mail.gmail.com
Diffstat (limited to 'src/backend/executor/nodeHash.c')
-rw-r--r-- | src/backend/executor/nodeHash.c | 72 |
1 file changed, 36 insertions, 36 deletions
diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c index c41c86ab51e..98db9c2fb0d 100644 --- a/src/backend/executor/nodeHash.c +++ b/src/backend/executor/nodeHash.c @@ -246,10 +246,10 @@ MultiExecParallelHash(HashState *node) */ pstate = hashtable->parallel_state; build_barrier = &pstate->build_barrier; - Assert(BarrierPhase(build_barrier) >= PHJ_BUILD_ALLOCATING); + Assert(BarrierPhase(build_barrier) >= PHJ_BUILD_ALLOCATE); switch (BarrierPhase(build_barrier)) { - case PHJ_BUILD_ALLOCATING: + case PHJ_BUILD_ALLOCATE: /* * Either I just allocated the initial hash table in @@ -259,7 +259,7 @@ MultiExecParallelHash(HashState *node) BarrierArriveAndWait(build_barrier, WAIT_EVENT_HASH_BUILD_ALLOCATE); /* Fall through. */ - case PHJ_BUILD_HASHING_INNER: + case PHJ_BUILD_HASH_INNER: /* * It's time to begin hashing, or if we just arrived here then @@ -271,10 +271,10 @@ MultiExecParallelHash(HashState *node) * below. */ if (PHJ_GROW_BATCHES_PHASE(BarrierAttach(&pstate->grow_batches_barrier)) != - PHJ_GROW_BATCHES_ELECTING) + PHJ_GROW_BATCHES_ELECT) ExecParallelHashIncreaseNumBatches(hashtable); if (PHJ_GROW_BUCKETS_PHASE(BarrierAttach(&pstate->grow_buckets_barrier)) != - PHJ_GROW_BUCKETS_ELECTING) + PHJ_GROW_BUCKETS_ELECT) ExecParallelHashIncreaseNumBuckets(hashtable); ExecParallelHashEnsureBatchAccessors(hashtable); ExecParallelHashTableSetCurrentBatch(hashtable, 0); @@ -338,17 +338,17 @@ MultiExecParallelHash(HashState *node) * Unless we're completely done and the batch state has been freed, make * sure we have accessors. */ - if (BarrierPhase(build_barrier) < PHJ_BUILD_DONE) + if (BarrierPhase(build_barrier) < PHJ_BUILD_FREE) ExecParallelHashEnsureBatchAccessors(hashtable); /* * The next synchronization point is in ExecHashJoin's HJ_BUILD_HASHTABLE - * case, which will bring the build phase to PHJ_BUILD_RUNNING (if it isn't + * case, which will bring the build phase to PHJ_BUILD_RUN (if it isn't * there already). 
*/ - Assert(BarrierPhase(build_barrier) == PHJ_BUILD_HASHING_OUTER || - BarrierPhase(build_barrier) == PHJ_BUILD_RUNNING || - BarrierPhase(build_barrier) == PHJ_BUILD_DONE); + Assert(BarrierPhase(build_barrier) == PHJ_BUILD_HASH_OUTER || + BarrierPhase(build_barrier) == PHJ_BUILD_RUN || + BarrierPhase(build_barrier) == PHJ_BUILD_FREE); } /* ---------------------------------------------------------------- @@ -596,8 +596,8 @@ ExecHashTableCreate(HashState *state, List *hashOperators, List *hashCollations, * Attach to the build barrier. The corresponding detach operation is * in ExecHashTableDetach. Note that we won't attach to the * batch_barrier for batch 0 yet. We'll attach later and start it out - * in PHJ_BATCH_PROBING phase, because batch 0 is allocated up front - * and then loaded while hashing (the standard hybrid hash join + * in PHJ_BATCH_PROBE phase, because batch 0 is allocated up front and + * then loaded while hashing (the standard hybrid hash join * algorithm), and we'll coordinate that using build_barrier. */ build_barrier = &pstate->build_barrier; @@ -610,7 +610,7 @@ ExecHashTableCreate(HashState *state, List *hashOperators, List *hashCollations, * SharedHashJoinBatch objects and the hash table for batch 0. One * backend will be elected to do that now if necessary. */ - if (BarrierPhase(build_barrier) == PHJ_BUILD_ELECTING && + if (BarrierPhase(build_barrier) == PHJ_BUILD_ELECT && BarrierArriveAndWait(build_barrier, WAIT_EVENT_HASH_BUILD_ELECT)) { pstate->nbatch = nbatch; @@ -631,7 +631,7 @@ ExecHashTableCreate(HashState *state, List *hashOperators, List *hashCollations, /* * The next Parallel Hash synchronization point is in * MultiExecParallelHash(), which will progress it all the way to - * PHJ_BUILD_RUNNING. The caller must not return control from this + * PHJ_BUILD_RUN. The caller must not return control from this * executor node between now and then. 
*/ } @@ -1067,7 +1067,7 @@ ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable) ParallelHashJoinState *pstate = hashtable->parallel_state; int i; - Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASHING_INNER); + Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASH_INNER); /* * It's unlikely, but we need to be prepared for new participants to show @@ -1076,7 +1076,7 @@ ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable) */ switch (PHJ_GROW_BATCHES_PHASE(BarrierPhase(&pstate->grow_batches_barrier))) { - case PHJ_GROW_BATCHES_ELECTING: + case PHJ_GROW_BATCHES_ELECT: /* * Elect one participant to prepare to grow the number of batches. @@ -1194,13 +1194,13 @@ ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable) } /* Fall through. */ - case PHJ_GROW_BATCHES_ALLOCATING: + case PHJ_GROW_BATCHES_REALLOCATE: /* Wait for the above to be finished. */ BarrierArriveAndWait(&pstate->grow_batches_barrier, - WAIT_EVENT_HASH_GROW_BATCHES_ALLOCATE); + WAIT_EVENT_HASH_GROW_BATCHES_REALLOCATE); /* Fall through. */ - case PHJ_GROW_BATCHES_REPARTITIONING: + case PHJ_GROW_BATCHES_REPARTITION: /* Make sure that we have the current dimensions and buckets. */ ExecParallelHashEnsureBatchAccessors(hashtable); ExecParallelHashTableSetCurrentBatch(hashtable, 0); @@ -1213,7 +1213,7 @@ ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable) WAIT_EVENT_HASH_GROW_BATCHES_REPARTITION); /* Fall through. */ - case PHJ_GROW_BATCHES_DECIDING: + case PHJ_GROW_BATCHES_DECIDE: /* * Elect one participant to clean up and decide whether further @@ -1268,7 +1268,7 @@ ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable) } /* Fall through. */ - case PHJ_GROW_BATCHES_FINISHING: + case PHJ_GROW_BATCHES_FINISH: /* Wait for the above to complete. 
*/ BarrierArriveAndWait(&pstate->grow_batches_barrier, WAIT_EVENT_HASH_GROW_BATCHES_FINISH); @@ -1508,7 +1508,7 @@ ExecParallelHashIncreaseNumBuckets(HashJoinTable hashtable) HashMemoryChunk chunk; dsa_pointer chunk_s; - Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASHING_INNER); + Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASH_INNER); /* * It's unlikely, but we need to be prepared for new participants to show @@ -1517,7 +1517,7 @@ ExecParallelHashIncreaseNumBuckets(HashJoinTable hashtable) */ switch (PHJ_GROW_BUCKETS_PHASE(BarrierPhase(&pstate->grow_buckets_barrier))) { - case PHJ_GROW_BUCKETS_ELECTING: + case PHJ_GROW_BUCKETS_ELECT: /* Elect one participant to prepare to increase nbuckets. */ if (BarrierArriveAndWait(&pstate->grow_buckets_barrier, WAIT_EVENT_HASH_GROW_BUCKETS_ELECT)) @@ -1546,13 +1546,13 @@ ExecParallelHashIncreaseNumBuckets(HashJoinTable hashtable) } /* Fall through. */ - case PHJ_GROW_BUCKETS_ALLOCATING: + case PHJ_GROW_BUCKETS_REALLOCATE: /* Wait for the above to complete. */ BarrierArriveAndWait(&pstate->grow_buckets_barrier, - WAIT_EVENT_HASH_GROW_BUCKETS_ALLOCATE); + WAIT_EVENT_HASH_GROW_BUCKETS_REALLOCATE); /* Fall through. */ - case PHJ_GROW_BUCKETS_REINSERTING: + case PHJ_GROW_BUCKETS_REINSERT: /* Reinsert all tuples into the hash table. */ ExecParallelHashEnsureBatchAccessors(hashtable); ExecParallelHashTableSetCurrentBatch(hashtable, 0); @@ -1708,7 +1708,7 @@ retry: /* Try to load it into memory. 
*/ Assert(BarrierPhase(&hashtable->parallel_state->build_barrier) == - PHJ_BUILD_HASHING_INNER); + PHJ_BUILD_HASH_INNER); hashTuple = ExecParallelHashTupleAlloc(hashtable, HJTUPLE_OVERHEAD + tuple->t_len, &shared); @@ -2862,7 +2862,7 @@ ExecParallelHashTupleAlloc(HashJoinTable hashtable, size_t size, if (pstate->growth != PHJ_GROWTH_DISABLED) { Assert(curbatch == 0); - Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASHING_INNER); + Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASH_INNER); /* * Check if our space limit would be exceeded. To avoid choking on @@ -2982,7 +2982,7 @@ ExecParallelHashJoinSetUpBatches(HashJoinTable hashtable, int nbatch) { /* Batch 0 doesn't need to be loaded. */ BarrierAttach(&shared->batch_barrier); - while (BarrierPhase(&shared->batch_barrier) < PHJ_BATCH_PROBING) + while (BarrierPhase(&shared->batch_barrier) < PHJ_BATCH_PROBE) BarrierArriveAndWait(&shared->batch_barrier, 0); BarrierDetach(&shared->batch_barrier); } @@ -3056,8 +3056,8 @@ ExecParallelHashEnsureBatchAccessors(HashJoinTable hashtable) /* * We should never see a state where the batch-tracking array is freed, - * because we should have given up sooner if we join when the build barrier - * has reached the PHJ_BUILD_DONE phase. + * because we should have given up sooner if we join when the build + * barrier has reached the PHJ_BUILD_FREE phase. */ Assert(DsaPointerIsValid(pstate->batches)); @@ -3140,7 +3140,7 @@ ExecHashTableDetachBatch(HashJoinTable hashtable) * longer attached, but since there is no way it's moving after * this point it seems safe to make the following assertion. */ - Assert(BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_DONE); + Assert(BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_FREE); /* Free shared chunks and buckets. 
*/ while (DsaPointerIsValid(batch->chunks)) @@ -3183,12 +3183,12 @@ ExecHashTableDetach(HashJoinTable hashtable) /* * If we're involved in a parallel query, we must either have got all the - * way to PHJ_BUILD_RUNNING, or joined too late and be in PHJ_BUILD_DONE. + * way to PHJ_BUILD_RUN, or joined too late and be in PHJ_BUILD_FREE. */ Assert(!pstate || - BarrierPhase(&pstate->build_barrier) >= PHJ_BUILD_RUNNING); + BarrierPhase(&pstate->build_barrier) >= PHJ_BUILD_RUN); - if (pstate && BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_RUNNING) + if (pstate && BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_RUN) { int i; @@ -3211,7 +3211,7 @@ ExecHashTableDetach(HashJoinTable hashtable) * Late joining processes will see this state and give up * immediately. */ - Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_DONE); + Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_FREE); if (DsaPointerIsValid(pstate->batches)) { |