path: root/src/backend/executor/nodeHash.c
author    Alvaro Herrera <alvherre@alvh.no-ip.org>    2020-05-13 15:31:14 -0400
committer Alvaro Herrera <alvherre@alvh.no-ip.org>    2020-05-13 15:31:14 -0400
commit    17cc133f017cb13737e23ce0da4415daf2c34cc3 (patch)
tree      45bd1e33f52902f315aa6364fd9fdf1ff152403a /src/backend/executor/nodeHash.c
parent    81ca8686305c4c62d723ab224ad5c414f350a3a0 (diff)
Dial back -Wimplicit-fallthrough to level 3
The additional pain from level 4 is excessive for the gain. Also revert all the source annotation changes to their original wordings, to avoid back-patching pain.

Discussion: https://postgr.es/m/31166.1589378554@sss.pgh.pa.us
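For context, GCC's -Wimplicit-fallthrough accepts different comment spellings depending on its level: level 3 (the default with -Wextra) recognizes wordings such as /* Fall through. */, while level 4 only accepts a narrower set such as /* FALLTHROUGH */, and level 5 accepts no comments at all. The following stand-alone sketch is not PostgreSQL source and its names are made up for illustration; it simply shows a switch whose fall-through comment is accepted at level 3 but would be flagged at level 4.

/*
 * Minimal sketch (hypothetical code, not from the tree): compiled with
 *   gcc -Wextra -Wimplicit-fallthrough=3 -c fallthrough_demo.c
 * the "Fall through." comment below suppresses the implicit-fallthrough
 * warning; at -Wimplicit-fallthrough=4 only stricter spellings such as
 * "FALLTHROUGH" are recognized, which is what this commit steps back from.
 */
#include <stdio.h>

static void
report_phase(int phase)			/* hypothetical example function */
{
	switch (phase)
	{
		case 0:
			printf("allocating\n");
			/* Fall through. */
		case 1:
			printf("hashing inner\n");
			break;
		default:
			printf("done\n");
			break;
	}
}

int
main(void)
{
	report_phase(0);
	return 0;
}

Staying at level 3 lets the back branches keep their original "Fall through." wordings, so fixes that touch these switch statements continue to back-patch cleanly.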
Diffstat (limited to 'src/backend/executor/nodeHash.c')
-rw-r--r--    src/backend/executor/nodeHash.c | 14
1 file changed, 7 insertions, 7 deletions
diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c
index 4516c6346bd..5da13ada726 100644
--- a/src/backend/executor/nodeHash.c
+++ b/src/backend/executor/nodeHash.c
@@ -256,7 +256,7 @@ MultiExecParallelHash(HashState *node)
* way, wait for everyone to arrive here so we can proceed.
*/
BarrierArriveAndWait(build_barrier, WAIT_EVENT_HASH_BUILD_ALLOCATING);
- /* FALLTHROUGH */
+ /* Fall through. */
case PHJ_BUILD_HASHING_INNER:
@@ -1181,13 +1181,13 @@ ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable)
/* All other participants just flush their tuples to disk. */
ExecParallelHashCloseBatchAccessors(hashtable);
}
- /* FALLTHROUGH */
+ /* Fall through. */
case PHJ_GROW_BATCHES_ALLOCATING:
/* Wait for the above to be finished. */
BarrierArriveAndWait(&pstate->grow_batches_barrier,
WAIT_EVENT_HASH_GROW_BATCHES_ALLOCATING);
- /* FALLTHROUGH */
+ /* Fall through. */
case PHJ_GROW_BATCHES_REPARTITIONING:
/* Make sure that we have the current dimensions and buckets. */
@@ -1200,7 +1200,7 @@ ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable)
/* Wait for the above to be finished. */
BarrierArriveAndWait(&pstate->grow_batches_barrier,
WAIT_EVENT_HASH_GROW_BATCHES_REPARTITIONING);
- /* FALLTHROUGH */
+ /* Fall through. */
case PHJ_GROW_BATCHES_DECIDING:
@@ -1255,7 +1255,7 @@ ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable)
dsa_free(hashtable->area, pstate->old_batches);
pstate->old_batches = InvalidDsaPointer;
}
- /* FALLTHROUGH */
+ /* Fall through. */
case PHJ_GROW_BATCHES_FINISHING:
/* Wait for the above to complete. */
@@ -1533,13 +1533,13 @@ ExecParallelHashIncreaseNumBuckets(HashJoinTable hashtable)
/* Clear the flag. */
pstate->growth = PHJ_GROWTH_OK;
}
- /* FALLTHROUGH */
+ /* Fall through. */
case PHJ_GROW_BUCKETS_ALLOCATING:
/* Wait for the above to complete. */
BarrierArriveAndWait(&pstate->grow_buckets_barrier,
WAIT_EVENT_HASH_GROW_BUCKETS_ALLOCATING);
- /* FALLTHROUGH */
+ /* Fall through. */
case PHJ_GROW_BUCKETS_REINSERTING:
/* Reinsert all tuples into the hash table. */