Stars: 1 · Forks: 0
mirror of https://github.com/postgres/postgres.git synced 2025-07-09 22:41:56 +03:00

Dial back -Wimplicit-fallthrough to level 3

The additional pain from level 4 is excessive for the gain.

Also revert all the source annotation changes to their original
wordings, to avoid back-patching pain.

Discussion: https://postgr.es/m/31166.1589378554@sss.pgh.pa.us
This commit is contained in:
Alvaro Herrera
2020-05-13 15:31:14 -04:00
parent 81ca868630
commit 17cc133f01
34 changed files with 194 additions and 199 deletions

View File

@ -256,7 +256,7 @@ MultiExecParallelHash(HashState *node)
* way, wait for everyone to arrive here so we can proceed.
*/
BarrierArriveAndWait(build_barrier, WAIT_EVENT_HASH_BUILD_ALLOCATING);
/* FALLTHROUGH */
/* Fall through. */
case PHJ_BUILD_HASHING_INNER:
@ -1181,13 +1181,13 @@ ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable)
/* All other participants just flush their tuples to disk. */
ExecParallelHashCloseBatchAccessors(hashtable);
}
/* FALLTHROUGH */
/* Fall through. */
case PHJ_GROW_BATCHES_ALLOCATING:
/* Wait for the above to be finished. */
BarrierArriveAndWait(&pstate->grow_batches_barrier,
WAIT_EVENT_HASH_GROW_BATCHES_ALLOCATING);
/* FALLTHROUGH */
/* Fall through. */
case PHJ_GROW_BATCHES_REPARTITIONING:
/* Make sure that we have the current dimensions and buckets. */
@ -1200,7 +1200,7 @@ ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable)
/* Wait for the above to be finished. */
BarrierArriveAndWait(&pstate->grow_batches_barrier,
WAIT_EVENT_HASH_GROW_BATCHES_REPARTITIONING);
/* FALLTHROUGH */
/* Fall through. */
case PHJ_GROW_BATCHES_DECIDING:
@ -1255,7 +1255,7 @@ ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable)
dsa_free(hashtable->area, pstate->old_batches);
pstate->old_batches = InvalidDsaPointer;
}
/* FALLTHROUGH */
/* Fall through. */
case PHJ_GROW_BATCHES_FINISHING:
/* Wait for the above to complete. */
@ -1533,13 +1533,13 @@ ExecParallelHashIncreaseNumBuckets(HashJoinTable hashtable)
/* Clear the flag. */
pstate->growth = PHJ_GROWTH_OK;
}
/* FALLTHROUGH */
/* Fall through. */
case PHJ_GROW_BUCKETS_ALLOCATING:
/* Wait for the above to complete. */
BarrierArriveAndWait(&pstate->grow_buckets_barrier,
WAIT_EVENT_HASH_GROW_BUCKETS_ALLOCATING);
/* FALLTHROUGH */
/* Fall through. */
case PHJ_GROW_BUCKETS_REINSERTING:
/* Reinsert all tuples into the hash table. */

View File

@ -340,7 +340,7 @@ ExecHashJoinImpl(PlanState *pstate, bool parallel)
else
node->hj_JoinState = HJ_NEED_NEW_OUTER;
/* FALLTHROUGH */
/* FALL THRU */
case HJ_NEED_NEW_OUTER:
@ -413,7 +413,7 @@ ExecHashJoinImpl(PlanState *pstate, bool parallel)
/* OK, let's scan the bucket for matches */
node->hj_JoinState = HJ_SCAN_BUCKET;
/* FALLTHROUGH */
/* FALL THRU */
case HJ_SCAN_BUCKET:
@ -1137,13 +1137,13 @@ ExecParallelHashJoinNewBatch(HashJoinState *hjstate)
if (BarrierArriveAndWait(batch_barrier,
WAIT_EVENT_HASH_BATCH_ELECTING))
ExecParallelHashTableAlloc(hashtable, batchno);
/* FALLTHROUGH */
/* Fall through. */
case PHJ_BATCH_ALLOCATING:
/* Wait for allocation to complete. */
BarrierArriveAndWait(batch_barrier,
WAIT_EVENT_HASH_BATCH_ALLOCATING);
/* FALLTHROUGH */
/* Fall through. */
case PHJ_BATCH_LOADING:
/* Start (or join in) loading tuples. */
@ -1163,7 +1163,7 @@ ExecParallelHashJoinNewBatch(HashJoinState *hjstate)
sts_end_parallel_scan(inner_tuples);
BarrierArriveAndWait(batch_barrier,
WAIT_EVENT_HASH_BATCH_LOADING);
/* FALLTHROUGH */
/* Fall through. */
case PHJ_BATCH_PROBING:

View File

@ -69,7 +69,7 @@ ExecLimit(PlanState *pstate)
*/
recompute_limits(node);
/* FALLTHROUGH */
/* FALL THRU */
case LIMIT_RESCAN:
@ -216,7 +216,7 @@ ExecLimit(PlanState *pstate)
}
Assert(node->lstate == LIMIT_WINDOWEND_TIES);
/* FALLTHROUGH */
/* FALL THRU */
case LIMIT_WINDOWEND_TIES:
if (ScanDirectionIsForward(direction))