Mirror of https://github.com/postgres/postgres.git (synced 2025-07-09 22:41:56 +03:00)
Mop-up for wait event naming issues.
Synchronize the event names for parallel hash join waits with other event names, by getting rid of the slashes and dropping "-ing" suffixes. Rename ClogGroupUpdate to XactGroupUpdate, to match the new SLRU name. Move the ProcSignalBarrier event to the IPC category; it doesn't belong under IO.

Also a bit more wordsmithing in the wait event documentation tables.

Discussion: https://postgr.es/m/4505.1589640417@sss.pgh.pa.us
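For context, here is a standalone sketch (not PostgreSQL source) of the pattern these renamed constants feed into: each WAIT_EVENT_* value is translated to a CamelCase display name, in the style of pgstat_get_wait_ipc(), and that name is what a monitoring view such as pg_stat_activity.wait_event shows while a backend sits in BarrierArriveAndWait(). The enum members below mirror the post-rename constants in this commit; the helper function and the exact display strings are illustrative, not copied from pgstat.c.

/*
 * Standalone illustration of mapping wait-event enum values to the
 * user-visible names reported while a backend waits.  Assumed names:
 * wait_event_ipc_name() is a hypothetical helper for this sketch.
 */
#include <stdio.h>

typedef enum WaitEventIPC
{
	WAIT_EVENT_HASH_BATCH_ALLOCATE,
	WAIT_EVENT_HASH_BATCH_ELECT,
	WAIT_EVENT_HASH_BATCH_LOAD,
	WAIT_EVENT_HASH_BUILD_ALLOCATE,
	WAIT_EVENT_HASH_BUILD_ELECT,
	WAIT_EVENT_HASH_BUILD_HASH_INNER,
	WAIT_EVENT_HASH_BUILD_HASH_OUTER
} WaitEventIPC;

/* Map an enum value to its display name: no slashes, no "-ing" suffix. */
static const char *
wait_event_ipc_name(WaitEventIPC w)
{
	switch (w)
	{
		case WAIT_EVENT_HASH_BATCH_ALLOCATE:
			return "HashBatchAllocate";
		case WAIT_EVENT_HASH_BATCH_ELECT:
			return "HashBatchElect";
		case WAIT_EVENT_HASH_BATCH_LOAD:
			return "HashBatchLoad";
		case WAIT_EVENT_HASH_BUILD_ALLOCATE:
			return "HashBuildAllocate";
		case WAIT_EVENT_HASH_BUILD_ELECT:
			return "HashBuildElect";
		case WAIT_EVENT_HASH_BUILD_HASH_INNER:
			return "HashBuildHashInner";
		case WAIT_EVENT_HASH_BUILD_HASH_OUTER:
			return "HashBuildHashOuter";
	}
	return "???";
}

int
main(void)
{
	/* Print the name a monitoring view would display for this wait. */
	printf("%s\n", wait_event_ipc_name(WAIT_EVENT_HASH_BUILD_HASH_INNER));
	return 0;
}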
src/backend/executor/nodeHash.c

@@ -255,7 +255,7 @@ MultiExecParallelHash(HashState *node)
                 * ExecHashTableCreate(), or someone else is doing that.  Either
                 * way, wait for everyone to arrive here so we can proceed.
                 */
-                BarrierArriveAndWait(build_barrier, WAIT_EVENT_HASH_BUILD_ALLOCATING);
+                BarrierArriveAndWait(build_barrier, WAIT_EVENT_HASH_BUILD_ALLOCATE);
                /* Fall through. */

            case PHJ_BUILD_HASHING_INNER:
@@ -311,7 +311,7 @@ MultiExecParallelHash(HashState *node)
             * counters.
             */
            if (BarrierArriveAndWait(build_barrier,
-                                     WAIT_EVENT_HASH_BUILD_HASHING_INNER))
+                                     WAIT_EVENT_HASH_BUILD_HASH_INNER))
            {
                /*
                 * Elect one backend to disable any further growth.  Batches
@@ -603,7 +603,7 @@ ExecHashTableCreate(HashState *state, List *hashOperators, List *hashCollations,
     * backend will be elected to do that now if necessary.
     */
    if (BarrierPhase(build_barrier) == PHJ_BUILD_ELECTING &&
-        BarrierArriveAndWait(build_barrier, WAIT_EVENT_HASH_BUILD_ELECTING))
+        BarrierArriveAndWait(build_barrier, WAIT_EVENT_HASH_BUILD_ELECT))
    {
        pstate->nbatch = nbatch;
        pstate->space_allowed = space_allowed;
@@ -1076,7 +1076,7 @@ ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable)
             * tuples.
             */
            if (BarrierArriveAndWait(&pstate->grow_batches_barrier,
-                                     WAIT_EVENT_HASH_GROW_BATCHES_ELECTING))
+                                     WAIT_EVENT_HASH_GROW_BATCHES_ELECT))
            {
                dsa_pointer_atomic *buckets;
                ParallelHashJoinBatch *old_batch0;
@@ -1186,7 +1186,7 @@ ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable)
        case PHJ_GROW_BATCHES_ALLOCATING:
            /* Wait for the above to be finished. */
            BarrierArriveAndWait(&pstate->grow_batches_barrier,
-                                 WAIT_EVENT_HASH_GROW_BATCHES_ALLOCATING);
+                                 WAIT_EVENT_HASH_GROW_BATCHES_ALLOCATE);
            /* Fall through. */

        case PHJ_GROW_BATCHES_REPARTITIONING:
@@ -1199,7 +1199,7 @@ ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable)
            ExecParallelHashMergeCounters(hashtable);
            /* Wait for the above to be finished. */
            BarrierArriveAndWait(&pstate->grow_batches_barrier,
-                                 WAIT_EVENT_HASH_GROW_BATCHES_REPARTITIONING);
+                                 WAIT_EVENT_HASH_GROW_BATCHES_REPARTITION);
            /* Fall through. */

        case PHJ_GROW_BATCHES_DECIDING:
@@ -1210,7 +1210,7 @@ ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable)
             * not helping.
             */
            if (BarrierArriveAndWait(&pstate->grow_batches_barrier,
-                                     WAIT_EVENT_HASH_GROW_BATCHES_DECIDING))
+                                     WAIT_EVENT_HASH_GROW_BATCHES_DECIDE))
            {
                bool space_exhausted = false;
                bool extreme_skew_detected = false;
@@ -1260,7 +1260,7 @@ ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable)
        case PHJ_GROW_BATCHES_FINISHING:
            /* Wait for the above to complete. */
            BarrierArriveAndWait(&pstate->grow_batches_barrier,
-                                 WAIT_EVENT_HASH_GROW_BATCHES_FINISHING);
+                                 WAIT_EVENT_HASH_GROW_BATCHES_FINISH);
    }
 }

@@ -1509,7 +1509,7 @@ ExecParallelHashIncreaseNumBuckets(HashJoinTable hashtable)
        case PHJ_GROW_BUCKETS_ELECTING:
            /* Elect one participant to prepare to increase nbuckets. */
            if (BarrierArriveAndWait(&pstate->grow_buckets_barrier,
-                                     WAIT_EVENT_HASH_GROW_BUCKETS_ELECTING))
+                                     WAIT_EVENT_HASH_GROW_BUCKETS_ELECT))
            {
                size_t size;
                dsa_pointer_atomic *buckets;
@@ -1538,7 +1538,7 @@ ExecParallelHashIncreaseNumBuckets(HashJoinTable hashtable)
        case PHJ_GROW_BUCKETS_ALLOCATING:
            /* Wait for the above to complete. */
            BarrierArriveAndWait(&pstate->grow_buckets_barrier,
-                                 WAIT_EVENT_HASH_GROW_BUCKETS_ALLOCATING);
+                                 WAIT_EVENT_HASH_GROW_BUCKETS_ALLOCATE);
            /* Fall through. */

        case PHJ_GROW_BUCKETS_REINSERTING:
@@ -1573,7 +1573,7 @@ ExecParallelHashIncreaseNumBuckets(HashJoinTable hashtable)
                CHECK_FOR_INTERRUPTS();
            }
            BarrierArriveAndWait(&pstate->grow_buckets_barrier,
-                                 WAIT_EVENT_HASH_GROW_BUCKETS_REINSERTING);
+                                 WAIT_EVENT_HASH_GROW_BUCKETS_REINSERT);
    }
 }
src/backend/executor/nodeHashjoin.c

@@ -327,7 +327,7 @@ ExecHashJoinImpl(PlanState *pstate, bool parallel)
                    if (hashtable->nbatch > 1)
                        ExecParallelHashJoinPartitionOuter(node);
                    BarrierArriveAndWait(build_barrier,
-                                         WAIT_EVENT_HASH_BUILD_HASHING_OUTER);
+                                         WAIT_EVENT_HASH_BUILD_HASH_OUTER);
                }
                Assert(BarrierPhase(build_barrier) == PHJ_BUILD_DONE);

@@ -1135,14 +1135,14 @@ ExecParallelHashJoinNewBatch(HashJoinState *hjstate)

                    /* One backend allocates the hash table. */
                    if (BarrierArriveAndWait(batch_barrier,
-                                             WAIT_EVENT_HASH_BATCH_ELECTING))
+                                             WAIT_EVENT_HASH_BATCH_ELECT))
                        ExecParallelHashTableAlloc(hashtable, batchno);
                    /* Fall through. */

                case PHJ_BATCH_ALLOCATING:
                    /* Wait for allocation to complete. */
                    BarrierArriveAndWait(batch_barrier,
-                                         WAIT_EVENT_HASH_BATCH_ALLOCATING);
+                                         WAIT_EVENT_HASH_BATCH_ALLOCATE);
                    /* Fall through. */

                case PHJ_BATCH_LOADING:
@@ -1162,7 +1162,7 @@ ExecParallelHashJoinNewBatch(HashJoinState *hjstate)
                    }
                    sts_end_parallel_scan(inner_tuples);
                    BarrierArriveAndWait(batch_barrier,
-                                         WAIT_EVENT_HASH_BATCH_LOADING);
+                                         WAIT_EVENT_HASH_BATCH_LOAD);
                    /* Fall through. */

                case PHJ_BATCH_PROBING: