diff --git a/doc/src/sgml/monitoring.sgml b/doc/src/sgml/monitoring.sgml index afad0aaa991..e5b21cd8f61 100644 --- a/doc/src/sgml/monitoring.sgml +++ b/doc/src/sgml/monitoring.sgml @@ -1751,29 +1751,22 @@ postgres 27093 0.0 0.0 30096 2752 ? Ss 11:34 0:00 postgres: ser AddinShmemInitLock - Waiting to manage space allocation in shared memory. - - - NotifySLRULock - Waiting to access the NOTIFY message SLRU - cache. - - - NotifyQueueLock - Waiting to read or update NOTIFY messages. + Waiting to manage an extension's space allocation in shared + memory. AutoFileLock - Waiting to update the postgresql.auto.conf file. + Waiting to update the postgresql.auto.conf + file. AutovacuumLock - Autovacuum worker or launcher waiting to update or - read the current state of autovacuum workers. + Waiting to read or update the current state of autovacuum + workers. AutovacuumScheduleLock - Waiting to ensure that the table selected for a vacuum + Waiting to ensure that a table selected for autovacuum still needs vacuuming. @@ -1786,52 +1779,80 @@ postgres 27093 0.0 0.0 30096 2752 ? Ss 11:34 0:00 postgres: ser B-tree index. - XactSLRULock - Waiting to access the transaction status SLRU cache. + BufferContent + Waiting to access a data page in memory. - XactTruncationLock - Waiting to execute pg_xact_status or update - the oldest transaction ID available to it. + BufferIO + Waiting for I/O on a data page. + + + BufferMapping + Waiting to associate a data block with a buffer in the buffer + pool. CheckpointLock - Waiting to perform checkpoint. + Waiting to begin a checkpoint. CheckpointerCommLock Waiting to manage fsync requests. + + CommitTsLock + Waiting to read or update the last value set for a + transaction commit timestamp. + + + CommitTsBuffer + Waiting for I/O on a commit timestamp SLRU buffer. + CommitTsSLRULock Waiting to access the commit timestamp SLRU cache. - - CommitTsLock - Waiting to read or update the last value set for the - transaction timestamp. - ControlFileLock - Waiting to read or update the control file or creation of a - new WAL file. + Waiting to read or update the pg_control + file or create a new WAL file. DynamicSharedMemoryControlLock - Waiting to read or update dynamic shared memory state. + Waiting to read or update dynamic shared memory allocation + information. + + + LockFastPath + Waiting to read or update a process' fast-path lock + information. + + + LockManager + Waiting to read or update information + about heavyweight locks. LogicalRepWorkerLock - Waiting for action on logical replication worker to finish. + Waiting to read or update the state of logical replication + workers. MultiXactGenLock Waiting to read or update shared multixact state. + + MultiXactMemberBuffer + Waiting for I/O on a multixact member SLRU buffer. + MultiXactMemberSLRULock Waiting to access the multixact member SLRU cache. + + MultiXactOffsetBuffer + Waiting for I/O on a multixact offset SLRU buffer. + MultiXactOffsetSLRULock Waiting to access the multixact offset SLRU cache. @@ -1841,35 +1862,90 @@ postgres 27093 0.0 0.0 30096 2752 ? Ss 11:34 0:00 postgres: ser Waiting to read or truncate multixact information. - OidGenLock - Waiting to allocate or assign an OID. + NotifyBuffer + Waiting for I/O on a NOTIFY message SLRU + buffer. - SerialSLRULock - Waiting to access the serializable transaction conflict SLRU + NotifyQueueLock + Waiting to read or update NOTIFY messages. + + + NotifySLRULock + Waiting to access the NOTIFY message SLRU cache. + + OidGenLock + Waiting to allocate a new OID. 
+ OldSnapshotTimeMapLock Waiting to read or update old snapshot control information. - ProcArrayLock - Waiting to get a snapshot or clearing a transaction id at - transaction end. + ParallelAppend + Waiting to choose the next subplan during Parallel Append plan + execution. - RelCacheInitLock - Waiting to read or write relation cache initialization file. + ParallelHashJoin + Waiting to synchronize workers during Parallel Hash Join plan + execution. + + + ParallelQueryDSA + Waiting for parallel query dynamic shared memory allocation. + + + PerSessionDSA + Waiting for parallel query dynamic shared memory allocation. + + + PerSessionRecordType + Waiting to access a parallel query's information about composite + types. + + + PerSessionRecordTypmod + Waiting to access a parallel query's information about type + modifiers that identify anonymous record types. + + + PerXactPredicateList + Waiting to access the list of predicate locks held by the current + serializable transaction during a parallel query. + + + PredicateLockManager + Waiting to access predicate lock information used by + serializable transactions. + + + ProcArrayLock + Waiting to access the shared per-process data structures + (typically, to get a snapshot or report a session's transaction + ID). RelationMappingLock - Waiting to update the relation map file used to store catalog - to filenode mapping. + Waiting to read or update + a pg_filenode.map file (used to track the + filenode assignments of certain system catalogs). + + + RelCacheInitLock + Waiting to read or update a pg_internal.init + relation cache initialization file. ReplicationOriginLock - Waiting to setup, drop or use replication origin. + Waiting to create, drop or use a replication origin. + + + ReplicationOriginState + Waiting to read or update the progress of one replication + origin. ReplicationSlotAllocationLock @@ -1880,13 +1956,13 @@ postgres 27093 0.0 0.0 30096 2752 ? Ss 11:34 0:00 postgres: ser Waiting to read or update replication slot state. - SInvalReadLock - Waiting to retrieve or remove messages from shared invalidation - queue. + ReplicationSlotIO + Waiting for I/O on a replication slot. - SInvalWriteLock - Waiting to add a message in shared invalidation queue. + SerialBuffer + Waiting for I/O on a serializable transaction conflict SLRU + buffer. SerializableFinishedListLock @@ -1894,36 +1970,65 @@ postgres 27093 0.0 0.0 30096 2752 ? Ss 11:34 0:00 postgres: ser transactions. - SerializablePredicateLockListLock - Waiting to perform an operation on a list of locks held by + SerializablePredicateListLock + Waiting to access the list of predicate locks held by serializable transactions. SerializableXactHashLock - Waiting to retrieve or store information about serializable + Waiting to read or update information about serializable transactions. + + SerialSLRULock + Waiting to access the serializable transaction conflict SLRU + cache. + + + SharedTidBitmap + Waiting to access a shared TID bitmap during a parallel bitmap + index scan. + + + SharedTupleStore + Waiting to access a shared tuple store during parallel + query. + ShmemIndexLock Waiting to find or allocate space in shared memory. + + SInvalReadLock + Waiting to retrieve messages from the shared catalog invalidation + queue. + + + SInvalWriteLock + Waiting to add a message to the shared catalog invalidation + queue. + + + SubtransBuffer + Waiting for I/O on a sub-transaction SLRU buffer. + SubtransSLRULock Waiting to access the sub-transaction SLRU cache. 
SyncRepLock - Waiting to read or update information about synchronous - replicas. + Waiting to read or update information about the state of + synchronous replication. SyncScanLock - Waiting to get the start location of a scan on a table for - synchronized scans. + Waiting to select the starting location of a synchronized table + scan. TablespaceCreateLock - Waiting to create or drop the tablespace. + Waiting to create or drop a tablespace. TwoPhaseStateLock @@ -1933,104 +2038,30 @@ postgres 27093 0.0 0.0 30096 2752 ? Ss 11:34 0:00 postgres: ser WALBufMappingLock Waiting to replace a page in WAL buffers. + + WALInsert + Waiting to insert WAL data into a memory buffer. + WALWriteLock Waiting for WAL buffers to be written to disk. - - XidGenLock - Waiting to allocate or assign a transaction id. - - - NotifyBuffer - Waiting for I/O on a NOTIFY message SLRU - buffer. - - - buffer_content - Waiting to read or write a data page in memory. - - - buffer_io - Waiting for I/O on a data page. - - - buffer_mapping - Waiting to associate a data block with a buffer in the buffer - pool. - XactBuffer Waiting for I/O on a transaction status SLRU buffer. - CommitTsBuffer - Waiting for I/O on a commit timestamp SLRU buffer. + XactSLRULock + Waiting to access the transaction status SLRU cache. - lock_manager - Waiting to add or examine locks for backends, or waiting to - join or exit a locking group (used by parallel query). + XactTruncationLock + Waiting to execute pg_xact_status or update + the oldest transaction ID available to it. - MultiXactMember - Waiting for I/O on a multixact member SLRU buffer. - - - MultiXactOffsetBuffer - Waiting for I/O on a multixact offset SLRU buffer. - - - SerialBuffer - Waiting for I/O on a serializable transaction conflict SLRU - buffer. - - - parallel_append - Waiting to choose the next subplan during Parallel Append plan - execution. - - - parallel_hash_join - Waiting to allocate or exchange a chunk of memory or update - counters during Parallel Hash plan execution. - - - parallel_query_dsa - Waiting for parallel query dynamic shared memory allocation lock. - - - predicate_lock_manager - Waiting to add or examine predicate lock information. - - - proc - Waiting to read or update the fast-path lock information. - - - replication_origin - Waiting to read or update the replication progress. - - - replication_slot_io - Waiting for I/O on a replication slot. - - - serializable_xact - Waiting to perform an operation on a serializable transaction - in a parallel query. - - - SubtransBuffer - Waiting for I/O on a sub-transaction SLRU buffer. - - - tbm - Waiting for TBM shared iterator lock. - - - wal_insert - Waiting to insert WAL into a memory buffer. + XidGenLock + Waiting to allocate a new transaction ID. 
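/*
 * Illustrative sketch, not part of this patch: how an extension ends up with
 * a wait event name in the table above.  The tranche name passed to
 * RequestNamedLWLockTranche() is what pg_stat_activity later reports as
 * wait_event while a backend sleeps on that lock, and AddinShmemInitLock
 * (first row of the table) is what serializes the ShmemInitStruct() call
 * below.  The name "MyExtState", the struct, and the hook function are
 * hypothetical; RequestAddinShmemSpace, RequestNamedLWLockTranche,
 * GetNamedLWLockTranche and ShmemInitStruct are the standard APIs.
 */
#include "postgres.h"

#include "fmgr.h"
#include "miscadmin.h"
#include "storage/ipc.h"
#include "storage/lwlock.h"
#include "storage/shmem.h"

PG_MODULE_MAGIC;

void		_PG_init(void);

typedef struct MyExtSharedState
{
	LWLock	   *lock;			/* protects counter; waits show as "MyExtState" */
	uint64		counter;
} MyExtSharedState;

static MyExtSharedState *my_ext_state = NULL;
static shmem_startup_hook_type prev_shmem_startup_hook = NULL;

static void
my_ext_shmem_startup(void)
{
	bool		found;

	if (prev_shmem_startup_hook)
		prev_shmem_startup_hook();

	/* AddinShmemInitLock guards extension shared-memory initialization */
	LWLockAcquire(AddinShmemInitLock, LW_EXCLUSIVE);
	my_ext_state = ShmemInitStruct("my_ext state",
								   sizeof(MyExtSharedState), &found);
	if (!found)
	{
		my_ext_state->lock = &(GetNamedLWLockTranche("MyExtState"))->lock;
		my_ext_state->counter = 0;
	}
	LWLockRelease(AddinShmemInitLock);
}

void
_PG_init(void)
{
	/* named tranches can only be requested from shared_preload_libraries */
	if (!process_shared_preload_libraries_in_progress)
		return;

	/* tranche name in CamelCase, matching the wait-event style set above */
	RequestAddinShmemSpace(MAXALIGN(sizeof(MyExtSharedState)));
	RequestNamedLWLockTranche("MyExtState", 1);

	prev_shmem_startup_hook = shmem_startup_hook;
	shmem_startup_hook = my_ext_shmem_startup;
}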
diff --git a/src/backend/access/common/session.c b/src/backend/access/common/session.c index 070ece5b8fc..0ec61d48a2d 100644 --- a/src/backend/access/common/session.c +++ b/src/backend/access/common/session.c @@ -117,7 +117,7 @@ GetSessionDsmHandle(void) dsa_space = shm_toc_allocate(toc, SESSION_DSA_SIZE); dsa = dsa_create_in_place(dsa_space, SESSION_DSA_SIZE, - LWTRANCHE_SESSION_DSA, + LWTRANCHE_PER_SESSION_DSA, seg); shm_toc_insert(toc, SESSION_KEY_DSA, dsa_space); diff --git a/src/backend/nodes/tidbitmap.c b/src/backend/nodes/tidbitmap.c index ad4e071ca3b..0d5056c3e3d 100644 --- a/src/backend/nodes/tidbitmap.c +++ b/src/backend/nodes/tidbitmap.c @@ -889,7 +889,7 @@ tbm_prepare_shared_iterate(TIDBitmap *tbm) pg_atomic_add_fetch_u32(&ptchunks->refcount, 1); /* Initialize the iterator lock */ - LWLockInitialize(&istate->lock, LWTRANCHE_TBM); + LWLockInitialize(&istate->lock, LWTRANCHE_SHARED_TIDBITMAP); /* Initialize the shared iterator state */ istate->schunkbit = 0; diff --git a/src/backend/replication/logical/origin.c b/src/backend/replication/logical/origin.c index 923ea3ffd72..26f1cb2aca8 100644 --- a/src/backend/replication/logical/origin.c +++ b/src/backend/replication/logical/origin.c @@ -506,7 +506,7 @@ ReplicationOriginShmemInit(void) { int i; - replication_states_ctl->tranche_id = LWTRANCHE_REPLICATION_ORIGIN; + replication_states_ctl->tranche_id = LWTRANCHE_REPLICATION_ORIGIN_STATE; MemSet(replication_states, 0, ReplicationOriginShmemSize()); diff --git a/src/backend/replication/slot.c b/src/backend/replication/slot.c index d3d1033beba..35ff59cf5b4 100644 --- a/src/backend/replication/slot.c +++ b/src/backend/replication/slot.c @@ -153,7 +153,8 @@ ReplicationSlotsShmemInit(void) /* everything else is zeroed by the memset above */ SpinLockInit(&slot->mutex); - LWLockInitialize(&slot->io_in_progress_lock, LWTRANCHE_REPLICATION_SLOT_IO_IN_PROGRESS); + LWLockInitialize(&slot->io_in_progress_lock, + LWTRANCHE_REPLICATION_SLOT_IO); ConditionVariableInit(&slot->active_cv); } } diff --git a/src/backend/storage/buffer/buf_init.c b/src/backend/storage/buffer/buf_init.c index 895485698a2..a8ce6603ed0 100644 --- a/src/backend/storage/buffer/buf_init.c +++ b/src/backend/storage/buffer/buf_init.c @@ -132,7 +132,7 @@ InitBufferPool(void) LWTRANCHE_BUFFER_CONTENT); LWLockInitialize(BufferDescriptorGetIOLock(buf), - LWTRANCHE_BUFFER_IO_IN_PROGRESS); + LWTRANCHE_BUFFER_IO); } /* Correct last entry of linked list */ diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c index fab387b5dfe..e1e623db153 100644 --- a/src/backend/storage/lmgr/lock.c +++ b/src/backend/storage/lmgr/lock.c @@ -936,13 +936,13 @@ LockAcquireExtended(const LOCKTAG *locktag, * FastPathStrongRelationLocks->counts becomes visible after we test * it has yet to begin to transfer fast-path locks. */ - LWLockAcquire(&MyProc->backendLock, LW_EXCLUSIVE); + LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE); if (FastPathStrongRelationLocks->count[fasthashcode] != 0) acquired = false; else acquired = FastPathGrantRelationLock(locktag->locktag_field2, lockmode); - LWLockRelease(&MyProc->backendLock); + LWLockRelease(&MyProc->fpInfoLock); if (acquired) { /* @@ -2085,10 +2085,10 @@ LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock) * We might not find the lock here, even if we originally entered it * here. Another backend may have moved it to the main table. 
*/ - LWLockAcquire(&MyProc->backendLock, LW_EXCLUSIVE); + LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE); released = FastPathUnGrantRelationLock(locktag->locktag_field2, lockmode); - LWLockRelease(&MyProc->backendLock); + LWLockRelease(&MyProc->fpInfoLock); if (released) { RemoveLocalLock(locallock); @@ -2291,7 +2291,7 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks) */ if (!have_fast_path_lwlock) { - LWLockAcquire(&MyProc->backendLock, LW_EXCLUSIVE); + LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE); have_fast_path_lwlock = true; } @@ -2308,7 +2308,7 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks) * transferred to the main lock table. That's going to require * some extra work, so release our fast-path lock before starting. */ - LWLockRelease(&MyProc->backendLock); + LWLockRelease(&MyProc->fpInfoLock); have_fast_path_lwlock = false; /* @@ -2334,7 +2334,7 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks) /* Done with the fast-path data structures */ if (have_fast_path_lwlock) - LWLockRelease(&MyProc->backendLock); + LWLockRelease(&MyProc->fpInfoLock); /* * Now, scan each lock partition separately. @@ -2737,7 +2737,7 @@ FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag PGPROC *proc = &ProcGlobal->allProcs[i]; uint32 f; - LWLockAcquire(&proc->backendLock, LW_EXCLUSIVE); + LWLockAcquire(&proc->fpInfoLock, LW_EXCLUSIVE); /* * If the target backend isn't referencing the same database as the @@ -2746,8 +2746,8 @@ FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag * * proc->databaseId is set at backend startup time and never changes * thereafter, so it might be safe to perform this test before - * acquiring &proc->backendLock. In particular, it's certainly safe - * to assume that if the target backend holds any fast-path locks, it + * acquiring &proc->fpInfoLock. In particular, it's certainly safe to + * assume that if the target backend holds any fast-path locks, it * must have performed a memory-fencing operation (in particular, an * LWLock acquisition) since setting proc->databaseId. However, it's * less clear that our backend is certain to have performed a memory @@ -2756,7 +2756,7 @@ FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag */ if (proc->databaseId != locktag->locktag_field1) { - LWLockRelease(&proc->backendLock); + LWLockRelease(&proc->fpInfoLock); continue; } @@ -2783,7 +2783,7 @@ FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag if (!proclock) { LWLockRelease(partitionLock); - LWLockRelease(&proc->backendLock); + LWLockRelease(&proc->fpInfoLock); return false; } GrantLock(proclock->tag.myLock, proclock, lockmode); @@ -2794,7 +2794,7 @@ FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag /* No need to examine remaining slots. 
*/ break; } - LWLockRelease(&proc->backendLock); + LWLockRelease(&proc->fpInfoLock); } return true; } @@ -2816,7 +2816,7 @@ FastPathGetRelationLockEntry(LOCALLOCK *locallock) Oid relid = locktag->locktag_field2; uint32 f; - LWLockAcquire(&MyProc->backendLock, LW_EXCLUSIVE); + LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE); for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++) { @@ -2839,7 +2839,7 @@ FastPathGetRelationLockEntry(LOCALLOCK *locallock) if (!proclock) { LWLockRelease(partitionLock); - LWLockRelease(&MyProc->backendLock); + LWLockRelease(&MyProc->fpInfoLock); ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("out of shared memory"), @@ -2854,7 +2854,7 @@ FastPathGetRelationLockEntry(LOCALLOCK *locallock) break; } - LWLockRelease(&MyProc->backendLock); + LWLockRelease(&MyProc->fpInfoLock); /* Lock may have already been transferred by some other backend. */ if (proclock == NULL) @@ -2980,7 +2980,7 @@ GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode, int *countp) if (proc == MyProc) continue; - LWLockAcquire(&proc->backendLock, LW_SHARED); + LWLockAcquire(&proc->fpInfoLock, LW_SHARED); /* * If the target backend isn't referencing the same database as @@ -2992,7 +2992,7 @@ GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode, int *countp) */ if (proc->databaseId != locktag->locktag_field1) { - LWLockRelease(&proc->backendLock); + LWLockRelease(&proc->fpInfoLock); continue; } @@ -3030,7 +3030,7 @@ GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode, int *countp) break; } - LWLockRelease(&proc->backendLock); + LWLockRelease(&proc->fpInfoLock); } } @@ -3599,7 +3599,7 @@ GetLockStatusData(void) PGPROC *proc = &ProcGlobal->allProcs[i]; uint32 f; - LWLockAcquire(&proc->backendLock, LW_SHARED); + LWLockAcquire(&proc->fpInfoLock, LW_SHARED); for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; ++f) { @@ -3659,7 +3659,7 @@ GetLockStatusData(void) el++; } - LWLockRelease(&proc->backendLock); + LWLockRelease(&proc->fpInfoLock); } /* @@ -4381,7 +4381,7 @@ lock_twophase_postabort(TransactionId xid, uint16 info, * as MyProc->lxid, you might wonder if we really need both. The * difference is that MyProc->lxid is set and cleared unlocked, and * examined by procarray.c, while fpLocalTransactionId is protected by - * backendLock and is used only by the locking subsystem. Doing it this + * fpInfoLock and is used only by the locking subsystem. Doing it this * way makes it easier to verify that there are no funny race conditions. * * We don't bother recording this lock in the local lock table, since it's @@ -4393,7 +4393,7 @@ VirtualXactLockTableInsert(VirtualTransactionId vxid) { Assert(VirtualTransactionIdIsValid(vxid)); - LWLockAcquire(&MyProc->backendLock, LW_EXCLUSIVE); + LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE); Assert(MyProc->backendId == vxid.backendId); Assert(MyProc->fpLocalTransactionId == InvalidLocalTransactionId); @@ -4402,7 +4402,7 @@ VirtualXactLockTableInsert(VirtualTransactionId vxid) MyProc->fpVXIDLock = true; MyProc->fpLocalTransactionId = vxid.localTransactionId; - LWLockRelease(&MyProc->backendLock); + LWLockRelease(&MyProc->fpInfoLock); } /* @@ -4422,14 +4422,14 @@ VirtualXactLockTableCleanup(void) /* * Clean up shared memory state. 
*/ - LWLockAcquire(&MyProc->backendLock, LW_EXCLUSIVE); + LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE); fastpath = MyProc->fpVXIDLock; lxid = MyProc->fpLocalTransactionId; MyProc->fpVXIDLock = false; MyProc->fpLocalTransactionId = InvalidLocalTransactionId; - LWLockRelease(&MyProc->backendLock); + LWLockRelease(&MyProc->fpInfoLock); /* * If fpVXIDLock has been cleared without touching fpLocalTransactionId, @@ -4485,13 +4485,13 @@ VirtualXactLock(VirtualTransactionId vxid, bool wait) * against the ones we're waiting for. The target backend will only set * or clear lxid while holding this lock. */ - LWLockAcquire(&proc->backendLock, LW_EXCLUSIVE); + LWLockAcquire(&proc->fpInfoLock, LW_EXCLUSIVE); /* If the transaction has ended, our work here is done. */ if (proc->backendId != vxid.backendId || proc->fpLocalTransactionId != vxid.localTransactionId) { - LWLockRelease(&proc->backendLock); + LWLockRelease(&proc->fpInfoLock); return true; } @@ -4501,7 +4501,7 @@ VirtualXactLock(VirtualTransactionId vxid, bool wait) */ if (!wait) { - LWLockRelease(&proc->backendLock); + LWLockRelease(&proc->fpInfoLock); return false; } @@ -4526,7 +4526,7 @@ VirtualXactLock(VirtualTransactionId vxid, bool wait) if (!proclock) { LWLockRelease(partitionLock); - LWLockRelease(&proc->backendLock); + LWLockRelease(&proc->fpInfoLock); ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("out of shared memory"), @@ -4540,7 +4540,7 @@ VirtualXactLock(VirtualTransactionId vxid, bool wait) } /* Done with proc->fpLockBits */ - LWLockRelease(&proc->backendLock); + LWLockRelease(&proc->fpInfoLock); /* Time to wait. */ (void) LockAcquire(&tag, ShareLock, false, false); diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c index c226c4add28..0bdc8e0499d 100644 --- a/src/backend/storage/lmgr/lwlock.c +++ b/src/backend/storage/lmgr/lwlock.c @@ -121,6 +121,9 @@ extern slock_t *ShmemLock; * 3. Extensions can create new tranches, via either RequestNamedLWLockTranche * or LWLockRegisterTranche. The names of these that are known in the current * process appear in LWLockTrancheNames[]. + * + * All these names are user-visible as wait event names, so choose with care + * ... and do not forget to update the documentation's list of wait events. 
*/ static const char *const BuiltinTrancheNames[] = { @@ -139,41 +142,41 @@ static const char *const BuiltinTrancheNames[] = { /* LWTRANCHE_SERIAL_BUFFER: */ "SerialBuffer", /* LWTRANCHE_WAL_INSERT: */ - "wal_insert", + "WALInsert", /* LWTRANCHE_BUFFER_CONTENT: */ - "buffer_content", - /* LWTRANCHE_BUFFER_IO_IN_PROGRESS: */ - "buffer_io", - /* LWTRANCHE_REPLICATION_ORIGIN: */ - "replication_origin", - /* LWTRANCHE_REPLICATION_SLOT_IO_IN_PROGRESS: */ - "replication_slot_io", - /* LWTRANCHE_PROC: */ - "proc", + "BufferContent", + /* LWTRANCHE_BUFFER_IO: */ + "BufferIO", + /* LWTRANCHE_REPLICATION_ORIGIN_STATE: */ + "ReplicationOriginState", + /* LWTRANCHE_REPLICATION_SLOT_IO: */ + "ReplicationSlotIO", + /* LWTRANCHE_LOCK_FASTPATH: */ + "LockFastPath", /* LWTRANCHE_BUFFER_MAPPING: */ - "buffer_mapping", + "BufferMapping", /* LWTRANCHE_LOCK_MANAGER: */ - "lock_manager", + "LockManager", /* LWTRANCHE_PREDICATE_LOCK_MANAGER: */ - "predicate_lock_manager", + "PredicateLockManager", /* LWTRANCHE_PARALLEL_HASH_JOIN: */ - "parallel_hash_join", + "ParallelHashJoin", /* LWTRANCHE_PARALLEL_QUERY_DSA: */ - "parallel_query_dsa", - /* LWTRANCHE_SESSION_DSA: */ - "session_dsa", - /* LWTRANCHE_SESSION_RECORD_TABLE: */ - "session_record_table", - /* LWTRANCHE_SESSION_TYPMOD_TABLE: */ - "session_typmod_table", + "ParallelQueryDSA", + /* LWTRANCHE_PER_SESSION_DSA: */ + "PerSessionDSA", + /* LWTRANCHE_PER_SESSION_RECORD_TYPE: */ + "PerSessionRecordType", + /* LWTRANCHE_PER_SESSION_RECORD_TYPMOD: */ + "PerSessionRecordTypmod", /* LWTRANCHE_SHARED_TUPLESTORE: */ - "shared_tuplestore", - /* LWTRANCHE_TBM: */ - "tbm", + "SharedTupleStore", + /* LWTRANCHE_SHARED_TIDBITMAP: */ + "SharedTidBitmap", /* LWTRANCHE_PARALLEL_APPEND: */ - "parallel_append", - /* LWTRANCHE_SXACT: */ - "serializable_xact" + "ParallelAppend", + /* LWTRANCHE_PER_XACT_PREDICATE_LIST: */ + "PerXactPredicateList" }; StaticAssertDecl(lengthof(BuiltinTrancheNames) == @@ -640,7 +643,10 @@ LWLockNewTrancheId(void) * * This routine will save a pointer to the tranche name passed as an argument, * so the name should be allocated in a backend-lifetime context - * (TopMemoryContext, static constant, or similar). + * (shared memory, TopMemoryContext, static constant, or similar). + * + * The tranche name will be user-visible as a wait event name, so try to + * use a name that fits the style for those. */ void LWLockRegisterTranche(int tranche_id, const char *tranche_name) @@ -690,6 +696,9 @@ LWLockRegisterTranche(int tranche_id, const char *tranche_name) * will be ignored. (We could raise an error, but it seems better to make * it a no-op, so that libraries containing such calls can be reloaded if * needed.) + * + * The tranche name will be user-visible as a wait event name, so try to + * use a name that fits the style for those. */ void RequestNamedLWLockTranche(const char *tranche_name, int num_lwlocks) diff --git a/src/backend/storage/lmgr/lwlocknames.txt b/src/backend/storage/lmgr/lwlocknames.txt index 6112af381fe..e6985e8eedf 100644 --- a/src/backend/storage/lmgr/lwlocknames.txt +++ b/src/backend/storage/lmgr/lwlocknames.txt @@ -2,7 +2,8 @@ # these are defined here. If you add a lock, add it to the end to avoid # renumbering the existing locks; if you remove a lock, consider leaving a gap # in the numbering sequence for the benefit of DTrace and other external -# debugging scripts. +# debugging scripts. Also, do not forget to update the list of wait events +# in the user documentation. 
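/*
 * Illustrative sketch, not part of this patch: the other registration path
 * mentioned in the lwlock.c comments above, for locks that live in dynamic
 * shared memory rather than in the main segment.  The structure and the
 * tranche name "MyExtHashTable" are hypothetical (chosen to match the
 * CamelCase style of BuiltinTrancheNames); LWLockNewTrancheId(),
 * LWLockRegisterTranche() and LWLockInitialize() are the real APIs.  A
 * string literal satisfies the "backend-lifetime context" requirement
 * documented for LWLockRegisterTranche().
 */
#include "postgres.h"

#include "storage/lwlock.h"

typedef struct MyExtSharedTable
{
	int			lwlock_tranche_id;	/* shared so every attacher can reuse it */
	LWLock		lock;				/* protects nentries */
	int			nentries;
} MyExtSharedTable;

/* Run by the one process that creates the shared structure. */
static void
my_ext_table_create(MyExtSharedTable *tbl)
{
	tbl->lwlock_tranche_id = LWLockNewTrancheId();
	LWLockRegisterTranche(tbl->lwlock_tranche_id, "MyExtHashTable");
	LWLockInitialize(&tbl->lock, tbl->lwlock_tranche_id);
	tbl->nentries = 0;
}

/* Run by every other process that attaches to the structure. */
static void
my_ext_table_attach(MyExtSharedTable *tbl)
{
	/*
	 * Register the same id under the same name, so that this process can
	 * resolve the lock's wait event name too.
	 */
	LWLockRegisterTranche(tbl->lwlock_tranche_id, "MyExtHashTable");
}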
# 0 is available; was formerly BufFreelistLock ShmemIndexLock 1 @@ -34,7 +35,7 @@ NotifySLRULock 26 NotifyQueueLock 27 SerializableXactHashLock 28 SerializableFinishedListLock 29 -SerializablePredicateLockListLock 30 +SerializablePredicateListLock 30 SerialSLRULock 31 SyncRepLock 32 BackgroundWorkerLock 33 diff --git a/src/backend/storage/lmgr/predicate.c b/src/backend/storage/lmgr/predicate.c index 02415ff01e3..ba93fb199d4 100644 --- a/src/backend/storage/lmgr/predicate.c +++ b/src/backend/storage/lmgr/predicate.c @@ -89,7 +89,7 @@ * - Protects the list of transactions which have completed but which * may yet matter because they overlap still-active transactions. * - * SerializablePredicateLockListLock + * SerializablePredicateListLock * - Protects the linked list of locks held by a transaction. Note * that the locks themselves are also covered by the partition * locks of their respective lock targets; this lock only affects @@ -118,11 +118,11 @@ * than its own active transaction must acquire an exclusive * lock. * - * SERIALIZABLEXACT's member 'predicateLockListLock' - * - Protects the linked list of locks held by a transaction. Only - * needed for parallel mode, where multiple backends share the + * SERIALIZABLEXACT's member 'perXactPredicateListLock' + * - Protects the linked list of predicate locks held by a transaction. + * Only needed for parallel mode, where multiple backends share the * same SERIALIZABLEXACT object. Not needed if - * SerializablePredicateLockListLock is held exclusively. + * SerializablePredicateListLock is held exclusively. * * PredicateLockHashPartitionLock(hashcode) * - The same lock protects a target, all locks on that target, and @@ -1186,8 +1186,8 @@ InitPredicateLocks(void) memset(PredXact->element, 0, requestSize); for (i = 0; i < max_table_size; i++) { - LWLockInitialize(&PredXact->element[i].sxact.predicateLockListLock, - LWTRANCHE_SXACT); + LWLockInitialize(&PredXact->element[i].sxact.perXactPredicateListLock, + LWTRANCHE_PER_XACT_PREDICATE_LIST); SHMQueueInsertBefore(&(PredXact->availableList), &(PredXact->element[i].link)); } @@ -2042,7 +2042,7 @@ CoarserLockCovers(const PREDICATELOCKTARGETTAG *newtargettag) /* * Remove the dummy entry from the predicate lock target hash, to free up some - * scratch space. The caller must be holding SerializablePredicateLockListLock, + * scratch space. The caller must be holding SerializablePredicateListLock, * and must restore the entry with RestoreScratchTarget() before releasing the * lock. * @@ -2054,7 +2054,7 @@ RemoveScratchTarget(bool lockheld) { bool found; - Assert(LWLockHeldByMe(SerializablePredicateLockListLock)); + Assert(LWLockHeldByMe(SerializablePredicateListLock)); if (!lockheld) LWLockAcquire(ScratchPartitionLock, LW_EXCLUSIVE); @@ -2075,7 +2075,7 @@ RestoreScratchTarget(bool lockheld) { bool found; - Assert(LWLockHeldByMe(SerializablePredicateLockListLock)); + Assert(LWLockHeldByMe(SerializablePredicateListLock)); if (!lockheld) LWLockAcquire(ScratchPartitionLock, LW_EXCLUSIVE); @@ -2097,7 +2097,7 @@ RemoveTargetIfNoLongerUsed(PREDICATELOCKTARGET *target, uint32 targettaghash) { PREDICATELOCKTARGET *rmtarget PG_USED_FOR_ASSERTS_ONLY; - Assert(LWLockHeldByMe(SerializablePredicateLockListLock)); + Assert(LWLockHeldByMe(SerializablePredicateListLock)); /* Can't remove it until no locks at this target. 
*/ if (!SHMQueueEmpty(&target->predicateLocks)) @@ -2129,10 +2129,10 @@ DeleteChildTargetLocks(const PREDICATELOCKTARGETTAG *newtargettag) SERIALIZABLEXACT *sxact; PREDICATELOCK *predlock; - LWLockAcquire(SerializablePredicateLockListLock, LW_SHARED); + LWLockAcquire(SerializablePredicateListLock, LW_SHARED); sxact = MySerializableXact; if (IsInParallelMode()) - LWLockAcquire(&sxact->predicateLockListLock, LW_EXCLUSIVE); + LWLockAcquire(&sxact->perXactPredicateListLock, LW_EXCLUSIVE); predlock = (PREDICATELOCK *) SHMQueueNext(&(sxact->predicateLocks), &(sxact->predicateLocks), @@ -2187,8 +2187,8 @@ DeleteChildTargetLocks(const PREDICATELOCKTARGETTAG *newtargettag) predlock = nextpredlock; } if (IsInParallelMode()) - LWLockRelease(&sxact->predicateLockListLock); - LWLockRelease(SerializablePredicateLockListLock); + LWLockRelease(&sxact->perXactPredicateListLock); + LWLockRelease(SerializablePredicateListLock); } /* @@ -2385,9 +2385,9 @@ CreatePredicateLock(const PREDICATELOCKTARGETTAG *targettag, partitionLock = PredicateLockHashPartitionLock(targettaghash); - LWLockAcquire(SerializablePredicateLockListLock, LW_SHARED); + LWLockAcquire(SerializablePredicateListLock, LW_SHARED); if (IsInParallelMode()) - LWLockAcquire(&sxact->predicateLockListLock, LW_EXCLUSIVE); + LWLockAcquire(&sxact->perXactPredicateListLock, LW_EXCLUSIVE); LWLockAcquire(partitionLock, LW_EXCLUSIVE); /* Make sure that the target is represented. */ @@ -2426,8 +2426,8 @@ CreatePredicateLock(const PREDICATELOCKTARGETTAG *targettag, LWLockRelease(partitionLock); if (IsInParallelMode()) - LWLockRelease(&sxact->predicateLockListLock); - LWLockRelease(SerializablePredicateLockListLock); + LWLockRelease(&sxact->perXactPredicateListLock); + LWLockRelease(SerializablePredicateListLock); } /* @@ -2586,7 +2586,7 @@ PredicateLockTID(Relation relation, ItemPointer tid, Snapshot snapshot, * * Remove a predicate lock target along with any locks held for it. * - * Caller must hold SerializablePredicateLockListLock and the + * Caller must hold SerializablePredicateListLock and the * appropriate hash partition lock for the target. */ static void @@ -2597,7 +2597,7 @@ DeleteLockTarget(PREDICATELOCKTARGET *target, uint32 targettaghash) PREDICATELOCK *nextpredlock; bool found; - Assert(LWLockHeldByMeInMode(SerializablePredicateLockListLock, + Assert(LWLockHeldByMeInMode(SerializablePredicateListLock, LW_EXCLUSIVE)); Assert(LWLockHeldByMe(PredicateLockHashPartitionLock(targettaghash))); @@ -2658,7 +2658,7 @@ DeleteLockTarget(PREDICATELOCKTARGET *target, uint32 targettaghash) * covers it, or if we are absolutely certain that no one will need to * refer to that lock in the future. * - * Caller must hold SerializablePredicateLockListLock exclusively. + * Caller must hold SerializablePredicateListLock exclusively. 
*/ static bool TransferPredicateLocksToNewTarget(PREDICATELOCKTARGETTAG oldtargettag, @@ -2673,7 +2673,7 @@ TransferPredicateLocksToNewTarget(PREDICATELOCKTARGETTAG oldtargettag, bool found; bool outOfShmem = false; - Assert(LWLockHeldByMeInMode(SerializablePredicateLockListLock, + Assert(LWLockHeldByMeInMode(SerializablePredicateListLock, LW_EXCLUSIVE)); oldtargettaghash = PredicateLockTargetTagHashCode(&oldtargettag); @@ -2924,7 +2924,7 @@ DropAllPredicateLocksFromTable(Relation relation, bool transfer) heaptarget = NULL; /* Acquire locks on all lock partitions */ - LWLockAcquire(SerializablePredicateLockListLock, LW_EXCLUSIVE); + LWLockAcquire(SerializablePredicateListLock, LW_EXCLUSIVE); for (i = 0; i < NUM_PREDICATELOCK_PARTITIONS; i++) LWLockAcquire(PredicateLockHashPartitionLockByIndex(i), LW_EXCLUSIVE); LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE); @@ -3065,7 +3065,7 @@ DropAllPredicateLocksFromTable(Relation relation, bool transfer) LWLockRelease(SerializableXactHashLock); for (i = NUM_PREDICATELOCK_PARTITIONS - 1; i >= 0; i--) LWLockRelease(PredicateLockHashPartitionLockByIndex(i)); - LWLockRelease(SerializablePredicateLockListLock); + LWLockRelease(SerializablePredicateListLock); } /* @@ -3131,7 +3131,7 @@ PredicateLockPageSplit(Relation relation, BlockNumber oldblkno, relation->rd_id, newblkno); - LWLockAcquire(SerializablePredicateLockListLock, LW_EXCLUSIVE); + LWLockAcquire(SerializablePredicateListLock, LW_EXCLUSIVE); /* * Try copying the locks over to the new page's tag, creating it if @@ -3167,7 +3167,7 @@ PredicateLockPageSplit(Relation relation, BlockNumber oldblkno, Assert(success); } - LWLockRelease(SerializablePredicateLockListLock); + LWLockRelease(SerializablePredicateListLock); } /* @@ -3748,7 +3748,7 @@ ClearOldPredicateLocks(void) /* * Loop through predicate locks on dummy transaction for summarized data. 
*/ - LWLockAcquire(SerializablePredicateLockListLock, LW_SHARED); + LWLockAcquire(SerializablePredicateListLock, LW_SHARED); predlock = (PREDICATELOCK *) SHMQueueNext(&OldCommittedSxact->predicateLocks, &OldCommittedSxact->predicateLocks, @@ -3804,7 +3804,7 @@ ClearOldPredicateLocks(void) predlock = nextpredlock; } - LWLockRelease(SerializablePredicateLockListLock); + LWLockRelease(SerializablePredicateListLock); LWLockRelease(SerializableFinishedListLock); } @@ -3845,9 +3845,9 @@ ReleaseOneSerializableXact(SERIALIZABLEXACT *sxact, bool partial, * First release all the predicate locks held by this xact (or transfer * them to OldCommittedSxact if summarize is true) */ - LWLockAcquire(SerializablePredicateLockListLock, LW_SHARED); + LWLockAcquire(SerializablePredicateListLock, LW_SHARED); if (IsInParallelMode()) - LWLockAcquire(&sxact->predicateLockListLock, LW_EXCLUSIVE); + LWLockAcquire(&sxact->perXactPredicateListLock, LW_EXCLUSIVE); predlock = (PREDICATELOCK *) SHMQueueNext(&(sxact->predicateLocks), &(sxact->predicateLocks), @@ -3928,8 +3928,8 @@ ReleaseOneSerializableXact(SERIALIZABLEXACT *sxact, bool partial, SHMQueueInit(&sxact->predicateLocks); if (IsInParallelMode()) - LWLockRelease(&sxact->predicateLockListLock); - LWLockRelease(SerializablePredicateLockListLock); + LWLockRelease(&sxact->perXactPredicateListLock); + LWLockRelease(SerializablePredicateListLock); sxidtag.xid = sxact->topXid; LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE); @@ -4302,9 +4302,9 @@ CheckTargetForConflictsIn(PREDICATELOCKTARGETTAG *targettag) uint32 predlockhashcode; PREDICATELOCK *rmpredlock; - LWLockAcquire(SerializablePredicateLockListLock, LW_SHARED); + LWLockAcquire(SerializablePredicateListLock, LW_SHARED); if (IsInParallelMode()) - LWLockAcquire(&MySerializableXact->predicateLockListLock, LW_EXCLUSIVE); + LWLockAcquire(&MySerializableXact->perXactPredicateListLock, LW_EXCLUSIVE); LWLockAcquire(partitionLock, LW_EXCLUSIVE); LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE); @@ -4340,8 +4340,8 @@ CheckTargetForConflictsIn(PREDICATELOCKTARGETTAG *targettag) LWLockRelease(SerializableXactHashLock); LWLockRelease(partitionLock); if (IsInParallelMode()) - LWLockRelease(&MySerializableXact->predicateLockListLock); - LWLockRelease(SerializablePredicateLockListLock); + LWLockRelease(&MySerializableXact->perXactPredicateListLock); + LWLockRelease(SerializablePredicateListLock); if (rmpredlock != NULL) { @@ -4485,7 +4485,7 @@ CheckTableForSerializableConflictIn(Relation relation) dbId = relation->rd_node.dbNode; heapId = relation->rd_id; - LWLockAcquire(SerializablePredicateLockListLock, LW_EXCLUSIVE); + LWLockAcquire(SerializablePredicateListLock, LW_EXCLUSIVE); for (i = 0; i < NUM_PREDICATELOCK_PARTITIONS; i++) LWLockAcquire(PredicateLockHashPartitionLockByIndex(i), LW_SHARED); LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE); @@ -4535,7 +4535,7 @@ CheckTableForSerializableConflictIn(Relation relation) LWLockRelease(SerializableXactHashLock); for (i = NUM_PREDICATELOCK_PARTITIONS - 1; i >= 0; i--) LWLockRelease(PredicateLockHashPartitionLockByIndex(i)); - LWLockRelease(SerializablePredicateLockListLock); + LWLockRelease(SerializablePredicateListLock); } @@ -4887,12 +4887,12 @@ AtPrepare_PredicateLocks(void) * than using the local predicate lock table because the latter is not * guaranteed to be accurate. 
*/ - LWLockAcquire(SerializablePredicateLockListLock, LW_SHARED); + LWLockAcquire(SerializablePredicateListLock, LW_SHARED); /* - * No need to take sxact->predicateLockListLock in parallel mode because - * there cannot be any parallel workers running while we are preparing a - * transaction. + * No need to take sxact->perXactPredicateListLock in parallel mode + * because there cannot be any parallel workers running while we are + * preparing a transaction. */ Assert(!IsParallelWorker() && !ParallelContextActive()); @@ -4915,7 +4915,7 @@ AtPrepare_PredicateLocks(void) offsetof(PREDICATELOCK, xactLink)); } - LWLockRelease(SerializablePredicateLockListLock); + LWLockRelease(SerializablePredicateListLock); } /* diff --git a/src/backend/storage/lmgr/proc.c b/src/backend/storage/lmgr/proc.c index 5aa19d3f781..f5eef6fa4ee 100644 --- a/src/backend/storage/lmgr/proc.c +++ b/src/backend/storage/lmgr/proc.c @@ -221,7 +221,7 @@ InitProcGlobal(void) /* Common initialization for all PGPROCs, regardless of type. */ /* - * Set up per-PGPROC semaphore, latch, and backendLock. Prepared xact + * Set up per-PGPROC semaphore, latch, and fpInfoLock. Prepared xact * dummy PGPROCs don't need these though - they're never associated * with a real process */ @@ -229,7 +229,7 @@ InitProcGlobal(void) { procs[i].sem = PGSemaphoreCreate(); InitSharedLatch(&(procs[i].procLatch)); - LWLockInitialize(&(procs[i].backendLock), LWTRANCHE_PROC); + LWLockInitialize(&(procs[i].fpInfoLock), LWTRANCHE_LOCK_FASTPATH); } procs[i].pgprocno = i; diff --git a/src/backend/utils/cache/typcache.c b/src/backend/utils/cache/typcache.c index 854f133f9be..f51248b70d0 100644 --- a/src/backend/utils/cache/typcache.c +++ b/src/backend/utils/cache/typcache.c @@ -256,7 +256,7 @@ static const dshash_parameters srtr_record_table_params = { sizeof(SharedRecordTableEntry), shared_record_table_compare, shared_record_table_hash, - LWTRANCHE_SESSION_RECORD_TABLE + LWTRANCHE_PER_SESSION_RECORD_TYPE }; /* Parameters for SharedRecordTypmodRegistry's typmod hash table. 
*/ @@ -265,7 +265,7 @@ static const dshash_parameters srtr_typmod_table_params = { sizeof(SharedTypmodTableEntry), dshash_memcmp, dshash_memhash, - LWTRANCHE_SESSION_TYPMOD_TABLE + LWTRANCHE_PER_SESSION_RECORD_TYPMOD }; /* hashtable for recognizing registered record types */ diff --git a/src/include/storage/lwlock.h b/src/include/storage/lwlock.h index 19ff265a4d4..d8e1b5c493e 100644 --- a/src/include/storage/lwlock.h +++ b/src/include/storage/lwlock.h @@ -204,22 +204,22 @@ typedef enum BuiltinTrancheIds LWTRANCHE_SERIAL_BUFFER, LWTRANCHE_WAL_INSERT, LWTRANCHE_BUFFER_CONTENT, - LWTRANCHE_BUFFER_IO_IN_PROGRESS, - LWTRANCHE_REPLICATION_ORIGIN, - LWTRANCHE_REPLICATION_SLOT_IO_IN_PROGRESS, - LWTRANCHE_PROC, + LWTRANCHE_BUFFER_IO, + LWTRANCHE_REPLICATION_ORIGIN_STATE, + LWTRANCHE_REPLICATION_SLOT_IO, + LWTRANCHE_LOCK_FASTPATH, LWTRANCHE_BUFFER_MAPPING, LWTRANCHE_LOCK_MANAGER, LWTRANCHE_PREDICATE_LOCK_MANAGER, LWTRANCHE_PARALLEL_HASH_JOIN, LWTRANCHE_PARALLEL_QUERY_DSA, - LWTRANCHE_SESSION_DSA, - LWTRANCHE_SESSION_RECORD_TABLE, - LWTRANCHE_SESSION_TYPMOD_TABLE, + LWTRANCHE_PER_SESSION_DSA, + LWTRANCHE_PER_SESSION_RECORD_TYPE, + LWTRANCHE_PER_SESSION_RECORD_TYPMOD, LWTRANCHE_SHARED_TUPLESTORE, - LWTRANCHE_TBM, + LWTRANCHE_SHARED_TIDBITMAP, LWTRANCHE_PARALLEL_APPEND, - LWTRANCHE_SXACT, + LWTRANCHE_PER_XACT_PREDICATE_LIST, LWTRANCHE_FIRST_USER_DEFINED } BuiltinTrancheIds; diff --git a/src/include/storage/predicate_internals.h b/src/include/storage/predicate_internals.h index 591ac6f42aa..cf9694d65e3 100644 --- a/src/include/storage/predicate_internals.h +++ b/src/include/storage/predicate_internals.h @@ -92,8 +92,12 @@ typedef struct SERIALIZABLEXACT SHM_QUEUE finishedLink; /* list link in * FinishedSerializableTransactions */ - LWLock predicateLockListLock; /* protects predicateLocks in parallel - * mode */ + /* + * perXactPredicateListLock is only used in parallel queries: it protects + * this SERIALIZABLEXACT's predicate lock list against other workers of + * the same session. + */ + LWLock perXactPredicateListLock; /* * for r/o transactions: list of concurrent r/w transactions that we could diff --git a/src/include/storage/proc.h b/src/include/storage/proc.h index ae4f573ab46..1ee9000b2b9 100644 --- a/src/include/storage/proc.h +++ b/src/include/storage/proc.h @@ -188,10 +188,8 @@ struct PGPROC XLogRecPtr clogGroupMemberLsn; /* WAL location of commit record for clog * group member */ - /* Per-backend LWLock. Protects fields below (but not group fields). */ - LWLock backendLock; - /* Lock manager data, recording fast-path locks taken by this backend. */ + LWLock fpInfoLock; /* protects per-backend fast-path state */ uint64 fpLockBits; /* lock modes held for each fast-path slot */ Oid fpRelId[FP_LOCK_SLOTS_PER_BACKEND]; /* slots for rel oids */ bool fpVXIDLock; /* are we holding a fast-path VXID lock? */
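/*
 * Illustrative sketch, not part of this patch: the protocol that the renamed
 * fpInfoLock enforces.  The owning backend takes it exclusively before
 * touching its own fast-path slots; any other backend that wants to inspect
 * or transfer those slots (as in GetLockConflicts or
 * FastPathTransferRelationLocks above) takes it as well, and a backend
 * blocked here now shows up under the LockFastPath wait event.
 * backend_has_fastpath_lock() and fp_slot_has_locks() are hypothetical
 * helpers; the latter mirrors lock.c's private FAST_PATH_GET_BITS() macro
 * (three lock-mode bits per slot in fpLockBits).
 */
#include "postgres.h"

#include "storage/lwlock.h"
#include "storage/proc.h"

static inline bool
fp_slot_has_locks(PGPROC *proc, int slot)
{
	return ((proc->fpLockBits >> (3 * slot)) & 7) != 0;
}

/* Does "proc" currently hold any fast-path lock on relid? */
static bool
backend_has_fastpath_lock(PGPROC *proc, Oid relid)
{
	bool		held = false;
	int			f;

	LWLockAcquire(&proc->fpInfoLock, LW_SHARED);
	for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
	{
		if (proc->fpRelId[f] == relid && fp_slot_has_locks(proc, f))
		{
			held = true;
			break;
		}
	}
	LWLockRelease(&proc->fpInfoLock);

	return held;
}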