Rename assorted LWLock tranches.
Choose names that fit into the conventions for wait event names (particularly, that multi-word names are in the style MultiWordName) and hopefully convey more information to non-hacker users than the previous names did.

Also rename SerializablePredicateLockListLock to SerializablePredicateListLock; the old name was long enough to cause table formatting problems, plus the double occurrence of "Lock" seems confusing/error-prone.

Also change a couple of particularly opaque LWLock field names.

Discussion: https://postgr.es/m/28683.1589405363@sss.pgh.pa.us
This commit is contained in:
parent a0ab4f4909
commit 36ac359d36
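For extension authors, the practical upshot is that tranche names are now user-visible wait event names, so a new tranche should follow the same MultiWordName style. Below is a minimal sketch — not part of this commit; the extension and tranche names are hypothetical — of registering a named tranche under the new convention:

/* Sketch: an extension loaded via shared_preload_libraries requests a
 * named LWLock tranche whose MultiWordName-style name will appear
 * verbatim as pg_stat_activity.wait_event when a backend blocks on it. */
#include "postgres.h"
#include "fmgr.h"
#include "storage/lwlock.h"

PG_MODULE_MAGIC;

void		_PG_init(void);

void
_PG_init(void)
{
	RequestNamedLWLockTranche("MyExtensionState", 1);
}

static void
my_extension_do_work(void)
{
	/* usable from a shmem startup hook onwards */
	LWLockPadded *locks = GetNamedLWLockTranche("MyExtensionState");

	LWLockAcquire(&locks[0].lock, LW_EXCLUSIVE);
	/* ... touch the extension's shared state ... */
	LWLockRelease(&locks[0].lock);
}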
@@ -1751,29 +1751,22 @@ postgres 27093 0.0 0.0 30096 2752 ? Ss 11:34 0:00 postgres: ser
    <tbody>
     <row>
      <entry><literal>AddinShmemInitLock</literal></entry>
-     <entry>Waiting to manage space allocation in shared memory.</entry>
-    </row>
-    <row>
-     <entry><literal>NotifySLRULock</literal></entry>
-     <entry>Waiting to access the <command>NOTIFY</command> message SLRU
-     cache.</entry>
-    </row>
-    <row>
-     <entry><literal>NotifyQueueLock</literal></entry>
-     <entry>Waiting to read or update <command>NOTIFY</command> messages.</entry>
+     <entry>Waiting to manage an extension's space allocation in shared
+     memory.</entry>
     </row>
     <row>
      <entry><literal>AutoFileLock</literal></entry>
-     <entry>Waiting to update the <filename>postgresql.auto.conf</filename> file.</entry>
+     <entry>Waiting to update the <filename>postgresql.auto.conf</filename>
+     file.</entry>
     </row>
     <row>
      <entry><literal>AutovacuumLock</literal></entry>
-     <entry>Autovacuum worker or launcher waiting to update or
-     read the current state of autovacuum workers.</entry>
+     <entry>Waiting to read or update the current state of autovacuum
+     workers.</entry>
     </row>
     <row>
      <entry><literal>AutovacuumScheduleLock</literal></entry>
-     <entry>Waiting to ensure that the table selected for a vacuum
+     <entry>Waiting to ensure that a table selected for autovacuum
      still needs vacuuming.</entry>
     </row>
     <row>
@@ -1786,52 +1779,80 @@ postgres 27093 0.0 0.0 30096 2752 ? Ss 11:34 0:00 postgres: ser
      B-tree index.</entry>
     </row>
     <row>
-     <entry><literal>XactSLRULock</literal></entry>
-     <entry>Waiting to access the transaction status SLRU cache.</entry>
+     <entry><literal>BufferContent</literal></entry>
+     <entry>Waiting to access a data page in memory.</entry>
     </row>
     <row>
-     <entry><literal>XactTruncationLock</literal></entry>
-     <entry>Waiting to execute <function>pg_xact_status</function> or update
-     the oldest transaction ID available to it.</entry>
+     <entry><literal>BufferIO</literal></entry>
+     <entry>Waiting for I/O on a data page.</entry>
+    </row>
+    <row>
+     <entry><literal>BufferMapping</literal></entry>
+     <entry>Waiting to associate a data block with a buffer in the buffer
+     pool.</entry>
     </row>
     <row>
      <entry><literal>CheckpointLock</literal></entry>
-     <entry>Waiting to perform checkpoint.</entry>
+     <entry>Waiting to begin a checkpoint.</entry>
     </row>
     <row>
      <entry><literal>CheckpointerCommLock</literal></entry>
      <entry>Waiting to manage fsync requests.</entry>
     </row>
+    <row>
+     <entry><literal>CommitTsLock</literal></entry>
+     <entry>Waiting to read or update the last value set for a
+     transaction commit timestamp.</entry>
+    </row>
+    <row>
+     <entry><literal>CommitTsBuffer</literal></entry>
+     <entry>Waiting for I/O on a commit timestamp SLRU buffer.</entry>
+    </row>
     <row>
      <entry><literal>CommitTsSLRULock</literal></entry>
      <entry>Waiting to access the commit timestamp SLRU cache.</entry>
     </row>
-    <row>
-     <entry><literal>CommitTsLock</literal></entry>
-     <entry>Waiting to read or update the last value set for the
-     transaction timestamp.</entry>
-    </row>
     <row>
      <entry><literal>ControlFileLock</literal></entry>
-     <entry>Waiting to read or update the control file or creation of a
-     new WAL file.</entry>
+     <entry>Waiting to read or update the <filename>pg_control</filename>
+     file or create a new WAL file.</entry>
     </row>
     <row>
      <entry><literal>DynamicSharedMemoryControlLock</literal></entry>
-     <entry>Waiting to read or update dynamic shared memory state.</entry>
+     <entry>Waiting to read or update dynamic shared memory allocation
+     information.</entry>
+    </row>
+    <row>
+     <entry><literal>LockFastPath</literal></entry>
+     <entry>Waiting to read or update a process' fast-path lock
+     information.</entry>
+    </row>
+    <row>
+     <entry><literal>LockManager</literal></entry>
+     <entry>Waiting to read or update information
+     about <quote>heavyweight</quote> locks.</entry>
     </row>
     <row>
      <entry><literal>LogicalRepWorkerLock</literal></entry>
-     <entry>Waiting for action on logical replication worker to finish.</entry>
+     <entry>Waiting to read or update the state of logical replication
+     workers.</entry>
     </row>
     <row>
      <entry><literal>MultiXactGenLock</literal></entry>
      <entry>Waiting to read or update shared multixact state.</entry>
     </row>
+    <row>
+     <entry><literal>MultiXactMemberBuffer</literal></entry>
+     <entry>Waiting for I/O on a multixact member SLRU buffer.</entry>
+    </row>
     <row>
      <entry><literal>MultiXactMemberSLRULock</literal></entry>
      <entry>Waiting to access the multixact member SLRU cache.</entry>
     </row>
+    <row>
+     <entry><literal>MultiXactOffsetBuffer</literal></entry>
+     <entry>Waiting for I/O on a multixact offset SLRU buffer.</entry>
+    </row>
     <row>
      <entry><literal>MultiXactOffsetSLRULock</literal></entry>
      <entry>Waiting to access the multixact offset SLRU cache.</entry>
@@ -1841,35 +1862,90 @@ postgres 27093 0.0 0.0 30096 2752 ? Ss 11:34 0:00 postgres: ser
      <entry>Waiting to read or truncate multixact information.</entry>
     </row>
     <row>
-     <entry><literal>OidGenLock</literal></entry>
-     <entry>Waiting to allocate or assign an OID.</entry>
+     <entry><literal>NotifyBuffer</literal></entry>
+     <entry>Waiting for I/O on a <command>NOTIFY</command> message SLRU
+     buffer.</entry>
     </row>
     <row>
-     <entry><literal>SerialSLRULock</literal></entry>
-     <entry>Waiting to access the serializable transaction conflict SLRU
+     <entry><literal>NotifyQueueLock</literal></entry>
+     <entry>Waiting to read or update <command>NOTIFY</command> messages.</entry>
+    </row>
+    <row>
+     <entry><literal>NotifySLRULock</literal></entry>
+     <entry>Waiting to access the <command>NOTIFY</command> message SLRU
      cache.</entry>
     </row>
+    <row>
+     <entry><literal>OidGenLock</literal></entry>
+     <entry>Waiting to allocate a new OID.</entry>
+    </row>
     <row>
      <entry><literal>OldSnapshotTimeMapLock</literal></entry>
      <entry>Waiting to read or update old snapshot control information.</entry>
     </row>
     <row>
-     <entry><literal>ProcArrayLock</literal></entry>
-     <entry>Waiting to get a snapshot or clearing a transaction id at
-     transaction end.</entry>
+     <entry><literal>ParallelAppend</literal></entry>
+     <entry>Waiting to choose the next subplan during Parallel Append plan
+     execution.</entry>
     </row>
     <row>
-     <entry><literal>RelCacheInitLock</literal></entry>
-     <entry>Waiting to read or write relation cache initialization file.</entry>
+     <entry><literal>ParallelHashJoin</literal></entry>
+     <entry>Waiting to synchronize workers during Parallel Hash Join plan
+     execution.</entry>
+    </row>
+    <row>
+     <entry><literal>ParallelQueryDSA</literal></entry>
+     <entry>Waiting for parallel query dynamic shared memory allocation.</entry>
+    </row>
+    <row>
+     <entry><literal>PerSessionDSA</literal></entry>
+     <entry>Waiting for parallel query dynamic shared memory allocation.</entry>
+    </row>
+    <row>
+     <entry><literal>PerSessionRecordType</literal></entry>
+     <entry>Waiting to access a parallel query's information about composite
+     types.</entry>
+    </row>
+    <row>
+     <entry><literal>PerSessionRecordTypmod</literal></entry>
+     <entry>Waiting to access a parallel query's information about type
+     modifiers that identify anonymous record types.</entry>
+    </row>
+    <row>
+     <entry><literal>PerXactPredicateList</literal></entry>
+     <entry>Waiting to access the list of predicate locks held by the current
+     serializable transaction during a parallel query.</entry>
+    </row>
+    <row>
+     <entry><literal>PredicateLockManager</literal></entry>
+     <entry>Waiting to access predicate lock information used by
+     serializable transactions.</entry>
+    </row>
+    <row>
+     <entry><literal>ProcArrayLock</literal></entry>
+     <entry>Waiting to access the shared per-process data structures
+     (typically, to get a snapshot or report a session's transaction
+     ID).</entry>
     </row>
     <row>
      <entry><literal>RelationMappingLock</literal></entry>
-     <entry>Waiting to update the relation map file used to store catalog
-     to filenode mapping.</entry>
+     <entry>Waiting to read or update
+     a <filename>pg_filenode.map</filename> file (used to track the
+     filenode assignments of certain system catalogs).</entry>
+    </row>
+    <row>
+     <entry><literal>RelCacheInitLock</literal></entry>
+     <entry>Waiting to read or update a <filename>pg_internal.init</filename>
+     relation cache initialization file.</entry>
     </row>
     <row>
      <entry><literal>ReplicationOriginLock</literal></entry>
-     <entry>Waiting to setup, drop or use replication origin.</entry>
+     <entry>Waiting to create, drop or use a replication origin.</entry>
+    </row>
+    <row>
+     <entry><literal>ReplicationOriginState</literal></entry>
+     <entry>Waiting to read or update the progress of one replication
+     origin.</entry>
     </row>
     <row>
      <entry><literal>ReplicationSlotAllocationLock</literal></entry>
@@ -1880,13 +1956,13 @@ postgres 27093 0.0 0.0 30096 2752 ? Ss 11:34 0:00 postgres: ser
      <entry>Waiting to read or update replication slot state.</entry>
     </row>
     <row>
-     <entry><literal>SInvalReadLock</literal></entry>
-     <entry>Waiting to retrieve or remove messages from shared invalidation
-     queue.</entry>
+     <entry><literal>ReplicationSlotIO</literal></entry>
+     <entry>Waiting for I/O on a replication slot.</entry>
     </row>
     <row>
-     <entry><literal>SInvalWriteLock</literal></entry>
-     <entry>Waiting to add a message in shared invalidation queue.</entry>
+     <entry><literal>SerialBuffer</literal></entry>
+     <entry>Waiting for I/O on a serializable transaction conflict SLRU
+     buffer.</entry>
     </row>
     <row>
      <entry><literal>SerializableFinishedListLock</literal></entry>
@@ -1894,36 +1970,65 @@ postgres 27093 0.0 0.0 30096 2752 ? Ss 11:34 0:00 postgres: ser
      transactions.</entry>
     </row>
     <row>
-     <entry><literal>SerializablePredicateLockListLock</literal></entry>
-     <entry>Waiting to perform an operation on a list of locks held by
+     <entry><literal>SerializablePredicateListLock</literal></entry>
+     <entry>Waiting to access the list of predicate locks held by
      serializable transactions.</entry>
     </row>
     <row>
      <entry><literal>SerializableXactHashLock</literal></entry>
-     <entry>Waiting to retrieve or store information about serializable
+     <entry>Waiting to read or update information about serializable
      transactions.</entry>
     </row>
+    <row>
+     <entry><literal>SerialSLRULock</literal></entry>
+     <entry>Waiting to access the serializable transaction conflict SLRU
+     cache.</entry>
+    </row>
+    <row>
+     <entry><literal>SharedTidBitmap</literal></entry>
+     <entry>Waiting to access a shared TID bitmap during a parallel bitmap
+     index scan.</entry>
+    </row>
+    <row>
+     <entry><literal>SharedTupleStore</literal></entry>
+     <entry>Waiting to access a shared tuple store during parallel
+     query.</entry>
+    </row>
     <row>
      <entry><literal>ShmemIndexLock</literal></entry>
      <entry>Waiting to find or allocate space in shared memory.</entry>
     </row>
+    <row>
+     <entry><literal>SInvalReadLock</literal></entry>
+     <entry>Waiting to retrieve messages from the shared catalog invalidation
+     queue.</entry>
+    </row>
+    <row>
+     <entry><literal>SInvalWriteLock</literal></entry>
+     <entry>Waiting to add a message to the shared catalog invalidation
+     queue.</entry>
+    </row>
+    <row>
+     <entry><literal>SubtransBuffer</literal></entry>
+     <entry>Waiting for I/O on a sub-transaction SLRU buffer.</entry>
+    </row>
     <row>
      <entry><literal>SubtransSLRULock</literal></entry>
      <entry>Waiting to access the sub-transaction SLRU cache.</entry>
     </row>
     <row>
      <entry><literal>SyncRepLock</literal></entry>
-     <entry>Waiting to read or update information about synchronous
-     replicas.</entry>
+     <entry>Waiting to read or update information about the state of
+     synchronous replication.</entry>
     </row>
     <row>
      <entry><literal>SyncScanLock</literal></entry>
-     <entry>Waiting to get the start location of a scan on a table for
-     synchronized scans.</entry>
+     <entry>Waiting to select the starting location of a synchronized table
+     scan.</entry>
     </row>
     <row>
      <entry><literal>TablespaceCreateLock</literal></entry>
-     <entry>Waiting to create or drop the tablespace.</entry>
+     <entry>Waiting to create or drop a tablespace.</entry>
     </row>
     <row>
      <entry><literal>TwoPhaseStateLock</literal></entry>
@@ -1933,104 +2038,30 @@ postgres 27093 0.0 0.0 30096 2752 ? Ss 11:34 0:00 postgres: ser
      <entry><literal>WALBufMappingLock</literal></entry>
      <entry>Waiting to replace a page in WAL buffers.</entry>
     </row>
+    <row>
+     <entry><literal>WALInsert</literal></entry>
+     <entry>Waiting to insert WAL data into a memory buffer.</entry>
+    </row>
     <row>
      <entry><literal>WALWriteLock</literal></entry>
      <entry>Waiting for WAL buffers to be written to disk.</entry>
     </row>
-    <row>
-     <entry><literal>XidGenLock</literal></entry>
-     <entry>Waiting to allocate or assign a transaction id.</entry>
-    </row>
-    <row>
-     <entry><literal>NotifyBuffer</literal></entry>
-     <entry>Waiting for I/O on a <command>NOTIFY</command> message SLRU
-     buffer.</entry>
-    </row>
-    <row>
-     <entry><literal>buffer_content</literal></entry>
-     <entry>Waiting to read or write a data page in memory.</entry>
-    </row>
-    <row>
-     <entry><literal>buffer_io</literal></entry>
-     <entry>Waiting for I/O on a data page.</entry>
-    </row>
-    <row>
-     <entry><literal>buffer_mapping</literal></entry>
-     <entry>Waiting to associate a data block with a buffer in the buffer
-     pool.</entry>
-    </row>
     <row>
      <entry><literal>XactBuffer</literal></entry>
      <entry>Waiting for I/O on a transaction status SLRU buffer.</entry>
     </row>
     <row>
-     <entry><literal>CommitTsBuffer</literal></entry>
-     <entry>Waiting for I/O on a commit timestamp SLRU buffer.</entry>
+     <entry><literal>XactSLRULock</literal></entry>
+     <entry>Waiting to access the transaction status SLRU cache.</entry>
     </row>
     <row>
-     <entry><literal>lock_manager</literal></entry>
-     <entry>Waiting to add or examine locks for backends, or waiting to
-     join or exit a locking group (used by parallel query).</entry>
+     <entry><literal>XactTruncationLock</literal></entry>
+     <entry>Waiting to execute <function>pg_xact_status</function> or update
+     the oldest transaction ID available to it.</entry>
     </row>
     <row>
-     <entry><literal>MultiXactMember</literal></entry>
-     <entry>Waiting for I/O on a multixact member SLRU buffer.</entry>
-    </row>
-    <row>
-     <entry><literal>MultiXactOffsetBuffer</literal></entry>
-     <entry>Waiting for I/O on a multixact offset SLRU buffer.</entry>
-    </row>
-    <row>
-     <entry><literal>SerialBuffer</literal></entry>
-     <entry>Waiting for I/O on a serializable transaction conflict SLRU
-     buffer.</entry>
-    </row>
-    <row>
-     <entry><literal>parallel_append</literal></entry>
-     <entry>Waiting to choose the next subplan during Parallel Append plan
-     execution.</entry>
-    </row>
-    <row>
-     <entry><literal>parallel_hash_join</literal></entry>
-     <entry>Waiting to allocate or exchange a chunk of memory or update
-     counters during Parallel Hash plan execution.</entry>
-    </row>
-    <row>
-     <entry><literal>parallel_query_dsa</literal></entry>
-     <entry>Waiting for parallel query dynamic shared memory allocation lock.</entry>
-    </row>
-    <row>
-     <entry><literal>predicate_lock_manager</literal></entry>
-     <entry>Waiting to add or examine predicate lock information.</entry>
-    </row>
-    <row>
-     <entry><literal>proc</literal></entry>
-     <entry>Waiting to read or update the fast-path lock information.</entry>
-    </row>
-    <row>
-     <entry><literal>replication_origin</literal></entry>
-     <entry>Waiting to read or update the replication progress.</entry>
-    </row>
-    <row>
-     <entry><literal>replication_slot_io</literal></entry>
-     <entry>Waiting for I/O on a replication slot.</entry>
-    </row>
-    <row>
-     <entry><literal>serializable_xact</literal></entry>
-     <entry>Waiting to perform an operation on a serializable transaction
-     in a parallel query.</entry>
-    </row>
-    <row>
-     <entry><literal>SubtransBuffer</literal></entry>
-     <entry>Waiting for I/O on a sub-transaction SLRU buffer.</entry>
-    </row>
-    <row>
-     <entry><literal>tbm</literal></entry>
-     <entry>Waiting for TBM shared iterator lock.</entry>
-    </row>
-    <row>
-     <entry><literal>wal_insert</literal></entry>
-     <entry>Waiting to insert WAL into a memory buffer.</entry>
+     <entry><literal>XidGenLock</literal></entry>
+     <entry>Waiting to allocate a new transaction ID.</entry>
     </row>
    </tbody>
   </tgroup>
@@ -117,7 +117,7 @@ GetSessionDsmHandle(void)
 	dsa_space = shm_toc_allocate(toc, SESSION_DSA_SIZE);
 	dsa = dsa_create_in_place(dsa_space,
 							  SESSION_DSA_SIZE,
-							  LWTRANCHE_SESSION_DSA,
+							  LWTRANCHE_PER_SESSION_DSA,
 							  seg);
 	shm_toc_insert(toc, SESSION_KEY_DSA, dsa_space);
 
@@ -889,7 +889,7 @@ tbm_prepare_shared_iterate(TIDBitmap *tbm)
 			pg_atomic_add_fetch_u32(&ptchunks->refcount, 1);
 
 		/* Initialize the iterator lock */
-		LWLockInitialize(&istate->lock, LWTRANCHE_TBM);
+		LWLockInitialize(&istate->lock, LWTRANCHE_SHARED_TIDBITMAP);
 
 		/* Initialize the shared iterator state */
 		istate->schunkbit = 0;
@@ -506,7 +506,7 @@ ReplicationOriginShmemInit(void)
 	{
 		int			i;
 
-		replication_states_ctl->tranche_id = LWTRANCHE_REPLICATION_ORIGIN;
+		replication_states_ctl->tranche_id = LWTRANCHE_REPLICATION_ORIGIN_STATE;
 
 		MemSet(replication_states, 0, ReplicationOriginShmemSize());
 
@@ -153,7 +153,8 @@ ReplicationSlotsShmemInit(void)
 
 			/* everything else is zeroed by the memset above */
 			SpinLockInit(&slot->mutex);
-			LWLockInitialize(&slot->io_in_progress_lock, LWTRANCHE_REPLICATION_SLOT_IO_IN_PROGRESS);
+			LWLockInitialize(&slot->io_in_progress_lock,
+							 LWTRANCHE_REPLICATION_SLOT_IO);
 			ConditionVariableInit(&slot->active_cv);
 		}
 	}
@@ -132,7 +132,7 @@ InitBufferPool(void)
 						 LWTRANCHE_BUFFER_CONTENT);
 
 		LWLockInitialize(BufferDescriptorGetIOLock(buf),
-						 LWTRANCHE_BUFFER_IO_IN_PROGRESS);
+						 LWTRANCHE_BUFFER_IO);
 	}
 
 	/* Correct last entry of linked list */
@@ -936,13 +936,13 @@ LockAcquireExtended(const LOCKTAG *locktag,
 		 * FastPathStrongRelationLocks->counts becomes visible after we test
 		 * it has yet to begin to transfer fast-path locks.
 		 */
-		LWLockAcquire(&MyProc->backendLock, LW_EXCLUSIVE);
+		LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
 		if (FastPathStrongRelationLocks->count[fasthashcode] != 0)
 			acquired = false;
 		else
 			acquired = FastPathGrantRelationLock(locktag->locktag_field2,
 												 lockmode);
-		LWLockRelease(&MyProc->backendLock);
+		LWLockRelease(&MyProc->fpInfoLock);
 		if (acquired)
 		{
@@ -2085,10 +2085,10 @@ LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
 		 * We might not find the lock here, even if we originally entered it
 		 * here.  Another backend may have moved it to the main table.
 		 */
-		LWLockAcquire(&MyProc->backendLock, LW_EXCLUSIVE);
+		LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
 		released = FastPathUnGrantRelationLock(locktag->locktag_field2,
 											   lockmode);
-		LWLockRelease(&MyProc->backendLock);
+		LWLockRelease(&MyProc->fpInfoLock);
 		if (released)
 		{
 			RemoveLocalLock(locallock);
@@ -2291,7 +2291,7 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
 			 */
 			if (!have_fast_path_lwlock)
 			{
-				LWLockAcquire(&MyProc->backendLock, LW_EXCLUSIVE);
+				LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
 				have_fast_path_lwlock = true;
 			}
 
@@ -2308,7 +2308,7 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
 				 * transferred to the main lock table.  That's going to require
 				 * some extra work, so release our fast-path lock before starting.
 				 */
-				LWLockRelease(&MyProc->backendLock);
+				LWLockRelease(&MyProc->fpInfoLock);
 				have_fast_path_lwlock = false;
 
 				/*
@@ -2334,7 +2334,7 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
 
 	/* Done with the fast-path data structures */
 	if (have_fast_path_lwlock)
-		LWLockRelease(&MyProc->backendLock);
+		LWLockRelease(&MyProc->fpInfoLock);
 
 	/*
 	 * Now, scan each lock partition separately.
|
|||||||
PGPROC *proc = &ProcGlobal->allProcs[i];
|
PGPROC *proc = &ProcGlobal->allProcs[i];
|
||||||
uint32 f;
|
uint32 f;
|
||||||
|
|
||||||
LWLockAcquire(&proc->backendLock, LW_EXCLUSIVE);
|
LWLockAcquire(&proc->fpInfoLock, LW_EXCLUSIVE);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* If the target backend isn't referencing the same database as the
|
* If the target backend isn't referencing the same database as the
|
||||||
@@ -2746,8 +2746,8 @@ FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag
 		 *
 		 * proc->databaseId is set at backend startup time and never changes
 		 * thereafter, so it might be safe to perform this test before
-		 * acquiring &proc->backendLock.  In particular, it's certainly safe
-		 * to assume that if the target backend holds any fast-path locks, it
+		 * acquiring &proc->fpInfoLock.  In particular, it's certainly safe to
+		 * assume that if the target backend holds any fast-path locks, it
 		 * must have performed a memory-fencing operation (in particular, an
 		 * LWLock acquisition) since setting proc->databaseId.  However, it's
 		 * less clear that our backend is certain to have performed a memory
@@ -2756,7 +2756,7 @@ FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag
 		 */
 		if (proc->databaseId != locktag->locktag_field1)
 		{
-			LWLockRelease(&proc->backendLock);
+			LWLockRelease(&proc->fpInfoLock);
 			continue;
 		}
 
@@ -2783,7 +2783,7 @@ FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag
 			if (!proclock)
 			{
 				LWLockRelease(partitionLock);
-				LWLockRelease(&proc->backendLock);
+				LWLockRelease(&proc->fpInfoLock);
 				return false;
 			}
 			GrantLock(proclock->tag.myLock, proclock, lockmode);
@@ -2794,7 +2794,7 @@ FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag
 			/* No need to examine remaining slots. */
 			break;
 		}
-		LWLockRelease(&proc->backendLock);
+		LWLockRelease(&proc->fpInfoLock);
 	}
 	return true;
 }
@@ -2816,7 +2816,7 @@ FastPathGetRelationLockEntry(LOCALLOCK *locallock)
 	Oid			relid = locktag->locktag_field2;
 	uint32		f;
 
-	LWLockAcquire(&MyProc->backendLock, LW_EXCLUSIVE);
+	LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
 
 	for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
 	{
@@ -2839,7 +2839,7 @@ FastPathGetRelationLockEntry(LOCALLOCK *locallock)
 		if (!proclock)
 		{
 			LWLockRelease(partitionLock);
-			LWLockRelease(&MyProc->backendLock);
+			LWLockRelease(&MyProc->fpInfoLock);
 			ereport(ERROR,
 					(errcode(ERRCODE_OUT_OF_MEMORY),
 					 errmsg("out of shared memory"),
@@ -2854,7 +2854,7 @@ FastPathGetRelationLockEntry(LOCALLOCK *locallock)
 		break;
 	}
 
-	LWLockRelease(&MyProc->backendLock);
+	LWLockRelease(&MyProc->fpInfoLock);
 
 	/* Lock may have already been transferred by some other backend. */
 	if (proclock == NULL)
@@ -2980,7 +2980,7 @@ GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
 			if (proc == MyProc)
 				continue;
 
-			LWLockAcquire(&proc->backendLock, LW_SHARED);
+			LWLockAcquire(&proc->fpInfoLock, LW_SHARED);
 
 			/*
 			 * If the target backend isn't referencing the same database as
@@ -2992,7 +2992,7 @@ GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
 			 */
 			if (proc->databaseId != locktag->locktag_field1)
 			{
-				LWLockRelease(&proc->backendLock);
+				LWLockRelease(&proc->fpInfoLock);
 				continue;
 			}
 
@@ -3030,7 +3030,7 @@ GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
 					break;
 				}
 
-			LWLockRelease(&proc->backendLock);
+			LWLockRelease(&proc->fpInfoLock);
 		}
 	}
 
@@ -3599,7 +3599,7 @@ GetLockStatusData(void)
 		PGPROC	   *proc = &ProcGlobal->allProcs[i];
 		uint32		f;
 
-		LWLockAcquire(&proc->backendLock, LW_SHARED);
+		LWLockAcquire(&proc->fpInfoLock, LW_SHARED);
 
 		for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; ++f)
 		{
@@ -3659,7 +3659,7 @@ GetLockStatusData(void)
 			el++;
 		}
 
-		LWLockRelease(&proc->backendLock);
+		LWLockRelease(&proc->fpInfoLock);
 	}
 
 	/*
@@ -4381,7 +4381,7 @@ lock_twophase_postabort(TransactionId xid, uint16 info,
 	 * as MyProc->lxid, you might wonder if we really need both.  The
 	 * difference is that MyProc->lxid is set and cleared unlocked, and
 	 * examined by procarray.c, while fpLocalTransactionId is protected by
-	 * backendLock and is used only by the locking subsystem.  Doing it this
+	 * fpInfoLock and is used only by the locking subsystem.  Doing it this
 	 * way makes it easier to verify that there are no funny race conditions.
 	 *
 	 * We don't bother recording this lock in the local lock table, since it's
@@ -4393,7 +4393,7 @@ VirtualXactLockTableInsert(VirtualTransactionId vxid)
 {
 	Assert(VirtualTransactionIdIsValid(vxid));
 
-	LWLockAcquire(&MyProc->backendLock, LW_EXCLUSIVE);
+	LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
 
 	Assert(MyProc->backendId == vxid.backendId);
 	Assert(MyProc->fpLocalTransactionId == InvalidLocalTransactionId);
|
|||||||
MyProc->fpVXIDLock = true;
|
MyProc->fpVXIDLock = true;
|
||||||
MyProc->fpLocalTransactionId = vxid.localTransactionId;
|
MyProc->fpLocalTransactionId = vxid.localTransactionId;
|
||||||
|
|
||||||
LWLockRelease(&MyProc->backendLock);
|
LWLockRelease(&MyProc->fpInfoLock);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@@ -4422,14 +4422,14 @@ VirtualXactLockTableCleanup(void)
 	/*
 	 * Clean up shared memory state.
 	 */
-	LWLockAcquire(&MyProc->backendLock, LW_EXCLUSIVE);
+	LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
 
 	fastpath = MyProc->fpVXIDLock;
 	lxid = MyProc->fpLocalTransactionId;
 	MyProc->fpVXIDLock = false;
 	MyProc->fpLocalTransactionId = InvalidLocalTransactionId;
 
-	LWLockRelease(&MyProc->backendLock);
+	LWLockRelease(&MyProc->fpInfoLock);
 
 	/*
 	 * If fpVXIDLock has been cleared without touching fpLocalTransactionId,
@@ -4485,13 +4485,13 @@ VirtualXactLock(VirtualTransactionId vxid, bool wait)
 	 * against the ones we're waiting for.  The target backend will only set
 	 * or clear lxid while holding this lock.
 	 */
-	LWLockAcquire(&proc->backendLock, LW_EXCLUSIVE);
+	LWLockAcquire(&proc->fpInfoLock, LW_EXCLUSIVE);
 
 	/* If the transaction has ended, our work here is done. */
 	if (proc->backendId != vxid.backendId
 		|| proc->fpLocalTransactionId != vxid.localTransactionId)
 	{
-		LWLockRelease(&proc->backendLock);
+		LWLockRelease(&proc->fpInfoLock);
 		return true;
 	}
 
@@ -4501,7 +4501,7 @@ VirtualXactLock(VirtualTransactionId vxid, bool wait)
 	 */
 	if (!wait)
 	{
-		LWLockRelease(&proc->backendLock);
+		LWLockRelease(&proc->fpInfoLock);
 		return false;
 	}
 
@@ -4526,7 +4526,7 @@ VirtualXactLock(VirtualTransactionId vxid, bool wait)
 		if (!proclock)
 		{
 			LWLockRelease(partitionLock);
-			LWLockRelease(&proc->backendLock);
+			LWLockRelease(&proc->fpInfoLock);
 			ereport(ERROR,
 					(errcode(ERRCODE_OUT_OF_MEMORY),
 					 errmsg("out of shared memory"),
@@ -4540,7 +4540,7 @@ VirtualXactLock(VirtualTransactionId vxid, bool wait)
 	}
 
 	/* Done with proc->fpLockBits */
-	LWLockRelease(&proc->backendLock);
+	LWLockRelease(&proc->fpInfoLock);
 
 	/* Time to wait. */
 	(void) LockAcquire(&tag, ShareLock, false, false);
@@ -121,6 +121,9 @@ extern slock_t *ShmemLock;
  * 3. Extensions can create new tranches, via either RequestNamedLWLockTranche
  * or LWLockRegisterTranche.  The names of these that are known in the current
  * process appear in LWLockTrancheNames[].
+ *
+ * All these names are user-visible as wait event names, so choose with care
+ * ... and do not forget to update the documentation's list of wait events.
  */
 
 static const char *const BuiltinTrancheNames[] = {
|
|||||||
/* LWTRANCHE_SERIAL_BUFFER: */
|
/* LWTRANCHE_SERIAL_BUFFER: */
|
||||||
"SerialBuffer",
|
"SerialBuffer",
|
||||||
/* LWTRANCHE_WAL_INSERT: */
|
/* LWTRANCHE_WAL_INSERT: */
|
||||||
"wal_insert",
|
"WALInsert",
|
||||||
/* LWTRANCHE_BUFFER_CONTENT: */
|
/* LWTRANCHE_BUFFER_CONTENT: */
|
||||||
"buffer_content",
|
"BufferContent",
|
||||||
/* LWTRANCHE_BUFFER_IO_IN_PROGRESS: */
|
/* LWTRANCHE_BUFFER_IO: */
|
||||||
"buffer_io",
|
"BufferIO",
|
||||||
/* LWTRANCHE_REPLICATION_ORIGIN: */
|
/* LWTRANCHE_REPLICATION_ORIGIN_STATE: */
|
||||||
"replication_origin",
|
"ReplicationOriginState",
|
||||||
/* LWTRANCHE_REPLICATION_SLOT_IO_IN_PROGRESS: */
|
/* LWTRANCHE_REPLICATION_SLOT_IO: */
|
||||||
"replication_slot_io",
|
"ReplicationSlotIO",
|
||||||
/* LWTRANCHE_PROC: */
|
/* LWTRANCHE_LOCK_FASTPATH: */
|
||||||
"proc",
|
"LockFastPath",
|
||||||
/* LWTRANCHE_BUFFER_MAPPING: */
|
/* LWTRANCHE_BUFFER_MAPPING: */
|
||||||
"buffer_mapping",
|
"BufferMapping",
|
||||||
/* LWTRANCHE_LOCK_MANAGER: */
|
/* LWTRANCHE_LOCK_MANAGER: */
|
||||||
"lock_manager",
|
"LockManager",
|
||||||
/* LWTRANCHE_PREDICATE_LOCK_MANAGER: */
|
/* LWTRANCHE_PREDICATE_LOCK_MANAGER: */
|
||||||
"predicate_lock_manager",
|
"PredicateLockManager",
|
||||||
/* LWTRANCHE_PARALLEL_HASH_JOIN: */
|
/* LWTRANCHE_PARALLEL_HASH_JOIN: */
|
||||||
"parallel_hash_join",
|
"ParallelHashJoin",
|
||||||
/* LWTRANCHE_PARALLEL_QUERY_DSA: */
|
/* LWTRANCHE_PARALLEL_QUERY_DSA: */
|
||||||
"parallel_query_dsa",
|
"ParallelQueryDSA",
|
||||||
/* LWTRANCHE_SESSION_DSA: */
|
/* LWTRANCHE_PER_SESSION_DSA: */
|
||||||
"session_dsa",
|
"PerSessionDSA",
|
||||||
/* LWTRANCHE_SESSION_RECORD_TABLE: */
|
/* LWTRANCHE_PER_SESSION_RECORD_TYPE: */
|
||||||
"session_record_table",
|
"PerSessionRecordType",
|
||||||
/* LWTRANCHE_SESSION_TYPMOD_TABLE: */
|
/* LWTRANCHE_PER_SESSION_RECORD_TYPMOD: */
|
||||||
"session_typmod_table",
|
"PerSessionRecordTypmod",
|
||||||
/* LWTRANCHE_SHARED_TUPLESTORE: */
|
/* LWTRANCHE_SHARED_TUPLESTORE: */
|
||||||
"shared_tuplestore",
|
"SharedTupleStore",
|
||||||
/* LWTRANCHE_TBM: */
|
/* LWTRANCHE_SHARED_TIDBITMAP: */
|
||||||
"tbm",
|
"SharedTidBitmap",
|
||||||
/* LWTRANCHE_PARALLEL_APPEND: */
|
/* LWTRANCHE_PARALLEL_APPEND: */
|
||||||
"parallel_append",
|
"ParallelAppend",
|
||||||
/* LWTRANCHE_SXACT: */
|
/* LWTRANCHE_PER_XACT_PREDICATE_LIST: */
|
||||||
"serializable_xact"
|
"PerXactPredicateList"
|
||||||
};
|
};
|
||||||
|
|
||||||
StaticAssertDecl(lengthof(BuiltinTrancheNames) ==
|
StaticAssertDecl(lengthof(BuiltinTrancheNames) ==
|
||||||
@@ -640,7 +643,10 @@ LWLockNewTrancheId(void)
  *
  * This routine will save a pointer to the tranche name passed as an argument,
  * so the name should be allocated in a backend-lifetime context
- * (TopMemoryContext, static constant, or similar).
+ * (shared memory, TopMemoryContext, static constant, or similar).
+ *
+ * The tranche name will be user-visible as a wait event name, so try to
+ * use a name that fits the style for those.
  */
 void
 LWLockRegisterTranche(int tranche_id, const char *tranche_name)
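The dynamic registration path documented above gets the same naming advice. A hedged sketch — the lock and tranche names here are illustrative, not from this commit — of allocating a tranche id and registering a name in the new style:

/* Sketch: allocate a tranche id, register a MultiWordName-style name
 * for it in this process, and initialize an LWLock that lives in
 * shared memory with that tranche. */
static LWLock *my_queue_lock;	/* assumed to point into shared memory */

static void
my_shmem_startup(void)
{
	int			tranche_id = LWLockNewTrancheId();

	/* the name must remain allocated for the backend's lifetime */
	LWLockRegisterTranche(tranche_id, "MyDynamicQueue");
	LWLockInitialize(my_queue_lock, tranche_id);
}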
@@ -690,6 +696,9 @@ LWLockRegisterTranche(int tranche_id, const char *tranche_name)
  * will be ignored.  (We could raise an error, but it seems better to make
  * it a no-op, so that libraries containing such calls can be reloaded if
  * needed.)
+ *
+ * The tranche name will be user-visible as a wait event name, so try to
+ * use a name that fits the style for those.
  */
 void
 RequestNamedLWLockTranche(const char *tranche_name, int num_lwlocks)
@@ -2,7 +2,8 @@
 # these are defined here.  If you add a lock, add it to the end to avoid
 # renumbering the existing locks; if you remove a lock, consider leaving a gap
 # in the numbering sequence for the benefit of DTrace and other external
-# debugging scripts.
+# debugging scripts.  Also, do not forget to update the list of wait events
+# in the user documentation.
 
 # 0 is available; was formerly BufFreelistLock
 ShmemIndexLock 1
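Each entry in this file becomes an individual, globally named LWLock (the build generates lwlocknames.h with one macro per entry), which backend code references directly by name — the predicate.c hunks below do exactly that. For instance:

/* Individual LWLocks from lwlocknames.txt are used by name: */
LWLockAcquire(ShmemIndexLock, LW_EXCLUSIVE);
/* ... find or allocate a structure in the shmem index ... */
LWLockRelease(ShmemIndexLock);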
@@ -34,7 +35,7 @@ NotifySLRULock 26
 NotifyQueueLock 27
 SerializableXactHashLock 28
 SerializableFinishedListLock 29
-SerializablePredicateLockListLock 30
+SerializablePredicateListLock 30
 SerialSLRULock 31
 SyncRepLock 32
 BackgroundWorkerLock 33
@@ -89,7 +89,7 @@
  *	- Protects the list of transactions which have completed but which
  *	  may yet matter because they overlap still-active transactions.
  *
- * SerializablePredicateLockListLock
+ * SerializablePredicateListLock
  *	- Protects the linked list of locks held by a transaction.  Note
  *	  that the locks themselves are also covered by the partition
  *	  locks of their respective lock targets; this lock only affects
@@ -118,11 +118,11 @@
  *	  than its own active transaction must acquire an exclusive
  *	  lock.
  *
- * SERIALIZABLEXACT's member 'predicateLockListLock'
- *	- Protects the linked list of locks held by a transaction.  Only
- *	  needed for parallel mode, where multiple backends share the
+ * SERIALIZABLEXACT's member 'perXactPredicateListLock'
+ *	- Protects the linked list of predicate locks held by a transaction.
+ *	  Only needed for parallel mode, where multiple backends share the
  *	  same SERIALIZABLEXACT object.  Not needed if
- *	  SerializablePredicateLockListLock is held exclusively.
+ *	  SerializablePredicateListLock is held exclusively.
  *
  * PredicateLockHashPartitionLock(hashcode)
  *	- The same lock protects a target, all locks on that target, and
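The comment above describes a two-level scheme for the predicate-lock lists; paraphrased as a sketch, the acquisition pattern that the predicate.c hunks below apply is:

/* Sketch of the pattern used throughout predicate.c after this commit:
 * the global SerializablePredicateListLock guards the per-transaction
 * predicate lock lists as a group, and in parallel mode the
 * SERIALIZABLEXACT's own perXactPredicateListLock additionally guards
 * the list shared by cooperating backends. */
LWLockAcquire(SerializablePredicateListLock, LW_SHARED);
if (IsInParallelMode())
	LWLockAcquire(&sxact->perXactPredicateListLock, LW_EXCLUSIVE);

/* ... walk or modify sxact->predicateLocks ... */

if (IsInParallelMode())
	LWLockRelease(&sxact->perXactPredicateListLock);
LWLockRelease(SerializablePredicateListLock);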
@@ -1186,8 +1186,8 @@ InitPredicateLocks(void)
 		memset(PredXact->element, 0, requestSize);
 		for (i = 0; i < max_table_size; i++)
 		{
-			LWLockInitialize(&PredXact->element[i].sxact.predicateLockListLock,
-							 LWTRANCHE_SXACT);
+			LWLockInitialize(&PredXact->element[i].sxact.perXactPredicateListLock,
+							 LWTRANCHE_PER_XACT_PREDICATE_LIST);
 			SHMQueueInsertBefore(&(PredXact->availableList),
 								 &(PredXact->element[i].link));
 		}
@@ -2042,7 +2042,7 @@ CoarserLockCovers(const PREDICATELOCKTARGETTAG *newtargettag)
 
 /*
  * Remove the dummy entry from the predicate lock target hash, to free up some
- * scratch space.  The caller must be holding SerializablePredicateLockListLock,
+ * scratch space.  The caller must be holding SerializablePredicateListLock,
  * and must restore the entry with RestoreScratchTarget() before releasing the
  * lock.
  *
@@ -2054,7 +2054,7 @@ RemoveScratchTarget(bool lockheld)
 {
 	bool		found;
 
-	Assert(LWLockHeldByMe(SerializablePredicateLockListLock));
+	Assert(LWLockHeldByMe(SerializablePredicateListLock));
 
 	if (!lockheld)
 		LWLockAcquire(ScratchPartitionLock, LW_EXCLUSIVE);
@@ -2075,7 +2075,7 @@ RestoreScratchTarget(bool lockheld)
 {
 	bool		found;
 
-	Assert(LWLockHeldByMe(SerializablePredicateLockListLock));
+	Assert(LWLockHeldByMe(SerializablePredicateListLock));
 
 	if (!lockheld)
 		LWLockAcquire(ScratchPartitionLock, LW_EXCLUSIVE);
@@ -2097,7 +2097,7 @@ RemoveTargetIfNoLongerUsed(PREDICATELOCKTARGET *target, uint32 targettaghash)
 {
 	PREDICATELOCKTARGET *rmtarget PG_USED_FOR_ASSERTS_ONLY;
 
-	Assert(LWLockHeldByMe(SerializablePredicateLockListLock));
+	Assert(LWLockHeldByMe(SerializablePredicateListLock));
 
 	/* Can't remove it until no locks at this target. */
 	if (!SHMQueueEmpty(&target->predicateLocks))
@@ -2129,10 +2129,10 @@ DeleteChildTargetLocks(const PREDICATELOCKTARGETTAG *newtargettag)
 	SERIALIZABLEXACT *sxact;
 	PREDICATELOCK *predlock;
 
-	LWLockAcquire(SerializablePredicateLockListLock, LW_SHARED);
+	LWLockAcquire(SerializablePredicateListLock, LW_SHARED);
 	sxact = MySerializableXact;
 	if (IsInParallelMode())
-		LWLockAcquire(&sxact->predicateLockListLock, LW_EXCLUSIVE);
+		LWLockAcquire(&sxact->perXactPredicateListLock, LW_EXCLUSIVE);
 	predlock = (PREDICATELOCK *)
 		SHMQueueNext(&(sxact->predicateLocks),
 					 &(sxact->predicateLocks),
@@ -2187,8 +2187,8 @@ DeleteChildTargetLocks(const PREDICATELOCKTARGETTAG *newtargettag)
 		predlock = nextpredlock;
 	}
 	if (IsInParallelMode())
-		LWLockRelease(&sxact->predicateLockListLock);
-	LWLockRelease(SerializablePredicateLockListLock);
+		LWLockRelease(&sxact->perXactPredicateListLock);
+	LWLockRelease(SerializablePredicateListLock);
 }
 
 /*
@@ -2385,9 +2385,9 @@ CreatePredicateLock(const PREDICATELOCKTARGETTAG *targettag,
 
 	partitionLock = PredicateLockHashPartitionLock(targettaghash);
 
-	LWLockAcquire(SerializablePredicateLockListLock, LW_SHARED);
+	LWLockAcquire(SerializablePredicateListLock, LW_SHARED);
 	if (IsInParallelMode())
-		LWLockAcquire(&sxact->predicateLockListLock, LW_EXCLUSIVE);
+		LWLockAcquire(&sxact->perXactPredicateListLock, LW_EXCLUSIVE);
 	LWLockAcquire(partitionLock, LW_EXCLUSIVE);
 
 	/* Make sure that the target is represented. */
@@ -2426,8 +2426,8 @@ CreatePredicateLock(const PREDICATELOCKTARGETTAG *targettag,
 
 	LWLockRelease(partitionLock);
 	if (IsInParallelMode())
-		LWLockRelease(&sxact->predicateLockListLock);
-	LWLockRelease(SerializablePredicateLockListLock);
+		LWLockRelease(&sxact->perXactPredicateListLock);
+	LWLockRelease(SerializablePredicateListLock);
 }
 
 /*
@@ -2586,7 +2586,7 @@ PredicateLockTID(Relation relation, ItemPointer tid, Snapshot snapshot,
  *
  * Remove a predicate lock target along with any locks held for it.
  *
- * Caller must hold SerializablePredicateLockListLock and the
+ * Caller must hold SerializablePredicateListLock and the
  * appropriate hash partition lock for the target.
  */
 static void
@@ -2597,7 +2597,7 @@ DeleteLockTarget(PREDICATELOCKTARGET *target, uint32 targettaghash)
 	PREDICATELOCK *nextpredlock;
 	bool		found;
 
-	Assert(LWLockHeldByMeInMode(SerializablePredicateLockListLock,
+	Assert(LWLockHeldByMeInMode(SerializablePredicateListLock,
 								LW_EXCLUSIVE));
 	Assert(LWLockHeldByMe(PredicateLockHashPartitionLock(targettaghash)));
 
@@ -2658,7 +2658,7 @@ DeleteLockTarget(PREDICATELOCKTARGET *target, uint32 targettaghash)
  * covers it, or if we are absolutely certain that no one will need to
  * refer to that lock in the future.
  *
- * Caller must hold SerializablePredicateLockListLock exclusively.
+ * Caller must hold SerializablePredicateListLock exclusively.
  */
 static bool
 TransferPredicateLocksToNewTarget(PREDICATELOCKTARGETTAG oldtargettag,
@@ -2673,7 +2673,7 @@ TransferPredicateLocksToNewTarget(PREDICATELOCKTARGETTAG oldtargettag,
 	bool		found;
 	bool		outOfShmem = false;
 
-	Assert(LWLockHeldByMeInMode(SerializablePredicateLockListLock,
+	Assert(LWLockHeldByMeInMode(SerializablePredicateListLock,
 								LW_EXCLUSIVE));
 
 	oldtargettaghash = PredicateLockTargetTagHashCode(&oldtargettag);
@@ -2924,7 +2924,7 @@ DropAllPredicateLocksFromTable(Relation relation, bool transfer)
 	heaptarget = NULL;
 
 	/* Acquire locks on all lock partitions */
-	LWLockAcquire(SerializablePredicateLockListLock, LW_EXCLUSIVE);
+	LWLockAcquire(SerializablePredicateListLock, LW_EXCLUSIVE);
 	for (i = 0; i < NUM_PREDICATELOCK_PARTITIONS; i++)
 		LWLockAcquire(PredicateLockHashPartitionLockByIndex(i), LW_EXCLUSIVE);
 	LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
@ -3065,7 +3065,7 @@ DropAllPredicateLocksFromTable(Relation relation, bool transfer)
|
|||||||
LWLockRelease(SerializableXactHashLock);
|
LWLockRelease(SerializableXactHashLock);
|
||||||
for (i = NUM_PREDICATELOCK_PARTITIONS - 1; i >= 0; i--)
|
for (i = NUM_PREDICATELOCK_PARTITIONS - 1; i >= 0; i--)
|
||||||
LWLockRelease(PredicateLockHashPartitionLockByIndex(i));
|
LWLockRelease(PredicateLockHashPartitionLockByIndex(i));
|
||||||
LWLockRelease(SerializablePredicateLockListLock);
|
LWLockRelease(SerializablePredicateListLock);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -3131,7 +3131,7 @@ PredicateLockPageSplit(Relation relation, BlockNumber oldblkno,
|
|||||||
relation->rd_id,
|
relation->rd_id,
|
||||||
newblkno);
|
newblkno);
|
||||||
|
|
||||||
LWLockAcquire(SerializablePredicateLockListLock, LW_EXCLUSIVE);
|
LWLockAcquire(SerializablePredicateListLock, LW_EXCLUSIVE);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Try copying the locks over to the new page's tag, creating it if
|
* Try copying the locks over to the new page's tag, creating it if
|
||||||
@ -3167,7 +3167,7 @@ PredicateLockPageSplit(Relation relation, BlockNumber oldblkno,
|
|||||||
Assert(success);
|
Assert(success);
|
||||||
}
|
}
|
||||||
|
|
||||||
LWLockRelease(SerializablePredicateLockListLock);
|
LWLockRelease(SerializablePredicateListLock);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -3748,7 +3748,7 @@ ClearOldPredicateLocks(void)
|
|||||||
/*
|
/*
|
||||||
* Loop through predicate locks on dummy transaction for summarized data.
|
* Loop through predicate locks on dummy transaction for summarized data.
|
||||||
*/
|
*/
|
||||||
LWLockAcquire(SerializablePredicateLockListLock, LW_SHARED);
|
LWLockAcquire(SerializablePredicateListLock, LW_SHARED);
|
||||||
predlock = (PREDICATELOCK *)
|
predlock = (PREDICATELOCK *)
|
||||||
SHMQueueNext(&OldCommittedSxact->predicateLocks,
|
SHMQueueNext(&OldCommittedSxact->predicateLocks,
|
||||||
&OldCommittedSxact->predicateLocks,
|
&OldCommittedSxact->predicateLocks,
|
||||||
@ -3804,7 +3804,7 @@ ClearOldPredicateLocks(void)
|
|||||||
predlock = nextpredlock;
|
predlock = nextpredlock;
|
||||||
}
|
}
|
||||||
|
|
||||||
LWLockRelease(SerializablePredicateLockListLock);
|
LWLockRelease(SerializablePredicateListLock);
|
||||||
LWLockRelease(SerializableFinishedListLock);
|
LWLockRelease(SerializableFinishedListLock);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -3845,9 +3845,9 @@ ReleaseOneSerializableXact(SERIALIZABLEXACT *sxact, bool partial,
|
|||||||
* First release all the predicate locks held by this xact (or transfer
|
* First release all the predicate locks held by this xact (or transfer
|
||||||
* them to OldCommittedSxact if summarize is true)
|
* them to OldCommittedSxact if summarize is true)
|
||||||
*/
|
*/
|
||||||
LWLockAcquire(SerializablePredicateLockListLock, LW_SHARED);
|
LWLockAcquire(SerializablePredicateListLock, LW_SHARED);
|
||||||
if (IsInParallelMode())
|
if (IsInParallelMode())
|
||||||
LWLockAcquire(&sxact->predicateLockListLock, LW_EXCLUSIVE);
|
LWLockAcquire(&sxact->perXactPredicateListLock, LW_EXCLUSIVE);
|
||||||
predlock = (PREDICATELOCK *)
|
predlock = (PREDICATELOCK *)
|
||||||
SHMQueueNext(&(sxact->predicateLocks),
|
SHMQueueNext(&(sxact->predicateLocks),
|
||||||
&(sxact->predicateLocks),
|
&(sxact->predicateLocks),
|
||||||
@ -3928,8 +3928,8 @@ ReleaseOneSerializableXact(SERIALIZABLEXACT *sxact, bool partial,
|
|||||||
SHMQueueInit(&sxact->predicateLocks);
|
SHMQueueInit(&sxact->predicateLocks);
|
||||||
|
|
||||||
if (IsInParallelMode())
|
if (IsInParallelMode())
|
||||||
LWLockRelease(&sxact->predicateLockListLock);
|
LWLockRelease(&sxact->perXactPredicateListLock);
|
||||||
LWLockRelease(SerializablePredicateLockListLock);
|
LWLockRelease(SerializablePredicateListLock);
|
||||||
|
|
||||||
sxidtag.xid = sxact->topXid;
|
sxidtag.xid = sxact->topXid;
|
||||||
LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
|
LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
|
||||||
@ -4302,9 +4302,9 @@ CheckTargetForConflictsIn(PREDICATELOCKTARGETTAG *targettag)
|
|||||||
uint32 predlockhashcode;
|
uint32 predlockhashcode;
|
||||||
PREDICATELOCK *rmpredlock;
|
PREDICATELOCK *rmpredlock;
|
||||||
|
|
||||||
LWLockAcquire(SerializablePredicateLockListLock, LW_SHARED);
|
LWLockAcquire(SerializablePredicateListLock, LW_SHARED);
|
||||||
if (IsInParallelMode())
|
if (IsInParallelMode())
|
||||||
LWLockAcquire(&MySerializableXact->predicateLockListLock, LW_EXCLUSIVE);
|
LWLockAcquire(&MySerializableXact->perXactPredicateListLock, LW_EXCLUSIVE);
|
||||||
LWLockAcquire(partitionLock, LW_EXCLUSIVE);
|
LWLockAcquire(partitionLock, LW_EXCLUSIVE);
|
||||||
LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
|
LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
|
||||||
|
|
||||||
@ -4340,8 +4340,8 @@ CheckTargetForConflictsIn(PREDICATELOCKTARGETTAG *targettag)
|
|||||||
LWLockRelease(SerializableXactHashLock);
|
LWLockRelease(SerializableXactHashLock);
|
||||||
LWLockRelease(partitionLock);
|
LWLockRelease(partitionLock);
|
||||||
if (IsInParallelMode())
|
if (IsInParallelMode())
|
||||||
LWLockRelease(&MySerializableXact->predicateLockListLock);
|
LWLockRelease(&MySerializableXact->perXactPredicateListLock);
|
||||||
LWLockRelease(SerializablePredicateLockListLock);
|
LWLockRelease(SerializablePredicateListLock);
|
||||||
|
|
||||||
if (rmpredlock != NULL)
|
if (rmpredlock != NULL)
|
||||||
{
|
{
|
||||||
@ -4485,7 +4485,7 @@ CheckTableForSerializableConflictIn(Relation relation)
|
|||||||
dbId = relation->rd_node.dbNode;
|
dbId = relation->rd_node.dbNode;
|
||||||
heapId = relation->rd_id;
|
heapId = relation->rd_id;
|
||||||
|
|
||||||
LWLockAcquire(SerializablePredicateLockListLock, LW_EXCLUSIVE);
|
LWLockAcquire(SerializablePredicateListLock, LW_EXCLUSIVE);
|
||||||
for (i = 0; i < NUM_PREDICATELOCK_PARTITIONS; i++)
|
for (i = 0; i < NUM_PREDICATELOCK_PARTITIONS; i++)
|
||||||
LWLockAcquire(PredicateLockHashPartitionLockByIndex(i), LW_SHARED);
|
LWLockAcquire(PredicateLockHashPartitionLockByIndex(i), LW_SHARED);
|
||||||
LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
|
LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);
|
||||||
@ -4535,7 +4535,7 @@ CheckTableForSerializableConflictIn(Relation relation)
|
|||||||
LWLockRelease(SerializableXactHashLock);
|
LWLockRelease(SerializableXactHashLock);
|
||||||
for (i = NUM_PREDICATELOCK_PARTITIONS - 1; i >= 0; i--)
|
for (i = NUM_PREDICATELOCK_PARTITIONS - 1; i >= 0; i--)
|
||||||
LWLockRelease(PredicateLockHashPartitionLockByIndex(i));
|
LWLockRelease(PredicateLockHashPartitionLockByIndex(i));
|
||||||
LWLockRelease(SerializablePredicateLockListLock);
|
LWLockRelease(SerializablePredicateListLock);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@ -4887,12 +4887,12 @@ AtPrepare_PredicateLocks(void)
|
|||||||
* than using the local predicate lock table because the latter is not
|
* than using the local predicate lock table because the latter is not
|
||||||
* guaranteed to be accurate.
|
* guaranteed to be accurate.
|
||||||
*/
|
*/
|
||||||
LWLockAcquire(SerializablePredicateLockListLock, LW_SHARED);
|
LWLockAcquire(SerializablePredicateListLock, LW_SHARED);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* No need to take sxact->predicateLockListLock in parallel mode because
|
* No need to take sxact->perXactPredicateListLock in parallel mode
|
||||||
* there cannot be any parallel workers running while we are preparing a
|
* because there cannot be any parallel workers running while we are
|
||||||
* transaction.
|
* preparing a transaction.
|
||||||
*/
|
*/
|
||||||
Assert(!IsParallelWorker() && !ParallelContextActive());
|
Assert(!IsParallelWorker() && !ParallelContextActive());
|
||||||
|
|
||||||
@ -4915,7 +4915,7 @@ AtPrepare_PredicateLocks(void)
|
|||||||
offsetof(PREDICATELOCK, xactLink));
|
offsetof(PREDICATELOCK, xactLink));
|
||||||
}
|
}
|
||||||
|
|
||||||
LWLockRelease(SerializablePredicateLockListLock);
|
LWLockRelease(SerializablePredicateListLock);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
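Taken together, the predicate.c hunks above all follow one nesting discipline. The following is a condensed sketch of that ordering, not code from the commit; the helper name is hypothetical and it assumes the PostgreSQL 13 lwlock API with the renamed locks introduced here:

#include "postgres.h"
#include "access/xact.h"		/* IsInParallelMode */
#include "storage/lwlock.h"
#include "storage/predicate_internals.h"

/*
 * Illustrative helper showing the acquisition order used throughout
 * predicate.c: the global SerializablePredicateListLock first, then (only
 * in parallel mode) the per-transaction list lock, then the target's hash
 * partition lock, and finally SerializableXactHashLock.
 */
static void
scan_predicate_list(SERIALIZABLEXACT *sxact, LWLock *partitionLock)
{
	LWLockAcquire(SerializablePredicateListLock, LW_SHARED);
	if (IsInParallelMode())
		LWLockAcquire(&sxact->perXactPredicateListLock, LW_EXCLUSIVE);
	LWLockAcquire(partitionLock, LW_EXCLUSIVE);
	LWLockAcquire(SerializableXactHashLock, LW_EXCLUSIVE);

	/* ... work on sxact->predicateLocks would go here ... */

	/* Releases run in the reverse order of acquisition. */
	LWLockRelease(SerializableXactHashLock);
	LWLockRelease(partitionLock);
	if (IsInParallelMode())
		LWLockRelease(&sxact->perXactPredicateListLock);
	LWLockRelease(SerializablePredicateListLock);
}

Acquiring in one fixed order and releasing in reverse is what keeps the many call sites above deadlock-free.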
src/backend/storage/lmgr/proc.c

@@ -221,7 +221,7 @@ InitProcGlobal(void)
 	/* Common initialization for all PGPROCs, regardless of type. */
 
 	/*
-	 * Set up per-PGPROC semaphore, latch, and backendLock. Prepared xact
+	 * Set up per-PGPROC semaphore, latch, and fpInfoLock. Prepared xact
 	 * dummy PGPROCs don't need these though - they're never associated
 	 * with a real process
 	 */
@@ -229,7 +229,7 @@ InitProcGlobal(void)
 		{
 			procs[i].sem = PGSemaphoreCreate();
 			InitSharedLatch(&(procs[i].procLatch));
-			LWLockInitialize(&(procs[i].backendLock), LWTRANCHE_PROC);
+			LWLockInitialize(&(procs[i].fpInfoLock), LWTRANCHE_LOCK_FASTPATH);
 		}
 		procs[i].pgprocno = i;
 
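LWLockInitialize() is where a lock gets bound to a tranche, and the tranche name is what pg_stat_activity later reports as the wait_event while a backend sleeps on the lock. A minimal sketch of the same pattern for a lock embedded in any shared structure; the struct and function are hypothetical, not part of this commit:

#include "postgres.h"
#include "storage/lwlock.h"

typedef struct MySharedState
{
	LWLock		lock;			/* embedded lock living in shared memory */
	int			counter;		/* example field the lock protects */
} MySharedState;

static void
my_shared_state_init(MySharedState *state, int tranche_id)
{
	/*
	 * tranche_id is a BuiltinTrancheIds value for core code, or an id
	 * obtained from LWLockNewTrancheId() for extensions; either way it
	 * selects the wait-event name users see for this lock.
	 */
	LWLockInitialize(&state->lock, tranche_id);
	state->counter = 0;
}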
src/backend/utils/cache/typcache.c

@@ -256,7 +256,7 @@ static const dshash_parameters srtr_record_table_params = {
 	sizeof(SharedRecordTableEntry),
 	shared_record_table_compare,
 	shared_record_table_hash,
-	LWTRANCHE_SESSION_RECORD_TABLE
+	LWTRANCHE_PER_SESSION_RECORD_TYPE
 };
 
 /* Parameters for SharedRecordTypmodRegistry's typmod hash table. */
@@ -265,7 +265,7 @@ static const dshash_parameters srtr_typmod_table_params = {
 	sizeof(SharedTypmodTableEntry),
 	dshash_memcmp,
 	dshash_memhash,
-	LWTRANCHE_SESSION_TYPMOD_TABLE
+	LWTRANCHE_PER_SESSION_RECORD_TYPMOD
 };
 
 /* hashtable for recognizing registered record types */
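The last member of a dshash_parameters struct is the tranche id, so every dshash table carries the wait-event name for its internal partition locks. A hedged sketch of defining and creating such a table, assuming the PostgreSQL 13 dshash API; my_table_params, the key/entry layout, and the reuse of this particular tranche are illustrative assumptions:

#include "postgres.h"
#include "lib/dshash.h"
#include "storage/lwlock.h"
#include "utils/dsa.h"

/* Keys and entries are compared and hashed as raw bytes here. */
static const dshash_parameters my_table_params = {
	sizeof(uint32),						/* key_size */
	2 * sizeof(uint32),					/* entry_size: key plus one payload */
	dshash_memcmp,						/* key comparison */
	dshash_memhash,						/* key hashing */
	LWTRANCHE_PER_SESSION_RECORD_TYPE	/* tranche reported on lock waits */
};

static dshash_table *
create_my_table(dsa_area *area)
{
	/*
	 * The table's partition LWLocks are initialized with the tranche id
	 * taken from the parameters struct.
	 */
	return dshash_create(area, &my_table_params, NULL);
}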
src/include/storage/lwlock.h

@@ -204,22 +204,22 @@ typedef enum BuiltinTrancheIds
 	LWTRANCHE_SERIAL_BUFFER,
 	LWTRANCHE_WAL_INSERT,
 	LWTRANCHE_BUFFER_CONTENT,
-	LWTRANCHE_BUFFER_IO_IN_PROGRESS,
-	LWTRANCHE_REPLICATION_ORIGIN,
-	LWTRANCHE_REPLICATION_SLOT_IO_IN_PROGRESS,
-	LWTRANCHE_PROC,
+	LWTRANCHE_BUFFER_IO,
+	LWTRANCHE_REPLICATION_ORIGIN_STATE,
+	LWTRANCHE_REPLICATION_SLOT_IO,
+	LWTRANCHE_LOCK_FASTPATH,
 	LWTRANCHE_BUFFER_MAPPING,
 	LWTRANCHE_LOCK_MANAGER,
 	LWTRANCHE_PREDICATE_LOCK_MANAGER,
 	LWTRANCHE_PARALLEL_HASH_JOIN,
 	LWTRANCHE_PARALLEL_QUERY_DSA,
-	LWTRANCHE_SESSION_DSA,
-	LWTRANCHE_SESSION_RECORD_TABLE,
-	LWTRANCHE_SESSION_TYPMOD_TABLE,
+	LWTRANCHE_PER_SESSION_DSA,
+	LWTRANCHE_PER_SESSION_RECORD_TYPE,
+	LWTRANCHE_PER_SESSION_RECORD_TYPMOD,
 	LWTRANCHE_SHARED_TUPLESTORE,
-	LWTRANCHE_TBM,
+	LWTRANCHE_SHARED_TIDBITMAP,
 	LWTRANCHE_PARALLEL_APPEND,
-	LWTRANCHE_SXACT,
+	LWTRANCHE_PER_XACT_PREDICATE_LIST,
 	LWTRANCHE_FIRST_USER_DEFINED
 } BuiltinTrancheIds;
 
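LWTRANCHE_FIRST_USER_DEFINED marks where dynamically assigned tranche ids begin; extensions do not add entries to this enum. A hedged sketch of how an extension loaded via shared_preload_libraries would get its own named tranche instead; the extension name and lock count are illustrative:

#include "postgres.h"
#include "fmgr.h"
#include "storage/lwlock.h"
#include "storage/shmem.h"

PG_MODULE_MAGIC;

static LWLockPadded *my_locks;

void
_PG_init(void)
{
	/*
	 * Reserve one LWLock in the main array at postmaster startup; the
	 * tranche name given here becomes the wait_event name users see.
	 */
	RequestNamedLWLockTranche("MyExtension", 1);
}

static void
my_shmem_startup(void)
{
	/* Later, during shared-memory initialization, fetch the reserved lock. */
	my_locks = GetNamedLWLockTranche("MyExtension");
}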
src/include/storage/predicate_internals.h

@@ -92,8 +92,12 @@ typedef struct SERIALIZABLEXACT
 	SHM_QUEUE	finishedLink;	/* list link in
 								 * FinishedSerializableTransactions */
 
-	LWLock		predicateLockListLock;	/* protects predicateLocks in parallel
-										 * mode */
+	/*
+	 * perXactPredicateListLock is only used in parallel queries: it protects
+	 * this SERIALIZABLEXACT's predicate lock list against other workers of
+	 * the same session.
+	 */
+	LWLock		perXactPredicateListLock;
 
 	/*
 	 * for r/o transactions: list of concurrent r/w transactions that we could
src/include/storage/proc.h

@@ -188,10 +188,8 @@ struct PGPROC
 	XLogRecPtr	clogGroupMemberLsn; /* WAL location of commit record for clog
 									 * group member */
 
-	/* Per-backend LWLock.  Protects fields below (but not group fields). */
-	LWLock		backendLock;
-
 	/* Lock manager data, recording fast-path locks taken by this backend. */
+	LWLock		fpInfoLock;		/* protects per-backend fast-path state */
 	uint64		fpLockBits;		/* lock modes held for each fast-path slot */
 	Oid			fpRelId[FP_LOCK_SLOTS_PER_BACKEND]; /* slots for rel oids */
 	bool		fpVXIDLock;		/* are we holding a fast-path VXID lock? */
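With this change, fpInfoLock guards only the fast-path fields that follow it, rather than a grab-bag of per-backend state. A hedged sketch of the read-side idiom; the helper is hypothetical, and real callers in the lock manager also consult fpLockBits before trusting a slot:

#include "postgres.h"
#include "storage/lwlock.h"
#include "storage/proc.h"

/* Does this backend hold any fast-path slot for the given relation? */
static bool
backend_has_fastpath_slot(PGPROC *proc, Oid relid)
{
	bool		found = false;
	int			i;

	/* Shared mode suffices for readers; the owning backend takes exclusive. */
	LWLockAcquire(&proc->fpInfoLock, LW_SHARED);
	for (i = 0; i < FP_LOCK_SLOTS_PER_BACKEND; i++)
	{
		if (proc->fpRelId[i] == relid)
		{
			found = true;
			break;
		}
	}
	LWLockRelease(&proc->fpInfoLock);

	return found;
}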