mirror of
https://github.com/postgres/postgres.git
synced 2025-06-05 23:56:58 +03:00
Make FP_LOCK_SLOTS_PER_BACKEND look like a function

The FP_LOCK_SLOTS_PER_BACKEND macro looks like a constant, but it
depends on the max_locks_per_transaction GUC, and thus can change. This
is non-obvious and confusing, so make it look more like a function by
renaming it to FastPathLockSlotsPerBackend().

While at it, use the macro when initializing fast-path shared memory,
instead of using the formula.

Reported-by: Andres Freund
Discussion: https://postgr.es/m/ffiwtzc6vedo6wb4gbwelon5nefqg675t5c7an2ta7pcz646cg@qwmkdb3l4ett
This commit is contained in:
parent
91ecb5e0bc
commit
c878de1db4
@ -226,10 +226,10 @@ int FastPathLockGroupsPerBackend = 0;
|
|||||||
* the FAST_PATH_SLOT macro, split it into group and index (in the group).
|
* the FAST_PATH_SLOT macro, split it into group and index (in the group).
|
||||||
*/
|
*/
|
||||||
#define FAST_PATH_GROUP(index) \
|
#define FAST_PATH_GROUP(index) \
|
||||||
(AssertMacro((uint32) (index) < FP_LOCK_SLOTS_PER_BACKEND), \
|
(AssertMacro((uint32) (index) < FastPathLockSlotsPerBackend()), \
|
||||||
((index) / FP_LOCK_SLOTS_PER_GROUP))
|
((index) / FP_LOCK_SLOTS_PER_GROUP))
|
||||||
#define FAST_PATH_INDEX(index) \
|
#define FAST_PATH_INDEX(index) \
|
||||||
(AssertMacro((uint32) (index) < FP_LOCK_SLOTS_PER_BACKEND), \
|
(AssertMacro((uint32) (index) < FastPathLockSlotsPerBackend()), \
|
||||||
((index) % FP_LOCK_SLOTS_PER_GROUP))
|
((index) % FP_LOCK_SLOTS_PER_GROUP))
|
||||||
|
|
||||||
/* Macros for manipulating proc->fpLockBits */
|
/* Macros for manipulating proc->fpLockBits */
|
||||||
@ -242,7 +242,7 @@ int FastPathLockGroupsPerBackend = 0;
|
|||||||
#define FAST_PATH_BIT_POSITION(n, l) \
|
#define FAST_PATH_BIT_POSITION(n, l) \
|
||||||
(AssertMacro((l) >= FAST_PATH_LOCKNUMBER_OFFSET), \
|
(AssertMacro((l) >= FAST_PATH_LOCKNUMBER_OFFSET), \
|
||||||
AssertMacro((l) < FAST_PATH_BITS_PER_SLOT+FAST_PATH_LOCKNUMBER_OFFSET), \
|
AssertMacro((l) < FAST_PATH_BITS_PER_SLOT+FAST_PATH_LOCKNUMBER_OFFSET), \
|
||||||
AssertMacro((n) < FP_LOCK_SLOTS_PER_BACKEND), \
|
AssertMacro((n) < FastPathLockSlotsPerBackend()), \
|
||||||
((l) - FAST_PATH_LOCKNUMBER_OFFSET + FAST_PATH_BITS_PER_SLOT * (FAST_PATH_INDEX(n))))
|
((l) - FAST_PATH_LOCKNUMBER_OFFSET + FAST_PATH_BITS_PER_SLOT * (FAST_PATH_INDEX(n))))
|
||||||
#define FAST_PATH_SET_LOCKMODE(proc, n, l) \
|
#define FAST_PATH_SET_LOCKMODE(proc, n, l) \
|
||||||
FAST_PATH_BITS(proc, n) |= UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)
|
FAST_PATH_BITS(proc, n) |= UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)
|
||||||
@ -2691,7 +2691,7 @@ static bool
|
|||||||
FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode)
|
FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode)
|
||||||
{
|
{
|
||||||
uint32 i;
|
uint32 i;
|
||||||
uint32 unused_slot = FP_LOCK_SLOTS_PER_BACKEND;
|
uint32 unused_slot = FastPathLockSlotsPerBackend();
|
||||||
|
|
||||||
/* fast-path group the lock belongs to */
|
/* fast-path group the lock belongs to */
|
||||||
uint32 group = FAST_PATH_REL_GROUP(relid);
|
uint32 group = FAST_PATH_REL_GROUP(relid);
|
||||||
@ -2713,7 +2713,7 @@ FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode)
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* If no existing entry, use any empty slot. */
|
/* If no existing entry, use any empty slot. */
|
||||||
if (unused_slot < FP_LOCK_SLOTS_PER_BACKEND)
|
if (unused_slot < FastPathLockSlotsPerBackend())
|
||||||
{
|
{
|
||||||
MyProc->fpRelId[unused_slot] = relid;
|
MyProc->fpRelId[unused_slot] = relid;
|
||||||
FAST_PATH_SET_LOCKMODE(MyProc, unused_slot, lockmode);
|
FAST_PATH_SET_LOCKMODE(MyProc, unused_slot, lockmode);
|
||||||
|
@ -116,7 +116,7 @@ ProcGlobalShmemSize(void)
|
|||||||
* nicely aligned in each backend.
|
* nicely aligned in each backend.
|
||||||
*/
|
*/
|
||||||
fpLockBitsSize = MAXALIGN(FastPathLockGroupsPerBackend * sizeof(uint64));
|
fpLockBitsSize = MAXALIGN(FastPathLockGroupsPerBackend * sizeof(uint64));
|
||||||
fpRelIdSize = MAXALIGN(FastPathLockGroupsPerBackend * sizeof(Oid) * FP_LOCK_SLOTS_PER_GROUP);
|
fpRelIdSize = MAXALIGN(FastPathLockSlotsPerBackend() * sizeof(Oid));
|
||||||
|
|
||||||
size = add_size(size, mul_size(TotalProcs, (fpLockBitsSize + fpRelIdSize)));
|
size = add_size(size, mul_size(TotalProcs, (fpLockBitsSize + fpRelIdSize)));
|
||||||
|
|
||||||
@ -231,7 +231,7 @@ InitProcGlobal(void)
|
|||||||
* shared memory and then divide that between backends.
|
* shared memory and then divide that between backends.
|
||||||
*/
|
*/
|
||||||
fpLockBitsSize = MAXALIGN(FastPathLockGroupsPerBackend * sizeof(uint64));
|
fpLockBitsSize = MAXALIGN(FastPathLockGroupsPerBackend * sizeof(uint64));
|
||||||
fpRelIdSize = MAXALIGN(FastPathLockGroupsPerBackend * sizeof(Oid) * FP_LOCK_SLOTS_PER_GROUP);
|
fpRelIdSize = MAXALIGN(FastPathLockSlotsPerBackend() * sizeof(Oid));
|
||||||
|
|
||||||
fpPtr = ShmemAlloc(TotalProcs * (fpLockBitsSize + fpRelIdSize));
|
fpPtr = ShmemAlloc(TotalProcs * (fpLockBitsSize + fpRelIdSize));
|
||||||
MemSet(fpPtr, 0, TotalProcs * (fpLockBitsSize + fpRelIdSize));
|
MemSet(fpPtr, 0, TotalProcs * (fpLockBitsSize + fpRelIdSize));
|
||||||
|
@ -587,7 +587,7 @@ InitializeFastPathLocks(void)
|
|||||||
while (FastPathLockGroupsPerBackend < FP_LOCK_GROUPS_PER_BACKEND_MAX)
|
while (FastPathLockGroupsPerBackend < FP_LOCK_GROUPS_PER_BACKEND_MAX)
|
||||||
{
|
{
|
||||||
/* stop once we exceed max_locks_per_xact */
|
/* stop once we exceed max_locks_per_xact */
|
||||||
if (FastPathLockGroupsPerBackend * FP_LOCK_SLOTS_PER_GROUP >= max_locks_per_xact)
|
if (FastPathLockSlotsPerBackend() >= max_locks_per_xact)
|
||||||
break;
|
break;
|
||||||
|
|
||||||
FastPathLockGroupsPerBackend *= 2;
|
FastPathLockGroupsPerBackend *= 2;
|
||||||
|
@ -88,7 +88,8 @@ extern PGDLLIMPORT int FastPathLockGroupsPerBackend;
|
|||||||
|
|
||||||
#define FP_LOCK_GROUPS_PER_BACKEND_MAX 1024
|
#define FP_LOCK_GROUPS_PER_BACKEND_MAX 1024
|
||||||
#define FP_LOCK_SLOTS_PER_GROUP 16 /* don't change */
|
#define FP_LOCK_SLOTS_PER_GROUP 16 /* don't change */
|
||||||
#define FP_LOCK_SLOTS_PER_BACKEND (FP_LOCK_SLOTS_PER_GROUP * FastPathLockGroupsPerBackend)
|
#define FastPathLockSlotsPerBackend() \
|
||||||
|
(FP_LOCK_SLOTS_PER_GROUP * FastPathLockGroupsPerBackend)
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Flags for PGPROC.delayChkptFlags
|
* Flags for PGPROC.delayChkptFlags
|
||||||
|
Loading…
x
Reference in New Issue
Block a user