If we're going to have a non-panic check for held_lwlocks[] overrun,
it must occur *before* we get into the critical state of holding a lock we have no place to record. Per discussion with Qingqing Zhou.
This commit is contained in:
parent e794dfa511
commit badb83f9ec
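To illustrate why the check has to move, here is a minimal, self-contained C sketch of the hazard (this is not lwlock.c code; acquire_unsafe, acquire_safe, release_all_recorded, and MAX_HELD are invented names, and it assumes, as in PostgreSQL, that error cleanup releases only the locks recorded in the held-locks array). If the capacity check runs only after the lock is physically taken, the error path leaves a lock held that cleanup can never find.

/* Standalone sketch only; not PostgreSQL source. */
#include <stdbool.h>
#include <stdio.h>

#define MAX_HELD 4              /* stand-in for MAX_SIMUL_LWLOCKS */

static int  held[MAX_HELD];     /* stand-in for held_lwlocks[] */
static int  num_held = 0;
static bool locked[8];          /* pretend lock table */

/* Old ordering: take the lock first, then discover there is no room to record it. */
static void acquire_unsafe(int id)
{
    locked[id] = true;                      /* lock is now physically held */
    if (num_held >= MAX_HELD)
    {
        fprintf(stderr, "too many locks: lock %d is held but unrecorded\n", id);
        return;                             /* error exit orphans the lock */
    }
    held[num_held++] = id;
}

/* New ordering: refuse before touching the lock, so failure leaves nothing behind. */
static void acquire_safe(int id)
{
    if (num_held >= MAX_HELD)
    {
        fprintf(stderr, "too many locks: nothing acquired, nothing to clean up\n");
        return;
    }
    locked[id] = true;
    held[num_held++] = id;
}

/* Error cleanup can release only what was recorded. */
static void release_all_recorded(void)
{
    while (num_held > 0)
        locked[held[--num_held]] = false;
}

int main(void)
{
    for (int id = 0; id <= MAX_HELD; id++)  /* the fifth call overflows */
        acquire_unsafe(id);
    release_all_recorded();
    printf("lock 4 still held after cleanup? %s\n", locked[4] ? "yes" : "no");

    (void) acquire_safe;                    /* the fixed ordering, shown for contrast */
    return 0;
}

The commit applies the same reordering to both LWLockAcquire and LWLockConditionalAcquire, as the hunks below show.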
src/backend/storage/lmgr/lwlock.c
@@ -15,7 +15,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *    $PostgreSQL: pgsql/src/backend/storage/lmgr/lwlock.c,v 1.26 2005/04/08 03:43:54 tgl Exp $
+ *    $PostgreSQL: pgsql/src/backend/storage/lmgr/lwlock.c,v 1.27 2005/04/08 14:18:35 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -213,6 +213,10 @@ LWLockAcquire(LWLockId lockid, LWLockMode mode)
 	 */
 	Assert(!(proc == NULL && IsUnderPostmaster));
 
+	/* Ensure we will have room to remember the lock */
+	if (num_held_lwlocks >= MAX_SIMUL_LWLOCKS)
+		elog(ERROR, "too many LWLocks taken");
+
 	/*
 	 * Lock out cancel/die interrupts until we exit the code section
 	 * protected by the LWLock.  This ensures that interrupts will not
@@ -328,8 +332,6 @@ LWLockAcquire(LWLockId lockid, LWLockMode mode)
 	SpinLockRelease_NoHoldoff(&lock->mutex);
 
 	/* Add lock to list of locks held by this backend */
-	if (num_held_lwlocks >= MAX_SIMUL_LWLOCKS)
-		elog(ERROR, "too many LWLocks taken");
 	held_lwlocks[num_held_lwlocks++] = lockid;
 
 	/*
@@ -354,6 +356,10 @@ LWLockConditionalAcquire(LWLockId lockid, LWLockMode mode)
 
 	PRINT_LWDEBUG("LWLockConditionalAcquire", lockid, lock);
 
+	/* Ensure we will have room to remember the lock */
+	if (num_held_lwlocks >= MAX_SIMUL_LWLOCKS)
+		elog(ERROR, "too many LWLocks taken");
+
 	/*
 	 * Lock out cancel/die interrupts until we exit the code section
 	 * protected by the LWLock.  This ensures that interrupts will not
@@ -398,8 +404,6 @@ LWLockConditionalAcquire(LWLockId lockid, LWLockMode mode)
 	else
 	{
 		/* Add lock to list of locks held by this backend */
-		if (num_held_lwlocks >= MAX_SIMUL_LWLOCKS)
-			elog(ERROR, "too many LWLocks taken");
 		held_lwlocks[num_held_lwlocks++] = lockid;
 	}
 