
Repair race condition introduced into heap_update() in 7.1 ---

PageGetFreeSpace() was being called while not holding the buffer lock, which
not only could yield a garbage answer, but even if it's the right answer there
might be less space available after we reacquire the buffer lock.
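
As a minimal stand-alone illustration (not PostgreSQL code), the sketch below models the pattern with a pthread mutex standing in for the buffer content lock and a plain counter standing in for PageGetFreeSpace(); FakePage, reserve_space_racy and reserve_space_safe are hypothetical names. The racy variant decides based on a value read before the lock is taken, while the safe variant re-reads the value once the lock is held, which is what heap_update() is changed to do below.

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

typedef struct
{
	pthread_mutex_t lock;		/* stands in for the buffer content lock */
	size_t			freespace;	/* stands in for PageGetFreeSpace() */
} FakePage;

/* Buggy pattern: decide using a free-space value read outside the lock. */
static bool
reserve_space_racy(FakePage *page, size_t need)
{
	size_t		observed = page->freespace;	/* unlocked read: may be stale */
	bool		ok = false;

	pthread_mutex_lock(&page->lock);
	if (observed >= need)
	{
		/* Another backend may have consumed the space since we looked. */
		page->freespace -= need;	/* can underflow: the bug */
		ok = true;
	}
	pthread_mutex_unlock(&page->lock);
	return ok;
}

/* Fixed pattern: re-check the free space after the lock is held. */
static bool
reserve_space_safe(FakePage *page, size_t need)
{
	bool		ok = false;

	pthread_mutex_lock(&page->lock);
	if (page->freespace >= need)	/* authoritative value, read under the lock */
	{
		page->freespace -= need;
		ok = true;
	}
	pthread_mutex_unlock(&page->lock);
	return ok;
}

int
main(void)
{
	static FakePage page = {PTHREAD_MUTEX_INITIALIZER, 8192};

	(void) reserve_space_racy;	/* buggy variant kept only for contrast */
	return reserve_space_safe(&page, 128) ? 0 : 1;
}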

Also repair potential deadlock introduced by my recent performance improvement
in RelationGetBufferForTuple(): it was possible for two heap_updates to try to
lock two buffers in opposite orders.  The fix creates a global rule that
buffers of a single heap relation should be locked in decreasing block number
order.  Currently, this only applies to heap_update; VACUUM can get away with
ignoring the rule since it holds exclusive lock on the whole relation anyway.
However, if we try to implement a VACUUM that can run in parallel with other
transactions, VACUUM will also have to obey the lock order rule.
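
As a minimal stand-alone sketch of that ordering rule (again not PostgreSQL code; FakeBuffer, BlockNum and lock_buffer_pair are hypothetical names, with pthread mutexes standing in for buffer content locks): whichever order the caller passes the two pages in, the lock on the higher-numbered block is taken first, so two backends working on the same pair of pages can never each hold one lock while waiting for the other.

#include <pthread.h>

typedef unsigned int BlockNum;	/* illustrative stand-in for a block number */

typedef struct
{
	BlockNum		blkno;
	pthread_mutex_t lock;		/* stands in for the buffer content lock */
} FakeBuffer;

/*
 * Lock two distinct buffers of one relation, always taking the one with the
 * higher block number first.  Backends that all follow this rule cannot end
 * up each holding one of the two locks while waiting for the other.
 */
static void
lock_buffer_pair(FakeBuffer *a, FakeBuffer *b)
{
	FakeBuffer *first = (a->blkno > b->blkno) ? a : b;	/* higher-numbered page */
	FakeBuffer *second = (first == a) ? b : a;

	pthread_mutex_lock(&first->lock);
	pthread_mutex_lock(&second->lock);
}

static void
unlock_buffer_pair(FakeBuffer *a, FakeBuffer *b)
{
	pthread_mutex_unlock(&a->lock);
	pthread_mutex_unlock(&b->lock);
}

int
main(void)
{
	static FakeBuffer oldpage = {7, PTHREAD_MUTEX_INITIALIZER};
	static FakeBuffer newpage = {42, PTHREAD_MUTEX_INITIALIZER};

	/* Locks block 42 first, then block 7, regardless of argument order. */
	lock_buffer_pair(&oldpage, &newpage);
	unlock_buffer_pair(&oldpage, &newpage);
	return 0;
}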
commit 27336e4f7a
parent ae38a1f556
Author: Tom Lane
Date:   2001-05-16 22:35:12 +00:00

3 changed files with 116 additions and 47 deletions

src/backend/access/heap/heapam.c

@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.114 2001/05/12 19:58:27 tgl Exp $
+ *	  $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.115 2001/05/16 22:35:12 tgl Exp $
  *
  *
  * INTERFACE ROUTINES
@@ -1317,7 +1317,7 @@ heap_insert(Relation relation, HeapTuple tup)
 #endif
 
 	/* Find buffer for this tuple */
-	buffer = RelationGetBufferForTuple(relation, tup->t_len);
+	buffer = RelationGetBufferForTuple(relation, tup->t_len, 0);
 
 	/* NO ELOG(ERROR) from here till changes are logged */
 	START_CRIT_SECTION();
@@ -1578,6 +1578,8 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
 				newbuf;
 	bool		need_toast,
 				already_marked;
+	Size		newtupsize,
+				pagefree;
 	int			result;
 
 	/* increment access statistics */
@@ -1685,8 +1687,10 @@ l2:
 					  HeapTupleHasExtended(newtup) ||
 					  (MAXALIGN(newtup->t_len) > TOAST_TUPLE_THRESHOLD));
 
-	if (need_toast ||
-		(unsigned) MAXALIGN(newtup->t_len) > PageGetFreeSpace((Page) dp))
+	newtupsize = MAXALIGN(newtup->t_len);
+	pagefree = PageGetFreeSpace((Page) dp);
+
+	if (need_toast || newtupsize > pagefree)
 	{
 		_locked_tuple_.node = relation->rd_node;
 		_locked_tuple_.tid = oldtup.t_self;
@@ -1704,17 +1708,60 @@ l2:
 		/* Let the toaster do its thing */
 		if (need_toast)
+		{
 			heap_tuple_toast_attrs(relation, newtup, &oldtup);
+			newtupsize = MAXALIGN(newtup->t_len);
+		}
 
-		/* Now, do we need a new page for the tuple, or not? */
-		if ((unsigned) MAXALIGN(newtup->t_len) <= PageGetFreeSpace((Page) dp))
-			newbuf = buffer;
+		/*
+		 * Now, do we need a new page for the tuple, or not?  This is a bit
+		 * tricky since someone else could have added tuples to the page
+		 * while we weren't looking.  We have to recheck the available space
+		 * after reacquiring the buffer lock.  But don't bother to do that
+		 * if the former amount of free space is still not enough; it's
+		 * unlikely there's more free now than before.
+		 *
+		 * What's more, if we need to get a new page, we will need to acquire
+		 * buffer locks on both old and new pages.  To avoid deadlock against
+		 * some other backend trying to get the same two locks in the other
+		 * order, we must be consistent about the order we get the locks in.
+		 * We use the rule "lock the higher-numbered page of the relation
+		 * first".  To implement this, we must do RelationGetBufferForTuple
+		 * while not holding the lock on the old page, and we must tell it
+		 * to give us a page beyond the old page.
+		 */
+		if (newtupsize > pagefree)
+		{
+			/* Assume there's no chance to put newtup on same page. */
+			newbuf = RelationGetBufferForTuple(relation, newtup->t_len,
+									   BufferGetBlockNumber(buffer) + 1);
+			/* Now reacquire lock on old tuple's page. */
+			LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
+		}
 		else
-			newbuf = RelationGetBufferForTuple(relation, newtup->t_len);
-
-		/* Re-acquire the lock on the old tuple's page. */
-		/* this seems to be deadlock free... */
-		LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
+		{
+			/* Re-acquire the lock on the old tuple's page. */
+			LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
+			/* Re-check using the up-to-date free space */
+			pagefree = PageGetFreeSpace((Page) dp);
+			if (newtupsize > pagefree)
+			{
+				/*
+				 * Rats, it doesn't fit anymore.  We must now unlock and
+				 * relock to avoid deadlock.  Fortunately, this path should
+				 * seldom be taken.
+				 */
+				LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
+				newbuf = RelationGetBufferForTuple(relation, newtup->t_len,
+									   BufferGetBlockNumber(buffer) + 1);
+				LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
+			}
+			else
+			{
+				/* OK, it fits here, so we're done. */
+				newbuf = buffer;
+			}
+		}
 	}
 	else
 	{
@@ -1723,6 +1770,11 @@ l2:
 		newbuf = buffer;
 	}
 
+	/*
+	 * At this point newbuf and buffer are both pinned and locked,
+	 * and newbuf has enough space for the new tuple.
+	 */
+
 	/* NO ELOG(ERROR) from here till changes are logged */
 	START_CRIT_SECTION();