	Unpin buffer before inplace update waits for an XID to end.
Commit a07e03fd8f changed inplace updates
to wait for heap_update() commands like GRANT TABLE and GRANT DATABASE.
By keeping the pin during that wait, a sequence of autovacuum workers
and an uncommitted GRANT starved one foreground LockBufferForCleanup()
for six minutes, on buildfarm member sarus.  Prevent, at the cost of a
bit of complexity.  Back-patch to v12, like the earlier commit.  That
commit and heap_inplace_lock() have not yet appeared in any release.
Discussion: https://postgr.es/m/20241026184936.ae.nmisch@google.com
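The fix works by giving heap_inplace_lock() a release callback: when it finds a conflicting transaction it must wait for, it drops the buffer content lock, calls the callback so the caller can release the buffer pin (in systable_inplace_update_begin() the callback is systable_endscan()), waits, and returns false; the caller then rebuilds its state and retries. The following stand-alone toy model sketches that contract under illustrative names only; it is not PostgreSQL code.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the caller's scan state (holds the "pin"). */
typedef struct ToyScan
{
	bool		pinned;
} ToyScan;

static void
toy_endscan(void *arg)
{
	ToyScan    *scan = (ToyScan *) arg;

	scan->pinned = false;		/* like systable_endscan(): drop the pin */
	printf("pin released before waiting\n");
}

/*
 * Toy counterpart of heap_inplace_lock(): on conflict, release the caller's
 * resources via the callback, "wait", and return false so the caller retries.
 */
static bool
toy_inplace_lock(int *conflicts, void (*release_callback) (void *), void *arg)
{
	if (*conflicts > 0)
	{
		release_callback(arg);	/* unpin before the (simulated) XID wait */
		(*conflicts)--;			/* pretend the blocking transaction ended */
		return false;
	}
	return true;				/* "buffer exclusive-locked": proceed */
}

int
main(void)
{
	ToyScan		scan;
	int			conflicts = 2;

	/* Caller retry loop, same shape as systable_inplace_update_begin(). */
	do
	{
		scan.pinned = true;		/* (re)start the scan, re-acquiring the pin */
	} while (!toy_inplace_lock(&conflicts, toy_endscan, &scan));

	printf("locked with pin held: %d\n", scan.pinned);
	return 0;
}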
			
			
src/backend/access/heap/heapam.c

@@ -6134,8 +6134,8 @@ heap_abort_speculative(Relation relation, ItemPointer tid)
  * transaction.  If compatible, return true with the buffer exclusive-locked,
  * and the caller must release that by calling
  * heap_inplace_update_and_unlock(), calling heap_inplace_unlock(), or raising
- * an error.  Otherwise, return false after blocking transactions, if any,
- * have ended.
+ * an error.  Otherwise, call release_callback(arg), wait for blocking
+ * transactions to end, and return false.
  *
  * Since this is intended for system catalogs and SERIALIZABLE doesn't cover
  * DDL, this doesn't guarantee any particular predicate locking.
@@ -6169,7 +6169,8 @@ heap_abort_speculative(Relation relation, ItemPointer tid)
  */
 bool
 heap_inplace_lock(Relation relation,
-				  HeapTuple oldtup_ptr, Buffer buffer)
+				  HeapTuple oldtup_ptr, Buffer buffer,
+				  void (*release_callback) (void *), void *arg)
 {
 	HeapTupleData oldtup = *oldtup_ptr; /* minimize diff vs. heap_update() */
 	TM_Result	result;
@@ -6234,6 +6235,7 @@ heap_inplace_lock(Relation relation,
 										lockmode, NULL))
 			{
 				LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
+				release_callback(arg);
 				ret = false;
 				MultiXactIdWait((MultiXactId) xwait, mxact_status, infomask,
 								relation, &oldtup.t_self, XLTW_Update,
@@ -6249,6 +6251,7 @@ heap_inplace_lock(Relation relation,
 		else
 		{
 			LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
+			release_callback(arg);
 			ret = false;
 			XactLockTableWait(xwait, relation, &oldtup.t_self,
 							  XLTW_Update);
@@ -6260,6 +6263,7 @@ heap_inplace_lock(Relation relation,
 		if (!ret)
 		{
 			LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
+			release_callback(arg);
 		}
 	}
 
src/backend/access/index/genam.c

@@ -805,6 +805,7 @@ systable_inplace_update_begin(Relation relation,
 	int			retries = 0;
 	SysScanDesc scan;
 	HeapTuple	oldtup;
+	BufferHeapTupleTableSlot *bslot;
 
 	/*
 	 * For now, we don't allow parallel updates.  Unlike a regular update,
@@ -826,10 +827,9 @@ systable_inplace_update_begin(Relation relation,
 	Assert(IsInplaceUpdateRelation(relation) || !IsSystemRelation(relation));
 
 	/* Loop for an exclusive-locked buffer of a non-updated tuple. */
-	for (;;)
+	do
 	{
 		TupleTableSlot *slot;
-		BufferHeapTupleTableSlot *bslot;
 
 		CHECK_FOR_INTERRUPTS();
 
@@ -855,11 +855,9 @@ systable_inplace_update_begin(Relation relation,
 		slot = scan->slot;
 		Assert(TTS_IS_BUFFERTUPLE(slot));
 		bslot = (BufferHeapTupleTableSlot *) slot;
-		if (heap_inplace_lock(scan->heap_rel,
-							  bslot->base.tuple, bslot->buffer))
-			break;
-		systable_endscan(scan);
-	};
+	} while (!heap_inplace_lock(scan->heap_rel,
+								bslot->base.tuple, bslot->buffer,
+								(void (*) (void *)) systable_endscan, scan));
 
 	*oldtupcopy = heap_copytuple(oldtup);
 	*state = scan;
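Two details of the new genam.c loop are worth spelling out. First, the bslot declaration moves to function scope because the do/while controlling expression is evaluated after the loop body's block scope has closed, so the variable it reads must be declared outside the braces. Second, systable_endscan() is passed as the release callback via a (void (*) (void *)) cast, which works here because SysScanDesc is itself a pointer type; a strictly conforming way to express the same idea is a small adapter with the expected signature, as in this stand-alone sketch (toy names only, not PostgreSQL code):

#include <stdio.h>

/* Toy stand-ins: like SysScanDesc, the scan handle is a pointer typedef. */
typedef struct ToyScanData
{
	int			nkeys;
} ToyScanData, *ToyScan;

static void
toy_endscan(ToyScan scan)
{
	printf("endscan: %d key(s)\n", scan->nkeys);
}

/*
 * Adapter with the void (*) (void *) signature the lock routine expects;
 * the patch instead casts the real function pointer directly.
 */
static void
toy_endscan_cb(void *arg)
{
	toy_endscan((ToyScan) arg);
}

int
main(void)
{
	ToyScanData scan = {1};
	void		(*release_callback) (void *) = toy_endscan_cb;

	release_callback(&scan);	/* what heap_inplace_lock() does before waiting */
	return 0;
}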