
Prevent index-only scans from returning wrong answers under Hot Standby.

The alternative of disallowing index-only scans in HS operation was
discussed, but the consensus was that it was better to treat marking
a page all-visible as a recovery conflict for snapshots that could still
fail to see XIDs on that page.  We may in the future try to soften this,
so that we simply force index-only scans to do heap fetches in cases where
this may be an issue, rather than throwing a hard conflict.
Author: Robert Haas
Date:   2012-04-26 20:00:21 -04:00
Commit: 3424bff90f (parent 92df220343)

7 changed files with 36 additions and 11 deletions
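To make the failure mode concrete, here is a minimal self-contained sketch of the wrong answer being prevented. It is not part of the commit: XIDs are plain integers, wraparound and subtransactions are ignored, and every name in it is made up.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t SimpleXid;             /* stand-in for TransactionId */

typedef struct
{
    SimpleXid xmin;                     /* oldest XID the snapshot may still see as running */
} SimpleSnapshot;

/* Simplified MVCC check: a tuple inserted by tuple_xmin is visible only if
 * that XID committed before the snapshot was taken. */
static bool
tuple_visible(SimpleXid tuple_xmin, const SimpleSnapshot *snap)
{
    return tuple_xmin < snap->xmin;
}

int
main(void)
{
    SimpleSnapshot standby_snap = { .xmin = 100 }; /* old snapshot on the standby */
    SimpleXid      tuple_xmin = 105;               /* tuple inserted after that snapshot */
    bool           vm_bit_set = true;              /* all-visible bit replayed from the primary */

    /* An index-only scan that trusts the visibility-map bit skips the heap
     * fetch and treats the tuple as visible ... */
    bool ios_says_visible = vm_bit_set;

    /* ... even though a heap fetch would have rejected it under this snapshot. */
    bool heap_says_visible = tuple_visible(tuple_xmin, &standby_snap);

    printf("index-only scan: %d, heap fetch: %d\n",
           ios_says_visible, heap_says_visible);
    return 0;
}

Throwing a recovery conflict against any snapshot whose xmin is still at or below the newest xmin on the page removes the possibility of that mismatch.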


@@ -4368,7 +4368,8 @@ log_heap_freeze(Relation reln, Buffer buffer,
  * and dirtied.
  */
 XLogRecPtr
-log_heap_visible(RelFileNode rnode, BlockNumber block, Buffer vm_buffer)
+log_heap_visible(RelFileNode rnode, BlockNumber block, Buffer vm_buffer,
+                 TransactionId cutoff_xid)
 {
     xl_heap_visible xlrec;
     XLogRecPtr  recptr;
@@ -4376,6 +4377,7 @@ log_heap_visible(RelFileNode rnode, BlockNumber block, Buffer vm_buffer)
     xlrec.node = rnode;
     xlrec.block = block;
+    xlrec.cutoff_xid = cutoff_xid;
 
     rdata[0].data = (char *) &xlrec;
     rdata[0].len = SizeOfHeapVisible;
@@ -4708,6 +4710,17 @@ heap_xlog_visible(XLogRecPtr lsn, XLogRecord *record)
         return;
     page = (Page) BufferGetPage(buffer);
 
+    /*
+     * If there are any Hot Standby transactions running that have an xmin
+     * horizon old enough that this page isn't all-visible for them, they
+     * might incorrectly decide that an index-only scan can skip a heap fetch.
+     *
+     * NB: It might be better to throw some kind of "soft" conflict here that
+     * forces any index-only scan that is in flight to perform heap fetches,
+     * rather than killing the transaction outright.
+     */
+    ResolveRecoveryConflictWithSnapshot(xlrec->cutoff_xid, xlrec->node);
+
     LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
 
     /*
@@ -4760,7 +4773,8 @@ heap_xlog_visible(XLogRecPtr lsn, XLogRecord *record)
          * harm is done; and the next VACUUM will fix it.
          */
         if (!XLByteLE(lsn, PageGetLSN(BufferGetPage(vmbuffer))))
-            visibilitymap_set(reln, xlrec->block, lsn, vmbuffer);
+            visibilitymap_set(reln, xlrec->block, lsn, vmbuffer,
+                              xlrec->cutoff_xid);
 
         ReleaseBuffer(vmbuffer);
         FreeFakeRelcacheEntry(reln);
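On the standby side, the new redo code above hands xlrec->cutoff_xid to ResolveRecoveryConflictWithSnapshot(). A hedged sketch of the horizon test this amounts to, under the same simplified assumptions as before (plain integer XIDs, hypothetical helper name, no wraparound):

#include <stdbool.h>
#include <stdint.h>

typedef uint32_t SimpleXid;     /* stand-in for TransactionId, no wraparound */

/* Hypothetical helper: does replaying the all-visible bit conflict with a
 * standby snapshot whose oldest possibly-unseen XID is snapshot_xmin? */
static bool
allvisible_conflicts_with_snapshot(SimpleXid cutoff_xid, SimpleXid snapshot_xmin)
{
    /* If the snapshot's xmin has not advanced past the newest xmin on the
     * page (cutoff_xid), some tuple there may still be invisible to it, so
     * the page is not all-visible from its point of view. */
    return snapshot_xmin <= cutoff_xid;
}

The comment kept in the diff notes the possible softer alternative: instead of cancelling such transactions, force their in-flight index-only scans to fall back to heap fetches.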


@@ -229,7 +229,9 @@ visibilitymap_pin_ok(BlockNumber heapBlk, Buffer buf)
  * recptr is the LSN of the XLOG record we're replaying, if we're in recovery,
  * or InvalidXLogRecPtr in normal running. The page LSN is advanced to the
  * one provided; in normal running, we generate a new XLOG record and set the
- * page LSN to that value.
+ * page LSN to that value. cutoff_xid is the largest xmin on the page being
+ * marked all-visible; it is needed for Hot Standby, and can be
+ * InvalidTransactionId if the page contains no tuples.
  *
  * You must pass a buffer containing the correct map page to this function.
  * Call visibilitymap_pin first to pin the right one. This function doesn't do
@@ -237,7 +239,7 @@ visibilitymap_pin_ok(BlockNumber heapBlk, Buffer buf)
  */
 void
 visibilitymap_set(Relation rel, BlockNumber heapBlk, XLogRecPtr recptr,
-                  Buffer buf)
+                  Buffer buf, TransactionId cutoff_xid)
 {
     BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
     uint32      mapByte = HEAPBLK_TO_MAPBYTE(heapBlk);
@@ -269,7 +271,8 @@ visibilitymap_set(Relation rel, BlockNumber heapBlk, XLogRecPtr recptr,
     if (RelationNeedsWAL(rel))
     {
         if (XLogRecPtrIsInvalid(recptr))
-            recptr = log_heap_visible(rel->rd_node, heapBlk, buf);
+            recptr = log_heap_visible(rel->rd_node, heapBlk, buf,
+                                      cutoff_xid);
         PageSetLSN(page, recptr);
         PageSetTLI(page, ThisTimeLineID);
     }
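The new comment above defines cutoff_xid as the largest xmin on the page being marked all-visible, or InvalidTransactionId for a page with no tuples; the non-recovery callers that actually compute it are in files not shown in this excerpt. A hedged sketch of that computation, again with plain integers and 0 standing in for InvalidTransactionId:

#include <stddef.h>
#include <stdint.h>

typedef uint32_t SimpleXid;                 /* stand-in for TransactionId */
#define SIMPLE_INVALID_XID ((SimpleXid) 0)  /* stand-in for InvalidTransactionId */

/* Hypothetical helper: the cutoff to pass to visibilitymap_set() is the
 * newest xmin among the tuples on the page, or the invalid XID when the
 * page holds no tuples at all. */
static SimpleXid
page_visibility_cutoff(const SimpleXid *tuple_xmins, size_t ntuples)
{
    SimpleXid cutoff = SIMPLE_INVALID_XID;

    for (size_t i = 0; i < ntuples; i++)
    {
        if (tuple_xmins[i] > cutoff)
            cutoff = tuple_xmins[i];
    }
    return cutoff;
}

Using the newest xmin keeps the conflict horizon as tight as possible: only snapshots that could still fail to see that XID need to be cancelled when the bit is replayed.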