Fix a violation of WAL coding rules in the recent patch to include an
"all tuples visible" flag in heap page headers.  The flag update *must*
be applied before calling XLogInsert, but heap_update and the tuple
moving routines in VACUUM FULL were ignoring this rule.  A crash and
replay could therefore leave the flag incorrectly set, causing rows
to appear visible in seqscans when they should not be.  This might explain
recent reports of data corruption from Jeff Ross and others.

In passing, do a bit of editorialization on comments in visibilitymap.c.
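
For readers unfamiliar with the rule being cited: every modification to a data page that is covered by a WAL record, including a flag like PD_ALL_VISIBLE, has to be applied (and the buffer marked dirty) before XLogInsert is called, inside the critical section, and the page is then stamped with the returned LSN. Below is a stripped-down sketch of that ordering using the same 8.4-era primitives as the heap_update hunk further down; the function name and the assumption that the caller has already built rdata are illustrative only, not part of this commit.

#include "postgres.h"

#include "access/heapam.h"
#include "access/xlog.h"
#include "miscadmin.h"
#include "storage/bufmgr.h"
#include "storage/bufpage.h"

/*
 * Sketch only: apply the page change, then WAL-log it, then set the LSN.
 * The record contents (rdata) are assumed to be built by the caller.
 */
static void
example_wal_logged_change(Relation rel, Buffer buf, XLogRecData *rdata)
{
	Page		page = BufferGetPage(buf);

	START_CRIT_SECTION();

	/* 1. Make *all* page modifications first, including the flag clear. */
	PageClearAllVisible(page);
	MarkBufferDirty(buf);

	/* 2. Only afterwards emit the WAL record describing the change. */
	if (!rel->rd_istemp)
	{
		XLogRecPtr	recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_UPDATE, rdata);

		/* 3. Stamp the page so replay knows the change is already applied. */
		PageSetLSN(page, recptr);
		PageSetTLI(page, ThisTimeLineID);
	}

	END_CRIT_SECTION();
}

A crash between XLogInsert and the page write is then harmless: replay redoes the change on the old page image. Doing the flag update after XLogInsert, as the pre-patch code did, is exactly what lets a crash leave the flag set without the rest of the change.
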
commit 7fc7a7c4d0
parent cab9a0656c
Author: Tom Lane
Date:   2009-08-24 02:18:32 +00:00

6 changed files with 112 additions and 90 deletions

--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/access/heap/heapam.c,v 1.277 2009/06/11 14:48:53 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/access/heap/heapam.c,v 1.278 2009/08/24 02:18:31 tgl Exp $
  *
  *
  * INTERFACE ROUTINES
@@ -78,7 +78,8 @@ static HeapScanDesc heap_beginscan_internal(Relation relation,
 				bool allow_strat, bool allow_sync,
 				bool is_bitmapscan);
 static XLogRecPtr log_heap_update(Relation reln, Buffer oldbuf,
-				ItemPointerData from, Buffer newbuf, HeapTuple newtup, bool move);
+				ItemPointerData from, Buffer newbuf, HeapTuple newtup, bool move,
+				bool all_visible_cleared, bool new_all_visible_cleared);
 static bool HeapSatisfiesHOTUpdate(Relation relation, Bitmapset *hot_attrs,
 				HeapTuple oldtup, HeapTuple newtup);
@@ -2760,32 +2761,7 @@ l2:
 	/* record address of new tuple in t_ctid of old one */
 	oldtup.t_data->t_ctid = heaptup->t_self;
 
-	if (newbuf != buffer)
-		MarkBufferDirty(newbuf);
-	MarkBufferDirty(buffer);
-
-	/*
-	 * Note: we mustn't clear PD_ALL_VISIBLE flags before writing the WAL
-	 * record, because log_heap_update looks at those flags to set the
-	 * corresponding flags in the WAL record.
-	 */
-
-	/* XLOG stuff */
-	if (!relation->rd_istemp)
-	{
-		XLogRecPtr	recptr = log_heap_update(relation, buffer, oldtup.t_self,
-											 newbuf, heaptup, false);
-
-		if (newbuf != buffer)
-		{
-			PageSetLSN(BufferGetPage(newbuf), recptr);
-			PageSetTLI(BufferGetPage(newbuf), ThisTimeLineID);
-		}
-		PageSetLSN(BufferGetPage(buffer), recptr);
-		PageSetTLI(BufferGetPage(buffer), ThisTimeLineID);
-	}
-
-	/* Clear PD_ALL_VISIBLE flags */
+	/* clear PD_ALL_VISIBLE flags */
 	if (PageIsAllVisible(BufferGetPage(buffer)))
 	{
 		all_visible_cleared = true;
@@ -2797,6 +2773,27 @@ l2:
 		PageClearAllVisible(BufferGetPage(newbuf));
 	}
 
+	if (newbuf != buffer)
+		MarkBufferDirty(newbuf);
+	MarkBufferDirty(buffer);
+
+	/* XLOG stuff */
+	if (!relation->rd_istemp)
+	{
+		XLogRecPtr	recptr = log_heap_update(relation, buffer, oldtup.t_self,
+											 newbuf, heaptup, false,
+											 all_visible_cleared,
+											 all_visible_cleared_new);
+
+		if (newbuf != buffer)
+		{
+			PageSetLSN(BufferGetPage(newbuf), recptr);
+			PageSetTLI(BufferGetPage(newbuf), ThisTimeLineID);
+		}
+		PageSetLSN(BufferGetPage(buffer), recptr);
+		PageSetTLI(BufferGetPage(buffer), ThisTimeLineID);
+	}
+
 	END_CRIT_SECTION();
 
 	if (newbuf != buffer)
@@ -3910,7 +3907,8 @@ log_heap_freeze(Relation reln, Buffer buffer,
  */
 static XLogRecPtr
 log_heap_update(Relation reln, Buffer oldbuf, ItemPointerData from,
-				Buffer newbuf, HeapTuple newtup, bool move)
+				Buffer newbuf, HeapTuple newtup, bool move,
+				bool all_visible_cleared, bool new_all_visible_cleared)
 {
 	/*
 	 * Note: xlhdr is declared to have adequate size and correct alignment for
@@ -3946,9 +3944,9 @@ log_heap_update(Relation reln, Buffer oldbuf, ItemPointerData from,
 	xlrec.target.node = reln->rd_node;
 	xlrec.target.tid = from;
-	xlrec.all_visible_cleared = PageIsAllVisible(BufferGetPage(oldbuf));
+	xlrec.all_visible_cleared = all_visible_cleared;
 	xlrec.newtid = newtup->t_self;
-	xlrec.new_all_visible_cleared = PageIsAllVisible(BufferGetPage(newbuf));
+	xlrec.new_all_visible_cleared = new_all_visible_cleared;
 
 	rdata[0].data = (char *) &xlrec;
 	rdata[0].len = SizeOfHeapUpdate;
@@ -4015,9 +4013,11 @@ log_heap_update(Relation reln, Buffer oldbuf, ItemPointerData from,
  */
 XLogRecPtr
 log_heap_move(Relation reln, Buffer oldbuf, ItemPointerData from,
-			  Buffer newbuf, HeapTuple newtup)
+			  Buffer newbuf, HeapTuple newtup,
+			  bool all_visible_cleared, bool new_all_visible_cleared)
 {
-	return log_heap_update(reln, oldbuf, from, newbuf, newtup, true);
+	return log_heap_update(reln, oldbuf, from, newbuf, newtup, true,
+						   all_visible_cleared, new_all_visible_cleared);
 }
 
 /*
@@ -4222,7 +4222,7 @@ heap_xlog_delete(XLogRecPtr lsn, XLogRecord *record)
 	blkno = ItemPointerGetBlockNumber(&(xlrec->target.tid));
 
 	/*
-	 * The visibility map always needs to be updated, even if the heap page is
+	 * The visibility map may need to be fixed even if the heap page is
 	 * already up-to-date.
 	 */
 	if (xlrec->all_visible_cleared)
@@ -4300,7 +4300,7 @@ heap_xlog_insert(XLogRecPtr lsn, XLogRecord *record)
 	blkno = ItemPointerGetBlockNumber(&(xlrec->target.tid));
 
 	/*
-	 * The visibility map always needs to be updated, even if the heap page is
+	 * The visibility map may need to be fixed even if the heap page is
 	 * already up-to-date.
 	 */
 	if (xlrec->all_visible_cleared)
@@ -4412,7 +4412,7 @@ heap_xlog_update(XLogRecPtr lsn, XLogRecord *record, bool move, bool hot_update)
 	Size		freespace;
 
 	/*
-	 * The visibility map always needs to be updated, even if the heap page is
+	 * The visibility map may need to be fixed even if the heap page is
 	 * already up-to-date.
 	 */
 	if (xlrec->all_visible_cleared)
@@ -4507,7 +4507,7 @@ heap_xlog_update(XLogRecPtr lsn, XLogRecord *record, bool move, bool hot_update)
 newt:;
 
 	/*
-	 * The visibility map always needs to be updated, even if the heap page is
+	 * The visibility map may need to be fixed even if the heap page is
 	 * already up-to-date.
 	 */
 	if (xlrec->new_all_visible_cleared)
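
The comment tweaks in heap_xlog_delete, heap_xlog_insert and heap_xlog_update above all sit in front of the same redo-side pattern: the visibility map bit lives on its own page, so clearing it is not covered by the heap page's LSN check and must be redone whenever the record says the flag was cleared, even when the heap page itself is skipped as already up to date. Roughly, as a sketch using the 8.4-era fake-relcache helpers (the helper name and its packaging as a separate function are made up here for illustration):

#include "postgres.h"

#include "access/htup.h"
#include "access/visibilitymap.h"
#include "access/xlogutils.h"

/* Hypothetical helper naming the pattern shared by the redo routines above. */
static void
redo_clear_vm_bit(xl_heaptid *target, bool all_visible_cleared)
{
	if (all_visible_cleared)
	{
		/* A fake relcache entry is enough to address the map fork during redo. */
		Relation	reln = CreateFakeRelcacheEntry(target->node);

		visibilitymap_clear(reln, ItemPointerGetBlockNumber(&target->tid));
		FreeFakeRelcacheEntry(reln);
	}
}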