
Allow I/O reliability checks using 16-bit checksums

Checksums are set immediately prior to flush out of shared buffers
and checked when pages are read in again. Hint bit setting will
require full page write when block is dirtied, which causes various
infrastructure changes. Extensive comments, docs and README.

WARNING message thrown if checksum fails on non-all zeroes page;
ERROR thrown but can be disabled with ignore_checksum_failure = on.

Feature enabled by an initdb option, since transition from option off
to option on is long and complex and has not yet been implemented.
Default is not to use checksums.

Checksum used is WAL CRC-32 truncated to 16-bits.
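As a rough illustration of that scheme (this block is not part of the commit; the CRC routine, the constants, and the checksum-field offset below are all assumptions), a 32-bit CRC can be computed over the page while skipping the stored checksum field, then truncated to 16 bits:

/*
 * Illustrative sketch only -- not the committed PostgreSQL code.
 * Compute a CRC-32 over a page, skipping the page's own checksum
 * field, and truncate the result to 16 bits.
 */
#include <stddef.h>
#include <stdint.h>

#define DEMO_BLCKSZ        8192    /* assumed page size */
#define DEMO_CHECKSUM_OFF  8       /* assumed offset of 2-byte checksum */

/* plain bitwise CRC-32 (reflected polynomial); chainable across calls */
static uint32_t
demo_crc32(const uint8_t *buf, size_t len, uint32_t crc)
{
    crc = ~crc;
    for (size_t i = 0; i < len; i++)
    {
        crc ^= buf[i];
        for (int bit = 0; bit < 8; bit++)
            crc = (crc >> 1) ^ (0xEDB88320u & (0u - (crc & 1u)));
    }
    return ~crc;
}

static uint16_t
demo_page_checksum16(const uint8_t *page)
{
    uint32_t crc = 0;

    /* checksum everything except the checksum field itself */
    crc = demo_crc32(page, DEMO_CHECKSUM_OFF, crc);
    crc = demo_crc32(page + DEMO_CHECKSUM_OFF + 2,
                     DEMO_BLCKSZ - DEMO_CHECKSUM_OFF - 2, crc);

    return (uint16_t) crc;      /* "truncated to 16-bits" */
}

The committed code also takes the block number as an input (note the PageSetChecksumInplace(page, blkno) calls in the diffs below), which catches pages written out to the wrong location; the sketch omits that detail.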

Simon Riggs, Jeff Davis, Greg Smith
Wide input and assistance from many community members. Thank you.
Simon Riggs
2013-03-22 13:54:07 +00:00
parent e4a05c7512
commit 96ef3b8ff1
40 changed files with 766 additions and 146 deletions

View File

@@ -362,8 +362,12 @@ gistScanPage(IndexScanDesc scan, GISTSearchItem *pageItem, double *myDistances,
 		{
 			/* Creating index-page GISTSearchItem */
 			item->blkno = ItemPointerGetBlockNumber(&it->t_tid);
-			/* lsn of current page is lsn of parent page for child */
-			item->data.parentlsn = PageGetLSN(page);
+
+			/*
+			 * LSN of current page is lsn of parent page for child. We only
+			 * have a shared lock, so we need to get the LSN atomically.
+			 */
+			item->data.parentlsn = BufferGetLSNAtomic(buffer);
 		}
 
 		/* Insert it into the queue using new distance data */
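Why "atomically"? With checksums enabled, a hint writer can update the page LSN (via the new XLOG_HINT mechanism shown later in this commit) while holding only a shared content lock, so another shared locker cannot assume the LSN is stable. A sketch of the idea behind BufferGetLSNAtomic() follows; it is not the committed function, and the buffer-header lock helpers and accessor names are assumptions:

/*
 * Sketch only -- not the committed BufferGetLSNAtomic(). Assumes
 * LockBufHdr()/UnlockBufHdr()-style buffer-header lock helpers and a
 * GetBufferDescriptor() accessor; the real names and details may differ.
 */
XLogRecPtr
BufferGetLSNAtomic_sketch(Buffer buffer)
{
    Page        page = BufferGetPage(buffer);
    BufferDesc *hdr = GetBufferDescriptor(buffer);
    XLogRecPtr  lsn;

    /* without checksums, hint writes never move the LSN under share lock */
    if (!DataChecksumsEnabled())
        return PageGetLSN(page);

    /* with checksums, read the LSN under the buffer-header spinlock */
    LockBufHdr(hdr);
    lsn = PageGetLSN(page);
    UnlockBufHdr(hdr);

    return lsn;
}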

View File

@@ -285,11 +285,9 @@ hashgettuple(PG_FUNCTION_ARGS)
 			ItemIdMarkDead(PageGetItemId(page, offnum));
 
 			/*
-			 * Since this can be redone later if needed, it's treated the same
-			 * as a commit-hint-bit status update for heap tuples: we mark the
-			 * buffer dirty but don't make a WAL log entry.
+			 * Since this can be redone later if needed, mark as a hint.
 			 */
-			SetBufferCommitInfoNeedsSave(buf);
+			MarkBufferDirtyHint(buf);
 		}
 
 		/*

View File

@@ -5754,17 +5754,23 @@ log_heap_freeze(Relation reln, Buffer buffer,
  * being marked all-visible, and vm_buffer is the buffer containing the
  * corresponding visibility map block. Both should have already been modified
  * and dirtied.
+ *
+ * If checksums are enabled, we also add the heap_buffer to the chain to
+ * protect it from being torn.
  */
 XLogRecPtr
-log_heap_visible(RelFileNode rnode, BlockNumber block, Buffer vm_buffer,
+log_heap_visible(RelFileNode rnode, Buffer heap_buffer, Buffer vm_buffer,
 				 TransactionId cutoff_xid)
 {
 	xl_heap_visible xlrec;
 	XLogRecPtr	recptr;
-	XLogRecData rdata[2];
+	XLogRecData rdata[3];
+
+	Assert(BufferIsValid(heap_buffer));
+	Assert(BufferIsValid(vm_buffer));
 
 	xlrec.node = rnode;
-	xlrec.block = block;
+	xlrec.block = BufferGetBlockNumber(heap_buffer);
 	xlrec.cutoff_xid = cutoff_xid;
 
 	rdata[0].data = (char *) &xlrec;
@@ -5778,6 +5784,17 @@ log_heap_visible(RelFileNode rnode, BlockNumber block, Buffer vm_buffer,
 	rdata[1].buffer_std = false;
 	rdata[1].next = NULL;
 
+	if (DataChecksumsEnabled())
+	{
+		rdata[1].next = &(rdata[2]);
+
+		rdata[2].data = NULL;
+		rdata[2].len = 0;
+		rdata[2].buffer = heap_buffer;
+		rdata[2].buffer_std = true;
+		rdata[2].next = NULL;
+	}
+
 	recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_VISIBLE, rdata);
 
 	return recptr;
@@ -6139,8 +6156,6 @@ static void
 heap_xlog_visible(XLogRecPtr lsn, XLogRecord *record)
 {
 	xl_heap_visible *xlrec = (xl_heap_visible *) XLogRecGetData(record);
-	Buffer		buffer;
-	Page		page;
 
 	/*
 	 * If there are any Hot Standby transactions running that have an xmin
@@ -6155,39 +6170,56 @@ heap_xlog_visible(XLogRecPtr lsn, XLogRecord *record)
 	ResolveRecoveryConflictWithSnapshot(xlrec->cutoff_xid, xlrec->node);
 
 	/*
-	 * Read the heap page, if it still exists. If the heap file has been
-	 * dropped or truncated later in recovery, we don't need to update the
-	 * page, but we'd better still update the visibility map.
+	 * If heap block was backed up, restore it. This can only happen with
+	 * checksums enabled.
 	 */
-	buffer = XLogReadBufferExtended(xlrec->node, MAIN_FORKNUM, xlrec->block,
-									RBM_NORMAL);
-	if (BufferIsValid(buffer))
+	if (record->xl_info & XLR_BKP_BLOCK(1))
 	{
-		LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
-
-		page = (Page) BufferGetPage(buffer);
+		Assert(DataChecksumsEnabled());
+		(void) RestoreBackupBlock(lsn, record, 1, false, false);
+	}
+	else
+	{
+		Buffer		buffer;
+		Page		page;
 
 		/*
-		 * We don't bump the LSN of the heap page when setting the visibility
-		 * map bit, because that would generate an unworkable volume of
-		 * full-page writes. This exposes us to torn page hazards, but since
-		 * we're not inspecting the existing page contents in any way, we
-		 * don't care.
-		 *
-		 * However, all operations that clear the visibility map bit *do* bump
-		 * the LSN, and those operations will only be replayed if the XLOG LSN
-		 * follows the page LSN. Thus, if the page LSN has advanced past our
-		 * XLOG record's LSN, we mustn't mark the page all-visible, because
-		 * the subsequent update won't be replayed to clear the flag.
+		 * Read the heap page, if it still exists. If the heap file has been
+		 * dropped or truncated later in recovery, we don't need to update the
+		 * page, but we'd better still update the visibility map.
 		 */
-		if (lsn > PageGetLSN(page))
+		buffer = XLogReadBufferExtended(xlrec->node, MAIN_FORKNUM,
+										xlrec->block, RBM_NORMAL);
+		if (BufferIsValid(buffer))
 		{
-			PageSetAllVisible(page);
-			MarkBufferDirty(buffer);
-		}
+			LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
 
-		/* Done with heap page. */
-		UnlockReleaseBuffer(buffer);
+			page = (Page) BufferGetPage(buffer);
+
+			/*
+			 * We don't bump the LSN of the heap page when setting the
+			 * visibility map bit (unless checksums are enabled, in which case
+			 * we must), because that would generate an unworkable volume of
+			 * full-page writes. This exposes us to torn page hazards, but
+			 * since we're not inspecting the existing page contents in any
+			 * way, we don't care.
+			 *
+			 * However, all operations that clear the visibility map bit *do*
+			 * bump the LSN, and those operations will only be replayed if the
+			 * XLOG LSN follows the page LSN. Thus, if the page LSN has
+			 * advanced past our XLOG record's LSN, we mustn't mark the page
+			 * all-visible, because the subsequent update won't be replayed to
+			 * clear the flag.
+			 */
+			if (lsn > PageGetLSN(page))
+			{
+				PageSetAllVisible(page);
+				MarkBufferDirty(buffer);
+			}
+
+			/* Done with heap page. */
+			UnlockReleaseBuffer(buffer);
+		}
 	}
 
 	/*
@@ -6218,7 +6250,7 @@ heap_xlog_visible(XLogRecPtr lsn, XLogRecord *record)
 	 * real harm is done; and the next VACUUM will fix it.
 	 */
 	if (lsn > PageGetLSN(BufferGetPage(vmbuffer)))
-		visibilitymap_set(reln, xlrec->block, lsn, vmbuffer,
+		visibilitymap_set(reln, xlrec->block, InvalidBuffer, lsn, vmbuffer,
 						  xlrec->cutoff_xid);
 
 	ReleaseBuffer(vmbuffer);

View File

@@ -262,7 +262,7 @@ heap_page_prune(Relation relation, Buffer buffer, TransactionId OldestXmin,
 	{
 		((PageHeader) page)->pd_prune_xid = prstate.new_prune_xid;
 		PageClearFull(page);
-		SetBufferCommitInfoNeedsSave(buffer);
+		MarkBufferDirtyHint(buffer);
 	}
 }

View File

@@ -273,6 +273,8 @@ end_heap_rewrite(RewriteState state)
 	/* Write the last page, if any */
 	if (state->rs_buffer_valid)
 	{
+		PageSetChecksumInplace(state->rs_buffer, state->rs_blockno);
+
 		if (state->rs_use_wal)
 			log_newpage(&state->rs_new_rel->rd_node,
 						MAIN_FORKNUM,
@@ -614,6 +616,8 @@ raw_heap_insert(RewriteState state, HeapTuple tup)
 	{
 		/* Doesn't fit, so write out the existing page */
 
+		PageSetChecksumInplace(page, state->rs_blockno);
+
 		/* XLOG stuff */
 		if (state->rs_use_wal)
 			log_newpage(&state->rs_new_rel->rd_node,

View File

@@ -233,13 +233,18 @@ visibilitymap_pin_ok(BlockNumber heapBlk, Buffer buf)
  * marked all-visible; it is needed for Hot Standby, and can be
  * InvalidTransactionId if the page contains no tuples.
  *
+ * Caller is expected to set the heap page's PD_ALL_VISIBLE bit before calling
+ * this function. Except in recovery, caller should also pass the heap
+ * buffer. When checksums are enabled and we're not in recovery, we must add
+ * the heap buffer to the WAL chain to protect it from being torn.
+ *
  * You must pass a buffer containing the correct map page to this function.
  * Call visibilitymap_pin first to pin the right one. This function doesn't do
  * any I/O.
  */
 void
-visibilitymap_set(Relation rel, BlockNumber heapBlk, XLogRecPtr recptr,
-				  Buffer buf, TransactionId cutoff_xid)
+visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf,
+				  XLogRecPtr recptr, Buffer vmBuf, TransactionId cutoff_xid)
 {
 	BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
 	uint32		mapByte = HEAPBLK_TO_MAPBYTE(heapBlk);
@@ -252,34 +257,55 @@ visibilitymap_set(Relation rel, BlockNumber heapBlk, XLogRecPtr recptr,
 #endif
 
 	Assert(InRecovery || XLogRecPtrIsInvalid(recptr));
+	Assert(InRecovery || BufferIsValid(heapBuf));
 
-	/* Check that we have the right page pinned */
-	if (!BufferIsValid(buf) || BufferGetBlockNumber(buf) != mapBlock)
-		elog(ERROR, "wrong buffer passed to visibilitymap_set");
+	/* Check that we have the right heap page pinned, if present */
+	if (BufferIsValid(heapBuf) && BufferGetBlockNumber(heapBuf) != heapBlk)
+		elog(ERROR, "wrong heap buffer passed to visibilitymap_set");
 
-	page = BufferGetPage(buf);
+	/* Check that we have the right VM page pinned */
+	if (!BufferIsValid(vmBuf) || BufferGetBlockNumber(vmBuf) != mapBlock)
+		elog(ERROR, "wrong VM buffer passed to visibilitymap_set");
+
+	page = BufferGetPage(vmBuf);
 	map = PageGetContents(page);
-	LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
+	LockBuffer(vmBuf, BUFFER_LOCK_EXCLUSIVE);
 
 	if (!(map[mapByte] & (1 << mapBit)))
 	{
 		START_CRIT_SECTION();
 
 		map[mapByte] |= (1 << mapBit);
-		MarkBufferDirty(buf);
+		MarkBufferDirty(vmBuf);
 
 		if (RelationNeedsWAL(rel))
 		{
 			if (XLogRecPtrIsInvalid(recptr))
-				recptr = log_heap_visible(rel->rd_node, heapBlk, buf,
-										  cutoff_xid);
+			{
+				Assert(!InRecovery);
+				recptr = log_heap_visible(rel->rd_node, heapBuf, vmBuf,
+										  cutoff_xid);
+
+				/*
+				 * If data checksums are enabled, we need to protect the heap
+				 * page from being torn.
+				 */
+				if (DataChecksumsEnabled())
+				{
+					Page		heapPage = BufferGetPage(heapBuf);
+
+					/* caller is expected to set PD_ALL_VISIBLE first */
+					Assert(PageIsAllVisible(heapPage));
+					PageSetLSN(heapPage, recptr);
+				}
+			}
 			PageSetLSN(page, recptr);
 		}
 
 		END_CRIT_SECTION();
 	}
 
-	LockBuffer(buf, BUFFER_LOCK_UNLOCK);
+	LockBuffer(vmBuf, BUFFER_LOCK_UNLOCK);
 }
 
 /*
@@ -579,6 +605,8 @@ vm_extend(Relation rel, BlockNumber vm_nblocks)
 	/* Now extend the file */
 	while (vm_nblocks_now < vm_nblocks)
 	{
+		PageSetChecksumInplace(pg, vm_nblocks_now);
+
 		smgrextend(rel->rd_smgr, VISIBILITYMAP_FORKNUM, vm_nblocks_now,
 				   (char *) pg, false);
 		vm_nblocks_now++;
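The reworked contract is easiest to see from the caller's side. A hedged sketch of a vacuum-style caller (illustrative only, with hypothetical variable names; this is not a hunk from the commit):

/* Illustrative caller sketch, not a hunk from this commit. */
if (all_visible && !PageIsAllVisible(page))
{
    /* set PD_ALL_VISIBLE and dirty the heap buffer first ... */
    PageSetAllVisible(page);
    MarkBufferDirty(heapbuf);

    /* ... then pass the heap buffer so it can be added to the WAL chain */
    visibilitymap_set(rel, blkno, heapbuf, InvalidXLogRecPtr,
                      vmbuffer, visibility_cutoff_xid);
}

During recovery the heap buffer is not available to the caller, which is why heap_xlog_visible() above passes InvalidBuffer instead.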

View File

@@ -407,11 +407,15 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
 					 */
 					ItemIdMarkDead(curitemid);
 					opaque->btpo_flags |= BTP_HAS_GARBAGE;
-					/* be sure to mark the proper buffer dirty... */
+
+					/*
+					 * Mark buffer with a dirty hint, since state is not
+					 * crucial. Be sure to mark the proper buffer dirty.
+					 */
 					if (nbuf != InvalidBuffer)
-						SetBufferCommitInfoNeedsSave(nbuf);
+						MarkBufferDirtyHint(nbuf);
 					else
-						SetBufferCommitInfoNeedsSave(buf);
+						MarkBufferDirtyHint(buf);
 				}
 			}
 		}

View File

@@ -217,6 +217,7 @@ btbuildempty(PG_FUNCTION_ARGS)
 	_bt_initmetapage(metapage, P_NONE, 0);
 
 	/* Write the page. If archiving/streaming, XLOG it. */
+	PageSetChecksumInplace(metapage, BTREE_METAPAGE);
 	smgrwrite(index->rd_smgr, INIT_FORKNUM, BTREE_METAPAGE,
 			  (char *) metapage, true);
 	if (XLogIsNeeded())
@@ -1051,7 +1052,7 @@ restart:
 			opaque->btpo_cycleid == vstate->cycleid)
 		{
 			opaque->btpo_cycleid = 0;
-			SetBufferCommitInfoNeedsSave(buf);
+			MarkBufferDirtyHint(buf);
 		}
 	}

View File

@@ -288,12 +288,15 @@ _bt_blwritepage(BTWriteState *wstate, Page page, BlockNumber blkno)
 	{
 		if (!wstate->btws_zeropage)
 			wstate->btws_zeropage = (Page) palloc0(BLCKSZ);
+		/* don't set checksum for all-zero page */
 		smgrextend(wstate->index->rd_smgr, MAIN_FORKNUM,
 				   wstate->btws_pages_written++,
 				   (char *) wstate->btws_zeropage,
 				   true);
 	}
 
+	PageSetChecksumInplace(page, blkno);
+
 	/*
 	 * Now write the page. There's no need for smgr to schedule an fsync for
 	 * this write; we'll do it ourselves before ending the build.

View File

@@ -1781,9 +1781,7 @@ _bt_killitems(IndexScanDesc scan, bool haveLock)
 	}
 
 	/*
-	 * Since this can be redone later if needed, it's treated the same as a
-	 * commit-hint-bit status update for heap tuples: we mark the buffer dirty
-	 * but don't make a WAL log entry.
+	 * Since this can be redone later if needed, mark as dirty hint.
 	 *
 	 * Whenever we mark anything LP_DEAD, we also set the page's
 	 * BTP_HAS_GARBAGE flag, which is likewise just a hint.
@@ -1791,7 +1789,7 @@ _bt_killitems(IndexScanDesc scan, bool haveLock)
 	if (killedsomething)
 	{
 		opaque->btpo_flags |= BTP_HAS_GARBAGE;
-		SetBufferCommitInfoNeedsSave(so->currPos.buf);
+		MarkBufferDirtyHint(so->currPos.buf);
 	}
 
 	if (!haveLock)

View File

@@ -81,6 +81,10 @@ xlog_desc(StringInfo buf, uint8 xl_info, char *rec)
 		appendStringInfo(buf, "restore point: %s", xlrec->rp_name);
 
 	}
+	else if (info == XLOG_HINT)
+	{
+		appendStringInfo(buf, "page hint");
+	}
 	else if (info == XLOG_BACKUP_END)
 	{
 		XLogRecPtr	startpoint;

View File

@@ -154,6 +154,7 @@ spgbuildempty(PG_FUNCTION_ARGS)
 	SpGistInitMetapage(page);
 
 	/* Write the page. If archiving/streaming, XLOG it. */
+	PageSetChecksumInplace(page, SPGIST_METAPAGE_BLKNO);
 	smgrwrite(index->rd_smgr, INIT_FORKNUM, SPGIST_METAPAGE_BLKNO,
 			  (char *) page, true);
 	if (XLogIsNeeded())
@@ -163,6 +164,7 @@ spgbuildempty(PG_FUNCTION_ARGS)
 	/* Likewise for the root page. */
 	SpGistInitPage(page, SPGIST_LEAF);
 
+	PageSetChecksumInplace(page, SPGIST_ROOT_BLKNO);
 	smgrwrite(index->rd_smgr, INIT_FORKNUM, SPGIST_ROOT_BLKNO,
 			  (char *) page, true);
 	if (XLogIsNeeded())
@@ -172,6 +174,7 @@ spgbuildempty(PG_FUNCTION_ARGS)
 	/* Likewise for the null-tuples root page. */
 	SpGistInitPage(page, SPGIST_LEAF | SPGIST_NULLS);
 
+	PageSetChecksumInplace(page, SPGIST_NULL_BLKNO);
 	smgrwrite(index->rd_smgr, INIT_FORKNUM, SPGIST_NULL_BLKNO,
 			  (char *) page, true);
 	if (XLogIsNeeded())

View File

@@ -437,6 +437,8 @@ critical section.)
 4. Mark the shared buffer(s) as dirty with MarkBufferDirty().  (This must
 happen before the WAL record is inserted; see notes in SyncOneBuffer().)
+Note that marking a buffer dirty with MarkBufferDirty() should only
+happen iff you write a WAL record; see Writing Hints below.
 
 5. If the relation requires WAL-logging, build a WAL log record and pass it
 to XLogInsert(); then update the page's LSN using the returned XLOG
@@ -584,6 +586,26 @@ replay code has to do the insertion on its own to restore the index to
 consistency.  Such insertions occur after WAL is operational, so they can
 and should write WAL records for the additional generated actions.
 
+Writing Hints
+-------------
+
+In some cases, we write additional information to data blocks without
+writing a preceding WAL record.  This should only happen iff the data can
+be reconstructed later following a crash and the action is simply a way
+of optimising for performance.  When a hint is written we use
+MarkBufferDirtyHint() to mark the block dirty.
+
+If the buffer is clean and checksums are in use then
+MarkBufferDirtyHint() inserts an XLOG_HINT record to ensure that we
+take a full page image that includes the hint.  We do this to avoid
+a partial page write, when we write the dirtied page.  WAL is not
+written during recovery, so we simply skip dirtying blocks because
+of hints when in recovery.
+
+If you do decide to optimise away a WAL record, then any calls to
+MarkBufferDirty() must be replaced by MarkBufferDirtyHint(),
+otherwise you will expose the risk of partial page writes.
+
 Write-Ahead Logging for Filesystem Actions
 ------------------------------------------
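Tying the Writing Hints rules above back to code: a hedged sketch of the decision MarkBufferDirtyHint() has to make. This is not the committed function; the dirty-test and dirty-set helpers below are assumptions standing in for the real buffer-header manipulation:

/* Sketch only -- not the committed MarkBufferDirtyHint(). */
void
MarkBufferDirtyHint_sketch(Buffer buffer)
{
    /* WAL is not written during recovery: skip dirtying for hints there */
    if (DataChecksumsEnabled() && RecoveryInProgress())
        return;

    if (!BufferIsDirty_sketch(buffer))  /* hypothetical dirty test */
    {
        /*
         * Clean buffer plus checksums: force a full page image first so
         * that a torn write of the hint cannot invalidate the stored
         * page checksum.
         */
        if (DataChecksumsEnabled() && XLogIsNeeded())
        {
            XLogRecPtr  lsn = XLogSaveBufferForHint(buffer);

            /* may be InvalidXLogRecPtr; see the xlog.c hunks below */
            if (!XLogRecPtrIsInvalid(lsn))
                PageSetLSN(BufferGetPage(buffer), lsn);
        }
        SetBufferDirtyFlag_sketch(buffer);  /* hypothetical BM_DIRTY set */
    }
}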

View File

@@ -60,6 +60,7 @@
 #include "utils/timestamp.h"
 #include "pg_trace.h"
 
+extern bool bootstrap_data_checksums;
 
 /* File path names (all relative to $PGDATA) */
 #define RECOVERY_COMMAND_FILE	"recovery.conf"
@@ -730,6 +731,7 @@ XLogInsert(RmgrId rmid, uint8 info, XLogRecData *rdata)
 	bool		updrqst;
 	bool		doPageWrites;
 	bool		isLogSwitch = (rmid == RM_XLOG_ID && info == XLOG_SWITCH);
+	bool		isHint = (rmid == RM_XLOG_ID && info == XLOG_HINT);
 	uint8		info_orig = info;
 	static XLogRecord *rechdr;
@@ -999,6 +1001,18 @@ begin:;
 		goto begin;
 	}
 
+	/*
+	 * If this is a hint record and we don't need a backup block then
+	 * we have no more work to do and can exit quickly without inserting
+	 * a WAL record at all. In that case return InvalidXLogRecPtr.
+	 */
+	if (isHint && !(info & XLR_BKP_BLOCK_MASK))
+	{
+		LWLockRelease(WALInsertLock);
+		END_CRIT_SECTION();
+		return InvalidXLogRecPtr;
+	}
+
 	/*
 	 * If the current page is completely full, the record goes to the next
 	 * page, right after the page header.
@@ -1253,10 +1267,10 @@ XLogCheckBuffer(XLogRecData *rdata, bool doPageWrites,
 	 * not.  We don't need the buffer header lock for PageGetLSN because we
 	 * have exclusive lock on the page and/or the relation.
 	 */
-	*lsn = PageGetLSN(page);
+	*lsn = BufferGetLSNAtomic(rdata->buffer);
 
 	if (doPageWrites &&
-		PageGetLSN(page) <= RedoRecPtr)
+		*lsn <= RedoRecPtr)
 	{
 		/*
 		 * The page needs to be backed up, so set up *bkpb
@@ -3187,6 +3201,11 @@ RestoreBackupBlock(XLogRecPtr lsn, XLogRecord *record, int block_index,
 				BLCKSZ - (bkpb.hole_offset + bkpb.hole_length));
 	}
 
+	/*
+	 * Any checksum set on this page will be invalid. We don't need
+	 * to reset it here since it will be set before being written.
+	 */
+
 	PageSetLSN(page, lsn);
 	MarkBufferDirty(buffer);
@@ -3766,6 +3785,16 @@ GetSystemIdentifier(void)
 	return ControlFile->system_identifier;
 }
 
+/*
+ * Are checksums enabled for data pages?
+ */
+bool
+DataChecksumsEnabled(void)
+{
+	Assert(ControlFile != NULL);
+	return ControlFile->data_checksums;
+}
+
 /*
  * Returns a fake LSN for unlogged relations.
 *
@@ -4092,6 +4121,7 @@ BootStrapXLOG(void)
 	ControlFile->max_prepared_xacts = max_prepared_xacts;
 	ControlFile->max_locks_per_xact = max_locks_per_xact;
 	ControlFile->wal_level = wal_level;
+	ControlFile->data_checksums = bootstrap_data_checksums;
 
 	/* some additional ControlFile fields are set in WriteControlFile() */
@@ -7601,6 +7631,51 @@ XLogRestorePoint(const char *rpName)
 	return RecPtr;
 }
 
+/*
+ * Write a backup block if needed when we are setting a hint. Note that
+ * this may be called for a variety of page types, not just heaps.
+ *
+ * Deciding the "if needed" part is delicate and requires us to either
+ * grab WALInsertLock or check the info_lck spinlock. If we check the
+ * spinlock and it says Yes then we will need to get WALInsertLock as well,
+ * so the design choice here is to just go straight for the WALInsertLock
+ * and trust that calls to this function are minimised elsewhere.
+ *
+ * Callable while holding just share lock on the buffer content.
+ *
+ * Possible that multiple concurrent backends could attempt to write
+ * WAL records. In that case, more than one backup block may be recorded
+ * though that isn't important to the outcome and the backup blocks are
+ * likely to be identical anyway.
+ */
+#define XLOG_HINT_WATERMARK		13579
+XLogRecPtr
+XLogSaveBufferForHint(Buffer buffer)
+{
+	/*
+	 * Make an XLOG entry reporting the hint
+	 */
+	XLogRecData rdata[2];
+	int			watermark = XLOG_HINT_WATERMARK;
+
+	/*
+	 * Not allowed to have zero-length records, so use a small watermark
+	 */
+	rdata[0].data = (char *) (&watermark);
+	rdata[0].len = sizeof(int);
+	rdata[0].buffer = InvalidBuffer;
+	rdata[0].buffer_std = false;
+	rdata[0].next = &(rdata[1]);
+
+	rdata[1].data = NULL;
+	rdata[1].len = 0;
+	rdata[1].buffer = buffer;
+	rdata[1].buffer_std = true;
+	rdata[1].next = NULL;
+
+	return XLogInsert(RM_XLOG_ID, XLOG_HINT, rdata);
+}
+
 /*
 * Check if any of the GUC parameters that are critical for hot standby
 * have changed, and update the value in pg_control file if necessary.
@@ -7767,8 +7842,8 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record)
 {
 	uint8		info = record->xl_info & ~XLR_INFO_MASK;
 
-	/* Backup blocks are not used in xlog records */
-	Assert(!(record->xl_info & XLR_BKP_BLOCK_MASK));
+	/* Backup blocks are not used in most xlog records */
+	Assert(info == XLOG_HINT || !(record->xl_info & XLR_BKP_BLOCK_MASK));
 
 	if (info == XLOG_NEXTOID)
 	{
@@ -7961,6 +8036,34 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record)
 	{
 		/* nothing to do here */
 	}
+	else if (info == XLOG_HINT)
+	{
+#ifdef USE_ASSERT_CHECKING
+		int		   *watermark = (int *) XLogRecGetData(record);
+#endif
+
+		/* Check the watermark is correct for the hint record */
+		Assert(*watermark == XLOG_HINT_WATERMARK);
+
+		/* Backup blocks must be present for smgr hint records */
+		Assert(record->xl_info & XLR_BKP_BLOCK_MASK);
+
+		/*
+		 * Hint records have no information that needs to be replayed.
+		 * The sole purpose of them is to ensure that a hint bit does
+		 * not cause a checksum invalidation if a hint bit write should
+		 * cause a torn page. So the body of the record is empty but
+		 * there must be one backup block.
+		 *
+		 * Since the only change in the backup block is a hint bit,
+		 * there is no conflict with Hot Standby.
+		 *
+		 * This also means there is no corresponding API call for this,
+		 * so an smgr implementation has no need to implement anything.
+		 * Which means nothing is needed in md.c etc.
+		 */
+		RestoreBackupBlock(lsn, record, 0, false, false);
+	}
 	else if (info == XLOG_BACKUP_END)
 	{
 		XLogRecPtr	startpoint;