
Add buffer_std flag to MarkBufferDirtyHint().

MarkBufferDirtyHint() writes WAL, and should know if it's got a
standard buffer or not. Currently, the only callers where buffer_std
is false are related to the FSM.

In passing, rename XLOG_HINT to XLOG_FPI, which is more descriptive.

Back-patch to 9.3.
Jeff Davis
2013-06-17 08:02:12 -07:00
parent 2bc4ab4f9c
commit b8fd1a09f3
15 changed files with 29 additions and 29 deletions
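
For orientation, a minimal caller sketch (not taken from this commit; the function name and the PageSetFull() stand-in are illustrative only): code that flips a hint-style bit now reports it through the two-argument MarkBufferDirtyHint(), passing buffer_std = true for pages that use the standard page layout and false for non-standard pages such as FSM pages.

/* Hypothetical example, assuming 9.3-era headers; not part of this commit. */
#include "postgres.h"
#include "storage/bufmgr.h"
#include "storage/bufpage.h"

static void
example_set_hint(Buffer buf, bool page_is_standard)
{
	Page		page = BufferGetPage(buf);

	/* A change that is safe to lose, e.g. setting the "page full" hint bit. */
	PageSetFull(page);

	/*
	 * Tell the buffer manager the page changed in a non-critical way.  The new
	 * second argument says whether the page follows the standard layout, so
	 * XLogSaveBufferForHint() knows whether the pd_lower/pd_upper "hole" may
	 * be omitted from the full-page image it writes.
	 */
	MarkBufferDirtyHint(buf, page_is_standard);
}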

View File

@@ -287,7 +287,7 @@ hashgettuple(PG_FUNCTION_ARGS)
/*
* Since this can be redone later if needed, mark as a hint.
*/
-MarkBufferDirtyHint(buf);
+MarkBufferDirtyHint(buf, true);
}
/*

View File

@@ -262,7 +262,7 @@ heap_page_prune(Relation relation, Buffer buffer, TransactionId OldestXmin,
{
((PageHeader) page)->pd_prune_xid = prstate.new_prune_xid;
PageClearFull(page);
-MarkBufferDirtyHint(buffer);
+MarkBufferDirtyHint(buffer, true);
}
}

View File

@@ -413,9 +413,9 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
* crucial. Be sure to mark the proper buffer dirty.
*/
if (nbuf != InvalidBuffer)
-MarkBufferDirtyHint(nbuf);
+MarkBufferDirtyHint(nbuf, true);
else
-MarkBufferDirtyHint(buf);
+MarkBufferDirtyHint(buf, true);
}
}
}

View File

@@ -1052,7 +1052,7 @@ restart:
opaque->btpo_cycleid == vstate->cycleid)
{
opaque->btpo_cycleid = 0;
-MarkBufferDirtyHint(buf);
+MarkBufferDirtyHint(buf, true);
}
}

View File

@@ -1789,7 +1789,7 @@ _bt_killitems(IndexScanDesc scan, bool haveLock)
if (killedsomething)
{
opaque->btpo_flags |= BTP_HAS_GARBAGE;
-MarkBufferDirtyHint(so->currPos.buf);
+MarkBufferDirtyHint(so->currPos.buf, true);
}
if (!haveLock)

View File

@@ -82,11 +82,11 @@ xlog_desc(StringInfo buf, uint8 xl_info, char *rec)
appendStringInfo(buf, "restore point: %s", xlrec->rp_name);
}
-else if (info == XLOG_HINT)
+else if (info == XLOG_FPI)
{
BkpBlock *bkp = (BkpBlock *) rec;
appendStringInfo(buf, "page hint: %s block %u",
appendStringInfo(buf, "full-page image: %s block %u",
relpathperm(bkp->node, bkp->fork),
bkp->block);
}

View File

@@ -7681,12 +7681,9 @@ XLogRestorePoint(const char *rpName)
* records. In that case, multiple copies of the same block would be recorded
* in separate WAL records by different backends, though that is still OK from
* a correctness perspective.
-*
-* Note that this only works for buffers that fit the standard page model,
-* i.e. those for which buffer_std == true
*/
XLogRecPtr
-XLogSaveBufferForHint(Buffer buffer)
+XLogSaveBufferForHint(Buffer buffer, bool buffer_std)
{
XLogRecPtr recptr = InvalidXLogRecPtr;
XLogRecPtr lsn;
@@ -7708,7 +7705,7 @@ XLogSaveBufferForHint(Buffer buffer)
* and reset rdata for any actual WAL record insert.
*/
rdata[0].buffer = buffer;
-rdata[0].buffer_std = true;
+rdata[0].buffer_std = buffer_std;
/*
* Check buffer while not holding an exclusive lock.
@@ -7722,6 +7719,9 @@ XLogSaveBufferForHint(Buffer buffer)
* Copy buffer so we don't have to worry about concurrent hint bit or
* lsn updates. We assume pd_lower/upper cannot be changed without an
* exclusive lock, so the contents bkp are not racy.
+*
+* With buffer_std set to false, XLogCheckBuffer() sets hole_length and
+* hole_offset to 0; so the following code is safe for either case.
*/
memcpy(copied_buffer, origdata, bkpb.hole_offset);
memcpy(copied_buffer + bkpb.hole_offset,
@@ -7744,7 +7744,7 @@ XLogSaveBufferForHint(Buffer buffer)
rdata[1].buffer = InvalidBuffer;
rdata[1].next = NULL;
-recptr = XLogInsert(RM_XLOG_ID, XLOG_HINT, rdata);
+recptr = XLogInsert(RM_XLOG_ID, XLOG_FPI, rdata);
}
return recptr;
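
To make the new comment about hole_length/hole_offset concrete, here is a rough sketch (mine, not the actual XLogCheckBuffer() code) of how the backup-block "hole" is derived: for a standard page the unused space between pd_lower and pd_upper can be skipped, while for a non-standard page (buffer_std == false) both values come out as 0, so the memcpy() calls above end up copying the whole page.

/* Illustrative only; the real logic lives in XLogCheckBuffer() in xlog.c. */
static void
example_compute_hole(Page page, bool buffer_std,
					 uint16 *hole_offset, uint16 *hole_length)
{
	if (buffer_std)
	{
		uint16		lower = ((PageHeader) page)->pd_lower;
		uint16		upper = ((PageHeader) page)->pd_upper;

		if (lower >= SizeOfPageHeaderData && upper > lower && upper <= BLCKSZ)
		{
			*hole_offset = lower;
			*hole_length = upper - lower;
			return;
		}
	}

	/* Non-standard page (or implausible bounds): take the whole page. */
	*hole_offset = 0;
	*hole_length = 0;
}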
@@ -8109,14 +8109,14 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record)
{
/* nothing to do here */
}
-else if (info == XLOG_HINT)
+else if (info == XLOG_FPI)
{
char *data;
BkpBlock bkpb;
/*
-* Hint bit records contain a backup block stored "inline" in the
-* normal data since the locking when writing hint records isn't
+* Full-page image (FPI) records contain a backup block stored "inline"
+* in the normal data since the locking when writing hint records isn't
* sufficient to use the normal backup block mechanism, which assumes
* exclusive lock on the buffer supplied.
*