Remove old-style VACUUM FULL (which was known for a little while as
VACUUM FULL INPLACE), along with a boatload of subsidiary code and complexity.
Per discussion, the use case for this method of vacuuming is no longer large
enough to justify maintaining it; not to mention that we don't wish to invest
the work that would be needed to make it play nicely with Hot Standby.

Aside from the code directly related to old-style VACUUM FULL, this commit
removes support for certain WAL record types that could only be generated
within VACUUM FULL, redirect-pointer removal in heap_page_prune, and
nontransactional generation of cache invalidation sinval messages (the last
being the sticking point for Hot Standby).

We still have to retain all code that copes with finding HEAP_MOVED_OFF and
HEAP_MOVED_IN flag bits on existing tuples.  This can't be removed as long
as we want to support in-place update from pre-9.0 databases.
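The retained compatibility code keys off the HEAP_MOVED_OFF/HEAP_MOVED_IN
infomask bits. Below is a minimal standalone sketch of the test this commit
simplifies in HeapTupleHeaderAdvanceLatestRemovedXid (the mask values mirror
src/include/access/htup.h; the rest is a toy stand-in, not backend code):

	#include <stdio.h>
	#include <stdint.h>

	/* Infomask bits as defined in src/include/access/htup.h */
	#define HEAP_MOVED_OFF 0x4000	/* moved elsewhere by old-style VACUUM FULL */
	#define HEAP_MOVED_IN  0x8000	/* moved here by old-style VACUUM FULL */
	#define HEAP_MOVED (HEAP_MOVED_OFF | HEAP_MOVED_IN)

	int
	main(void)
	{
		uint16_t	t_infomask = HEAP_MOVED_OFF;

		/* Old spelling: test the two bits separately */
		int			old_way = (t_infomask & HEAP_MOVED_OFF) ||
							  (t_infomask & HEAP_MOVED_IN);

		/* Spelling after this commit: one combined mask test */
		int			new_way = (t_infomask & HEAP_MOVED) != 0;

		printf("old=%d new=%d\n", old_way, new_way);	/* both print 1 */
		return 0;
	}

Because HEAP_MOVED is the OR of the two bits, any tuple still carrying either
flag is recognized with a single mask test; the tuple's xvac field (which
shares storage with cmax) then identifies the VACUUM FULL transaction that
moved it.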
Author: Tom Lane
Date:   2010-02-08 04:33:55 +00:00
Parent: 1ddc2703a9
Commit: 0a469c8769

41 changed files with 247 additions and 3737 deletions

src/backend/access/heap/README.HOT

@@ -1,4 +1,4 @@
-$PostgreSQL: pgsql/src/backend/access/heap/README.HOT,v 1.4 2008/10/02 20:59:31 momjian Exp $
+$PostgreSQL: pgsql/src/backend/access/heap/README.HOT,v 1.5 2010/02/08 04:33:52 tgl Exp $
Heap Only Tuples (HOT)
======================
@@ -255,27 +255,6 @@ dead heap-only tuples, and cleans up any dead line pointers as if they were
regular dead tuples.
-VACUUM FULL
------------
-VACUUM FULL performs an extra operation of collapsing out redirecting line
-pointers, by moving the first non-DEAD tuple of each HOT chain to the root
-position and clearing its heap-only-tuple flag. This effectively changes
-the user-visible CTID of that tuple. This would be completely unsafe
-during normal concurrent operation, but since VACUUM FULL takes full
-exclusive lock on the table, it should be OK. (Note that VACUUM FULL has
-always felt free to change tuples' CTIDs by moving them across pages.)
-Eliminating redirection links means that the main body of VACUUM FULL
-doesn't have to deal with them, which seems a good thing since VACUUM FULL
-is horrendously complex already.
-When VACUUM FULL tries to move tuple chains, it does not distinguish regular
-and heap-only tuples, but just moves both types the same. This is OK because
-it will move the entire non-DEAD tail of an update chain and remove index
-entries for each item moved. At worst, we'll uselessly search for index
-entries matching the heap-only tuples included in the move.
Statistics
----------

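To make the README text deleted above concrete, here is a toy model of the
redirect-collapse step (an illustrative sketch only; the real line-pointer
machinery lives in src/include/storage/itemid.h and bufpage.h, and none of
these type or function names exist in the backend):

	#include <stdio.h>

	/* Toy stand-ins for PostgreSQL line pointers */
	typedef enum { LP_UNUSED, LP_NORMAL, LP_REDIRECT } LpFlags;

	typedef struct
	{
		LpFlags		lp_flags;
		int			lp_target;	/* for LP_REDIRECT: offset of chain member */
		int			heap_only;	/* for LP_NORMAL: HEAP_ONLY_TUPLE flag */
	} ToyItemId;

	/*
	 * Collapse a redirecting root pointer the way old-style VACUUM FULL did:
	 * move the first live tuple into the root slot, free the old slot, and
	 * clear the heap-only flag so index entries pointing at the root reach
	 * the tuple directly.  The tuple's user-visible CTID (its slot number)
	 * changes, which is safe only under full exclusive lock.
	 */
	static void
	collapse_redirect(ToyItemId *lp, int root)
	{
		int			to = lp[root].lp_target;

		lp[root] = lp[to];			/* physically move the "to" item */
		lp[to].lp_flags = LP_UNUSED;
		lp[root].heap_only = 0;		/* no longer a heap-only tuple */
	}

	int
	main(void)
	{
		ToyItemId	lp[3] = {
			[1] = { LP_REDIRECT, 2, 0 },	/* root redirects to slot 2 */
			[2] = { LP_NORMAL, 0, 1 },		/* live heap-only tuple */
		};

		collapse_redirect(lp, 1);
		printf("slot1=%d heap_only=%d slot2=%d\n",
			   lp[1].lp_flags, lp[1].heap_only, lp[2].lp_flags);
		/* prints: slot1=1 heap_only=0 slot2=0 */
		return 0;
	}

After this commit, pruning only ever installs redirects (ItemIdSetRedirect)
and never collapses them, so a tuple's user-visible CTID no longer changes
except through an ordinary UPDATE.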
src/backend/access/heap/heapam.c

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
-* $PostgreSQL: pgsql/src/backend/access/heap/heapam.c,v 1.285 2010/02/03 10:01:29 heikki Exp $
+* $PostgreSQL: pgsql/src/backend/access/heap/heapam.c,v 1.286 2010/02/08 04:33:52 tgl Exp $
*
*
* INTERFACE ROUTINES
@@ -79,7 +79,7 @@ static HeapScanDesc heap_beginscan_internal(Relation relation,
bool allow_strat, bool allow_sync,
bool is_bitmapscan);
static XLogRecPtr log_heap_update(Relation reln, Buffer oldbuf,
-ItemPointerData from, Buffer newbuf, HeapTuple newtup, bool move,
+ItemPointerData from, Buffer newbuf, HeapTuple newtup,
bool all_visible_cleared, bool new_all_visible_cleared);
static bool HeapSatisfiesHOTUpdate(Relation relation, Bitmapset *hot_attrs,
HeapTuple oldtup, HeapTuple newtup);
@@ -2785,7 +2785,7 @@ l2:
if (!relation->rd_istemp)
{
XLogRecPtr recptr = log_heap_update(relation, buffer, oldtup.t_self,
-newbuf, heaptup, false,
+newbuf, heaptup,
all_visible_cleared,
all_visible_cleared_new);
@@ -3664,9 +3664,13 @@ recheck_xmax:
}
/*
-* Although xvac per se could only be set by VACUUM, it shares physical
-* storage space with cmax, and so could be wiped out by someone setting
-* xmax. Hence recheck after changing lock, same as for xmax itself.
+* Although xvac per se could only be set by old-style VACUUM FULL, it
+* shares physical storage space with cmax, and so could be wiped out by
+* someone setting xmax. Hence recheck after changing lock, same as for
+* xmax itself.
+*
+* Old-style VACUUM FULL is gone, but we have to keep this code as long
+* as we support having MOVED_OFF/MOVED_IN tuples in the database.
*/
recheck_xvac:
if (tuple->t_infomask & HEAP_MOVED)
@@ -3785,8 +3789,7 @@ HeapTupleHeaderAdvanceLatestRemovedXid(HeapTupleHeader tuple,
TransactionId xmax = HeapTupleHeaderGetXmax(tuple);
TransactionId xvac = HeapTupleHeaderGetXvac(tuple);
-if (tuple->t_infomask & HEAP_MOVED_OFF ||
-tuple->t_infomask & HEAP_MOVED_IN)
+if (tuple->t_infomask & HEAP_MOVED)
{
if (TransactionIdPrecedes(*latestRemovedXid, xvac))
*latestRemovedXid = xvac;
@@ -3844,7 +3847,7 @@ log_heap_clean(Relation reln, Buffer buffer,
OffsetNumber *redirected, int nredirected,
OffsetNumber *nowdead, int ndead,
OffsetNumber *nowunused, int nunused,
-TransactionId latestRemovedXid, bool redirect_move)
+TransactionId latestRemovedXid)
{
xl_heap_clean xlrec;
uint8 info;
@@ -3915,7 +3918,7 @@ log_heap_clean(Relation reln, Buffer buffer,
rdata[3].buffer_std = true;
rdata[3].next = NULL;
-info = redirect_move ? XLOG_HEAP2_CLEAN_MOVE : XLOG_HEAP2_CLEAN;
+info = XLOG_HEAP2_CLEAN;
recptr = XLogInsert(RM_HEAP2_ID, info, rdata);
return recptr;
@@ -3970,23 +3973,11 @@
*/
static XLogRecPtr
log_heap_update(Relation reln, Buffer oldbuf, ItemPointerData from,
-Buffer newbuf, HeapTuple newtup, bool move,
+Buffer newbuf, HeapTuple newtup,
bool all_visible_cleared, bool new_all_visible_cleared)
{
-/*
-* Note: xlhdr is declared to have adequate size and correct alignment for
-* an xl_heap_header. However the two tids, if present at all, will be
-* packed in with no wasted space after the xl_heap_header; they aren't
-* necessarily aligned as implied by this struct declaration.
-*/
-struct
-{
-xl_heap_header hdr;
-TransactionId tid1;
-TransactionId tid2;
-} xlhdr;
-int hsize = SizeOfHeapHeader;
xl_heap_update xlrec;
+xl_heap_header xlhdr;
uint8 info;
XLogRecPtr recptr;
XLogRecData rdata[4];
@@ -3995,12 +3986,7 @@ log_heap_update(Relation reln, Buffer oldbuf, ItemPointerData from,
/* Caller should not call me on a temp relation */
Assert(!reln->rd_istemp);
-if (move)
-{
-Assert(!HeapTupleIsHeapOnly(newtup));
-info = XLOG_HEAP_MOVE;
-}
-else if (HeapTupleIsHeapOnly(newtup))
+if (HeapTupleIsHeapOnly(newtup))
info = XLOG_HEAP_HOT_UPDATE;
else
info = XLOG_HEAP_UPDATE;
@@ -4022,30 +4008,16 @@ log_heap_update(Relation reln, Buffer oldbuf, ItemPointerData from,
rdata[1].buffer_std = true;
rdata[1].next = &(rdata[2]);
-xlhdr.hdr.t_infomask2 = newtup->t_data->t_infomask2;
-xlhdr.hdr.t_infomask = newtup->t_data->t_infomask;
-xlhdr.hdr.t_hoff = newtup->t_data->t_hoff;
-if (move) /* remember xmax & xmin */
-{
-TransactionId xid[2]; /* xmax, xmin */
-if (newtup->t_data->t_infomask & (HEAP_XMAX_INVALID | HEAP_IS_LOCKED))
-xid[0] = InvalidTransactionId;
-else
-xid[0] = HeapTupleHeaderGetXmax(newtup->t_data);
-xid[1] = HeapTupleHeaderGetXmin(newtup->t_data);
-memcpy((char *) &xlhdr + hsize,
-(char *) xid,
-2 * sizeof(TransactionId));
-hsize += 2 * sizeof(TransactionId);
-}
+xlhdr.t_infomask2 = newtup->t_data->t_infomask2;
+xlhdr.t_infomask = newtup->t_data->t_infomask;
+xlhdr.t_hoff = newtup->t_data->t_hoff;
/*
* As with insert records, we need not store the rdata[2] segment if we
* decide to store the whole buffer instead.
*/
rdata[2].data = (char *) &xlhdr;
-rdata[2].len = hsize;
+rdata[2].len = SizeOfHeapHeader;
rdata[2].buffer = newbuf;
rdata[2].buffer_std = true;
rdata[2].next = &(rdata[3]);
@@ -4070,19 +4042,6 @@ log_heap_update(Relation reln, Buffer oldbuf, ItemPointerData from,
return recptr;
}
-/*
-* Perform XLogInsert for a heap-move operation. Caller must already
-* have modified the buffers and marked them dirty.
-*/
-XLogRecPtr
-log_heap_move(Relation reln, Buffer oldbuf, ItemPointerData from,
-Buffer newbuf, HeapTuple newtup,
-bool all_visible_cleared, bool new_all_visible_cleared)
-{
-return log_heap_update(reln, oldbuf, from, newbuf, newtup, true,
-all_visible_cleared, new_all_visible_cleared);
-}
/*
* Perform XLogInsert of a HEAP_NEWPAGE record to WAL. Caller is responsible
* for writing the page to disk after calling this routine.
@@ -4149,10 +4108,10 @@ heap_xlog_cleanup_info(XLogRecPtr lsn, XLogRecord *record)
}
/*
-* Handles CLEAN and CLEAN_MOVE record types
+* Handles HEAP_CLEAN record type
*/
static void
-heap_xlog_clean(XLogRecPtr lsn, XLogRecord *record, bool clean_move)
+heap_xlog_clean(XLogRecPtr lsn, XLogRecord *record)
{
xl_heap_clean *xlrec = (xl_heap_clean *) XLogRecGetData(record);
Buffer buffer;
@@ -4171,7 +4130,8 @@ heap_xlog_clean(XLogRecPtr lsn, XLogRecord *record, bool clean_move)
* no queries running for which the removed tuples are still visible.
*/
if (InHotStandby)
-ResolveRecoveryConflictWithSnapshot(xlrec->latestRemovedXid, xlrec->node);
+ResolveRecoveryConflictWithSnapshot(xlrec->latestRemovedXid,
+xlrec->node);
RestoreBkpBlocks(lsn, record, true);
@@ -4203,8 +4163,7 @@ heap_xlog_clean(XLogRecPtr lsn, XLogRecord *record, bool clean_move)
heap_page_prune_execute(buffer,
redirected, nredirected,
nowdead, ndead,
-nowunused, nunused,
-clean_move);
+nowunused, nunused);
freespace = PageGetHeapFreeSpace(page); /* needed to update FSM below */
@@ -4489,10 +4448,10 @@ heap_xlog_insert(XLogRecPtr lsn, XLogRecord *record)
}
/*
-* Handles UPDATE, HOT_UPDATE & MOVE
+* Handles UPDATE and HOT_UPDATE
*/
static void
-heap_xlog_update(XLogRecPtr lsn, XLogRecord *record, bool move, bool hot_update)
+heap_xlog_update(XLogRecPtr lsn, XLogRecord *record, bool hot_update)
{
xl_heap_update *xlrec = (xl_heap_update *) XLogRecGetData(record);
Buffer buffer;
@@ -4558,33 +4517,19 @@ heap_xlog_update(XLogRecPtr lsn, XLogRecord *record, bool move, bool hot_update)
htup = (HeapTupleHeader) PageGetItem(page, lp);
-if (move)
-{
-htup->t_infomask &= ~(HEAP_XMIN_COMMITTED |
-HEAP_XMIN_INVALID |
-HEAP_MOVED_IN);
-htup->t_infomask |= HEAP_MOVED_OFF;
-HeapTupleHeaderClearHotUpdated(htup);
-HeapTupleHeaderSetXvac(htup, record->xl_xid);
-/* Make sure there is no forward chain link in t_ctid */
-htup->t_ctid = xlrec->target.tid;
-}
+htup->t_infomask &= ~(HEAP_XMAX_COMMITTED |
+HEAP_XMAX_INVALID |
+HEAP_XMAX_IS_MULTI |
+HEAP_IS_LOCKED |
+HEAP_MOVED);
+if (hot_update)
+HeapTupleHeaderSetHotUpdated(htup);
else
-{
-htup->t_infomask &= ~(HEAP_XMAX_COMMITTED |
-HEAP_XMAX_INVALID |
-HEAP_XMAX_IS_MULTI |
-HEAP_IS_LOCKED |
-HEAP_MOVED);
-if (hot_update)
-HeapTupleHeaderSetHotUpdated(htup);
-else
-HeapTupleHeaderClearHotUpdated(htup);
-HeapTupleHeaderSetXmax(htup, record->xl_xid);
-HeapTupleHeaderSetCmax(htup, FirstCommandId, false);
-/* Set forward chain link in t_ctid */
-htup->t_ctid = xlrec->newtid;
-}
+HeapTupleHeaderClearHotUpdated(htup);
+HeapTupleHeaderSetXmax(htup, record->xl_xid);
+HeapTupleHeaderSetCmax(htup, FirstCommandId, false);
+/* Set forward chain link in t_ctid */
+htup->t_ctid = xlrec->newtid;
/* Mark the page as a candidate for pruning */
PageSetPrunable(page, record->xl_xid);
@@ -4655,8 +4600,6 @@ newsame:;
elog(PANIC, "heap_update_redo: invalid max offset number");
hsize = SizeOfHeapUpdate + SizeOfHeapHeader;
-if (move)
-hsize += (2 * sizeof(TransactionId));
newlen = record->xl_len - hsize;
Assert(newlen <= MaxHeapTupleSize);
@@ -4674,22 +4617,8 @@ newsame:;
htup->t_infomask = xlhdr.t_infomask;
htup->t_hoff = xlhdr.t_hoff;
-if (move)
-{
-TransactionId xid[2]; /* xmax, xmin */
-memcpy((char *) xid,
-(char *) xlrec + SizeOfHeapUpdate + SizeOfHeapHeader,
-2 * sizeof(TransactionId));
-HeapTupleHeaderSetXmin(htup, xid[1]);
-HeapTupleHeaderSetXmax(htup, xid[0]);
-HeapTupleHeaderSetXvac(htup, record->xl_xid);
-}
-else
-{
-HeapTupleHeaderSetXmin(htup, record->xl_xid);
-HeapTupleHeaderSetCmin(htup, FirstCommandId);
-}
+HeapTupleHeaderSetXmin(htup, record->xl_xid);
+HeapTupleHeaderSetCmin(htup, FirstCommandId);
/* Make sure there is no forward chain link in t_ctid */
htup->t_ctid = xlrec->newtid;
@@ -4857,13 +4786,10 @@ heap_redo(XLogRecPtr lsn, XLogRecord *record)
heap_xlog_delete(lsn, record);
break;
case XLOG_HEAP_UPDATE:
-heap_xlog_update(lsn, record, false, false);
-break;
-case XLOG_HEAP_MOVE:
-heap_xlog_update(lsn, record, true, false);
+heap_xlog_update(lsn, record, false);
break;
case XLOG_HEAP_HOT_UPDATE:
-heap_xlog_update(lsn, record, false, true);
+heap_xlog_update(lsn, record, true);
break;
case XLOG_HEAP_NEWPAGE:
heap_xlog_newpage(lsn, record);
@@ -4895,10 +4821,7 @@ heap2_redo(XLogRecPtr lsn, XLogRecord *record)
heap_xlog_freeze(lsn, record);
break;
case XLOG_HEAP2_CLEAN:
-heap_xlog_clean(lsn, record, false);
-break;
-case XLOG_HEAP2_CLEAN_MOVE:
-heap_xlog_clean(lsn, record, true);
+heap_xlog_clean(lsn, record);
break;
case XLOG_HEAP2_CLEANUP_INFO:
heap_xlog_cleanup_info(lsn, record);
@@ -4953,19 +4876,6 @@ heap_desc(StringInfo buf, uint8 xl_info, char *rec)
ItemPointerGetBlockNumber(&(xlrec->newtid)),
ItemPointerGetOffsetNumber(&(xlrec->newtid)));
}
-else if (info == XLOG_HEAP_MOVE)
-{
-xl_heap_update *xlrec = (xl_heap_update *) rec;
-if (xl_info & XLOG_HEAP_INIT_PAGE)
-appendStringInfo(buf, "move(init): ");
-else
-appendStringInfo(buf, "move: ");
-out_target(buf, &(xlrec->target));
-appendStringInfo(buf, "; new %u/%u",
-ItemPointerGetBlockNumber(&(xlrec->newtid)),
-ItemPointerGetOffsetNumber(&(xlrec->newtid)));
-}
else if (info == XLOG_HEAP_HOT_UPDATE)
{
xl_heap_update *xlrec = (xl_heap_update *) rec;
@@ -5037,15 +4947,6 @@ heap2_desc(StringInfo buf, uint8 xl_info, char *rec)
xlrec->node.relNode, xlrec->block,
xlrec->latestRemovedXid);
}
-else if (info == XLOG_HEAP2_CLEAN_MOVE)
-{
-xl_heap_clean *xlrec = (xl_heap_clean *) rec;
-appendStringInfo(buf, "clean_move: rel %u/%u/%u; blk %u remxid %u",
-xlrec->node.spcNode, xlrec->node.dbNode,
-xlrec->node.relNode, xlrec->block,
-xlrec->latestRemovedXid);
-}
else if (info == XLOG_HEAP2_CLEANUP_INFO)
{
xl_heap_cleanup_info *xlrec = (xl_heap_cleanup_info *) rec;

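The redo-dispatch simplification in heapam.c above can be summarized with a
small stub. The opcode values follow the XLOG_HEAP_* constants of this era in
src/include/access/htup.h (0x30 was XLOG_HEAP_MOVE); the stub bodies are
illustrative stand-ins for the real redo routines:

	#include <stdio.h>
	#include <stdint.h>

	/* WAL info-byte opcodes, per src/include/access/htup.h */
	#define XLOG_HEAP_UPDATE	 0x20
	#define XLOG_HEAP_MOVE		 0x30	/* retired by this commit */
	#define XLOG_HEAP_HOT_UPDATE 0x40
	#define XLOG_HEAP_OPMASK	 0x70

	static void
	heap_xlog_update_stub(int hot_update)
	{
		printf("replay update, hot_update=%d\n", hot_update);
	}

	/* Sketch of the post-commit dispatch: one handler, one flag, no "move" case */
	static void
	heap_redo_stub(uint8_t xl_info)
	{
		switch (xl_info & XLOG_HEAP_OPMASK)
		{
			case XLOG_HEAP_UPDATE:
				heap_xlog_update_stub(0);
				break;
			case XLOG_HEAP_HOT_UPDATE:
				heap_xlog_update_stub(1);
				break;
			default:
				printf("PANIC: unknown op code %d\n", (int) xl_info);
		}
	}

	int
	main(void)
	{
		heap_redo_stub(XLOG_HEAP_UPDATE);
		heap_redo_stub(XLOG_HEAP_HOT_UPDATE);
		heap_redo_stub(XLOG_HEAP_MOVE); /* no longer recognized */
		return 0;
	}

WAL containing the retired record types cannot be replayed by a post-commit
server, which is fine because WAL is never carried across a major-version
upgrade anyway.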
src/backend/access/heap/pruneheap.c

@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
-* $PostgreSQL: pgsql/src/backend/access/heap/pruneheap.c,v 1.20 2010/01/02 16:57:34 momjian Exp $
+* $PostgreSQL: pgsql/src/backend/access/heap/pruneheap.c,v 1.21 2010/02/08 04:33:53 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -21,7 +21,6 @@
#include "pgstat.h"
#include "storage/bufmgr.h"
#include "storage/off.h"
-#include "utils/inval.h"
#include "utils/rel.h"
#include "utils/tqual.h"
@@ -46,8 +45,7 @@ typedef struct
static int heap_prune_chain(Relation relation, Buffer buffer,
OffsetNumber rootoffnum,
TransactionId OldestXmin,
-PruneState *prstate,
-bool redirect_move);
+PruneState *prstate);
static void heap_prune_record_prunable(PruneState *prstate, TransactionId xid);
static void heap_prune_record_redirect(PruneState *prstate,
OffsetNumber offnum, OffsetNumber rdoffnum);
@@ -123,8 +121,8 @@ heap_page_prune_opt(Relation relation, Buffer buffer, TransactionId OldestXmin)
*/
if (PageIsFull(page) || PageGetHeapFreeSpace(page) < minfree)
{
-/* OK to prune (though not to remove redirects) */
-(void) heap_page_prune(relation, buffer, OldestXmin, false, true);
+/* OK to prune */
+(void) heap_page_prune(relation, buffer, OldestXmin, true);
}
/* And release buffer lock */
@@ -141,14 +139,6 @@ heap_page_prune_opt(Relation relation, Buffer buffer, TransactionId OldestXmin)
* OldestXmin is the cutoff XID used to distinguish whether tuples are DEAD
* or RECENTLY_DEAD (see HeapTupleSatisfiesVacuum).
*
-* If redirect_move is set, we remove redirecting line pointers by
-* updating the root line pointer to point directly to the first non-dead
-* tuple in the chain. NOTE: eliminating the redirect changes the first
-* tuple's effective CTID, and is therefore unsafe except within VACUUM FULL.
-* The only reason we support this capability at all is that by using it,
-* VACUUM FULL need not cope with LP_REDIRECT items at all; which seems a
-* good thing since VACUUM FULL is overly complicated already.
-*
* If report_stats is true then we send the number of reclaimed heap-only
* tuples to pgstats. (This must be FALSE during vacuum, since vacuum will
* send its own new total to pgstats, and we don't want this delta applied
@@ -158,7 +148,7 @@ heap_page_prune_opt(Relation relation, Buffer buffer, TransactionId OldestXmin)
*/
int
heap_page_prune(Relation relation, Buffer buffer, TransactionId OldestXmin,
-bool redirect_move, bool report_stats)
+bool report_stats)
{
int ndeleted = 0;
Page page = BufferGetPage(buffer);
@@ -172,17 +162,10 @@ heap_page_prune(Relation relation, Buffer buffer, TransactionId OldestXmin,
* logic as possible out of the critical section, and also ensures that
* WAL replay will work the same as the normal case.
*
-* First, inform inval.c that upcoming CacheInvalidateHeapTuple calls are
-* nontransactional.
-*/
-if (redirect_move)
-BeginNonTransactionalInvalidation();
-/*
-* Initialize the new pd_prune_xid value to zero (indicating no prunable
-* tuples). If we find any tuples which may soon become prunable, we will
-* save the lowest relevant XID in new_prune_xid. Also initialize the rest
-* of our working state.
+* First, initialize the new pd_prune_xid value to zero (indicating no
+* prunable tuples). If we find any tuples which may soon become
+* prunable, we will save the lowest relevant XID in new_prune_xid.
+* Also initialize the rest of our working state.
*/
prstate.new_prune_xid = InvalidTransactionId;
prstate.latestRemovedXid = InvalidTransactionId;
@@ -209,22 +192,9 @@ heap_page_prune(Relation relation, Buffer buffer, TransactionId OldestXmin,
/* Process this item or chain of items */
ndeleted += heap_prune_chain(relation, buffer, offnum,
OldestXmin,
-&prstate,
-redirect_move);
+&prstate);
}
-/*
-* Send invalidation messages for any tuples we are about to move. It is
-* safe to do this now, even though we could theoretically still fail
-* before making the actual page update, because a useless cache
-* invalidation doesn't hurt anything. Also, no one else can reload the
-* tuples while we have exclusive buffer lock, so it's not too early to
-* send the invals. This avoids sending the invals while inside the
-* critical section, which is a good thing for robustness.
-*/
-if (redirect_move)
-EndNonTransactionalInvalidation();
/* Any error while applying the changes is critical */
START_CRIT_SECTION();
@@ -238,8 +208,7 @@ heap_page_prune(Relation relation, Buffer buffer, TransactionId OldestXmin,
heap_page_prune_execute(buffer,
prstate.redirected, prstate.nredirected,
prstate.nowdead, prstate.ndead,
-prstate.nowunused, prstate.nunused,
-redirect_move);
+prstate.nowunused, prstate.nunused);
/*
* Update the page's pd_prune_xid field to either zero, or the lowest
@@ -257,7 +226,7 @@ heap_page_prune(Relation relation, Buffer buffer, TransactionId OldestXmin,
MarkBufferDirty(buffer);
/*
-* Emit a WAL HEAP_CLEAN or HEAP_CLEAN_MOVE record showing what we did
+* Emit a WAL HEAP_CLEAN record showing what we did
*/
if (!relation->rd_istemp)
{
@@ -267,7 +236,7 @@ heap_page_prune(Relation relation, Buffer buffer, TransactionId OldestXmin,
prstate.redirected, prstate.nredirected,
prstate.nowdead, prstate.ndead,
prstate.nowunused, prstate.nunused,
-prstate.latestRemovedXid, redirect_move);
+prstate.latestRemovedXid);
PageSetLSN(BufferGetPage(buffer), recptr);
PageSetTLI(BufferGetPage(buffer), ThisTimeLineID);
@@ -349,16 +318,12 @@ heap_page_prune(Relation relation, Buffer buffer, TransactionId OldestXmin,
* LP_DEAD state are added to nowdead[]; and items to be set to LP_UNUSED
* state are added to nowunused[].
*
-* If redirect_move is true, we intend to get rid of redirecting line pointers,
-* not just make redirection entries.
-*
* Returns the number of tuples (to be) deleted from the page.
*/
static int
heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
TransactionId OldestXmin,
-PruneState *prstate,
-bool redirect_move)
+PruneState *prstate)
{
int ndeleted = 0;
Page dp = (Page) BufferGetPage(buffer);
@@ -366,7 +331,6 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
ItemId rootlp;
HeapTupleHeader htup;
OffsetNumber latestdead = InvalidOffsetNumber,
-redirect_target = InvalidOffsetNumber,
maxoff = PageGetMaxOffsetNumber(dp),
offnum;
OffsetNumber chainitems[MaxHeapTuplesPerPage];
@@ -592,12 +556,7 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
if (i >= nchain)
heap_prune_record_dead(prstate, rootoffnum);
else
-{
heap_prune_record_redirect(prstate, rootoffnum, chainitems[i]);
-/* If the redirection will be a move, need more processing */
-if (redirect_move)
-redirect_target = chainitems[i];
-}
}
else if (nchain < 2 && ItemIdIsRedirected(rootlp))
{
@@ -610,42 +569,6 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
*/
heap_prune_record_dead(prstate, rootoffnum);
}
-else if (redirect_move && ItemIdIsRedirected(rootlp))
-{
-/*
-* If we desire to eliminate LP_REDIRECT items by moving tuples, make
-* a redirection entry for each redirected root item; this will cause
-* heap_page_prune_execute to actually do the move. (We get here only
-* when there are no DEAD tuples in the chain; otherwise the
-* redirection entry was made above.)
-*/
-heap_prune_record_redirect(prstate, rootoffnum, chainitems[1]);
-redirect_target = chainitems[1];
-}
-/*
-* If we are going to implement a redirect by moving tuples, we have to
-* issue a cache invalidation against the redirection target tuple,
-* because its CTID will be effectively changed by the move. Note that
-* CacheInvalidateHeapTuple only queues the request, it doesn't send it;
-* if we fail before reaching EndNonTransactionalInvalidation, nothing
-* happens and no harm is done.
-*/
-if (OffsetNumberIsValid(redirect_target))
-{
-ItemId firstlp = PageGetItemId(dp, redirect_target);
-HeapTupleData firsttup;
-Assert(ItemIdIsNormal(firstlp));
-/* Set up firsttup to reference the tuple at its existing CTID */
-firsttup.t_data = (HeapTupleHeader) PageGetItem(dp, firstlp);
-firsttup.t_len = ItemIdGetLength(firstlp);
-ItemPointerSet(&firsttup.t_self,
-BufferGetBlockNumber(buffer),
-redirect_target);
-firsttup.t_tableOid = RelationGetRelid(relation);
-CacheInvalidateHeapTuple(relation, &firsttup);
-}
return ndeleted;
}
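The invalidation code removed above depended on a queue-then-send discipline:
CacheInvalidateHeapTuple only queues a request, and nothing reaches other
backends until EndNonTransactionalInvalidation runs, so failing in between
broadcasts nothing. A toy sketch of that discipline (all names here are
illustrative stand-ins, not the real inval.c API):

	#include <stdio.h>

	/*
	 * Toy queue-then-send model: requests accumulate locally during the
	 * risky phase and are broadcast only once the page change is assured.
	 * An extra, unnecessary invalidation is harmless; a missing one is not.
	 */
	#define MAXQ 8

	static int	queued[MAXQ];
	static int	nqueued = 0;

	static void
	queue_inval(int ctid)		/* cf. CacheInvalidateHeapTuple: queue only */
	{
		queued[nqueued++] = ctid;
	}

	static void
	send_invals(void)			/* cf. EndNonTransactionalInvalidation */
	{
		for (int i = 0; i < nqueued; i++)
			printf("invalidate cache entries for ctid %d\n", queued[i]);
		nqueued = 0;
	}

	int
	main(void)
	{
		queue_inval(42);		/* tuple whose CTID the move would change */
		/* an error here would broadcast nothing, leaving caches intact */
		send_invals();			/* done before the critical section starts */
		return 0;
	}

Nontransactional generation of these messages was the sticking point for Hot
Standby noted in the commit message, and removing the move capability removes
the need for it entirely.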
@@ -715,14 +638,13 @@ void
heap_page_prune_execute(Buffer buffer,
OffsetNumber *redirected, int nredirected,
OffsetNumber *nowdead, int ndead,
-OffsetNumber *nowunused, int nunused,
-bool redirect_move)
+OffsetNumber *nowunused, int nunused)
{
Page page = (Page) BufferGetPage(buffer);
OffsetNumber *offnum;
int i;
-/* Update all redirected or moved line pointers */
+/* Update all redirected line pointers */
offnum = redirected;
for (i = 0; i < nredirected; i++)
{
@@ -730,30 +652,7 @@ heap_page_prune_execute(Buffer buffer,
OffsetNumber tooff = *offnum++;
ItemId fromlp = PageGetItemId(page, fromoff);
-if (redirect_move)
-{
-/* Physically move the "to" item to the "from" slot */
-ItemId tolp = PageGetItemId(page, tooff);
-HeapTupleHeader htup;
-*fromlp = *tolp;
-ItemIdSetUnused(tolp);
-/*
-* Change heap-only status of the tuple because after the line
-* pointer manipulation, it's no longer a heap-only tuple, but is
-* directly pointed to by index entries.
-*/
-Assert(ItemIdIsNormal(fromlp));
-htup = (HeapTupleHeader) PageGetItem(page, fromlp);
-Assert(HeapTupleHeaderIsHeapOnly(htup));
-HeapTupleHeaderClearHeapOnly(htup);
-}
-else
-{
-/* Just insert a REDIRECT link at fromoff */
-ItemIdSetRedirect(fromlp, tooff);
-}
+ItemIdSetRedirect(fromlp, tooff);
}
/* Update all now-dead line pointers */