1
0
mirror of https://github.com/postgres/postgres.git synced 2025-11-09 06:21:09 +03:00

Change internal RelFileNode references to RelFileNumber or RelFileLocator.

We have been using the term RelFileNode to refer to either (1) the
integer that is used to name the sequence of files for a certain relation
within the directory set aside for that tablespace/database combination;
or (2) that value plus the OIDs of the tablespace and database; or
occasionally (3) the whole series of files created for a relation
based on those values. Using the same name for more than one thing is
confusing.

Replace RelFileNode with RelFileNumber when we're talking about just the
single number, i.e. (1) from above, and with RelFileLocator when we're
talking about all the things that are needed to locate a relation's files
on disk, i.e. (2) from above. In the places where we refer to (3) as
a relfilenode, instead refer to "relation storage".

Since there is a ton of SQL code in the world that knows about
pg_class.relfilenode, don't change the name of that column, or of other
SQL-facing things that derive their name from it.

On the other hand, do adjust closely-related internal terminology. For
example, the structure member names dbNode and spcNode appear to be
derived from the fact that the structure itself was called RelFileNode,
so change those to dbOid and spcOid. Likewise, various variables with
names like rnode and relnode get renamed appropriately, according to
how they're being used in context.

Hopefully, this is clearer than before. It is also preparation for
future patches that intend to widen the relfilenumber fields from their
current width of 32 bits. Variables that store a relfilenumber are now
declared as type RelFileNumber rather than type Oid; right now, these
are the same, but that can now more easily be changed.

Dilip Kumar, per an idea from me. Reviewed also by Andres Freund.
I fixed some whitespace issues, changed a couple of words in a
comment, and made one other minor correction.

Discussion: http://postgr.es/m/CA+TgmoamOtXbVAQf9hWFzonUo6bhhjS6toZQd7HZ-pmojtAmag@mail.gmail.com
Discussion: http://postgr.es/m/CA+Tgmobp7+7kmi4gkq7Y+4AM9fTvL+O1oQ4-5gFTT+6Ng-dQ=g@mail.gmail.com
Discussion: http://postgr.es/m/CAFiTN-vTe79M8uDH1yprOU64MNFE+R3ODRuA+JWf27JbhY4hJw@mail.gmail.com
This commit is contained in:
Robert Haas
2022-07-06 11:39:09 -04:00
parent 7775c748db
commit b0a55e4329
138 changed files with 1640 additions and 1606 deletions

View File

@@ -90,7 +90,7 @@ bool trace_syncscan = false;
*/
typedef struct ss_scan_location_t
{
RelFileNode relfilenode; /* identity of a relation */
RelFileLocator relfilelocator; /* identity of a relation */
BlockNumber location; /* last-reported location in the relation */
} ss_scan_location_t;
@@ -115,7 +115,7 @@ typedef struct ss_scan_locations_t
static ss_scan_locations_t *scan_locations;
/* prototypes for internal functions */
static BlockNumber ss_search(RelFileNode relfilenode,
static BlockNumber ss_search(RelFileLocator relfilelocator,
BlockNumber location, bool set);
@@ -159,9 +159,9 @@ SyncScanShmemInit(void)
* these invalid entries will fall off the LRU list and get
* replaced with real entries.
*/
item->location.relfilenode.spcNode = InvalidOid;
item->location.relfilenode.dbNode = InvalidOid;
item->location.relfilenode.relNode = InvalidOid;
item->location.relfilelocator.spcOid = InvalidOid;
item->location.relfilelocator.dbOid = InvalidOid;
item->location.relfilelocator.relNumber = InvalidRelFileNumber;
item->location.location = InvalidBlockNumber;
item->prev = (i > 0) ?
@@ -176,10 +176,10 @@ SyncScanShmemInit(void)
/*
* ss_search --- search the scan_locations structure for an entry with the
* given relfilenode.
* given relfilelocator.
*
* If "set" is true, the location is updated to the given location. If no
* entry for the given relfilenode is found, it will be created at the head
* entry for the given relfilelocator is found, it will be created at the head
* of the list with the given location, even if "set" is false.
*
* In any case, the location after possible update is returned.
@@ -188,7 +188,7 @@ SyncScanShmemInit(void)
* data structure.
*/
static BlockNumber
ss_search(RelFileNode relfilenode, BlockNumber location, bool set)
ss_search(RelFileLocator relfilelocator, BlockNumber location, bool set)
{
ss_lru_item_t *item;
@@ -197,7 +197,8 @@ ss_search(RelFileNode relfilenode, BlockNumber location, bool set)
{
bool match;
match = RelFileNodeEquals(item->location.relfilenode, relfilenode);
match = RelFileLocatorEquals(item->location.relfilelocator,
relfilelocator);
if (match || item->next == NULL)
{
@@ -207,7 +208,7 @@ ss_search(RelFileNode relfilenode, BlockNumber location, bool set)
*/
if (!match)
{
item->location.relfilenode = relfilenode;
item->location.relfilelocator = relfilelocator;
item->location.location = location;
}
else if (set)
@@ -255,7 +256,7 @@ ss_get_location(Relation rel, BlockNumber relnblocks)
BlockNumber startloc;
LWLockAcquire(SyncScanLock, LW_EXCLUSIVE);
startloc = ss_search(rel->rd_node, 0, false);
startloc = ss_search(rel->rd_locator, 0, false);
LWLockRelease(SyncScanLock);
/*
@@ -281,8 +282,8 @@ ss_get_location(Relation rel, BlockNumber relnblocks)
* ss_report_location --- update the current scan location
*
* Writes an entry into the shared Sync Scan state of the form
* (relfilenode, blocknumber), overwriting any existing entry for the
* same relfilenode.
* (relfilelocator, blocknumber), overwriting any existing entry for the
* same relfilelocator.
*/
void
ss_report_location(Relation rel, BlockNumber location)
@@ -309,7 +310,7 @@ ss_report_location(Relation rel, BlockNumber location)
{
if (LWLockConditionalAcquire(SyncScanLock, LW_EXCLUSIVE))
{
(void) ss_search(rel->rd_node, location, true);
(void) ss_search(rel->rd_locator, location, true);
LWLockRelease(SyncScanLock);
}
#ifdef TRACE_SYNCSCAN

View File

@@ -470,7 +470,7 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack,
savedRightLink = GinPageGetOpaque(page)->rightlink;
/* Begin setting up WAL record */
data.node = btree->index->rd_node;
data.locator = btree->index->rd_locator;
data.flags = xlflags;
if (BufferIsValid(childbuf))
{

View File

@@ -235,7 +235,7 @@ ginHeapTupleFastInsert(GinState *ginstate, GinTupleCollector *collector)
needWal = RelationNeedsWAL(index);
data.node = index->rd_node;
data.locator = index->rd_locator;
data.ntuples = 0;
data.newRightlink = data.prevTail = InvalidBlockNumber;

View File

@@ -688,7 +688,7 @@ ginUpdateStats(Relation index, const GinStatsData *stats, bool is_build)
XLogRecPtr recptr;
ginxlogUpdateMeta data;
data.node = index->rd_node;
data.locator = index->rd_locator;
data.ntuples = 0;
data.newRightlink = data.prevTail = InvalidBlockNumber;
memcpy(&data.metadata, metadata, sizeof(GinMetaPageData));

View File

@@ -95,13 +95,13 @@ ginRedoInsertEntry(Buffer buffer, bool isLeaf, BlockNumber rightblkno, void *rda
if (PageAddItem(page, (Item) itup, IndexTupleSize(itup), offset, false, false) == InvalidOffsetNumber)
{
RelFileNode node;
RelFileLocator locator;
ForkNumber forknum;
BlockNumber blknum;
BufferGetTag(buffer, &node, &forknum, &blknum);
BufferGetTag(buffer, &locator, &forknum, &blknum);
elog(ERROR, "failed to add item to index page in %u/%u/%u",
node.spcNode, node.dbNode, node.relNode);
locator.spcOid, locator.dbOid, locator.relNumber);
}
}

View File

@@ -462,7 +462,7 @@ gist_indexsortbuild(GISTBuildState *state)
smgrwrite(RelationGetSmgr(state->indexrel), MAIN_FORKNUM, GIST_ROOT_BLKNO,
levelstate->pages[0], true);
if (RelationNeedsWAL(state->indexrel))
log_newpage(&state->indexrel->rd_node, MAIN_FORKNUM, GIST_ROOT_BLKNO,
log_newpage(&state->indexrel->rd_locator, MAIN_FORKNUM, GIST_ROOT_BLKNO,
levelstate->pages[0], true);
pfree(levelstate->pages[0]);
@@ -663,7 +663,7 @@ gist_indexsortbuild_flush_ready_pages(GISTBuildState *state)
}
if (RelationNeedsWAL(state->indexrel))
log_newpages(&state->indexrel->rd_node, MAIN_FORKNUM, state->ready_num_pages,
log_newpages(&state->indexrel->rd_locator, MAIN_FORKNUM, state->ready_num_pages,
state->ready_blknos, state->ready_pages, true);
for (int i = 0; i < state->ready_num_pages; i++)

View File

@@ -191,11 +191,12 @@ gistRedoDeleteRecord(XLogReaderState *record)
*/
if (InHotStandby)
{
RelFileNode rnode;
RelFileLocator rlocator;
XLogRecGetBlockTag(record, 0, &rnode, NULL, NULL);
XLogRecGetBlockTag(record, 0, &rlocator, NULL, NULL);
ResolveRecoveryConflictWithSnapshot(xldata->latestRemovedXid, rnode);
ResolveRecoveryConflictWithSnapshot(xldata->latestRemovedXid,
rlocator);
}
if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
@@ -395,7 +396,7 @@ gistRedoPageReuse(XLogReaderState *record)
*/
if (InHotStandby)
ResolveRecoveryConflictWithSnapshotFullXid(xlrec->latestRemovedFullXid,
xlrec->node);
xlrec->locator);
}
void
@@ -607,7 +608,7 @@ gistXLogPageReuse(Relation rel, BlockNumber blkno, FullTransactionId latestRemov
*/
/* XLOG stuff */
xlrec_reuse.node = rel->rd_node;
xlrec_reuse.locator = rel->rd_locator;
xlrec_reuse.block = blkno;
xlrec_reuse.latestRemovedFullXid = latestRemovedXid;

View File

@@ -999,10 +999,10 @@ hash_xlog_vacuum_one_page(XLogReaderState *record)
*/
if (InHotStandby)
{
RelFileNode rnode;
RelFileLocator rlocator;
XLogRecGetBlockTag(record, 0, &rnode, NULL, NULL);
ResolveRecoveryConflictWithSnapshot(xldata->latestRemovedXid, rnode);
XLogRecGetBlockTag(record, 0, &rlocator, NULL, NULL);
ResolveRecoveryConflictWithSnapshot(xldata->latestRemovedXid, rlocator);
}
action = XLogReadBufferForRedoExtended(record, 0, RBM_NORMAL, true, &buffer);

View File

@@ -428,7 +428,7 @@ _hash_init(Relation rel, double num_tuples, ForkNumber forkNum)
MarkBufferDirty(buf);
if (use_wal)
log_newpage(&rel->rd_node,
log_newpage(&rel->rd_locator,
forkNum,
blkno,
BufferGetPage(buf),
@@ -1019,7 +1019,7 @@ _hash_alloc_buckets(Relation rel, BlockNumber firstblock, uint32 nblocks)
ovflopaque->hasho_page_id = HASHO_PAGE_ID;
if (RelationNeedsWAL(rel))
log_newpage(&rel->rd_node,
log_newpage(&rel->rd_locator,
MAIN_FORKNUM,
lastblock,
zerobuf.data,

View File

@@ -8189,7 +8189,7 @@ log_heap_freeze(Relation reln, Buffer buffer, TransactionId cutoff_xid,
* heap_buffer, if necessary.
*/
XLogRecPtr
log_heap_visible(RelFileNode rnode, Buffer heap_buffer, Buffer vm_buffer,
log_heap_visible(RelFileLocator rlocator, Buffer heap_buffer, Buffer vm_buffer,
TransactionId cutoff_xid, uint8 vmflags)
{
xl_heap_visible xlrec;
@@ -8454,7 +8454,7 @@ log_heap_new_cid(Relation relation, HeapTuple tup)
Assert(tup->t_tableOid != InvalidOid);
xlrec.top_xid = GetTopTransactionId();
xlrec.target_node = relation->rd_node;
xlrec.target_locator = relation->rd_locator;
xlrec.target_tid = tup->t_self;
/*
@@ -8623,18 +8623,18 @@ heap_xlog_prune(XLogReaderState *record)
XLogRecPtr lsn = record->EndRecPtr;
xl_heap_prune *xlrec = (xl_heap_prune *) XLogRecGetData(record);
Buffer buffer;
RelFileNode rnode;
RelFileLocator rlocator;
BlockNumber blkno;
XLogRedoAction action;
XLogRecGetBlockTag(record, 0, &rnode, NULL, &blkno);
XLogRecGetBlockTag(record, 0, &rlocator, NULL, &blkno);
/*
* We're about to remove tuples. In Hot Standby mode, ensure that there's
* no queries running for which the removed tuples are still visible.
*/
if (InHotStandby)
ResolveRecoveryConflictWithSnapshot(xlrec->latestRemovedXid, rnode);
ResolveRecoveryConflictWithSnapshot(xlrec->latestRemovedXid, rlocator);
/*
* If we have a full-page image, restore it (using a cleanup lock) and
@@ -8694,7 +8694,7 @@ heap_xlog_prune(XLogReaderState *record)
* Do this regardless of a full-page image being applied, since the
* FSM data is not in the page anyway.
*/
XLogRecordPageWithFreeSpace(rnode, blkno, freespace);
XLogRecordPageWithFreeSpace(rlocator, blkno, freespace);
}
}
@@ -8751,9 +8751,9 @@ heap_xlog_vacuum(XLogReaderState *record)
if (BufferIsValid(buffer))
{
Size freespace = PageGetHeapFreeSpace(BufferGetPage(buffer));
RelFileNode rnode;
RelFileLocator rlocator;
XLogRecGetBlockTag(record, 0, &rnode, NULL, &blkno);
XLogRecGetBlockTag(record, 0, &rlocator, NULL, &blkno);
UnlockReleaseBuffer(buffer);
@@ -8766,7 +8766,7 @@ heap_xlog_vacuum(XLogReaderState *record)
* Do this regardless of a full-page image being applied, since the
* FSM data is not in the page anyway.
*/
XLogRecordPageWithFreeSpace(rnode, blkno, freespace);
XLogRecordPageWithFreeSpace(rlocator, blkno, freespace);
}
}
@@ -8786,11 +8786,11 @@ heap_xlog_visible(XLogReaderState *record)
Buffer vmbuffer = InvalidBuffer;
Buffer buffer;
Page page;
RelFileNode rnode;
RelFileLocator rlocator;
BlockNumber blkno;
XLogRedoAction action;
XLogRecGetBlockTag(record, 1, &rnode, NULL, &blkno);
XLogRecGetBlockTag(record, 1, &rlocator, NULL, &blkno);
/*
* If there are any Hot Standby transactions running that have an xmin
@@ -8802,7 +8802,7 @@ heap_xlog_visible(XLogReaderState *record)
* rather than killing the transaction outright.
*/
if (InHotStandby)
ResolveRecoveryConflictWithSnapshot(xlrec->cutoff_xid, rnode);
ResolveRecoveryConflictWithSnapshot(xlrec->cutoff_xid, rlocator);
/*
* Read the heap page, if it still exists. If the heap file has dropped or
@@ -8865,7 +8865,7 @@ heap_xlog_visible(XLogReaderState *record)
* FSM data is not in the page anyway.
*/
if (xlrec->flags & VISIBILITYMAP_VALID_BITS)
XLogRecordPageWithFreeSpace(rnode, blkno, space);
XLogRecordPageWithFreeSpace(rlocator, blkno, space);
}
/*
@@ -8890,7 +8890,7 @@ heap_xlog_visible(XLogReaderState *record)
*/
LockBuffer(vmbuffer, BUFFER_LOCK_UNLOCK);
reln = CreateFakeRelcacheEntry(rnode);
reln = CreateFakeRelcacheEntry(rlocator);
visibilitymap_pin(reln, blkno, &vmbuffer);
/*
@@ -8933,13 +8933,13 @@ heap_xlog_freeze_page(XLogReaderState *record)
*/
if (InHotStandby)
{
RelFileNode rnode;
RelFileLocator rlocator;
TransactionId latestRemovedXid = cutoff_xid;
TransactionIdRetreat(latestRemovedXid);
XLogRecGetBlockTag(record, 0, &rnode, NULL, NULL);
ResolveRecoveryConflictWithSnapshot(latestRemovedXid, rnode);
XLogRecGetBlockTag(record, 0, &rlocator, NULL, NULL);
ResolveRecoveryConflictWithSnapshot(latestRemovedXid, rlocator);
}
if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
@@ -9007,10 +9007,10 @@ heap_xlog_delete(XLogReaderState *record)
ItemId lp = NULL;
HeapTupleHeader htup;
BlockNumber blkno;
RelFileNode target_node;
RelFileLocator target_locator;
ItemPointerData target_tid;
XLogRecGetBlockTag(record, 0, &target_node, NULL, &blkno);
XLogRecGetBlockTag(record, 0, &target_locator, NULL, &blkno);
ItemPointerSetBlockNumber(&target_tid, blkno);
ItemPointerSetOffsetNumber(&target_tid, xlrec->offnum);
@@ -9020,7 +9020,7 @@ heap_xlog_delete(XLogReaderState *record)
*/
if (xlrec->flags & XLH_DELETE_ALL_VISIBLE_CLEARED)
{
Relation reln = CreateFakeRelcacheEntry(target_node);
Relation reln = CreateFakeRelcacheEntry(target_locator);
Buffer vmbuffer = InvalidBuffer;
visibilitymap_pin(reln, blkno, &vmbuffer);
@@ -9086,12 +9086,12 @@ heap_xlog_insert(XLogReaderState *record)
xl_heap_header xlhdr;
uint32 newlen;
Size freespace = 0;
RelFileNode target_node;
RelFileLocator target_locator;
BlockNumber blkno;
ItemPointerData target_tid;
XLogRedoAction action;
XLogRecGetBlockTag(record, 0, &target_node, NULL, &blkno);
XLogRecGetBlockTag(record, 0, &target_locator, NULL, &blkno);
ItemPointerSetBlockNumber(&target_tid, blkno);
ItemPointerSetOffsetNumber(&target_tid, xlrec->offnum);
@@ -9101,7 +9101,7 @@ heap_xlog_insert(XLogReaderState *record)
*/
if (xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED)
{
Relation reln = CreateFakeRelcacheEntry(target_node);
Relation reln = CreateFakeRelcacheEntry(target_locator);
Buffer vmbuffer = InvalidBuffer;
visibilitymap_pin(reln, blkno, &vmbuffer);
@@ -9184,7 +9184,7 @@ heap_xlog_insert(XLogReaderState *record)
* totally accurate anyway.
*/
if (action == BLK_NEEDS_REDO && freespace < BLCKSZ / 5)
XLogRecordPageWithFreeSpace(target_node, blkno, freespace);
XLogRecordPageWithFreeSpace(target_locator, blkno, freespace);
}
/*
@@ -9195,7 +9195,7 @@ heap_xlog_multi_insert(XLogReaderState *record)
{
XLogRecPtr lsn = record->EndRecPtr;
xl_heap_multi_insert *xlrec;
RelFileNode rnode;
RelFileLocator rlocator;
BlockNumber blkno;
Buffer buffer;
Page page;
@@ -9217,7 +9217,7 @@ heap_xlog_multi_insert(XLogReaderState *record)
*/
xlrec = (xl_heap_multi_insert *) XLogRecGetData(record);
XLogRecGetBlockTag(record, 0, &rnode, NULL, &blkno);
XLogRecGetBlockTag(record, 0, &rlocator, NULL, &blkno);
/* check that the mutually exclusive flags are not both set */
Assert(!((xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED) &&
@@ -9229,7 +9229,7 @@ heap_xlog_multi_insert(XLogReaderState *record)
*/
if (xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED)
{
Relation reln = CreateFakeRelcacheEntry(rnode);
Relation reln = CreateFakeRelcacheEntry(rlocator);
Buffer vmbuffer = InvalidBuffer;
visibilitymap_pin(reln, blkno, &vmbuffer);
@@ -9331,7 +9331,7 @@ heap_xlog_multi_insert(XLogReaderState *record)
* totally accurate anyway.
*/
if (action == BLK_NEEDS_REDO && freespace < BLCKSZ / 5)
XLogRecordPageWithFreeSpace(rnode, blkno, freespace);
XLogRecordPageWithFreeSpace(rlocator, blkno, freespace);
}
/*
@@ -9342,7 +9342,7 @@ heap_xlog_update(XLogReaderState *record, bool hot_update)
{
XLogRecPtr lsn = record->EndRecPtr;
xl_heap_update *xlrec = (xl_heap_update *) XLogRecGetData(record);
RelFileNode rnode;
RelFileLocator rlocator;
BlockNumber oldblk;
BlockNumber newblk;
ItemPointerData newtid;
@@ -9371,7 +9371,7 @@ heap_xlog_update(XLogReaderState *record, bool hot_update)
oldtup.t_data = NULL;
oldtup.t_len = 0;
XLogRecGetBlockTag(record, 0, &rnode, NULL, &newblk);
XLogRecGetBlockTag(record, 0, &rlocator, NULL, &newblk);
if (XLogRecGetBlockTagExtended(record, 1, NULL, NULL, &oldblk, NULL))
{
/* HOT updates are never done across pages */
@@ -9388,7 +9388,7 @@ heap_xlog_update(XLogReaderState *record, bool hot_update)
*/
if (xlrec->flags & XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED)
{
Relation reln = CreateFakeRelcacheEntry(rnode);
Relation reln = CreateFakeRelcacheEntry(rlocator);
Buffer vmbuffer = InvalidBuffer;
visibilitymap_pin(reln, oldblk, &vmbuffer);
@@ -9472,7 +9472,7 @@ heap_xlog_update(XLogReaderState *record, bool hot_update)
*/
if (xlrec->flags & XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED)
{
Relation reln = CreateFakeRelcacheEntry(rnode);
Relation reln = CreateFakeRelcacheEntry(rlocator);
Buffer vmbuffer = InvalidBuffer;
visibilitymap_pin(reln, newblk, &vmbuffer);
@@ -9606,7 +9606,7 @@ heap_xlog_update(XLogReaderState *record, bool hot_update)
* totally accurate anyway.
*/
if (newaction == BLK_NEEDS_REDO && !hot_update && freespace < BLCKSZ / 5)
XLogRecordPageWithFreeSpace(rnode, newblk, freespace);
XLogRecordPageWithFreeSpace(rlocator, newblk, freespace);
}
static void
@@ -9662,13 +9662,13 @@ heap_xlog_lock(XLogReaderState *record)
*/
if (xlrec->flags & XLH_LOCK_ALL_FROZEN_CLEARED)
{
RelFileNode rnode;
RelFileLocator rlocator;
Buffer vmbuffer = InvalidBuffer;
BlockNumber block;
Relation reln;
XLogRecGetBlockTag(record, 0, &rnode, NULL, &block);
reln = CreateFakeRelcacheEntry(rnode);
XLogRecGetBlockTag(record, 0, &rlocator, NULL, &block);
reln = CreateFakeRelcacheEntry(rlocator);
visibilitymap_pin(reln, block, &vmbuffer);
visibilitymap_clear(reln, block, vmbuffer, VISIBILITYMAP_ALL_FROZEN);
@@ -9735,13 +9735,13 @@ heap_xlog_lock_updated(XLogReaderState *record)
*/
if (xlrec->flags & XLH_LOCK_ALL_FROZEN_CLEARED)
{
RelFileNode rnode;
RelFileLocator rlocator;
Buffer vmbuffer = InvalidBuffer;
BlockNumber block;
Relation reln;
XLogRecGetBlockTag(record, 0, &rnode, NULL, &block);
reln = CreateFakeRelcacheEntry(rnode);
XLogRecGetBlockTag(record, 0, &rlocator, NULL, &block);
reln = CreateFakeRelcacheEntry(rlocator);
visibilitymap_pin(reln, block, &vmbuffer);
visibilitymap_clear(reln, block, vmbuffer, VISIBILITYMAP_ALL_FROZEN);

View File

@@ -566,11 +566,11 @@ tuple_lock_retry:
*/
static void
heapam_relation_set_new_filenode(Relation rel,
const RelFileNode *newrnode,
char persistence,
TransactionId *freezeXid,
MultiXactId *minmulti)
heapam_relation_set_new_filelocator(Relation rel,
const RelFileLocator *newrlocator,
char persistence,
TransactionId *freezeXid,
MultiXactId *minmulti)
{
SMgrRelation srel;
@@ -591,7 +591,7 @@ heapam_relation_set_new_filenode(Relation rel,
*/
*minmulti = GetOldestMultiXactId();
srel = RelationCreateStorage(*newrnode, persistence, true);
srel = RelationCreateStorage(*newrlocator, persistence, true);
/*
* If required, set up an init fork for an unlogged table so that it can
@@ -608,7 +608,7 @@ heapam_relation_set_new_filenode(Relation rel,
rel->rd_rel->relkind == RELKIND_MATVIEW ||
rel->rd_rel->relkind == RELKIND_TOASTVALUE);
smgrcreate(srel, INIT_FORKNUM, false);
log_smgrcreate(newrnode, INIT_FORKNUM);
log_smgrcreate(newrlocator, INIT_FORKNUM);
smgrimmedsync(srel, INIT_FORKNUM);
}
@@ -622,11 +622,11 @@ heapam_relation_nontransactional_truncate(Relation rel)
}
static void
heapam_relation_copy_data(Relation rel, const RelFileNode *newrnode)
heapam_relation_copy_data(Relation rel, const RelFileLocator *newrlocator)
{
SMgrRelation dstrel;
dstrel = smgropen(*newrnode, rel->rd_backend);
dstrel = smgropen(*newrlocator, rel->rd_backend);
/*
* Since we copy the file directly without looking at the shared buffers,
@@ -640,10 +640,10 @@ heapam_relation_copy_data(Relation rel, const RelFileNode *newrnode)
* Create and copy all forks of the relation, and schedule unlinking of
* old physical files.
*
* NOTE: any conflict in relfilenode value will be caught in
* NOTE: any conflict in relfilenumber value will be caught in
* RelationCreateStorage().
*/
RelationCreateStorage(*newrnode, rel->rd_rel->relpersistence, true);
RelationCreateStorage(*newrlocator, rel->rd_rel->relpersistence, true);
/* copy main fork */
RelationCopyStorage(RelationGetSmgr(rel), dstrel, MAIN_FORKNUM,
@@ -664,7 +664,7 @@ heapam_relation_copy_data(Relation rel, const RelFileNode *newrnode)
if (RelationIsPermanent(rel) ||
(rel->rd_rel->relpersistence == RELPERSISTENCE_UNLOGGED &&
forkNum == INIT_FORKNUM))
log_smgrcreate(newrnode, forkNum);
log_smgrcreate(newrlocator, forkNum);
RelationCopyStorage(RelationGetSmgr(rel), dstrel, forkNum,
rel->rd_rel->relpersistence);
}
@@ -2569,7 +2569,7 @@ static const TableAmRoutine heapam_methods = {
.tuple_satisfies_snapshot = heapam_tuple_satisfies_snapshot,
.index_delete_tuples = heap_index_delete_tuples,
.relation_set_new_filenode = heapam_relation_set_new_filenode,
.relation_set_new_filelocator = heapam_relation_set_new_filelocator,
.relation_nontransactional_truncate = heapam_relation_nontransactional_truncate,
.relation_copy_data = heapam_relation_copy_data,
.relation_copy_for_cluster = heapam_relation_copy_for_cluster,

View File

@@ -318,7 +318,7 @@ end_heap_rewrite(RewriteState state)
if (state->rs_buffer_valid)
{
if (RelationNeedsWAL(state->rs_new_rel))
log_newpage(&state->rs_new_rel->rd_node,
log_newpage(&state->rs_new_rel->rd_locator,
MAIN_FORKNUM,
state->rs_blockno,
state->rs_buffer,
@@ -679,7 +679,7 @@ raw_heap_insert(RewriteState state, HeapTuple tup)
/* XLOG stuff */
if (RelationNeedsWAL(state->rs_new_rel))
log_newpage(&state->rs_new_rel->rd_node,
log_newpage(&state->rs_new_rel->rd_locator,
MAIN_FORKNUM,
state->rs_blockno,
page,
@@ -742,7 +742,7 @@ raw_heap_insert(RewriteState state, HeapTuple tup)
* When doing logical decoding - which relies on using cmin/cmax of catalog
* tuples, via xl_heap_new_cid records - heap rewrites have to log enough
* information to allow the decoding backend to update its internal mapping
* of (relfilenode,ctid) => (cmin, cmax) to be correct for the rewritten heap.
* of (relfilelocator,ctid) => (cmin, cmax) to be correct for the rewritten heap.
*
* For that, every time we find a tuple that's been modified in a catalog
* relation within the xmin horizon of any decoding slot, we log a mapping
@@ -1080,9 +1080,9 @@ logical_rewrite_heap_tuple(RewriteState state, ItemPointerData old_tid,
return;
/* fill out mapping information */
map.old_node = state->rs_old_rel->rd_node;
map.old_locator = state->rs_old_rel->rd_locator;
map.old_tid = old_tid;
map.new_node = state->rs_new_rel->rd_node;
map.new_locator = state->rs_new_rel->rd_locator;
map.new_tid = new_tid;
/* ---

View File

@@ -283,7 +283,7 @@ visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf,
if (XLogRecPtrIsInvalid(recptr))
{
Assert(!InRecovery);
recptr = log_heap_visible(rel->rd_node, heapBuf, vmBuf,
recptr = log_heap_visible(rel->rd_locator, heapBuf, vmBuf,
cutoff_xid, flags);
/*
@@ -668,7 +668,7 @@ vm_extend(Relation rel, BlockNumber vm_nblocks)
* to keep checking for creation or extension of the file, which happens
* infrequently.
*/
CacheInvalidateSmgr(reln->smgr_rnode);
CacheInvalidateSmgr(reln->smgr_rlocator);
UnlockRelationForExtension(rel, ExclusiveLock);
}

View File

@@ -836,7 +836,7 @@ _bt_log_reuse_page(Relation rel, BlockNumber blkno, FullTransactionId safexid)
*/
/* XLOG stuff */
xlrec_reuse.node = rel->rd_node;
xlrec_reuse.locator = rel->rd_locator;
xlrec_reuse.block = blkno;
xlrec_reuse.latestRemovedFullXid = safexid;

View File

@@ -166,7 +166,7 @@ btbuildempty(Relation index)
PageSetChecksumInplace(metapage, BTREE_METAPAGE);
smgrwrite(RelationGetSmgr(index), INIT_FORKNUM, BTREE_METAPAGE,
(char *) metapage, true);
log_newpage(&RelationGetSmgr(index)->smgr_rnode.node, INIT_FORKNUM,
log_newpage(&RelationGetSmgr(index)->smgr_rlocator.locator, INIT_FORKNUM,
BTREE_METAPAGE, metapage, true);
/*

View File

@@ -647,7 +647,7 @@ _bt_blwritepage(BTWriteState *wstate, Page page, BlockNumber blkno)
if (wstate->btws_use_wal)
{
/* We use the XLOG_FPI record type for this */
log_newpage(&wstate->index->rd_node, MAIN_FORKNUM, blkno, page, true);
log_newpage(&wstate->index->rd_locator, MAIN_FORKNUM, blkno, page, true);
}
/*

View File

@@ -664,11 +664,11 @@ btree_xlog_delete(XLogReaderState *record)
*/
if (InHotStandby)
{
RelFileNode rnode;
RelFileLocator rlocator;
XLogRecGetBlockTag(record, 0, &rnode, NULL, NULL);
XLogRecGetBlockTag(record, 0, &rlocator, NULL, NULL);
ResolveRecoveryConflictWithSnapshot(xlrec->latestRemovedXid, rnode);
ResolveRecoveryConflictWithSnapshot(xlrec->latestRemovedXid, rlocator);
}
/*
@@ -1006,7 +1006,7 @@ btree_xlog_reuse_page(XLogReaderState *record)
if (InHotStandby)
ResolveRecoveryConflictWithSnapshotFullXid(xlrec->latestRemovedFullXid,
xlrec->node);
xlrec->locator);
}
void

View File

@@ -15,7 +15,7 @@
#include "access/generic_xlog.h"
#include "lib/stringinfo.h"
#include "storage/relfilenode.h"
#include "storage/relfilelocator.h"
/*
* Description of generic xlog record: write page regions that this record

View File

@@ -17,7 +17,7 @@
#include "access/ginxlog.h"
#include "access/xlogutils.h"
#include "lib/stringinfo.h"
#include "storage/relfilenode.h"
#include "storage/relfilelocator.h"
static void
desc_recompress_leaf(StringInfo buf, ginxlogRecompressDataLeaf *insertData)

View File

@@ -16,7 +16,7 @@
#include "access/gistxlog.h"
#include "lib/stringinfo.h"
#include "storage/relfilenode.h"
#include "storage/relfilelocator.h"
static void
out_gistxlogPageUpdate(StringInfo buf, gistxlogPageUpdate *xlrec)
@@ -27,8 +27,8 @@ static void
out_gistxlogPageReuse(StringInfo buf, gistxlogPageReuse *xlrec)
{
appendStringInfo(buf, "rel %u/%u/%u; blk %u; latestRemovedXid %u:%u",
xlrec->node.spcNode, xlrec->node.dbNode,
xlrec->node.relNode, xlrec->block,
xlrec->locator.spcOid, xlrec->locator.dbOid,
xlrec->locator.relNumber, xlrec->block,
EpochFromFullTransactionId(xlrec->latestRemovedFullXid),
XidFromFullTransactionId(xlrec->latestRemovedFullXid));
}

View File

@@ -170,9 +170,9 @@ heap2_desc(StringInfo buf, XLogReaderState *record)
xl_heap_new_cid *xlrec = (xl_heap_new_cid *) rec;
appendStringInfo(buf, "rel %u/%u/%u; tid %u/%u",
xlrec->target_node.spcNode,
xlrec->target_node.dbNode,
xlrec->target_node.relNode,
xlrec->target_locator.spcOid,
xlrec->target_locator.dbOid,
xlrec->target_locator.relNumber,
ItemPointerGetBlockNumber(&(xlrec->target_tid)),
ItemPointerGetOffsetNumber(&(xlrec->target_tid)));
appendStringInfo(buf, "; cmin: %u, cmax: %u, combo: %u",

View File

@@ -101,8 +101,8 @@ btree_desc(StringInfo buf, XLogReaderState *record)
xl_btree_reuse_page *xlrec = (xl_btree_reuse_page *) rec;
appendStringInfo(buf, "rel %u/%u/%u; latestRemovedXid %u:%u",
xlrec->node.spcNode, xlrec->node.dbNode,
xlrec->node.relNode,
xlrec->locator.spcOid, xlrec->locator.dbOid,
xlrec->locator.relNumber,
EpochFromFullTransactionId(xlrec->latestRemovedFullXid),
XidFromFullTransactionId(xlrec->latestRemovedFullXid));
break;

View File

@@ -26,8 +26,8 @@ seq_desc(StringInfo buf, XLogReaderState *record)
if (info == XLOG_SEQ_LOG)
appendStringInfo(buf, "rel %u/%u/%u",
xlrec->node.spcNode, xlrec->node.dbNode,
xlrec->node.relNode);
xlrec->locator.spcOid, xlrec->locator.dbOid,
xlrec->locator.relNumber);
}
const char *

View File

@@ -26,7 +26,7 @@ smgr_desc(StringInfo buf, XLogReaderState *record)
if (info == XLOG_SMGR_CREATE)
{
xl_smgr_create *xlrec = (xl_smgr_create *) rec;
char *path = relpathperm(xlrec->rnode, xlrec->forkNum);
char *path = relpathperm(xlrec->rlocator, xlrec->forkNum);
appendStringInfoString(buf, path);
pfree(path);
@@ -34,7 +34,7 @@ smgr_desc(StringInfo buf, XLogReaderState *record)
else if (info == XLOG_SMGR_TRUNCATE)
{
xl_smgr_truncate *xlrec = (xl_smgr_truncate *) rec;
char *path = relpathperm(xlrec->rnode, MAIN_FORKNUM);
char *path = relpathperm(xlrec->rlocator, MAIN_FORKNUM);
appendStringInfo(buf, "%s to %u blocks flags %d", path,
xlrec->blkno, xlrec->flags);

View File

@@ -73,15 +73,15 @@ ParseCommitRecord(uint8 info, xl_xact_commit *xlrec, xl_xact_parsed_commit *pars
data += parsed->nsubxacts * sizeof(TransactionId);
}
if (parsed->xinfo & XACT_XINFO_HAS_RELFILENODES)
if (parsed->xinfo & XACT_XINFO_HAS_RELFILELOCATORS)
{
xl_xact_relfilenodes *xl_relfilenodes = (xl_xact_relfilenodes *) data;
xl_xact_relfilelocators *xl_rellocators = (xl_xact_relfilelocators *) data;
parsed->nrels = xl_relfilenodes->nrels;
parsed->xnodes = xl_relfilenodes->xnodes;
parsed->nrels = xl_rellocators->nrels;
parsed->xlocators = xl_rellocators->xlocators;
data += MinSizeOfXactRelfilenodes;
data += xl_relfilenodes->nrels * sizeof(RelFileNode);
data += MinSizeOfXactRelfileLocators;
data += xl_rellocators->nrels * sizeof(RelFileLocator);
}
if (parsed->xinfo & XACT_XINFO_HAS_DROPPED_STATS)
@@ -179,15 +179,15 @@ ParseAbortRecord(uint8 info, xl_xact_abort *xlrec, xl_xact_parsed_abort *parsed)
data += parsed->nsubxacts * sizeof(TransactionId);
}
if (parsed->xinfo & XACT_XINFO_HAS_RELFILENODES)
if (parsed->xinfo & XACT_XINFO_HAS_RELFILELOCATORS)
{
xl_xact_relfilenodes *xl_relfilenodes = (xl_xact_relfilenodes *) data;
xl_xact_relfilelocators *xl_rellocator = (xl_xact_relfilelocators *) data;
parsed->nrels = xl_relfilenodes->nrels;
parsed->xnodes = xl_relfilenodes->xnodes;
parsed->nrels = xl_rellocator->nrels;
parsed->xlocators = xl_rellocator->xlocators;
data += MinSizeOfXactRelfilenodes;
data += xl_relfilenodes->nrels * sizeof(RelFileNode);
data += MinSizeOfXactRelfileLocators;
data += xl_rellocator->nrels * sizeof(RelFileLocator);
}
if (parsed->xinfo & XACT_XINFO_HAS_DROPPED_STATS)
@@ -260,11 +260,11 @@ ParsePrepareRecord(uint8 info, xl_xact_prepare *xlrec, xl_xact_parsed_prepare *p
parsed->subxacts = (TransactionId *) bufptr;
bufptr += MAXALIGN(xlrec->nsubxacts * sizeof(TransactionId));
parsed->xnodes = (RelFileNode *) bufptr;
bufptr += MAXALIGN(xlrec->ncommitrels * sizeof(RelFileNode));
parsed->xlocators = (RelFileLocator *) bufptr;
bufptr += MAXALIGN(xlrec->ncommitrels * sizeof(RelFileLocator));
parsed->abortnodes = (RelFileNode *) bufptr;
bufptr += MAXALIGN(xlrec->nabortrels * sizeof(RelFileNode));
parsed->abortlocators = (RelFileLocator *) bufptr;
bufptr += MAXALIGN(xlrec->nabortrels * sizeof(RelFileLocator));
parsed->stats = (xl_xact_stats_item *) bufptr;
bufptr += MAXALIGN(xlrec->ncommitstats * sizeof(xl_xact_stats_item));
@@ -278,7 +278,7 @@ ParsePrepareRecord(uint8 info, xl_xact_prepare *xlrec, xl_xact_parsed_prepare *p
static void
xact_desc_relations(StringInfo buf, char *label, int nrels,
RelFileNode *xnodes)
RelFileLocator *xlocators)
{
int i;
@@ -287,7 +287,7 @@ xact_desc_relations(StringInfo buf, char *label, int nrels,
appendStringInfo(buf, "; %s:", label);
for (i = 0; i < nrels; i++)
{
char *path = relpathperm(xnodes[i], MAIN_FORKNUM);
char *path = relpathperm(xlocators[i], MAIN_FORKNUM);
appendStringInfo(buf, " %s", path);
pfree(path);
@@ -340,7 +340,7 @@ xact_desc_commit(StringInfo buf, uint8 info, xl_xact_commit *xlrec, RepOriginId
appendStringInfoString(buf, timestamptz_to_str(xlrec->xact_time));
xact_desc_relations(buf, "rels", parsed.nrels, parsed.xnodes);
xact_desc_relations(buf, "rels", parsed.nrels, parsed.xlocators);
xact_desc_subxacts(buf, parsed.nsubxacts, parsed.subxacts);
xact_desc_stats(buf, "", parsed.nstats, parsed.stats);
@@ -376,7 +376,7 @@ xact_desc_abort(StringInfo buf, uint8 info, xl_xact_abort *xlrec, RepOriginId or
appendStringInfoString(buf, timestamptz_to_str(xlrec->xact_time));
xact_desc_relations(buf, "rels", parsed.nrels, parsed.xnodes);
xact_desc_relations(buf, "rels", parsed.nrels, parsed.xlocators);
xact_desc_subxacts(buf, parsed.nsubxacts, parsed.subxacts);
if (parsed.xinfo & XACT_XINFO_HAS_ORIGIN)
@@ -400,9 +400,9 @@ xact_desc_prepare(StringInfo buf, uint8 info, xl_xact_prepare *xlrec, RepOriginI
appendStringInfo(buf, "gid %s: ", parsed.twophase_gid);
appendStringInfoString(buf, timestamptz_to_str(parsed.xact_time));
xact_desc_relations(buf, "rels(commit)", parsed.nrels, parsed.xnodes);
xact_desc_relations(buf, "rels(commit)", parsed.nrels, parsed.xlocators);
xact_desc_relations(buf, "rels(abort)", parsed.nabortrels,
parsed.abortnodes);
parsed.abortlocators);
xact_desc_stats(buf, "commit ", parsed.nstats, parsed.stats);
xact_desc_stats(buf, "abort ", parsed.nabortstats, parsed.abortstats);
xact_desc_subxacts(buf, parsed.nsubxacts, parsed.subxacts);

View File

@@ -219,12 +219,12 @@ XLogRecGetBlockRefInfo(XLogReaderState *record, bool pretty,
for (block_id = 0; block_id <= XLogRecMaxBlockId(record); block_id++)
{
RelFileNode rnode;
RelFileLocator rlocator;
ForkNumber forknum;
BlockNumber blk;
if (!XLogRecGetBlockTagExtended(record, block_id,
&rnode, &forknum, &blk, NULL))
&rlocator, &forknum, &blk, NULL))
continue;
if (detailed_format)
@@ -239,7 +239,7 @@ XLogRecGetBlockRefInfo(XLogReaderState *record, bool pretty,
appendStringInfo(buf,
"blkref #%d: rel %u/%u/%u fork %s blk %u",
block_id,
rnode.spcNode, rnode.dbNode, rnode.relNode,
rlocator.spcOid, rlocator.dbOid, rlocator.relNumber,
forkNames[forknum],
blk);
@@ -299,7 +299,7 @@ XLogRecGetBlockRefInfo(XLogReaderState *record, bool pretty,
appendStringInfo(buf,
", blkref #%d: rel %u/%u/%u fork %s blk %u",
block_id,
rnode.spcNode, rnode.dbNode, rnode.relNode,
rlocator.spcOid, rlocator.dbOid, rlocator.relNumber,
forkNames[forknum],
blk);
}
@@ -308,7 +308,7 @@ XLogRecGetBlockRefInfo(XLogReaderState *record, bool pretty,
appendStringInfo(buf,
", blkref #%d: rel %u/%u/%u blk %u",
block_id,
rnode.spcNode, rnode.dbNode, rnode.relNode,
rlocator.spcOid, rlocator.dbOid, rlocator.relNumber,
blk);
}

View File

@@ -171,7 +171,7 @@ spgbuildempty(Relation index)
PageSetChecksumInplace(page, SPGIST_METAPAGE_BLKNO);
smgrwrite(RelationGetSmgr(index), INIT_FORKNUM, SPGIST_METAPAGE_BLKNO,
(char *) page, true);
log_newpage(&(RelationGetSmgr(index))->smgr_rnode.node, INIT_FORKNUM,
log_newpage(&(RelationGetSmgr(index))->smgr_rlocator.locator, INIT_FORKNUM,
SPGIST_METAPAGE_BLKNO, page, true);
/* Likewise for the root page. */
@@ -180,7 +180,7 @@ spgbuildempty(Relation index)
PageSetChecksumInplace(page, SPGIST_ROOT_BLKNO);
smgrwrite(RelationGetSmgr(index), INIT_FORKNUM, SPGIST_ROOT_BLKNO,
(char *) page, true);
log_newpage(&(RelationGetSmgr(index))->smgr_rnode.node, INIT_FORKNUM,
log_newpage(&(RelationGetSmgr(index))->smgr_rlocator.locator, INIT_FORKNUM,
SPGIST_ROOT_BLKNO, page, true);
/* Likewise for the null-tuples root page. */
@@ -189,7 +189,7 @@ spgbuildempty(Relation index)
PageSetChecksumInplace(page, SPGIST_NULL_BLKNO);
smgrwrite(RelationGetSmgr(index), INIT_FORKNUM, SPGIST_NULL_BLKNO,
(char *) page, true);
log_newpage(&(RelationGetSmgr(index))->smgr_rnode.node, INIT_FORKNUM,
log_newpage(&(RelationGetSmgr(index))->smgr_rlocator.locator, INIT_FORKNUM,
SPGIST_NULL_BLKNO, page, true);
/*

View File

@@ -877,11 +877,11 @@ spgRedoVacuumRedirect(XLogReaderState *record)
{
if (TransactionIdIsValid(xldata->newestRedirectXid))
{
RelFileNode node;
RelFileLocator locator;
XLogRecGetBlockTag(record, 0, &node, NULL, NULL);
XLogRecGetBlockTag(record, 0, &locator, NULL, NULL);
ResolveRecoveryConflictWithSnapshot(xldata->newestRedirectXid,
node);
locator);
}
}

View File

@@ -82,7 +82,7 @@ GetTableAmRoutine(Oid amhandler)
Assert(routine->tuple_update != NULL);
Assert(routine->tuple_lock != NULL);
Assert(routine->relation_set_new_filenode != NULL);
Assert(routine->relation_set_new_filelocator != NULL);
Assert(routine->relation_nontransactional_truncate != NULL);
Assert(routine->relation_copy_data != NULL);
Assert(routine->relation_copy_for_cluster != NULL);

View File

@@ -557,7 +557,7 @@ void XLogRegisterBuffer(uint8 block_id, Buffer buf, uint8 flags);
XLogRegisterBuffer adds information about a data block to the WAL record.
block_id is an arbitrary number used to identify this page reference in
the redo routine. The information needed to re-find the page at redo -
relfilenode, fork, and block number - are included in the WAL record.
relfilelocator, fork, and block number - are included in the WAL record.
XLogInsert will automatically include a full copy of the page contents, if
this is the first modification of the buffer since the last checkpoint.
@@ -692,7 +692,7 @@ by having database restart search for files that don't have any committed
entry in pg_class, but that currently isn't done because of the possibility
of deleting data that is useful for forensic analysis of the crash.
Orphan files are harmless --- at worst they waste a bit of disk space ---
because we check for on-disk collisions when allocating new relfilenode
because we check for on-disk collisions when allocating new relfilenumber
OIDs. So cleaning up isn't really necessary.
3. Deleting a table, which requires an unlink() that could fail.
@@ -725,10 +725,10 @@ then restart recovery. This is part of the reason for not writing a WAL
entry until we've successfully done the original action.
Skipping WAL for New RelFileNode
Skipping WAL for New RelFileLocator
--------------------------------
Under wal_level=minimal, if a change modifies a relfilenode that ROLLBACK
Under wal_level=minimal, if a change modifies a relfilenumber that ROLLBACK
would unlink, in-tree access methods write no WAL for that change. Code that
writes WAL without calling RelationNeedsWAL() must check for this case. This
skipping is mandatory. If a WAL-writing change preceded a WAL-skipping change
@@ -748,9 +748,9 @@ unconditionally for permanent relations. Under these approaches, the access
method callbacks must not call functions that react to RelationNeedsWAL().
This applies only to WAL records whose replay would modify bytes stored in the
new relfilenode. It does not apply to other records about the relfilenode,
new relfilenumber. It does not apply to other records about the relfilenumber,
such as XLOG_SMGR_CREATE. Because it operates at the level of individual
relfilenodes, RelationNeedsWAL() can differ for tightly-coupled relations.
relfilenumbers, RelationNeedsWAL() can differ for tightly-coupled relations.
Consider "CREATE TABLE t (); BEGIN; ALTER TABLE t ADD c text; ..." in which
ALTER TABLE adds a TOAST relation. The TOAST relation will skip WAL, while
the table owning it will not. ALTER TABLE SET TABLESPACE will cause a table
@@ -860,7 +860,7 @@ Changes to a temp table are not WAL-logged, hence could reach disk in
advance of T1's commit, but we don't care since temp table contents don't
survive crashes anyway.
Database writes that skip WAL for new relfilenodes are also safe. In these
Database writes that skip WAL for new relfilenumbers are also safe. In these
cases it's entirely possible for the data to reach disk before T1's commit,
because T1 will fsync it down to disk without any sort of interlock. However,
all these paths are designed to write data that no other transaction can see

View File

@@ -126,7 +126,7 @@ worker. This includes:
an index that is currently being rebuilt.
- Active relmapper.c mapping state. This is needed to allow consistent
answers when fetching the current relfilenode for relation oids of
answers when fetching the current relfilenumber for relation oids of
mapped relations.
To prevent unprincipled deadlocks when running in parallel mode, this code

View File

@@ -204,7 +204,7 @@ static void RecordTransactionCommitPrepared(TransactionId xid,
int nchildren,
TransactionId *children,
int nrels,
RelFileNode *rels,
RelFileLocator *rels,
int nstats,
xl_xact_stats_item *stats,
int ninvalmsgs,
@@ -215,7 +215,7 @@ static void RecordTransactionAbortPrepared(TransactionId xid,
int nchildren,
TransactionId *children,
int nrels,
RelFileNode *rels,
RelFileLocator *rels,
int nstats,
xl_xact_stats_item *stats,
const char *gid);
@@ -951,8 +951,8 @@ TwoPhaseGetDummyProc(TransactionId xid, bool lock_held)
*
* 1. TwoPhaseFileHeader
* 2. TransactionId[] (subtransactions)
* 3. RelFileNode[] (files to be deleted at commit)
* 4. RelFileNode[] (files to be deleted at abort)
* 3. RelFileLocator[] (files to be deleted at commit)
* 4. RelFileLocator[] (files to be deleted at abort)
* 5. SharedInvalidationMessage[] (inval messages to be sent at commit)
* 6. TwoPhaseRecordOnDisk
* 7. ...
@@ -1047,8 +1047,8 @@ StartPrepare(GlobalTransaction gxact)
TransactionId xid = gxact->xid;
TwoPhaseFileHeader hdr;
TransactionId *children;
RelFileNode *commitrels;
RelFileNode *abortrels;
RelFileLocator *commitrels;
RelFileLocator *abortrels;
xl_xact_stats_item *abortstats = NULL;
xl_xact_stats_item *commitstats = NULL;
SharedInvalidationMessage *invalmsgs;
@@ -1102,12 +1102,12 @@ StartPrepare(GlobalTransaction gxact)
}
if (hdr.ncommitrels > 0)
{
save_state_data(commitrels, hdr.ncommitrels * sizeof(RelFileNode));
save_state_data(commitrels, hdr.ncommitrels * sizeof(RelFileLocator));
pfree(commitrels);
}
if (hdr.nabortrels > 0)
{
save_state_data(abortrels, hdr.nabortrels * sizeof(RelFileNode));
save_state_data(abortrels, hdr.nabortrels * sizeof(RelFileLocator));
pfree(abortrels);
}
if (hdr.ncommitstats > 0)
@@ -1489,9 +1489,9 @@ FinishPreparedTransaction(const char *gid, bool isCommit)
TwoPhaseFileHeader *hdr;
TransactionId latestXid;
TransactionId *children;
RelFileNode *commitrels;
RelFileNode *abortrels;
RelFileNode *delrels;
RelFileLocator *commitrels;
RelFileLocator *abortrels;
RelFileLocator *delrels;
int ndelrels;
xl_xact_stats_item *commitstats;
xl_xact_stats_item *abortstats;
@@ -1525,10 +1525,10 @@ FinishPreparedTransaction(const char *gid, bool isCommit)
bufptr += MAXALIGN(hdr->gidlen);
children = (TransactionId *) bufptr;
bufptr += MAXALIGN(hdr->nsubxacts * sizeof(TransactionId));
commitrels = (RelFileNode *) bufptr;
bufptr += MAXALIGN(hdr->ncommitrels * sizeof(RelFileNode));
abortrels = (RelFileNode *) bufptr;
bufptr += MAXALIGN(hdr->nabortrels * sizeof(RelFileNode));
commitrels = (RelFileLocator *) bufptr;
bufptr += MAXALIGN(hdr->ncommitrels * sizeof(RelFileLocator));
abortrels = (RelFileLocator *) bufptr;
bufptr += MAXALIGN(hdr->nabortrels * sizeof(RelFileLocator));
commitstats = (xl_xact_stats_item *) bufptr;
bufptr += MAXALIGN(hdr->ncommitstats * sizeof(xl_xact_stats_item));
abortstats = (xl_xact_stats_item *) bufptr;
@@ -2100,8 +2100,8 @@ RecoverPreparedTransactions(void)
bufptr += MAXALIGN(hdr->gidlen);
subxids = (TransactionId *) bufptr;
bufptr += MAXALIGN(hdr->nsubxacts * sizeof(TransactionId));
bufptr += MAXALIGN(hdr->ncommitrels * sizeof(RelFileNode));
bufptr += MAXALIGN(hdr->nabortrels * sizeof(RelFileNode));
bufptr += MAXALIGN(hdr->ncommitrels * sizeof(RelFileLocator));
bufptr += MAXALIGN(hdr->nabortrels * sizeof(RelFileLocator));
bufptr += MAXALIGN(hdr->ncommitstats * sizeof(xl_xact_stats_item));
bufptr += MAXALIGN(hdr->nabortstats * sizeof(xl_xact_stats_item));
bufptr += MAXALIGN(hdr->ninvalmsgs * sizeof(SharedInvalidationMessage));
@@ -2285,7 +2285,7 @@ RecordTransactionCommitPrepared(TransactionId xid,
int nchildren,
TransactionId *children,
int nrels,
RelFileNode *rels,
RelFileLocator *rels,
int nstats,
xl_xact_stats_item *stats,
int ninvalmsgs,
@@ -2383,7 +2383,7 @@ RecordTransactionAbortPrepared(TransactionId xid,
int nchildren,
TransactionId *children,
int nrels,
RelFileNode *rels,
RelFileLocator *rels,
int nstats,
xl_xact_stats_item *stats,
const char *gid)

View File

@@ -521,7 +521,7 @@ ForceTransactionIdLimitUpdate(void)
* wide, counter wraparound will occur eventually, and therefore it is unwise
* to assume they are unique unless precautions are taken to make them so.
* Hence, this routine should generally not be used directly. The only direct
* callers should be GetNewOidWithIndex() and GetNewRelFileNode() in
* callers should be GetNewOidWithIndex() and GetNewRelFileNumber() in
* catalog/catalog.c.
*/
Oid

View File

@@ -1282,7 +1282,7 @@ RecordTransactionCommit(void)
bool markXidCommitted = TransactionIdIsValid(xid);
TransactionId latestXid = InvalidTransactionId;
int nrels;
RelFileNode *rels;
RelFileLocator *rels;
int nchildren;
TransactionId *children;
int ndroppedstats = 0;
@@ -1705,7 +1705,7 @@ RecordTransactionAbort(bool isSubXact)
TransactionId xid = GetCurrentTransactionIdIfAny();
TransactionId latestXid;
int nrels;
RelFileNode *rels;
RelFileLocator *rels;
int ndroppedstats = 0;
xl_xact_stats_item *droppedstats = NULL;
int nchildren;
@@ -5586,7 +5586,7 @@ xactGetCommittedChildren(TransactionId **ptr)
XLogRecPtr
XactLogCommitRecord(TimestampTz commit_time,
int nsubxacts, TransactionId *subxacts,
int nrels, RelFileNode *rels,
int nrels, RelFileLocator *rels,
int ndroppedstats, xl_xact_stats_item *droppedstats,
int nmsgs, SharedInvalidationMessage *msgs,
bool relcacheInval,
@@ -5597,7 +5597,7 @@ XactLogCommitRecord(TimestampTz commit_time,
xl_xact_xinfo xl_xinfo;
xl_xact_dbinfo xl_dbinfo;
xl_xact_subxacts xl_subxacts;
xl_xact_relfilenodes xl_relfilenodes;
xl_xact_relfilelocators xl_relfilelocators;
xl_xact_stats_items xl_dropped_stats;
xl_xact_invals xl_invals;
xl_xact_twophase xl_twophase;
@@ -5651,8 +5651,8 @@ XactLogCommitRecord(TimestampTz commit_time,
if (nrels > 0)
{
xl_xinfo.xinfo |= XACT_XINFO_HAS_RELFILENODES;
xl_relfilenodes.nrels = nrels;
xl_xinfo.xinfo |= XACT_XINFO_HAS_RELFILELOCATORS;
xl_relfilelocators.nrels = nrels;
info |= XLR_SPECIAL_REL_UPDATE;
}
@@ -5710,12 +5710,12 @@ XactLogCommitRecord(TimestampTz commit_time,
nsubxacts * sizeof(TransactionId));
}
if (xl_xinfo.xinfo & XACT_XINFO_HAS_RELFILENODES)
if (xl_xinfo.xinfo & XACT_XINFO_HAS_RELFILELOCATORS)
{
XLogRegisterData((char *) (&xl_relfilenodes),
MinSizeOfXactRelfilenodes);
XLogRegisterData((char *) (&xl_relfilelocators),
MinSizeOfXactRelfileLocators);
XLogRegisterData((char *) rels,
nrels * sizeof(RelFileNode));
nrels * sizeof(RelFileLocator));
}
if (xl_xinfo.xinfo & XACT_XINFO_HAS_DROPPED_STATS)
@@ -5758,7 +5758,7 @@ XactLogCommitRecord(TimestampTz commit_time,
XLogRecPtr
XactLogAbortRecord(TimestampTz abort_time,
int nsubxacts, TransactionId *subxacts,
int nrels, RelFileNode *rels,
int nrels, RelFileLocator *rels,
int ndroppedstats, xl_xact_stats_item *droppedstats,
int xactflags, TransactionId twophase_xid,
const char *twophase_gid)
@@ -5766,7 +5766,7 @@ XactLogAbortRecord(TimestampTz abort_time,
xl_xact_abort xlrec;
xl_xact_xinfo xl_xinfo;
xl_xact_subxacts xl_subxacts;
xl_xact_relfilenodes xl_relfilenodes;
xl_xact_relfilelocators xl_relfilelocators;
xl_xact_stats_items xl_dropped_stats;
xl_xact_twophase xl_twophase;
xl_xact_dbinfo xl_dbinfo;
@@ -5800,8 +5800,8 @@ XactLogAbortRecord(TimestampTz abort_time,
if (nrels > 0)
{
xl_xinfo.xinfo |= XACT_XINFO_HAS_RELFILENODES;
xl_relfilenodes.nrels = nrels;
xl_xinfo.xinfo |= XACT_XINFO_HAS_RELFILELOCATORS;
xl_relfilelocators.nrels = nrels;
info |= XLR_SPECIAL_REL_UPDATE;
}
@@ -5864,12 +5864,12 @@ XactLogAbortRecord(TimestampTz abort_time,
nsubxacts * sizeof(TransactionId));
}
if (xl_xinfo.xinfo & XACT_XINFO_HAS_RELFILENODES)
if (xl_xinfo.xinfo & XACT_XINFO_HAS_RELFILELOCATORS)
{
XLogRegisterData((char *) (&xl_relfilenodes),
MinSizeOfXactRelfilenodes);
XLogRegisterData((char *) (&xl_relfilelocators),
MinSizeOfXactRelfileLocators);
XLogRegisterData((char *) rels,
nrels * sizeof(RelFileNode));
nrels * sizeof(RelFileLocator));
}
if (xl_xinfo.xinfo & XACT_XINFO_HAS_DROPPED_STATS)
@@ -6010,7 +6010,7 @@ xact_redo_commit(xl_xact_parsed_commit *parsed,
XLogFlush(lsn);
/* Make sure files supposed to be dropped are dropped */
DropRelationFiles(parsed->xnodes, parsed->nrels, true);
DropRelationFiles(parsed->xlocators, parsed->nrels, true);
}
if (parsed->nstats > 0)
@@ -6121,7 +6121,7 @@ xact_redo_abort(xl_xact_parsed_abort *parsed, TransactionId xid,
*/
XLogFlush(lsn);
DropRelationFiles(parsed->xnodes, parsed->nrels, true);
DropRelationFiles(parsed->xlocators, parsed->nrels, true);
}
if (parsed->nstats > 0)

View File

@@ -70,7 +70,7 @@ typedef struct
{
bool in_use; /* is this slot in use? */
uint8 flags; /* REGBUF_* flags */
RelFileNode rnode; /* identifies the relation and block */
RelFileLocator rlocator; /* identifies the relation and block */
ForkNumber forkno;
BlockNumber block;
Page page; /* page content */
@@ -257,7 +257,7 @@ XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
regbuf = &registered_buffers[block_id];
BufferGetTag(buffer, &regbuf->rnode, &regbuf->forkno, &regbuf->block);
BufferGetTag(buffer, &regbuf->rlocator, &regbuf->forkno, &regbuf->block);
regbuf->page = BufferGetPage(buffer);
regbuf->flags = flags;
regbuf->rdata_tail = (XLogRecData *) &regbuf->rdata_head;
@@ -278,7 +278,7 @@ XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
if (i == block_id || !regbuf_old->in_use)
continue;
Assert(!RelFileNodeEquals(regbuf_old->rnode, regbuf->rnode) ||
Assert(!RelFileLocatorEquals(regbuf_old->rlocator, regbuf->rlocator) ||
regbuf_old->forkno != regbuf->forkno ||
regbuf_old->block != regbuf->block);
}
@@ -293,7 +293,7 @@ XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
* shared buffer pool (i.e. when you don't have a Buffer for it).
*/
void
XLogRegisterBlock(uint8 block_id, RelFileNode *rnode, ForkNumber forknum,
XLogRegisterBlock(uint8 block_id, RelFileLocator *rlocator, ForkNumber forknum,
BlockNumber blknum, Page page, uint8 flags)
{
registered_buffer *regbuf;
@@ -308,7 +308,7 @@ XLogRegisterBlock(uint8 block_id, RelFileNode *rnode, ForkNumber forknum,
regbuf = &registered_buffers[block_id];
regbuf->rnode = *rnode;
regbuf->rlocator = *rlocator;
regbuf->forkno = forknum;
regbuf->block = blknum;
regbuf->page = page;
@@ -331,7 +331,7 @@ XLogRegisterBlock(uint8 block_id, RelFileNode *rnode, ForkNumber forknum,
if (i == block_id || !regbuf_old->in_use)
continue;
Assert(!RelFileNodeEquals(regbuf_old->rnode, regbuf->rnode) ||
Assert(!RelFileLocatorEquals(regbuf_old->rlocator, regbuf->rlocator) ||
regbuf_old->forkno != regbuf->forkno ||
regbuf_old->block != regbuf->block);
}
@@ -768,7 +768,7 @@ XLogRecordAssemble(RmgrId rmid, uint8 info,
rdt_datas_last = regbuf->rdata_tail;
}
if (prev_regbuf && RelFileNodeEquals(regbuf->rnode, prev_regbuf->rnode))
if (prev_regbuf && RelFileLocatorEquals(regbuf->rlocator, prev_regbuf->rlocator))
{
samerel = true;
bkpb.fork_flags |= BKPBLOCK_SAME_REL;
@@ -793,8 +793,8 @@ XLogRecordAssemble(RmgrId rmid, uint8 info,
}
if (!samerel)
{
memcpy(scratch, &regbuf->rnode, sizeof(RelFileNode));
scratch += sizeof(RelFileNode);
memcpy(scratch, &regbuf->rlocator, sizeof(RelFileLocator));
scratch += sizeof(RelFileLocator);
}
memcpy(scratch, &regbuf->block, sizeof(BlockNumber));
scratch += sizeof(BlockNumber);
@@ -1031,7 +1031,7 @@ XLogSaveBufferForHint(Buffer buffer, bool buffer_std)
int flags = 0;
PGAlignedBlock copied_buffer;
char *origdata = (char *) BufferGetBlock(buffer);
RelFileNode rnode;
RelFileLocator rlocator;
ForkNumber forkno;
BlockNumber blkno;
@@ -1058,8 +1058,8 @@ XLogSaveBufferForHint(Buffer buffer, bool buffer_std)
if (buffer_std)
flags |= REGBUF_STANDARD;
BufferGetTag(buffer, &rnode, &forkno, &blkno);
XLogRegisterBlock(0, &rnode, forkno, blkno, copied_buffer.data, flags);
BufferGetTag(buffer, &rlocator, &forkno, &blkno);
XLogRegisterBlock(0, &rlocator, forkno, blkno, copied_buffer.data, flags);
recptr = XLogInsert(RM_XLOG_ID, XLOG_FPI_FOR_HINT);
}
@@ -1080,7 +1080,7 @@ XLogSaveBufferForHint(Buffer buffer, bool buffer_std)
* the unused space to be left out from the WAL record, making it smaller.
*/
XLogRecPtr
log_newpage(RelFileNode *rnode, ForkNumber forkNum, BlockNumber blkno,
log_newpage(RelFileLocator *rlocator, ForkNumber forkNum, BlockNumber blkno,
Page page, bool page_std)
{
int flags;
@@ -1091,7 +1091,7 @@ log_newpage(RelFileNode *rnode, ForkNumber forkNum, BlockNumber blkno,
flags |= REGBUF_STANDARD;
XLogBeginInsert();
XLogRegisterBlock(0, rnode, forkNum, blkno, page, flags);
XLogRegisterBlock(0, rlocator, forkNum, blkno, page, flags);
recptr = XLogInsert(RM_XLOG_ID, XLOG_FPI);
/*
@@ -1112,7 +1112,7 @@ log_newpage(RelFileNode *rnode, ForkNumber forkNum, BlockNumber blkno,
* because we can write multiple pages in a single WAL record.
*/
void
log_newpages(RelFileNode *rnode, ForkNumber forkNum, int num_pages,
log_newpages(RelFileLocator *rlocator, ForkNumber forkNum, int num_pages,
BlockNumber *blknos, Page *pages, bool page_std)
{
int flags;
@@ -1142,7 +1142,7 @@ log_newpages(RelFileNode *rnode, ForkNumber forkNum, int num_pages,
nbatch = 0;
while (nbatch < XLR_MAX_BLOCK_ID && i < num_pages)
{
XLogRegisterBlock(nbatch, rnode, forkNum, blknos[i], pages[i], flags);
XLogRegisterBlock(nbatch, rlocator, forkNum, blknos[i], pages[i], flags);
i++;
nbatch++;
}
@@ -1177,16 +1177,16 @@ XLogRecPtr
log_newpage_buffer(Buffer buffer, bool page_std)
{
Page page = BufferGetPage(buffer);
RelFileNode rnode;
RelFileLocator rlocator;
ForkNumber forkNum;
BlockNumber blkno;
/* Shared buffers should be modified in a critical section. */
Assert(CritSectionCount > 0);
BufferGetTag(buffer, &rnode, &forkNum, &blkno);
BufferGetTag(buffer, &rlocator, &forkNum, &blkno);
return log_newpage(&rnode, forkNum, blkno, page, page_std);
return log_newpage(&rlocator, forkNum, blkno, page, page_std);
}
/*

View File

@@ -138,7 +138,7 @@ struct XLogPrefetcher
dlist_head filter_queue;
/* Book-keeping to avoid repeat prefetches. */
RelFileNode recent_rnode[XLOGPREFETCHER_SEQ_WINDOW_SIZE];
RelFileLocator recent_rlocator[XLOGPREFETCHER_SEQ_WINDOW_SIZE];
BlockNumber recent_block[XLOGPREFETCHER_SEQ_WINDOW_SIZE];
int recent_idx;
@@ -161,7 +161,7 @@ struct XLogPrefetcher
*/
typedef struct XLogPrefetcherFilter
{
RelFileNode rnode;
RelFileLocator rlocator;
XLogRecPtr filter_until_replayed;
BlockNumber filter_from_block;
dlist_node link;
@@ -187,11 +187,11 @@ typedef struct XLogPrefetchStats
} XLogPrefetchStats;
static inline void XLogPrefetcherAddFilter(XLogPrefetcher *prefetcher,
RelFileNode rnode,
RelFileLocator rlocator,
BlockNumber blockno,
XLogRecPtr lsn);
static inline bool XLogPrefetcherIsFiltered(XLogPrefetcher *prefetcher,
RelFileNode rnode,
RelFileLocator rlocator,
BlockNumber blockno);
static inline void XLogPrefetcherCompleteFilters(XLogPrefetcher *prefetcher,
XLogRecPtr replaying_lsn);
@@ -365,7 +365,7 @@ XLogPrefetcherAllocate(XLogReaderState *reader)
{
XLogPrefetcher *prefetcher;
static HASHCTL hash_table_ctl = {
.keysize = sizeof(RelFileNode),
.keysize = sizeof(RelFileLocator),
.entrysize = sizeof(XLogPrefetcherFilter)
};
@@ -568,22 +568,23 @@ XLogPrefetcherNextBlock(uintptr_t pgsr_private, XLogRecPtr *lsn)
{
xl_dbase_create_file_copy_rec *xlrec =
(xl_dbase_create_file_copy_rec *) record->main_data;
RelFileNode rnode = {InvalidOid, xlrec->db_id, InvalidOid};
RelFileLocator rlocator =
{InvalidOid, xlrec->db_id, InvalidRelFileNumber};
/*
* Don't try to prefetch anything in this database until
* it has been created, or we might confuse the blocks of
* different generations, if a database OID or relfilenode
* is reused. It's also more efficient than discovering
* that relations don't exist on disk yet with ENOENT
* errors.
* different generations, if a database OID or
* relfilenumber is reused. It's also more efficient than
* discovering that relations don't exist on disk yet with
* ENOENT errors.
*/
XLogPrefetcherAddFilter(prefetcher, rnode, 0, record->lsn);
XLogPrefetcherAddFilter(prefetcher, rlocator, 0, record->lsn);
#ifdef XLOGPREFETCHER_DEBUG_LEVEL
elog(XLOGPREFETCHER_DEBUG_LEVEL,
"suppressing prefetch in database %u until %X/%X is replayed due to raw file copy",
rnode.dbNode,
rlocator.dbOid,
LSN_FORMAT_ARGS(record->lsn));
#endif
}
@@ -601,19 +602,19 @@ XLogPrefetcherNextBlock(uintptr_t pgsr_private, XLogRecPtr *lsn)
* Don't prefetch anything for this whole relation
* until it has been created. Otherwise we might
* confuse the blocks of different generations, if a
* relfilenode is reused. This also avoids the need
* relfilenumber is reused. This also avoids the need
* to discover the problem via extra syscalls that
* report ENOENT.
*/
XLogPrefetcherAddFilter(prefetcher, xlrec->rnode, 0,
XLogPrefetcherAddFilter(prefetcher, xlrec->rlocator, 0,
record->lsn);
#ifdef XLOGPREFETCHER_DEBUG_LEVEL
elog(XLOGPREFETCHER_DEBUG_LEVEL,
"suppressing prefetch in relation %u/%u/%u until %X/%X is replayed, which creates the relation",
xlrec->rnode.spcNode,
xlrec->rnode.dbNode,
xlrec->rnode.relNode,
xlrec->rlocator.spcOid,
xlrec->rlocator.dbOid,
xlrec->rlocator.relNumber,
LSN_FORMAT_ARGS(record->lsn));
#endif
}
@@ -627,16 +628,16 @@ XLogPrefetcherNextBlock(uintptr_t pgsr_private, XLogRecPtr *lsn)
* Don't consider prefetching anything in the truncated
* range until the truncation has been performed.
*/
XLogPrefetcherAddFilter(prefetcher, xlrec->rnode,
XLogPrefetcherAddFilter(prefetcher, xlrec->rlocator,
xlrec->blkno,
record->lsn);
#ifdef XLOGPREFETCHER_DEBUG_LEVEL
elog(XLOGPREFETCHER_DEBUG_LEVEL,
"suppressing prefetch in relation %u/%u/%u from block %u until %X/%X is replayed, which truncates the relation",
xlrec->rnode.spcNode,
xlrec->rnode.dbNode,
xlrec->rnode.relNode,
xlrec->rlocator.spcOid,
xlrec->rlocator.dbOid,
xlrec->rlocator.relNumber,
xlrec->blkno,
LSN_FORMAT_ARGS(record->lsn));
#endif
@@ -688,7 +689,7 @@ XLogPrefetcherNextBlock(uintptr_t pgsr_private, XLogRecPtr *lsn)
}
/* Should we skip prefetching this block due to a filter? */
if (XLogPrefetcherIsFiltered(prefetcher, block->rnode, block->blkno))
if (XLogPrefetcherIsFiltered(prefetcher, block->rlocator, block->blkno))
{
XLogPrefetchIncrement(&SharedStats->skip_new);
return LRQ_NEXT_NO_IO;
@@ -698,7 +699,7 @@ XLogPrefetcherNextBlock(uintptr_t pgsr_private, XLogRecPtr *lsn)
for (int i = 0; i < XLOGPREFETCHER_SEQ_WINDOW_SIZE; ++i)
{
if (block->blkno == prefetcher->recent_block[i] &&
RelFileNodeEquals(block->rnode, prefetcher->recent_rnode[i]))
RelFileLocatorEquals(block->rlocator, prefetcher->recent_rlocator[i]))
{
/*
* XXX If we also remembered where it was, we could set
@@ -709,7 +710,7 @@ XLogPrefetcherNextBlock(uintptr_t pgsr_private, XLogRecPtr *lsn)
return LRQ_NEXT_NO_IO;
}
}
prefetcher->recent_rnode[prefetcher->recent_idx] = block->rnode;
prefetcher->recent_rlocator[prefetcher->recent_idx] = block->rlocator;
prefetcher->recent_block[prefetcher->recent_idx] = block->blkno;
prefetcher->recent_idx =
(prefetcher->recent_idx + 1) % XLOGPREFETCHER_SEQ_WINDOW_SIZE;
@@ -719,7 +720,7 @@ XLogPrefetcherNextBlock(uintptr_t pgsr_private, XLogRecPtr *lsn)
* same relation (with some scheme to handle invalidations
* safely), but for now we'll call smgropen() every time.
*/
reln = smgropen(block->rnode, InvalidBackendId);
reln = smgropen(block->rlocator, InvalidBackendId);
/*
* If the relation file doesn't exist on disk, for example because
@@ -733,12 +734,12 @@ XLogPrefetcherNextBlock(uintptr_t pgsr_private, XLogRecPtr *lsn)
#ifdef XLOGPREFETCHER_DEBUG_LEVEL
elog(XLOGPREFETCHER_DEBUG_LEVEL,
"suppressing all prefetch in relation %u/%u/%u until %X/%X is replayed, because the relation does not exist on disk",
reln->smgr_rnode.node.spcNode,
reln->smgr_rnode.node.dbNode,
reln->smgr_rnode.node.relNode,
reln->smgr_rlocator.locator.spcOid,
reln->smgr_rlocator.locator.dbOid,
reln->smgr_rlocator.locator.relNumber,
LSN_FORMAT_ARGS(record->lsn));
#endif
XLogPrefetcherAddFilter(prefetcher, block->rnode, 0,
XLogPrefetcherAddFilter(prefetcher, block->rlocator, 0,
record->lsn);
XLogPrefetchIncrement(&SharedStats->skip_new);
return LRQ_NEXT_NO_IO;
@@ -754,13 +755,13 @@ XLogPrefetcherNextBlock(uintptr_t pgsr_private, XLogRecPtr *lsn)
#ifdef XLOGPREFETCHER_DEBUG_LEVEL
elog(XLOGPREFETCHER_DEBUG_LEVEL,
"suppressing prefetch in relation %u/%u/%u from block %u until %X/%X is replayed, because the relation is too small",
reln->smgr_rnode.node.spcNode,
reln->smgr_rnode.node.dbNode,
reln->smgr_rnode.node.relNode,
reln->smgr_rlocator.locator.spcOid,
reln->smgr_rlocator.locator.dbOid,
reln->smgr_rlocator.locator.relNumber,
block->blkno,
LSN_FORMAT_ARGS(record->lsn));
#endif
XLogPrefetcherAddFilter(prefetcher, block->rnode, block->blkno,
XLogPrefetcherAddFilter(prefetcher, block->rlocator, block->blkno,
record->lsn);
XLogPrefetchIncrement(&SharedStats->skip_new);
return LRQ_NEXT_NO_IO;
@@ -793,9 +794,9 @@ XLogPrefetcherNextBlock(uintptr_t pgsr_private, XLogRecPtr *lsn)
*/
elog(ERROR,
"could not prefetch relation %u/%u/%u block %u",
reln->smgr_rnode.node.spcNode,
reln->smgr_rnode.node.dbNode,
reln->smgr_rnode.node.relNode,
reln->smgr_rlocator.locator.spcOid,
reln->smgr_rlocator.locator.dbOid,
reln->smgr_rlocator.locator.relNumber,
block->blkno);
}
}
@@ -852,17 +853,17 @@ pg_stat_get_recovery_prefetch(PG_FUNCTION_ARGS)
}
/*
* Don't prefetch any blocks >= 'blockno' from a given 'rnode', until 'lsn'
* Don't prefetch any blocks >= 'blockno' from a given 'rlocator', until 'lsn'
* has been replayed.
*/
static inline void
XLogPrefetcherAddFilter(XLogPrefetcher *prefetcher, RelFileNode rnode,
XLogPrefetcherAddFilter(XLogPrefetcher *prefetcher, RelFileLocator rlocator,
BlockNumber blockno, XLogRecPtr lsn)
{
XLogPrefetcherFilter *filter;
bool found;
filter = hash_search(prefetcher->filter_table, &rnode, HASH_ENTER, &found);
filter = hash_search(prefetcher->filter_table, &rlocator, HASH_ENTER, &found);
if (!found)
{
/*
@@ -875,9 +876,10 @@ XLogPrefetcherAddFilter(XLogPrefetcher *prefetcher, RelFileNode rnode,
else
{
/*
* We were already filtering this rnode. Extend the filter's lifetime
* to cover this WAL record, but leave the lower of the block numbers
* there because we don't want to have to track individual blocks.
* We were already filtering this rlocator. Extend the filter's
* lifetime to cover this WAL record, but leave the lower of the block
* numbers there because we don't want to have to track individual
* blocks.
*/
filter->filter_until_replayed = lsn;
dlist_delete(&filter->link);
@@ -890,7 +892,7 @@ XLogPrefetcherAddFilter(XLogPrefetcher *prefetcher, RelFileNode rnode,
* Have we replayed any records that caused us to begin filtering a block
* range? That means that relations should have been created, extended or
* dropped as required, so we can stop filtering out accesses to a given
* relfilenode.
* relfilenumber.
*/
static inline void
XLogPrefetcherCompleteFilters(XLogPrefetcher *prefetcher, XLogRecPtr replaying_lsn)
@@ -913,7 +915,7 @@ XLogPrefetcherCompleteFilters(XLogPrefetcher *prefetcher, XLogRecPtr replaying_l
* Check if a given block should be skipped due to a filter.
*/
static inline bool
XLogPrefetcherIsFiltered(XLogPrefetcher *prefetcher, RelFileNode rnode,
XLogPrefetcherIsFiltered(XLogPrefetcher *prefetcher, RelFileLocator rlocator,
BlockNumber blockno)
{
/*
@@ -925,13 +927,13 @@ XLogPrefetcherIsFiltered(XLogPrefetcher *prefetcher, RelFileNode rnode,
XLogPrefetcherFilter *filter;
/* See if the block range is filtered. */
filter = hash_search(prefetcher->filter_table, &rnode, HASH_FIND, NULL);
filter = hash_search(prefetcher->filter_table, &rlocator, HASH_FIND, NULL);
if (filter && filter->filter_from_block <= blockno)
{
#ifdef XLOGPREFETCHER_DEBUG_LEVEL
elog(XLOGPREFETCHER_DEBUG_LEVEL,
"prefetch of %u/%u/%u block %u suppressed; filtering until LSN %X/%X is replayed (blocks >= %u filtered)",
rnode.spcNode, rnode.dbNode, rnode.relNode, blockno,
rlocator.spcOid, rlocator.dbOid, rlocator.relNumber, blockno,
LSN_FORMAT_ARGS(filter->filter_until_replayed),
filter->filter_from_block);
#endif
@@ -939,15 +941,15 @@ XLogPrefetcherIsFiltered(XLogPrefetcher *prefetcher, RelFileNode rnode,
}
/* See if the whole database is filtered. */
rnode.relNode = InvalidOid;
rnode.spcNode = InvalidOid;
filter = hash_search(prefetcher->filter_table, &rnode, HASH_FIND, NULL);
rlocator.relNumber = InvalidRelFileNumber;
rlocator.spcOid = InvalidOid;
filter = hash_search(prefetcher->filter_table, &rlocator, HASH_FIND, NULL);
if (filter)
{
#ifdef XLOGPREFETCHER_DEBUG_LEVEL
elog(XLOGPREFETCHER_DEBUG_LEVEL,
"prefetch of %u/%u/%u block %u suppressed; filtering until LSN %X/%X is replayed (whole database)",
rnode.spcNode, rnode.dbNode, rnode.relNode, blockno,
rlocator.spcOid, rlocator.dbOid, rlocator.relNumber, blockno,
LSN_FORMAT_ARGS(filter->filter_until_replayed));
#endif
return true;

View File

@@ -1638,7 +1638,7 @@ DecodeXLogRecord(XLogReaderState *state,
char *out;
uint32 remaining;
uint32 datatotal;
RelFileNode *rnode = NULL;
RelFileLocator *rlocator = NULL;
uint8 block_id;
decoded->header = *record;
@@ -1823,12 +1823,12 @@ DecodeXLogRecord(XLogReaderState *state,
}
if (!(fork_flags & BKPBLOCK_SAME_REL))
{
COPY_HEADER_FIELD(&blk->rnode, sizeof(RelFileNode));
rnode = &blk->rnode;
COPY_HEADER_FIELD(&blk->rlocator, sizeof(RelFileLocator));
rlocator = &blk->rlocator;
}
else
{
if (rnode == NULL)
if (rlocator == NULL)
{
report_invalid_record(state,
"BKPBLOCK_SAME_REL set but no previous rel at %X/%X",
@@ -1836,7 +1836,7 @@ DecodeXLogRecord(XLogReaderState *state,
goto err;
}
blk->rnode = *rnode;
blk->rlocator = *rlocator;
}
COPY_HEADER_FIELD(&blk->blkno, sizeof(BlockNumber));
}
@@ -1926,10 +1926,11 @@ err:
*/
void
XLogRecGetBlockTag(XLogReaderState *record, uint8 block_id,
RelFileNode *rnode, ForkNumber *forknum, BlockNumber *blknum)
RelFileLocator *rlocator, ForkNumber *forknum,
BlockNumber *blknum)
{
if (!XLogRecGetBlockTagExtended(record, block_id, rnode, forknum, blknum,
NULL))
if (!XLogRecGetBlockTagExtended(record, block_id, rlocator, forknum,
blknum, NULL))
{
#ifndef FRONTEND
elog(ERROR, "failed to locate backup block with ID %d in WAL record",
@@ -1945,13 +1946,13 @@ XLogRecGetBlockTag(XLogReaderState *record, uint8 block_id,
* Returns information about the block that a block reference refers to,
* optionally including the buffer that the block may already be in.
*
* If the WAL record contains a block reference with the given ID, *rnode,
* If the WAL record contains a block reference with the given ID, *rlocator,
* *forknum, *blknum and *prefetch_buffer are filled in (if not NULL), and
* returns true. Otherwise returns false.
*/
bool
XLogRecGetBlockTagExtended(XLogReaderState *record, uint8 block_id,
RelFileNode *rnode, ForkNumber *forknum,
RelFileLocator *rlocator, ForkNumber *forknum,
BlockNumber *blknum,
Buffer *prefetch_buffer)
{
@@ -1961,8 +1962,8 @@ XLogRecGetBlockTagExtended(XLogReaderState *record, uint8 block_id,
return false;
bkpb = &record->record->blocks[block_id];
if (rnode)
*rnode = bkpb->rnode;
if (rlocator)
*rlocator = bkpb->rlocator;
if (forknum)
*forknum = bkpb->forknum;
if (blknum)

View File

@@ -2166,24 +2166,26 @@ xlog_block_info(StringInfo buf, XLogReaderState *record)
/* decode block references */
for (block_id = 0; block_id <= XLogRecMaxBlockId(record); block_id++)
{
RelFileNode rnode;
RelFileLocator rlocator;
ForkNumber forknum;
BlockNumber blk;
if (!XLogRecGetBlockTagExtended(record, block_id,
&rnode, &forknum, &blk, NULL))
&rlocator, &forknum, &blk, NULL))
continue;
if (forknum != MAIN_FORKNUM)
appendStringInfo(buf, "; blkref #%d: rel %u/%u/%u, fork %u, blk %u",
block_id,
rnode.spcNode, rnode.dbNode, rnode.relNode,
rlocator.spcOid, rlocator.dbOid,
rlocator.relNumber,
forknum,
blk);
else
appendStringInfo(buf, "; blkref #%d: rel %u/%u/%u, blk %u",
block_id,
rnode.spcNode, rnode.dbNode, rnode.relNode,
rlocator.spcOid, rlocator.dbOid,
rlocator.relNumber,
blk);
if (XLogRecHasBlockImage(record, block_id))
appendStringInfoString(buf, " FPW");
@@ -2285,7 +2287,7 @@ static void
verifyBackupPageConsistency(XLogReaderState *record)
{
RmgrData rmgr = GetRmgr(XLogRecGetRmid(record));
RelFileNode rnode;
RelFileLocator rlocator;
ForkNumber forknum;
BlockNumber blkno;
int block_id;
@@ -2302,7 +2304,7 @@ verifyBackupPageConsistency(XLogReaderState *record)
Page page;
if (!XLogRecGetBlockTagExtended(record, block_id,
&rnode, &forknum, &blkno, NULL))
&rlocator, &forknum, &blkno, NULL))
{
/*
* WAL record doesn't contain a block reference with the given id.
@@ -2327,7 +2329,7 @@ verifyBackupPageConsistency(XLogReaderState *record)
* Read the contents from the current buffer and store it in a
* temporary page.
*/
buf = XLogReadBufferExtended(rnode, forknum, blkno,
buf = XLogReadBufferExtended(rlocator, forknum, blkno,
RBM_NORMAL_NO_LOG,
InvalidBuffer);
if (!BufferIsValid(buf))
@@ -2377,7 +2379,7 @@ verifyBackupPageConsistency(XLogReaderState *record)
{
elog(FATAL,
"inconsistent page found, rel %u/%u/%u, forknum %u, blkno %u",
rnode.spcNode, rnode.dbNode, rnode.relNode,
rlocator.spcOid, rlocator.dbOid, rlocator.relNumber,
forknum, blkno);
}
}

View File

@@ -67,7 +67,7 @@ HotStandbyState standbyState = STANDBY_DISABLED;
*/
typedef struct xl_invalid_page_key
{
RelFileNode node; /* the relation */
RelFileLocator locator; /* the relation */
ForkNumber forkno; /* the fork number */
BlockNumber blkno; /* the page */
} xl_invalid_page_key;
@@ -86,10 +86,10 @@ static int read_local_xlog_page_guts(XLogReaderState *state, XLogRecPtr targetPa
/* Report a reference to an invalid page */
static void
report_invalid_page(int elevel, RelFileNode node, ForkNumber forkno,
report_invalid_page(int elevel, RelFileLocator locator, ForkNumber forkno,
BlockNumber blkno, bool present)
{
char *path = relpathperm(node, forkno);
char *path = relpathperm(locator, forkno);
if (present)
elog(elevel, "page %u of relation %s is uninitialized",
@@ -102,7 +102,7 @@ report_invalid_page(int elevel, RelFileNode node, ForkNumber forkno,
/* Log a reference to an invalid page */
static void
log_invalid_page(RelFileNode node, ForkNumber forkno, BlockNumber blkno,
log_invalid_page(RelFileLocator locator, ForkNumber forkno, BlockNumber blkno,
bool present)
{
xl_invalid_page_key key;
@@ -119,7 +119,7 @@ log_invalid_page(RelFileNode node, ForkNumber forkno, BlockNumber blkno,
*/
if (reachedConsistency)
{
report_invalid_page(WARNING, node, forkno, blkno, present);
report_invalid_page(WARNING, locator, forkno, blkno, present);
elog(ignore_invalid_pages ? WARNING : PANIC,
"WAL contains references to invalid pages");
}
@@ -130,7 +130,7 @@ log_invalid_page(RelFileNode node, ForkNumber forkno, BlockNumber blkno,
* something about the XLOG record that generated the reference).
*/
if (message_level_is_interesting(DEBUG1))
report_invalid_page(DEBUG1, node, forkno, blkno, present);
report_invalid_page(DEBUG1, locator, forkno, blkno, present);
if (invalid_page_tab == NULL)
{
@@ -147,7 +147,7 @@ log_invalid_page(RelFileNode node, ForkNumber forkno, BlockNumber blkno,
}
/* we currently assume xl_invalid_page_key contains no padding */
key.node = node;
key.locator = locator;
key.forkno = forkno;
key.blkno = blkno;
hentry = (xl_invalid_page *)
@@ -166,7 +166,8 @@ log_invalid_page(RelFileNode node, ForkNumber forkno, BlockNumber blkno,
/* Forget any invalid pages >= minblkno, because they've been dropped */
static void
forget_invalid_pages(RelFileNode node, ForkNumber forkno, BlockNumber minblkno)
forget_invalid_pages(RelFileLocator locator, ForkNumber forkno,
BlockNumber minblkno)
{
HASH_SEQ_STATUS status;
xl_invalid_page *hentry;
@@ -178,13 +179,13 @@ forget_invalid_pages(RelFileNode node, ForkNumber forkno, BlockNumber minblkno)
while ((hentry = (xl_invalid_page *) hash_seq_search(&status)) != NULL)
{
if (RelFileNodeEquals(hentry->key.node, node) &&
if (RelFileLocatorEquals(hentry->key.locator, locator) &&
hentry->key.forkno == forkno &&
hentry->key.blkno >= minblkno)
{
if (message_level_is_interesting(DEBUG2))
{
char *path = relpathperm(hentry->key.node, forkno);
char *path = relpathperm(hentry->key.locator, forkno);
elog(DEBUG2, "page %u of relation %s has been dropped",
hentry->key.blkno, path);
@@ -213,11 +214,11 @@ forget_invalid_pages_db(Oid dbid)
while ((hentry = (xl_invalid_page *) hash_seq_search(&status)) != NULL)
{
if (hentry->key.node.dbNode == dbid)
if (hentry->key.locator.dbOid == dbid)
{
if (message_level_is_interesting(DEBUG2))
{
char *path = relpathperm(hentry->key.node, hentry->key.forkno);
char *path = relpathperm(hentry->key.locator, hentry->key.forkno);
elog(DEBUG2, "page %u of relation %s has been dropped",
hentry->key.blkno, path);
@@ -261,7 +262,7 @@ XLogCheckInvalidPages(void)
*/
while ((hentry = (xl_invalid_page *) hash_seq_search(&status)) != NULL)
{
report_invalid_page(WARNING, hentry->key.node, hentry->key.forkno,
report_invalid_page(WARNING, hentry->key.locator, hentry->key.forkno,
hentry->key.blkno, hentry->present);
foundone = true;
}
@@ -356,7 +357,7 @@ XLogReadBufferForRedoExtended(XLogReaderState *record,
Buffer *buf)
{
XLogRecPtr lsn = record->EndRecPtr;
RelFileNode rnode;
RelFileLocator rlocator;
ForkNumber forknum;
BlockNumber blkno;
Buffer prefetch_buffer;
@@ -364,7 +365,7 @@ XLogReadBufferForRedoExtended(XLogReaderState *record,
bool zeromode;
bool willinit;
if (!XLogRecGetBlockTagExtended(record, block_id, &rnode, &forknum, &blkno,
if (!XLogRecGetBlockTagExtended(record, block_id, &rlocator, &forknum, &blkno,
&prefetch_buffer))
{
/* Caller specified a bogus block_id */
@@ -387,7 +388,7 @@ XLogReadBufferForRedoExtended(XLogReaderState *record,
if (XLogRecBlockImageApply(record, block_id))
{
Assert(XLogRecHasBlockImage(record, block_id));
*buf = XLogReadBufferExtended(rnode, forknum, blkno,
*buf = XLogReadBufferExtended(rlocator, forknum, blkno,
get_cleanup_lock ? RBM_ZERO_AND_CLEANUP_LOCK : RBM_ZERO_AND_LOCK,
prefetch_buffer);
page = BufferGetPage(*buf);
@@ -418,7 +419,7 @@ XLogReadBufferForRedoExtended(XLogReaderState *record,
}
else
{
*buf = XLogReadBufferExtended(rnode, forknum, blkno, mode, prefetch_buffer);
*buf = XLogReadBufferExtended(rlocator, forknum, blkno, mode, prefetch_buffer);
if (BufferIsValid(*buf))
{
if (mode != RBM_ZERO_AND_LOCK && mode != RBM_ZERO_AND_CLEANUP_LOCK)
@@ -468,7 +469,7 @@ XLogReadBufferForRedoExtended(XLogReaderState *record,
* they will be invisible to tools that need to know which pages are modified.
*/
Buffer
XLogReadBufferExtended(RelFileNode rnode, ForkNumber forknum,
XLogReadBufferExtended(RelFileLocator rlocator, ForkNumber forknum,
BlockNumber blkno, ReadBufferMode mode,
Buffer recent_buffer)
{
@@ -481,14 +482,14 @@ XLogReadBufferExtended(RelFileNode rnode, ForkNumber forknum,
/* Do we have a clue where the buffer might be already? */
if (BufferIsValid(recent_buffer) &&
mode == RBM_NORMAL &&
ReadRecentBuffer(rnode, forknum, blkno, recent_buffer))
ReadRecentBuffer(rlocator, forknum, blkno, recent_buffer))
{
buffer = recent_buffer;
goto recent_buffer_fast_path;
}
/* Open the relation at smgr level */
smgr = smgropen(rnode, InvalidBackendId);
smgr = smgropen(rlocator, InvalidBackendId);
/*
* Create the target file if it doesn't already exist. This lets us cope
@@ -505,7 +506,7 @@ XLogReadBufferExtended(RelFileNode rnode, ForkNumber forknum,
if (blkno < lastblock)
{
/* page exists in file */
buffer = ReadBufferWithoutRelcache(rnode, forknum, blkno,
buffer = ReadBufferWithoutRelcache(rlocator, forknum, blkno,
mode, NULL, true);
}
else
@@ -513,7 +514,7 @@ XLogReadBufferExtended(RelFileNode rnode, ForkNumber forknum,
/* hm, page doesn't exist in file */
if (mode == RBM_NORMAL)
{
log_invalid_page(rnode, forknum, blkno, false);
log_invalid_page(rlocator, forknum, blkno, false);
return InvalidBuffer;
}
if (mode == RBM_NORMAL_NO_LOG)
@@ -530,7 +531,7 @@ XLogReadBufferExtended(RelFileNode rnode, ForkNumber forknum,
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
ReleaseBuffer(buffer);
}
buffer = ReadBufferWithoutRelcache(rnode, forknum,
buffer = ReadBufferWithoutRelcache(rlocator, forknum,
P_NEW, mode, NULL, true);
}
while (BufferGetBlockNumber(buffer) < blkno);
@@ -540,7 +541,7 @@ XLogReadBufferExtended(RelFileNode rnode, ForkNumber forknum,
if (mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK)
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
ReleaseBuffer(buffer);
buffer = ReadBufferWithoutRelcache(rnode, forknum, blkno,
buffer = ReadBufferWithoutRelcache(rlocator, forknum, blkno,
mode, NULL, true);
}
}
@@ -559,7 +560,7 @@ recent_buffer_fast_path:
if (PageIsNew(page))
{
ReleaseBuffer(buffer);
log_invalid_page(rnode, forknum, blkno, true);
log_invalid_page(rlocator, forknum, blkno, true);
return InvalidBuffer;
}
}
@@ -594,7 +595,7 @@ typedef FakeRelCacheEntryData *FakeRelCacheEntry;
* Caller must free the returned entry with FreeFakeRelcacheEntry().
*/
Relation
CreateFakeRelcacheEntry(RelFileNode rnode)
CreateFakeRelcacheEntry(RelFileLocator rlocator)
{
FakeRelCacheEntry fakeentry;
Relation rel;
@@ -604,7 +605,7 @@ CreateFakeRelcacheEntry(RelFileNode rnode)
rel = (Relation) fakeentry;
rel->rd_rel = &fakeentry->pgc;
rel->rd_node = rnode;
rel->rd_locator = rlocator;
/*
* We will never be working with temp rels during recovery or while
@@ -615,18 +616,18 @@ CreateFakeRelcacheEntry(RelFileNode rnode)
/* It must be a permanent table here */
rel->rd_rel->relpersistence = RELPERSISTENCE_PERMANENT;
/* We don't know the name of the relation; use relfilenode instead */
sprintf(RelationGetRelationName(rel), "%u", rnode.relNode);
/* We don't know the name of the relation; use relfilenumber instead */
sprintf(RelationGetRelationName(rel), "%u", rlocator.relNumber);
/*
* We set up the lockRelId in case anything tries to lock the dummy
* relation. Note that this is fairly bogus since relNode may be
* relation. Note that this is fairly bogus since relNumber may be
* different from the relation's OID. It shouldn't really matter though.
* In recovery, we are running by ourselves and can't have any lock
* conflicts. While syncing, we already hold AccessExclusiveLock.
*/
rel->rd_lockInfo.lockRelId.dbId = rnode.dbNode;
rel->rd_lockInfo.lockRelId.relId = rnode.relNode;
rel->rd_lockInfo.lockRelId.dbId = rlocator.dbOid;
rel->rd_lockInfo.lockRelId.relId = rlocator.relNumber;
rel->rd_smgr = NULL;
@@ -652,9 +653,9 @@ FreeFakeRelcacheEntry(Relation fakerel)
* any open "invalid-page" records for the relation.
*/
void
XLogDropRelation(RelFileNode rnode, ForkNumber forknum)
XLogDropRelation(RelFileLocator rlocator, ForkNumber forknum)
{
forget_invalid_pages(rnode, forknum, 0);
forget_invalid_pages(rlocator, forknum, 0);
}
/*
@@ -682,10 +683,10 @@ XLogDropDatabase(Oid dbid)
* We need to clean up any open "invalid-page" records for the dropped pages.
*/
void
XLogTruncateRelation(RelFileNode rnode, ForkNumber forkNum,
XLogTruncateRelation(RelFileLocator rlocator, ForkNumber forkNum,
BlockNumber nblocks)
{
forget_invalid_pages(rnode, forkNum, nblocks);
forget_invalid_pages(rlocator, forkNum, nblocks);
}
/*