
Change internal RelFileNode references to RelFileNumber or RelFileLocator.

We have been using the term RelFileNode to refer to either (1) the
integer that is used to name the sequence of files for a certain relation
within the directory set aside for that tablespace/database combination;
or (2) that value plus the OIDs of the tablespace and database; or
occasionally (3) the whole series of files created for a relation
based on those values. Using the same name for more than one thing is
confusing.

Replace RelFileNode with RelFileNumber when we're talking about just the
single number, i.e. (1) from above, and with RelFileLocator when we're
talking about all the things that are needed to locate a relation's files
on disk, i.e. (2) from above. In the places where we refer to (3) as
a relfilenode, instead refer to "relation storage".

Since there is a ton of SQL code in the world that knows about
pg_class.relfilenode, don't change the name of that column, or of other
SQL-facing things that derive their name from it.

On the other hand, do adjust closely-related internal terminology. For
example, the structure member names dbNode and spcNode appear to be
derived from the fact that the structure itself was called RelFileNode,
so change those to dbOid and spcOid. Likewise, various variables with
names like rnode and relnode get renamed appropriately, according to
how they're being used in context.

Hopefully, this is clearer than before. It is also preparation for
future patches that intend to widen the relfilenumber field from its
current width of 32 bits. Variables that store a relfilenumber are now
declared as type RelFileNumber rather than type Oid; right now, these
are the same, but that can now more easily be changed.
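
For reference, a minimal sketch of the renamed types, reconstructed from the
member names used throughout this patch (the authoritative definitions live in
the headers this commit touches, such as storage/relfilelocator.h, and their
exact layout and comments may differ):

    /* (1) Just the number that names a relation's files on disk.  Still the
     * same width as an Oid, but now a distinct name so it can be widened. */
    typedef Oid RelFileNumber;

    /* (2) Everything needed to locate a relation's files on disk. */
    typedef struct RelFileLocator
    {
        Oid             spcOid;     /* tablespace OID (was spcNode) */
        Oid             dbOid;      /* database OID (was dbNode) */
        RelFileNumber   relNumber;  /* relation file number (was relNode) */
    } RelFileLocator;

    /* Locator plus owning backend, for temporary relations
     * (was RelFileNodeBackend, whose "node" member is now "locator"). */
    typedef struct RelFileLocatorBackend
    {
        RelFileLocator  locator;
        BackendId       backend;
    } RelFileLocatorBackend;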

Dilip Kumar, per an idea from me. Reviewed also by Andres Freund.
I fixed some whitespace issues, changed a couple of words in a
comment, and made one other minor correction.

Discussion: http://postgr.es/m/CA+TgmoamOtXbVAQf9hWFzonUo6bhhjS6toZQd7HZ-pmojtAmag@mail.gmail.com
Discussion: http://postgr.es/m/CA+Tgmobp7+7kmi4gkq7Y+4AM9fTvL+O1oQ4-5gFTT+6Ng-dQ=g@mail.gmail.com
Discussion: http://postgr.es/m/CAFiTN-vTe79M8uDH1yprOU64MNFE+R3ODRuA+JWf27JbhY4hJw@mail.gmail.com
Author: Robert Haas
Date:   2022-07-06 11:39:09 -04:00
Parent: 7775c748db
Commit: b0a55e4329

138 changed files with 1640 additions and 1606 deletions

src/backend/storage/buffer/bufmgr.c

@@ -121,12 +121,12 @@ typedef struct CkptTsStatus
* Type for array used to sort SMgrRelations
*
* FlushRelationsAllBuffers shares the same comparator function with
* DropRelFileNodesAllBuffers. Pointer to this struct and RelFileNode must be
* DropRelFileLocatorsAllBuffers. Pointer to this struct and RelFileLocator must be
* compatible.
*/
typedef struct SMgrSortArray
{
RelFileNode rnode; /* This must be the first member */
RelFileLocator rlocator; /* This must be the first member */
SMgrRelation srel;
} SMgrSortArray;
@@ -483,16 +483,16 @@ static BufferDesc *BufferAlloc(SMgrRelation smgr,
BufferAccessStrategy strategy,
bool *foundPtr);
static void FlushBuffer(BufferDesc *buf, SMgrRelation reln);
static void FindAndDropRelFileNodeBuffers(RelFileNode rnode,
ForkNumber forkNum,
BlockNumber nForkBlock,
BlockNumber firstDelBlock);
static void FindAndDropRelFileLocatorBuffers(RelFileLocator rlocator,
ForkNumber forkNum,
BlockNumber nForkBlock,
BlockNumber firstDelBlock);
static void RelationCopyStorageUsingBuffer(Relation src, Relation dst,
ForkNumber forkNum,
bool isunlogged);
static void AtProcExit_Buffers(int code, Datum arg);
static void CheckForBufferLeaks(void);
static int rnode_comparator(const void *p1, const void *p2);
static int rlocator_comparator(const void *p1, const void *p2);
static inline int buffertag_comparator(const BufferTag *a, const BufferTag *b);
static inline int ckpt_buforder_comparator(const CkptSortItem *a, const CkptSortItem *b);
static int ts_ckpt_progress_comparator(Datum a, Datum b, void *arg);
@@ -515,7 +515,7 @@ PrefetchSharedBuffer(SMgrRelation smgr_reln,
Assert(BlockNumberIsValid(blockNum));
/* create a tag so we can lookup the buffer */
INIT_BUFFERTAG(newTag, smgr_reln->smgr_rnode.node,
INIT_BUFFERTAG(newTag, smgr_reln->smgr_rlocator.locator,
forkNum, blockNum);
/* determine its hash code and partition lock ID */
@@ -620,7 +620,7 @@ PrefetchBuffer(Relation reln, ForkNumber forkNum, BlockNumber blockNum)
* tag. In that case, the buffer is pinned and the usage count is bumped.
*/
bool
ReadRecentBuffer(RelFileNode rnode, ForkNumber forkNum, BlockNumber blockNum,
ReadRecentBuffer(RelFileLocator rlocator, ForkNumber forkNum, BlockNumber blockNum,
Buffer recent_buffer)
{
BufferDesc *bufHdr;
@@ -632,7 +632,7 @@ ReadRecentBuffer(RelFileNode rnode, ForkNumber forkNum, BlockNumber blockNum,
ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
ReservePrivateRefCountEntry();
INIT_BUFFERTAG(tag, rnode, forkNum, blockNum);
INIT_BUFFERTAG(tag, rlocator, forkNum, blockNum);
if (BufferIsLocal(recent_buffer))
{
@@ -786,13 +786,13 @@ ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum,
* BackendId).
*/
Buffer
ReadBufferWithoutRelcache(RelFileNode rnode, ForkNumber forkNum,
ReadBufferWithoutRelcache(RelFileLocator rlocator, ForkNumber forkNum,
BlockNumber blockNum, ReadBufferMode mode,
BufferAccessStrategy strategy, bool permanent)
{
bool hit;
SMgrRelation smgr = smgropen(rnode, InvalidBackendId);
SMgrRelation smgr = smgropen(rlocator, InvalidBackendId);
return ReadBuffer_common(smgr, permanent ? RELPERSISTENCE_PERMANENT :
RELPERSISTENCE_UNLOGGED, forkNum, blockNum,
@@ -824,10 +824,10 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
isExtend = (blockNum == P_NEW);
TRACE_POSTGRESQL_BUFFER_READ_START(forkNum, blockNum,
smgr->smgr_rnode.node.spcNode,
smgr->smgr_rnode.node.dbNode,
smgr->smgr_rnode.node.relNode,
smgr->smgr_rnode.backend,
smgr->smgr_rlocator.locator.spcOid,
smgr->smgr_rlocator.locator.dbOid,
smgr->smgr_rlocator.locator.relNumber,
smgr->smgr_rlocator.backend,
isExtend);
/* Substitute proper block number if caller asked for P_NEW */
@@ -839,7 +839,7 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg("cannot extend relation %s beyond %u blocks",
relpath(smgr->smgr_rnode, forkNum),
relpath(smgr->smgr_rlocator, forkNum),
P_NEW)));
}
@@ -886,10 +886,10 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
VacuumCostBalance += VacuumCostPageHit;
TRACE_POSTGRESQL_BUFFER_READ_DONE(forkNum, blockNum,
smgr->smgr_rnode.node.spcNode,
smgr->smgr_rnode.node.dbNode,
smgr->smgr_rnode.node.relNode,
smgr->smgr_rnode.backend,
smgr->smgr_rlocator.locator.spcOid,
smgr->smgr_rlocator.locator.dbOid,
smgr->smgr_rlocator.locator.relNumber,
smgr->smgr_rlocator.backend,
isExtend,
found);
@@ -926,7 +926,7 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
if (!PageIsNew((Page) bufBlock))
ereport(ERROR,
(errmsg("unexpected data beyond EOF in block %u of relation %s",
blockNum, relpath(smgr->smgr_rnode, forkNum)),
blockNum, relpath(smgr->smgr_rlocator, forkNum)),
errhint("This has been seen to occur with buggy kernels; consider updating your system.")));
/*
@@ -1028,7 +1028,7 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
(errcode(ERRCODE_DATA_CORRUPTED),
errmsg("invalid page in block %u of relation %s; zeroing out page",
blockNum,
relpath(smgr->smgr_rnode, forkNum))));
relpath(smgr->smgr_rlocator, forkNum))));
MemSet((char *) bufBlock, 0, BLCKSZ);
}
else
@@ -1036,7 +1036,7 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
(errcode(ERRCODE_DATA_CORRUPTED),
errmsg("invalid page in block %u of relation %s",
blockNum,
relpath(smgr->smgr_rnode, forkNum))));
relpath(smgr->smgr_rlocator, forkNum))));
}
}
}
@@ -1076,10 +1076,10 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
VacuumCostBalance += VacuumCostPageMiss;
TRACE_POSTGRESQL_BUFFER_READ_DONE(forkNum, blockNum,
smgr->smgr_rnode.node.spcNode,
smgr->smgr_rnode.node.dbNode,
smgr->smgr_rnode.node.relNode,
smgr->smgr_rnode.backend,
smgr->smgr_rlocator.locator.spcOid,
smgr->smgr_rlocator.locator.dbOid,
smgr->smgr_rlocator.locator.relNumber,
smgr->smgr_rlocator.backend,
isExtend,
found);
@@ -1124,7 +1124,7 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
uint32 buf_state;
/* create a tag so we can lookup the buffer */
INIT_BUFFERTAG(newTag, smgr->smgr_rnode.node, forkNum, blockNum);
INIT_BUFFERTAG(newTag, smgr->smgr_rlocator.locator, forkNum, blockNum);
/* determine its hash code and partition lock ID */
newHash = BufTableHashCode(&newTag);
@@ -1255,9 +1255,9 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
/* OK, do the I/O */
TRACE_POSTGRESQL_BUFFER_WRITE_DIRTY_START(forkNum, blockNum,
smgr->smgr_rnode.node.spcNode,
smgr->smgr_rnode.node.dbNode,
smgr->smgr_rnode.node.relNode);
smgr->smgr_rlocator.locator.spcOid,
smgr->smgr_rlocator.locator.dbOid,
smgr->smgr_rlocator.locator.relNumber);
FlushBuffer(buf, NULL);
LWLockRelease(BufferDescriptorGetContentLock(buf));
@@ -1266,9 +1266,9 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
&buf->tag);
TRACE_POSTGRESQL_BUFFER_WRITE_DIRTY_DONE(forkNum, blockNum,
smgr->smgr_rnode.node.spcNode,
smgr->smgr_rnode.node.dbNode,
smgr->smgr_rnode.node.relNode);
smgr->smgr_rlocator.locator.spcOid,
smgr->smgr_rlocator.locator.dbOid,
smgr->smgr_rlocator.locator.relNumber);
}
else
{
@@ -1647,7 +1647,7 @@ ReleaseAndReadBuffer(Buffer buffer,
{
bufHdr = GetLocalBufferDescriptor(-buffer - 1);
if (bufHdr->tag.blockNum == blockNum &&
RelFileNodeEquals(bufHdr->tag.rnode, relation->rd_node) &&
RelFileLocatorEquals(bufHdr->tag.rlocator, relation->rd_locator) &&
bufHdr->tag.forkNum == forkNum)
return buffer;
ResourceOwnerForgetBuffer(CurrentResourceOwner, buffer);
@@ -1658,7 +1658,7 @@ ReleaseAndReadBuffer(Buffer buffer,
bufHdr = GetBufferDescriptor(buffer - 1);
/* we have pin, so it's ok to examine tag without spinlock */
if (bufHdr->tag.blockNum == blockNum &&
RelFileNodeEquals(bufHdr->tag.rnode, relation->rd_node) &&
RelFileLocatorEquals(bufHdr->tag.rlocator, relation->rd_locator) &&
bufHdr->tag.forkNum == forkNum)
return buffer;
UnpinBuffer(bufHdr, true);
@@ -2000,8 +2000,8 @@ BufferSync(int flags)
item = &CkptBufferIds[num_to_scan++];
item->buf_id = buf_id;
item->tsId = bufHdr->tag.rnode.spcNode;
item->relNode = bufHdr->tag.rnode.relNode;
item->tsId = bufHdr->tag.rlocator.spcOid;
item->relNumber = bufHdr->tag.rlocator.relNumber;
item->forkNum = bufHdr->tag.forkNum;
item->blockNum = bufHdr->tag.blockNum;
}
@@ -2708,7 +2708,7 @@ PrintBufferLeakWarning(Buffer buffer)
}
/* theoretically we should lock the bufhdr here */
path = relpathbackend(buf->tag.rnode, backend, buf->tag.forkNum);
path = relpathbackend(buf->tag.rlocator, backend, buf->tag.forkNum);
buf_state = pg_atomic_read_u32(&buf->state);
elog(WARNING,
"buffer refcount leak: [%03d] "
@@ -2769,11 +2769,11 @@ BufferGetBlockNumber(Buffer buffer)
/*
* BufferGetTag
* Returns the relfilenode, fork number and block number associated with
* Returns the relfilelocator, fork number and block number associated with
* a buffer.
*/
void
BufferGetTag(Buffer buffer, RelFileNode *rnode, ForkNumber *forknum,
BufferGetTag(Buffer buffer, RelFileLocator *rlocator, ForkNumber *forknum,
BlockNumber *blknum)
{
BufferDesc *bufHdr;
@@ -2787,7 +2787,7 @@ BufferGetTag(Buffer buffer, RelFileNode *rnode, ForkNumber *forknum,
bufHdr = GetBufferDescriptor(buffer - 1);
/* pinned, so OK to read tag without spinlock */
*rnode = bufHdr->tag.rnode;
*rlocator = bufHdr->tag.rlocator;
*forknum = bufHdr->tag.forkNum;
*blknum = bufHdr->tag.blockNum;
}
@@ -2838,13 +2838,13 @@ FlushBuffer(BufferDesc *buf, SMgrRelation reln)
/* Find smgr relation for buffer */
if (reln == NULL)
reln = smgropen(buf->tag.rnode, InvalidBackendId);
reln = smgropen(buf->tag.rlocator, InvalidBackendId);
TRACE_POSTGRESQL_BUFFER_FLUSH_START(buf->tag.forkNum,
buf->tag.blockNum,
reln->smgr_rnode.node.spcNode,
reln->smgr_rnode.node.dbNode,
reln->smgr_rnode.node.relNode);
reln->smgr_rlocator.locator.spcOid,
reln->smgr_rlocator.locator.dbOid,
reln->smgr_rlocator.locator.relNumber);
buf_state = LockBufHdr(buf);
@@ -2922,9 +2922,9 @@ FlushBuffer(BufferDesc *buf, SMgrRelation reln)
TRACE_POSTGRESQL_BUFFER_FLUSH_DONE(buf->tag.forkNum,
buf->tag.blockNum,
reln->smgr_rnode.node.spcNode,
reln->smgr_rnode.node.dbNode,
reln->smgr_rnode.node.relNode);
reln->smgr_rlocator.locator.spcOid,
reln->smgr_rlocator.locator.dbOid,
reln->smgr_rlocator.locator.relNumber);
/* Pop the error context stack */
error_context_stack = errcallback.previous;
@@ -3026,7 +3026,7 @@ BufferGetLSNAtomic(Buffer buffer)
}
/* ---------------------------------------------------------------------
* DropRelFileNodeBuffers
* DropRelFileLocatorBuffers
*
* This function removes from the buffer pool all the pages of the
* specified relation forks that have block numbers >= firstDelBlock.
@@ -3047,25 +3047,25 @@ BufferGetLSNAtomic(Buffer buffer)
* --------------------------------------------------------------------
*/
void
DropRelFileNodeBuffers(SMgrRelation smgr_reln, ForkNumber *forkNum,
int nforks, BlockNumber *firstDelBlock)
DropRelFileLocatorBuffers(SMgrRelation smgr_reln, ForkNumber *forkNum,
int nforks, BlockNumber *firstDelBlock)
{
int i;
int j;
RelFileNodeBackend rnode;
RelFileLocatorBackend rlocator;
BlockNumber nForkBlock[MAX_FORKNUM];
uint64 nBlocksToInvalidate = 0;
rnode = smgr_reln->smgr_rnode;
rlocator = smgr_reln->smgr_rlocator;
/* If it's a local relation, it's localbuf.c's problem. */
if (RelFileNodeBackendIsTemp(rnode))
if (RelFileLocatorBackendIsTemp(rlocator))
{
if (rnode.backend == MyBackendId)
if (rlocator.backend == MyBackendId)
{
for (j = 0; j < nforks; j++)
DropRelFileNodeLocalBuffers(rnode.node, forkNum[j],
firstDelBlock[j]);
DropRelFileLocatorLocalBuffers(rlocator.locator, forkNum[j],
firstDelBlock[j]);
}
return;
}
@@ -3115,8 +3115,8 @@ DropRelFileNodeBuffers(SMgrRelation smgr_reln, ForkNumber *forkNum,
nBlocksToInvalidate < BUF_DROP_FULL_SCAN_THRESHOLD)
{
for (j = 0; j < nforks; j++)
FindAndDropRelFileNodeBuffers(rnode.node, forkNum[j],
nForkBlock[j], firstDelBlock[j]);
FindAndDropRelFileLocatorBuffers(rlocator.locator, forkNum[j],
nForkBlock[j], firstDelBlock[j]);
return;
}
@@ -3138,17 +3138,17 @@ DropRelFileNodeBuffers(SMgrRelation smgr_reln, ForkNumber *forkNum,
* false positives are safe because we'll recheck after getting the
* buffer lock.
*
* We could check forkNum and blockNum as well as the rnode, but the
* incremental win from doing so seems small.
* We could check forkNum and blockNum as well as the rlocator, but
* the incremental win from doing so seems small.
*/
if (!RelFileNodeEquals(bufHdr->tag.rnode, rnode.node))
if (!RelFileLocatorEquals(bufHdr->tag.rlocator, rlocator.locator))
continue;
buf_state = LockBufHdr(bufHdr);
for (j = 0; j < nforks; j++)
{
if (RelFileNodeEquals(bufHdr->tag.rnode, rnode.node) &&
if (RelFileLocatorEquals(bufHdr->tag.rlocator, rlocator.locator) &&
bufHdr->tag.forkNum == forkNum[j] &&
bufHdr->tag.blockNum >= firstDelBlock[j])
{
@@ -3162,16 +3162,16 @@ DropRelFileNodeBuffers(SMgrRelation smgr_reln, ForkNumber *forkNum,
}
/* ---------------------------------------------------------------------
* DropRelFileNodesAllBuffers
* DropRelFileLocatorsAllBuffers
*
* This function removes from the buffer pool all the pages of all
* forks of the specified relations. It's equivalent to calling
* DropRelFileNodeBuffers once per fork per relation with
* DropRelFileLocatorBuffers once per fork per relation with
* firstDelBlock = 0.
* --------------------------------------------------------------------
*/
void
DropRelFileNodesAllBuffers(SMgrRelation *smgr_reln, int nnodes)
DropRelFileLocatorsAllBuffers(SMgrRelation *smgr_reln, int nlocators)
{
int i;
int j;
@@ -3179,22 +3179,22 @@ DropRelFileNodesAllBuffers(SMgrRelation *smgr_reln, int nnodes)
SMgrRelation *rels;
BlockNumber (*block)[MAX_FORKNUM + 1];
uint64 nBlocksToInvalidate = 0;
RelFileNode *nodes;
RelFileLocator *locators;
bool cached = true;
bool use_bsearch;
if (nnodes == 0)
if (nlocators == 0)
return;
rels = palloc(sizeof(SMgrRelation) * nnodes); /* non-local relations */
rels = palloc(sizeof(SMgrRelation) * nlocators); /* non-local relations */
/* If it's a local relation, it's localbuf.c's problem. */
for (i = 0; i < nnodes; i++)
for (i = 0; i < nlocators; i++)
{
if (RelFileNodeBackendIsTemp(smgr_reln[i]->smgr_rnode))
if (RelFileLocatorBackendIsTemp(smgr_reln[i]->smgr_rlocator))
{
if (smgr_reln[i]->smgr_rnode.backend == MyBackendId)
DropRelFileNodeAllLocalBuffers(smgr_reln[i]->smgr_rnode.node);
if (smgr_reln[i]->smgr_rlocator.backend == MyBackendId)
DropRelFileLocatorAllLocalBuffers(smgr_reln[i]->smgr_rlocator.locator);
}
else
rels[n++] = smgr_reln[i];
@@ -3219,7 +3219,7 @@ DropRelFileNodesAllBuffers(SMgrRelation *smgr_reln, int nnodes)
/*
* We can avoid scanning the entire buffer pool if we know the exact size
* of each of the given relation forks. See DropRelFileNodeBuffers.
* of each of the given relation forks. See DropRelFileLocatorBuffers.
*/
for (i = 0; i < n && cached; i++)
{
@@ -3257,8 +3257,8 @@ DropRelFileNodesAllBuffers(SMgrRelation *smgr_reln, int nnodes)
continue;
/* drop all the buffers for a particular relation fork */
FindAndDropRelFileNodeBuffers(rels[i]->smgr_rnode.node,
j, block[i][j], 0);
FindAndDropRelFileLocatorBuffers(rels[i]->smgr_rlocator.locator,
j, block[i][j], 0);
}
}
@@ -3268,9 +3268,9 @@ DropRelFileNodesAllBuffers(SMgrRelation *smgr_reln, int nnodes)
}
pfree(block);
nodes = palloc(sizeof(RelFileNode) * n); /* non-local relations */
locators = palloc(sizeof(RelFileLocator) * n); /* non-local relations */
for (i = 0; i < n; i++)
nodes[i] = rels[i]->smgr_rnode.node;
locators[i] = rels[i]->smgr_rlocator.locator;
/*
* For low number of relations to drop just use a simple walk through, to
@@ -3280,19 +3280,19 @@ DropRelFileNodesAllBuffers(SMgrRelation *smgr_reln, int nnodes)
*/
use_bsearch = n > RELS_BSEARCH_THRESHOLD;
/* sort the list of rnodes if necessary */
/* sort the list of rlocators if necessary */
if (use_bsearch)
pg_qsort(nodes, n, sizeof(RelFileNode), rnode_comparator);
pg_qsort(locators, n, sizeof(RelFileLocator), rlocator_comparator);
for (i = 0; i < NBuffers; i++)
{
RelFileNode *rnode = NULL;
RelFileLocator *rlocator = NULL;
BufferDesc *bufHdr = GetBufferDescriptor(i);
uint32 buf_state;
/*
* As in DropRelFileNodeBuffers, an unlocked precheck should be safe
* and saves some cycles.
* As in DropRelFileLocatorBuffers, an unlocked precheck should be
* safe and saves some cycles.
*/
if (!use_bsearch)
@@ -3301,37 +3301,37 @@ DropRelFileNodesAllBuffers(SMgrRelation *smgr_reln, int nnodes)
for (j = 0; j < n; j++)
{
if (RelFileNodeEquals(bufHdr->tag.rnode, nodes[j]))
if (RelFileLocatorEquals(bufHdr->tag.rlocator, locators[j]))
{
rnode = &nodes[j];
rlocator = &locators[j];
break;
}
}
}
else
{
rnode = bsearch((const void *) &(bufHdr->tag.rnode),
nodes, n, sizeof(RelFileNode),
rnode_comparator);
rlocator = bsearch((const void *) &(bufHdr->tag.rlocator),
locators, n, sizeof(RelFileLocator),
rlocator_comparator);
}
/* buffer doesn't belong to any of the given relfilenodes; skip it */
if (rnode == NULL)
/* buffer doesn't belong to any of the given relfilelocators; skip it */
if (rlocator == NULL)
continue;
buf_state = LockBufHdr(bufHdr);
if (RelFileNodeEquals(bufHdr->tag.rnode, (*rnode)))
if (RelFileLocatorEquals(bufHdr->tag.rlocator, (*rlocator)))
InvalidateBuffer(bufHdr); /* releases spinlock */
else
UnlockBufHdr(bufHdr, buf_state);
}
pfree(nodes);
pfree(locators);
pfree(rels);
}
/* ---------------------------------------------------------------------
* FindAndDropRelFileNodeBuffers
* FindAndDropRelFileLocatorBuffers
*
* This function performs look up in BufMapping table and removes from the
* buffer pool all the pages of the specified relation fork that has block
@@ -3340,9 +3340,9 @@ DropRelFileNodesAllBuffers(SMgrRelation *smgr_reln, int nnodes)
* --------------------------------------------------------------------
*/
static void
FindAndDropRelFileNodeBuffers(RelFileNode rnode, ForkNumber forkNum,
BlockNumber nForkBlock,
BlockNumber firstDelBlock)
FindAndDropRelFileLocatorBuffers(RelFileLocator rlocator, ForkNumber forkNum,
BlockNumber nForkBlock,
BlockNumber firstDelBlock)
{
BlockNumber curBlock;
@@ -3356,7 +3356,7 @@ FindAndDropRelFileNodeBuffers(RelFileNode rnode, ForkNumber forkNum,
uint32 buf_state;
/* create a tag so we can lookup the buffer */
INIT_BUFFERTAG(bufTag, rnode, forkNum, curBlock);
INIT_BUFFERTAG(bufTag, rlocator, forkNum, curBlock);
/* determine its hash code and partition lock ID */
bufHash = BufTableHashCode(&bufTag);
@@ -3380,7 +3380,7 @@ FindAndDropRelFileNodeBuffers(RelFileNode rnode, ForkNumber forkNum,
*/
buf_state = LockBufHdr(bufHdr);
if (RelFileNodeEquals(bufHdr->tag.rnode, rnode) &&
if (RelFileLocatorEquals(bufHdr->tag.rlocator, rlocator) &&
bufHdr->tag.forkNum == forkNum &&
bufHdr->tag.blockNum >= firstDelBlock)
InvalidateBuffer(bufHdr); /* releases spinlock */
@@ -3397,7 +3397,7 @@ FindAndDropRelFileNodeBuffers(RelFileNode rnode, ForkNumber forkNum,
* bothering to write them out first. This is used when we destroy a
* database, to avoid trying to flush data to disk when the directory
* tree no longer exists. Implementation is pretty similar to
* DropRelFileNodeBuffers() which is for destroying just one relation.
* DropRelFileLocatorBuffers() which is for destroying just one relation.
* --------------------------------------------------------------------
*/
void
@@ -3416,14 +3416,14 @@ DropDatabaseBuffers(Oid dbid)
uint32 buf_state;
/*
* As in DropRelFileNodeBuffers, an unlocked precheck should be safe
* and saves some cycles.
* As in DropRelFileLocatorBuffers, an unlocked precheck should be
* safe and saves some cycles.
*/
if (bufHdr->tag.rnode.dbNode != dbid)
if (bufHdr->tag.rlocator.dbOid != dbid)
continue;
buf_state = LockBufHdr(bufHdr);
if (bufHdr->tag.rnode.dbNode == dbid)
if (bufHdr->tag.rlocator.dbOid == dbid)
InvalidateBuffer(bufHdr); /* releases spinlock */
else
UnlockBufHdr(bufHdr, buf_state);
@@ -3453,7 +3453,7 @@ PrintBufferDescs(void)
"[%02d] (freeNext=%d, rel=%s, "
"blockNum=%u, flags=0x%x, refcount=%u %d)",
i, buf->freeNext,
relpathbackend(buf->tag.rnode, InvalidBackendId, buf->tag.forkNum),
relpathbackend(buf->tag.rlocator, InvalidBackendId, buf->tag.forkNum),
buf->tag.blockNum, buf->flags,
buf->refcount, GetPrivateRefCount(b));
}
@@ -3478,7 +3478,7 @@ PrintPinnedBufs(void)
"[%02d] (freeNext=%d, rel=%s, "
"blockNum=%u, flags=0x%x, refcount=%u %d)",
i, buf->freeNext,
relpathperm(buf->tag.rnode, buf->tag.forkNum),
relpathperm(buf->tag.rlocator, buf->tag.forkNum),
buf->tag.blockNum, buf->flags,
buf->refcount, GetPrivateRefCount(b));
}
@@ -3517,7 +3517,7 @@ FlushRelationBuffers(Relation rel)
uint32 buf_state;
bufHdr = GetLocalBufferDescriptor(i);
if (RelFileNodeEquals(bufHdr->tag.rnode, rel->rd_node) &&
if (RelFileLocatorEquals(bufHdr->tag.rlocator, rel->rd_locator) &&
((buf_state = pg_atomic_read_u32(&bufHdr->state)) &
(BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
{
@@ -3561,16 +3561,16 @@ FlushRelationBuffers(Relation rel)
bufHdr = GetBufferDescriptor(i);
/*
* As in DropRelFileNodeBuffers, an unlocked precheck should be safe
* and saves some cycles.
* As in DropRelFileLocatorBuffers, an unlocked precheck should be
* safe and saves some cycles.
*/
if (!RelFileNodeEquals(bufHdr->tag.rnode, rel->rd_node))
if (!RelFileLocatorEquals(bufHdr->tag.rlocator, rel->rd_locator))
continue;
ReservePrivateRefCountEntry();
buf_state = LockBufHdr(bufHdr);
if (RelFileNodeEquals(bufHdr->tag.rnode, rel->rd_node) &&
if (RelFileLocatorEquals(bufHdr->tag.rlocator, rel->rd_locator) &&
(buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
{
PinBuffer_Locked(bufHdr);
@@ -3608,21 +3608,21 @@ FlushRelationsAllBuffers(SMgrRelation *smgrs, int nrels)
for (i = 0; i < nrels; i++)
{
Assert(!RelFileNodeBackendIsTemp(smgrs[i]->smgr_rnode));
Assert(!RelFileLocatorBackendIsTemp(smgrs[i]->smgr_rlocator));
srels[i].rnode = smgrs[i]->smgr_rnode.node;
srels[i].rlocator = smgrs[i]->smgr_rlocator.locator;
srels[i].srel = smgrs[i];
}
/*
* Save the bsearch overhead for low number of relations to sync. See
* DropRelFileNodesAllBuffers for details.
* DropRelFileLocatorsAllBuffers for details.
*/
use_bsearch = nrels > RELS_BSEARCH_THRESHOLD;
/* sort the list of SMgrRelations if necessary */
if (use_bsearch)
pg_qsort(srels, nrels, sizeof(SMgrSortArray), rnode_comparator);
pg_qsort(srels, nrels, sizeof(SMgrSortArray), rlocator_comparator);
/* Make sure we can handle the pin inside the loop */
ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
@@ -3634,8 +3634,8 @@ FlushRelationsAllBuffers(SMgrRelation *smgrs, int nrels)
uint32 buf_state;
/*
* As in DropRelFileNodeBuffers, an unlocked precheck should be safe
* and saves some cycles.
* As in DropRelFileLocatorBuffers, an unlocked precheck should be
* safe and saves some cycles.
*/
if (!use_bsearch)
@@ -3644,7 +3644,7 @@ FlushRelationsAllBuffers(SMgrRelation *smgrs, int nrels)
for (j = 0; j < nrels; j++)
{
if (RelFileNodeEquals(bufHdr->tag.rnode, srels[j].rnode))
if (RelFileLocatorEquals(bufHdr->tag.rlocator, srels[j].rlocator))
{
srelent = &srels[j];
break;
@@ -3653,19 +3653,19 @@ FlushRelationsAllBuffers(SMgrRelation *smgrs, int nrels)
}
else
{
srelent = bsearch((const void *) &(bufHdr->tag.rnode),
srelent = bsearch((const void *) &(bufHdr->tag.rlocator),
srels, nrels, sizeof(SMgrSortArray),
rnode_comparator);
rlocator_comparator);
}
/* buffer doesn't belong to any of the given relfilenodes; skip it */
/* buffer doesn't belong to any of the given relfilelocators; skip it */
if (srelent == NULL)
continue;
ReservePrivateRefCountEntry();
buf_state = LockBufHdr(bufHdr);
if (RelFileNodeEquals(bufHdr->tag.rnode, srelent->rnode) &&
if (RelFileLocatorEquals(bufHdr->tag.rlocator, srelent->rlocator) &&
(buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
{
PinBuffer_Locked(bufHdr);
@@ -3729,7 +3729,7 @@ RelationCopyStorageUsingBuffer(Relation src, Relation dst, ForkNumber forkNum,
CHECK_FOR_INTERRUPTS();
/* Read block from source relation. */
srcBuf = ReadBufferWithoutRelcache(src->rd_node, forkNum, blkno,
srcBuf = ReadBufferWithoutRelcache(src->rd_locator, forkNum, blkno,
RBM_NORMAL, bstrategy_src,
permanent);
srcPage = BufferGetPage(srcBuf);
@@ -3740,7 +3740,7 @@ RelationCopyStorageUsingBuffer(Relation src, Relation dst, ForkNumber forkNum,
}
/* Use P_NEW to extend the destination relation. */
dstBuf = ReadBufferWithoutRelcache(dst->rd_node, forkNum, P_NEW,
dstBuf = ReadBufferWithoutRelcache(dst->rd_locator, forkNum, P_NEW,
RBM_NORMAL, bstrategy_dst,
permanent);
LockBuffer(dstBuf, BUFFER_LOCK_EXCLUSIVE);
@@ -3775,8 +3775,8 @@ RelationCopyStorageUsingBuffer(Relation src, Relation dst, ForkNumber forkNum,
* --------------------------------------------------------------------
*/
void
CreateAndCopyRelationData(RelFileNode src_rnode, RelFileNode dst_rnode,
bool permanent)
CreateAndCopyRelationData(RelFileLocator src_rlocator,
RelFileLocator dst_rlocator, bool permanent)
{
Relation src_rel;
Relation dst_rel;
@@ -3793,8 +3793,8 @@ CreateAndCopyRelationData(RelFileNode src_rnode, RelFileNode dst_rnode,
* used the smgr layer directly, we would have to worry about
* invalidations.
*/
src_rel = CreateFakeRelcacheEntry(src_rnode);
dst_rel = CreateFakeRelcacheEntry(dst_rnode);
src_rel = CreateFakeRelcacheEntry(src_rlocator);
dst_rel = CreateFakeRelcacheEntry(dst_rlocator);
/*
* Create and copy all forks of the relation. During create database we
@@ -3802,7 +3802,7 @@ CreateAndCopyRelationData(RelFileNode src_rnode, RelFileNode dst_rnode,
* directory. Therefore, each individual relation doesn't need to be
* registered for cleanup.
*/
RelationCreateStorage(dst_rnode, relpersistence, false);
RelationCreateStorage(dst_rlocator, relpersistence, false);
/* copy main fork. */
RelationCopyStorageUsingBuffer(src_rel, dst_rel, MAIN_FORKNUM, permanent);
@@ -3820,7 +3820,7 @@ CreateAndCopyRelationData(RelFileNode src_rnode, RelFileNode dst_rnode,
* init fork of an unlogged relation.
*/
if (permanent || forkNum == INIT_FORKNUM)
log_smgrcreate(&dst_rnode, forkNum);
log_smgrcreate(&dst_rlocator, forkNum);
/* Copy a fork's data, block by block. */
RelationCopyStorageUsingBuffer(src_rel, dst_rel, forkNum,
@@ -3864,16 +3864,16 @@ FlushDatabaseBuffers(Oid dbid)
bufHdr = GetBufferDescriptor(i);
/*
* As in DropRelFileNodeBuffers, an unlocked precheck should be safe
* and saves some cycles.
* As in DropRelFileLocatorBuffers, an unlocked precheck should be
* safe and saves some cycles.
*/
if (bufHdr->tag.rnode.dbNode != dbid)
if (bufHdr->tag.rlocator.dbOid != dbid)
continue;
ReservePrivateRefCountEntry();
buf_state = LockBufHdr(bufHdr);
if (bufHdr->tag.rnode.dbNode == dbid &&
if (bufHdr->tag.rlocator.dbOid == dbid &&
(buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
{
PinBuffer_Locked(bufHdr);
@@ -4034,7 +4034,7 @@ MarkBufferDirtyHint(Buffer buffer, bool buffer_std)
(pg_atomic_read_u32(&bufHdr->state) & BM_PERMANENT))
{
/*
* If we must not write WAL, due to a relfilenode-specific
* If we must not write WAL, due to a relfilelocator-specific
* condition or being in recovery, don't dirty the page. We can
* set the hint, just not dirty the page as a result so the hint
* is lost when we evict the page or shutdown.
@@ -4042,7 +4042,7 @@ MarkBufferDirtyHint(Buffer buffer, bool buffer_std)
* See src/backend/storage/page/README for longer discussion.
*/
if (RecoveryInProgress() ||
RelFileNodeSkippingWAL(bufHdr->tag.rnode))
RelFileLocatorSkippingWAL(bufHdr->tag.rlocator))
return;
/*
@@ -4651,7 +4651,7 @@ AbortBufferIO(void)
/* Buffer is pinned, so we can read tag without spinlock */
char *path;
path = relpathperm(buf->tag.rnode, buf->tag.forkNum);
path = relpathperm(buf->tag.rlocator, buf->tag.forkNum);
ereport(WARNING,
(errcode(ERRCODE_IO_ERROR),
errmsg("could not write block %u of %s",
@@ -4675,7 +4675,7 @@ shared_buffer_write_error_callback(void *arg)
/* Buffer is pinned, so we can read the tag without locking the spinlock */
if (bufHdr != NULL)
{
char *path = relpathperm(bufHdr->tag.rnode, bufHdr->tag.forkNum);
char *path = relpathperm(bufHdr->tag.rlocator, bufHdr->tag.forkNum);
errcontext("writing block %u of relation %s",
bufHdr->tag.blockNum, path);
@@ -4693,7 +4693,7 @@ local_buffer_write_error_callback(void *arg)
if (bufHdr != NULL)
{
char *path = relpathbackend(bufHdr->tag.rnode, MyBackendId,
char *path = relpathbackend(bufHdr->tag.rlocator, MyBackendId,
bufHdr->tag.forkNum);
errcontext("writing block %u of relation %s",
@@ -4703,27 +4703,27 @@ local_buffer_write_error_callback(void *arg)
}
/*
* RelFileNode qsort/bsearch comparator; see RelFileNodeEquals.
* RelFileLocator qsort/bsearch comparator; see RelFileLocatorEquals.
*/
static int
rnode_comparator(const void *p1, const void *p2)
rlocator_comparator(const void *p1, const void *p2)
{
RelFileNode n1 = *(const RelFileNode *) p1;
RelFileNode n2 = *(const RelFileNode *) p2;
RelFileLocator n1 = *(const RelFileLocator *) p1;
RelFileLocator n2 = *(const RelFileLocator *) p2;
if (n1.relNode < n2.relNode)
if (n1.relNumber < n2.relNumber)
return -1;
else if (n1.relNode > n2.relNode)
else if (n1.relNumber > n2.relNumber)
return 1;
if (n1.dbNode < n2.dbNode)
if (n1.dbOid < n2.dbOid)
return -1;
else if (n1.dbNode > n2.dbNode)
else if (n1.dbOid > n2.dbOid)
return 1;
if (n1.spcNode < n2.spcNode)
if (n1.spcOid < n2.spcOid)
return -1;
else if (n1.spcNode > n2.spcNode)
else if (n1.spcOid > n2.spcOid)
return 1;
else
return 0;
@@ -4789,7 +4789,7 @@ buffertag_comparator(const BufferTag *ba, const BufferTag *bb)
{
int ret;
ret = rnode_comparator(&ba->rnode, &bb->rnode);
ret = rlocator_comparator(&ba->rlocator, &bb->rlocator);
if (ret != 0)
return ret;
@@ -4822,9 +4822,9 @@ ckpt_buforder_comparator(const CkptSortItem *a, const CkptSortItem *b)
else if (a->tsId > b->tsId)
return 1;
/* compare relation */
if (a->relNode < b->relNode)
if (a->relNumber < b->relNumber)
return -1;
else if (a->relNode > b->relNode)
else if (a->relNumber > b->relNumber)
return 1;
/* compare fork */
else if (a->forkNum < b->forkNum)
@@ -4960,7 +4960,7 @@ IssuePendingWritebacks(WritebackContext *context)
next = &context->pending_writebacks[i + ahead + 1];
/* different file, stop */
if (!RelFileNodeEquals(cur->tag.rnode, next->tag.rnode) ||
if (!RelFileLocatorEquals(cur->tag.rlocator, next->tag.rlocator) ||
cur->tag.forkNum != next->tag.forkNum)
break;
@@ -4979,7 +4979,7 @@ IssuePendingWritebacks(WritebackContext *context)
i += ahead;
/* and finally tell the kernel to write the data to storage */
reln = smgropen(tag.rnode, InvalidBackendId);
reln = smgropen(tag.rlocator, InvalidBackendId);
smgrwriteback(reln, tag.forkNum, tag.blockNum, nblocks);
}

src/backend/storage/buffer/localbuf.c

@@ -68,7 +68,7 @@ PrefetchLocalBuffer(SMgrRelation smgr, ForkNumber forkNum,
BufferTag newTag; /* identity of requested block */
LocalBufferLookupEnt *hresult;
INIT_BUFFERTAG(newTag, smgr->smgr_rnode.node, forkNum, blockNum);
INIT_BUFFERTAG(newTag, smgr->smgr_rlocator.locator, forkNum, blockNum);
/* Initialize local buffers if first request in this session */
if (LocalBufHash == NULL)
@@ -117,7 +117,7 @@ LocalBufferAlloc(SMgrRelation smgr, ForkNumber forkNum, BlockNumber blockNum,
bool found;
uint32 buf_state;
INIT_BUFFERTAG(newTag, smgr->smgr_rnode.node, forkNum, blockNum);
INIT_BUFFERTAG(newTag, smgr->smgr_rlocator.locator, forkNum, blockNum);
/* Initialize local buffers if first request in this session */
if (LocalBufHash == NULL)
@@ -134,7 +134,7 @@ LocalBufferAlloc(SMgrRelation smgr, ForkNumber forkNum, BlockNumber blockNum,
Assert(BUFFERTAGS_EQUAL(bufHdr->tag, newTag));
#ifdef LBDEBUG
fprintf(stderr, "LB ALLOC (%u,%d,%d) %d\n",
smgr->smgr_rnode.node.relNode, forkNum, blockNum, -b - 1);
smgr->smgr_rlocator.locator.relNumber, forkNum, blockNum, -b - 1);
#endif
buf_state = pg_atomic_read_u32(&bufHdr->state);
@@ -162,7 +162,7 @@ LocalBufferAlloc(SMgrRelation smgr, ForkNumber forkNum, BlockNumber blockNum,
#ifdef LBDEBUG
fprintf(stderr, "LB ALLOC (%u,%d,%d) %d\n",
smgr->smgr_rnode.node.relNode, forkNum, blockNum,
smgr->smgr_rlocator.locator.relNumber, forkNum, blockNum,
-nextFreeLocalBuf - 1);
#endif
@@ -215,7 +215,7 @@ LocalBufferAlloc(SMgrRelation smgr, ForkNumber forkNum, BlockNumber blockNum,
Page localpage = (char *) LocalBufHdrGetBlock(bufHdr);
/* Find smgr relation for buffer */
oreln = smgropen(bufHdr->tag.rnode, MyBackendId);
oreln = smgropen(bufHdr->tag.rlocator, MyBackendId);
PageSetChecksumInplace(localpage, bufHdr->tag.blockNum);
@@ -312,7 +312,7 @@ MarkLocalBufferDirty(Buffer buffer)
}
/*
* DropRelFileNodeLocalBuffers
* DropRelFileLocatorLocalBuffers
* This function removes from the buffer pool all the pages of the
* specified relation that have block numbers >= firstDelBlock.
* (In particular, with firstDelBlock = 0, all pages are removed.)
@@ -320,11 +320,11 @@ MarkLocalBufferDirty(Buffer buffer)
* out first. Therefore, this is NOT rollback-able, and so should be
* used only with extreme caution!
*
* See DropRelFileNodeBuffers in bufmgr.c for more notes.
* See DropRelFileLocatorBuffers in bufmgr.c for more notes.
*/
void
DropRelFileNodeLocalBuffers(RelFileNode rnode, ForkNumber forkNum,
BlockNumber firstDelBlock)
DropRelFileLocatorLocalBuffers(RelFileLocator rlocator, ForkNumber forkNum,
BlockNumber firstDelBlock)
{
int i;
@@ -337,14 +337,14 @@ DropRelFileNodeLocalBuffers(RelFileNode rnode, ForkNumber forkNum,
buf_state = pg_atomic_read_u32(&bufHdr->state);
if ((buf_state & BM_TAG_VALID) &&
RelFileNodeEquals(bufHdr->tag.rnode, rnode) &&
RelFileLocatorEquals(bufHdr->tag.rlocator, rlocator) &&
bufHdr->tag.forkNum == forkNum &&
bufHdr->tag.blockNum >= firstDelBlock)
{
if (LocalRefCount[i] != 0)
elog(ERROR, "block %u of %s is still referenced (local %u)",
bufHdr->tag.blockNum,
relpathbackend(bufHdr->tag.rnode, MyBackendId,
relpathbackend(bufHdr->tag.rlocator, MyBackendId,
bufHdr->tag.forkNum),
LocalRefCount[i]);
/* Remove entry from hashtable */
@@ -363,14 +363,14 @@ DropRelFileNodeLocalBuffers(RelFileNode rnode, ForkNumber forkNum,
}
/*
* DropRelFileNodeAllLocalBuffers
* DropRelFileLocatorAllLocalBuffers
* This function removes from the buffer pool all pages of all forks
* of the specified relation.
*
* See DropRelFileNodesAllBuffers in bufmgr.c for more notes.
* See DropRelFileLocatorsAllBuffers in bufmgr.c for more notes.
*/
void
DropRelFileNodeAllLocalBuffers(RelFileNode rnode)
DropRelFileLocatorAllLocalBuffers(RelFileLocator rlocator)
{
int i;
@@ -383,12 +383,12 @@ DropRelFileNodeAllLocalBuffers(RelFileNode rnode)
buf_state = pg_atomic_read_u32(&bufHdr->state);
if ((buf_state & BM_TAG_VALID) &&
RelFileNodeEquals(bufHdr->tag.rnode, rnode))
RelFileLocatorEquals(bufHdr->tag.rlocator, rlocator))
{
if (LocalRefCount[i] != 0)
elog(ERROR, "block %u of %s is still referenced (local %u)",
bufHdr->tag.blockNum,
relpathbackend(bufHdr->tag.rnode, MyBackendId,
relpathbackend(bufHdr->tag.rlocator, MyBackendId,
bufHdr->tag.forkNum),
LocalRefCount[i]);
/* Remove entry from hashtable */
@@ -589,8 +589,8 @@ AtProcExit_LocalBuffers(void)
{
/*
* We shouldn't be holding any remaining pins; if we are, and assertions
* aren't enabled, we'll fail later in DropRelFileNodeBuffers while trying
* to drop the temp rels.
* aren't enabled, we'll fail later in DropRelFileLocatorBuffers while
* trying to drop the temp rels.
*/
CheckForLocalBufferLeaks();
}

src/backend/storage/freespace/freespace.c

@@ -196,7 +196,7 @@ RecordPageWithFreeSpace(Relation rel, BlockNumber heapBlk, Size spaceAvail)
* WAL replay
*/
void
XLogRecordPageWithFreeSpace(RelFileNode rnode, BlockNumber heapBlk,
XLogRecordPageWithFreeSpace(RelFileLocator rlocator, BlockNumber heapBlk,
Size spaceAvail)
{
int new_cat = fsm_space_avail_to_cat(spaceAvail);
@@ -211,8 +211,8 @@ XLogRecordPageWithFreeSpace(RelFileNode rnode, BlockNumber heapBlk,
blkno = fsm_logical_to_physical(addr);
/* If the page doesn't exist already, extend */
buf = XLogReadBufferExtended(rnode, FSM_FORKNUM, blkno, RBM_ZERO_ON_ERROR,
InvalidBuffer);
buf = XLogReadBufferExtended(rlocator, FSM_FORKNUM, blkno,
RBM_ZERO_ON_ERROR, InvalidBuffer);
LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
page = BufferGetPage(buf);

src/backend/storage/freespace/fsmpage.c

@@ -268,13 +268,13 @@ restart:
*
* Fix the corruption and restart.
*/
RelFileNode rnode;
RelFileLocator rlocator;
ForkNumber forknum;
BlockNumber blknum;
BufferGetTag(buf, &rnode, &forknum, &blknum);
BufferGetTag(buf, &rlocator, &forknum, &blknum);
elog(DEBUG1, "fixing corrupt FSM block %u, relation %u/%u/%u",
blknum, rnode.spcNode, rnode.dbNode, rnode.relNode);
blknum, rlocator.spcOid, rlocator.dbOid, rlocator.relNumber);
/* make sure we hold an exclusive lock */
if (!exclusive_lock_held)

src/backend/storage/ipc/standby.c

@@ -442,7 +442,7 @@ ResolveRecoveryConflictWithVirtualXIDs(VirtualTransactionId *waitlist,
}
void
ResolveRecoveryConflictWithSnapshot(TransactionId latestRemovedXid, RelFileNode node)
ResolveRecoveryConflictWithSnapshot(TransactionId latestRemovedXid, RelFileLocator locator)
{
VirtualTransactionId *backends;
@@ -461,7 +461,7 @@ ResolveRecoveryConflictWithSnapshot(TransactionId latestRemovedXid, RelFileNode
return;
backends = GetConflictingVirtualXIDs(latestRemovedXid,
node.dbNode);
locator.dbOid);
ResolveRecoveryConflictWithVirtualXIDs(backends,
PROCSIG_RECOVERY_CONFLICT_SNAPSHOT,
@@ -475,7 +475,7 @@ ResolveRecoveryConflictWithSnapshot(TransactionId latestRemovedXid, RelFileNode
*/
void
ResolveRecoveryConflictWithSnapshotFullXid(FullTransactionId latestRemovedFullXid,
RelFileNode node)
RelFileLocator locator)
{
/*
* ResolveRecoveryConflictWithSnapshot operates on 32-bit TransactionIds,
@@ -493,7 +493,7 @@ ResolveRecoveryConflictWithSnapshotFullXid(FullTransactionId latestRemovedFullXi
TransactionId latestRemovedXid;
latestRemovedXid = XidFromFullTransactionId(latestRemovedFullXid);
ResolveRecoveryConflictWithSnapshot(latestRemovedXid, node);
ResolveRecoveryConflictWithSnapshot(latestRemovedXid, locator);
}
}

src/backend/storage/lmgr/predicate.c

@@ -1997,7 +1997,7 @@ PageIsPredicateLocked(Relation relation, BlockNumber blkno)
PREDICATELOCKTARGET *target;
SET_PREDICATELOCKTARGETTAG_PAGE(targettag,
relation->rd_node.dbNode,
relation->rd_locator.dbOid,
relation->rd_id,
blkno);
@@ -2576,7 +2576,7 @@ PredicateLockRelation(Relation relation, Snapshot snapshot)
return;
SET_PREDICATELOCKTARGETTAG_RELATION(tag,
relation->rd_node.dbNode,
relation->rd_locator.dbOid,
relation->rd_id);
PredicateLockAcquire(&tag);
}
@@ -2599,7 +2599,7 @@ PredicateLockPage(Relation relation, BlockNumber blkno, Snapshot snapshot)
return;
SET_PREDICATELOCKTARGETTAG_PAGE(tag,
relation->rd_node.dbNode,
relation->rd_locator.dbOid,
relation->rd_id,
blkno);
PredicateLockAcquire(&tag);
@@ -2638,13 +2638,13 @@ PredicateLockTID(Relation relation, ItemPointer tid, Snapshot snapshot,
* level lock.
*/
SET_PREDICATELOCKTARGETTAG_RELATION(tag,
relation->rd_node.dbNode,
relation->rd_locator.dbOid,
relation->rd_id);
if (PredicateLockExists(&tag))
return;
SET_PREDICATELOCKTARGETTAG_TUPLE(tag,
relation->rd_node.dbNode,
relation->rd_locator.dbOid,
relation->rd_id,
ItemPointerGetBlockNumber(tid),
ItemPointerGetOffsetNumber(tid));
@@ -2974,7 +2974,7 @@ DropAllPredicateLocksFromTable(Relation relation, bool transfer)
if (!PredicateLockingNeededForRelation(relation))
return;
dbId = relation->rd_node.dbNode;
dbId = relation->rd_locator.dbOid;
relId = relation->rd_id;
if (relation->rd_index == NULL)
{
@@ -3194,11 +3194,11 @@ PredicateLockPageSplit(Relation relation, BlockNumber oldblkno,
Assert(BlockNumberIsValid(newblkno));
SET_PREDICATELOCKTARGETTAG_PAGE(oldtargettag,
relation->rd_node.dbNode,
relation->rd_locator.dbOid,
relation->rd_id,
oldblkno);
SET_PREDICATELOCKTARGETTAG_PAGE(newtargettag,
relation->rd_node.dbNode,
relation->rd_locator.dbOid,
relation->rd_id,
newblkno);
@@ -4478,7 +4478,7 @@ CheckForSerializableConflictIn(Relation relation, ItemPointer tid, BlockNumber b
if (tid != NULL)
{
SET_PREDICATELOCKTARGETTAG_TUPLE(targettag,
relation->rd_node.dbNode,
relation->rd_locator.dbOid,
relation->rd_id,
ItemPointerGetBlockNumber(tid),
ItemPointerGetOffsetNumber(tid));
@@ -4488,14 +4488,14 @@ CheckForSerializableConflictIn(Relation relation, ItemPointer tid, BlockNumber b
if (blkno != InvalidBlockNumber)
{
SET_PREDICATELOCKTARGETTAG_PAGE(targettag,
relation->rd_node.dbNode,
relation->rd_locator.dbOid,
relation->rd_id,
blkno);
CheckTargetForConflictsIn(&targettag);
}
SET_PREDICATELOCKTARGETTAG_RELATION(targettag,
relation->rd_node.dbNode,
relation->rd_locator.dbOid,
relation->rd_id);
CheckTargetForConflictsIn(&targettag);
}
@@ -4556,7 +4556,7 @@ CheckTableForSerializableConflictIn(Relation relation)
Assert(relation->rd_index == NULL); /* not an index relation */
dbId = relation->rd_node.dbNode;
dbId = relation->rd_locator.dbOid;
heapId = relation->rd_id;
LWLockAcquire(SerializablePredicateListLock, LW_EXCLUSIVE);

src/backend/storage/smgr/README

@@ -46,7 +46,7 @@ physical relation in system catalogs.
It is assumed that the main fork, fork number 0 or MAIN_FORKNUM, always
exists. Fork numbers are assigned in src/include/common/relpath.h.
Functions in smgr.c and md.c take an extra fork number argument, in addition
to relfilenode and block number, to identify which relation fork you want to
to relfilelocator and block number, to identify which relation fork you want to
access. Since most code wants to access the main fork, a shortcut version of
ReadBuffer that accesses MAIN_FORKNUM is provided in the buffer manager for
convenience.
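
To make the fork-number argument concrete, here is a hypothetical snippet (not
part of this patch) that reads a block of a relation's free-space-map fork
through the smgr layer, using the renamed types; the relNumber value is an
arbitrary example:

    RelFileLocator rlocator;
    SMgrRelation reln;
    char        buffer[BLCKSZ];

    rlocator.spcOid = DEFAULTTABLESPACE_OID;
    rlocator.dbOid = MyDatabaseId;
    rlocator.relNumber = 16384;     /* hypothetical relfilenumber */

    reln = smgropen(rlocator, InvalidBackendId);
    smgrread(reln, FSM_FORKNUM, 0, buffer);     /* block 0 of the FSM fork */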

src/backend/storage/smgr/md.c

@@ -35,7 +35,7 @@
#include "storage/bufmgr.h"
#include "storage/fd.h"
#include "storage/md.h"
#include "storage/relfilenode.h"
#include "storage/relfilelocator.h"
#include "storage/smgr.h"
#include "storage/sync.h"
#include "utils/hsearch.h"
@@ -89,11 +89,11 @@ static MemoryContext MdCxt; /* context for all MdfdVec objects */
/* Populate a file tag describing an md.c segment file. */
#define INIT_MD_FILETAG(a,xx_rnode,xx_forknum,xx_segno) \
#define INIT_MD_FILETAG(a,xx_rlocator,xx_forknum,xx_segno) \
( \
memset(&(a), 0, sizeof(FileTag)), \
(a).handler = SYNC_HANDLER_MD, \
(a).rnode = (xx_rnode), \
(a).rlocator = (xx_rlocator), \
(a).forknum = (xx_forknum), \
(a).segno = (xx_segno) \
)
@@ -121,14 +121,14 @@ static MemoryContext MdCxt; /* context for all MdfdVec objects */
/* local routines */
static void mdunlinkfork(RelFileNodeBackend rnode, ForkNumber forkNum,
static void mdunlinkfork(RelFileLocatorBackend rlocator, ForkNumber forkNum,
bool isRedo);
static MdfdVec *mdopenfork(SMgrRelation reln, ForkNumber forknum, int behavior);
static void register_dirty_segment(SMgrRelation reln, ForkNumber forknum,
MdfdVec *seg);
static void register_unlink_segment(RelFileNodeBackend rnode, ForkNumber forknum,
static void register_unlink_segment(RelFileLocatorBackend rlocator, ForkNumber forknum,
BlockNumber segno);
static void register_forget_request(RelFileNodeBackend rnode, ForkNumber forknum,
static void register_forget_request(RelFileLocatorBackend rlocator, ForkNumber forknum,
BlockNumber segno);
static void _fdvec_resize(SMgrRelation reln,
ForkNumber forknum,
@@ -199,11 +199,11 @@ mdcreate(SMgrRelation reln, ForkNumber forkNum, bool isRedo)
* should be here and not in commands/tablespace.c? But that would imply
* importing a lot of stuff that smgr.c oughtn't know, either.
*/
TablespaceCreateDbspace(reln->smgr_rnode.node.spcNode,
reln->smgr_rnode.node.dbNode,
TablespaceCreateDbspace(reln->smgr_rlocator.locator.spcOid,
reln->smgr_rlocator.locator.dbOid,
isRedo);
path = relpath(reln->smgr_rnode, forkNum);
path = relpath(reln->smgr_rlocator, forkNum);
fd = PathNameOpenFile(path, O_RDWR | O_CREAT | O_EXCL | PG_BINARY);
@@ -234,7 +234,7 @@ mdcreate(SMgrRelation reln, ForkNumber forkNum, bool isRedo)
/*
* mdunlink() -- Unlink a relation.
*
* Note that we're passed a RelFileNodeBackend --- by the time this is called,
* Note that we're passed a RelFileLocatorBackend --- by the time this is called,
* there won't be an SMgrRelation hashtable entry anymore.
*
* forkNum can be a fork number to delete a specific fork, or InvalidForkNumber
@@ -243,10 +243,10 @@ mdcreate(SMgrRelation reln, ForkNumber forkNum, bool isRedo)
* For regular relations, we don't unlink the first segment file of the rel,
* but just truncate it to zero length, and record a request to unlink it after
* the next checkpoint. Additional segments can be unlinked immediately,
* however. Leaving the empty file in place prevents that relfilenode
* number from being reused. The scenario this protects us from is:
* however. Leaving the empty file in place prevents that relfilenumber
* from being reused. The scenario this protects us from is:
* 1. We delete a relation (and commit, and actually remove its file).
* 2. We create a new relation, which by chance gets the same relfilenode as
* 2. We create a new relation, which by chance gets the same relfilenumber as
* the just-deleted one (OIDs must've wrapped around for that to happen).
* 3. We crash before another checkpoint occurs.
* During replay, we would delete the file and then recreate it, which is fine
@@ -254,18 +254,18 @@ mdcreate(SMgrRelation reln, ForkNumber forkNum, bool isRedo)
* But if we didn't WAL-log insertions, but instead relied on fsyncing the
* file after populating it (as we do at wal_level=minimal), the contents of
* the file would be lost forever. By leaving the empty file until after the
* next checkpoint, we prevent reassignment of the relfilenode number until
* it's safe, because relfilenode assignment skips over any existing file.
* next checkpoint, we prevent reassignment of the relfilenumber until it's
* safe, because relfilenumber assignment skips over any existing file.
*
* We do not need to go through this dance for temp relations, though, because
* we never make WAL entries for temp rels, and so a temp rel poses no threat
* to the health of a regular rel that has taken over its relfilenode number.
* to the health of a regular rel that has taken over its relfilenumber.
* The fact that temp rels and regular rels have different file naming
* patterns provides additional safety.
*
* All the above applies only to the relation's main fork; other forks can
* just be removed immediately, since they are not needed to prevent the
* relfilenode number from being recycled. Also, we do not carefully
* relfilenumber from being recycled. Also, we do not carefully
* track whether other forks have been created or not, but just attempt to
* unlink them unconditionally; so we should never complain about ENOENT.
*
@@ -278,16 +278,16 @@ mdcreate(SMgrRelation reln, ForkNumber forkNum, bool isRedo)
* we are usually not in a transaction anymore when this is called.
*/
void
mdunlink(RelFileNodeBackend rnode, ForkNumber forkNum, bool isRedo)
mdunlink(RelFileLocatorBackend rlocator, ForkNumber forkNum, bool isRedo)
{
/* Now do the per-fork work */
if (forkNum == InvalidForkNumber)
{
for (forkNum = 0; forkNum <= MAX_FORKNUM; forkNum++)
mdunlinkfork(rnode, forkNum, isRedo);
mdunlinkfork(rlocator, forkNum, isRedo);
}
else
mdunlinkfork(rnode, forkNum, isRedo);
mdunlinkfork(rlocator, forkNum, isRedo);
}
/*
@@ -315,25 +315,25 @@ do_truncate(const char *path)
}
static void
mdunlinkfork(RelFileNodeBackend rnode, ForkNumber forkNum, bool isRedo)
mdunlinkfork(RelFileLocatorBackend rlocator, ForkNumber forkNum, bool isRedo)
{
char *path;
int ret;
path = relpath(rnode, forkNum);
path = relpath(rlocator, forkNum);
/*
* Delete or truncate the first segment.
*/
if (isRedo || forkNum != MAIN_FORKNUM || RelFileNodeBackendIsTemp(rnode))
if (isRedo || forkNum != MAIN_FORKNUM || RelFileLocatorBackendIsTemp(rlocator))
{
if (!RelFileNodeBackendIsTemp(rnode))
if (!RelFileLocatorBackendIsTemp(rlocator))
{
/* Prevent other backends' fds from holding on to the disk space */
ret = do_truncate(path);
/* Forget any pending sync requests for the first segment */
register_forget_request(rnode, forkNum, 0 /* first seg */ );
register_forget_request(rlocator, forkNum, 0 /* first seg */ );
}
else
ret = 0;
@@ -354,7 +354,7 @@ mdunlinkfork(RelFileNodeBackend rnode, ForkNumber forkNum, bool isRedo)
ret = do_truncate(path);
/* Register request to unlink first segment later */
register_unlink_segment(rnode, forkNum, 0 /* first seg */ );
register_unlink_segment(rlocator, forkNum, 0 /* first seg */ );
}
/*
@@ -373,7 +373,7 @@ mdunlinkfork(RelFileNodeBackend rnode, ForkNumber forkNum, bool isRedo)
{
sprintf(segpath, "%s.%u", path, segno);
if (!RelFileNodeBackendIsTemp(rnode))
if (!RelFileLocatorBackendIsTemp(rlocator))
{
/*
* Prevent other backends' fds from holding on to the disk
@@ -386,7 +386,7 @@ mdunlinkfork(RelFileNodeBackend rnode, ForkNumber forkNum, bool isRedo)
* Forget any pending sync requests for this segment before we
* try to unlink.
*/
register_forget_request(rnode, forkNum, segno);
register_forget_request(rlocator, forkNum, segno);
}
if (unlink(segpath) < 0)
@@ -437,7 +437,7 @@ mdextend(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg("cannot extend file \"%s\" beyond %u blocks",
relpath(reln->smgr_rnode, forknum),
relpath(reln->smgr_rlocator, forknum),
InvalidBlockNumber)));
v = _mdfd_getseg(reln, forknum, blocknum, skipFsync, EXTENSION_CREATE);
@@ -490,7 +490,7 @@ mdopenfork(SMgrRelation reln, ForkNumber forknum, int behavior)
if (reln->md_num_open_segs[forknum] > 0)
return &reln->md_seg_fds[forknum][0];
path = relpath(reln->smgr_rnode, forknum);
path = relpath(reln->smgr_rlocator, forknum);
fd = PathNameOpenFile(path, O_RDWR | PG_BINARY);
@@ -645,10 +645,10 @@ mdread(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
MdfdVec *v;
TRACE_POSTGRESQL_SMGR_MD_READ_START(forknum, blocknum,
reln->smgr_rnode.node.spcNode,
reln->smgr_rnode.node.dbNode,
reln->smgr_rnode.node.relNode,
reln->smgr_rnode.backend);
reln->smgr_rlocator.locator.spcOid,
reln->smgr_rlocator.locator.dbOid,
reln->smgr_rlocator.locator.relNumber,
reln->smgr_rlocator.backend);
v = _mdfd_getseg(reln, forknum, blocknum, false,
EXTENSION_FAIL | EXTENSION_CREATE_RECOVERY);
@@ -660,10 +660,10 @@ mdread(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
nbytes = FileRead(v->mdfd_vfd, buffer, BLCKSZ, seekpos, WAIT_EVENT_DATA_FILE_READ);
TRACE_POSTGRESQL_SMGR_MD_READ_DONE(forknum, blocknum,
reln->smgr_rnode.node.spcNode,
reln->smgr_rnode.node.dbNode,
reln->smgr_rnode.node.relNode,
reln->smgr_rnode.backend,
reln->smgr_rlocator.locator.spcOid,
reln->smgr_rlocator.locator.dbOid,
reln->smgr_rlocator.locator.relNumber,
reln->smgr_rlocator.backend,
nbytes,
BLCKSZ);
@@ -715,10 +715,10 @@ mdwrite(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
#endif
TRACE_POSTGRESQL_SMGR_MD_WRITE_START(forknum, blocknum,
reln->smgr_rnode.node.spcNode,
reln->smgr_rnode.node.dbNode,
reln->smgr_rnode.node.relNode,
reln->smgr_rnode.backend);
reln->smgr_rlocator.locator.spcOid,
reln->smgr_rlocator.locator.dbOid,
reln->smgr_rlocator.locator.relNumber,
reln->smgr_rlocator.backend);
v = _mdfd_getseg(reln, forknum, blocknum, skipFsync,
EXTENSION_FAIL | EXTENSION_CREATE_RECOVERY);
@@ -730,10 +730,10 @@ mdwrite(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
nbytes = FileWrite(v->mdfd_vfd, buffer, BLCKSZ, seekpos, WAIT_EVENT_DATA_FILE_WRITE);
TRACE_POSTGRESQL_SMGR_MD_WRITE_DONE(forknum, blocknum,
reln->smgr_rnode.node.spcNode,
reln->smgr_rnode.node.dbNode,
reln->smgr_rnode.node.relNode,
reln->smgr_rnode.backend,
reln->smgr_rlocator.locator.spcOid,
reln->smgr_rlocator.locator.dbOid,
reln->smgr_rlocator.locator.relNumber,
reln->smgr_rlocator.backend,
nbytes,
BLCKSZ);
@@ -842,7 +842,7 @@ mdtruncate(SMgrRelation reln, ForkNumber forknum, BlockNumber nblocks)
return;
ereport(ERROR,
(errmsg("could not truncate file \"%s\" to %u blocks: it's only %u blocks now",
relpath(reln->smgr_rnode, forknum),
relpath(reln->smgr_rlocator, forknum),
nblocks, curnblk)));
}
if (nblocks == curnblk)
@@ -983,7 +983,7 @@ register_dirty_segment(SMgrRelation reln, ForkNumber forknum, MdfdVec *seg)
{
FileTag tag;
INIT_MD_FILETAG(tag, reln->smgr_rnode.node, forknum, seg->mdfd_segno);
INIT_MD_FILETAG(tag, reln->smgr_rlocator.locator, forknum, seg->mdfd_segno);
/* Temp relations should never be fsync'd */
Assert(!SmgrIsTemp(reln));
@@ -1005,15 +1005,15 @@ register_dirty_segment(SMgrRelation reln, ForkNumber forknum, MdfdVec *seg)
* register_unlink_segment() -- Schedule a file to be deleted after next checkpoint
*/
static void
register_unlink_segment(RelFileNodeBackend rnode, ForkNumber forknum,
register_unlink_segment(RelFileLocatorBackend rlocator, ForkNumber forknum,
BlockNumber segno)
{
FileTag tag;
INIT_MD_FILETAG(tag, rnode.node, forknum, segno);
INIT_MD_FILETAG(tag, rlocator.locator, forknum, segno);
/* Should never be used with temp relations */
Assert(!RelFileNodeBackendIsTemp(rnode));
Assert(!RelFileLocatorBackendIsTemp(rlocator));
RegisterSyncRequest(&tag, SYNC_UNLINK_REQUEST, true /* retryOnError */ );
}
@@ -1022,12 +1022,12 @@ register_unlink_segment(RelFileNodeBackend rnode, ForkNumber forknum,
* register_forget_request() -- forget any fsyncs for a relation fork's segment
*/
static void
register_forget_request(RelFileNodeBackend rnode, ForkNumber forknum,
register_forget_request(RelFileLocatorBackend rlocator, ForkNumber forknum,
BlockNumber segno)
{
FileTag tag;
INIT_MD_FILETAG(tag, rnode.node, forknum, segno);
INIT_MD_FILETAG(tag, rlocator.locator, forknum, segno);
RegisterSyncRequest(&tag, SYNC_FORGET_REQUEST, true /* retryOnError */ );
}
@@ -1039,13 +1039,13 @@ void
ForgetDatabaseSyncRequests(Oid dbid)
{
FileTag tag;
RelFileNode rnode;
RelFileLocator rlocator;
rnode.dbNode = dbid;
rnode.spcNode = 0;
rnode.relNode = 0;
rlocator.dbOid = dbid;
rlocator.spcOid = 0;
rlocator.relNumber = 0;
INIT_MD_FILETAG(tag, rnode, InvalidForkNumber, InvalidBlockNumber);
INIT_MD_FILETAG(tag, rlocator, InvalidForkNumber, InvalidBlockNumber);
RegisterSyncRequest(&tag, SYNC_FILTER_REQUEST, true /* retryOnError */ );
}
@@ -1054,7 +1054,7 @@ ForgetDatabaseSyncRequests(Oid dbid)
* DropRelationFiles -- drop files of all given relations
*/
void
DropRelationFiles(RelFileNode *delrels, int ndelrels, bool isRedo)
DropRelationFiles(RelFileLocator *delrels, int ndelrels, bool isRedo)
{
SMgrRelation *srels;
int i;
@@ -1129,7 +1129,7 @@ _mdfd_segpath(SMgrRelation reln, ForkNumber forknum, BlockNumber segno)
char *path,
*fullpath;
path = relpath(reln->smgr_rnode, forknum);
path = relpath(reln->smgr_rlocator, forknum);
if (segno > 0)
{
@@ -1345,7 +1345,7 @@ _mdnblocks(SMgrRelation reln, ForkNumber forknum, MdfdVec *seg)
int
mdsyncfiletag(const FileTag *ftag, char *path)
{
SMgrRelation reln = smgropen(ftag->rnode, InvalidBackendId);
SMgrRelation reln = smgropen(ftag->rlocator, InvalidBackendId);
File file;
bool need_to_close;
int result,
@@ -1395,7 +1395,7 @@ mdunlinkfiletag(const FileTag *ftag, char *path)
char *p;
/* Compute the path. */
p = relpathperm(ftag->rnode, MAIN_FORKNUM);
p = relpathperm(ftag->rlocator, MAIN_FORKNUM);
strlcpy(path, p, MAXPGPATH);
pfree(p);
@@ -1417,5 +1417,5 @@ mdfiletagmatches(const FileTag *ftag, const FileTag *candidate)
* We'll return true for all candidates that have the same database OID as
* the ftag from the SYNC_FILTER_REQUEST request, so they're forgotten.
*/
return ftag->rnode.dbNode == candidate->rnode.dbNode;
return ftag->rlocator.dbOid == candidate->rlocator.dbOid;
}

src/backend/storage/smgr/smgr.c

@@ -46,7 +46,7 @@ typedef struct f_smgr
void (*smgr_create) (SMgrRelation reln, ForkNumber forknum,
bool isRedo);
bool (*smgr_exists) (SMgrRelation reln, ForkNumber forknum);
void (*smgr_unlink) (RelFileNodeBackend rnode, ForkNumber forknum,
void (*smgr_unlink) (RelFileLocatorBackend rlocator, ForkNumber forknum,
bool isRedo);
void (*smgr_extend) (SMgrRelation reln, ForkNumber forknum,
BlockNumber blocknum, char *buffer, bool skipFsync);
@@ -143,9 +143,9 @@ smgrshutdown(int code, Datum arg)
* This does not attempt to actually open the underlying file.
*/
SMgrRelation
smgropen(RelFileNode rnode, BackendId backend)
smgropen(RelFileLocator rlocator, BackendId backend)
{
RelFileNodeBackend brnode;
RelFileLocatorBackend brlocator;
SMgrRelation reln;
bool found;
@@ -154,7 +154,7 @@ smgropen(RelFileNode rnode, BackendId backend)
/* First time through: initialize the hash table */
HASHCTL ctl;
ctl.keysize = sizeof(RelFileNodeBackend);
ctl.keysize = sizeof(RelFileLocatorBackend);
ctl.entrysize = sizeof(SMgrRelationData);
SMgrRelationHash = hash_create("smgr relation table", 400,
&ctl, HASH_ELEM | HASH_BLOBS);
@@ -162,10 +162,10 @@ smgropen(RelFileNode rnode, BackendId backend)
}
/* Look up or create an entry */
brnode.node = rnode;
brnode.backend = backend;
brlocator.locator = rlocator;
brlocator.backend = backend;
reln = (SMgrRelation) hash_search(SMgrRelationHash,
(void *) &brnode,
(void *) &brlocator,
HASH_ENTER, &found);
/* Initialize it if not present before */
@@ -267,7 +267,7 @@ smgrclose(SMgrRelation reln)
dlist_delete(&reln->node);
if (hash_search(SMgrRelationHash,
(void *) &(reln->smgr_rnode),
(void *) &(reln->smgr_rlocator),
HASH_REMOVE, NULL) == NULL)
elog(ERROR, "SMgrRelation hashtable corrupted");
@@ -335,15 +335,15 @@ smgrcloseall(void)
}
/*
* smgrclosenode() -- Close SMgrRelation object for given RelFileNode,
* smgrcloserellocator() -- Close SMgrRelation object for given RelFileLocator,
* if one exists.
*
* This has the same effects as smgrclose(smgropen(rnode)), but it avoids
* This has the same effects as smgrclose(smgropen(rlocator)), but it avoids
* uselessly creating a hashtable entry only to drop it again when no
* such entry exists already.
*/
void
smgrclosenode(RelFileNodeBackend rnode)
smgrcloserellocator(RelFileLocatorBackend rlocator)
{
SMgrRelation reln;
@@ -352,7 +352,7 @@ smgrclosenode(RelFileNodeBackend rnode)
return;
reln = (SMgrRelation) hash_search(SMgrRelationHash,
(void *) &rnode,
(void *) &rlocator,
HASH_FIND, NULL);
if (reln != NULL)
smgrclose(reln);
@@ -420,7 +420,7 @@ void
smgrdounlinkall(SMgrRelation *rels, int nrels, bool isRedo)
{
int i = 0;
RelFileNodeBackend *rnodes;
RelFileLocatorBackend *rlocators;
ForkNumber forknum;
if (nrels == 0)
@@ -430,19 +430,19 @@ smgrdounlinkall(SMgrRelation *rels, int nrels, bool isRedo)
* Get rid of any remaining buffers for the relations. bufmgr will just
* drop them without bothering to write the contents.
*/
DropRelFileNodesAllBuffers(rels, nrels);
DropRelFileLocatorsAllBuffers(rels, nrels);
/*
* create an array which contains all relations to be dropped, and close
* each relation's forks at the smgr level while at it
*/
rnodes = palloc(sizeof(RelFileNodeBackend) * nrels);
rlocators = palloc(sizeof(RelFileLocatorBackend) * nrels);
for (i = 0; i < nrels; i++)
{
RelFileNodeBackend rnode = rels[i]->smgr_rnode;
RelFileLocatorBackend rlocator = rels[i]->smgr_rlocator;
int which = rels[i]->smgr_which;
rnodes[i] = rnode;
rlocators[i] = rlocator;
/* Close the forks at smgr level */
for (forknum = 0; forknum <= MAX_FORKNUM; forknum++)
@@ -458,7 +458,7 @@ smgrdounlinkall(SMgrRelation *rels, int nrels, bool isRedo)
* closed our own smgr rel.
*/
for (i = 0; i < nrels; i++)
CacheInvalidateSmgr(rnodes[i]);
CacheInvalidateSmgr(rlocators[i]);
/*
* Delete the physical file(s).
@@ -473,10 +473,10 @@ smgrdounlinkall(SMgrRelation *rels, int nrels, bool isRedo)
int which = rels[i]->smgr_which;
for (forknum = 0; forknum <= MAX_FORKNUM; forknum++)
smgrsw[which].smgr_unlink(rnodes[i], forknum, isRedo);
smgrsw[which].smgr_unlink(rlocators[i], forknum, isRedo);
}
pfree(rnodes);
pfree(rlocators);
}
@@ -631,7 +631,7 @@ smgrtruncate(SMgrRelation reln, ForkNumber *forknum, int nforks, BlockNumber *nb
* Get rid of any buffers for the about-to-be-deleted blocks. bufmgr will
* just drop them without bothering to write the contents.
*/
DropRelFileNodeBuffers(reln, forknum, nforks, nblocks);
DropRelFileLocatorBuffers(reln, forknum, nforks, nblocks);
/*
* Send a shared-inval message to force other backends to close any smgr
@@ -643,7 +643,7 @@ smgrtruncate(SMgrRelation reln, ForkNumber *forknum, int nforks, BlockNumber *nb
* is a performance-critical path.) As in the unlink code, we want to be
* sure the message is sent before we start changing things on-disk.
*/
CacheInvalidateSmgr(reln->smgr_rnode);
CacheInvalidateSmgr(reln->smgr_rlocator);
/* Do the truncation */
for (i = 0; i < nforks; i++)