
Scan the buffer pool just once, not once per fork, during relation drop.

This provides a speedup of about 4X when NBuffers is large enough.
There is also a useful reduction in sinval traffic, since we
do CacheInvalidateSmgr() only once, not once per fork.

Simon Riggs, reviewed and somewhat revised by Tom Lane
Committed by Tom Lane on 2012-06-07 17:42:27 -04:00
parent 5baf6da717
commit ece01aae47
9 changed files with 150 additions and 23 deletions
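
A relation can have up to MAX_FORKNUM + 1 = 4 forks (main, free-space map, visibility
map, init), and the old code made one full pass over the NBuffers buffer headers, and
sent one smgr invalidation message, per fork; that is where the roughly 4X figure and
the extra sinval traffic come from. The sketch below is illustrative only, not a hunk
from this commit: the two wrapper functions are invented for contrast, while
DropRelFileNodeBuffers, DropRelFileNodeAllBuffers and CacheInvalidateSmgr are the real
entry points involved.

/*
 * Illustrative sketch only; not code from this commit.  The wrappers are
 * invented, the called functions are the real bufmgr/inval entry points.
 */
#include "postgres.h"
#include "storage/bufmgr.h"
#include "utils/inval.h"

/* Old pattern: one full scan of the NBuffers headers, and one sinval
 * message, per fork of the relation being dropped. */
static void
drop_rel_buffers_per_fork(RelFileNodeBackend rnode)
{
    ForkNumber  forknum;

    for (forknum = 0; forknum <= MAX_FORKNUM; forknum++)
    {
        DropRelFileNodeBuffers(rnode, forknum, 0);
        CacheInvalidateSmgr(rnode);
    }
}

/* New pattern: a single scan that matches buffers on the relfilenode
 * alone, followed by a single sinval message. */
static void
drop_rel_buffers_once(RelFileNodeBackend rnode)
{
    DropRelFileNodeAllBuffers(rnode);
    CacheInvalidateSmgr(rnode);
}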

src/backend/storage/buffer/bufmgr.c

@@ -2020,7 +2020,7 @@ BufferIsPermanent(Buffer buffer)
  *      DropRelFileNodeBuffers
  *
  *      This function removes from the buffer pool all the pages of the
- *      specified relation that have block numbers >= firstDelBlock.
+ *      specified relation fork that have block numbers >= firstDelBlock.
  *      (In particular, with firstDelBlock = 0, all pages are removed.)
  *      Dirty pages are simply dropped, without bothering to write them
  *      out first.  Therefore, this is NOT rollback-able, and so should be
@@ -2089,6 +2089,46 @@ DropRelFileNodeBuffers(RelFileNodeBackend rnode, ForkNumber forkNum,
     }
 }
 
+/* ---------------------------------------------------------------------
+ *      DropRelFileNodeAllBuffers
+ *
+ *      This function removes from the buffer pool all the pages of all
+ *      forks of the specified relation.  It's equivalent to calling
+ *      DropRelFileNodeBuffers once per fork with firstDelBlock = 0.
+ * --------------------------------------------------------------------
+ */
+void
+DropRelFileNodeAllBuffers(RelFileNodeBackend rnode)
+{
+    int         i;
+
+    /* If it's a local relation, it's localbuf.c's problem. */
+    if (rnode.backend != InvalidBackendId)
+    {
+        if (rnode.backend == MyBackendId)
+            DropRelFileNodeAllLocalBuffers(rnode.node);
+        return;
+    }
+
+    for (i = 0; i < NBuffers; i++)
+    {
+        volatile BufferDesc *bufHdr = &BufferDescriptors[i];
+
+        /*
+         * As in DropRelFileNodeBuffers, an unlocked precheck should be safe
+         * and saves some cycles.
+         */
+        if (!RelFileNodeEquals(bufHdr->tag.rnode, rnode.node))
+            continue;
+
+        LockBufHdr(bufHdr);
+        if (RelFileNodeEquals(bufHdr->tag.rnode, rnode.node))
+            InvalidateBuffer(bufHdr);   /* releases spinlock */
+        else
+            UnlockBufHdr(bufHdr);
+    }
+}
+
 /* ---------------------------------------------------------------------
  *      DropDatabaseBuffers
  *
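
The "unlocked precheck" comment in the new function carries the key concurrency
argument, which the fuller comment inside DropRelFileNodeBuffers (that the new comment
points back to) spells out: the caller is assumed to hold a lock strong enough that
nobody can be loading new pages of this relation, so a buffer's tag may be changing
away from the target value but never to it; false negatives are impossible, and false
positives are caught by the re-check performed after LockBufHdr(). Below is a minimal,
self-contained illustration of that idiom; the types and names are hypothetical, and a
pthread mutex stands in for the buffer-header spinlock.

/* Standalone illustration of the unlocked-precheck idiom; not PostgreSQL code. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct CacheSlot
{
    pthread_mutex_t lock;
    int         owner;          /* plays the role of the buffer tag */
    bool        valid;
} CacheSlot;

#define NSLOTS 8
static CacheSlot slots[NSLOTS];

/* Drop every slot belonging to 'owner': a racy precheck skips uninteresting
 * slots cheaply, and any apparent match is re-verified under the per-slot
 * lock before it is invalidated. */
static void
drop_slots_for_owner(int owner)
{
    for (int i = 0; i < NSLOTS; i++)
    {
        CacheSlot  *slot = &slots[i];

        if (slot->owner != owner)       /* unlocked precheck: may read a stale value */
            continue;

        pthread_mutex_lock(&slot->lock);
        if (slot->owner == owner)       /* authoritative re-check under the lock */
            slot->valid = false;
        pthread_mutex_unlock(&slot->lock);
    }
}

int
main(void)
{
    for (int i = 0; i < NSLOTS; i++)
    {
        pthread_mutex_init(&slots[i].lock, NULL);
        slots[i].owner = i % 2;
        slots[i].valid = true;
    }

    drop_slots_for_owner(1);

    for (int i = 0; i < NSLOTS; i++)
        printf("slot %d: owner=%d valid=%d\n", i, slots[i].owner, slots[i].valid);
    return 0;
}

Nothing is ever invalidated on the strength of the racy read alone; it is only used to
avoid taking the lock for slots that clearly cannot match.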

src/backend/storage/buffer/localbuf.c

@@ -330,6 +330,46 @@ DropRelFileNodeLocalBuffers(RelFileNode rnode, ForkNumber forkNum,
     }
 }
 
+/*
+ * DropRelFileNodeAllLocalBuffers
+ *      This function removes from the buffer pool all pages of all forks
+ *      of the specified relation.
+ *
+ *      See DropRelFileNodeAllBuffers in bufmgr.c for more notes.
+ */
+void
+DropRelFileNodeAllLocalBuffers(RelFileNode rnode)
+{
+    int         i;
+
+    for (i = 0; i < NLocBuffer; i++)
+    {
+        BufferDesc *bufHdr = &LocalBufferDescriptors[i];
+        LocalBufferLookupEnt *hresult;
+
+        if ((bufHdr->flags & BM_TAG_VALID) &&
+            RelFileNodeEquals(bufHdr->tag.rnode, rnode))
+        {
+            if (LocalRefCount[i] != 0)
+                elog(ERROR, "block %u of %s is still referenced (local %u)",
+                     bufHdr->tag.blockNum,
+                     relpathbackend(bufHdr->tag.rnode, MyBackendId,
+                                    bufHdr->tag.forkNum),
+                     LocalRefCount[i]);
+            /* Remove entry from hashtable */
+            hresult = (LocalBufferLookupEnt *)
+                hash_search(LocalBufHash, (void *) &bufHdr->tag,
+                            HASH_REMOVE, NULL);
+            if (!hresult)       /* shouldn't happen */
+                elog(ERROR, "local buffer hash table corrupted");
+            /* Mark buffer invalid */
+            CLEAR_BUFFERTAG(bufHdr->tag);
+            bufHdr->flags = 0;
+            bufHdr->usage_count = 0;
+        }
+    }
+}
+
 /*
  * InitLocalBuffers -
  *      init the local buffer cache.  Since most queries (esp. multi-user ones)