
Remove the limit on the number of entries allowed in catcaches, and
remove the infrastructure needed to enforce the limit, ie, the global
LRU list of cache entries.  On small-to-middling databases this wins
because maintaining the LRU list is a waste of time.  On large databases
this wins because it's better to keep more cache entries (we assume
such users can afford to use some more per-backend memory than was
contemplated in the Berkeley-era catcache design).  This provides a
noticeable improvement in the speed of psql \d on a 10000-table
database, though it doesn't make it instantaneous.
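The hit path this leaves behind is worth seeing in miniature. Below is a
standalone sketch (hypothetical names and simplified types, not the patch's
code) of a catcache-style lookup after the change: entries live only on
per-bucket chains, a hit does a single move-to-front within its own bucket,
and there is no global LRU list to maintain and no eviction sweep to trigger.

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct Entry
    {
        struct Entry *next;   /* bucket chain link; the global LRU links are gone */
        unsigned int hash;
        int key;
    } Entry;

    typedef struct Cache
    {
        int nbuckets;         /* fixed power of two, chosen per cache */
        Entry **bucket;
    } Cache;

    /* hash-to-bucket mapping: a mask works because nbuckets is a power of two */
    #define HASH_INDEX(h, sz) ((h) & ((unsigned int) ((sz) - 1)))

    static Entry *
    cache_search(Cache *cache, unsigned int hash, int key)
    {
        Entry **slot = &cache->bucket[HASH_INDEX(hash, cache->nbuckets)];
        Entry **p;

        for (p = slot; *p != NULL; p = &(*p)->next)
        {
            Entry *e = *p;

            if (e->hash == hash && e->key == key)
            {
                /* a hit reorders only this bucket's chain, nothing global */
                *p = e->next;
                e->next = *slot;
                *slot = e;
                return e;
            }
        }
        return NULL;          /* miss: the caller loads the row and inserts it */
    }

    static void
    cache_insert(Cache *cache, unsigned int hash, int key)
    {
        Entry **slot = &cache->bucket[HASH_INDEX(hash, cache->nbuckets)];
        Entry *e = malloc(sizeof(Entry));

        e->hash = hash;
        e->key = key;
        e->next = *slot;      /* new entries go at the bucket head */
        *slot = e;
    }

    int
    main(void)
    {
        Cache cache = {4, calloc(4, sizeof(Entry *))};

        cache_insert(&cache, 42u, 42);
        cache_insert(&cache, 7u, 7);
        printf("found: %d\n", cache_search(&cache, 42u, 42)->key);
        return 0;
    }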

While at it, use per-catcache settings for the number of hash buckets
per catcache, rather than the former one-size-fits-all value.  It's a
bit silly to be using the same number of hash buckets for, eg, pg_am
and pg_attribute.  The specific values I used might need some tuning,
but they seem to be in the right ballpark based on CATCACHE_STATS
results from the standard regression tests.
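For illustration, here is a self-contained sketch (invented helper names, not
PostgreSQL code) of the two bit tricks the per-cache bucket counts rely on: a
power-of-two bucket count lets the bucket be selected with a mask instead of a
modulo, and the patch Asserts the power-of-two property using the
two's-complement identity (n & -n) == n, which isolates the lowest set bit.

    #include <assert.h>
    #include <stdio.h>

    /* true iff n has exactly one bit set: n & -n isolates the lowest set bit */
    static int
    is_power_of_two(int n)
    {
        return n > 0 && (n & -n) == n;
    }

    /* bucket selection; equivalent to hash % nbuckets when nbuckets is 2^k */
    static unsigned int
    hash_index(unsigned int hash, int nbuckets)
    {
        assert(is_power_of_two(nbuckets));
        return hash & ((unsigned int) nbuckets - 1);
    }

    int
    main(void)
    {
        printf("%u\n", hash_index(0xdeadbeefU, 256));   /* same as % 256 */
        printf("%u\n", hash_index(0xdeadbeefU, 4));     /* same as % 4 */
        return 0;
    }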
Author: Tom Lane
Date:   2006-06-15 02:08:09 +00:00
Parent: e1e133f264
Commit: 8b9bc234ad

3 changed files with 181 additions and 206 deletions

diff --git a/src/backend/utils/cache/catcache.c b/src/backend/utils/cache/catcache.c
--- a/src/backend/utils/cache/catcache.c
+++ b/src/backend/utils/cache/catcache.c

@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *    $PostgreSQL: pgsql/src/backend/utils/cache/catcache.c,v 1.128 2006/03/05 15:58:45 momjian Exp $
+ *    $PostgreSQL: pgsql/src/backend/utils/cache/catcache.c,v 1.129 2006/06/15 02:08:09 tgl Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -37,20 +37,6 @@
 /* #define CACHEDEBUG */    /* turns DEBUG elogs on */
 
-/*
- * Constants related to size of the catcache.
- *
- * NCCBUCKETS must be a power of two and must be less than 64K (because
- * SharedInvalCatcacheMsg crams hash indexes into a uint16 field). In
- * practice it should be a lot less, anyway, to avoid chewing up too much
- * space on hash bucket headers.
- *
- * MAXCCTUPLES could be as small as a few hundred, if per-backend memory
- * consumption is at a premium.
- */
-#define NCCBUCKETS 256      /* Hash buckets per CatCache */
-#define MAXCCTUPLES 5000    /* Maximum # of tuples in all caches */
-
 /*
  * Given a hash value and the size of the hash table, find the bucket
  * in which the hash value belongs. Since the hash table must contain
@@ -89,7 +75,7 @@ static uint32 CatalogCacheComputeTupleHashValue(CatCache *cache,
                         HeapTuple tuple);
 #ifdef CATCACHE_STATS
-static void CatCachePrintStats(void);
+static void CatCachePrintStats(int code, Datum arg);
 #endif
 static void CatCacheRemoveCTup(CatCache *cache, CatCTup *ct);
 static void CatCacheRemoveCList(CatCache *cache, CatCList *cl);
@@ -97,7 +83,6 @@ static void CatalogCacheInitializeCache(CatCache *cache);
 static CatCTup *CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
                         uint32 hashValue, Index hashIndex,
                         bool negative);
-static void CatalogCacheCleanup(CatCTup *savect);
 static HeapTuple build_dummy_tuple(CatCache *cache, int nkeys, ScanKey skeys);
@@ -281,7 +266,7 @@ CatalogCacheComputeTupleHashValue(CatCache *cache, HeapTuple tuple)
 #ifdef CATCACHE_STATS
 static void
-CatCachePrintStats(void)
+CatCachePrintStats(int code, Datum arg)
 {
     CatCache   *cache;
     long        cc_searches = 0;
@@ -289,18 +274,14 @@ CatCachePrintStats(void)
     long        cc_neg_hits = 0;
     long        cc_newloads = 0;
     long        cc_invals = 0;
-    long        cc_discards = 0;
     long        cc_lsearches = 0;
     long        cc_lhits = 0;
 
-    elog(DEBUG2, "catcache stats dump: %d/%d tuples in catcaches",
-         CacheHdr->ch_ntup, CacheHdr->ch_maxtup);
-
     for (cache = CacheHdr->ch_caches; cache; cache = cache->cc_next)
     {
         if (cache->cc_ntup == 0 && cache->cc_searches == 0)
             continue;           /* don't print unused caches */
-        elog(DEBUG2, "catcache %s/%u: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld discards, %ld lsrch, %ld lhits",
+        elog(DEBUG2, "catcache %s/%u: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld lsrch, %ld lhits",
             cache->cc_relname,
             cache->cc_indexoid,
             cache->cc_ntup,
@@ -312,7 +293,6 @@ CatCachePrintStats(void)
             cache->cc_searches - cache->cc_hits - cache->cc_neg_hits - cache->cc_newloads,
             cache->cc_searches - cache->cc_hits - cache->cc_neg_hits,
             cache->cc_invals,
-            cache->cc_discards,
             cache->cc_lsearches,
             cache->cc_lhits);
         cc_searches += cache->cc_searches;
@@ -320,11 +300,10 @@ CatCachePrintStats(void)
         cc_neg_hits += cache->cc_neg_hits;
         cc_newloads += cache->cc_newloads;
         cc_invals += cache->cc_invals;
-        cc_discards += cache->cc_discards;
         cc_lsearches += cache->cc_lsearches;
         cc_lhits += cache->cc_lhits;
     }
-    elog(DEBUG2, "catcache totals: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld discards, %ld lsrch, %ld lhits",
+    elog(DEBUG2, "catcache totals: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld lsrch, %ld lhits",
         CacheHdr->ch_ntup,
         cc_searches,
         cc_hits,
@@ -334,7 +313,6 @@ CatCachePrintStats(void)
         cc_searches - cc_hits - cc_neg_hits - cc_newloads,
         cc_searches - cc_hits - cc_neg_hits,
         cc_invals,
-        cc_discards,
         cc_lsearches,
         cc_lhits);
 }
@@ -367,8 +345,7 @@ CatCacheRemoveCTup(CatCache *cache, CatCTup *ct)
         return;                 /* nothing left to do */
     }
 
-    /* delink from linked lists */
-    DLRemove(&ct->lrulist_elem);
+    /* delink from linked list */
     DLRemove(&ct->cache_elem);
 
     /* free associated tuple data */
@@ -568,11 +545,13 @@ AtEOXact_CatCache(bool isCommit)
     if (assert_enabled)
     {
         CatCache   *ccp;
-        Dlelem     *elt;
 
-        /* Check CatCLists */
         for (ccp = CacheHdr->ch_caches; ccp; ccp = ccp->cc_next)
         {
+            Dlelem     *elt;
+            int         i;
+
+            /* Check CatCLists */
             for (elt = DLGetHead(&ccp->cc_lists); elt; elt = DLGetSucc(elt))
             {
                 CatCList   *cl = (CatCList *) DLE_VAL(elt);
@@ -581,16 +560,21 @@ AtEOXact_CatCache(bool isCommit)
                 Assert(cl->refcount == 0);
                 Assert(!cl->dead);
             }
-        }
 
-        /* Check individual tuples */
-        for (elt = DLGetHead(&CacheHdr->ch_lrulist); elt; elt = DLGetSucc(elt))
-        {
-            CatCTup    *ct = (CatCTup *) DLE_VAL(elt);
+            /* Check individual tuples */
+            for (i = 0; i < ccp->cc_nbuckets; i++)
+            {
+                for (elt = DLGetHead(&ccp->cc_bucket[i]);
+                     elt;
+                     elt = DLGetSucc(elt))
+                {
+                    CatCTup    *ct = (CatCTup *) DLE_VAL(elt);
 
-            Assert(ct->ct_magic == CT_MAGIC);
-            Assert(ct->refcount == 0);
-            Assert(!ct->dead);
+                    Assert(ct->ct_magic == CT_MAGIC);
+                    Assert(ct->refcount == 0);
+                    Assert(!ct->dead);
+                }
+            }
         }
     }
 #endif
@@ -796,12 +780,27 @@ InitCatCache(int id,
              Oid indexoid,
              int reloidattr,
              int nkeys,
-             const int *key)
+             const int *key,
+             int nbuckets)
 {
     CatCache   *cp;
     MemoryContext oldcxt;
     int         i;
 
+    /*
+     * nbuckets is the number of hash buckets to use in this catcache.
+     * Currently we just use a hard-wired estimate of an appropriate size
+     * for each cache; maybe later make them dynamically resizable?
+     *
+     * nbuckets must be a power of two. We check this via Assert rather than
+     * a full runtime check because the values will be coming from constant
+     * tables.
+     *
+     * If you're confused by the power-of-two check, see comments in
+     * bitmapset.c for an explanation.
+     */
+    Assert(nbuckets > 0 && (nbuckets & -nbuckets) == nbuckets);
+
     /*
      * first switch to the cache context so our allocations do not vanish at
      * the end of a transaction
@@ -812,17 +811,15 @@ InitCatCache(int id,
     oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
 
     /*
-     * if first time through, initialize the cache group header, including
-     * global LRU list header
+     * if first time through, initialize the cache group header
      */
     if (CacheHdr == NULL)
     {
         CacheHdr = (CatCacheHeader *) palloc(sizeof(CatCacheHeader));
         CacheHdr->ch_caches = NULL;
         CacheHdr->ch_ntup = 0;
-        CacheHdr->ch_maxtup = MAXCCTUPLES;
-        DLInitList(&CacheHdr->ch_lrulist);
 #ifdef CATCACHE_STATS
+        /* set up to dump stats at backend exit */
         on_proc_exit(CatCachePrintStats, 0);
 #endif
     }
@@ -832,7 +829,7 @@ InitCatCache(int id,
     *
     * Note: we assume zeroing initializes the Dllist headers correctly
     */
-    cp = (CatCache *) palloc0(sizeof(CatCache) + NCCBUCKETS * sizeof(Dllist));
+    cp = (CatCache *) palloc0(sizeof(CatCache) + nbuckets * sizeof(Dllist));
 
     /*
      * initialize the cache's relation information for the relation
@@ -847,7 +844,7 @@ InitCatCache(int id,
     cp->cc_tupdesc = (TupleDesc) NULL;
     cp->cc_reloidattr = reloidattr;
     cp->cc_ntup = 0;
-    cp->cc_nbuckets = NCCBUCKETS;
+    cp->cc_nbuckets = nbuckets;
     cp->cc_nkeys = nkeys;
     for (i = 0; i < nkeys; ++i)
         cp->cc_key[i] = key[i];
@@ -1162,13 +1159,11 @@ SearchCatCache(CatCache *cache,
             continue;
 
         /*
-         * we found a match in the cache: move it to the front of the global
-         * LRU list. We also move it to the front of the list for its
-         * hashbucket, in order to speed subsequent searches. (The most
-         * frequently accessed elements in any hashbucket will tend to be near
-         * the front of the hashbucket's list.)
+         * We found a match in the cache. Move it to the front of the list
+         * for its hashbucket, in order to speed subsequent searches. (The
+         * most frequently accessed elements in any hashbucket will tend to be
+         * near the front of the hashbucket's list.)
          */
-        DLMoveToFront(&ct->lrulist_elem);
         DLMoveToFront(&ct->cache_elem);
 
         /*
@@ -1414,14 +1409,12 @@ SearchCatCacheList(CatCache *cache,
             continue;
 
         /*
-         * We found a matching list: mark it as touched since the last
-         * CatalogCacheCleanup() sweep. Also move the list to the front of
-         * the cache's list-of-lists, to speed subsequent searches. (We do not
+         * We found a matching list. Move the list to the front of the
+         * cache's list-of-lists, to speed subsequent searches. (We do not
          * move the members to the fronts of their hashbucket lists, however,
          * since there's no point in that unless they are searched for
          * individually.)
          */
-        cl->touched = true;
         DLMoveToFront(&cl->cache_elem);
 
         /* Bump the list's refcount and return it */
@@ -1504,10 +1497,7 @@ SearchCatCacheList(CatCache *cache,
             if (ct->c_list)
                 continue;
 
-            /* Found a match, so move it to front */
-            DLMoveToFront(&ct->lrulist_elem);
-
-            break;
+            break;              /* A-OK */
         }
 
         if (elt == NULL)
@@ -1577,7 +1567,6 @@ SearchCatCacheList(CatCache *cache,
     cl->refcount = 0;           /* for the moment */
     cl->dead = false;
     cl->ordered = ordered;
-    cl->touched = false;        /* we already moved members to front */
     cl->nkeys = nkeys;
     cl->hash_value = lHashValue;
     cl->n_members = nmembers;
@@ -1654,11 +1643,10 @@ CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
     /*
      * Finish initializing the CatCTup header, and add it to the cache's
-     * linked lists and counts.
+     * linked list and counts.
      */
     ct->ct_magic = CT_MAGIC;
     ct->my_cache = cache;
-    DLInitElem(&ct->lrulist_elem, (void *) ct);
     DLInitElem(&ct->cache_elem, (void *) ct);
     ct->c_list = NULL;
     ct->refcount = 0;           /* for the moment */
@@ -1666,97 +1654,14 @@ CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
     ct->negative = negative;
     ct->hash_value = hashValue;
 
-    DLAddHead(&CacheHdr->ch_lrulist, &ct->lrulist_elem);
     DLAddHead(&cache->cc_bucket[hashIndex], &ct->cache_elem);
 
     cache->cc_ntup++;
     CacheHdr->ch_ntup++;
 
-    /*
-     * If we've exceeded the desired size of the caches, try to throw away the
-     * least recently used entry(s). NB: be careful not to throw away the
-     * newly-built entry...
-     */
-    if (CacheHdr->ch_ntup > CacheHdr->ch_maxtup)
-        CatalogCacheCleanup(ct);
-
     return ct;
 }
 
-/*
- * CatalogCacheCleanup
- *      Try to reduce the size of the catcaches when they get too big
- *
- * savect can be NULL, or a specific CatCTup not to remove even if it
- * has zero refcount.
- */
-static void
-CatalogCacheCleanup(CatCTup *savect)
-{
-    int         tup_target;
-    CatCache   *ccp;
-    Dlelem     *elt,
-               *prevelt;
-
-    /*
-     * Each time we have to do this, try to cut the cache size down to about
-     * 90% of the maximum.
-     */
-    tup_target = (CacheHdr->ch_maxtup * 9) / 10;
-
-    /*
-     * Our strategy for managing CatCLists is that, each time we have to throw
-     * away some cache entries, we first move-to-front all the members of
-     * CatCLists that have been touched since the last cleanup sweep. Then we
-     * do strict LRU elimination by individual tuples, zapping a list if any
-     * of its members gets zapped. Before PostgreSQL 8.1, we moved members to
-     * front each time their owning list was touched, which was arguably more
-     * fair in balancing list members against standalone tuples --- but the
-     * overhead for large lists was horrendous. This scheme is more heavily
-     * biased towards preserving lists, but that is not necessarily bad
-     * either.
-     */
-    for (ccp = CacheHdr->ch_caches; ccp; ccp = ccp->cc_next)
-    {
-        for (elt = DLGetHead(&ccp->cc_lists); elt; elt = DLGetSucc(elt))
-        {
-            CatCList   *cl = (CatCList *) DLE_VAL(elt);
-
-            Assert(cl->cl_magic == CL_MAGIC);
-            if (cl->touched && !cl->dead)
-            {
-                int         i;
-
-                for (i = 0; i < cl->n_members; i++)
-                    DLMoveToFront(&cl->members[i]->lrulist_elem);
-            }
-            cl->touched = false;
-        }
-    }
-
-    /* Now get rid of unreferenced tuples in reverse global LRU order */
-    for (elt = DLGetTail(&CacheHdr->ch_lrulist); elt; elt = prevelt)
-    {
-        CatCTup    *ct = (CatCTup *) DLE_VAL(elt);
-
-        prevelt = DLGetPred(elt);
-
-        if (ct->refcount == 0 &&
-            (ct->c_list == NULL || ct->c_list->refcount == 0) &&
-            ct != savect)
-        {
-#ifdef CATCACHE_STATS
-            ct->my_cache->cc_discards++;
-#endif
-            CatCacheRemoveCTup(ct->my_cache, ct);
-
-            /* Quit when we've removed enough tuples */
-            if (CacheHdr->ch_ntup <= tup_target)
-                break;
-        }
-    }
-}
-
 /*
  * build_dummy_tuple
  *      Generate a palloc'd HeapTuple that contains the specified key