postgres/src/include/utils/catcache.h
Commit db01c90b2f by Tom Lane: Silence Valgrind leakage complaints in more-or-less-hackish ways.
These changes don't actually fix any leaks.  They just make sure that
Valgrind will find pointers to data structures that remain allocated
at process exit, and thus not falsely complain about leaks.  In
particular, we are trying to avoid situations where there is no
pointer to the beginning of an allocated block (except possibly
within the block itself, which Valgrind won't count).

* Because dynahash.c never frees hashtable storage except by deleting
the whole hashtable context, it doesn't bother to track the individual
blocks of elements allocated by element_alloc().  This results in
"possibly lost" complaints from Valgrind except when the first element
of each block is actively in use.  (Otherwise it'll be on a freelist,
but very likely only reachable via "interior pointers" within element
blocks, which doesn't satisfy Valgrind.)

To fix, if we're building with USE_VALGRIND, expend an extra pointer's
worth of space in each element block so that we can chain them all
together from the HTAB header.  Skip this in shared hashtables though:
Valgrind doesn't track those, and we'd need additional locking to make
it safe to manipulate a shared chain.

While here, update a comment obsoleted by 9c911ec06.
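
The chaining idea can be sketched roughly as follows; the type, field, and
function names here are illustrative stand-ins, not the actual dynahash.c code:

    #include <stdlib.h>

    #ifdef USE_VALGRIND
    /* hypothetical stand-in for the real HTAB header */
    typedef struct ExampleHTAB
    {
        void   *element_blocks;     /* chain of all element blocks */
        /* ... the real HTAB has many more fields ... */
    } ExampleHTAB;

    /*
     * Allocate one block of hash elements.  The extra leading pointer links
     * the block into a chain rooted in the header, so Valgrind always sees a
     * pointer to the block's start even when every element sits on a
     * freelist reachable only through interior pointers.
     */
    static void *
    example_element_alloc(ExampleHTAB *hashp, size_t nbytes)
    {
        char   *block = malloc(sizeof(void *) + nbytes);

        if (block == NULL)
            return NULL;
        *(void **) block = hashp->element_blocks;
        hashp->element_blocks = block;
        return block + sizeof(void *);  /* elements start after the link */
    }
    #endif                              /* USE_VALGRIND */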

* Put the dlist_node fields of catctup and catclist structs first.
This ensures that the dlist pointers point to the starts of these
palloc blocks, and thus that Valgrind won't consider them
"possibly lost".

* The postmaster's PMChild structs and the autovac launcher's
avl_dbase structs also have the dlist_node-is-not-first problem,
but putting it first still wouldn't silence the warning because we
bulk-allocate those structs in an array, so that Valgrind sees a
single allocation.  Commonly the first array element will be pointed
to only from some later element, so that the reference would be an
interior pointer even if it pointed to the array start.  (This is the
same issue as for dynahash elements.)  Since these are pretty simple
data structures, I don't feel too bad about faking out Valgrind by
just keeping a static pointer to the array start.
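
The static-pointer trick for those bulk-allocated arrays amounts to something
like this sketch (again with made-up names, not the actual postmaster or
autovacuum code):

    #include <stdlib.h>

    /* some element type that lives on dlists via an embedded dlist_node */
    typedef struct ExampleSlot ExampleSlot;

    #ifdef USE_VALGRIND
    /*
     * Never read; exists only so Valgrind sees a pointer to the start of the
     * bulk allocation instead of just interior pointers from dlist links.
     */
    static ExampleSlot *example_slots_for_valgrind = NULL;
    #endif

    static ExampleSlot *
    example_allocate_slots(size_t nslots, size_t slotsize)
    {
        ExampleSlot *slots = malloc(nslots * slotsize);

    #ifdef USE_VALGRIND
        example_slots_for_valgrind = slots;
    #endif
        /* ... caller threads each slot onto a freelist dlist ... */
        return slots;
    }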

(This is all quite hacky, and it's not hard to imagine usages where
we'd need some other idea in order to have reasonable leak tracking of
structures that are only accessible via dlist_node lists.  But these
changes seem to be enough to silence this class of leakage complaints
for the moment.)

* Free a couple of data structures manually near the end of an
autovacuum worker's run when USE_VALGRIND, and ensure that the final
vac_update_datfrozenxid() call is done in a non-permanent context.
This doesn't have any real effect on the process's total memory
consumption, since we're going to exit as soon as that last
transaction is done.  But it does pacify Valgrind.
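
A minimal sketch of the non-permanent-context part, assuming a throwaway
context created just for the call (the real change may arrange the context
differently):

    /*
     * Sketch only: run the final vac_update_datfrozenxid() call in a context
     * that will be discarded, so anything it allocates does not look leaked.
     * Assumes postgres.h, utils/memutils.h, and commands/vacuum.h.
     */
    static void
    example_update_datfrozenxid(void)
    {
        MemoryContext tmpcxt;
        MemoryContext oldcxt;

        tmpcxt = AllocSetContextCreate(CurrentMemoryContext,
                                       "update datfrozenxid",
                                       ALLOCSET_DEFAULT_SIZES);
        oldcxt = MemoryContextSwitchTo(tmpcxt);

        vac_update_datfrozenxid();

        MemoryContextSwitchTo(oldcxt);
        MemoryContextDelete(tmpcxt);
    }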

* Valgrind complains about the postmaster's socket-files and
lock-files lists being leaked, which we can silence by just
not nulling out the static pointers to them.

* Valgrind seems not to consider the global "environ" variable as
a valid root pointer; so when we allocate a new environment array,
it claims that data is leaked.  To fix that, keep our own
statically-allocated copy of the pointer, similarly to the previous
item.
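
That workaround can be sketched as follows, with illustrative names:

    extern char **environ;

    #ifdef USE_VALGRIND
    /* never read; gives Valgrind a root pointer for the new environ array */
    static char **example_saved_environ = NULL;
    #endif

    static void
    example_install_environ(char **newenv)
    {
        environ = newenv;
    #ifdef USE_VALGRIND
        example_saved_environ = newenv;
    #endif
    }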

Author: Tom Lane <tgl@sss.pgh.pa.us>
Reviewed-by: Andres Freund <andres@anarazel.de>
Discussion: https://postgr.es/m/285483.1746756246@sss.pgh.pa.us
Date: 2025-08-02 21:59:46 -04:00


/*-------------------------------------------------------------------------
 *
 * catcache.h
 *    Low-level catalog cache definitions.
 *
 * NOTE: every catalog cache must have a corresponding unique index on
 * the system table that it caches --- ie, the index must match the keys
 * used to do lookups in this cache.  All cache fetches are done with
 * indexscans (under normal conditions).  The index should be unique to
 * guarantee that there can only be one matching row for a key combination.
 *
 *
 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * src/include/utils/catcache.h
 *
 *-------------------------------------------------------------------------
 */
#ifndef CATCACHE_H
#define CATCACHE_H

#include "access/htup.h"
#include "access/skey.h"
#include "lib/ilist.h"
#include "utils/relcache.h"

/*
 *  struct catctup:         individual tuple in the cache.
 *  struct catclist:        list of tuples matching a partial key.
 *  struct catcache:        information for managing a cache.
 *  struct catcacheheader:  information for managing all the caches.
 */

#define CATCACHE_MAXKEYS        4

/* function computing a datum's hash */
typedef uint32 (*CCHashFN) (Datum datum);

/* function computing equality of two datums */
typedef bool (*CCFastEqualFN) (Datum a, Datum b);
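
#ifdef NOT_USED
/*
 * Illustrative sketch only (not part of this header): functions of these
 * shapes are what catcache.c installs per key column; the names and the
 * trivial hash below are made up for the example, and a real implementation
 * would use the hash support in common/hashfn.h.
 */
static uint32
example_oid_hash(Datum datum)
{
    return (uint32) DatumGetObjectId(datum);
}

static bool
example_oid_fastequal(Datum a, Datum b)
{
    return DatumGetObjectId(a) == DatumGetObjectId(b);
}
#endif                          /* NOT_USED */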

typedef struct catcache
{
    int         id;             /* cache identifier --- see syscache.h */
    int         cc_nbuckets;    /* # of hash buckets in this cache */
    TupleDesc   cc_tupdesc;     /* tuple descriptor (copied from reldesc) */
    dlist_head *cc_bucket;      /* hash buckets */
    CCHashFN    cc_hashfunc[CATCACHE_MAXKEYS];  /* hash function for each key */
    CCFastEqualFN cc_fastequal[CATCACHE_MAXKEYS];   /* fast equal function for
                                                     * each key */
    int         cc_keyno[CATCACHE_MAXKEYS]; /* AttrNumber of each key */
    int         cc_nkeys;       /* # of keys (1..CATCACHE_MAXKEYS) */
    int         cc_ntup;        /* # of tuples currently in this cache */
    int         cc_nlist;       /* # of CatCLists currently in this cache */
    int         cc_nlbuckets;   /* # of CatCList hash buckets in this cache */
    dlist_head *cc_lbucket;     /* hash buckets for CatCLists */
    const char *cc_relname;     /* name of relation the tuples come from */
    Oid         cc_reloid;      /* OID of relation the tuples come from */
    Oid         cc_indexoid;    /* OID of index matching cache keys */
    bool        cc_relisshared; /* is relation shared across databases? */
    slist_node  cc_next;        /* list link */
    ScanKeyData cc_skey[CATCACHE_MAXKEYS];  /* precomputed key info for heap
                                             * scans */

    /*
     * Keep these at the end, so that compiling catcache.c with CATCACHE_STATS
     * doesn't break ABI for other modules
     */
#ifdef CATCACHE_STATS
    long        cc_searches;    /* total # searches against this cache */
    long        cc_hits;        /* # of matches against existing entry */
    long        cc_neg_hits;    /* # of matches against negative entry */
    long        cc_newloads;    /* # of successful loads of new entry */

    /*
     * cc_searches - (cc_hits + cc_neg_hits + cc_newloads) is number of failed
     * searches, each of which will result in loading a negative entry
     */
    long        cc_invals;      /* # of entries invalidated from cache */
    long        cc_lsearches;   /* total # list-searches */
    long        cc_lhits;       /* # of matches against existing lists */
#endif
} CatCache;

typedef struct catctup
{
    /*
     * Each tuple in a cache is a member of a dlist that stores the elements
     * of its hash bucket.  We keep each dlist in LRU order to speed repeated
     * lookups.  Keep the dlist_node field first so that Valgrind understands
     * the struct is reachable.
     */
    dlist_node  cache_elem;     /* list member of per-bucket list */

    int         ct_magic;       /* for identifying CatCTup entries */
#define CT_MAGIC    0x57261502

    uint32      hash_value;     /* hash value for this tuple's keys */

    /*
     * Lookup keys for the entry.  By-reference datums point into the tuple
     * for positive cache entries, and are separately allocated for negative
     * ones.
     */
    Datum       keys[CATCACHE_MAXKEYS];

    /*
     * A tuple marked "dead" must not be returned by subsequent searches.
     * However, it won't be physically deleted from the cache until its
     * refcount goes to zero.  (If it's a member of a CatCList, the list's
     * refcount must go to zero, too; also, remember to mark the list dead at
     * the same time the tuple is marked.)
     *
     * A negative cache entry is an assertion that there is no tuple matching
     * a particular key.  This is just as useful as a normal entry so far as
     * avoiding catalog searches is concerned.  Management of positive and
     * negative entries is identical.
     */
    int         refcount;       /* number of active references */
    bool        dead;           /* dead but not yet removed? */
    bool        negative;       /* negative cache entry? */
    HeapTupleData tuple;        /* tuple management header */

    /*
     * The tuple may also be a member of at most one CatCList.  (If a single
     * catcache is list-searched with varying numbers of keys, we may have to
     * make multiple entries for the same tuple because of this restriction.
     * Currently, that's not expected to be common, so we accept the potential
     * inefficiency.)
     */
    struct catclist *c_list;    /* containing CatCList, or NULL if none */

    CatCache   *my_cache;       /* link to owning catcache */

    /* properly aligned tuple data follows, unless a negative entry */
} CatCTup;
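
#ifdef NOT_USED
/*
 * Illustrative sketch only (not part of this header, details simplified):
 * roughly how a positive entry is laid out in a single palloc chunk, with
 * the tuple body starting at the first maxaligned offset past the struct.
 */
static CatCTup *
example_alloc_catctup(HeapTuple dtp)
{
    CatCTup    *ct;

    ct = (CatCTup *) palloc(sizeof(CatCTup) + MAXIMUM_ALIGNOF + dtp->t_len);
    ct->tuple.t_len = dtp->t_len;
    ct->tuple.t_self = dtp->t_self;
    ct->tuple.t_tableOid = dtp->t_tableOid;
    ct->tuple.t_data = (HeapTupleHeader)
        MAXALIGN(((char *) ct) + sizeof(CatCTup));
    memcpy(ct->tuple.t_data, dtp->t_data, dtp->t_len);
    return ct;
}
#endif                          /* NOT_USED */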

/*
 * A CatCList describes the result of a partial search, ie, a search using
 * only the first K key columns of an N-key cache.  We store the keys used
 * into the keys attribute to represent the stored key set.  The CatCList
 * object contains links to cache entries for all the table rows satisfying
 * the partial key.  (Note: none of these will be negative cache entries.)
 *
 * Each CatCList is entered into one of its cache's CatCList hash buckets
 * (cc_lbucket above), according to the hash of its lookup keys.
 *
 * A list marked "dead" must not be returned by subsequent searches.
 * However, it won't be physically deleted from the cache until its
 * refcount goes to zero.  (A list should be marked dead if any of its
 * member entries are dead.)
 *
 * If "ordered" is true then the member tuples appear in the order of the
 * cache's underlying index.  This will be true in normal operation, but
 * might not be true during bootstrap or recovery operations.  (namespace.c
 * is able to save some cycles when it is true.)
 */
typedef struct catclist
{
    /*
     * Keep the dlist_node field first so that Valgrind understands the
     * struct is reachable.
     */
    dlist_node  cache_elem;     /* list member of per-catcache list */

    int         cl_magic;       /* for identifying CatCList entries */
#define CL_MAGIC    0x52765103

    uint32      hash_value;     /* hash value for lookup keys */

    /*
     * Lookup keys for the entry, with the first nkeys elements being valid.
     * All by-reference datums are separately allocated.
     */
    Datum       keys[CATCACHE_MAXKEYS];

    int         refcount;       /* number of active references */
    bool        dead;           /* dead but not yet removed? */
    bool        ordered;        /* members listed in index order? */
    short       nkeys;          /* number of lookup keys specified */
    int         n_members;      /* number of member tuples */
    CatCache   *my_cache;       /* link to owning catcache */
    CatCTup    *members[FLEXIBLE_ARRAY_MEMBER]; /* members */
} CatCList;

typedef struct catcacheheader
{
    slist_head  ch_caches;      /* head of list of CatCache structs */
    int         ch_ntup;        /* # of tuples in all caches */
} CatCacheHeader;


/* this extern duplicates utils/memutils.h... */
extern PGDLLIMPORT MemoryContext CacheMemoryContext;

extern void CreateCacheMemoryContext(void);

extern CatCache *InitCatCache(int id, Oid reloid, Oid indexoid,
                              int nkeys, const int *key,
                              int nbuckets);
extern void InitCatCachePhase2(CatCache *cache, bool touch_index);

extern HeapTuple SearchCatCache(CatCache *cache,
                                Datum v1, Datum v2, Datum v3, Datum v4);
extern HeapTuple SearchCatCache1(CatCache *cache,
                                 Datum v1);
extern HeapTuple SearchCatCache2(CatCache *cache,
                                 Datum v1, Datum v2);
extern HeapTuple SearchCatCache3(CatCache *cache,
                                 Datum v1, Datum v2, Datum v3);
extern HeapTuple SearchCatCache4(CatCache *cache,
                                 Datum v1, Datum v2, Datum v3, Datum v4);
extern void ReleaseCatCache(HeapTuple tuple);
extern uint32 GetCatCacheHashValue(CatCache *cache,
                                   Datum v1, Datum v2,
                                   Datum v3, Datum v4);
extern CatCList *SearchCatCacheList(CatCache *cache, int nkeys,
                                    Datum v1, Datum v2,
                                    Datum v3);
extern void ReleaseCatCacheList(CatCList *list);

extern void ResetCatalogCaches(void);
extern void ResetCatalogCachesExt(bool debug_discard);
extern void CatalogCacheFlushCatalog(Oid catId);
extern void CatCacheInvalidate(CatCache *cache, uint32 hashValue);
extern void PrepareToInvalidateCacheTuple(Relation relation,
                                          HeapTuple tuple,
                                          HeapTuple newtuple,
                                          void (*function) (int, uint32, Oid, void *),
                                          void *context);
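
#ifdef NOT_USED
/*
 * Illustrative sketch only (not part of this header): a typical lookup and
 * release, assuming "cache" is an Oid-keyed cache.  Most backend code goes
 * through the SearchSysCache*() wrappers in utils/syscache.h instead of
 * calling these functions directly.
 */
static void
example_catcache_lookup(CatCache *cache, Oid someoid)
{
    HeapTuple   tup = SearchCatCache1(cache, ObjectIdGetDatum(someoid));

    if (HeapTupleIsValid(tup))
    {
        /* ... examine the cached tuple while holding the reference ... */
        ReleaseCatCache(tup);
    }
}
#endif                          /* NOT_USED */
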
#endif /* CATCACHE_H */