Further cleanup of dynahash.c API, in pursuit of portability and readability.  Bizarre '(long *) TRUE' return convention is gone, in favor of just raising an error internally in dynahash.c when we detect hashtable corruption.  HashTableWalk is gone, in favor of using hash_seq_search directly, since it had no hope of working with non-LONGALIGNable datatypes.  Simplify some other code that was made undesirably grotty by proximity to HashTableWalk.
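
For orientation, here is a minimal sketch of the calling conventions this commit moves callers to. The cache and entry names (my_cache, MyEntry, lookup_or_insert) are hypothetical, not part of the patch; the semantics shown are the ones documented in the dynahash.c changes below:

    #include "postgres.h"
    #include "utils/hsearch.h"

    typedef struct MyEntry          /* hypothetical entry type */
    {
        Oid         key;
        void       *data;
    } MyEntry;

    static void *
    lookup_or_insert(HTAB *cache, Oid key)
    {
        MyEntry    *entry;
        bool        found;

        /* HASH_FIND: foundPtr may now be NULL, and a NULL result simply
         * means "not found".  Corruption is elog()'d inside dynahash.c
         * instead of being signalled by a '(long *) TRUE' return. */
        entry = (MyEntry *) hash_search(cache, (void *) &key, HASH_FIND, NULL);
        if (entry != NULL)
            return entry->data;

        /* HASH_ENTER: a NULL result now means out of memory; *foundPtr
         * reports whether an existing entry was returned instead. */
        entry = (MyEntry *) hash_search(cache, (void *) &key, HASH_ENTER, &found);
        if (entry == NULL)
            elog(ERROR, "out of memory");
        if (!found)
            entry->data = NULL;     /* caller initializes a new entry */
        return entry->data;
    }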
src/backend/utils/adt/ri_triggers.c

@@ -18,7 +18,7 @@
  * Portions Copyright (c) 2000-2001, PostgreSQL Global Development Group
  * Copyright 1999 Jan Wieck
  *
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/ri_triggers.c,v 1.26 2001/10/01 05:36:16 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/ri_triggers.c,v 1.27 2001/10/05 17:28:12 tgl Exp $
  *
  * ----------
  */
@@ -2990,14 +2990,14 @@ ri_InitHashTables(void)
 	ctl.keysize = sizeof(RI_QueryKey);
 	ctl.entrysize = sizeof(RI_QueryHashEntry);
 	ctl.hash = tag_hash;
-	ri_query_cache = hash_create(RI_INIT_QUERYHASHSIZE, &ctl,
-								 HASH_ELEM | HASH_FUNCTION);
+	ri_query_cache = hash_create("RI query cache", RI_INIT_QUERYHASHSIZE,
+								 &ctl, HASH_ELEM | HASH_FUNCTION);

 	ctl.keysize = sizeof(Oid);
 	ctl.entrysize = sizeof(RI_OpreqHashEntry);
 	ctl.hash = tag_hash;
-	ri_opreq_cache = hash_create(RI_INIT_OPREQHASHSIZE, &ctl,
-								 HASH_ELEM | HASH_FUNCTION);
+	ri_opreq_cache = hash_create("RI OpReq cache", RI_INIT_OPREQHASHSIZE,
+								 &ctl, HASH_ELEM | HASH_FUNCTION);
 }
@@ -3012,7 +3012,6 @@ static void *
 ri_FetchPreparedPlan(RI_QueryKey *key)
 {
 	RI_QueryHashEntry *entry;
-	bool		found;

 	/*
 	 * On the first call initialize the hashtable
@@ -3025,10 +3024,8 @@ ri_FetchPreparedPlan(RI_QueryKey *key)
 	 */
 	entry = (RI_QueryHashEntry *) hash_search(ri_query_cache,
 											  (void *) key,
-											  HASH_FIND, &found);
+											  HASH_FIND, NULL);
 	if (entry == NULL)
-		elog(FATAL, "error in RI plan cache");
-	if (!found)
 		return NULL;
 	return entry->plan;
 }
@@ -3059,7 +3056,7 @@ ri_HashPreparedPlan(RI_QueryKey *key, void *plan)
 											  (void *) key,
 											  HASH_ENTER, &found);
 	if (entry == NULL)
-		elog(FATAL, "can't insert into RI plan cache");
+		elog(ERROR, "out of memory for RI plan cache");
 	entry->plan = plan;
 }

@@ -3235,16 +3232,14 @@ ri_AttributesEqual(Oid typeid, Datum oldvalue, Datum newvalue)
 	 */
 	entry = (RI_OpreqHashEntry *) hash_search(ri_opreq_cache,
 											  (void *) &typeid,
-											  HASH_FIND, &found);
-	if (entry == NULL)
-		elog(FATAL, "error in RI operator cache");
+											  HASH_FIND, NULL);

 	/*
 	 * If not found, lookup the OPERNAME system cache for it to get the
 	 * func OID, then do the function manager lookup, and remember that
 	 * info.
 	 */
-	if (!found)
+	if (!entry)
 	{
 		HeapTuple	opr_tup;
 		Oid			opr_proc;
@@ -3278,7 +3273,7 @@ ri_AttributesEqual(Oid typeid, Datum oldvalue, Datum newvalue)
 											  (void *) &typeid,
 											  HASH_ENTER, &found);
 	if (entry == NULL)
-		elog(FATAL, "can't insert into RI operator cache");
+		elog(ERROR, "out of memory for RI operator cache");

 	entry->typeid = typeid;
 	memcpy(&(entry->oprfmgrinfo), &finfo, sizeof(FmgrInfo));
src/backend/utils/cache/relcache.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $Header: /cvsroot/pgsql/src/backend/utils/cache/relcache.c,v 1.144 2001/10/01 05:36:16 tgl Exp $
+ *	  $Header: /cvsroot/pgsql/src/backend/utils/cache/relcache.c,v 1.145 2001/10/05 17:28:12 tgl Exp $
  *
  *-------------------------------------------------------------------------
 */
@@ -48,12 +48,12 @@
 #include "catalog/pg_rewrite.h"
 #include "catalog/pg_type.h"
 #include "commands/trigger.h"
-#include "lib/hasht.h"
 #include "miscadmin.h"
 #include "storage/smgr.h"
 #include "utils/builtins.h"
 #include "utils/catcache.h"
+#include "utils/fmgroids.h"
 #include "utils/hsearch.h"
 #include "utils/memutils.h"
 #include "utils/relcache.h"
 #include "utils/temprel.h"
@@ -144,38 +144,33 @@ do { \
 											   HASH_ENTER, \
 											   &found); \
 	if (namehentry == NULL) \
-		elog(FATAL, "can't insert into relation descriptor cache"); \
-	if (found && !IsBootstrapProcessingMode()) \
-		/* used to give notice -- now just keep quiet */ ; \
+		elog(ERROR, "out of memory for relation descriptor cache"); \
+	/* used to give notice if found -- now just keep quiet */ ; \
 	namehentry->reldesc = RELATION; \
 	idhentry = (RelIdCacheEnt*)hash_search(RelationIdCache, \
 										   (void *) &(RELATION->rd_id), \
 										   HASH_ENTER, \
 										   &found); \
 	if (idhentry == NULL) \
-		elog(FATAL, "can't insert into relation descriptor cache"); \
-	if (found && !IsBootstrapProcessingMode()) \
-		/* used to give notice -- now just keep quiet */ ; \
+		elog(ERROR, "out of memory for relation descriptor cache"); \
+	/* used to give notice if found -- now just keep quiet */ ; \
 	idhentry->reldesc = RELATION; \
 	nodentry = (RelNodeCacheEnt*)hash_search(RelationNodeCache, \
 											 (void *) &(RELATION->rd_node), \
 											 HASH_ENTER, \
 											 &found); \
 	if (nodentry == NULL) \
-		elog(FATAL, "can't insert into relation descriptor cache"); \
-	if (found && !IsBootstrapProcessingMode()) \
-		/* used to give notice -- now just keep quiet */ ; \
+		elog(ERROR, "out of memory for relation descriptor cache"); \
+	/* used to give notice if found -- now just keep quiet */ ; \
 	nodentry->reldesc = RELATION; \
 } while(0)

 #define RelationNameCacheLookup(NAME, RELATION) \
 do { \
-	RelNameCacheEnt *hentry; bool found; \
+	RelNameCacheEnt *hentry; \
 	hentry = (RelNameCacheEnt*)hash_search(RelationNameCache, \
-										   (void *) (NAME),HASH_FIND,&found); \
-	if (hentry == NULL) \
-		elog(FATAL, "error in CACHE"); \
-	if (found) \
+										   (void *) (NAME), HASH_FIND,NULL); \
+	if (hentry) \
 		RELATION = hentry->reldesc; \
 	else \
 		RELATION = NULL; \
@@ -184,12 +179,9 @@ do { \
 #define RelationIdCacheLookup(ID, RELATION) \
 do { \
 	RelIdCacheEnt *hentry; \
-	bool found; \
 	hentry = (RelIdCacheEnt*)hash_search(RelationIdCache, \
-										 (void *)&(ID),HASH_FIND, &found); \
-	if (hentry == NULL) \
-		elog(FATAL, "error in CACHE"); \
-	if (found) \
+										 (void *)&(ID), HASH_FIND,NULL); \
+	if (hentry) \
 		RELATION = hentry->reldesc; \
 	else \
 		RELATION = NULL; \
@@ -198,12 +190,9 @@ do { \
 #define RelationNodeCacheLookup(NODE, RELATION) \
 do { \
 	RelNodeCacheEnt *hentry; \
-	bool found; \
 	hentry = (RelNodeCacheEnt*)hash_search(RelationNodeCache, \
-										   (void *)&(NODE),HASH_FIND, &found); \
-	if (hentry == NULL) \
-		elog(FATAL, "error in CACHE"); \
-	if (found) \
+										   (void *)&(NODE), HASH_FIND,NULL); \
+	if (hentry) \
 		RELATION = hentry->reldesc; \
 	else \
 		RELATION = NULL; \
@@ -212,29 +201,22 @@ do { \
 #define RelationCacheDelete(RELATION) \
 do { \
 	RelNameCacheEnt *namehentry; RelIdCacheEnt *idhentry; \
-	char *relname; RelNodeCacheEnt *nodentry; bool found; \
+	char *relname; RelNodeCacheEnt *nodentry; \
 	relname = RelationGetPhysicalRelationName(RELATION); \
 	namehentry = (RelNameCacheEnt*)hash_search(RelationNameCache, \
 											   relname, \
-											   HASH_REMOVE, \
-											   &found); \
+											   HASH_REMOVE, NULL); \
 	if (namehentry == NULL) \
-		elog(FATAL, "can't delete from relation descriptor cache"); \
-	if (!found) \
 		elog(NOTICE, "trying to delete a reldesc that does not exist."); \
 	idhentry = (RelIdCacheEnt*)hash_search(RelationIdCache, \
 										   (void *)&(RELATION->rd_id), \
-										   HASH_REMOVE, &found); \
+										   HASH_REMOVE, NULL); \
 	if (idhentry == NULL) \
-		elog(FATAL, "can't delete from relation descriptor cache"); \
-	if (!found) \
 		elog(NOTICE, "trying to delete a reldesc that does not exist."); \
 	nodentry = (RelNodeCacheEnt*)hash_search(RelationNodeCache, \
 											 (void *)&(RELATION->rd_node), \
-											 HASH_REMOVE, &found); \
+											 HASH_REMOVE, NULL); \
 	if (nodentry == NULL) \
-		elog(FATAL, "can't delete from relation descriptor cache"); \
-	if (!found) \
 		elog(NOTICE, "trying to delete a reldesc that does not exist."); \
 } while(0)
@@ -248,8 +230,6 @@ static void RelationReloadClassinfo(Relation relation);
 #endif	 /* ENABLE_REINDEX_NAILED_RELATIONS */
 static void RelationFlushRelation(Relation relation);
 static Relation RelationNameCacheGetRelation(const char *relationName);
-static void RelationCacheInvalidateWalker(Relation *relationPtr, Datum listp);
-static void RelationCacheAbortWalker(Relation *relationPtr, Datum dummy);
 static void init_irels(void);
 static void write_irels(void);

@@ -1842,58 +1822,56 @@ RelationFlushIndexes(Relation *r,
  *
  * We do this in two phases: the first pass deletes deletable items, and
  * the second one rebuilds the rebuildable items.  This is essential for
- * safety, because HashTableWalk only copes with concurrent deletion of
+ * safety, because hash_seq_search only copes with concurrent deletion of
  * the element it is currently visiting.  If a second SI overflow were to
  * occur while we are walking the table, resulting in recursive entry to
  * this routine, we could crash because the inner invocation blows away
  * the entry next to be visited by the outer scan.  But this way is OK,
  * because (a) during the first pass we won't process any more SI messages,
- * so HashTableWalk will complete safely; (b) during the second pass we
+ * so hash_seq_search will complete safely; (b) during the second pass we
  * only hold onto pointers to nondeletable entries.
 */
 void
 RelationCacheInvalidate(void)
 {
+	HASH_SEQ_STATUS status;
+	RelNameCacheEnt *namehentry;
+	Relation	relation;
 	List	   *rebuildList = NIL;
 	List	   *l;

 	/* Phase 1 */
-	HashTableWalk(RelationNameCache,
-				  (HashtFunc) RelationCacheInvalidateWalker,
-				  PointerGetDatum(&rebuildList));
+	hash_seq_init(&status, RelationNameCache);
+
+	while ((namehentry = (RelNameCacheEnt *) hash_seq_search(&status)) != NULL)
+	{
+		relation = namehentry->reldesc;
+
+		/* Ignore xact-local relations, since they are never SI targets */
+		if (relation->rd_myxactonly)
+			continue;
+
+		if (RelationHasReferenceCountZero(relation))
+		{
+			/* Delete this entry immediately */
+			RelationClearRelation(relation, false);
+		}
+		else
+		{
+			/* Add entry to list of stuff to rebuild in second pass */
+			rebuildList = lcons(relation, rebuildList);
+		}
+	}

 	/* Phase 2: rebuild the items found to need rebuild in phase 1 */
 	foreach(l, rebuildList)
 	{
-		Relation	relation = (Relation) lfirst(l);
-
+		relation = (Relation) lfirst(l);
 		RelationClearRelation(relation, true);
 	}
 	freeList(rebuildList);
 }

-static void
-RelationCacheInvalidateWalker(Relation *relationPtr, Datum listp)
-{
-	Relation	relation = *relationPtr;
-	List	  **rebuildList = (List **) DatumGetPointer(listp);
-
-	/* We can ignore xact-local relations, since they are never SI targets */
-	if (relation->rd_myxactonly)
-		return;
-
-	if (RelationHasReferenceCountZero(relation))
-	{
-		/* Delete this entry immediately */
-		RelationClearRelation(relation, false);
-	}
-	else
-	{
-		/* Add entry to list of stuff to rebuild in second pass */
-		*rebuildList = lcons(relation, *rebuildList);
-	}
-}
-
 /*
  * RelationCacheAbort
  *
@@ -1910,20 +1888,20 @@ RelationCacheInvalidateWalker(Relation *relationPtr, Datum listp)
 void
 RelationCacheAbort(void)
 {
-	HashTableWalk(RelationNameCache,
-				  (HashtFunc) RelationCacheAbortWalker,
-				  0);
-}
+	HASH_SEQ_STATUS status;
+	RelNameCacheEnt *namehentry;

-static void
-RelationCacheAbortWalker(Relation *relationPtr, Datum dummy)
-{
-	Relation	relation = *relationPtr;
+	hash_seq_init(&status, RelationNameCache);

-	if (relation->rd_isnailed)
-		RelationSetReferenceCount(relation, 1);
-	else
-		RelationSetReferenceCount(relation, 0);
+	while ((namehentry = (RelNameCacheEnt *) hash_seq_search(&status)) != NULL)
+	{
+		Relation	relation = namehentry->reldesc;
+
+		if (relation->rd_isnailed)
+			RelationSetReferenceCount(relation, 1);
+		else
+			RelationSetReferenceCount(relation, 0);
+	}
 }

 /*
@@ -2095,19 +2073,20 @@ RelationCacheInitialize(void)
 	MemSet(&ctl, 0, sizeof(ctl));
 	ctl.keysize = sizeof(NameData);
 	ctl.entrysize = sizeof(RelNameCacheEnt);
-	RelationNameCache = hash_create(INITRELCACHESIZE, &ctl, HASH_ELEM);
+	RelationNameCache = hash_create("Relcache by name", INITRELCACHESIZE,
+									&ctl, HASH_ELEM);

 	ctl.keysize = sizeof(Oid);
 	ctl.entrysize = sizeof(RelIdCacheEnt);
 	ctl.hash = tag_hash;
-	RelationIdCache = hash_create(INITRELCACHESIZE, &ctl,
-								  HASH_ELEM | HASH_FUNCTION);
+	RelationIdCache = hash_create("Relcache by OID", INITRELCACHESIZE,
+								  &ctl, HASH_ELEM | HASH_FUNCTION);

 	ctl.keysize = sizeof(RelFileNode);
 	ctl.entrysize = sizeof(RelNodeCacheEnt);
 	ctl.hash = tag_hash;
-	RelationNodeCache = hash_create(INITRELCACHESIZE, &ctl,
-									HASH_ELEM | HASH_FUNCTION);
+	RelationNodeCache = hash_create("Relcache by rnode", INITRELCACHESIZE,
+									&ctl, HASH_ELEM | HASH_FUNCTION);

 	/*
 	 * initialize the cache with pre-made relation descriptors for some of
@@ -2187,19 +2166,21 @@ CreateDummyCaches(void)
 	MemSet(&ctl, 0, sizeof(ctl));
 	ctl.keysize = sizeof(NameData);
 	ctl.entrysize = sizeof(RelNameCacheEnt);
-	RelationNameCache = hash_create(INITRELCACHESIZE, &ctl, HASH_ELEM);
+	RelationNameCache = hash_create("Relcache by name", INITRELCACHESIZE,
+									&ctl, HASH_ELEM);

 	ctl.keysize = sizeof(Oid);
 	ctl.entrysize = sizeof(RelIdCacheEnt);
 	ctl.hash = tag_hash;
-	RelationIdCache = hash_create(INITRELCACHESIZE, &ctl,
-								  HASH_ELEM | HASH_FUNCTION);
+	RelationIdCache = hash_create("Relcache by OID", INITRELCACHESIZE,
+								  &ctl, HASH_ELEM | HASH_FUNCTION);

 	ctl.keysize = sizeof(RelFileNode);
 	ctl.entrysize = sizeof(RelNodeCacheEnt);
 	ctl.hash = tag_hash;
-	RelationNodeCache = hash_create(INITRELCACHESIZE, &ctl,
-									HASH_ELEM | HASH_FUNCTION);
+	RelationNodeCache = hash_create("Relcache by rnode", INITRELCACHESIZE,
+									&ctl, HASH_ELEM | HASH_FUNCTION);

 	MemoryContextSwitchTo(oldcxt);
 }
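
Every hash_create() call site in this commit follows the same migration. A minimal sketch of the new signature, with a hypothetical cache and entry type (my_cache, MyEntry, init_my_cache are illustrative names, not from the patch); the table name now comes first and is the string reported by hash_corrupted() if the table is ever found damaged:

    #include "postgres.h"
    #include "utils/hsearch.h"

    static HTAB *my_cache;          /* hypothetical cache */

    static void
    init_my_cache(void)
    {
        HASHCTL     ctl;

        MemSet(&ctl, 0, sizeof(ctl));
        ctl.keysize = sizeof(Oid);
        ctl.entrysize = sizeof(MyEntry);    /* hypothetical entry struct */
        ctl.hash = tag_hash;

        /* The new first argument names the table; dynahash.c stores a
         * copy and uses it in corruption error messages. */
        my_cache = hash_create("My cache", 128, &ctl,
                               HASH_ELEM | HASH_FUNCTION);
    }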
src/backend/utils/hash/dynahash.c

@@ -9,7 +9,7 @@
  *
  *
  * IDENTIFICATION
- *	  $Header: /cvsroot/pgsql/src/backend/utils/hash/dynahash.c,v 1.37 2001/10/01 05:36:16 tgl Exp $
+ *	  $Header: /cvsroot/pgsql/src/backend/utils/hash/dynahash.c,v 1.38 2001/10/05 17:28:13 tgl Exp $
  *
  *-------------------------------------------------------------------------
 */
@@ -71,6 +71,7 @@ static bool dir_realloc(HTAB *hashp);
 static bool expand_table(HTAB *hashp);
 static bool hdefault(HTAB *hashp);
 static bool init_htab(HTAB *hashp, long nelem);
+static void hash_corrupted(HTAB *hashp);


 /*
@@ -100,7 +101,7 @@ static long hash_accesses,
 /************************** CREATE ROUTINES **********************/

 HTAB *
-hash_create(long nelem, HASHCTL *info, int flags)
+hash_create(const char *tabname, long nelem, HASHCTL *info, int flags)
 {
 	HTAB	   *hashp;
 	HASHHDR    *hctl;
@@ -125,6 +126,9 @@ hash_create(long nelem, HASHCTL *info, int flags)
 		return NULL;
 	MemSet(hashp, 0, sizeof(HTAB));

+	hashp->tabname = (char *) MEM_ALLOC(strlen(tabname) + 1);
+	strcpy(hashp->tabname, tabname);
+
 	if (flags & HASH_FUNCTION)
 		hashp->hash = info->hash;
 	else
@@ -140,6 +144,7 @@ hash_create(long nelem, HASHCTL *info, int flags)
 		hashp->dir = info->dir;
 		hashp->alloc = info->alloc;
 		hashp->hcxt = NULL;
+		hashp->isshared = true;

 		/* hash table already exists, we're just attaching to it */
 		if (flags & HASH_ATTACH)
@@ -152,6 +157,7 @@ hash_create(long nelem, HASHCTL *info, int flags)
 		hashp->dir = NULL;
 		hashp->alloc = MEM_ALLOC;
 		hashp->hcxt = DynaHashCxt;
+		hashp->isshared = false;
 	}

 	if (!hashp->hctl)
@@ -434,12 +440,13 @@ hash_destroy(HTAB *hashp)
 		 * by the caller of hash_create()).
 		 */
 		MEM_FREE(hashp->hctl);
+		MEM_FREE(hashp->tabname);
 		MEM_FREE(hashp);
 	}
 }

 void
-hash_stats(char *where, HTAB *hashp)
+hash_stats(const char *where, HTAB *hashp)
 {
 #if HASH_STATISTICS

@@ -476,24 +483,37 @@ call_hash(HTAB *hashp, void *k)
 	return (uint32) bucket;
 }

-/*
+/*----------
  * hash_search -- look up key in table and perform action
  *
- * action is one of HASH_FIND/HASH_ENTER/HASH_REMOVE
+ * action is one of:
+ *		HASH_FIND: look up key in table
+ *		HASH_ENTER: look up key in table, creating entry if not present
+ *		HASH_REMOVE: look up key in table, remove entry if present
+ *		HASH_FIND_SAVE: look up key in table, also save in static var
+ *		HASH_REMOVE_SAVED: remove entry saved by HASH_FIND_SAVE
 *
- * RETURNS: NULL if table is corrupted, a pointer to the element
- *		found/removed/entered if applicable, TRUE otherwise.
- *		foundPtr is TRUE if we found an element in the table
- *		(FALSE if we entered one).
+ * Return value is a pointer to the element found/entered/removed if any,
+ * or NULL if no match was found.  (NB: in the case of the REMOVE actions,
+ * the result is a dangling pointer that shouldn't be dereferenced!)
+ * A NULL result for HASH_ENTER implies we ran out of memory.
+ *
+ * If foundPtr isn't NULL, then *foundPtr is set TRUE if we found an
+ * existing entry in the table, FALSE otherwise.  This is needed in the
+ * HASH_ENTER case, but is redundant with the return value otherwise.
+ *
+ * The HASH_FIND_SAVE/HASH_REMOVE_SAVED interface is a hack to save one
+ * table lookup in a find/process/remove scenario.  Note that no other
+ * addition or removal in the table can safely happen in between.
+ *----------
 */
 void *
 hash_search(HTAB *hashp,
 			void *keyPtr,
-			HASHACTION action,	/* HASH_FIND / HASH_ENTER / HASH_REMOVE
-								 * HASH_FIND_SAVE / HASH_REMOVE_SAVED */
+			HASHACTION action,
 			bool *foundPtr)
 {
-	HASHHDR    *hctl;
+	HASHHDR    *hctl = hashp->hctl;
 	uint32		bucket;
 	long		segment_num;
 	long		segment_ndx;
@@ -507,21 +527,14 @@ hash_search(HTAB *hashp,
 		HASHBUCKET *prevBucketPtr;
 	}			saveState;

 	Assert(hashp);
 	Assert(keyPtr);
-	Assert((action == HASH_FIND) ||
-		   (action == HASH_REMOVE) ||
-		   (action == HASH_ENTER) ||
-		   (action == HASH_FIND_SAVE) ||
-		   (action == HASH_REMOVE_SAVED));
-
-	hctl = hashp->hctl;
-
 #if HASH_STATISTICS
 	hash_accesses++;
-	hashp->hctl->accesses++;
+	hctl->accesses++;
 #endif

 	/*
 	 * Do the initial lookup (or recall result of prior lookup)
 	 */
 	if (action == HASH_REMOVE_SAVED)
 	{
 		currBucket = saveState.currBucket;
@@ -540,7 +553,8 @@ hash_search(HTAB *hashp,

 	segp = hashp->dir[segment_num];

-	Assert(segp);
+	if (segp == NULL)
+		hash_corrupted(hashp);

 	prevBucketPtr = &segp[segment_ndx];
 	currBucket = *prevBucketPtr;
@@ -556,23 +570,32 @@ hash_search(HTAB *hashp,
 		currBucket = *prevBucketPtr;
 #if HASH_STATISTICS
 		hash_collisions++;
-		hashp->hctl->collisions++;
+		hctl->collisions++;
 #endif
 		}
 	}

 	/*
 	 * if we found an entry or if we weren't trying to insert, we're done
 	 * now.
 	 */
-	*foundPtr = (bool) (currBucket != NULL);
+	if (foundPtr)
+		*foundPtr = (bool) (currBucket != NULL);

 	/*
 	 * OK, now what?
 	 */
 	switch (action)
 	{
-		case HASH_ENTER:
+		case HASH_FIND:
 			if (currBucket != NULL)
 				return (void *) ELEMENTKEY(currBucket);
-			break;
+			return NULL;
+
+		case HASH_FIND_SAVE:
+			if (currBucket != NULL)
+			{
+				saveState.currBucket = currBucket;
+				saveState.prevBucketPtr = prevBucketPtr;
+				return (void *) ELEMENTKEY(currBucket);
+			}
+			return NULL;

 		case HASH_REMOVE:
 		case HASH_REMOVE_SAVED:
@@ -595,78 +618,57 @@ hash_search(HTAB *hashp,
 				 */
 				return (void *) ELEMENTKEY(currBucket);
 			}
-			return (void *) TRUE;
+			return NULL;

-		case HASH_FIND:
+		case HASH_ENTER:
+			/* Return existing element if found, else create one */
 			if (currBucket != NULL)
 				return (void *) ELEMENTKEY(currBucket);
-			return (void *) TRUE;

-		case HASH_FIND_SAVE:
-			if (currBucket != NULL)
+			/* get the next free element */
+			currBucket = hctl->freeList;
+			if (currBucket == NULL)
 			{
-				saveState.currBucket = currBucket;
-				saveState.prevBucketPtr = prevBucketPtr;
-				return (void *) ELEMENTKEY(currBucket);
+				/* no free elements.  allocate another chunk of buckets */
+				if (!element_alloc(hashp))
+					return NULL;	/* out of memory */
+				currBucket = hctl->freeList;
+				Assert(currBucket != NULL);
 			}
-			return (void *) TRUE;

-		default:
-			/* can't get here */
-			return NULL;
+			hctl->freeList = currBucket->link;
+
+			/* link into hashbucket chain */
+			*prevBucketPtr = currBucket;
+			currBucket->link = NULL;
+
+			/* copy key into record */
+			memcpy(ELEMENTKEY(currBucket), keyPtr, hctl->keysize);
+
+			/* caller is expected to fill the data field on return */
+
+			/* Check if it is time to split the segment */
+			if (++hctl->nentries / (hctl->max_bucket + 1) > hctl->ffactor)
+			{
+				/*
+				 * NOTE: failure to expand table is not a fatal error, it just
+				 * means we have to run at higher fill factor than we wanted.
+				 */
+				expand_table(hashp);
+			}
+
+			return (void *) ELEMENTKEY(currBucket);
 	}

-	/*
-	 * If we got here, then we didn't find the element and we have to
-	 * insert it into the hash table
-	 */
-	Assert(currBucket == NULL);
+	elog(ERROR, "hash_search: bogus action %d", (int) action);

-	/* get the next free bucket */
-	currBucket = hctl->freeList;
-	if (currBucket == NULL)
-	{
-		/* no free elements. allocate another chunk of buckets */
-		if (!element_alloc(hashp))
-			return NULL;
-		currBucket = hctl->freeList;
-	}
-	Assert(currBucket != NULL);
-
-	hctl->freeList = currBucket->link;
-
-	/* link into chain */
-	*prevBucketPtr = currBucket;
-	currBucket->link = NULL;
-
-	/* copy key into record */
-	memcpy(ELEMENTKEY(currBucket), keyPtr, hctl->keysize);
-
-	/*
-	 * let the caller initialize the data field after hash_search returns.
-	 */
-
-	/*
-	 * Check if it is time to split the segment
-	 */
-	if (++hctl->nentries / (hctl->max_bucket + 1) > hctl->ffactor)
-	{
-
-		/*
-		 * NOTE: failure to expand table is not a fatal error, it just
-		 * means we have to run at higher fill factor than we wanted.
-		 */
-		expand_table(hashp);
-	}
-
-	return (void *) ELEMENTKEY(currBucket);
+	return NULL;				/* keep compiler quiet */
 }

 /*
  * hash_seq_init/_search
  *			Sequentially search through hash table and return
- *			all the elements one by one, return NULL on error and
- *			return (void *) TRUE in the end.
+ *			all the elements one by one, return NULL when no more.
  *
  * NOTE: caller may delete the returned element before continuing the scan.
  * However, deleting any other element while the scan is in progress is
@@ -717,8 +719,7 @@ hash_seq_search(HASH_SEQ_STATUS *status)
 		 */
 		segp = hashp->dir[segment_num];
 		if (segp == NULL)
-			/* this is probably an error */
-			return NULL;
+			hash_corrupted(hashp);

 		/*
 		 * now find the right index into the segment for the first item in
@@ -734,7 +735,7 @@ hash_seq_search(HASH_SEQ_STATUS *status)
 		++status->curBucket;
 	}

-	return (void *) TRUE;		/* out of buckets */
+	return NULL;				/* out of buckets */
 }

@@ -923,6 +924,20 @@ element_alloc(HTAB *hashp)
 	return true;
 }

+/* complain when we have detected a corrupted hashtable */
+static void
+hash_corrupted(HTAB *hashp)
+{
+	/*
+	 * If the corruption is in a shared hashtable, we'd better force a
+	 * systemwide restart.  Otherwise, just shut down this one backend.
+	 */
+	if (hashp->isshared)
+		elog(STOP, "Hash table '%s' corrupted", hashp->tabname);
+	else
+		elog(FATAL, "Hash table '%s' corrupted", hashp->tabname);
+}
+
 /* calculate ceil(log base 2) of num */
 int
 my_log2(long num)
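
Callers migrating off HashTableWalk use the loop idiom below, the same pattern the relcache and portal code in this commit adopts. This is an illustrative fragment: my_cache, MyEntry, and process_entry are hypothetical names. Note that hash_seq_search now returns NULL at end of scan rather than (void *) TRUE, and that only the element currently being visited may safely be deleted mid-scan:

    HASH_SEQ_STATUS status;
    MyEntry    *entry;              /* hypothetical entry type */

    hash_seq_init(&status, my_cache);
    while ((entry = (MyEntry *) hash_seq_search(&status)) != NULL)
    {
        /* deleting 'entry' here is safe; deleting any other entry is not */
        process_entry(entry);       /* hypothetical per-entry work */
    }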
src/backend/utils/mmgr/portalmem.c

@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $Header: /cvsroot/pgsql/src/backend/utils/mmgr/portalmem.c,v 1.42 2001/10/01 05:36:16 tgl Exp $
+ *	  $Header: /cvsroot/pgsql/src/backend/utils/mmgr/portalmem.c,v 1.43 2001/10/05 17:28:13 tgl Exp $
  *
  *-------------------------------------------------------------------------
 */
@@ -33,7 +33,7 @@

 #include "postgres.h"

-#include "lib/hasht.h"
+#include "utils/hsearch.h"
 #include "utils/memutils.h"
 #include "utils/portal.h"

@@ -42,7 +42,7 @@
  * ----------------
 */

-#define MAX_PORTALNAME_LEN		64		/* XXX LONGALIGNable value */
+#define MAX_PORTALNAME_LEN		64

 typedef struct portalhashent
 {
@@ -54,15 +54,13 @@ static HTAB *PortalHashTable = NULL;

 #define PortalHashTableLookup(NAME, PORTAL) \
 do { \
-	PortalHashEnt *hentry; bool found; char key[MAX_PORTALNAME_LEN]; \
+	PortalHashEnt *hentry; char key[MAX_PORTALNAME_LEN]; \
 	\
 	MemSet(key, 0, MAX_PORTALNAME_LEN); \
 	snprintf(key, MAX_PORTALNAME_LEN - 1, "%s", NAME); \
 	hentry = (PortalHashEnt*)hash_search(PortalHashTable, \
-										 key, HASH_FIND, &found); \
-	if (hentry == NULL) \
-		elog(FATAL, "error in PortalHashTable"); \
-	if (found) \
+										 key, HASH_FIND, NULL); \
+	if (hentry) \
 		PORTAL = hentry->portal; \
 	else \
 		PORTAL = NULL; \
@@ -77,7 +75,7 @@ do { \
 	hentry = (PortalHashEnt*)hash_search(PortalHashTable, \
 										 key, HASH_ENTER, &found); \
 	if (hentry == NULL) \
-		elog(FATAL, "error in PortalHashTable"); \
+		elog(ERROR, "out of memory in PortalHashTable"); \
 	if (found) \
 		elog(NOTICE, "trying to insert a portal name that exists."); \
 	hentry->portal = PORTAL; \
@@ -85,15 +83,13 @@ do { \

 #define PortalHashTableDelete(PORTAL) \
 do { \
-	PortalHashEnt *hentry; bool found; char key[MAX_PORTALNAME_LEN]; \
+	PortalHashEnt *hentry; char key[MAX_PORTALNAME_LEN]; \
 	\
 	MemSet(key, 0, MAX_PORTALNAME_LEN); \
 	snprintf(key, MAX_PORTALNAME_LEN - 1, "%s", PORTAL->name); \
 	hentry = (PortalHashEnt*)hash_search(PortalHashTable, \
-										 key, HASH_REMOVE, &found); \
+										 key, HASH_REMOVE, NULL); \
 	if (hentry == NULL) \
-		elog(FATAL, "error in PortalHashTable"); \
-	if (!found) \
 		elog(NOTICE, "trying to delete portal name that does not exist."); \
 } while(0)

@@ -129,7 +125,8 @@ EnablePortalManager(void)
 	 * use PORTALS_PER_USER, defined in utils/portal.h as a guess of how
 	 * many hash table entries to create, initially
 	 */
-	PortalHashTable = hash_create(PORTALS_PER_USER * 3, &ctl, HASH_ELEM);
+	PortalHashTable = hash_create("Portal hash", PORTALS_PER_USER,
+								  &ctl, HASH_ELEM);
 }

 /*
@@ -234,15 +231,10 @@ CreatePortal(char *name)
  * Exceptions:
  *		BadState if called when disabled.
  *		BadArg if portal is invalid.
- *
- * Note peculiar calling convention: pass a pointer to a portal pointer.
- * This is mainly so that this routine can be used as a hashtable walker.
 */
 void
-PortalDrop(Portal *portalP)
+PortalDrop(Portal portal)
 {
-	Portal		portal = *portalP;
-
 	AssertArg(PortalIsValid(portal));

 	/* remove portal from hash table */
@@ -262,9 +254,23 @@ PortalDrop(Portal *portalP)

 /*
  * Destroy all portals created in the current transaction (ie, all of them).
+ *
+ * XXX This assumes that portals can be deleted in a random order, ie,
+ * no portal has a reference to any other (at least not one that will be
+ * exercised during deletion).  I think this is okay at the moment, but
+ * we've had bugs of that ilk in the past.  Keep a close eye on cursor
+ * references...
 */
 void
 AtEOXact_portals(void)
 {
-	HashTableWalk(PortalHashTable, (HashtFunc) PortalDrop, 0);
+	HASH_SEQ_STATUS status;
+	PortalHashEnt *hentry;
+
+	hash_seq_init(&status, PortalHashTable);
+
+	while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
+	{
+		PortalDrop(hentry->portal);
+	}
 }