Mirror of https://github.com/postgres/postgres.git (synced 2025-06-25 01:02:05 +03:00)
Improve hash_create's API for selecting simple-binary-key hash functions.
Previously, if you wanted anything besides C-string hash keys, you had to specify a custom hashing function to hash_create(). Nearly all such callers were specifying tag_hash or oid_hash; which is tedious, and rather error-prone, since a caller could easily miss the opportunity to optimize by using hash_uint32 when appropriate. Replace this with a design whereby callers using simple binary-data keys just specify HASH_BLOBS and don't need to mess with specific support functions. hash_create() itself will take care of optimizing when the key size is four bytes.

This nets out saving a few hundred bytes of code space, and offers a measurable performance improvement in tidbitmap.c (which was not exploiting the opportunity to use hash_uint32 for its 4-byte keys). There might be some wins elsewhere too, I didn't analyze closely.

In future we could look into offering a similar optimized hashing function for 8-byte keys. Under this design that could be done in a centralized and machine-independent fashion, whereas getting it right for keys of platform-dependent sizes would've been notationally painful before.

For the moment, the old way still works fine, so as not to break source code compatibility for loadable modules. Eventually we might want to remove tag_hash and friends from the exported API altogether, since there's no real need for them to be explicitly referenced from outside dynahash.c.

Teodor Sigaev and Tom Lane
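As an illustration of the API change described above, here is a minimal sketch of what a typical caller's setup looks like under the new design; the table name, entry struct, and init function below are hypothetical, and the pattern simply mirrors the callers touched in this diff.

```c
#include "postgres.h"

#include "utils/hsearch.h"

/* Hypothetical entry type: a 4-byte Oid key followed by the payload. */
typedef struct MyCacheEntry
{
	Oid			key;			/* hash key; must be the first field */
	int			count;
} MyCacheEntry;

static HTAB *my_cache = NULL;

static void
init_my_cache(void)
{
	HASHCTL		ctl;

	memset(&ctl, 0, sizeof(ctl));
	ctl.keysize = sizeof(Oid);
	ctl.entrysize = sizeof(MyCacheEntry);

	/*
	 * Old style: the caller had to name a hash function explicitly, e.g.
	 *     ctl.hash = oid_hash;
	 *     ... hash_create(..., HASH_ELEM | HASH_FUNCTION);
	 *
	 * New style: just say HASH_BLOBS; hash_create() picks tag_hash, or the
	 * faster hash_uint32-based function when keysize is 4 bytes.
	 */
	my_cache = hash_create("my hypothetical cache",	/* table name */
						   128,		/* initial size hint */
						   &ctl,
						   HASH_ELEM | HASH_BLOBS);
}
```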
@@ -1142,12 +1142,10 @@ gistInitParentMap(GISTBuildState *buildstate)
 	hashCtl.keysize = sizeof(BlockNumber);
 	hashCtl.entrysize = sizeof(ParentMapEntry);
 	hashCtl.hcxt = CurrentMemoryContext;
-	hashCtl.hash = oid_hash;
 	buildstate->parentMap = hash_create("gistbuild parent map",
 		1024,
 		&hashCtl,
-		HASH_ELEM | HASH_CONTEXT
-		| HASH_FUNCTION);
+		HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
 }
 
 static void
@@ -76,16 +76,14 @@ gistInitBuildBuffers(int pagesPerBuffer, int levelStep, int maxLevel)
 	 * nodeBuffersTab hash is association between index blocks and it's
 	 * buffers.
 	 */
 	memset(&hashCtl, 0, sizeof(hashCtl));
 	hashCtl.keysize = sizeof(BlockNumber);
 	hashCtl.entrysize = sizeof(GISTNodeBuffer);
 	hashCtl.hcxt = CurrentMemoryContext;
-	hashCtl.hash = tag_hash;
-	hashCtl.match = memcmp;
 	gfbb->nodeBuffersTab = hash_create("gistbuildbuffers",
 		1024,
 		&hashCtl,
-		HASH_ELEM | HASH_CONTEXT
-		| HASH_FUNCTION | HASH_COMPARE);
+		HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
 
 	gfbb->bufferEmptyingQueue = NIL;
 
@@ -283,13 +283,12 @@ begin_heap_rewrite(Relation old_heap, Relation new_heap, TransactionId oldest_xm
 	hash_ctl.keysize = sizeof(TidHashKey);
 	hash_ctl.entrysize = sizeof(UnresolvedTupData);
 	hash_ctl.hcxt = state->rs_cxt;
-	hash_ctl.hash = tag_hash;
 
 	state->rs_unresolved_tups =
 		hash_create("Rewrite / Unresolved ctids",
 			128,		/* arbitrary initial size */
 			&hash_ctl,
-			HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
+			HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
 
 	hash_ctl.entrysize = sizeof(OldToNewMappingData);
 
@@ -297,7 +296,7 @@ begin_heap_rewrite(Relation old_heap, Relation new_heap, TransactionId oldest_xm
 		hash_create("Rewrite / Old to new tid map",
 			128,		/* arbitrary initial size */
 			&hash_ctl,
-			HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
+			HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
 
 	MemoryContextSwitchTo(old_cxt);
 
@@ -834,13 +833,12 @@ logical_begin_heap_rewrite(RewriteState state)
 	hash_ctl.keysize = sizeof(TransactionId);
 	hash_ctl.entrysize = sizeof(RewriteMappingFile);
 	hash_ctl.hcxt = state->rs_cxt;
-	hash_ctl.hash = tag_hash;
 
 	state->rs_logical_mappings =
 		hash_create("Logical rewrite mapping",
 			128,		/* arbitrary initial size */
 			&hash_ctl,
-			HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
+			HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
 }
 
 /*
@@ -107,12 +107,11 @@ log_invalid_page(RelFileNode node, ForkNumber forkno, BlockNumber blkno,
 		memset(&ctl, 0, sizeof(ctl));
 		ctl.keysize = sizeof(xl_invalid_page_key);
 		ctl.entrysize = sizeof(xl_invalid_page);
-		ctl.hash = tag_hash;
 
 		invalid_page_tab = hash_create("XLOG invalid-page table",
 			100,
 			&ctl,
-			HASH_ELEM | HASH_FUNCTION);
+			HASH_ELEM | HASH_BLOBS);
 	}
 
 	/* we currently assume xl_invalid_page_key contains no padding */
@@ -986,10 +986,9 @@ create_seq_hashtable(void)
 	memset(&ctl, 0, sizeof(ctl));
 	ctl.keysize = sizeof(Oid);
 	ctl.entrysize = sizeof(SeqTableData);
-	ctl.hash = oid_hash;
 
 	seqhashtab = hash_create("Sequence values", 16, &ctl,
-		HASH_ELEM | HASH_FUNCTION);
+		HASH_ELEM | HASH_BLOBS);
 }
 
 /*
@@ -221,12 +221,11 @@ tbm_create_pagetable(TIDBitmap *tbm)
 	MemSet(&hash_ctl, 0, sizeof(hash_ctl));
 	hash_ctl.keysize = sizeof(BlockNumber);
 	hash_ctl.entrysize = sizeof(PagetableEntry);
-	hash_ctl.hash = tag_hash;
 	hash_ctl.hcxt = tbm->mcxt;
 	tbm->pagetable = hash_create("TIDBitmap",
 		128,		/* start small and extend */
 		&hash_ctl,
-		HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
+		HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
 
 	/* If entry1 is valid, push it into the hashtable */
 	if (tbm->status == TBM_ONE_PAGE)
@@ -1711,9 +1711,8 @@ lookup_proof_cache(Oid pred_op, Oid clause_op, bool refute_it)
 		MemSet(&ctl, 0, sizeof(ctl));
 		ctl.keysize = sizeof(OprProofCacheKey);
 		ctl.entrysize = sizeof(OprProofCacheEntry);
-		ctl.hash = tag_hash;
 		OprProofCacheHash = hash_create("Btree proof lookup cache", 256,
-			&ctl, HASH_ELEM | HASH_FUNCTION);
+			&ctl, HASH_ELEM | HASH_BLOBS);
 
 		/* Arrange to flush cache on pg_amop changes */
 		CacheRegisterSyscacheCallback(AMOPOPID,
@@ -1059,9 +1059,8 @@ find_oper_cache_entry(OprCacheKey *key)
 		MemSet(&ctl, 0, sizeof(ctl));
 		ctl.keysize = sizeof(OprCacheKey);
 		ctl.entrysize = sizeof(OprCacheEntry);
-		ctl.hash = tag_hash;
 		OprCacheHash = hash_create("Operator lookup cache", 256,
-			&ctl, HASH_ELEM | HASH_FUNCTION);
+			&ctl, HASH_ELEM | HASH_BLOBS);
 
 		/* Arrange to flush cache on pg_operator and pg_cast changes */
 		CacheRegisterSyscacheCallback(OPERNAMENSP,
@@ -922,10 +922,9 @@ rebuild_database_list(Oid newdb)
 	 */
 	hctl.keysize = sizeof(Oid);
 	hctl.entrysize = sizeof(avl_dbase);
-	hctl.hash = oid_hash;
 	hctl.hcxt = tmpcxt;
 	dbhash = hash_create("db hash", 20, &hctl,	/* magic number here FIXME */
-		HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
+		HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
 
 	/* start by inserting the new database */
 	score = 0;
@@ -1997,12 +1996,11 @@ do_autovacuum(void)
 	MemSet(&ctl, 0, sizeof(ctl));
 	ctl.keysize = sizeof(Oid);
 	ctl.entrysize = sizeof(av_relation);
-	ctl.hash = oid_hash;
 
 	table_toast_map = hash_create("TOAST to main relid map",
 		100,
 		&ctl,
-		HASH_ELEM | HASH_FUNCTION);
+		HASH_ELEM | HASH_BLOBS);
 
 	/*
 	 * Scan pg_class to determine which tables to vacuum.
@@ -1212,13 +1212,12 @@ CompactCheckpointerRequestQueue(void)
 	MemSet(&ctl, 0, sizeof(ctl));
 	ctl.keysize = sizeof(CheckpointerRequest);
 	ctl.entrysize = sizeof(struct CheckpointerSlotMapping);
-	ctl.hash = tag_hash;
 	ctl.hcxt = CurrentMemoryContext;
 
 	htab = hash_create("CompactCheckpointerRequestQueue",
 		CheckpointerShmem->num_requests,
 		&ctl,
-		HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
+		HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
 
 	/*
 	 * The basic idea here is that a request can be skipped if it's followed
@@ -1132,12 +1132,11 @@ pgstat_collect_oids(Oid catalogid)
 	memset(&hash_ctl, 0, sizeof(hash_ctl));
 	hash_ctl.keysize = sizeof(Oid);
 	hash_ctl.entrysize = sizeof(Oid);
-	hash_ctl.hash = oid_hash;
 	hash_ctl.hcxt = CurrentMemoryContext;
 	htab = hash_create("Temporary table of OIDs",
 		PGSTAT_TAB_HASH_SIZE,
 		&hash_ctl,
-		HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
+		HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
 
 	rel = heap_open(catalogid, AccessShareLock);
 	snapshot = RegisterSnapshot(GetLatestSnapshot());
@@ -1520,11 +1519,10 @@ pgstat_init_function_usage(FunctionCallInfoData *fcinfo,
 		memset(&hash_ctl, 0, sizeof(hash_ctl));
 		hash_ctl.keysize = sizeof(Oid);
 		hash_ctl.entrysize = sizeof(PgStat_BackendFunctionEntry);
-		hash_ctl.hash = oid_hash;
 		pgStatFunctions = hash_create("Function stat entries",
 			PGSTAT_FUNCTION_HASH_SIZE,
 			&hash_ctl,
-			HASH_ELEM | HASH_FUNCTION);
+			HASH_ELEM | HASH_BLOBS);
 	}
 
 	/* Get the stats entry for this function, create if necessary */
@@ -3483,19 +3481,17 @@ reset_dbentry_counters(PgStat_StatDBEntry *dbentry)
 	memset(&hash_ctl, 0, sizeof(hash_ctl));
 	hash_ctl.keysize = sizeof(Oid);
 	hash_ctl.entrysize = sizeof(PgStat_StatTabEntry);
-	hash_ctl.hash = oid_hash;
 	dbentry->tables = hash_create("Per-database table",
 		PGSTAT_TAB_HASH_SIZE,
 		&hash_ctl,
-		HASH_ELEM | HASH_FUNCTION);
+		HASH_ELEM | HASH_BLOBS);
 
 	hash_ctl.keysize = sizeof(Oid);
 	hash_ctl.entrysize = sizeof(PgStat_StatFuncEntry);
-	hash_ctl.hash = oid_hash;
 	dbentry->functions = hash_create("Per-database function",
 		PGSTAT_FUNCTION_HASH_SIZE,
 		&hash_ctl,
-		HASH_ELEM | HASH_FUNCTION);
+		HASH_ELEM | HASH_BLOBS);
 }
 
 /*
@@ -3902,10 +3898,9 @@ pgstat_read_statsfiles(Oid onlydb, bool permanent, bool deep)
 	memset(&hash_ctl, 0, sizeof(hash_ctl));
 	hash_ctl.keysize = sizeof(Oid);
 	hash_ctl.entrysize = sizeof(PgStat_StatDBEntry);
-	hash_ctl.hash = oid_hash;
 	hash_ctl.hcxt = pgStatLocalContext;
 	dbhash = hash_create("Databases hash", PGSTAT_DB_HASH_SIZE, &hash_ctl,
-		HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
+		HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
 
 	/*
 	 * Clear out global and archiver statistics so they start from zero in
@@ -4026,21 +4021,19 @@ pgstat_read_statsfiles(Oid onlydb, bool permanent, bool deep)
 			memset(&hash_ctl, 0, sizeof(hash_ctl));
 			hash_ctl.keysize = sizeof(Oid);
 			hash_ctl.entrysize = sizeof(PgStat_StatTabEntry);
-			hash_ctl.hash = oid_hash;
 			hash_ctl.hcxt = pgStatLocalContext;
 			dbentry->tables = hash_create("Per-database table",
 				PGSTAT_TAB_HASH_SIZE,
 				&hash_ctl,
-				HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
+				HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
 
 			hash_ctl.keysize = sizeof(Oid);
 			hash_ctl.entrysize = sizeof(PgStat_StatFuncEntry);
-			hash_ctl.hash = oid_hash;
 			hash_ctl.hcxt = pgStatLocalContext;
 			dbentry->functions = hash_create("Per-database function",
 				PGSTAT_FUNCTION_HASH_SIZE,
 				&hash_ctl,
-				HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
+				HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
 
 			/*
 			 * If requested, read the data from the database-specific
@@ -245,11 +245,10 @@ ReorderBufferAllocate(void)
 
 	hash_ctl.keysize = sizeof(TransactionId);
 	hash_ctl.entrysize = sizeof(ReorderBufferTXNByIdEnt);
-	hash_ctl.hash = tag_hash;
 	hash_ctl.hcxt = buffer->context;
 
 	buffer->by_txn = hash_create("ReorderBufferByXid", 1000, &hash_ctl,
-		HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
+		HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
 
 	buffer->by_txn_last_xid = InvalidTransactionId;
 	buffer->by_txn_last_txn = NULL;
@@ -1111,7 +1110,6 @@ ReorderBufferBuildTupleCidHash(ReorderBuffer *rb, ReorderBufferTXN *txn)
 
 	hash_ctl.keysize = sizeof(ReorderBufferTupleCidKey);
 	hash_ctl.entrysize = sizeof(ReorderBufferTupleCidEnt);
-	hash_ctl.hash = tag_hash;
 	hash_ctl.hcxt = rb->context;
 
 	/*
@@ -1120,7 +1118,7 @@ ReorderBufferBuildTupleCidHash(ReorderBuffer *rb, ReorderBufferTXN *txn)
 	 */
 	txn->tuplecid_hash =
 		hash_create("ReorderBufferTupleCid", txn->ntuplecids, &hash_ctl,
-			HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
+			HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
 
 	dlist_foreach(iter, &txn->tuplecids)
 	{
@@ -2434,10 +2432,9 @@ ReorderBufferToastInitHash(ReorderBuffer *rb, ReorderBufferTXN *txn)
 	memset(&hash_ctl, 0, sizeof(hash_ctl));
 	hash_ctl.keysize = sizeof(Oid);
 	hash_ctl.entrysize = sizeof(ReorderBufferToastEnt);
-	hash_ctl.hash = tag_hash;
 	hash_ctl.hcxt = rb->context;
 	txn->toast_hash = hash_create("ReorderBufferToastHash", 5, &hash_ctl,
-		HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
+		HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
 }
 
 /*
@@ -59,13 +59,12 @@ InitBufTable(int size)
 	/* BufferTag maps to Buffer */
 	info.keysize = sizeof(BufferTag);
 	info.entrysize = sizeof(BufferLookupEnt);
-	info.hash = tag_hash;
 	info.num_partitions = NUM_BUFFER_PARTITIONS;
 
 	SharedBufHash = ShmemInitHash("Shared Buffer Lookup Table",
 		size, size,
 		&info,
-		HASH_ELEM | HASH_FUNCTION | HASH_PARTITION);
+		HASH_ELEM | HASH_BLOBS | HASH_PARTITION);
 }
 
 /*
@@ -2063,10 +2063,9 @@ InitBufferPoolAccess(void)
 	MemSet(&hash_ctl, 0, sizeof(hash_ctl));
 	hash_ctl.keysize = sizeof(int32);
 	hash_ctl.entrysize = sizeof(PrivateRefCountArray);
-	hash_ctl.hash = oid_hash;	/* a bit more efficient than tag_hash */
 
 	PrivateRefCountHash = hash_create("PrivateRefCount", 100, &hash_ctl,
-		HASH_ELEM | HASH_FUNCTION);
+		HASH_ELEM | HASH_BLOBS);
 }
 
 /*
@@ -415,12 +415,11 @@ InitLocalBuffers(void)
 	MemSet(&info, 0, sizeof(info));
 	info.keysize = sizeof(BufferTag);
 	info.entrysize = sizeof(LocalBufferLookupEnt);
-	info.hash = tag_hash;
 
 	LocalBufHash = hash_create("Local Buffer Lookup Table",
 		nbufs,
 		&info,
-		HASH_ELEM | HASH_FUNCTION);
+		HASH_ELEM | HASH_BLOBS);
 
 	if (!LocalBufHash)
 		elog(ERROR, "could not initialize local buffer hash table");
@@ -373,7 +373,6 @@ void
 InitLocks(void)
 {
 	HASHCTL		info;
-	int			hash_flags;
 	long		init_table_size,
 				max_table_size;
 	bool		found;
@@ -392,15 +391,13 @@ InitLocks(void)
 	MemSet(&info, 0, sizeof(info));
 	info.keysize = sizeof(LOCKTAG);
 	info.entrysize = sizeof(LOCK);
-	info.hash = tag_hash;
 	info.num_partitions = NUM_LOCK_PARTITIONS;
-	hash_flags = (HASH_ELEM | HASH_FUNCTION | HASH_PARTITION);
 
 	LockMethodLockHash = ShmemInitHash("LOCK hash",
 		init_table_size,
 		max_table_size,
 		&info,
-		hash_flags);
+		HASH_ELEM | HASH_BLOBS | HASH_PARTITION);
 
 	/* Assume an average of 2 holders per lock */
 	max_table_size *= 2;
@@ -414,13 +411,12 @@ InitLocks(void)
 	info.entrysize = sizeof(PROCLOCK);
 	info.hash = proclock_hash;
 	info.num_partitions = NUM_LOCK_PARTITIONS;
-	hash_flags = (HASH_ELEM | HASH_FUNCTION | HASH_PARTITION);
 
 	LockMethodProcLockHash = ShmemInitHash("PROCLOCK hash",
 		init_table_size,
 		max_table_size,
 		&info,
-		hash_flags);
+		HASH_ELEM | HASH_FUNCTION | HASH_PARTITION);
 
 	/*
 	 * Allocate fast-path structures.
@@ -445,13 +441,11 @@ InitLocks(void)
 
 	info.keysize = sizeof(LOCALLOCKTAG);
 	info.entrysize = sizeof(LOCALLOCK);
-	info.hash = tag_hash;
-	hash_flags = (HASH_ELEM | HASH_FUNCTION);
 
 	LockMethodLocalHash = hash_create("LOCALLOCK hash",
 		16,
 		&info,
-		hash_flags);
+		HASH_ELEM | HASH_BLOBS);
 }
 
 
@@ -167,10 +167,9 @@ init_lwlock_stats(void)
 	MemSet(&ctl, 0, sizeof(ctl));
 	ctl.keysize = sizeof(lwlock_stats_key);
 	ctl.entrysize = sizeof(lwlock_stats);
-	ctl.hash = tag_hash;
 	ctl.hcxt = lwlock_stats_cxt;
 	lwlock_stats_htab = hash_create("lwlock stats", 16384, &ctl,
-		HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
+		HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
 	if (!exit_registered)
 	{
 		on_shmem_exit(print_lwlock_stats, 0);
@@ -286,7 +286,7 @@
  * the lock partition number from the hashcode.
  */
 #define PredicateLockTargetTagHashCode(predicatelocktargettag) \
-	(tag_hash((predicatelocktargettag), sizeof(PREDICATELOCKTARGETTAG)))
+	get_hash_value(PredicateLockTargetHash, predicatelocktargettag)
 
 /*
  * Given a predicate lock tag, and the hash for its target,
@@ -1095,7 +1095,6 @@ void
 InitPredicateLocks(void)
 {
 	HASHCTL		info;
-	int			hash_flags;
 	long		max_table_size;
 	Size		requestSize;
 	bool		found;
@@ -1113,15 +1112,14 @@ InitPredicateLocks(void)
 	MemSet(&info, 0, sizeof(info));
 	info.keysize = sizeof(PREDICATELOCKTARGETTAG);
 	info.entrysize = sizeof(PREDICATELOCKTARGET);
-	info.hash = tag_hash;
 	info.num_partitions = NUM_PREDICATELOCK_PARTITIONS;
-	hash_flags = (HASH_ELEM | HASH_FUNCTION | HASH_PARTITION | HASH_FIXED_SIZE);
 
 	PredicateLockTargetHash = ShmemInitHash("PREDICATELOCKTARGET hash",
 		max_table_size,
 		max_table_size,
 		&info,
-		hash_flags);
+		HASH_ELEM | HASH_BLOBS |
+		HASH_PARTITION | HASH_FIXED_SIZE);
 
 	/* Assume an average of 2 xacts per target */
 	max_table_size *= 2;
@@ -1143,13 +1141,13 @@ InitPredicateLocks(void)
 	info.entrysize = sizeof(PREDICATELOCK);
 	info.hash = predicatelock_hash;
 	info.num_partitions = NUM_PREDICATELOCK_PARTITIONS;
-	hash_flags = (HASH_ELEM | HASH_FUNCTION | HASH_PARTITION | HASH_FIXED_SIZE);
 
 	PredicateLockHash = ShmemInitHash("PREDICATELOCK hash",
 		max_table_size,
 		max_table_size,
 		&info,
-		hash_flags);
+		HASH_ELEM | HASH_FUNCTION |
+		HASH_PARTITION | HASH_FIXED_SIZE);
 
 	/*
 	 * Compute size for serializable transaction hashtable. Note these
|
||||
MemSet(&info, 0, sizeof(info));
|
||||
info.keysize = sizeof(SERIALIZABLEXIDTAG);
|
||||
info.entrysize = sizeof(SERIALIZABLEXID);
|
||||
info.hash = tag_hash;
|
||||
hash_flags = (HASH_ELEM | HASH_FUNCTION | HASH_FIXED_SIZE);
|
||||
|
||||
SerializableXidHash = ShmemInitHash("SERIALIZABLEXID hash",
|
||||
max_table_size,
|
||||
max_table_size,
|
||||
&info,
|
||||
hash_flags);
|
||||
HASH_ELEM | HASH_BLOBS |
|
||||
HASH_FIXED_SIZE);
|
||||
|
||||
/*
|
||||
* Allocate space for tracking rw-conflicts in lists attached to the
|
||||
@ -1793,11 +1790,10 @@ GetSerializableTransactionSnapshotInt(Snapshot snapshot,
|
||||
MemSet(&hash_ctl, 0, sizeof(hash_ctl));
|
||||
hash_ctl.keysize = sizeof(PREDICATELOCKTARGETTAG);
|
||||
hash_ctl.entrysize = sizeof(LOCALPREDICATELOCK);
|
||||
hash_ctl.hash = tag_hash;
|
||||
LocalPredicateLockHash = hash_create("Local predicate lock",
|
||||
max_predicate_locks_per_xact,
|
||||
&hash_ctl,
|
||||
HASH_ELEM | HASH_FUNCTION);
|
||||
HASH_ELEM | HASH_BLOBS);
|
||||
|
||||
return snapshot;
|
||||
}
|
||||
|
@@ -229,12 +229,11 @@ mdinit(void)
 		MemSet(&hash_ctl, 0, sizeof(hash_ctl));
 		hash_ctl.keysize = sizeof(RelFileNode);
 		hash_ctl.entrysize = sizeof(PendingOperationEntry);
-		hash_ctl.hash = tag_hash;
 		hash_ctl.hcxt = pendingOpsCxt;
 		pendingOpsTable = hash_create("Pending Ops Table",
 			100L,
 			&hash_ctl,
-			HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
+			HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
 		pendingUnlinks = NIL;
 	}
 }
@@ -146,9 +146,8 @@ smgropen(RelFileNode rnode, BackendId backend)
 		MemSet(&ctl, 0, sizeof(ctl));
 		ctl.keysize = sizeof(RelFileNodeBackend);
 		ctl.entrysize = sizeof(SMgrRelationData);
-		ctl.hash = tag_hash;
 		SMgrRelationHash = hash_create("smgr relation table", 400,
-			&ctl, HASH_ELEM | HASH_FUNCTION);
+			&ctl, HASH_ELEM | HASH_BLOBS);
 		first_unowned_reln = NULL;
 	}
 
@@ -290,12 +290,11 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
 	MemSet(&count_hash_ctl, 0, sizeof(count_hash_ctl));
 	count_hash_ctl.keysize = sizeof(int);
 	count_hash_ctl.entrysize = sizeof(DECountItem);
-	count_hash_ctl.hash = tag_hash;
 	count_hash_ctl.hcxt = CurrentMemoryContext;
 	count_tab = hash_create("Array distinct element count table",
 		64,
 		&count_hash_ctl,
-		HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
+		HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
 
 	/* Initialize counters. */
 	b_current = 1;
@@ -875,9 +875,8 @@ lookup_collation_cache(Oid collation, bool set_flags)
 		memset(&ctl, 0, sizeof(ctl));
 		ctl.keysize = sizeof(Oid);
 		ctl.entrysize = sizeof(collation_cache_entry);
-		ctl.hash = oid_hash;
 		collation_cache = hash_create("Collation cache", 100, &ctl,
-			HASH_ELEM | HASH_FUNCTION);
+			HASH_ELEM | HASH_BLOBS);
 	}
 
 	cache_entry = hash_search(collation_cache, &collation, HASH_ENTER, &found);
@@ -3326,10 +3326,9 @@ ri_InitHashTables(void)
 	memset(&ctl, 0, sizeof(ctl));
 	ctl.keysize = sizeof(Oid);
 	ctl.entrysize = sizeof(RI_ConstraintInfo);
-	ctl.hash = oid_hash;
 	ri_constraint_cache = hash_create("RI constraint cache",
 		RI_INIT_CONSTRAINTHASHSIZE,
-		&ctl, HASH_ELEM | HASH_FUNCTION);
+		&ctl, HASH_ELEM | HASH_BLOBS);
 
 	/* Arrange to flush cache on pg_constraint changes */
 	CacheRegisterSyscacheCallback(CONSTROID,
@@ -3339,18 +3338,16 @@ ri_InitHashTables(void)
 	memset(&ctl, 0, sizeof(ctl));
 	ctl.keysize = sizeof(RI_QueryKey);
 	ctl.entrysize = sizeof(RI_QueryHashEntry);
-	ctl.hash = tag_hash;
 	ri_query_cache = hash_create("RI query cache",
 		RI_INIT_QUERYHASHSIZE,
-		&ctl, HASH_ELEM | HASH_FUNCTION);
+		&ctl, HASH_ELEM | HASH_BLOBS);
 
 	memset(&ctl, 0, sizeof(ctl));
 	ctl.keysize = sizeof(RI_CompareKey);
 	ctl.entrysize = sizeof(RI_CompareHashEntry);
-	ctl.hash = tag_hash;
 	ri_compare_cache = hash_create("RI compare cache",
 		RI_INIT_QUERYHASHSIZE,
-		&ctl, HASH_ELEM | HASH_FUNCTION);
+		&ctl, HASH_ELEM | HASH_BLOBS);
 }
 
 
src/backend/utils/cache/attoptcache.c
@@ -82,10 +82,9 @@ InitializeAttoptCache(void)
 	MemSet(&ctl, 0, sizeof(ctl));
 	ctl.keysize = sizeof(AttoptCacheKey);
 	ctl.entrysize = sizeof(AttoptCacheEntry);
-	ctl.hash = tag_hash;
 	AttoptCacheHash =
 		hash_create("Attopt cache", 256, &ctl,
-			HASH_ELEM | HASH_FUNCTION);
+			HASH_ELEM | HASH_BLOBS);
 
 	/* Make sure we've initialized CacheMemoryContext. */
 	if (!CacheMemoryContext)
src/backend/utils/cache/evtcache.c
@@ -123,10 +123,9 @@ BuildEventTriggerCache(void)
 	MemSet(&ctl, 0, sizeof(ctl));
 	ctl.keysize = sizeof(EventTriggerEvent);
 	ctl.entrysize = sizeof(EventTriggerCacheEntry);
-	ctl.hash = tag_hash;
 	ctl.hcxt = EventTriggerCacheContext;
 	cache = hash_create("Event Trigger Cache", 32, &ctl,
-		HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
+		HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
 
 	/*
 	 * Prepare to scan pg_event_trigger in name order.
src/backend/utils/cache/relcache.c
@@ -1409,9 +1409,8 @@ LookupOpclassInfo(Oid operatorClassOid,
 		MemSet(&ctl, 0, sizeof(ctl));
 		ctl.keysize = sizeof(Oid);
 		ctl.entrysize = sizeof(OpClassCacheEnt);
-		ctl.hash = oid_hash;
 		OpClassCache = hash_create("Operator class cache", 64,
-			&ctl, HASH_ELEM | HASH_FUNCTION);
+			&ctl, HASH_ELEM | HASH_BLOBS);
 
 		/* Also make sure CacheMemoryContext exists */
 		if (!CacheMemoryContext)
@@ -3140,9 +3139,8 @@ RelationCacheInitialize(void)
 	MemSet(&ctl, 0, sizeof(ctl));
 	ctl.keysize = sizeof(Oid);
 	ctl.entrysize = sizeof(RelIdCacheEnt);
-	ctl.hash = oid_hash;
 	RelationIdCache = hash_create("Relcache by OID", INITRELCACHESIZE,
-		&ctl, HASH_ELEM | HASH_FUNCTION);
+		&ctl, HASH_ELEM | HASH_BLOBS);
 
 	/*
 	 * relation mapper needs to be initialized too
src/backend/utils/cache/relfilenodemap.c
@@ -115,7 +115,6 @@ InitializeRelfilenodeMap(void)
 	MemSet(&ctl, 0, sizeof(ctl));
 	ctl.keysize = sizeof(RelfilenodeMapKey);
 	ctl.entrysize = sizeof(RelfilenodeMapEntry);
-	ctl.hash = tag_hash;
 	ctl.hcxt = CacheMemoryContext;
 
 	/*
@@ -125,7 +124,7 @@ InitializeRelfilenodeMap(void)
 	 */
 	RelfilenodeMapHash =
 		hash_create("RelfilenodeMap cache", 1024, &ctl,
-			HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
+			HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
 
 	/* Watch for invalidation events. */
 	CacheRegisterRelcacheCallback(RelfilenodeMapInvalidateCallback,
src/backend/utils/cache/spccache.c
@@ -81,10 +81,9 @@ InitializeTableSpaceCache(void)
 	MemSet(&ctl, 0, sizeof(ctl));
 	ctl.keysize = sizeof(Oid);
 	ctl.entrysize = sizeof(TableSpaceCacheEntry);
-	ctl.hash = oid_hash;
 	TableSpaceCacheHash =
 		hash_create("TableSpace cache", 16, &ctl,
-			HASH_ELEM | HASH_FUNCTION);
+			HASH_ELEM | HASH_BLOBS);
 
 	/* Make sure we've initialized CacheMemoryContext. */
 	if (!CacheMemoryContext)
src/backend/utils/cache/ts_cache.c
@@ -120,9 +120,8 @@ lookup_ts_parser_cache(Oid prsId)
 		MemSet(&ctl, 0, sizeof(ctl));
 		ctl.keysize = sizeof(Oid);
 		ctl.entrysize = sizeof(TSParserCacheEntry);
-		ctl.hash = oid_hash;
 		TSParserCacheHash = hash_create("Tsearch parser cache", 4,
-			&ctl, HASH_ELEM | HASH_FUNCTION);
+			&ctl, HASH_ELEM | HASH_BLOBS);
 		/* Flush cache on pg_ts_parser changes */
 		CacheRegisterSyscacheCallback(TSPARSEROID, InvalidateTSCacheCallBack,
 			PointerGetDatum(TSParserCacheHash));
@@ -219,9 +218,8 @@ lookup_ts_dictionary_cache(Oid dictId)
 		MemSet(&ctl, 0, sizeof(ctl));
 		ctl.keysize = sizeof(Oid);
 		ctl.entrysize = sizeof(TSDictionaryCacheEntry);
-		ctl.hash = oid_hash;
 		TSDictionaryCacheHash = hash_create("Tsearch dictionary cache", 8,
-			&ctl, HASH_ELEM | HASH_FUNCTION);
+			&ctl, HASH_ELEM | HASH_BLOBS);
 		/* Flush cache on pg_ts_dict and pg_ts_template changes */
 		CacheRegisterSyscacheCallback(TSDICTOID, InvalidateTSCacheCallBack,
 			PointerGetDatum(TSDictionaryCacheHash));
@@ -368,9 +366,8 @@ init_ts_config_cache(void)
 	MemSet(&ctl, 0, sizeof(ctl));
 	ctl.keysize = sizeof(Oid);
 	ctl.entrysize = sizeof(TSConfigCacheEntry);
-	ctl.hash = oid_hash;
 	TSConfigCacheHash = hash_create("Tsearch configuration cache", 16,
-		&ctl, HASH_ELEM | HASH_FUNCTION);
+		&ctl, HASH_ELEM | HASH_BLOBS);
 	/* Flush cache on pg_ts_config and pg_ts_config_map changes */
 	CacheRegisterSyscacheCallback(TSCONFIGOID, InvalidateTSCacheCallBack,
 		PointerGetDatum(TSConfigCacheHash));
src/backend/utils/cache/typcache.c
@@ -166,9 +166,8 @@ lookup_type_cache(Oid type_id, int flags)
 		MemSet(&ctl, 0, sizeof(ctl));
 		ctl.keysize = sizeof(Oid);
 		ctl.entrysize = sizeof(TypeCacheEntry);
-		ctl.hash = oid_hash;
 		TypeCacheHash = hash_create("Type information cache", 64,
-			&ctl, HASH_ELEM | HASH_FUNCTION);
+			&ctl, HASH_ELEM | HASH_BLOBS);
 
 		/* Also set up callbacks for SI invalidations */
 		CacheRegisterRelcacheCallback(TypeCacheRelCallback, (Datum) 0);
@@ -846,9 +845,8 @@ assign_record_type_typmod(TupleDesc tupDesc)
 		MemSet(&ctl, 0, sizeof(ctl));
 		ctl.keysize = REC_HASH_KEYS * sizeof(Oid);
 		ctl.entrysize = sizeof(RecordCacheEntry);
-		ctl.hash = tag_hash;
 		RecordCacheHash = hash_create("Record information cache", 64,
-			&ctl, HASH_ELEM | HASH_FUNCTION);
+			&ctl, HASH_ELEM | HASH_BLOBS);
 
 		/* Also make sure CacheMemoryContext exists */
 		if (!CacheMemoryContext)
@@ -540,11 +540,10 @@ record_C_func(HeapTuple procedureTuple,
 		MemSet(&hash_ctl, 0, sizeof(hash_ctl));
 		hash_ctl.keysize = sizeof(Oid);
 		hash_ctl.entrysize = sizeof(CFuncHashTabEntry);
-		hash_ctl.hash = oid_hash;
 		CFuncHash = hash_create("CFuncHash",
 			100,
 			&hash_ctl,
-			HASH_ELEM | HASH_FUNCTION);
+			HASH_ELEM | HASH_BLOBS);
 	}
 
 	entry = (CFuncHashTabEntry *)
@@ -26,6 +26,20 @@
  * in local memory, we typically use palloc() which will throw error on
  * failure. The code in this file has to cope with both cases.
  *
+ * dynahash.c provides support for these types of lookup keys:
+ *
+ * 1. Null-terminated C strings (truncated if necessary to fit in keysize),
+ * compared as though by strcmp(). This is the default behavior.
+ *
+ * 2. Arbitrary binary data of size keysize, compared as though by memcmp().
+ * (Caller must ensure there are no undefined padding bits in the keys!)
+ * This is selected by specifying HASH_BLOBS flag to hash_create.
+ *
+ * 3. More complex key behavior can be selected by specifying user-supplied
+ * hashing, comparison, and/or key-copying functions. At least a hashing
+ * function must be supplied; comparison defaults to memcmp() and key copying
+ * to memcpy() when a user-defined hashing function is selected.
+ *
  * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
@@ -305,15 +319,32 @@ hash_create(const char *tabname, long nelem, HASHCTL *info, int flags)
 	hashp->tabname = (char *) (hashp + 1);
 	strcpy(hashp->tabname, tabname);
 
+	/*
+	 * Select the appropriate hash function (see comments at head of file).
+	 */
 	if (flags & HASH_FUNCTION)
 		hashp->hash = info->hash;
+	else if (flags & HASH_BLOBS)
+	{
+		/* We can optimize hashing for common key sizes */
+		Assert(flags & HASH_ELEM);
+		if (info->keysize == sizeof(uint32))
+			hashp->hash = uint32_hash;
+		else
+			hashp->hash = tag_hash;
+	}
 	else
 		hashp->hash = string_hash;	/* default hash function */
 
 	/*
 	 * If you don't specify a match function, it defaults to string_compare if
 	 * you used string_hash (either explicitly or by default) and to memcmp
-	 * otherwise. (Prior to PostgreSQL 7.4, memcmp was always used.)
+	 * otherwise.
+	 *
+	 * Note: explicitly specifying string_hash is deprecated, because this
+	 * might not work for callers in loadable modules on some platforms due to
+	 * referencing a trampoline instead of the string_hash function proper.
+	 * Just let it default, eh?
 	 */
 	if (flags & HASH_COMPARE)
 		hashp->match = info->match;
@@ -332,6 +363,7 @@ hash_create(const char *tabname, long nelem, HASHCTL *info, int flags)
 	else
 		hashp->keycopy = memcpy;
 
+	/* And select the entry allocation function, too. */
 	if (flags & HASH_ALLOC)
 		hashp->alloc = info->alloc;
 	else
@@ -55,15 +55,15 @@ tag_hash(const void *key, Size keysize)
 }
 
 /*
- * oid_hash: hash function for keys that are OIDs
+ * uint32_hash: hash function for keys that are uint32 or int32
  *
  * (tag_hash works for this case too, but is slower)
  */
 uint32
-oid_hash(const void *key, Size keysize)
+uint32_hash(const void *key, Size keysize)
 {
-	Assert(keysize == sizeof(Oid));
-	return DatumGetUInt32(hash_uint32((uint32) *((const Oid *) key)));
+	Assert(keysize == sizeof(uint32));
+	return DatumGetUInt32(hash_uint32(*((const uint32 *) key)));
 }
 
 /*
@@ -218,13 +218,12 @@ GetComboCommandId(CommandId cmin, CommandId cmax)
 		memset(&hash_ctl, 0, sizeof(hash_ctl));
 		hash_ctl.keysize = sizeof(ComboCidKeyData);
 		hash_ctl.entrysize = sizeof(ComboCidEntryData);
-		hash_ctl.hash = tag_hash;
 		hash_ctl.hcxt = TopTransactionContext;
 
 		comboHash = hash_create("Combo CIDs",
 			CCID_HASH_SIZE,
 			&hash_ctl,
-			HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
+			HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
 
 		comboCids = (ComboCidKeyData *)
 			MemoryContextAlloc(TopTransactionContext,