
Shared-memory hashtables have non-extensible directories, which means
it's a good idea to choose the directory size based on the expected
number of entries.  But ShmemInitHash was using a hard-wired constant.
Boo hiss.  This accounts for a recent report of postmaster failure when
asking for 64K or more buffers.
commit 08b1040374
parent c05abfb1a8
Author: Tom Lane
Date: 2000-02-26 05:25:55 +00:00

3 changed files with 53 additions and 25 deletions
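The arithmetic behind the failure, as a minimal sketch. The dynahash defaults
assumed here (DEF_SEGSIZE = 256, DEF_DIRSIZE = 256, DEF_FFACTOR = 1) are
assumptions about this era's sources, not values quoted from the diff below:

    #include <stdio.h>

    /* assumed dynahash defaults; not quoted from this commit's diff */
    #define DEF_SEGSIZE 256     /* buckets per directory segment */
    #define DEF_DIRSIZE 256     /* the old hard-wired directory size */
    #define DEF_FFACTOR 1       /* target entries per bucket */

    int
    main(void)
    {
        /* a fixed directory of DEF_DIRSIZE segments caps the table at: */
        long    ceiling = (long) DEF_DIRSIZE * DEF_SEGSIZE * DEF_FFACTOR;

        printf("capacity ceiling: %ld entries\n", ceiling);    /* 65536 */
        /* hence the reported postmaster failures at 64K or more buffers */
        return 0;
    }

Under these assumed defaults the ceiling lands exactly at 65536 entries, which
matches the reported failure threshold of 64K buffers.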

src/backend/storage/ipc/shmem.c

@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/ipc/shmem.c,v 1.48 2000/01/26 05:56:58 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/ipc/shmem.c,v 1.49 2000/02/26 05:25:55 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -332,7 +332,7 @@ ShmemIsValid(unsigned long addr)
 HTAB *
 ShmemInitHash(char *name,       /* table string name for shmem index */
               long init_size,   /* initial table size */
-              long max_size,    /* max size of the table (NOT USED) */
+              long max_size,    /* max size of the table */
               HASHCTL *infoP,   /* info about key and bucket size */
               int hash_flags)   /* info about infoP */
 {
@@ -340,19 +340,21 @@ ShmemInitHash(char *name, /* table string name for shmem index */
 	long	   *location;

 	/*
-	 * Hash tables allocated in shared memory have a fixed directory; it
-	 * can't grow or other backends wouldn't be able to find it. The
-	 * segbase is for calculating pointer values. The shared memory
+	 * Hash tables allocated in shared memory have a fixed directory;
+	 * it can't grow or other backends wouldn't be able to find it.
+	 * So, make sure we make it big enough to start with.
+	 *
+	 * The segbase is for calculating pointer values.  The shared memory
 	 * allocator must be specified too.
 	 */
-	infoP->dsize = infoP->max_dsize = DEF_DIRSIZE;
+	infoP->dsize = infoP->max_dsize = hash_select_dirsize(max_size);
 	infoP->segbase = (long *) ShmemBase;
 	infoP->alloc = ShmemAlloc;
 	hash_flags |= HASH_SHARED_MEM | HASH_DIRSIZE;

 	/* look it up in the shmem index */
 	location = ShmemInitStruct(name,
-	                           sizeof(HHDR) + DEF_DIRSIZE * sizeof(SEG_OFFSET),
+	                           sizeof(HHDR) + infoP->dsize * sizeof(SEG_OFFSET),
 	                           &found);

 	/*
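Effect on the shared-memory request: the directory part of the
ShmemInitStruct() allocation now scales with the computed dsize instead of
being pinned at DEF_DIRSIZE. A hedged illustration (HHDR_SIZE is a
hypothetical stand-in for sizeof(HHDR), and the SEG_OFFSET typedef is an
assumption; the real declarations live in the hsearch headers):

    #include <stdio.h>

    typedef long SEG_OFFSET;    /* assumption: a long-sized shmem offset */

    #define DEF_DIRSIZE 256     /* assumed old fixed directory size */
    #define HHDR_SIZE   64      /* hypothetical sizeof(HHDR) */

    int
    main(void)
    {
        long    dsize = 512;    /* e.g., a plausible hash_select_dirsize()
                                 * result for a 128K-entry table */

        printf("old request: %ld bytes\n",
               (long) (HHDR_SIZE + DEF_DIRSIZE * sizeof(SEG_OFFSET)));
        printf("new request: %ld bytes\n",
               (long) (HHDR_SIZE + dsize * sizeof(SEG_OFFSET)));
        return 0;
    }

The extra cost is a few kilobytes of segment offsets, cheap insurance against
a table that can never grow to its declared maximum.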

src/backend/utils/hash/dynahash.c

@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/hash/dynahash.c,v 1.28 2000/01/26 05:57:24 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/hash/dynahash.c,v 1.29 2000/02/26 05:25:54 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -328,10 +328,7 @@ init_htab(HTAB *hashp, int nelem)
 	{
 		*segp = seg_alloc(hashp);
 		if (*segp == (SEG_OFFSET) 0)
-		{
-			hash_destroy(hashp);
-			return 0;
-		}
+			return -1;
 	}

 #if HASH_DEBUG
@@ -392,6 +389,34 @@ hash_estimate_size(long num_entries, long keysize, long datasize)
 	return size;
 }

+/*
+ * Select an appropriate directory size for a hashtable with the given
+ * maximum number of entries.
+ * This is only needed for hashtables in shared memory, whose directories
+ * cannot be expanded dynamically.
+ * NB: assumes that all hash structure parameters have default values!
+ *
+ * XXX this had better agree with the behavior of init_htab()...
+ */
+long
+hash_select_dirsize(long num_entries)
+{
+	long		nBuckets,
+				nSegments,
+				nDirEntries;
+
+	/* estimate number of buckets wanted */
+	nBuckets = 1L << my_log2((num_entries - 1) / DEF_FFACTOR + 1);
+	/* # of segments needed for nBuckets */
+	nSegments = 1L << my_log2((nBuckets - 1) / DEF_SEGSIZE + 1);
+	/* directory entries */
+	nDirEntries = DEF_DIRSIZE;
+	while (nDirEntries < nSegments)
+		nDirEntries <<= 1;		/* dir_alloc doubles dsize at each call */
+
+	return nDirEntries;
+}
+
 /********************** DESTROY ROUTINES ************************/
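To see the new function's behavior end to end, a standalone sketch that
re-implements it with the same assumed defaults as above (DEF_SEGSIZE = 256,
DEF_DIRSIZE = 256, DEF_FFACTOR = 1) and an assumed my_log2() that returns the
smallest i with 2^i >= num; this mirrors the diff but is not the real dynahash
code:

    #include <stdio.h>

    #define DEF_SEGSIZE 256     /* assumed: buckets per segment */
    #define DEF_DIRSIZE 256     /* assumed: minimum directory size */
    #define DEF_FFACTOR 1       /* assumed: default fill factor */

    /* assumed behavior of my_log2(): smallest i such that (1L << i) >= num */
    static int
    my_log2(long num)
    {
        int     i = 0;

        while ((1L << i) < num)
            i++;
        return i;
    }

    /* mirrors hash_select_dirsize() as added by this commit */
    static long
    hash_select_dirsize(long num_entries)
    {
        long    nBuckets,
                nSegments,
                nDirEntries;

        /* estimate number of buckets wanted */
        nBuckets = 1L << my_log2((num_entries - 1) / DEF_FFACTOR + 1);
        /* # of segments needed for nBuckets */
        nSegments = 1L << my_log2((nBuckets - 1) / DEF_SEGSIZE + 1);
        /* directory doubles until it covers the segments */
        nDirEntries = DEF_DIRSIZE;
        while (nDirEntries < nSegments)
            nDirEntries <<= 1;

        return nDirEntries;
    }

    int
    main(void)
    {
        long    sizes[] = {1000, 65536, 131072, 1048576};
        int     i;

        for (i = 0; i < 4; i++)
            printf("%8ld entries -> dirsize %ld\n",
                   sizes[i], hash_select_dirsize(sizes[i]));
        /* prints 256, 256, 512, and 4096 respectively */
        return 0;
    }

Under these assumptions, 64K entries is exactly where the old fixed
DEF_DIRSIZE ran out; every doubling beyond that is headroom the old code
could never provide.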