Fix --disable-spinlocks in 9.2 and 9.3 branches.
My back-patch of the 9.4-era commit 44cd47c1d4 into 9.2 and 9.3 fixed HPPA builds as expected, but it broke --disable-spinlocks builds, because the dummy spinlock is initialized before the underlying semaphore infrastructure is alive. In 9.4 and up this works because of commit daa7527afc, which decoupled initialization of an slock_t variable from access to the actual system semaphore object. The best solution seems to be to back-port that patch, which should be a net win anyway because it improves the usability of --disable-spinlocks builds in the older branches; and it's been out long enough now to not be worrisome from a stability perspective.
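For readers skimming the patch below: the key change is that, in a --disable-spinlocks build, slock_t becomes a plain int indexing into a fixed pool of NUM_SPINLOCK_SEMAPHORES semaphores created once at startup, instead of a PGSemaphoreData that has to be created at SpinLockInit time. What follows is a minimal standalone sketch of that scheme, not PostgreSQL code: it uses POSIX sem_t in place of PGSemaphore and a pool of 8 rather than 1024, purely for illustration.

#include <semaphore.h>
#include <stdio.h>

#define NUM_SPINLOCK_SEMAPHORES 8       /* PostgreSQL uses 1024 */

typedef int slock_t;                    /* was PGSemaphoreData before this patch */

static sem_t spinlock_sema_array[NUM_SPINLOCK_SEMAPHORES];

/* Create the whole pool up front, analogous to SpinlockSemaInit(). */
static void
spinlock_sema_init(void)
{
    for (int i = 0; i < NUM_SPINLOCK_SEMAPHORES; ++i)
        sem_init(&spinlock_sema_array[i], 0, 1);
}

/* "Initializing" a lock is now just handing out an array index. */
static void
s_init_lock_sema(volatile slock_t *lock)
{
    static int counter = 0;

    *lock = (++counter) % NUM_SPINLOCK_SEMAPHORES;
}

static void
s_lock_sema(volatile slock_t *lock)
{
    sem_wait(&spinlock_sema_array[*lock]);
}

static void
s_unlock_sema(volatile slock_t *lock)
{
    sem_post(&spinlock_sema_array[*lock]);
}

int
main(void)
{
    slock_t dummy;

    /* Works even though no semaphore exists yet -- the ordering that broke. */
    s_init_lock_sema(&dummy);

    spinlock_sema_init();       /* semaphore infrastructure comes up later */

    s_lock_sema(&dummy);
    printf("acquired emulated spinlock backed by semaphore %d\n", dummy);
    s_unlock_sema(&dummy);
    return 0;
}

Because handing out an index touches no OS object, a lock can be "initialized" before the semaphore pool exists, which is exactly the ordering that broke in the 9.2 and 9.3 branches.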
@@ -500,6 +500,9 @@ typedef struct
 	slock_t    *ShmemLock;
 	VariableCache ShmemVariableCache;
 	Backend    *ShmemBackendArray;
+#ifndef HAVE_SPINLOCKS
+	PGSemaphore SpinlockSemaArray;
+#endif
 	LWLock     *LWLockArray;
 	slock_t    *ProcStructLock;
 	PROC_HDR   *ProcGlobal;
@@ -6050,6 +6053,9 @@ save_backend_variables(BackendParameters *param, Port *port,
 	param->ShmemVariableCache = ShmemVariableCache;
 	param->ShmemBackendArray = ShmemBackendArray;
 
+#ifndef HAVE_SPINLOCKS
+	param->SpinlockSemaArray = SpinlockSemaArray;
+#endif
 	param->LWLockArray = LWLockArray;
 	param->ProcStructLock = ProcStructLock;
 	param->ProcGlobal = ProcGlobal;
@@ -6278,6 +6284,9 @@ restore_backend_variables(BackendParameters *param, Port *port)
 	ShmemVariableCache = param->ShmemVariableCache;
 	ShmemBackendArray = param->ShmemBackendArray;
 
+#ifndef HAVE_SPINLOCKS
+	SpinlockSemaArray = param->SpinlockSemaArray;
+#endif
 	LWLockArray = param->LWLockArray;
 	ProcStructLock = param->ProcStructLock;
 	ProcGlobal = param->ProcGlobal;
@@ -103,6 +103,7 @@ CreateSharedMemoryAndSemaphores(bool makePrivate, int port)
 	 * need to be so careful during the actual allocation phase.
 	 */
 	size = 100000;
+	size = add_size(size, SpinlockSemaSize());
 	size = add_size(size, hash_estimate_size(SHMEM_INDEX_SIZE,
 						 sizeof(ShmemIndexEnt)));
 	size = add_size(size, BufferShmemSize());
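The add_size(size, SpinlockSemaSize()) term above grows the shared-memory estimate by one PGSemaphoreData per pooled semaphore (SpinlockSemaSize() is defined further down as SpinlockSemas() * sizeof(PGSemaphoreData)). A back-of-the-envelope sketch, with sizeof(PGSemaphoreData) assumed to be 16 bytes purely for illustration (the real size is platform-dependent):

#include <stdio.h>

#define NUM_SPINLOCK_SEMAPHORES 1024
#define ASSUMED_SIZEOF_PGSEMAPHOREDATA 16   /* hypothetical figure */

int
main(void)
{
    unsigned long extra =
        (unsigned long) NUM_SPINLOCK_SEMAPHORES * ASSUMED_SIZEOF_PGSEMAPHOREDATA;

    /* SpinlockSemaSize() = SpinlockSemas() * sizeof(PGSemaphoreData) */
    printf("extra shared memory: %lu bytes (~%lu kB)\n", extra, extra / 1024);
    return 0;
}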
@@ -116,9 +116,24 @@ InitShmemAllocation(void)
 	Assert(shmhdr != NULL);
 
 	/*
-	 * Initialize the spinlock used by ShmemAlloc. We have to do the space
-	 * allocation the hard way, since obviously ShmemAlloc can't be called
-	 * yet.
+	 * If spinlocks are disabled, initialize emulation layer. We have to do
+	 * the space allocation the hard way, since obviously ShmemAlloc can't be
+	 * called yet.
 	 */
+#ifndef HAVE_SPINLOCKS
+	{
+		PGSemaphore spinsemas;
+
+		spinsemas = (PGSemaphore) (((char *) shmhdr) + shmhdr->freeoffset);
+		shmhdr->freeoffset += MAXALIGN(SpinlockSemaSize());
+		SpinlockSemaInit(spinsemas);
+		Assert(shmhdr->freeoffset <= shmhdr->totalsize);
+	}
+#endif
+
+	/*
+	 * Initialize the spinlock used by ShmemAlloc; we have to do this the hard
+	 * way, too, for the same reasons as above.
+	 */
 	ShmemLock = (slock_t *) (((char *) shmhdr) + shmhdr->freeoffset);
 	shmhdr->freeoffset += MAXALIGN(sizeof(slock_t));
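InitShmemAllocation has to hand-carve this space because ShmemAlloc itself depends on ShmemLock, which does not exist yet. Below is a simplified standalone model of that "hard way" bump allocation, assuming 8-byte alignment and using made-up names (FakeShmemHeader, carve); it is only a sketch of the pattern, not the PostgreSQL code.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* assumes 8-byte alignment; PostgreSQL's MAXALIGN is platform-defined */
#define MY_MAXALIGN(len) (((uintptr_t) (len) + 7) & ~(uintptr_t) 7)

typedef struct
{
    size_t totalsize;
    size_t freeoffset;
    /* the rest of the segment follows in memory */
} FakeShmemHeader;

static char segment[4096];      /* stand-in for the shared memory segment */

/* Bump-allocate raw space straight off the header, like the code above. */
static void *
carve(FakeShmemHeader *hdr, size_t size)
{
    void *result = (char *) hdr + hdr->freeoffset;

    hdr->freeoffset += MY_MAXALIGN(size);
    assert(hdr->freeoffset <= hdr->totalsize);
    return result;
}

int
main(void)
{
    FakeShmemHeader *hdr = (FakeShmemHeader *) segment;

    hdr->totalsize = sizeof(segment);
    hdr->freeoffset = MY_MAXALIGN(sizeof(FakeShmemHeader));

    void *spinsemas = carve(hdr, 1024);         /* like the semaphore array */
    void *shmemlock = carve(hdr, sizeof(int));  /* like ShmemLock */

    printf("spinsemas at offset %zu, ShmemLock at offset %zu\n",
           (size_t) ((char *) spinsemas - segment),
           (size_t) ((char *) shmemlock - segment));
    return 0;
}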
@@ -25,9 +25,24 @@
 #include "miscadmin.h"
 #include "replication/walsender.h"
 #include "storage/lwlock.h"
+#include "storage/pg_sema.h"
 #include "storage/spin.h"
 
 
+#ifndef HAVE_SPINLOCKS
+PGSemaphore SpinlockSemaArray;
+#endif
+
+/*
+ * Report the amount of shared memory needed to store semaphores for spinlock
+ * support.
+ */
+Size
+SpinlockSemaSize(void)
+{
+	return SpinlockSemas() * sizeof(PGSemaphoreData);
+}
+
 #ifdef HAVE_SPINLOCKS
 
 /*
@@ -51,21 +66,20 @@ SpinlockSemas(void)
 int
 SpinlockSemas(void)
 {
-	int			nsemas;
+	return NUM_SPINLOCK_SEMAPHORES;
+}
 
-	/*
-	 * It would be cleaner to distribute this logic into the affected modules,
-	 * similar to the way shmem space estimation is handled.
-	 *
-	 * For now, though, there are few enough users of spinlocks that we just
-	 * keep the knowledge here.
-	 */
-	nsemas = NumLWLocks();		/* one for each lwlock */
-	nsemas += NBuffers;			/* one for each buffer header */
-	nsemas += max_wal_senders;	/* one for each wal sender process */
-	nsemas += 30;				/* plus a bunch for other small-scale use */
+/*
+ * Initialize semaphores.
+ */
+extern void
+SpinlockSemaInit(PGSemaphore spinsemas)
+{
+	int			i;
 
-	return nsemas;
+	for (i = 0; i < NUM_SPINLOCK_SEMAPHORES; ++i)
+		PGSemaphoreCreate(&spinsemas[i]);
+	SpinlockSemaArray = spinsemas;
 }
 
 /*
@@ -75,13 +89,15 @@ SpinlockSemas(void)
 void
 s_init_lock_sema(volatile slock_t *lock)
 {
-	PGSemaphoreCreate((PGSemaphore) lock);
+	static int	counter = 0;
+
+	*lock = (++counter) % NUM_SPINLOCK_SEMAPHORES;
 }
 
 void
 s_unlock_sema(volatile slock_t *lock)
 {
-	PGSemaphoreUnlock((PGSemaphore) lock);
+	PGSemaphoreUnlock(&SpinlockSemaArray[*lock]);
 }
 
 bool
@@ -96,7 +112,7 @@ int
 tas_sema(volatile slock_t *lock)
 {
 	/* Note that TAS macros return 0 if *success* */
-	return !PGSemaphoreTryLock((PGSemaphore) lock);
+	return !PGSemaphoreTryLock(&SpinlockSemaArray[*lock]);
 }
 
 #endif   /* !HAVE_SPINLOCKS */
@@ -56,6 +56,14 @@
  */
 #define NUM_USER_DEFINED_LWLOCKS	4
 
+/*
+ * When we don't have native spinlocks, we use semaphores to simulate them.
+ * Decreasing this value reduces consumption of OS resources; increasing it
+ * may improve performance, but supplying a real spinlock implementation is
+ * probably far better.
+ */
+#define NUM_SPINLOCK_SEMAPHORES		1024
+
 /*
  * Define this if you want to allow the lo_import and lo_export SQL
  * functions to be executed by ordinary users. By default these
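As the new comment above suggests, the pool size is a resource-versus-contention knob: s_init_lock_sema assigns semaphore indexes round robin, so once more than NUM_SPINLOCK_SEMAPHORES spinlocks have been initialized, unrelated locks start sharing a backing semaphore. That can add contention but does not affect correctness. A small hypothetical illustration of the wrap-around, again standalone code rather than anything from the patch:

#include <stdio.h>

#define NUM_SPINLOCK_SEMAPHORES 1024

typedef int slock_t;

static void
s_init_lock_sema(volatile slock_t *lock)
{
    static int counter = 0;

    *lock = (++counter) % NUM_SPINLOCK_SEMAPHORES;
}

int
main(void)
{
    slock_t first;
    slock_t later;

    s_init_lock_sema(&first);   /* gets index 1 */

    for (int i = 0; i < NUM_SPINLOCK_SEMAPHORES - 1; ++i)
    {
        slock_t tmp;

        s_init_lock_sema(&tmp);
    }
    s_init_lock_sema(&later);   /* counter wraps: index 1 again */

    printf("first=%d later=%d shared=%s\n", first, later,
           first == later ? "yes" : "no");
    return 0;
}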
@@ -94,11 +94,8 @@
 #ifndef S_LOCK_H
 #define S_LOCK_H
 
-#include "storage/pg_sema.h"
-
 #ifdef HAVE_SPINLOCKS	/* skip spinlocks if requested */
 
-
 #if defined(__GNUC__) || defined(__INTEL_COMPILER)
 /*************************************************************************
  * All the gcc inlines
@@ -1032,7 +1029,7 @@ spin_delay(void)
  * to fall foul of kernel limits on number of semaphores, so don't use this
  * unless you must! The subroutines appear in spin.c.
  */
-typedef PGSemaphoreData slock_t;
+typedef int slock_t;
 
 extern bool s_lock_free_sema(volatile slock_t *lock);
 extern void s_unlock_sema(volatile slock_t *lock);
@@ -57,6 +57,9 @@
 #define SPIN_H
 
 #include "storage/s_lock.h"
+#ifndef HAVE_SPINLOCKS
+#include "storage/pg_sema.h"
+#endif
 
 
 #define SpinLockInit(lock)	S_INIT_LOCK(lock)
@@ -69,5 +72,11 @@
 
 
 extern int	SpinlockSemas(void);
+extern Size SpinlockSemaSize(void);
+
+#ifndef HAVE_SPINLOCKS
+extern void SpinlockSemaInit(PGSemaphore);
+extern PGSemaphore SpinlockSemaArray;
+#endif
 
 #endif   /* SPIN_H */