mirror of
https://github.com/postgres/postgres.git
synced 2025-10-29 22:49:41 +03:00
Remove --disable-spinlocks.
A later change will require atomic support, so it wouldn't make sense
for a hypothetical new system not to be able to implement spinlocks.

Reviewed-by: Heikki Linnakangas <hlinnaka@iki.fi>
Reviewed-by: Tom Lane <tgl@sss.pgh.pa.us> (concept, not the patch)
Reviewed-by: Andres Freund <andres@anarazel.de> (concept, not the patch)
Discussion: https://postgr.es/m/3351991.1697728588%40sss.pgh.pa.us
This commit is contained in:
@@ -94,7 +94,6 @@ CalculateShmemSize(int *num_semaphores)
|
||||
|
||||
/* Compute number of semaphores we'll need */
|
||||
numSemas = ProcGlobalSemas();
|
||||
numSemas += SpinlockSemas();
|
||||
|
||||
/* Return the number of semaphores if requested by the caller */
|
||||
if (num_semaphores)
|
||||
@@ -111,7 +110,6 @@ CalculateShmemSize(int *num_semaphores)
|
||||
*/
|
||||
size = 100000;
|
||||
size = add_size(size, PGSemaphoreShmemSize(numSemas));
|
||||
size = add_size(size, SpinlockSemaSize());
|
||||
size = add_size(size, hash_estimate_size(SHMEM_INDEX_SIZE,
|
||||
sizeof(ShmemIndexEnt)));
|
||||
size = add_size(size, dsm_estimate_size());
|
||||
@@ -225,14 +223,6 @@ CreateSharedMemoryAndSemaphores(void)
|
||||
*/
|
||||
PGReserveSemaphores(numSemas);
|
||||
|
||||
/*
|
||||
* If spinlocks are disabled, initialize emulation layer (which depends on
|
||||
* semaphores, so the order is important here).
|
||||
*/
|
||||
#ifndef HAVE_SPINLOCKS
|
||||
SpinlockSemaInit();
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Set up shared memory allocation mechanism
|
||||
*/
|
||||
|
||||
@@ -21,7 +21,6 @@ OBJS = \
|
||||
predicate.o \
|
||||
proc.o \
|
||||
s_lock.o \
|
||||
spin.o
|
||||
|
||||
include $(top_srcdir)/src/backend/common.mk
|
||||
|
||||
|
||||
@@ -9,5 +9,4 @@ backend_sources += files(
|
||||
'predicate.c',
|
||||
'proc.c',
|
||||
's_lock.c',
|
||||
'spin.c',
|
||||
)
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
/*-------------------------------------------------------------------------
|
||||
*
|
||||
* s_lock.c
|
||||
* Hardware-dependent implementation of spinlocks.
|
||||
* Implementation of spinlocks.
|
||||
*
|
||||
* When waiting for a contended spinlock we loop tightly for awhile, then
|
||||
* delay using pg_usleep() and try again. Preferably, "awhile" should be a
|
||||
|
||||
@@ -1,180 +0,0 @@
|
||||
/*-------------------------------------------------------------------------
|
||||
*
|
||||
* spin.c
|
||||
* Hardware-independent implementation of spinlocks.
|
||||
*
|
||||
*
|
||||
* For machines that have test-and-set (TAS) instructions, s_lock.h/.c
|
||||
* define the spinlock implementation. This file contains only a stub
|
||||
* implementation for spinlocks using PGSemaphores. Unless semaphores
|
||||
* are implemented in a way that doesn't involve a kernel call, this
|
||||
* is too slow to be very useful :-(
|
||||
*
|
||||
*
|
||||
* Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
|
||||
* Portions Copyright (c) 1994, Regents of the University of California
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* src/backend/storage/lmgr/spin.c
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
#include "postgres.h"
|
||||
|
||||
#include "storage/pg_sema.h"
|
||||
#include "storage/shmem.h"
|
||||
#include "storage/spin.h"
|
||||
|
||||
|
||||
#ifndef HAVE_SPINLOCKS
|
||||
|
||||
/*
|
||||
* No TAS, so spinlocks are implemented as PGSemaphores.
|
||||
*/
|
||||
|
||||
#ifndef HAVE_ATOMICS
|
||||
#define NUM_EMULATION_SEMAPHORES (NUM_SPINLOCK_SEMAPHORES + NUM_ATOMICS_SEMAPHORES)
|
||||
#else
|
||||
#define NUM_EMULATION_SEMAPHORES (NUM_SPINLOCK_SEMAPHORES)
|
||||
#endif /* HAVE_ATOMICS */
|
||||
|
||||
PGSemaphore *SpinlockSemaArray;
|
||||
|
||||
#else /* !HAVE_SPINLOCKS */
|
||||
|
||||
#define NUM_EMULATION_SEMAPHORES 0
|
||||
|
||||
#endif /* HAVE_SPINLOCKS */
|
||||
|
||||
/*
|
||||
* Report the amount of shared memory needed to store semaphores for spinlock
|
||||
* support.
|
||||
*/
|
||||
Size
|
||||
SpinlockSemaSize(void)
|
||||
{
|
||||
return NUM_EMULATION_SEMAPHORES * sizeof(PGSemaphore);
|
||||
}
|
||||
|
||||
/*
|
||||
* Report number of semaphores needed to support spinlocks.
|
||||
*/
|
||||
int
|
||||
SpinlockSemas(void)
|
||||
{
|
||||
return NUM_EMULATION_SEMAPHORES;
|
||||
}
|
||||
|
||||
#ifndef HAVE_SPINLOCKS
|
||||
|
||||
/*
|
||||
* Initialize spinlock emulation.
|
||||
*
|
||||
* This must be called after PGReserveSemaphores().
|
||||
*/
|
||||
void
|
||||
SpinlockSemaInit(void)
|
||||
{
|
||||
PGSemaphore *spinsemas;
|
||||
int nsemas = SpinlockSemas();
|
||||
int i;
|
||||
|
||||
/*
|
||||
* We must use ShmemAllocUnlocked(), since the spinlock protecting
|
||||
* ShmemAlloc() obviously can't be ready yet.
|
||||
*/
|
||||
spinsemas = (PGSemaphore *) ShmemAllocUnlocked(SpinlockSemaSize());
|
||||
for (i = 0; i < nsemas; ++i)
|
||||
spinsemas[i] = PGSemaphoreCreate();
|
||||
SpinlockSemaArray = spinsemas;
|
||||
}
|
||||
|
||||
/*
|
||||
* s_lock.h hardware-spinlock emulation using semaphores
|
||||
*
|
||||
* We map all spinlocks onto NUM_EMULATION_SEMAPHORES semaphores. It's okay to
|
||||
* map multiple spinlocks onto one semaphore because no process should ever
|
||||
* hold more than one at a time. We just need enough semaphores so that we
|
||||
* aren't adding too much extra contention from that.
|
||||
*
|
||||
* There is one exception to the restriction of only holding one spinlock at a
|
||||
* time, which is that it's ok if emulated atomic operations are nested inside
|
||||
* spinlocks. To avoid the danger of spinlocks and atomic using the same sema,
|
||||
* we make sure "normal" spinlocks and atomics backed by spinlocks use
|
||||
* distinct semaphores (see the nested argument to s_init_lock_sema).
|
||||
*
|
||||
* slock_t is just an int for this implementation; it holds the spinlock
|
||||
* number from 1..NUM_EMULATION_SEMAPHORES. We intentionally ensure that 0
|
||||
* is not a valid value, so that testing with this code can help find
|
||||
* failures to initialize spinlocks.
|
||||
*/
|
||||
|
||||
static inline void
|
||||
s_check_valid(int lockndx)
|
||||
{
|
||||
if (unlikely(lockndx <= 0 || lockndx > NUM_EMULATION_SEMAPHORES))
|
||||
elog(ERROR, "invalid spinlock number: %d", lockndx);
|
||||
}
|
||||
|
||||
void
|
||||
s_init_lock_sema(volatile slock_t *lock, bool nested)
|
||||
{
|
||||
static uint32 counter = 0;
|
||||
uint32 offset;
|
||||
uint32 sema_total;
|
||||
uint32 idx;
|
||||
|
||||
if (nested)
|
||||
{
|
||||
/*
|
||||
* To allow nesting atomics inside spinlocked sections, use a
|
||||
* different spinlock. See comment above.
|
||||
*/
|
||||
offset = 1 + NUM_SPINLOCK_SEMAPHORES;
|
||||
sema_total = NUM_ATOMICS_SEMAPHORES;
|
||||
}
|
||||
else
|
||||
{
|
||||
offset = 1;
|
||||
sema_total = NUM_SPINLOCK_SEMAPHORES;
|
||||
}
|
||||
|
||||
idx = (counter++ % sema_total) + offset;
|
||||
|
||||
/* double check we did things correctly */
|
||||
s_check_valid(idx);
|
||||
|
||||
*lock = idx;
|
||||
}
|
||||
|
||||
void
|
||||
s_unlock_sema(volatile slock_t *lock)
|
||||
{
|
||||
int lockndx = *lock;
|
||||
|
||||
s_check_valid(lockndx);
|
||||
|
||||
PGSemaphoreUnlock(SpinlockSemaArray[lockndx - 1]);
|
||||
}
|
||||
|
||||
bool
|
||||
s_lock_free_sema(volatile slock_t *lock)
|
||||
{
|
||||
/* We don't currently use S_LOCK_FREE anyway */
|
||||
elog(ERROR, "spin.c does not support S_LOCK_FREE()");
|
||||
return false;
|
||||
}
|
||||
|
||||
int
|
||||
tas_sema(volatile slock_t *lock)
|
||||
{
|
||||
int lockndx = *lock;
|
||||
|
||||
s_check_valid(lockndx);
|
||||
|
||||
/* Note that TAS macros return 0 if *success* */
|
||||
return !PGSemaphoreTryLock(SpinlockSemaArray[lockndx - 1]);
|
||||
}
|
||||
|
||||
#endif /* !HAVE_SPINLOCKS */
|
||||
Reference in New Issue
Block a user