From e25626677f8076eb3ce94586136c5464ee154381 Mon Sep 17 00:00:00 2001 From: Thomas Munro Date: Tue, 30 Jul 2024 21:45:01 +1200 Subject: [PATCH] Remove --disable-spinlocks. A later change will require atomic support, so it wouldn't make sense for a hypothetical new system not to be able to implement spinlocks. Reviewed-by: Heikki Linnakangas Reviewed-by: Tom Lane (concept, not the patch) Reviewed-by: Andres Freund (concept, not the patch) Discussion: https://postgr.es/m/3351991.1697728588%40sss.pgh.pa.us --- configure | 40 ------ configure.ac | 13 -- doc/src/sgml/installation.sgml | 37 +---- meson.build | 6 - src/backend/port/atomics.c | 26 ---- src/backend/port/posix_sema.c | 3 +- src/backend/port/sysv_sema.c | 3 +- src/backend/postmaster/launch_backend.c | 8 -- src/backend/storage/ipc/ipci.c | 10 -- src/backend/storage/lmgr/Makefile | 1 - src/backend/storage/lmgr/meson.build | 1 - src/backend/storage/lmgr/s_lock.c | 2 +- src/backend/storage/lmgr/spin.c | 180 ------------------------ src/include/pg_config.h.in | 3 - src/include/pg_config_manual.h | 15 -- src/include/port/atomics.h | 4 +- src/include/port/atomics/fallback.h | 4 +- src/include/storage/s_lock.h | 39 +---- src/include/storage/spin.h | 18 +-- src/test/regress/regress.c | 86 ----------- 20 files changed, 13 insertions(+), 486 deletions(-) delete mode 100644 src/backend/storage/lmgr/spin.c diff --git a/configure b/configure index ea5514fab1a..f8deaa8d78a 100755 --- a/configure +++ b/configure @@ -836,7 +836,6 @@ enable_integer_datetimes enable_nls with_pgport enable_rpath -enable_spinlocks enable_atomics enable_debug enable_profiling @@ -1529,7 +1528,6 @@ Optional Features: enable Native Language Support --disable-rpath do not embed shared library search path in executables - --disable-spinlocks do not use spinlocks --disable-atomics do not use atomic operations --enable-debug build with debugging symbols (-g) --enable-profiling build with profiling enabled @@ -3266,33 +3264,6 @@ fi -# -# Spinlocks -# - - -# Check whether --enable-spinlocks was given. -if test "${enable_spinlocks+set}" = set; then : - enableval=$enable_spinlocks; - case $enableval in - yes) - : - ;; - no) - : - ;; - *) - as_fn_error $? "no argument expected for --enable-spinlocks option" "$LINENO" 5 - ;; - esac - -else - enable_spinlocks=yes - -fi - - - # # Atomic operations # @@ -12185,17 +12156,6 @@ fi fi -if test "$enable_spinlocks" = yes; then - -$as_echo "#define HAVE_SPINLOCKS 1" >>confdefs.h - -else - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: -*** Not using spinlocks will cause poor performance." >&5 -$as_echo "$as_me: WARNING: -*** Not using spinlocks will cause poor performance." >&2;} -fi - if test "$enable_atomics" = yes; then $as_echo "#define HAVE_ATOMICS 1" >>confdefs.h diff --git a/configure.ac b/configure.ac index 0089e78b687..a72169f5745 100644 --- a/configure.ac +++ b/configure.ac @@ -186,12 +186,6 @@ PGAC_ARG_BOOL(enable, rpath, yes, [do not embed shared library search path in executables]) AC_SUBST(enable_rpath) -# -# Spinlocks -# -PGAC_ARG_BOOL(enable, spinlocks, yes, - [do not use spinlocks]) - # # Atomic operations # @@ -1296,13 +1290,6 @@ failure. It is possible the compiler isn't looking in the proper directory. 
Use --without-zlib to disable zlib support.])]) fi -if test "$enable_spinlocks" = yes; then - AC_DEFINE(HAVE_SPINLOCKS, 1, [Define to 1 if you have spinlocks.]) -else - AC_MSG_WARN([ -*** Not using spinlocks will cause poor performance.]) -fi - if test "$enable_atomics" = yes; then AC_DEFINE(HAVE_ATOMICS, 1, [Define to 1 if you want to use atomics if available.]) else diff --git a/doc/src/sgml/installation.sgml b/doc/src/sgml/installation.sgml index 4784834ab9f..3f19f272b17 100644 --- a/doc/src/sgml/installation.sgml +++ b/doc/src/sgml/installation.sgml @@ -1258,22 +1258,6 @@ build-postgresql: - - - - - Allow the build to succeed even if PostgreSQL - has no CPU spinlock support for the platform. The lack of - spinlock support will result in very poor performance; therefore, - this option should only be used if the build aborts and - informs you that the platform lacks spinlock support. If this - option is required to build PostgreSQL on - your platform, please report the problem to the - PostgreSQL developers. - - - - @@ -2690,23 +2674,6 @@ ninja install - - - - - This option is set to true by default; setting it to false will - allow the build to succeed even if PostgreSQL - has no CPU spinlock support for the platform. The lack of - spinlock support will result in very poor performance; therefore, - this option should only be changed if the build aborts and - informs you that the platform lacks spinlock support. If setting this - option to false is required to build PostgreSQL on - your platform, please report the problem to the - PostgreSQL developers. - - - - @@ -2719,6 +2686,7 @@ ninja install + @@ -3393,9 +3361,6 @@ export MANPATH these CPU architectures: x86, PowerPC, S/390, SPARC, ARM, MIPS, and RISC-V, including big-endian, little-endian, 32-bit, and 64-bit variants where applicable. - It is often - possible to build on an unsupported CPU type by configuring with - , but performance will be poor. diff --git a/meson.build b/meson.build index 27805b9bcc9..6a0d5383652 100644 --- a/meson.build +++ b/meson.build @@ -2089,12 +2089,6 @@ endif # Atomics ############################################################### -if not get_option('spinlocks') - warning('Not using spinlocks will cause poor performance') -else - cdata.set('HAVE_SPINLOCKS', 1) -endif - if not get_option('atomics') warning('Not using atomics will cause poor performance') else diff --git a/src/backend/port/atomics.c b/src/backend/port/atomics.c index 93789b4e058..cd7ede96726 100644 --- a/src/backend/port/atomics.c +++ b/src/backend/port/atomics.c @@ -57,17 +57,7 @@ pg_atomic_init_flag_impl(volatile pg_atomic_flag *ptr) StaticAssertDecl(sizeof(ptr->sema) >= sizeof(slock_t), "size mismatch of atomic_flag vs slock_t"); -#ifndef HAVE_SPINLOCKS - - /* - * NB: If we're using semaphore based TAS emulation, be careful to use a - * separate set of semaphores. Otherwise we'd get in trouble if an atomic - * var would be manipulated while spinlock is held. - */ - s_init_lock_sema((slock_t *) &ptr->sema, true); -#else SpinLockInit((slock_t *) &ptr->sema); -#endif ptr->value = false; } @@ -108,15 +98,7 @@ pg_atomic_init_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val_) StaticAssertDecl(sizeof(ptr->sema) >= sizeof(slock_t), "size mismatch of atomic_uint32 vs slock_t"); - /* - * If we're using semaphore based atomic flags, be careful about nested - * usage of atomics while a spinlock is held. 
- */ -#ifndef HAVE_SPINLOCKS - s_init_lock_sema((slock_t *) &ptr->sema, true); -#else SpinLockInit((slock_t *) &ptr->sema); -#endif ptr->value = val_; } @@ -184,15 +166,7 @@ pg_atomic_init_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 val_) StaticAssertDecl(sizeof(ptr->sema) >= sizeof(slock_t), "size mismatch of atomic_uint64 vs slock_t"); - /* - * If we're using semaphore based atomic flags, be careful about nested - * usage of atomics while a spinlock is held. - */ -#ifndef HAVE_SPINLOCKS - s_init_lock_sema((slock_t *) &ptr->sema, true); -#else SpinLockInit((slock_t *) &ptr->sema); -#endif ptr->value = val_; } diff --git a/src/backend/port/posix_sema.c b/src/backend/port/posix_sema.c index 5886d2233f5..64186ec0a7e 100644 --- a/src/backend/port/posix_sema.c +++ b/src/backend/port/posix_sema.c @@ -217,8 +217,7 @@ PGReserveSemaphores(int maxSemas) /* * We must use ShmemAllocUnlocked(), since the spinlock protecting - * ShmemAlloc() won't be ready yet. (This ordering is necessary when we - * are emulating spinlocks with semaphores.) + * ShmemAlloc() won't be ready yet. */ sharedSemas = (PGSemaphore) ShmemAllocUnlocked(PGSemaphoreShmemSize(maxSemas)); diff --git a/src/backend/port/sysv_sema.c b/src/backend/port/sysv_sema.c index 1454f96b5f3..5b88a92bc95 100644 --- a/src/backend/port/sysv_sema.c +++ b/src/backend/port/sysv_sema.c @@ -325,8 +325,7 @@ PGReserveSemaphores(int maxSemas) /* * We must use ShmemAllocUnlocked(), since the spinlock protecting - * ShmemAlloc() won't be ready yet. (This ordering is necessary when we - * are emulating spinlocks with semaphores.) + * ShmemAlloc() won't be ready yet. */ sharedSemas = (PGSemaphore) ShmemAllocUnlocked(PGSemaphoreShmemSize(maxSemas)); diff --git a/src/backend/postmaster/launch_backend.c b/src/backend/postmaster/launch_backend.c index 5388cc82048..20ab82fe34a 100644 --- a/src/backend/postmaster/launch_backend.c +++ b/src/backend/postmaster/launch_backend.c @@ -108,9 +108,7 @@ typedef struct #ifdef USE_INJECTION_POINTS struct InjectionPointsCtl *ActiveInjectionPoints; #endif -#ifndef HAVE_SPINLOCKS PGSemaphore *SpinlockSemaArray; -#endif int NamedLWLockTrancheRequests; NamedLWLockTranche *NamedLWLockTrancheArray; LWLockPadded *MainLWLockArray; @@ -724,9 +722,6 @@ save_backend_variables(BackendParameters *param, ClientSocket *client_sock, param->ActiveInjectionPoints = ActiveInjectionPoints; #endif -#ifndef HAVE_SPINLOCKS - param->SpinlockSemaArray = SpinlockSemaArray; -#endif param->NamedLWLockTrancheRequests = NamedLWLockTrancheRequests; param->NamedLWLockTrancheArray = NamedLWLockTrancheArray; param->MainLWLockArray = MainLWLockArray; @@ -986,9 +981,6 @@ restore_backend_variables(BackendParameters *param) ActiveInjectionPoints = param->ActiveInjectionPoints; #endif -#ifndef HAVE_SPINLOCKS - SpinlockSemaArray = param->SpinlockSemaArray; -#endif NamedLWLockTrancheRequests = param->NamedLWLockTrancheRequests; NamedLWLockTrancheArray = param->NamedLWLockTrancheArray; MainLWLockArray = param->MainLWLockArray; diff --git a/src/backend/storage/ipc/ipci.c b/src/backend/storage/ipc/ipci.c index b6c3b169509..34e4d17b67d 100644 --- a/src/backend/storage/ipc/ipci.c +++ b/src/backend/storage/ipc/ipci.c @@ -94,7 +94,6 @@ CalculateShmemSize(int *num_semaphores) /* Compute number of semaphores we'll need */ numSemas = ProcGlobalSemas(); - numSemas += SpinlockSemas(); /* Return the number of semaphores if requested by the caller */ if (num_semaphores) @@ -111,7 +110,6 @@ CalculateShmemSize(int *num_semaphores) */ size = 100000; size = add_size(size, 
PGSemaphoreShmemSize(numSemas)); - size = add_size(size, SpinlockSemaSize()); size = add_size(size, hash_estimate_size(SHMEM_INDEX_SIZE, sizeof(ShmemIndexEnt))); size = add_size(size, dsm_estimate_size()); @@ -225,14 +223,6 @@ CreateSharedMemoryAndSemaphores(void) */ PGReserveSemaphores(numSemas); - /* - * If spinlocks are disabled, initialize emulation layer (which depends on - * semaphores, so the order is important here). - */ -#ifndef HAVE_SPINLOCKS - SpinlockSemaInit(); -#endif - /* * Set up shared memory allocation mechanism */ diff --git a/src/backend/storage/lmgr/Makefile b/src/backend/storage/lmgr/Makefile index 3f89548bde6..6cbaf23b855 100644 --- a/src/backend/storage/lmgr/Makefile +++ b/src/backend/storage/lmgr/Makefile @@ -21,7 +21,6 @@ OBJS = \ predicate.o \ proc.o \ s_lock.o \ - spin.o include $(top_srcdir)/src/backend/common.mk diff --git a/src/backend/storage/lmgr/meson.build b/src/backend/storage/lmgr/meson.build index 05ac41e809a..d43511925e1 100644 --- a/src/backend/storage/lmgr/meson.build +++ b/src/backend/storage/lmgr/meson.build @@ -9,5 +9,4 @@ backend_sources += files( 'predicate.c', 'proc.c', 's_lock.c', - 'spin.c', ) diff --git a/src/backend/storage/lmgr/s_lock.c b/src/backend/storage/lmgr/s_lock.c index cba48b3e778..69549a65dba 100644 --- a/src/backend/storage/lmgr/s_lock.c +++ b/src/backend/storage/lmgr/s_lock.c @@ -1,7 +1,7 @@ /*------------------------------------------------------------------------- * * s_lock.c - * Hardware-dependent implementation of spinlocks. + * Implementation of spinlocks. * * When waiting for a contended spinlock we loop tightly for awhile, then * delay using pg_usleep() and try again. Preferably, "awhile" should be a diff --git a/src/backend/storage/lmgr/spin.c b/src/backend/storage/lmgr/spin.c deleted file mode 100644 index 50cb99cd3b6..00000000000 --- a/src/backend/storage/lmgr/spin.c +++ /dev/null @@ -1,180 +0,0 @@ -/*------------------------------------------------------------------------- - * - * spin.c - * Hardware-independent implementation of spinlocks. - * - * - * For machines that have test-and-set (TAS) instructions, s_lock.h/.c - * define the spinlock implementation. This file contains only a stub - * implementation for spinlocks using PGSemaphores. Unless semaphores - * are implemented in a way that doesn't involve a kernel call, this - * is too slow to be very useful :-( - * - * - * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group - * Portions Copyright (c) 1994, Regents of the University of California - * - * - * IDENTIFICATION - * src/backend/storage/lmgr/spin.c - * - *------------------------------------------------------------------------- - */ -#include "postgres.h" - -#include "storage/pg_sema.h" -#include "storage/shmem.h" -#include "storage/spin.h" - - -#ifndef HAVE_SPINLOCKS - -/* - * No TAS, so spinlocks are implemented as PGSemaphores. - */ - -#ifndef HAVE_ATOMICS -#define NUM_EMULATION_SEMAPHORES (NUM_SPINLOCK_SEMAPHORES + NUM_ATOMICS_SEMAPHORES) -#else -#define NUM_EMULATION_SEMAPHORES (NUM_SPINLOCK_SEMAPHORES) -#endif /* HAVE_ATOMICS */ - -PGSemaphore *SpinlockSemaArray; - -#else /* !HAVE_SPINLOCKS */ - -#define NUM_EMULATION_SEMAPHORES 0 - -#endif /* HAVE_SPINLOCKS */ - -/* - * Report the amount of shared memory needed to store semaphores for spinlock - * support. - */ -Size -SpinlockSemaSize(void) -{ - return NUM_EMULATION_SEMAPHORES * sizeof(PGSemaphore); -} - -/* - * Report number of semaphores needed to support spinlocks. 
- */ -int -SpinlockSemas(void) -{ - return NUM_EMULATION_SEMAPHORES; -} - -#ifndef HAVE_SPINLOCKS - -/* - * Initialize spinlock emulation. - * - * This must be called after PGReserveSemaphores(). - */ -void -SpinlockSemaInit(void) -{ - PGSemaphore *spinsemas; - int nsemas = SpinlockSemas(); - int i; - - /* - * We must use ShmemAllocUnlocked(), since the spinlock protecting - * ShmemAlloc() obviously can't be ready yet. - */ - spinsemas = (PGSemaphore *) ShmemAllocUnlocked(SpinlockSemaSize()); - for (i = 0; i < nsemas; ++i) - spinsemas[i] = PGSemaphoreCreate(); - SpinlockSemaArray = spinsemas; -} - -/* - * s_lock.h hardware-spinlock emulation using semaphores - * - * We map all spinlocks onto NUM_EMULATION_SEMAPHORES semaphores. It's okay to - * map multiple spinlocks onto one semaphore because no process should ever - * hold more than one at a time. We just need enough semaphores so that we - * aren't adding too much extra contention from that. - * - * There is one exception to the restriction of only holding one spinlock at a - * time, which is that it's ok if emulated atomic operations are nested inside - * spinlocks. To avoid the danger of spinlocks and atomic using the same sema, - * we make sure "normal" spinlocks and atomics backed by spinlocks use - * distinct semaphores (see the nested argument to s_init_lock_sema). - * - * slock_t is just an int for this implementation; it holds the spinlock - * number from 1..NUM_EMULATION_SEMAPHORES. We intentionally ensure that 0 - * is not a valid value, so that testing with this code can help find - * failures to initialize spinlocks. - */ - -static inline void -s_check_valid(int lockndx) -{ - if (unlikely(lockndx <= 0 || lockndx > NUM_EMULATION_SEMAPHORES)) - elog(ERROR, "invalid spinlock number: %d", lockndx); -} - -void -s_init_lock_sema(volatile slock_t *lock, bool nested) -{ - static uint32 counter = 0; - uint32 offset; - uint32 sema_total; - uint32 idx; - - if (nested) - { - /* - * To allow nesting atomics inside spinlocked sections, use a - * different spinlock. See comment above. - */ - offset = 1 + NUM_SPINLOCK_SEMAPHORES; - sema_total = NUM_ATOMICS_SEMAPHORES; - } - else - { - offset = 1; - sema_total = NUM_SPINLOCK_SEMAPHORES; - } - - idx = (counter++ % sema_total) + offset; - - /* double check we did things correctly */ - s_check_valid(idx); - - *lock = idx; -} - -void -s_unlock_sema(volatile slock_t *lock) -{ - int lockndx = *lock; - - s_check_valid(lockndx); - - PGSemaphoreUnlock(SpinlockSemaArray[lockndx - 1]); -} - -bool -s_lock_free_sema(volatile slock_t *lock) -{ - /* We don't currently use S_LOCK_FREE anyway */ - elog(ERROR, "spin.c does not support S_LOCK_FREE()"); - return false; -} - -int -tas_sema(volatile slock_t *lock) -{ - int lockndx = *lock; - - s_check_valid(lockndx); - - /* Note that TAS macros return 0 if *success* */ - return !PGSemaphoreTryLock(SpinlockSemaArray[lockndx - 1]); -} - -#endif /* !HAVE_SPINLOCKS */ diff --git a/src/include/pg_config.h.in b/src/include/pg_config.h.in index 3dea3856aaf..e6c06f61027 100644 --- a/src/include/pg_config.h.in +++ b/src/include/pg_config.h.in @@ -382,9 +382,6 @@ /* Define to 1 if the system has the type `socklen_t'. */ #undef HAVE_SOCKLEN_T -/* Define to 1 if you have spinlocks. */ -#undef HAVE_SPINLOCKS - /* Define to 1 if you have the `SSL_CTX_set_cert_cb' function. 
*/ #undef HAVE_SSL_CTX_SET_CERT_CB diff --git a/src/include/pg_config_manual.h b/src/include/pg_config_manual.h index f941ee2faf8..11f74f4b56d 100644 --- a/src/include/pg_config_manual.h +++ b/src/include/pg_config_manual.h @@ -86,21 +86,6 @@ #define USE_FLOAT8_BYVAL 1 #endif -/* - * When we don't have native spinlocks, we use semaphores to simulate them. - * Decreasing this value reduces consumption of OS resources; increasing it - * may improve performance, but supplying a real spinlock implementation is - * probably far better. - */ -#define NUM_SPINLOCK_SEMAPHORES 128 - -/* - * When we have neither spinlocks nor atomic operations support we're - * implementing atomic operations on top of spinlock on top of semaphores. To - * be safe against atomic operations while holding a spinlock separate - * semaphores have to be used. - */ -#define NUM_ATOMICS_SEMAPHORES 64 /* * MAXPGPATH: standard size of a pathname buffer in PostgreSQL (hence, diff --git a/src/include/port/atomics.h b/src/include/port/atomics.h index f6fa432d2df..03134e3b7bb 100644 --- a/src/include/port/atomics.h +++ b/src/include/port/atomics.h @@ -16,8 +16,8 @@ * * There exist generic, hardware independent, implementations for several * compilers which might be sufficient, although possibly not optimal, for a - * new platform. If no such generic implementation is available spinlocks (or - * even OS provided semaphores) will be used to implement the API. + * new platform. If no such generic implementation is available spinlocks will + * be used to implement the API. * * Implement _u64 atomics if and only if your platform can use them * efficiently (and obviously correctly). diff --git a/src/include/port/atomics/fallback.h b/src/include/port/atomics/fallback.h index 34cfee110fb..2e3eef4acaf 100644 --- a/src/include/port/atomics/fallback.h +++ b/src/include/port/atomics/fallback.h @@ -20,9 +20,7 @@ #ifndef pg_memory_barrier_impl /* * If we have no memory barrier implementation for this architecture, we - * fall back to acquiring and releasing a spinlock. This might, in turn, - * fall back to the semaphore-based spinlock implementation, which will be - * amazingly slow. + * fall back to acquiring and releasing a spinlock. * * It's not self-evident that every possible legal implementation of a * spinlock acquire-and-release would be equivalent to a full memory barrier. diff --git a/src/include/storage/s_lock.h b/src/include/storage/s_lock.h index 02c68513a53..e94ed5f48bd 100644 --- a/src/include/storage/s_lock.h +++ b/src/include/storage/s_lock.h @@ -1,10 +1,10 @@ /*------------------------------------------------------------------------- * * s_lock.h - * Hardware-dependent implementation of spinlocks. + * Implementation of spinlocks. * * NOTE: none of the macros in this file are intended to be called directly. - * Call them through the hardware-independent macros in spin.h. + * Call them through the macros in spin.h. * * The following hardware-dependent macros must be provided for each * supported platform: @@ -78,13 +78,6 @@ * in assembly language to execute a hardware atomic-test-and-set * instruction. Equivalent OS-supplied mutex routines could be used too. * - * If no system-specific TAS() is available (ie, HAVE_SPINLOCKS is not - * defined), then we fall back on an emulation that uses SysV semaphores - * (see spin.c). This emulation will be MUCH MUCH slower than a proper TAS() - * implementation, because of the cost of a kernel call per lock or unlock. 
- * An old report is that Postgres spends around 40% of its time in semop(2) - * when using the SysV semaphore code. - * * * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California @@ -100,8 +93,6 @@ #error "s_lock.h may not be included from frontend code" #endif -#ifdef HAVE_SPINLOCKS /* skip spinlocks if requested */ - #if defined(__GNUC__) || defined(__INTEL_COMPILER) /************************************************************************* * All the gcc inlines @@ -655,34 +646,10 @@ spin_delay(void) /* Blow up if we didn't have any way to do spinlocks */ #ifndef HAS_TEST_AND_SET -#error PostgreSQL does not have native spinlock support on this platform. To continue the compilation, rerun configure using --disable-spinlocks. However, performance will be poor. Please report this to pgsql-bugs@lists.postgresql.org. +#error PostgreSQL does not have spinlock support on this platform. Please report this to pgsql-bugs@lists.postgresql.org. #endif -#else /* !HAVE_SPINLOCKS */ - - -/* - * Fake spinlock implementation using semaphores --- slow and prone - * to fall foul of kernel limits on number of semaphores, so don't use this - * unless you must! The subroutines appear in spin.c. - */ -typedef int slock_t; - -extern bool s_lock_free_sema(volatile slock_t *lock); -extern void s_unlock_sema(volatile slock_t *lock); -extern void s_init_lock_sema(volatile slock_t *lock, bool nested); -extern int tas_sema(volatile slock_t *lock); - -#define S_LOCK_FREE(lock) s_lock_free_sema(lock) -#define S_UNLOCK(lock) s_unlock_sema(lock) -#define S_INIT_LOCK(lock) s_init_lock_sema(lock, false) -#define TAS(lock) tas_sema(lock) - - -#endif /* HAVE_SPINLOCKS */ - - /* * Default Definitions - override these above as needed. */ diff --git a/src/include/storage/spin.h b/src/include/storage/spin.h index c0679c59992..3ae2a56d073 100644 --- a/src/include/storage/spin.h +++ b/src/include/storage/spin.h @@ -1,11 +1,11 @@ /*------------------------------------------------------------------------- * * spin.h - * Hardware-independent implementation of spinlocks. + * API for spinlocks. * * - * The hardware-independent interface to spinlocks is defined by the - * typedef "slock_t" and these macros: + * The interface to spinlocks is defined by the typedef "slock_t" and + * these macros: * * void SpinLockInit(volatile slock_t *lock) * Initialize a spinlock (to the unlocked state). @@ -52,9 +52,6 @@ #define SPIN_H #include "storage/s_lock.h" -#ifndef HAVE_SPINLOCKS -#include "storage/pg_sema.h" -#endif #define SpinLockInit(lock) S_INIT_LOCK(lock) @@ -65,13 +62,4 @@ #define SpinLockFree(lock) S_LOCK_FREE(lock) - -extern int SpinlockSemas(void); -extern Size SpinlockSemaSize(void); - -#ifndef HAVE_SPINLOCKS -extern void SpinlockSemaInit(void); -extern PGDLLIMPORT PGSemaphore *SpinlockSemaArray; -#endif - #endif /* SPIN_H */ diff --git a/src/test/regress/regress.c b/src/test/regress/regress.c index 45a6ad3c49e..14aad5a0c6e 100644 --- a/src/test/regress/regress.c +++ b/src/test/regress/regress.c @@ -887,92 +887,8 @@ test_spinlock(void) if (memcmp(struct_w_lock.data_after, "ef12", 4) != 0) elog(ERROR, "padding after spinlock modified"); } - - /* - * Ensure that allocating more than INT32_MAX emulated spinlocks works. - * That's interesting because the spinlock emulation uses a 32bit integer - * to map spinlocks onto semaphores. There've been bugs... 
- */ -#ifndef HAVE_SPINLOCKS - { - /* - * Initialize enough spinlocks to advance counter close to wraparound. - * It's too expensive to perform acquire/release for each, as those - * may be syscalls when the spinlock emulation is used (and even just - * atomic TAS would be expensive). - */ - for (uint32 i = 0; i < INT32_MAX - 100000; i++) - { - slock_t lock; - - SpinLockInit(&lock); - } - - for (uint32 i = 0; i < 200000; i++) - { - slock_t lock; - - SpinLockInit(&lock); - - SpinLockAcquire(&lock); - SpinLockRelease(&lock); - SpinLockAcquire(&lock); - SpinLockRelease(&lock); - } - } -#endif } -/* - * Verify that performing atomic ops inside a spinlock isn't a - * problem. Realistically that's only going to be a problem when both - * --disable-spinlocks and --disable-atomics are used, but it's cheap enough - * to just always test. - * - * The test works by initializing enough atomics that we'd conflict if there - * were an overlap between a spinlock and an atomic by holding a spinlock - * while manipulating more than NUM_SPINLOCK_SEMAPHORES atomics. - * - * NUM_TEST_ATOMICS doesn't really need to be more than - * NUM_SPINLOCK_SEMAPHORES, but it seems better to test a bit more - * extensively. - */ -static void -test_atomic_spin_nest(void) -{ - slock_t lock; -#define NUM_TEST_ATOMICS (NUM_SPINLOCK_SEMAPHORES + NUM_ATOMICS_SEMAPHORES + 27) - pg_atomic_uint32 atomics32[NUM_TEST_ATOMICS]; - pg_atomic_uint64 atomics64[NUM_TEST_ATOMICS]; - - SpinLockInit(&lock); - - for (int i = 0; i < NUM_TEST_ATOMICS; i++) - { - pg_atomic_init_u32(&atomics32[i], 0); - pg_atomic_init_u64(&atomics64[i], 0); - } - - /* just so it's not all zeroes */ - for (int i = 0; i < NUM_TEST_ATOMICS; i++) - { - EXPECT_EQ_U32(pg_atomic_fetch_add_u32(&atomics32[i], i), 0); - EXPECT_EQ_U64(pg_atomic_fetch_add_u64(&atomics64[i], i), 0); - } - - /* test whether we can do atomic op with lock held */ - SpinLockAcquire(&lock); - for (int i = 0; i < NUM_TEST_ATOMICS; i++) - { - EXPECT_EQ_U32(pg_atomic_fetch_sub_u32(&atomics32[i], i), i); - EXPECT_EQ_U32(pg_atomic_read_u32(&atomics32[i]), 0); - EXPECT_EQ_U64(pg_atomic_fetch_sub_u64(&atomics64[i], i), i); - EXPECT_EQ_U64(pg_atomic_read_u64(&atomics64[i]), 0); - } - SpinLockRelease(&lock); -} -#undef NUM_TEST_ATOMICS - PG_FUNCTION_INFO_V1(test_atomic_ops); Datum test_atomic_ops(PG_FUNCTION_ARGS) @@ -989,8 +905,6 @@ test_atomic_ops(PG_FUNCTION_ARGS) */ test_spinlock(); - test_atomic_spin_nest(); - PG_RETURN_BOOL(true); }
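For reference, the spinlock interface that callers keep using after this patch is the small set of macros declared in storage/spin.h (SpinLockInit, SpinLockAcquire, SpinLockRelease, SpinLockFree), now always backed by a native test-and-set implementation from s_lock.h; a platform without one now fails at compile time with the #error shown above instead of falling back to semaphore emulation. The fragment below is a minimal sketch of typical backend-side usage, modeled on the pattern test_spinlock() in src/test/regress/regress.c exercises. It is not part of the patch, and the struct and function names (SharedCounter, shared_counter_bump) are hypothetical, chosen only for illustration.

/*
 * Illustrative sketch only -- not part of the patch.  Shows the spin.h
 * macros that remain after this commit protecting a small shared field,
 * in the style of test_spinlock() in src/test/regress/regress.c.
 * SharedCounter and shared_counter_bump are made-up names.
 */
#include "postgres.h"

#include "storage/spin.h"

typedef struct SharedCounter
{
	slock_t		mutex;			/* protects the field below */
	uint64		value;
} SharedCounter;

/* Called once while the (hypothetical) shared structure is set up. */
static void
shared_counter_init(SharedCounter *counter)
{
	SpinLockInit(&counter->mutex);
	counter->value = 0;
}

/* Increment under the spinlock and return the new value. */
static uint64
shared_counter_bump(SharedCounter *counter)
{
	uint64		result;

	SpinLockAcquire(&counter->mutex);
	result = ++counter->value;
	SpinLockRelease(&counter->mutex);

	return result;
}

As in the real callers touched by this patch, the critical section is kept to a few straight-line instructions: SpinLockAcquire() busy-waits on contention (see the s_lock.c header comment about looping and then delaying with pg_usleep()), so nothing that can sleep, error out, or call into the kernel belongs inside it.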