mirror of
https://github.com/postgres/postgres.git
synced 2025-09-02 04:21:28 +03:00
Remove --disable-spinlocks.
A later change will require atomic support, so it wouldn't make sense for a hypothetical new system not to be able to implement spinlocks.

Reviewed-by: Heikki Linnakangas <hlinnaka@iki.fi>
Reviewed-by: Tom Lane <tgl@sss.pgh.pa.us> (concept, not the patch)
Reviewed-by: Andres Freund <andres@anarazel.de> (concept, not the patch)
Discussion: https://postgr.es/m/3351991.1697728588%40sss.pgh.pa.us
This commit is contained in:
@@ -57,17 +57,7 @@ pg_atomic_init_flag_impl(volatile pg_atomic_flag *ptr)
 	StaticAssertDecl(sizeof(ptr->sema) >= sizeof(slock_t),
 					 "size mismatch of atomic_flag vs slock_t");
 
-#ifndef HAVE_SPINLOCKS
-
-	/*
-	 * NB: If we're using semaphore based TAS emulation, be careful to use a
-	 * separate set of semaphores. Otherwise we'd get in trouble if an atomic
-	 * var would be manipulated while spinlock is held.
-	 */
-	s_init_lock_sema((slock_t *) &ptr->sema, true);
-#else
 	SpinLockInit((slock_t *) &ptr->sema);
-#endif
 
 	ptr->value = false;
 }
@@ -108,15 +98,7 @@ pg_atomic_init_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val_)
 	StaticAssertDecl(sizeof(ptr->sema) >= sizeof(slock_t),
 					 "size mismatch of atomic_uint32 vs slock_t");
 
-	/*
-	 * If we're using semaphore based atomic flags, be careful about nested
-	 * usage of atomics while a spinlock is held.
-	 */
-#ifndef HAVE_SPINLOCKS
-	s_init_lock_sema((slock_t *) &ptr->sema, true);
-#else
 	SpinLockInit((slock_t *) &ptr->sema);
-#endif
 	ptr->value = val_;
 }
 
@@ -184,15 +166,7 @@ pg_atomic_init_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 val_)
 	StaticAssertDecl(sizeof(ptr->sema) >= sizeof(slock_t),
 					 "size mismatch of atomic_uint64 vs slock_t");
 
-	/*
-	 * If we're using semaphore based atomic flags, be careful about nested
-	 * usage of atomics while a spinlock is held.
-	 */
-#ifndef HAVE_SPINLOCKS
-	s_init_lock_sema((slock_t *) &ptr->sema, true);
-#else
 	SpinLockInit((slock_t *) &ptr->sema);
-#endif
 	ptr->value = val_;
 }
 
@@ -217,8 +217,7 @@ PGReserveSemaphores(int maxSemas)
 
 	/*
 	 * We must use ShmemAllocUnlocked(), since the spinlock protecting
-	 * ShmemAlloc() won't be ready yet. (This ordering is necessary when we
-	 * are emulating spinlocks with semaphores.)
+	 * ShmemAlloc() won't be ready yet.
 	 */
 	sharedSemas = (PGSemaphore)
 		ShmemAllocUnlocked(PGSemaphoreShmemSize(maxSemas));
@@ -325,8 +325,7 @@ PGReserveSemaphores(int maxSemas)
 
 	/*
 	 * We must use ShmemAllocUnlocked(), since the spinlock protecting
-	 * ShmemAlloc() won't be ready yet. (This ordering is necessary when we
-	 * are emulating spinlocks with semaphores.)
+	 * ShmemAlloc() won't be ready yet.
 	 */
 	sharedSemas = (PGSemaphore)
 		ShmemAllocUnlocked(PGSemaphoreShmemSize(maxSemas));
Reference in New Issue
Block a user