mirror of
https://github.com/postgres/postgres.git
synced 2025-08-22 21:53:06 +03:00
Improve 64bit atomics support.
When adding atomics back in b64d92f1a, I added 64bit support as
optional; there wasn't yet a direct user in sight. That turned out to
be a bit short-sighted, it'd already have been useful a number of times.
Add a fallback implementation of 64bit atomics, just like the one we
have for 32bit atomics.
Additionally optimize reads/writes to 64bit on a number of platforms
where aligned writes of that size are atomic. This can now be tested
with PG_HAVE_8BYTE_SINGLE_COPY_ATOMICITY.
Author: Andres Freund
Reviewed-By: Amit Kapila
Discussion: https://postgr.es/m/20160330230914.GH13305@awork2.anarazel.de
This commit is contained in:
@@ -89,7 +89,7 @@ void
|
||||
pg_atomic_init_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val_)
|
||||
{
|
||||
StaticAssertStmt(sizeof(ptr->sema) >= sizeof(slock_t),
|
||||
"size mismatch of atomic_flag vs slock_t");
|
||||
"size mismatch of atomic_uint32 vs slock_t");
|
||||
|
||||
/*
|
||||
* If we're using semaphore based atomic flags, be careful about nested
|
||||
@@ -157,3 +157,66 @@ pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_)
|
||||
}
|
||||
|
||||
#endif /* PG_HAVE_ATOMIC_U32_SIMULATION */
|
||||
|
||||
|
||||
#ifdef PG_HAVE_ATOMIC_U64_SIMULATION
|
||||
|
||||
void
|
||||
pg_atomic_init_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 val_)
|
||||
{
|
||||
StaticAssertStmt(sizeof(ptr->sema) >= sizeof(slock_t),
|
||||
"size mismatch of atomic_uint64 vs slock_t");
|
||||
|
||||
/*
|
||||
* If we're using semaphore based atomic flags, be careful about nested
|
||||
* usage of atomics while a spinlock is held.
|
||||
*/
|
||||
#ifndef HAVE_SPINLOCKS
|
||||
s_init_lock_sema((slock_t *) &ptr->sema, true);
|
||||
#else
|
||||
SpinLockInit((slock_t *) &ptr->sema);
|
||||
#endif
|
||||
ptr->value = val_;
|
||||
}
|
||||
|
||||
bool
|
||||
pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
|
||||
uint64 *expected, uint64 newval)
|
||||
{
|
||||
bool ret;
|
||||
|
||||
/*
|
||||
* Do atomic op under a spinlock. It might look like we could just skip
|
||||
* the cmpxchg if the lock isn't available, but that'd just emulate a
|
||||
* 'weak' compare and swap. I.e. one that allows spurious failures. Since
|
||||
* several algorithms rely on a strong variant and that is efficiently
|
||||
* implementable on most major architectures let's emulate it here as
|
||||
* well.
|
||||
*/
|
||||
SpinLockAcquire((slock_t *) &ptr->sema);
|
||||
|
||||
/* perform compare/exchange logic */
|
||||
ret = ptr->value == *expected;
|
||||
*expected = ptr->value;
|
||||
if (ret)
|
||||
ptr->value = newval;
|
||||
|
||||
/* and release lock */
|
||||
SpinLockRelease((slock_t *) &ptr->sema);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
uint64
|
||||
pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
|
||||
{
|
||||
uint64 oldval;
|
||||
|
||||
SpinLockAcquire((slock_t *) &ptr->sema);
|
||||
oldval = ptr->value;
|
||||
ptr->value += add_;
|
||||
SpinLockRelease((slock_t *) &ptr->sema);
|
||||
return oldval;
|
||||
}
|
||||
|
||||
#endif /* PG_HAVE_ATOMIC_U64_SIMULATION */
|
||||
|
Reference in New Issue
Block a user