Previously we had a fallback implementation that made a harmless system
call, based on the assumption that system calls must contain a memory
barrier (a sketch of that approach follows the message). That shouldn't
be reached on any current system, and it seems highly likely that we can
easily find out how to request explicit memory barriers, if we've already
had to find out how to do atomics on a hypothetical new system.
Comments and a function name referred to a spinlock used for fallback
memory barriers, but that changed in 1b468a13, which left some misleading
words behind in a few places.
Reviewed-by: Heikki Linnakangas <hlinnaka@iki.fi>
Suggested-by: Andres Freund <andres@anarazel.de>
Discussion: https://postgr.es/m/721bf39a-ed8a-44b0-8b8e-be3bd81db748%40technowledgy.de
Discussion: https://postgr.es/m/3351991.1697728588%40sss.pgh.pa.us
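
The removed fallback isn't shown in this commit, but a syscall-based
barrier of the kind described above might have looked roughly like the
following sketch (illustrative only; the function name is made up here,
not taken from PostgreSQL):

#include <signal.h>
#include <unistd.h>

/*
 * Illustrative sketch, not the removed code: issue a harmless system
 * call and rely on the historical assumption that crossing the kernel
 * boundary acts as a full memory barrier.
 */
static void
fallback_memory_barrier(void)
{
	kill(getpid(), 0);			/* signal 0: validates the pid, delivers nothing */
}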
74 lines | 1.8 KiB | C
/*-------------------------------------------------------------------------
 *
 * atomics.c
 *	   Non-Inline parts of the atomics implementation
 *
 * Portions Copyright (c) 2013-2024, PostgreSQL Global Development Group
 *
 *
 * IDENTIFICATION
 *	  src/backend/port/atomics.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "miscadmin.h"
#include "port/atomics.h"
#include "storage/spin.h"

#ifdef PG_HAVE_ATOMIC_U64_SIMULATION

void
pg_atomic_init_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 val_)
{
	StaticAssertDecl(sizeof(ptr->sema) >= sizeof(slock_t),
					 "size mismatch of atomic_uint64 vs slock_t");

	SpinLockInit((slock_t *) &ptr->sema);
	ptr->value = val_;
}

bool
pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
									uint64 *expected, uint64 newval)
{
	bool		ret;

	/*
	 * Do atomic op under a spinlock. It might look like we could just skip
	 * the cmpxchg if the lock isn't available, but that'd just emulate a
	 * 'weak' compare and swap. I.e. one that allows spurious failures. Since
	 * several algorithms rely on a strong variant and that is efficiently
	 * implementable on most major architectures let's emulate it here as
	 * well.
	 */
	SpinLockAcquire((slock_t *) &ptr->sema);

	/* perform compare/exchange logic */
	ret = ptr->value == *expected;
	*expected = ptr->value;
	if (ret)
		ptr->value = newval;

	/* and release lock */
	SpinLockRelease((slock_t *) &ptr->sema);

	return ret;
}

uint64
pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
{
	uint64		oldval;

	SpinLockAcquire((slock_t *) &ptr->sema);
	oldval = ptr->value;
	ptr->value += add_;
	SpinLockRelease((slock_t *) &ptr->sema);
	return oldval;
}

#endif							/* PG_HAVE_ATOMIC_U64_SIMULATION */
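
As the comment in pg_atomic_compare_exchange_u64_impl above explains, the
emulation provides a strong compare-and-swap, failing only when the value
really differs. A minimal caller sketch under that assumption follows;
advance_high_water_mark is hypothetical, while pg_atomic_read_u64 and
pg_atomic_compare_exchange_u64 are the real API from port/atomics.h:

#include "postgres.h"

#include "port/atomics.h"

/*
 * Hypothetical example, not part of atomics.c: atomically raise a shared
 * 64-bit high-water mark.  Because the compare-and-swap is strong, the
 * loop retries only when another process actually changed the value; a
 * weak CAS could also fail spuriously and cost extra iterations.
 */
static void
advance_high_water_mark(volatile pg_atomic_uint64 *mark, uint64 candidate)
{
	uint64		cur = pg_atomic_read_u64(mark);

	while (cur < candidate)
	{
		if (pg_atomic_compare_exchange_u64(mark, &cur, candidate))
			break;				/* we installed the new maximum */
		/* on failure, cur was reloaded; re-test the loop condition */
	}
}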