1
0
mirror of https://sourceware.org/git/glibc.git synced 2025-11-17 02:43:26 +03:00

atomic: Consolidate atomic_read_barrier implementation

All ABIs, except alpha, powerpc, and x86_64, define it to
atomic_full_barrier/__sync_synchronize, which can be mapped to
__atomic_thread_fence (__ATOMIC_SEQ_CST) in most cases, with the
exception of aarch64 (where the acquire fence is generated as
'dmb ishld' instead of 'dmb ish').

For s390x, it defaults to a memory barrier where __sync_synchronize
emits a 'bcr 15,0' (which the manual describes as pipeline
synchronization).

For PowerPC, it allows the use of lwsync for additional chips
(since _ARCH_PWR4 does not cover all chips that support it).

Tested on aarch64-linux-gnu, where the acquire fence produces a different
instruction than the current code.

Co-authored-by: Wilco Dijkstra <Wilco.Dijkstra@arm.com>
Reviewed-by: Wilco Dijkstra  <Wilco.Dijkstra@arm.com>
This commit is contained in:
Adhemerval Zanella
2025-09-11 10:49:45 -03:00
parent 70ee250fb8
commit 304b22d7f9
5 changed files with 1 additions and 19 deletions

View File

@@ -108,7 +108,7 @@
 #ifndef atomic_read_barrier
-# define atomic_read_barrier() atomic_full_barrier ()
+# define atomic_read_barrier() __atomic_thread_fence (__ATOMIC_ACQUIRE);
 #endif

View File

@@ -22,5 +22,4 @@
 /* XXX Is this actually correct?  */
 #define ATOMIC_EXCHANGE_USES_CAS 1
-#define atomic_read_barrier() __asm ("mb" : : : "memory");
 #define atomic_write_barrier() __asm ("wmb" : : : "memory");

View File

@@ -22,10 +22,6 @@
 #include <atomic.h>
-#ifndef atomic_read_barrier
-# define atomic_read_barrier() atomic_full_barrier ()
-#endif
 #ifndef atomic_write_barrier
 # define atomic_write_barrier() atomic_full_barrier ()
 #endif

View File

@@ -37,23 +37,11 @@
 #endif
 #ifdef _ARCH_PWR4
-/*
- * Newer powerpc64 processors support the new "light weight" sync (lwsync)
- * So if the build is using -mcpu=[power4,power5,power5+,970] we can
- * safely use lwsync.
- */
-# define atomic_read_barrier() __asm ("lwsync" ::: "memory")
 /*
  * "light weight" sync can also be used for the release barrier.
  */
 # define atomic_write_barrier() __asm ("lwsync" ::: "memory")
 #else
-/*
- * Older powerpc32 processors don't support the new "light weight"
- * sync (lwsync). So the only safe option is to use normal sync
- * for all powerpc32 applications.
- */
-# define atomic_read_barrier() __asm ("sync" ::: "memory")
 # define atomic_write_barrier() __asm ("sync" ::: "memory")
 #endif

View File

@@ -31,7 +31,6 @@
 #define ATOMIC_EXCHANGE_USES_CAS 0
-#define atomic_read_barrier() __asm ("" ::: "memory")
 #define atomic_write_barrier() __asm ("" ::: "memory")
 #define atomic_spin_nop() __asm ("pause")