atomic ops:
  my_atomic_XX_t -> intXX, no implicit locking anymore
  simplified framework, support for requested cleanups

dbug/dbug.c:
  fix a compiler warning
include/atomic/nolock.h:
  my_atomic_XX_t -> intXX
include/atomic/rwlock.h:
  my_atomic_XX_t -> intXX, no implicit locking anymore
include/atomic/x86-gcc.h:
  my_atomic_XX_t -> intXX, no implicit locking anymore
include/atomic/x86-msvc.h:
  my_atomic_XX_t -> intXX
  simplified defines
  support for cleanups
include/my_atomic.h:
  my_atomic_XX_t -> intXX, no implicit locking anymore
  simplified framework, support for requested cleanups
unittest/examples/no_plan-t.c:
  fix a compiler warning
unittest/mysys/Makefile.am:
  fix dependencies
unittest/mysys/my_atomic-t.c:
  my_atomic_XX_t -> intXX, no implicit locking anymore
unittest/mytap/tap.c:
  cosmetic fix
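
What the headline change means for callers: the atomic functions now operate
on plain intXX values instead of the old my_atomic_XX_t wrapper structs, and
any locking (needed only when the platform has no native atomics) is explicit
at the call site. A minimal sketch of the new calling convention (the counter
and the function here are hypothetical; names as in include/my_atomic.h):

  #include <my_global.h>
  #include <my_atomic.h>

  static int32 counter;                   /* hypothetical shared counter */
  static my_atomic_rwlock_t counter_rwl;  /* init once, my_atomic_rwlock_init() */

  void incr_counter(void)
  {
    int32 old;
    /* "no implicit locking anymore": the caller brackets the operation;
       in native-atomics modes these two calls are no-ops */
    my_atomic_rwlock_wrlock(&counter_rwl);
    old= my_atomic_add32(&counter, 1);    /* takes int32*, returns old value */
    my_atomic_rwlock_wrunlock(&counter_rwl);
    (void) old;
  }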
include/atomic/x86-gcc.h:
--- a/include/atomic/x86-gcc.h
+++ b/include/atomic/x86-gcc.h
@@ -16,44 +16,38 @@
 /*
   XXX 64-bit atomic operations can be implemented using
-  cmpxchg8b, if necessary
+  cmpxchg8b, if necessary. Though I've heard that not all 64-bit
+  architectures support double-word (128-bit) cas.
 */
 
-/* fix -ansi errors while maintaining readability */
-#define asm __asm__
 #define MY_ATOMIC_MODE "gcc-x86" ## LOCK
 
-#define make_atomic_add_body8                                   \
-  asm volatile (LOCK "xadd %0, %1;" : "+r" (v) , "+m" (a->val))
-#define make_atomic_swap_body8                                  \
-  asm volatile ("xchg %0, %1;" : "+r" (v) , "+m" (a->val))
-#define make_atomic_cas_body8                                   \
+/* fix -ansi errors while maintaining readability */
+#ifndef asm
+#define asm __asm__
+#endif
+
+#define make_atomic_add_body(S)                                 \
+  asm volatile (LOCK "xadd %0, %1;" : "+r" (v) , "+m" (*a))
+#define make_atomic_swap_body(S)                                \
+  asm volatile ("xchg %0, %1;" : "+r" (v) , "+m" (*a))
+#define make_atomic_cas_body(S)                                 \
   asm volatile (LOCK "cmpxchg %3, %0; setz %2;"                 \
-                : "+m" (a->val), "+a" (*cmp), "=q" (ret): "r" (set))
+                : "+m" (*a), "+a" (*cmp), "=q" (ret): "r" (set))
 
 #ifdef MY_ATOMIC_MODE_DUMMY
-#define make_atomic_load_body8   ret=a->val
-#define make_atomic_store_body8  a->val=v
+#define make_atomic_load_body(S)   ret=*a
+#define make_atomic_store_body(S)  *a=v
 #else
 /*
   Actually 32-bit reads/writes are always atomic on x86
   But we add LOCK here anyway to force memory barriers
 */
-#define make_atomic_load_body8                                  \
+#define make_atomic_load_body(S)                                \
   ret=0;                                                        \
   asm volatile (LOCK "cmpxchg %2, %0"                           \
-                : "+m" (a->val), "+a" (ret): "r" (ret))
-#define make_atomic_store_body8                                 \
-  asm volatile ("xchg %0, %1;" : "+m" (a->val) : "r" (v))
+                : "+m" (*a), "+a" (ret): "r" (ret))
+#define make_atomic_store_body(S)                               \
+  asm volatile ("xchg %0, %1;" : "+m" (*a) : "r" (v))
 #endif
 
-#define make_atomic_add_body16 make_atomic_add_body8
-#define make_atomic_add_body32 make_atomic_add_body8
-#define make_atomic_cas_body16 make_atomic_cas_body8
-#define make_atomic_cas_body32 make_atomic_cas_body8
-#define make_atomic_load_body16 make_atomic_load_body8
-#define make_atomic_load_body32 make_atomic_load_body8
-#define make_atomic_store_body16 make_atomic_store_body8
-#define make_atomic_store_body32 make_atomic_store_body8
-#define make_atomic_swap_body16 make_atomic_swap_body8
-#define make_atomic_swap_body32 make_atomic_swap_body8
 
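
A side note on the trickiest line above: the atomic load is built from
lock cmpxchg with both the expected value and the replacement set to 0.
If *a happens to be 0, the instruction stores 0 back (no change); otherwise
it loads the current value into eax. Either way eax ends up holding *a, and
the LOCK prefix supplies the full barrier the comment asks for. Written out
as a standalone function (a sketch for illustration only; the LOCK macro is
spelled as a literal "lock" prefix, and the function name is made up):

  #include <stdint.h>

  static int32_t my_atomic_load32_sketch(volatile int32_t *a)
  {
    int32_t ret= 0;
    /* cmpxchg %2, %0: compare eax (0) with *a; on mismatch the current
       value of *a is copied into eax, which is exactly what a load needs */
    __asm__ __volatile__ ("lock; cmpxchg %2, %0"
                          : "+m" (*a), "+a" (ret)
                          : "r" (ret)
                          : "cc", "memory");
    return ret;
  }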
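
And on the XXX note about 64-bit operations: on 32-bit x86, cmpxchg8b
compares edx:eax with an 8-byte memory operand; on match it stores ecx:ebx
there, on mismatch it loads the old value into edx:eax, so an 8-byte cas
needs no 64-bit registers at all. A rough i386-only sketch under gcc,
following the same convention as the cas body above (returns non-zero on
success, updates *cmp on failure); illustrative only, not part of the patch:

  #include <stdint.h>

  static int my_atomic_cas64_sketch(volatile int64_t *a,
                                    int64_t *cmp, int64_t set)
  {
    unsigned char ret;
    __asm__ __volatile__ ("lock; cmpxchg8b %0; setz %1"
                          : "+m" (*a), "=m" (ret),
                            "+A" (*cmp)                  /* "A" = edx:eax pair */
                          : "b" ((uint32_t) set),        /* low half of set   */
                            "c" ((uint32_t) (set >> 32)) /* high half of set  */
                          : "cc", "memory");
    return ret;
  }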