1
0
mirror of https://github.com/MariaDB/server.git synced 2025-08-08 11:22:35 +03:00

MDEV-11212 - Clean-up MariaDB atomic operations

Moved gcc specific code to gcc_builtins.h.
Moved intptr into the black magic code block.
Moved definition of atomic operations for "long" out of black magic code block.
This commit is contained in:
Sergey Vojtovich
2016-11-02 16:43:23 +04:00
parent c23399d3de
commit 81f280789b
2 changed files with 57 additions and 59 deletions

View File

@@ -32,6 +32,43 @@
ret= __atomic_load_n(a, __ATOMIC_SEQ_CST) ret= __atomic_load_n(a, __ATOMIC_SEQ_CST)
#define make_atomic_store_body(S) \ #define make_atomic_store_body(S) \
__atomic_store_n(a, v, __ATOMIC_SEQ_CST) __atomic_store_n(a, v, __ATOMIC_SEQ_CST)
#define MY_MEMORY_ORDER_RELAXED __ATOMIC_RELAXED
#define MY_MEMORY_ORDER_CONSUME __ATOMIC_CONSUME
#define MY_MEMORY_ORDER_ACQUIRE __ATOMIC_ACQUIRE
#define MY_MEMORY_ORDER_RELEASE __ATOMIC_RELEASE
#define MY_MEMORY_ORDER_ACQ_REL __ATOMIC_ACQ_REL
#define MY_MEMORY_ORDER_SEQ_CST __ATOMIC_SEQ_CST
#define my_atomic_store32_explicit(P, D, O) __atomic_store_n((P), (D), (O))
#define my_atomic_store64_explicit(P, D, O) __atomic_store_n((P), (D), (O))
#define my_atomic_storeptr_explicit(P, D, O) __atomic_store_n((P), (D), (O))
#define my_atomic_load32_explicit(P, O) __atomic_load_n((P), (O))
#define my_atomic_load64_explicit(P, O) __atomic_load_n((P), (O))
#define my_atomic_loadptr_explicit(P, O) __atomic_load_n((P), (O))
#define my_atomic_fas32_explicit(P, D, O) __atomic_exchange_n((P), (D), (O))
#define my_atomic_fas64_explicit(P, D, O) __atomic_exchange_n((P), (D), (O))
#define my_atomic_fasptr_explicit(P, D, O) __atomic_exchange_n((P), (D), (O))
#define my_atomic_add32_explicit(P, A, O) __atomic_fetch_add((P), (A), (O))
#define my_atomic_add64_explicit(P, A, O) __atomic_fetch_add((P), (A), (O))
#define my_atomic_cas32_weak_explicit(P, E, D, S, F) \
__atomic_compare_exchange_n((P), (E), (D), true, (S), (F))
#define my_atomic_cas64_weak_explicit(P, E, D, S, F) \
__atomic_compare_exchange_n((P), (E), (D), true, (S), (F))
#define my_atomic_casptr_weak_explicit(P, E, D, S, F) \
__atomic_compare_exchange_n((P), (E), (D), true, (S), (F))
#define my_atomic_cas32_strong_explicit(P, E, D, S, F) \
__atomic_compare_exchange_n((P), (E), (D), false, (S), (F))
#define my_atomic_cas64_strong_explicit(P, E, D, S, F) \
__atomic_compare_exchange_n((P), (E), (D), false, (S), (F))
#define my_atomic_casptr_strong_explicit(P, E, D, S, F) \
__atomic_compare_exchange_n((P), (E), (D), false, (S), (F))
#else #else
#define MY_ATOMIC_MODE "gcc-builtins-smp" #define MY_ATOMIC_MODE "gcc-builtins-smp"
#define make_atomic_load_body(S) \ #define make_atomic_load_body(S) \

View File

@@ -99,18 +99,13 @@
MY_MEMORY_ORDER_SEQ_CST - The operation has the same semantics as MY_MEMORY_ORDER_SEQ_CST - The operation has the same semantics as
acquire-release operation, and additionally has acquire-release operation, and additionally has
sequentially-consistent operation ordering. sequentially-consistent operation ordering.
*/
#define intptr void * We choose implementation as follows: on Windows using Visual C++ the native
implementation should be preferable. When using gcc we prefer the Solaris
/* implementation before the gcc because of stability preference, we choose gcc
We choose implementation as follows:
------------------------------------
On Windows using Visual C++ the native implementation should be
preferable. When using gcc we prefer the Solaris implementation
before the gcc because of stability preference, we choose gcc
builtins if available. builtins if available.
*/ */
#if defined(_MSC_VER) #if defined(_MSC_VER)
#include "atomic/generic-msvc.h" #include "atomic/generic-msvc.h"
#elif defined(HAVE_SOLARIS_ATOMIC) #elif defined(HAVE_SOLARIS_ATOMIC)
@@ -125,6 +120,8 @@
#error atomic ops for this platform are not implemented #error atomic ops for this platform are not implemented
#endif #endif
#define intptr void *
/* define missing functions by using the already generated ones */ /* define missing functions by using the already generated ones */
#ifndef make_atomic_add_body #ifndef make_atomic_add_body
#define make_atomic_add_body(S) \ #define make_atomic_add_body(S) \
@@ -207,20 +204,6 @@ make_atomic_store(32)
make_atomic_store(64) make_atomic_store(64)
make_atomic_store(ptr) make_atomic_store(ptr)
#if SIZEOF_LONG == 4
#define my_atomic_addlong(A,B) my_atomic_add32((int32*) (A), (B))
#define my_atomic_loadlong(A) my_atomic_load32((int32*) (A))
#define my_atomic_storelong(A,B) my_atomic_store32((int32*) (A), (B))
#define my_atomic_faslong(A,B) my_atomic_fas32((int32*) (A), (B))
#define my_atomic_caslong(A,B,C) my_atomic_cas32((int32*) (A), (int32*) (B), (C))
#else
#define my_atomic_addlong(A,B) my_atomic_add64((int64*) (A), (B))
#define my_atomic_loadlong(A) my_atomic_load64((int64*) (A))
#define my_atomic_storelong(A,B) my_atomic_store64((int64*) (A), (B))
#define my_atomic_faslong(A,B) my_atomic_fas64((int64*) (A), (B))
#define my_atomic_caslong(A,B,C) my_atomic_cas64((int64*) (A), (int64*) (B), (C))
#endif
#ifdef _atomic_h_cleanup_ #ifdef _atomic_h_cleanup_
#include _atomic_h_cleanup_ #include _atomic_h_cleanup_
#undef _atomic_h_cleanup_ #undef _atomic_h_cleanup_
@@ -247,43 +230,21 @@ make_atomic_store(ptr)
#define LF_BACKOFF (1) #define LF_BACKOFF (1)
#endif #endif
#ifdef __ATOMIC_SEQ_CST #if SIZEOF_LONG == 4
#define MY_MEMORY_ORDER_RELAXED __ATOMIC_RELAXED #define my_atomic_addlong(A,B) my_atomic_add32((int32*) (A), (B))
#define MY_MEMORY_ORDER_CONSUME __ATOMIC_CONSUME #define my_atomic_loadlong(A) my_atomic_load32((int32*) (A))
#define MY_MEMORY_ORDER_ACQUIRE __ATOMIC_ACQUIRE #define my_atomic_storelong(A,B) my_atomic_store32((int32*) (A), (B))
#define MY_MEMORY_ORDER_RELEASE __ATOMIC_RELEASE #define my_atomic_faslong(A,B) my_atomic_fas32((int32*) (A), (B))
#define MY_MEMORY_ORDER_ACQ_REL __ATOMIC_ACQ_REL #define my_atomic_caslong(A,B,C) my_atomic_cas32((int32*) (A), (int32*) (B), (C))
#define MY_MEMORY_ORDER_SEQ_CST __ATOMIC_SEQ_CST
#define my_atomic_store32_explicit(P, D, O) __atomic_store_n((P), (D), (O))
#define my_atomic_store64_explicit(P, D, O) __atomic_store_n((P), (D), (O))
#define my_atomic_storeptr_explicit(P, D, O) __atomic_store_n((P), (D), (O))
#define my_atomic_load32_explicit(P, O) __atomic_load_n((P), (O))
#define my_atomic_load64_explicit(P, O) __atomic_load_n((P), (O))
#define my_atomic_loadptr_explicit(P, O) __atomic_load_n((P), (O))
#define my_atomic_fas32_explicit(P, D, O) __atomic_exchange_n((P), (D), (O))
#define my_atomic_fas64_explicit(P, D, O) __atomic_exchange_n((P), (D), (O))
#define my_atomic_fasptr_explicit(P, D, O) __atomic_exchange_n((P), (D), (O))
#define my_atomic_add32_explicit(P, A, O) __atomic_fetch_add((P), (A), (O))
#define my_atomic_add64_explicit(P, A, O) __atomic_fetch_add((P), (A), (O))
#define my_atomic_cas32_weak_explicit(P, E, D, S, F) \
__atomic_compare_exchange_n((P), (E), (D), true, (S), (F))
#define my_atomic_cas64_weak_explicit(P, E, D, S, F) \
__atomic_compare_exchange_n((P), (E), (D), true, (S), (F))
#define my_atomic_casptr_weak_explicit(P, E, D, S, F) \
__atomic_compare_exchange_n((P), (E), (D), true, (S), (F))
#define my_atomic_cas32_strong_explicit(P, E, D, S, F) \
__atomic_compare_exchange_n((P), (E), (D), false, (S), (F))
#define my_atomic_cas64_strong_explicit(P, E, D, S, F) \
__atomic_compare_exchange_n((P), (E), (D), false, (S), (F))
#define my_atomic_casptr_strong_explicit(P, E, D, S, F) \
__atomic_compare_exchange_n((P), (E), (D), false, (S), (F))
#else #else
#define my_atomic_addlong(A,B) my_atomic_add64((int64*) (A), (B))
#define my_atomic_loadlong(A) my_atomic_load64((int64*) (A))
#define my_atomic_storelong(A,B) my_atomic_store64((int64*) (A), (B))
#define my_atomic_faslong(A,B) my_atomic_fas64((int64*) (A), (B))
#define my_atomic_caslong(A,B,C) my_atomic_cas64((int64*) (A), (int64*) (B), (C))
#endif
#ifndef MY_MEMORY_ORDER_SEQ_CST
#define MY_MEMORY_ORDER_RELAXED #define MY_MEMORY_ORDER_RELAXED
#define MY_MEMORY_ORDER_CONSUME #define MY_MEMORY_ORDER_CONSUME
#define MY_MEMORY_ORDER_ACQUIRE #define MY_MEMORY_ORDER_ACQUIRE