
Revert "For all ppc compilers, implement pg_atomic_fetch_add_ with inline asm."

This reverts commit e7ff59686e.  It
defined pg_atomic_fetch_add_u32_impl() without defining
pg_atomic_compare_exchange_u32_impl(), which is incompatible with
src/include/port/atomics/fallback.h.  Per buildfarm member prairiedog.

Discussion: https://postgr.es/m/7517.1568470247@sss.pgh.pa.us
Author: Noah Misch
Date:   2019-09-14 19:38:41 -07:00
Parent: e7ff59686e
Commit: 87e9fae069

5 changed files with 66 additions and 161 deletions
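Background on the incompatibility, as I read the layering of the atomics headers: compare-and-swap is the base primitive. When a platform header claims native 32-bit support (PG_HAVE_ATOMIC_U32_SUPPORT), the spinlock-based simulation in fallback.h stays out of the way, and the generic layer synthesizes any missing operations, fetch-add included, from the compare-exchange implementation. Declaring only a fetch-add therefore leaves a platform whose compiler supplies no native CAS with nothing to build on. A minimal, self-contained sketch of that one-way dependency, using invented my_* names rather than the real PostgreSQL functions:

#include <stdbool.h>
#include <stdint.h>

/*
 * Hypothetical illustration (my_* names are invented, not PostgreSQL's API):
 * fetch-add can always be synthesized from compare-exchange, but not the
 * other way around, so compare-exchange is the primitive the rest of the
 * atomics layer has to be able to find.
 */
typedef struct
{
    volatile uint32_t value;
} my_atomic_u32;

/* The one primitive a platform must supply (here via a GCC builtin). */
static inline bool
my_compare_exchange_u32(volatile my_atomic_u32 *ptr, uint32_t *expected, uint32_t newval)
{
    return __atomic_compare_exchange_n(&ptr->value, expected, newval,
                                       false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}

/* Everything else, fetch-add included, can be a CAS retry loop on top. */
static inline uint32_t
my_fetch_add_u32(volatile my_atomic_u32 *ptr, int32_t add_)
{
    uint32_t old = ptr->value;

    while (!my_compare_exchange_u32(ptr, &old, old + (uint32_t) add_))
        ;                       /* failed CAS refreshed "old"; retry */
    return old;
}

The reverted arch-ppc.h defined only the fetch-add half of this pair while still claiming PG_HAVE_ATOMIC_U32_SUPPORT, which, per the message above, is an arrangement fallback.h cannot accommodate.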


@@ -329,9 +329,6 @@
/* Define to 1 if you have isinf(). */
#undef HAVE_ISINF

/* Define to 1 if __builtin_constant_p(x) implies "i"(x) acceptance. */
#undef HAVE_I_CONSTRAINT__BUILTIN_CONSTANT_P

/* Define to 1 if you have the <langinfo.h> header file. */
#undef HAVE_LANGINFO_H


@@ -25,103 +25,5 @@
#define pg_write_barrier_impl() __asm__ __volatile__ ("lwsync" : : : "memory")
#endif
#define PG_HAVE_ATOMIC_U32_SUPPORT
typedef struct pg_atomic_uint32
{
    volatile uint32 value;
} pg_atomic_uint32;

/* 64bit atomics are only supported in 64bit mode */
#ifdef __64BIT__
#define PG_HAVE_ATOMIC_U64_SUPPORT
typedef struct pg_atomic_uint64
{
    volatile uint64 value pg_attribute_aligned(8);
} pg_atomic_uint64;
#endif /* __64BIT__ */

#define PG_HAVE_ATOMIC_FETCH_ADD_U32
static inline uint32
pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_)
{
    uint32 _t;
    uint32 res;

    /*
     * xlc has a no-longer-documented __fetch_and_add() intrinsic. In xlc
     * 12.01.0000.0000, it emits a leading "sync" and trailing "isync". In
     * xlc 13.01.0003.0004, it emits neither. Hence, using the intrinsic
     * would add redundant syncs on xlc 12.
     */
#ifdef HAVE_I_CONSTRAINT__BUILTIN_CONSTANT_P
    if (__builtin_constant_p(add_) &&
        add_ <= PG_INT16_MAX && add_ >= PG_INT16_MIN)
        __asm__ __volatile__(
            "    sync              \n"
            "    lwarx   %1,0,%4   \n"
            "    addi    %0,%1,%3  \n"
            "    stwcx.  %0,0,%4   \n"
            "    bne     $-12      \n"    /* branch to lwarx */
            "    isync             \n"
            : "=&r"(_t), "=&r"(res), "+m"(ptr->value)
            : "i"(add_), "r"(&ptr->value)
            : "memory", "cc");
    else
#endif
        __asm__ __volatile__(
            "    sync              \n"
            "    lwarx   %1,0,%4   \n"
            "    add     %0,%1,%3  \n"
            "    stwcx.  %0,0,%4   \n"
            "    bne     $-12      \n"    /* branch to lwarx */
            "    isync             \n"
            : "=&r"(_t), "=&r"(res), "+m"(ptr->value)
            : "r"(add_), "r"(&ptr->value)
            : "memory", "cc");
    return res;
}
#ifdef PG_HAVE_ATOMIC_U64_SUPPORT
#define PG_HAVE_ATOMIC_FETCH_ADD_U64
static inline uint64
pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
{
    uint64 _t;
    uint64 res;

    /* Like u32, but s/lwarx/ldarx/; s/stwcx/stdcx/ */
#ifdef HAVE_I_CONSTRAINT__BUILTIN_CONSTANT_P
    if (__builtin_constant_p(add_) &&
        add_ <= PG_INT16_MAX && add_ >= PG_INT16_MIN)
        __asm__ __volatile__(
            "    sync              \n"
            "    ldarx   %1,0,%4   \n"
            "    addi    %0,%1,%3  \n"
            "    stdcx.  %0,0,%4   \n"
            "    bne     $-12      \n"    /* branch to ldarx */
            "    isync             \n"
            : "=&r"(_t), "=&r"(res), "+m"(ptr->value)
            : "i"(add_), "r"(&ptr->value)
            : "memory", "cc");
    else
#endif
        __asm__ __volatile__(
            "    sync              \n"
            "    ldarx   %1,0,%4   \n"
            "    add     %0,%1,%3  \n"
            "    stdcx.  %0,0,%4   \n"
            "    bne     $-12      \n"    /* branch to ldarx */
            "    isync             \n"
            : "=&r"(_t), "=&r"(res), "+m"(ptr->value)
            : "r"(add_), "r"(&ptr->value)
            : "memory", "cc");
    return res;
}
#endif /* PG_HAVE_ATOMIC_U64_SUPPORT */
/* per architecture manual doubleword accesses have single copy atomicity */
#define PG_HAVE_8BYTE_SINGLE_COPY_ATOMICITY
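For readers who don't speak POWER assembly: each of the removed routines is a load-reserve/store-conditional retry loop (lwarx/stwcx., or ldarx/stdcx. for 64 bits) bracketed by sync and isync, i.e. a full-barrier fetch-and-add that returns the old value. A rough functional equivalent in terms of the GCC-style __atomic builtins, shown only for orientation (the header hand-codes the asm rather than use the xlc __fetch_and_add() intrinsic, whose barrier emission changed between xlc 12 and 13, per the comment above):

#include <stdint.h>

/*
 * Orientation only: roughly what the lwarx/stwcx. loop above computes.  The
 * real header spells it out in asm so the barrier placement does not depend
 * on any particular compiler's intrinsic.
 */
static inline uint32_t
fetch_add_u32_equivalent(volatile uint32_t *value, int32_t add_)
{
    /* Returns the value before the addition, like pg_atomic_fetch_add_u32_impl(). */
    return __atomic_fetch_add(value, (uint32_t) add_, __ATOMIC_SEQ_CST);
}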


@@ -18,6 +18,23 @@
#if defined(HAVE_ATOMICS)
#define PG_HAVE_ATOMIC_U32_SUPPORT
typedef struct pg_atomic_uint32
{
    volatile uint32 value;
} pg_atomic_uint32;

/* 64bit atomics are only supported in 64bit mode */
#ifdef __64BIT__
#define PG_HAVE_ATOMIC_U64_SUPPORT
typedef struct pg_atomic_uint64
{
    volatile uint64 value pg_attribute_aligned(8);
} pg_atomic_uint64;
#endif /* __64BIT__ */

#define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32
static inline bool
pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
@@ -52,6 +69,33 @@ pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
    return ret;
}
#define PG_HAVE_ATOMIC_FETCH_ADD_U32
static inline uint32
pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_)
{
    uint32 _t;
    uint32 res;

    /*
     * xlc has a no-longer-documented __fetch_and_add() intrinsic. In xlc
     * 12.01.0000.0000, it emits a leading "sync" and trailing "isync". In
     * xlc 13.01.0003.0004, it emits neither. Hence, using the intrinsic
     * would add redundant syncs on xlc 12.
     */
    __asm__ __volatile__(
        "    sync              \n"
        "    lwarx   %1,0,%4   \n"
        "    add     %0,%1,%3  \n"
        "    stwcx.  %0,0,%4   \n"
        "    bne     $-12      \n"    /* branch to lwarx */
        "    isync             \n"
        : "=&r"(_t), "=&r"(res), "+m"(ptr->value)
        : "r"(add_), "r"(&ptr->value)
        : "memory", "cc");
    return res;
}
#ifdef PG_HAVE_ATOMIC_U64_SUPPORT
#define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64
@@ -71,6 +115,28 @@ pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
    return ret;
}
#define PG_HAVE_ATOMIC_FETCH_ADD_U64
static inline uint64
pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
{
    uint64 _t;
    uint64 res;

    /* Like u32, but s/lwarx/ldarx/; s/stwcx/stdcx/ */
    __asm__ __volatile__(
        "    sync              \n"
        "    ldarx   %1,0,%4   \n"
        "    add     %0,%1,%3  \n"
        "    stdcx.  %0,0,%4   \n"
        "    bne     $-12      \n"    /* branch to ldarx */
        "    isync             \n"
        : "=&r"(_t), "=&r"(res), "+m"(ptr->value)
        : "r"(add_), "r"(&ptr->value)
        : "memory", "cc");
    return res;
}
#endif /* PG_HAVE_ATOMIC_U64_SUPPORT */
#endif /* defined(HAVE_ATOMICS) */