
MDEV-10813 - Clean-up InnoDB atomics, memory barriers and mutexes

Clean up the periodic waking of mutex/rwlock waiters. This was a hack needed to
work around broken mutex/rwlock implementations. We must have sane
implementations now and don't need it anymore: the releasing thread is
guaranteed to wake up waiters.
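As an illustration of that guarantee, a sane event-based release looks roughly like this (a sketch, not code from this commit; set_unlocked() and waiters() are hypothetical helpers, os_event_set() is the InnoDB event API that appears in the diff below):

    /* The releasing thread first publishes the unlocked state, then
    itself wakes anything blocked on the event, so no periodic sweep
    is needed to catch lost wake-ups. */
    void mutex_exit(mutex_t* m)
    {
        set_unlocked(m);                /* make the mutex available */
        if (waiters(m)) {
            os_event_set(m->event);     /* wake blocked waiters */
        }
    }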

Removed redundant ifdef that has equivalent code in both branches.

Removed os0atomic.h and os0atomic.ic: not used anymore.

Clean-up unused cmake checks.
Sergey Vojtovich
2016-09-14 15:12:54 +04:00
parent 5608a737ea
commit 2b47f8ff03
11 changed files with 0 additions and 1079 deletions

View File

@@ -1,397 +0,0 @@
/*****************************************************************************
Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, Google Inc.
Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
briefly in the InnoDB documentation. The contributions by Google are
incorporated with their permission, and subject to the conditions contained in
the file COPYING.Google.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
*****************************************************************************/
/**************************************************//**
@file include/os0atomic.h
Macros for using atomics
Created 2012-09-23 Sunny Bains (Split from os0sync.h)
*******************************************************/
#ifndef os0atomic_h
#define os0atomic_h
#include "univ.i"
#ifdef _WIN32
/** On Windows, InterlockedExchange operates on LONG variable */
typedef LONG lock_word_t;
#elif defined(MUTEX_FUTEX)
typedef int lock_word_t;
# else
typedef ulint lock_word_t;
#endif /* _WIN32 */
#if defined __i386__ || defined __x86_64__ || defined _M_IX86 \
|| defined _M_X64 || defined __WIN__
#define IB_STRONG_MEMORY_MODEL
#endif /* __i386__ || __x86_64__ || _M_IX86 || _M_X64 || __WIN__ */
/**********************************************************//**
Atomic compare-and-swap and increment for InnoDB. */
/** Do an atomic test and set.
@param[in,out] ptr Memory location to set
@param[in] new_val new value
@return old value of memory location. */
UNIV_INLINE
lock_word_t
os_atomic_test_and_set(
volatile lock_word_t* ptr,
lock_word_t new_val);
/** Do an atomic compare and set
@param[in,out] ptr Memory location to set
@param[in] old_val old value to compare
@param[in] new_val new value to set
@return the value of ptr before the operation. */
UNIV_INLINE
lock_word_t
os_atomic_val_compare_and_swap(
volatile lock_word_t* ptr,
lock_word_t old_val,
lock_word_t new_val);
#ifdef _WIN32
/**********************************************************//**
Atomic compare and exchange of signed integers (both 32 and 64 bit).
@return value found before the exchange.
If it is not equal to old_value the exchange did not happen. */
UNIV_INLINE
lint
win_cmp_and_xchg_lint(
/*==================*/
volatile lint* ptr, /*!< in/out: source/destination */
lint new_val, /*!< in: exchange value */
lint old_val); /*!< in: value to compare to */
/**********************************************************//**
Atomic addition of signed integers.
@return Initial value of the variable pointed to by ptr */
UNIV_INLINE
lint
win_xchg_and_add(
/*=============*/
volatile lint* ptr, /*!< in/out: address of destination */
lint val); /*!< in: number to be added */
/**********************************************************//**
Atomic compare and exchange of unsigned integers.
@return value found before the exchange.
If it is not equal to old_value the exchange did not happen. */
UNIV_INLINE
ulint
win_cmp_and_xchg_ulint(
/*===================*/
volatile ulint* ptr, /*!< in/out: source/destination */
ulint new_val, /*!< in: exchange value */
ulint old_val); /*!< in: value to compare to */
/**********************************************************//**
Atomic compare and exchange of 32 bit unsigned integers.
@return value found before the exchange.
If it is not equal to old_value the exchange did not happen. */
UNIV_INLINE
DWORD
win_cmp_and_xchg_dword(
/*===================*/
volatile DWORD* ptr, /*!< in/out: source/destination */
DWORD new_val, /*!< in: exchange value */
DWORD old_val); /*!< in: value to compare to */
/**********************************************************//**
Returns true if swapped, ptr is pointer to target, old_val is value to
compare to, new_val is the value to swap in. */
# define os_compare_and_swap_lint(ptr, old_val, new_val) \
(win_cmp_and_xchg_lint(ptr, new_val, old_val) == old_val)
# define os_compare_and_swap_ulint(ptr, old_val, new_val) \
(win_cmp_and_xchg_ulint(ptr, new_val, old_val) == old_val)
# define os_compare_and_swap_uint32(ptr, old_val, new_val) \
(InterlockedCompareExchange(ptr, new_val, old_val) == old_val)
/* Windows thread objects can always be passed to Windows atomic functions */
# define os_compare_and_swap_thread_id(ptr, old_val, new_val) \
(win_cmp_and_xchg_dword(ptr, new_val, old_val) == old_val)
# define INNODB_RW_LOCKS_USE_ATOMICS
# define IB_ATOMICS_STARTUP_MSG \
"Mutexes and rw_locks use Windows interlocked functions"
/**********************************************************//**
Returns the resulting value, ptr is pointer to target, amount is the
amount of increment. */
# define os_atomic_increment_lint(ptr, amount) \
(win_xchg_and_add(ptr, amount) + amount)
# define os_atomic_increment_ulint(ptr, amount) \
(static_cast<ulint>(win_xchg_and_add( \
reinterpret_cast<volatile lint*>(ptr), \
static_cast<lint>(amount))) \
+ static_cast<ulint>(amount))
# define os_atomic_increment_uint32(ptr, amount) \
(static_cast<ulint>(InterlockedExchangeAdd( \
reinterpret_cast<long*>(ptr), \
static_cast<long>(amount))) \
+ static_cast<ulint>(amount))
# define os_atomic_increment_uint64(ptr, amount) \
(static_cast<ib_uint64_t>(InterlockedExchangeAdd64( \
reinterpret_cast<LONGLONG*>(ptr), \
static_cast<LONGLONG>(amount))) \
+ static_cast<ib_uint64_t>(amount))
/**********************************************************//**
Returns the resulting value, ptr is pointer to target, amount is the
amount to decrement. There is no atomic subtract function on Windows */
# define os_atomic_decrement_lint(ptr, amount) \
(win_xchg_and_add(ptr, -(static_cast<lint>(amount))) - amount)
# define os_atomic_decrement_ulint(ptr, amount) \
(static_cast<ulint>(win_xchg_and_add( \
reinterpret_cast<volatile lint*>(ptr), \
-(static_cast<lint>(amount)))) \
- static_cast<ulint>(amount))
# define os_atomic_decrement_uint32(ptr, amount) \
(static_cast<ib_uint32_t>(InterlockedExchangeAdd( \
reinterpret_cast<long*>(ptr), \
-(static_cast<long>(amount)))) \
- static_cast<ib_uint32_t>(amount))
# define os_atomic_decrement_uint64(ptr, amount) \
(static_cast<ib_uint64_t>(InterlockedExchangeAdd64( \
reinterpret_cast<LONGLONG*>(ptr), \
-(static_cast<LONGLONG>(amount)))) \
- static_cast<ib_uint64_t>(amount))
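Why the macros above add (or subtract) the amount once more: the Windows primitives return the value held before the operation. A minimal illustration (a sketch; values are arbitrary):

    /* InterlockedExchangeAdd returns the value *before* the addition,
    so "fetch-and-add + amount" yields the resulting value, and a
    negated amount emulates the missing atomic subtract. */
    LONG counter = 10;
    LONG before = InterlockedExchangeAdd(&counter, 5);
    /* before == 10, counter == 15; the macro result is before + 5 == 15 */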
#else
/* Fall back to GCC-style atomic builtins. */
/**********************************************************//**
Returns true if swapped, ptr is pointer to target, old_val is value to
compare to, new_val is the value to swap in. */
#if defined(HAVE_GCC_SYNC_BUILTINS)
# define os_compare_and_swap(ptr, old_val, new_val) \
__sync_bool_compare_and_swap(ptr, old_val, new_val)
# define os_compare_and_swap_ulint(ptr, old_val, new_val) \
os_compare_and_swap(ptr, old_val, new_val)
# define os_compare_and_swap_lint(ptr, old_val, new_val) \
os_compare_and_swap(ptr, old_val, new_val)
# define os_compare_and_swap_uint32(ptr, old_val, new_val) \
os_compare_and_swap(ptr, old_val, new_val)
#else
UNIV_INLINE
bool
os_compare_and_swap_ulint(volatile ulint* ptr, ulint old_val, ulint new_val)
{
#ifdef HAVE_IB_GCC_ATOMIC_SEQ_CST
return __atomic_compare_exchange_n(ptr, &old_val, new_val, 0,
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
#else
return __sync_bool_compare_and_swap(ptr, old_val, new_val);
#endif
}
UNIV_INLINE
bool
os_compare_and_swap_lint(volatile lint* ptr, lint old_val, lint new_val)
{
#ifdef HAVE_IB_GCC_ATOMIC_SEQ_CST
return __atomic_compare_exchange_n(ptr, &old_val, new_val, 0,
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
#else
return __sync_bool_compare_and_swap(ptr, old_val, new_val);
#endif
}
UNIV_INLINE
bool
os_compare_and_swap_uint32(volatile ib_uint32_t* ptr, ib_uint32_t old_val, ib_uint32_t new_val)
{
#ifdef HAVE_IB_GCC_ATOMIC_SEQ_CST
return __atomic_compare_exchange_n(ptr, &old_val, new_val, 0,
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
#else
return __sync_bool_compare_and_swap(ptr, old_val, new_val);
#endif
}
#endif /* HAVE_GCC_SYNC_BUILTINS */
# ifdef HAVE_IB_ATOMIC_PTHREAD_T_GCC
#if defined(HAVE_GCC_SYNC_BUILTINS)
# define os_compare_and_swap_thread_id(ptr, old_val, new_val) \
os_compare_and_swap(ptr, old_val, new_val)
#else
UNIV_INLINE
bool
os_compare_and_swap_thread_id(volatile os_thread_id_t* ptr, os_thread_id_t old_val, os_thread_id_t new_val)
{
#ifdef HAVE_IB_GCC_ATOMIC_SEQ_CST
return __atomic_compare_exchange_n(ptr, &old_val, new_val, 0,
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
#else
return __sync_bool_compare_and_swap(ptr, old_val, new_val);
#endif
}
#endif /* HAVE_GCC_SYNC_BUILTINS */
# define INNODB_RW_LOCKS_USE_ATOMICS
# define IB_ATOMICS_STARTUP_MSG \
"Mutexes and rw_locks use GCC atomic builtins"
# else /* HAVE_IB_ATOMIC_PTHREAD_T_GCC */
# define IB_ATOMICS_STARTUP_MSG \
"Mutexes use GCC atomic builtins, rw_locks do not"
# endif /* HAVE_IB_ATOMIC_PTHREAD_T_GCC */
/**********************************************************//**
Returns the resulting value, ptr is pointer to target, amount is the
amount of increment. */
#if defined(HAVE_GCC_SYNC_BUILTINS)
# define os_atomic_increment(ptr, amount) \
__sync_add_and_fetch(ptr, amount)
#else
#ifdef HAVE_IB_GCC_ATOMIC_SEQ_CST
# define os_atomic_increment(ptr, amount) \
__atomic_add_fetch(ptr, amount, __ATOMIC_SEQ_CST)
#else
# define os_atomic_increment(ptr, amount) \
__sync_add_and_fetch(ptr, amount)
#endif
#endif /* HAVE_GCC_SYNC_BUILTINS */
# define os_atomic_increment_lint(ptr, amount) \
os_atomic_increment(ptr, amount)
# define os_atomic_increment_ulint(ptr, amount) \
os_atomic_increment(ptr, amount)
# define os_atomic_increment_uint32(ptr, amount ) \
os_atomic_increment(ptr, amount)
# define os_atomic_increment_uint64(ptr, amount) \
os_atomic_increment(ptr, amount)
/* Returns the resulting value, ptr is pointer to target, amount is the
amount to decrement. */
#if defined(HAVE_GCC_SYNC_BUILTINS)
# define os_atomic_decrement(ptr, amount) \
__sync_sub_and_fetch(ptr, amount)
#else
#ifdef HAVE_IB_GCC_ATOMIC_SEQ_CST
# define os_atomic_decrement(ptr, amount) \
__atomic_sub_fetch(ptr, amount, __ATOMIC_SEQ_CST)
#else
# define os_atomic_decrement(ptr, amount) \
__sync_sub_and_fetch(ptr, amount)
#endif
#endif /* HAVE_GCC_SYNC_BUILTINS */
# define os_atomic_decrement_lint(ptr, amount) \
os_atomic_decrement(ptr, amount)
# define os_atomic_decrement_ulint(ptr, amount) \
os_atomic_decrement(ptr, amount)
# define os_atomic_decrement_uint32(ptr, amount) \
os_atomic_decrement(ptr, amount)
# define os_atomic_decrement_uint64(ptr, amount) \
os_atomic_decrement(ptr, amount)
#endif
#define os_atomic_inc_ulint(m,v,d) os_atomic_increment_ulint(v, d)
#define os_atomic_dec_ulint(m,v,d) os_atomic_decrement_ulint(v, d)
#define TAS(l, n) os_atomic_test_and_set((l), (n))
#define CAS(l, o, n) os_atomic_val_compare_and_swap((l), (o), (n))
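The TAS()/CAS() shorthands were the building blocks of test-and-test-and-set (TTAS) lock loops; roughly (a sketch with hypothetical lock-word values 0 = unlocked, 1 = locked):

    /* TAS returns the previous value, so a return of 0 means this
    thread flipped the word from unlocked to locked. */
    void spin_lock(volatile lock_word_t* lw)
    {
        while (TAS(lw, 1) != 0) {
            while (*lw != 0) {
                /* spin on a plain read to avoid hammering the bus */
            }
        }
    }

    void spin_unlock(volatile lock_word_t* lw)
    {
        *lw = 0;    /* real code pairs this with a release barrier
                    and a waiter wake-up */
    }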
/** barrier definitions for memory ordering */
#ifdef HAVE_IB_GCC_ATOMIC_THREAD_FENCE
# define HAVE_MEMORY_BARRIER
# define os_rmb __atomic_thread_fence(__ATOMIC_ACQUIRE)
# define os_wmb __atomic_thread_fence(__ATOMIC_RELEASE)
# define IB_MEMORY_BARRIER_STARTUP_MSG \
"GCC builtin __atomic_thread_fence() is used for memory barrier"
#elif defined(HAVE_IB_GCC_SYNC_SYNCHRONISE)
# define HAVE_MEMORY_BARRIER
# define os_rmb __sync_synchronize()
# define os_wmb __sync_synchronize()
# define IB_MEMORY_BARRIER_STARTUP_MSG \
"GCC builtin __sync_synchronize() is used for memory barrier"
#elif defined(HAVE_IB_MACHINE_BARRIER_SOLARIS)
# define HAVE_MEMORY_BARRIER
# include <mbarrier.h>
# define os_rmb __machine_r_barrier()
# define os_wmb __machine_w_barrier()
# define IB_MEMORY_BARRIER_STARTUP_MSG \
"Solaris memory ordering functions are used for memory barrier"
#elif defined(HAVE_WINDOWS_MM_FENCE) && defined(_WIN64)
# define HAVE_MEMORY_BARRIER
# include <mmintrin.h>
# define os_rmb _mm_lfence()
# define os_wmb _mm_sfence()
# define IB_MEMORY_BARRIER_STARTUP_MSG \
"_mm_lfence() and _mm_sfence() are used for memory barrier"
#else
# define os_rmb
# define os_wmb
# define IB_MEMORY_BARRIER_STARTUP_MSG \
"Memory barrier is not used"
#endif
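The os_rmb/os_wmb pair implements the classic publish/consume ordering; for example (a sketch assuming the definitions above):

    int          payload;      /* data to publish */
    volatile int ready = 0;    /* flag guarding it */

    void publish(void)
    {
        payload = 42;
        os_wmb;    /* order the data write before the flag write */
        ready = 1;
    }

    void consume(void)
    {
        while (!ready) {}
        os_rmb;    /* order the flag read before the data read */
        /* payload is guaranteed to read 42 here */
    }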
#ifndef UNIV_NONINL
#include "os0atomic.ic"
#endif /* UNIV_NONINL */
#endif /* !os0atomic_h */

View File

@@ -1,224 +0,0 @@
/*****************************************************************************
Copyright (c) 2013, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
*****************************************************************************/
/**************************************************//**
@file include/os0atomic.ic
The interface to the operating system synchronization primitives.
Created 2012-09-23 Sunny Bains (Split from include/os0sync.ic)
*******************************************************/
#ifdef _WIN32
#include <winbase.h>
/* Use inline functions to make 64 and 32 bit versions of windows atomic
functions so that typecasts are evaluated at compile time. Take advantage
that lint is either __int64 or long int and windows atomic functions work
on __int64 and LONG */
/**********************************************************//**
Atomic compare and exchange of signed integers (both 32 and 64 bit).
@return value found before the exchange.
If it is not equal to old_value the exchange did not happen. */
UNIV_INLINE
lint
win_cmp_and_xchg_lint(
/*==================*/
volatile lint* ptr, /*!< in/out: source/destination */
lint new_val, /*!< in: exchange value */
lint old_val) /*!< in: value to compare to */
{
# ifdef _WIN64
return(InterlockedCompareExchange64(ptr, new_val, old_val));
# else
return(InterlockedCompareExchange(ptr, new_val, old_val));
# endif /* _WIN64 */
}
/**********************************************************//**
Atomic addition of signed integers.
@return Initial value of the variable pointed to by ptr */
UNIV_INLINE
lint
win_xchg_and_add(
/*=============*/
volatile lint* ptr, /*!< in/out: address of destination */
lint val) /*!< in: number to be added */
{
#ifdef _WIN64
return(InterlockedExchangeAdd64(ptr, val));
#else
return(InterlockedExchangeAdd(ptr, val));
#endif /* _WIN64 */
}
/**********************************************************//**
Atomic compare and exchange of unsigned integers.
@return value found before the exchange.
If it is not equal to old_value the exchange did not happen. */
UNIV_INLINE
ulint
win_cmp_and_xchg_ulint(
/*===================*/
volatile ulint* ptr, /*!< in/out: source/destination */
ulint new_val, /*!< in: exchange value */
ulint old_val) /*!< in: value to compare to */
{
return((ulint) win_cmp_and_xchg_lint(
(volatile lint*) ptr,
(lint) new_val,
(lint) old_val));
}
/**********************************************************//**
Atomic compare and exchange of 32-bit unsigned integers.
@return value found before the exchange.
If it is not equal to old_value the exchange did not happen. */
UNIV_INLINE
DWORD
win_cmp_and_xchg_dword(
/*===================*/
volatile DWORD* ptr, /*!< in/out: source/destination */
DWORD new_val, /*!< in: exchange value */
DWORD old_val) /*!< in: value to compare to */
{
ut_ad(sizeof(DWORD) == sizeof(LONG)); /* We assume this. */
return(InterlockedCompareExchange(
(volatile LONG*) ptr,
(LONG) new_val,
(LONG) old_val));
}
/** Do an atomic test and set.
@param[in,out] ptr Memory location to set
@param[in] new_val new value
@return old value of memory location. */
UNIV_INLINE
lock_word_t
os_atomic_test_and_set(
volatile lock_word_t* ptr,
lock_word_t new_val)
{
return(InterlockedExchange(ptr, new_val));
}
/** Do an atomic compare and set
@param[in,out] ptr Memory location to set
@param[in] old_val old value to compare
@param[in] new_val new value to set
@return the value of ptr before the operation. */
UNIV_INLINE
lock_word_t
os_atomic_val_compare_and_swap(
volatile lock_word_t* ptr,
lock_word_t old_val,
lock_word_t new_val)
{
return(static_cast<lock_word_t>(win_cmp_and_xchg_lint(
reinterpret_cast<volatile lint*>(ptr),
static_cast<lint>(new_val),
static_cast<lint>(old_val))));
}
#elif defined(HAVE_IB_GCC_ATOMIC_COMPARE_EXCHANGE)
/** Do an atomic test and set.
@param[in,out] ptr Memory location to set
@param[in] new_val new value
@return old value of memory location. */
UNIV_INLINE
lock_word_t
os_atomic_test_and_set(
volatile lock_word_t* ptr,
lock_word_t new_val)
{
lock_word_t ret;
/* Silence a compiler warning about unused ptr. */
(void) ptr;
#if defined(__powerpc__) || defined(__aarch64__)
__atomic_exchange(ptr, &new_val, &ret, __ATOMIC_SEQ_CST);
#else
__atomic_exchange(ptr, &new_val, &ret, __ATOMIC_RELEASE);
#endif
return(ret);
}
/** Do an atomic compare and set
@param[in,out] ptr Memory location to set
@param[in] old_val old value to compare
@param[in] new_val new value to set
@return the value of ptr before the operation. */
UNIV_INLINE
lock_word_t
os_atomic_val_compare_and_swap(
volatile lock_word_t* ptr,
lock_word_t old_val,
lock_word_t new_val)
{
/* Silence a compiler warning about unused ptr. */
(void) ptr;
#if defined(__powerpc__) || defined(__aarch64__)
__atomic_compare_exchange(ptr, &old_val, &new_val, false,
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
#else
__atomic_compare_exchange(ptr, &old_val, &new_val, false,
__ATOMIC_RELEASE, __ATOMIC_ACQUIRE);
#endif
return(old_val);
}
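Returning old_val is correct on both paths because, on failure, __atomic_compare_exchange() stores the value it actually found into old_val. A usage sketch (values are arbitrary):

    lock_word_t word = 3;
    lock_word_t prev = os_atomic_val_compare_and_swap(&word, 5, 7);
    /* The CAS fails (3 != 5): the builtin writes the observed value 3
    into old_val, so prev == 3 and word is unchanged. On success,
    prev equals the expected old value and word becomes 7. */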
#elif defined(IB_STRONG_MEMORY_MODEL)
/** Do an atomic test and set.
@param[in,out] ptr Memory location to set
@param[in] new_val new value
@return old value of memory location. */
UNIV_INLINE
lock_word_t
os_atomic_test_and_set(
volatile lock_word_t* ptr,
lock_word_t new_val)
{
return(__sync_lock_test_and_set(ptr, new_val));
}
/** Do an atomic compare and set
@param[in,out] ptr Memory location to set
@param[in] old_val old value to compare
@param[in] new_val new value to set
@return the value of ptr before the operation. */
UNIV_INLINE
lock_word_t
os_atomic_val_compare_and_swap(
volatile lock_word_t* ptr,
lock_word_t old_val,
lock_word_t new_val)
{
return(__sync_val_compare_and_swap(ptr, old_val, new_val));
}
#else
#error "Unsupported platform"
#endif /* _WIN32 */

View File

@@ -29,7 +29,6 @@ Created Feb 20, 2014 Vasil Dimov
#include "univ.i"
#include "os0atomic.h"
#include "ut0ut.h"
/** Execute a given function exactly once in a multi-threaded environment

View File

@@ -85,13 +85,6 @@ Note that one of the wait objects was signalled. */
void
sync_array_object_signalled();
/**********************************************************************//**
If the wakeup algorithm does not work perfectly at semaphore releases,
this function will do the waking (see the comment in mutex_exit). This
function should be called about once per second in the server.
void
sync_arr_wake_threads_if_sema_free();
/**********************************************************************//**
Prints warnings of long semaphore waits to stderr.
@return TRUE if fatal semaphore wait threshold was exceeded */

View File

@@ -32,7 +32,6 @@ extern ulong srv_spin_wait_delay;
extern ulong srv_n_spin_wait_rounds;
extern ulong srv_force_recovery_crash;
#include "os0atomic.h"
#include "sync0policy.h"
#include "ib0mutex.h"
#include <set>
@@ -45,25 +44,6 @@ extern ulong srv_force_recovery_crash;
typedef OSMutex EventMutex;
#ifndef UNIV_DEBUG
# ifdef HAVE_IB_LINUX_FUTEX
UT_MUTEX_TYPE(TTASFutexMutex, GenericPolicy, FutexMutex);
UT_MUTEX_TYPE(TTASFutexMutex, BlockMutexPolicy, BlockFutexMutex);
# endif /* HAVE_IB_LINUX_FUTEX */
UT_MUTEX_TYPE(TTASMutex, GenericPolicy, SpinMutex);
UT_MUTEX_TYPE(TTASMutex, BlockMutexPolicy, BlockSpinMutex);
UT_MUTEX_TYPE(OSTrackMutex, GenericPolicy, SysMutex);
UT_MUTEX_TYPE(OSTrackMutex, BlockMutexPolicy, BlockSysMutex);
UT_MUTEX_TYPE(TTASEventMutex, GenericPolicy, SyncArrayMutex);
UT_MUTEX_TYPE(TTASEventMutex, BlockMutexPolicy, BlockSyncArrayMutex);
#else /* !UNIV_DEBUG */
# ifdef HAVE_IB_LINUX_FUTEX
UT_MUTEX_TYPE(TTASFutexMutex, GenericPolicy, FutexMutex);
UT_MUTEX_TYPE(TTASFutexMutex, BlockMutexPolicy, BlockFutexMutex);
@@ -78,8 +58,6 @@ UT_MUTEX_TYPE(OSTrackMutex, BlockMutexPolicy, BlockSysMutex);
UT_MUTEX_TYPE(TTASEventMutex, GenericPolicy, SyncArrayMutex);
UT_MUTEX_TYPE(TTASEventMutex, BlockMutexPolicy, BlockSyncArrayMutex);
#endif /* !UNIV_DEBUG */
#ifdef MUTEX_FUTEX
/** The default mutex type. */
typedef FutexMutex ib_mutex_t;

View File

@@ -35,10 +35,6 @@ Created 1/20/1994 Heikki Tuuri
#include "db0err.h"
#ifndef UNIV_HOTBACKUP
# include "os0atomic.h"
#endif /* UNIV_HOTBACKUP */
#include <time.h>
#ifndef MYSQL_SERVER

View File

@@ -158,200 +158,12 @@ IF(HAVE_FALLOC_PUNCH_HOLE_AND_KEEP_SIZE)
ENDIF()
IF(NOT MSVC)
# either define HAVE_IB_GCC_ATOMIC_BUILTINS or not
# workaround for gcc 4.1.2 RHEL5/x86, gcc atomic ops only work under -march=i686
IF(CMAKE_SYSTEM_PROCESSOR STREQUAL "i686" AND CMAKE_COMPILER_IS_GNUCC AND
CMAKE_C_COMPILER_VERSION VERSION_LESS "4.1.3")
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=i686")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=i686")
ENDIF()
CHECK_C_SOURCE(
"
int main()
{
long x;
long y;
long res;
x = 10;
y = 123;
res = __sync_bool_compare_and_swap(&x, x, y);
if (!res || x != y) {
return(1);
}
x = 10;
y = 123;
res = __sync_bool_compare_and_swap(&x, x + 1, y);
if (res || x != 10) {
return(1);
}
x = 10;
y = 123;
res = __sync_add_and_fetch(&x, y);
if (res != 123 + 10 || x != 123 + 10) {
return(1);
}
return(0);
}"
HAVE_IB_GCC_ATOMIC_BUILTINS
)
CHECK_C_SOURCE(
"
int main()
{
long res;
char c;
c = 10;
res = __sync_lock_test_and_set(&c, 123);
if (res != 10 || c != 123) {
return(1);
}
return(0);
}"
HAVE_IB_GCC_ATOMIC_BUILTINS_BYTE
)
CHECK_C_SOURCE(
"#include<stdint.h>
int main()
{
int64_t x,y,res;
x = 10;
y = 123;
res = __sync_sub_and_fetch(&y, x);
if (res != y || y != 113) {
return(1);
}
res = __sync_add_and_fetch(&y, x);
if (res != y || y != 123) {
return(1);
}
return(0);
}"
HAVE_IB_GCC_ATOMIC_BUILTINS_64
)
CHECK_C_SOURCE(
"#include<stdint.h>
int main()
{
__sync_synchronize();
return(0);
}"
HAVE_IB_GCC_SYNC_SYNCHRONISE
)
CHECK_C_SOURCE(
"#include<stdint.h>
int main()
{
__atomic_thread_fence(__ATOMIC_ACQUIRE);
__atomic_thread_fence(__ATOMIC_RELEASE);
return(0);
}"
HAVE_IB_GCC_ATOMIC_THREAD_FENCE
)
CHECK_C_SOURCE(
"#include<stdint.h>
int main()
{
unsigned char c;
__atomic_test_and_set(&c, __ATOMIC_ACQUIRE);
__atomic_clear(&c, __ATOMIC_RELEASE);
return(0);
}"
HAVE_IB_GCC_ATOMIC_TEST_AND_SET
)
CHECK_C_SOURCE_RUNS(
"#include<stdint.h>
int main()
{
unsigned char a = 0;
unsigned char b = 0;
unsigned char c = 1;
__atomic_exchange(&a, &b, &c, __ATOMIC_RELEASE);
__atomic_compare_exchange(&a, &b, &c, 0,
__ATOMIC_RELEASE, __ATOMIC_ACQUIRE);
return(0);
}"
HAVE_IB_GCC_ATOMIC_COMPARE_EXCHANGE
)
CHECK_C_SOURCE_RUNS(
"#include<stdint.h>
int main()
{
unsigned char a = 0;
unsigned char b = 0;
unsigned char c = 1;
__atomic_compare_exchange_n(&a, &b, &c, 0,
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
return (0);
}"
HAVE_IB_GCC_ATOMIC_SEQ_CST
)
IF (HAVE_IB_GCC_ATOMIC_SEQ_CST)
ADD_DEFINITIONS(-DHAVE_IB_GCC_ATOMIC_SEQ_CST=1)
ENDIF()
IF(HAVE_IB_GCC_ATOMIC_BUILTINS)
ADD_DEFINITIONS(-DHAVE_IB_GCC_ATOMIC_BUILTINS=1)
ENDIF()
IF(HAVE_IB_GCC_ATOMIC_BUILTINS_BYTE)
ADD_DEFINITIONS(-DHAVE_IB_GCC_ATOMIC_BUILTINS_BYTE=1)
ENDIF()
IF(HAVE_IB_GCC_ATOMIC_BUILTINS_64)
ADD_DEFINITIONS(-DHAVE_IB_GCC_ATOMIC_BUILTINS_64=1)
ENDIF()
IF(HAVE_IB_GCC_SYNC_SYNCHRONISE)
ADD_DEFINITIONS(-DHAVE_IB_GCC_SYNC_SYNCHRONISE=1)
ENDIF()
IF(HAVE_IB_GCC_ATOMIC_THREAD_FENCE)
ADD_DEFINITIONS(-DHAVE_IB_GCC_ATOMIC_THREAD_FENCE=1)
ENDIF()
IF(HAVE_IB_GCC_ATOMIC_TEST_AND_SET)
ADD_DEFINITIONS(-DHAVE_IB_GCC_ATOMIC_TEST_AND_SET=1)
ENDIF()
IF(HAVE_IB_GCC_ATOMIC_COMPARE_EXCHANGE)
ADD_DEFINITIONS(-DHAVE_IB_GCC_ATOMIC_COMPARE_EXCHANGE=1)
ENDIF()
# either define HAVE_IB_ATOMIC_PTHREAD_T_GCC or not
IF(NOT CMAKE_CROSSCOMPILING)
CHECK_C_SOURCE_RUNS(
"
#include <pthread.h>
#include <string.h>
int main() {
pthread_t x1;
pthread_t x2;
pthread_t x3;
memset(&x1, 0x0, sizeof(x1));
memset(&x2, 0x0, sizeof(x2));
memset(&x3, 0x0, sizeof(x3));
__sync_bool_compare_and_swap(&x1, x2, x3);
return(0);
}"
HAVE_IB_ATOMIC_PTHREAD_T_GCC)
ENDIF()
IF(HAVE_IB_ATOMIC_PTHREAD_T_GCC)
ADD_DEFINITIONS(-DHAVE_IB_ATOMIC_PTHREAD_T_GCC=1)
ENDIF()
# Only use futexes on Linux if GCC atomics are available
IF(NOT MSVC AND NOT CMAKE_CROSSCOMPILING)
@@ -402,73 +214,6 @@ IF(HAVE_C99_INITIALIZERS)
ADD_DEFINITIONS(-DHAVE_C99_INITIALIZERS)
ENDIF()
# Solaris atomics
IF(CMAKE_SYSTEM_NAME STREQUAL "SunOS")
CHECK_FUNCTION_EXISTS(atomic_cas_ulong HAVE_ATOMIC_CAS_ULONG)
CHECK_FUNCTION_EXISTS(atomic_cas_32 HAVE_ATOMIC_CAS_32)
CHECK_FUNCTION_EXISTS(atomic_cas_64 HAVE_ATOMIC_CAS_64)
CHECK_FUNCTION_EXISTS(atomic_add_long_nv HAVE_ATOMIC_ADD_LONG_NV)
CHECK_FUNCTION_EXISTS(atomic_swap_uchar HAVE_ATOMIC_SWAP_UCHAR)
IF(HAVE_ATOMIC_CAS_ULONG AND
HAVE_ATOMIC_CAS_32 AND
HAVE_ATOMIC_CAS_64 AND
HAVE_ATOMIC_ADD_LONG_NV AND
HAVE_ATOMIC_SWAP_UCHAR)
SET(HAVE_IB_SOLARIS_ATOMICS 1)
ENDIF()
IF(HAVE_IB_SOLARIS_ATOMICS)
ADD_DEFINITIONS(-DHAVE_IB_SOLARIS_ATOMICS=1)
ENDIF()
# either define HAVE_IB_ATOMIC_PTHREAD_T_SOLARIS or not
CHECK_C_SOURCE_COMPILES(
" #include <pthread.h>
#include <string.h>
int main(int argc, char** argv) {
pthread_t x1;
pthread_t x2;
pthread_t x3;
memset(&x1, 0x0, sizeof(x1));
memset(&x2, 0x0, sizeof(x2));
memset(&x3, 0x0, sizeof(x3));
if (sizeof(pthread_t) == 4) {
atomic_cas_32(&x1, x2, x3);
} else if (sizeof(pthread_t) == 8) {
atomic_cas_64(&x1, x2, x3);
} else {
return(1);
}
return(0);
}
" HAVE_IB_ATOMIC_PTHREAD_T_SOLARIS)
CHECK_C_SOURCE_COMPILES(
"#include <mbarrier.h>
int main() {
__machine_r_barrier();
__machine_w_barrier();
return(0);
}"
HAVE_IB_MACHINE_BARRIER_SOLARIS)
IF(HAVE_IB_ATOMIC_PTHREAD_T_SOLARIS)
ADD_DEFINITIONS(-DHAVE_IB_ATOMIC_PTHREAD_T_SOLARIS=1)
ENDIF()
IF(HAVE_IB_MACHINE_BARRIER_SOLARIS)
ADD_DEFINITIONS(-DHAVE_IB_MACHINE_BARRIER_SOLARIS=1)
ENDIF()
ENDIF()
IF(UNIX)
# this is needed to know which one of atomic_cas_32() or atomic_cas_64()
# to use in the source
@@ -481,11 +226,6 @@ IF(SIZEOF_PTHREAD_T)
ADD_DEFINITIONS(-DSIZEOF_PTHREAD_T=${SIZEOF_PTHREAD_T})
ENDIF()
IF(MSVC)
ADD_DEFINITIONS(-DHAVE_WINDOWS_ATOMICS)
ADD_DEFINITIONS(-DHAVE_WINDOWS_MM_FENCE)
ENDIF()
SET(MUTEXTYPE "event" CACHE STRING "Mutex type: event, sys or futex")
IF(MUTEXTYPE MATCHES "event")

View File

@@ -152,19 +152,6 @@ srv_conc_enter_innodb_with_atomics(
return;
}
if (srv_thread_concurrency == 0) {
if (notified_mysql) {
(void) my_atomic_addlint(
&srv_conc.n_waiting, -1);
thd_wait_end(trx->mysql_thd);
}
return;
}
if (srv_conc.n_active < (lint) srv_thread_concurrency) {
ulint n_active;

View File

@@ -426,12 +426,7 @@ ulong srv_doublewrite_batch_size = 120;
ulong srv_replication_delay = 0;
/*-------------------------------------------*/
#ifdef HAVE_MEMORY_BARRIER
/* No point in waiting long when memory barriers are used */
UNIV_INTERN ulong srv_n_spin_wait_rounds = 15;
#else
UNIV_INTERN ulong srv_n_spin_wait_rounds = 30;
#endif
ulong srv_spin_wait_delay = 6;
ibool srv_priority_boost = TRUE;
@@ -1896,8 +1891,6 @@ exit_func:
/*********************************************************************//**
A thread which prints warnings about semaphore waits which have lasted
too long. These can be used to track bugs which cause hangs.
Note: In order to make sync_arr_wake_threads_if_sema_free work as expected,
we should avoid waiting on any mutexes in this function!
@return a dummy parameter */
extern "C"
os_thread_ret_t
@@ -1960,12 +1953,6 @@ loop:
eviction policy. */
buf_LRU_stat_update();
/* In case mutex_exit is not a memory barrier, it is
theoretically possible that some threads are left waiting even though
the semaphore is already released. Wake up those threads: */
sync_arr_wake_threads_if_sema_free();
if (sync_array_print_long_waits(&waiter, &sema)
&& sema == old_sema && os_thread_eq(waiter, old_waiter)) {
#if defined(WITH_WSREP) && defined(WITH_INNODB_DISALLOW_WRITES)

View File

@@ -1517,17 +1517,6 @@ innobase_start_or_create_for_mysql(void)
ib::info() << "Mutexes and rw_locks use GCC atomic builtins";
#endif
ib::info() << MUTEX_TYPE;
ib::info() << IB_MEMORY_BARRIER_STARTUP_MSG;
#ifndef HAVE_MEMORY_BARRIER
#if defined __i386__ || defined __x86_64__ || defined _M_IX86 || defined _M_X64 || defined _WIN32
#else
ib::warn() << "MySQL was built without a memory barrier capability on"
" this architecture, which might allow a mutex/rw_lock"
" violation under high thread concurrency. This may cause a"
" hang.";
#endif /* IA32 or AMD64 */
#endif /* HAVE_MEMORY_BARRIER */
ib::info() << "Compressed tables use zlib " ZLIB_VERSION
#ifdef UNIV_ZIP_DEBUG

View File

@@ -961,81 +961,6 @@ sync_array_detect_deadlock(
}
#endif /* UNIV_DEBUG */
/******************************************************************//**
Determines if we can wake up the thread waiting for a semaphore. */
static
bool
sync_arr_cell_can_wake_up(
/*======================*/
sync_cell_t* cell) /*!< in: cell to search */
{
rw_lock_t* lock;
switch (cell->request_type) {
WaitMutex* mutex;
BlockWaitMutex* bpmutex;
case SYNC_MUTEX:
mutex = cell->latch.mutex;
os_rmb;
if (mutex->state() == MUTEX_STATE_UNLOCKED) {
return(true);
}
break;
case SYNC_BUF_BLOCK:
bpmutex = cell->latch.bpmutex;
os_rmb;
if (bpmutex->state() == MUTEX_STATE_UNLOCKED) {
return(true);
}
break;
case RW_LOCK_X:
case RW_LOCK_SX:
lock = cell->latch.lock;
os_rmb;
if (lock->lock_word > X_LOCK_HALF_DECR) {
/* Either unlocked or only read locked. */
return(true);
}
break;
case RW_LOCK_X_WAIT:
lock = cell->latch.lock;
/* lock_word == 0 means all readers or sx have left */
os_rmb;
if (lock->lock_word == 0) {
return(true);
}
break;
case RW_LOCK_S:
lock = cell->latch.lock;
/* lock_word > 0 means no writer or reserved writer */
os_rmb;
if (lock->lock_word > 0) {
return(true);
}
}
return(false);
}
/**********************************************************************//**
Increments the signalled count. */
void
@@ -1045,58 +970,6 @@ sync_array_object_signalled()
++sg_count;
}
/**********************************************************************//**
If the wakeup algorithm does not work perfectly at semaphore releases,
this function will do the waking (see the comment in mutex_exit). This
function should be called about once per second in the server.
Note that there's a race condition between this thread and mutex_exit
changing the lock_word and calling signal_object, so sometimes this finds
threads to wake up even when nothing has gone wrong. */
static
void
sync_array_wake_threads_if_sema_free_low(
/*=====================================*/
sync_array_t* arr) /* in/out: wait array */
{
sync_array_enter(arr);
for (ulint i = 0; i < arr->next_free_slot; ++i) {
sync_cell_t* cell;
cell = sync_array_get_nth_cell(arr, i);
if (cell->latch.mutex != 0 && sync_arr_cell_can_wake_up(cell)) {
os_event_t event;
event = sync_cell_get_event(cell);
os_event_set(event);
}
}
sync_array_exit(arr);
}
/**********************************************************************//**
If the wakeup algorithm does not work perfectly at semaphore releases,
this function will do the waking (see the comment in mutex_exit). This
function should be called about once per second in the server.
Note that there's a race condition between this thread and mutex_exit
changing the lock_word and calling signal_object, so sometimes this finds
threads to wake up even when nothing has gone wrong. */
void
sync_arr_wake_threads_if_sema_free(void)
/*====================================*/
{
for (ulint i = 0; i < sync_array_size; ++i) {
sync_array_wake_threads_if_sema_free_low(
sync_wait_array[i]);
}
}
/**********************************************************************//**
Prints warnings of long semaphore waits to stderr.
@return TRUE if fatal semaphore wait threshold was exceeded */