
MDEV-7437 remove support for "atomics" with rwlocks

This commit is contained in:
Sergei Golubchik
2015-01-12 17:03:45 +01:00
parent 1f0ad6c6b3
commit e695db0f2d
40 changed files with 143 additions and 740 deletions
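For orientation, here is a minimal sketch of what the removed MY_ATOMIC_MODE_RWLOCKS mode boiled down to versus what the retained configurations do. This is not MariaDB code: all names are invented, and the lock is folded into the emulated function for brevity (in the real code the caller held it via my_atomic_rwlock_wrlock()). The native variant uses a GCC __atomic built-in, which is what the default build relies on according to the CMake help text below.

/* Minimal sketch, not MariaDB code: contrast of the removed lock-emulated
   "atomic" with a native atomic op.  Build: gcc -std=c11 -pthread sketch.c */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static int32_t counter;
static pthread_mutex_t counter_lock = PTHREAD_MUTEX_INITIALIZER;

/* What the rwlock mode emulated: a plain read-modify-write made safe by a
   pthread lock held around it.  Returns the old value. */
static int32_t add32_emulated(int32_t *a, int32_t v)
{
  pthread_mutex_lock(&counter_lock);
  int32_t old = *a;
  *a += v;
  pthread_mutex_unlock(&counter_lock);
  return old;
}

/* What the retained modes use: a real atomic instruction
   (GCC __atomic built-in shown here).  Also returns the old value. */
static int32_t add32_native(int32_t *a, int32_t v)
{
  return __atomic_fetch_add(a, v, __ATOMIC_SEQ_CST);
}

int main(void)
{
  add32_emulated(&counter, 1);
  add32_native(&counter, 1);
  printf("counter = %d\n", counter);    /* prints: counter = 2 */
  return 0;
}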

View File

@@ -473,7 +473,6 @@
#cmakedefine HAVE_SOLARIS_STYLE_GETHOST 1
#cmakedefine MY_ATOMIC_MODE_DUMMY 1
#cmakedefine MY_ATOMIC_MODE_RWLOCKS 1
#cmakedefine HAVE_GCC_ATOMIC_BUILTINS 1
#cmakedefine HAVE_SOLARIS_ATOMIC 1
#cmakedefine HAVE_DECL_SHM_HUGETLB 1

View File

@@ -964,8 +964,6 @@ MARK_AS_ADVANCED(NO_ALARM)
IF(CMAKE_COMPILER_IS_GNUCXX)
IF(WITH_ATOMIC_OPS STREQUAL "up")
SET(MY_ATOMIC_MODE_DUMMY 1 CACHE BOOL "Assume single-CPU mode, no concurrency")
ELSEIF(WITH_ATOMIC_OPS STREQUAL "rwlocks")
SET(MY_ATOMIC_MODE_RWLOCKS 1 CACHE BOOL "Use pthread rwlocks for atomic ops")
ELSEIF(WITH_ATOMIC_OPS STREQUAL "smp")
ELSEIF(NOT WITH_ATOMIC_OPS)
CHECK_CXX_SOURCE_COMPILES("
@@ -997,12 +995,8 @@ ELSE()
ENDIF()
ENDIF()
SET(WITH_ATOMIC_OPS "${WITH_ATOMIC_OPS}" CACHE STRING
"Implement atomic operations using pthread rwlocks (rwlocks); or atomic CPU
instructions for multi-processor (smp) or uniprocessor (up)
configuration. By default gcc built-in sync functions are used,
if available and 'smp' configuration otherwise.")
MARK_AS_ADVANCED(WITH_ATOMIC_OPS MY_ATOMIC_MODE_RWLOCK MY_ATOMIC_MODE_DUMMY)
SET(WITH_ATOMIC_OPS "${WITH_ATOMIC_OPS}" CACHE STRING "Implement atomic operations using atomic CPU instructions for multi-processor (smp) or uniprocessor (up) configuration. By default gcc built-in sync functions are used, if available and 'smp' configuration otherwise.")
MARK_AS_ADVANCED(WITH_ATOMIC_OPS MY_ATOMIC_MODE_DUMMY)
IF(WITH_VALGRIND)
SET(HAVE_valgrind 1)

View File

@@ -51,19 +51,4 @@
# endif
#endif
#if defined(make_atomic_cas_body)
/*
Type not used so minimal size (empty struct has different size between C
and C++, zero-length array is gcc-specific).
*/
typedef char my_atomic_rwlock_t __attribute__ ((unused));
#define my_atomic_rwlock_destroy(name)
#define my_atomic_rwlock_init(name)
#define my_atomic_rwlock_rdlock(name)
#define my_atomic_rwlock_wrlock(name)
#define my_atomic_rwlock_rdunlock(name)
#define my_atomic_rwlock_wrunlock(name)
#endif
#endif /* ATOMIC_NOLOCK_INCLUDED */
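The hunk above shows why deleting these macros costs nothing on mainstream platforms: whenever a native CAS was available, my_atomic_rwlock_t was a one-byte dummy and every lock/unlock macro expanded to nothing, so callers could keep one calling convention at zero cost. A tiny illustrative sketch of that convention, with invented names rather than the real header:

/* Sketch only, invented names: no-op "lock" macros that generate zero code
   on platforms that have real atomic instructions. */
#include <stdio.h>

typedef char fake_rwlock_t;                          /* 1 byte, never touched */
#define fake_rwlock_wrlock(name)   do {} while (0)   /* expands to nothing    */
#define fake_rwlock_wrunlock(name) do {} while (0)   /* expands to nothing    */

int main(void)
{
  fake_rwlock_t lock_placeholder = 0;
  (void) lock_placeholder;                  /* exists only to mirror the API  */
  int v = 41;
  fake_rwlock_wrlock(&lock_placeholder);    /* no code emitted                */
  v++;                                      /* stands in for the atomic op    */
  fake_rwlock_wrunlock(&lock_placeholder);  /* no code emitted                */
  printf("%d\n", v);                        /* prints: 42                     */
  return 0;
}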

View File

@@ -1,63 +0,0 @@
#ifndef ATOMIC_RWLOCK_INCLUDED
#define ATOMIC_RWLOCK_INCLUDED
/* Copyright (c) 2006 MySQL AB, 2009 Sun Microsystems, Inc.
Use is subject to license terms.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
#define MY_ATOMIC_MODE_RWLOCKS 1
#ifdef MY_ATOMIC_MODE_DUMMY
/*
the following can never be enabled by ./configure, one needs to put a #define in
a source to trigger the following warning. The resulting code will be broken,
it only makes sense to do it to see how test_atomic detects broken
implementations (another way is to run a UP build on an SMP box).
*/
#warning MY_ATOMIC_MODE_DUMMY and MY_ATOMIC_MODE_RWLOCKS are incompatible
typedef char my_atomic_rwlock_t;
#define my_atomic_rwlock_destroy(name)
#define my_atomic_rwlock_init(name)
#define my_atomic_rwlock_rdlock(name)
#define my_atomic_rwlock_wrlock(name)
#define my_atomic_rwlock_rdunlock(name)
#define my_atomic_rwlock_wrunlock(name)
#define MY_ATOMIC_MODE "dummy (non-atomic)"
#else /* not MY_ATOMIC_MODE_DUMMY */
typedef struct {pthread_mutex_t rw;} my_atomic_rwlock_t;
#define my_atomic_rwlock_destroy(name) pthread_mutex_destroy(& (name)->rw)
#define my_atomic_rwlock_init(name) pthread_mutex_init(& (name)->rw, 0)
#define my_atomic_rwlock_rdlock(name) pthread_mutex_lock(& (name)->rw)
#define my_atomic_rwlock_wrlock(name) pthread_mutex_lock(& (name)->rw)
#define my_atomic_rwlock_rdunlock(name) pthread_mutex_unlock(& (name)->rw)
#define my_atomic_rwlock_wrunlock(name) pthread_mutex_unlock(& (name)->rw)
#define MY_ATOMIC_MODE "mutex"
#ifndef MY_ATOMIC_MODE_RWLOCKS
#define MY_ATOMIC_MODE_RWLOCKS 1
#endif
#endif
#define make_atomic_add_body(S) int ## S sav; sav= *a; *a+= v; v=sav;
#define make_atomic_fas_body(S) int ## S sav; sav= *a; *a= v; v=sav;
#define make_atomic_cas_body(S) if ((ret= (*a == *cmp))) *a= set; else *cmp=*a;
#define make_atomic_load_body(S) ret= *a;
#define make_atomic_store_body(S) *a= v;
#endif /* ATOMIC_RWLOCK_INCLUDED */
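For readers unfamiliar with the convention, the make_atomic_*_body macros in the deleted file above defined the operation bodies that ran while the caller already held the my_atomic_rwlock_t mutex. The CAS body in particular writes the observed value back into *cmp on failure, the same contract as C11 compare-exchange. A standalone sketch of that emulated CAS, using an invented function name and a plain pthread mutex:

/* Sketch of the mutex-emulated CAS the deleted rwlock.h implemented:
   on success store 'set', on failure copy the current value into *cmp. */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t emu_lock = PTHREAD_MUTEX_INITIALIZER;

static int cas32_emulated(int32_t *a, int32_t *cmp, int32_t set)
{
  int ret;
  pthread_mutex_lock(&emu_lock);
  if ((ret= (*a == *cmp)))
    *a= set;            /* matched: install the new value            */
  else
    *cmp= *a;           /* mismatched: report what we actually saw   */
  pthread_mutex_unlock(&emu_lock);
  return ret;
}

int main(void)
{
  int32_t value= 10, expected= 10;
  printf("%d %d\n", cas32_emulated(&value, &expected, 20), value);    /* 1 20 */
  expected= 10;
  printf("%d %d\n", cas32_emulated(&value, &expected, 30), expected); /* 0 20 */
  return 0;
}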

View File

@@ -13,52 +13,13 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
#ifndef _lf_h
#define _lf_h
#ifndef INCLUDE_LF_INCLUDED
#define INCLUDE_LF_INCLUDED
#include <my_atomic.h>
C_MODE_START
/*
Helpers to define both func() and _func(), where
func() is a _func() protected by my_atomic_rwlock_wrlock()
*/
#define lock_wrap(f, t, proto_args, args, lock) \
t _ ## f proto_args; \
static inline t f proto_args \
{ \
t ret; \
my_atomic_rwlock_wrlock(lock); \
ret= _ ## f args; \
my_atomic_rwlock_wrunlock(lock); \
return ret; \
}
#define lock_wrap_void(f, proto_args, args, lock) \
void _ ## f proto_args; \
static inline void f proto_args \
{ \
my_atomic_rwlock_wrlock(lock); \
_ ## f args; \
my_atomic_rwlock_wrunlock(lock); \
}
#define nolock_wrap(f, t, proto_args, args) \
t _ ## f proto_args; \
static inline t f proto_args \
{ \
return _ ## f args; \
}
#define nolock_wrap_void(f, proto_args, args) \
void _ ## f proto_args; \
static inline void f proto_args \
{ \
_ ## f args; \
}
/*
wait-free dynamic array, see lf_dynarray.c
@@ -71,7 +32,6 @@ static inline void f proto_args \
typedef struct {
void * volatile level[LF_DYNARRAY_LEVELS];
uint size_of_element;
my_atomic_rwlock_t lock;
} LF_DYNARRAY;
typedef int (*lf_dynarray_func)(void *, void *);
@@ -79,16 +39,9 @@ typedef int (*lf_dynarray_func)(void *, void *);
void lf_dynarray_init(LF_DYNARRAY *array, uint element_size);
void lf_dynarray_destroy(LF_DYNARRAY *array);
nolock_wrap(lf_dynarray_value, void *,
(LF_DYNARRAY *array, uint idx),
(array, idx))
lock_wrap(lf_dynarray_lvalue, void *,
(LF_DYNARRAY *array, uint idx),
(array, idx),
&array->lock)
nolock_wrap(lf_dynarray_iterate, int,
(LF_DYNARRAY *array, lf_dynarray_func func, void *arg),
(array, func, arg))
void *lf_dynarray_value(LF_DYNARRAY *array, uint idx);
void *lf_dynarray_lvalue(LF_DYNARRAY *array, uint idx);
int lf_dynarray_iterate(LF_DYNARRAY *array, lf_dynarray_func func, void *arg);
/*
pin manager for memory allocator, lf_alloc-pin.c
@@ -122,49 +75,25 @@ typedef struct {
-sizeof(void *)*(LF_PINBOX_PINS+1)];
} LF_PINS;
/*
shortcut macros to do an atomic_wrlock on a structure that uses pins
(e.g. lf_hash).
*/
#define lf_rwlock_by_pins(PINS) \
my_atomic_rwlock_wrlock(&(PINS)->pinbox->pinarray.lock)
#define lf_rwunlock_by_pins(PINS) \
my_atomic_rwlock_wrunlock(&(PINS)->pinbox->pinarray.lock)
/* compile-time assert to make sure we have enough pins. */
#define _lf_pin(PINS, PIN, ADDR) \
#define lf_pin(PINS, PIN, ADDR) \
do { \
compile_time_assert(PIN < LF_PINBOX_PINS); \
my_atomic_storeptr(&(PINS)->pin[PIN], (ADDR)); \
} while(0)
#define _lf_unpin(PINS, PIN) _lf_pin(PINS, PIN, NULL)
#define lf_pin(PINS, PIN, ADDR) \
do { \
lf_rwlock_by_pins(PINS); \
_lf_pin(PINS, PIN, ADDR); \
lf_rwunlock_by_pins(PINS); \
} while (0)
#define lf_unpin(PINS, PIN) lf_pin(PINS, PIN, NULL)
#define _lf_assert_pin(PINS, PIN) assert((PINS)->pin[PIN] != 0)
#define _lf_assert_unpin(PINS, PIN) assert((PINS)->pin[PIN] == 0)
#define lf_unpin(PINS, PIN) lf_pin(PINS, PIN, NULL)
#define lf_assert_pin(PINS, PIN) assert((PINS)->pin[PIN] != 0)
#define lf_assert_unpin(PINS, PIN) assert((PINS)->pin[PIN] == 0)
void lf_pinbox_init(LF_PINBOX *pinbox, uint free_ptr_offset,
lf_pinbox_free_func *free_func, void * free_func_arg);
void lf_pinbox_destroy(LF_PINBOX *pinbox);
lock_wrap(lf_pinbox_get_pins, LF_PINS *,
(LF_PINBOX *pinbox),
(pinbox),
&pinbox->pinarray.lock)
lock_wrap_void(lf_pinbox_put_pins,
(LF_PINS *pins),
(pins),
&pins->pinbox->pinarray.lock)
lock_wrap_void(lf_pinbox_free,
(LF_PINS *pins, void *addr),
(pins, addr),
&pins->pinbox->pinarray.lock)
LF_PINS *lf_pinbox_get_pins(LF_PINBOX *pinbox);
void lf_pinbox_put_pins(LF_PINS *pins);
void lf_pinbox_free(LF_PINS *pins, void *addr);
/*
memory allocator, lf_alloc-pin.c
@@ -184,20 +113,14 @@ void lf_alloc_destroy(LF_ALLOCATOR *allocator);
uint lf_alloc_pool_count(LF_ALLOCATOR *allocator);
/*
shortcut macros to access underlying pinbox functions from an LF_ALLOCATOR
see _lf_pinbox_get_pins() and _lf_pinbox_put_pins()
see lf_pinbox_get_pins() and lf_pinbox_put_pins()
*/
#define _lf_alloc_free(PINS, PTR) _lf_pinbox_free((PINS), (PTR))
#define lf_alloc_free(PINS, PTR) lf_pinbox_free((PINS), (PTR))
#define _lf_alloc_get_pins(A) _lf_pinbox_get_pins(&(A)->pinbox)
#define lf_alloc_get_pins(A) lf_pinbox_get_pins(&(A)->pinbox)
#define _lf_alloc_put_pins(PINS) _lf_pinbox_put_pins(PINS)
#define lf_alloc_put_pins(PINS) lf_pinbox_put_pins(PINS)
#define lf_alloc_direct_free(ALLOC, ADDR) my_free((ADDR))
lock_wrap(lf_alloc_new, void *,
(LF_PINS *pins),
(pins),
&pins->pinbox->pinarray.lock)
void *lf_alloc_new(LF_PINS *pins);
C_MODE_END
@@ -239,22 +162,15 @@ int lf_hash_iterate(LF_HASH *hash, LF_PINS *pins,
my_hash_walk_action action, void *argument);
/*
shortcut macros to access underlying pinbox functions from an LF_HASH
see _lf_pinbox_get_pins() and _lf_pinbox_put_pins()
see lf_pinbox_get_pins() and lf_pinbox_put_pins()
*/
#define _lf_hash_get_pins(HASH) _lf_alloc_get_pins(&(HASH)->alloc)
#define lf_hash_get_pins(HASH) lf_alloc_get_pins(&(HASH)->alloc)
#define _lf_hash_put_pins(PINS) _lf_pinbox_put_pins(PINS)
#define lf_hash_put_pins(PINS) lf_pinbox_put_pins(PINS)
#define lf_hash_search_unpin(PINS) lf_unpin((PINS), 2)
/*
cleanup
*/
#undef lock_wrap_void
#undef lock_wrap
#undef nolock_wrap_void
#undef nolock_wrap
C_MODE_END
#endif
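To make the deleted lock_wrap() machinery above concrete: each invocation generated a prototype for the underscore-prefixed worker plus an inline wrapper that took the dynarray's rwlock around the call; after this commit only the plain functions remain. A reduced sketch of that expansion, with invented names (do_work) and a pthread mutex standing in for my_atomic_rwlock_t:

/* Sketch of what the removed lock_wrap() macro expanded to, using invented
   names and a pthread mutex in place of my_atomic_rwlock_t. */
#include <pthread.h>
#include <stdio.h>

#define lock_wrap_sketch(f, t, proto_args, args, lock) \
  t _ ## f proto_args;                                 \
  static inline t f proto_args                         \
  {                                                    \
    t ret;                                             \
    pthread_mutex_lock(lock);                          \
    ret= _ ## f args;                                  \
    pthread_mutex_unlock(lock);                        \
    return ret;                                        \
  }

static pthread_mutex_t work_lock = PTHREAD_MUTEX_INITIALIZER;

/* Declares _do_work() and defines do_work() as its lock-taking wrapper. */
lock_wrap_sketch(do_work, int, (int x), (x), &work_lock)

/* The underscore version is the "real" implementation, lock-free itself. */
int _do_work(int x) { return x * 2; }

int main(void)
{
  printf("%d\n", do_work(21));   /* prints: 42 */
  return 0;
}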

View File

@@ -100,20 +100,10 @@
acquire-release operation, and additionally has
sequentially-consistent operation ordering.
NOTE These operations are not always atomic, so they must always be
enclosed in my_atomic_rwlock_rdlock(lock)/my_atomic_rwlock_rdunlock(lock)
or my_atomic_rwlock_wrlock(lock)/my_atomic_rwlock_wrunlock(lock).
Hint: if a code block makes intensive use of atomic ops, it makes sense
to take/release rwlock once for the whole block, not for every statement.
On architectures where these operations are really atomic, rwlocks will
be optimized away.
8- and 16-bit atomics aren't implemented for windows (see generic-msvc.h),
but can be added, if necessary.
*/
#ifndef my_atomic_rwlock_init
#define intptr void *
/**
Currently we don't support 8-bit and 16-bit operations.
@@ -121,16 +111,14 @@
*/
#undef MY_ATOMIC_HAS_8_16
#ifndef MY_ATOMIC_MODE_RWLOCKS
/*
* Attempt to do atomic ops without locks
*/
#include "atomic/nolock.h"
#endif
#ifndef make_atomic_cas_body
/* nolock.h was not able to generate even a CAS function, fall back */
#include "atomic/rwlock.h"
#error atomic ops for this platform are not implemented
#endif
/* define missing functions by using the already generated ones */
@@ -340,8 +328,6 @@ make_atomic_store(ptr)
#define MY_ATOMIC_NOT_1CPU 1
extern int my_atomic_initialize();
#endif
#ifdef __ATOMIC_SEQ_CST
#define MY_MEMORY_ORDER_RELAXED __ATOMIC_RELAXED
#define MY_MEMORY_ORDER_CONSUME __ATOMIC_CONSUME

View File

@@ -105,7 +105,7 @@
#define LF_PINBOX_MAX_PINS 65536
static void _lf_pinbox_real_free(LF_PINS *pins);
static void lf_pinbox_real_free(LF_PINS *pins);
/*
Initialize a pinbox. Normally called from lf_alloc_init.
@@ -144,7 +144,7 @@ void lf_pinbox_destroy(LF_PINBOX *pinbox)
It is assumed that pins belong to a thread and are not transferable
between threads.
*/
LF_PINS *_lf_pinbox_get_pins(LF_PINBOX *pinbox)
LF_PINS *lf_pinbox_get_pins(LF_PINBOX *pinbox)
{
struct st_my_thread_var *var;
uint32 pins, next, top_ver;
@@ -171,12 +171,12 @@ LF_PINS *_lf_pinbox_get_pins(LF_PINBOX *pinbox)
note that the first allocated element has index 1 (pins==1).
index 0 is reserved to mean "NULL pointer"
*/
el= (LF_PINS *)_lf_dynarray_lvalue(&pinbox->pinarray, pins);
el= (LF_PINS *)lf_dynarray_lvalue(&pinbox->pinarray, pins);
if (unlikely(!el))
return 0;
break;
}
el= (LF_PINS *)_lf_dynarray_value(&pinbox->pinarray, pins);
el= (LF_PINS *)lf_dynarray_value(&pinbox->pinarray, pins);
next= el->link;
} while (!my_atomic_cas32((int32 volatile*) &pinbox->pinstack_top_ver,
(int32*) &top_ver,
@@ -206,7 +206,7 @@ LF_PINS *_lf_pinbox_get_pins(LF_PINBOX *pinbox)
empty the purgatory (XXX deadlock warning below!),
push LF_PINS structure to a stack
*/
void _lf_pinbox_put_pins(LF_PINS *pins)
void lf_pinbox_put_pins(LF_PINS *pins)
{
LF_PINBOX *pinbox= pins->pinbox;
uint32 top_ver, nr;
@@ -223,19 +223,15 @@ void _lf_pinbox_put_pins(LF_PINS *pins)
/*
XXX this will deadlock if other threads will wait for
the caller to do something after _lf_pinbox_put_pins(),
the caller to do something after lf_pinbox_put_pins(),
and they would have pinned addresses that the caller wants to free.
Thus: only free pins when all work is done and nobody can wait for you!!!
*/
while (pins->purgatory_count)
{
_lf_pinbox_real_free(pins);
lf_pinbox_real_free(pins);
if (pins->purgatory_count)
{
my_atomic_rwlock_wrunlock(&pins->pinbox->pinarray.lock);
pthread_yield();
my_atomic_rwlock_wrlock(&pins->pinbox->pinarray.lock);
}
}
top_ver= pinbox->pinstack_top_ver;
do
@@ -265,14 +261,14 @@ static int ptr_cmp(void **a, void **b)
Free an object allocated via pinbox allocator
DESCRIPTION
add an object to purgatory. if necessary, call _lf_pinbox_real_free()
add an object to purgatory. if necessary, call lf_pinbox_real_free()
to actually free something.
*/
void _lf_pinbox_free(LF_PINS *pins, void *addr)
void lf_pinbox_free(LF_PINS *pins, void *addr)
{
add_to_purgatory(pins, addr);
if (pins->purgatory_count % LF_PURGATORY_SIZE == 0)
_lf_pinbox_real_free(pins);
lf_pinbox_real_free(pins);
}
struct st_harvester {
@@ -281,7 +277,7 @@ struct st_harvester {
};
/*
callback for _lf_dynarray_iterate:
callback for lf_dynarray_iterate:
scan all pins of all threads and accumulate all pins
*/
static int harvest_pins(LF_PINS *el, struct st_harvester *hv)
@@ -308,7 +304,7 @@ static int harvest_pins(LF_PINS *el, struct st_harvester *hv)
}
/*
callback for _lf_dynarray_iterate:
callback for lf_dynarray_iterate:
scan all pins of all threads and see if addr is present there
*/
static int match_pins(LF_PINS *el, void *addr)
@@ -334,7 +330,7 @@ static int match_pins(LF_PINS *el, void *addr)
/*
Scan the purgatory and free everything that can be freed
*/
static void _lf_pinbox_real_free(LF_PINS *pins)
static void lf_pinbox_real_free(LF_PINS *pins)
{
int npins;
void *list;
@@ -356,7 +352,7 @@ static void _lf_pinbox_real_free(LF_PINS *pins)
hv.granary= addr;
hv.npins= npins;
/* scan the dynarray and accumulate all pinned addresses */
_lf_dynarray_iterate(&pinbox->pinarray,
lf_dynarray_iterate(&pinbox->pinarray,
(lf_dynarray_func)harvest_pins, &hv);
npins= hv.granary-addr;
@@ -391,7 +387,7 @@ static void _lf_pinbox_real_free(LF_PINS *pins)
}
else /* no alloca - no cookie. linear search here */
{
if (_lf_dynarray_iterate(&pinbox->pinarray,
if (lf_dynarray_iterate(&pinbox->pinarray,
(lf_dynarray_func)match_pins, cur))
goto found;
}
@@ -413,7 +409,7 @@ found:
/* lock-free memory allocator for fixed-size objects */
/*
callback for _lf_pinbox_real_free to free a list of unpinned objects -
callback for lf_pinbox_real_free to free a list of unpinned objects -
add it back to the allocator stack
DESCRIPTION
@@ -495,7 +491,7 @@ void lf_alloc_destroy(LF_ALLOCATOR *allocator)
Pop an unused object from the stack or malloc it if the stack is empty.
pin[0] is used, it's removed on return.
*/
void *_lf_alloc_new(LF_PINS *pins)
void *lf_alloc_new(LF_PINS *pins)
{
LF_ALLOCATOR *allocator= (LF_ALLOCATOR *)(pins->pinbox->free_func_arg);
uchar *node;
@@ -504,7 +500,7 @@ void *_lf_alloc_new(LF_PINS *pins)
do
{
node= allocator->top;
_lf_pin(pins, 0, node);
lf_pin(pins, 0, node);
} while (node != allocator->top && LF_BACKOFF);
if (!node)
{
@@ -521,7 +517,7 @@ void *_lf_alloc_new(LF_PINS *pins)
(void *)&node, anext_node(node)))
break;
}
_lf_unpin(pins, 0);
lf_unpin(pins, 0);
return node;
}
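The lf_alloc_new() hunk above keeps the pin-then-revalidate idiom: publish the pointer in a pin slot, then re-read the shared top and retry if it moved, because the node could have been freed before the pin became visible. A compact C11 sketch of that loop with invented names (pin_slot, top, pin_top), not the real LF_PINS machinery:

/* Sketch of the pin-then-revalidate idiom (hazard-pointer style);
   all names here are invented for illustration. */
#include <stdatomic.h>
#include <stdio.h>

typedef struct node { struct node *next; int payload; } node_t;

static node_t item = { NULL, 42 };
static node_t *_Atomic top = &item;    /* shared lock-free stack head     */
static node_t *_Atomic pin_slot;       /* this thread's published "pin"   */

static node_t *pin_top(void)
{
  node_t *n;
  do
  {
    n = top;              /* read the shared pointer                      */
    pin_slot = n;         /* publish it so a reclaimer can see it         */
  } while (n != top);     /* head moved? the pin may have been published
                             too late and the node already freed: retry   */
  return n;               /* safe to dereference until we clear pin_slot  */
}

int main(void)
{
  node_t *n = pin_top();
  printf("%d\n", n ? n->payload : -1);   /* prints: 42 */
  pin_slot = NULL;                       /* unpin */
  return 0;
}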

View File

@@ -44,7 +44,6 @@ void lf_dynarray_init(LF_DYNARRAY *array, uint element_size)
{
bzero(array, sizeof(*array));
array->size_of_element= element_size;
my_atomic_rwlock_init(&array->lock);
}
static void recursive_free(void **alloc, int level)
@@ -68,7 +67,6 @@ void lf_dynarray_destroy(LF_DYNARRAY *array)
int i;
for (i= 0; i < LF_DYNARRAY_LEVELS; i++)
recursive_free(array->level[i], i);
my_atomic_rwlock_destroy(&array->lock);
}
static const ulong dynarray_idxes_in_prev_levels[LF_DYNARRAY_LEVELS]=
@@ -95,7 +93,7 @@ static const ulong dynarray_idxes_in_prev_level[LF_DYNARRAY_LEVELS]=
Returns a valid lvalue pointer to the element number 'idx'.
Allocates memory if necessary.
*/
void *_lf_dynarray_lvalue(LF_DYNARRAY *array, uint idx)
void *lf_dynarray_lvalue(LF_DYNARRAY *array, uint idx)
{
void * ptr, * volatile * ptr_ptr= 0;
int i;
@@ -148,7 +146,7 @@ void *_lf_dynarray_lvalue(LF_DYNARRAY *array, uint idx)
Returns a pointer to the element number 'idx'
or NULL if an element does not exist
*/
void *_lf_dynarray_value(LF_DYNARRAY *array, uint idx)
void *lf_dynarray_value(LF_DYNARRAY *array, uint idx)
{
void * ptr, * volatile * ptr_ptr= 0;
int i;
@@ -189,14 +187,14 @@ static int recursive_iterate(LF_DYNARRAY *array, void *ptr, int level,
DESCRIPTION
lf_dynarray consists of a set of arrays, LF_DYNARRAY_LEVEL_LENGTH elements
each. _lf_dynarray_iterate() calls user-supplied function on every array
each. lf_dynarray_iterate() calls user-supplied function on every array
from the set. It is the fastest way to scan the array, faster than
for (i=0; i < N; i++) { func(_lf_dynarray_value(dynarray, i)); }
for (i=0; i < N; i++) { func(lf_dynarray_value(dynarray, i)); }
NOTE
if func() returns non-zero, the scan is aborted
*/
int _lf_dynarray_iterate(LF_DYNARRAY *array, lf_dynarray_func func, void *arg)
int lf_dynarray_iterate(LF_DYNARRAY *array, lf_dynarray_func func, void *arg)
{
int i, res;
for (i= 0; i < LF_DYNARRAY_LEVELS; i++)

View File

@@ -100,7 +100,7 @@ retry:
cursor->prev= (intptr *)head;
do { /* PTR() isn't necessary below, head is a dummy node */
cursor->curr= (LF_SLIST *)(*cursor->prev);
_lf_pin(pins, 1, cursor->curr);
lf_pin(pins, 1, cursor->curr);
} while (*cursor->prev != (intptr)cursor->curr && LF_BACKOFF);
for (;;)
@@ -115,7 +115,7 @@ retry:
do {
link= cursor->curr->link;
cursor->next= PTR(link);
_lf_pin(pins, 0, cursor->next);
lf_pin(pins, 0, cursor->next);
} while (link != cursor->curr->link && LF_BACKOFF);
if (!DELETED(link))
@@ -135,7 +135,7 @@ retry:
cursor->prev= &(cursor->curr->link);
if (!(cur_hashnr & 1)) /* dummy node */
head= (LF_SLIST **)cursor->prev;
_lf_pin(pins, 2, cursor->curr);
lf_pin(pins, 2, cursor->curr);
}
else
{
@@ -145,12 +145,12 @@ retry:
*/
if (my_atomic_casptr((void **) cursor->prev,
(void **) &cursor->curr, cursor->next) && LF_BACKOFF)
_lf_alloc_free(pins, cursor->curr);
lf_alloc_free(pins, cursor->curr);
else
goto retry;
}
cursor->curr= cursor->next;
_lf_pin(pins, 1, cursor->curr);
lf_pin(pins, 1, cursor->curr);
}
}
@@ -195,9 +195,9 @@ static LF_SLIST *linsert(LF_SLIST * volatile *head, CHARSET_INFO *cs,
}
}
}
_lf_unpin(pins, 0);
_lf_unpin(pins, 1);
_lf_unpin(pins, 2);
lf_unpin(pins, 0);
lf_unpin(pins, 1);
lf_unpin(pins, 2);
/*
Note that cursor.curr is not pinned here and the pointer is unreliable,
the object may disappear anytime. But if it points to a dummy node, the
@@ -242,7 +242,7 @@ static int ldelete(LF_SLIST * volatile *head, CHARSET_INFO *cs, uint32 hashnr,
/* and remove it from the list */
if (my_atomic_casptr((void **)cursor.prev,
(void **)(char*)&cursor.curr, cursor.next))
_lf_alloc_free(pins, cursor.curr);
lf_alloc_free(pins, cursor.curr);
else
{
/*
@@ -258,9 +258,9 @@ static int ldelete(LF_SLIST * volatile *head, CHARSET_INFO *cs, uint32 hashnr,
}
}
}
_lf_unpin(pins, 0);
_lf_unpin(pins, 1);
_lf_unpin(pins, 2);
lf_unpin(pins, 0);
lf_unpin(pins, 1);
lf_unpin(pins, 2);
return res;
}
@@ -284,11 +284,11 @@ static LF_SLIST *lsearch(LF_SLIST * volatile *head, CHARSET_INFO *cs,
CURSOR cursor;
int res= lfind(head, cs, hashnr, key, keylen, &cursor, pins, 0);
if (res)
_lf_pin(pins, 2, cursor.curr);
lf_pin(pins, 2, cursor.curr);
else
_lf_unpin(pins, 2);
_lf_unpin(pins, 1);
_lf_unpin(pins, 0);
lf_unpin(pins, 2);
lf_unpin(pins, 1);
lf_unpin(pins, 0);
return res ? cursor.curr : 0;
}
@@ -352,7 +352,7 @@ void lf_hash_init(LF_HASH *hash, uint element_size, uint flags,
void lf_hash_destroy(LF_HASH *hash)
{
LF_SLIST *el, **head= (LF_SLIST **)_lf_dynarray_value(&hash->array, 0);
LF_SLIST *el, **head= (LF_SLIST **)lf_dynarray_value(&hash->array, 0);
if (head)
{
@@ -389,15 +389,14 @@ int lf_hash_insert(LF_HASH *hash, LF_PINS *pins, const void *data)
int csize, bucket, hashnr;
LF_SLIST *node, * volatile *el;
lf_rwlock_by_pins(pins);
node= (LF_SLIST *)_lf_alloc_new(pins);
node= (LF_SLIST *)lf_alloc_new(pins);
if (unlikely(!node))
return -1;
memcpy(node+1, data, hash->element_size);
node->key= hash_key(hash, (uchar *)(node+1), &node->keylen);
hashnr= calc_hash(hash, node->key, node->keylen);
bucket= hashnr % hash->size;
el= _lf_dynarray_lvalue(&hash->array, bucket);
el= lf_dynarray_lvalue(&hash->array, bucket);
if (unlikely(!el))
return -1;
if (*el == NULL && unlikely(initialize_bucket(hash, el, bucket, pins)))
@@ -405,14 +404,12 @@ int lf_hash_insert(LF_HASH *hash, LF_PINS *pins, const void *data)
node->hashnr= my_reverse_bits(hashnr) | 1; /* normal node */
if (linsert(el, hash->charset, node, pins, hash->flags))
{
_lf_alloc_free(pins, node);
lf_rwunlock_by_pins(pins);
lf_alloc_free(pins, node);
return 1;
}
csize= hash->size;
if ((my_atomic_add32(&hash->count, 1)+1.0) / csize > MAX_LOAD)
my_atomic_cas32(&hash->size, &csize, csize*2);
lf_rwunlock_by_pins(pins);
return 0;
}
@@ -432,11 +429,10 @@ int lf_hash_delete(LF_HASH *hash, LF_PINS *pins, const void *key, uint keylen)
LF_SLIST * volatile *el;
uint bucket, hashnr= calc_hash(hash, (uchar *)key, keylen);
lf_rwlock_by_pins(pins);
/* hide OOM errors - if we cannot initialize a bucket, try the previous one */
for (bucket= hashnr % hash->size; ;bucket= my_clear_highest_bit(bucket))
{
el= _lf_dynarray_lvalue(&hash->array, bucket);
el= lf_dynarray_lvalue(&hash->array, bucket);
if (el && (*el || initialize_bucket(hash, el, bucket, pins) == 0))
break;
if (unlikely(bucket == 0))
@@ -445,11 +441,9 @@ int lf_hash_delete(LF_HASH *hash, LF_PINS *pins, const void *key, uint keylen)
if (ldelete(el, hash->charset, my_reverse_bits(hashnr) | 1,
(uchar *)key, keylen, pins))
{
lf_rwunlock_by_pins(pins);
return 1;
}
my_atomic_add32(&hash->count, -1);
lf_rwunlock_by_pins(pins);
return 0;
}
@@ -469,11 +463,10 @@ void *lf_hash_search_using_hash_value(LF_HASH *hash, LF_PINS *pins,
LF_SLIST * volatile *el, *found;
uint bucket;
lf_rwlock_by_pins(pins);
/* hide OOM errors - if we cannot initialize a bucket, try the previous one */
for (bucket= hashnr % hash->size; ;bucket= my_clear_highest_bit(bucket))
{
el= _lf_dynarray_lvalue(&hash->array, bucket);
el= lf_dynarray_lvalue(&hash->array, bucket);
if (el && (*el || initialize_bucket(hash, el, bucket, pins) == 0))
break;
if (unlikely(bucket == 0))
@@ -481,7 +474,6 @@ void *lf_hash_search_using_hash_value(LF_HASH *hash, LF_PINS *pins,
}
found= lsearch(el, hash->charset, my_reverse_bits(hashnr) | 1,
(uchar *)key, keylen, pins);
lf_rwunlock_by_pins(pins);
return found ? found+1 : 0;
}
@@ -504,8 +496,7 @@ int lf_hash_iterate(LF_HASH *hash, LF_PINS *pins,
int res;
LF_SLIST * volatile *el;
lf_rwlock_by_pins(pins);
el= _lf_dynarray_lvalue(&hash->array, bucket);
el= lf_dynarray_lvalue(&hash->array, bucket);
if (unlikely(!el))
return 0; /* if there's no bucket==0, the hash is empty */
if (*el == NULL && unlikely(initialize_bucket(hash, el, bucket, pins)))
@@ -513,10 +504,9 @@ int lf_hash_iterate(LF_HASH *hash, LF_PINS *pins,
res= lfind(el, 0, 0, (uchar*)argument, 0, &cursor, pins, action);
_lf_unpin(pins, 2);
_lf_unpin(pins, 1);
_lf_unpin(pins, 0);
lf_rwunlock_by_pins(pins);
lf_unpin(pins, 2);
lf_unpin(pins, 1);
lf_unpin(pins, 0);
return res;
}
@@ -540,7 +530,7 @@ static int initialize_bucket(LF_HASH *hash, LF_SLIST * volatile *node,
uint parent= my_clear_highest_bit(bucket);
LF_SLIST *dummy= (LF_SLIST *)my_malloc(sizeof(LF_SLIST), MYF(MY_WME));
LF_SLIST **tmp= 0, *cur;
LF_SLIST * volatile *el= _lf_dynarray_lvalue(&hash->array, parent);
LF_SLIST * volatile *el= lf_dynarray_lvalue(&hash->array, parent);
if (unlikely(!el || !dummy))
return -1;
if (*el == NULL && bucket &&

View File

@@ -192,19 +192,12 @@ uint32 wt_wait_stats[WT_WAIT_STATS+1];
uint32 wt_cycle_stats[2][WT_CYCLE_STATS+1];
uint32 wt_success_stats;
static my_atomic_rwlock_t cycle_stats_lock, wait_stats_lock, success_stats_lock;
#ifdef HAVE_PSI_INTERFACE
extern PSI_cond_key key_WT_RESOURCE_cond;
#endif
#ifdef SAFE_STATISTICS
#define incr(VAR, LOCK) \
do { \
my_atomic_rwlock_wrlock(&(LOCK)); \
my_atomic_add32(&(VAR), 1); \
my_atomic_rwlock_wrunlock(&(LOCK)); \
} while(0)
#define incr(VAR, LOCK) do { my_atomic_add32(&(VAR), 1); } while(0)
#else
#define incr(VAR,LOCK) do { (VAR)++; } while(0)
#endif
@@ -458,9 +451,6 @@ void wt_init()
DBUG_ASSERT(i == 0 || wt_wait_table[i-1] != wt_wait_table[i]);
}
}
my_atomic_rwlock_init(&cycle_stats_lock);
my_atomic_rwlock_init(&success_stats_lock);
my_atomic_rwlock_init(&wait_stats_lock);
wt_init_done= 1;
DBUG_VOID_RETURN;
}
@@ -473,9 +463,6 @@ void wt_end()
DBUG_ASSERT(reshash.count == 0);
lf_hash_destroy(&reshash);
my_atomic_rwlock_destroy(&cycle_stats_lock);
my_atomic_rwlock_destroy(&success_stats_lock);
my_atomic_rwlock_destroy(&wait_stats_lock);
reshash.alloc.constructor= NULL;
wt_init_done= 0;
DBUG_VOID_RETURN;

View File

@@ -149,34 +149,22 @@ class time_collector
{
public:
time_collector(utility& u) : m_utility(&u)
{
my_atomic_rwlock_init(&time_collector_lock);
}
{ }
~time_collector()
{
my_atomic_rwlock_destroy(&time_collector_lock);
}
{ }
uint32 count(uint index)
{
my_atomic_rwlock_rdlock(&time_collector_lock);
uint32 result= my_atomic_load32((int32*)&m_count[index]);
my_atomic_rwlock_rdunlock(&time_collector_lock);
return result;
return my_atomic_load32((int32*)&m_count[index]);
}
uint64 total(uint index)
{
my_atomic_rwlock_rdlock(&time_collector_lock);
uint64 result= my_atomic_load64((int64*)&m_total[index]);
my_atomic_rwlock_rdunlock(&time_collector_lock);
return result;
return my_atomic_load64((int64*)&m_total[index]);
}
public:
void flush()
{
my_atomic_rwlock_wrlock(&time_collector_lock);
memset((void*)&m_count,0,sizeof(m_count));
memset((void*)&m_total,0,sizeof(m_total));
my_atomic_rwlock_wrunlock(&time_collector_lock);
}
void collect(uint64 time)
{
@@ -185,20 +173,14 @@ public:
{
if(m_utility->bound(i) > time)
{
my_atomic_rwlock_wrlock(&time_collector_lock);
my_atomic_add32((int32*)(&m_count[i]), 1);
my_atomic_add64((int64*)(&m_total[i]), time);
my_atomic_rwlock_wrunlock(&time_collector_lock);
break;
}
}
}
private:
utility* m_utility;
/* The lock for atomic operations on m_count and m_total. Only actually
used on architectures that do not have atomic implementation of atomic
operations. */
my_atomic_rwlock_t time_collector_lock;
uint32 m_count[OVERALL_POWER_COUNT + 1];
uint64 m_total[OVERALL_POWER_COUNT + 1];
};

View File

@@ -134,7 +134,7 @@ post_init_event_thread(THD *thd)
return TRUE;
}
thread_safe_increment32(&thread_count, &thread_count_lock);
thread_safe_increment32(&thread_count);
mysql_mutex_lock(&LOCK_thread_count);
threads.append(thd);
mysql_mutex_unlock(&LOCK_thread_count);

View File

@@ -4187,7 +4187,6 @@ int MYSQL_BIN_LOG::purge_first_log(Relay_log_info* rli, bool included)
included= 1;
to_purge_if_included= my_strdup(ir->name, MYF(0));
}
my_atomic_rwlock_destroy(&ir->inuse_relaylog_atomic_lock);
my_free(ir);
ir= next;
}

View File

@@ -507,11 +507,6 @@ ulonglong query_cache_size=0;
ulong query_cache_limit=0;
ulong executed_events=0;
query_id_t global_query_id;
my_atomic_rwlock_t global_query_id_lock;
my_atomic_rwlock_t thread_running_lock;
my_atomic_rwlock_t thread_count_lock;
my_atomic_rwlock_t statistics_lock;
my_atomic_rwlock_t slave_executed_entries_lock;
ulong aborted_threads, aborted_connects;
ulong delayed_insert_timeout, delayed_insert_limit, delayed_queue_size;
ulong delayed_insert_threads, delayed_insert_writes, delayed_rows_in_use;
@@ -2152,11 +2147,6 @@ void clean_up(bool print_message)
/* Tell main we are ready */
logger.cleanup_end();
sys_var_end();
my_atomic_rwlock_destroy(&global_query_id_lock);
my_atomic_rwlock_destroy(&thread_running_lock);
my_atomic_rwlock_destroy(&thread_count_lock);
my_atomic_rwlock_destroy(&statistics_lock);
my_atomic_rwlock_destroy(&slave_executed_entries_lock);
free_charsets();
mysql_mutex_lock(&LOCK_thread_count);
DBUG_PRINT("quit", ("got thread count lock"));
@@ -2829,7 +2819,7 @@ void delete_running_thd(THD *thd)
delete thd;
dec_thread_running();
thread_safe_decrement32(&thread_count, &thread_count_lock);
thread_safe_decrement32(&thread_count);
if (!thread_count)
{
mysql_mutex_lock(&LOCK_thread_count);
@@ -2871,7 +2861,7 @@ void unlink_thd(THD *thd)
mysql_mutex_unlock(&LOCK_thread_count);
delete thd;
thread_safe_decrement32(&thread_count, &thread_count_lock);
thread_safe_decrement32(&thread_count);
DBUG_VOID_RETURN;
}
@@ -6221,7 +6211,7 @@ void create_thread_to_handle_connection(THD *thd)
thd->unlink();
mysql_mutex_unlock(&LOCK_thread_count);
delete thd;
thread_safe_decrement32(&thread_count, &thread_count_lock);
thread_safe_decrement32(&thread_count);
return;
/* purecov: end */
}
@@ -6275,7 +6265,7 @@ static void create_new_thread(THD *thd)
mysql_mutex_unlock(&LOCK_connection_count);
thread_safe_increment32(&thread_count, &thread_count_lock);
thread_safe_increment32(&thread_count);
/* Start a new thread to handle connection. */
mysql_mutex_lock(&LOCK_thread_count);
@@ -8477,11 +8467,6 @@ static int mysql_init_variables(void)
denied_connections= 0;
executed_events= 0;
global_query_id= thread_id= 1L;
my_atomic_rwlock_init(&global_query_id_lock);
my_atomic_rwlock_init(&thread_running_lock);
my_atomic_rwlock_init(&thread_count_lock);
my_atomic_rwlock_init(&statistics_lock);
my_atomic_rwlock_init(&slave_executed_entries_lock);
strmov(server_version, MYSQL_SERVER_VERSION);
threads.empty();
thread_cache.empty();

View File

@@ -20,7 +20,7 @@
#include "sql_bitmap.h" /* Bitmap */
#include "my_decimal.h" /* my_decimal */
#include "mysql_com.h" /* SERVER_VERSION_LENGTH */
#include "my_atomic.h" /* my_atomic_rwlock_t */
#include "my_atomic.h"
#include "mysql/psi/mysql_file.h" /* MYSQL_FILE */
#include "sql_list.h" /* I_List */
#include "sql_cmd.h"
@@ -538,8 +538,6 @@ extern mysql_cond_t COND_manager;
extern mysql_cond_t COND_slave_init;
extern int32 thread_running;
extern int32 thread_count;
extern my_atomic_rwlock_t thread_running_lock, thread_count_lock;
extern my_atomic_rwlock_t slave_executed_entries_lock;
extern char *opt_ssl_ca, *opt_ssl_capath, *opt_ssl_cert, *opt_ssl_cipher,
*opt_ssl_key, *opt_ssl_crl, *opt_ssl_crlpath;
@@ -627,28 +625,18 @@ enum enum_query_type
/* query_id */
typedef int64 query_id_t;
extern query_id_t global_query_id;
extern my_atomic_rwlock_t global_query_id_lock;
extern my_atomic_rwlock_t statistics_lock;
void unireg_end(void) __attribute__((noreturn));
/* increment query_id and return it. */
inline __attribute__((warn_unused_result)) query_id_t next_query_id()
{
query_id_t id;
my_atomic_rwlock_wrlock(&global_query_id_lock);
id= my_atomic_add64_explicit(&global_query_id, 1, MY_MEMORY_ORDER_RELAXED);
my_atomic_rwlock_wrunlock(&global_query_id_lock);
return (id);
return my_atomic_add64_explicit(&global_query_id, 1, MY_MEMORY_ORDER_RELAXED);
}
inline query_id_t get_query_id()
{
query_id_t id;
my_atomic_rwlock_wrlock(&global_query_id_lock);
id= my_atomic_load64_explicit(&global_query_id, MY_MEMORY_ORDER_RELAXED);
my_atomic_rwlock_wrunlock(&global_query_id_lock);
return id;
return my_atomic_load64_explicit(&global_query_id, MY_MEMORY_ORDER_RELAXED);
}
@@ -669,44 +657,34 @@ inline void table_case_convert(char * name, uint length)
name, length, name, length);
}
inline void thread_safe_increment32(int32 *value, my_atomic_rwlock_t *lock)
inline void thread_safe_increment32(int32 *value)
{
my_atomic_rwlock_wrlock(lock);
(void) my_atomic_add32_explicit(value, 1, MY_MEMORY_ORDER_RELAXED);
my_atomic_rwlock_wrunlock(lock);
}
inline void thread_safe_decrement32(int32 *value, my_atomic_rwlock_t *lock)
inline void thread_safe_decrement32(int32 *value)
{
my_atomic_rwlock_wrlock(lock);
(void) my_atomic_add32_explicit(value, -1, MY_MEMORY_ORDER_RELAXED);
my_atomic_rwlock_wrunlock(lock);
}
inline void thread_safe_increment64(int64 *value, my_atomic_rwlock_t *lock)
inline void thread_safe_increment64(int64 *value)
{
my_atomic_rwlock_wrlock(lock);
(void) my_atomic_add64_explicit(value, 1, MY_MEMORY_ORDER_RELAXED);
my_atomic_rwlock_wrunlock(lock);
}
inline void thread_safe_decrement64(int64 *value, my_atomic_rwlock_t *lock)
inline void thread_safe_decrement64(int64 *value)
{
my_atomic_rwlock_wrlock(lock);
(void) my_atomic_add64_explicit(value, -1, MY_MEMORY_ORDER_RELAXED);
my_atomic_rwlock_wrunlock(lock);
}
inline void
inc_thread_running()
inline void inc_thread_running()
{
thread_safe_increment32(&thread_running, &thread_running_lock);
thread_safe_increment32(&thread_running);
}
inline void
dec_thread_running()
inline void dec_thread_running()
{
thread_safe_decrement32(&thread_running, &thread_running_lock);
thread_safe_decrement32(&thread_running);
}
void set_server_version(void);
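next_query_id() and the thread_safe_*() helpers above now rely on my_atomic_add*_explicit alone, with relaxed memory order, instead of bracketing the operation with a global rwlock. A minimal sketch of that relaxed fetch-and-add counter pattern, written with GCC __atomic built-ins rather than the my_atomic wrappers and with an invented variable name, not the real global_query_id:

/* Sketch of a relaxed atomic counter in the style of next_query_id();
   query_id_sketch and next_id are invented names. */
#include <stdint.h>
#include <stdio.h>

static int64_t query_id_sketch = 1;

/* Fetch-and-add returns the value *before* the increment, matching
   my_atomic_add64_explicit(); relaxed order is enough for a pure counter. */
static int64_t next_id(void)
{
  return __atomic_fetch_add(&query_id_sketch, 1, __ATOMIC_RELAXED);
}

int main(void)
{
  printf("%lld\n", (long long) next_id());   /* prints: 1 */
  printf("%lld\n", (long long) next_id());   /* prints: 2 */
  return 0;
}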

View File

@@ -47,8 +47,7 @@ rpt_handle_event(rpl_parallel_thread::queued_event *qev,
/* Mutex will be released in apply_event_and_update_pos(). */
err= apply_event_and_update_pos(ev, thd, rgi, rpt);
thread_safe_increment64(&rli->executed_entries,
&slave_executed_entries_lock);
thread_safe_increment64(&rli->executed_entries);
/* ToDo: error handling. */
return err;
}
@@ -1193,9 +1192,7 @@ rpl_parallel_thread::inuse_relaylog_refcount_update()
inuse_relaylog *ir= accumulated_ir_last;
if (ir)
{
my_atomic_rwlock_wrlock(&ir->rli->inuse_relaylog_atomic_lock);
my_atomic_add64(&ir->dequeued_count, accumulated_ir_count);
my_atomic_rwlock_wrunlock(&ir->rli->inuse_relaylog_atomic_lock);
accumulated_ir_count= 0;
accumulated_ir_last= NULL;
}

View File

@@ -108,7 +108,6 @@ Relay_log_info::~Relay_log_info()
{
DBUG_ASSERT(cur->queued_count == cur->dequeued_count);
inuse_relaylog *next= cur->next;
my_atomic_rwlock_destroy(&cur->inuse_relaylog_atomic_lock);
my_free(cur);
cur= next;
}
@@ -1401,7 +1400,6 @@ Relay_log_info::alloc_inuse_relaylog(const char *name)
last_inuse_relaylog->next= ir;
}
last_inuse_relaylog= ir;
my_atomic_rwlock_init(&ir->inuse_relaylog_atomic_lock);
return 0;
}

View File

@@ -504,8 +504,6 @@ struct inuse_relaylog {
/* Set when all events have been read from a relaylog. */
bool completed;
char name[FN_REFLEN];
/* Lock used to protect inuse_relaylog::dequeued_count */
my_atomic_rwlock_t inuse_relaylog_atomic_lock;
};

View File

@@ -3652,8 +3652,7 @@ static int exec_relay_log_event(THD* thd, Relay_log_info* rli,
serial_rgi->trans_retries));
}
}
thread_safe_increment64(&rli->executed_entries,
&slave_executed_entries_lock);
thread_safe_increment64(&rli->executed_entries);
DBUG_RETURN(exec_res);
}
mysql_mutex_unlock(&rli->data_lock);

View File

@@ -1732,7 +1732,7 @@ void close_temporary_table(THD *thd, TABLE *table,
{
/* natural invariant of temporary_tables */
DBUG_ASSERT(slave_open_temp_tables || !thd->temporary_tables);
thread_safe_decrement32(&slave_open_temp_tables, &thread_running_lock);
thread_safe_decrement32(&slave_open_temp_tables);
table->in_use= 0; // No statistics
}
thd->unlock_temporary_tables();
@@ -5721,7 +5721,7 @@ TABLE *open_table_uncached(THD *thd, handlerton *hton,
thd->temporary_tables->prev= 0;
if (thd->rgi_slave)
{
thread_safe_increment32(&slave_open_temp_tables, &thread_running_lock);
thread_safe_increment32(&slave_open_temp_tables);
}
thd->unlock_temporary_tables();
}

View File

@@ -2061,7 +2061,7 @@ public:
thd.security_ctx->user= thd.security_ctx->host=0;
delayed_insert_threads--;
mysql_mutex_unlock(&LOCK_thread_count);
thread_safe_decrement32(&thread_count, &thread_count_lock);
thread_safe_decrement32(&thread_count);
mysql_cond_broadcast(&COND_thread_count); /* Tell main we are ready */
}
@@ -2197,7 +2197,7 @@ bool delayed_get_table(THD *thd, MDL_request *grl_protection_request,
if (!(di= new Delayed_insert()))
goto end_create;
thread_safe_increment32(&thread_count, &thread_count_lock);
thread_safe_increment32(&thread_count);
/*
Annotating delayed inserts is not supported.

View File

@@ -840,7 +840,7 @@ end:
delete thd;
#ifndef EMBEDDED_LIBRARY
thread_safe_decrement32(&thread_count, &thread_count_lock);
thread_safe_decrement32(&thread_count);
in_bootstrap= FALSE;
mysql_mutex_lock(&LOCK_thread_count);

View File

@@ -82,7 +82,6 @@ static int32 tc_count; /**< Number of TABLE objects in table cache. */
*/
static mysql_mutex_t LOCK_unused_shares;
my_atomic_rwlock_t LOCK_tdc_atomics; /**< Protects tdc_version. */
#ifdef HAVE_PSI_INTERFACE
PSI_mutex_key key_LOCK_unused_shares, key_TABLE_SHARE_LOCK_table_share;
@@ -136,11 +135,7 @@ static int fix_thd_pins(THD *thd)
uint tc_records(void)
{
uint count;
my_atomic_rwlock_rdlock(&LOCK_tdc_atomics);
count= my_atomic_load32_explicit(&tc_count, MY_MEMORY_ORDER_RELAXED);
my_atomic_rwlock_rdunlock(&LOCK_tdc_atomics);
return count;
return my_atomic_load32_explicit(&tc_count, MY_MEMORY_ORDER_RELAXED);
}
@@ -153,9 +148,7 @@ uint tc_records(void)
static void tc_remove_table(TABLE *table)
{
my_atomic_rwlock_wrlock(&LOCK_tdc_atomics);
my_atomic_add32_explicit(&tc_count, -1, MY_MEMORY_ORDER_RELAXED);
my_atomic_rwlock_wrunlock(&LOCK_tdc_atomics);
table->s->tdc->all_tables.remove(table);
}
@@ -262,10 +255,8 @@ void tc_add_table(THD *thd, TABLE *table)
mysql_mutex_unlock(&table->s->tdc->LOCK_table_share);
/* If we have too many TABLE instances around, try to get rid of them */
my_atomic_rwlock_wrlock(&LOCK_tdc_atomics);
need_purge= my_atomic_add32_explicit(&tc_count, 1, MY_MEMORY_ORDER_RELAXED) >=
(int32) tc_size;
my_atomic_rwlock_wrunlock(&LOCK_tdc_atomics);
if (need_purge)
{
@@ -435,7 +426,6 @@ void tdc_init(void)
tdc_inited= true;
mysql_mutex_init(key_LOCK_unused_shares, &LOCK_unused_shares,
MY_MUTEX_INIT_FAST);
my_atomic_rwlock_init(&LOCK_tdc_atomics);
tdc_version= 1L; /* Increments on each reload */
lf_hash_init(&tdc_hash, sizeof(TDC_element), LF_HASH_UNIQUE, 0, 0,
(my_hash_get_key) TDC_element::key,
@@ -484,7 +474,6 @@ void tdc_deinit(void)
{
tdc_inited= false;
lf_hash_destroy(&tdc_hash);
my_atomic_rwlock_destroy(&LOCK_tdc_atomics);
mysql_mutex_destroy(&LOCK_unused_shares);
}
DBUG_VOID_RETURN;
@@ -1000,18 +989,13 @@ int tdc_wait_for_old_version(THD *thd, const char *db, const char *table_name,
ulong tdc_refresh_version(void)
{
my_atomic_rwlock_rdlock(&LOCK_tdc_atomics);
ulong v= my_atomic_load64_explicit(&tdc_version, MY_MEMORY_ORDER_RELAXED);
my_atomic_rwlock_rdunlock(&LOCK_tdc_atomics);
return v;
return my_atomic_load64_explicit(&tdc_version, MY_MEMORY_ORDER_RELAXED);
}
ulong tdc_increment_refresh_version(void)
{
my_atomic_rwlock_wrlock(&LOCK_tdc_atomics);
ulong v= my_atomic_add64_explicit(&tdc_version, 1, MY_MEMORY_ORDER_RELAXED);
my_atomic_rwlock_wrunlock(&LOCK_tdc_atomics);
DBUG_PRINT("tcache", ("incremented global refresh_version to: %lu", v));
return v + 1;
}
@@ -1154,9 +1138,7 @@ void tdc_assign_new_table_id(TABLE_SHARE *share)
*/
do
{
my_atomic_rwlock_wrlock(&LOCK_tdc_atomics);
tid= my_atomic_add64_explicit(&last_table_id, 1, MY_MEMORY_ORDER_RELAXED);
my_atomic_rwlock_wrunlock(&LOCK_tdc_atomics);
} while (unlikely(tid == ~0UL));
share->table_map_id= tid;

View File

@@ -264,10 +264,10 @@ retry:
compatible= TRUE;
upgrading= FALSE;
cursor->blocker= cursor->upgrade_from= 0;
_lf_unpin(pins, 3);
lf_unpin(pins, 3);
do {
cursor->curr= PTR(*cursor->prev);
_lf_pin(pins, 1, cursor->curr);
lf_pin(pins, 1, cursor->curr);
} while(*cursor->prev != (intptr)cursor->curr && LF_BACKOFF);
for (;;)
{
@@ -276,7 +276,7 @@ retry:
do {
cur_link= cursor->curr->link;
cursor->next= PTR(cur_link);
_lf_pin(pins, 0, cursor->next);
lf_pin(pins, 0, cursor->next);
} while (cur_link != cursor->curr->link && LF_BACKOFF);
cur_hashnr= cursor->curr->hashnr;
cur_resource= cursor->curr->resource;
@@ -316,7 +316,7 @@ retry:
if (prev_active && !cur_active)
{
cursor->blocker= cursor->curr;
_lf_pin(pins, 3, cursor->curr);
lf_pin(pins, 3, cursor->curr);
}
if (cur_loid == loid)
{
@@ -329,7 +329,7 @@ retry:
if (cur_active)
{
cursor->blocker= cursor->curr; /* loose-locks! */
_lf_unpin(pins, 3); /* loose-locks! */
lf_unpin(pins, 3); /* loose-locks! */
return ALREADY_HAVE_THE_LOCK;
}
else
@@ -345,7 +345,7 @@ retry:
{
compatible= FALSE;
cursor->blocker= cursor->curr;
_lf_pin(pins, 3, cursor->curr);
lf_pin(pins, 3, cursor->curr);
}
}
prev_lock= lock_combining_matrix[prev_lock][cur_lock];
@@ -353,13 +353,13 @@ retry:
}
}
cursor->prev= &(cursor->curr->link);
_lf_pin(pins, 2, cursor->curr);
lf_pin(pins, 2, cursor->curr);
}
else
{
if (my_atomic_casptr((void **)cursor->prev,
(void **)(char*) &cursor->curr, cursor->next))
_lf_alloc_free(pins, cursor->curr);
lf_alloc_free(pins, cursor->curr);
else
{
(void)LF_BACKOFF;
@@ -367,7 +367,7 @@ retry:
}
}
cursor->curr= cursor->next;
_lf_pin(pins, 1, cursor->curr);
lf_pin(pins, 1, cursor->curr);
}
/*
either the end of lock list - no more locks for this resource,
@@ -435,9 +435,9 @@ static int lockinsert(LOCK * volatile *head, LOCK *node, LF_PINS *pins,
}
} while (res == REPEAT_ONCE_MORE);
_lf_unpin(pins, 0);
_lf_unpin(pins, 1);
_lf_unpin(pins, 2);
lf_unpin(pins, 0);
lf_unpin(pins, 1);
lf_unpin(pins, 2);
/*
note that blocker is not necessarily pinned here (when it's == curr).
this is ok as in such a case it's either a dummy node for
@@ -461,9 +461,9 @@ static int lockpeek(LOCK * volatile *head, LOCK *node, LF_PINS *pins,
res= lockfind(head, node, &cursor, pins);
_lf_unpin(pins, 0);
_lf_unpin(pins, 1);
_lf_unpin(pins, 2);
lf_unpin(pins, 0);
lf_unpin(pins, 1);
lf_unpin(pins, 2);
if (blocker)
*blocker= cursor.blocker;
return res;
@@ -502,7 +502,7 @@ static int lockdelete(LOCK * volatile *head, LOCK *node, LF_PINS *pins)
{
if (my_atomic_casptr((void **)cursor.prev,
(void **)(char*)&cursor.curr, cursor.next))
_lf_alloc_free(pins, cursor.curr);
lf_alloc_free(pins, cursor.curr);
else
lockfind(head, node, &cursor, pins);
}
@@ -513,10 +513,10 @@ static int lockdelete(LOCK * volatile *head, LOCK *node, LF_PINS *pins)
cursor.upgrade_from->flags|= IGNORE_ME;
}
} while (res == REPEAT_ONCE_MORE);
_lf_unpin(pins, 0);
_lf_unpin(pins, 1);
_lf_unpin(pins, 2);
_lf_unpin(pins, 3);
lf_unpin(pins, 0);
lf_unpin(pins, 1);
lf_unpin(pins, 2);
lf_unpin(pins, 3);
return res;
}
@@ -532,7 +532,7 @@ void lockman_init(LOCKMAN *lm, loid_to_lo_func *func, uint timeout)
void lockman_destroy(LOCKMAN *lm)
{
LOCK *el= *(LOCK **)_lf_dynarray_lvalue(&lm->array, 0);
LOCK *el= *(LOCK **)lf_dynarray_lvalue(&lm->array, 0);
while (el)
{
intptr next= el->link;
@@ -556,7 +556,7 @@ static void initialize_bucket(LOCKMAN *lm, LOCK * volatile *node,
uint parent= my_clear_highest_bit(bucket);
LOCK *dummy= (LOCK *)my_malloc(sizeof(LOCK), MYF(MY_WME));
LOCK **tmp= 0, *cur;
LOCK * volatile *el= _lf_dynarray_lvalue(&lm->array, parent);
LOCK * volatile *el= lf_dynarray_lvalue(&lm->array, parent);
if (*el == NULL && bucket)
initialize_bucket(lm, el, parent, pins);
@@ -604,15 +604,14 @@ enum lockman_getlock_result lockman_getlock(LOCKMAN *lm, LOCK_OWNER *lo,
enum lockman_lock_type old_lock;
DBUG_ASSERT(lo->loid);
lf_rwlock_by_pins(pins);
node= (LOCK *)_lf_alloc_new(pins);
node= (LOCK *)lf_alloc_new(pins);
node->flags= 0;
node->lock= lock;
node->loid= lo->loid;
node->resource= resource;
hashnr= calc_hash(resource);
bucket= hashnr % lm->size;
el= _lf_dynarray_lvalue(&lm->array, bucket);
el= lf_dynarray_lvalue(&lm->array, bucket);
if (*el == NULL)
initialize_bucket(lm, el, bucket, pins);
node->hashnr= my_reverse_bits(hashnr) | 1;
@@ -621,8 +620,7 @@ enum lockman_getlock_result lockman_getlock(LOCKMAN *lm, LOCK_OWNER *lo,
{
int r;
old_lock= blocker->lock;
_lf_alloc_free(pins, node);
lf_rwunlock_by_pins(pins);
lf_alloc_free(pins, node);
r= getlock_result[old_lock][lock];
DBUG_ASSERT(r);
return r;
@@ -639,7 +637,7 @@ enum lockman_getlock_result lockman_getlock(LOCKMAN *lm, LOCK_OWNER *lo,
ulonglong deadline;
struct timespec timeout;
_lf_assert_pin(pins, 3); /* blocker must be pinned here */
lf_assert_pin(pins, 3); /* blocker must be pinned here */
wait_for_lo= lm->loid_to_lo(blocker->loid);
/*
@@ -652,7 +650,7 @@ enum lockman_getlock_result lockman_getlock(LOCKMAN *lm, LOCK_OWNER *lo,
if (lock_compatibility_matrix[blocker->lock][lock])
{
blocker= wait_for_lo->all_locks;
_lf_pin(pins, 3, blocker);
lf_pin(pins, 3, blocker);
if (blocker != wait_for_lo->all_locks)
continue;
wait_for_lo= wait_for_lo->waiting_for;
@@ -667,7 +665,6 @@ enum lockman_getlock_result lockman_getlock(LOCKMAN *lm, LOCK_OWNER *lo,
continue;
lo->waiting_for= wait_for_lo;
lf_rwunlock_by_pins(pins);
/*
We lock a mutex - it may belong to a wrong LOCK_OWNER, but it must
@@ -683,7 +680,6 @@ enum lockman_getlock_result lockman_getlock(LOCKMAN *lm, LOCK_OWNER *lo,
the lock was rolled back. Either way - the lock was removed
*/
pthread_mutex_unlock(wait_for_lo->mutex);
lf_rwlock_by_pins(pins);
continue;
}
@@ -695,7 +691,6 @@ enum lockman_getlock_result lockman_getlock(LOCKMAN *lm, LOCK_OWNER *lo,
pthread_cond_timedwait(wait_for_lo->cond, wait_for_lo->mutex, &timeout);
} while (!DELETED(blocker->link) && my_hrtime().val < deadline/1000);
pthread_mutex_unlock(wait_for_lo->mutex);
lf_rwlock_by_pins(pins);
if (!DELETED(blocker->link))
{
/*
@@ -704,14 +699,12 @@ enum lockman_getlock_result lockman_getlock(LOCKMAN *lm, LOCK_OWNER *lo,
Instead we're relying on the caller to abort the transaction,
and release all locks at once - see lockman_release_locks()
*/
_lf_unpin(pins, 3);
lf_rwunlock_by_pins(pins);
lf_unpin(pins, 3);
return DIDNT_GET_THE_LOCK;
}
}
lo->waiting_for= 0;
_lf_assert_unpin(pins, 3); /* unpin should not be needed */
lf_rwunlock_by_pins(pins);
lf_assert_unpin(pins, 3); /* unpin should not be needed */
return getlock_result[lock][lock];
}
@@ -729,18 +722,16 @@ int lockman_release_locks(LOCKMAN *lm, LOCK_OWNER *lo)
LF_PINS *pins= lo->pins;
pthread_mutex_lock(lo->mutex);
lf_rwlock_by_pins(pins);
for (node= lo->all_locks; node; node= next)
{
next= node->lonext;
bucket= calc_hash(node->resource) % lm->size;
el= _lf_dynarray_lvalue(&lm->array, bucket);
el= lf_dynarray_lvalue(&lm->array, bucket);
if (*el == NULL)
initialize_bucket(lm, el, bucket, pins);
lockdelete(el, node, pins);
my_atomic_add32(&lm->count, -1);
}
lf_rwunlock_by_pins(pins);
lo->all_locks= 0;
/* now signal all waiters */
pthread_cond_broadcast(lo->cond);
@@ -757,7 +748,7 @@ static const char *lock2str[]=
*/
void print_lockhash(LOCKMAN *lm)
{
LOCK *el= *(LOCK **)_lf_dynarray_lvalue(&lm->array, 0);
LOCK *el= *(LOCK **)lf_dynarray_lvalue(&lm->array, 0);
printf("hash: size %u count %u\n", lm->size, lm->count);
while (el)
{

View File

@@ -419,8 +419,6 @@ static ulonglong flush_start= 0;
#include <my_atomic.h>
/* an array that maps id of a MARIA_SHARE to this MARIA_SHARE */
static MARIA_SHARE **id_to_share= NULL;
/* lock for id_to_share */
static my_atomic_rwlock_t LOCK_id_to_share;
static my_bool translog_dummy_callback(uchar *page,
pgcache_page_no_t page_no,
@@ -4042,7 +4040,6 @@ my_bool translog_init_with_table(const char *directory,
Log records will refer to a MARIA_SHARE by a unique 2-byte id; set up
structures for generating 2-byte ids:
*/
my_atomic_rwlock_init(&LOCK_id_to_share);
id_to_share= (MARIA_SHARE **) my_malloc(SHARE_ID_MAX * sizeof(MARIA_SHARE*),
MYF(MY_WME | MY_ZEROFILL));
if (unlikely(!id_to_share))
@@ -4286,7 +4283,6 @@ void translog_destroy()
if (log_descriptor.directory_fd >= 0)
mysql_file_close(log_descriptor.directory_fd, MYF(MY_WME));
my_atomic_rwlock_destroy(&LOCK_id_to_share);
if (id_to_share != NULL)
my_free(id_to_share + 1);
DBUG_VOID_RETURN;
@@ -8125,7 +8121,6 @@ int translog_assign_id_to_share(MARIA_HA *tbl_info, TRN *trn)
id= 0;
do
{
my_atomic_rwlock_wrlock(&LOCK_id_to_share);
for ( ; i <= SHARE_ID_MAX ; i++) /* the range is [1..SHARE_ID_MAX] */
{
void *tmp= NULL;
@@ -8136,7 +8131,6 @@ int translog_assign_id_to_share(MARIA_HA *tbl_info, TRN *trn)
break;
}
}
my_atomic_rwlock_wrunlock(&LOCK_id_to_share);
i= 1; /* scan the whole array */
} while (id == 0);
DBUG_PRINT("info", ("id_to_share: 0x%lx -> %u", (ulong)share, id));
@@ -8199,9 +8193,7 @@ void translog_deassign_id_from_share(MARIA_SHARE *share)
mutex:
*/
mysql_mutex_assert_owner(&share->intern_lock);
my_atomic_rwlock_rdlock(&LOCK_id_to_share);
my_atomic_storeptr((void **)&id_to_share[share->id], 0);
my_atomic_rwlock_rdunlock(&LOCK_id_to_share);
share->id= 0;
/* useless but safety: */
share->lsn_of_file_id= LSN_IMPOSSIBLE;

View File

@@ -60,7 +60,6 @@ static LF_HASH trid_to_trn;
static TRN **short_trid_to_active_trn;
/* locks for short_trid_to_active_trn and pool */
static my_atomic_rwlock_t LOCK_short_trid_to_trn, LOCK_pool;
static my_bool default_trnman_end_trans_hook(TRN *, my_bool, my_bool);
static void trnman_free_trn(TRN *);
@@ -191,8 +190,6 @@ int trnman_init(TrID initial_trid)
0, 0, trn_get_hash_key, 0);
DBUG_PRINT("info", ("mysql_mutex_init LOCK_trn_list"));
mysql_mutex_init(key_LOCK_trn_list, &LOCK_trn_list, MY_MUTEX_INIT_FAST);
my_atomic_rwlock_init(&LOCK_short_trid_to_trn);
my_atomic_rwlock_init(&LOCK_pool);
DBUG_RETURN(0);
}
@@ -226,8 +223,6 @@ void trnman_destroy()
lf_hash_destroy(&trid_to_trn);
DBUG_PRINT("info", ("mysql_mutex_destroy LOCK_trn_list"));
mysql_mutex_destroy(&LOCK_trn_list);
my_atomic_rwlock_destroy(&LOCK_short_trid_to_trn);
my_atomic_rwlock_destroy(&LOCK_pool);
my_free(short_trid_to_active_trn+1);
short_trid_to_active_trn= NULL;
@@ -257,7 +252,6 @@ static uint get_short_trid(TRN *trn)
for ( ; !res ; i= 1)
{
my_atomic_rwlock_wrlock(&LOCK_short_trid_to_trn);
for ( ; i <= SHORT_TRID_MAX; i++) /* the range is [1..SHORT_TRID_MAX] */
{
void *tmp= NULL;
@@ -268,7 +262,6 @@ static uint get_short_trid(TRN *trn)
break;
}
}
my_atomic_rwlock_wrunlock(&LOCK_short_trid_to_trn);
}
return res;
}
@@ -306,11 +299,9 @@ TRN *trnman_new_trn(WT_THD *wt)
Popping an unused TRN from the pool
(ABA isn't possible, we're behind a mutex)
*/
my_atomic_rwlock_wrlock(&LOCK_pool);
while (tmp.trn && !my_atomic_casptr((void **)(char*) &pool, &tmp.v,
(void *)tmp.trn->next))
/* no-op */;
my_atomic_rwlock_wrunlock(&LOCK_pool);
/* Nothing in the pool ? Allocate a new one */
if (!(trn= tmp.trn))
@@ -493,9 +484,7 @@ my_bool trnman_end_trn(TRN *trn, my_bool commit)
note that we don't own trn anymore, it may be in a shared list now.
Thus, we cannot dereference it, and must use cached_short_id below.
*/
my_atomic_rwlock_rdlock(&LOCK_short_trid_to_trn);
my_atomic_storeptr((void **)&short_trid_to_active_trn[cached_short_id], 0);
my_atomic_rwlock_rdunlock(&LOCK_short_trid_to_trn);
/*
we, under the mutex, removed going-in-free_me transactions from the
@@ -545,7 +534,6 @@ static void trnman_free_trn(TRN *trn)
tmp.trn= pool;
my_atomic_rwlock_wrlock(&LOCK_pool);
do
{
/*
@@ -554,7 +542,6 @@ static void trnman_free_trn(TRN *trn)
*/
*(TRN * volatile *)&(trn->next)= tmp.trn;
} while (!my_atomic_casptr((void **)(char*)&pool, &tmp.v, trn));
my_atomic_rwlock_wrunlock(&LOCK_pool);
}
/*
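The trnman pool code above keeps its lock-free push/pop loops and only drops the rwlock brackets around them. For reference, a minimal C11 sketch of the CAS push loop used by trnman_free_trn() (Treiber-stack style; trn_sketch, pool_sketch and pool_push are invented names):

/* Sketch of a CAS push loop in the style of trnman_free_trn(). */
#include <stdatomic.h>
#include <stdio.h>

typedef struct trn_sketch { struct trn_sketch *next; } trn_sketch;

static _Atomic(trn_sketch *) pool_sketch;   /* head of the free list */

static void pool_push(trn_sketch *trn)
{
  trn_sketch *old = atomic_load(&pool_sketch);
  do
  {
    trn->next = old;     /* link the node to the head we last observed   */
  } while (!atomic_compare_exchange_weak(&pool_sketch, &old, trn));
  /* on failure 'old' is refreshed with the current head and we retry    */
}

int main(void)
{
  static trn_sketch a, b;
  pool_push(&a);
  pool_push(&b);
  printf("%d\n", atomic_load(&pool_sketch) == &b);   /* prints: 1 */
  return 0;
}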

View File

@@ -132,7 +132,6 @@ cursor_by_user.cc
ha_perfschema.cc
pfs.cc
pfs_account.cc
pfs_atomic.cc
pfs_autosize.cc
pfs_column_values.cc
pfs_con_slice.cc

View File

@@ -1,81 +0,0 @@
/* Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software Foundation,
51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA */
/**
@file storage/perfschema/pfs_atomic.cc
Atomic operations (implementation).
*/
#include <my_global.h>
#include <my_pthread.h>
#include "pfs_atomic.h"
/*
Using SAFE_MUTEX is impossible, because of recursion.
- code locks mutex X
- P_S records the event
- P_S needs an atomic counter A
- safe mutex called for m_mutex[hash(A)]
- safe mutex allocates/free memory
- safe mutex locks THR_LOCK_malloc
- P_S records the event
- P_S needs an atomic counter B
- safe mutex called for m_mutex[hash(B)]
When hash(A) == hash(B), safe_mutex complains rightly that
the mutex is already locked.
In some cases, A == B, in particular for events_waits_history_long_index.
In short, the implementation of PFS_atomic should not cause events
to be recorded in the performance schema.
Also, because SAFE_MUTEX redefines pthread_mutex_t, etc,
this code is not inlined in pfs_atomic.h, but located here in pfs_atomic.cc.
What is needed is a plain, unmodified, pthread_mutex_t.
This is provided by my_atomic_rwlock_t.
*/
/**
Internal rwlock array.
Using a single rwlock for all atomic operations would be a bottleneck.
Using a rwlock per performance schema structure would be too costly in
memory, and use too many rwlocks.
The PFS_atomic implementation computes a hash value from the
atomic variable, to spread the bottleneck across 256 buckets,
while still providing --transparently for the caller-- an atomic
operation.
*/
my_atomic_rwlock_t PFS_atomic::m_rwlock_array[256];
static int init_done;
void PFS_atomic::init(void)
{
uint i;
for (i=0; i< array_elements(m_rwlock_array); i++)
my_atomic_rwlock_init(&m_rwlock_array[i]);
init_done= 1;
}
void PFS_atomic::cleanup(void)
{
uint i;
if (!init_done)
return;
for (i=0; i< array_elements(m_rwlock_array); i++)
my_atomic_rwlock_destroy(&m_rwlock_array[i]);
}

View File

@@ -27,221 +27,106 @@
class PFS_atomic
{
public:
/** Initialise the PFS_atomic component. */
static void init();
/** Cleanup the PFS_atomic component. */
static void cleanup();
/** Atomic load. */
static inline int32 load_32(volatile int32 *ptr)
{
int32 result;
rdlock(ptr);
result= my_atomic_load32(ptr);
rdunlock(ptr);
return result;
return my_atomic_load32(ptr);
}
/** Atomic load. */
static inline int64 load_64(volatile int64 *ptr)
{
int64 result;
rdlock(ptr);
result= my_atomic_load64(ptr);
rdunlock(ptr);
return result;
return my_atomic_load64(ptr);
}
/** Atomic load. */
static inline uint32 load_u32(volatile uint32 *ptr)
{
uint32 result;
rdlock(ptr);
result= (uint32) my_atomic_load32((int32*) ptr);
rdunlock(ptr);
return result;
return (uint32) my_atomic_load32((int32*) ptr);
}
/** Atomic load. */
static inline uint64 load_u64(volatile uint64 *ptr)
{
uint64 result;
rdlock(ptr);
result= (uint64) my_atomic_load64((int64*) ptr);
rdunlock(ptr);
return result;
return (uint64) my_atomic_load64((int64*) ptr);
}
/** Atomic store. */
static inline void store_32(volatile int32 *ptr, int32 value)
{
wrlock(ptr);
my_atomic_store32(ptr, value);
wrunlock(ptr);
}
/** Atomic store. */
static inline void store_64(volatile int64 *ptr, int64 value)
{
wrlock(ptr);
my_atomic_store64(ptr, value);
wrunlock(ptr);
}
/** Atomic store. */
static inline void store_u32(volatile uint32 *ptr, uint32 value)
{
wrlock(ptr);
my_atomic_store32((int32*) ptr, (int32) value);
wrunlock(ptr);
}
/** Atomic store. */
static inline void store_u64(volatile uint64 *ptr, uint64 value)
{
wrlock(ptr);
my_atomic_store64((int64*) ptr, (int64) value);
wrunlock(ptr);
}
/** Atomic add. */
static inline int32 add_32(volatile int32 *ptr, int32 value)
{
int32 result;
wrlock(ptr);
result= my_atomic_add32(ptr, value);
wrunlock(ptr);
return result;
return my_atomic_add32(ptr, value);
}
/** Atomic add. */
static inline int64 add_64(volatile int64 *ptr, int64 value)
{
int64 result;
wrlock(ptr);
result= my_atomic_add64(ptr, value);
wrunlock(ptr);
return result;
return my_atomic_add64(ptr, value);
}
/** Atomic add. */
static inline uint32 add_u32(volatile uint32 *ptr, uint32 value)
{
uint32 result;
wrlock(ptr);
result= (uint32) my_atomic_add32((int32*) ptr, (int32) value);
wrunlock(ptr);
return result;
return (uint32) my_atomic_add32((int32*) ptr, (int32) value);
}
/** Atomic add. */
static inline uint64 add_u64(volatile uint64 *ptr, uint64 value)
{
uint64 result;
wrlock(ptr);
result= (uint64) my_atomic_add64((int64*) ptr, (int64) value);
wrunlock(ptr);
return result;
return (uint64) my_atomic_add64((int64*) ptr, (int64) value);
}
/** Atomic compare and swap. */
static inline bool cas_32(volatile int32 *ptr, int32 *old_value,
int32 new_value)
{
bool result;
wrlock(ptr);
result= my_atomic_cas32(ptr, old_value, new_value);
wrunlock(ptr);
return result;
return my_atomic_cas32(ptr, old_value, new_value);
}
/** Atomic compare and swap. */
static inline bool cas_64(volatile int64 *ptr, int64 *old_value,
int64 new_value)
{
bool result;
wrlock(ptr);
result= my_atomic_cas64(ptr, old_value, new_value);
wrunlock(ptr);
return result;
return my_atomic_cas64(ptr, old_value, new_value);
}
/** Atomic compare and swap. */
static inline bool cas_u32(volatile uint32 *ptr, uint32 *old_value,
uint32 new_value)
{
bool result;
wrlock(ptr);
result= my_atomic_cas32((int32*) ptr, (int32*) old_value,
return my_atomic_cas32((int32*) ptr, (int32*) old_value,
(uint32) new_value);
wrunlock(ptr);
return result;
}
/** Atomic compare and swap. */
static inline bool cas_u64(volatile uint64 *ptr, uint64 *old_value,
uint64 new_value)
{
bool result;
wrlock(ptr);
result= my_atomic_cas64((int64*) ptr, (int64*) old_value,
return my_atomic_cas64((int64*) ptr, (int64*) old_value,
(uint64) new_value);
wrunlock(ptr);
return result;
}
private:
static my_atomic_rwlock_t m_rwlock_array[256];
/**
Helper used only with non-native atomic implementations.
@sa MY_ATOMIC_MODE_RWLOCKS
*/
static inline my_atomic_rwlock_t *get_rwlock(volatile void *ptr)
{
/*
Divide an address by 8 to remove alignment,
modulo 256 to fall in the array.
*/
uint index= (((intptr) ptr) >> 3) & 0xFF;
my_atomic_rwlock_t *result= &m_rwlock_array[index];
return result;
}
/**
Helper used only with non-native atomic implementations.
@sa MY_ATOMIC_MODE_RWLOCKS
*/
static inline void rdlock(volatile void *ptr)
{
my_atomic_rwlock_rdlock(get_rwlock(ptr));
}
/**
Helper used only with non-native atomic implementations.
@sa MY_ATOMIC_MODE_RWLOCKS
*/
static inline void wrlock(volatile void *ptr)
{
my_atomic_rwlock_wrlock(get_rwlock(ptr));
}
/**
Helper used only with non-native atomic implementations.
@sa MY_ATOMIC_MODE_RWLOCKS
*/
static inline void rdunlock(volatile void *ptr)
{
my_atomic_rwlock_rdunlock(get_rwlock(ptr));
}
/**
Helper used only with non-native atomic implementations.
@sa MY_ATOMIC_MODE_RWLOCKS
*/
static inline void wrunlock(volatile void *ptr)
{
my_atomic_rwlock_wrunlock(get_rwlock(ptr));
}
};
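With the rwlock fallback gone, every wrapper above is a thin inline forward to the corresponding my_atomic_* call. A hedged usage sketch follows, assuming the server's normal perfschema include setup; lost_events, record_lost_event and try_claim_slot are hypothetical names, not instrumentation that exists in the tree:

#include "pfs_atomic.h"   /* assumes the perfschema include path */

/* Hypothetical counter bumped from instrumented code paths. */
static volatile uint32 lost_events= 0;

void record_lost_event(void)
{
  /* add_u32() forwards to my_atomic_add32(); no bucket rwlock is taken anymore. */
  (void) PFS_atomic::add_u32(&lost_events, 1);
}

bool try_claim_slot(volatile int32 *slot_state)
{
  int32 expected= 0;                  /* 0 = free, 1 = busy (made-up convention) */
  /* cas_32() returns true if *slot_state was still 0 and is now 1. */
  return PFS_atomic::cas_32(slot_state, &expected, 1);
}

Callers are unchanged by this commit; only the implementation behind the wrappers gets simpler.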

View File

@@ -71,7 +71,6 @@ initialize_performance_schema(PFS_global_param *param)
}
init_timers();
PFS_atomic::init();
init_event_name_sizing(param);
register_global_classes();
@@ -187,7 +186,6 @@ static void cleanup_performance_schema(void)
cleanup_account_hash();
cleanup_digest();
cleanup_digest_hash();
PFS_atomic::cleanup();
}
void shutdown_performance_schema(void)

View File

@@ -100,11 +100,7 @@ void test_oom()
void do_all_tests()
{
PFS_atomic::init();
test_oom();
PFS_atomic::cleanup();
}
int main(int, char **)

View File

@@ -100,11 +100,7 @@ void test_oom()
void do_all_tests()
{
PFS_atomic::init();
test_oom();
PFS_atomic::cleanup();
}
int main(int, char **)

View File

@@ -654,11 +654,7 @@ void test_oom()
void do_all_tests()
{
PFS_atomic::init();
test_oom();
PFS_atomic::cleanup();
}
int main(int argc, char **argv)

View File

@@ -402,13 +402,9 @@ void test_with_instances()
void do_all_tests()
{
PFS_atomic::init();
test_no_instruments();
test_no_instances();
test_with_instances();
PFS_atomic::cleanup();
}
int main(int argc, char **argv)

View File

@@ -56,11 +56,7 @@ void test_oom()
void do_all_tests()
{
PFS_atomic::init();
test_oom();
PFS_atomic::cleanup();
}
int main(int argc, char **argv)

View File

@@ -655,8 +655,6 @@ void test_instruments_reset()
void do_all_tests()
{
PFS_atomic::init();
test_no_registration();
test_mutex_registration();
test_rwlock_registration();
@@ -666,8 +664,6 @@ void do_all_tests()
test_socket_registration();
test_table_registration();
test_instruments_reset();
PFS_atomic::cleanup();
}
int main(int argc, char **argv)

View File

@@ -106,11 +106,7 @@ void test_timers()
void do_all_tests()
{
PFS_atomic::init();
test_timers();
PFS_atomic::cleanup();
}
int main(int, char **)

View File

@@ -99,11 +99,7 @@ void test_oom()
void do_all_tests()
{
PFS_atomic::init();
test_oom();
PFS_atomic::cleanup();
}
int main(int, char **)

View File

@@ -17,7 +17,6 @@
volatile uint32 b32;
volatile int32 c32;
my_atomic_rwlock_t rwl;
/* add and sub a random number in a loop. Must get 0 at the end */
pthread_handler_t test_atomic_add(void *arg)
@@ -27,13 +26,8 @@ pthread_handler_t test_atomic_add(void *arg)
for (x= ((int)(intptr)(&m)); m ; m--)
{
x= (x*m+0x87654321) & INT_MAX32;
my_atomic_rwlock_wrlock(&rwl);
my_atomic_add32(&bad, x);
my_atomic_rwlock_wrunlock(&rwl);
my_atomic_rwlock_wrlock(&rwl);
my_atomic_add32(&bad, -x);
my_atomic_rwlock_wrunlock(&rwl);
}
pthread_mutex_lock(&mutex);
if (!--running_threads) pthread_cond_signal(&cond);
@@ -50,13 +44,8 @@ pthread_handler_t test_atomic_add64(void *arg)
for (x= ((int64)(intptr)(&m)); m ; m--)
{
x= (x*m+0xfdecba987654321LL) & INT_MAX64;
my_atomic_rwlock_wrlock(&rwl);
my_atomic_add64(&a64, x);
my_atomic_rwlock_wrunlock(&rwl);
my_atomic_rwlock_wrlock(&rwl);
my_atomic_add64(&a64, -x);
my_atomic_rwlock_wrunlock(&rwl);
}
pthread_mutex_lock(&mutex);
if (!--running_threads)
@@ -82,31 +71,17 @@ pthread_handler_t test_atomic_fas(void *arg)
int m= *(int *)arg;
int32 x;
my_atomic_rwlock_wrlock(&rwl);
x= my_atomic_add32(&b32, 1);
my_atomic_rwlock_wrunlock(&rwl);
my_atomic_rwlock_wrlock(&rwl);
my_atomic_add32(&bad, x);
my_atomic_rwlock_wrunlock(&rwl);
for (; m ; m--)
{
my_atomic_rwlock_wrlock(&rwl);
x= my_atomic_fas32(&c32, x);
my_atomic_rwlock_wrunlock(&rwl);
}
if (!x)
{
my_atomic_rwlock_wrlock(&rwl);
x= my_atomic_fas32(&c32, x);
my_atomic_rwlock_wrunlock(&rwl);
}
my_atomic_rwlock_wrlock(&rwl);
my_atomic_add32(&bad, -x);
my_atomic_rwlock_wrunlock(&rwl);
pthread_mutex_lock(&mutex);
if (!--running_threads) pthread_cond_signal(&cond);
@@ -125,19 +100,13 @@ pthread_handler_t test_atomic_cas(void *arg)
int32 x, y;
for (x= ((int)(intptr)(&m)); m ; m--)
{
my_atomic_rwlock_wrlock(&rwl);
y= my_atomic_load32(&bad);
my_atomic_rwlock_wrunlock(&rwl);
x= (x*m+0x87654321) & INT_MAX32;
do {
my_atomic_rwlock_wrlock(&rwl);
ok= my_atomic_cas32(&bad, &y, (uint32)y+x);
my_atomic_rwlock_wrunlock(&rwl);
} while (!ok) ;
do {
my_atomic_rwlock_wrlock(&rwl);
ok= my_atomic_cas32(&bad, &y, y-x);
my_atomic_rwlock_wrunlock(&rwl);
} while (!ok) ;
}
pthread_mutex_lock(&mutex);
@@ -154,7 +123,6 @@ void do_tests()
bad= my_atomic_initialize();
ok(!bad, "my_atomic_initialize() returned %d", bad);
my_atomic_rwlock_init(&rwl);
b32= c32= 0;
test_concurrently("my_atomic_add32", test_atomic_add, THREADS, CYCLES);
@@ -178,6 +146,4 @@ void do_tests()
}
a64=0;
test_concurrently("my_atomic_add64", test_atomic_add64, THREADS, CYCLES);
my_atomic_rwlock_destroy(&rwl);
}
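The pattern exercised in this test ("add and sub a random number in a loop, must get 0 at the end") can be sketched standalone. The sketch below is an illustrative approximation, assuming a build where the gcc __sync builtins back my_atomic (HAVE_GCC_ATOMIC_BUILTINS); the real test goes through the my_atomic_* wrappers and the tap framework:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static volatile int32_t bad= 0;   /* must be 0 again when all threads finish */

static void *add_sub_loop(void *arg)
{
  int m= *(int *) arg;
  int32_t x= (int32_t) (intptr_t) &m;
  for (; m; m--)
  {
    /* mix the value; unsigned arithmetic avoids signed overflow */
    x= (int32_t) (((uint32_t) x * (uint32_t) m + 0x87654321u) & 0x7FFFFFFFu);
    __sync_fetch_and_add(&bad, x);    /* gcc __sync builtin; the real test uses my_atomic_add32() */
    __sync_fetch_and_add(&bad, -x);
  }
  return 0;
}

int main(void)
{
  enum { THREADS= 30, CYCLES= 3000 };
  pthread_t t[THREADS];
  int cycles= CYCLES, i;
  for (i= 0; i < THREADS; i++)
    pthread_create(&t[i], 0, add_sub_loop, &cycles);
  for (i= 0; i < THREADS; i++)
    pthread_join(t[i], 0);
  printf("bad= %d (expect 0)\n", (int) bad);
  return bad != 0;
}

A non-zero final value of bad indicates a lost update.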

View File

@@ -64,15 +64,7 @@ int main(int argc __attribute__((unused)), char **argv)
pthread_attr_init(&thr_attr);
pthread_attr_setdetachstate(&thr_attr,PTHREAD_CREATE_DETACHED);
#ifdef MY_ATOMIC_MODE_RWLOCKS
#if defined(HPUX11) || defined(__POWERPC__) /* shown to be very slow (scheduler-related) */
#define CYCLES 300
#else
#define CYCLES 3000
#endif
#else
#define CYCLES 3000
#endif
#define THREADS 30
diag("N CPUs: %d, atomic ops: %s", my_getncpus(), MY_ATOMIC_MODE);