mirror of
https://github.com/MariaDB/server.git
synced 2026-01-06 05:22:24 +03:00
MDEV-6906 - Relaxed memory order for counters
Let some atomic counters use relaxed memory order.
This commit is contained in:
@@ -3982,7 +3982,7 @@ static void my_malloc_size_cb_func(long long size, my_bool is_thread_specific)
     }
     // workaround for gcc 4.2.4-1ubuntu4 -fPIE (from DEB_BUILD_HARDENING=1)
     int64 volatile * volatile ptr=&global_status_var.memory_used;
-    my_atomic_add64(ptr, size);
+    my_atomic_add64_explicit(ptr, size, MY_MEMORY_ORDER_RELAXED);
   }
 }
||||
sql/mysqld.h — 12 changed lines
@@ -633,7 +633,7 @@ inline __attribute__((warn_unused_result)) query_id_t next_query_id()
 {
   query_id_t id;
   my_atomic_rwlock_wrlock(&global_query_id_lock);
-  id= my_atomic_add64(&global_query_id, 1);
+  id= my_atomic_add64_explicit(&global_query_id, 1, MY_MEMORY_ORDER_RELAXED);
   my_atomic_rwlock_wrunlock(&global_query_id_lock);
   return (id);
 }
@@ -642,7 +642,7 @@ inline query_id_t get_query_id()
 {
   query_id_t id;
   my_atomic_rwlock_wrlock(&global_query_id_lock);
-  id= my_atomic_load64(&global_query_id);
+  id= my_atomic_load64_explicit(&global_query_id, MY_MEMORY_ORDER_RELAXED);
   my_atomic_rwlock_wrunlock(&global_query_id_lock);
   return id;
 }
@@ -668,28 +668,28 @@ inline void table_case_convert(char * name, uint length)
 inline void thread_safe_increment32(int32 *value, my_atomic_rwlock_t *lock)
 {
   my_atomic_rwlock_wrlock(lock);
-  (void) my_atomic_add32(value, 1);
+  (void) my_atomic_add32_explicit(value, 1, MY_MEMORY_ORDER_RELAXED);
   my_atomic_rwlock_wrunlock(lock);
 }
 
 inline void thread_safe_decrement32(int32 *value, my_atomic_rwlock_t *lock)
 {
   my_atomic_rwlock_wrlock(lock);
-  (void) my_atomic_add32(value, -1);
+  (void) my_atomic_add32_explicit(value, -1, MY_MEMORY_ORDER_RELAXED);
   my_atomic_rwlock_wrunlock(lock);
 }
 
 inline void thread_safe_increment64(int64 *value, my_atomic_rwlock_t *lock)
 {
   my_atomic_rwlock_wrlock(lock);
-  (void) my_atomic_add64(value, 1);
+  (void) my_atomic_add64_explicit(value, 1, MY_MEMORY_ORDER_RELAXED);
   my_atomic_rwlock_wrunlock(lock);
 }
 
 inline void thread_safe_decrement64(int64 *value, my_atomic_rwlock_t *lock)
 {
   my_atomic_rwlock_wrlock(lock);
-  (void) my_atomic_add64(value, -1);
+  (void) my_atomic_add64_explicit(value, -1, MY_MEMORY_ORDER_RELAXED);
   my_atomic_rwlock_wrunlock(lock);
 }
||||
@@ -139,7 +139,7 @@ uint tc_records(void)
 {
   uint count;
   my_atomic_rwlock_rdlock(&LOCK_tdc_atomics);
-  count= my_atomic_load32(&tc_count);
+  count= my_atomic_load32_explicit(&tc_count, MY_MEMORY_ORDER_RELAXED);
   my_atomic_rwlock_rdunlock(&LOCK_tdc_atomics);
   return count;
 }
@@ -155,7 +155,7 @@ uint tc_records(void)
 static void tc_remove_table(TABLE *table)
 {
   my_atomic_rwlock_wrlock(&LOCK_tdc_atomics);
-  my_atomic_add32(&tc_count, -1);
+  my_atomic_add32_explicit(&tc_count, -1, MY_MEMORY_ORDER_RELAXED);
   my_atomic_rwlock_wrunlock(&LOCK_tdc_atomics);
   table->s->tdc.all_tables.remove(table);
 }
@@ -259,7 +259,8 @@ void tc_add_table(THD *thd, TABLE *table)
 
   /* If we have too many TABLE instances around, try to get rid of them */
   my_atomic_rwlock_wrlock(&LOCK_tdc_atomics);
-  need_purge= my_atomic_add32(&tc_count, 1) >= (int32) tc_size;
+  need_purge= my_atomic_add32_explicit(&tc_count, 1, MY_MEMORY_ORDER_RELAXED) >=
+              (int32) tc_size;
   my_atomic_rwlock_wrunlock(&LOCK_tdc_atomics);
 
   if (need_purge)
@@ -1101,7 +1102,7 @@ int tdc_wait_for_old_version(THD *thd, const char *db, const char *table_name,
 ulong tdc_refresh_version(void)
 {
   my_atomic_rwlock_rdlock(&LOCK_tdc_atomics);
-  ulong v= my_atomic_load64(&tdc_version);
+  ulong v= my_atomic_load64_explicit(&tdc_version, MY_MEMORY_ORDER_RELAXED);
   my_atomic_rwlock_rdunlock(&LOCK_tdc_atomics);
   return v;
 }
@@ -1110,7 +1111,7 @@ ulong tdc_refresh_version(void)
 ulong tdc_increment_refresh_version(void)
 {
   my_atomic_rwlock_wrlock(&LOCK_tdc_atomics);
-  ulong v= my_atomic_add64(&tdc_version, 1);
+  ulong v= my_atomic_add64_explicit(&tdc_version, 1, MY_MEMORY_ORDER_RELAXED);
   my_atomic_rwlock_wrunlock(&LOCK_tdc_atomics);
   DBUG_PRINT("tcache", ("incremented global refresh_version to: %lu", v));
   return v + 1;
@@ -1210,7 +1211,7 @@ void tdc_assign_new_table_id(TABLE_SHARE *share)
   do
   {
     my_atomic_rwlock_wrlock(&LOCK_tdc_atomics);
-    tid= my_atomic_add64(&last_table_id, 1);
+    tid= my_atomic_add64_explicit(&last_table_id, 1, MY_MEMORY_ORDER_RELAXED);
     my_atomic_rwlock_wrunlock(&LOCK_tdc_atomics);
   } while (unlikely(tid == ~0UL));
Reference in New Issue
Block a user