mirror of https://sourceware.org/git/glibc.git (synced 2025-10-27 12:15:39 +03:00)
atomics: Remove unused atomics
Remove all unused atomics. Replace uses of catomic_increment and catomic_decrement with atomic_fetch_add_relaxed, which maps to a standard compiler builtin. Relaxed memory ordering is correct for simple counters since they only need atomicity.

Reviewed-by: Adhemerval Zanella <adhemerval.zanella@linaro.org>
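For illustration, a minimal sketch of the counter pattern the message describes, written with portable C11 atomics rather than glibc's internal macros (the counter and function names here are hypothetical):

#include <stdatomic.h>

/* A simple statistics counter: it only needs atomicity, not ordering
   with respect to other memory, so relaxed operations suffice.  */
static atomic_long n_events;

static void
record_event (void)
{
  /* Corresponds to glibc's atomic_fetch_add_relaxed (&n_events, 1).  */
  atomic_fetch_add_explicit (&n_events, 1, memory_order_relaxed);
}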
malloc/arena.c
@@ -841,11 +841,11 @@ arena_get2 (size_t size, mstate avoid_arena)
              enough address space to create that many arenas.  */
           if (__glibc_unlikely (n <= narenas_limit - 1))
             {
-              if (catomic_compare_and_exchange_bool_acq (&narenas, n + 1, n))
+              if (atomic_compare_and_exchange_bool_acq (&narenas, n + 1, n))
                 goto repeat;
               a = _int_new_arena (size);
               if (__glibc_unlikely (a == NULL))
-                catomic_decrement (&narenas);
+                atomic_fetch_add_relaxed (&narenas, -1);
             }
           else
             a = reused_arena (avoid_arena);
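The hunk above bumps narenas with an acquire compare-and-swap, but only while it is below the limit, and undoes the bump with a relaxed decrement if arena creation fails. A standalone sketch of that reserve/undo pattern in portable C11 atomics (function and variable names are hypothetical, not glibc's):

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

static _Atomic size_t narenas_demo;

/* Reserve a slot by incrementing the counter, but only while it is
   below the limit; mirrors the CAS loop in arena_get2.  */
static bool
reserve_arena_slot (size_t limit)
{
  size_t n = atomic_load_explicit (&narenas_demo, memory_order_relaxed);
  while (n < limit)
    if (atomic_compare_exchange_weak_explicit (&narenas_demo, &n, n + 1,
                                               memory_order_acquire,
                                               memory_order_relaxed))
      return true;     /* On failure, n was reloaded; retry.  */
  return false;
}

/* If creating the arena then fails, undo the reservation; a plain
   relaxed decrement suffices, as in the diff above.  */
static void
unreserve_arena_slot (void)
{
  atomic_fetch_sub_explicit (&narenas_demo, 1, memory_order_relaxed);
}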
malloc/malloc.c
@@ -4008,7 +4008,7 @@ _int_malloc (mstate av, size_t bytes)
       if (__glibc_unlikely (pp != NULL && misaligned_chunk (pp)))       \
         malloc_printerr ("malloc(): unaligned fastbin chunk detected"); \
     }                                                                    \
-  while ((pp = catomic_compare_and_exchange_val_acq (fb, pp, victim))    \
+  while ((pp = atomic_compare_and_exchange_val_acq (fb, pp, victim))     \
          != victim);                                                     \
 
   if ((unsigned long) (nb) <= (unsigned long) (get_max_fast ()))
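The loop above pops the head of a fastbin with an acquire compare-and-swap. A simplified sketch of that lock-free pop (hypothetical names; glibc additionally mangles the links with PROTECT_PTR, performs the misalignment check shown above, and relies on its allocation protocol to avoid the ABA and lifetime hazards this toy version ignores):

#include <stdatomic.h>
#include <stddef.h>

struct chunk { struct chunk *fd; };   /* Singly linked free list.  */

/* Pop the current head of the list; acquire ordering makes the
   popped chunk's contents visible to the caller.  */
static struct chunk *
fastbin_pop (struct chunk *_Atomic *fb)
{
  struct chunk *victim = atomic_load_explicit (fb, memory_order_acquire);
  while (victim != NULL
         && !atomic_compare_exchange_weak_explicit (fb, &victim,
                                                    victim->fd,
                                                    memory_order_acquire,
                                                    memory_order_acquire))
    ;   /* CAS failure reloads victim from *fb; retry.  */
  return victim;
}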
@@ -4667,7 +4667,7 @@ _int_free_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T size, int have_lock)
           old2 = old;
           p->fd = PROTECT_PTR (&p->fd, old);
         }
-      while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2))
+      while ((old = atomic_compare_and_exchange_val_rel (fb, p, old2))
              != old2);
 
       /* Check that size of fastbin chunk at the top is the same as
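The free path above is the mirror image: it pushes the chunk back onto the fastbin with a release compare-and-swap, so the chunk's fd link is published before the new head becomes visible to other threads. Continuing the sketch from the pop example (again hypothetical, with glibc's PROTECT_PTR mangling and double-free checks omitted):

/* Push p onto the head of the list; release ordering publishes
   p->fd before other threads can observe p as the new head.  */
static void
fastbin_push (struct chunk *_Atomic *fb, struct chunk *p)
{
  struct chunk *old = atomic_load_explicit (fb, memory_order_relaxed);
  do
    p->fd = old;
  while (!atomic_compare_exchange_weak_explicit (fb, &old, p,
                                                 memory_order_release,
                                                 memory_order_relaxed));
}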