Use __glibc_likely instead of __builtin_expect.
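For context, __glibc_likely and __glibc_unlikely are thin wrappers around
__builtin_expect that encode the expected truth value in the macro name
rather than in a trailing 0/1 argument. A sketch of their definitions in
glibc's sys/cdefs.h (the exact guards may differ between glibc versions):

    /* Branch-prediction hints: tell GCC which way a condition usually
       goes so the common path can be laid out straight-line.  */
    #if __GNUC__ >= 3
    # define __glibc_unlikely(cond)  __builtin_expect ((cond), 0)
    # define __glibc_likely(cond)    __builtin_expect ((cond), 1)
    #else
    /* Without __builtin_expect the hints degrade to plain conditions.  */
    # define __glibc_unlikely(cond)  (cond)
    # define __glibc_likely(cond)    (cond)
    #endif

Each hunk below is therefore a pure renaming: __builtin_expect (E, 0)
becomes __glibc_unlikely (E) and __builtin_expect (E, 1) becomes
__glibc_likely (E), which should not change the generated code.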
@@ -376,7 +376,7 @@ ptmalloc_init (void)
   tsd_setspecific (arena_key, (void *) &main_arena);
   thread_atfork (ptmalloc_lock_all, ptmalloc_unlock_all, ptmalloc_unlock_all2);
   const char *s = NULL;
-  if (__builtin_expect (_environ != NULL, 1))
+  if (__glibc_likely (_environ != NULL))
     {
       char **runp = _environ;
       char *envline;
@@ -624,7 +624,7 @@ shrink_heap (heap_info *h, long diff)
 
   /* Try to re-map the extra heap space freshly to save memory, and make it
      inaccessible.  See malloc-sysdep.h to know when this is true.  */
-  if (__builtin_expect (check_may_shrink_heap (), 0))
+  if (__glibc_unlikely (check_may_shrink_heap ()))
     {
       if ((char *) MMAP ((char *) h + new_size, diff, PROT_NONE,
                          MAP_FIXED) == (char *) MAP_FAILED)
@@ -863,12 +863,12 @@ arena_get2 (mstate a_tsd, size_t size, mstate avoid_arena)
          narenas_limit is 0.  There is no possibility for narenas to
          be too big for the test to always fail since there is not
          enough address space to create that many arenas.  */
-      if (__builtin_expect (n <= narenas_limit - 1, 0))
+      if (__glibc_unlikely (n <= narenas_limit - 1))
         {
           if (catomic_compare_and_exchange_bool_acq (&narenas, n + 1, n))
             goto repeat;
           a = _int_new_arena (size);
-          if (__builtin_expect (a == NULL, 0))
+          if (__glibc_unlikely (a == NULL))
             catomic_decrement (&narenas);
         }
       else
@@ -3368,7 +3368,7 @@ _int_malloc (mstate av, size_t bytes)
           else
             {
               bck = victim->bk;
-              if (__builtin_expect (bck->fd != victim, 0))
+              if (__glibc_unlikely (bck->fd != victim))
                 {
                   errstr = "malloc(): smallbin double linked list corrupted";
                   goto errout;
@@ -3591,7 +3591,7 @@ _int_malloc (mstate av, size_t bytes)
                  have to perform a complete insert here.  */
               bck = unsorted_chunks (av);
               fwd = bck->fd;
-              if (__builtin_expect (fwd->bk != bck, 0))
+              if (__glibc_unlikely (fwd->bk != bck))
                 {
                   errstr = "malloc(): corrupted unsorted chunks";
                   goto errout;
@@ -3698,7 +3698,7 @@ _int_malloc (mstate av, size_t bytes)
                  have to perform a complete insert here.  */
               bck = unsorted_chunks (av);
               fwd = bck->fd;
-              if (__builtin_expect (fwd->bk != bck, 0))
+              if (__glibc_unlikely (fwd->bk != bck))
                 {
                   errstr = "malloc(): corrupted unsorted chunks 2";
                   goto errout;
@@ -3824,7 +3824,7 @@ _int_free (mstate av, mchunkptr p, int have_lock)
     }
   /* We know that each chunk is at least MINSIZE bytes in size or a
      multiple of MALLOC_ALIGNMENT.  */
-  if (__builtin_expect (size < MINSIZE || !aligned_OK (size), 0))
+  if (__glibc_unlikely (size < MINSIZE || !aligned_OK (size)))
     {
       errstr = "free(): invalid size";
       goto errout;
@@ -3922,7 +3922,7 @@ _int_free (mstate av, mchunkptr p, int have_lock)
 
     /* Lightweight tests: check whether the block is already the
        top block.  */
-    if (__builtin_expect (p == av->top, 0))
+    if (__glibc_unlikely (p == av->top))
       {
        errstr = "double free or corruption (top)";
        goto errout;
@@ -3936,7 +3936,7 @@ _int_free (mstate av, mchunkptr p, int have_lock)
        goto errout;
       }
     /* Or whether the block is actually not marked used.  */
-    if (__builtin_expect (!prev_inuse(nextchunk), 0))
+    if (__glibc_unlikely (!prev_inuse(nextchunk)))
       {
        errstr = "double free or corruption (!prev)";
        goto errout;
@@ -3979,7 +3979,7 @@ _int_free (mstate av, mchunkptr p, int have_lock)
 
     bck = unsorted_chunks(av);
     fwd = bck->fd;
-    if (__builtin_expect (fwd->bk != bck, 0))
+    if (__glibc_unlikely (fwd->bk != bck))
       {
        errstr = "free(): corrupted unsorted chunks";
        goto errout;
@@ -137,20 +137,20 @@ update_data (struct header *result, size_t len, size_t old_len)
      value.  The base stack pointer might not be set if this is not
      the main thread and it is the first call to any of these
      functions.  */
-  if (__builtin_expect (!start_sp, 0))
+  if (__glibc_unlikely (!start_sp))
     start_sp = GETSP ();
 
   uintptr_t sp = GETSP ();
 #ifdef STACK_GROWS_UPWARD
   /* This can happen in threads where we didn't catch the thread's
      stack early enough.  */
-  if (__builtin_expect (sp < start_sp, 0))
+  if (__glibc_unlikely (sp < start_sp))
     start_sp = sp;
   size_t current_stack = sp - start_sp;
 #else
   /* This can happen in threads where we didn't catch the thread's
      stack early enough.  */
-  if (__builtin_expect (sp > start_sp, 0))
+  if (__glibc_unlikely (sp > start_sp))
     start_sp = sp;
   size_t current_stack = start_sp - sp;
 #endif
@@ -330,7 +330,7 @@ malloc (size_t len)
   struct header *result = NULL;
 
   /* Determine real implementation if not already happened.  */
-  if (__builtin_expect (initialized <= 0, 0))
+  if (__glibc_unlikely (initialized <= 0))
     {
       if (initialized == -1)
         return NULL;
@@ -382,7 +382,7 @@ realloc (void *old, size_t len)
   size_t old_len;
 
   /* Determine real implementation if not already happened.  */
-  if (__builtin_expect (initialized <= 0, 0))
+  if (__glibc_unlikely (initialized <= 0))
     {
       if (initialized == -1)
         return NULL;
@@ -476,7 +476,7 @@ calloc (size_t n, size_t len)
   size_t size = n * len;
 
   /* Determine real implementation if not already happened.  */
-  if (__builtin_expect (initialized <= 0, 0))
+  if (__glibc_unlikely (initialized <= 0))
     {
       if (initialized == -1)
         return NULL;
@@ -526,7 +526,7 @@ free (void *ptr)
   struct header *real;
 
   /* Determine real implementation if not already happened.  */
-  if (__builtin_expect (initialized <= 0, 0))
+  if (__glibc_unlikely (initialized <= 0))
     {
       if (initialized == -1)
         return;
@@ -578,7 +578,7 @@ mmap (void *start, size_t len, int prot, int flags, int fd, off_t offset)
   void *result = NULL;
 
   /* Determine real implementation if not already happened.  */
-  if (__builtin_expect (initialized <= 0, 0))
+  if (__glibc_unlikely (initialized <= 0))
     {
       if (initialized == -1)
         return NULL;
@@ -631,7 +631,7 @@ mmap64 (void *start, size_t len, int prot, int flags, int fd, off64_t offset)
   void *result = NULL;
 
   /* Determine real implementation if not already happened.  */
-  if (__builtin_expect (initialized <= 0, 0))
+  if (__glibc_unlikely (initialized <= 0))
     {
       if (initialized == -1)
         return NULL;
@@ -689,7 +689,7 @@ mremap (void *start, size_t old_len, size_t len, int flags, ...)
   va_end (ap);
 
   /* Determine real implementation if not already happened.  */
-  if (__builtin_expect (initialized <= 0, 0))
+  if (__glibc_unlikely (initialized <= 0))
     {
       if (initialized == -1)
         return NULL;
@@ -750,7 +750,7 @@ munmap (void *start, size_t len)
   int result;
 
   /* Determine real implementation if not already happened.  */
-  if (__builtin_expect (initialized <= 0, 0))
+  if (__glibc_unlikely (initialized <= 0))
     {
       if (initialized == -1)
         return -1;
@@ -766,7 +766,7 @@ munmap (void *start, size_t len)
   /* Keep track of number of calls.  */
   catomic_increment (&calls[idx_munmap]);
 
-  if (__builtin_expect (result == 0, 1))
+  if (__glibc_likely (result == 0))
     {
       /* Keep track of total memory freed using `free'.  */
       catomic_add (&total[idx_munmap], len);