* malloc/arena.c (reused_arena): New parameter, avoid_arena.
        When avoid_arena is set, don't retry in that arena.  Pick the
        next one, whatever it might be.
        (arena_get2): New parameter avoid_arena, pass through to reused_arena.
        (arena_lock): Pass in new parameter to arena_get2.
        * malloc/malloc.c (__libc_memalign): Pass in new parameter to
        arena_get2.
        (__libc_malloc): Unify retrying after main arena failure with
        __libc_memalign version.
        (__libc_valloc, __libc_pvalloc, __libc_calloc): Likewise.
Author: Jeff Law
Date:   2012-08-10 09:37:04 -06:00
Commit: bf51f568f1 (parent 2d83a317e9)
4 changed files with 61 additions and 29 deletions
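For orientation, the heart of the fix can be condensed into the sketch below. This is not the glibc code: the struct and the names arena_sketch, mstate_sketch and reused_arena_sketch are hypothetical stand-ins, and the scan over the arena list is collapsed into a single parameter, but the avoid_arena handling mirrors the reused_arena hunk in malloc/arena.c further down.

#include <pthread.h>

/* Hypothetical, stripped-down arena type; glibc's real mstate carries
   much more state than this.  */
typedef struct arena_sketch
{
  struct arena_sketch *next;        /* circular list of arenas */
  pthread_mutex_t mutex;
} *mstate_sketch;

/* Return a locked arena to reuse, but never AVOID_ARENA: the caller has
   already failed to allocate from it (and, per the patch comment, it is
   currently locked), so picking it again would only repeat the failure.  */
static mstate_sketch
reused_arena_sketch (mstate_sketch candidate, mstate_sketch avoid_arena)
{
  mstate_sketch result = candidate; /* glibc scans the arena list here */

  if (result == avoid_arena)        /* BZ #13939: skip the failed arena */
    result = result->next;

  pthread_mutex_lock (&result->mutex);
  return result;
}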

ChangeLog

@@ -1,3 +1,17 @@
+2012-08-09  Jeff Law  <law@redhat.com>
+
+        [BZ #13939]
+        * malloc/arena.c (reused_arena): New parameter, avoid_arena.
+        When avoid_arena is set, don't retry in that arena.  Pick the
+        next one, whatever it might be.
+        (arena_get2): New parameter avoid_arena, pass through to reused_arena.
+        (arena_lock): Pass in new parameter to arena_get2.
+        * malloc/malloc.c (__libc_memalign): Pass in new parameter to
+        arena_get2.
+        (__libc_malloc): Unify retrying after main arena failure with
+        __libc_memalign version.
+        (__libc_valloc, __libc_pvalloc, __libc_calloc): Likewise.
+
 2012-08-09  H.J. Lu  <hongjiu.lu@intel.com>
 
         [BZ #14166]

NEWS

@@ -9,8 +9,8 @@ Version 2.17
 * The following bugs are resolved with this release:
 
-  6778, 6808, 13717, 14042, 14166, 14150, 14151, 14154, 14157, 14173, 14283,
-  14298, 14307, 14328, 14331, 14336, 14337, 14347, 14349
+  6778, 6808, 13717, 13939, 14042, 14166, 14150, 14151, 14154, 14157, 14173,
+  14283, 14298, 14307, 14328, 14331, 14336, 14337, 14347, 14349
 
 * Support for STT_GNU_IFUNC symbols added for s390 and s390x.
   Optimized versions of memcpy, memset, and memcmp added for System z10 and

malloc/arena.c

@@ -120,14 +120,14 @@ int __malloc_initialized = -1;
     if(ptr) \
       (void)mutex_lock(&ptr->mutex); \
     else \
-      ptr = arena_get2(ptr, (size)); \
+      ptr = arena_get2(ptr, (size), NULL); \
   } while(0)
 #else
 # define arena_lock(ptr, size) do { \
     if(ptr && !mutex_trylock(&ptr->mutex)) { \
       THREAD_STAT(++(ptr->stat_lock_direct)); \
     } else \
-      ptr = arena_get2(ptr, (size)); \
+      ptr = arena_get2(ptr, (size), NULL); \
   } while(0)
 #endif
@@ -778,9 +778,11 @@ get_free_list (void)
   return result;
 }
 
+/* Lock and return an arena that can be reused for memory allocation.
+   Avoid AVOID_ARENA as we have already failed to allocate memory in
+   it and it is currently locked.  */
 static mstate
-reused_arena (void)
+reused_arena (mstate avoid_arena)
 {
   mstate result;
   static mstate next_to_use;
@@ -797,6 +799,11 @@ reused_arena (void)
     }
   while (result != next_to_use);
 
+  /* Avoid AVOID_ARENA as we have already failed to allocate memory
+     in that arena and it is currently locked.  */
+  if (result == avoid_arena)
+    result = result->next;
+
   /* No arena available.  Wait for the next in line.  */
   (void)mutex_lock(&result->mutex);
@@ -811,7 +818,7 @@
 
 static mstate
 internal_function
-arena_get2(mstate a_tsd, size_t size)
+arena_get2(mstate a_tsd, size_t size, mstate avoid_arena)
 {
   mstate a;
 
@@ -856,7 +863,7 @@ arena_get2(mstate a_tsd, size_t size)
        catomic_decrement (&narenas);
       }
     else
-      a = reused_arena ();
+      a = reused_arena (avoid_arena);
   }
 #else
   if(!a_tsd)
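Before the malloc/malloc.c hunks, it may help to see the caller-side retry pattern they all converge on. The sketch below is a hypothetical condensation, not glibc code: arena_stub, alloc_from, pick_arena and retry_allocation are stand-ins for mstate, _int_malloc, arena_get2 and the inlined retry blocks. What it preserves is the ordering the diff establishes: record ar_ptr->next, drop the failed arena's lock, then request a replacement while passing the failed arena as avoid_arena.

#include <stddef.h>
#include <pthread.h>

typedef struct arena_stub
{
  struct arena_stub *next;
  pthread_mutex_t mutex;
} *arena_t;

/* Stand-ins for glibc's _int_malloc and arena_get2; pick_arena returns
   its result locked, as arena_get2 does.  */
static void *alloc_from (arena_t a, size_t n) { (void) a; (void) n; return NULL; }
static arena_t pick_arena (arena_t prev, size_t n, arena_t avoid)
{
  (void) n; (void) avoid;
  if (prev)
    pthread_mutex_lock (&prev->mutex);
  return prev;
}

/* Retry path used once allocation from the locked arena AR_PTR has failed.  */
static void *
retry_allocation (arena_t ar_ptr, size_t bytes)
{
  void *victim = NULL;
  /* Grab ar_ptr->next before releasing ar_ptr's lock ...  */
  arena_t prev = ar_ptr->next ? ar_ptr : 0;
  pthread_mutex_unlock (&ar_ptr->mutex);
  /* ... then ask for another arena, telling it to avoid ar_ptr.  */
  ar_ptr = pick_arena (prev, bytes, ar_ptr);
  if (ar_ptr)
    {
      victim = alloc_from (ar_ptr, bytes);
      pthread_mutex_unlock (&ar_ptr->mutex);
    }
  return victim;
}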

malloc/malloc.c

@@ -2865,9 +2865,11 @@ __libc_malloc(size_t bytes)
       victim = _int_malloc(ar_ptr, bytes);
       (void)mutex_unlock(&ar_ptr->mutex);
     } else {
-      /* ... or sbrk() has failed and there is still a chance to mmap() */
-      ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, bytes);
-      (void)mutex_unlock(&main_arena.mutex);
+      /* ... or sbrk() has failed and there is still a chance to mmap()
+         Grab ar_ptr->next prior to releasing its lock.  */
+      mstate prev = ar_ptr->next ? ar_ptr : 0;
+      (void)mutex_unlock(&ar_ptr->mutex);
+      ar_ptr = arena_get2(prev, bytes, ar_ptr);
       if(ar_ptr) {
         victim = _int_malloc(ar_ptr, bytes);
         (void)mutex_unlock(&ar_ptr->mutex);
@@ -3043,10 +3045,11 @@ __libc_memalign(size_t alignment, size_t bytes)
     p = _int_memalign(ar_ptr, alignment, bytes);
     (void)mutex_unlock(&ar_ptr->mutex);
   } else {
-    /* ... or sbrk() has failed and there is still a chance to mmap() */
+    /* ... or sbrk() has failed and there is still a chance to mmap()
+       Grab ar_ptr->next prior to releasing its lock.  */
     mstate prev = ar_ptr->next ? ar_ptr : 0;
     (void)mutex_unlock(&ar_ptr->mutex);
-    ar_ptr = arena_get2(prev, bytes);
+    ar_ptr = arena_get2(prev, bytes, ar_ptr);
     if(ar_ptr) {
       p = _int_memalign(ar_ptr, alignment, bytes);
       (void)mutex_unlock(&ar_ptr->mutex);
@@ -3083,23 +3086,27 @@ __libc_valloc(size_t bytes)
   if(!ar_ptr)
     return 0;
   p = _int_valloc(ar_ptr, bytes);
-  (void)mutex_unlock(&ar_ptr->mutex);
   if(!p) {
     /* Maybe the failure is due to running out of mmapped areas. */
     if(ar_ptr != &main_arena) {
+      (void)mutex_unlock(&ar_ptr->mutex);
       ar_ptr = &main_arena;
       (void)mutex_lock(&ar_ptr->mutex);
       p = _int_memalign(ar_ptr, pagesz, bytes);
       (void)mutex_unlock(&ar_ptr->mutex);
     } else {
-      /* ... or sbrk() has failed and there is still a chance to mmap() */
-      ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, bytes);
+      /* ... or sbrk() has failed and there is still a chance to mmap()
+         Grab ar_ptr->next prior to releasing its lock.  */
+      mstate prev = ar_ptr->next ? ar_ptr : 0;
+      (void)mutex_unlock(&ar_ptr->mutex);
+      ar_ptr = arena_get2(prev, bytes, ar_ptr);
       if(ar_ptr) {
         p = _int_memalign(ar_ptr, pagesz, bytes);
         (void)mutex_unlock(&ar_ptr->mutex);
       }
     }
-  }
+  } else
+    (void)mutex_unlock (&ar_ptr->mutex);
 
   assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
          ar_ptr == arena_for_chunk(mem2chunk(p)));
@@ -3127,24 +3134,27 @@ __libc_pvalloc(size_t bytes)
   arena_get(ar_ptr, bytes + 2*pagesz + MINSIZE);
   p = _int_pvalloc(ar_ptr, bytes);
-  (void)mutex_unlock(&ar_ptr->mutex);
   if(!p) {
     /* Maybe the failure is due to running out of mmapped areas. */
     if(ar_ptr != &main_arena) {
+      (void)mutex_unlock(&ar_ptr->mutex);
       ar_ptr = &main_arena;
       (void)mutex_lock(&ar_ptr->mutex);
       p = _int_memalign(ar_ptr, pagesz, rounded_bytes);
       (void)mutex_unlock(&ar_ptr->mutex);
     } else {
-      /* ... or sbrk() has failed and there is still a chance to mmap() */
-      ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0,
-                          bytes + 2*pagesz + MINSIZE);
+      /* ... or sbrk() has failed and there is still a chance to mmap()
+         Grab ar_ptr->next prior to releasing its lock.  */
+      mstate prev = ar_ptr->next ? ar_ptr : 0;
+      (void)mutex_unlock(&ar_ptr->mutex);
+      ar_ptr = arena_get2(prev, bytes + 2*pagesz + MINSIZE, ar_ptr);
       if(ar_ptr) {
         p = _int_memalign(ar_ptr, pagesz, rounded_bytes);
         (void)mutex_unlock(&ar_ptr->mutex);
       }
     }
-  }
+  } else
+    (void)mutex_unlock(&ar_ptr->mutex);
 
   assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
          ar_ptr == arena_for_chunk(mem2chunk(p)));
@@ -3209,8 +3219,6 @@ __libc_calloc(size_t n, size_t elem_size)
 #endif
   mem = _int_malloc(av, sz);
 
-  /* Only clearing follows, so we can unlock early. */
-  (void)mutex_unlock(&av->mutex);
 
   assert(!mem || chunk_is_mmapped(mem2chunk(mem)) ||
          av == arena_for_chunk(mem2chunk(mem)));
@@ -3218,21 +3226,24 @@ __libc_calloc(size_t n, size_t elem_size)
   if (mem == 0) {
     /* Maybe the failure is due to running out of mmapped areas. */
     if(av != &main_arena) {
+      (void)mutex_unlock(&av->mutex);
      (void)mutex_lock(&main_arena.mutex);
      mem = _int_malloc(&main_arena, sz);
      (void)mutex_unlock(&main_arena.mutex);
    } else {
-      /* ... or sbrk() has failed and there is still a chance to mmap() */
-      (void)mutex_lock(&main_arena.mutex);
-      av = arena_get2(av->next ? av : 0, sz);
-      (void)mutex_unlock(&main_arena.mutex);
+      /* ... or sbrk() has failed and there is still a chance to mmap()
+         Grab av->next prior to releasing its lock.  */
+      mstate prev = av->next ? av : 0;
+      (void)mutex_unlock(&av->mutex);
+      av = arena_get2(prev, sz, av);
       if(av) {
         mem = _int_malloc(av, sz);
         (void)mutex_unlock(&av->mutex);
       }
     }
     if (mem == 0) return 0;
-  }
+  } else
+    (void)mutex_unlock(&av->mutex);
 
   p = mem2chunk(mem);
 
   /* Two optional cases in which clearing not necessary */