mirror of https://sourceware.org/git/glibc.git
Improve malloc initialization sequence
The current malloc initialization is quite convoluted.  Instead of
sometimes calling malloc_consolidate from ptmalloc_init, call
malloc_init_state early so that the main_arena is always initialized.
The special initialization can now be removed from malloc_consolidate.
This also fixes BZ #22159.

Check all calls to malloc_consolidate and remove calls that are redundant
initialization after ptmalloc_init, like in int_mallinfo and __libc_mallopt
(but keep the latter as consolidation is required for set_max_fast).
Update comments to improve clarity.

Remove impossible initialization check from _int_malloc, fix assert in
do_check_malloc_state to ensure arena->top != 0.  Fix the obvious bugs in
do_check_free_chunk and do_check_remalloced_chunk to enable single threaded
malloc debugging (do_check_malloc_state is not thread safe!).

	[BZ #22159]
	* malloc/arena.c (ptmalloc_init): Call malloc_init_state.
	* malloc/malloc.c (do_check_free_chunk): Fix build bug.
	(do_check_remalloced_chunk): Fix build bug.
	(do_check_malloc_state): Add assert that checks arena->top.
	(malloc_consolidate): Remove initialization.
	(int_mallinfo): Remove call to malloc_consolidate.
	(__libc_mallopt): Clarify why malloc_consolidate is needed.
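The core of the change, restated outside the diff: before this patch main_arena was initialized lazily by the first call to malloc_consolidate, while afterwards ptmalloc_init initializes the arena up front and malloc_consolidate only consolidates. The following is a minimal, self-contained sketch of the two orderings; the toy_arena type and helper names are simplified stand-ins for illustration, not glibc's internal malloc_state API.

/* Toy illustration of the initialization change; types and helpers here
   are simplified stand-ins, not glibc's real malloc internals.  */
#include <stdbool.h>
#include <stdio.h>

struct toy_arena { bool initialized; int fastbin_count; void *top; };

static struct toy_arena main_arena;

/* Roughly what malloc_init_state does: make the arena usable.  */
static void arena_init_state (struct toy_arena *a)
{
  a->initialized = true;
  a->fastbin_count = 0;
  a->top = a;                     /* stand-in for a valid top chunk */
}

/* Old scheme: consolidation doubled as lazy initialization.  */
static void consolidate_old (struct toy_arena *a)
{
  if (!a->initialized)            /* old glibc tested get_max_fast () == 0 */
    arena_init_state (a);
  else
    a->fastbin_count = 0;         /* flush fast bins back to normal bins */
}

/* New scheme: initialize once, up front; consolidation only consolidates.  */
static void toy_ptmalloc_init (struct toy_arena *a)
{
  arena_init_state (a);
}

static void consolidate_new (struct toy_arena *a)
{
  a->fastbin_count = 0;
}

int main (void)
{
  /* Old behaviour: the first consolidate call had to initialize.  */
  struct toy_arena old_style = { 0 };
  consolidate_old (&old_style);

  /* New behaviour: the arena is always valid after init, so there is no
     hidden initialization path left inside consolidation.  */
  toy_ptmalloc_init (&main_arena);
  consolidate_new (&main_arena);

  printf ("old init=%d, new init=%d\n",
          old_style.initialized, main_arena.initialized);
  return 0;
}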
ChangeLog | 11

@@ -1,3 +1,14 @@
+2017-10-17  Wilco Dijkstra  <wdijkstr@arm.com>
+
+	[BZ #22159]
+	* malloc/arena.c (ptmalloc_init): Call malloc_init_state.
+	* malloc/malloc.c (do_check_free_chunk): Fix build bug.
+	(do_check_remalloced_chunk): Fix build bug.
+	(do_check_malloc_state): Add assert that checks arena->top.
+	(malloc_consolidate): Remove initialization.
+	(int_mallinfo): Remove call to malloc_consolidate.
+	(__libc_mallopt): Clarify why malloc_consolidate is needed.
+
 2017-10-17  Wilco Dijkstra  <wdijkstr@arm.com>
 
 	* malloc/malloc.c (FASTCHUNKS_BIT): Remove.

malloc/arena.c

@@ -307,13 +307,9 @@ ptmalloc_init (void)
 
   thread_arena = &main_arena;
 
-#if HAVE_TUNABLES
-  /* Ensure initialization/consolidation and do it under a lock so that a
-     thread attempting to use the arena in parallel waits on us till we
-     finish.  */
-  __libc_lock_lock (main_arena.mutex);
-  malloc_consolidate (&main_arena);
+  malloc_init_state (&main_arena);
 
+#if HAVE_TUNABLES
   TUNABLE_GET (check, int32_t, TUNABLE_CALLBACK (set_mallopt_check));
   TUNABLE_GET (top_pad, size_t, TUNABLE_CALLBACK (set_top_pad));
   TUNABLE_GET (perturb, int32_t, TUNABLE_CALLBACK (set_perturb_byte));
@@ -322,13 +318,12 @@ ptmalloc_init (void)
   TUNABLE_GET (mmap_max, int32_t, TUNABLE_CALLBACK (set_mmaps_max));
   TUNABLE_GET (arena_max, size_t, TUNABLE_CALLBACK (set_arena_max));
   TUNABLE_GET (arena_test, size_t, TUNABLE_CALLBACK (set_arena_test));
-#if USE_TCACHE
+# if USE_TCACHE
   TUNABLE_GET (tcache_max, size_t, TUNABLE_CALLBACK (set_tcache_max));
   TUNABLE_GET (tcache_count, size_t, TUNABLE_CALLBACK (set_tcache_count));
   TUNABLE_GET (tcache_unsorted_limit, size_t,
               TUNABLE_CALLBACK (set_tcache_unsorted_limit));
-#endif
-  __libc_lock_unlock (main_arena.mutex);
+# endif
 #else
   const char *s = NULL;
   if (__glibc_likely (_environ != NULL))

malloc/malloc.c | 195

@@ -1625,8 +1625,10 @@ static INTERNAL_SIZE_T global_max_fast;
 /*
    Set value of max_fast.
    Use impossibly small value if 0.
-   Precondition: there are no existing fastbin chunks.
-   Setting the value clears fastchunk bit but preserves noncontiguous bit.
+   Precondition: there are no existing fastbin chunks in the main arena.
+   Since do_check_malloc_state () checks this, we call malloc_consolidate ()
+   before changing max_fast.  Note other arenas will leak their fast bin
+   entries if max_fast is reduced.
  */
 
 #define set_max_fast(s) \
@@ -1790,11 +1792,8 @@ static struct malloc_par mp_ =
 /*
    Initialize a malloc_state struct.
 
-   This is called only from within malloc_consolidate, which needs
-   be called in the same contexts anyway.  It is never called directly
-   outside of malloc_consolidate because some optimizing compilers try
-   to inline it at all call points, which turns out not to be an
-   optimization at all. (Inlining it in malloc_consolidate is fine though.)
+   This is called from ptmalloc_init () or from _int_new_arena ()
+   when creating a new arena.
  */
 
 static void
@@ -1972,7 +1971,7 @@ do_check_chunk (mstate av, mchunkptr p)
 static void
 do_check_free_chunk (mstate av, mchunkptr p)
 {
-  INTERNAL_SIZE_T sz = p->size & ~(PREV_INUSE | NON_MAIN_ARENA);
+  INTERNAL_SIZE_T sz = chunksize_nomask (p) & ~(PREV_INUSE | NON_MAIN_ARENA);
   mchunkptr next = chunk_at_offset (p, sz);
 
   do_check_chunk (av, p);
@@ -1987,7 +1986,7 @@ do_check_free_chunk (mstate av, mchunkptr p)
       assert ((sz & MALLOC_ALIGN_MASK) == 0);
       assert (aligned_OK (chunk2mem (p)));
       /* ... matching footer field */
-      assert (prev_size (p) == sz);
+      assert (prev_size (next_chunk (p)) == sz);
       /* ... and is fully consolidated */
       assert (prev_inuse (p));
       assert (next == av->top || inuse (next));
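A side note on the do_check_free_chunk hunk above: in a boundary-tag allocator, a free chunk's size is duplicated as a footer, and that footer lives in the prev_size field of the chunk that follows it, which is why the corrected assert reads prev_size (next_chunk (p)) rather than prev_size (p). Below is a rough sketch of that layout, using a simplified chunk header rather than glibc's real struct malloc_chunk.

/* Simplified boundary-tag layout; field names mirror the idea only.  */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct toy_chunk
{
  size_t prev_size;  /* size of the previous chunk, valid only if it is free */
  size_t size;       /* size of this chunk (glibc also packs flag bits here) */
};

int main (void)
{
  static size_t heap[512];                   /* toy heap, suitably aligned */
  size_t sz = 64;                            /* size of the free chunk p */

  struct toy_chunk *p = (struct toy_chunk *) heap;
  struct toy_chunk *next = (struct toy_chunk *) ((char *) heap + sz);

  p->size = sz;
  next->prev_size = sz;      /* the footer of p lives in the *next* chunk */

  /* The corrected check looks at the footer where it actually is ...  */
  assert (next->prev_size == p->size);
  /* ... whereas p->prev_size describes the chunk *before* p and has no
     reason to equal p's own size.  */

  puts ("footer matches");
  return 0;
}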
@@ -2047,7 +2046,7 @@ do_check_inuse_chunk (mstate av, mchunkptr p)
 static void
 do_check_remalloced_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T s)
 {
-  INTERNAL_SIZE_T sz = p->size & ~(PREV_INUSE | NON_MAIN_ARENA);
+  INTERNAL_SIZE_T sz = chunksize_nomask (p) & ~(PREV_INUSE | NON_MAIN_ARENA);
 
   if (!chunk_is_mmapped (p))
     {
@@ -2123,8 +2122,11 @@ do_check_malloc_state (mstate av)
   /* alignment is a power of 2 */
   assert ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT - 1)) == 0);
 
-  /* cannot run remaining checks until fully initialized */
-  if (av->top == 0 || av->top == initial_top (av))
+  /* Check the arena is initialized. */
+  assert (av->top != 0);
+
+  /* No memory has been allocated yet, so doing more tests is not possible.  */
+  if (av->top == initial_top (av))
     return;
 
   /* pagesize is a power of 2 */
@@ -3578,21 +3580,16 @@ _int_malloc (mstate av, size_t bytes)
 
       if ((victim = last (bin)) != bin)
         {
-          if (victim == 0) /* initialization check */
-            malloc_consolidate (av);
-          else
-            {
-              bck = victim->bk;
-              if (__glibc_unlikely (bck->fd != victim))
-                malloc_printerr
-                  ("malloc(): smallbin double linked list corrupted");
-              set_inuse_bit_at_offset (victim, nb);
-              bin->bk = bck;
-              bck->fd = bin;
+          bck = victim->bk;
+          if (__glibc_unlikely (bck->fd != victim))
+            malloc_printerr ("malloc(): smallbin double linked list corrupted");
+          set_inuse_bit_at_offset (victim, nb);
+          bin->bk = bck;
+          bck->fd = bin;
 
-              if (av != &main_arena)
-                set_non_main_arena (victim);
-              check_malloced_chunk (av, victim, nb);
+          if (av != &main_arena)
+            set_non_main_arena (victim);
+          check_malloced_chunk (av, victim, nb);
 #if USE_TCACHE
           /* While we're here, if we see other chunks of the same size,
              stash them in the tcache.  */
@@ -3619,10 +3616,9 @@ _int_malloc (mstate av, size_t bytes)
             }
         }
 #endif
-              void *p = chunk2mem (victim);
-              alloc_perturb (p, bytes);
-              return p;
-            }
+          void *p = chunk2mem (victim);
+          alloc_perturb (p, bytes);
+          return p;
         }
     }
 
@@ -4320,10 +4316,6 @@ _int_free (mstate av, mchunkptr p, int have_lock)
   purpose since, among other things, it might place chunks back onto
   fastbins.  So, instead, we need to use a minor variant of the same
   code.
-
-  Also, because this routine needs to be called the first time through
-  malloc anyway, it turns out to be the perfect place to trigger
-  initialization code.
 */
 
 static void malloc_consolidate(mstate av)
@@ -4344,84 +4336,73 @@ static void malloc_consolidate(mstate av)
   mchunkptr       bck;
   mchunkptr       fwd;
 
-  /*
-    If max_fast is 0, we know that av hasn't
-    yet been initialized, in which case do so below
-  */
-
-  if (get_max_fast () != 0) {
-    atomic_store_relaxed (&av->have_fastchunks, false);
-
-    unsorted_bin = unsorted_chunks(av);
-
-    /*
-      Remove each chunk from fast bin and consolidate it, placing it
-      then in unsorted bin. Among other reasons for doing this,
-      placing in unsorted bin avoids needing to calculate actual bins
-      until malloc is sure that chunks aren't immediately going to be
-      reused anyway.
-    */
-
-    maxfb = &fastbin (av, NFASTBINS - 1);
-    fb = &fastbin (av, 0);
-    do {
-      p = atomic_exchange_acq (fb, NULL);
-      if (p != 0) {
-        do {
-          check_inuse_chunk(av, p);
-          nextp = p->fd;
-
-          /* Slightly streamlined version of consolidation code in free() */
-          size = chunksize (p);
-          nextchunk = chunk_at_offset(p, size);
-          nextsize = chunksize(nextchunk);
-
-          if (!prev_inuse(p)) {
-            prevsize = prev_size (p);
-            size += prevsize;
-            p = chunk_at_offset(p, -((long) prevsize));
-            unlink(av, p, bck, fwd);
-          }
-
-          if (nextchunk != av->top) {
-            nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
-
-            if (!nextinuse) {
-              size += nextsize;
-              unlink(av, nextchunk, bck, fwd);
-            } else
-              clear_inuse_bit_at_offset(nextchunk, 0);
-
-            first_unsorted = unsorted_bin->fd;
-            unsorted_bin->fd = p;
-            first_unsorted->bk = p;
-
-            if (!in_smallbin_range (size)) {
-              p->fd_nextsize = NULL;
-              p->bk_nextsize = NULL;
-            }
-
-            set_head(p, size | PREV_INUSE);
-            p->bk = unsorted_bin;
-            p->fd = first_unsorted;
-            set_foot(p, size);
-          }
-
-          else {
-            size += nextsize;
-            set_head(p, size | PREV_INUSE);
-            av->top = p;
-          }
-
-        } while ( (p = nextp) != 0);
-
-      }
-    } while (fb++ != maxfb);
-  }
-  else {
-    malloc_init_state(av);
-    check_malloc_state(av);
-  }
+  atomic_store_relaxed (&av->have_fastchunks, false);
+
+  unsorted_bin = unsorted_chunks(av);
+
+  /*
+    Remove each chunk from fast bin and consolidate it, placing it
+    then in unsorted bin. Among other reasons for doing this,
+    placing in unsorted bin avoids needing to calculate actual bins
+    until malloc is sure that chunks aren't immediately going to be
+    reused anyway.
+  */
+
+  maxfb = &fastbin (av, NFASTBINS - 1);
+  fb = &fastbin (av, 0);
+  do {
+    p = atomic_exchange_acq (fb, NULL);
+    if (p != 0) {
+      do {
+        check_inuse_chunk(av, p);
+        nextp = p->fd;
+
+        /* Slightly streamlined version of consolidation code in free() */
+        size = chunksize (p);
+        nextchunk = chunk_at_offset(p, size);
+        nextsize = chunksize(nextchunk);
+
+        if (!prev_inuse(p)) {
+          prevsize = prev_size (p);
+          size += prevsize;
+          p = chunk_at_offset(p, -((long) prevsize));
+          unlink(av, p, bck, fwd);
+        }
+
+        if (nextchunk != av->top) {
+          nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
+
+          if (!nextinuse) {
+            size += nextsize;
+            unlink(av, nextchunk, bck, fwd);
+          } else
+            clear_inuse_bit_at_offset(nextchunk, 0);
+
+          first_unsorted = unsorted_bin->fd;
+          unsorted_bin->fd = p;
+          first_unsorted->bk = p;
+
+          if (!in_smallbin_range (size)) {
+            p->fd_nextsize = NULL;
+            p->bk_nextsize = NULL;
+          }
+
+          set_head(p, size | PREV_INUSE);
+          p->bk = unsorted_bin;
+          p->fd = first_unsorted;
+          set_foot(p, size);
+        }
+
+        else {
+          size += nextsize;
+          set_head(p, size | PREV_INUSE);
+          av->top = p;
+        }
+
+      } while ( (p = nextp) != 0);
+
+    }
+  } while (fb++ != maxfb);
 }
 
 /*
@@ -4688,7 +4669,7 @@ _int_memalign (mstate av, size_t alignment, size_t bytes)
 static int
 mtrim (mstate av, size_t pad)
 {
-  /* Ensure initialization/consolidation */
+  /* Ensure all blocks are consolidated.  */
   malloc_consolidate (av);
 
   const size_t ps = GLRO (dl_pagesize);
@@ -4819,10 +4800,6 @@ int_mallinfo (mstate av, struct mallinfo *m)
   int nblocks;
   int nfastblocks;
 
-  /* Ensure initialization */
-  if (av->top == 0)
-    malloc_consolidate (av);
-
   check_malloc_state (av);
 
   /* Account for top */
@@ -5071,11 +5048,13 @@ __libc_mallopt (int param_number, int value)
   if (__malloc_initialized < 0)
     ptmalloc_init ();
   __libc_lock_lock (av->mutex);
-  /* Ensure initialization/consolidation */
-  malloc_consolidate (av);
 
   LIBC_PROBE (memory_mallopt, 2, param_number, value);
 
+  /* We must consolidate main arena before changing max_fast
+     (see definition of set_max_fast).  */
+  malloc_consolidate (av);
+
   switch (param_number)
     {
     case M_MXFAST:
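Finally, the __libc_mallopt hunk above keeps malloc_consolidate but moves it next to the switch and documents why it stays: set_max_fast assumes there are no live fastbin chunks in the main arena (see the updated comment in the first malloc.c hunk). The sketch below illustrates that ordering; the helper names are placeholders for illustration, not glibc's internal API.

/* Ordering sketch for the M_MXFAST case; placeholder helpers only.  */
#include <stdbool.h>
#include <stdio.h>

static int live_fastbin_chunks;  /* chunks currently sitting in fastbins */
static int max_fast = 128;       /* current fastbin size limit */

/* ~ malloc_consolidate: moves fastbin chunks back to normal bins.  */
static void consolidate (void)
{
  live_fastbin_chunks = 0;
}

/* ~ set_max_fast's precondition: no existing fastbin chunks, otherwise
   chunks already queued above the new limit would be stranded.  */
static bool set_max_fast_checked (int value)
{
  if (live_fastbin_chunks != 0)
    return false;
  max_fast = value;
  return true;
}

/* ~ the M_MXFAST branch of mallopt after this patch.  */
static bool toy_mallopt_mxfast (int value)
{
  consolidate ();                    /* must run before changing max_fast */
  return set_max_fast_checked (value);
}

int main (void)
{
  live_fastbin_chunks = 3;           /* pretend some frees hit the fastbins */
  if (toy_mallopt_mxfast (32))
    printf ("max_fast is now %d\n", max_fast);
  return 0;
}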