
Consolidate valloc/pvalloc code.

To make the malloc code more maintainable, we make valloc and pvalloc share
logic with memalign.
Ondřej Bílka
2013-11-20 15:46:02 +01:00
parent 4712799fbb
commit 10ad46bc65
4 changed files with 44 additions and 112 deletions
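
As a rough sketch of the resulting shape (a standalone illustration only, not glibc's internal code: mid_memalign_demo stands in for the new _mid_memalign, with posix_memalign as a placeholder back end), valloc and pvalloc become thin wrappers over a single memalign-style helper:

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/* Stand-in for the new _mid_memalign: the one place where, in glibc,
   the hook dispatch, alignment rounding and retry logic would live.  */
static void *
mid_memalign_demo (size_t alignment, size_t bytes)
{
  void *p = NULL;
  if (posix_memalign (&p, alignment, bytes) != 0)
    return NULL;
  return p;
}

/* valloc: page-aligned allocation of the requested size.  */
static void *
valloc_demo (size_t bytes)
{
  size_t pagesz = (size_t) sysconf (_SC_PAGESIZE);
  return mid_memalign_demo (pagesz, bytes);
}

/* pvalloc: like valloc, but the size is first rounded up to a whole
   number of pages.  */
static void *
pvalloc_demo (size_t bytes)
{
  size_t pagesz = (size_t) sysconf (_SC_PAGESIZE);
  size_t rounded = (bytes + pagesz - 1) & ~(pagesz - 1);
  return mid_memalign_demo (pagesz, rounded);
}

int main (void)
{
  void *v = valloc_demo (100);
  void *p = pvalloc_demo (100);
  printf ("valloc-like %p, pvalloc-like %p\n", v, p);
  free (v);
  free (p);
  return 0;
}

The only difference between the two wrappers is that pvalloc rounds the request up to a whole number of pages before delegating, which is exactly what the consolidated code in the diff below does.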

ChangeLog

@@ -1,3 +1,12 @@
+2013-11-20  Ondřej Bílka  <neleai@seznam.cz>
+
+	* malloc/hooks.c (memalign_check): Add alignment rounding.
+	* malloc/malloc.c (_mid_memalign): New function.
+	(__libc_valloc, __libc_pvalloc, __libc_memalign, __posix_memalign):
+	Implement by calling _mid_memalign.
+	* manual/probes.texi (Memory Allocation Probes): Remove
+	memory_valloc_retry and memory_pvalloc_retry.
+
 2013-11-20  Siddhesh Poyarekar  <siddhesh@redhat.com>
 
 	* locale/programs/locarchive.c (open_archive): Add const

malloc/hooks.c

@@ -376,6 +376,13 @@ memalign_check(size_t alignment, size_t bytes, const void *caller)
       return 0;
     }
 
+  /* Make sure alignment is power of 2.  */
+  if (!powerof2(alignment)) {
+    size_t a = MALLOC_ALIGNMENT * 2;
+    while (a < alignment) a <<= 1;
+    alignment = a;
+  }
+
   (void)mutex_lock(&main_arena.mutex);
   mem = (top_check() >= 0) ? _int_memalign(&main_arena, alignment, bytes+1) :
     NULL;
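
The loop added above (and again in _mid_memalign in malloc/malloc.c below) rounds a non-power-of-two alignment up to the next power of two, starting from MALLOC_ALIGNMENT * 2; memalign_check needs it now because this commit removes the equivalent rounding from _int_memalign. A standalone sketch of the same computation, assuming 16 for the platform-dependent MALLOC_ALIGNMENT * 2:

#include <stddef.h>
#include <stdio.h>

/* Same rounding as the hunk above; 16 stands in for the
   platform-dependent MALLOC_ALIGNMENT * 2.  */
static size_t
round_alignment (size_t alignment)
{
  /* Same test as !powerof2() for nonzero values.  */
  if ((alignment & (alignment - 1)) != 0)
    {
      size_t a = 16;
      while (a < alignment)
        a <<= 1;
      alignment = a;
    }
  return alignment;
}

int main (void)
{
  printf ("24 -> %zu\n", round_alignment (24));   /* not a power of 2: rounds up to 32 */
  printf ("64 -> %zu\n", round_alignment (64));   /* already a power of 2: unchanged */
  return 0;
}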

malloc/malloc.c

@@ -1054,8 +1054,8 @@ static void _int_free(mstate, mchunkptr, int);
 static void*  _int_realloc(mstate, mchunkptr, INTERNAL_SIZE_T,
                            INTERNAL_SIZE_T);
 static void*  _int_memalign(mstate, size_t, size_t);
-static void*  _int_valloc(mstate, size_t);
-static void*  _int_pvalloc(mstate, size_t);
+static void*  _mid_memalign(size_t, size_t, void *);
+
 static void   malloc_printerr(int action, const char *str, void *ptr);
 
 static void* internal_function mem2mem_check(void *p, size_t sz);
@@ -3001,6 +3001,13 @@ libc_hidden_def (__libc_realloc)
 
 void*
 __libc_memalign(size_t alignment, size_t bytes)
+{
+  void *address = RETURN_ADDRESS (0);
+  return _mid_memalign (alignment, bytes, address);
+}
+
+static void *
+_mid_memalign (size_t alignment, size_t bytes, void *address)
 {
   mstate ar_ptr;
   void *p;
@@ -3008,9 +3015,9 @@ __libc_memalign(size_t alignment, size_t bytes)
   void *(*hook) (size_t, size_t, const void *) =
     force_reg (__memalign_hook);
   if (__builtin_expect (hook != NULL, 0))
-    return (*hook)(alignment, bytes, RETURN_ADDRESS (0));
+    return (*hook)(alignment, bytes, address);
 
-  /* If need less alignment than we give anyway, just relay to malloc */
+  /* If we need less alignment than we give anyway, just relay to malloc.  */
   if (alignment <= MALLOC_ALIGNMENT) return __libc_malloc(bytes);
 
   /* Otherwise, ensure that it is at least a minimum chunk size */
@@ -3031,6 +3038,14 @@ __libc_memalign(size_t alignment, size_t bytes)
       return 0;
     }
 
+  /* Make sure alignment is power of 2.  */
+  if (!powerof2(alignment)) {
+    size_t a = MALLOC_ALIGNMENT * 2;
+    while (a < alignment) a <<= 1;
+    alignment = a;
+  }
+
   arena_get(ar_ptr, bytes + alignment + MINSIZE);
   if(!ar_ptr)
     return 0;
@@ -3055,54 +3070,22 @@ libc_hidden_def (__libc_memalign)
 
 void*
 __libc_valloc(size_t bytes)
 {
-  mstate ar_ptr;
-  void *p;
-
   if(__malloc_initialized < 0)
     ptmalloc_init ();
 
+  void *address = RETURN_ADDRESS (0);
   size_t pagesz = GLRO(dl_pagesize);
-
-  /* Check for overflow.  */
-  if (bytes > SIZE_MAX - pagesz - MINSIZE)
-    {
-      __set_errno (ENOMEM);
-      return 0;
-    }
-
-  void *(*hook) (size_t, size_t, const void *) =
-    force_reg (__memalign_hook);
-  if (__builtin_expect (hook != NULL, 0))
-    return (*hook)(pagesz, bytes, RETURN_ADDRESS (0));
-
-  arena_get(ar_ptr, bytes + pagesz + MINSIZE);
-  if(!ar_ptr)
-    return 0;
-  p = _int_valloc(ar_ptr, bytes);
-  if(!p) {
-    LIBC_PROBE (memory_valloc_retry, 1, bytes);
-    ar_ptr = arena_get_retry (ar_ptr, bytes);
-    if (__builtin_expect(ar_ptr != NULL, 1)) {
-      p = _int_memalign(ar_ptr, pagesz, bytes);
-      (void)mutex_unlock(&ar_ptr->mutex);
-    }
-  } else
-    (void)mutex_unlock (&ar_ptr->mutex);
-  assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
-         ar_ptr == arena_for_chunk(mem2chunk(p)));
-  return p;
+  return _mid_memalign (pagesz, bytes, address);
 }
 
 void*
 __libc_pvalloc(size_t bytes)
 {
-  mstate ar_ptr;
-  void *p;
-
   if(__malloc_initialized < 0)
     ptmalloc_init ();
 
+  void *address = RETURN_ADDRESS (0);
   size_t pagesz = GLRO(dl_pagesize);
   size_t page_mask = GLRO(dl_pagesize) - 1;
   size_t rounded_bytes = (bytes + page_mask) & ~(page_mask);
@@ -3114,26 +3097,7 @@ __libc_pvalloc(size_t bytes)
       return 0;
     }
 
-  void *(*hook) (size_t, size_t, const void *) =
-    force_reg (__memalign_hook);
-  if (__builtin_expect (hook != NULL, 0))
-    return (*hook)(pagesz, rounded_bytes, RETURN_ADDRESS (0));
-
-  arena_get(ar_ptr, bytes + 2*pagesz + MINSIZE);
-  p = _int_pvalloc(ar_ptr, bytes);
-  if(!p) {
-    LIBC_PROBE (memory_pvalloc_retry, 1, bytes);
-    ar_ptr = arena_get_retry (ar_ptr, bytes + 2*pagesz + MINSIZE);
-    if (__builtin_expect(ar_ptr != NULL, 1)) {
-      p = _int_memalign(ar_ptr, pagesz, rounded_bytes);
-      (void)mutex_unlock(&ar_ptr->mutex);
-    }
-  } else
-    (void)mutex_unlock(&ar_ptr->mutex);
-  assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
-         ar_ptr == arena_for_chunk(mem2chunk(p)));
-  return p;
+  return _mid_memalign (pagesz, rounded_bytes, address);
 }
 
 void*
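
A note on ordering in __libc_pvalloc above: rounded_bytes is computed from bytes with plain wrapping arithmetic, so the overflow check (the return 0 path at the top of this hunk) is what keeps a wrapped value from ever reaching the allocator. A standalone demonstration, assuming a 4096-byte page and a MINSIZE_DEMO stand-in for glibc's MINSIZE (the exact bound glibc uses sits just above the displayed context):

#include <stdint.h>
#include <stdio.h>

#define MINSIZE_DEMO 32   /* stand-in for glibc's MINSIZE */

int main (void)
{
  size_t pagesz = 4096;            /* assumed page size */
  size_t page_mask = pagesz - 1;
  size_t bytes = SIZE_MAX - 100;   /* pathological request */

  /* Unchecked page rounding wraps: SIZE_MAX - 100 "rounds" to 0.  */
  size_t rounded_bytes = (bytes + page_mask) & ~page_mask;
  printf ("rounded_bytes = %zu\n", rounded_bytes);

  /* The guard rejects such requests up front, so the wrapped value
     is never handed to the allocator.  */
  if (bytes > SIZE_MAX - 2 * pagesz - MINSIZE_DEMO)
    puts ("rejected with ENOMEM");
  return 0;
}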
@@ -4318,20 +4282,7 @@ _int_memalign(mstate av, size_t alignment, size_t bytes)
   unsigned long remainder_size; /* its size */
   INTERNAL_SIZE_T size;
 
-  /* If need less alignment than we give anyway, just relay to malloc */
-
-  if (alignment <= MALLOC_ALIGNMENT) return _int_malloc(av, bytes);
-
-  /* Otherwise, ensure that it is at least a minimum chunk size */
-
-  if (alignment <  MINSIZE) alignment = MINSIZE;
-
-  /* Make sure alignment is power of 2 (in case MINSIZE is not).  */
-  if ((alignment & (alignment - 1)) != 0) {
-    size_t a = MALLOC_ALIGNMENT * 2;
-    while ((unsigned long)a < (unsigned long)alignment) a <<= 1;
-    alignment = a;
-  }
-
   checked_request2size(bytes, nb);
@@ -4405,35 +4356,6 @@ _int_memalign(mstate av, size_t alignment, size_t bytes)
 }
 
-/*
-  ------------------------------ valloc ------------------------------
-*/
-
-static void*
-_int_valloc(mstate av, size_t bytes)
-{
-  /* Ensure initialization/consolidation */
-  if (have_fastchunks(av)) malloc_consolidate(av);
-  return _int_memalign(av, GLRO(dl_pagesize), bytes);
-}
-
-/*
-  ------------------------------ pvalloc ------------------------------
-*/
-
-static void*
-_int_pvalloc(mstate av, size_t bytes)
-{
-  size_t pagesz;
-
-  /* Ensure initialization/consolidation */
-  if (have_fastchunks(av)) malloc_consolidate(av);
-  pagesz = GLRO(dl_pagesize);
-  return _int_memalign(av, pagesz, (bytes + pagesz - 1) & ~(pagesz - 1));
-}
-
 /*
   ------------------------------ malloc_trim ------------------------------
 */
@@ -4968,14 +4890,9 @@ __posix_memalign (void **memptr, size_t alignment, size_t size)
       || alignment == 0)
     return EINVAL;
 
-  /* Call the hook here, so that caller is posix_memalign's caller
-     and not posix_memalign itself.  */
-  void *(*hook) (size_t, size_t, const void *) =
-    force_reg (__memalign_hook);
-  if (__builtin_expect (hook != NULL, 0))
-    mem = (*hook)(alignment, size, RETURN_ADDRESS (0));
-  else
-    mem = __libc_memalign (alignment, size);
+  void *address = RETURN_ADDRESS (0);
+  mem = _mid_memalign (alignment, size, address);
 
   if (mem != NULL) {
     *memptr = mem;
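
The address parameter is why _mid_memalign exists as a separate function rather than having every caller go through __libc_memalign: each exported entry point captures RETURN_ADDRESS (0) itself, so a registered __memalign_hook still sees the application's call site instead of an internal glibc frame. A minimal standalone sketch of the pattern, using GCC's __builtin_return_address, which glibc's RETURN_ADDRESS macro wraps; demo_hook, memalign_demo and mid_memalign_demo are hypothetical names:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for glibc's __memalign_hook.  */
static void *(*demo_hook) (size_t, size_t, const void *);

/* Shared helper: receives the caller address as a parameter instead
   of taking its own, so the hook sees the real call site.  */
static void *
mid_memalign_demo (size_t alignment, size_t bytes, void *address)
{
  if (demo_hook != NULL)
    return demo_hook (alignment, bytes, address);
  void *p = NULL;
  if (posix_memalign (&p, alignment, bytes) != 0)
    return NULL;
  return p;
}

/* Public wrapper: records its own return address before delegating.
   noinline keeps __builtin_return_address (0) meaningful.  */
static void * __attribute__ ((noinline))
memalign_demo (size_t alignment, size_t bytes)
{
  void *address = __builtin_return_address (0);
  return mid_memalign_demo (alignment, bytes, address);
}

static void *
logging_hook (size_t alignment, size_t bytes, const void *caller)
{
  printf ("memalign (%zu, %zu) from %p\n", alignment, bytes, caller);
  return NULL;
}

int main (void)
{
  demo_hook = logging_hook;
  memalign_demo (64, 100);   /* the hook reports this call site in main */
  return 0;
}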

manual/probes.texi

@@ -71,8 +71,6 @@ heap is released.  Argument @var{$arg1} is a pointer to the heap, and
 @deftp Probe memory_malloc_retry (size_t @var{$arg1})
 @deftpx Probe memory_realloc_retry (size_t @var{$arg1}, void *@var{$arg2})
 @deftpx Probe memory_memalign_retry (size_t @var{$arg1}, size_t @var{$arg2})
-@deftpx Probe memory_valloc_retry (size_t @var{$arg1})
-@deftpx Probe memory_pvalloc_retry (size_t @var{$arg1})
 @deftpx Probe memory_calloc_retry (size_t @var{$arg1})
 These probes are triggered when the corresponding functions fail to
 obtain the requested amount of memory from the arena in use, before they
@@ -83,7 +81,8 @@ computed from both function arguments.  In the @code{realloc} case,
 @var{$arg2} is the pointer to the memory area being resized.  In the
 @code{memalign} case, @var{$arg2} is the alignment to be used for the
 request, which may be stricter than the value passed to the
-@code{memalign} function.
+@code{memalign} function.  A @code{memalign} probe is also used by functions
+@code{posix_memalign, valloc} and @code{pvalloc}.
 
 Note that the argument order does @emph{not} match that of the
 corresponding two-argument functions, so that in all of these probes the