mirror of https://sourceware.org/git/glibc.git
malloc: Improve arena_for_chunk()
Change heap_max_size() to improve performance of arena_for_chunk(). Instead of a complex calculation, use a simple mask operation to get the arena base pointer. HEAP_MAX_SIZE should be larger than the huge page size, otherwise heaps will not use huge pages.

On AArch64 this removes 6 instructions from arena_for_chunk(), and bench-malloc-thread improves by 1.1% - 1.8%.

Reviewed-by: Adhemerval Zanella <adhemerval.zanella@linaro.org>
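For context, here is a minimal sketch of the mask-based lookup the commit message refers to. It is modeled loosely on glibc's heap_for_ptr()/arena_for_chunk() helpers in malloc/arena.c; the HEAP_MAX_SIZE value and the heap_info layout below are simplified placeholders, not the upstream definitions.

#include <stdint.h>

struct malloc_state;                        /* opaque here */

#define HEAP_MAX_SIZE (64 * 1024 * 1024)    /* placeholder; must be a power of two */

typedef struct heap_info
{
  struct malloc_state *ar_ptr;              /* arena owning this heap */
  /* ... other fields omitted ... */
} heap_info;

/* Every non-main heap is mapped at an address aligned to HEAP_MAX_SIZE,
   so masking off the low bits of a chunk address recovers the heap_info
   at the heap's base.  With HEAP_MAX_SIZE a compile-time constant this
   is a single AND instruction instead of a runtime size computation.  */
static inline heap_info *
heap_for_ptr (void *ptr)
{
  return (heap_info *) ((uintptr_t) ptr & ~((uintptr_t) HEAP_MAX_SIZE - 1));
}

static inline struct malloc_state *
arena_for_chunk (void *chunk)
{
  return heap_for_ptr (chunk)->ar_ptr;      /* main-arena special case omitted */
}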
--- a/malloc/arena.c
+++ b/malloc/arena.c
@@ -40,19 +40,20 @@
    mmap threshold, so that requests with a size just below that
    threshold can be fulfilled without creating too many heaps.  */
 
-/* When huge pages are used to create new arenas, the maximum and minimum
-   size are based on the runtime defined huge page size.  */
+/* HEAP_MAX_SIZE should be larger than the huge page size, otherwise heaps will
+   use not huge pages.  It is a constant so arena_for_chunk() is efficient.  */
 
 static inline size_t
 heap_min_size (void)
 {
-  return mp_.hp_pagesize == 0 ? HEAP_MIN_SIZE : mp_.hp_pagesize;
+  return mp_.hp_pagesize == 0 || mp_.hp_pagesize > HEAP_MAX_SIZE
+	   ? HEAP_MIN_SIZE : mp_.hp_pagesize;
 }
 
 static inline size_t
 heap_max_size (void)
 {
-  return mp_.hp_pagesize == 0 ? HEAP_MAX_SIZE : mp_.hp_pagesize * 4;
+  return HEAP_MAX_SIZE;
 }
 
 /***************************************************************************/
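As an aside, a small standalone program (not glibc code; HEAP_MIN_SIZE and HEAP_MAX_SIZE below are placeholder values) illustrates what the reworked heap_min_size()/heap_max_size() return for the three interesting settings of the huge page size.

#include <stdio.h>
#include <stddef.h>

#define HEAP_MIN_SIZE (32 * 1024)           /* placeholder */
#define HEAP_MAX_SIZE (64 * 1024 * 1024)    /* placeholder */

/* Mimics the patched heap_min_size(): a huge page size larger than
   HEAP_MAX_SIZE is ignored, so such configurations fall back to the
   default minimum heap size.  */
static size_t
min_size_for (size_t hp_pagesize)
{
  return hp_pagesize == 0 || hp_pagesize > HEAP_MAX_SIZE
	 ? HEAP_MIN_SIZE : hp_pagesize;
}

int
main (void)
{
  /* Disabled, 2 MiB huge pages, 512 MiB huge pages.  */
  size_t cases[] = { 0, 2UL * 1024 * 1024, 512UL * 1024 * 1024 };
  for (int i = 0; i < 3; i++)
    printf ("hp_pagesize=%zu -> heap_min_size=%zu heap_max_size=%zu\n",
	    cases[i], min_size_for (cases[i]), (size_t) HEAP_MAX_SIZE);
  return 0;
}

The maximum is always the constant HEAP_MAX_SIZE now, which is what lets arena_for_chunk() reduce to a mask.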
@@ -313,7 +314,7 @@ ptmalloc_init (void)
   TUNABLE_GET (mxfast, size_t, TUNABLE_CALLBACK (set_mxfast));
   TUNABLE_GET (hugetlb, size_t, TUNABLE_CALLBACK (set_hugetlb));
 
-  if (mp_.hp_pagesize > 0)
+  if (mp_.hp_pagesize > 0 && mp_.hp_pagesize <= heap_max_size ())
     {
       /* Force mmap for main arena instead of sbrk, so MAP_HUGETLB is always
 	 tried.  Also tune the mmap threshold, so allocation smaller than the
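Note that mp_.hp_pagesize is only nonzero when explicit huge pages are requested through the glibc.malloc.hugetlb tunable (value 2 requests the system default huge page size; see the tunable documentation for other values), so the added heap_max_size() check only matters for runs such as the following, where ./app stands for any program:

  $ GLIBC_TUNABLES=glibc.malloc.hugetlb=2 ./app

If the selected huge page size exceeds HEAP_MAX_SIZE, a single huge page could not fit within a heap, so the new checks make malloc skip huge pages for heaps and fall back to normal pages.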
@@ -460,7 +461,7 @@ alloc_new_heap (size_t size, size_t top_pad, size_t pagesize,
 static heap_info *
 new_heap (size_t size, size_t top_pad)
 {
-  if (__glibc_unlikely (mp_.hp_pagesize != 0))
+  if (mp_.hp_pagesize != 0 && mp_.hp_pagesize <= heap_max_size ())
     {
       heap_info *h = alloc_new_heap (size, top_pad, mp_.hp_pagesize,
 				     mp_.hp_flags);