malloc: Change mmap chunk layout
Change the mmap chunk layout to be identical to a normal chunk. This makes it safe for tcache to hold mmap chunks and simplifies the size calculations in memsize and musable. Add mmap_base () and mmap_size () inline helpers to simplify the code.

Reviewed-by: Cupertino Miranda <cupertino.miranda@oracle.com>
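For orientation, here is a sketch of the resulting mmap chunk layout (a reader's aid, not part of the commit; names follow malloc.c). A mapping of size bytes starting at mm holds one chunk p at mm + padding:

    mm = mmap_base (p)          p                               mm + size
    |<- padding = prev_size ->|<------ chunksize (p) ------>|<- CHUNK_HDR_SZ ->|
                              | prev_size | size|IS_MMAPPED | user data ...

The trailing CHUNK_HDR_SZ bytes stand in for the header of the "next" chunk, so chunksize (p) is counted exactly as for a regular heap chunk, and mmap_size (p) = prev_size (p) + chunksize (p) + CHUNK_HDR_SZ recovers the whole mapping.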
@@ -1458,7 +1458,7 @@ checked_request2size (size_t req) __nonnull (1)
 #define memsize(p)                                                    \
   (__MTAG_GRANULE_SIZE > SIZE_SZ && __glibc_unlikely (mtag_enabled) ? \
     chunksize (p) - CHUNK_HDR_SZ :                                    \
-    chunksize (p) - CHUNK_HDR_SZ + (chunk_is_mmapped (p) ? 0 : SIZE_SZ))
+    chunksize (p) - CHUNK_HDR_SZ + SIZE_SZ)
 
 /* If memory tagging is enabled the layout changes to accommodate the granule
    size, this is wasteful for small allocations so not done by default.
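With the unified layout the same arithmetic now applies to every in-use chunk: usable bytes = chunksize (p) - CHUNK_HDR_SZ + SIZE_SZ, which, since CHUNK_HDR_SZ == 2 * SIZE_SZ, is simply chunksize (p) - SIZE_SZ. For example, on a 64-bit target (SIZE_SZ == 8) a chunk with chunksize (p) == 4080 has 4072 usable bytes whether it is mmapped or not.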
@@ -1477,6 +1477,20 @@ tag_new_usable (void *ptr)
   return ptr;
 }
 
+/* Return pointer to mmap base from a chunk with IS_MMAPPED set.  */
+static __always_inline void *
+mmap_base (mchunkptr p)
+{
+  return (char *) p - prev_size (p);
+}
+
+/* Return total mmap size of a chunk with IS_MMAPPED set.  */
+static __always_inline size_t
+mmap_size (mchunkptr p)
+{
+  return prev_size (p) + chunksize (p) + CHUNK_HDR_SZ;
+}
+
 /*
    -------------------- Internal data structures --------------------
 
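A minimal consistency sketch for the two helpers (a hypothetical check placed next to them in malloc.c, not part of the commit): if p was carved out of a mapping of size bytes starting at mm, as sysmalloc_mmap below does, the helpers invert that construction:

    /* Hypothetical check: P lives in a mapping of SIZE bytes at MM.  */
    static void
    check_mmap_helpers (mchunkptr p, char *mm, size_t size)
    {
      /* mmap_base undoes the padding: (char *) p - prev_size (p) == mm.  */
      assert ((char *) mmap_base (p) == mm);
      /* mmap_size re-adds the padding and the trailing header: == size.  */
      assert (mmap_size (p) == size);
    }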
@@ -2105,7 +2119,7 @@ do_check_chunk (mstate av, mchunkptr p)
   else
     {
       /* chunk is page-aligned */
-      assert (((prev_size (p) + sz) & (GLRO (dl_pagesize) - 1)) == 0);
+      assert ((mmap_size (p) & (GLRO (dl_pagesize) - 1)) == 0);
       /* mem is aligned */
      assert (!misaligned_chunk (p));
     }
@@ -2405,14 +2419,14 @@ do_check_malloc_state (mstate av)
 
 /* Allocate a mmap chunk - used for large block sizes or as a fallback.
    Round up size to nearest page.  Add padding if MALLOC_ALIGNMENT is
-   larger than CHUNK_HDR_SZ.  Add SIZE_SZ at the end since there is no
-   following chunk whose prev_size field could be used.  */
+   larger than CHUNK_HDR_SZ.  Add CHUNK_HDR_SZ at the end so that mmap
+   chunks have the same layout as regular chunks.  */
 
 static void *
 sysmalloc_mmap (INTERNAL_SIZE_T nb, size_t pagesize, int extra_flags)
 {
   size_t padding = MALLOC_ALIGNMENT - CHUNK_HDR_SZ;
-  size_t size = ALIGN_UP (nb + padding + SIZE_SZ, pagesize);
+  size_t size = ALIGN_UP (nb + padding + CHUNK_HDR_SZ, pagesize);
 
   char *mm = (char *) MMAP (NULL, size,
                             mtag_mmap_flags | PROT_READ | PROT_WRITE,
@@ -2427,7 +2441,7 @@ sysmalloc_mmap (INTERNAL_SIZE_T nb, size_t pagesize, int extra_flags)
   /* Store offset to start of mmap in prev_size.  */
   mchunkptr p = (mchunkptr) (mm + padding);
   set_prev_size (p, padding);
-  set_head (p, (size - padding) | IS_MMAPPED);
+  set_head (p, (size - padding - CHUNK_HDR_SZ) | IS_MMAPPED);
 
   /* update statistics */
   int new = atomic_fetch_add_relaxed (&mp_.n_mmaps, 1) + 1;
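Chasing the two stores shows the helpers invert them exactly (a worked check, not commit code):

    prev_size (p) = padding
    chunksize (p) = size - padding - CHUNK_HDR_SZ
    mmap_base (p) = (mm + padding) - padding               = mm
    mmap_size (p) = padding + chunksize (p) + CHUNK_HDR_SZ = size

so munmap_chunk below can hand mmap_base/mmap_size straight to the unmap call.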
@@ -2977,13 +2991,12 @@ static void
 munmap_chunk (mchunkptr p)
 {
   size_t pagesize = GLRO (dl_pagesize);
-  INTERNAL_SIZE_T size = chunksize (p);
 
   assert (chunk_is_mmapped (p));
 
   uintptr_t mem = (uintptr_t) chunk2mem (p);
-  uintptr_t block = (uintptr_t) p - prev_size (p);
-  size_t total_size = prev_size (p) + size;
+  uintptr_t block = (uintptr_t) mmap_base (p);
+  size_t total_size = mmap_size (p);
   /* Unfortunately we have to do the compilers job by hand here.  Normally
      we would test BLOCK and TOTAL-SIZE separately for compliance with the
      page size.  But gcc does not recognize the optimization possibility
@@ -3014,15 +3027,15 @@ mremap_chunk (mchunkptr p, size_t new_size)
 
   assert (chunk_is_mmapped (p));
 
-  uintptr_t block = (uintptr_t) p - offset;
+  uintptr_t block = (uintptr_t) mmap_base (p);
   uintptr_t mem = (uintptr_t) chunk2mem(p);
-  size_t total_size = offset + size;
+  size_t total_size = mmap_size (p);
   if (__glibc_unlikely ((block | total_size) & (pagesize - 1)) != 0
       || __glibc_unlikely (!powerof2 (mem & (pagesize - 1))))
     malloc_printerr("mremap_chunk(): invalid pointer");
 
-  /* Note the extra SIZE_SZ overhead as in mmap_chunk().  */
-  new_size = ALIGN_UP (new_size + offset + SIZE_SZ, pagesize);
+  /* Note the extra CHUNK_HDR_SZ overhead as in mmap_chunk().  */
+  new_size = ALIGN_UP (new_size + offset + CHUNK_HDR_SZ, pagesize);
 
   /* No need to remap if the number of pages does not change.  */
   if (total_size == new_size)
@@ -3041,7 +3054,7 @@ mremap_chunk (mchunkptr p, size_t new_size)
   assert (!misaligned_chunk (p));
 
   assert (prev_size (p) == offset);
-  set_head (p, (new_size - offset) | IS_MMAPPED);
+  set_head (p, (new_size - offset - CHUNK_HDR_SZ) | IS_MMAPPED);
 
   INTERNAL_SIZE_T new;
   new = atomic_fetch_add_relaxed (&mp_.mmapped_mem, new_size - size - offset)
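A worked example of the new accounting (illustrative numbers: 4096-byte pages on a 64-bit target, so CHUNK_HDR_SZ == 16, and offset == 0 when MALLOC_ALIGNMENT == CHUNK_HDR_SZ): a request to grow a chunk to new_size == 5000 yields ALIGN_UP (5000 + 0 + 16, 4096) == 8192; set_head then stores 8192 - 0 - 16 == 8176, so chunksize (p) == 8176 and mmap_size (p) == 0 + 8176 + 16 == 8192, matching the remapped length.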
@@ -3340,11 +3353,6 @@ tcache_init (void)
   if (tcache_shutting_down)
     return;
 
-  /* Check minimum mmap chunk is larger than max tcache size.  This means
-     mmap chunks with their different layout are never added to tcache.  */
-  if (MAX_TCACHE_SMALL_SIZE >= GLRO (dl_pagesize) / 2)
-    malloc_printerr ("max tcache size too large");
-
   size_t bytes = sizeof (tcache_perthread_struct);
   tcache = (tcache_perthread_struct *) __libc_malloc2 (bytes);
@@ -3492,7 +3500,6 @@ __libc_free (void *mem)
     {
       tc_idx = large_csize2tidx (size);
       if (size >= MINSIZE
-          && !chunk_is_mmapped (p)
          && __glibc_likely (tcache->num_slots[tc_idx] != 0))
        return tcache_put_large (p, tc_idx);
     }
@@ -5279,7 +5286,7 @@ musable (void *mem)
   mchunkptr p = mem2chunk (mem);
 
   if (chunk_is_mmapped (p))
-    return chunksize (p) - CHUNK_HDR_SZ;
+    return memsize (p);
   else if (inuse (p))
     return memsize (p);
 
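Since musable now takes the memsize path for mmapped chunks as well, malloc_usable_size reports the unified formula for both allocation paths. A small standalone demonstration (uses only public glibc APIs; assumes the 1 MiB request exceeds the default mmap threshold so it is served by mmap):

    #include <malloc.h>
    #include <stdio.h>
    #include <stdlib.h>

    int
    main (void)
    {
      void *big = malloc (1024 * 1024);   /* Large: typically mmapped.  */
      void *small = malloc (100);         /* Small: from the heap.  */
      printf ("big: %zu usable, small: %zu usable\n",
              malloc_usable_size (big), malloc_usable_size (small));
      free (big);
      free (small);
      return 0;
    }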