
malloc: Refactor TAG_ macros to avoid indirection

This does not change behaviour, just removes one layer of indirection
in the internal memory tagging logic.

Use tag_ and mtag_ prefixes instead of __tag_ and __mtag_ since these
are all symbols with internal linkage, private to malloc.c, so there
is no user namespace pollution issue.

Reviewed-by: DJ Delorie <dj@redhat.com>
Author: Szabolcs Nagy
Date:   2021-02-16 14:12:25 +00:00
Parent: e865dcbb7b
Commit: 0c719cf42c

3 changed files with 51 additions and 56 deletions
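The change drops the uppercase TAG_*/MTAG_* macro layer and calls malloc's
internal function pointers directly under their new lowercase names. A
minimal standalone sketch of the pattern (not glibc code; identifiers are
borrowed from the diff below and simplified to the USE_MTAG case):

    #include <stdio.h>

    /* Stand-in for __default_tag_nop: when tagging is off, return the
       pointer unchanged.  */
    static void *
    default_tag_nop (void *ptr)
    {
      return ptr;
    }

    /* Old scheme: a reserved-style function pointer hidden behind an
       uppercase macro (shown only to mirror the old internal naming).  */
    static void *(*__tag_new_usable) (void *) = default_tag_nop;
    #define TAG_NEW_USABLE(ptr) __tag_new_usable (ptr)

    /* New scheme: the pointer itself carries the readable internal name
       and is called directly; one less layer of indirection to read
       through.  */
    static void *(*tag_new_usable) (void *) = default_tag_nop;

    int
    main (void)
    {
      char buf[16];
      void *a = TAG_NEW_USABLE (buf);  /* expands to __tag_new_usable (buf) */
      void *b = tag_new_usable (buf);  /* direct call, same behaviour */
      printf ("%p %p\n", a, b);
      return 0;
    }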

malloc/arena.c

@@ -332,12 +332,12 @@ ptmalloc_init (void)
       if (__MTAG_SBRK_UNTAGGED)
         __morecore = __failing_morecore;
 
-      __mtag_mmap_flags = __MTAG_MMAP_FLAGS;
-      __tag_new_memset = __mtag_tag_new_memset;
-      __tag_region = __libc_mtag_tag_region;
-      __tag_new_usable = __mtag_tag_new_usable;
-      __tag_at = __libc_mtag_address_get_tag;
-      __mtag_granule_mask = ~(size_t)(__MTAG_GRANULE_SIZE - 1);
+      mtag_mmap_flags = __MTAG_MMAP_FLAGS;
+      tag_new_memset = __mtag_tag_new_memset;
+      tag_region = __libc_mtag_tag_region;
+      tag_new_usable = __mtag_tag_new_usable;
+      tag_at = __libc_mtag_address_get_tag;
+      mtag_granule_mask = ~(size_t)(__MTAG_GRANULE_SIZE - 1);
     }
 #endif
@@ -557,7 +557,7 @@ new_heap (size_t size, size_t top_pad)
             }
         }
     }
-  if (__mprotect (p2, size, MTAG_MMAP_FLAGS | PROT_READ | PROT_WRITE) != 0)
+  if (__mprotect (p2, size, mtag_mmap_flags | PROT_READ | PROT_WRITE) != 0)
     {
       __munmap (p2, HEAP_MAX_SIZE);
       return 0;
@@ -587,7 +587,7 @@ grow_heap (heap_info *h, long diff)
     {
       if (__mprotect ((char *) h + h->mprotect_size,
                       (unsigned long) new_size - h->mprotect_size,
-                      MTAG_MMAP_FLAGS | PROT_READ | PROT_WRITE) != 0)
+                      mtag_mmap_flags | PROT_READ | PROT_WRITE) != 0)
         return -2;
 
       h->mprotect_size = new_size;

malloc/hooks.c

@@ -68,7 +68,7 @@ __malloc_check_init (void)
    tags, so fetch the tag at each location before dereferencing
    it. */
 #define SAFE_CHAR_OFFSET(p,offset) \
-  ((unsigned char *) TAG_AT (((unsigned char *) p) + offset))
+  ((unsigned char *) tag_at (((unsigned char *) p) + offset))
 
 /* A simple, standard set of debugging hooks. Overhead is `only' one
    byte per chunk; still this will catch most cases of double frees or
@@ -249,7 +249,7 @@ malloc_check (size_t sz, const void *caller)
   top_check ();
   victim = _int_malloc (&main_arena, nb);
   __libc_lock_unlock (main_arena.mutex);
-  return mem2mem_check (TAG_NEW_USABLE (victim), sz);
+  return mem2mem_check (tag_new_usable (victim), sz);
 }
 
 static void
@@ -280,7 +280,7 @@ free_check (void *mem, const void *caller)
   else
     {
       /* Mark the chunk as belonging to the library again. */
-      (void)TAG_REGION (chunk2rawmem (p), CHUNK_AVAILABLE_SIZE (p)
+      (void)tag_region (chunk2rawmem (p), CHUNK_AVAILABLE_SIZE (p)
                                           - CHUNK_HDR_SZ);
       _int_free (&main_arena, p, 1);
       __libc_lock_unlock (main_arena.mutex);
@@ -375,7 +375,7 @@ invert:
   __libc_lock_unlock (main_arena.mutex);
 
-  return mem2mem_check (TAG_NEW_USABLE (newmem), bytes);
+  return mem2mem_check (tag_new_usable (newmem), bytes);
 }
 
 static void *
@@ -417,7 +417,7 @@ memalign_check (size_t alignment, size_t bytes, const void *caller)
   top_check ();
   mem = _int_memalign (&main_arena, alignment, bytes + 1);
   __libc_lock_unlock (main_arena.mutex);
-  return mem2mem_check (TAG_NEW_USABLE (mem), bytes);
+  return mem2mem_check (tag_new_usable (mem), bytes);
 }
 
 #if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_25)

malloc/malloc.c

@@ -413,26 +413,26 @@ void *(*__morecore)(ptrdiff_t) = __default_morecore;
    operations can continue to be used. Support macros are used to do
    this:
 
-   void *TAG_NEW_MEMSET (void *ptr, int, val, size_t size)
+   void *tag_new_memset (void *ptr, int, val, size_t size)
 
    Has the same interface as memset(), but additionally allocates a
    new tag, colors the memory with that tag and returns a pointer that
    is correctly colored for that location. The non-tagging version
    will simply call memset.
 
-   void *TAG_REGION (void *ptr, size_t size)
+   void *tag_region (void *ptr, size_t size)
 
    Color the region of memory pointed to by PTR and size SIZE with
    the color of PTR. Returns the original pointer.
 
-   void *TAG_NEW_USABLE (void *ptr)
+   void *tag_new_usable (void *ptr)
 
    Allocate a new random color and use it to color the user region of
    a chunk; this may include data from the subsequent chunk's header
    if tagging is sufficiently fine grained. Returns PTR suitably
    recolored for accessing the memory there.
 
-   void *TAG_AT (void *ptr)
+   void *tag_at (void *ptr)
 
    Read the current color of the memory at the address pointed to by
    PTR (ignoring it's current color) and return PTR recolored to that
@@ -455,25 +455,20 @@ __default_tag_nop (void *ptr)
   return ptr;
 }
 
-static int __mtag_mmap_flags = 0;
-static size_t __mtag_granule_mask = ~(size_t)0;
+static int mtag_mmap_flags = 0;
+static size_t mtag_granule_mask = ~(size_t)0;
 
-static void *(*__tag_new_memset)(void *, int, size_t) = memset;
-static void *(*__tag_region)(void *, size_t) = __default_tag_region;
-static void *(*__tag_new_usable)(void *) = __default_tag_nop;
-static void *(*__tag_at)(void *) = __default_tag_nop;
+static void *(*tag_new_memset)(void *, int, size_t) = memset;
+static void *(*tag_region)(void *, size_t) = __default_tag_region;
+static void *(*tag_new_usable)(void *) = __default_tag_nop;
+static void *(*tag_at)(void *) = __default_tag_nop;
 
-# define MTAG_MMAP_FLAGS __mtag_mmap_flags
-# define TAG_NEW_MEMSET(ptr, val, size) __tag_new_memset (ptr, val, size)
-# define TAG_REGION(ptr, size) __tag_region (ptr, size)
-# define TAG_NEW_USABLE(ptr) __tag_new_usable (ptr)
-# define TAG_AT(ptr) __tag_at (ptr)
 #else
-# define MTAG_MMAP_FLAGS 0
-# define TAG_NEW_MEMSET(ptr, val, size) memset (ptr, val, size)
-# define TAG_REGION(ptr, size) (ptr)
-# define TAG_NEW_USABLE(ptr) (ptr)
-# define TAG_AT(ptr) (ptr)
+# define mtag_mmap_flags 0
+# define tag_new_memset(ptr, val, size) memset (ptr, val, size)
+# define tag_region(ptr, size) (ptr)
+# define tag_new_usable(ptr) (ptr)
+# define tag_at(ptr) (ptr)
 #endif
 
 #include <string.h>
@@ -1305,8 +1300,8 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 /* Convert between user mem pointers and chunk pointers, updating any
    memory tags on the pointer to respect the tag value at that
    location. */
-#define chunk2mem(p) ((void*)TAG_AT (((char*)(p) + CHUNK_HDR_SZ)))
-#define mem2chunk(mem) ((mchunkptr)TAG_AT (((char*)(mem) - CHUNK_HDR_SZ)))
+#define chunk2mem(p) ((void *)tag_at (((char*)(p) + CHUNK_HDR_SZ)))
+#define mem2chunk(mem) ((mchunkptr)tag_at (((char*)(mem) - CHUNK_HDR_SZ)))
 
 /* The smallest possible chunk */
 #define MIN_CHUNK_SIZE (offsetof(struct malloc_chunk, fd_nextsize))
@@ -1337,7 +1332,7 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 #ifdef USE_MTAG
 #define CHUNK_AVAILABLE_SIZE(p) \
   ((chunksize (p) + (chunk_is_mmapped (p) ? 0 : SIZE_SZ)) \
-   & __mtag_granule_mask)
+   & mtag_granule_mask)
 #else
 #define CHUNK_AVAILABLE_SIZE(p) \
   (chunksize (p) + (chunk_is_mmapped (p) ? 0 : SIZE_SZ))
@@ -1361,7 +1356,7 @@ checked_request2size (size_t req, size_t *sz) __nonnull (1)
      number. Ideally, this would be part of request2size(), but that
      must be a macro that produces a compile time constant if passed
      a constant literal. */
-  req = (req + ~__mtag_granule_mask) & __mtag_granule_mask;
+  req = (req + ~mtag_granule_mask) & mtag_granule_mask;
 #endif
 
   *sz = request2size (req);
@@ -2467,7 +2462,7 @@ sysmalloc (INTERNAL_SIZE_T nb, mstate av)
       if ((unsigned long) (size) > (unsigned long) (nb))
         {
           mm = (char *) (MMAP (0, size,
-                               MTAG_MMAP_FLAGS | PROT_READ | PROT_WRITE, 0));
+                               mtag_mmap_flags | PROT_READ | PROT_WRITE, 0));
 
           if (mm != MAP_FAILED)
             {
@@ -2665,7 +2660,7 @@ sysmalloc (INTERNAL_SIZE_T nb, mstate av)
       if ((unsigned long) (size) > (unsigned long) (nb))
         {
           char *mbrk = (char *) (MMAP (0, size,
-                                       MTAG_MMAP_FLAGS | PROT_READ | PROT_WRITE,
+                                       mtag_mmap_flags | PROT_READ | PROT_WRITE,
                                        0));
 
           if (mbrk != MAP_FAILED)
@@ -3221,14 +3216,14 @@ __libc_malloc (size_t bytes)
       && tcache->counts[tc_idx] > 0)
     {
       victim = tcache_get (tc_idx);
-      return TAG_NEW_USABLE (victim);
+      return tag_new_usable (victim);
     }
   DIAG_POP_NEEDS_COMMENT;
 #endif
 
   if (SINGLE_THREAD_P)
     {
-      victim = TAG_NEW_USABLE (_int_malloc (&main_arena, bytes));
+      victim = tag_new_usable (_int_malloc (&main_arena, bytes));
       assert (!victim || chunk_is_mmapped (mem2chunk (victim)) ||
               &main_arena == arena_for_chunk (mem2chunk (victim)));
       return victim;
@@ -3249,7 +3244,7 @@ __libc_malloc (size_t bytes)
   if (ar_ptr != NULL)
     __libc_lock_unlock (ar_ptr->mutex);
 
-  victim = TAG_NEW_USABLE (victim);
+  victim = tag_new_usable (victim);
 
   assert (!victim || chunk_is_mmapped (mem2chunk (victim)) ||
           ar_ptr == arena_for_chunk (mem2chunk (victim)));
@@ -3305,7 +3300,7 @@ __libc_free (void *mem)
       MAYBE_INIT_TCACHE ();
 
       /* Mark the chunk as belonging to the library again. */
-      (void)TAG_REGION (chunk2rawmem (p),
+      (void)tag_region (chunk2rawmem (p),
                         CHUNK_AVAILABLE_SIZE (p) - CHUNK_HDR_SZ);
 
       ar_ptr = arena_for_chunk (p);
@@ -3408,7 +3403,7 @@ __libc_realloc (void *oldmem, size_t bytes)
              reused. There's a performance hit for both us and the
              caller for doing this, so we might want to
              reconsider. */
-          return TAG_NEW_USABLE (newmem);
+          return tag_new_usable (newmem);
         }
 #endif
 
   /* Note the extra SIZE_SZ overhead. */
@@ -3451,7 +3446,7 @@ __libc_realloc (void *oldmem, size_t bytes)
         {
           size_t sz = CHUNK_AVAILABLE_SIZE (oldp) - CHUNK_HDR_SZ;
           memcpy (newp, oldmem, sz);
-          (void) TAG_REGION (chunk2rawmem (oldp), sz);
+          (void) tag_region (chunk2rawmem (oldp), sz);
           _int_free (ar_ptr, oldp, 0);
         }
     }
@@ -3509,7 +3504,7 @@ _mid_memalign (size_t alignment, size_t bytes, void *address)
       p = _int_memalign (&main_arena, alignment, bytes);
       assert (!p || chunk_is_mmapped (mem2chunk (p)) ||
               &main_arena == arena_for_chunk (mem2chunk (p)));
-      return TAG_NEW_USABLE (p);
+      return tag_new_usable (p);
     }
 
   arena_get (ar_ptr, bytes + alignment + MINSIZE);
@@ -3527,7 +3522,7 @@ _mid_memalign (size_t alignment, size_t bytes, void *address)
   assert (!p || chunk_is_mmapped (mem2chunk (p)) ||
           ar_ptr == arena_for_chunk (mem2chunk (p)));
-  return TAG_NEW_USABLE (p);
+  return tag_new_usable (p);
 }
 
 /* For ISO C11. */
 weak_alias (__libc_memalign, aligned_alloc)
@@ -3544,7 +3539,7 @@ __libc_valloc (size_t bytes)
   void *address = RETURN_ADDRESS (0);
   size_t pagesize = GLRO (dl_pagesize);
   p = _mid_memalign (pagesize, bytes, address);
-  return TAG_NEW_USABLE (p);
+  return tag_new_usable (p);
 }
 
 void *
@@ -3569,7 +3564,7 @@ __libc_pvalloc (size_t bytes)
   rounded_bytes = rounded_bytes & -(pagesize - 1);
   p = _mid_memalign (pagesize, rounded_bytes, address);
-  return TAG_NEW_USABLE (p);
+  return tag_new_usable (p);
 }
 
 void *
@@ -3666,7 +3661,7 @@ __libc_calloc (size_t n, size_t elem_size)
      regardless of MORECORE_CLEARS, so we zero the whole block while
      doing so. */
 #ifdef USE_MTAG
-  return TAG_NEW_MEMSET (mem, 0, CHUNK_AVAILABLE_SIZE (p) - CHUNK_HDR_SZ);
+  return tag_new_memset (mem, 0, CHUNK_AVAILABLE_SIZE (p) - CHUNK_HDR_SZ);
 #else
   INTERNAL_SIZE_T csz = chunksize (p);
@@ -4821,7 +4816,7 @@ _int_realloc(mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
           av->top = chunk_at_offset (oldp, nb);
           set_head (av->top, (newsize - nb) | PREV_INUSE);
           check_inuse_chunk (av, oldp);
-          return TAG_NEW_USABLE (chunk2rawmem (oldp));
+          return tag_new_usable (chunk2rawmem (oldp));
         }
 
       /* Try to expand forward into next chunk; split off remainder below */
@@ -4856,8 +4851,8 @@ _int_realloc(mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
             {
               void *oldmem = chunk2rawmem (oldp);
               size_t sz = CHUNK_AVAILABLE_SIZE (oldp) - CHUNK_HDR_SZ;
-              (void) TAG_REGION (oldmem, sz);
-              newmem = TAG_NEW_USABLE (newmem);
+              (void) tag_region (oldmem, sz);
+              newmem = tag_new_usable (newmem);
               memcpy (newmem, oldmem, sz);
               _int_free (av, oldp, 1);
               check_inuse_chunk (av, newp);
@@ -4881,7 +4876,7 @@ _int_realloc(mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
     {
       remainder = chunk_at_offset (newp, nb);
       /* Clear any user-space tags before writing the header. */
-      remainder = TAG_REGION (remainder, remainder_size);
+      remainder = tag_region (remainder, remainder_size);
       set_head_size (newp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
       set_head (remainder, remainder_size | PREV_INUSE |
                 (av != &main_arena ? NON_MAIN_ARENA : 0));
@@ -4891,7 +4886,7 @@ _int_realloc(mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
     }
 
   check_inuse_chunk (av, newp);
-  return TAG_NEW_USABLE (chunk2rawmem (newp));
+  return tag_new_usable (chunk2rawmem (newp));
 }
 
 /*
@@ -5108,7 +5103,7 @@ musable (void *mem)
       /* The usable space may be reduced if memory tagging is needed,
          since we cannot share the user-space data with malloc's internal
          data structure. */
-      result &= __mtag_granule_mask;
+      result &= mtag_granule_mask;
 #endif
       return result;
     }
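
A side note on the granule-mask arithmetic that the renamed mtag_granule_mask
feeds: checked_request2size rounds a request up to a whole tag granule, while
musable trims the reported usable size down to one. A standalone sketch,
assuming a 16-byte granule (the AArch64 MTE granule size); glibc derives the
mask from __MTAG_GRANULE_SIZE rather than hard-coding it:

    #include <assert.h>
    #include <stddef.h>
    #include <stdio.h>

    int
    main (void)
    {
      size_t granule = 16;                      /* assumed granule size */
      size_t mask = ~(size_t) (granule - 1);    /* ...fffffff0 */

      /* checked_request2size: round the request up to a granule.  */
      size_t req = 20;
      size_t rounded = (req + ~mask) & mask;    /* (20 + 15) & ~15 == 32 */
      assert (rounded == 32);

      /* musable: trim the usable size down to a granule.  */
      size_t avail = 40;
      size_t usable = avail & mask;             /* 40 & ~15 == 32 */
      assert (usable == 32);

      printf ("rounded=%zu usable=%zu\n", rounded, usable);
      return 0;
    }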