malloc: Count tcache entries downwards

Currently tcache requires two global variable accesses to determine
whether a block can be added to the tcache.  Change the counts array
to 'num_slots', which holds the number of entries that can still be
added to each bin.  If 'num_slots' reaches zero, no more blocks can
be added.  If the entries pointer is not NULL, at least one block is
available for allocation.

Now each tcache bin can support a different maximum number of entries,
and bins can be individually switched on or off (a zero-initialized
'num_slots' and 'entries' means the tcache bin is not available for
free or malloc).
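
To make the scheme concrete, here is a minimal standalone sketch of the
down-counting bookkeeping (illustrative C only, not glibc code: 'bin_cache',
'cache_put' and 'cache_get' are invented names, and glibc details such as
pointer mangling, tunables and the double-free key are omitted):

#include <stddef.h>
#include <stdint.h>

#define NBINS 64   /* stand-in for TCACHE_MAX_BINS */
#define LIMIT 7    /* stand-in for mp_.tcache_count */

struct entry { struct entry *next; };

struct bin_cache
{
  uint16_t num_slots[NBINS];     /* free slots left per bin; counts down */
  struct entry *entries[NBINS];  /* per-bin list of cached free blocks */
};

/* Free path: a single test on the per-thread array, no global access.  */
static int
cache_put (struct bin_cache *c, size_t idx, struct entry *e)
{
  if (c->num_slots[idx] == 0)    /* bin full, or never enabled */
    return 0;
  e->next = c->entries[idx];
  c->entries[idx] = e;
  c->num_slots[idx]--;
  return 1;
}

/* Allocation path: availability is just a NULL test on the list head.  */
static struct entry *
cache_get (struct bin_cache *c, size_t idx)
{
  struct entry *e = c->entries[idx];
  if (e == NULL)
    return NULL;
  c->entries[idx] = e->next;
  c->num_slots[idx]++;
  return e;
}

A fresh bin starts at num_slots == LIMIT with a NULL head; every put/get
pair preserves the invariant num_slots + list length == LIMIT.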

Reviewed-by: DJ Delorie <dj@redhat.com>
Author:  Wilco Dijkstra
Date:    2025-04-09 17:41:22 +00:00
Commit:  7e10e30e64
Parent:  404526ee2e

Changed file: malloc/malloc.c

@@ -313,7 +313,7 @@
 # define TCACHE_FILL_COUNT 7
 /* Maximum chunks in tcache bins for tunables.  This value must fit the range
-   of tcache->counts[] entries, else they may overflow.  */
+   of tcache->num_slots[] entries, else they may overflow.  */
 # define MAX_TCACHE_COUNT UINT16_MAX
 #endif
@@ -3119,12 +3119,13 @@ typedef struct tcache_entry
 /* There is one of these for each thread, which contains the
    per-thread cache (hence "tcache_perthread_struct").  Keeping
-   overall size low is mildly important.  Note that COUNTS and ENTRIES
-   are redundant (we could have just counted the linked list each
-   time), this is for performance reasons.  */
+   overall size low is mildly important.  The 'entries' field is a linked
+   list of free blocks, while 'num_slots' contains the number of free
+   blocks that can be added.  Each bin may allow a different maximum number
+   of free blocks, and can be disabled by initializing 'num_slots' to
+   zero.  */
 typedef struct tcache_perthread_struct
 {
-  uint16_t counts[TCACHE_MAX_BINS];
+  uint16_t num_slots[TCACHE_MAX_BINS];
   tcache_entry *entries[TCACHE_MAX_BINS];
 } tcache_perthread_struct;
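
The new comment implies a per-bin invariant: remaining slots plus current
list length equal the bin's configured limit.  A hypothetical debug check
(not part of the patch; assumes <assert.h>, and glosses over the fact that
interior 'next' links are mangled and would need REVEAL_PTR to follow):

/* Hypothetical consistency check for one tcache bin.  */
static void
check_bin (tcache_perthread_struct *tc, size_t i, size_t limit)
{
  size_t len = 0;
  for (tcache_entry *e = tc->entries[i]; e != NULL; e = e->next)
    len++;
  assert (tc->num_slots[i] + len == limit);
}
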
@@ -3171,7 +3172,7 @@ tcache_put (mchunkptr chunk, size_t tc_idx)
   e->next = PROTECT_PTR (&e->next, tcache->entries[tc_idx]);
   tcache->entries[tc_idx] = e;
-  ++(tcache->counts[tc_idx]);
+  --(tcache->num_slots[tc_idx]);
 }

 /* Caller must ensure that we know tc_idx is valid and there's
@@ -3194,7 +3195,7 @@ tcache_get_n (size_t tc_idx, tcache_entry **ep)
   else
     *ep = PROTECT_PTR (ep, REVEAL_PTR (e->next));
-  --(tcache->counts[tc_idx]);
+  ++(tcache->num_slots[tc_idx]);
   e->key = 0;
   return (void *) e;
 }
@@ -3219,7 +3220,7 @@ tcache_available (size_t tc_idx)
 {
   if (tc_idx < mp_.tcache_bins
       && tcache != NULL
-      && tcache->counts[tc_idx] > 0)
+      && tcache->entries[tc_idx] != NULL)
     return true;
   else
     return false;
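
The design choice behind this hunk: 'num_slots' answers the free-side
question ("is there room?") while the list head answers the malloc-side
question ("is there a block?"), so availability collapses to one pointer
test.  (The head pointer is stored unmangled, as tcache_put above shows;
only the interior 'next' links go through PROTECT_PTR.)  Continuing the
earlier bin_cache sketch:

#include <stdbool.h>

/* One dependent load; no counter and no global limit read on this path.  */
static inline bool
cache_has_block (const struct bin_cache *c, size_t idx)
{
  return c->entries[idx] != NULL;
}
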
@@ -3322,6 +3323,8 @@ tcache_init(void)
     {
       tcache = (tcache_perthread_struct *) victim;
       memset (tcache, 0, sizeof (tcache_perthread_struct));
+      for (int i = 0; i < TCACHE_MAX_BINS; i++)
+        tcache->num_slots[i] = mp_.tcache_count;
     }
 }
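
Since capacity is now recorded per bin at initialization, each bin could in
principle be given its own limit.  A hypothetical variant (not in this
patch; 'limits' might come from a future tunable) sketching that
flexibility:

/* Per-bin setup: a zero entry leaves that bin disabled for both malloc
   and free, since tcache_put sees num_slots == 0 and tcache_available
   sees a NULL list head.  */
static void
tcache_set_limits (tcache_perthread_struct *tc, const uint16_t *limits)
{
  for (int i = 0; i < TCACHE_MAX_BINS; i++)
    tc->num_slots[i] = limits[i];
}
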
@@ -3439,7 +3442,7 @@ __libc_free (void *mem)
       if (__glibc_unlikely (e->key == tcache_key))
        return tcache_double_free_verify (e);
-      if (__glibc_likely (tcache->counts[tc_idx] < mp_.tcache_count))
+      if (__glibc_likely (tcache->num_slots[tc_idx] != 0))
        return tcache_put (p, tc_idx);
     }
 #endif
@@ -3951,8 +3954,7 @@ _int_malloc (mstate av, size_t bytes)
              mchunkptr tc_victim;

              /* While bin not empty and tcache not full, copy chunks.  */
-             while (tcache->counts[tc_idx] < mp_.tcache_count
-                    && (tc_victim = *fb) != NULL)
+             while (tcache->num_slots[tc_idx] != 0 && (tc_victim = *fb) != NULL)
                {
                  if (__glibc_unlikely (misaligned_chunk (tc_victim)))
                    malloc_printerr ("malloc(): unaligned fastbin chunk detected 3");
@@ -4012,7 +4014,7 @@ _int_malloc (mstate av, size_t bytes)
          mchunkptr tc_victim;

          /* While bin not empty and tcache not full, copy chunks over.  */
-         while (tcache->counts[tc_idx] < mp_.tcache_count
+         while (tcache->num_slots[tc_idx] != 0
                 && (tc_victim = last (bin)) != bin)
            {
              if (tc_victim != NULL)
@@ -4149,8 +4151,7 @@ _int_malloc (mstate av, size_t bytes)
 #if USE_TCACHE
          /* Fill cache first, return to user only if cache fills.
             We may return one of these chunks later.  */
-         if (tcache_nb > 0
-             && tcache->counts[tc_idx] < mp_.tcache_count)
+         if (tcache_nb > 0 && tcache->num_slots[tc_idx] != 0)
            {
              tcache_put (victim, tc_idx);
              return_cached = 1;