
Add single-threaded path to _int_free

This patch adds single-threaded fast paths to _int_free.  The fastbin
path avoids the atomic exchange, and the explicit arena locking is
bypassed for larger allocations.

	* malloc/malloc.c (_int_free): Add SINGLE_THREAD_P fast paths.
Wilco Dijkstra
2017-10-20 17:27:53 +01:00
parent b9a558e790
commit a15d53e2de
2 changed files with 32 additions and 13 deletions

ChangeLog

@@ -1,3 +1,7 @@
+2017-10-20  Wilco Dijkstra  <wdijkstr@arm.com>
+
+	* malloc/malloc.c (_int_free): Add SINGLE_THREAD_P fast paths.
+
 2017-10-20  Will Hawkins  <hawkinsw@borlaugic.com>
 
 	* resolv/Makefile [$(build-shared)$(have-thread-library) == yesyes]

malloc/malloc.c

@@ -4159,24 +4159,34 @@ _int_free (mstate av, mchunkptr p, int have_lock)
 
     /* Atomically link P to its fastbin: P->FD = *FB; *FB = P;  */
     mchunkptr old = *fb, old2;
-    unsigned int old_idx = ~0u;
-    do
+
+    if (SINGLE_THREAD_P)
       {
-	/* Check that the top of the bin is not the record we are going to add
-	   (i.e., double free).  */
+	/* Check that the top of the bin is not the record we are going to
+	   add (i.e., double free).  */
 	if (__builtin_expect (old == p, 0))
 	  malloc_printerr ("double free or corruption (fasttop)");
-	/* Check that size of fastbin chunk at the top is the same as
-	   size of the chunk that we are adding.  We can dereference OLD
-	   only if we have the lock, otherwise it might have already been
-	   deallocated.  See use of OLD_IDX below for the actual check.  */
-	if (have_lock && old != NULL)
-	  old_idx = fastbin_index(chunksize(old));
-	p->fd = old2 = old;
+	p->fd = old;
+	*fb = p;
       }
-    while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2)) != old2);
-
-    if (have_lock && old != NULL && __builtin_expect (old_idx != idx, 0))
+    else
+      do
+	{
+	  /* Check that the top of the bin is not the record we are going to
+	     add (i.e., double free).  */
+	  if (__builtin_expect (old == p, 0))
+	    malloc_printerr ("double free or corruption (fasttop)");
+	  p->fd = old2 = old;
+	}
+      while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2))
+	     != old2);
+
+    /* Check that size of fastbin chunk at the top is the same as
+       size of the chunk that we are adding.  We can dereference OLD
+       only if we have the lock, otherwise it might have already been
+       allocated again.  */
+    if (have_lock && old != NULL
+	&& __builtin_expect (fastbin_index (chunksize (old)) != idx, 0))
       malloc_printerr ("invalid fastbin entry (free)");
   }
 
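Taken on its own, this hunk replaces an atomic compare-and-exchange loop
with two plain stores whenever the process has never started a second
thread, and moves the fastbin size check out of the retry loop.  The
following is a minimal standalone sketch of the same pattern in C11
atomics; struct node, push_freelist and process_is_single_threaded are
hypothetical stand-ins for mchunkptr, the fastbin push and glibc's
internal SINGLE_THREAD_P macro, not glibc's actual code:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct node { struct node *fd; };

/* Stand-in for glibc's SINGLE_THREAD_P predicate, which stays true
   until the process creates a second thread (hypothetical here).  */
extern bool process_is_single_threaded;

/* Push NODE onto the LIFO free list headed by *HEAD.  */
static void
push_freelist (struct node *_Atomic *head, struct node *node)
{
  struct node *old = atomic_load_explicit (head, memory_order_relaxed);

  if (process_is_single_threaded)
    {
      /* No concurrent writer can exist, so two plain stores replace
         the CAS loop; this mirrors the new SINGLE_THREAD_P branch.  */
      if (old == node)
        abort ();            /* double free, as in the fasttop check */
      node->fd = old;
      atomic_store_explicit (head, node, memory_order_relaxed);
    }
  else
    {
      /* Multi-threaded path: retry until no other thread changed the
         list head between reading it and linking NODE in, the analogue
         of catomic_compare_and_exchange_val_rel above.  */
      do
        {
          if (old == node)
            abort ();        /* recheck on every retry: OLD is re-read */
          node->fd = old;
        }
      while (!atomic_compare_exchange_weak_explicit
               (head, &old, node,
                memory_order_release, memory_order_relaxed));
    }
}

Note that the double-free check must be repeated on each iteration of
the CAS loop, since old is refreshed after every failed exchange; in
the single-threaded branch it runs exactly once.
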
@@ -4185,6 +4195,11 @@ _int_free (mstate av, mchunkptr p, int have_lock)
   */
 
   else if (!chunk_is_mmapped(p)) {
+
+    /* If we're single-threaded, don't lock the arena.  */
+    if (SINGLE_THREAD_P)
+      have_lock = true;
+
     if (!have_lock)
       __libc_lock_lock (av->mutex);
 
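The second hunk applies the same idea to the non-fastbin path without
restructuring the function: when single-threaded it claims the lock is
already held, which skips both this acquisition and the matching unlock
on the way out of _int_free.  A minimal sketch of the idiom, assuming a
pthread mutex and hypothetical arena_free/process_is_single_threaded
names rather than glibc's internals:

#include <pthread.h>
#include <stdbool.h>

/* Stand-in for glibc's SINGLE_THREAD_P predicate (hypothetical).  */
extern bool process_is_single_threaded;

struct arena
{
  pthread_mutex_t mutex;
  /* ... bins, top chunk, etc. ... */
};

/* Skeleton of the free path: HAVE_LOCK says whether the caller already
   holds AV->mutex.  Claiming it is held when the process is
   single-threaded elides both the lock and the matching unlock.  */
static void
arena_free (struct arena *av, void *p, bool have_lock)
{
  (void) p;                  /* the chunk being freed, unused in this sketch */

  if (process_is_single_threaded)
    have_lock = true;

  if (!have_lock)
    pthread_mutex_lock (&av->mutex);

  /* ... coalesce P with its neighbours and place it in a bin ... */

  if (!have_lock)
    pthread_mutex_unlock (&av->mutex);
}

Reusing have_lock keeps the single-threaded case on exactly the same
code path as a caller that genuinely holds the arena mutex, so no
unlock can be missed.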