
nptl: Move stack cache management, __libpthread_freeres into libc

This replaces the FREE_P macro with the __nptl_stack_in_use inline
function.  stack_list_del is renamed to __nptl_stack_list_del,
stack_list_add to __nptl_stack_list_add, __deallocate_stack to
__nptl_deallocate_stack, free_stacks to __nptl_free_stacks.

It is convenient to move __libpthread_freeres into libc at the
same time.  This removes the temporary __default_pthread_attr_freeres
export and restores full freeres coverage for __default_pthread_attr.

Reviewed-by: Adhemerval Zanella <adhemerval.zanella@linaro.org>
Author: Florian Weimer
Date:   2021-05-11 11:08:00 +02:00
Parent: 249bd833a0
Commit: c79a31fb36

10 changed files with 212 additions and 158 deletions
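For reference, the replacement helper presumably carries the body of the FREE_P macro over unchanged into the new nptl-stack.h header. A sketch, reconstructed from the removed macro and the one-for-one substitution at its call sites in the diff below (so, despite the name, the helper returns true once the kernel has cleared the thread's TID, i.e. the stack's former owner has exited):

/* Check whether the stack is still used or not.  */
static inline bool
__nptl_stack_in_use (struct pthread *pd)
{
  return pd->tid <= 0;
}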

nptl/allocatestack.c

@@ -31,7 +31,7 @@
 #include <lowlevellock.h>
 #include <futex-internal.h>
 #include <kernel-features.h>
+#include <nptl-stack.h>
 
 #ifndef NEED_SEPARATE_REGISTER_STACK
@@ -92,56 +92,6 @@
 # define MAP_STACK 0
 #endif
 
-/* This yields the pointer that TLS support code calls the thread pointer.  */
-#if TLS_TCB_AT_TP
-# define TLS_TPADJ(pd) (pd)
-#elif TLS_DTV_AT_TP
-# define TLS_TPADJ(pd) ((struct pthread *)((char *) (pd) + TLS_PRE_TCB_SIZE))
-#endif
-
-/* Cache handling for not-yet free stacks.  */
-
-/* Maximum size in kB of cache.  */
-static size_t stack_cache_maxsize = 40 * 1024 * 1024; /* 40MiBi by default.  */
-
-/* Check whether the stack is still used or not.  */
-#define FREE_P(descr) ((descr)->tid <= 0)
-
-static void
-stack_list_del (list_t *elem)
-{
-  GL (dl_in_flight_stack) = (uintptr_t) elem;
-  atomic_write_barrier ();
-  list_del (elem);
-  atomic_write_barrier ();
-  GL (dl_in_flight_stack) = 0;
-}
-
-static void
-stack_list_add (list_t *elem, list_t *list)
-{
-  GL (dl_in_flight_stack) = (uintptr_t) elem | 1;
-  atomic_write_barrier ();
-  list_add (elem, list);
-  atomic_write_barrier ();
-  GL (dl_in_flight_stack) = 0;
-}
-
-/* We create a double linked list of all cache entries.  Double linked
-   because this allows removing entries from the end.  */
-
 /* Get a stack frame from the cache.  We have to match by size since
    some blocks might be too small or far too large.  */
 static struct pthread *
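A note on the two list helpers removed above (and re-exported from libc as __nptl_stack_list_del and __nptl_stack_list_add): they implement a small crash-consistency protocol. The element's address is published in GL (dl_in_flight_stack) before the doubly-linked list is touched, with bit 0 distinguishing an add from a delete, so that after a fork() lands in the middle of an operation the child can detect and repair a half-updated list. A self-contained toy illustrating the pattern (all names invented for the example; __atomic_thread_fence stands in for glibc's atomic_write_barrier):

#include <stdint.h>
#include <stdio.h>

struct node { struct node *next, *prev; int id; };

static struct node head = { &head, &head, 0 };
static volatile uintptr_t in_flight;   /* stand-in for GL (dl_in_flight_stack) */

static void
toy_list_add (struct node *elem)
{
  in_flight = (uintptr_t) elem | 1;    /* bit 0 set: an add is in flight */
  __atomic_thread_fence (__ATOMIC_RELEASE);
  elem->next = head.next;
  elem->prev = &head;
  head.next->prev = elem;
  head.next = elem;
  __atomic_thread_fence (__ATOMIC_RELEASE);
  in_flight = 0;                       /* operation complete */
}

static void
toy_list_del (struct node *elem)
{
  in_flight = (uintptr_t) elem;        /* bit 0 clear: a delete is in flight */
  __atomic_thread_fence (__ATOMIC_RELEASE);
  elem->prev->next = elem->next;
  elem->next->prev = elem->prev;
  __atomic_thread_fence (__ATOMIC_RELEASE);
  in_flight = 0;
}

int
main (void)
{
  struct node a = { .id = 1 }, b = { .id = 2 };
  toy_list_add (&a);
  toy_list_add (&b);
  toy_list_del (&a);
  for (struct node *n = head.next; n != &head; n = n->next)
    printf ("node %d\n", n->id);       /* prints: node 2 */
  return 0;
}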
@@ -164,7 +114,7 @@ get_cached_stack (size_t *sizep, void **memp)
       struct pthread *curr;
 
       curr = list_entry (entry, struct pthread, list);
-      if (FREE_P (curr) && curr->stackblock_size >= size)
+      if (__nptl_stack_in_use (curr) && curr->stackblock_size >= size)
         {
           if (curr->stackblock_size == size)
             {
@@ -193,10 +143,10 @@ get_cached_stack (size_t *sizep, void **memp)
   result->setxid_futex = -1;
 
   /* Dequeue the entry.  */
-  stack_list_del (&result->list);
+  __nptl_stack_list_del (&result->list);
 
   /* And add to the list of stacks in use.  */
-  stack_list_add (&result->list, &GL (dl_stack_used));
+  __nptl_stack_list_add (&result->list, &GL (dl_stack_used));
 
   /* And decrease the cache size.  */
   GL (dl_stack_cache_actsize) -= result->stackblock_size;
@@ -229,68 +179,6 @@ get_cached_stack (size_t *sizep, void **memp)
   return result;
 }
 
-/* Free stacks until cache size is lower than LIMIT.  */
-static void
-free_stacks (size_t limit)
-{
-  /* We reduce the size of the cache.  Remove the last entries until
-     the size is below the limit.  */
-  list_t *entry;
-  list_t *prev;
-
-  /* Search from the end of the list.  */
-  list_for_each_prev_safe (entry, prev, &GL (dl_stack_cache))
-    {
-      struct pthread *curr;
-
-      curr = list_entry (entry, struct pthread, list);
-      if (FREE_P (curr))
-        {
-          /* Unlink the block.  */
-          stack_list_del (entry);
-
-          /* Account for the freed memory.  */
-          GL (dl_stack_cache_actsize) -= curr->stackblock_size;
-
-          /* Free the memory associated with the ELF TLS.  */
-          _dl_deallocate_tls (TLS_TPADJ (curr), false);
-
-          /* Remove this block.  This should never fail.  If it does
-             something is really wrong.  */
-          if (__munmap (curr->stackblock, curr->stackblock_size) != 0)
-            abort ();
-
-          /* Maybe we have freed enough.  */
-          if (GL (dl_stack_cache_actsize) <= limit)
-            break;
-        }
-    }
-}
-
-/* Free all the stacks on cleanup.  */
-void
-__nptl_stacks_freeres (void)
-{
-  free_stacks (0);
-}
-
-/* Add a stack frame which is not used anymore to the stack.  Must be
-   called with the cache lock held.  */
-static inline void
-__attribute ((always_inline))
-queue_stack (struct pthread *stack)
-{
-  /* We unconditionally add the stack to the list.  The memory may
-     still be in use but it will not be reused until the kernel marks
-     the stack as not used anymore.  */
-  stack_list_add (&stack->list, &GL (dl_stack_cache));
-  GL (dl_stack_cache_actsize) += stack->stackblock_size;
-
-  if (__glibc_unlikely (GL (dl_stack_cache_actsize) > stack_cache_maxsize))
-    free_stacks (stack_cache_maxsize);
-}
-
 /* Return the guard page position on allocated stack.  */
 static inline char *
 __attribute ((always_inline))
@@ -588,7 +476,7 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
       lll_lock (GL (dl_stack_cache_lock), LLL_PRIVATE);
 
       /* And add to the list of stacks in use.  */
-      stack_list_add (&pd->list, &GL (dl_stack_used));
+      __nptl_stack_list_add (&pd->list, &GL (dl_stack_used));
 
       lll_unlock (GL (dl_stack_cache_lock), LLL_PRIVATE);
@@ -630,7 +518,7 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
           lll_lock (GL (dl_stack_cache_lock), LLL_PRIVATE);
 
           /* Remove the thread from the list.  */
-          stack_list_del (&pd->list);
+          __nptl_stack_list_del (&pd->list);
 
           lll_unlock (GL (dl_stack_cache_lock), LLL_PRIVATE);
@@ -731,26 +619,3 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
   return 0;
 }
-
-void
-__deallocate_stack (struct pthread *pd)
-{
-  lll_lock (GL (dl_stack_cache_lock), LLL_PRIVATE);
-
-  /* Remove the thread from the list of threads with user defined
-     stacks.  */
-  stack_list_del (&pd->list);
-
-  /* Not much to do.  Just free the mmap()ed memory.  Note that we do
-     not reset the 'used' flag in the 'tid' field.  This is done by
-     the kernel.  If no thread has been created yet this field is
-     still zero.  */
-  if (__glibc_likely (! pd->user_stack))
-    (void) queue_stack (pd);
-  else
-    /* Free the memory associated with the ELF TLS.  */
-    _dl_deallocate_tls (TLS_TPADJ (pd), false);
-
-  lll_unlock (GL (dl_stack_cache_lock), LLL_PRIVATE);
-}
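The functions deleted here live on inside libc under their new names. Going by the renames listed in the commit message, the new nptl-stack.h plausibly declares them along these lines (a sketch; parameter lists inferred from the removed definitions and call sites above, not quoted from the header):

/* Hypothetical declarations matching the renames described above.  */
void __nptl_stack_list_del (list_t *elem);
void __nptl_stack_list_add (list_t *elem, list_t *list);
void __nptl_deallocate_stack (struct pthread *pd);
void __nptl_free_stacks (size_t limit);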