mirror of
https://sourceware.org/git/glibc.git
synced 2025-08-07 06:43:00 +03:00
Update.
        * internals.h: Declare __pthread_max_stacksize.
        * pthread.c (__pthread_max_stacksize): New variable.
        (__pthread_initialize_manager): Determine __pthread_max_stacksize
        value.
        * manager.c (thread_segment): Return always NULL if FLOATING_STACKS.
        (pthread_allocate_stack): Allow kernel to choose stack address if
        FLOATING_STACKS.  This also handles variable-sized stacks.
        Always allocate stack and guardpage together.  Use mprotect to
        change guardpage access.
        * sysdeps/i386/useldt.h: Define FLOATING_STACKS and
        ARCH_STACK_MAX_SIZE.

        * attr.c (__pthread_attr_setstacksize): Also test value against
        upper limit.
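At the heart of the change is the FLOATING_STACKS path of pthread_allocate_stack(): instead of computing a fixed address for each stack segment, a single anonymous mmap now reserves the guard page and the stack together at a kernel-chosen address, and the guard is then made inaccessible with mprotect. A minimal standalone sketch of that allocation pattern follows; the helper name and signature are illustrative, not part of the patch.

#include <stddef.h>
#include <sys/mman.h>

/* Sketch of the floating-stack allocation: one anonymous mapping holds
   guard page plus usable stack, the kernel picks the address (NULL hint,
   no MAP_FIXED), and the guard is then revoked.  Names are illustrative.  */
static int alloc_floating_stack(size_t stacksize, size_t guardsize,
                                void **stack_bottom, void **guardaddr)
{
  void *map_addr = mmap(NULL, stacksize + guardsize,
                        PROT_READ | PROT_WRITE | PROT_EXEC,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (map_addr == MAP_FAILED)
    return -1;                                 /* no more memory available */

  if (guardsize > 0)
    mprotect(map_addr, guardsize, PROT_NONE);  /* bad page at the bottom */

  *guardaddr = map_addr;                       /* guard sits below the stack */
  *stack_bottom = (char *) map_addr + guardsize;
  return 0;
}

Because the kernel chooses the address, thread stacks no longer need to sit a fixed STACK_SIZE apart, which is what allows variable-sized stacks and why thread_segment() degenerates to NULL when FLOATING_STACKS is defined.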
--- a/linuxthreads/ChangeLog
+++ b/linuxthreads/ChangeLog
@@ -1,5 +1,20 @@
 2000-08-04  Ulrich Drepper  <drepper@redhat.com>
 
+        * internals.h: Declare __pthread_max_stacksize.
+        * pthread.c (__pthread_max_stacksize): New variable.
+        (__pthread_initialize_manager): Determine __pthread_initialize_manager
+        value.
+        * manager.c (thread_segment): Return always NULL if FLOATING_STACKS.
+        (pthread_allocate_stack): Allow kernel to choose stack address if
+        FLOATING_STACKS.  This also handles variable-sized stacks.
+        Always allocate stack and guardoage together.  Use mprotect to
+        change guardpage access.
+        * sysdeps/i386/useldt.h: Define FLOATING_STACKS and
+        ARCH_STACK_MAX_SIZE.
+
+        * attr.c (__pthread_attr_setstacksize): Also test value against
+        upper limit.
+
         * manager.c (__pthread_nonstandard_stacks): Define only if
         THREAD_SELF is not defined.
         (pthread_allocate_stack): Always initialize gardaddr to a correct
--- a/linuxthreads/attr.c
+++ b/linuxthreads/attr.c
@@ -18,6 +18,7 @@
 #include <string.h>
 #include <unistd.h>
 #include <sys/param.h>
+#include <sys/resource.h>
 #include "pthread.h"
 #include "internals.h"
 #include <shlib-compat.h>
@@ -184,6 +185,30 @@ weak_alias (__pthread_attr_getstackaddr, pthread_attr_getstackaddr)
 
 int __pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize)
 {
+#ifdef FLOATING_STACKS
+  /* We have to check against the maximum allowed stack size.  This is no
+     problem if the manager is already started and we determined it.  If
+     this hasn't happened, we have to find the limit outself.  */
+  if (__pthread_max_stacksize == 0)
+    {
+      struct rlimit limit;
+
+      getrlimit(RLIMIT_STACK, &limit);
+# ifdef NEED_SEPARATE_REGISTER_STACK
+      __pthread_max_stacksize = limit.rlim_max / 2;
+# else
+      __pthread_max_stacksize = limit.rlim_max;
+# endif
+    }
+
+  if (stacksize > __pthread_max_stacksize)
+    return EINVAL;
+#else
+  /* We have a fixed size limit.  */
+  if (stacksize > STACK_SIZE)
+    return EINVAL;
+#endif
+
   /* We don't accept value smaller than PTHREAD_STACK_MIN.  */
   if (stacksize < PTHREAD_STACK_MIN)
     return EINVAL;
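The observable effect of the attr.c change above: pthread_attr_setstacksize() keeps rejecting values below PTHREAD_STACK_MIN and now also rejects values above the RLIMIT_STACK hard limit (halved when a separate register stack is needed). A small usage sketch, assuming a build where FLOATING_STACKS is in effect (such as i386 after this commit):

#include <limits.h>
#include <pthread.h>
#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
  pthread_attr_t attr;
  struct rlimit limit;

  pthread_attr_init(&attr);
  getrlimit(RLIMIT_STACK, &limit);

  /* Below PTHREAD_STACK_MIN: rejected by the existing check (EINVAL).  */
  printf("too small: %d\n",
         pthread_attr_setstacksize(&attr, PTHREAD_STACK_MIN - 1));

  /* Above the hard limit: rejected by the new upper-limit check (EINVAL)
     when FLOATING_STACKS is in effect.  */
  if (limit.rlim_max != RLIM_INFINITY)
    printf("too large: %d\n",
           pthread_attr_setstacksize(&attr, (size_t) limit.rlim_max + 1));

  pthread_attr_destroy(&attr);
  return 0;
}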
--- a/linuxthreads/internals.h
+++ b/linuxthreads/internals.h
@@ -272,6 +272,11 @@ extern int __pthread_manager_reader;
 extern char *__pthread_manager_thread_bos;
 extern char *__pthread_manager_thread_tos;
 
+#ifdef FLOATING_STACKS
+/* Maximum stack size.  */
+extern size_t __pthread_max_stacksize;
+#endif
+
 /* Pending request for a process-wide exit */
 
 extern int __pthread_exit_requested, __pthread_exit_code;
--- a/linuxthreads/manager.c
+++ b/linuxthreads/manager.c
@@ -64,25 +64,29 @@ volatile pthread_descr __pthread_last_event;
 /* Stack segment numbers are also indices into the __pthread_handles array. */
 /* Stack segment number 0 is reserved for the initial thread. */
 
+#if FLOATING_STACKS
+# define thread_segment(seq) NULL
+#else
 static inline pthread_descr thread_segment(int seg)
 {
   return (pthread_descr)(THREAD_STACK_START_ADDRESS - (seg - 1) * STACK_SIZE)
          - 1;
 }
+#endif
 
 /* Flag set in signal handler to record child termination */
 
-static volatile int terminated_children = 0;
+static volatile int terminated_children;
 
 /* Flag set when the initial thread is blocked on pthread_exit waiting
    for all other threads to terminate */
 
-static int main_thread_exiting = 0;
+static int main_thread_exiting;
 
 /* Counter used to generate unique thread identifier.
    Thread identifier is pthread_threads_counter + segment. */
 
-static pthread_t pthread_threads_counter = 0;
+static pthread_t pthread_threads_counter;
 
 #ifdef NEED_SEPARATE_REGISTER_STACK
 /* Signal masks for the manager.  These have to be global only when clone2
@@ -338,6 +342,7 @@ static int pthread_allocate_stack(const pthread_attr_t *attr,
       void *map_addr;
 
       /* Allocate space for stack and thread descriptor at default address */
+#ifdef NEED_SEPARATE_REGISTER_STACK
       if (attr != NULL)
         {
           guardsize = page_roundup (attr->__guardsize, granularity);
@@ -350,7 +355,7 @@ static int pthread_allocate_stack(const pthread_attr_t *attr,
           guardsize = granularity;
           stacksize = STACK_SIZE - granularity;
         }
-#ifdef NEED_SEPARATE_REGISTER_STACK
+
       new_thread = default_new_thread;
       new_thread_bottom = (char *) (new_thread + 1) - stacksize - guardsize;
       /* Includes guard area, unlike the normal case.  Use the bottom
@@ -361,6 +366,8 @@ static int pthread_allocate_stack(const pthread_attr_t *attr,
         in the same region.  The cost is that we might be able to map
         slightly fewer stacks.  */
 
+      /* XXX Fix for floating stacks with variable sizes.  */
+
      /* First the main stack: */
      if (mmap((caddr_t)((char *)(new_thread + 1) - stacksize / 2),
               stacksize / 2, PROT_READ | PROT_WRITE | PROT_EXEC,
@@ -382,37 +389,63 @@ static int pthread_allocate_stack(const pthread_attr_t *attr,
       guardaddr = new_thread_bottom + stacksize/2;
       /* We leave the guard area in the middle unmapped.  */
 #else /* !NEED_SEPARATE_REGISTER_STACK */
+# if FLOATING_STACKS
+      if (attr != NULL)
+        {
+          guardsize = page_roundup (attr->__guardsize, granularity);
+          stacksize = __pthread_max_stacksize - guardsize;
+          stacksize = MIN (stacksize,
+                           page_roundup (attr->__stacksize, granularity));
+        }
+      else
+        {
+          guardsize = granularity;
+          stacksize = __pthread_max_stacksize - guardsize;
+        }
+
+      map_addr = mmap(NULL, stacksize + guardsize,
+                      PROT_READ | PROT_WRITE | PROT_EXEC,
+                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+      if (map_addr == MAP_FAILED)
+        /* No more memory available.  */
+        return -1;
+
+      guardaddr = map_addr;
+      if (guardsize > 0)
+        mprotect (guardaddr, guardsize, PROT_NONE);
+
+      new_thread_bottom = (char *) map_addr + guardsize;
+      new_thread = ((pthread_descr) (new_thread_bottom + stacksize)) - 1;
+# else
+      if (attr != NULL)
+        {
+          guardsize = page_roundup (attr->__guardsize, granularity);
+          stacksize = STACK_SIZE - guardsize;
+          stacksize = MIN (stacksize,
+                           page_roundup (attr->__stacksize, granularity));
+        }
+      else
+        {
+          guardsize = granularity;
+          stacksize = STACK_SIZE - granularity;
+        }
+
       new_thread = default_new_thread;
       new_thread_bottom = (char *) (new_thread + 1) - stacksize;
-      map_addr = mmap((caddr_t)((char *)(new_thread + 1) - stacksize),
-                      stacksize, PROT_READ | PROT_WRITE | PROT_EXEC,
+      map_addr = mmap((caddr_t)((char *)(new_thread + 1) - stacksize - guardsize),
+                      stacksize + guardsize,
+                      PROT_READ | PROT_WRITE | PROT_EXEC,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
       if (map_addr == MAP_FAILED)
         /* Bad luck, this segment is already mapped. */
         return -1;
-      /* We manage to get a stack.  Now see whether we need a guard
-         and allocate it if necessary.  Notice that the default
-         attributes (stack_size = STACK_SIZE - pagesize and guardsize
-         = pagesize) do not need a guard page, since the RLIMIT_STACK
-         soft limit prevents stacks from running into one another. */
-      if (stacksize == STACK_SIZE - pagesize)
-        {
-          /* We don't need a guard page. */
-          guardaddr = new_thread_bottom;
-          guardsize = 0;
-        }
-      else
-        {
-          /* Put a bad page at the bottom of the stack */
-          guardaddr = (void *)new_thread_bottom - guardsize;
-          if (mmap ((caddr_t) guardaddr, guardsize, 0, MAP_FIXED, -1, 0)
-              == MAP_FAILED)
-            {
-              /* We don't make this an error. */
-              guardaddr = new_thread_bottom;
-              guardsize = 0;
-            }
-        }
+
+      /* We manage to get a stack.  Protect the guard area pages if
+         necessary.  */
+      guardaddr = map_addr;
+      if (guardsize > 0)
+        mprotect (guardaddr, guardsize, PROT_NONE);
+# endif
 #endif /* !NEED_SEPARATE_REGISTER_STACK */
     }
   /* Clear the thread data structure. */
--- a/linuxthreads/pthread.c
+++ b/linuxthreads/pthread.c
@@ -182,6 +182,9 @@ char *__pthread_manager_thread_tos;
 int __pthread_exit_requested;
 int __pthread_exit_code;
 
+/* Maximum stack size.  */
+size_t __pthread_max_stacksize;
+
 /* Nozero if the machine has more than one processor.  */
 int __pthread_smp_kernel;
 
@@ -455,20 +458,32 @@ int __pthread_initialize_manager(void)
   struct rlimit limit;
   int max_stack;
 
+  getrlimit(RLIMIT_STACK, &limit);
+#ifdef FLOATING_STACKS
+  if (limit.rlim_cur == RLIM_INFINITY)
+    limit.rlim_cur = ARCH_STACK_MAX_SIZE;
+# ifdef NEED_SEPARATE_REGISTER_STACK
+  max_stack = limit.rlim_cur / 2;
+# else
+  max_stack = limit.rlim_cur;
+#endif
+
+  __pthread_max_stacksize = max_stack;
+#else
   /* Play with the stack size limit to make sure that no stack ever grows
      beyond STACK_SIZE minus one page (to act as a guard page).  */
-  getrlimit(RLIMIT_STACK, &limit);
-#ifdef NEED_SEPARATE_REGISTER_STACK
+# ifdef NEED_SEPARATE_REGISTER_STACK
   /* STACK_SIZE bytes hold both the main stack and register backing
      store.  The rlimit value applies to each individually.  */
-  max_stack = STACK_SIZE/2 - __getpagesize();
-#else
+  max_stack = STACK_SIZE/2 - __getpagesize ();
+# else
   max_stack = STACK_SIZE - __getpagesize();
-#endif
+# endif
   if (limit.rlim_cur > max_stack) {
     limit.rlim_cur = max_stack;
     setrlimit(RLIMIT_STACK, &limit);
   }
+#endif
   /* If basic initialization not done yet (e.g. we're called from a
      constructor run before our constructor), do it now */
   if (__pthread_initial_thread_bos == NULL) pthread_initialize();
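For reference, the __pthread_max_stacksize value cached by __pthread_initialize_manager() above reduces to the current RLIMIT_STACK soft limit, capped by ARCH_STACK_MAX_SIZE when the limit is unlimited and halved when a separate register stack gets the other half of the allocation. A standalone restatement of that derivation (the function name and the fallback ARCH_STACK_MAX_SIZE definition are illustrative only):

#include <stddef.h>
#include <sys/resource.h>

#ifndef ARCH_STACK_MAX_SIZE
# define ARCH_STACK_MAX_SIZE (8 * 1024 * 1024)  /* i386 value from useldt.h below */
#endif

/* Illustrative restatement of the FLOATING_STACKS branch in
   __pthread_initialize_manager(): derive the per-thread stack ceiling
   from the current RLIMIT_STACK soft limit.  */
static size_t compute_max_stacksize(int separate_register_stack)
{
  struct rlimit limit;

  getrlimit(RLIMIT_STACK, &limit);
  if (limit.rlim_cur == RLIM_INFINITY)
    limit.rlim_cur = ARCH_STACK_MAX_SIZE;

  return separate_register_stack ? limit.rlim_cur / 2 : limit.rlim_cur;
}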
--- a/linuxthreads/sysdeps/i386/useldt.h
+++ b/linuxthreads/sysdeps/i386/useldt.h
@@ -169,3 +169,9 @@ extern int __modify_ldt (int, struct modify_ldt_ldt_s *, size_t);
                    member))); \
   } \
 })
+
+/* We want the OS to assign stack addresses.  */
+#define FLOATING_STACKS 1
+
+/* Maximum size o fthe stack if the rlimit is unlimited.  */
+#define ARCH_STACK_MAX_SIZE 8*1024*1024