1
0
mirror of https://sourceware.org/git/glibc.git synced 2025-08-08 17:42:12 +03:00

nptl: Do not use pthread set_tid_address as state synchronization (BZ #19951)

The use after free described in BZ#19951 is due to the use of two different
PD fields, 'joinid' and 'cancelhandling', to describe the thread state
and to synchronize the calls of pthread_join, pthread_detach,
pthread_exit, and normal thread exit.

Any state change potentially requires to check for both field
atomically to handle partial state (such as pthread_join() with a
cancellation handler to issue a 'joinstate' field rollback).

This patch uses a different PD member with 4 possible states (JOINABLE,
DETACHED, EXITING, and EXITED) instead of pthread 'tid' field, with
the following logic:

  1. On pthread_create the initial state is set either to JOINABLE or
     DETACHED depending on the pthread attribute used.

  2. On pthread_detach, a CAS is issued on the state.  If the CAS
     fails it means that the thread is already detached (DETACHED) or is
     being terminated (EXITING).  For the former an EINVAL is returned,
     while for the latter pthread_detach is responsible for joining the
     thread (and deallocating any internal resources).

  3. In the exit phase of the wrapper function for the thread start
     routine (reached either if the thread function has returned,
     pthread_exit has been called, or the cancellation handler has been
     acted upon) we issue a CAS on state to set it to EXITING mode.  If the
     thread is previously on DETACHED mode the thread itself is
     responsible for arranging the deallocation of any resource,
     otherwise the thread needs to be joined (detached threads cannot
     immediately deallocate themselves).

  4. The clear_tid_field on 'clone' call is changed to set the new
     'state' field on thread exit (EXITED).  This state is only
     reached at thread termination.

  5. The pthread_join implementation is now simpler: the futex wait
     is done directly on thread state and there is no need to reset it
     in case of timeout since the state is now set either by
     pthread_detach() or by the kernel on process termination.

The race condition on pthread_detach is avoided with only one atomic
operation on PD state: once the mode is set to THREAD_STATE_DETACHED
it is up to thread itself to deallocate its memory (done on the exit
phase at pthread_create()).

Also, the INVALID_NOT_TERMINATED_TD_P is removed since a negative
tid is not possible and the macro is not used anywhere.

This change triggers an invalid C11 thread test: it creates a thread,
which detaches itself, and after a timeout the creating thread checks
if the join fails.  The issue is once thrd_join() is called the thread
lifetime is not defined.

Checked on x86_64-linux-gnu, i686-linux-gnu, aarch64-linux-gnu,
arm-linux-gnueabihf, and powerpc64-linux-gnu.
This commit is contained in:
Adhemerval Zanella
2025-07-09 18:07:48 -03:00
parent 4dc393f13e
commit e4585134ca
15 changed files with 137 additions and 139 deletions

View File

@@ -132,6 +132,18 @@ enum allocate_stack_mode_t
ALLOCATE_GUARD_USER = 2, ALLOCATE_GUARD_USER = 2,
}; };
/* Define a possible thread state on 'joinstate' field. The value will be
cleared by the kernel when the thread terminates (CLONE_CHILD_CLEARTID),
so THREAD_STATE_EXITED must be 0. */
enum thread_state_t
{
THREAD_STATE_EXITED = 0,
THREAD_STATE_EXITING,
THREAD_STATE_JOINABLE,
THREAD_STATE_DETACHED,
};
/* Thread descriptor data structure. */ /* Thread descriptor data structure. */
struct pthread struct pthread
{ {
@@ -174,8 +186,7 @@ struct pthread
GL (dl_stack_user) list. */ GL (dl_stack_user) list. */
list_t list; list_t list;
/* Thread ID - which is also a 'is this thread descriptor (and /* Thread ID set by the kernel with CLONE_PARENT_SETTID. */
therefore stack) used' flag. */
pid_t tid; pid_t tid;
/* List of robust mutexes the thread is holding. */ /* List of robust mutexes the thread is holding. */
@@ -345,15 +356,8 @@ struct pthread
/* Lock for synchronizing setxid calls. */ /* Lock for synchronizing setxid calls. */
unsigned int setxid_futex; unsigned int setxid_futex;
/* If the thread waits to join another one the ID of the latter is /* The current thread state defined by the THREAD_STATE_* enumeration. */
stored here. unsigned int joinstate;
In case a thread is detached this field contains a pointer of the
TCB if the thread itself. This is something which cannot happen
in normal operation. */
struct pthread *joinid;
/* Check whether a thread is detached. */
#define IS_DETACHED(pd) ((pd)->joinid == (pd))
/* The result of the thread function. */ /* The result of the thread function. */
void *result; void *result;

View File

@@ -34,7 +34,7 @@ extern int32_t __nptl_stack_hugetlb;
static inline bool static inline bool
__nptl_stack_in_use (struct pthread *pd) __nptl_stack_in_use (struct pthread *pd)
{ {
return pd->tid <= 0; return atomic_load_relaxed (&pd->joinstate) == THREAD_STATE_EXITED;
} }
/* Remove the stack ELEM from its list. */ /* Remove the stack ELEM from its list. */

View File

@@ -60,7 +60,8 @@ __pthread_cancel (pthread_t th)
{ {
volatile struct pthread *pd = (volatile struct pthread *) th; volatile struct pthread *pd = (volatile struct pthread *) th;
if (pd->tid == 0) int state = atomic_load_acquire (&pd->joinstate);
if (state == THREAD_STATE_EXITED || state == THREAD_STATE_EXITING)
/* The thread has already exited on the kernel side. Its outcome /* The thread has already exited on the kernel side. Its outcome
(regular exit, other cancelation) has already been (regular exit, other cancelation) has already been
determined. */ determined. */

View File

@@ -30,7 +30,7 @@ ___pthread_clockjoin_np64 (pthread_t threadid, void **thread_return,
return EINVAL; return EINVAL;
return __pthread_clockjoin_ex (threadid, thread_return, return __pthread_clockjoin_ex (threadid, thread_return,
clockid, abstime, true); clockid, abstime);
} }
#if __TIMESIZE == 64 #if __TIMESIZE == 64

View File

@@ -290,7 +290,7 @@ static int create_thread (struct pthread *pd, const struct pthread_attr *attr,
.flags = clone_flags, .flags = clone_flags,
.pidfd = (uintptr_t) &pd->tid, .pidfd = (uintptr_t) &pd->tid,
.parent_tid = (uintptr_t) &pd->tid, .parent_tid = (uintptr_t) &pd->tid,
.child_tid = (uintptr_t) &pd->tid, .child_tid = (uintptr_t) &pd->joinstate,
.stack = (uintptr_t) stackaddr, .stack = (uintptr_t) stackaddr,
.stack_size = stacksize, .stack_size = stacksize,
.tls = (uintptr_t) tp, .tls = (uintptr_t) tp,
@@ -355,12 +355,14 @@ start_thread (void *arg)
and free any resource prior return to the pthread_create caller. */ and free any resource prior return to the pthread_create caller. */
setup_failed = pd->setup_failed == 1; setup_failed = pd->setup_failed == 1;
if (setup_failed) if (setup_failed)
pd->joinid = NULL; pd->joinstate = THREAD_STATE_JOINABLE;
/* And give it up right away. */ /* And give it up right away. */
lll_unlock (pd->lock, LLL_PRIVATE); lll_unlock (pd->lock, LLL_PRIVATE);
if (setup_failed) if (setup_failed)
/* No need to clear the tid here, pthread_create() will join the
thread prior returning to caller. */
goto out; goto out;
} }
@@ -492,6 +494,22 @@ start_thread (void *arg)
the breakpoint reports TD_THR_RUN state rather than TD_THR_ZOMBIE. */ the breakpoint reports TD_THR_RUN state rather than TD_THR_ZOMBIE. */
atomic_fetch_or_relaxed (&pd->cancelhandling, EXITING_BITMASK); atomic_fetch_or_relaxed (&pd->cancelhandling, EXITING_BITMASK);
/* CONCURRENCY NOTES:
Concurrent pthread_detach() will either set state to
THREAD_STATE_DETACHED or wait for the thread to terminate. The exiting
state set here is set so a pthread_join() wait until all the required
cleanup steps are done.
The 'prevstate' field will be used to determine who is responsible to
call __nptl_free_tcb below. */
unsigned int prevstate;
do
prevstate = atomic_load_relaxed (&pd->joinstate);
while (!atomic_compare_exchange_weak_acquire (&pd->joinstate, &prevstate,
THREAD_STATE_EXITING));
if (__glibc_unlikely (atomic_fetch_add_relaxed (&__nptl_nthreads, -1) == 1)) if (__glibc_unlikely (atomic_fetch_add_relaxed (&__nptl_nthreads, -1) == 1))
/* This was the last thread. */ /* This was the last thread. */
exit (0); exit (0);
@@ -574,20 +592,21 @@ start_thread (void *arg)
pd->setxid_futex = 0; pd->setxid_futex = 0;
} }
/* If the thread is detached free the TCB. */ if (prevstate == THREAD_STATE_DETACHED)
if (IS_DETACHED (pd))
/* Free the TCB. */ /* Free the TCB. */
__nptl_free_tcb (pd); __nptl_free_tcb (pd);
/* Remove the associated name from the thread stack. */ /* Remove the associated name from the thread stack. */
name_stack_maps (pd, false); name_stack_maps (pd, false);
pd->tid = 0;
out: out:
/* We cannot call '_exit' here. '_exit' will terminate the process. /* We cannot call '_exit' here. '_exit' will terminate the process.
The 'exit' implementation in the kernel will signal when the The 'exit' implementation in the kernel will signal when the
process is really dead since 'clone' got passed the CLONE_CHILD_CLEARTID process is really dead since 'clone' got passed the CLONE_CHILD_CLEARTID
flag. The 'tid' field in the TCB will be set to zero. flag. The 'joinstate' field in the TCB will be set to zero.
rseq TLS is still registered at this point. Rely on implicit rseq TLS is still registered at this point. Rely on implicit
unregistration performed by the kernel on thread teardown. This is not a unregistration performed by the kernel on thread teardown. This is not a
@@ -702,7 +721,9 @@ __pthread_create_2_1 (pthread_t *newthread, const pthread_attr_t *attr,
/* Initialize the field for the ID of the thread which is waiting /* Initialize the field for the ID of the thread which is waiting
for us. This is a self-reference in case the thread is created for us. This is a self-reference in case the thread is created
detached. */ detached. */
pd->joinid = iattr->flags & ATTR_FLAG_DETACHSTATE ? pd : NULL; pd->joinstate = iattr->flags & ATTR_FLAG_DETACHSTATE
? THREAD_STATE_DETACHED
: THREAD_STATE_JOINABLE;
/* The debug events are inherited from the parent. */ /* The debug events are inherited from the parent. */
pd->eventbuf = self->eventbuf; pd->eventbuf = self->eventbuf;
@@ -861,10 +882,11 @@ __pthread_create_2_1 (pthread_t *newthread, const pthread_attr_t *attr,
/* Similar to pthread_join, but since thread creation has failed at /* Similar to pthread_join, but since thread creation has failed at
startup there is no need to handle all the steps. */ startup there is no need to handle all the steps. */
pid_t tid; unsigned int state;
while ((tid = atomic_load_acquire (&pd->tid)) != 0) while ((state = atomic_load_acquire (&pd->joinstate))
__futex_abstimed_wait_cancelable64 ((unsigned int *) &pd->tid, != THREAD_STATE_EXITED)
tid, 0, NULL, LLL_SHARED); __futex_abstimed_wait_cancelable64 (&pd->joinstate, state, 0,
NULL, LLL_SHARED);
} }
/* State (c) or (d) and we have ownership of PD (see CONCURRENCY /* State (c) or (d) and we have ownership of PD (see CONCURRENCY

View File

@@ -25,32 +25,28 @@ ___pthread_detach (pthread_t th)
{ {
struct pthread *pd = (struct pthread *) th; struct pthread *pd = (struct pthread *) th;
/* Make sure the descriptor is valid. */ /* CONCURRENCY NOTES:
if (INVALID_NOT_TERMINATED_TD_P (pd))
/* Not a valid thread handle. */
return ESRCH;
int result = 0; Concurrent pthread_detach will return EINVAL for the case the thread
is already detached (THREAD_STATE_DETACHED). POSIX states it is
undefined to call pthread_detach if TH refers to a non joinable thread.
/* Mark the thread as detached. */ For the case the thread is being terminated (THREAD_STATE_EXITING),
if (atomic_compare_and_exchange_bool_acq (&pd->joinid, pd, NULL)) pthread_detach will responsible to clean up the stack. */
unsigned int prevstate = atomic_load_relaxed (&pd->joinstate);
do
{ {
/* There are two possibilities here. First, the thread might if (prevstate != THREAD_STATE_JOINABLE)
already be detached. In this case we return EINVAL. {
Otherwise there might already be a waiter. The standard does if (prevstate == THREAD_STATE_DETACHED)
not mention what happens in this case. */ return EINVAL;
if (IS_DETACHED (pd)) return __pthread_join (th, 0);
result = EINVAL;
} }
else }
/* Check whether the thread terminated meanwhile. In this case we while (!atomic_compare_exchange_weak_acquire (&pd->joinstate, &prevstate,
will just free the TCB. */ THREAD_STATE_DETACHED));
if ((pd->cancelhandling & EXITING_BITMASK) != 0) return 0;
/* Note that the code in __free_tcb makes sure each thread
control block is freed only once. */
__nptl_free_tcb (pd);
return result;
} }
versioned_symbol (libc, ___pthread_detach, pthread_detach, GLIBC_2_34); versioned_symbol (libc, ___pthread_detach, pthread_detach, GLIBC_2_34);
libc_hidden_ver (___pthread_detach, __pthread_detach) libc_hidden_ver (___pthread_detach, __pthread_detach)

View File

@@ -52,7 +52,7 @@ __pthread_getattr_np (pthread_t thread_id, pthread_attr_t *attr)
iattr->flags = thread->flags; iattr->flags = thread->flags;
/* The thread might be detached by now. */ /* The thread might be detached by now. */
if (IS_DETACHED (thread)) if (atomic_load_acquire (&thread->joinstate) == THREAD_STATE_DETACHED)
iattr->flags |= ATTR_FLAG_DETACHSTATE; iattr->flags |= ATTR_FLAG_DETACHSTATE;
/* This is the guardsize after adjusting it. */ /* This is the guardsize after adjusting it. */

View File

@@ -22,7 +22,7 @@ int
___pthread_join (pthread_t threadid, void **thread_return) ___pthread_join (pthread_t threadid, void **thread_return)
{ {
return __pthread_clockjoin_ex (threadid, thread_return, 0 /* Ignored */, return __pthread_clockjoin_ex (threadid, thread_return, 0 /* Ignored */,
NULL, true); NULL);
} }
versioned_symbol (libc, ___pthread_join, pthread_join, GLIBC_2_34); versioned_symbol (libc, ___pthread_join, pthread_join, GLIBC_2_34);
libc_hidden_ver (___pthread_join, __pthread_join) libc_hidden_ver (___pthread_join, __pthread_join)

View File

@@ -22,91 +22,62 @@
#include <time.h> #include <time.h>
#include <futex-internal.h> #include <futex-internal.h>
static void /* Check for a possible deadlock situation where the threads are waiting for
cleanup (void *arg) each other to finish. Note that this is a "may" error. To be 100% sure we
catch this error we would have to lock the data structures but it is not
necessary. In the unlikely case that two threads are really caught in this
situation they will deadlock. It is the programmer's problem to figure
this out. */
static inline bool
check_for_deadlock (struct pthread *pd)
{ {
/* If we already changed the waiter ID, reset it. The call cannot
fail for any reason but the thread not having done that yet so
there is no reason for a loop. */
struct pthread *self = THREAD_SELF; struct pthread *self = THREAD_SELF;
atomic_compare_exchange_weak_acquire (&arg, &self, NULL); return ((pd == self
|| (atomic_load_acquire (&self->joinstate) == THREAD_STATE_DETACHED
&& (pd->cancelhandling
& (CANCELING_BITMASK | CANCELED_BITMASK | EXITING_BITMASK
| TERMINATED_BITMASK)) == 0))
&& !cancel_enabled_and_canceled (self->cancelhandling));
} }
int int
__pthread_clockjoin_ex (pthread_t threadid, void **thread_return, __pthread_clockjoin_ex (pthread_t threadid, void **thread_return,
clockid_t clockid, clockid_t clockid,
const struct __timespec64 *abstime, bool block) const struct __timespec64 *abstime)
{ {
struct pthread *pd = (struct pthread *) threadid; struct pthread *pd = (struct pthread *) threadid;
/* Make sure the descriptor is valid. */
if (INVALID_NOT_TERMINATED_TD_P (pd))
/* Not a valid thread handle. */
return ESRCH;
/* Is the thread joinable?. */
if (IS_DETACHED (pd))
/* We cannot wait for the thread. */
return EINVAL;
/* Make sure the clock and time specified are valid. */ /* Make sure the clock and time specified are valid. */
if (abstime if (abstime
&& __glibc_unlikely (!futex_abstimed_supported_clockid (clockid) && __glibc_unlikely (!futex_abstimed_supported_clockid (clockid)
|| ! valid_nanoseconds (abstime->tv_nsec))) || ! valid_nanoseconds (abstime->tv_nsec)))
return EINVAL; return EINVAL;
struct pthread *self = THREAD_SELF;
int result = 0;
LIBC_PROBE (pthread_join, 1, threadid); LIBC_PROBE (pthread_join, 1, threadid);
if ((pd == self int result = 0;
|| (self->joinid == pd unsigned int state;
&& (pd->cancelhandling while ((state = atomic_load_acquire (&pd->joinstate))
& (CANCELING_BITMASK | CANCELED_BITMASK | EXITING_BITMASK != THREAD_STATE_EXITED)
| TERMINATED_BITMASK)) == 0)) {
&& !cancel_enabled_and_canceled (self->cancelhandling)) if (check_for_deadlock (pd))
/* This is a deadlock situation. The threads are waiting for each
other to finish. Note that this is a "may" error. To be 100%
sure we catch this error we would have to lock the data
structures but it is not necessary. In the unlikely case that
two threads are really caught in this situation they will
deadlock. It is the programmer's problem to figure this
out. */
return EDEADLK; return EDEADLK;
/* Wait for the thread to finish. If it is already locked something /* POSIX states calling pthread_join on a non joinable thread is
is wrong. There can only be one waiter. */ undefined. However, if PD is still in the cache we can warn
else if (__glibc_unlikely (atomic_compare_exchange_weak_acquire (&pd->joinid, the caller. */
&self, if (state == THREAD_STATE_DETACHED)
NULL)))
/* There is already somebody waiting for the thread. */
return EINVAL; return EINVAL;
/* BLOCK waits either indefinitely or based on an absolute time. POSIX also /* pthread_join is a cancellation entrypoint and we use the same
states a cancellation point shall occur for pthread_join, and we use the rationale for pthread_timedjoin_np.
same rationale for posix_timedjoin_np. Both clockwait_tid and the futex
call use the cancellable variant. */
if (block)
{
/* During the wait we change to asynchronous cancellation. If we
are cancelled the thread we are waiting for must be marked as
un-wait-ed for again. */
pthread_cleanup_push (cleanup, &pd->joinid);
/* We need acquire MO here so that we synchronize with the The kernel notifies a process which uses CLONE_CHILD_CLEARTID via
kernel's store to 0 when the clone terminates. (see above) */ a memory zeroing and futex wake-up when the process terminates.
pid_t tid; The futex operation is not private. */
while ((tid = atomic_load_acquire (&pd->tid)) != 0) int ret = __futex_abstimed_wait_cancelable64 (&pd->joinstate, state,
{ clockid, abstime,
/* The kernel notifies a process which uses CLONE_CHILD_CLEARTID via LLL_SHARED);
futex wake-up when the clone terminates. The memory location
contains the thread ID while the clone is running and is reset to
zero by the kernel afterwards. The kernel up to version 3.16.3
does not use the private futex operations for futex wake-up when
the clone terminates. */
int ret = __futex_abstimed_wait_cancelable64 (
(unsigned int *) &pd->tid, tid, clockid, abstime, LLL_SHARED);
if (ret == ETIMEDOUT || ret == EOVERFLOW) if (ret == ETIMEDOUT || ret == EOVERFLOW)
{ {
result = ret; result = ret;
@@ -114,24 +85,15 @@ __pthread_clockjoin_ex (pthread_t threadid, void **thread_return,
} }
} }
pthread_cleanup_pop (0);
}
void *pd_result = pd->result; void *pd_result = pd->result;
if (__glibc_likely (result == 0)) if (__glibc_likely (result == 0))
{ {
/* We mark the thread as terminated and as joined. */
pd->tid = -1;
/* Store the return value if the caller is interested. */
if (thread_return != NULL) if (thread_return != NULL)
*thread_return = pd_result; *thread_return = pd_result;
/* Free the TCB. */ /* Free the TCB. */
__nptl_free_tcb (pd); __nptl_free_tcb (pd);
} }
else
pd->joinid = NULL;
LIBC_PROBE (pthread_join_ret, 3, threadid, result, pd_result); LIBC_PROBE (pthread_join_ret, 3, threadid, result, pd_result);

View File

@@ -24,7 +24,7 @@ ___pthread_timedjoin_np64 (pthread_t threadid, void **thread_return,
const struct __timespec64 *abstime) const struct __timespec64 *abstime)
{ {
return __pthread_clockjoin_ex (threadid, thread_return, return __pthread_clockjoin_ex (threadid, thread_return,
CLOCK_REALTIME, abstime, true); CLOCK_REALTIME, abstime);
} }
#if __TIMESIZE == 64 #if __TIMESIZE == 64

View File

@@ -21,15 +21,18 @@
int int
__pthread_tryjoin_np (pthread_t threadid, void **thread_return) __pthread_tryjoin_np (pthread_t threadid, void **thread_return)
{ {
/* Return right away if the thread hasn't terminated yet. */ /* The joinable state (THREAD_STATE_JOINABLE) is straigthforward since the
struct pthread *pd = (struct pthread *) threadid; thread hasn't finished yet and trying to join might block.
if (pd->tid != 0)
return EBUSY;
/* If pd->tid == 0 then lll_wait_tid will not block on futex The exiting thread (THREAD_STATE_EXITING) also migth result in a blocking
operation. */ call: a detached thread might change its state to exiting and a exiting
return __pthread_clockjoin_ex (threadid, thread_return, 0 /* Ignored */, thread my take some time to exit (and thus let the kernel set the state
NULL, false); to THREAD_STATE_EXITED). */
struct pthread *pd = (struct pthread *) threadid;
return atomic_load_acquire (&pd->joinstate) != THREAD_STATE_EXITED
? EBUSY
: __pthread_clockjoin_ex (threadid, thread_return, 0, NULL);
} }
versioned_symbol (libc, __pthread_tryjoin_np, pthread_tryjoin_np, GLIBC_2_34); versioned_symbol (libc, __pthread_tryjoin_np, pthread_tryjoin_np, GLIBC_2_34);

View File

@@ -73,9 +73,10 @@ __tls_init_tp (void)
list_add (&pd->list, &GL (dl_stack_user)); list_add (&pd->list, &GL (dl_stack_user));
/* Early initialization of the TCB. */ /* Early initialization of the TCB. */
pd->tid = INTERNAL_SYSCALL_CALL (set_tid_address, &pd->tid); pd->tid = INTERNAL_SYSCALL_CALL (set_tid_address, &pd->joinstate);
THREAD_SETMEM (pd, specific[0], &pd->specific_1stblock[0]); THREAD_SETMEM (pd, specific[0], &pd->specific_1stblock[0]);
THREAD_SETMEM (pd, stack_mode, ALLOCATE_GUARD_USER); THREAD_SETMEM (pd, stack_mode, ALLOCATE_GUARD_USER);
THREAD_SETMEM (pd, joinstate, THREAD_STATE_JOINABLE);
/* Before initializing GL (dl_stack_user), the debugger could not /* Before initializing GL (dl_stack_user), the debugger could not
find us and had to set __nptl_initial_report_events. Propagate find us and had to set __nptl_initial_report_events. Propagate

View File

@@ -18,6 +18,7 @@
#include <atomic.h> #include <atomic.h>
#include <pthreadP.h> #include <pthreadP.h>
#include <futex-internal.h>
_Noreturn static void _Noreturn static void
__libc_start_call_main (int (*main) (int, char **, char ** MAIN_AUXVEC_DECL), __libc_start_call_main (int (*main) (int, char **, char ** MAIN_AUXVEC_DECL),
@@ -65,6 +66,11 @@ __libc_start_call_main (int (*main) (int, char **, char ** MAIN_AUXVEC_DECL),
/* One less thread. Decrement the counter. If it is zero we /* One less thread. Decrement the counter. If it is zero we
terminate the entire process. */ terminate the entire process. */
result = 0; result = 0;
/* For the case a thread is waiting for the main thread to finish. */
struct pthread *self = THREAD_SELF;
atomic_store_release (&self->joinstate, THREAD_STATE_EXITED);
futex_wake (&self->joinstate, 1, FUTEX_SHARED);
if (atomic_fetch_add_relaxed (&__nptl_nthreads, -1) != 1) if (atomic_fetch_add_relaxed (&__nptl_nthreads, -1) != 1)
/* Not much left to do but to exit the thread, not the process. */ /* Not much left to do but to exit the thread, not the process. */
while (1) while (1)

View File

@@ -233,7 +233,6 @@ libc_hidden_proto (__pthread_current_priority)
nothing. And if the test triggers the thread descriptor is nothing. And if the test triggers the thread descriptor is
guaranteed to be invalid. */ guaranteed to be invalid. */
#define INVALID_TD_P(pd) __builtin_expect ((pd)->tid <= 0, 0) #define INVALID_TD_P(pd) __builtin_expect ((pd)->tid <= 0, 0)
#define INVALID_NOT_TERMINATED_TD_P(pd) __builtin_expect ((pd)->tid < 0, 0)
extern void __pthread_unwind (__pthread_unwind_buf_t *__buf) extern void __pthread_unwind (__pthread_unwind_buf_t *__buf)
__cleanup_fct_attribute __attribute ((__noreturn__)) __cleanup_fct_attribute __attribute ((__noreturn__))
@@ -534,7 +533,7 @@ libc_hidden_proto (__pthread_setcanceltype)
extern void __pthread_testcancel (void); extern void __pthread_testcancel (void);
libc_hidden_proto (__pthread_testcancel) libc_hidden_proto (__pthread_testcancel)
extern int __pthread_clockjoin_ex (pthread_t, void **, clockid_t, extern int __pthread_clockjoin_ex (pthread_t, void **, clockid_t,
const struct __timespec64 *, bool) const struct __timespec64 *)
attribute_hidden; attribute_hidden;
extern int __pthread_sigmask (int, const sigset_t *, sigset_t *); extern int __pthread_sigmask (int, const sigset_t *, sigset_t *);
libc_hidden_proto (__pthread_sigmask); libc_hidden_proto (__pthread_sigmask);

View File

@@ -28,7 +28,10 @@ detach_thrd (void *arg)
{ {
if (thrd_detach (thrd_current ()) != thrd_success) if (thrd_detach (thrd_current ()) != thrd_success)
FAIL_EXIT1 ("thrd_detach failed"); FAIL_EXIT1 ("thrd_detach failed");
thrd_exit (thrd_success);
pause ();
return 0;
} }
static int static int
@@ -43,6 +46,7 @@ do_test (void)
/* Give some time so the thread can finish. */ /* Give some time so the thread can finish. */
thrd_sleep (&(struct timespec) {.tv_sec = 2}, NULL); thrd_sleep (&(struct timespec) {.tv_sec = 2}, NULL);
/* Calling thrd_join on a detached thread is UB... */
if (thrd_join (id, NULL) == thrd_success) if (thrd_join (id, NULL) == thrd_success)
FAIL_EXIT1 ("thrd_join succeed where it should fail"); FAIL_EXIT1 ("thrd_join succeed where it should fail");