nptl: Cleanup cancellation macros
This patch wraps all uses of *_{enable,disable}_asynccancel and *_CANCEL_{ASYNC,RESET} in either already provided macros (lll_futex_timed_wait_cancel) or creates new ones if the functionality is not provided (SYSCALL_CANCEL_NCS, lll_futex_wait_cancel, and lll_futex_timed_wait_cancel).

Also, for some generic implementations, the direct calls of the macros are removed, since the underlying symbols are supposed to provide cancellation support.

This is a preliminary patch intended to simplify the work required for the BZ#12683 fix. It is a refactoring; no semantic changes are expected.

Checked on x86_64-linux-gnu and i686-linux-gnu.

* nptl/pthread_join_common.c (__pthread_timedjoin_ex): Use lll_wait_tid with timeout.
* nptl/sem_wait.c (__old_sem_wait): Use lll_futex_wait_cancel.
* sysdeps/nptl/aio_misc.h (AIO_MISC_WAIT): Use futex_reltimed_wait_cancelable for cancellable mode.
* sysdeps/nptl/gai_misc.h (GAI_MISC_WAIT): Likewise.
* sysdeps/posix/open64.c (__libc_open64): Do not call cancellation macros.
* sysdeps/posix/sigwait.c (__sigwait): Likewise.
* sysdeps/posix/waitid.c (__sigwait): Likewise.
* sysdeps/unix/sysdep.h (__SYSCALL_CANCEL_CALL, SYSCALL_CANCEL_NCS): New macros.
* sysdeps/nptl/lowlevellock.h (lll_wait_tid): Add timeout argument.
  (lll_timedwait_tid): Remove macro.
* sysdeps/unix/sysv/linux/i386/lowlevellock.h (lll_wait_tid): Likewise.
  (lll_timedwait_tid): Likewise.
* sysdeps/unix/sysv/linux/sparc/lowlevellock.h (lll_wait_tid): Likewise.
  (lll_timedwait_tid): Likewise.
* sysdeps/unix/sysv/linux/x86_64/lowlevellock.h (lll_wait_tid): Likewise.
  (lll_timedwait_tid): Likewise.
* sysdeps/unix/sysv/linux/clock_nanosleep.c (__clock_nanosleep): Use INTERNAL_SYSCALL_CANCEL.
* sysdeps/unix/sysv/linux/futex-internal.h (futex_reltimed_wait_cancelable): Use LIBC_CANCEL_{ASYNC,RESET} instead of __pthread_{enable,disable}_asynccancel.
* sysdeps/unix/sysv/linux/lowlevellock-futex.h (lll_futex_wait_cancel): New macro.
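For context, the pattern being factored out looks the same at every call site: switch the thread to asynchronous cancellation, issue the blocking syscall (typically a futex wait), then restore the previous cancellation type. The new macros (lll_futex_wait_cancel, SYSCALL_CANCEL_NCS, and the timeout-taking lll_wait_tid) simply package that bracketing; the hunks below show the call sites being converted. A rough user-level analogy of the bracketing, using the public pthread_setcanceltype interface rather than the internal LIBC_CANCEL_ASYNC/LIBC_CANCEL_RESET macros (the mapping is an approximation, not glibc's actual implementation):

/* Standalone sketch (not glibc code): the async-cancel bracketing that the
   internal macros hide.  pthread_setcanceltype stands in for
   LIBC_CANCEL_ASYNC ()/LIBC_CANCEL_RESET ().  Build with -pthread.  */
#include <pthread.h>
#include <unistd.h>

void
cancellable_blocking_call (void)
{
  int oldtype;

  /* Roughly what *_enable_asynccancel does.  */
  pthread_setcanceltype (PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);

  sleep (1);   /* Stands in for the blocking futex/syscall.  */

  /* Roughly what *_disable_asynccancel does.  */
  pthread_setcanceltype (oldtype, &oldtype);
}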
@@ -28,26 +28,16 @@ int
 __clock_nanosleep (clockid_t clock_id, int flags, const struct timespec *req,
 		   struct timespec *rem)
 {
-  INTERNAL_SYSCALL_DECL (err);
-  int r;
-
   if (clock_id == CLOCK_THREAD_CPUTIME_ID)
     return EINVAL;
   if (clock_id == CLOCK_PROCESS_CPUTIME_ID)
     clock_id = MAKE_PROCESS_CPUCLOCK (0, CPUCLOCK_SCHED);
 
-  if (SINGLE_THREAD_P)
-    r = INTERNAL_SYSCALL (clock_nanosleep, err, 4, clock_id, flags, req, rem);
-  else
-    {
-      int oldstate = LIBC_CANCEL_ASYNC ();
-
-      r = INTERNAL_SYSCALL (clock_nanosleep, err, 4, clock_id, flags, req,
-			    rem);
-
-      LIBC_CANCEL_RESET (oldstate);
-    }
-
+  /* If the call is interrupted by a signal handler or encounters an error,
+     it returns a positive value similar to errno.  */
+  INTERNAL_SYSCALL_DECL (err);
+  int r = INTERNAL_SYSCALL_CANCEL (clock_nanosleep, err, clock_id, flags,
+				   req, rem);
   return (INTERNAL_SYSCALL_ERROR_P (r, err)
 	  ? INTERNAL_SYSCALL_ERRNO (r, err) : 0);
 }
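INTERNAL_SYSCALL_CANCEL itself is not part of this excerpt (it lives in sysdeps/unix/sysdep.h, where the commit also adds __SYSCALL_CANCEL_CALL and SYSCALL_CANCEL_NCS). Conceptually it folds the removed single-thread fast path and the LIBC_CANCEL_ASYNC/LIBC_CANCEL_RESET bracketing into one expression; a simplified sketch of that idea, with a made-up name and without the real error-handling plumbing:

/* Conceptual sketch only -- not the actual sysdeps/unix/sysdep.h definition.
   It restates what the lines removed above used to spell out inline.  */
#define SYSCALL_CANCEL_SKETCH(name, err, nr, ...)			\
  ({									\
    long int __ret;							\
    if (SINGLE_THREAD_P)						\
      /* Single-threaded: no cancellation window is needed.  */	\
      __ret = INTERNAL_SYSCALL (name, err, nr, __VA_ARGS__);		\
    else								\
      {									\
	/* Enable asynchronous cancellation only around the syscall.  */ \
	int __oldtype = LIBC_CANCEL_ASYNC ();				\
	__ret = INTERNAL_SYSCALL (name, err, nr, __VA_ARGS__);		\
	LIBC_CANCEL_RESET (__oldtype);					\
      }									\
    __ret;								\
  })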
@@ -138,9 +138,9 @@ futex_reltimed_wait_cancelable (unsigned int *futex_word,
 				const struct timespec *reltime, int private)
 {
   int oldtype;
-  oldtype = __pthread_enable_asynccancel ();
+  oldtype = LIBC_CANCEL_ASYNC ();
   int err = lll_futex_timed_wait (futex_word, expected, reltime, private);
-  __pthread_disable_asynccancel (oldtype);
+  LIBC_CANCEL_RESET (oldtype);
   switch (err)
     {
     case 0:
@@ -221,32 +221,30 @@ extern int __lll_timedlock_elision (int *futex, short *adapt_count,
 #define lll_islocked(futex) \
   (futex != LLL_LOCK_INITIALIZER)
 
+extern int __lll_timedwait_tid (int *tid, const struct timespec *abstime)
+     __attribute__ ((regparm (2))) attribute_hidden;
+
 /* The kernel notifies a process which uses CLONE_CHILD_CLEARTID via futex
    wake-up when the clone terminates.  The memory location contains the
    thread ID while the clone is running and is reset to zero by the kernel
    afterwards.  The kernel up to version 3.16.3 does not use the private futex
-   operations for futex wake-up when the clone terminates.  */
-#define lll_wait_tid(tid) \
-  do {						\
-    __typeof (tid) __tid;			\
-    while ((__tid = (tid)) != 0)		\
-      lll_futex_wait (&(tid), __tid, LLL_SHARED);\
-  } while (0)
-
-extern int __lll_timedwait_tid (int *tid, const struct timespec *abstime)
-     __attribute__ ((regparm (2))) attribute_hidden;
-
-/* As lll_wait_tid, but with a timeout.  If the timeout occurs then return
-   ETIMEDOUT.  If ABSTIME is invalid, return EINVAL.
-   XXX Note that this differs from the generic version in that we do the
-   error checking here and not in __lll_timedwait_tid.  */
-#define lll_timedwait_tid(tid, abstime) \
-  ({						\
-    int __result = 0;				\
-    if ((tid) != 0)				\
-      __result = __lll_timedwait_tid (&(tid), (abstime)); \
-    __result; })
-
+   operations for futex wake-up when the clone terminates.
+   If ABSTIME is not NULL, is used a timeout for futex call.  If the timeout
+   occurs then return ETIMEOUT, if ABSTIME is invalid, return EINVAL.
+   The futex operation are issues with cancellable versions.  */
+#define lll_wait_tid(tid, abstime)			\
+  ({							\
+    int __res = 0;					\
+    __typeof (tid) __tid;				\
+    if (abstime != NULL)				\
+      __res = __lll_timedwait_tid (&(tid), (abstime));	\
+    else						\
+      /* We need acquire MO here so that we synchronize with the	\
+	 kernel's store to 0 when the clone terminates. (see above) */	\
+      while ((__tid = atomic_load_acquire (&(tid))) != 0)	\
+	lll_futex_wait_cancel (&(tid), __tid, LLL_SHARED);	\
+    __res;						\
+  })
 
 extern int __lll_lock_elision (int *futex, short *adapt_count, int private)
      attribute_hidden;
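Per the ChangeLog, the point of folding the timeout into lll_wait_tid is that nptl/pthread_join_common.c (__pthread_timedjoin_ex) no longer has to pick between lll_wait_tid and lll_timedwait_tid. A hedged sketch of such a caller (the function name and surrounding bookkeeping are illustrative, not the real pthread_join_common.c code):

/* Illustrative only: how a joiner can use the new interface.  PD is the
   glibc-internal descriptor of the thread being joined; its TID field is
   cleared by the kernel (CLONE_CHILD_CLEARTID) when the thread exits.  */
static int
timedjoin_wait_sketch (struct pthread *pd, const struct timespec *abstime)
{
  /* Blocks cancellably until pd->tid becomes zero; with a non-NULL ABSTIME
     it returns ETIMEDOUT on expiry or EINVAL for an invalid timeout.  */
  return lll_wait_tid (pd->tid, abstime);
}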
@@ -125,6 +125,17 @@
 			 private),					\
 		     nr_wake, nr_move, mutex, val)
 
+
+/* Cancellable futex macros.  */
+#define lll_futex_wait_cancel(futexp, val, private)			\
+  ({									\
+    int __oldtype = CANCEL_ASYNC ();					\
+    long int __err = lll_futex_wait (futexp, val, LLL_SHARED);		\
+    CANCEL_RESET (__oldtype);						\
+    __err;								\
+  })
+
+
 #endif  /* !__ASSEMBLER__ */
 
 #endif  /* lowlevellock-futex.h */
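The ChangeLog entry for nptl/sem_wait.c says __old_sem_wait now calls lll_futex_wait_cancel instead of wrapping lll_futex_wait in CANCEL_ASYNC ()/CANCEL_RESET () itself. A hedged sketch of that style of caller (simplified, not the actual __old_sem_wait body):

/* Illustrative only: a classic semaphore wait loop on top of the new macro.
   atomic_decrement_if_positive is the glibc-internal atomic from
   <atomic.h>; it returns the previous value.  */
static int
old_sem_wait_sketch (int *futex)
{
  for (;;)
    {
      /* Fast path: grab a token if one is available.  */
      if (atomic_decrement_if_positive (futex) > 0)
	return 0;

      /* Otherwise block until a post wakes us.  The macro makes the wait a
	 cancellation point by bracketing the futex syscall with
	 CANCEL_ASYNC ()/CANCEL_RESET (); spurious wake-ups just loop.  */
      lll_futex_wait_cancel (futex, 0, LLL_SHARED);
    }
}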
@@ -108,28 +108,29 @@ __lll_timedlock (int *futex, const struct timespec *abstime, int private)
 #define LLL_LOCK_INITIALIZER (0)
 #define LLL_LOCK_INITIALIZER_LOCKED (1)
 
-/* The kernel notifies a process which uses CLONE_CHILD_CLEARTID via futex
-   wakeup when the clone terminates.  The memory location contains the
-   thread ID while the clone is running and is reset to zero
-   afterwards.  */
-#define lll_wait_tid(tid) \
-  do					\
-    {					\
-      __typeof (tid) __tid;		\
-      while ((__tid = (tid)) != 0)	\
-	lll_futex_wait (&(tid), __tid, LLL_SHARED); \
-    }					\
-  while (0)
-
 extern int __lll_timedwait_tid (int *, const struct timespec *)
      attribute_hidden;
 
-#define lll_timedwait_tid(tid, abstime) \
-  ({					\
-    int __res = 0;			\
-    if ((tid) != 0)			\
-      __res = __lll_timedwait_tid (&(tid), (abstime)); \
-    __res;				\
+/* The kernel notifies a process which uses CLONE_CHILD_CLEARTID via futex
+   wake-up when the clone terminates.  The memory location contains the
+   thread ID while the clone is running and is reset to zero by the kernel
+   afterwards.  The kernel up to version 3.16.3 does not use the private futex
+   operations for futex wake-up when the clone terminates.
+   If ABSTIME is not NULL, is used a timeout for futex call.  If the timeout
+   occurs then return ETIMEOUT, if ABSTIME is invalid, return EINVAL.
+   The futex operation are issues with cancellable versions.  */
+#define lll_wait_tid(tid, abstime)			\
+  ({							\
+    int __res = 0;					\
+    __typeof (tid) __tid;				\
+    if (abstime != NULL)				\
+      __res = __lll_timedwait_tid (&(tid), (abstime));	\
+    else						\
+      /* We need acquire MO here so that we synchronize with the	\
+	 kernel's store to 0 when the clone terminates. (see above) */	\
+      while ((__tid = atomic_load_acquire (&(tid))) != 0)	\
+	lll_futex_wait_cancel (&(tid), __tid, LLL_SHARED);	\
+    __res;						\
   })
 
 #endif  /* lowlevellock.h */
@@ -224,32 +224,30 @@ extern int __lll_timedlock_elision (int *futex, short *adapt_count,
 #define lll_islocked(futex) \
   (futex != LLL_LOCK_INITIALIZER)
 
+extern int __lll_timedwait_tid (int *, const struct timespec *)
+     attribute_hidden;
+
 /* The kernel notifies a process which uses CLONE_CHILD_CLEARTID via futex
    wake-up when the clone terminates.  The memory location contains the
    thread ID while the clone is running and is reset to zero by the kernel
    afterwards.  The kernel up to version 3.16.3 does not use the private futex
-   operations for futex wake-up when the clone terminates.  */
-#define lll_wait_tid(tid) \
-  do {						\
-    __typeof (tid) __tid;			\
-    while ((__tid = (tid)) != 0)		\
-      lll_futex_wait (&(tid), __tid, LLL_SHARED);\
-  } while (0)
-
-extern int __lll_timedwait_tid (int *, const struct timespec *)
-     attribute_hidden;
-
-/* As lll_wait_tid, but with a timeout.  If the timeout occurs then return
-   ETIMEDOUT.  If ABSTIME is invalid, return EINVAL.
-   XXX Note that this differs from the generic version in that we do the
-   error checking here and not in __lll_timedwait_tid.  */
-#define lll_timedwait_tid(tid, abstime) \
-  ({						\
-    int __result = 0;				\
-    if ((tid) != 0)				\
-      __result = __lll_timedwait_tid (&(tid), (abstime)); \
-    __result; })
-
+   operations for futex wake-up when the clone terminates.
+   If ABSTIME is not NULL, is used a timeout for futex call.  If the timeout
+   occurs then return ETIMEOUT, if ABSTIME is invalid, return EINVAL.
+   The futex operation are issues with cancellable versions.  */
+#define lll_wait_tid(tid, abstime)			\
+  ({							\
+    int __res = 0;					\
+    __typeof (tid) __tid;				\
+    if (abstime != NULL)				\
+      __res = __lll_timedwait_tid (&(tid), (abstime));	\
+    else						\
+      /* We need acquire MO here so that we synchronize with the	\
+	 kernel's store to 0 when the clone terminates. (see above) */	\
+      while ((__tid = atomic_load_acquire (&(tid))) != 0)	\
+	lll_futex_wait_cancel (&(tid), __tid, LLL_SHARED);	\
+    __res;						\
+  })
 
 extern int __lll_lock_elision (int *futex, short *adapt_count, int private)
      attribute_hidden;