mirror of
https://sourceware.org/git/glibc.git
synced 2025-06-15 06:41:47 +03:00
nptl: Clean up cancellation macros
This patch wraps all uses of *_{enable,disable}_asynccancel and and *_CANCEL_{ASYNC,RESET} in either already provided macros (lll_futex_timed_wait_cancel) or creates new ones if the functionality is not provided (SYSCALL_CANCEL_NCS, lll_futex_wait_cancel, and lll_futex_timed_wait_cancel). Also for some generic implementations, the direct call of the macros are removed since the underlying symbols are suppose to provide cancellation support. This is a priliminary patch intended to simplify the work required for BZ#12683 fix. It is a refactor change, no semantic changes are expected. Checked on x86_64-linux-gnu and i686-linux-gnu. * nptl/pthread_join_common.c (__pthread_timedjoin_ex): Use lll_wait_tid with timeout. * nptl/sem_wait.c (__old_sem_wait): Use lll_futex_wait_cancel. * sysdeps/nptl/aio_misc.h (AIO_MISC_WAIT): Use futex_reltimed_wait_cancelable for cancelabla mode. * sysdeps/nptl/gai_misc.h (GAI_MISC_WAIT): Likewise. * sysdeps/posix/open64.c (__libc_open64): Do not call cancelation macros. * sysdeps/posix/sigwait.c (__sigwait): Likewise. * sysdeps/posix/waitid.c (__sigwait): Likewise. * sysdeps/unix/sysdep.h (__SYSCALL_CANCEL_CALL, SYSCALL_CANCEL_NCS): New macro. * sysdeps/nptl/lowlevellock.h (lll_wait_tid): Add timeout argument. (lll_timedwait_tid): Remove macro. * sysdeps/unix/sysv/linux/i386/lowlevellock.h (lll_wait_tid): Likewise. (lll_timedwait_tid): Likewise. * sysdeps/unix/sysv/linux/sparc/lowlevellock.h (lll_wait_tid): Likewise. (lll_timedwait_tid): Likewise. * sysdeps/unix/sysv/linux/x86_64/lowlevellock.h (lll_wait_tid): Likewise. (lll_timedwait_tid): Likewise. * sysdeps/unix/sysv/linux/clock_nanosleep.c (__clock_nanosleep): Use INTERNAL_SYSCALL_CANCEL. * sysdeps/unix/sysv/linux/futex-internal.h (futex_reltimed_wait_cancelable): Use LIBC_CANCEL_{ASYNC,RESET} instead of __pthread_{enable,disable}_asynccancel. * sysdeps/unix/sysv/linux/lowlevellock-futex.h (lll_futex_wait_cancel): New macro.
This commit is contained in:
31
ChangeLog
31
ChangeLog
@ -1,5 +1,36 @@
|
|||||||
2019-01-03 Adhemerval Zanella <adhemerval.zanella@linaro.org>
|
2019-01-03 Adhemerval Zanella <adhemerval.zanella@linaro.org>
|
||||||
|
|
||||||
|
* nptl/pthread_join_common.c (__pthread_timedjoin_ex): Use
|
||||||
|
lll_wait_tid with timeout.
|
||||||
|
* nptl/sem_wait.c (__old_sem_wait): Use lll_futex_wait_cancel.
|
||||||
|
* sysdeps/nptl/aio_misc.h (AIO_MISC_WAIT): Use
|
||||||
|
futex_reltimed_wait_cancelable for cancelable mode.
|
||||||
|
* sysdeps/nptl/gai_misc.h (GAI_MISC_WAIT): Likewise.
|
||||||
|
* sysdeps/posix/open64.c (__libc_open64): Do not call cancelation
|
||||||
|
macros.
|
||||||
|
* sysdeps/posix/sigwait.c (__sigwait): Likewise.
|
||||||
|
* sysdeps/posix/waitid.c (__waitid): Likewise.
|
||||||
|
* sysdeps/unix/sysdep.h (__SYSCALL_CANCEL_CALL,
|
||||||
|
SYSCALL_CANCEL_NCS): New macro.
|
||||||
|
* sysdeps/nptl/lowlevellock.h (lll_wait_tid): Add timeout argument.
|
||||||
|
(lll_timedwait_tid): Remove macro.
|
||||||
|
* sysdeps/unix/sysv/linux/i386/lowlevellock.h (lll_wait_tid):
|
||||||
|
Likewise.
|
||||||
|
(lll_timedwait_tid): Likewise.
|
||||||
|
* sysdeps/unix/sysv/linux/sparc/lowlevellock.h (lll_wait_tid):
|
||||||
|
Likewise.
|
||||||
|
(lll_timedwait_tid): Likewise.
|
||||||
|
* sysdeps/unix/sysv/linux/x86_64/lowlevellock.h (lll_wait_tid):
|
||||||
|
Likewise.
|
||||||
|
(lll_timedwait_tid): Likewise.
|
||||||
|
* sysdeps/unix/sysv/linux/clock_nanosleep.c (__clock_nanosleep):
|
||||||
|
Use INTERNAL_SYSCALL_CANCEL.
|
||||||
|
* sysdeps/unix/sysv/linux/futex-internal.h
|
||||||
|
(futex_reltimed_wait_cancelable): Use LIBC_CANCEL_{ASYNC,RESET}
|
||||||
|
instead of __pthread_{enable,disable}_asynccancel.
|
||||||
|
* sysdeps/unix/sysv/linux/lowlevellock-futex.h
|
||||||
|
(lll_futex_wait_cancel): New macro.
|
||||||
|
|
||||||
* sysdeps/i386/nptl/tls.h (THREAD_ATOMIC_CMPXCHG_VAL,
|
* sysdeps/i386/nptl/tls.h (THREAD_ATOMIC_CMPXCHG_VAL,
|
||||||
THREAD_ATOMIC_AND, THREAD_ATOMIC_BIT_SET): Remove macros.
|
THREAD_ATOMIC_AND, THREAD_ATOMIC_BIT_SET): Remove macros.
|
||||||
|
|
||||||
|
@ -81,14 +81,7 @@ __pthread_timedjoin_ex (pthread_t threadid, void **thread_return,
|
|||||||
un-wait-ed for again. */
|
un-wait-ed for again. */
|
||||||
pthread_cleanup_push (cleanup, &pd->joinid);
|
pthread_cleanup_push (cleanup, &pd->joinid);
|
||||||
|
|
||||||
int oldtype = CANCEL_ASYNC ();
|
result = lll_wait_tid (pd->tid, abstime);
|
||||||
|
|
||||||
if (abstime != NULL)
|
|
||||||
result = lll_timedwait_tid (pd->tid, abstime);
|
|
||||||
else
|
|
||||||
lll_wait_tid (pd->tid);
|
|
||||||
|
|
||||||
CANCEL_RESET (oldtype);
|
|
||||||
|
|
||||||
pthread_cleanup_pop (0);
|
pthread_cleanup_pop (0);
|
||||||
}
|
}
|
||||||
|
@ -56,14 +56,8 @@ __old_sem_wait (sem_t *sem)
|
|||||||
if (atomic_decrement_if_positive (futex) > 0)
|
if (atomic_decrement_if_positive (futex) > 0)
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
/* Enable asynchronous cancellation. Required by the standard. */
|
|
||||||
int oldtype = __pthread_enable_asynccancel ();
|
|
||||||
|
|
||||||
/* Always assume the semaphore is shared. */
|
/* Always assume the semaphore is shared. */
|
||||||
err = lll_futex_wait (futex, 0, LLL_SHARED);
|
err = lll_futex_wait_cancel (futex, 0, LLL_SHARED);
|
||||||
|
|
||||||
/* Disable asynchronous cancellation. */
|
|
||||||
__pthread_disable_asynccancel (oldtype);
|
|
||||||
}
|
}
|
||||||
while (err == 0 || err == -EWOULDBLOCK);
|
while (err == 0 || err == -EWOULDBLOCK);
|
||||||
|
|
||||||
|
@ -41,15 +41,15 @@
|
|||||||
{ \
|
{ \
|
||||||
pthread_mutex_unlock (&__aio_requests_mutex); \
|
pthread_mutex_unlock (&__aio_requests_mutex); \
|
||||||
\
|
\
|
||||||
int oldtype; \
|
|
||||||
if (cancel) \
|
|
||||||
oldtype = LIBC_CANCEL_ASYNC (); \
|
|
||||||
\
|
|
||||||
int status; \
|
int status; \
|
||||||
do \
|
do \
|
||||||
{ \
|
{ \
|
||||||
status = futex_reltimed_wait ((unsigned int *) futexaddr, oldval, \
|
if (cancel) \
|
||||||
timeout, FUTEX_PRIVATE); \
|
status = futex_reltimed_wait_cancelable ( \
|
||||||
|
(unsigned int *) futexaddr, oldval, timeout, FUTEX_PRIVATE); \
|
||||||
|
else \
|
||||||
|
status = futex_reltimed_wait ((unsigned int *) futexaddr, \
|
||||||
|
oldval, timeout, FUTEX_PRIVATE); \
|
||||||
if (status != EAGAIN) \
|
if (status != EAGAIN) \
|
||||||
break; \
|
break; \
|
||||||
\
|
\
|
||||||
@ -57,9 +57,6 @@
|
|||||||
} \
|
} \
|
||||||
while (oldval != 0); \
|
while (oldval != 0); \
|
||||||
\
|
\
|
||||||
if (cancel) \
|
|
||||||
LIBC_CANCEL_RESET (oldtype); \
|
|
||||||
\
|
|
||||||
if (status == EINTR) \
|
if (status == EINTR) \
|
||||||
result = EINTR; \
|
result = EINTR; \
|
||||||
else if (status == ETIMEDOUT) \
|
else if (status == ETIMEDOUT) \
|
||||||
|
@ -42,15 +42,15 @@
|
|||||||
{ \
|
{ \
|
||||||
pthread_mutex_unlock (&__gai_requests_mutex); \
|
pthread_mutex_unlock (&__gai_requests_mutex); \
|
||||||
\
|
\
|
||||||
int oldtype; \
|
|
||||||
if (cancel) \
|
|
||||||
oldtype = LIBC_CANCEL_ASYNC (); \
|
|
||||||
\
|
|
||||||
int status; \
|
int status; \
|
||||||
do \
|
do \
|
||||||
{ \
|
{ \
|
||||||
status = futex_reltimed_wait ((unsigned int *) futexaddr, oldval, \
|
if (cancel) \
|
||||||
timeout, FUTEX_PRIVATE); \
|
status = futex_reltimed_wait_cancelable ( \
|
||||||
|
(unsigned int *) futexaddr, oldval, timeout, FUTEX_PRIVATE); \
|
||||||
|
else \
|
||||||
|
status = futex_reltimed_wait ((unsigned int *) futexaddr, \
|
||||||
|
oldval, timeout, FUTEX_PRIVATE); \
|
||||||
if (status != EAGAIN) \
|
if (status != EAGAIN) \
|
||||||
break; \
|
break; \
|
||||||
\
|
\
|
||||||
@ -58,9 +58,6 @@
|
|||||||
} \
|
} \
|
||||||
while (oldval != 0); \
|
while (oldval != 0); \
|
||||||
\
|
\
|
||||||
if (cancel) \
|
|
||||||
LIBC_CANCEL_RESET (oldtype); \
|
|
||||||
\
|
|
||||||
if (status == EINTR) \
|
if (status == EINTR) \
|
||||||
result = EINTR; \
|
result = EINTR; \
|
||||||
else if (status == ETIMEDOUT) \
|
else if (status == ETIMEDOUT) \
|
||||||
|
@ -175,33 +175,29 @@ extern int __lll_timedlock_wait (int *futex, const struct timespec *,
|
|||||||
#define LLL_LOCK_INITIALIZER (0)
|
#define LLL_LOCK_INITIALIZER (0)
|
||||||
#define LLL_LOCK_INITIALIZER_LOCKED (1)
|
#define LLL_LOCK_INITIALIZER_LOCKED (1)
|
||||||
|
|
||||||
|
extern int __lll_timedwait_tid (int *, const struct timespec *)
|
||||||
|
attribute_hidden;
|
||||||
|
|
||||||
/* The kernel notifies a process which uses CLONE_CHILD_CLEARTID via futex
|
/* The kernel notifies a process which uses CLONE_CHILD_CLEARTID via futex
|
||||||
wake-up when the clone terminates. The memory location contains the
|
wake-up when the clone terminates. The memory location contains the
|
||||||
thread ID while the clone is running and is reset to zero by the kernel
|
thread ID while the clone is running and is reset to zero by the kernel
|
||||||
afterwards. The kernel up to version 3.16.3 does not use the private futex
|
afterwards. The kernel up to version 3.16.3 does not use the private futex
|
||||||
operations for futex wake-up when the clone terminates. */
|
operations for futex wake-up when the clone terminates.
|
||||||
#define lll_wait_tid(tid) \
|
If ABSTIME is not NULL, it is used as a timeout for the futex call. If the timeout
|
||||||
do { \
|
occurs then return ETIMEDOUT, if ABSTIME is invalid, return EINVAL.
|
||||||
__typeof (tid) __tid; \
|
The futex operations are issued with cancellable versions. */
|
||||||
/* We need acquire MO here so that we synchronize \
|
#define lll_wait_tid(tid, abstime) \
|
||||||
with the kernel's store to 0 when the clone \
|
({ \
|
||||||
terminates. (see above) */ \
|
int __res = 0; \
|
||||||
while ((__tid = atomic_load_acquire (&(tid))) != 0) \
|
__typeof (tid) __tid; \
|
||||||
lll_futex_wait (&(tid), __tid, LLL_SHARED); \
|
if (abstime != NULL) \
|
||||||
} while (0)
|
__res = __lll_timedwait_tid (&(tid), (abstime)); \
|
||||||
|
else \
|
||||||
extern int __lll_timedwait_tid (int *, const struct timespec *)
|
/* We need acquire MO here so that we synchronize with the \
|
||||||
attribute_hidden;
|
kernel's store to 0 when the clone terminates. (see above) */ \
|
||||||
|
while ((__tid = atomic_load_acquire (&(tid))) != 0) \
|
||||||
/* As lll_wait_tid, but with a timeout. If the timeout occurs then return
|
lll_futex_wait_cancel (&(tid), __tid, LLL_SHARED); \
|
||||||
ETIMEDOUT. If ABSTIME is invalid, return EINVAL. */
|
__res; \
|
||||||
#define lll_timedwait_tid(tid, abstime) \
|
|
||||||
({ \
|
|
||||||
int __res = 0; \
|
|
||||||
if ((tid) != 0) \
|
|
||||||
__res = __lll_timedwait_tid (&(tid), (abstime)); \
|
|
||||||
__res; \
|
|
||||||
})
|
})
|
||||||
|
|
||||||
|
|
||||||
|
@ -34,16 +34,8 @@ __libc_open64 (const char *file, int oflag, ...)
|
|||||||
va_end (arg);
|
va_end (arg);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (SINGLE_THREAD_P)
|
/* __libc_open should be a cancellation point. */
|
||||||
return __libc_open (file, oflag | O_LARGEFILE, mode);
|
return __libc_open (file, oflag | O_LARGEFILE, mode);
|
||||||
|
|
||||||
int oldtype = LIBC_CANCEL_ASYNC ();
|
|
||||||
|
|
||||||
int result = __libc_open (file, oflag | O_LARGEFILE, mode);
|
|
||||||
|
|
||||||
LIBC_CANCEL_RESET (oldtype);
|
|
||||||
|
|
||||||
return result;
|
|
||||||
}
|
}
|
||||||
weak_alias (__libc_open64, __open64)
|
weak_alias (__libc_open64, __open64)
|
||||||
libc_hidden_weak (__open64)
|
libc_hidden_weak (__open64)
|
||||||
|
@ -85,16 +85,8 @@ do_sigwait (const sigset_t *set, int *sig)
|
|||||||
int
|
int
|
||||||
__sigwait (const sigset_t *set, int *sig)
|
__sigwait (const sigset_t *set, int *sig)
|
||||||
{
|
{
|
||||||
if (SINGLE_THREAD_P)
|
/* __sigsuspend should be a cancellation point. */
|
||||||
return do_sigwait (set, sig);
|
return do_sigwait (set, sig);
|
||||||
|
|
||||||
int oldtype = LIBC_CANCEL_ASYNC ();
|
|
||||||
|
|
||||||
int result = do_sigwait (set, sig);
|
|
||||||
|
|
||||||
LIBC_CANCEL_RESET (oldtype);
|
|
||||||
|
|
||||||
return result;
|
|
||||||
}
|
}
|
||||||
libc_hidden_def (__sigwait)
|
libc_hidden_def (__sigwait)
|
||||||
weak_alias (__sigwait, sigwait)
|
weak_alias (__sigwait, sigwait)
|
||||||
|
@ -151,16 +151,8 @@ OUR_WAITID (idtype_t idtype, id_t id, siginfo_t *infop, int options)
|
|||||||
int
|
int
|
||||||
__waitid (idtype_t idtype, id_t id, siginfo_t *infop, int options)
|
__waitid (idtype_t idtype, id_t id, siginfo_t *infop, int options)
|
||||||
{
|
{
|
||||||
if (SINGLE_THREAD_P)
|
/* __waitpid should be a cancellation point. */
|
||||||
return do_waitid (idtype, id, infop, options);
|
return do_waitid (idtype, id, infop, options);
|
||||||
|
|
||||||
int oldtype = LIBC_CANCEL_ASYNC ();
|
|
||||||
|
|
||||||
int result = do_waitid (idtype, id, infop, options);
|
|
||||||
|
|
||||||
LIBC_CANCEL_RESET (oldtype);
|
|
||||||
|
|
||||||
return result;
|
|
||||||
}
|
}
|
||||||
weak_alias (__waitid, waitid)
|
weak_alias (__waitid, waitid)
|
||||||
strong_alias (__waitid, __libc_waitid)
|
strong_alias (__waitid, __libc_waitid)
|
||||||
|
@ -28,26 +28,16 @@ int
|
|||||||
__clock_nanosleep (clockid_t clock_id, int flags, const struct timespec *req,
|
__clock_nanosleep (clockid_t clock_id, int flags, const struct timespec *req,
|
||||||
struct timespec *rem)
|
struct timespec *rem)
|
||||||
{
|
{
|
||||||
INTERNAL_SYSCALL_DECL (err);
|
|
||||||
int r;
|
|
||||||
|
|
||||||
if (clock_id == CLOCK_THREAD_CPUTIME_ID)
|
if (clock_id == CLOCK_THREAD_CPUTIME_ID)
|
||||||
return EINVAL;
|
return EINVAL;
|
||||||
if (clock_id == CLOCK_PROCESS_CPUTIME_ID)
|
if (clock_id == CLOCK_PROCESS_CPUTIME_ID)
|
||||||
clock_id = MAKE_PROCESS_CPUCLOCK (0, CPUCLOCK_SCHED);
|
clock_id = MAKE_PROCESS_CPUCLOCK (0, CPUCLOCK_SCHED);
|
||||||
|
|
||||||
if (SINGLE_THREAD_P)
|
/* If the call is interrupted by a signal handler or encounters an error,
|
||||||
r = INTERNAL_SYSCALL (clock_nanosleep, err, 4, clock_id, flags, req, rem);
|
it returns a positive value similar to errno. */
|
||||||
else
|
INTERNAL_SYSCALL_DECL (err);
|
||||||
{
|
int r = INTERNAL_SYSCALL_CANCEL (clock_nanosleep, err, clock_id, flags,
|
||||||
int oldstate = LIBC_CANCEL_ASYNC ();
|
req, rem);
|
||||||
|
|
||||||
r = INTERNAL_SYSCALL (clock_nanosleep, err, 4, clock_id, flags, req,
|
|
||||||
rem);
|
|
||||||
|
|
||||||
LIBC_CANCEL_RESET (oldstate);
|
|
||||||
}
|
|
||||||
|
|
||||||
return (INTERNAL_SYSCALL_ERROR_P (r, err)
|
return (INTERNAL_SYSCALL_ERROR_P (r, err)
|
||||||
? INTERNAL_SYSCALL_ERRNO (r, err) : 0);
|
? INTERNAL_SYSCALL_ERRNO (r, err) : 0);
|
||||||
}
|
}
|
||||||
|
@ -138,9 +138,9 @@ futex_reltimed_wait_cancelable (unsigned int *futex_word,
|
|||||||
const struct timespec *reltime, int private)
|
const struct timespec *reltime, int private)
|
||||||
{
|
{
|
||||||
int oldtype;
|
int oldtype;
|
||||||
oldtype = __pthread_enable_asynccancel ();
|
oldtype = LIBC_CANCEL_ASYNC ();
|
||||||
int err = lll_futex_timed_wait (futex_word, expected, reltime, private);
|
int err = lll_futex_timed_wait (futex_word, expected, reltime, private);
|
||||||
__pthread_disable_asynccancel (oldtype);
|
LIBC_CANCEL_RESET (oldtype);
|
||||||
switch (err)
|
switch (err)
|
||||||
{
|
{
|
||||||
case 0:
|
case 0:
|
||||||
|
@ -221,32 +221,30 @@ extern int __lll_timedlock_elision (int *futex, short *adapt_count,
|
|||||||
#define lll_islocked(futex) \
|
#define lll_islocked(futex) \
|
||||||
(futex != LLL_LOCK_INITIALIZER)
|
(futex != LLL_LOCK_INITIALIZER)
|
||||||
|
|
||||||
|
extern int __lll_timedwait_tid (int *tid, const struct timespec *abstime)
|
||||||
|
__attribute__ ((regparm (2))) attribute_hidden;
|
||||||
|
|
||||||
/* The kernel notifies a process which uses CLONE_CHILD_CLEARTID via futex
|
/* The kernel notifies a process which uses CLONE_CHILD_CLEARTID via futex
|
||||||
wake-up when the clone terminates. The memory location contains the
|
wake-up when the clone terminates. The memory location contains the
|
||||||
thread ID while the clone is running and is reset to zero by the kernel
|
thread ID while the clone is running and is reset to zero by the kernel
|
||||||
afterwards. The kernel up to version 3.16.3 does not use the private futex
|
afterwards. The kernel up to version 3.16.3 does not use the private futex
|
||||||
operations for futex wake-up when the clone terminates. */
|
operations for futex wake-up when the clone terminates.
|
||||||
#define lll_wait_tid(tid) \
|
If ABSTIME is not NULL, it is used as a timeout for the futex call. If the timeout
|
||||||
do { \
|
occurs then return ETIMEDOUT, if ABSTIME is invalid, return EINVAL.
|
||||||
__typeof (tid) __tid; \
|
The futex operations are issued with cancellable versions. */
|
||||||
while ((__tid = (tid)) != 0) \
|
#define lll_wait_tid(tid, abstime) \
|
||||||
lll_futex_wait (&(tid), __tid, LLL_SHARED);\
|
({ \
|
||||||
} while (0)
|
int __res = 0; \
|
||||||
|
__typeof (tid) __tid; \
|
||||||
extern int __lll_timedwait_tid (int *tid, const struct timespec *abstime)
|
if (abstime != NULL) \
|
||||||
__attribute__ ((regparm (2))) attribute_hidden;
|
__res = __lll_timedwait_tid (&(tid), (abstime)); \
|
||||||
|
else \
|
||||||
/* As lll_wait_tid, but with a timeout. If the timeout occurs then return
|
/* We need acquire MO here so that we synchronize with the \
|
||||||
ETIMEDOUT. If ABSTIME is invalid, return EINVAL.
|
kernel's store to 0 when the clone terminates. (see above) */ \
|
||||||
XXX Note that this differs from the generic version in that we do the
|
while ((__tid = atomic_load_acquire (&(tid))) != 0) \
|
||||||
error checking here and not in __lll_timedwait_tid. */
|
lll_futex_wait_cancel (&(tid), __tid, LLL_SHARED); \
|
||||||
#define lll_timedwait_tid(tid, abstime) \
|
__res; \
|
||||||
({ \
|
})
|
||||||
int __result = 0; \
|
|
||||||
if ((tid) != 0) \
|
|
||||||
__result = __lll_timedwait_tid (&(tid), (abstime)); \
|
|
||||||
__result; })
|
|
||||||
|
|
||||||
|
|
||||||
extern int __lll_lock_elision (int *futex, short *adapt_count, int private)
|
extern int __lll_lock_elision (int *futex, short *adapt_count, int private)
|
||||||
attribute_hidden;
|
attribute_hidden;
|
||||||
|
@ -125,6 +125,17 @@
|
|||||||
private), \
|
private), \
|
||||||
nr_wake, nr_move, mutex, val)
|
nr_wake, nr_move, mutex, val)
|
||||||
|
|
||||||
|
|
||||||
|
/* Cancellable futex macros. */
|
||||||
|
#define lll_futex_wait_cancel(futexp, val, private) \
|
||||||
|
({ \
|
||||||
|
int __oldtype = CANCEL_ASYNC (); \
|
||||||
|
long int __err = lll_futex_wait (futexp, val, LLL_SHARED); \
|
||||||
|
CANCEL_RESET (__oldtype); \
|
||||||
|
__err; \
|
||||||
|
})
|
||||||
|
|
||||||
|
|
||||||
#endif /* !__ASSEMBLER__ */
|
#endif /* !__ASSEMBLER__ */
|
||||||
|
|
||||||
#endif /* lowlevellock-futex.h */
|
#endif /* lowlevellock-futex.h */
|
||||||
|
@ -108,28 +108,29 @@ __lll_timedlock (int *futex, const struct timespec *abstime, int private)
|
|||||||
#define LLL_LOCK_INITIALIZER (0)
|
#define LLL_LOCK_INITIALIZER (0)
|
||||||
#define LLL_LOCK_INITIALIZER_LOCKED (1)
|
#define LLL_LOCK_INITIALIZER_LOCKED (1)
|
||||||
|
|
||||||
/* The kernel notifies a process which uses CLONE_CHILD_CLEARTID via futex
|
|
||||||
wakeup when the clone terminates. The memory location contains the
|
|
||||||
thread ID while the clone is running and is reset to zero
|
|
||||||
afterwards. */
|
|
||||||
#define lll_wait_tid(tid) \
|
|
||||||
do \
|
|
||||||
{ \
|
|
||||||
__typeof (tid) __tid; \
|
|
||||||
while ((__tid = (tid)) != 0) \
|
|
||||||
lll_futex_wait (&(tid), __tid, LLL_SHARED); \
|
|
||||||
} \
|
|
||||||
while (0)
|
|
||||||
|
|
||||||
extern int __lll_timedwait_tid (int *, const struct timespec *)
|
extern int __lll_timedwait_tid (int *, const struct timespec *)
|
||||||
attribute_hidden;
|
attribute_hidden;
|
||||||
|
|
||||||
#define lll_timedwait_tid(tid, abstime) \
|
/* The kernel notifies a process which uses CLONE_CHILD_CLEARTID via futex
|
||||||
({ \
|
wake-up when the clone terminates. The memory location contains the
|
||||||
int __res = 0; \
|
thread ID while the clone is running and is reset to zero by the kernel
|
||||||
if ((tid) != 0) \
|
afterwards. The kernel up to version 3.16.3 does not use the private futex
|
||||||
__res = __lll_timedwait_tid (&(tid), (abstime)); \
|
operations for futex wake-up when the clone terminates.
|
||||||
__res; \
|
If ABSTIME is not NULL, it is used as a timeout for the futex call. If the timeout
|
||||||
|
occurs then return ETIMEDOUT, if ABSTIME is invalid, return EINVAL.
|
||||||
|
The futex operations are issued with cancellable versions. */
|
||||||
|
#define lll_wait_tid(tid, abstime) \
|
||||||
|
({ \
|
||||||
|
int __res = 0; \
|
||||||
|
__typeof (tid) __tid; \
|
||||||
|
if (abstime != NULL) \
|
||||||
|
__res = __lll_timedwait_tid (&(tid), (abstime)); \
|
||||||
|
else \
|
||||||
|
/* We need acquire MO here so that we synchronize with the \
|
||||||
|
kernel's store to 0 when the clone terminates. (see above) */ \
|
||||||
|
while ((__tid = atomic_load_acquire (&(tid))) != 0) \
|
||||||
|
lll_futex_wait_cancel (&(tid), __tid, LLL_SHARED); \
|
||||||
|
__res; \
|
||||||
})
|
})
|
||||||
|
|
||||||
#endif /* lowlevellock.h */
|
#endif /* lowlevellock.h */
|
||||||
|
@ -224,32 +224,30 @@ extern int __lll_timedlock_elision (int *futex, short *adapt_count,
|
|||||||
#define lll_islocked(futex) \
|
#define lll_islocked(futex) \
|
||||||
(futex != LLL_LOCK_INITIALIZER)
|
(futex != LLL_LOCK_INITIALIZER)
|
||||||
|
|
||||||
|
extern int __lll_timedwait_tid (int *, const struct timespec *)
|
||||||
|
attribute_hidden;
|
||||||
|
|
||||||
/* The kernel notifies a process which uses CLONE_CHILD_CLEARTID via futex
|
/* The kernel notifies a process which uses CLONE_CHILD_CLEARTID via futex
|
||||||
wake-up when the clone terminates. The memory location contains the
|
wake-up when the clone terminates. The memory location contains the
|
||||||
thread ID while the clone is running and is reset to zero by the kernel
|
thread ID while the clone is running and is reset to zero by the kernel
|
||||||
afterwards. The kernel up to version 3.16.3 does not use the private futex
|
afterwards. The kernel up to version 3.16.3 does not use the private futex
|
||||||
operations for futex wake-up when the clone terminates. */
|
operations for futex wake-up when the clone terminates.
|
||||||
#define lll_wait_tid(tid) \
|
If ABSTIME is not NULL, it is used as a timeout for the futex call. If the timeout
|
||||||
do { \
|
occurs then return ETIMEDOUT, if ABSTIME is invalid, return EINVAL.
|
||||||
__typeof (tid) __tid; \
|
The futex operations are issued with cancellable versions. */
|
||||||
while ((__tid = (tid)) != 0) \
|
#define lll_wait_tid(tid, abstime) \
|
||||||
lll_futex_wait (&(tid), __tid, LLL_SHARED);\
|
({ \
|
||||||
} while (0)
|
int __res = 0; \
|
||||||
|
__typeof (tid) __tid; \
|
||||||
extern int __lll_timedwait_tid (int *, const struct timespec *)
|
if (abstime != NULL) \
|
||||||
attribute_hidden;
|
__res = __lll_timedwait_tid (&(tid), (abstime)); \
|
||||||
|
else \
|
||||||
/* As lll_wait_tid, but with a timeout. If the timeout occurs then return
|
/* We need acquire MO here so that we synchronize with the \
|
||||||
ETIMEDOUT. If ABSTIME is invalid, return EINVAL.
|
kernel's store to 0 when the clone terminates. (see above) */ \
|
||||||
XXX Note that this differs from the generic version in that we do the
|
while ((__tid = atomic_load_acquire (&(tid))) != 0) \
|
||||||
error checking here and not in __lll_timedwait_tid. */
|
lll_futex_wait_cancel (&(tid), __tid, LLL_SHARED); \
|
||||||
#define lll_timedwait_tid(tid, abstime) \
|
__res; \
|
||||||
({ \
|
})
|
||||||
int __result = 0; \
|
|
||||||
if ((tid) != 0) \
|
|
||||||
__result = __lll_timedwait_tid (&(tid), (abstime)); \
|
|
||||||
__result; })
|
|
||||||
|
|
||||||
extern int __lll_lock_elision (int *futex, short *adapt_count, int private)
|
extern int __lll_lock_elision (int *futex, short *adapt_count, int private)
|
||||||
attribute_hidden;
|
attribute_hidden;
|
||||||
|
Reference in New Issue
Block a user