nptl: Reinstate pthread_timedjoin_np as a cancellation point (BZ#24215)
Patch ce7eb0e903 ("nptl: Cleanup cancellation macros") changed the
join sequence of the internal common __pthread_timedjoin_ex to use the
new macro lll_wait_tid.  The idea was that this macro would issue the
cancellable futex operation depending on whether or not a timeout is
used.  However, if a timeout is used, __lll_timedwait_tid is called,
and it is not a cancellable entrypoint.
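The asymmetry is visible in the lll_wait_tid definition removed further
down in this diff: only the branch without a timeout goes through a
cancellable futex wrapper.  Condensed from that macro (illustration only,
not the verbatim expansion):

  if (abstime != NULL)
    /* Plain futex wait inside __lll_timedwait_tid: not a cancellation
       point, so pthread_timedjoin_np could not be cancelled here.  */
    __res = __lll_timedwait_tid (&(tid), (abstime));
  else
    /* Cancellable futex wait; acquire MO synchronizes with the kernel's
       store of 0 when the clone terminates.  */
    while ((__tid = atomic_load_acquire (&(tid))) != 0)
      lll_futex_wait_cancel (&(tid), __tid, LLL_SHARED);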
This patch fixes it by simplifying the code in various ways:
- Instead of adding cancellation handling to __lll_timedwait_tid, the
  generic implementation is moved to pthread_join_common.c (now called
  timedwait_tid, with fixes to use the correct type for the pid); a
  sketch of its shape follows after this list.
- The lll_wait_tid macro is removed, along with its copies in the
  x86_64, i686, and sparc arch-specific lowlevellock.h.
- The sparc32 __lll_timedwait_tid is also removed, since the code is
  similar to the generic one.
- The arch-specific __lll_timedwait_tid implementations for x86_64 and
  i386 are also removed, since they are similar in functionality to the
  generic C code and there is no indication they are better than the
  compiler-generated code.
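As referenced in the first item above, here is a minimal sketch of the
shape the generic timedwait_tid takes in pthread_join_common.c.  This is
an approximation for illustration, not the verbatim glibc code; it uses
__clock_gettime for brevity, and the exact clock handling in the actual
implementation may differ.

  static int
  timedwait_tid (pid_t *tidp, const struct timespec *abstime)
  {
    pid_t tid;

    if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
      return EINVAL;

    /* Acquire MO synchronizes with the kernel's store of 0 to the TID
       word (CLONE_CHILD_CLEARTID) when the child terminates.  */
    while ((tid = atomic_load_acquire (tidp)) != 0)
      {
        struct timespec ts;
        __clock_gettime (CLOCK_REALTIME, &ts);

        /* Compute the relative timeout; bail out if it has already
           expired.  */
        struct timespec rt;
        rt.tv_sec = abstime->tv_sec - ts.tv_sec;
        rt.tv_nsec = abstime->tv_nsec - ts.tv_nsec;
        if (rt.tv_nsec < 0)
          {
            rt.tv_nsec += 1000000000;
            --rt.tv_sec;
          }
        if (rt.tv_sec < 0)
          return ETIMEDOUT;

        /* Cancellable futex wait: this is what makes pthread_timedjoin_np
           act as a cancellation point while blocked.  */
        if (lll_futex_timed_wait_cancel (tidp, tid, &rt, LLL_SHARED)
            == -ETIMEDOUT)
          return ETIMEDOUT;
      }

    return 0;
  }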
New tests, tst-join8 and tst-join9, are provided to check if
pthread_timedjoin_np acts as a cancellation point.
Checked on x86_64-linux-gnu, i686-linux-gnu, sparcv9-linux-gnu, and
aarch64-linux-gnu.
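In the same spirit as the new tests, the following is a sketch of how the
cancellation-point behaviour can be exercised.  It is not the actual
tst-join8/tst-join9 source; it assumes the glibc test harness (where
pthread_timedjoin_np is visible) and the libsupport helpers
xpthread_create and xpthread_join.

  #include <errno.h>
  #include <pthread.h>
  #include <time.h>
  #include <unistd.h>
  #include <support/xthread.h>

  /* Join target that never exits on its own.  */
  static void *
  tf_sleep (void *arg)
  {
    pause ();
    return NULL;
  }

  /* Block in pthread_timedjoin_np with a distant timeout; if it is a
     cancellation point, a pending cancel is acted upon here.  */
  static void *
  tf_join (void *arg)
  {
    pthread_t target = *(pthread_t *) arg;
    struct timespec ts;
    clock_gettime (CLOCK_REALTIME, &ts);
    ts.tv_sec += 100;
    pthread_timedjoin_np (target, NULL, &ts);
    return (void *) 1l;		/* Not reached if cancellation works.  */
  }

  static int
  do_test (void)
  {
    pthread_t target = xpthread_create (NULL, tf_sleep, NULL);
    pthread_t joiner = xpthread_create (NULL, tf_join, &target);

    /* Give the joiner time to block inside pthread_timedjoin_np.  */
    sleep (1);

    if (pthread_cancel (joiner) != 0)
      return 1;
    return xpthread_join (joiner) == PTHREAD_CANCELED ? 0 : 1;
  }

  #include <support/test-driver.c>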
[BZ #24215]
* nptl/Makefile (libpthread-routines): Remove lll_timedwait_tid.
(tests): Add tst-join8 tst-join9.
* nptl/lll_timedwait_tid.c: Remove file.
* sysdeps/sparc/sparc32/lll_timedwait_tid.c: Likewise.
* sysdeps/unix/sysv/linux/i386/lll_timedwait_tid.c: Likewise.
* sysdeps/unix/sysv/linux/x86_64/lll_timedwait_tid.c: Likewise.
* nptl/pthread_join_common.c (timedwait_tid): New function.
(__pthread_timedjoin_ex): Act as a cancellation entrypoint if block
is set.
* nptl/tst-join5.c (thread_join): New function.
(tf1, tf2, do_test): Use libsupport and add pthread_timedjoin_np
check.
* nptl/tst-join8.c: New file.
* nptl/tst-join9.c: Likewise.
* sysdeps/nptl/lowlevellock-futex.h (lll_futex_wait_cancel,
lll_futex_timed_wait_cancel): Add generic macros.
* sysdeps/nptl/lowlevellock.h (__lll_timedwait_tid, lll_wait_tid):
Remove definitions.
* sysdeps/unix/sysv/linux/i386/lowlevellock.h: Likewise.
* sysdeps/unix/sysv/linux/sparc/lowlevellock.h: Likewise.
* sysdeps/unix/sysv/linux/x86_64/lowlevellock.h: Likewise.
* sysdeps/sparc/sparc32/lowlevellock.c (__lll_timedwait_tid):
Remove function.
* sysdeps/unix/sysv/linux/i386/lowlevellock.S (__lll_timedwait_tid):
Likewise.
* sysdeps/unix/sysv/linux/x86_64/lowlevellock.S: Likewise.
* sysdeps/unix/sysv/linux/lowlevellock-futex.h
(lll_futex_timed_wait_cancel): New macro.
@@ -1 +0,0 @@
-/* __lll_timedwait_tid is in lowlevellock.S. */
@@ -365,70 +365,4 @@ __lll_unlock_wake:
        ret
        cfi_endproc
        .size   __lll_unlock_wake,.-__lll_unlock_wake
 
-       .globl  __lll_timedwait_tid
-       .type   __lll_timedwait_tid,@function
-       .hidden __lll_timedwait_tid
-       .align  16
-__lll_timedwait_tid:
-       pushl   %edi
-       pushl   %esi
-       pushl   %ebx
-       pushl   %ebp
-
-       movl    %eax, %ebp
-       movl    %edx, %edi
-       subl    $8, %esp
-
-       /* Get current time. */
-2:     movl    %esp, %ebx
-       xorl    %ecx, %ecx
-       movl    $__NR_gettimeofday, %eax
-       ENTER_KERNEL
-
-       /* Compute relative timeout. */
-       movl    4(%esp), %eax
-       movl    $1000, %edx
-       mul     %edx            /* Milli seconds to nano seconds. */
-       movl    (%edi), %ecx
-       movl    4(%edi), %edx
-       subl    (%esp), %ecx
-       subl    %eax, %edx
-       jns     5f
-       addl    $1000000000, %edx
-       subl    $1, %ecx
-5:     testl   %ecx, %ecx
-       js      6f              /* Time is already up. */
-
-       movl    %ecx, (%esp)    /* Store relative timeout. */
-       movl    %edx, 4(%esp)
-
-       movl    (%ebp), %edx
-       testl   %edx, %edx
-       jz      4f
-
-       movl    %esp, %esi
-       /* XXX The kernel so far uses global futex for the wakeup at
-          all times. */
-       xorl    %ecx, %ecx      /* movl $FUTEX_WAIT, %ecx */
-       movl    %ebp, %ebx
-       movl    $SYS_futex, %eax
-       ENTER_KERNEL
-
-       cmpl    $0, (%ebx)
-       jne     1f
-4:     xorl    %eax, %eax
-
-3:     addl    $8, %esp
-       popl    %ebp
-       popl    %ebx
-       popl    %esi
-       popl    %edi
-       ret
-
-1:     cmpl    $-ETIMEDOUT, %eax
-       jne     2b
-6:     movl    $ETIMEDOUT, %eax
-       jmp     3b
-       .size   __lll_timedwait_tid,.-__lll_timedwait_tid
-#endif
@@ -219,31 +219,6 @@ extern int __lll_timedlock_elision (int *futex, short *adapt_count,
 #define lll_islocked(futex) \
   (futex != LLL_LOCK_INITIALIZER)
 
-extern int __lll_timedwait_tid (int *tid, const struct timespec *abstime)
-     __attribute__ ((regparm (2))) attribute_hidden;
-
-/* The kernel notifies a process which uses CLONE_CHILD_CLEARTID via futex
-   wake-up when the clone terminates. The memory location contains the
-   thread ID while the clone is running and is reset to zero by the kernel
-   afterwards. The kernel up to version 3.16.3 does not use the private futex
-   operations for futex wake-up when the clone terminates.
-   If ABSTIME is not NULL, is used a timeout for futex call. If the timeout
-   occurs then return ETIMEOUT, if ABSTIME is invalid, return EINVAL.
-   The futex operation are issues with cancellable versions. */
-#define lll_wait_tid(tid, abstime)                                      \
-  ({                                                                    \
-    int __res = 0;                                                      \
-    __typeof (tid) __tid;                                               \
-    if (abstime != NULL)                                                \
-      __res = __lll_timedwait_tid (&(tid), (abstime));                  \
-    else                                                                \
-      /* We need acquire MO here so that we synchronize with the        \
-         kernel's store to 0 when the clone terminates. (see above) */  \
-      while ((__tid = atomic_load_acquire (&(tid))) != 0)               \
-        lll_futex_wait_cancel (&(tid), __tid, LLL_SHARED);              \
-    __res;                                                              \
-  })
-
 extern int __lll_lock_elision (int *futex, short *adapt_count, int private)
   attribute_hidden;
 
@@ -135,6 +135,13 @@
      __err;                                                                  \
   })
 
+#define lll_futex_timed_wait_cancel(futexp, val, timeout, private)           \
+  ({                                                                          \
+    int __oldtype = CANCEL_ASYNC ();                                          \
+    long int __err = lll_futex_timed_wait (futexp, val, timeout, private);    \
+    CANCEL_RESET (__oldtype);                                                 \
+    __err;                                                                    \
+  })
 
 #endif  /* !__ASSEMBLER__ */
 
@@ -108,29 +108,4 @@ __lll_timedlock (int *futex, const struct timespec *abstime, int private)
 #define LLL_LOCK_INITIALIZER (0)
 #define LLL_LOCK_INITIALIZER_LOCKED (1)
 
-extern int __lll_timedwait_tid (int *, const struct timespec *)
-     attribute_hidden;
-
-/* The kernel notifies a process which uses CLONE_CHILD_CLEARTID via futex
-   wake-up when the clone terminates. The memory location contains the
-   thread ID while the clone is running and is reset to zero by the kernel
-   afterwards. The kernel up to version 3.16.3 does not use the private futex
-   operations for futex wake-up when the clone terminates.
-   If ABSTIME is not NULL, is used a timeout for futex call. If the timeout
-   occurs then return ETIMEOUT, if ABSTIME is invalid, return EINVAL.
-   The futex operation are issues with cancellable versions. */
-#define lll_wait_tid(tid, abstime)                                      \
-  ({                                                                    \
-    int __res = 0;                                                      \
-    __typeof (tid) __tid;                                               \
-    if (abstime != NULL)                                                \
-      __res = __lll_timedwait_tid (&(tid), (abstime));                  \
-    else                                                                \
-      /* We need acquire MO here so that we synchronize with the        \
-         kernel's store to 0 when the clone terminates. (see above) */  \
-      while ((__tid = atomic_load_acquire (&(tid))) != 0)               \
-        lll_futex_wait_cancel (&(tid), __tid, LLL_SHARED);              \
-    __res;                                                              \
-  })
-
 #endif  /* lowlevellock.h */
@@ -1 +0,0 @@
-/* __lll_timedwait_tid is in lowlevellock.S. */
@@ -345,87 +345,4 @@ __lll_unlock_wake:
        retq
        cfi_endproc
        .size   __lll_unlock_wake,.-__lll_unlock_wake
 
-       .globl  __lll_timedwait_tid
-       .type   __lll_timedwait_tid,@function
-       .hidden __lll_timedwait_tid
-       .align  16
-__lll_timedwait_tid:
-       cfi_startproc
-       pushq   %r12
-       cfi_adjust_cfa_offset(8)
-       pushq   %r13
-       cfi_adjust_cfa_offset(8)
-       cfi_offset(%r12, -16)
-       cfi_offset(%r13, -24)
-
-       movq    %rdi, %r12
-       movq    %rsi, %r13
-
-       /* Align stack to 16 bytes when calling __gettimeofday. */
-       subq    $24, %rsp
-       cfi_adjust_cfa_offset(24)
-
-       /* Get current time. */
-2:     movq    %rsp, %rdi
-       xorl    %esi, %esi
-       /* This call works because we directly jump to a system call entry
-          which preserves all the registers. */
-       call    JUMPTARGET(__gettimeofday)
-
-       /* Compute relative timeout. */
-       movq    8(%rsp), %rax
-       movl    $1000, %edi
-       mul     %rdi            /* Milli seconds to nano seconds. */
-       movq    (%r13), %rdi
-       movq    8(%r13), %rsi
-       subq    (%rsp), %rdi
-       subq    %rax, %rsi
-       jns     5f
-       addq    $1000000000, %rsi
-       decq    %rdi
-5:     testq   %rdi, %rdi
-       js      6f              /* Time is already up. */
-
-       movq    %rdi, (%rsp)    /* Store relative timeout. */
-       movq    %rsi, 8(%rsp)
-
-       movl    (%r12), %edx
-       testl   %edx, %edx
-       jz      4f
-
-       movq    %rsp, %r10
-       /* XXX The kernel so far uses global futex for the wakeup at
-          all times. */
-#if FUTEX_WAIT == 0
-       xorl    %esi, %esi
-#else
-       movl    $FUTEX_WAIT, %esi
-#endif
-       movq    %r12, %rdi
-       movl    $SYS_futex, %eax
-       syscall
-
-       cmpl    $0, (%rdi)
-       jne     1f
-4:     xorl    %eax, %eax
-
-8:     addq    $24, %rsp
-       cfi_adjust_cfa_offset(-24)
-       popq    %r13
-       cfi_adjust_cfa_offset(-8)
-       cfi_restore(%r13)
-       popq    %r12
-       cfi_adjust_cfa_offset(-8)
-       cfi_restore(%r12)
-       retq
-
-       cfi_adjust_cfa_offset(32)
-1:     cmpq    $-ETIMEDOUT, %rax
-       jne     2b
-
-6:     movl    $ETIMEDOUT, %eax
-       jmp     8b
-       cfi_endproc
-       .size   __lll_timedwait_tid,.-__lll_timedwait_tid
-#endif
@@ -222,31 +222,6 @@ extern int __lll_timedlock_elision (int *futex, short *adapt_count,
 #define lll_islocked(futex) \
   (futex != LLL_LOCK_INITIALIZER)
 
-extern int __lll_timedwait_tid (int *, const struct timespec *)
-     attribute_hidden;
-
-/* The kernel notifies a process which uses CLONE_CHILD_CLEARTID via futex
-   wake-up when the clone terminates. The memory location contains the
-   thread ID while the clone is running and is reset to zero by the kernel
-   afterwards. The kernel up to version 3.16.3 does not use the private futex
-   operations for futex wake-up when the clone terminates.
-   If ABSTIME is not NULL, is used a timeout for futex call. If the timeout
-   occurs then return ETIMEOUT, if ABSTIME is invalid, return EINVAL.
-   The futex operation are issues with cancellable versions. */
-#define lll_wait_tid(tid, abstime)                                      \
-  ({                                                                    \
-    int __res = 0;                                                      \
-    __typeof (tid) __tid;                                               \
-    if (abstime != NULL)                                                \
-      __res = __lll_timedwait_tid (&(tid), (abstime));                  \
-    else                                                                \
-      /* We need acquire MO here so that we synchronize with the        \
-         kernel's store to 0 when the clone terminates. (see above) */  \
-      while ((__tid = atomic_load_acquire (&(tid))) != 0)               \
-        lll_futex_wait_cancel (&(tid), __tid, LLL_SHARED);              \
-    __res;                                                              \
-  })
-
 extern int __lll_lock_elision (int *futex, short *adapt_count, int private)
   attribute_hidden;
 