robust mutexes: Fix broken x86 assembly by removing it

lll_robust_unlock on i386 and x86_64 first sets the futex word to
FUTEX_WAITERS|0 before calling __lll_unlock_wake, which will set the
futex word to 0.  If the thread is killed between these steps, then the
futex word will be FUTEX_WAITERS|0, and the kernel (at least current
upstream) will not set it to FUTEX_OWNER_DIED|FUTEX_WAITERS because 0 is
not equal to the TID of the crashed thread.
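
As an illustration only (a hedged sketch using GCC __atomic builtins and a
hypothetical futex_wake_one helper, not the removed assembly or glibc's
internal atomics), the failure window looks like this:

  #define FUTEX_WAITERS    0x80000000u

  /* Hypothetical stand-in for a FUTEX_WAKE system call on the futex word.  */
  extern void futex_wake_one (unsigned int *futex_word);

  /* Broken shape of the removed assembly: the owner TID is destroyed in
     step 1, but the word only reaches 0 in step 2.  A thread killed between
     the two steps leaves FUTEX_WAITERS|0 behind, which the kernel's
     robust-list handling ignores because the TID field is 0.  */
  static void
  robust_unlock_broken (unsigned int *futex_word)
  {
    /* Step 1: overwrite the owner TID with FUTEX_WAITERS|0.  */
    __atomic_store_n (futex_word, FUTEX_WAITERS, __ATOMIC_RELEASE);
    /* Step 2 (what __lll_unlock_wake then does): clear the word and wake.  */
    __atomic_store_n (futex_word, 0, __ATOMIC_RELEASE);
    futex_wake_one (futex_word);
  }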

The lll_robust_lock assembly code on i386 and x86_64 is not prepared to
deal with this case because the fastpath tries to only CAS 0 to TID and
not FUTEX_WAITERS|0 to TID; the slowpath simply waits until it can CAS 0
to TID or the futex word has the FUTEX_OWNER_DIED bit set.
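
A rough sketch of the acquire side (again with GCC builtins, not the actual
assembly) shows why lockers then hang forever: the only acquisition attempted
is a CAS from 0, and the only recovery trigger is FUTEX_OWNER_DIED, which the
kernel will not set for a word whose TID field is 0.

  #include <errno.h>

  #define FUTEX_WAITERS    0x80000000u
  #define FUTEX_OWNER_DIED 0x40000000u

  static int
  robust_lock_sketch (unsigned int *futex_word, unsigned int tid)
  {
    for (;;)
      {
        unsigned int expected = 0;
        /* Fast path: CAS 0 -> TID.  A word of FUTEX_WAITERS|0 never matches.  */
        if (__atomic_compare_exchange_n (futex_word, &expected, tid,
                                         0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
          return 0;
        /* Slow path: the only other exit is an owner marked dead by the
           kernel, which cannot happen for FUTEX_WAITERS|0.  */
        if (expected & FUTEX_OWNER_DIED)
          return EOWNERDEAD;
        /* Otherwise block on the futex and retry (futex_wait call elided);
           for FUTEX_WAITERS|0 this repeats indefinitely.  */
      }
  }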

This issue is fixed by removing the custom x86 assembly code and using
the generic C code instead.  However, instead of adding more duplicate
code to the custom x86 lowlevellock.h, the code of the lll_robust* functions
is inlined into the single call sites that exist for each of these functions
in the pthread_mutex_* functions.  The robust mutex paths in the latter
have been slightly reorganized to make them simpler.
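
For comparison, a hedged sketch of the shape this leaves at an unlock call
site (not the literal __pthread_mutex_unlock_full code): the futex word is
released with a single atomic exchange, so there is no intermediate state for
a crash to expose, and a waiter is woken only if the old value had
FUTEX_WAITERS set.

  #define FUTEX_WAITERS    0x80000000u

  /* Hypothetical stand-in for a FUTEX_WAKE system call on the futex word.  */
  extern void futex_wake_one (unsigned int *futex_word);

  static void
  robust_unlock_inlined (unsigned int *futex_word)
  {
    /* Single step: hand the word from TID|flags to 0 atomically.  If the
       owner dies before this exchange, the word still holds its TID and the
       kernel can set FUTEX_OWNER_DIED; if it dies after, the mutex is
       already free.  */
    unsigned int oldval = __atomic_exchange_n (futex_word, 0, __ATOMIC_RELEASE);
    if (oldval & FUTEX_WAITERS)
      futex_wake_one (futex_word);
  }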

This patch is meant to be easy to backport, so C11-style atomics are not
used.

	[BZ #20985]
	* nptl/Makefile: Adapt.
	* nptl/pthread_mutex_cond_lock.c (LLL_ROBUST_MUTEX_LOCK): Remove.
	(LLL_ROBUST_MUTEX_LOCK_MODIFIER): New.
	* nptl/pthread_mutex_lock.c (LLL_ROBUST_MUTEX_LOCK): Remove.
	(LLL_ROBUST_MUTEX_LOCK_MODIFIER): New.
	(__pthread_mutex_lock_full): Inline lll_robust* functions and adapt.
	* nptl/pthread_mutex_timedlock.c (pthread_mutex_timedlock): Inline
	lll_robust* functions and adapt.
	* nptl/pthread_mutex_unlock.c (__pthread_mutex_unlock_full): Likewise.
	* sysdeps/nptl/lowlevellock.h (__lll_robust_lock_wait,
	__lll_robust_lock, lll_robust_cond_lock, __lll_robust_timedlock_wait,
	__lll_robust_timedlock, __lll_robust_unlock): Remove.
	* sysdeps/unix/sysv/linux/i386/lowlevellock.h (lll_robust_lock,
	lll_robust_cond_lock, lll_robust_timedlock, lll_robust_unlock): Remove.
	* sysdeps/unix/sysv/linux/x86_64/lowlevellock.h (lll_robust_lock,
	lll_robust_cond_lock, lll_robust_timedlock, lll_robust_unlock): Remove.
	* sysdeps/unix/sysv/linux/sparc/lowlevellock.h (__lll_robust_lock_wait,
	__lll_robust_lock, lll_robust_cond_lock, __lll_robust_timedlock_wait,
	__lll_robust_timedlock, __lll_robust_unlock): Remove.
	* nptl/lowlevelrobustlock.c: Remove file.
	* nptl/lowlevelrobustlock.sym: Likewise.
	* sysdeps/unix/sysv/linux/i386/lowlevelrobustlock.S: Likewise.
	* sysdeps/unix/sysv/linux/x86_64/lowlevelrobustlock.S: Likewise.
Author: Torvald Riegel
Date:   2016-12-22 10:20:43 +01:00
Commit: 65810f0ef0
Parent: f32941d80c
14 changed files with 185 additions and 977 deletions

@@ -46,7 +46,6 @@ __lll_cond_trylock (int *futex)
 
 extern void __lll_lock_wait_private (int *futex) attribute_hidden;
 extern void __lll_lock_wait (int *futex, int private) attribute_hidden;
-extern int __lll_robust_lock_wait (int *futex, int private) attribute_hidden;
 
 static inline void
 __attribute__ ((always_inline))
@@ -64,18 +63,6 @@ __lll_lock (int *futex, int private)
 }
 #define lll_lock(futex, private) __lll_lock (&(futex), private)
 
-static inline int
-__attribute__ ((always_inline))
-__lll_robust_lock (int *futex, int id, int private)
-{
-  int result = 0;
-  if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
-    result = __lll_robust_lock_wait (futex, private);
-  return result;
-}
-#define lll_robust_lock(futex, id, private) \
-  __lll_robust_lock (&(futex), id, private)
-
 static inline void
 __attribute__ ((always_inline))
 __lll_cond_lock (int *futex, int private)
@@ -87,14 +74,9 @@ __lll_cond_lock (int *futex, int private)
 }
 #define lll_cond_lock(futex, private) __lll_cond_lock (&(futex), private)
 
-#define lll_robust_cond_lock(futex, id, private) \
-  __lll_robust_lock (&(futex), (id) | FUTEX_WAITERS, private)
-
 
 extern int __lll_timedlock_wait (int *futex, const struct timespec *,
                                  int private) attribute_hidden;
-extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *,
-                                        int private) attribute_hidden;
 
 static inline int
 __attribute__ ((always_inline))
@@ -110,19 +92,6 @@ __lll_timedlock (int *futex, const struct timespec *abstime, int private)
 #define lll_timedlock(futex, abstime, private) \
   __lll_timedlock (&(futex), abstime, private)
 
-static inline int
-__attribute__ ((always_inline))
-__lll_robust_timedlock (int *futex, const struct timespec *abstime,
-                        int id, int private)
-{
-  int result = 0;
-  if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
-    result = __lll_robust_timedlock_wait (futex, abstime, private);
-  return result;
-}
-#define lll_robust_timedlock(futex, abstime, id, private) \
-  __lll_robust_timedlock (&(futex), abstime, id, private)
-
 #define lll_unlock(lock, private) \
   ((void) ({ \
     int *__futex = &(lock); \
@@ -132,15 +101,6 @@ __lll_robust_timedlock (int *futex, const struct timespec *abstime,
       lll_futex_wake (__futex, 1, __private); \
   }))
 
-#define lll_robust_unlock(lock, private) \
-  ((void) ({ \
-    int *__futex = &(lock); \
-    int __private = (private); \
-    int __val = atomic_exchange_rel (__futex, 0); \
-    if (__glibc_unlikely (__val & FUTEX_WAITERS)) \
-      lll_futex_wake (__futex, 1, __private); \
-  }))
-
 #define lll_islocked(futex) \
   (futex != 0)