mirror of
				https://sourceware.org/git/glibc.git
				synced 2025-10-31 22:10:34 +03:00 
			
		
		
		
	lll_robust_unlock on i386 and x86_64 first sets the futex word to FUTEX_WAITERS|0 before calling __lll_unlock_wake, which will set the futex word to 0. If the thread is killed between these steps, then the futex word will be FUTEX_WAITERS|0, and the kernel (at least current upstream) will not set it to FUTEX_OWNER_DIED|FUTEX_WAITERS because 0 is not equal to the TID of the crashed thread. The lll_robust_lock assembly code on i386 and x86_64 is not prepared to deal with this case because the fastpath tries to only CAS 0 to TID and not FUTEX_WAITERS|0 to TID; the slowpath simply waits until it can CAS 0 to TID or the futex_word has the FUTEX_OWNER_DIED bit set. This issue is fixed by removing the custom x86 assembly code and using the generic C code instead. However, instead of adding more duplicate code to the custom x86 lowlevellock.h, the code of the lll_robust* functions is inlined into the single call sites that exist for each of these functions in the pthread_mutex_* functions. The robust mutex paths in the latter have been slightly reorganized to make them simpler. This patch is meant to be easy to backport, so C11-style atomics are not used. [BZ #20985] * nptl/Makefile: Adapt. * nptl/pthread_mutex_cond_lock.c (LLL_ROBUST_MUTEX_LOCK): Remove. (LLL_ROBUST_MUTEX_LOCK_MODIFIER): New. * nptl/pthread_mutex_lock.c (LLL_ROBUST_MUTEX_LOCK): Remove. (LLL_ROBUST_MUTEX_LOCK_MODIFIER): New. (__pthread_mutex_lock_full): Inline lll_robust* functions and adapt. * nptl/pthread_mutex_timedlock.c (pthread_mutex_timedlock): Inline lll_robust* functions and adapt. * nptl/pthread_mutex_unlock.c (__pthread_mutex_unlock_full): Likewise. * sysdeps/nptl/lowlevellock.h (__lll_robust_lock_wait, __lll_robust_lock, lll_robust_cond_lock, __lll_robust_timedlock_wait, __lll_robust_timedlock, __lll_robust_unlock): Remove. * sysdeps/unix/sysv/linux/i386/lowlevellock.h (lll_robust_lock, lll_robust_cond_lock, lll_robust_timedlock, lll_robust_unlock): Remove. 
* sysdeps/unix/sysv/linux/x86_64/lowlevellock.h (lll_robust_lock, lll_robust_cond_lock, lll_robust_timedlock, lll_robust_unlock): Remove. * sysdeps/unix/sysv/linux/sparc/lowlevellock.h (__lll_robust_lock_wait, __lll_robust_lock, lll_robust_cond_lock, __lll_robust_timedlock_wait, __lll_robust_timedlock, __lll_robust_unlock): Remove. * nptl/lowlevelrobustlock.c: Remove file. * nptl/lowlevelrobustlock.sym: Likewise. * sysdeps/unix/sysv/linux/i386/lowlevelrobustlock.S: Likewise. * sysdeps/unix/sysv/linux/x86_64/lowlevelrobustlock.S: Likewise.
		
			
				
	
	
		
			268 lines
		
	
	
		
			8.9 KiB
		
	
	
	
		
			C
		
	
	
	
	
	
			
		
		
	
	
			268 lines
		
	
	
		
			8.9 KiB
		
	
	
	
		
			C
		
	
	
	
	
	
/* Copyright (C) 2002-2017 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
 | |
#ifndef _LOWLEVELLOCK_H
#define _LOWLEVELLOCK_H	1

#include <stap-probe.h>

#ifndef __ASSEMBLER__
# include <time.h>
# include <sys/param.h>
# include <bits/pthreadtypes.h>
# include <kernel-features.h>
# include <tcb-offsets.h>

/* LOCK_INSTR is the "lock" prefix string spliced into the inline-asm
   templates below to make the following read-modify-write instruction
   atomic.  On uniprocessor (UP) builds the prefix is unnecessary, so it
   expands to nothing.  */
# ifndef LOCK_INSTR
#  ifdef UP
#   define LOCK_INSTR	/* nothing */
#  else
#   define LOCK_INSTR "lock;"
#  endif
# endif
#else
/* Assembler-source counterpart of LOCK_INSTR for .S files.  */
# ifndef LOCK
#  ifdef UP
#   define LOCK
#  else
#   define LOCK lock
#  endif
# endif
#endif

#include <lowlevellock-futex.h>

/* XXX Remove when no assembler code uses futexes anymore.  */
#define SYS_futex		__NR_futex
 | |
#ifndef __ASSEMBLER__

/* Initializer for compatibility lock.  The three values encode the
   lock state: 0 = unlocked, 1 = locked with no waiters, 2 = locked
   with (possible) waiters.  */
#define LLL_LOCK_INITIALIZER		(0)
#define LLL_LOCK_INITIALIZER_LOCKED	(1)
#define LLL_LOCK_INITIALIZER_WAITERS	(2)


/* NB: in the lll_trylock macro we simply return the value in %eax
   after the cmpxchg instruction.  In case the operation succeeded this
   value is zero.  In case the operation failed, the cmpxchg instruction
   has loaded the current value of the memory word which is guaranteed
   to be nonzero.  */
#if !IS_IN (libc) || defined UP
# define __lll_trylock_asm LOCK_INSTR "cmpxchgl %2, %1"
#else
/* Within libc, elide the "lock" prefix while the process is known to
   be single-threaded: the MULTIPLE_THREADS_OFFSET word in the TCB
   (reached through %gs) is zero in that case.  */
# define __lll_trylock_asm "cmpl $0, %%gs:%P5\n\t" \
			   "je 0f\n\t"					      \
			   "lock\n"					      \
			   "0:\tcmpxchgl %2, %1"
#endif

/* Try to acquire FUTEX without blocking: atomically CAS it from 0
   (unlocked) to 1 (locked).  Evaluates to 0 on success and to the
   (nonzero) current lock value on failure.  */
#define lll_trylock(futex) \
  ({ int ret;								      \
     __asm __volatile (__lll_trylock_asm				      \
		       : "=a" (ret), "=m" (futex)			      \
		       : "r" (LLL_LOCK_INITIALIZER_LOCKED), "m" (futex),      \
			 "0" (LLL_LOCK_INITIALIZER),			      \
			 "i" (MULTIPLE_THREADS_OFFSET)			      \
		       : "memory");					      \
     ret; })
 | |
| 
 | |
| 
 | |
/* Like lll_trylock, but acquire the lock directly in the "waiters"
   state (2) so that a subsequent lll_unlock always issues a futex
   wake.  Evaluates to 0 on success, nonzero if the lock is already
   held.  */
#define lll_cond_trylock(futex) \
  ({ int ret;								      \
     __asm __volatile (LOCK_INSTR "cmpxchgl %2, %1"			      \
		       : "=a" (ret), "=m" (futex)			      \
		       : "r" (LLL_LOCK_INITIALIZER_WAITERS),		      \
			 "m" (futex), "0" (LLL_LOCK_INITIALIZER)	      \
		       : "memory");					      \
     ret; })
 | |
| 
 | |
#if !IS_IN (libc) || defined UP
# define __lll_lock_asm_start LOCK_INSTR "cmpxchgl %1, %2\n\t"
#else
/* Within libc, elide the "lock" prefix while the process is known to
   be single-threaded (MULTIPLE_THREADS_OFFSET word in the TCB is
   zero).  */
# define __lll_lock_asm_start "cmpl $0, %%gs:%P6\n\t"			      \
			      "je 0f\n\t"				      \
			      "lock\n"					      \
			      "0:\tcmpxchgl %1, %2\n\t"
#endif

/* Acquire FUTEX.  Fast path: CAS 0 -> 1.  If that fails, fall into a
   call to the out-of-line wait routine which blocks in the kernel
   until the lock can be taken.  PRIVATE selects process-private vs.
   process-shared futex operations; when it is the compile-time
   constant LLL_PRIVATE, the cheaper __lll_lock_wait_private entry is
   used and no private flag needs to be materialized in %ecx.  */
#define lll_lock(futex, private) \
  (void)								      \
    ({ int ignore1, ignore2;						      \
       if (__builtin_constant_p (private) && (private) == LLL_PRIVATE)	      \
	 __asm __volatile (__lll_lock_asm_start				      \
			   "jz 18f\n\t"				      \
			   "1:\tleal %2, %%ecx\n"			      \
			   "2:\tcall __lll_lock_wait_private\n" 	      \
			   "18:"					      \
			   : "=a" (ignore1), "=c" (ignore2), "=m" (futex)     \
			   : "0" (0), "1" (1), "m" (futex),		      \
			     "i" (MULTIPLE_THREADS_OFFSET)		      \
			   : "memory");					      \
       else								      \
	 {								      \
	   int ignore3;							      \
	   __asm __volatile (__lll_lock_asm_start			      \
			     "jz 18f\n\t"			 	      \
			     "1:\tleal %2, %%edx\n"			      \
			     "0:\tmovl %8, %%ecx\n"			      \
			     "2:\tcall __lll_lock_wait\n"		      \
			     "18:"					      \
			     : "=a" (ignore1), "=c" (ignore2),		      \
			       "=m" (futex), "=&d" (ignore3) 		      \
			     : "1" (1), "m" (futex),			      \
			       "i" (MULTIPLE_THREADS_OFFSET), "0" (0),	      \
			       "g" ((int) (private))			      \
			     : "memory");				      \
	 }								      \
    })
 | |
| 
 | |
| 
 | |
/* Special version of lll_lock which causes the unlock function to
   always wakeup waiters: the lock is acquired with value 2
   ("waiters") instead of 1, so lll_unlock will always issue a futex
   wake.  */
#define lll_cond_lock(futex, private) \
  (void)								      \
    ({ int ignore1, ignore2, ignore3;					      \
       __asm __volatile (LOCK_INSTR "cmpxchgl %1, %2\n\t"		      \
			 "jz 18f\n\t"					      \
			 "1:\tleal %2, %%edx\n"				      \
			 "0:\tmovl %7, %%ecx\n"				      \
			 "2:\tcall __lll_lock_wait\n"			      \
			 "18:"						      \
			 : "=a" (ignore1), "=c" (ignore2), "=m" (futex),      \
			   "=&d" (ignore3)				      \
			 : "0" (0), "1" (2), "m" (futex), "g" ((int) (private))\
			 : "memory");					      \
    })
 | |
| 
 | |
| 
 | |
/* As lll_lock, but give up when the absolute TIMEOUT expires.
   Evaluates to 0 on success, otherwise to a nonzero error code from
   the out-of-line wait routine (e.g. ETIMEDOUT).  */
#define lll_timedlock(futex, timeout, private) \
  ({ int result, ignore1, ignore2, ignore3;				      \
     __asm __volatile (LOCK_INSTR "cmpxchgl %1, %3\n\t"			      \
		       "jz 18f\n\t"					      \
		       "1:\tleal %3, %%ecx\n"				      \
		       "0:\tmovl %8, %%edx\n"				      \
		       "2:\tcall __lll_timedlock_wait\n"		      \
		       "18:"						      \
		       : "=a" (result), "=c" (ignore1), "=&d" (ignore2),      \
			 "=m" (futex), "=S" (ignore3)			      \
		       : "0" (0), "1" (1), "m" (futex), "m" (timeout),	      \
			 "4" ((int) (private))				      \
		       : "memory");					      \
     result; })

/* Out-of-line lock-elision variant of lll_timedlock; ADAPT_COUNT is
   per-lock adaptation state used to decide whether transactional
   execution should be attempted.  */
extern int __lll_timedlock_elision (int *futex, short *adapt_count,
					 const struct timespec *timeout,
					 int private) attribute_hidden;

#define lll_timedlock_elision(futex, adapt_count, timeout, private)	\
  __lll_timedlock_elision(&(futex), &(adapt_count), timeout, private)
 | |
| 
 | |
#if !IS_IN (libc) || defined UP
# define __lll_unlock_asm LOCK_INSTR "subl $1, %0\n\t"
#else
/* Within libc, elide the "lock" prefix while the process is known to
   be single-threaded (MULTIPLE_THREADS_OFFSET word in the TCB is
   zero).  */
# define __lll_unlock_asm "cmpl $0, %%gs:%P3\n\t"			      \
			  "je 0f\n\t"					      \
			  "lock\n"					      \
			  "0:\tsubl $1,%0\n\t"
#endif

/* Release FUTEX: atomically decrement it.  If the old value was not 1
   (i.e. the lock was in the "waiters" state), call the out-of-line
   wake routine to reset the futex to 0 and wake a waiter.  As in
   lll_lock, a compile-time-constant LLL_PRIVATE selects the cheaper
   private entry point.  */
#define lll_unlock(futex, private) \
  (void)								      \
    ({ int ignore;							      \
       if (__builtin_constant_p (private) && (private) == LLL_PRIVATE)	      \
	 __asm __volatile (__lll_unlock_asm				      \
			   "je 18f\n\t"					      \
			   "1:\tleal %0, %%eax\n"			      \
			   "2:\tcall __lll_unlock_wake_private\n"	      \
			   "18:"					      \
			   : "=m" (futex), "=&a" (ignore)		      \
			   : "m" (futex), "i" (MULTIPLE_THREADS_OFFSET)	      \
			   : "memory");					      \
       else								      \
	 {								      \
	   int ignore2;							      \
	   __asm __volatile (__lll_unlock_asm				      \
			     "je 18f\n\t"				      \
			     "1:\tleal %0, %%eax\n"			      \
			     "0:\tmovl %5, %%ecx\n"			      \
			     "2:\tcall __lll_unlock_wake\n"		      \
			     "18:"					      \
			     : "=m" (futex), "=&a" (ignore), "=&c" (ignore2)  \
			     : "i" (MULTIPLE_THREADS_OFFSET), "m" (futex),    \
			       "g" ((int) (private))			      \
			     : "memory");				      \
	 }								      \
    })
 | |
| 
 | |
| 
 | |
/* Evaluate to nonzero if FUTEX is currently locked (i.e. not equal to
   LLL_LOCK_INITIALIZER).  The macro argument is parenthesized so that
   expression arguments using operators of lower precedence than `!='
   (e.g. `x & 1') group as the caller intends.  */
#define lll_islocked(futex) \
  ((futex) != LLL_LOCK_INITIALIZER)
 | |
| 
 | |
/* The kernel notifies a process which uses CLONE_CHILD_CLEARTID via futex
   wake-up when the clone terminates.  The memory location contains the
   thread ID while the clone is running and is reset to zero by the kernel
   afterwards.  The kernel up to version 3.16.3 does not use the private futex
   operations for futex wake-up when the clone terminates.  */
#define lll_wait_tid(tid) \
  do {					\
    __typeof (tid) __tid;		\
    while ((__tid = (tid)) != 0)	\
      lll_futex_wait (&(tid), __tid, LLL_SHARED);\
  } while (0)

/* Out-of-line helper for lll_timedwait_tid; regparm(2) passes both
   arguments in registers on i386.  */
extern int __lll_timedwait_tid (int *tid, const struct timespec *abstime)
     __attribute__ ((regparm (2))) attribute_hidden;

/* As lll_wait_tid, but with a timeout.  If the timeout occurs then return
   ETIMEDOUT.  If ABSTIME is invalid, return EINVAL.
   XXX Note that this differs from the generic version in that we do the
   error checking here and not in __lll_timedwait_tid.  */
#define lll_timedwait_tid(tid, abstime) \
  ({									      \
    int __result = 0;							      \
    if ((tid) != 0)							      \
      {									      \
	if ((abstime)->tv_nsec < 0 || (abstime)->tv_nsec >= 1000000000)	      \
	  __result = EINVAL;						      \
	else								      \
	  __result = __lll_timedwait_tid (&(tid), (abstime));		      \
      }									      \
    __result; })
 | |
| 
 | |
| 
 | |
/* Out-of-line lock-elision entry points.  ADAPT_COUNT is per-lock
   adaptation state that tracks whether attempting transactional
   execution is worthwhile; unlock does not need it.  */
extern int __lll_lock_elision (int *futex, short *adapt_count, int private)
  attribute_hidden;

extern int __lll_unlock_elision(int *lock, int private)
  attribute_hidden;

extern int __lll_trylock_elision(int *lock, short *adapt_count)
  attribute_hidden;

#define lll_lock_elision(futex, adapt_count, private) \
  __lll_lock_elision (&(futex), &(adapt_count), private)
#define lll_unlock_elision(futex, adapt_count, private) \
  __lll_unlock_elision (&(futex), private)
#define lll_trylock_elision(futex, adapt_count) \
  __lll_trylock_elision(&(futex), &(adapt_count))

#endif  /* !__ASSEMBLER__ */

#endif	/* lowlevellock.h */
 |