mirror of https://sourceware.org/git/glibc.git
powerpc: Fix write-after-destroy in lock elision [BZ #20822]
The update of *adapt_count after the release of the lock causes a race condition when thread A unlocks, thread B continues and destroys the mutex, and thread A writes to *adapt_count.
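To make the failure mode concrete, here is a minimal standalone sketch of the pattern (illustrative only, not part of the commit; the struct and function names are hypothetical). POSIX allows a thread to destroy and free a mutex as soon as it has determined, while holding the lock, that no other thread will touch it again, so any bookkeeping store that the unlock path performs after releasing the lock can land on freed memory:

    #include <pthread.h>
    #include <sched.h>
    #include <stdlib.h>

    struct obj
    {
      pthread_mutex_t mutex;  /* An elided mutex keeps its adapt_count inside.  */
      int done;               /* Protected by mutex.  */
    };

    /* Thread A: the last unlocker.  With the pre-fix unlock path, the store
       to adapt_count happened after lll_unlock, i.e. after the point where
       thread B may already have destroyed and freed the object.  */
    static void *
    last_unlocker (void *arg)
    {
      struct obj *o = arg;
      pthread_mutex_lock (&o->mutex);
      o->done = 1;
      pthread_mutex_unlock (&o->mutex);  /* Buggy path stored adapt_count here.  */
      return NULL;
    }

    int
    main (void)
    {
      struct obj *o = malloc (sizeof *o);
      pthread_mutex_init (&o->mutex, NULL);
      o->done = 0;

      pthread_t a;
      pthread_create (&a, NULL, last_unlocker, o);

      /* Thread B (main): once done is observed under the lock, POSIX permits
         destroying the mutex and freeing the object immediately.  */
      for (;;)
        {
          pthread_mutex_lock (&o->mutex);
          int done = o->done;
          pthread_mutex_unlock (&o->mutex);
          if (done)
            break;
          sched_yield ();
        }
      pthread_mutex_destroy (&o->mutex);
      free (o);  /* A late adapt_count store by thread A would now be a
                    write-after-free.  */
      pthread_join (a, NULL);
      return 0;
    }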
sysdeps/unix/sysv/linux/powerpc/elision-lock.c

@@ -45,7 +45,9 @@
 int
 __lll_lock_elision (int *lock, short *adapt_count, EXTRAARG int pshared)
 {
-  if (*adapt_count > 0)
+  /* adapt_count is accessed concurrently but is just a hint.  Thus,
+     use atomic accesses but relaxed MO is sufficient.  */
+  if (atomic_load_relaxed (adapt_count) > 0)
     {
       goto use_lock;
     }
@@ -67,7 +69,8 @@ __lll_lock_elision (int *lock, short *adapt_count, EXTRAARG int pshared)
           if (_TEXASRU_FAILURE_PERSISTENT (__builtin_get_texasru ()))
             {
               if (aconf.skip_lock_internal_abort > 0)
-                *adapt_count = aconf.skip_lock_internal_abort;
+                atomic_store_relaxed (adapt_count,
+                                      aconf.skip_lock_internal_abort);
               goto use_lock;
             }
         }
@@ -75,7 +78,8 @@ __lll_lock_elision (int *lock, short *adapt_count, EXTRAARG int pshared)
 
   /* Fall back to locks for a bit if retries have been exhausted */
   if (aconf.try_tbegin > 0 && aconf.skip_lock_out_of_tbegin_retries > 0)
-    *adapt_count = aconf.skip_lock_out_of_tbegin_retries;
+    atomic_store_relaxed (adapt_count,
+                          aconf.skip_lock_out_of_tbegin_retries);
 
 use_lock:
   return LLL_LOCK ((*lock), pshared);
sysdeps/unix/sysv/linux/powerpc/elision-trylock.c

@@ -34,7 +34,7 @@ __lll_trylock_elision (int *futex, short *adapt_count)
     __libc_tabort (_ABORT_NESTED_TRYLOCK);
 
   /* Only try a transaction if it's worth it.  */
-  if (*adapt_count > 0)
+  if (atomic_load_relaxed (adapt_count) > 0)
     {
       goto use_lock;
     }
@@ -49,7 +49,7 @@ __lll_trylock_elision (int *futex, short *adapt_count)
           __libc_tend (0);
 
           if (aconf.skip_lock_busy > 0)
-            *adapt_count = aconf.skip_lock_busy;
+            atomic_store_relaxed (adapt_count, aconf.skip_lock_busy);
         }
       else
         {
@@ -59,7 +59,8 @@ __lll_trylock_elision (int *futex, short *adapt_count)
              result in another failure.  Use normal locking now and
              for the next couple of calls.  */
           if (aconf.skip_trylock_internal_abort > 0)
-            *adapt_count = aconf.skip_trylock_internal_abort;
+            atomic_store_relaxed (adapt_count,
+                                  aconf.skip_trylock_internal_abort);
         }
     }
 
sysdeps/unix/sysv/linux/powerpc/elision-unlock.c

@@ -28,13 +28,16 @@ __lll_unlock_elision (int *lock, short *adapt_count, int pshared)
     __libc_tend (0);
   else
     {
-      lll_unlock ((*lock), pshared);
-
-      /* Update the adapt count AFTER completing the critical section.
-         Doing this here prevents unneeded stalling when entering
-         a critical section.  Saving about 8% runtime on P8.  */
-      if (*adapt_count > 0)
-        (*adapt_count)--;
+      /* Update adapt_count in the critical section to prevent a
+         write-after-destroy error as mentioned in BZ 20822.  The
+         following update of adapt_count has to be contained within
+         the critical region of the fall-back lock in order to not violate
+         the mutex destruction requirements.  */
+      short __tmp = atomic_load_relaxed (adapt_count);
+      if (__tmp > 0)
+        atomic_store_relaxed (adapt_count, __tmp - 1);
+
+      lll_unlock ((*lock), pshared);
     }
   return 0;
 }
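The same ordering, expressed as a sketch with standard C11 atomics rather than glibc's internal atomic_load_relaxed/atomic_store_relaxed macros and lll_unlock (the names below are illustrative, not glibc API). Relaxed memory order suffices for adapt_count because it is only a heuristic; what prevents the write-after-destroy is keeping the store before the lock release:

    #include <stdatomic.h>

    struct elided_lock
    {
      _Atomic int lock;           /* Futex word: 0 = free, 1 = held.  */
      _Atomic short adapt_count;  /* Hint: how long to skip elision.  */
    };

    static void
    unlock_fixed (struct elided_lock *l)
    {
      /* The hint update stays inside the critical section, so it is ordered
         before the release of the lock and cannot race with a destroy that
         legally follows the last unlock.  Relaxed MO is enough: a stale or
         lost decrement only mis-tunes the elision heuristic.  */
      short tmp = atomic_load_explicit (&l->adapt_count, memory_order_relaxed);
      if (tmp > 0)
        atomic_store_explicit (&l->adapt_count, tmp - 1, memory_order_relaxed);

      /* Release the lock last (stands in for lll_unlock).  */
      atomic_store_explicit (&l->lock, 0, memory_order_release);
    }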