Remove support for lock elision.

The support for lock elision was already deprecated with glibc 2.42:
commit 77438db8cf
"Mark support for lock elision as deprecated."
See also the discussion:
https://sourceware.org/pipermail/libc-alpha/2025-July/168492.html

This patch removes the architecture-specific support for lock elision
for x86, powerpc and s390 by removing the elision-conf.h, elision-conf.c,
elision-lock.c, elision-timed.c, elision-trylock.c, elision-unlock.c,
elide.h and htm.h/hle.h files.  The generic versions of those files are
also removed.

The architecture-specific structures are adjusted and the elision fields
are marked as unused.  See the struct_mutex.h files.
Furthermore, in struct_rwlock.h the leftover __rwelision field was also
removed.  It was originally removed with commit 0377a7fde6
"nptl: Remove rwlock elision definitions"
and reintroduced by accident with commit 7df8af43ad
"nptl: Add struct_rwlock.h"

The common code (e.g. the pthread_mutex* files) is changed back to its
state before lock elision was introduced with the x86 support:
- commit 1cdbe57948
"Add the low level infrastructure for pthreads lock elision with TSX"
- commit b023e4ca99
"Add new internal mutex type flags for elision."
- commit 68cc29355f
"Add minimal test suite changes for elision enabled kernels"
- commit e8c659d74e
"Add elision to pthread_mutex_{try,timed,un}lock"
- commit 49186d21ef
"Disable elision for any pthread_mutexattr_settype call"
- commit 1717da59ae
"Add a configure option to enable lock elision and disable by default"

Elision is also removed from the tunables, the initialization code, the
pretty-printers and the manual.
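For reference, elision previously had to be enabled explicitly at process
startup via the tunables framework, e.g. (the program name is
illustrative):

  GLIBC_TUNABLES=glibc.elision.enable=1 ./application

With this patch the whole glibc.elision.* tunable namespace is gone.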

Some extra handling in the testsuite is removed, as is the entire
tst-mutex10 testcase, which tested a race while enabling lock elision.

I've also searched the code for "elision", "elide" and "transaction",
and cleaned up some comments.

I've run the testsuite on x86_64 and s390x, and run the
build-many-glibcs.py script.
Thanks to Sachin Monga, this patch was also tested on powerpc.

A NEWS entry also mentions the removal.
Reviewed-by: Wilco Dijkstra  <Wilco.Dijkstra@arm.com>
Author: Stefan Liebler
Date: 2025-10-28 15:21:18 +01:00
Parent: 5029b63280
Commit: b9579342c6
68 changed files with 92 additions and 2683 deletions

NEWS

@@ -42,6 +42,9 @@ Deprecated and removed features, and other changes affecting compatibility:
definition of these functions in ISO C23. Existing binaries that use
the versions returning intmax_t or uintmax_t will continue to work.
* The support for TX lock elision of pthread mutexes has been removed on all
architectures (powerpc, s390x, x86_64).
Changes to build and runtime requirements:
[Add changes to build and runtime requirements here]


@@ -84,39 +84,6 @@ glibc {
}
}
elision {
enable {
type: INT_32
minval: 0
maxval: 1
}
skip_lock_busy {
type: INT_32
default: 3
minval: 0
}
skip_lock_internal_abort {
type: INT_32
default: 3
minval: 0
}
skip_lock_after_retries {
type: INT_32
default: 3
minval: 0
}
tries {
type: INT_32
default: 3
minval: 0
}
skip_trylock_internal_abort {
type: INT_32
default: 3
minval: 0
}
}
rtld {
nns {
type: SIZE_T


@@ -17,7 +17,6 @@
<https://www.gnu.org/licenses/>. */
#include <ctype.h>
#include <elision-conf.h>
#include <libc-early-init.h>
#include <libc-internal.h>
#include <lowlevellock.h>
@@ -47,10 +46,6 @@ __libc_early_init (_Bool initial)
__getrandom_early_init (initial);
#if ENABLE_ELISION_SUPPORT
__lll_elision_init ();
#endif
/* Initialize system malloc (needs __libc_initial to be set). */
call_function_static_weak (__ptmalloc_init);
}


@@ -14,7 +14,6 @@ $1 = {
__nusers = 0,
__kind = 576,
__spins = 0,
__elision = 0,
__list = {
__prev = 0x0,
__next = 0x0


@@ -35,20 +35,16 @@ tunables with minimum and maximum values:
@example
$ /lib64/ld-linux-x86-64.so.2 --list-tunables
glibc.rtld.nns: 0x4 (min: 0x1, max: 0x10)
glibc.elision.skip_lock_after_retries: 3 (min: 0, max: 2147483647)
glibc.malloc.trim_threshold: 0x0 (min: 0x0, max: 0xffffffffffffffff)
glibc.malloc.perturb: 0 (min: 0, max: 255)
glibc.cpu.x86_shared_cache_size: 0x100000 (min: 0x0, max: 0xffffffffffffffff)
glibc.pthread.rseq: 1 (min: 0, max: 1)
glibc.cpu.prefer_map_32bit_exec: 0 (min: 0, max: 1)
glibc.mem.tagging: 0 (min: 0, max: 255)
glibc.elision.tries: 3 (min: 0, max: 2147483647)
glibc.elision.enable: 0 (min: 0, max: 1)
glibc.malloc.hugetlb: 0x0 (min: 0x0, max: 0xffffffffffffffff)
glibc.cpu.x86_rep_movsb_threshold: 0x2000 (min: 0x100, max: 0xffffffffffffffff)
glibc.malloc.mxfast: 0x0 (min: 0x0, max: 0xffffffffffffffff)
glibc.rtld.dynamic_sort: 2 (min: 1, max: 2)
glibc.elision.skip_lock_busy: 3 (min: 0, max: 2147483647)
glibc.malloc.top_pad: 0x20000 (min: 0x0, max: 0xffffffffffffffff)
glibc.cpu.x86_rep_stosb_threshold: 0x800 (min: 0x1, max: 0xffffffffffffffff)
glibc.cpu.x86_non_temporal_threshold: 0xc0000 (min: 0x4040, max: 0xfffffffffffffff)
@@ -56,12 +52,10 @@ glibc.cpu.x86_memset_non_temporal_threshold: 0xc0000 (min: 0x4040, max: 0xffffff
glibc.cpu.x86_shstk:
glibc.pthread.stack_cache_size: 0x2800000 (min: 0x0, max: 0xffffffffffffffff)
glibc.malloc.mmap_max: 0 (min: 0, max: 2147483647)
glibc.elision.skip_trylock_internal_abort: 3 (min: 0, max: 2147483647)
glibc.cpu.plt_rewrite: 0 (min: 0, max: 2)
glibc.malloc.tcache_unsorted_limit: 0x0 (min: 0x0, max: 0xffffffffffffffff)
glibc.cpu.x86_ibt:
glibc.cpu.hwcaps:
glibc.elision.skip_lock_internal_abort: 3 (min: 0, max: 2147483647)
glibc.malloc.arena_max: 0x0 (min: 0x1, max: 0xffffffffffffffff)
glibc.malloc.mmap_threshold: 0x0 (min: 0x0, max: 0xffffffffffffffff)
glibc.cpu.x86_data_cache_size: 0x8000 (min: 0x0, max: 0xffffffffffffffff)
@@ -77,7 +71,6 @@ glibc.malloc.check: 0 (min: 0, max: 3)
* Tunable names:: The structure of a tunable name
* Memory Allocation Tunables:: Tunables in the memory allocation subsystem
* Dynamic Linking Tunables:: Tunables in the dynamic linking subsystem
* Elision Tunables:: Tunables in elision subsystem
* POSIX Thread Tunables:: Tunables in the POSIX thread subsystem
* Hardware Capability Tunables:: Tunables that modify the hardware
capabilities seen by @theglibc{}
@@ -387,74 +380,6 @@ can be worked around by setting the tunable to @code{2}, where the stack is
always executable.
@end deftp
@node Elision Tunables
@section Elision Tunables
@cindex elision tunables
@cindex tunables, elision
@deftp {Tunable namespace} glibc.elision
Contended locks are usually slow and can lead to performance and scalability
issues in multithread code. Lock elision will use memory transactions to under
certain conditions, to elide locks and improve performance.
Elision behavior can be modified by setting the following tunables in
the @code{elision} namespace:
@end deftp
@deftp Tunable glibc.elision.enable
The @code{glibc.elision.enable} tunable enables lock elision if the feature is
supported by the hardware. If elision is not supported by the hardware this
tunable has no effect.
Elision tunables are supported for 64-bit Intel, IBM POWER, and z System
architectures.
@end deftp
@deftp Tunable glibc.elision.skip_lock_busy
The @code{glibc.elision.skip_lock_busy} tunable sets how many times to use a
non-transactional lock after a transactional failure has occurred because the
lock is already acquired. Expressed in number of lock acquisition attempts.
The default value of this tunable is @samp{3}.
@end deftp
@deftp Tunable glibc.elision.skip_lock_internal_abort
The @code{glibc.elision.skip_lock_internal_abort} tunable sets how many times
the thread should avoid using elision if a transaction aborted for any reason
other than a different thread's memory accesses. Expressed in number of lock
acquisition attempts.
The default value of this tunable is @samp{3}.
@end deftp
@deftp Tunable glibc.elision.skip_lock_after_retries
The @code{glibc.elision.skip_lock_after_retries} tunable sets how many times
to try to elide a lock with transactions, that only failed due to a different
thread's memory accesses, before falling back to regular lock.
Expressed in number of lock elision attempts.
This tunable is supported only on IBM POWER, and z System architectures.
The default value of this tunable is @samp{3}.
@end deftp
@deftp Tunable glibc.elision.tries
The @code{glibc.elision.tries} sets how many times to retry elision if there is
chance for the transaction to finish execution e.g., it wasn't
aborted due to the lock being already acquired. If elision is not supported
by the hardware this tunable is set to @samp{0} to avoid retries.
The default value of this tunable is @samp{3}.
@end deftp
@deftp Tunable glibc.elision.skip_trylock_internal_abort
The @code{glibc.elision.skip_trylock_internal_abort} tunable sets how many
times the thread should avoid trying the lock if a transaction aborted due to
reasons other than a different thread's memory accesses. Expressed in number
of try lock attempts.
The default value of this tunable is @samp{3}.
@end deftp
@node POSIX Thread Tunables
@section POSIX Thread Tunables
@cindex pthread mutex tunables
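As a rough sketch of how the removed elision tunables above were consumed
(based on the powerpc elision-lock.c deleted later in this diff; the x86
implementation differed in details), each lock carried a small adaptation
counter that the tunables seeded after transaction aborts:

  /* After a persistent transaction abort, skip elision for the next
     N lock acquisitions (N = skip_lock_internal_abort, default 3).  */
  if (_TEXASRU_FAILURE_PERSISTENT (__builtin_get_texasru ())
      && aconf.skip_lock_internal_abort > 0)
    atomic_store_relaxed (adapt_count, aconf.skip_lock_internal_abort);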


@@ -42,11 +42,6 @@ routines = \
cleanup_defer \
cleanup_defer_compat \
cleanup_routine \
elision-conf \
elision-lock \
elision-timed \
elision-trylock \
elision-unlock \
events \
futex-internal \
libc-cleanup \
@@ -724,8 +719,6 @@ endif
$(objpfx)tst-compat-forwarder: $(objpfx)tst-compat-forwarder-mod.so
tst-mutex10-ENV = GLIBC_TUNABLES=glibc.elision.enable=1
# Protect against a build using -Wl,-z,now.
LDFLAGS-tst-audit-threads-mod1.so = -Wl,-z,lazy
LDFLAGS-tst-audit-threads-mod2.so = -Wl,-z,lazy


@@ -1,17 +0,0 @@
/* elision-conf.c: Lock elision tunable parameters. Stub version.
Copyright (C) 2021-2025 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */


@@ -1 +0,0 @@
/* empty */


@@ -1,17 +0,0 @@
/* elision-lock.c: Lock elision locking. Stub version.
Copyright (C) 2021-2025 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */


@@ -1,17 +0,0 @@
/* elision-lock.c: Lock elision timed locking. Stub version.
Copyright (C) 2021-2025 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */


@@ -1,17 +0,0 @@
/* elision-lock.c: Lock elision locking attempts. Stub version.
Copyright (C) 2021-2025 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */


@@ -1,17 +0,0 @@
/* elision-lock.c: Lock elision unlocking support. Stub version.
Copyright (C) 2021-2025 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */


@@ -64,8 +64,3 @@ __lll_lock_wake (int *futex, int private)
lll_futex_wake (futex, 1, private);
}
libc_hidden_def (__lll_lock_wake)
#if ENABLE_ELISION_SUPPORT
int __pthread_force_elision;
libc_hidden_data_def (__pthread_force_elision)
#endif


@@ -99,12 +99,7 @@ class MutexPrinter(object):
self.values.append(MUTEX_TYPES[int(mutex_type)])
def read_status(self):
"""Read the mutex's status.
Architectures that support lock elision might not record the mutex owner
ID in the __owner field. In that case, the owner will be reported as
"Unknown".
"""
"""Read the mutex's status."""
if self.kind == PTHREAD_MUTEX_DESTROYED:
self.values.append(('Status', 'Destroyed'))
@@ -178,8 +173,6 @@ class MutexPrinter(object):
if self.owner != 0:
self.values.append(('Owner ID', owner))
else:
# Owner isn't recorded, probably because lock elision
# is enabled.
self.values.append(('Owner ID', 'Unknown'))
def read_attributes(self):
@@ -275,8 +268,7 @@ class MutexAttributesPrinter(object):
mutexattr_type = (self.mutexattr
& 0xffffffff
& ~PTHREAD_MUTEXATTR_FLAG_BITS
& ~PTHREAD_MUTEX_NO_ELISION_NP)
& ~PTHREAD_MUTEXATTR_FLAG_BITS)
# mutexattr_type must be casted to int because it's a gdb.Value
self.values.append(MUTEX_TYPES[int(mutexattr_type)])


@@ -37,7 +37,6 @@ PTHREAD_MUTEXATTR_PRIO_CEILING_MASK
PTHREAD_MUTEXATTR_FLAG_ROBUST
PTHREAD_MUTEXATTR_FLAG_PSHARED
PTHREAD_MUTEXATTR_FLAG_BITS
PTHREAD_MUTEX_NO_ELISION_NP
-- Priority protocols
PTHREAD_PRIO_NONE


@@ -4,13 +4,8 @@
lll_cond_lock ((mutex)->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex))
#define LLL_MUTEX_LOCK_OPTIMIZED(mutex) LLL_MUTEX_LOCK (mutex)
/* Not actually elided so far. Needed? */
#define LLL_MUTEX_LOCK_ELISION(mutex) \
({ lll_cond_lock ((mutex)->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex)); 0; })
#define LLL_MUTEX_TRYLOCK(mutex) \
lll_cond_trylock ((mutex)->__data.__lock)
#define LLL_MUTEX_TRYLOCK_ELISION(mutex) LLL_MUTEX_TRYLOCK(mutex)
/* We need to assume that there are other threads blocked on the futex.
See __pthread_mutex_lock_full for further details. */


@@ -54,12 +54,6 @@ lll_mutex_lock_optimized (pthread_mutex_t *mutex)
# define LLL_MUTEX_TRYLOCK(mutex) \
lll_trylock ((mutex)->__data.__lock)
# define LLL_ROBUST_MUTEX_LOCK_MODIFIER 0
# define LLL_MUTEX_LOCK_ELISION(mutex) \
lll_lock_elision ((mutex)->__data.__lock, (mutex)->__data.__elision, \
PTHREAD_MUTEX_PSHARED (mutex))
# define LLL_MUTEX_TRYLOCK_ELISION(mutex) \
lll_trylock_elision((mutex)->__data.__lock, (mutex)->__data.__elision, \
PTHREAD_MUTEX_PSHARED (mutex))
# define PTHREAD_MUTEX_LOCK ___pthread_mutex_lock
# define PTHREAD_MUTEX_VERSIONS 1
#endif
@@ -77,39 +71,25 @@ PTHREAD_MUTEX_LOCK (pthread_mutex_t *mutex)
{
/* See concurrency notes regarding mutex type which is loaded from __kind
in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h. */
unsigned int type = PTHREAD_MUTEX_TYPE_ELISION (mutex);
unsigned int type = PTHREAD_MUTEX_TYPE (mutex);
LIBC_PROBE (mutex_entry, 1, mutex);
if (__builtin_expect (type & ~(PTHREAD_MUTEX_KIND_MASK_NP
| PTHREAD_MUTEX_ELISION_FLAGS_NP), 0))
if (__glibc_unlikely (type & ~PTHREAD_MUTEX_KIND_MASK_NP))
return __pthread_mutex_lock_full (mutex);
pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
if (__glibc_likely (type == PTHREAD_MUTEX_TIMED_NP))
{
FORCE_ELISION (mutex, goto elision);
simple:
/* Normal mutex. */
LLL_MUTEX_LOCK_OPTIMIZED (mutex);
assert (mutex->__data.__owner == 0);
}
#if ENABLE_ELISION_SUPPORT
else if (__glibc_likely (type == PTHREAD_MUTEX_TIMED_ELISION_NP))
{
elision: __attribute__((unused))
/* This case can never happen on a system without elision,
as the mutex type initialization functions will not
allow to set the elision flags. */
/* Don't record owner or users for elision case. This is a
tail call. */
return LLL_MUTEX_LOCK_ELISION (mutex);
}
#endif
else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
== PTHREAD_MUTEX_RECURSIVE_NP, 1))
else if (__glibc_likely (type == PTHREAD_MUTEX_RECURSIVE_NP))
{
/* Recursive mutex. */
pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
/* Check whether we already hold the mutex. */
if (mutex->__data.__owner == id)
@@ -130,8 +110,7 @@ PTHREAD_MUTEX_LOCK (pthread_mutex_t *mutex)
assert (mutex->__data.__owner == 0);
mutex->__data.__count = 1;
}
else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
== PTHREAD_MUTEX_ADAPTIVE_NP, 1))
else if (__glibc_likely (type == PTHREAD_MUTEX_ADAPTIVE_NP))
{
if (LLL_MUTEX_TRYLOCK (mutex) != 0)
{
@@ -168,16 +147,13 @@ PTHREAD_MUTEX_LOCK (pthread_mutex_t *mutex)
}
else
{
pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
assert (PTHREAD_MUTEX_TYPE (mutex) == PTHREAD_MUTEX_ERRORCHECK_NP);
assert (type == PTHREAD_MUTEX_ERRORCHECK_NP);
/* Check whether we already hold the mutex. */
if (__glibc_unlikely (mutex->__data.__owner == id))
return EDEADLK;
goto simple;
}
pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
/* Record the ownership. */
mutex->__data.__owner = id;
#ifndef NO_INCR
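
The net effect of this hunk: with the elision bits
(PTHREAD_MUTEX_ELISION_NP = 256, PTHREAD_MUTEX_NO_ELISION_NP = 512, see
the pthreadP.h hunk later in this diff) gone from __kind, the fast path
can classify a mutex with the plain kind mask again (condensed sketch):

  unsigned int type = atomic_load_relaxed (&mutex->__data.__kind) & 127;
  if (__glibc_unlikely (type & ~PTHREAD_MUTEX_KIND_MASK_NP))
    /* Robust, priority-inheritance or priority-protected mutex.  */
    return __pthread_mutex_lock_full (mutex);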


@@ -42,11 +42,10 @@ __pthread_mutex_clocklock_common (pthread_mutex_t *mutex,
/* See concurrency notes regarding mutex type which is loaded from __kind
in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h. */
switch (__builtin_expect (PTHREAD_MUTEX_TYPE_ELISION (mutex),
switch (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex),
PTHREAD_MUTEX_TIMED_NP))
{
/* Recursive mutex. */
case PTHREAD_MUTEX_RECURSIVE_NP|PTHREAD_MUTEX_ELISION_NP:
case PTHREAD_MUTEX_RECURSIVE_NP:
/* Check whether we already hold the mutex. */
if (mutex->__data.__owner == id)
@@ -78,26 +77,14 @@ __pthread_mutex_clocklock_common (pthread_mutex_t *mutex,
if (__glibc_unlikely (mutex->__data.__owner == id))
return EDEADLK;
/* Don't do lock elision on an error checking mutex. */
goto simple;
/* FALLTHROUGH */
case PTHREAD_MUTEX_TIMED_NP:
FORCE_ELISION (mutex, goto elision);
simple:
/* Normal mutex. */
result = __futex_clocklock64 (&mutex->__data.__lock, clockid, abstime,
PTHREAD_MUTEX_PSHARED (mutex));
break;
case PTHREAD_MUTEX_TIMED_ELISION_NP:
elision: __attribute__((unused))
/* Don't record ownership */
return lll_clocklock_elision (mutex->__data.__lock,
mutex->__data.__spins,
clockid, abstime,
PTHREAD_MUTEX_PSHARED (mutex));
case PTHREAD_MUTEX_ADAPTIVE_NP:
if (lll_trylock (mutex->__data.__lock) != 0)
{


@@ -30,11 +30,10 @@ ___pthread_mutex_trylock (pthread_mutex_t *mutex)
/* See concurrency notes regarding mutex type which is loaded from __kind
in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h. */
switch (__builtin_expect (PTHREAD_MUTEX_TYPE_ELISION (mutex),
switch (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex),
PTHREAD_MUTEX_TIMED_NP))
{
/* Recursive mutex. */
case PTHREAD_MUTEX_RECURSIVE_NP|PTHREAD_MUTEX_ELISION_NP:
case PTHREAD_MUTEX_RECURSIVE_NP:
/* Check whether we already hold the mutex. */
if (mutex->__data.__owner == id)
@@ -58,17 +57,7 @@ ___pthread_mutex_trylock (pthread_mutex_t *mutex)
}
break;
case PTHREAD_MUTEX_TIMED_ELISION_NP:
elision: __attribute__((unused))
if (lll_trylock_elision (mutex->__data.__lock,
mutex->__data.__elision) != 0)
break;
/* Don't record the ownership. */
return 0;
case PTHREAD_MUTEX_TIMED_NP:
FORCE_ELISION (mutex, goto elision);
[[fallthrough]];
case PTHREAD_MUTEX_ADAPTIVE_NP:
case PTHREAD_MUTEX_ERRORCHECK_NP:
if (lll_trylock (mutex->__data.__lock) != 0)


@@ -48,10 +48,8 @@ __pthread_mutex_unlock_usercnt (pthread_mutex_t *mutex, int decr)
{
/* See concurrency notes regarding mutex type which is loaded from __kind
in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h. */
int type = PTHREAD_MUTEX_TYPE_ELISION (mutex);
if (__builtin_expect (type
& ~(PTHREAD_MUTEX_KIND_MASK_NP
|PTHREAD_MUTEX_ELISION_FLAGS_NP), 0))
int type = PTHREAD_MUTEX_TYPE (mutex);
if (__glibc_unlikely (type & ~PTHREAD_MUTEX_KIND_MASK_NP))
return __pthread_mutex_unlock_full (mutex, decr);
if (__builtin_expect (type, PTHREAD_MUTEX_TIMED_NP)
@@ -71,14 +69,7 @@ __pthread_mutex_unlock_usercnt (pthread_mutex_t *mutex, int decr)
return 0;
}
else if (__glibc_likely (type == PTHREAD_MUTEX_TIMED_ELISION_NP))
{
/* Don't reset the owner/users fields for elision. */
return lll_unlock_elision (mutex->__data.__lock, mutex->__data.__elision,
PTHREAD_MUTEX_PSHARED (mutex));
}
else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
== PTHREAD_MUTEX_RECURSIVE_NP, 1))
else if (__glibc_likely (type == PTHREAD_MUTEX_RECURSIVE_NP))
{
/* Recursive mutex. */
if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
@@ -89,8 +80,7 @@ __pthread_mutex_unlock_usercnt (pthread_mutex_t *mutex, int decr)
return 0;
goto normal;
}
else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
== PTHREAD_MUTEX_ADAPTIVE_NP, 1))
else if (__glibc_likely (type == PTHREAD_MUTEX_ADAPTIVE_NP))
goto normal;
else
{


@@ -25,8 +25,7 @@ __pthread_mutexattr_gettype (const pthread_mutexattr_t *attr, int *kind)
iattr = (const struct pthread_mutexattr *) attr;
*kind = (iattr->mutexkind & ~PTHREAD_MUTEXATTR_FLAG_BITS
& ~PTHREAD_MUTEX_NO_ELISION_NP);
*kind = iattr->mutexkind & ~PTHREAD_MUTEXATTR_FLAG_BITS;
return 0;
}


@@ -27,11 +27,6 @@ ___pthread_mutexattr_settype (pthread_mutexattr_t *attr, int kind)
if (kind < PTHREAD_MUTEX_NORMAL || kind > PTHREAD_MUTEX_ADAPTIVE_NP)
return EINVAL;
/* Cannot distinguish between DEFAULT and NORMAL. So any settype
call disables elision for now. */
if (kind == PTHREAD_MUTEX_NORMAL)
kind |= PTHREAD_MUTEX_NO_ELISION_NP;
iattr = (struct pthread_mutexattr *) attr;
iattr->mutexkind = (iattr->mutexkind & PTHREAD_MUTEXATTR_FLAG_BITS) | kind;


@@ -206,13 +206,7 @@
POSIX allows but does not require rwlock acquisitions to be a cancellation
point. We do not support cancellation.
TODO We do not try to elide any read or write lock acquisitions currently.
While this would be possible, it is unclear whether HTM performance is
currently predictable enough and our runtime tuning is good enough at
deciding when to use elision so that enabling it would lead to consistently
better performance. */
point. We do not support cancellation. */
static int


@@ -43,7 +43,6 @@ try:
next_cmd()
thread_id = get_current_thread_lwpid()
# Owner ID might be reported either as the thread ID or as "Unknown"
# (if e.g. lock elision is enabled).
test_printer(var, to_string,
{'Status': 'Acquired, possibly with no waiters',
'Owner ID': r'({0}|Unknown)'.format(thread_id)})


@@ -21,8 +21,6 @@
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <elf/dl-tunables.h>
static pthread_mutex_t *m;
static pthread_barrier_t b;
@@ -95,28 +93,6 @@ check_type (const char *mas, pthread_mutexattr_t *ma)
{
int e;
/* Check if a mutex will be elided. Lock elision can only be activated via
the tunables framework. By default, lock elision is disabled. */
bool assume_elided_mutex = false;
int ma_type = PTHREAD_MUTEX_TIMED_NP;
if (ma != NULL)
{
e = pthread_mutexattr_gettype (ma, &ma_type);
if (e != 0)
{
printf ("pthread_mutexattr_gettype failed with %d (%m)\n", e);
return 1;
}
}
if (ma_type == PTHREAD_MUTEX_TIMED_NP)
{
/* This type of mutex can be elided if elision is enabled via the tunables
framework. Some tests below are failing if the mutex is elided.
Thus we only run those if we assume that the mutex won't be elided. */
if (TUNABLE_GET_FULL (glibc, elision, enable, int32_t, NULL) == 1)
assume_elided_mutex = true;
}
e = pthread_mutex_init (m, ma);
if (e != 0)
{
@@ -149,23 +125,17 @@ check_type (const char *mas, pthread_mutexattr_t *ma)
return 1;
}
/* Elided mutexes don't fail destroy, thus only test this if we don't assume
elision. */
if (assume_elided_mutex == false)
e = pthread_mutex_destroy (m);
if (e == 0)
{
e = pthread_mutex_destroy (m);
if (e == 0)
{
printf ("mutex_destroy of self-locked mutex succeeded for %s\n", mas);
return 1;
}
if (e != EBUSY)
{
printf ("\
mutex_destroy of self-locked mutex did not return EBUSY %s\n",
mas);
return 1;
}
printf ("mutex_destroy of self-locked mutex succeeded for %s\n", mas);
return 1;
}
if (e != EBUSY)
{
printf ("mutex_destroy of self-locked mutex did not return EBUSY %s\n",
mas);
return 1;
}
if (pthread_mutex_unlock (m) != 0)
@@ -180,23 +150,18 @@ mutex_destroy of self-locked mutex did not return EBUSY %s\n",
return 1;
}
/* Elided mutexes don't fail destroy. */
if (assume_elided_mutex == false)
e = pthread_mutex_destroy (m);
if (e == 0)
{
e = pthread_mutex_destroy (m);
if (e == 0)
{
printf ("mutex_destroy of self-trylocked mutex succeeded for %s\n",
mas);
return 1;
}
if (e != EBUSY)
{
printf ("\
mutex_destroy of self-trylocked mutex did not return EBUSY %s\n",
mas);
return 1;
}
printf ("mutex_destroy of self-trylocked mutex succeeded for %s\n",
mas);
return 1;
}
if (e != EBUSY)
{
printf ("mutex_destroy of self-trylocked mutex did not return EBUSY %s\n",
mas);
return 1;
}
if (pthread_mutex_unlock (m) != 0)
@@ -232,22 +197,18 @@ mutex_destroy of self-trylocked mutex did not return EBUSY %s\n",
return 1;
}
/* Elided mutexes don't fail destroy. */
if (assume_elided_mutex == false)
e = pthread_mutex_destroy (m);
if (e == 0)
{
e = pthread_mutex_destroy (m);
if (e == 0)
{
printf ("mutex_destroy of condvar-used mutex succeeded for %s\n",
mas);
return 1;
}
if (e != EBUSY)
{
printf ("\
printf ("mutex_destroy of condvar-used mutex succeeded for %s\n",
mas);
return 1;
}
if (e != EBUSY)
{
printf ("\
mutex_destroy of condvar-used mutex did not return EBUSY for %s\n", mas);
return 1;
}
return 1;
}
done = true;
@@ -307,23 +268,19 @@ mutex_destroy of condvar-used mutex did not return EBUSY for %s\n", mas);
return 1;
}
/* Elided mutexes don't fail destroy. */
if (assume_elided_mutex == false)
e = pthread_mutex_destroy (m);
if (e == 0)
{
e = pthread_mutex_destroy (m);
if (e == 0)
{
printf ("2nd mutex_destroy of condvar-used mutex succeeded for %s\n",
mas);
return 1;
}
if (e != EBUSY)
{
printf ("\
printf ("2nd mutex_destroy of condvar-used mutex succeeded for %s\n",
mas);
return 1;
}
if (e != EBUSY)
{
printf ("\
2nd mutex_destroy of condvar-used mutex did not return EBUSY for %s\n",
mas);
return 1;
}
mas);
return 1;
}
if (pthread_cancel (th) != 0)


@@ -187,9 +187,6 @@ def init_test(test_bin, printer_files, printer_names):
# Finally, load the test binary.
test('file {0}'.format(test_bin))
# Disable lock elision.
test('set environment GLIBC_TUNABLES glibc.elision.enable=0')
def go_to_main():
"""Executes a gdb 'start' command, which takes us to main."""


@@ -1,25 +0,0 @@
/* elide.h: Fallback noop lock elision support.
Copyright (C) 2014-2025 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#ifndef ELIDE_H
#define ELIDE_H 1
#define ELIDE_LOCK(adapt_count, is_lock_free) 0
#define ELIDE_TRYLOCK(adapt_count, is_lock_free, write) 0
#define ELIDE_UNLOCK(is_lock_free) 0
#endif


@@ -21,8 +21,8 @@
/* Generic struct for both POSIX and C11 mutexes. New ports are expected
to use the default layout, however architecture can redefine it to
add arch-specific extension (such as lock-elision). The struct have
a size of 32 bytes on LP32 and 40 bytes on LP64 architectures. */
add arch-specific extension. The struct have a size of 32 bytes on LP32
and 40 bytes on LP64 architectures. */
struct __pthread_mutex_s
{
@@ -40,21 +40,7 @@ struct __pthread_mutex_s
PTHREAD_MUTEX_INITIALIZER or by a call to pthread_mutex_init.
After a mutex has been initialized, the __kind of a mutex is usually not
changed. BUT it can be set to -1 in pthread_mutex_destroy or elision can
be enabled. This is done concurrently in the pthread_mutex_*lock
functions by using the macro FORCE_ELISION. This macro is only defined
for architectures which supports lock elision.
For elision, there are the flags PTHREAD_MUTEX_ELISION_NP and
PTHREAD_MUTEX_NO_ELISION_NP which can be set in addition to the already
set type of a mutex. Before a mutex is initialized, only
PTHREAD_MUTEX_NO_ELISION_NP can be set with pthread_mutexattr_settype.
After a mutex has been initialized, the functions pthread_mutex_*lock can
enable elision - if the mutex-type and the machine supports it - by
setting the flag PTHREAD_MUTEX_ELISION_NP. This is done concurrently.
Afterwards the lock / unlock functions are using specific elision
code-paths. */
changed. BUT it can be set to -1 in pthread_mutex_destroy. */
int __kind;
#if __WORDSIZE != 64
unsigned int __nusers;


@@ -23,8 +23,8 @@
/* Generic struct for both POSIX read-write lock. New ports are expected
to use the default layout, however archictetures can redefine it to add
arch-specific extensions (such as lock-elision). The struct have a size
of 32 bytes on both LP32 and LP64 architectures. */
arch-specific extensions. The struct have a size of 32 bytes on both LP32
and LP64 architectures. */
struct __pthread_rwlock_arch_t
{


@@ -20,7 +20,6 @@
#define _LOWLEVELLOCK_H 1
#include <atomic.h>
#include <elision-conf.h>
#include <lowlevellock-futex.h>
#include <time.h>
@@ -171,105 +170,4 @@ libc_hidden_proto (__lll_lock_wake)
#define LLL_LOCK_INITIALIZER (0)
#define LLL_LOCK_INITIALIZER_LOCKED (1)
/* Elision support. */
#if ENABLE_ELISION_SUPPORT
/* Force elision for all new locks. This is used to decide whether
existing DEFAULT locks should be automatically upgraded to elision
in pthread_mutex_lock. Disabled for suid programs. Only used when
elision is available. */
extern int __pthread_force_elision;
libc_hidden_proto (__pthread_force_elision)
extern void __lll_elision_init (void) attribute_hidden;
extern int __lll_clocklock_elision (int *futex, short *adapt_count,
clockid_t clockid,
const struct __timespec64 *timeout,
int private);
libc_hidden_proto (__lll_clocklock_elision)
extern int __lll_lock_elision (int *futex, short *adapt_count, int private);
libc_hidden_proto (__lll_lock_elision)
# if ELISION_UNLOCK_NEEDS_ADAPT_COUNT
extern int __lll_unlock_elision (int *lock, short *adapt_count, int private);
# else
extern int __lll_unlock_elision (int *lock, int private);
# endif
libc_hidden_proto (__lll_unlock_elision)
extern int __lll_trylock_elision (int *lock, short *adapt_count);
libc_hidden_proto (__lll_trylock_elision)
# define lll_clocklock_elision(futex, adapt_count, clockid, timeout, private) \
__lll_clocklock_elision (&(futex), &(adapt_count), clockid, timeout, private)
# define lll_lock_elision(futex, adapt_count, private) \
__lll_lock_elision (&(futex), &(adapt_count), private)
# define lll_trylock_elision(futex, adapt_count) \
__lll_trylock_elision (&(futex), &(adapt_count))
# if ELISION_UNLOCK_NEEDS_ADAPT_COUNT
# define lll_unlock_elision(futex, adapt_count, private) \
__lll_unlock_elision (&(futex), &(adapt_count), private)
# else
# define lll_unlock_elision(futex, adapt_count, private) \
__lll_unlock_elision (&(futex), private)
# endif
/* Automatically enable elision for existing user lock kinds. */
# define FORCE_ELISION(m, s) \
if (__pthread_force_elision) \
{ \
/* See concurrency notes regarding __kind in \
struct __pthread_mutex_s in \
sysdeps/nptl/bits/thread-shared-types.h. \
\
There are the following cases for the kind of a mutex \
(The mask PTHREAD_MUTEX_ELISION_FLAGS_NP covers the flags \
PTHREAD_MUTEX_ELISION_NP and PTHREAD_MUTEX_NO_ELISION_NP where \
only one of both flags can be set): \
- both flags are not set: \
This is the first lock operation for this mutex. Enable \
elision as it is not enabled so far. \
Note: It can happen that multiple threads are calling e.g. \
pthread_mutex_lock at the same time as the first lock \
operation for this mutex. Then elision is enabled for this \
mutex by multiple threads. Storing with relaxed MO is enough \
as all threads will store the same new value for the kind of \
the mutex. But we have to ensure that we always use the \
elision path regardless if this thread has enabled elision or \
another one. \
\
- PTHREAD_MUTEX_ELISION_NP flag is set: \
Elision was already enabled for this mutex by a previous lock \
operation. See case above. Just use the elision path. \
\
- PTHREAD_MUTEX_NO_ELISION_NP flag is set: \
Elision was explicitly disabled by pthread_mutexattr_settype. \
Do not use the elision path. \
Note: The flag PTHREAD_MUTEX_NO_ELISION_NP will never be \
changed after mutex initialization. */ \
int mutex_kind = atomic_load_relaxed (&((m)->__data.__kind)); \
if ((mutex_kind & PTHREAD_MUTEX_ELISION_FLAGS_NP) == 0) \
{ \
mutex_kind |= PTHREAD_MUTEX_ELISION_NP; \
atomic_store_relaxed (&((m)->__data.__kind), mutex_kind); \
} \
if ((mutex_kind & PTHREAD_MUTEX_ELISION_NP) != 0) \
{ \
s; \
} \
}
#else /* !ENABLE_ELISION_SUPPORT */
# define lll_clocklock_elision(futex, adapt_count, clockid, abstime, private) \
__futex_clocklock64 (&(futex), clockid, abstime, private)
# define lll_lock_elision(lock, try_lock, private) \
({ lll_lock (lock, private); 0; })
# define lll_trylock_elision(a,t) lll_trylock(a)
# define lll_unlock_elision(a,b,c) ({ lll_unlock (a,c); 0; })
# define FORCE_ELISION(m, s)
#endif /* !ENABLE_ELISION_SUPPORT */
#endif /* lowlevellock.h */
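
Condensed, the removed FORCE_ELISION (m, s) macro above upgraded a
default mutex to the elision path on its first lock operation; relaxed
MO was sufficient because every racing thread stored the same new value
(sketch of the macro body):

  int kind = atomic_load_relaxed (&m->__data.__kind);
  if ((kind & PTHREAD_MUTEX_ELISION_FLAGS_NP) == 0)
    {
      kind |= PTHREAD_MUTEX_ELISION_NP;
      atomic_store_relaxed (&m->__data.__kind, kind);
    }
  if ((kind & PTHREAD_MUTEX_ELISION_NP) != 0)
    { s; }   /* e.g. "goto elision" in pthread_mutex_lock.  */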


@@ -59,10 +59,6 @@ static inline short max_adaptive_count (void)
enum
{
PTHREAD_MUTEX_KIND_MASK_NP = 3,
PTHREAD_MUTEX_ELISION_NP = 256,
PTHREAD_MUTEX_NO_ELISION_NP = 512,
PTHREAD_MUTEX_ROBUST_NORMAL_NP = 16,
PTHREAD_MUTEX_ROBUST_RECURSIVE_NP
= PTHREAD_MUTEX_ROBUST_NORMAL_NP | PTHREAD_MUTEX_RECURSIVE_NP,
@@ -95,14 +91,7 @@ enum
PTHREAD_MUTEX_PP_ERRORCHECK_NP
= PTHREAD_MUTEX_PRIO_PROTECT_NP | PTHREAD_MUTEX_ERRORCHECK_NP,
PTHREAD_MUTEX_PP_ADAPTIVE_NP
= PTHREAD_MUTEX_PRIO_PROTECT_NP | PTHREAD_MUTEX_ADAPTIVE_NP,
PTHREAD_MUTEX_ELISION_FLAGS_NP
= PTHREAD_MUTEX_ELISION_NP | PTHREAD_MUTEX_NO_ELISION_NP,
PTHREAD_MUTEX_TIMED_ELISION_NP =
PTHREAD_MUTEX_TIMED_NP | PTHREAD_MUTEX_ELISION_NP,
PTHREAD_MUTEX_TIMED_NO_ELISION_NP =
PTHREAD_MUTEX_TIMED_NP | PTHREAD_MUTEX_NO_ELISION_NP,
= PTHREAD_MUTEX_PRIO_PROTECT_NP | PTHREAD_MUTEX_ADAPTIVE_NP
};
#define PTHREAD_MUTEX_PSHARED_BIT 128
@@ -110,11 +99,6 @@ enum
in sysdeps/nptl/bits/thread-shared-types.h. */
#define PTHREAD_MUTEX_TYPE(m) \
(atomic_load_relaxed (&((m)->__data.__kind)) & 127)
/* Don't include NO_ELISION, as that type is always the same
as the underlying lock type. */
#define PTHREAD_MUTEX_TYPE_ELISION(m) \
(atomic_load_relaxed (&((m)->__data.__kind)) \
& (127 | PTHREAD_MUTEX_ELISION_NP))
#if LLL_PRIVATE == 0 && LLL_SHARED == 128
# define PTHREAD_MUTEX_PSHARED(m) \


@@ -32,7 +32,7 @@ struct __pthread_mutex_s
int __kind;
#if __WORDSIZE == 64
short __spins;
short __elision;
short __unused;
__pthread_list_t __list;
# define __PTHREAD_MUTEX_HAVE_PREV 1
#else
@@ -41,11 +41,10 @@ struct __pthread_mutex_s
{
struct
{
short __espins;
short __elision;
# define __spins __elision_data.__espins
# define __elision __elision_data.__elision
} __elision_data;
short __data_spins;
short __data_unused;
# define __spins __data.__data_spins
} __data;
__pthread_slist_t __list;
};
# define __PTHREAD_MUTEX_HAVE_PREV 0


@@ -31,31 +31,28 @@ struct __pthread_rwlock_arch_t
#if __WORDSIZE == 64
int __cur_writer;
int __shared;
unsigned char __rwelision;
unsigned char __pad1[7];
unsigned long int __pad1;
unsigned long int __pad2;
/* FLAGS must stay at this position in the structure to maintain
binary compatibility. */
unsigned int __flags;
# define __PTHREAD_RWLOCK_ELISION_EXTRA 0, {0, 0, 0, 0, 0, 0, 0 }
#else
unsigned char __rwelision;
unsigned char __pad1;
unsigned char __pad2;
unsigned char __shared;
/* FLAGS must stay at this position in the structure to maintain
binary compatibility. */
unsigned char __flags;
int __cur_writer;
# define __PTHREAD_RWLOCK_ELISION_EXTRA 0
#endif
};
#if __WORDSIZE == 64
# define __PTHREAD_RWLOCK_INITIALIZER(__flags) \
0, 0, 0, 0, 0, 0, 0, 0, __PTHREAD_RWLOCK_ELISION_EXTRA, 0, __flags
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, __flags
#else
# define __PTHREAD_RWLOCK_INITIALIZER(__flags) \
0, 0, 0, 0, 0, 0, __PTHREAD_RWLOCK_ELISION_EXTRA, 0, 0, __flags, 0
0, 0, 0, 0, 0, 0, 0, 0, 0, __flags, 0
#endif
#endif


@@ -1,116 +0,0 @@
/* elide.h: Generic lock elision support for powerpc.
Copyright (C) 2015-2025 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#ifndef ELIDE_PPC_H
# define ELIDE_PPC_H
# include <htm.h>
# include <elision-conf.h>
/* Get the new value of adapt_count according to the elision
configurations. Returns true if the system should retry again or false
otherwise. */
static inline bool
__get_new_count (uint8_t *adapt_count, int attempt)
{
/* A persistent failure indicates that a retry will probably
result in another failure. Use normal locking now and
for the next couple of calls. */
if (_TEXASRU_FAILURE_PERSISTENT (__builtin_get_texasru ()))
{
if (__elision_aconf.skip_lock_internal_abort > 0)
*adapt_count = __elision_aconf.skip_lock_internal_abort;
return false;
}
/* Same logic as above, but for a number of temporary failures in a
a row. */
else if (attempt <= 1 && __elision_aconf.skip_lock_out_of_tbegin_retries > 0
&& __elision_aconf.try_tbegin > 0)
*adapt_count = __elision_aconf.skip_lock_out_of_tbegin_retries;
return true;
}
/* CONCURRENCY NOTES:
The evaluation of the macro expression is_lock_free encompasses one or
more loads from memory locations that are concurrently modified by other
threads. For lock elision to work, this evaluation and the rest of the
critical section protected by the lock must be atomic because an
execution with lock elision must be equivalent to an execution in which
the lock would have been actually acquired and released. Therefore, we
evaluate is_lock_free inside of the transaction that represents the
critical section for which we want to use lock elision, which ensures
the atomicity that we require. */
/* Returns 0 if the lock defined by is_lock_free was elided.
ADAPT_COUNT is a per-lock state variable. */
# define ELIDE_LOCK(adapt_count, is_lock_free) \
({ \
int ret = 0; \
if (adapt_count > 0) \
(adapt_count)--; \
else \
for (int i = __elision_aconf.try_tbegin; i > 0; i--) \
{ \
if (__libc_tbegin (0)) \
{ \
if (is_lock_free) \
{ \
ret = 1; \
break; \
} \
__libc_tabort (_ABORT_LOCK_BUSY); \
} \
else \
if (!__get_new_count (&adapt_count,i)) \
break; \
} \
ret; \
})
# define ELIDE_TRYLOCK(adapt_count, is_lock_free, write) \
({ \
int ret = 0; \
if (__elision_aconf.try_tbegin > 0) \
{ \
if (write) \
__libc_tabort (_ABORT_NESTED_TRYLOCK); \
ret = ELIDE_LOCK (adapt_count, is_lock_free); \
} \
ret; \
})
static inline bool
__elide_unlock (int is_lock_free)
{
if (is_lock_free)
{
/* This code is expected to crash when trying to unlock a lock not
held by this thread. More information is available in the
__pthread_rwlock_unlock() implementation. */
__libc_tend (0);
return true;
}
return false;
}
# define ELIDE_UNLOCK(is_lock_free) \
__elide_unlock (is_lock_free)
#endif
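
For context, a caller of these macros looked roughly like this
(hypothetical sketch; the field names are illustrative, and the real
users were the rwlock implementations):

  /* Run the read-side critical section transactionally when the lock
     looks free; fall back to the real lock word otherwise.  */
  if (ELIDE_LOCK (rwlock->__rwelision,
                  rwlock->__readers == 0 && rwlock->__writers == 0))
    return 0;   /* Elided; the unlock side must use ELIDE_UNLOCK.  */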


@@ -201,7 +201,6 @@ tests += \
tst-mutex6 \
tst-mutex7 \
tst-mutex9 \
tst-mutex10 \
tst-mutex11 \
tst-once1 \
tst-once2 \


@@ -1,29 +0,0 @@
/* elision-conf.h: Lock elision configuration. Stub version.
Copyright (C) 2021-2025 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#ifndef _ELISION_CONF_H
#define _ELISION_CONF_H 1
/* No elision support by default. */
#define ENABLE_ELISION_SUPPORT 0
/* Whether __lll_unlock_elision expects a pointer argument to the
adaptive counter. Here, an unused arbitrary value. */
#define ELISION_UNLOCK_NEEDS_ADAPT_COUNT 0
#endif


@@ -1,4 +1,4 @@
/* Check that error checking mutexes are not subject to lock elision.
/* Check that already locked error checking mutexes return EDEADLK.
Copyright (C) 2016-2025 Free Software Foundation, Inc.
This file is part of the GNU C Library.
@@ -37,10 +37,6 @@ do_test (void)
TEST_COMPARE (pthread_mutex_init (&mutex, &mutexattr), 0);
TEST_COMPARE (pthread_mutexattr_destroy (&mutexattr), 0);
/* The call to pthread_mutex_timedlock erroneously enabled lock elision
on the mutex, which then triggered an assertion failure in
pthread_mutex_unlock. It would also defeat the error checking nature
of the mutex. */
TEST_COMPARE (pthread_mutex_timedlock (&mutex, &tms), 0);
TEST_COMPARE (pthread_mutex_timedlock (&mutex, &tms), EDEADLK);


@@ -1,109 +0,0 @@
/* Testing race while enabling lock elision.
Copyright (C) 2018-2025 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <pthread.h>
#include <unistd.h>
#include <getopt.h>
#include <support/support.h>
#include <support/xthread.h>
static pthread_barrier_t barrier;
static pthread_mutex_t mutex;
static long long int iteration_count = 1000000;
static unsigned int thread_count = 3;
static void *
thr_func (void *arg)
{
long long int i;
for (i = 0; i < iteration_count; i++)
{
if ((uintptr_t) arg == 0)
{
xpthread_mutex_destroy (&mutex);
xpthread_mutex_init (&mutex, NULL);
}
xpthread_barrier_wait (&barrier);
/* Test if enabling lock elision works if it is enabled concurrently.
There was a race in FORCE_ELISION macro which leads to either
pthread_mutex_destroy returning EBUSY as the owner was recorded
by pthread_mutex_lock - in "normal mutex" code path - but was not
reset in pthread_mutex_unlock - in "elision" code path.
Or it leads to the assertion in nptl/pthread_mutex_lock.c:
assert (mutex->__data.__owner == 0);
Please ensure that the test is run with lock elision:
export GLIBC_TUNABLES=glibc.elision.enable=1 */
xpthread_mutex_lock (&mutex);
xpthread_mutex_unlock (&mutex);
xpthread_barrier_wait (&barrier);
}
return NULL;
}
static int
do_test (void)
{
unsigned int i;
printf ("Starting %d threads to run %lld iterations.\n",
thread_count, iteration_count);
pthread_t *threads = xmalloc (thread_count * sizeof (pthread_t));
xpthread_barrier_init (&barrier, NULL, thread_count);
xpthread_mutex_init (&mutex, NULL);
for (i = 0; i < thread_count; i++)
threads[i] = xpthread_create (NULL, thr_func, (void *) (uintptr_t) i);
for (i = 0; i < thread_count; i++)
xpthread_join (threads[i]);
xpthread_barrier_destroy (&barrier);
free (threads);
return EXIT_SUCCESS;
}
#define OPT_ITERATIONS 10000
#define OPT_THREADS 10001
#define CMDLINE_OPTIONS \
{ "iterations", required_argument, NULL, OPT_ITERATIONS }, \
{ "threads", required_argument, NULL, OPT_THREADS },
static void
cmdline_process (int c)
{
long long int arg = strtoll (optarg, NULL, 0);
switch (c)
{
case OPT_ITERATIONS:
if (arg > 0)
iteration_count = arg;
break;
case OPT_THREADS:
if (arg > 0 && arg < 100)
thread_count = arg;
break;
}
}
#define CMDLINE_PROCESS cmdline_process
#define TIMEOUT 50
#include <support/test-driver.c>


@@ -22,7 +22,6 @@
#include <unistd.h>
#include <sys/time.h>
#include <stdint.h>
#include <config.h>
#include <support/check.h>
#include <support/timespec.h>
#include <support/xthread.h>


@@ -1,44 +1,6 @@
# This file is generated from configure.ac by Autoconf. DO NOT EDIT!
# Local configure fragment for sysdeps/s390.
{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for __builtin_tbegin" >&5
printf %s "checking for __builtin_tbegin... " >&6; }
if test ${libc_cv_gcc_builtin_tbegin+y}
then :
printf %s "(cached) " >&6
else case e in #(
e) cat > conftest.c <<\EOF
#include <htmintrin.h>
void testtransaction ()
{
if (__builtin_tbegin (0) == _HTM_TBEGIN_STARTED)
{
__builtin_tend ();
}
}
EOF
if { ac_try='${CC-cc} -mhtm -O2 -S conftest.c -o - | grep -w tbegin > /dev/null'
{ { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_try\""; } >&5
(eval $ac_try) 2>&5
ac_status=$?
printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
test $ac_status = 0; }; } ;
then
libc_cv_gcc_builtin_tbegin=yes
else
libc_cv_gcc_builtin_tbegin=no
fi
rm -f conftest* ;;
esac
fi
{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $libc_cv_gcc_builtin_tbegin" >&5
printf "%s\n" "$libc_cv_gcc_builtin_tbegin" >&6; }
if test "$libc_cv_gcc_builtin_tbegin" = no ; then
critic_missing="$critic_missing The used GCC has no support for __builtin_tbegin, which is needed for lock-elision on target S390."
fi
{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for S390 vector instruction support" >&5
printf %s "checking for S390 vector instruction support... " >&6; }


@@ -1,32 +1,6 @@
GLIBC_PROVIDES dnl See aclocal.m4 in the top level source directory.
# Local configure fragment for sysdeps/s390.
AC_CACHE_CHECK(for __builtin_tbegin, libc_cv_gcc_builtin_tbegin, [dnl
cat > conftest.c <<\EOF
#include <htmintrin.h>
void testtransaction ()
{
if (__builtin_tbegin (0) == _HTM_TBEGIN_STARTED)
{
__builtin_tend ();
}
}
EOF
dnl
dnl test, if the tbegin instruction is used by __builtin_tbegin
if AC_TRY_COMMAND([${CC-cc} -mhtm -O2 -S conftest.c -o - | grep -w tbegin > /dev/null]) ;
then
libc_cv_gcc_builtin_tbegin=yes
else
libc_cv_gcc_builtin_tbegin=no
fi
rm -f conftest* ])
if test "$libc_cv_gcc_builtin_tbegin" = no ; then
critic_missing="$critic_missing The used GCC has no support for __builtin_tbegin, which is needed for lock-elision on target S390."
fi
AC_CACHE_CHECK([for S390 vector instruction support], libc_cv_asm_s390_vx, [
AC_COMPILE_IFELSE([AC_LANG_SOURCE([[
void testvecinsn ()


@@ -32,7 +32,7 @@ struct __pthread_mutex_s
int __kind;
#if __WORDSIZE == 64
short __spins;
short __elision;
short __unused;
__pthread_list_t __list;
# define __PTHREAD_MUTEX_HAVE_PREV 1
#else
@@ -41,11 +41,10 @@ struct __pthread_mutex_s
{
struct
{
short __espins;
short __elision;
} _d;
# define __spins _d.__espins
# define __elision _d.__elision
short __data_spins;
short __data_unused;
} __data;
# define __spins __data.__data_spins
__pthread_slist_t __list;
};
# define __PTHREAD_MUTEX_HAVE_PREV 0


@@ -1,138 +0,0 @@
/* elision-conf.c: Lock elision tunable parameters.
Copyright (C) 2015-2025 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#include "config.h"
#include <pthreadP.h>
#include <elision-conf.h>
#include <unistd.h>
#include <ldsodefs.h>
#define TUNABLE_NAMESPACE elision
#include <elf/dl-tunables.h>
/* Reasonable initial tuning values, may be revised in the future.
This is a conservative initial value. */
struct elision_config __elision_aconf =
{
/* How many times to use a non-transactional lock after a transactional
failure has occurred because the lock is already acquired. Expressed
in number of lock acquisition attempts. */
.skip_lock_busy = 3,
/* How often to not attempt to use elision if a transaction aborted due
to reasons other than other threads' memory accesses. Expressed in
number of lock acquisition attempts. */
.skip_lock_internal_abort = 3,
/* How often to not attempt to use elision if a lock used up all retries
without success. Expressed in number of lock acquisition attempts. */
.skip_lock_out_of_tbegin_retries = 3,
/* How often we retry using elision if there is chance for the transaction
to finish execution (e.g., it wasn't aborted due to the lock being
already acquired. */
.try_tbegin = 3,
/* Same as SKIP_LOCK_INTERNAL_ABORT but for trylock. */
.skip_trylock_internal_abort = 3,
};
static inline void
__always_inline
do_set_elision_enable (int32_t elision_enable)
{
/* Enable elision if it's available in hardware. It's not necessary to check
if __libc_enable_secure isn't enabled since elision_enable will be set
according to the default, which is disabled. */
if (elision_enable == 1)
__pthread_force_elision = (GLRO (dl_hwcap2)
& PPC_FEATURE2_HAS_HTM) ? 1 : 0;
}
/* The pthread->elision_enable tunable is 0 or 1 indicating that elision
should be disabled or enabled respectively. The feature will only be used
if it's supported by the hardware. */
void
TUNABLE_CALLBACK (set_elision_enable) (tunable_val_t *valp)
{
int32_t elision_enable = (int32_t) valp->numval;
do_set_elision_enable (elision_enable);
}
#define TUNABLE_CALLBACK_FNDECL(__name, __type) \
static inline void \
__always_inline \
do_set_elision_ ## __name (__type value) \
{ \
__elision_aconf.__name = value; \
} \
void \
TUNABLE_CALLBACK (set_elision_ ## __name) (tunable_val_t *valp) \
{ \
__type value = (__type) (valp)->numval; \
do_set_elision_ ## __name (value); \
}
TUNABLE_CALLBACK_FNDECL (skip_lock_busy, int32_t);
TUNABLE_CALLBACK_FNDECL (skip_lock_internal_abort, int32_t);
TUNABLE_CALLBACK_FNDECL (skip_lock_out_of_tbegin_retries, int32_t);
TUNABLE_CALLBACK_FNDECL (try_tbegin, int32_t);
TUNABLE_CALLBACK_FNDECL (skip_trylock_internal_abort, int32_t);
/* Initialize elision. */
void
__lll_elision_init (void)
{
/* Elision depends on tunables and must be explicitly turned on by setting
the appropriate tunable on a supported platform. */
TUNABLE_GET (enable, int32_t,
TUNABLE_CALLBACK (set_elision_enable));
TUNABLE_GET (skip_lock_busy, int32_t,
TUNABLE_CALLBACK (set_elision_skip_lock_busy));
TUNABLE_GET (skip_lock_internal_abort, int32_t,
TUNABLE_CALLBACK (set_elision_skip_lock_internal_abort));
TUNABLE_GET (skip_lock_after_retries, int32_t,
TUNABLE_CALLBACK (set_elision_skip_lock_out_of_tbegin_retries));
TUNABLE_GET (tries, int32_t,
TUNABLE_CALLBACK (set_elision_try_tbegin));
TUNABLE_GET (skip_trylock_internal_abort, int32_t,
TUNABLE_CALLBACK (set_elision_skip_trylock_internal_abort));
/* Linux from 3.9 through 4.2 do not abort HTM transaction on syscalls,
instead it suspends the transaction and resumes it when returning to
usercode. The side-effects of the syscall will always remain visible,
even if the transaction is aborted. This is an issue when a transaction
is used along with futex syscall, on pthread_cond_wait for instance,
where futex might succeed but the transaction is rolled back leading
the condition variable object in an inconsistent state.
Glibc used to prevent it by always aborting a transaction before issuing
a syscall. Linux 4.2 also decided to abort active transaction in
syscalls which makes the glibc workaround superflours. Worse, glibc
transaction abortions leads to a performance issues on recent kernels.
So Lock Elision is just enabled when it has been explicitly set (either
by tunables of by a configure switch) and if kernel aborts HTM
transactions on syscalls (PPC_FEATURE2_HTM_NOSC) */
__pthread_force_elision = (__pthread_force_elision
&& GLRO (dl_hwcap2) & PPC_FEATURE2_HTM_NOSC);
if (!__pthread_force_elision)
__elision_aconf.try_tbegin = 0; /* Disable elision on rwlocks. */
}
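
For readability, each TUNABLE_CALLBACK_FNDECL invocation above expanded
mechanically to a setter pair; for skip_lock_busy:

  static inline void __always_inline
  do_set_elision_skip_lock_busy (int32_t value)
  {
    __elision_aconf.skip_lock_busy = value;
  }
  void
  TUNABLE_CALLBACK (set_elision_skip_lock_busy) (tunable_val_t *valp)
  {
    int32_t value = (int32_t) (valp)->numval;
    do_set_elision_skip_lock_busy (value);
  }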


@@ -1,40 +0,0 @@
/* elision-conf.h: Lock elision tunable parameters.
Copyright (C) 2015-2025 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#ifndef _ELISION_CONF_H
#define _ELISION_CONF_H 1
#include <pthread.h>
#include <time.h>
#define ENABLE_ELISION_SUPPORT 1
#define ELISION_UNLOCK_NEEDS_ADAPT_COUNT 1
/* Should make sure there is no false sharing on this. */
struct elision_config
{
int skip_lock_busy;
int skip_lock_internal_abort;
int skip_lock_out_of_tbegin_retries;
int try_tbegin;
int skip_trylock_internal_abort;
} __attribute__ ((__aligned__ (128)));
extern struct elision_config __elision_aconf attribute_hidden;
#endif


@@ -1,81 +0,0 @@
/* elision-lock.c: Elided pthread mutex lock.
Copyright (C) 2015-2025 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#include <stdio.h>
#include <pthread.h>
#include <pthreadP.h>
#include <lowlevellock.h>
#include <elision-conf.h>
#include "htm.h"
#ifndef EXTRAARG
# define EXTRAARG
#endif
#ifndef LLL_LOCK
# define LLL_LOCK(a,b) lll_lock(a,b), 0
#endif
#define aconf __elision_aconf
/* Adaptive lock using transactions.
By default the lock region is run as a transaction, and when it
aborts or the lock is busy the lock adapts itself. */
int
__lll_lock_elision (int *lock, short *adapt_count, EXTRAARG int pshared)
{
/* adapt_count is accessed concurrently but is just a hint. Thus,
use atomic accesses but relaxed MO is sufficient. */
if (atomic_load_relaxed (adapt_count) > 0)
{
goto use_lock;
}
for (int i = aconf.try_tbegin; i > 0; i--)
{
if (__libc_tbegin (0))
{
if (*lock == 0)
return 0;
/* Lock was busy. Fall back to normal locking. */
__libc_tabort (_ABORT_LOCK_BUSY);
}
else
{
/* A persistent failure indicates that a retry will probably
result in another failure. Use normal locking now and
for the next couple of calls. */
if (_TEXASRU_FAILURE_PERSISTENT (__builtin_get_texasru ()))
{
if (aconf.skip_lock_internal_abort > 0)
atomic_store_relaxed (adapt_count,
aconf.skip_lock_internal_abort);
goto use_lock;
}
}
}
/* Fall back to locks for a bit if retries have been exhausted.  */
if (aconf.try_tbegin > 0 && aconf.skip_lock_out_of_tbegin_retries > 0)
atomic_store_relaxed (adapt_count,
aconf.skip_lock_out_of_tbegin_retries);
use_lock:
return LLL_LOCK ((*lock), pshared);
}
libc_hidden_def (__lll_lock_elision)


@@ -1,29 +0,0 @@
/* elision-timed.c: Lock elision timed lock.
Copyright (C) 2015-2025 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#include <time.h>
#include <elision-conf.h>
#include "lowlevellock.h"
#include "futex-internal.h"
#define __lll_lock_elision __lll_clocklock_elision
#define EXTRAARG clockid_t clockid, const struct __timespec64 *t,
#undef LLL_LOCK
#define LLL_LOCK(a, b) __futex_clocklock64 (&(a), clockid, t, b)
#include "elision-lock.c"


@@ -1,70 +0,0 @@
/* elision-trylock.c: Lock eliding trylock for pthreads.
Copyright (C) 2015-2025 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#include <pthread.h>
#include <pthreadP.h>
#include <lowlevellock.h>
#include <elision-conf.h>
#include "htm.h"
#define aconf __elision_aconf
/* Try to elide a futex trylock. FUTEX is the futex variable. ADAPT_COUNT is
the adaptation counter in the mutex. */
int
__lll_trylock_elision (int *futex, short *adapt_count)
{
/* Implement POSIX semantics by forbidding nesting of elided trylocks.
The tabort below is a no-op when we are not inside an active transaction. */
__libc_tabort (_ABORT_NESTED_TRYLOCK);
/* Only try a transaction if it's worth it. */
if (atomic_load_relaxed (adapt_count) > 0)
{
goto use_lock;
}
if (__libc_tbegin (0))
{
if (*futex == 0)
return 0;
/* Lock was busy. This is never a nested transaction.
End it, and set the adapt count. */
__libc_tend (0);
if (aconf.skip_lock_busy > 0)
atomic_store_relaxed (adapt_count, aconf.skip_lock_busy);
}
else
{
if (_TEXASRU_FAILURE_PERSISTENT (__builtin_get_texasru ()))
{
/* A persistent failure indicates that a retry will probably
result in another failure. Use normal locking now and
for the next couple of calls. */
if (aconf.skip_trylock_internal_abort > 0)
atomic_store_relaxed (adapt_count,
aconf.skip_trylock_internal_abort);
}
}
use_lock:
return lll_trylock (*futex);
}
libc_hidden_def (__lll_trylock_elision)


@@ -1,44 +0,0 @@
/* elision-unlock.c: Commit an elided pthread lock.
Copyright (C) 2015-2025 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#include "pthreadP.h"
#include "lowlevellock.h"
#include "htm.h"
int
__lll_unlock_elision (int *lock, short *adapt_count, int pshared)
{
/* When the lock was free we're in a transaction. */
if (*lock == 0)
__libc_tend (0);
else
{
/* Update adapt_count in the critical section to prevent a
write-after-destroy error as mentioned in BZ 20822. The
following update of adapt_count has to be contained within
the critical region of the fall-back lock in order to not violate
the mutex destruction requirements. */
short __tmp = atomic_load_relaxed (adapt_count);
if (__tmp > 0)
atomic_store_relaxed (adapt_count, __tmp - 1);
lll_unlock ((*lock), pshared);
}
return 0;
}
libc_hidden_def (__lll_unlock_elision)
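
The BZ 20822 comment refers to the POSIX mutex-destruction requirement: as soon as the releasing store is visible, another thread's pthread_mutex_lock may succeed and that thread may destroy and free the mutex, so no store to the mutex object is permitted after the release. A hypothetical timeline (not glibc code) of the race the ordering above prevents:

/* Thread A                          Thread B
   pthread_mutex_lock (&m);
   ... critical section ...
   pthread_mutex_unlock (&m):
     update adapt_count  <- must happen before the releasing store
     releasing store     -> pthread_mutex_lock (&m) succeeds
                            pthread_mutex_unlock (&m);
                            pthread_mutex_destroy (&m);
                            free (object_containing_m);
   Any store to adapt_count after the releasing store could land in
   freed memory.  */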


@@ -1,171 +0,0 @@
/* Shared HTM header. Emulate transactional execution facility intrinsics for
compilers and assemblers that do not support the intrinsics and instructions
yet.
Copyright (C) 2015-2025 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#ifndef _HTM_H
#define _HTM_H 1
#ifdef __ASSEMBLER__
/* tbegin. */
.macro TBEGIN
.long 0x7c00051d
.endm
/* tend. 0 */
.macro TEND
.long 0x7c00055d
.endm
/* tabort. code */
.macro TABORT code
.byte 0x7c
.byte \code
.byte 0x07
.byte 0x1d
.endm
/*"TEXASR - Transaction EXception And Summary Register"
mfspr %dst,130 */
.macro TEXASR dst
mfspr \dst,130
.endm
#else
#include <bits/endian.h>
/* Official HTM intrinsics interface matching GCC, but works
on older GCC compatible compilers and binutils.
We should somehow detect if the compiler supports it, because
it may be able to generate slightly better code. */
#define TBEGIN ".long 0x7c00051d"
#define TEND ".long 0x7c00055d"
#if __BYTE_ORDER == __LITTLE_ENDIAN
# define TABORT ".byte 0x1d,0x07,%1,0x7c"
#else
# define TABORT ".byte 0x7c,%1,0x07,0x1d"
#endif
#define __force_inline inline __attribute__((__always_inline__))
#ifndef __HTM__
#define _TEXASRU_EXTRACT_BITS(TEXASR,BITNUM,SIZE) \
(((TEXASR) >> (31-(BITNUM))) & ((1<<(SIZE))-1))
#define _TEXASRU_FAILURE_PERSISTENT(TEXASRU) \
_TEXASRU_EXTRACT_BITS(TEXASRU, 7, 1)
#define _tbegin() \
({ unsigned int __ret; \
asm volatile ( \
TBEGIN "\t\n" \
"mfcr %0\t\n" \
"rlwinm %0,%0,3,1\t\n" \
"xori %0,%0,1\t\n" \
: "=r" (__ret) : \
: "cr0", "memory"); \
__ret; \
})
#define _tend() \
({ unsigned int __ret; \
asm volatile ( \
TEND "\t\n" \
"mfcr %0\t\n" \
"rlwinm %0,%0,3,1\t\n" \
"xori %0,%0,1\t\n" \
: "=r" (__ret) : \
: "cr0", "memory"); \
__ret; \
})
#define _tabort(__code) \
({ unsigned int __ret; \
asm volatile ( \
TABORT "\t\n" \
"mfcr %0\t\n" \
"rlwinm %0,%0,3,1\t\n" \
"xori %0,%0,1\t\n" \
: "=r" (__ret) : "r" (__code) \
: "cr0", "memory"); \
__ret; \
})
#define _texasru() \
({ unsigned long __ret; \
asm volatile ( \
"mfspr %0,131\t\n" \
: "=r" (__ret)); \
__ret; \
})
#define __libc_tbegin(tdb) _tbegin ()
#define __libc_tend(nested) _tend ()
#define __libc_tabort(abortcode) _tabort (abortcode)
#define __builtin_get_texasru() _texasru ()
#else
# include <htmintrin.h>
# ifdef __TM_FENCE__
/* New GCC behavior. */
# define __libc_tbegin(R) __builtin_tbegin (R)
# define __libc_tend(R) __builtin_tend (R)
# define __libc_tabort(R) __builtin_tabort (R)
# else
/* Work around an old GCC behavior. Earlier releases of GCC 4.9 and 5.0
did not treat __builtin_tbegin, __builtin_tend and
__builtin_tabort as compiler barriers, moving instructions into and
out of the transaction.
Remove this when glibc drops support for GCC 5.0. */
# define __libc_tbegin(R) \
({ __asm__ volatile("" ::: "memory"); \
unsigned int __ret = __builtin_tbegin (R); \
__asm__ volatile("" ::: "memory"); \
__ret; \
})
# define __libc_tabort(R) \
({ __asm__ volatile("" ::: "memory"); \
unsigned int __ret = __builtin_tabort (R); \
__asm__ volatile("" ::: "memory"); \
__ret; \
})
# define __libc_tend(R) \
({ __asm__ volatile("" ::: "memory"); \
unsigned int __ret = __builtin_tend (R); \
__asm__ volatile("" ::: "memory"); \
__ret; \
})
# endif /* __TM_FENCE__ */
#endif /* __HTM__ */
#endif /* __ASSEMBLER__ */
/* Definitions used for TEXASR Failure code (bits 0:7). If the failure
should be persistent, the abort code must be odd. 0xd0 through 0xff
are reserved for the kernel and potential hypervisor. */
#define _ABORT_PERSISTENT 0x01 /* An unspecified persistent abort. */
#define _ABORT_LOCK_BUSY 0x34 /* Busy lock, not persistent. */
#define _ABORT_NESTED_TRYLOCK (0x32 | _ABORT_PERSISTENT)
#define _ABORT_SYSCALL (0x30 | _ABORT_PERSISTENT)
#endif
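
As a worked example of the oddness rule: _ABORT_NESTED_TRYLOCK is 0x32 | 0x01 = 0x33 and _ABORT_SYSCALL is 0x30 | 0x01 = 0x31, both odd and therefore persistent, while _ABORT_LOCK_BUSY (0x34) is even and thus treated as transient.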


@@ -11,14 +11,6 @@ ifeq ($(subdir),stdlib)
gen-as-const-headers += ucontext_i.sym
endif
ifeq ($(subdir),nptl)
elision-CFLAGS = -mhtm -msoft-float
CFLAGS-elision-lock.c = $(elision-CFLAGS)
CFLAGS-elision-timed.c = $(elision-CFLAGS)
CFLAGS-elision-trylock.c = $(elision-CFLAGS)
CFLAGS-elision-unlock.c = $(elision-CFLAGS)
endif
ifeq ($(subdir),misc)
tests += tst-ptrace-singleblock
endif


@@ -1,118 +0,0 @@
/* Lock elision tunable parameters.
Copyright (C) 2014-2025 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#include <config.h>
#include <pthreadP.h>
#include <elision-conf.h>
#include <unistd.h>
#include <ldsodefs.h>
#include <sys/auxv.h>
#define TUNABLE_NAMESPACE elision
#include <elf/dl-tunables.h>
/* Reasonable initial tuning values, may be revised in the future.
This is a conservative initial value. */
struct elision_config __elision_aconf =
{
/* How often to not attempt to use elision if a transaction aborted
because the lock is already acquired. Expressed in number of lock
acquisition attempts. */
.skip_lock_busy = 3,
/* How often to not attempt to use elision if a transaction aborted due
to reasons other than other threads' memory accesses. Expressed in
number of lock acquisition attempts. */
.skip_lock_internal_abort = 3,
/* How often to not attempt to use elision if a lock used up all retries
without success. Expressed in number of lock acquisition attempts. */
.skip_lock_out_of_tbegin_retries = 3,
/* How often we try using elision if there is a chance for the transaction
to finish execution (e.g., it wasn't aborted due to the lock being
already acquired). */
.try_tbegin = 3,
/* Same as SKIP_LOCK_INTERNAL_ABORT but for trylock. */
.skip_trylock_internal_abort = 3,
};
static inline void
__always_inline
do_set_elision_enable (int32_t elision_enable)
{
/* Enable elision if it's available in hardware. There is no need to check
__libc_enable_secure here because elision_enable defaults to disabled. */
if (elision_enable == 1)
__pthread_force_elision = (GLRO (dl_hwcap) & HWCAP_S390_TE) ? 1 : 0;
}
/* The glibc.elision.enable tunable is 0 or 1, indicating that elision
should be disabled or enabled respectively. The feature is only used
if it's supported by the hardware. */
void
TUNABLE_CALLBACK (set_elision_enable) (tunable_val_t *valp)
{
int32_t elision_enable = (int32_t) valp->numval;
do_set_elision_enable (elision_enable);
}
#define TUNABLE_CALLBACK_FNDECL(__name, __type) \
static inline void \
__always_inline \
do_set_elision_ ## __name (__type value) \
{ \
__elision_aconf.__name = value; \
} \
void \
TUNABLE_CALLBACK (set_elision_ ## __name) (tunable_val_t *valp) \
{ \
__type value = (__type) (valp)->numval; \
do_set_elision_ ## __name (value); \
}
TUNABLE_CALLBACK_FNDECL (skip_lock_busy, int32_t);
TUNABLE_CALLBACK_FNDECL (skip_lock_internal_abort, int32_t);
TUNABLE_CALLBACK_FNDECL (skip_lock_out_of_tbegin_retries, int32_t);
TUNABLE_CALLBACK_FNDECL (try_tbegin, int32_t);
TUNABLE_CALLBACK_FNDECL (skip_trylock_internal_abort, int32_t);
/* Initialize elision. */
void
__lll_elision_init (void)
{
/* Elision depends on tunables and must be explicitly turned on by setting
the appropriate tunable on a supported platform. */
TUNABLE_GET (enable, int32_t,
TUNABLE_CALLBACK (set_elision_enable));
TUNABLE_GET (skip_lock_busy, int32_t,
TUNABLE_CALLBACK (set_elision_skip_lock_busy));
TUNABLE_GET (skip_lock_internal_abort, int32_t,
TUNABLE_CALLBACK (set_elision_skip_lock_internal_abort));
TUNABLE_GET (skip_lock_after_retries, int32_t,
TUNABLE_CALLBACK (set_elision_skip_lock_out_of_tbegin_retries));
TUNABLE_GET (tries, int32_t,
TUNABLE_CALLBACK (set_elision_try_tbegin));
TUNABLE_GET (skip_trylock_internal_abort, int32_t,
TUNABLE_CALLBACK (set_elision_skip_trylock_internal_abort));
if (!__pthread_force_elision)
__elision_aconf.try_tbegin = 0; /* Disable elision on rwlocks. */
}
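
For reference, a hand-expanded sketch of what one TUNABLE_CALLBACK_FNDECL invocation above generates; the TUNABLE_CALLBACK name mangling is glibc-internal, so it is left unexpanded here:

static inline void
__always_inline
do_set_elision_skip_lock_busy (int32_t value)
{
  __elision_aconf.skip_lock_busy = value;
}

void
TUNABLE_CALLBACK (set_elision_skip_lock_busy) (tunable_val_t *valp)
{
  int32_t value = (int32_t) (valp)->numval;
  do_set_elision_skip_lock_busy (value);
}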


@@ -1,40 +0,0 @@
/* Lock elision tunable parameters.
Copyright (C) 2014-2025 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#ifndef _ELISION_CONF_H
#define _ELISION_CONF_H 1
#include <pthread.h>
#include <time.h>
#define ENABLE_ELISION_SUPPORT 1
#define ELISION_UNLOCK_NEEDS_ADAPT_COUNT 1
/* Should make sure there is no false sharing on this. */
struct elision_config
{
int skip_lock_busy;
int skip_lock_internal_abort;
int skip_lock_out_of_tbegin_retries;
int try_tbegin;
int skip_trylock_internal_abort;
};
extern struct elision_config __elision_aconf attribute_hidden;
#endif


@@ -1,120 +0,0 @@
/* Elided pthread mutex lock.
Copyright (C) 2014-2025 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#include <pthread.h>
#include <pthreadP.h>
#include <lowlevellock.h>
#include <htm.h>
#include <elision-conf.h>
#include <stdint.h>
#ifndef EXTRAARG
#define EXTRAARG
#endif
#ifndef LLL_LOCK
#define LLL_LOCK(a,b) lll_lock(a,b), 0
#endif
#define aconf __elision_aconf
/* Adaptive lock using transactions.
By default the lock region is run as a transaction, and when it
aborts or the lock is busy the lock adapts itself. */
int
__lll_lock_elision (int *futex, short *adapt_count, EXTRAARG int private)
{
/* adapt_count can be accessed concurrently; these accesses can be both
inside of transactions (if critical sections are nested and the outer
critical section uses lock elision) and outside of transactions. Thus,
we need to use atomic accesses to avoid data races. However, the
value of adapt_count is just a hint, so relaxed MO accesses are
sufficient. */
if (atomic_load_relaxed (adapt_count) <= 0 && aconf.try_tbegin > 0)
{
/* Start a transaction and retry it automatically if it aborts with
_HTM_TBEGIN_TRANSIENT. This macro calls tbegin at most retry_cnt
+ 1 times; the second argument is the retry count. */
int status = __libc_tbegin_retry ((void *) 0, aconf.try_tbegin - 1);
if (__glibc_likely (status == _HTM_TBEGIN_STARTED))
{
/* Check the futex to make sure nobody has touched it in the
mean time. This forces the futex into the cache and makes
sure the transaction aborts if another thread acquires the lock
concurrently. */
if (__glibc_likely (atomic_load_relaxed (futex) == 0))
/* Lock was free. Return to user code in a transaction. */
return 0;
/* Lock was busy. Fall back to normal locking.
This can be the case if e.g. adapt_count was decremented to zero
by a former release and another thread has been woken up and
acquired it. */
if (__glibc_likely (__libc_tx_nesting_depth () <= 1))
{
/* In a non-nested transaction there is no need to abort,
which is expensive. Simply end the started transaction. */
__libc_tend ();
/* Don't try to use transactions for the next couple of times.
See above for why relaxed MO is sufficient. */
if (aconf.skip_lock_busy > 0)
atomic_store_relaxed (adapt_count, aconf.skip_lock_busy);
}
else /* nesting depth is > 1 */
{
/* A nested transaction will abort eventually because it
cannot make any progress before *futex changes back to 0.
So we may as well abort immediately.
This persistently aborts the outer transaction to force
the outer mutex to use the default lock instead of
retrying transactions until the outer mutex's try_tbegin
reaches zero.
The adapt_count of this inner mutex is not changed,
because using the default lock with the inner mutex
would abort the outer transaction. */
__libc_tabort (_HTM_FIRST_USER_ABORT_CODE | 1);
__builtin_unreachable ();
}
}
else if (status != _HTM_TBEGIN_TRANSIENT)
{
/* A persistent abort (cc 1 or 3) indicates that a retry is
probably futile. Use the normal locking now and for the
next couple of calls.
Be careful to avoid writing to the lock. See above for why
relaxed MO is sufficient. */
if (aconf.skip_lock_internal_abort > 0)
atomic_store_relaxed (adapt_count,
aconf.skip_lock_internal_abort);
}
else
{
/* The transaction failed for some retries with
_HTM_TBEGIN_TRANSIENT. Use the normal locking now and for the
next couple of calls. */
if (aconf.skip_lock_out_of_tbegin_retries > 0)
atomic_store_relaxed (adapt_count,
aconf.skip_lock_out_of_tbegin_retries);
}
}
/* Use normal locking as fallback path if the transaction does not
succeed. */
return LLL_LOCK ((*futex), private);
}
libc_hidden_def (__lll_lock_elision)


@@ -1,27 +0,0 @@
/* Lock elision timed lock.
Copyright (C) 2014-2025 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#include <time.h>
#include <elision-conf.h>
#include <lowlevellock.h>
#include "futex-internal.h"
#define __lll_lock_elision __lll_clocklock_elision
#define EXTRAARG clockid_t clockid, const struct __timespec64 *t,
#undef LLL_LOCK
#define LLL_LOCK(a, b) __futex_clocklock64 (&(a), clockid, t, b)
#include "elision-lock.c"


@@ -1,98 +0,0 @@
/* Elided pthread mutex trylock.
Copyright (C) 2014-2025 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#include <pthread.h>
#include <pthreadP.h>
#include <lowlevellock.h>
#include <htm.h>
#include <elision-conf.h>
#define aconf __elision_aconf
/* Try to elide a futex trylock. FUTEX is the futex variable. ADAPT_COUNT is
the adaptation counter in the mutex. */
int
__lll_trylock_elision (int *futex, short *adapt_count)
{
/* Implement POSIX semantics by forbidding nesting of elided trylocks.
After the abort the code is re-executed non-transactionally,
and if the lock was already locked an error is returned. */
if (__libc_tx_nesting_depth () > 0)
{
/* Note that this abort may terminate an outermost transaction that
was created outside glibc.
This persistently aborts the current transaction to force
the mutexes involved to use the default lock instead of retrying
transactions until their try_tbegin reaches zero.
*/
__libc_tabort (_HTM_FIRST_USER_ABORT_CODE | 1);
__builtin_unreachable ();
}
/* adapt_count can be accessed concurrently; these accesses can be both
inside of transactions (if critical sections are nested and the outer
critical section uses lock elision) and outside of transactions. Thus,
we need to use atomic accesses to avoid data races. However, the
value of adapt_count is just a hint, so relaxed MO accesses are
sufficient. */
if (atomic_load_relaxed (adapt_count) <= 0 && aconf.try_tbegin > 0)
{
int status = __libc_tbegin ((void *) 0);
if (__glibc_likely (status == _HTM_TBEGIN_STARTED))
{
/* Check the futex to make sure nobody has touched it in the
mean time. This forces the futex into the cache and makes
sure the transaction aborts if another thread acquires the lock
concurrently. */
if (__glibc_likely (atomic_load_relaxed (futex) == 0))
/* Lock was free. Return to user code in a transaction. */
return 0;
/* Lock was busy. Fall back to normal locking.
This can be the case if e.g. adapt_count was decremented to zero
by a former release and another thread has been woken up and
acquired it.
Since we are in a non-nested transaction there is no need to abort,
which is expensive. Simply end the started transaction. */
__libc_tend ();
/* Note: Changing the adapt_count here might abort a transaction on a
different CPU, but that could happen anyway when the futex is
acquired, so there's no need to check the nesting depth here.
See above for why relaxed MO is sufficient. */
if (aconf.skip_lock_busy > 0)
atomic_store_relaxed (adapt_count, aconf.skip_lock_busy);
}
else if (status != _HTM_TBEGIN_TRANSIENT)
{
/* A persistent abort (cc 1 or 3) indicates that a retry is
probably futile. Use the normal locking now and for the
next couple of calls.
Be careful to avoid writing to the lock. */
if (aconf.skip_trylock_internal_abort > 0)
*adapt_count = aconf.skip_trylock_internal_abort;
}
/* Could do some retries here. */
}
/* Use normal locking as fallback path if the transaction does not
succeed. */
return lll_trylock (*futex);
}
libc_hidden_def (__lll_trylock_elision)


@@ -1,62 +0,0 @@
/* Commit an elided pthread lock.
Copyright (C) 2014-2025 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#include <pthreadP.h>
#include <lowlevellock.h>
#include <htm.h>
int
__lll_unlock_elision(int *futex, short *adapt_count, int private)
{
/* If the lock is free, we elided the lock earlier. This does not
necessarily mean that we are in a transaction, because the user code may
have closed the transaction, but that is impossible to detect reliably.
Relaxed MO access to futex is sufficient because a correct program
will only release a lock it has acquired; therefore, it must either
have changed the futex word's value to something != 0 or it must have
used elision; these are actions by the same thread, so they are
sequenced before the relaxed load (and thus also happen before it).
Therefore, relaxed MO is sufficient. */
if (atomic_load_relaxed (futex) == 0)
{
__libc_tend ();
}
else
{
/* Update the adapt_count while unlocking before completing the critical
section. adapt_count is accessed concurrently outside of a
transaction or a critical section (e.g. in elision-lock.c). So we need
to use atomic accesses. However, the value of adapt_count is just a
hint, so relaxed MO accesses are sufficient.
If adapt_count were decremented while locking, multiple
CPUs trying to lock the acquired mutex would decrement adapt_count to
zero, and another CPU would try to start a transaction, which would be
immediately aborted as the mutex is locked.
The update of adapt_count is done before releasing the lock as POSIX'
mutex destruction requirements disallow accesses to the mutex after it
has been released and thus could have been acquired or destroyed by
another thread. */
short adapt_count_val = atomic_load_relaxed (adapt_count);
if (adapt_count_val > 0)
atomic_store_relaxed (adapt_count, adapt_count_val - 1);
lll_unlock ((*futex), private);
}
return 0;
}
libc_hidden_def (__lll_unlock_elision)


@@ -1,187 +0,0 @@
/* Shared HTM header. Work around false transactional execution facility
intrinsics.
Copyright (C) 2016-2025 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#ifndef _HTM_H
#define _HTM_H 1
#include <htmintrin.h>
#ifdef __s390x__
# define TX_FPRS_BYTES 64
# define TX_SAVE_FPRS \
" std %%f8, 0(%[R_FPRS])\n\t" \
" std %%f9, 8(%[R_FPRS])\n\t" \
" std %%f10, 16(%[R_FPRS])\n\t" \
" std %%f11, 24(%[R_FPRS])\n\t" \
" std %%f12, 32(%[R_FPRS])\n\t" \
" std %%f13, 40(%[R_FPRS])\n\t" \
" std %%f14, 48(%[R_FPRS])\n\t" \
" std %%f15, 56(%[R_FPRS])\n\t"
# define TX_RESTORE_FPRS \
" ld %%f8, 0(%[R_FPRS])\n\t" \
" ld %%f9, 8(%[R_FPRS])\n\t" \
" ld %%f10, 16(%[R_FPRS])\n\t" \
" ld %%f11, 24(%[R_FPRS])\n\t" \
" ld %%f12, 32(%[R_FPRS])\n\t" \
" ld %%f13, 40(%[R_FPRS])\n\t" \
" ld %%f14, 48(%[R_FPRS])\n\t" \
" ld %%f15, 56(%[R_FPRS])\n\t"
#else
# define TX_FPRS_BYTES 16
# define TX_SAVE_FPRS \
" std %%f4, 0(%[R_FPRS])\n\t" \
" std %%f6, 8(%[R_FPRS])\n\t"
# define TX_RESTORE_FPRS \
" ld %%f4, 0(%[R_FPRS])\n\t" \
" ld %%f6, 8(%[R_FPRS])\n\t"
#endif /* ! __s390x__ */
/* Use own inline assembly instead of __builtin_tbegin, as tbegin
has to filter program interruptions which can't be done with the builtin.
Now the fprs have to be saved / restored here, too.
The fpc is also not saved / restored with the builtin.
The used inline assembly does not clobber the volatile fprs / vrs!
Clobbering the latter ones would force the compiler to save / restore
the call-saved fprs as those overlap with the vrs, but they only need to
be restored if the transaction fails, not if it is successfully
started. Thus the user of the tbegin macros in this header file has to
compile the file / function with -msoft-float. It prevents gcc from using
fprs / vrs. */
#define __libc_tbegin(tdb) __libc_tbegin_base(tdb,,,)
#define __libc_tbegin_retry_output_regs , [R_TX_CNT] "+&d" (__tx_cnt)
#define __libc_tbegin_retry_input_regs(retry_cnt) , [R_RETRY] "d" (retry_cnt)
#define __libc_tbegin_retry_abort_path_insn \
/* If tbegin returned _HTM_TBEGIN_TRANSIENT, retry immediately so \
that max tbegin_cnt transactions are tried. Otherwise return and \
let the caller of this macro do the fallback path. */ \
" jnh 1f\n\t" /* cc 1/3: jump to fallback path. */ \
/* tbegin returned _HTM_TBEGIN_TRANSIENT: retry with transaction. */ \
" crje %[R_TX_CNT], %[R_RETRY], 1f\n\t" /* Reached max retries? */ \
" ahi %[R_TX_CNT], 1\n\t" \
" ppa %[R_TX_CNT], 0, 1\n\t" /* Transaction-Abort Assist. */ \
" j 2b\n\t" /* Loop to tbegin. */
/* Same as __libc_tbegin, except if tbegin aborts with _HTM_TBEGIN_TRANSIENT.
Then this macro restores the fpc and fprs and automatically retries up to
retry_cnt tbegins. Further saving of the state is omitted as it is already
saved. This macro calls tbegin at most retry_cnt + 1 times. */
#define __libc_tbegin_retry(tdb, retry_cnt) \
({ int __ret; \
int __tx_cnt = 0; \
__ret = __libc_tbegin_base(tdb, \
__libc_tbegin_retry_abort_path_insn, \
__libc_tbegin_retry_output_regs, \
__libc_tbegin_retry_input_regs(retry_cnt)); \
__ret; \
})
#define __libc_tbegin_base(tdb, abort_path_insn, output_regs, input_regs) \
({ int __ret; \
int __fpc; \
char __fprs[TX_FPRS_BYTES]; \
__asm__ __volatile__ (".machine push\n\t" \
".machinemode \"zarch_nohighgprs\"\n\t" \
".machine \"all\"\n\t" \
/* Save state at the outermost transaction. \
As extracting nesting depth is expensive \
on at least zEC12, save fprs at inner \
transactions, too. \
The fpc and fprs are saved here as they \
are not saved by tbegin. There exist no \
call-saved vrs, thus they are not saved \
here. */ \
" efpc %[R_FPC]\n\t" \
TX_SAVE_FPRS \
/* Begin transaction: save all gprs, allow \
ar modification and fp operations. Some \
program-interruptions (e.g. a null \
pointer access) are filtered and the \
transaction will abort. In this case \
the normal lock path will execute it \
again and result in a core dump which shows \
not the tbegin but the real executed \
instruction. \
However it is not guaranteed that this \
retry operates on the same data and thus \
may not end in a program interruption. \
Note: This could also be used to probe \
memory for being accessible! */ \
"2: tbegin 0, 0xFF0E\n\t" \
/* Branch away in abort case (this is the \
preferred sequence. See PoP in chapter 5 \
Transactional-Execution Facility \
Operation). */ \
" jnz 0f\n\t" \
/* Transaction has successfully started. */ \
" lhi %[R_RET], 0\n\t" \
" j 1f\n\t" \
/* Transaction has aborted. Now we are at \
the outermost transaction. Restore fprs \
and fpc. */ \
"0: ipm %[R_RET]\n\t" \
" srl %[R_RET], 28\n\t" \
" sfpc %[R_FPC]\n\t" \
TX_RESTORE_FPRS \
abort_path_insn \
"1:\n\t" \
".machine pop\n" \
: [R_RET] "=&d" (__ret), \
[R_FPC] "=&d" (__fpc) \
output_regs \
: [R_FPRS] "a" (__fprs) \
input_regs \
: "cc", "memory"); \
__ret; \
})
/* These builtins are usable in context of glibc lock elision code without any
changes. Use them. */
#define __libc_tend() \
({ __asm__ __volatile__ (".machine push\n\t" \
".machinemode \"zarch_nohighgprs\"\n\t" \
".machine \"all\"\n\t"); \
int __ret = __builtin_tend (); \
__asm__ __volatile__ (".machine pop"); \
__ret; \
})
#define __libc_tabort(abortcode) \
__asm__ __volatile__ (".machine push\n\t" \
".machinemode \"zarch_nohighgprs\"\n\t" \
".machine \"all\"\n\t"); \
__builtin_tabort (abortcode); \
__asm__ __volatile__ (".machine pop")
#define __libc_tx_nesting_depth() \
({ __asm__ __volatile__ (".machine push\n\t" \
".machinemode \"zarch_nohighgprs\"\n\t" \
".machine \"all\"\n\t"); \
int __ret = __builtin_tx_nesting_depth (); \
__asm__ __volatile__ (".machine pop"); \
__ret; \
})
#endif


@@ -11,13 +11,6 @@ ifeq ($(subdir),misc)
sysdep_headers += sys/elf.h sys/perm.h sys/reg.h sys/vm86.h sys/debugreg.h sys/io.h
endif
ifeq ($(subdir),nptl)
CFLAGS-elision-lock.c += -mrtm
CFLAGS-elision-unlock.c += -mrtm
CFLAGS-elision-timed.c += -mrtm
CFLAGS-elision-trylock.c += -mrtm
endif
ifeq ($(subdir),setjmp)
tests += tst-saved_mask-1
endif


@@ -1,109 +0,0 @@
/* elision-conf.c: Lock elision tunable parameters.
Copyright (C) 2013-2025 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#include "config.h"
#include <pthreadP.h>
#include <init-arch.h>
#include <elision-conf.h>
#include <unistd.h>
#define TUNABLE_NAMESPACE elision
#include <elf/dl-tunables.h>
/* Reasonable initial tuning values, may be revised in the future.
This is a conservative initial value. */
struct elision_config __elision_aconf =
{
/* How often to not attempt to use elision if a transaction aborted
because the lock is already acquired. Expressed in number of lock
acquisition attempts. */
.skip_lock_busy = 3,
/* How often to not attempt to use elision if a transaction aborted due
to reasons other than other threads' memory accesses. Expressed in
number of lock acquisition attempts. */
.skip_lock_internal_abort = 3,
/* How often we retry using elision if there is a chance for the transaction
to finish execution (e.g., it wasn't aborted due to the lock being
already acquired). */
.retry_try_xbegin = 3,
/* Same as SKIP_LOCK_INTERNAL_ABORT but for trylock. */
.skip_trylock_internal_abort = 3,
};
static __always_inline void
do_set_elision_enable (int32_t elision_enable)
{
/* Enable elision if it's available in hardware. There is no need to check
__libc_enable_secure here because elision_enable defaults to disabled. */
if (elision_enable == 1)
__pthread_force_elision = CPU_FEATURE_USABLE (RTM) ? 1 : 0;
}
/* The glibc.elision.enable tunable is 0 or 1, indicating that elision
should be disabled or enabled respectively. The feature is only used
if it's supported by the hardware. */
void
TUNABLE_CALLBACK (set_elision_enable) (tunable_val_t *valp)
{
int32_t elision_enable = (int32_t) valp->numval;
do_set_elision_enable (elision_enable);
}
#define TUNABLE_CALLBACK_FNDECL(__name, __type) \
static __always_inline void \
do_set_elision_ ## __name (__type value) \
{ \
__elision_aconf.__name = value; \
} \
void \
TUNABLE_CALLBACK (set_elision_ ## __name) (tunable_val_t *valp) \
{ \
__type value = (__type) (valp)->numval; \
do_set_elision_ ## __name (value); \
}
TUNABLE_CALLBACK_FNDECL (skip_lock_busy, int32_t);
TUNABLE_CALLBACK_FNDECL (skip_lock_internal_abort, int32_t);
TUNABLE_CALLBACK_FNDECL (retry_try_xbegin, int32_t);
TUNABLE_CALLBACK_FNDECL (skip_trylock_internal_abort, int32_t);
/* Initialize elision. */
void
__lll_elision_init (void)
{
/* Elision depends on tunables and must be explicitly turned on by setting
the appropriate tunable on a supported platform. */
TUNABLE_GET (enable, int32_t,
TUNABLE_CALLBACK (set_elision_enable));
TUNABLE_GET (skip_lock_busy, int32_t,
TUNABLE_CALLBACK (set_elision_skip_lock_busy));
TUNABLE_GET (skip_lock_internal_abort, int32_t,
TUNABLE_CALLBACK (set_elision_skip_lock_internal_abort));
TUNABLE_GET (tries, int32_t,
TUNABLE_CALLBACK (set_elision_retry_try_xbegin));
TUNABLE_GET (skip_trylock_internal_abort, int32_t,
TUNABLE_CALLBACK (set_elision_skip_trylock_internal_abort));
if (!__pthread_force_elision)
__elision_aconf.retry_try_xbegin = 0; /* Disable elision on rwlocks. */
}
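
In practice these callbacks were driven by the GLIBC_TUNABLES environment variable with colon-separated name=value pairs, e.g. GLIBC_TUNABLES=glibc.elision.enable=1:glibc.elision.tries=5. With this commit the whole glibc.elision.* tunable namespace is gone.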


@@ -1,39 +0,0 @@
/* elision-conf.h: Lock elision tunable parameters.
Copyright (C) 2013-2025 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#ifndef _ELISION_CONF_H
#define _ELISION_CONF_H 1
#include <pthread.h>
#include <time.h>
#define ENABLE_ELISION_SUPPORT 1
#define ELISION_UNLOCK_NEEDS_ADAPT_COUNT 0
/* Should make sure there is no false sharing on this. */
struct elision_config
{
int skip_lock_busy;
int skip_lock_internal_abort;
int retry_try_xbegin;
int skip_trylock_internal_abort;
};
extern struct elision_config __elision_aconf attribute_hidden;
#endif


@@ -1,102 +0,0 @@
/* elision-lock.c: Elided pthread mutex lock.
Copyright (C) 2011-2025 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#include <pthread.h>
#include "pthreadP.h"
#include "lowlevellock.h"
#include "hle.h"
#include <elision-conf.h>
#ifndef EXTRAARG
#define EXTRAARG
#endif
#ifndef LLL_LOCK
#define LLL_LOCK(a,b) lll_lock(a,b), 0
#endif
#define aconf __elision_aconf
/* Adaptive lock using transactions.
By default the lock region is run as a transaction, and when it
aborts or the lock is busy the lock adapts itself. */
int
__lll_lock_elision (int *futex, short *adapt_count, EXTRAARG int private)
{
/* adapt_count can be accessed concurrently; these accesses can be both
inside of transactions (if critical sections are nested and the outer
critical section uses lock elision) and outside of transactions. Thus,
we need to use atomic accesses to avoid data races. However, the
value of adapt_count is just a hint, so relaxed MO accesses are
sufficient. */
if (atomic_load_relaxed (adapt_count) <= 0)
{
unsigned status;
int try_xbegin;
for (try_xbegin = aconf.retry_try_xbegin;
try_xbegin > 0;
try_xbegin--)
{
if ((status = _xbegin()) == _XBEGIN_STARTED)
{
if (*futex == 0)
return 0;
/* Lock was busy. Fall back to normal locking.
Could also _xend here but xabort with 0xff code
is more visible in the profiler. */
_xabort (_ABORT_LOCK_BUSY);
}
if (!(status & _XABORT_RETRY))
{
if ((status & _XABORT_EXPLICIT)
&& _XABORT_CODE (status) == _ABORT_LOCK_BUSY)
{
/* Right now we skip here. Better would be to wait a bit
and retry. This likely needs some spinning. See
above for why relaxed MO is sufficient. */
if (atomic_load_relaxed (adapt_count)
!= aconf.skip_lock_busy)
atomic_store_relaxed (adapt_count, aconf.skip_lock_busy);
}
/* Internal abort. There is no chance for retry.
Use the normal locking now and for the next couple of calls.
Be careful to avoid writing to the lock. See above for why
relaxed MO is sufficient. */
else if (atomic_load_relaxed (adapt_count)
!= aconf.skip_lock_internal_abort)
atomic_store_relaxed (adapt_count,
aconf.skip_lock_internal_abort);
break;
}
}
}
else
{
/* Use a normal lock until the threshold counter runs out.
Lost updates possible. */
atomic_store_relaxed (adapt_count,
atomic_load_relaxed (adapt_count) - 1);
}
/* Use a normal lock here. */
return LLL_LOCK ((*futex), private);
}
libc_hidden_def (__lll_lock_elision)


@@ -1,27 +0,0 @@
/* elision-timed.c: Lock elision timed lock.
Copyright (C) 2013-2025 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#include <time.h>
#include <elision-conf.h>
#include "lowlevellock.h"
#include "futex-internal.h"
#define __lll_lock_elision __lll_clocklock_elision
#define EXTRAARG clockid_t clockid, const struct __timespec64 *t,
#undef LLL_LOCK
#define LLL_LOCK(a, b) __futex_clocklock64 (&(a), clockid, t, b)
#include "elision-lock.c"


@@ -1,76 +0,0 @@
/* elision-trylock.c: Lock eliding trylock for pthreads.
Copyright (C) 2013-2025 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#include <pthread.h>
#include <pthreadP.h>
#include <lowlevellock.h>
#include "hle.h"
#include <elision-conf.h>
#define aconf __elision_aconf
/* Try to elide a futex trylock. FUTEX is the futex variable. ADAPT_COUNT is
the adaptation counter in the mutex. */
int
__lll_trylock_elision (int *futex, short *adapt_count)
{
/* Implement POSIX semantics by forbidding nested
trylocks. After the abort the code is re-executed
non-transactionally, and if the lock was already locked
an error is returned. */
_xabort (_ABORT_NESTED_TRYLOCK);
/* Only try a transaction if it's worth it. See __lll_lock_elision for
why we need atomic accesses. Relaxed MO is sufficient because this is
just a hint. */
if (atomic_load_relaxed (adapt_count) <= 0)
{
unsigned status;
if ((status = _xbegin()) == _XBEGIN_STARTED)
{
if (*futex == 0)
return 0;
/* Lock was busy. Fall back to normal locking.
Could also _xend here but xabort with 0xff code
is more visible in the profiler. */
_xabort (_ABORT_LOCK_BUSY);
}
if (!(status & _XABORT_RETRY))
{
/* Internal abort. No chance for retry. For future
locks don't try speculation for some time. See above for MO. */
if (atomic_load_relaxed (adapt_count)
!= aconf.skip_lock_internal_abort)
atomic_store_relaxed (adapt_count, aconf.skip_lock_internal_abort);
}
/* Could do some retries here. */
}
else
{
/* Lost updates are possible but harmless (see above). */
atomic_store_relaxed (adapt_count,
atomic_load_relaxed (adapt_count) - 1);
}
return lll_trylock (*futex);
}
libc_hidden_def (__lll_trylock_elision)


@@ -1,34 +0,0 @@
/* elision-unlock.c: Commit an elided pthread lock.
Copyright (C) 2013-2025 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#include "pthreadP.h"
#include "lowlevellock.h"
#include "hle.h"
int
__lll_unlock_elision(int *lock, int private)
{
/* When the lock was free we're in a transaction.
If you crash here, you unlocked a free lock. */
if (*lock == 0)
_xend();
else
lll_unlock ((*lock), private);
return 0;
}
libc_hidden_def (__lll_unlock_elision)


@@ -1,11 +0,0 @@
/* Shared RTM header. */
#ifndef _HLE_H
#define _HLE_H 1
#include <x86intrin.h>
#define _ABORT_LOCK_BUSY 0xff
#define _ABORT_LOCK_IS_LOCKED 0xfe
#define _ABORT_NESTED_TRYLOCK 0xfd
#endif
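
This header only pulls in the compiler's RTM intrinsics via <x86intrin.h> and defines the abort codes. A minimal, hypothetical sketch of how those primitives form an elided critical section; it must be built with -mrtm and run on RTM-capable hardware, and lock_word and the fallback path are stand-ins:

#include <immintrin.h>

static int lock_word;  /* 0 = free; nonzero = taken.  */

static int
try_elided_section (void)
{
  unsigned int status = _xbegin ();
  if (status == _XBEGIN_STARTED)
    {
      if (lock_word != 0)
        /* Lock busy: abort, like _ABORT_LOCK_BUSY above.  */
        _xabort (0xff);
      /* ... the critical section runs transactionally here ...  */
      _xend ();
      return 1;  /* Elided.  */
    }
  /* Transaction aborted; the caller takes the real lock instead.  */
  return 0;
}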


@@ -1,119 +0,0 @@
/* elide.h: Generic lock elision support.
Copyright (C) 2014-2025 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#ifndef ELIDE_H
#define ELIDE_H 1
#include <hle.h>
#include <elision-conf.h>
#include <atomic.h>
/* Adapt elision with ADAPT_COUNT and STATUS and decide retries. */
static inline bool
elision_adapt(signed char *adapt_count, unsigned int status)
{
if (status & _XABORT_RETRY)
return false;
if ((status & _XABORT_EXPLICIT)
&& _XABORT_CODE (status) == _ABORT_LOCK_BUSY)
{
/* Right now we skip here. Better would be to wait a bit
and retry. This likely needs some spinning. Be careful
to avoid writing the lock.
Using relaxed MO and separate atomic accesses is sufficient because
adapt_count is just a hint. */
if (atomic_load_relaxed (adapt_count) != __elision_aconf.skip_lock_busy)
atomic_store_relaxed (adapt_count, __elision_aconf.skip_lock_busy);
}
/* Internal abort. There is no chance for retry.
Use the normal locking and next time use lock.
Be careful to avoid writing to the lock. See above for MO. */
else if (atomic_load_relaxed (adapt_count)
!= __elision_aconf.skip_lock_internal_abort)
atomic_store_relaxed (adapt_count,
__elision_aconf.skip_lock_internal_abort);
return true;
}
/* Returns true if the lock defined by IS_LOCK_FREE was elided;
IS_LOCK_FREE must be evaluated inside the transaction.
ADAPT_COUNT is a per-lock state variable; it must be accessed atomically
to avoid data races but is just a hint, so using relaxed MO and separate
atomic loads and stores instead of atomic read-modify-write operations is
sufficient. */
#define ELIDE_LOCK(adapt_count, is_lock_free) \
({ \
int ret = 0; \
\
if (atomic_load_relaxed (&(adapt_count)) <= 0) \
{ \
for (int i = __elision_aconf.retry_try_xbegin; i > 0; i--) \
{ \
unsigned int status; \
if ((status = _xbegin ()) == _XBEGIN_STARTED) \
{ \
if (is_lock_free) \
{ \
ret = 1; \
break; \
} \
_xabort (_ABORT_LOCK_BUSY); \
} \
if (!elision_adapt (&(adapt_count), status)) \
break; \
} \
} \
else \
atomic_store_relaxed (&(adapt_count), \
atomic_load_relaxed (&(adapt_count)) - 1); \
ret; \
})
/* Returns true if lock defined by IS_LOCK_FREE was try-elided.
ADAPT_COUNT is a per-lock state variable. */
#define ELIDE_TRYLOCK(adapt_count, is_lock_free, write) ({ \
int ret = 0; \
if (__elision_aconf.retry_try_xbegin > 0) \
{ \
if (write) \
_xabort (_ABORT_NESTED_TRYLOCK); \
ret = ELIDE_LOCK (adapt_count, is_lock_free); \
} \
ret; \
})
/* Returns true if lock defined by IS_LOCK_FREE was elided. The call
to _xend crashes if the application incorrectly tries to unlock a
lock which has not been locked. */
#define ELIDE_UNLOCK(is_lock_free) \
({ \
int ret = 0; \
if (is_lock_free) \
{ \
_xend (); \
ret = 1; \
} \
ret; \
})
#endif
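
A hypothetical caller of these macros, roughly following how glibc's rwlocks once used them; my_lock and my_unlock stand in for the real fallback lock implementation:

extern void my_lock (int *lock_word);
extern void my_unlock (int *lock_word);

static signed char adapt_count;
static int lock_word;  /* 0 = free.  */

void
elided_lock (void)
{
  /* lock_word == 0 is evaluated inside the transaction.  */
  if (!ELIDE_LOCK (adapt_count, lock_word == 0))
    my_lock (&lock_word);  /* Elision failed; take the lock for real.  */
}

void
elided_unlock (void)
{
  /* _xend if the lock was elided, otherwise a real unlock.  */
  if (!ELIDE_UNLOCK (lock_word == 0))
    my_unlock (&lock_word);
}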


@@ -32,7 +32,7 @@ struct __pthread_mutex_s
int __kind;
#ifdef __x86_64__
short __spins;
short __elision;
short __unused;
__pthread_list_t __list;
# define __PTHREAD_MUTEX_HAVE_PREV 1
#else
@@ -41,11 +41,10 @@ struct __pthread_mutex_s
{
struct
{
short __espins;
short __eelision;
# define __spins __elision_data.__espins
# define __elision __elision_data.__eelision
} __elision_data;
short __data_spins;
short __data_unused;
# define __spins __data.__data_spins
} __data;
__pthread_slist_t __list;
};
# define __PTHREAD_MUTEX_HAVE_PREV 0
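
Renaming the x86_64 __elision field to __unused (and the i386 __elision_data wrapper to __data) must leave the layout untouched, because pthread_mutex_t is part of the ABI. A sanity check along these lines, using the real __SIZEOF_PTHREAD_MUTEX_T constant, would catch an accidental size change:

#include <pthread.h>

_Static_assert (sizeof (pthread_mutex_t) == __SIZEOF_PTHREAD_MUTEX_T,
                "pthread_mutex_t layout is part of the ABI");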


@@ -31,14 +31,7 @@ struct __pthread_rwlock_arch_t
#ifdef __x86_64__
int __cur_writer;
int __shared;
signed char __rwelision;
# ifdef __ILP32__
unsigned char __pad1[3];
# define __PTHREAD_RWLOCK_ELISION_EXTRA 0, { 0, 0, 0 }
# else
unsigned char __pad1[7];
# define __PTHREAD_RWLOCK_ELISION_EXTRA 0, { 0, 0, 0, 0, 0, 0, 0 }
# endif
unsigned long int __pad1;
unsigned long int __pad2;
/* FLAGS must stay at this position in the structure to maintain
binary compatibility. */
@@ -48,7 +41,7 @@ struct __pthread_rwlock_arch_t
binary compatibility. */
unsigned char __flags;
unsigned char __shared;
signed char __rwelision;
unsigned char __pad1;
unsigned char __pad2;
int __cur_writer;
#endif
@@ -56,7 +49,7 @@ struct __pthread_rwlock_arch_t
#ifdef __x86_64__
# define __PTHREAD_RWLOCK_INITIALIZER(__flags) \
0, 0, 0, 0, 0, 0, 0, 0, __PTHREAD_RWLOCK_ELISION_EXTRA, 0, __flags
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, __flags
#else
# define __PTHREAD_RWLOCK_INITIALIZER(__flags) \
0, 0, 0, 0, 0, 0, __flags, 0, 0, 0, 0