New pthread rwlock that is more scalable.

This replaces the pthread rwlock with a new implementation that uses a
more scalable algorithm (primarily through not using a critical section
anymore to make state changes).  The fast path for rdlock acquisition and
release is now basically a single atomic read-modify-write or CAS and a few
branches.  See nptl/pthread_rwlock_common.c for details.
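
For orientation, here is a minimal, self-contained sketch of that kind of
fast path, assuming a single state word that packs a reader count together
with a write-phase bit.  The names and layout below are hypothetical and
heavily simplified; they are not glibc's, and the real algorithm (including
futex waiting, phase handover, and overflow handling) lives in
nptl/pthread_rwlock_common.c as added by this commit.

/* Illustration only: a toy reader/writer state word, not glibc's
   pthread_rwlock_t.  All names here are hypothetical.  */
#include <stdatomic.h>
#include <stdbool.h>

#define TOY_WRPHASE 1u          /* Low bit set: lock is in a write phase.  */
#define TOY_READER  2u          /* Readers are counted from bit 1 upward.  */

struct toy_rwlock
{
  atomic_uint state;            /* Packed reader count plus phase bit.  */
};

/* Reader fast path: one atomic read-modify-write plus a branch.  */
static bool
toy_rdlock_fast (struct toy_rwlock *l)
{
  /* Register as a reader and observe the phase bit in one step.  */
  unsigned int prev = atomic_fetch_add_explicit (&l->state, TOY_READER,
                                                 memory_order_acquire);
  if ((prev & TOY_WRPHASE) == 0)
    return true;                /* Read phase: the read lock is acquired.  */
  /* Write phase: undo the registration; a slow path (not shown) would
     block on a futex and retry.  */
  atomic_fetch_sub_explicit (&l->state, TOY_READER, memory_order_relaxed);
  return false;
}

/* Reader unlock fast path: likewise a single read-modify-write; the real
   code additionally decides here whether the last reader must wake a
   blocked writer.  */
static void
toy_rdunlock_fast (struct toy_rwlock *l)
{
  atomic_fetch_sub_explicit (&l->state, TOY_READER, memory_order_release);
}

The point of the sketch is that acquire and release are each a single
fetch-add/fetch-sub plus a branch, with everything slower deferred to a
separate slow path; that is what removes the per-lock critical section for
uncontended readers.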

	* nptl/DESIGN-rwlock.txt: Remove.
	* nptl/lowlevelrwlock.sym: Remove.
	* nptl/Makefile: Add new tests.
	* nptl/pthread_rwlock_common.c: New file.  Contains the new rwlock.
	* nptl/pthreadP.h (PTHREAD_RWLOCK_PREFER_READER_P): Remove.
	(PTHREAD_RWLOCK_WRPHASE, PTHREAD_RWLOCK_WRLOCKED,
	PTHREAD_RWLOCK_RWAITING, PTHREAD_RWLOCK_READER_SHIFT,
	PTHREAD_RWLOCK_READER_OVERFLOW, PTHREAD_RWLOCK_WRHANDOVER,
	PTHREAD_RWLOCK_FUTEX_USED): New.
	* nptl/pthread_rwlock_init.c (__pthread_rwlock_init): Adapt to new
	implementation.
	* nptl/pthread_rwlock_rdlock.c (__pthread_rwlock_rdlock_slow): Remove.
	(__pthread_rwlock_rdlock): Adapt.
	* nptl/pthread_rwlock_timedrdlock.c
	(pthread_rwlock_timedrdlock): Adapt.
	* nptl/pthread_rwlock_timedwrlock.c
	(pthread_rwlock_timedwrlock): Adapt.
	* nptl/pthread_rwlock_trywrlock.c (pthread_rwlock_trywrlock): Adapt.
	* nptl/pthread_rwlock_tryrdlock.c (pthread_rwlock_tryrdlock): Adapt.
	* nptl/pthread_rwlock_unlock.c (pthread_rwlock_unlock): Adapt.
	* nptl/pthread_rwlock_wrlock.c (__pthread_rwlock_wrlock_slow): Remove.
	(__pthread_rwlock_wrlock): Adapt.
	* nptl/tst-rwlock10.c: Adapt.
	* nptl/tst-rwlock11.c: Adapt.
	* nptl/tst-rwlock17.c: New file.
	* nptl/tst-rwlock18.c: New file.
	* nptl/tst-rwlock19.c: New file.
	* nptl/tst-rwlock2b.c: New file.
	* nptl/tst-rwlock8.c: Adapt.
	* nptl/tst-rwlock9.c: Adapt.
	* sysdeps/aarch64/nptl/bits/pthreadtypes.h (pthread_rwlock_t): Adapt.
	* sysdeps/arm/nptl/bits/pthreadtypes.h (pthread_rwlock_t): Adapt.
	* sysdeps/hppa/nptl/bits/pthreadtypes.h (pthread_rwlock_t): Adapt.
	* sysdeps/ia64/nptl/bits/pthreadtypes.h (pthread_rwlock_t): Adapt.
	* sysdeps/m68k/nptl/bits/pthreadtypes.h (pthread_rwlock_t): Adapt.
	* sysdeps/microblaze/nptl/bits/pthreadtypes.h (pthread_rwlock_t): Adapt.
	* sysdeps/mips/nptl/bits/pthreadtypes.h (pthread_rwlock_t): Adapt.
	* sysdeps/nios2/nptl/bits/pthreadtypes.h (pthread_rwlock_t): Adapt.
	* sysdeps/s390/nptl/bits/pthreadtypes.h (pthread_rwlock_t): Adapt.
	* sysdeps/sh/nptl/bits/pthreadtypes.h (pthread_rwlock_t): Adapt.
	* sysdeps/sparc/nptl/bits/pthreadtypes.h (pthread_rwlock_t): Adapt.
	* sysdeps/tile/nptl/bits/pthreadtypes.h (pthread_rwlock_t): Adapt.
	* sysdeps/unix/sysv/linux/alpha/bits/pthreadtypes.h
	(pthread_rwlock_t): Adapt.
	* sysdeps/unix/sysv/linux/powerpc/bits/pthreadtypes.h
	(pthread_rwlock_t): Adapt.
	* sysdeps/x86/bits/pthreadtypes.h (pthread_rwlock_t): Adapt.
	* nptl/nptl-printers.py: Adapt.
	* nptl/nptl_lock_constants.pysym: Adapt.
	* nptl/test-rwlock-printers.py: Adapt.
	* nptl/test-rwlockattr-printers.c: Adapt.
	* nptl/test-rwlockattr-printers.py: Adapt.
Author: Torvald Riegel
Date:   2014-05-22 16:00:12 +02:00
Parent: fbb31e20bc
Commit: cc25c8b4c1

42 changed files with 1548 additions and 915 deletions


nptl/pthread_rwlock_rdlock.c
@@ -16,165 +16,17 @@
    License along with the GNU C Library; if not, see
    <http://www.gnu.org/licenses/>.  */
 
-#include <errno.h>
-#include <sysdep.h>
-#include <lowlevellock.h>
-#include <futex-internal.h>
-#include <pthread.h>
-#include <pthreadP.h>
-#include <stap-probe.h>
-#include <elide.h>
-#include <stdbool.h>
-
-/* Acquire read lock for RWLOCK.  Slow path.  */
-static int __attribute__((noinline))
-__pthread_rwlock_rdlock_slow (pthread_rwlock_t *rwlock)
-{
-  int result = 0;
-  bool wake = false;
-  int futex_shared =
-      rwlock->__data.__shared == LLL_PRIVATE ? FUTEX_PRIVATE : FUTEX_SHARED;
-
-  /* Lock is taken in caller.  */
-
-  while (1)
-    {
-      /* Make sure we are not holding the rwlock as a writer.  This is
-         a deadlock situation we recognize and report.  */
-      if (__builtin_expect (rwlock->__data.__writer
-                            == THREAD_GETMEM (THREAD_SELF, tid), 0))
-        {
-          result = EDEADLK;
-          break;
-        }
-
-      /* Remember that we are a reader.  */
-      if (__glibc_unlikely (++rwlock->__data.__nr_readers_queued == 0))
-        {
-          /* Overflow on number of queued readers.  */
-          --rwlock->__data.__nr_readers_queued;
-          result = EAGAIN;
-          break;
-        }
-
-      int waitval = rwlock->__data.__readers_wakeup;
-
-      /* Free the lock.  */
-      lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
-
-      /* Wait for the writer to finish.  We do not check the return value
-         because we decide how to continue based on the state of the rwlock.  */
-      futex_wait_simple (&rwlock->__data.__readers_wakeup, waitval,
-                         futex_shared);
-
-      /* Get the lock.  */
-      lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
-
-      --rwlock->__data.__nr_readers_queued;
-
-      /* Get the rwlock if there is no writer...  */
-      if (rwlock->__data.__writer == 0
-          /* ...and if either no writer is waiting or we prefer readers.  */
-          && (!rwlock->__data.__nr_writers_queued
-              || PTHREAD_RWLOCK_PREFER_READER_P (rwlock)))
-        {
-          /* Increment the reader counter.  Avoid overflow.  */
-          if (__glibc_unlikely (++rwlock->__data.__nr_readers == 0))
-            {
-              /* Overflow on number of readers.  */
-              --rwlock->__data.__nr_readers;
-              result = EAGAIN;
-            }
-          else
-            {
-              LIBC_PROBE (rdlock_acquire_read, 1, rwlock);
-              /* See pthread_rwlock_rdlock.  */
-              if (rwlock->__data.__nr_readers == 1
-                  && rwlock->__data.__nr_readers_queued > 0
-                  && rwlock->__data.__nr_writers_queued > 0)
-                {
-                  ++rwlock->__data.__readers_wakeup;
-                  wake = true;
-                }
-            }
-          break;
-        }
-    }
-
-  /* We are done, free the lock.  */
-  lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
-
-  if (wake)
-    futex_wake (&rwlock->__data.__readers_wakeup, INT_MAX, futex_shared);
-
-  return result;
-}
-
-/* Fast path of acquiring read lock on RWLOCK.  */
+#include "pthread_rwlock_common.c"
+
+/* See pthread_rwlock_common.c.  */
 int
 __pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
 {
-  int result = 0;
-  bool wake = false;
-  int futex_shared =
-      rwlock->__data.__shared == LLL_PRIVATE ? FUTEX_PRIVATE : FUTEX_SHARED;
-
   LIBC_PROBE (rdlock_entry, 1, rwlock);
 
-  if (ELIDE_LOCK (rwlock->__data.__rwelision,
-                  rwlock->__data.__lock == 0
-                  && rwlock->__data.__writer == 0
-                  && rwlock->__data.__nr_readers == 0))
-    return 0;
-
-  /* Make sure we are alone.  */
-  lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
-
-  /* Get the rwlock if there is no writer...  */
-  if (rwlock->__data.__writer == 0
-      /* ...and if either no writer is waiting or we prefer readers.  */
-      && (!rwlock->__data.__nr_writers_queued
-          || PTHREAD_RWLOCK_PREFER_READER_P (rwlock)))
-    {
-      /* Increment the reader counter.  Avoid overflow.  */
-      if (__glibc_unlikely (++rwlock->__data.__nr_readers == 0))
-        {
-          /* Overflow on number of readers.  */
-          --rwlock->__data.__nr_readers;
-          result = EAGAIN;
-        }
-      else
-        {
-          LIBC_PROBE (rdlock_acquire_read, 1, rwlock);
-          /* If we are the first reader, and there are blocked readers and
-             writers (which we don't prefer, see above), then it can be the
-             case that we stole the lock from a writer that was already woken
-             to acquire it.  That means that we need to take over the writer's
-             responsibility to wake all readers (see pthread_rwlock_unlock).
-             Thus, wake all readers in this case.  */
-          if (rwlock->__data.__nr_readers == 1
-              && rwlock->__data.__nr_readers_queued > 0
-              && rwlock->__data.__nr_writers_queued > 0)
-            {
-              ++rwlock->__data.__readers_wakeup;
-              wake = true;
-            }
-        }
-
-      /* We are done, free the lock.  */
-      lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
-
-      if (wake)
-        futex_wake (&rwlock->__data.__readers_wakeup, INT_MAX, futex_shared);
-
-      return result;
-    }
-
-  return __pthread_rwlock_rdlock_slow (rwlock);
+  int result = __pthread_rwlock_rdlock_full (rwlock, NULL);
+  LIBC_PROBE (rdlock_acquire_read, 1, rwlock);
+  return result;
 }
 weak_alias (__pthread_rwlock_rdlock, pthread_rwlock_rdlock)