
htl: Add pshared semaphore support

The implementation is extremely similar to the nptl implementation, but
with slight differences in the futex interface. This fixes some of BZ
25521.
Samuel Thibault
2020-12-13 15:14:40 +00:00
parent 644d98ec4d
commit c8f9421298
11 changed files with 286 additions and 185 deletions

htl/Makefile

@@ -130,7 +130,7 @@ libpthread-routines := pt-attr pt-attr-destroy pt-attr-getdetachstate \
 	\
 	sem-close sem-destroy sem-getvalue sem-init sem-open \
 	sem-post sem-timedwait sem-trywait sem-unlink \
-	sem-wait \
+	sem-wait sem-waitfast \
 	\
 	shm-directory \
 	\

sysdeps/htl/pt-internal.h

@@ -331,4 +331,37 @@ extern const struct __pthread_rwlockattr __pthread_default_rwlockattr;
 /* Default condition attributes.  */
 extern const struct __pthread_condattr __pthread_default_condattr;
 
+/* Semaphore encoding.
+   See nptl implementation for the details.  */
+struct new_sem
+{
+#if __HAVE_64B_ATOMICS
+  /* The data field holds both value (in the least-significant 32 bits) and
+     nwaiters.  */
+# if __BYTE_ORDER == __LITTLE_ENDIAN
+#  define SEM_VALUE_OFFSET 0
+# elif __BYTE_ORDER == __BIG_ENDIAN
+#  define SEM_VALUE_OFFSET 1
+# else
+#  error Unsupported byte order.
+# endif
+# define SEM_NWAITERS_SHIFT 32
+# define SEM_VALUE_MASK (~(unsigned int)0)
+  uint64_t data;
+  int pshared;
+#define __SEMAPHORE_INITIALIZER(value, pshared) \
+  { (value), (pshared) }
+#else
+# define SEM_VALUE_SHIFT 1
+# define SEM_NWAITERS_MASK ((unsigned int)1)
+  unsigned int value;
+  unsigned int nwaiters;
+  int pshared;
+#define __SEMAPHORE_INITIALIZER(value, pshared) \
+  { (value) << SEM_VALUE_SHIFT, 0, (pshared) }
+#endif
+};
+
+extern int __sem_waitfast (struct new_sem *isem, int definitive_result);
+
 #endif /* pt-internal.h */
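
For readers unfamiliar with the nptl encoding this struct borrows: on targets with 64-bit atomics, the value and the waiter count share a single uint64_t, so both can be read and updated in one atomic operation. A standalone sketch of the packing arithmetic (illustration only, not part of the commit):

#include <stdint.h>
#include <stdio.h>

#define SEM_NWAITERS_SHIFT 32
#define SEM_VALUE_MASK (~(unsigned int)0)

int
main (void)
{
  uint64_t data = 3;                           /* value = 3, nwaiters = 0 */

  data += (uint64_t) 1 << SEM_NWAITERS_SHIFT;  /* a waiter registers */
  data -= 1;                                   /* the waiter takes a token */

  printf ("value=%u nwaiters=%u\n",
          (unsigned int) (data & SEM_VALUE_MASK),
          (unsigned int) (data >> SEM_NWAITERS_SHIFT));
  /* Prints: value=2 nwaiters=1 */
  return 0;
}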

sysdeps/htl/bits/semaphore.h

@@ -27,21 +27,15 @@
 
 #include <bits/pthread.h>
 
 /* User visible part of a semaphore.  */
-struct __semaphore
-{
-  __pthread_spinlock_t __lock;
-  struct __pthread *__queue;
-  int __pshared;
-  int __value;
-  void *__data;
-};
 
-typedef struct __semaphore sem_t;
+#define __SIZEOF_SEM_T 20
+
+typedef union
+{
+  char __size[__SIZEOF_SEM_T];
+  long int __align;
+} sem_t;
 
 #define SEM_FAILED ((void *) 0)
 
-/* Initializer for a semaphore.  */
-#define __SEMAPHORE_INITIALIZER(pshared, value) \
-  { __PTHREAD_SPIN_LOCK_INITIALIZER, NULL, (pshared), (value), NULL }
-
 #endif /* bits/semaphore.h */
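
sem_t becomes an opaque 20-byte blob; correctness depends on struct new_sem fitting inside it on both layouts. A hedged compile-time check, using local replicas of the two layouts rather than the internal header (sizes assume typical 32/64-bit ABIs):

#include <stdint.h>

#define __SIZEOF_SEM_T 20

struct new_sem_64b              /* __HAVE_64B_ATOMICS layout: 8 + 4 bytes */
{
  uint64_t data;
  int pshared;
};

struct new_sem_32b              /* fallback layout: 4 + 4 + 4 bytes */
{
  unsigned int value;
  unsigned int nwaiters;
  int pshared;
};

_Static_assert (sizeof (struct new_sem_64b) <= __SIZEOF_SEM_T,
                "64-bit-atomics layout fits in sem_t");
_Static_assert (sizeof (struct new_sem_32b) <= __SIZEOF_SEM_T,
                "32-bit layout fits in sem_t");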

sysdeps/htl/sem-destroy.c

@@ -24,7 +24,15 @@
 int
 __sem_destroy (sem_t *sem)
 {
-  if (sem->__queue)
+  struct new_sem *isem = (struct new_sem *) sem;
+  if (
+#if __HAVE_64B_ATOMICS
+      atomic_load_relaxed (&isem->data) >> SEM_NWAITERS_SHIFT
+#else
+      atomic_load_relaxed (&isem->value) & SEM_NWAITERS_MASK
+      || isem->nwaiters
+#endif
+      )
     /* There are threads waiting on *SEM.  */
     {
       errno = EBUSY;

sysdeps/htl/sem-getvalue.c

@@ -22,9 +22,13 @@
 int
 __sem_getvalue (sem_t *restrict sem, int *restrict value)
 {
-  __pthread_spin_wait (&sem->__lock);
-  *value = sem->__value;
-  __pthread_spin_unlock (&sem->__lock);
+  struct new_sem *isem = (struct new_sem *) sem;
+
+#if __HAVE_64B_ATOMICS
+  *value = atomic_load_relaxed (&isem->data) & SEM_VALUE_MASK;
+#else
+  *value = atomic_load_relaxed (&isem->value) >> SEM_VALUE_SHIFT;
+#endif
 
   return 0;
 }

sysdeps/htl/sem-init.c

@@ -24,12 +24,6 @@
 int
 __sem_init (sem_t *sem, int pshared, unsigned value)
 {
-  if (pshared != 0)
-    {
-      errno = EOPNOTSUPP;
-      return -1;
-    }
-
 #ifdef SEM_VALUE_MAX
   if (value > SEM_VALUE_MAX)
     {
@@ -38,7 +32,9 @@ __sem_init (sem_t *sem, int pshared, unsigned value)
     }
 #endif
 
-  *sem = (sem_t) __SEMAPHORE_INITIALIZER (pshared, value);
+  struct new_sem *isem = (struct new_sem *) sem;
+
+  *isem = (struct new_sem) __SEMAPHORE_INITIALIZER (value, pshared);
 
   return 0;
 }
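
With the EOPNOTSUPP rejection removed, sem_init (sem, 1, value) now works for a semaphore placed in memory shared between processes. A minimal usage sketch using only standard POSIX calls (error handling and unmapping abbreviated; not part of the commit):

#include <semaphore.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>
#include <stdio.h>

int
main (void)
{
  /* Put the semaphore in anonymous shared memory visible across fork.  */
  sem_t *sem = mmap (NULL, sizeof (sem_t), PROT_READ | PROT_WRITE,
                     MAP_SHARED | MAP_ANONYMOUS, -1, 0);
  if (sem == MAP_FAILED)
    return 1;

  /* pshared = 1: the semaphore is shared between processes.  */
  if (sem_init (sem, 1, 0) != 0)
    return 1;

  if (fork () == 0)
    {
      sem_post (sem);          /* Child signals the parent.  */
      _exit (0);
    }

  sem_wait (sem);              /* Parent blocks until the child posts.  */
  puts ("child posted");
  wait (NULL);
  sem_destroy (sem);
  return 0;
}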

sysdeps/htl/sem-post.c

@@ -19,48 +19,50 @@
 #include <semaphore.h>
 #include <assert.h>
+#include <hurdlock.h>
 
 #include <pt-internal.h>
 
 int
 __sem_post (sem_t *sem)
 {
-  struct __pthread *wakeup;
+  struct new_sem *isem = (struct new_sem *) sem;
+  int flags = isem->pshared ? GSYNC_SHARED : 0;
 
-  __pthread_spin_wait (&sem->__lock);
-  if (sem->__value > 0)
-    /* Do a quick up.  */
+#if __HAVE_64B_ATOMICS
+  uint64_t d = atomic_load_relaxed (&isem->data);
+
+  do
     {
-      if (sem->__value == SEM_VALUE_MAX)
+      if ((d & SEM_VALUE_MASK) == SEM_VALUE_MAX)
         {
-          __pthread_spin_unlock (&sem->__lock);
           errno = EOVERFLOW;
           return -1;
         }
-
-      assert (sem->__queue == NULL);
-      sem->__value++;
-      __pthread_spin_unlock (&sem->__lock);
-      return 0;
     }
+  while (!atomic_compare_exchange_weak_release (&isem->data, &d, d + 1));
 
-  if (sem->__queue == NULL)
-    /* No one waiting.  */
+  if ((d >> SEM_NWAITERS_SHIFT) != 0)
+    /* Wake one waiter.  */
+    __lll_wake (((unsigned int *) &isem->data) + SEM_VALUE_OFFSET, flags);
+#else
+  unsigned int v = atomic_load_relaxed (&isem->value);
+
+  do
     {
-      sem->__value = 1;
-      __pthread_spin_unlock (&sem->__lock);
-      return 0;
+      if ((v >> SEM_VALUE_SHIFT) == SEM_VALUE_MAX)
+        {
+          errno = EOVERFLOW;
+          return -1;
+        }
     }
+  while (!atomic_compare_exchange_weak_release
+         (&isem->value, &v, v + (1 << SEM_VALUE_SHIFT)));
 
-  /* Wake someone up.  */
-
-  /* First dequeue someone.  */
-  wakeup = sem->__queue;
-  __pthread_dequeue (wakeup);
-
-  /* Then drop the lock and transfer control.  */
-  __pthread_spin_unlock (&sem->__lock);
-
-  __pthread_wakeup (wakeup);
+  if ((v & SEM_NWAITERS_MASK) != 0)
+    /* Wake one waiter.  */
+    __lll_wake (&isem->value, flags);
+#endif
 
   return 0;
 }
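
The overflow check keeps its POSIX semantics: posting a semaphore already at SEM_VALUE_MAX fails rather than wrapping the packed counter. A small standalone demonstration (not from the commit):

#include <semaphore.h>
#include <limits.h>
#include <errno.h>
#include <stdio.h>

int
main (void)
{
  sem_t sem;
  sem_init (&sem, 0, SEM_VALUE_MAX);   /* Start at the maximum count.  */

  if (sem_post (&sem) != 0 && errno == EOVERFLOW)
    puts ("sem_post at SEM_VALUE_MAX fails with EOVERFLOW");

  sem_destroy (&sem);
  return 0;
}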

sysdeps/htl/sem-timedwait.c

@@ -20,37 +20,27 @@
 #include <errno.h>
 #include <assert.h>
 #include <time.h>
+#include <hurdlock.h>
+#include <hurd/hurd.h>
+#include <sysdep-cancel.h>
 
 #include <pt-internal.h>
 
-struct cancel_ctx
-{
-  struct __pthread *wakeup;
-  sem_t *sem;
-  int cancel_wake;
-};
+#if !__HAVE_64B_ATOMICS
+static void
+__sem_wait_32_finish (struct new_sem *isem);
+#endif
 
 static void
-cancel_hook (void *arg)
+__sem_wait_cleanup (void *arg)
 {
-  struct cancel_ctx *ctx = arg;
-  struct __pthread *wakeup = ctx->wakeup;
-  sem_t *sem = ctx->sem;
-  int unblock;
+  struct new_sem *isem = arg;
 
-  __pthread_spin_wait (&sem->__lock);
-  /* The thread only needs to be awaken if it's blocking or about to block.
-     If it was already unblocked, it's not queued any more.  */
-  unblock = wakeup->prevp != NULL;
-  if (unblock)
-    {
-      __pthread_dequeue (wakeup);
-      ctx->cancel_wake = 1;
-    }
-  __pthread_spin_unlock (&sem->__lock);
-
-  if (unblock)
-    __pthread_wakeup (wakeup);
+#if __HAVE_64B_ATOMICS
+  atomic_fetch_add_relaxed (&isem->data, -((uint64_t) 1 << SEM_NWAITERS_SHIFT));
+#else
+  __sem_wait_32_finish (isem);
+#endif
 }
 
 int
@@ -58,123 +48,148 @@ __sem_timedwait_internal (sem_t *restrict sem,
                           clockid_t clock_id,
                           const struct timespec *restrict timeout)
 {
-  error_t err;
-  int cancelled, oldtype, drain;
-  int ret = 0;
-  struct __pthread *self = _pthread_self ();
-  struct cancel_ctx ctx;
-  ctx.wakeup = self;
-  ctx.sem = sem;
-  ctx.cancel_wake = 0;
+  struct new_sem *isem = (struct new_sem *) sem;
+  int err, ret = 0;
+  int flags = isem->pshared ? GSYNC_SHARED : 0;
 
-  /* Test for a pending cancellation request, switch to deferred mode for
-     safer resource handling, and prepare the hook to call in case we're
-     cancelled while blocking.  Once CANCEL_LOCK is released, the cancellation
-     hook can be called by another thread at any time.  Whatever happens,
-     this function must exit with MUTEX locked.
-
-     This function contains inline implementations of pthread_testcancel and
-     pthread_setcanceltype to reduce locking overhead.  */
-  __pthread_mutex_lock (&self->cancel_lock);
-  cancelled = (self->cancel_state == PTHREAD_CANCEL_ENABLE)
-              && self->cancel_pending;
+  __pthread_testcancel ();
 
-  if (cancelled)
-    {
-      __pthread_mutex_unlock (&self->cancel_lock);
-      __pthread_exit (PTHREAD_CANCELED);
-    }
+  if (__sem_waitfast (isem, 0) == 0)
+    return 0;
 
-  self->cancel_hook = cancel_hook;
-  self->cancel_hook_arg = &ctx;
-  oldtype = self->cancel_type;
+  int cancel_oldtype = LIBC_CANCEL_ASYNC();
 
-  if (oldtype != PTHREAD_CANCEL_DEFERRED)
-    self->cancel_type = PTHREAD_CANCEL_DEFERRED;
+#if __HAVE_64B_ATOMICS
+  uint64_t d = atomic_fetch_add_relaxed (&sem->data,
+                 (uint64_t) 1 << SEM_NWAITERS_SHIFT);
 
-  /* Add ourselves to the list of waiters.  This is done while setting
-     the cancellation hook to simplify the cancellation procedure, i.e.
-     if the thread is queued, it can be cancelled, otherwise it is
-     already unblocked, progressing on the return path.  */
-  __pthread_spin_wait (&sem->__lock);
-  if (sem->__value > 0)
-    /* Successful down.  */
-    {
-      sem->__value--;
-      __pthread_spin_unlock (&sem->__lock);
-      goto out_locked;
-    }
+  pthread_cleanup_push (__sem_wait_cleanup, isem);
 
-  if (timeout != NULL && ! valid_nanoseconds (timeout->tv_nsec))
-    {
-      errno = EINVAL;
-      ret = -1;
-      __pthread_spin_unlock (&sem->__lock);
-      goto out_locked;
-    }
+  for (;;)
+    {
+      if ((d & SEM_VALUE_MASK) == 0)
+        {
+          /* No token, sleep.  */
+          if (timeout)
+            err = __lll_abstimed_wait_intr (
+                      ((unsigned int *) &sem->data) + SEM_VALUE_OFFSET,
+                      0, timeout, flags, clock_id);
+          else
+            err = __lll_wait_intr (
+                      ((unsigned int *) &sem->data) + SEM_VALUE_OFFSET,
+                      0, flags);
+
+          if (err != 0)
+            {
+              /* Error, interruption or timeout, abort.  */
+              if (err == KERN_TIMEDOUT)
+                err = ETIMEDOUT;
+              if (err == KERN_INTERRUPTED)
+                err = EINTR;
+              ret = __hurd_fail (err);
+              __sem_wait_cleanup (isem);
+              break;
+            }
+
+          /* Token changed */
+          d = atomic_load_relaxed (&sem->data);
+        }
+      else
+        {
+          /* Try to acquire and dequeue.  */
+          if (atomic_compare_exchange_weak_acquire (&sem->data,
+              &d, d - 1 - ((uint64_t) 1 << SEM_NWAITERS_SHIFT)))
+            {
+              /* Success */
+              ret = 0;
+              break;
+            }
+        }
+    }
 
-  /* Add ourselves to the queue.  */
-  __pthread_enqueue (&sem->__queue, self);
-  __pthread_spin_unlock (&sem->__lock);
+  pthread_cleanup_pop (0);
+#else
+  unsigned int v;
 
-  __pthread_mutex_unlock (&self->cancel_lock);
+  atomic_fetch_add_acquire (&isem->nwaiters, 1);
 
-  /* Block the thread.  */
-  if (timeout != NULL)
-    err = __pthread_timedblock_intr (self, timeout, clock_id);
-  else
-    err = __pthread_block_intr (self);
+  pthread_cleanup_push (__sem_wait_cleanup, isem);
 
-  __pthread_spin_wait (&sem->__lock);
-  if (self->prevp == NULL)
-    /* Another thread removed us from the queue, which means a wakeup message
-       has been sent.  It was either consumed while we were blocking, or
-       queued after we timed out and before we acquired the semaphore lock, in
-       which case the message queue must be drained.  */
-    drain = err ? 1 : 0;
-  else
-    {
-      /* We're still in the queue.  Noone attempted to wake us up, i.e. we
-         timed out.  */
-      __pthread_dequeue (self);
-      drain = 0;
-    }
-  __pthread_spin_unlock (&sem->__lock);
+  v = atomic_load_relaxed (&isem->value);
+  do
+    {
+      do
+        {
+          do
+            {
+              if ((v & SEM_NWAITERS_MASK) != 0)
+                break;
+            }
+          while (!atomic_compare_exchange_weak_release (&isem->value,
+                 &v, v | SEM_NWAITERS_MASK));
+
+          if ((v >> SEM_VALUE_SHIFT) == 0)
+            {
+              /* No token, sleep.  */
+              if (timeout)
+                err = __lll_abstimed_wait_intr (&isem->value,
+                          SEM_NWAITERS_MASK, timeout, flags, clock_id);
+              else
+                err = __lll_wait_intr (&isem->value,
+                          SEM_NWAITERS_MASK, flags);
+
+              if (err != 0)
+                {
+                  /* Error, interruption or timeout, abort.  */
+                  if (err == KERN_TIMEDOUT)
+                    err = ETIMEDOUT;
+                  if (err == KERN_INTERRUPTED)
+                    err = EINTR;
+                  ret = __hurd_fail (err);
+                  goto error;
+                }
+
+              /* Token changed */
+              v = atomic_load_relaxed (&isem->value);
+            }
+        }
+      while ((v >> SEM_VALUE_SHIFT) == 0);
+    }
+  while (!atomic_compare_exchange_weak_acquire (&isem->value,
+         &v, v - (1 << SEM_VALUE_SHIFT)));
 
-  if (drain)
-    __pthread_block (self);
-
-  if (err)
-    {
-      assert (err == ETIMEDOUT || err == EINTR);
-      errno = err;
-      ret = -1;
-    }
-
-  /* We're almost done.  Remove the unblock hook, restore the previous
-     cancellation type, and check for a pending cancellation request.  */
-  __pthread_mutex_lock (&self->cancel_lock);
-out_locked:
-  self->cancel_hook = NULL;
-  self->cancel_hook_arg = NULL;
-  self->cancel_type = oldtype;
-  cancelled = (self->cancel_state == PTHREAD_CANCEL_ENABLE)
-              && self->cancel_pending;
-  __pthread_mutex_unlock (&self->cancel_lock);
-
-  if (cancelled)
-    {
-      if (ret == 0 && ctx.cancel_wake == 0)
-        /* We were cancelled while waking up with a token, put it back.  */
-        __sem_post (sem);
-
-      __pthread_exit (PTHREAD_CANCELED);
-    }
+error:
+  pthread_cleanup_pop (0);
+
+  __sem_wait_32_finish (isem);
+#endif
+
+  LIBC_CANCEL_RESET (cancel_oldtype);
 
   return ret;
 }
 
+#if !__HAVE_64B_ATOMICS
+/* Stop being a registered waiter (non-64b-atomics code only).  */
+static void
+__sem_wait_32_finish (struct new_sem *isem)
+{
+  unsigned int wguess = atomic_load_relaxed (&isem->nwaiters);
+  if (wguess == 1)
+    atomic_fetch_and_acquire (&isem->value, ~SEM_NWAITERS_MASK);
+
+  unsigned int wfinal = atomic_fetch_add_release (&isem->nwaiters, -1);
+  if (wfinal > 1 && wguess == 1)
+    {
+      unsigned int v = atomic_fetch_or_relaxed (&isem->value,
+                                                SEM_NWAITERS_MASK);
+      v >>= SEM_VALUE_SHIFT;
+      while (v--)
+        __lll_wake (&isem->value, isem->pshared ? GSYNC_SHARED : 0);
+    }
+}
+#endif
+
 int
 __sem_clockwait (sem_t *sem, clockid_t clockid,
                  const struct timespec *restrict timeout)
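
Caller-visible behavior of the new slow path: KERN_TIMEDOUT surfaces as ETIMEDOUT and KERN_INTERRUPTED as EINTR, matching the POSIX contract on other platforms. A usage sketch exercising the timeout case (standard API only, not from the commit):

#include <semaphore.h>
#include <time.h>
#include <errno.h>
#include <stdio.h>

int
main (void)
{
  sem_t sem;
  sem_init (&sem, 0, 0);               /* No tokens, so the wait must block.  */

  struct timespec ts;
  clock_gettime (CLOCK_REALTIME, &ts);
  ts.tv_sec += 1;                      /* Absolute deadline, 1s from now.  */

  if (sem_timedwait (&sem, &ts) != 0 && errno == ETIMEDOUT)
    puts ("timed out as expected");    /* No post ever arrives.  */

  sem_destroy (&sem);
  return 0;
}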

sysdeps/htl/sem-trywait.c

@@ -24,18 +24,13 @@
 int
 __sem_trywait (sem_t *sem)
 {
-  __pthread_spin_wait (&sem->__lock);
-  if (sem->__value > 0)
-    /* Successful down.  */
-    {
-      sem->__value--;
-      __pthread_spin_unlock (&sem->__lock);
-      return 0;
-    }
-  __pthread_spin_unlock (&sem->__lock);
+  struct new_sem *isem = (struct new_sem *) sem;
+
+  if (__sem_waitfast (isem, 1) == 0)
+    return 0;
 
   errno = EAGAIN;
   return -1;
 }
 
-strong_alias (__sem_trywait, sem_trywait);
+weak_alias (__sem_trywait, sem_trywait);

sysdeps/htl/sem-waitfast.c (new file)

@@ -0,0 +1,55 @@
+/* Lock a semaphore if it does not require blocking.  Generic version.
+   Copyright (C) 2005-2020 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include <semaphore.h>
+#include <errno.h>
+
+#include <pt-internal.h>
+
+int
+__sem_waitfast (struct new_sem *isem, int definitive_result)
+{
+#if __HAVE_64B_ATOMICS
+  uint64_t d = atomic_load_relaxed (&isem->data);
+
+  do
+    {
+      if ((d & SEM_VALUE_MASK) == 0)
+        break;
+      if (atomic_compare_exchange_weak_acquire (&isem->data, &d, d - 1))
+        /* Successful down.  */
+        return 0;
+    }
+  while (definitive_result);
+  return -1;
+#else
+  unsigned v = atomic_load_relaxed (&isem->value);
+
+  do
+    {
+      if ((v >> SEM_VALUE_SHIFT) == 0)
+        break;
+      if (atomic_compare_exchange_weak_acquire (&isem->value,
+          &v, v - (1 << SEM_VALUE_SHIFT)))
+        /* Successful down.  */
+        return 0;
+    }
+  while (definitive_result);
+  return -1;
+#endif
+}
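
The definitive_result flag distinguishes the two callers in this commit: sem_trywait passes 1, so the CAS loop retries until it either takes a token or definitively observes a zero value (EAGAIN must mean "no token"), while sem_timedwait passes 0, where a single failed attempt is fine because the blocking path follows. A sketch of the two call patterns, assuming the internal declarations from pt-internal.h:

#include <pt-internal.h>   /* struct new_sem, __sem_waitfast (internal) */

/* Trywait-style: must return a definitive answer.  */
static int
try_down (struct new_sem *isem)
{
  return __sem_waitfast (isem, 1);      /* 0 on success, -1 if no token */
}

/* Wait-style: opportunistic fast path before registering as a waiter.  */
static int
down_or_block (struct new_sem *isem)
{
  if (__sem_waitfast (isem, 0) == 0)
    return 0;                           /* got a token without blocking */
  /* ...otherwise bump nwaiters and sleep, as in sem-timedwait.c.  */
  return -1;
}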

sysdeps/mach/hurd/i386/Makefile

@@ -116,7 +116,6 @@ test-xfail-tst-cond13 = yes
 test-xfail-tst-cond23 = yes
 test-xfail-tst-rwlock4 = yes
 test-xfail-tst-rwlock12 = yes
-test-xfail-tst-sem3 = yes
 test-xfail-tst-barrier2 = yes
 test-xfail-tst-pututxline-cache = yes
 test-xfail-tst-pututxline-lockfail = yes