1
0
mirror of https://sourceware.org/git/glibc.git synced 2025-08-01 10:06:57 +03:00

nptl: Add EOVERFLOW checks for futex calls

Some futex-internal calls require an additional check for EOVERFLOW (as
indicated by [1] [2] [3]).  For both the mutex and rwlock code, EOVERFLOW is
handled as ETIMEDOUT, since it indicates to the caller that the blocking
operation could not be issued.

For mutexes it avoids a possible issue where PTHREAD_MUTEX_ROBUST_* might
assume EOVERFLOW indicates the futex call has succeeded, and for
PTHREAD_MUTEX_PP_* it avoids a potential busy infinite loop.  For rwlocks
and semaphores, it also avoids potential busy infinite loops.

Checked on x86_64-linux-gnu and i686-linux-gnu, although EOVERFLOW
won't be possible with current usage (since all timeouts on 32-bit
architectures with 32-bit time_t support will be in the range of
32-bit time_t).

[1] https://sourceware.org/pipermail/libc-alpha/2020-November/120079.html
[2] https://sourceware.org/pipermail/libc-alpha/2020-November/120080.html
[3] https://sourceware.org/pipermail/libc-alpha/2020-November/120127.html
This commit is contained in:
Adhemerval Zanella
2020-11-26 10:54:04 -03:00
parent 71eeae0325
commit aa69f19a93
4 changed files with 13 additions and 13 deletions

View File

@ -506,7 +506,7 @@ __pthread_cond_wait_common (pthread_cond_t *cond, pthread_mutex_t *mutex,
__pthread_cleanup_pop (&buffer, 0); __pthread_cleanup_pop (&buffer, 0);
if (__glibc_unlikely (err == ETIMEDOUT)) if (__glibc_unlikely (err == ETIMEDOUT || err == EOVERFLOW))
{ {
__condvar_dec_grefs (cond, g, private); __condvar_dec_grefs (cond, g, private);
/* If we timed out, we effectively cancel waiting. Note that /* If we timed out, we effectively cancel waiting. Note that
@ -515,7 +515,7 @@ __pthread_cond_wait_common (pthread_cond_t *cond, pthread_mutex_t *mutex,
__condvar_quiesce_and_switch_g1 and us trying to acquire __condvar_quiesce_and_switch_g1 and us trying to acquire
the lock during cancellation is not possible. */ the lock during cancellation is not possible. */
__condvar_cancel_waiting (cond, seq, g, private); __condvar_cancel_waiting (cond, seq, g, private);
result = ETIMEDOUT; result = err;
goto done; goto done;
} }
else else

View File

@ -270,7 +270,7 @@ __pthread_mutex_clocklock_common (pthread_mutex_t *mutex,
oldval, clockid, abstime, oldval, clockid, abstime,
PTHREAD_ROBUST_MUTEX_PSHARED (mutex)); PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
/* The futex call timed out. */ /* The futex call timed out. */
if (err == ETIMEDOUT) if (err == ETIMEDOUT || err == EOVERFLOW)
return err; return err;
/* Reload current lock value. */ /* Reload current lock value. */
oldval = mutex->__data.__lock; oldval = mutex->__data.__lock;
@ -550,8 +550,8 @@ __pthread_mutex_clocklock_common (pthread_mutex_t *mutex,
int e = __futex_abstimed_wait64 ( int e = __futex_abstimed_wait64 (
(unsigned int *) &mutex->__data.__lock, ceilval | 2, (unsigned int *) &mutex->__data.__lock, ceilval | 2,
clockid, abstime, PTHREAD_MUTEX_PSHARED (mutex)); clockid, abstime, PTHREAD_MUTEX_PSHARED (mutex));
if (e == ETIMEDOUT) if (e == ETIMEDOUT || e == EOVERFLOW)
return ETIMEDOUT; return e;
} }
} }
while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock, while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,

View File

@ -334,7 +334,7 @@ __pthread_rwlock_rdlock_full64 (pthread_rwlock_t *rwlock, clockid_t clockid,
private); private);
/* We ignore EAGAIN and EINTR. On time-outs, we can just /* We ignore EAGAIN and EINTR. On time-outs, we can just
return because we don't need to clean up anything. */ return because we don't need to clean up anything. */
if (err == ETIMEDOUT) if (err == ETIMEDOUT || err == EOVERFLOW)
return err; return err;
} }
/* It makes sense to not break out of the outer loop here /* It makes sense to not break out of the outer loop here
@ -460,7 +460,7 @@ __pthread_rwlock_rdlock_full64 (pthread_rwlock_t *rwlock, clockid_t clockid,
int err = __futex_abstimed_wait64 (&rwlock->__data.__wrphase_futex, int err = __futex_abstimed_wait64 (&rwlock->__data.__wrphase_futex,
1 | PTHREAD_RWLOCK_FUTEX_USED, 1 | PTHREAD_RWLOCK_FUTEX_USED,
clockid, abstime, private); clockid, abstime, private);
if (err == ETIMEDOUT) if (err == ETIMEDOUT || err == EOVERFLOW)
{ {
/* If we timed out, we need to unregister. If no read phase /* If we timed out, we need to unregister. If no read phase
has been installed while we waited, we can just decrement has been installed while we waited, we can just decrement
@ -479,7 +479,7 @@ __pthread_rwlock_rdlock_full64 (pthread_rwlock_t *rwlock, clockid_t clockid,
if (atomic_compare_exchange_weak_relaxed if (atomic_compare_exchange_weak_relaxed
(&rwlock->__data.__readers, &r, (&rwlock->__data.__readers, &r,
r - (1 << PTHREAD_RWLOCK_READER_SHIFT))) r - (1 << PTHREAD_RWLOCK_READER_SHIFT)))
return ETIMEDOUT; return err;
/* TODO Back-off. */ /* TODO Back-off. */
} }
/* Use the acquire MO fence to mirror the steps taken in the /* Use the acquire MO fence to mirror the steps taken in the
@ -730,7 +730,7 @@ __pthread_rwlock_wrlock_full64 (pthread_rwlock_t *rwlock, clockid_t clockid,
int err = __futex_abstimed_wait64 (&rwlock->__data.__writers_futex, int err = __futex_abstimed_wait64 (&rwlock->__data.__writers_futex,
1 | PTHREAD_RWLOCK_FUTEX_USED, 1 | PTHREAD_RWLOCK_FUTEX_USED,
clockid, abstime, private); clockid, abstime, private);
if (err == ETIMEDOUT) if (err == ETIMEDOUT || err == EOVERFLOW)
{ {
if (prefer_writer) if (prefer_writer)
{ {
@ -758,7 +758,7 @@ __pthread_rwlock_wrlock_full64 (pthread_rwlock_t *rwlock, clockid_t clockid,
} }
/* We cleaned up and cannot have stolen another waiting writer's /* We cleaned up and cannot have stolen another waiting writer's
futex wake-up, so just return. */ futex wake-up, so just return. */
return ETIMEDOUT; return err;
} }
/* If we got interrupted (EINTR) or the futex word does not have the /* If we got interrupted (EINTR) or the futex word does not have the
expected value (EAGAIN), retry after reloading __readers. */ expected value (EAGAIN), retry after reloading __readers. */
@ -829,7 +829,7 @@ __pthread_rwlock_wrlock_full64 (pthread_rwlock_t *rwlock, clockid_t clockid,
int err = __futex_abstimed_wait64 (&rwlock->__data.__wrphase_futex, int err = __futex_abstimed_wait64 (&rwlock->__data.__wrphase_futex,
PTHREAD_RWLOCK_FUTEX_USED, PTHREAD_RWLOCK_FUTEX_USED,
clockid, abstime, private); clockid, abstime, private);
if (err == ETIMEDOUT) if (err == ETIMEDOUT || err == EOVERFLOW)
{ {
if (rwlock->__data.__flags != PTHREAD_RWLOCK_PREFER_READER_NP) if (rwlock->__data.__flags != PTHREAD_RWLOCK_PREFER_READER_NP)
{ {
@ -861,7 +861,7 @@ __pthread_rwlock_wrlock_full64 (pthread_rwlock_t *rwlock, clockid_t clockid,
if ((wf & PTHREAD_RWLOCK_FUTEX_USED) != 0) if ((wf & PTHREAD_RWLOCK_FUTEX_USED) != 0)
futex_wake (&rwlock->__data.__writers_futex, futex_wake (&rwlock->__data.__writers_futex,
1, private); 1, private);
return ETIMEDOUT; return err;
} }
/* TODO Back-off. */ /* TODO Back-off. */
} }

View File

@ -191,7 +191,7 @@ __new_sem_wait_slow64 (struct new_sem *sem, clockid_t clockid,
documentation. Before Linux 2.6.22, EINTR was also returned on documentation. Before Linux 2.6.22, EINTR was also returned on
spurious wake-ups; we only support more recent Linux versions, spurious wake-ups; we only support more recent Linux versions,
so do not need to consider this here.) */ so do not need to consider this here.) */
if (err == ETIMEDOUT || err == EINTR) if (err == ETIMEDOUT || err == EINTR || err == EOVERFLOW)
{ {
__set_errno (err); __set_errno (err);
err = -1; err = -1;