hurd: make lll_* take a variable instead of a ptr
To be consistent with other ports, make the lll_* macros take a variable, and rename the variants that keep taking a pointer to __lll_*.
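In practice, call sites now pass the lock variable itself and the public macro takes its address internally; the pointer-taking forms remain available under the __lll_* names. A minimal sketch of the resulting convention, with an illustrative lock variable (the wrapper definitions appear verbatim in the lowlevellock.h hunks below):

    int lock = 0;              /* an integer lock cell */

    lll_lock (lock, 0);        /* expands to __lll_lock (&(lock), 0) */
    lll_unlock (lock, 0);      /* expands to __lll_unlock (&(lock), 0) */

    __lll_lock (&lock, 0);     /* pointer-taking variant, renamed from lll_lock */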
@@ -51,7 +51,7 @@ __lll_abstimed_wait (void *ptr, int val,
     return EINVAL;
 
   int mlsec = compute_reltime (tsp, clk);
-  return mlsec < 0 ? KERN_TIMEDOUT : lll_timed_wait (ptr, val, mlsec, flags);
+  return mlsec < 0 ? KERN_TIMEDOUT : __lll_timed_wait (ptr, val, mlsec, flags);
 }
 
 int
@@ -62,7 +62,7 @@ __lll_abstimed_xwait (void *ptr, int lo, int hi,
     return EINVAL;
 
   int mlsec = compute_reltime (tsp, clk);
-  return mlsec < 0 ? KERN_TIMEDOUT : lll_timed_xwait (ptr, lo, hi, mlsec,
+  return mlsec < 0 ? KERN_TIMEDOUT : __lll_timed_xwait (ptr, lo, hi, mlsec,
     flags);
 }
 
@@ -73,7 +73,7 @@ __lll_abstimed_lock (void *ptr,
   if (clk != CLOCK_REALTIME)
     return EINVAL;
 
-  if (lll_trylock (ptr) == 0)
+  if (__lll_trylock (ptr) == 0)
     return 0;
 
   while (1)
@@ -84,7 +84,7 @@ __lll_abstimed_lock (void *ptr,
         return EINVAL;
 
       int mlsec = compute_reltime (tsp, clk);
-      if (mlsec < 0 || lll_timed_wait (ptr, 2, mlsec, flags) == KERN_TIMEDOUT)
+      if (mlsec < 0 || __lll_timed_wait (ptr, 2, mlsec, flags) == KERN_TIMEDOUT)
        return ETIMEDOUT;
     }
 }
@@ -140,7 +140,7 @@ __lll_robust_lock (void *ptr, int flags)
        }
      else
        {
-         lll_timed_wait (iptr, val, wait_time, flags);
+         __lll_timed_wait (iptr, val, wait_time, flags);
          if (wait_time < MAX_WAIT_TIME)
            wait_time <<= 1;
        }
@@ -187,7 +187,7 @@ __lll_robust_abstimed_lock (void *ptr,
      else if (mlsec > wait_time)
        mlsec = wait_time;
 
-      int res = lll_timed_wait (iptr, val, mlsec, flags);
+      int res = __lll_timed_wait (iptr, val, mlsec, flags);
      if (res == KERN_TIMEDOUT)
        return ETIMEDOUT;
      else if (wait_time < MAX_WAIT_TIME)
@@ -223,7 +223,7 @@ __lll_robust_unlock (void *ptr, int flags)
    {
      if (val & LLL_WAITERS)
        {
-         lll_set_wake (ptr, 0, flags);
+         __lll_set_wake (ptr, 0, flags);
          break;
        }
      else if (atomic_compare_exchange_weak_release ((unsigned int *)ptr, &val, 0))
@@ -31,21 +31,21 @@ struct timespec;
 
 /* Wait on 64-bit address PTR, without blocking if its contents
    are different from the pair <LO, HI>. */
-#define lll_xwait(ptr, lo, hi, flags) \
+#define __lll_xwait(ptr, lo, hi, flags) \
   __gsync_wait (__mach_task_self (), \
     (vm_offset_t)ptr, lo, hi, 0, flags | GSYNC_QUAD)
 
-/* Same as 'lll_wait', but only block for MLSEC milliseconds. */
-#define lll_timed_wait(ptr, val, mlsec, flags) \
+/* Same as '__lll_wait', but only block for MLSEC milliseconds. */
+#define __lll_timed_wait(ptr, val, mlsec, flags) \
   __gsync_wait (__mach_task_self (), \
     (vm_offset_t)ptr, val, 0, mlsec, flags | GSYNC_TIMED)
 
-/* Same as 'lll_xwait', but only block for MLSEC milliseconds. */
-#define lll_timed_xwait(ptr, lo, hi, mlsec, flags) \
+/* Same as '__lll_xwait', but only block for MLSEC milliseconds. */
+#define __lll_timed_xwait(ptr, lo, hi, mlsec, flags) \
   __gsync_wait (__mach_task_self (), (vm_offset_t)ptr, \
     lo, hi, mlsec, flags | GSYNC_TIMED | GSYNC_QUAD)
 
-/* Same as 'lll_wait', but only block until TSP elapses,
+/* Same as '__lll_wait', but only block until TSP elapses,
    using clock CLK. */
 extern int __lll_abstimed_wait (void *__ptr, int __val,
   const struct timespec *__tsp, int __flags, int __clk);
@@ -63,6 +63,8 @@ extern int __lll_abstimed_lock (void *__ptr,
 /* Acquire the lock at PTR, but return with an error if
    the process containing the owner thread dies. */
 extern int __lll_robust_lock (void *__ptr, int __flags);
+#define lll_robust_lock(var, flags) \
+  __lll_robust_lock (&(var), flags)
 
 /* Same as '__lll_robust_lock', but only block until TSP
    elapses, using clock CLK. */
@@ -72,19 +74,23 @@ extern int __lll_robust_abstimed_lock (void *__ptr,
 /* Same as '__lll_robust_lock', but return with an error
    if the lock cannot be acquired without blocking. */
 extern int __lll_robust_trylock (void *__ptr);
+#define lll_robust_trylock(var) \
+  __lll_robust_trylock (&(var))
 
 /* Wake one or more threads waiting on address PTR,
    setting its value to VAL before doing so. */
-#define lll_set_wake(ptr, val, flags) \
+#define __lll_set_wake(ptr, val, flags) \
   __gsync_wake (__mach_task_self (), \
     (vm_offset_t)ptr, val, flags | GSYNC_MUTATE)
 
 /* Release the robust lock at PTR. */
 extern void __lll_robust_unlock (void *__ptr, int __flags);
+#define lll_robust_unlock(var, flags) \
+  __lll_robust_unlock (&(var), flags)
 
 /* Rearrange threads waiting on address SRC to instead wait on
    DST, waking one of them if WAIT_ONE is non-zero. */
-#define lll_requeue(src, dst, wake_one, flags) \
+#define __lll_requeue(src, dst, wake_one, flags) \
   __gsync_requeue (__mach_task_self (), (vm_offset_t)src, \
     (vm_offset_t)dst, (boolean_t)wake_one, flags)
 
@@ -93,31 +99,31 @@ extern void __lll_robust_unlock (void *__ptr, int __flags);
    every one of these calls, defaulting to CLOCK_REALTIME if
    no argument is passed. */
 
-#define lll_abstimed_wait(ptr, val, tsp, flags, ...) \
+#define lll_abstimed_wait(var, val, tsp, flags, ...) \
   ({ \
     const clockid_t __clk[] = { CLOCK_REALTIME, ##__VA_ARGS__ }; \
-    __lll_abstimed_wait ((ptr), (val), (tsp), (flags), \
+    __lll_abstimed_wait (&(var), (val), (tsp), (flags), \
       __clk[sizeof (__clk) / sizeof (__clk[0]) - 1]); \
   })
 
-#define lll_abstimed_xwait(ptr, lo, hi, tsp, flags, ...) \
+#define lll_abstimed_xwait(var, lo, hi, tsp, flags, ...) \
   ({ \
     const clockid_t __clk[] = { CLOCK_REALTIME, ##__VA_ARGS__ }; \
-    __lll_abstimed_xwait ((ptr), (lo), (hi), (tsp), (flags), \
+    __lll_abstimed_xwait (&(var), (lo), (hi), (tsp), (flags), \
      __clk[sizeof (__clk) / sizeof (__clk[0]) - 1]); \
   })
 
-#define lll_abstimed_lock(ptr, tsp, flags, ...) \
+#define lll_abstimed_lock(var, tsp, flags, ...) \
   ({ \
     const clockid_t __clk[] = { CLOCK_REALTIME, ##__VA_ARGS__ }; \
-    __lll_abstimed_lock ((ptr), (tsp), (flags), \
+    __lll_abstimed_lock (&(var), (tsp), (flags), \
      __clk[sizeof (__clk) / sizeof (__clk[0]) - 1]); \
   })
 
-#define lll_robust_abstimed_lock(ptr, tsp, flags, ...) \
+#define lll_robust_abstimed_lock(var, tsp, flags, ...) \
   ({ \
     const clockid_t __clk[] = { CLOCK_REALTIME, ##__VA_ARGS__ }; \
-    __lll_robust_abstimed_lock ((ptr), (tsp), (flags), \
+    __lll_robust_abstimed_lock (&(var), (tsp), (flags), \
      __clk[sizeof (__clk) / sizeof (__clk[0]) - 1]); \
   })
 
@@ -66,7 +66,7 @@ _S_msg_proc_newids (mach_port_t me,
 
   /* Notify any waiting user threads that the id change as been completed. */
   ++_hurd_pids_changed_stamp;
-  lll_wake (&_hurd_pids_changed_stamp, GSYNC_BROADCAST);
+  lll_wake (_hurd_pids_changed_stamp, GSYNC_BROADCAST);
 
   return 0;
 }
@@ -57,7 +57,7 @@ extern void __spin_lock (__spin_lock_t *__lock);
 _EXTERN_INLINE void
 __spin_lock (__spin_lock_t *__lock)
 {
-  lll_lock (__lock, 0);
+  __lll_lock (__lock, 0);
 }
 #endif
 
@@ -68,7 +68,7 @@ extern void __spin_unlock (__spin_lock_t *__lock);
 _EXTERN_INLINE void
 __spin_unlock (__spin_lock_t *__lock)
 {
-  lll_unlock (__lock, 0);
+  __lll_unlock (__lock, 0);
 }
 #endif
 
@@ -79,7 +79,7 @@ extern int __spin_try_lock (__spin_lock_t *__lock);
 _EXTERN_INLINE int
 __spin_try_lock (__spin_lock_t *__lock)
 {
-  return (lll_trylock (__lock) == 0);
+  return (__lll_trylock (__lock) == 0);
 }
 #endif
 
@@ -36,16 +36,20 @@
 
 /* Wait on address PTR, without blocking if its contents
  * are different from VAL. */
-#define lll_wait(ptr, val, flags) \
+#define __lll_wait(ptr, val, flags) \
   __gsync_wait (__mach_task_self (), \
     (vm_offset_t)(ptr), (val), 0, 0, (flags))
+#define lll_wait(var, val, flags) \
+  __lll_wait (&(var), val, flags)
 
 /* Wake one or more threads waiting on address PTR. */
-#define lll_wake(ptr, flags) \
+#define __lll_wake(ptr, flags) \
   __gsync_wake (__mach_task_self (), (vm_offset_t)(ptr), 0, (flags))
+#define lll_wake(var, flags) \
+  __lll_wake (&(var), flags)
 
 /* Acquire the lock at PTR. */
-#define lll_lock(ptr, flags) \
+#define __lll_lock(ptr, flags) \
   ({ \
     int *__iptr = (int *)(ptr); \
     int __flags = (flags); \
@@ -55,27 +59,33 @@
       { \
         if (atomic_exchange_acq (__iptr, 2) == 0) \
           break; \
-        lll_wait (__iptr, 2, __flags); \
+        __lll_wait (__iptr, 2, __flags); \
       } \
     (void)0; \
   })
+#define lll_lock(var, flags) \
+  __lll_lock (&(var), flags)
 
 /* Try to acquire the lock at PTR, without blocking.
    Evaluates to zero on success. */
-#define lll_trylock(ptr) \
+#define __lll_trylock(ptr) \
   ({ \
     int *__iptr = (int *)(ptr); \
     *__iptr == 0 \
      && atomic_compare_and_exchange_bool_acq (__iptr, 1, 0) == 0 ? 0 : -1; \
   })
+#define lll_trylock(var) \
+  __lll_trylock (&(var))
 
 /* Release the lock at PTR. */
-#define lll_unlock(ptr, flags) \
+#define __lll_unlock(ptr, flags) \
   ({ \
     int *__iptr = (int *)(ptr); \
     if (atomic_exchange_rel (__iptr, 0) == 2) \
-      lll_wake (__iptr, (flags)); \
+      __lll_wake (__iptr, (flags)); \
     (void)0; \
   })
+#define lll_unlock(var, flags) \
+  __lll_unlock (&(var), flags)
 
 #endif
@@ -33,7 +33,7 @@ __pthread_mutex_lock (pthread_mutex_t *mtxp)
   switch (MTX_TYPE (mtxp))
     {
     case PT_MTX_NORMAL:
-      lll_lock (&mtxp->__lock, flags);
+      lll_lock (mtxp->__lock, flags);
       break;
 
     case PT_MTX_RECURSIVE:
@@ -47,7 +47,7 @@ __pthread_mutex_lock (pthread_mutex_t *mtxp)
          return ret;
        }
 
-      lll_lock (&mtxp->__lock, flags);
+      lll_lock (mtxp->__lock, flags);
       mtx_set_owner (mtxp, self, flags);
       mtxp->__cnt = 1;
       break;
@@ -57,7 +57,7 @@ __pthread_mutex_lock (pthread_mutex_t *mtxp)
      if (mtx_owned_p (mtxp, self, flags))
        return EDEADLK;
 
-      lll_lock (&mtxp->__lock, flags);
+      lll_lock (mtxp->__lock, flags);
      mtx_set_owner (mtxp, self, flags);
      break;
 
@@ -65,7 +65,7 @@ __pthread_mutex_lock (pthread_mutex_t *mtxp)
     case PT_MTX_RECURSIVE | PTHREAD_MUTEX_ROBUST:
     case PT_MTX_ERRORCHECK | PTHREAD_MUTEX_ROBUST:
       self = _pthread_self ();
-      ROBUST_LOCK (self, mtxp, __lll_robust_lock, flags);
+      ROBUST_LOCK (self, mtxp, lll_robust_lock, flags);
       break;
 
     default:
@@ -34,7 +34,7 @@ __pthread_mutex_clocklock (pthread_mutex_t *mtxp,
   switch (MTX_TYPE (mtxp))
     {
     case PT_MTX_NORMAL:
-      ret = lll_abstimed_lock (&mtxp->__lock, tsp, flags, clockid);
+      ret = lll_abstimed_lock (mtxp->__lock, tsp, flags, clockid);
       break;
 
     case PT_MTX_RECURSIVE:
@@ -47,7 +47,7 @@ __pthread_mutex_clocklock (pthread_mutex_t *mtxp,
          ++mtxp->__cnt;
          ret = 0;
        }
-      else if ((ret = lll_abstimed_lock (&mtxp->__lock, tsp, flags, clockid)) == 0)
+      else if ((ret = lll_abstimed_lock (mtxp->__lock, tsp, flags, clockid)) == 0)
        {
          mtx_set_owner (mtxp, self, flags);
          mtxp->__cnt = 1;
@@ -59,7 +59,7 @@ __pthread_mutex_clocklock (pthread_mutex_t *mtxp,
      self = _pthread_self ();
      if (mtx_owned_p (mtxp, self, flags))
        ret = EDEADLK;
-      else if ((ret = lll_abstimed_lock (&mtxp->__lock, tsp, flags, clockid)) == 0)
+      else if ((ret = lll_abstimed_lock (mtxp->__lock, tsp, flags, clockid)) == 0)
        mtx_set_owner (mtxp, self, flags);
 
      break;
@@ -32,7 +32,7 @@ __pthread_mutex_trylock (pthread_mutex_t *mtxp)
   switch (MTX_TYPE (mtxp))
     {
     case PT_MTX_NORMAL:
-      ret = lll_trylock (&mtxp->__lock);
+      ret = lll_trylock (mtxp->__lock);
       if (ret)
        ret = EBUSY;
      break;
@@ -47,7 +47,7 @@ __pthread_mutex_trylock (pthread_mutex_t *mtxp)
          ++mtxp->__cnt;
          ret = 0;
        }
-      else if ((ret = lll_trylock (&mtxp->__lock)) == 0)
+      else if ((ret = lll_trylock (mtxp->__lock)) == 0)
        {
          mtx_set_owner (mtxp, self, mtxp->__flags);
          mtxp->__cnt = 1;
@@ -59,7 +59,7 @@ __pthread_mutex_trylock (pthread_mutex_t *mtxp)
 
     case PT_MTX_ERRORCHECK:
       self = _pthread_self ();
-      if ((ret = lll_trylock (&mtxp->__lock)) == 0)
+      if ((ret = lll_trylock (mtxp->__lock)) == 0)
        mtx_set_owner (mtxp, self, mtxp->__flags);
      else
        ret = EBUSY;
@@ -69,7 +69,7 @@ __pthread_mutex_trylock (pthread_mutex_t *mtxp)
     case PT_MTX_RECURSIVE | PTHREAD_MUTEX_ROBUST:
     case PT_MTX_ERRORCHECK | PTHREAD_MUTEX_ROBUST:
       self = _pthread_self ();
-      ROBUST_LOCK (self, mtxp, __lll_robust_trylock);
+      ROBUST_LOCK (self, mtxp, lll_robust_trylock);
      break;
 
     default:
@@ -32,7 +32,7 @@ __pthread_mutex_unlock (pthread_mutex_t *mtxp)
   switch (MTX_TYPE (mtxp))
     {
     case PT_MTX_NORMAL:
-      lll_unlock (&mtxp->__lock, flags);
+      lll_unlock (mtxp->__lock, flags);
       break;
 
     case PT_MTX_RECURSIVE:
@@ -42,7 +42,7 @@ __pthread_mutex_unlock (pthread_mutex_t *mtxp)
      else if (--mtxp->__cnt == 0)
        {
          mtxp->__owner_id = mtxp->__shpid = 0;
-         lll_unlock (&mtxp->__lock, flags);
+         lll_unlock (mtxp->__lock, flags);
        }
 
      break;
@@ -54,7 +54,7 @@ __pthread_mutex_unlock (pthread_mutex_t *mtxp)
      else
        {
          mtxp->__owner_id = mtxp->__shpid = 0;
-         lll_unlock (&mtxp->__lock, flags);
+         lll_unlock (mtxp->__lock, flags);
        }
 
      break;
@@ -74,7 +74,7 @@ __pthread_mutex_unlock (pthread_mutex_t *mtxp)
           * state, mark it as irrecoverable. */
          mtxp->__owner_id = ((mtxp->__lock & LLL_DEAD_OWNER)
                              ? NOTRECOVERABLE_ID : 0);
-         __lll_robust_unlock (&mtxp->__lock, flags);
+         lll_robust_unlock (mtxp->__lock, flags);
        }
 
      break;
@@ -42,7 +42,7 @@
        return EDEADLK; \
      } \
    \
-    ret = cb (&mtxp->__lock, ##__VA_ARGS__); \
+    ret = cb (mtxp->__lock, ##__VA_ARGS__); \
    if (ret == 0 || ret == EOWNERDEAD) \
      { \
        if (mtxp->__owner_id == ENOTRECOVERABLE) \
@@ -39,7 +39,7 @@ __setpgid (pid_t pid, pid_t pgid)
   /* Synchronize with the signal thread to make sure we have
      received and processed proc_newids before returning to the user. */
   while (_hurd_pids_changed_stamp == stamp)
-    lll_wait (&_hurd_pids_changed_stamp, stamp, 0);
+    lll_wait (_hurd_pids_changed_stamp, stamp, 0);
 
   return 0;
 
@@ -56,7 +56,7 @@ __setsid (void)
         returned by `getpgrp ()' in other threads) has been updated before
         we return. */
       while (_hurd_pids_changed_stamp == stamp)
-        lll_wait (&_hurd_pids_changed_stamp, stamp, 0);
+        lll_wait (_hurd_pids_changed_stamp, stamp, 0);
     }
 
   HURD_CRITICAL_END;
@@ -60,7 +60,7 @@
 #define THREAD_GSCOPE_RESET_FLAG() \
   do \
     if (atomic_exchange_and_add_rel (&GL(dl_thread_gscope_count), -1) == 1) \
-      lll_wake (&GL(dl_thread_gscope_count), 0); \
+      lll_wake (GL(dl_thread_gscope_count), 0); \
   while (0)
 #define THREAD_GSCOPE_WAIT() \
   do \
@@ -68,7 +68,7 @@
       int count; \
       atomic_write_barrier (); \
       while ((count = GL(dl_thread_gscope_count))) \
-        lll_wait (&GL(dl_thread_gscope_count), count, 0); \
+        lll_wait (GL(dl_thread_gscope_count), count, 0); \
     } \
   while (0)
 
@@ -74,14 +74,14 @@ typedef struct __libc_lock_recursive_opaque__ __libc_lock_recursive_t;
 
 /* Lock the named lock variable. */
 #define __libc_lock_lock(NAME) \
-  ({ lll_lock (&(NAME), 0); 0; })
+  ({ lll_lock ((NAME), 0); 0; })
 
 /* Lock the named lock variable. */
-#define __libc_lock_trylock(NAME) lll_trylock (&(NAME))
+#define __libc_lock_trylock(NAME) lll_trylock (NAME)
 
 /* Unlock the named lock variable. */
 #define __libc_lock_unlock(NAME) \
-  ({ lll_unlock (&(NAME), 0); 0; })
+  ({ lll_unlock ((NAME), 0); 0; })
 
 #define __libc_lock_define_recursive(CLASS,NAME) \
   CLASS __libc_lock_recursive_t NAME;
@@ -111,7 +111,7 @@ typedef struct __libc_lock_recursive_opaque__ __libc_lock_recursive_t;
    int __r = 0; \
    if (__self == __lock->owner) \
      ++__lock->cnt; \
-    else if ((__r = lll_trylock (&__lock->lock)) == 0) \
+    else if ((__r = lll_trylock (__lock->lock)) == 0) \
      __lock->owner = __self, __lock->cnt = 1; \
    __r; \
  })
@@ -122,7 +122,7 @@ typedef struct __libc_lock_recursive_opaque__ __libc_lock_recursive_t;
    void *__self = __libc_lock_owner_self (); \
    if (__self != __lock->owner) \
      { \
-        lll_lock (&__lock->lock, 0); \
+        lll_lock (__lock->lock, 0); \
        __lock->owner = __self; \
      } \
    ++__lock->cnt; \
@@ -135,7 +135,7 @@ typedef struct __libc_lock_recursive_opaque__ __libc_lock_recursive_t;
    if (--__lock->cnt == 0) \
      { \
        __lock->owner = 0; \
-        lll_unlock (&__lock->lock, 0); \
+        lll_unlock (__lock->lock, 0); \
      } \
  })
 
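Taken together, the lowlevellock.h hunks keep the futex-style protocol intact: a lock cell holds 0 (unlocked), 1 (locked, uncontended) or 2 (locked with possible waiters), and the gsync calls block on and wake the cell's address. A hedged sketch of that protocol, reconstructed from the visible macro bodies above (the uncontended fast path is elided in the hunk, so its exact form is an assumption here):

    /* Sketch only -- mirrors the __lll_lock/__lll_unlock logic above,
       not a drop-in implementation.  */
    static void
    sketch_lock (int *iptr, int flags)
    {
      /* Assumed fast path: try an uncontended 0 -> 1 transition,
         as __lll_trylock does.  */
      if (*iptr == 0
          && atomic_compare_and_exchange_bool_acq (iptr, 1, 0) == 0)
        return;
      /* Slow path, as in the visible part of __lll_lock: mark the
         lock contended (2) and sleep while it still reads 2.  */
      while (atomic_exchange_acq (iptr, 2) != 0)
        __lll_wait (iptr, 2, flags);
    }

    static void
    sketch_unlock (int *iptr, int flags)
    {
      /* Release; if the old value was 2, a waiter may be asleep.  */
      if (atomic_exchange_rel (iptr, 0) == 2)
        __lll_wake (iptr, flags);
    }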