Use __asm __volatile (__lll_acq_instr ::: "memory") instead of atomic_full_barrier.

2007-07-31  Jakub Jelinek  <jakub@redhat.com>

* allocatestack.c (stack_cache_lock): Change type to int. (get_cached_stack, allocate_stack, __deallocate_stack, __make_stacks_executable, __find_thread_by_id, __nptl_setxid, __pthread_init_static_tls, __wait_lookup_done): Add LLL_PRIVATE as second argument to lll_lock and lll_unlock macros on stack_cache_lock.
* pthread_create.c (__find_in_stack_list): Likewise. (start_thread): Similarly with pd->lock. Use lll_robust_dead macro instead of lll_robust_mutex_dead, pass LLL_SHARED to it as second argument.
* descr.h (struct pthread): Change lock and setxid_futex field type to int.
* old_pthread_cond_broadcast.c (__pthread_cond_broadcast_2_0): Use LLL_LOCK_INITIALIZER instead of LLL_MUTEX_LOCK_INITIALIZER.
* old_pthread_cond_signal.c (__pthread_cond_signal_2_0): Likewise.
* old_pthread_cond_timedwait.c (__pthread_cond_timedwait_2_0): Likewise.
* old_pthread_cond_wait.c (__pthread_cond_wait_2_0): Likewise.
* pthread_cond_init.c (__pthread_cond_init): Likewise.
* pthreadP.h (__attr_list_lock): Change type to int.
* pthread_attr_init.c (__attr_list_lock): Likewise.
* pthread_barrier_destroy.c (pthread_barrier_destroy): Pass ibarrier->private ^ FUTEX_PRIVATE_FLAG as second argument to lll_{,un}lock.
* pthread_barrier_wait.c (pthread_barrier_wait): Likewise and also for lll_futex_{wake,wait}.
* pthread_barrier_init.c (pthread_barrier_init): Make iattr a pointer to const.
* pthread_cond_broadcast.c (__pthread_cond_broadcast): Pass LLL_SHARED as second argument to lll_{,un}lock.
* pthread_cond_destroy.c (__pthread_cond_destroy): Likewise.
* pthread_cond_signal.c (__pthread_cond_signal): Likewise.
* pthread_cond_timedwait.c (__pthread_cond_timedwait): Likewise.
* pthread_cond_wait.c (__condvar_cleanup, __pthread_cond_wait): Likewise.
* pthread_getattr_np.c (pthread_getattr_np): Add LLL_PRIVATE as second argument to lll_{,un}lock macros on pd->lock.
* pthread_getschedparam.c (__pthread_getschedparam): Likewise.
* pthread_setschedparam.c (__pthread_setschedparam): Likewise.
* pthread_setschedprio.c (pthread_setschedprio): Likewise.
* tpp.c (__pthread_tpp_change_priority, __pthread_current_priority): Likewise.
* sysdeps/pthread/createthread.c (do_clone, create_thread): Likewise.
* pthread_once.c (once_lock): Change type to int. (__pthread_once): Pass LLL_PRIVATE as second argument to lll_{,un}lock macros on once_lock.
* pthread_rwlock_rdlock.c (__pthread_rwlock_rdlock): Use lll_{,un}lock macros instead of lll_mutex_{,un}lock, pass rwlock->__data.__shared as second argument to them and similarly for lll_futex_w*.
* pthread_rwlock_timedrdlock.c (pthread_rwlock_timedrdlock): Likewise.
* pthread_rwlock_timedwrlock.c (pthread_rwlock_timedwrlock): Likewise.
* pthread_rwlock_tryrdlock.c (__pthread_rwlock_tryrdlock): Likewise.
* pthread_rwlock_trywrlock.c (__pthread_rwlock_trywrlock): Likewise.
* pthread_rwlock_unlock.c (__pthread_rwlock_unlock): Likewise.
* pthread_rwlock_wrlock.c (__pthread_rwlock_wrlock): Likewise.
* sem_close.c (sem_close): Pass LLL_PRIVATE as second argument to lll_{,un}lock macros on __sem_mappings_lock.
* sem_open.c (check_add_mapping): Likewise. (__sem_mappings_lock): Change type to int.
* semaphoreP.h (__sem_mappings_lock): Likewise.
* pthread_mutex_lock.c (LLL_MUTEX_LOCK, LLL_MUTEX_TRYLOCK, LLL_ROBUST_MUTEX_LOCK): Use lll_{,try,robust_}lock macros instead of lll_*mutex_*, pass LLL_SHARED as last argument. (__pthread_mutex_lock): Use lll_unlock instead of lll_mutex_unlock, pass LLL_SHARED as last argument.
* sysdeps/unix/sysv/linux/pthread_mutex_cond_lock.c (LLL_MUTEX_LOCK, LLL_MUTEX_TRYLOCK, LLL_ROBUST_MUTEX_LOCK): Use lll_{cond_,cond_try,robust_cond}lock macros instead of lll_*mutex_*, pass LLL_SHARED as last argument.
* pthread_mutex_timedlock.c (pthread_mutex_timedlock): Use lll_{timed,try,robust_timed,un}lock instead of lll_*mutex*, pass LLL_SHARED as last argument.
* pthread_mutex_trylock.c (__pthread_mutex_trylock): Similarly.
* pthread_mutex_unlock.c (__pthread_mutex_unlock_usercnt): Similarly.
* sysdeps/pthread/bits/libc-lock.h (__libc_lock_lock, __libc_lock_lock_recursive, __libc_lock_unlock, __libc_lock_unlock_recursive): Pass LLL_PRIVATE as second argument to lll_{,un}lock.
* sysdeps/pthread/bits/stdio-lock.h (_IO_lock_lock, _IO_lock_unlock): Likewise.
* sysdeps/unix/sysv/linux/fork.c (__libc_fork): Don't use compound literal.
* sysdeps/unix/sysv/linux/unregister-atfork.c (__unregister_atfork): Pass LLL_PRIVATE as second argument to lll_{,un}lock macros on __fork_lock.
* sysdeps/unix/sysv/linux/register-atfork.c (__register_atfork, free_mem): Likewise. (__fork_lock): Change type to int.
* sysdeps/unix/sysv/linux/fork.h (__fork_lock): Likewise.
* sysdeps/unix/sysv/linux/sem_post.c (__new_sem_post): Pass isem->private ^ FUTEX_PRIVATE_FLAG as second argument to lll_futex_wake.
* sysdeps/unix/sysv/linux/sem_timedwait.c (sem_timedwait): Likewise.
* sysdeps/unix/sysv/linux/sem_wait.c (__new_sem_wait): Likewise.
* sysdeps/unix/sysv/linux/lowlevellock.c (__lll_lock_wait_private): New function. (__lll_lock_wait, __lll_timedlock_wait): Add private argument and pass it through to lll_futex_*wait, only compile in when IS_IN_libpthread.
* sysdeps/unix/sysv/linux/lowlevelrobustlock.c (__lll_robust_lock_wait, __lll_robust_timedlock_wait): Add private argument and pass it through to lll_futex_*wait.
* sysdeps/unix/sysv/linux/alpha/lowlevellock.h: Renamed all lll_mutex_* resp. lll_robust_mutex_* macros to lll_* resp. lll_robust_*. Renamed all __lll_mutex_* resp. __lll_robust_mutex_* inline functions to __lll_* resp. __lll_robust_*. (LLL_MUTEX_LOCK_INITIALIZER): Remove. (lll_mutex_dead): Add private argument. (__lll_lock_wait_private): New prototype. (__lll_lock_wait, __lll_robust_lock_wait, __lll_lock_timedwait, __lll_robust_lock_timedwait): Add private argument to prototypes. (__lll_lock): Add private argument, if it is constant LLL_PRIVATE, call __lll_lock_wait_private, otherwise pass private to __lll_lock_wait. (__lll_robust_lock, __lll_cond_lock, __lll_timedlock, __lll_robust_timedlock): Add private argument, pass it to __lll_*wait functions. (__lll_unlock): Add private argument, if it is constant LLL_PRIVATE, call __lll_unlock_wake_private, otherwise pass private to __lll_unlock_wake. (__lll_robust_unlock): Add private argument, pass it to __lll_robust_unlock_wake. (lll_lock, lll_robust_lock, lll_cond_lock, lll_timedlock, lll_robust_timedlock, lll_unlock, lll_robust_unlock): Add private argument, pass it through to __lll_* inline function. (__lll_mutex_unlock_force, lll_mutex_unlock_force): Remove. (lll_lock_t): Remove. (__lll_cond_wait, __lll_cond_timedwait, __lll_cond_wake, __lll_cond_broadcast, lll_cond_wait, lll_cond_timedwait, lll_cond_wake, lll_cond_broadcast): Remove.
* sysdeps/unix/sysv/linux/ia64/lowlevellock.h: Likewise.
* sysdeps/unix/sysv/linux/powerpc/lowlevellock.h: Likewise.
* sysdeps/unix/sysv/linux/s390/lowlevellock.h: Likewise.
* sysdeps/unix/sysv/linux/sparc/lowlevellock.h: Likewise.
* sysdeps/unix/sysv/linux/i386/lowlevellock.h: Allow including the header from assembler. Renamed all lll_mutex_* resp. lll_robust_mutex_* macros to lll_* resp. lll_robust_*. (LOCK, FUTEX_CMP_REQUEUE, FUTEX_WAKE_OP, FUTEX_OP_CLEAR_WAKE_IF_GT_ONE): Define. (LLL_MUTEX_LOCK_INITIALIZER, LLL_MUTEX_LOCK_INITIALIZER_LOCKED, LLL_MUTEX_LOCK_INITIALIZER_WAITERS): Remove. (__lll_mutex_lock_wait, __lll_mutex_timedlock_wait, __lll_mutex_unlock_wake, __lll_lock_wait, __lll_unlock_wake): Remove prototype. (__lll_trylock_asm, __lll_lock_asm_start, __lll_unlock_asm): Define. (lll_robust_trylock, lll_cond_trylock): Use LLL_LOCK_INITIALIZER* rather than LLL_MUTEX_LOCK_INITIALIZER* macros. (lll_trylock): Likewise, use __lll_trylock_asm, pass MULTIPLE_THREADS_OFFSET as another asm operand. (lll_lock): Add private argument, use __lll_lock_asm_start, pass MULTIPLE_THREADS_OFFSET as last asm operand, call __lll_lock_wait_private if private is constant LLL_PRIVATE, otherwise pass private as another argument to __lll_lock_wait. (lll_robust_lock, lll_cond_lock, lll_robust_cond_lock, lll_timedlock, lll_robust_timedlock): Add private argument, pass private as another argument to __lll_*lock_wait call. (lll_unlock): Add private argument, use __lll_unlock_asm, pass MULTIPLE_THREADS_OFFSET as another asm operand, call __lll_unlock_wake_private if private is constant LLL_PRIVATE, otherwise pass private as another argument to __lll_unlock_wake. (lll_robust_unlock): Add private argument, pass private as another argument to __lll_unlock_wake. (lll_robust_dead): Add private argument, use __lll_private_flag macro. (lll_islocked): Use LLL_LOCK_INITIALIZER instead of LLL_MUTEX_LOCK_INITIALIZER. (lll_lock_t): Remove. (LLL_LOCK_INITIALIZER_WAITERS): Define. (__lll_cond_wait, __lll_cond_timedwait, __lll_cond_wake, __lll_cond_broadcast, lll_cond_wait, lll_cond_timedwait, lll_cond_wake, lll_cond_broadcast): Remove.
* sysdeps/unix/sysv/linux/x86_64/lowlevellock.h: Likewise.
* sysdeps/unix/sysv/linux/i386/i486/libc-lowlevellock.S: Revert 2007-05-2{3,9} changes.
* sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S: Include kernel-features.h and lowlevellock.h. (LOAD_PRIVATE_FUTEX_WAIT): Define. (LOAD_FUTEX_WAIT): Rewritten. (LOCK, SYS_gettimeofday, SYS_futex, FUTEX_WAIT, FUTEX_WAKE): Don't define. (__lll_lock_wait_private, __lll_unlock_wake_private): New functions. (__lll_mutex_lock_wait): Rename to ... (__lll_lock_wait): ... this. Take futex addr from %edx instead of %ecx, %ecx is now private argument. Don't compile in for libc.so. (__lll_mutex_timedlock_wait): Rename to ... (__lll_timedlock_wait): ... this. Use __NR_gettimeofday. %esi contains private argument. Don't compile in for libc.so. (__lll_mutex_unlock_wake): Rename to ... (__lll_unlock_wake): ... this. %ecx contains private argument. Don't compile in for libc.so. (__lll_timedwait_tid): Use __NR_gettimeofday.
* sysdeps/unix/sysv/linux/i386/i486/lowlevelrobustlock.S: Include kernel-features.h and lowlevellock.h. (LOAD_FUTEX_WAIT): Define. (LOCK, SYS_gettimeofday, SYS_futex, FUTEX_WAIT, FUTEX_WAKE): Don't define. (__lll_robust_mutex_lock_wait): Rename to ... (__lll_robust_lock_wait): ... this. Futex addr is now in %edx argument, %ecx argument contains private. Use LOAD_FUTEX_WAIT macro. (__lll_robust_mutex_timedlock_wait): Rename to ... (__lll_robust_timedlock_wait): ... this. Use __NR_gettimeofday. %esi argument contains private, use LOAD_FUTEX_WAIT macro.
* sysdeps/unix/sysv/linux/i386/i486/pthread_barrier_wait.S: Include lowlevellock.h. (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, LOCK): Don't define. (pthread_barrier_wait): Rename __lll_mutex_* to __lll_*, pass PRIVATE(%ebx) ^ LLL_SHARED as private argument in %ecx to __lll_lock_wait and __lll_unlock_wake, pass MUTEX(%ebx) address to __lll_lock_wait in %edx.
* sysdeps/unix/sysv/linux/i386/i486/pthread_cond_broadcast.S: Include lowlevellock.h and pthread-errnos.h. (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, EINVAL, LOCK): Don't define. (__pthread_cond_broadcast): Rename __lll_mutex_* to __lll_*, pass cond_lock address in %edx rather than %ecx to __lll_lock_wait, pass LLL_SHARED in %ecx to both __lll_lock_wait and __lll_unlock_wake.
* sysdeps/unix/sysv/linux/i386/i486/pthread_cond_signal.S: Include lowlevellock.h and pthread-errnos.h. (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_WAKE_OP, FUTEX_OP_CLEAR_WAKE_IF_GT_ONE, EINVAL, LOCK): Don't define. (__pthread_cond_signal): Rename __lll_mutex_* to __lll_*, pass cond_lock address in %edx rather than %ecx to __lll_lock_wait, pass LLL_SHARED in %ecx to both __lll_lock_wait and __lll_unlock_wake.
* sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S: Include lowlevellock.h. (SYS_futex, SYS_gettimeofday, FUTEX_WAIT, FUTEX_WAKE, LOCK): Don't define. (__pthread_cond_timedwait): Rename __lll_mutex_* to __lll_*, pass cond_lock address in %edx rather than %ecx to __lll_lock_wait, pass LLL_SHARED in %ecx to both __lll_lock_wait and __lll_unlock_wake. Use __NR_gettimeofday.
* sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S: Include lowlevellock.h. (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, LOCK): Don't define. (__pthread_cond_wait, __condvar_w_cleanup): Rename __lll_mutex_* to __lll_*, pass cond_lock address in %edx rather than %ecx to __lll_lock_wait, pass LLL_SHARED in %ecx to both __lll_lock_wait and __lll_unlock_wake.
* sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_rdlock.S: Include lowlevellock.h. (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, LOCK): Don't define. (__pthread_rwlock_rdlock): Rename __lll_mutex_* to __lll_*, pass MUTEX(%ebx) address in %edx rather than %ecx to __lll_lock_wait, pass PSHARED(%ebx) in %ecx to both __lll_lock_wait and __lll_unlock_wake. Move return value from %ecx to %edx register.
* sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedrdlock.S: Include lowlevellock.h. (SYS_futex, SYS_gettimeofday, FUTEX_WAIT, FUTEX_WAKE, LOCK): Don't define. (__pthread_rwlock_wrlock): Rename __lll_mutex_* to __lll_*, pass MUTEX(%ebp) address in %edx rather than %ecx to __lll_lock_wait, pass PSHARED(%ebp) in %ecx to both __lll_lock_wait and __lll_unlock_wake. Move return value from %ecx to %edx register. Use __NR_gettimeofday.
* sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedwrlock.S: Include lowlevellock.h. (SYS_futex, SYS_gettimeofday, FUTEX_WAIT, FUTEX_WAKE, LOCK): Don't define. (__pthread_rwlock_wrlock): Rename __lll_mutex_* to __lll_*, pass MUTEX(%ebp) address in %edx rather than %ecx to __lll_lock_wait, pass PSHARED(%ebp) in %ecx to both __lll_lock_wait and __lll_unlock_wake. Move return value from %ecx to %edx register. Use __NR_gettimeofday.
* sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_unlock.S: Include lowlevellock.h. (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, LOCK): Don't define. (__pthread_rwlock_unlock): Rename __lll_mutex_* to __lll_*, pass MUTEX(%edi) address in %edx rather than %ecx to __lll_lock_wait, pass PSHARED(%edi) in %ecx to both __lll_lock_wait and __lll_unlock_wake.
* sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_wrlock.S: Include lowlevellock.h. (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, LOCK): Don't define. (__pthread_rwlock_wrlock): Rename __lll_mutex_* to __lll_*, pass MUTEX(%ebx) address in %edx rather than %ecx to __lll_lock_wait, pass PSHARED(%ebx) in %ecx to both __lll_lock_wait and __lll_unlock_wake. Move return value from %ecx to %edx register.
* sysdeps/unix/sysv/linux/i386/pthread_once.S: Include lowlevellock.h. (LOCK, SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_PRIVATE_FLAG): Don't define.
* sysdeps/unix/sysv/linux/i386/i486/sem_post.S: Include lowlevellock.h. (LOCK, SYS_futex, FUTEX_WAKE): Don't define.
* sysdeps/unix/sysv/linux/i386/i486/sem_timedwait.S: Include lowlevellock.h. (LOCK, SYS_futex, SYS_gettimeofday, FUTEX_WAIT): Don't define. (sem_timedwait): Use __NR_gettimeofday.
* sysdeps/unix/sysv/linux/i386/i486/sem_trywait.S: Include lowlevellock.h. (LOCK): Don't define.
* sysdeps/unix/sysv/linux/i386/i486/sem_wait.S: Include lowlevellock.h. (LOCK, SYS_futex, FUTEX_WAIT): Don't define.
* sysdeps/unix/sysv/linux/powerpc/sem_post.c: Wake only when there are waiters.
* sysdeps/unix/sysv/linux/x86_64/libc-lowlevellock.S: Revert 2007-05-2{3,9} changes.
* sysdeps/unix/sysv/linux/x86_64/lowlevellock.S: Include kernel-features.h and lowlevellock.h. (LOAD_PRIVATE_FUTEX_WAIT): Define. (LOAD_FUTEX_WAIT): Rewritten. (LOCK, SYS_futex, FUTEX_WAIT, FUTEX_WAKE): Don't define. (__lll_lock_wait_private, __lll_unlock_wake_private): New functions. (__lll_mutex_lock_wait): Rename to ... (__lll_lock_wait): ... this. %esi is now private argument. Don't compile in for libc.so. (__lll_mutex_timedlock_wait): Rename to ... (__lll_timedlock_wait): ... this. %esi contains private argument. Don't compile in for libc.so. (__lll_mutex_unlock_wake): Rename to ... (__lll_unlock_wake): ... this. %esi contains private argument. Don't compile in for libc.so.
* sysdeps/unix/sysv/linux/x86_64/lowlevelrobustlock.S: Include kernel-features.h and lowlevellock.h. (LOAD_FUTEX_WAIT): Define. (LOCK, SYS_futex, FUTEX_WAIT, FUTEX_WAKE): Don't define. (__lll_robust_mutex_lock_wait): Rename to ... (__lll_robust_lock_wait): ... this. %esi argument contains private. Use LOAD_FUTEX_WAIT macro. (__lll_robust_mutex_timedlock_wait): Rename to ... (__lll_robust_timedlock_wait): ... this. %esi argument contains private, use LOAD_FUTEX_WAIT macro.
* sysdeps/unix/sysv/linux/x86_64/pthread_barrier_wait.S: Include lowlevellock.h. (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, LOCK): Don't define. (pthread_barrier_wait): Rename __lll_mutex_* to __lll_*, pass PRIVATE(%rdi) ^ LLL_SHARED as private argument in %esi to __lll_lock_wait and __lll_unlock_wake.
* sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S: Include lowlevellock.h and pthread-errnos.h. (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, EINVAL, LOCK): Don't define. (__pthread_cond_broadcast): Rename __lll_mutex_* to __lll_*, pass LLL_SHARED in %esi to both __lll_lock_wait and __lll_unlock_wake.
* sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S: Include lowlevellock.h and pthread-errnos.h. (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_WAKE_OP, FUTEX_OP_CLEAR_WAKE_IF_GT_ONE, EINVAL, LOCK): Don't define. (__pthread_cond_signal): Rename __lll_mutex_* to __lll_*, pass LLL_SHARED in %esi to both __lll_lock_wait and __lll_unlock_wake.
* sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S: Include lowlevellock.h. (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, LOCK): Don't define. (__pthread_cond_timedwait): Rename __lll_mutex_* to __lll_*, pass LLL_SHARED in %esi to both __lll_lock_wait and __lll_unlock_wake.
* sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S: Include lowlevellock.h. (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, LOCK): Don't define. (__pthread_cond_wait, __condvar_cleanup): Rename __lll_mutex_* to __lll_*, pass LLL_SHARED in %esi to both __lll_lock_wait and __lll_unlock_wake.
* sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_rdlock.S: Include lowlevellock.h. (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_PRIVATE_FLAG, LOCK): Don't define. (__pthread_rwlock_rdlock): Rename __lll_mutex_* to __lll_*, pass PSHARED(%rdi) in %esi to both __lll_lock_wait and __lll_unlock_wake.
* sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedrdlock.S: Include lowlevellock.h. (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_PRIVATE_FLAG, LOCK): Don't define. (__pthread_rwlock_wrlock): Rename __lll_mutex_* to __lll_*, pass PSHARED(%rdi) in %esi to both __lll_lock_wait and __lll_unlock_wake.
* sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedwrlock.S: Include lowlevellock.h. (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_PRIVATE_FLAG, LOCK): Don't define. (__pthread_rwlock_wrlock): Rename __lll_mutex_* to __lll_*, pass PSHARED(%rdi) in %esi to both __lll_lock_wait and __lll_unlock_wake.
* sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_unlock.S: Include lowlevellock.h. (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_PRIVATE_FLAG, LOCK): Don't define. (__pthread_rwlock_unlock): Rename __lll_mutex_* to __lll_*, pass PSHARED(%rdi) in %esi to both __lll_lock_wait and __lll_unlock_wake.
* sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_wrlock.S: Include lowlevellock.h. (SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_PRIVATE_FLAG, LOCK): Don't define. (__pthread_rwlock_wrlock): Rename __lll_mutex_* to __lll_*, pass PSHARED(%rdi) in %ecx to both __lll_lock_wait and __lll_unlock_wake.
* sysdeps/unix/sysv/linux/x86_64/pthread_once.S: Include lowlevellock.h. (LOCK, SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_PRIVATE_FLAG): Don't define.
* sysdeps/unix/sysv/linux/x86_64/sem_post.S: Include lowlevellock.h. (LOCK, SYS_futex, FUTEX_WAKE): Don't define.
* sysdeps/unix/sysv/linux/x86_64/sem_timedwait.S: Include lowlevellock.h. (LOCK, SYS_futex, FUTEX_WAIT): Don't define.
* sysdeps/unix/sysv/linux/x86_64/sem_trywait.S: Include lowlevellock.h. (LOCK): Don't define.
* sysdeps/unix/sysv/linux/x86_64/sem_wait.S: Include lowlevellock.h. (LOCK, SYS_futex, FUTEX_WAIT): Don't define.
* sysdeps/unix/sysv/linux/sparc/internaltypes.h: New file.
* sysdeps/unix/sysv/linux/sparc/pthread_barrier_destroy.c: New file.
* sysdeps/unix/sysv/linux/sparc/pthread_barrier_init.c: New file.
* sysdeps/unix/sysv/linux/sparc/pthread_barrier_wait.c: New file.
* sysdeps/unix/sysv/linux/sparc/sparc32/lowlevellock.c (__lll_lock_wait_private): New function. (__lll_lock_wait, __lll_timedlock_wait): Add private argument, pass it to lll_futex_*wait. Don't compile in for libc.so.
* sysdeps/unix/sysv/linux/sparc/sparc32/pthread_barrier_init.c: Remove.
* sysdeps/unix/sysv/linux/sparc/sparc32/pthread_barrier_wait.c (struct sparc_pthread_barrier): Remove. (pthread_barrier_wait): Use union sparc_pthread_barrier instead of struct sparc_pthread_barrier. Pass ibarrier->s.pshared ? LLL_SHARED : LLL_PRIVATE to lll_{,un}lock and lll_futex_wait macros.
* sysdeps/unix/sysv/linux/sparc/sparc32/sparcv9/pthread_barrier_init.c: Remove.
* sysdeps/unix/sysv/linux/sparc/sparc32/sparcv9/pthread_barrier_wait.c: Include sparc pthread_barrier_wait.c instead of generic one.
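As a minimal illustration of the calling-convention change this log describes (a sketch, not code taken from the patch): lll_lock and lll_unlock now receive the futex scope as a second argument, for example on the now plain-int stack_cache_lock; the wrapper function below is hypothetical.

    static int stack_cache_lock = LLL_LOCK_INITIALIZER;  /* type changed to int */

    static void
    with_stack_cache_lock_held (void (*fn) (void))  /* hypothetical helper */
    {
      lll_lock (stack_cache_lock, LLL_PRIVATE);      /* never process-shared */
      fn ();
      lll_unlock (stack_cache_lock, LLL_PRIVATE);
    }

Process-shared objects instead pass LLL_SHARED, or a per-object private field, as in the pthread_mutex_*, condition variable and barrier entries above.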
/* libc-internal interface for mutex locks.  NPTL version.
   Copyright (C) 1996-2003, 2005, 2007 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If not,
   write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */

#ifndef _BITS_LIBC_LOCK_H
#define _BITS_LIBC_LOCK_H 1

#include <pthread.h>
#define __need_NULL
#include <stddef.h>


/* Fortunately Linux now has a means to do locking which is realtime
   safe without the aid of the thread library.  We also need no fancy
   options like error checking mutexes etc.  We only need simple
   locks, maybe recursive.  This can be easily and cheaply implemented
   using futexes.  We will use them everywhere except in ld.so since
   ld.so might be used on old kernels with a different libc.so.  */
#ifdef _LIBC
# include <lowlevellock.h>
# include <tls.h>
# include <pthread-functions.h>
#endif

/* Mutex type.  */
#if defined _LIBC || defined _IO_MTSAFE_IO
# if (defined NOT_IN_libc && !defined IS_IN_libpthread) || !defined _LIBC
typedef pthread_mutex_t __libc_lock_t;
typedef struct { pthread_mutex_t mutex; } __libc_lock_recursive_t;
# else
typedef int __libc_lock_t;
typedef struct { int lock; int cnt; void *owner; } __libc_lock_recursive_t;
# endif
typedef struct { pthread_mutex_t mutex; } __rtld_lock_recursive_t;
# ifdef __USE_UNIX98
typedef pthread_rwlock_t __libc_rwlock_t;
# else
typedef struct __libc_rwlock_opaque__ __libc_rwlock_t;
# endif
#else
typedef struct __libc_lock_opaque__ __libc_lock_t;
typedef struct __libc_lock_recursive_opaque__ __libc_lock_recursive_t;
typedef struct __libc_rwlock_opaque__ __libc_rwlock_t;
#endif

/* Type for key to thread-specific data.  */
typedef pthread_key_t __libc_key_t;

/* Define a lock variable NAME with storage class CLASS.  The lock must be
   initialized with __libc_lock_init before it can be used (or define it
   with __libc_lock_define_initialized, below).  Use `extern' for CLASS to
   declare a lock defined in another module.  In public structure
   definitions you must use a pointer to the lock structure (i.e., NAME
   begins with a `*'), because its storage size will not be known outside
   of libc.  */
#define __libc_lock_define(CLASS,NAME) \
  CLASS __libc_lock_t NAME;
#define __libc_rwlock_define(CLASS,NAME) \
  CLASS __libc_rwlock_t NAME;
#define __libc_lock_define_recursive(CLASS,NAME) \
  CLASS __libc_lock_recursive_t NAME;
#define __rtld_lock_define_recursive(CLASS,NAME) \
  CLASS __rtld_lock_recursive_t NAME;
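
/* Illustrative usage sketch, not part of the original header; the
   identifiers `__foo_lock', `__bar_lock' and `struct foo' are
   hypothetical.  */
#if 0
/* Definition, private to one file.  */
__libc_lock_define (static, __foo_lock)
/* Declaration of a lock defined in another libc module.  */
__libc_lock_define (extern, __bar_lock)
/* In an installed header only a pointer may appear (NAME starts with
   `*'), because the lock's size is not known outside of libc.  */
struct foo
{
  __libc_lock_define (, *__lock)
};
#endif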

/* Define an initialized lock variable NAME with storage class CLASS.

   For the C library we take a deeper look at the initializer.  For
   this implementation all fields are initialized to zero.  Therefore
   we don't initialize the variable which allows putting it into the
   BSS section.  (Except on PA-RISC and other odd architectures, where
   initialized locks must be set to one due to the lack of normal
   atomic operations.) */

#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# if LLL_LOCK_INITIALIZER == 0
#  define __libc_lock_define_initialized(CLASS,NAME) \
  CLASS __libc_lock_t NAME;
# else
#  define __libc_lock_define_initialized(CLASS,NAME) \
  CLASS __libc_lock_t NAME = LLL_LOCK_INITIALIZER;
# endif
#else
# if __LT_SPINLOCK_INIT == 0
#  define __libc_lock_define_initialized(CLASS,NAME) \
  CLASS __libc_lock_t NAME;
# else
#  define __libc_lock_define_initialized(CLASS,NAME) \
  CLASS __libc_lock_t NAME = PTHREAD_MUTEX_INITIALIZER;
# endif
#endif
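
/* Illustrative sketch (not part of the original header): with
   LLL_LOCK_INITIALIZER == 0 the libc-internal branch above lets an
   initialized lock live in .bss.  `__example_lock' is hypothetical.  */
#if 0
__libc_lock_define_initialized (static, __example_lock)
/* Inside libc/libpthread this expands to plain `static __libc_lock_t
   __example_lock;', i.e. an int with no initializer; outside libc it
   becomes a pthread_mutex_t set up with PTHREAD_MUTEX_INITIALIZER.  */
#endif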

#define __libc_rwlock_define_initialized(CLASS,NAME) \
  CLASS __libc_rwlock_t NAME = PTHREAD_RWLOCK_INITIALIZER;

/* Define an initialized recursive lock variable NAME with storage
   class CLASS.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# if LLL_LOCK_INITIALIZER == 0
#  define __libc_lock_define_initialized_recursive(CLASS,NAME) \
  CLASS __libc_lock_recursive_t NAME;
# else
#  define __libc_lock_define_initialized_recursive(CLASS,NAME) \
  CLASS __libc_lock_recursive_t NAME = _LIBC_LOCK_RECURSIVE_INITIALIZER;
# endif
# define _LIBC_LOCK_RECURSIVE_INITIALIZER \
  { LLL_LOCK_INITIALIZER, 0, NULL }
#else
# define __libc_lock_define_initialized_recursive(CLASS,NAME) \
  CLASS __libc_lock_recursive_t NAME = _LIBC_LOCK_RECURSIVE_INITIALIZER;
# define _LIBC_LOCK_RECURSIVE_INITIALIZER \
  {PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP}
#endif

#define __rtld_lock_define_initialized_recursive(CLASS,NAME) \
  CLASS __rtld_lock_recursive_t NAME = _RTLD_LOCK_RECURSIVE_INITIALIZER;
#define _RTLD_LOCK_RECURSIVE_INITIALIZER \
  {PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP}

#define __rtld_lock_initialize(NAME) \
  (void) ((NAME) = (__rtld_lock_recursive_t) _RTLD_LOCK_RECURSIVE_INITIALIZER)

/* If we check for a weakly referenced symbol and then perform a
   normal jump to it the code generated for some platforms in case of
   PIC is unnecessarily slow.  What would happen is that the function
   is first referenced as data and then it is called indirectly
   through the PLT.  We can make this a direct jump.  */
#ifdef __PIC__
# define __libc_maybe_call(FUNC, ARGS, ELSE) \
  (__extension__ ({ __typeof (FUNC) *_fn = (FUNC); \
                    _fn != NULL ? (*_fn) ARGS : ELSE; }))
#else
# define __libc_maybe_call(FUNC, ARGS, ELSE) \
  (FUNC != NULL ? FUNC ARGS : ELSE)
#endif

/* Call thread functions through the function pointer table.  */
#if defined SHARED && !defined NOT_IN_libc
# define PTFAVAIL(NAME) __libc_pthread_functions_init
# define __libc_ptf_call(FUNC, ARGS, ELSE) \
  (__libc_pthread_functions_init ? PTHFCT_CALL (ptr_##FUNC, ARGS) : ELSE)
# define __libc_ptf_call_always(FUNC, ARGS) \
  PTHFCT_CALL (ptr_##FUNC, ARGS)
#else
# define PTFAVAIL(NAME) (NAME != NULL)
# define __libc_ptf_call(FUNC, ARGS, ELSE) \
  __libc_maybe_call (FUNC, ARGS, ELSE)
# define __libc_ptf_call_always(FUNC, ARGS) \
  FUNC ARGS
#endif
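
/* Illustrative sketch (not part of the original header): callers go
   through __libc_ptf_call so that a process which never loaded
   libpthread skips locking entirely and just gets the ELSE value.
   The function `example_rdlock' is hypothetical.  */
#if 0
static int
example_rdlock (__libc_rwlock_t *r)
{
  /* In a shared libc this dispatches through __libc_pthread_functions
     once libpthread has registered itself; otherwise it yields 0
     without touching the lock.  */
  return __libc_ptf_call (__pthread_rwlock_rdlock, (r), 0);
}
#endif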

/* Initialize the named lock variable, leaving it in a consistent, unlocked
   state.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_init(NAME) ((NAME) = LLL_LOCK_INITIALIZER, 0)
#else
# define __libc_lock_init(NAME) \
  __libc_maybe_call (__pthread_mutex_init, (&(NAME), NULL), 0)
#endif
#define __libc_rwlock_init(NAME) \
  __libc_maybe_call (__pthread_rwlock_init, (&(NAME), NULL), 0)

/* Same as last but this time we initialize a recursive mutex.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_init_recursive(NAME) \
  ((NAME) = (__libc_lock_recursive_t) _LIBC_LOCK_RECURSIVE_INITIALIZER, 0)
#else
# define __libc_lock_init_recursive(NAME) \
  do { \
    if (__pthread_mutex_init != NULL) \
      { \
        pthread_mutexattr_t __attr; \
        __pthread_mutexattr_init (&__attr); \
        __pthread_mutexattr_settype (&__attr, PTHREAD_MUTEX_RECURSIVE_NP); \
        __pthread_mutex_init (&(NAME).mutex, &__attr); \
        __pthread_mutexattr_destroy (&__attr); \
      } \
  } while (0)
#endif

#define __rtld_lock_init_recursive(NAME) \
  do { \
    if (__pthread_mutex_init != NULL) \
      { \
        pthread_mutexattr_t __attr; \
        __pthread_mutexattr_init (&__attr); \
        __pthread_mutexattr_settype (&__attr, PTHREAD_MUTEX_RECURSIVE_NP); \
        __pthread_mutex_init (&(NAME).mutex, &__attr); \
        __pthread_mutexattr_destroy (&__attr); \
      } \
  } while (0)

/* Finalize the named lock variable, which must be locked.  It cannot be
   used again until __libc_lock_init is called again on it.  This must be
   called on a lock variable before the containing storage is reused.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_fini(NAME) ((void) 0)
#else
# define __libc_lock_fini(NAME) \
  __libc_maybe_call (__pthread_mutex_destroy, (&(NAME)), 0)
#endif
#define __libc_rwlock_fini(NAME) \
  __libc_maybe_call (__pthread_rwlock_destroy, (&(NAME)), 0)

/* Finalize recursive named lock.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_fini_recursive(NAME) ((void) 0)
#else
# define __libc_lock_fini_recursive(NAME) \
  __libc_maybe_call (__pthread_mutex_destroy, (&(NAME)), 0)
#endif

/* Lock the named lock variable.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_lock(NAME) \
  ({ lll_lock (NAME, LLL_PRIVATE); 0; })
#else
# define __libc_lock_lock(NAME) \
  __libc_maybe_call (__pthread_mutex_lock, (&(NAME)), 0)
#endif
#define __libc_rwlock_rdlock(NAME) \
  __libc_ptf_call (__pthread_rwlock_rdlock, (&(NAME)), 0)
#define __libc_rwlock_wrlock(NAME) \
  __libc_ptf_call (__pthread_rwlock_wrlock, (&(NAME)), 0)

/* Lock the recursive named lock variable.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_lock_recursive(NAME) \
  do { \
    void *self = THREAD_SELF; \
    if ((NAME).owner != self) \
      { \
        lll_lock ((NAME).lock, LLL_PRIVATE); \
        (NAME).owner = self; \
      } \
    ++(NAME).cnt; \
  } while (0)
#else
# define __libc_lock_lock_recursive(NAME) \
  __libc_maybe_call (__pthread_mutex_lock, (&(NAME).mutex), 0)
#endif

/* Try to lock the named lock variable.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_trylock(NAME) \
  lll_trylock (NAME)
#else
# define __libc_lock_trylock(NAME) \
  __libc_maybe_call (__pthread_mutex_trylock, (&(NAME)), 0)
#endif
#define __libc_rwlock_tryrdlock(NAME) \
  __libc_maybe_call (__pthread_rwlock_tryrdlock, (&(NAME)), 0)
#define __libc_rwlock_trywrlock(NAME) \
  __libc_maybe_call (__pthread_rwlock_trywrlock, (&(NAME)), 0)

/* Try to lock the recursive named lock variable.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_trylock_recursive(NAME) \
  ({ \
    int result = 0; \
    void *self = THREAD_SELF; \
    if ((NAME).owner != self) \
      { \
        if (lll_trylock ((NAME).lock) == 0) \
          { \
            (NAME).owner = self; \
            (NAME).cnt = 1; \
          } \
        else \
          result = EBUSY; \
      } \
    else \
      ++(NAME).cnt; \
    result; \
  })
#else
# define __libc_lock_trylock_recursive(NAME) \
  __libc_maybe_call (__pthread_mutex_trylock, (&(NAME)), 0)
#endif

#define __rtld_lock_trylock_recursive(NAME) \
  __libc_maybe_call (__pthread_mutex_trylock, (&(NAME).mutex), 0)

/* Unlock the named lock variable.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_unlock(NAME) \
  lll_unlock (NAME, LLL_PRIVATE)
#else
# define __libc_lock_unlock(NAME) \
  __libc_maybe_call (__pthread_mutex_unlock, (&(NAME)), 0)
#endif
#define __libc_rwlock_unlock(NAME) \
  __libc_ptf_call (__pthread_rwlock_unlock, (&(NAME)), 0)
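
/* Illustrative sketch (not part of the original header): the usual
   define/lock/unlock pattern in libc code.  Inside libc and libpthread
   the two calls compile down to lll_lock/lll_unlock with LLL_PRIVATE on
   a plain int; elsewhere they fall back to pthread_mutex_* calls if
   libpthread is present.  `__example_lock' and `example' are
   hypothetical.  */
#if 0
__libc_lock_define_initialized (static, __example_lock)

static void
example (void)
{
  __libc_lock_lock (__example_lock);
  /* ... critical section ... */
  __libc_lock_unlock (__example_lock);
}
#endif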

/* Unlock the recursive named lock variable.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
/* We do no error checking here.  */
# define __libc_lock_unlock_recursive(NAME) \
  do { \
    if (--(NAME).cnt == 0) \
      { \
        (NAME).owner = NULL; \
        lll_unlock ((NAME).lock, LLL_PRIVATE); \
      } \
  } while (0)
#else
# define __libc_lock_unlock_recursive(NAME) \
  __libc_maybe_call (__pthread_mutex_unlock, (&(NAME)), 0)
#endif
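
/* Illustrative sketch (not part of the original header): a recursive
   libc lock may be re-taken by the thread that already owns it; only
   the final unlock releases the underlying futex.  `__example_rlock',
   `outer' and `inner' are hypothetical.  */
#if 0
__libc_lock_define_initialized_recursive (static, __example_rlock)

static void
inner (void)
{
  __libc_lock_lock_recursive (__example_rlock);    /* owner matches: ++cnt */
  /* ... */
  __libc_lock_unlock_recursive (__example_rlock);  /* cnt still > 0: kept */
}

static void
outer (void)
{
  __libc_lock_lock_recursive (__example_rlock);    /* acquires (NAME).lock */
  inner ();
  __libc_lock_unlock_recursive (__example_rlock);  /* cnt reaches 0: freed */
}
#endif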

#if defined _LIBC && defined SHARED
# define __rtld_lock_default_lock_recursive(lock) \
  ++((pthread_mutex_t *)(lock))->__data.__count;

# define __rtld_lock_default_unlock_recursive(lock) \
  --((pthread_mutex_t *)(lock))->__data.__count;

# define __rtld_lock_lock_recursive(NAME) \
  GL(dl_rtld_lock_recursive) (&(NAME).mutex)

# define __rtld_lock_unlock_recursive(NAME) \
  GL(dl_rtld_unlock_recursive) (&(NAME).mutex)
#else
# define __rtld_lock_lock_recursive(NAME) \
  __libc_maybe_call (__pthread_mutex_lock, (&(NAME).mutex), 0)

# define __rtld_lock_unlock_recursive(NAME) \
  __libc_maybe_call (__pthread_mutex_unlock, (&(NAME).mutex), 0)
#endif

/* Define once control variable.  */
#if PTHREAD_ONCE_INIT == 0
/* Special case for static variables where we can avoid the initialization
   if it is zero.  */
# define __libc_once_define(CLASS, NAME) \
  CLASS pthread_once_t NAME
#else
# define __libc_once_define(CLASS, NAME) \
  CLASS pthread_once_t NAME = PTHREAD_ONCE_INIT
#endif

/* Call handler iff the first call.  */
#define __libc_once(ONCE_CONTROL, INIT_FUNCTION) \
  do { \
    if (PTFAVAIL (__pthread_once)) \
      __libc_ptf_call_always (__pthread_once, (&(ONCE_CONTROL), \
                                               INIT_FUNCTION)); \
    else if ((ONCE_CONTROL) == PTHREAD_ONCE_INIT) { \
      INIT_FUNCTION (); \
      (ONCE_CONTROL) |= 2; \
    } \
  } while (0)
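
/* Illustrative sketch (not part of the original header): one-time
   initialization through __libc_once.  Without libpthread the fallback
   branch runs the function directly and marks the control variable.
   `once' and `init_tables' are hypothetical.  */
#if 0
__libc_once_define (static, once);

static void
init_tables (void)
{
  /* Runs at most once per process.  */
}

static void
use_tables (void)
{
  __libc_once (once, init_tables);
  /* The tables are initialized from here on.  */
}
#endif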


/* Note that for I/O cleanup handling we are using the old-style
   cancel handling.  It does not have to be integrated with C++ since
   no C++ code is called in the middle.  The old-style handling is
   faster and the support is not going away.  */
extern void _pthread_cleanup_push (struct _pthread_cleanup_buffer *buffer,
                                   void (*routine) (void *), void *arg);
extern void _pthread_cleanup_pop (struct _pthread_cleanup_buffer *buffer,
                                  int execute);
extern void _pthread_cleanup_push_defer (struct _pthread_cleanup_buffer *buffer,
                                         void (*routine) (void *), void *arg);
extern void _pthread_cleanup_pop_restore (struct _pthread_cleanup_buffer *buffer,
                                          int execute);

/* Start critical region with cleanup.  */
#define __libc_cleanup_region_start(DOIT, FCT, ARG) \
  { struct _pthread_cleanup_buffer _buffer; \
    int _avail; \
    if (DOIT) { \
      _avail = PTFAVAIL (_pthread_cleanup_push_defer); \
      if (_avail) { \
        __libc_ptf_call_always (_pthread_cleanup_push_defer, (&_buffer, FCT, \
                                                              ARG)); \
      } else { \
        _buffer.__routine = (FCT); \
        _buffer.__arg = (ARG); \
      } \
    } else { \
      _avail = 0; \
    }

/* End critical region with cleanup.  */
#define __libc_cleanup_region_end(DOIT) \
    if (_avail) { \
      __libc_ptf_call_always (_pthread_cleanup_pop_restore, (&_buffer, DOIT));\
    } else if (DOIT) \
      _buffer.__routine (_buffer.__arg); \
  }

/* Sometimes we have to exit the block in the middle.  */
#define __libc_cleanup_end(DOIT) \
    if (_avail) { \
      __libc_ptf_call_always (_pthread_cleanup_pop_restore, (&_buffer, DOIT));\
    } else if (DOIT) \
      _buffer.__routine (_buffer.__arg)


/* Normal cleanup handling, based on C cleanup attribute.  */
__extern_inline void
__libc_cleanup_routine (struct __pthread_cleanup_frame *f)
{
  if (f->__do_it)
    f->__cancel_routine (f->__cancel_arg);
}

#define __libc_cleanup_push(fct, arg) \
  do { \
    struct __pthread_cleanup_frame __clframe \
      __attribute__ ((__cleanup__ (__libc_cleanup_routine))) \
      = { .__cancel_routine = (fct), .__cancel_arg = (arg), \
          .__do_it = 1 };

#define __libc_cleanup_pop(execute) \
    __clframe.__do_it = (execute); \
  } while (0)
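
/* Illustrative sketch (not part of the original header): pairing
   __libc_cleanup_push with __libc_cleanup_pop.  The handler is run via
   the cleanup attribute if the enclosed code is left early (e.g. by
   cancellation unwinding); __libc_cleanup_pop (1) would also run it on
   the normal path.  Assumes <stdlib.h>; `work' is hypothetical.  */
#if 0
static void
free_buffer (void *p)
{
  free (p);
}

static void
work (void)
{
  char *buf = malloc (1024);
  __libc_cleanup_push (free_buffer, buf);
  /* ... cancellable operations using buf ... */
  __libc_cleanup_pop (0);   /* 0: do not run the handler now.  */
  free (buf);
}
#endif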


/* Create thread-specific key.  */
#define __libc_key_create(KEY, DESTRUCTOR) \
  __libc_ptf_call (__pthread_key_create, (KEY, DESTRUCTOR), 1)

/* Get thread-specific data.  */
#define __libc_getspecific(KEY) \
  __libc_ptf_call (__pthread_getspecific, (KEY), NULL)

/* Set thread-specific data.  */
#define __libc_setspecific(KEY, VALUE) \
  __libc_ptf_call (__pthread_setspecific, (KEY, VALUE), 0)
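
/* Illustrative sketch (not part of the original header): keeping
   per-thread state behind a libc-internal key.  If libpthread is not
   loaded, key creation yields the ELSE value and there is only one
   thread to serve anyway.  Assumes <stdlib.h>; `get_state' is
   hypothetical.  */
#if 0
static __libc_key_t key;

static void
free_state (void *p)
{
  free (p);
}

/* Called once, e.g. via __libc_once:  __libc_key_create (&key, free_state);  */

static void *
get_state (void)
{
  void *p = __libc_getspecific (key);
  if (p == NULL)
    {
      p = calloc (1, 64);
      __libc_setspecific (key, p);
    }
  return p;
}
#endif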


/* Register handlers to execute before and after `fork'.  Note that the
   last parameter is NULL.  The handlers registered by the libc are
   never removed so this is OK.  */
#define __libc_atfork(PREPARE, PARENT, CHILD) \
  __register_atfork (PREPARE, PARENT, CHILD, NULL)
extern int __register_atfork (void (*__prepare) (void),
                              void (*__parent) (void),
                              void (*__child) (void),
                              void *__dso_handle);
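
/* Illustrative sketch (not part of the original header): registering
   fork handlers so a libc-internal lock is not inherited in a locked
   state by the child.  `__example_lock' and the three handlers are
   hypothetical.  */
#if 0
static void example_prepare (void) { __libc_lock_lock (__example_lock); }
static void example_parent (void) { __libc_lock_unlock (__example_lock); }
static void example_child (void) { __libc_lock_init (__example_lock); }

/* Registered once at startup:
     __libc_atfork (example_prepare, example_parent, example_child);  */
#endif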

/* Functions that are used by this file and are internal to the GNU C
   library.  */

extern int __pthread_mutex_init (pthread_mutex_t *__mutex,
                                 __const pthread_mutexattr_t *__mutex_attr);

extern int __pthread_mutex_destroy (pthread_mutex_t *__mutex);

extern int __pthread_mutex_trylock (pthread_mutex_t *__mutex);

extern int __pthread_mutex_lock (pthread_mutex_t *__mutex);

extern int __pthread_mutex_unlock (pthread_mutex_t *__mutex);

extern int __pthread_mutexattr_init (pthread_mutexattr_t *__attr);

extern int __pthread_mutexattr_destroy (pthread_mutexattr_t *__attr);

extern int __pthread_mutexattr_settype (pthread_mutexattr_t *__attr,
                                        int __kind);

#ifdef __USE_UNIX98
extern int __pthread_rwlock_init (pthread_rwlock_t *__rwlock,
                                  __const pthread_rwlockattr_t *__attr);

extern int __pthread_rwlock_destroy (pthread_rwlock_t *__rwlock);

extern int __pthread_rwlock_rdlock (pthread_rwlock_t *__rwlock);

extern int __pthread_rwlock_tryrdlock (pthread_rwlock_t *__rwlock);

extern int __pthread_rwlock_wrlock (pthread_rwlock_t *__rwlock);

extern int __pthread_rwlock_trywrlock (pthread_rwlock_t *__rwlock);

extern int __pthread_rwlock_unlock (pthread_rwlock_t *__rwlock);
#endif

extern int __pthread_key_create (pthread_key_t *__key,
                                 void (*__destr_function) (void *));

extern int __pthread_setspecific (pthread_key_t __key,
                                  __const void *__pointer);

extern void *__pthread_getspecific (pthread_key_t __key);

extern int __pthread_once (pthread_once_t *__once_control,
                           void (*__init_routine) (void));

extern int __pthread_atfork (void (*__prepare) (void),
                             void (*__parent) (void),
                             void (*__child) (void));


/* Make the pthread functions weak so that we can elide them from
   single-threaded processes.  */
#ifndef __NO_WEAK_PTHREAD_ALIASES
# ifdef weak_extern
#  if _LIBC
#   include <bp-sym.h>
#  else
#   define BP_SYM(sym) sym
#  endif
weak_extern (BP_SYM (__pthread_mutex_init))
weak_extern (BP_SYM (__pthread_mutex_destroy))
weak_extern (BP_SYM (__pthread_mutex_lock))
weak_extern (BP_SYM (__pthread_mutex_trylock))
weak_extern (BP_SYM (__pthread_mutex_unlock))
weak_extern (BP_SYM (__pthread_mutexattr_init))
weak_extern (BP_SYM (__pthread_mutexattr_destroy))
weak_extern (BP_SYM (__pthread_mutexattr_settype))
weak_extern (BP_SYM (__pthread_rwlock_init))
weak_extern (BP_SYM (__pthread_rwlock_destroy))
weak_extern (BP_SYM (__pthread_rwlock_rdlock))
weak_extern (BP_SYM (__pthread_rwlock_tryrdlock))
weak_extern (BP_SYM (__pthread_rwlock_wrlock))
weak_extern (BP_SYM (__pthread_rwlock_trywrlock))
weak_extern (BP_SYM (__pthread_rwlock_unlock))
weak_extern (BP_SYM (__pthread_key_create))
weak_extern (BP_SYM (__pthread_setspecific))
weak_extern (BP_SYM (__pthread_getspecific))
weak_extern (BP_SYM (__pthread_once))
weak_extern (__pthread_initialize)
weak_extern (__pthread_atfork)
weak_extern (BP_SYM (_pthread_cleanup_push_defer))
weak_extern (BP_SYM (_pthread_cleanup_pop_restore))
weak_extern (BP_SYM (pthread_setcancelstate))
# else
#  pragma weak __pthread_mutex_init
#  pragma weak __pthread_mutex_destroy
#  pragma weak __pthread_mutex_lock
#  pragma weak __pthread_mutex_trylock
#  pragma weak __pthread_mutex_unlock
#  pragma weak __pthread_mutexattr_init
#  pragma weak __pthread_mutexattr_destroy
#  pragma weak __pthread_mutexattr_settype
#  pragma weak __pthread_rwlock_destroy
#  pragma weak __pthread_rwlock_rdlock
#  pragma weak __pthread_rwlock_tryrdlock
#  pragma weak __pthread_rwlock_wrlock
#  pragma weak __pthread_rwlock_trywrlock
#  pragma weak __pthread_rwlock_unlock
#  pragma weak __pthread_key_create
#  pragma weak __pthread_setspecific
#  pragma weak __pthread_getspecific
#  pragma weak __pthread_once
#  pragma weak __pthread_initialize
#  pragma weak __pthread_atfork
#  pragma weak _pthread_cleanup_push_defer
#  pragma weak _pthread_cleanup_pop_restore
#  pragma weak pthread_setcancelstate
# endif
#endif

#endif  /* bits/libc-lock.h */