1
0
mirror of https://sourceware.org/git/glibc.git synced 2025-08-01 10:06:57 +03:00
2002-08-29  Jakub Jelinek  <jakub@redhat.com>

	* stdio-common/vfprintf.c (vfprintf): Add builtin_expect for
	string_malloced, it is unlikely to be set.
	Only call free with non-NULL workspace.
	* sysdeps/sparc/sparc32/sparcv9/Makefile (sysdep-CFLAGS): Use
	-mcpu=ultrasparc, not only tune for it.
	(ASFLAGS*): Set unconditionally.

2002-08-29  Jakub Jelinek  <jakub@redhat.com>

	* sysdeps/generic/readelflib.c (process_elf_file): Make loadaddr
	ElfW(Addr).  Don't mask upper 32-bits and lower 12 bits off from
	p_vaddr/p_offset when computing loadaddr.
This commit is contained in:
Ulrich Drepper
2002-08-29 10:42:30 +00:00
parent 69f8b5e823
commit c98d82db4c
6 changed files with 51 additions and 26 deletions

View File

@ -1,3 +1,13 @@
2002-04-24 Steven Munroe <sjmunroe@us.ibm.com>
* spinlock.c (__pthread_lock): Fix spurious wakeup
handling. Don't clear lowest bit of list pointer as a sign that the
thread is still on the wait list. Don't restart after spurious wakeup
with spinning to get the lock.
(__pthread_unlock): Take the set lowest bit into account when handling
pointers to list elements.
Patch by Steve Munroe <sjmunroe@us.ibm.com>.
2002-08-28 Roland McGrath <roland@redhat.com>
* sysdeps/pthread/timer_routines.c (thread_func): Fix type in cast.

View File

@ -85,8 +85,6 @@ void internal_function __pthread_lock(struct _pthread_fastlock * lock,
spurious_wakeup_count = 0;
spin_count = 0;
again:
/* On SMP, try spinning to get the lock. */
if (__pthread_smp_kernel) {
@ -114,6 +112,8 @@ again:
lock->__spinlock += (spin_count - lock->__spinlock) / 8;
}
again:
/* No luck, try once more or suspend. */
do {
@ -130,7 +130,7 @@ again:
}
if (self != NULL) {
THREAD_SETMEM(self, p_nextlock, (pthread_descr) (oldstatus & ~1L));
THREAD_SETMEM(self, p_nextlock, (pthread_descr) (oldstatus));
/* Make sure the store in p_nextlock completes before performing
the compare-and-swap */
MEMORY_BARRIER();
@ -214,7 +214,7 @@ again:
maxprio = thr->p_priority;
}
ptr = &(thr->p_nextlock);
thr = *ptr;
thr = (pthread_descr)((long)(thr->p_nextlock) & ~1L);
}
/* Remove max prio thread from waiting list. */
@ -226,13 +226,13 @@ again:
least significant bit is clear. */
thr = (pthread_descr) (oldstatus & ~1L);
if (! __compare_and_swap_with_release_semantics
(&lock->__status, oldstatus, (long)(thr->p_nextlock)))
(&lock->__status, oldstatus, (long)(thr->p_nextlock) & ~1L))
goto again;
} else {
/* No risk of concurrent access, remove max prio thread normally.
But in this case we must also flip the least significant bit
of the status to mark the lock as released. */
thr = *maxptr;
thr = (pthread_descr)((long)*maxptr & ~1L);
*maxptr = thr->p_nextlock;
/* Ensure deletion from linked list completes before we