1999-12-09  Andreas Jaeger  <aj@suse.de>

	* nis/nss_compat/compat-pwd.c (internal_getpwuid_r): Always set
	errno to ENOENT when returning NSS_STATUS_NOTFOUND.
	Reported by Christian Starkjohann <cs@obdev.at>.
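
	A condensed sketch of what the fix does at each NOTFOUND return
	in internal_getpwuid_r (errnop is the caller-supplied errno
	pointer; the full context is in the compat-pwd.c hunks below):

	  if (status == NSS_STATUS_RETURN)  /* We couldn't parse the entry */
	    {
	      *errnop = ENOENT;             /* pair ENOENT with NOTFOUND */
	      return NSS_STATUS_NOTFOUND;
	    }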

1999-12-09  Andreas Jaeger  <aj@suse.de>

	* sysdeps/i386/fpu/libm-test-ulps: Added some ulps.

1999-12-09  Jakub Jelinek  <jakub@redhat.com>

	* stdlib/longlong.h: Update from latest egcs version.

	* sysdeps/sparc/fpu/fegetenv.c: Add semicolons.

	* sysdeps/unix/sysv/linux/bits/errno.h (__errno_location): __THROW
	has to precede __attribute__, otherwise g++ barfs.

	* sysdeps/unix/sysv/linux/sparc/sys/ptrace.h: Make things compile
	on sparc64-*-linux.

	* sysdeps/unix/sysv/linux/sparc/sparc64/register-dump.h: Changed to
	use sigcontext.
	* sysdeps/unix/sysv/linux/sparc/sparc64/sigcontextinfo.h: Likewise.

1999-12-05  Wolfram Gloger  <wg@malloc.de>

	* malloc/malloc.c (arena_get2): If generating a new arena fails,
	try to generate a minimal one and hope for mmap_chunk() to succeed
	later.
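
	In outline (condensed from the arena_get2 hunk in the malloc.c
	diff below), the retry looks like this:

	  h = new_heap(size + (sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT));
	  if(!h) {
	    /* size was too large for a single heap: create a minimally
	       sized arena and let chunk_alloc() hand the big request to
	       mmap_chunk() later. */
	    h = new_heap(sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT);
	    if(!h)
	      return 0;
	  }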

1999-11-07  Wolfram Gloger  <wg@malloc.de>

	* malloc/thread-m.h [NO_THREADS]: The mutex_* macros now let
	mutex_t work as an `in-use' flag even without threads.
	* malloc/malloc.c (USE_ARENAS): New feature flag, controls support
	for multiple arenas separately from NO_THREADS.
	(mALLOc, chunk_realloc, mEMALIGn, cALLOc) [USE_ARENAS]: try to
	fall back to an mmap()ed arena when sbrk() has failed.
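
	Simplified from the mALLOc hunk below (the chunk_realloc,
	mEMALIGn and cALLOc fallbacks follow the same pattern; locking
	details elided):

	  victim = chunk_alloc(ar_ptr, nb);
	  if(!victim && ar_ptr == &main_arena) {
	#if USE_ARENAS
	    /* sbrk() failed, but address space may remain for mmap() */
	    ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, nb);
	    if(ar_ptr) {
	      victim = chunk_alloc(ar_ptr, nb);
	      (void)mutex_unlock(&ar_ptr->mutex);
	    }
	#endif
	  }
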
commit e9b3e3c5ce
parent 97e55a252e
Author: Ulrich Drepper
Date:   1999-12-10 04:37:40 +00:00

12 changed files with 599 additions and 265 deletions

ChangeLog

@@ -1,3 +1,44 @@
+1999-12-09  Andreas Jaeger  <aj@suse.de>
+
+	* nis/nss_compat/compat-pwd.c (internal_getpwuid_r): Always set
+	errno to ENOENT when returning NSS_STATUS_NOTFOUND.
+	Reported by Christian Starkjohann <cs@obdev.at>.
+
+1999-12-09  Andreas Jaeger  <aj@suse.de>
+
+	* sysdeps/i386/fpu/libm-test-ulps: Added some ulps.
+
+1999-12-09  Jakub Jelinek  <jakub@redhat.com>
+
+	* stdlib/longlong.h: Update from latest egcs version.
+	* sysdeps/sparc/fpu/fegetenv.c: Add semicolons.
+	* sysdeps/unix/sysv/linux/bits/errno.h (__errno_location): __THROW
+	has to precede __attribute__, otherwise g++ barfs.
+	* sysdeps/unix/sysv/linux/sparc/sys/ptrace.h: Make things compile
+	on sparc64-*-linux.
+	* sysdeps/unix/sysv/linux/sparc/sparc64/register-dump.h: Changed to
+	use sigcontext.
+	* sysdeps/unix/sysv/linux/sparc/sparc64/sigcontextinfo.h: Likewise.
+
+1999-12-05  Wolfram Gloger  <wg@malloc.de>
+
+	* malloc/malloc.c (arena_get2): If generating a new arena fails,
+	try to generate a minimal one and hope for mmap_chunk() to succeed
+	later.
+
+1999-11-07  Wolfram Gloger  <wg@malloc.de>
+
+	* malloc/thread-m.h [NO_THREADS]: The mutex_* macros now let
+	mutex_t work as an `in-use' flag even without threads.
+	* malloc/malloc.c (USE_ARENAS): New feature flag, controls support
+	for multiple arenas separately from NO_THREADS.
+	(mALLOc, chunk_realloc, mEMALIGn, cALLOc) [USE_ARENAS]: try to
+	fall back to an mmap()ed arena when sbrk() has failed.
+
 1999-12-09  Ulrich Drepper  <drepper@cygnus.com>
 
 	* sysdeps/unix/sysv/linux/Versions: Add getrlimit, setrlimit,

malloc/malloc.c

@@ -19,7 +19,7 @@
    write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
    Boston, MA 02111-1307, USA. */
 
-/* V2.6.4-pt3 Thu Feb 20 1997
+/* $Id$
 
   This work is mainly derived from malloc-2.6.4 by Doug Lea
   <dl@cs.oswego.edu>, which is available from:
@@ -202,6 +202,8 @@
   HAVE_MREMAP (default: defined as 0 unless Linux libc set)
     Define to non-zero to optionally make realloc() use mremap() to
     reallocate very large blocks.
+  USE_ARENAS (default: the same as HAVE_MMAP)
+    Enable support for multiple arenas, allocated using mmap().
   malloc_getpagesize (default: derived from system #includes)
     Either a constant or routine call returning the system page size.
   HAVE_USR_INCLUDE_MALLOC_H (default: NOT defined)
@@ -307,6 +309,9 @@
 # endif
 #else
 # include <sys/types.h>
+# if defined _LIBC || defined MALLOC_HOOKS
 extern char* getenv();
+# endif
 #endif
 
 #ifndef _LIBC
@@ -508,9 +513,10 @@ do { \
 #endif
 
 /*
-  Define HAVE_MMAP to optionally make malloc() use mmap() to
-  allocate very large blocks.  These will be returned to the
-  operating system immediately after a free().
+  Define HAVE_MMAP to optionally make malloc() use mmap() to allocate
+  very large blocks.  These will be returned to the operating system
+  immediately after a free().  HAVE_MMAP is also a prerequisite to
+  support multiple `arenas' (see USE_ARENAS below).
 */
 
 #ifndef HAVE_MMAP
@@ -529,6 +535,15 @@ do { \
 #define HAVE_MREMAP defined(__linux__) && !defined(__arm__)
 #endif
 
+/* Define USE_ARENAS to enable support for multiple `arenas'.  These
+   are allocated using mmap(), are necessary for threads and
+   occasionally useful to overcome address space limitations affecting
+   sbrk(). */
+#ifndef USE_ARENAS
+#define USE_ARENAS HAVE_MMAP
+#endif
+
 #if HAVE_MMAP
 
 #include <unistd.h>
@@ -987,12 +1002,15 @@ int     mALLOC_SET_STATe();
 #ifdef __cplusplus
-};  /* end of extern "C" */
+}  /* end of extern "C" */
 #endif
 
 #if !defined(NO_THREADS) && !HAVE_MMAP
 "Can't have threads support without mmap"
 #endif
+#if USE_ARENAS && !HAVE_MMAP
+"Can't have multiple arenas without mmap"
+#endif
 
 /*
@@ -1193,8 +1211,8 @@ typedef struct _arena {
 /* A heap is a single contiguous memory region holding (coalesceable)
    malloc_chunks.  It is allocated with mmap() and always starts at an
-   address aligned to HEAP_MAX_SIZE.  Not used unless compiling for
-   multiple threads. */
+   address aligned to HEAP_MAX_SIZE.  Not used unless compiling with
+   USE_ARENAS. */
 
 typedef struct _heap_info {
   arena *ar_ptr; /* Arena for this heap. */
@@ -1219,7 +1237,7 @@ static mchunkptr chunk_realloc(arena *ar_ptr, mchunkptr oldp,
 static mchunkptr chunk_align(arena *ar_ptr, INTERNAL_SIZE_T nb,
                              size_t alignment) internal_function;
 static int       main_trim(size_t pad) internal_function;
-#ifndef NO_THREADS
+#if USE_ARENAS
 static int       heap_trim(heap_info *heap, size_t pad) internal_function;
 #endif
 #if defined _LIBC || defined MALLOC_HOOKS
@@ -1244,7 +1262,7 @@ static mchunkptr chunk_alloc();
 static mchunkptr chunk_realloc();
 static mchunkptr chunk_align();
 static int       main_trim();
-#ifndef NO_THREADS
+#if USE_ARENAS
 static int       heap_trim();
 #endif
 #if defined _LIBC || defined MALLOC_HOOKS
@@ -1511,10 +1529,8 @@ static arena main_arena = {
 /* Thread specific data */
 
-#ifndef NO_THREADS
 static tsd_key_t arena_key;
 static mutex_t list_lock = MUTEX_INITIALIZER;
-#endif
 
 #if THREAD_STATS
 static int stat_n_heaps = 0;
@@ -1552,6 +1568,9 @@ static unsigned int max_n_mmaps = 0;
 static unsigned long mmapped_mem = 0;
 static unsigned long max_mmapped_mem = 0;
 
+/* Mapped memory in non-main arenas (reliable only for NO_THREADS). */
+static unsigned long arena_mem = 0;
+
 #ifndef _LIBC
@@ -1656,7 +1675,11 @@ ptmalloc_init __MALLOC_P((void))
 #endif
 {
 #if defined _LIBC || defined MALLOC_HOOKS
+# if __STD_C
   const char* s;
+# else
+  char* s;
+# endif
 #endif
 
   if(__malloc_initialized >= 0) return;
@@ -1679,12 +1702,12 @@ ptmalloc_init __MALLOC_P((void))
   if (__pthread_initialize != NULL)
     __pthread_initialize();
 #endif
+#endif /* !defined NO_THREADS */
   mutex_init(&main_arena.mutex);
   mutex_init(&list_lock);
   tsd_key_create(&arena_key, NULL);
   tsd_setspecific(arena_key, (Void_t *)&main_arena);
   thread_atfork(ptmalloc_lock_all, ptmalloc_unlock_all, ptmalloc_init_all);
-#endif /* !defined NO_THREADS */
 #if defined _LIBC || defined MALLOC_HOOKS
   if((s = __secure_getenv("MALLOC_TRIM_THRESHOLD_")))
     mALLOPt(M_TRIM_THRESHOLD, atoi(s));
@@ -1836,7 +1859,7 @@ static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
 #if defined __GNUC__ && __GNUC__ >= 2
 /* This function is only called from one place, inline it. */
-inline
+__inline__
 #endif
 static mchunkptr
 internal_function
@@ -1876,8 +1899,8 @@ mmap_chunk(size) size_t size;
   if ((unsigned long)mmapped_mem > (unsigned long)max_mmapped_mem)
     max_mmapped_mem = mmapped_mem;
 #ifdef NO_THREADS
-  if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem)
-    max_total_mem = mmapped_mem + sbrked_mem;
+  if ((unsigned long)(mmapped_mem + arena_mem + sbrked_mem) > max_total_mem)
+    max_total_mem = mmapped_mem + arena_mem + sbrked_mem;
 #endif
   return p;
 }
@@ -1947,8 +1970,8 @@ mremap_chunk(p, new_size) mchunkptr p; size_t new_size;
   if ((unsigned long)mmapped_mem > (unsigned long)max_mmapped_mem)
     max_mmapped_mem = mmapped_mem;
 #ifdef NO_THREADS
-  if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem)
-    max_total_mem = mmapped_mem + sbrked_mem;
+  if ((unsigned long)(mmapped_mem + arena_mem + sbrked_mem) > max_total_mem)
+    max_total_mem = mmapped_mem + arena_mem + sbrked_mem;
 #endif
   return p;
 }
@@ -1961,7 +1984,7 @@ mremap_chunk(p, new_size) mchunkptr p; size_t new_size;
 /* Managing heaps and arenas (for concurrent threads) */
 
-#ifndef NO_THREADS
+#if USE_ARENAS
 
 /* Create a new heap.  size is automatically rounded up to a multiple
    of the page size. */
@@ -2037,7 +2060,7 @@ grow_heap(h, diff) heap_info *h; long diff;
     /* Try to re-map the extra heap space freshly to save memory, and
        make it inaccessible. */
     if((char *)MMAP((char *)h + new_size, -diff, PROT_NONE,
                     MAP_PRIVATE|MAP_FIXED) == (char *) MAP_FAILED)
       return -2;
   }
   h->size = new_size;
@@ -2052,7 +2075,9 @@ grow_heap(h, diff) heap_info *h; long diff;
    First, try the one last locked successfully by this thread.  (This
    is the common case and handled with a macro for speed.)  Then, loop
    once over the circularly linked list of arenas.  If no arena is
-   readily available, create a new one. */
+   readily available, create a new one.  In this latter case, `size'
+   is just a hint as to how much memory will be required immediately
+   in the new arena. */
 
 #define arena_get(ptr, size) do { \
   Void_t *vptr = NULL; \
@@ -2112,13 +2137,24 @@ arena_get2(a_tsd, size) arena *a_tsd; size_t size;
   /* Nothing immediately available, so generate a new arena. */
   h = new_heap(size + (sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT));
-  if(!h)
-    return 0;
+  if(!h) {
+    /* Maybe size is too large to fit in a single heap.  So, just try
+       to create a minimally-sized arena and let chunk_alloc() attempt
+       to deal with the large request via mmap_chunk(). */
+    h = new_heap(sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT);
+    if(!h)
+      return 0;
+  }
   a = h->ar_ptr = (arena *)(h+1);
   for(i=0; i<NAV; i++)
     init_bin(a, i);
   a->next = NULL;
   a->size = h->size;
+  arena_mem += h->size;
+#ifdef NO_THREADS
+  if((unsigned long)(mmapped_mem + arena_mem + sbrked_mem) > max_total_mem)
+    max_total_mem = mmapped_mem + arena_mem + sbrked_mem;
+#endif
   tsd_setspecific(arena_key, (Void_t *)a);
   mutex_init(&a->mutex);
   i = mutex_lock(&a->mutex); /* remember result */
@@ -2152,14 +2188,14 @@ arena_get2(a_tsd, size) arena *a_tsd; size_t size;
  (((mchunkptr)(ptr) < top(&main_arena) && (char *)(ptr) >= sbrk_base) ? \
   &main_arena : heap_for_ptr(ptr)->ar_ptr)
 
-#else /* defined(NO_THREADS) */
-
-/* Without concurrent threads, there is only one arena. */
+#else /* !USE_ARENAS */
+
+/* There is only one arena, main_arena. */
 
 #define arena_get(ptr, sz) (ptr = &main_arena)
 #define arena_for_ptr(ptr) (&main_arena)
 
-#endif /* !defined(NO_THREADS) */
+#endif /* USE_ARENAS */
@@ -2189,7 +2225,7 @@ static void do_check_chunk(ar_ptr, p) arena *ar_ptr; mchunkptr p;
   /* No checkable chunk is mmapped */
   assert(!chunk_is_mmapped(p));
 
-#ifndef NO_THREADS
+#if USE_ARENAS
   if(ar_ptr != &main_arena) {
     heap_info *heap = heap_for_ptr(p);
     assert(heap->ar_ptr == ar_ptr);
@@ -2397,7 +2433,7 @@ arena *ar_ptr; mchunkptr p; INTERNAL_SIZE_T s;
 #if defined __GNUC__ && __GNUC__ >= 2
 /* This function is called only from one place, inline it. */
-inline
+__inline__
 #endif
 static void
 internal_function
@@ -2412,7 +2448,7 @@ malloc_extend_top(ar_ptr, nb) arena *ar_ptr; INTERNAL_SIZE_T nb;
   INTERNAL_SIZE_T old_top_size = chunksize(old_top);
   INTERNAL_SIZE_T top_size;     /* new size of top chunk */
 
-#ifndef NO_THREADS
+#if USE_ARENAS
   if(ar_ptr == &main_arena) {
 #endif
@@ -2492,12 +2528,11 @@ malloc_extend_top(ar_ptr, nb) arena *ar_ptr; INTERNAL_SIZE_T nb;
   if ((unsigned long)sbrked_mem > (unsigned long)max_sbrked_mem)
     max_sbrked_mem = sbrked_mem;
 #ifdef NO_THREADS
-  if ((unsigned long)(mmapped_mem + sbrked_mem) >
-      (unsigned long)max_total_mem)
-    max_total_mem = mmapped_mem + sbrked_mem;
+  if ((unsigned long)(mmapped_mem + arena_mem + sbrked_mem) > max_total_mem)
+    max_total_mem = mmapped_mem + arena_mem + sbrked_mem;
 #endif
 
-#ifndef NO_THREADS
+#if USE_ARENAS
   } else { /* ar_ptr != &main_arena */
     heap_info *old_heap, *heap;
     size_t old_heap_size;
@@ -2512,6 +2547,11 @@ malloc_extend_top(ar_ptr, nb) arena *ar_ptr; INTERNAL_SIZE_T nb;
     old_heap_size = old_heap->size;
     if(grow_heap(old_heap, MINSIZE + nb - old_top_size) == 0) {
       ar_ptr->size += old_heap->size - old_heap_size;
+      arena_mem += old_heap->size - old_heap_size;
+#ifdef NO_THREADS
+      if(mmapped_mem + arena_mem + sbrked_mem > max_total_mem)
+        max_total_mem = mmapped_mem + arena_mem + sbrked_mem;
+#endif
       top_size = ((char *)old_heap + old_heap->size) - (char *)old_top;
       set_head(old_top, top_size | PREV_INUSE);
       return;
@@ -2524,13 +2564,18 @@ malloc_extend_top(ar_ptr, nb) arena *ar_ptr; INTERNAL_SIZE_T nb;
     heap->ar_ptr = ar_ptr;
     heap->prev = old_heap;
     ar_ptr->size += heap->size;
+    arena_mem += heap->size;
+#ifdef NO_THREADS
+    if((unsigned long)(mmapped_mem + arena_mem + sbrked_mem) > max_total_mem)
+      max_total_mem = mmapped_mem + arena_mem + sbrked_mem;
+#endif
 
     /* Set up the new top, so we can safely use chunk_free() below. */
     top(ar_ptr) = chunk_at_offset(heap, sizeof(*heap));
     top_size = heap->size - sizeof(*heap);
     set_head(top(ar_ptr), top_size | PREV_INUSE);
   }
-#endif /* !defined(NO_THREADS) */
+#endif /* USE_ARENAS */
 
   /* We always land on a page boundary */
   assert(((unsigned long)((char*)top(ar_ptr) + top_size) & (pagesz-1)) == 0);
@@ -2649,16 +2694,27 @@ Void_t* mALLOc(bytes) size_t bytes;
   if(!ar_ptr)
     return 0;
   victim = chunk_alloc(ar_ptr, nb);
-  (void)mutex_unlock(&ar_ptr->mutex);
   if(!victim) {
     /* Maybe the failure is due to running out of mmapped areas. */
     if(ar_ptr != &main_arena) {
+      (void)mutex_unlock(&ar_ptr->mutex);
       (void)mutex_lock(&main_arena.mutex);
       victim = chunk_alloc(&main_arena, nb);
       (void)mutex_unlock(&main_arena.mutex);
+    } else {
+#if USE_ARENAS
+      /* ... or sbrk() has failed and there is still a chance to mmap() */
+      ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, nb);
+      (void)mutex_unlock(&main_arena.mutex);
+      if(ar_ptr) {
+        victim = chunk_alloc(ar_ptr, nb);
+        (void)mutex_unlock(&ar_ptr->mutex);
+      }
+#endif
     }
     if(!victim) return 0;
-  }
+  } else
+    (void)mutex_unlock(&ar_ptr->mutex);
   return chunk2mem(victim);
 }
@@ -2999,7 +3055,7 @@ chunk_free(ar_ptr, p) arena *ar_ptr; mchunkptr p;
   if (!(hd & PREV_INUSE))                    /* consolidate backward */
   {
     prevsz = p->prev_size;
-    p = chunk_at_offset(p, -prevsz);
+    p = chunk_at_offset(p, -(long)prevsz);
     sz += prevsz;
     unlink(p, bck, fwd);
   }
@@ -3007,12 +3063,12 @@ chunk_free(ar_ptr, p) arena *ar_ptr; mchunkptr p;
     set_head(p, sz | PREV_INUSE);
     top(ar_ptr) = p;
-#ifndef NO_THREADS
+#if USE_ARENAS
     if(ar_ptr == &main_arena) {
 #endif
       if ((unsigned long)(sz) >= (unsigned long)trim_threshold)
         main_trim(top_pad);
-#ifndef NO_THREADS
+#if USE_ARENAS
     } else {
       heap_info *heap = heap_for_ptr(p);
@@ -3032,7 +3088,7 @@ chunk_free(ar_ptr, p) arena *ar_ptr; mchunkptr p;
   if (!(hd & PREV_INUSE))                    /* consolidate backward */
   {
     prevsz = p->prev_size;
-    p = chunk_at_offset(p, -prevsz);
+    p = chunk_at_offset(p, -(long)prevsz);
     sz += prevsz;
 
     if (p->fd == last_remainder(ar_ptr))     /* keep as last_remainder */
@@ -3064,7 +3120,7 @@ chunk_free(ar_ptr, p) arena *ar_ptr; mchunkptr p;
   if (!islr)
     frontlink(ar_ptr, p, sz, idx, bck, fwd);
 
-#ifndef NO_THREADS
+#if USE_ARENAS
   /* Check whether the heap containing top can go away now. */
   if(next->size < MINSIZE &&
      (unsigned long)sz > trim_threshold &&
@@ -3325,6 +3381,15 @@ arena* ar_ptr; mchunkptr oldp; INTERNAL_SIZE_T oldsize, nb;
       (void)mutex_lock(&main_arena.mutex);
       newp = chunk_alloc(&main_arena, nb);
       (void)mutex_unlock(&main_arena.mutex);
+    } else {
+#if USE_ARENAS
+      /* ... or sbrk() has failed and there is still a chance to mmap() */
+      arena* ar_ptr2 = arena_get2(ar_ptr->next ? ar_ptr : 0, nb);
+      if(ar_ptr2) {
+        newp = chunk_alloc(ar_ptr2, nb);
+        (void)mutex_unlock(&ar_ptr2->mutex);
+      }
+#endif
     }
     if (newp == 0) /* propagate failure */
       return 0;
@@ -3435,6 +3500,15 @@ Void_t* mEMALIGn(alignment, bytes) size_t alignment; size_t bytes;
       (void)mutex_lock(&main_arena.mutex);
       p = chunk_align(&main_arena, nb, alignment);
       (void)mutex_unlock(&main_arena.mutex);
+    } else {
+#if USE_ARENAS
+      /* ... or sbrk() has failed and there is still a chance to mmap() */
+      ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, nb);
+      if(ar_ptr) {
+        p = chunk_align(ar_ptr, nb, alignment);
+        (void)mutex_unlock(&ar_ptr->mutex);
+      }
+#endif
     }
     if(!p) return 0;
   }
@@ -3464,7 +3538,7 @@ arena* ar_ptr; INTERNAL_SIZE_T nb; size_t alignment;
   if (p == 0)
     return 0; /* propagate failure */
 
-  m = chunk2mem(p);
+  m = (char*)chunk2mem(p);
 
   if ((((unsigned long)(m)) % alignment) == 0) /* aligned */
   {
@@ -3485,7 +3559,8 @@ arena* ar_ptr; INTERNAL_SIZE_T nb; size_t alignment;
        this is always possible.
     */
 
-    brk = (char*)mem2chunk(((unsigned long)(m + alignment - 1)) & -alignment);
+    brk = (char*)mem2chunk(((unsigned long)(m + alignment - 1)) &
+                           -(long)alignment);
     if ((long)(brk - (char*)(p)) < (long)MINSIZE) brk += alignment;
 
     newp = (mchunkptr)brk;
@@ -3611,7 +3686,8 @@ Void_t* cALLOc(n, elem_size) size_t n; size_t elem_size;
     oldtopsize = chunksize(top(ar_ptr));
 #if MORECORE_CLEARS < 2
     /* Only newly allocated memory is guaranteed to be cleared. */
-    if (oldtopsize < sbrk_base + max_sbrked_mem - (char *)oldtop)
+    if (ar_ptr == &main_arena &&
+        oldtopsize < sbrk_base + max_sbrked_mem - (char *)oldtop)
       oldtopsize = (sbrk_base + max_sbrked_mem - (char *)oldtop);
 #endif
 #endif
@@ -3626,6 +3702,17 @@ Void_t* cALLOc(n, elem_size) size_t n; size_t elem_size;
       (void)mutex_lock(&main_arena.mutex);
       p = chunk_alloc(&main_arena, sz);
       (void)mutex_unlock(&main_arena.mutex);
+    } else {
+#if USE_ARENAS
+      /* ... or sbrk() has failed and there is still a chance to mmap() */
+      (void)mutex_lock(&main_arena.mutex);
+      ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, sz);
+      (void)mutex_unlock(&main_arena.mutex);
+      if(ar_ptr) {
+        p = chunk_alloc(ar_ptr, sz);
+        (void)mutex_unlock(&ar_ptr->mutex);
+      }
+#endif
     }
     if (p == 0) return 0;
   }
@@ -3664,7 +3751,7 @@ void cfree(Void_t *mem)
 void cfree(mem) Void_t *mem;
 #endif
 {
-  free(mem);
+  fREe(mem);
 }
 #endif
@@ -3765,7 +3852,7 @@ main_trim(pad) size_t pad;
   return 1;
 }
 
-#ifndef NO_THREADS
+#if USE_ARENAS
 
 static int
 internal_function
@@ -3795,6 +3882,7 @@ heap_trim(heap, pad) heap_info *heap; size_t pad;
     if(new_size + (HEAP_MAX_SIZE - prev_heap->size) < pad + MINSIZE + pagesz)
       break;
     ar_ptr->size -= heap->size;
+    arena_mem -= heap->size;
     delete_heap(heap);
     heap = prev_heap;
     if(!prev_inuse(p)) { /* consolidate backward */
@@ -3815,6 +3903,7 @@ heap_trim(heap, pad) heap_info *heap; size_t pad;
   if(grow_heap(heap, -extra) != 0)
     return 0;
   ar_ptr->size -= extra;
+  arena_mem -= extra;
 
   /* Success. Adjust top accordingly. */
   set_head(top_chunk, (top_size - extra) | PREV_INUSE);
@@ -3822,7 +3911,7 @@ heap_trim(heap, pad) heap_info *heap; size_t pad;
   return 1;
 }
 
-#endif
+#endif /* USE_ARENAS */
@@ -3913,7 +4002,7 @@ malloc_update_mallinfo(ar_ptr, mi) arena *ar_ptr; struct mallinfo *mi;
   (void)mutex_unlock(&ar_ptr->mutex);
 }
 
-#if !defined(NO_THREADS) && MALLOC_DEBUG > 1
+#if USE_ARENAS && MALLOC_DEBUG > 1
 
 /* Print the complete contents of a single heap to stderr. */
@@ -3989,7 +4078,7 @@ void mALLOC_STATs()
     stat_lock_loop += ar_ptr->stat_lock_loop;
     stat_lock_wait += ar_ptr->stat_lock_wait;
 #endif
-#if !defined(NO_THREADS) && MALLOC_DEBUG > 1
+#if USE_ARENAS && MALLOC_DEBUG > 1
     if(ar_ptr != &main_arena) {
       heap_info *heap;
       (void)mutex_lock(&ar_ptr->mutex);
@@ -4071,7 +4160,7 @@ int mALLOPt(param_number, value) int param_number; int value;
     case M_TOP_PAD:
       top_pad = value; return 1;
     case M_MMAP_THRESHOLD:
-#ifndef NO_THREADS
+#if USE_ARENAS
       /* Forbid setting the threshold too high. */
       if((unsigned long)value > HEAP_MAX_SIZE/2) return 0;
 #endif
@@ -4128,7 +4217,7 @@ struct malloc_state {
   unsigned int     max_n_mmaps;
   unsigned long    mmapped_mem;
   unsigned long    max_mmapped_mem;
   int              using_malloc_checking;
 };
 
 Void_t*
@@ -4237,7 +4326,7 @@ mALLOC_SET_STATe(msptr) Void_t* msptr;
   /* Check whether it is safe to enable malloc checking, or whether
      it is necessary to disable it. */
   if (ms->using_malloc_checking && !using_malloc_checking &&
       !disallow_malloc_check)
     __malloc_check_init ();
   else if (!ms->using_malloc_checking && using_malloc_checking) {
     __malloc_hook = 0;
@@ -4456,9 +4545,9 @@ realloc_check(oldmem, bytes, caller)
   oldp = mem2chunk_check(oldmem);
   if(!oldp) {
     (void)mutex_unlock(&main_arena.mutex);
-    if (check_action & 1)
+    if(check_action & 1)
       fprintf(stderr, "realloc(): invalid pointer %p!\n", oldmem);
-    if (check_action & 2)
+    if(check_action & 2)
       abort();
     return malloc_check(bytes, NULL);
   }

malloc/thread-m.h

@@ -207,18 +207,24 @@ int tsd_key_next;
 typedef int thread_id;
 
+/* The mutex functions used to do absolutely nothing, i.e. lock,
+   trylock and unlock would always just return 0.  However, even
+   without any concurrently active threads, a mutex can be used
+   legitimately as an `in use' flag.  To make the code that is
+   protected by a mutex async-signal safe, these macros would have to
+   be based on atomic test-and-set operations, for example. */
 typedef int mutex_t;
 
 #define MUTEX_INITIALIZER          0
 #define mutex_init(m)              (*(m) = 0)
-#define mutex_lock(m)              (0)
-#define mutex_trylock(m)           (0)
-#define mutex_unlock(m)            (0)
+#define mutex_lock(m)              ((*(m) = 1), 0)
+#define mutex_trylock(m)           (*(m) ? 1 : ((*(m) = 1), 0))
+#define mutex_unlock(m)            (*(m) = 0)
 
 typedef void *tsd_key_t;
 #define tsd_key_create(key, destr) do {} while(0)
-#define tsd_setspecific(key, data) do {} while(0)
-#define tsd_getspecific(key, vptr) (vptr = NULL)
+#define tsd_setspecific(key, data) ((key) = (data))
+#define tsd_getspecific(key, vptr) (vptr = (key))
 
 #define thread_atfork(prepare, parent, child) do {} while(0)

nis/nss_compat/compat-pwd.c

@@ -1504,7 +1504,10 @@ internal_getpwuid_r (uid_t uid, struct passwd *result, ent_t *ent,
       status = getpwuid_plususer (uid, result, buffer, buflen, errnop);
       if (status == NSS_STATUS_SUCCESS &&
           innetgr (buf, NULL, result->pw_name, NULL))
-        return NSS_STATUS_NOTFOUND;
+        {
+          *errnop = ENOENT;
+          return NSS_STATUS_NOTFOUND;
+        }
       continue;
     }
@@ -1529,7 +1532,10 @@ internal_getpwuid_r (uid_t uid, struct passwd *result, ent_t *ent,
     }
   else
     if (status == NSS_STATUS_RETURN) /* We couldn't parse the entry */
-      return NSS_STATUS_NOTFOUND;
+      {
+        *errnop = ENOENT;
+        return NSS_STATUS_NOTFOUND;
+      }
     else
       return status;
@@ -1548,7 +1554,10 @@ internal_getpwuid_r (uid_t uid, struct passwd *result, ent_t *ent,
       status = getpwuid_plususer (uid, result, buffer, buflen, errnop);
       if (status == NSS_STATUS_SUCCESS &&
          innetgr (buf, NULL, result->pw_name, NULL))
-        return NSS_STATUS_NOTFOUND;
+        {
+          *errnop = ENOENT;
+          return NSS_STATUS_NOTFOUND;
+        }
       continue;
     }
@@ -1573,7 +1582,10 @@ internal_getpwuid_r (uid_t uid, struct passwd *result, ent_t *ent,
     }
   else
     if (status == NSS_STATUS_RETURN) /* We couldn't parse the entry */
-      return NSS_STATUS_NOTFOUND;
+      {
+        *errnop = ENOENT;
+        return NSS_STATUS_NOTFOUND;
+      }
     else
      return status;
@@ -1590,7 +1602,10 @@ internal_getpwuid_r (uid_t uid, struct passwd *result, ent_t *ent,
     break;
   else
     if (status == NSS_STATUS_RETURN) /* We couldn't parse the entry */
-      return NSS_STATUS_NOTFOUND;
+      {
+        *errnop = ENOENT;
+        return NSS_STATUS_NOTFOUND;
+      }
    else
      return status;
 }

View File

@ -16,57 +16,73 @@
Foundation, Inc., 59 Temple Place - Suite 330, Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */ Boston, MA 02111-1307, USA. */
#ifndef SI_TYPE_SIZE /* You have to define the following before including this file:
#define SI_TYPE_SIZE 32
#endif
#define __BITS4 (SI_TYPE_SIZE / 4) UWtype -- An unsigned type, default type for operations (typically a "word")
#define __ll_B (1L << (SI_TYPE_SIZE / 2)) UHWtype -- An unsigned type, at least half the size of UWtype.
#define __ll_lowpart(t) ((USItype) (t) % __ll_B) UDWtype -- An unsigned type, at least twice as large a UWtype
#define __ll_highpart(t) ((USItype) (t) / __ll_B) W_TYPE_SIZE -- size in bits of UWtype
UQItype -- Unsigned 8 bit type.
SItype, USItype -- Signed and unsigned 32 bit types.
DItype, UDItype -- Signed and unsigned 64 bit types.
On a 32 bit machine UWtype should typically be USItype;
on a 64 bit machine, UWtype should typically be UDItype.
*/
#define __BITS4 (W_TYPE_SIZE / 4)
#define __ll_B ((UWtype) 1 << (W_TYPE_SIZE / 2))
#define __ll_lowpart(t) ((UWtype) (t) & (__ll_B - 1))
#define __ll_highpart(t) ((UWtype) (t) >> (W_TYPE_SIZE / 2))
#ifndef W_TYPE_SIZE
#define W_TYPE_SIZE 32
#define UWtype USItype
#define UHWtype USItype
#define UDWtype UDItype
#endif
/* Define auxiliary asm macros. /* Define auxiliary asm macros.
1) umul_ppmm(high_prod, low_prod, multipler, multiplicand) 1) umul_ppmm(high_prod, low_prod, multipler, multiplicand) multiplies two
multiplies two USItype integers MULTIPLER and MULTIPLICAND, UWtype integers MULTIPLER and MULTIPLICAND, and generates a two UWtype
and generates a two-part USItype product in HIGH_PROD and word product in HIGH_PROD and LOW_PROD.
LOW_PROD.
2) __umulsidi3(a,b) multiplies two USItype integers A and B, 2) __umulsidi3(a,b) multiplies two UWtype integers A and B, and returns a
and returns a UDItype product. This is just a variant of umul_ppmm. UDWtype product. This is just a variant of umul_ppmm.
3) udiv_qrnnd(quotient, remainder, high_numerator, low_numerator, 3) udiv_qrnnd(quotient, remainder, high_numerator, low_numerator,
denominator) divides a two-word unsigned integer, composed by the denominator) divides a UDWtype, composed by the UWtype integers
integers HIGH_NUMERATOR and LOW_NUMERATOR, by DENOMINATOR and HIGH_NUMERATOR and LOW_NUMERATOR, by DENOMINATOR and places the quotient
places the quotient in QUOTIENT and the remainder in REMAINDER. in QUOTIENT and the remainder in REMAINDER. HIGH_NUMERATOR must be less
HIGH_NUMERATOR must be less than DENOMINATOR for correct operation. than DENOMINATOR for correct operation. If, in addition, the most
If, in addition, the most significant bit of DENOMINATOR must be 1, significant bit of DENOMINATOR must be 1, then the pre-processor symbol
then the pre-processor symbol UDIV_NEEDS_NORMALIZATION is defined to 1. UDIV_NEEDS_NORMALIZATION is defined to 1.
4) sdiv_qrnnd(quotient, remainder, high_numerator, low_numerator, 4) sdiv_qrnnd(quotient, remainder, high_numerator, low_numerator,
denominator). Like udiv_qrnnd but the numbers are signed. The denominator). Like udiv_qrnnd but the numbers are signed. The quotient
quotient is rounded towards 0. is rounded towards 0.
5) count_leading_zeros(count, x) counts the number of zero-bits from 5) count_leading_zeros(count, x) counts the number of zero-bits from the
the msb to the first non-zero bit. This is the number of steps X msb to the first non-zero bit in the UWtype X. This is the number of
needs to be shifted left to set the msb. Undefined for X == 0. steps X needs to be shifted left to set the msb. Undefined for X == 0,
unless the symbol COUNT_LEADING_ZEROS_0 is defined to some value.
6) count_trailing_zeros(count, x) like count_leading_zeros, but counts 6) count_trailing_zeros(count, x) like count_leading_zeros, but counts
from the least significant end. from the least significant end.
7) add_ssaaaa(high_sum, low_sum, high_addend_1, low_addend_1, 7) add_ssaaaa(high_sum, low_sum, high_addend_1, low_addend_1,
high_addend_2, low_addend_2) adds two two-word unsigned integers, high_addend_2, low_addend_2) adds two UWtype integers, composed by
composed by HIGH_ADDEND_1 and LOW_ADDEND_1, and HIGH_ADDEND_2 and HIGH_ADDEND_1 and LOW_ADDEND_1, and HIGH_ADDEND_2 and LOW_ADDEND_2
LOW_ADDEND_2 respectively. The result is placed in HIGH_SUM and respectively. The result is placed in HIGH_SUM and LOW_SUM. Overflow
LOW_SUM. Overflow (i.e. carry out) is not stored anywhere, and is (i.e. carry out) is not stored anywhere, and is lost.
lost.
8) sub_ddmmss(high_difference, low_difference, high_minuend, 8) sub_ddmmss(high_difference, low_difference, high_minuend, low_minuend,
low_minuend, high_subtrahend, low_subtrahend) subtracts two high_subtrahend, low_subtrahend) subtracts two two-word UWtype integers,
two-word unsigned integers, composed by HIGH_MINUEND_1 and composed by HIGH_MINUEND_1 and LOW_MINUEND_1, and HIGH_SUBTRAHEND_2 and
LOW_MINUEND_1, and HIGH_SUBTRAHEND_2 and LOW_SUBTRAHEND_2 LOW_SUBTRAHEND_2 respectively. The result is placed in HIGH_DIFFERENCE
respectively. The result is placed in HIGH_DIFFERENCE and and LOW_DIFFERENCE. Overflow (i.e. carry out) is not stored anywhere,
LOW_DIFFERENCE. Overflow (i.e. carry out) is not stored anywhere,
and is lost. and is lost.
If any of these macros are left undefined for a particular CPU, If any of these macros are left undefined for a particular CPU,
@ -90,7 +106,7 @@
#define __AND_CLOBBER_CC , "cc" #define __AND_CLOBBER_CC , "cc"
#endif /* __GNUC__ < 2 */ #endif /* __GNUC__ < 2 */
#if defined (__a29k__) || defined (_AM29K) #if (defined (__a29k__) || defined (_AM29K)) && W_TYPE_SIZE == 32
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \ #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
__asm__ ("add %1,%4,%5 __asm__ ("add %1,%4,%5
addc %0,%2,%3" \ addc %0,%2,%3" \
@ -132,9 +148,32 @@
__asm__ ("clz %0,%1" \ __asm__ ("clz %0,%1" \
: "=r" ((USItype) (count)) \ : "=r" ((USItype) (count)) \
: "r" ((USItype) (x))) : "r" ((USItype) (x)))
#define COUNT_LEADING_ZEROS_0 32
#endif /* __a29k__ */ #endif /* __a29k__ */
#if defined (__arc__) #if defined (__alpha) && W_TYPE_SIZE == 64
#define umul_ppmm(ph, pl, m0, m1) \
do { \
UDItype __m0 = (m0), __m1 = (m1); \
__asm__ ("umulh %r1,%2,%0" \
: "=r" ((UDItype) ph) \
: "%rJ" (__m0), \
"rI" (__m1)); \
(pl) = __m0 * __m1; \
} while (0)
#define UMUL_TIME 46
#ifndef LONGLONG_STANDALONE
#define udiv_qrnnd(q, r, n1, n0, d) \
do { UDItype __r; \
(q) = __udiv_qrnnd (&__r, (n1), (n0), (d)); \
(r) = __r; \
} while (0)
extern UDItype __udiv_qrnnd __P ((UDItype *, UDItype, UDItype, UDItype));
#define UDIV_TIME 220
#endif /* LONGLONG_STANDALONE */
#endif /* __alpha */
#if defined (__arc__) && W_TYPE_SIZE == 32
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \ #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
__asm__ ("add.f %1, %4, %5 __asm__ ("add.f %1, %4, %5
adc %0, %2, %3" \ adc %0, %2, %3" \
@ -165,7 +204,7 @@ do { \
UDItype __umulsidi3 (USItype, USItype); UDItype __umulsidi3 (USItype, USItype);
#endif #endif
#if defined (__arm__) #if defined (__arm__) && W_TYPE_SIZE == 32
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \ #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
__asm__ ("adds %1, %4, %5 __asm__ ("adds %1, %4, %5
adc %0, %2, %3" \ adc %0, %2, %3" \
@ -208,7 +247,7 @@ UDItype __umulsidi3 (USItype, USItype);
#define UDIV_TIME 100 #define UDIV_TIME 100
#endif /* __arm__ */ #endif /* __arm__ */
#if defined (__clipper__) #if defined (__clipper__) && W_TYPE_SIZE == 32
#define umul_ppmm(w1, w0, u, v) \ #define umul_ppmm(w1, w0, u, v) \
({union {UDItype __ll; \ ({union {UDItype __ll; \
struct {USItype __l, __h;} __i; \ struct {USItype __l, __h;} __i; \
@ -236,7 +275,7 @@ UDItype __umulsidi3 (USItype, USItype);
__w; }) __w; })
#endif /* __clipper__ */ #endif /* __clipper__ */
#if defined (__gmicro__) #if defined (__gmicro__) && W_TYPE_SIZE == 32
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \ #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
__asm__ ("add.w %5,%1 __asm__ ("add.w %5,%1
addx %3,%0" \ addx %3,%0" \
@ -275,7 +314,7 @@ UDItype __umulsidi3 (USItype, USItype);
"0" ((USItype) 0)) "0" ((USItype) 0))
#endif #endif
#if defined (__hppa) #if defined (__hppa) && W_TYPE_SIZE == 32
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \ #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
__asm__ ("add %4,%5,%1 __asm__ ("add %4,%5,%1
addc %2,%3,%0" \ addc %2,%3,%0" \
@ -337,7 +376,48 @@ UDItype __umulsidi3 (USItype, USItype);
} while (0) } while (0)
#endif #endif
#if defined (__i386__) || defined (__i486__) #if (defined (__i370__) || defined (__mvs__)) && W_TYPE_SIZE == 32
#define umul_ppmm(xh, xl, m0, m1) \
do { \
union {UDItype __ll; \
struct {USItype __h, __l;} __i; \
} __xx; \
USItype __m0 = (m0), __m1 = (m1); \
__asm__ ("mr %0,%3" \
: "=r" (__xx.__i.__h), \
"=r" (__xx.__i.__l) \
: "%1" (__m0), \
"r" (__m1)); \
(xh) = __xx.__i.__h; (xl) = __xx.__i.__l; \
(xh) += ((((SItype) __m0 >> 31) & __m1) \
+ (((SItype) __m1 >> 31) & __m0)); \
} while (0)
#define smul_ppmm(xh, xl, m0, m1) \
do { \
union {DItype __ll; \
struct {USItype __h, __l;} __i; \
} __xx; \
__asm__ ("mr %0,%3" \
: "=r" (__xx.__i.__h), \
"=r" (__xx.__i.__l) \
: "%1" (m0), \
"r" (m1)); \
(xh) = __xx.__i.__h; (xl) = __xx.__i.__l; \
} while (0)
#define sdiv_qrnnd(q, r, n1, n0, d) \
do { \
union {DItype __ll; \
struct {USItype __h, __l;} __i; \
} __xx; \
__xx.__i.__h = n1; __xx.__i.__l = n0; \
__asm__ ("dr %0,%2" \
: "=r" (__xx.__ll) \
: "0" (__xx.__ll), "r" (d)); \
(q) = __xx.__i.__l; (r) = __xx.__i.__h; \
} while (0)
#endif
#if (defined (__i386__) || defined (__i486__)) && W_TYPE_SIZE == 32
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \ #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
__asm__ ("addl %5,%1 __asm__ ("addl %5,%1
adcl %3,%0" \ adcl %3,%0" \
@ -382,7 +462,7 @@ UDItype __umulsidi3 (USItype, USItype);
#define UDIV_TIME 40 #define UDIV_TIME 40
#endif /* 80x86 */ #endif /* 80x86 */
#if defined (__i860__) #if defined (__i860__) && W_TYPE_SIZE == 32
#if 0 #if 0
/* Make sure these patterns really improve the code before /* Make sure these patterns really improve the code before
switching them on. */ switching them on. */
@ -423,7 +503,7 @@ UDItype __umulsidi3 (USItype, USItype);
#endif #endif
#endif /* __i860__ */ #endif /* __i860__ */
#if defined (__i960__) #if defined (__i960__) && W_TYPE_SIZE == 32
#define umul_ppmm(w1, w0, u, v) \ #define umul_ppmm(w1, w0, u, v) \
({union {UDItype __ll; \ ({union {UDItype __ll; \
struct {USItype __l, __h;} __i; \ struct {USItype __l, __h;} __i; \
@ -442,7 +522,7 @@ UDItype __umulsidi3 (USItype, USItype);
__w; }) __w; })
#endif /* __i960__ */ #endif /* __i960__ */
#if defined (__M32R__) #if defined (__M32R__) && W_TYPE_SIZE == 32
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \ #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
/* The cmp clears the condition bit. */ \ /* The cmp clears the condition bit. */ \
__asm__ ("cmp %0,%0 __asm__ ("cmp %0,%0
@ -469,7 +549,7 @@ UDItype __umulsidi3 (USItype, USItype);
: "cbit") : "cbit")
#endif /* __M32R__ */ #endif /* __M32R__ */
#if defined (__mc68000__) #if defined (__mc68000__) && W_TYPE_SIZE == 32
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \ #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
__asm__ ("add%.l %5,%1 __asm__ ("add%.l %5,%1
addx%.l %3,%0" \ addx%.l %3,%0" \
@ -571,7 +651,7 @@ UDItype __umulsidi3 (USItype, USItype);
#endif #endif
#endif /* mc68000 */ #endif /* mc68000 */
#if defined (__m88000__) #if defined (__m88000__) && W_TYPE_SIZE == 32
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \ #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
__asm__ ("addu.co %1,%r4,%r5 __asm__ ("addu.co %1,%r4,%r5
addu.ci %0,%r2,%r3" \ addu.ci %0,%r2,%r3" \
@ -598,6 +678,7 @@ UDItype __umulsidi3 (USItype, USItype);
: "r" ((USItype) (x))); \ : "r" ((USItype) (x))); \
(count) = __cbtmp ^ 31; \ (count) = __cbtmp ^ 31; \
} while (0) } while (0)
#define COUNT_LEADING_ZEROS_0 63 /* sic */
#if defined (__mc88110__) #if defined (__mc88110__)
#define umul_ppmm(wh, wl, u, v) \ #define umul_ppmm(wh, wl, u, v) \
do { \ do { \
@ -630,7 +711,7 @@ UDItype __umulsidi3 (USItype, USItype);
#endif /* __mc88110__ */ #endif /* __mc88110__ */
#endif /* __m88000__ */ #endif /* __m88000__ */
#if defined (__mips__) #if defined (__mips__) && W_TYPE_SIZE == 32
#define umul_ppmm(w1, w0, u, v) \ #define umul_ppmm(w1, w0, u, v) \
__asm__ ("multu %2,%3" \ __asm__ ("multu %2,%3" \
: "=l" ((USItype) (w0)), \ : "=l" ((USItype) (w0)), \
@ -641,7 +722,7 @@ UDItype __umulsidi3 (USItype, USItype);
#define UDIV_TIME 100 #define UDIV_TIME 100
#endif /* __mips__ */ #endif /* __mips__ */
#if defined (__ns32000__) #if defined (__ns32000__) && W_TYPE_SIZE == 32
#define umul_ppmm(w1, w0, u, v) \ #define umul_ppmm(w1, w0, u, v) \
({union {UDItype __ll; \ ({union {UDItype __ll; \
struct {USItype __l, __h;} __i; \ struct {USItype __l, __h;} __i; \
@ -746,6 +827,7 @@ UDItype __umulsidi3 (USItype, USItype);
__asm__ ("{cntlz|cntlzw} %0,%1" \ __asm__ ("{cntlz|cntlzw} %0,%1" \
: "=r" ((USItype) (count)) \ : "=r" ((USItype) (count)) \
: "r" ((USItype) (x))) : "r" ((USItype) (x)))
#define COUNT_LEADING_ZEROS_0 32
#if defined (_ARCH_PPC) #if defined (_ARCH_PPC)
#define umul_ppmm(ph, pl, m0, m1) \ #define umul_ppmm(ph, pl, m0, m1) \
do { \ do { \
@ -796,7 +878,7 @@ UDItype __umulsidi3 (USItype, USItype);
#endif #endif
#endif /* Power architecture variants. */ #endif /* Power architecture variants. */
#if defined (__pyr__) #if defined (__pyr__) && W_TYPE_SIZE == 32
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \ #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
__asm__ ("addw %5,%1 __asm__ ("addw %5,%1
addwc %3,%0" \ addwc %3,%0" \
@ -828,7 +910,7 @@ UDItype __umulsidi3 (USItype, USItype);
(w1) = __xx.__i.__h; (w0) = __xx.__i.__l;}) (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
#endif /* __pyr__ */ #endif /* __pyr__ */
#if defined (__ibm032__) /* RT/ROMP */ #if defined (__ibm032__) /* RT/ROMP */ && W_TYPE_SIZE == 32
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \ #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
__asm__ ("a %1,%5 __asm__ ("a %1,%5
ae %0,%3" \ ae %0,%3" \
@ -897,7 +979,22 @@ UDItype __umulsidi3 (USItype, USItype);
} while (0) } while (0)
#endif #endif
#if defined (__sparc__) #if defined (__sh2__) && W_TYPE_SIZE == 32
#define umul_ppmm(w1, w0, u, v) \
__asm__ ( \
"dmulu.l %2,%3
sts macl,%1
sts mach,%0" \
: "=r" ((USItype)(w1)), \
"=r" ((USItype)(w0)) \
: "r" ((USItype)(u)), \
"r" ((USItype)(v)) \
: "macl", "mach")
#define UMUL_TIME 5
#endif
#if defined (__sparc__) && !defined (__sparc_v9__) && !defined(__arch64__) \
&& !defined(__sparc_v9) && W_TYPE_SIZE == 32
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \ #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
__asm__ ("addcc %r4,%5,%1 __asm__ ("addcc %r4,%5,%1
addx %r2,%3,%0" \ addx %r2,%3,%0" \
@ -987,7 +1084,7 @@ UDItype __umulsidi3 (USItype, USItype);
: "r" ((USItype) (n1)), \ : "r" ((USItype) (n1)), \
"r" ((USItype) (n0)), \ "r" ((USItype) (n0)), \
"rI" ((USItype) (d)) \ "rI" ((USItype) (d)) \
: "%g1" __AND_CLOBBER_CC) : "g1" __AND_CLOBBER_CC)
#define UDIV_TIME 37 #define UDIV_TIME 37
#define count_leading_zeros(count, x) \ #define count_leading_zeros(count, x) \
do { \ do { \
@ -995,14 +1092,17 @@ UDItype __umulsidi3 (USItype, USItype);
: "=r" ((USItype) (count)) \ : "=r" ((USItype) (count)) \
: "r" ((USItype) (x))); \ : "r" ((USItype) (x))); \
} while (0) } while (0)
/* Early sparclites return 63 for an argument of 0, but they warn that future
implementations might change this. Therefore, leave COUNT_LEADING_ZEROS_0
undefined. */
#else #else
/* SPARC without integer multiplication and divide instructions. /* SPARC without integer multiplication and divide instructions.
(i.e. at least Sun4/20,40,60,65,75,110,260,280,330,360,380,470,490) */ (i.e. at least Sun4/20,40,60,65,75,110,260,280,330,360,380,470,490) */
#define umul_ppmm(w1, w0, u, v) \ #define umul_ppmm(w1, w0, u, v) \
__asm__ ("! Inlined umul_ppmm __asm__ ("! Inlined umul_ppmm
wr %%g0,%2,%%y ! SPARC has 0-3 delay insn after a wr wr %%g0,%2,%%y ! SPARC has 0-3 delay insn after a wr
sra %3,31,%%g2 ! Don't move this insn sra %3,31,%%o5 ! Don't move this insn
and %2,%%g2,%%g2 ! Don't move this insn and %2,%%o5,%%o5 ! Don't move this insn
andcc %%g0,0,%%g1 ! Don't move this insn andcc %%g0,0,%%g1 ! Don't move this insn
mulscc %%g1,%3,%%g1 mulscc %%g1,%3,%%g1
mulscc %%g1,%3,%%g1 mulscc %%g1,%3,%%g1
@ -1037,13 +1137,13 @@ UDItype __umulsidi3 (USItype, USItype);
mulscc %%g1,%3,%%g1 mulscc %%g1,%3,%%g1
mulscc %%g1,%3,%%g1 mulscc %%g1,%3,%%g1
mulscc %%g1,0,%%g1 mulscc %%g1,0,%%g1
add %%g1,%%g2,%0 add %%g1,%%o5,%0
rd %%y,%1" \ rd %%y,%1" \
: "=r" ((USItype) (w1)), \ : "=r" ((USItype) (w1)), \
"=r" ((USItype) (w0)) \ "=r" ((USItype) (w0)) \
: "%rI" ((USItype) (u)), \ : "%rI" ((USItype) (u)), \
"r" ((USItype) (v)) \ "r" ((USItype) (v)) \
: "%g1", "%g2" __AND_CLOBBER_CC) : "g1", "o5" __AND_CLOBBER_CC)
#define UMUL_TIME 39 /* 39 instructions */ #define UMUL_TIME 39 /* 39 instructions */
/* It's quite necessary to add this much assembler for the sparc. /* It's quite necessary to add this much assembler for the sparc.
The default udiv_qrnnd (in C) is more than 10 times slower! */ The default udiv_qrnnd (in C) is more than 10 times slower! */
@ -1076,13 +1176,79 @@ UDItype __umulsidi3 (USItype, USItype);
"=&r" ((USItype) (r)) \ "=&r" ((USItype) (r)) \
: "r" ((USItype) (d)), \ : "r" ((USItype) (d)), \
"1" ((USItype) (n1)), \ "1" ((USItype) (n1)), \
"0" ((USItype) (n0)) : "%g1" __AND_CLOBBER_CC) "0" ((USItype) (n0)) : "g1" __AND_CLOBBER_CC)
#define UDIV_TIME (3+7*32) /* 7 instructions/iteration. 32 iterations. */ #define UDIV_TIME (3+7*32) /* 7 instructions/iteration. 32 iterations. */
#endif /* __sparclite__ */ #endif /* __sparclite__ */
#endif /* __sparc_v8__ */ #endif /* __sparc_v8__ */
#endif /* __sparc__ */ #endif /* __sparc__ */
#if defined (__vax__) #if (defined (__sparc_v9__) || (defined (__sparc__) && defined (__arch64__)) \
|| defined (__sparcv9)) && W_TYPE_SIZE == 64
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
__asm__ ("addcc %4,%5,%1
add %2,%3,%0
bcs,a,pn %%xcc, 1f
add %0, 1, %0
1:" \
: "=r" ((UDItype)(sh)), \
"=&r" ((UDItype)(sl)) \
: "%rJ" ((UDItype)(ah)), \
"rI" ((UDItype)(bh)), \
"%rJ" ((UDItype)(al)), \
"rI" ((UDItype)(bl)) \
__CLOBBER_CC)
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
__asm__ ("subcc %4,%5,%1
sub %2,%3,%0
bcs,a,pn %%xcc, 1f
sub %0, 1, %0
1:" \
: "=r" ((UDItype)(sh)), \
"=&r" ((UDItype)(sl)) \
: "rJ" ((UDItype)(ah)), \
"rI" ((UDItype)(bh)), \
"rJ" ((UDItype)(al)), \
"rI" ((UDItype)(bl)) \
__CLOBBER_CC)
#define umul_ppmm(wh, wl, u, v) \
do { \
UDItype tmp1, tmp2, tmp3, tmp4; \
__asm__ __volatile__ ( \
"srl %7,0,%3
mulx %3,%6,%1
srlx %6,32,%2
mulx %2,%3,%4
sllx %4,32,%5
srl %6,0,%3
sub %1,%5,%5
srlx %5,32,%5
addcc %4,%5,%4
srlx %7,32,%5
mulx %3,%5,%3
mulx %2,%5,%5
sethi %%hi(0x80000000),%2
addcc %4,%3,%4
srlx %4,32,%4
add %2,%2,%2
movcc %%xcc,%%g0,%2
addcc %5,%4,%5
sllx %3,32,%3
add %1,%3,%1
add %5,%2,%0" \
: "=r" ((UDItype)(wh)), \
"=&r" ((UDItype)(wl)), \
"=&r" (tmp1), "=&r" (tmp2), "=&r" (tmp3), "=&r" (tmp4) \
: "r" ((UDItype)(u)), \
"r" ((UDItype)(v)) \
__CLOBBER_CC); \
} while (0)
#define UMUL_TIME 96
#define UDIV_TIME 230
#endif /* __sparc_v9__ */
#if defined (__vax__) && W_TYPE_SIZE == 32
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \ #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
__asm__ ("addl2 %5,%1 __asm__ ("addl2 %5,%1
adwc %3,%0" \ adwc %3,%0" \
@ -1129,6 +1295,40 @@ UDItype __umulsidi3 (USItype, USItype);
} while (0) } while (0)
#endif /* __vax__ */ #endif /* __vax__ */
#if defined (__z8000__) && W_TYPE_SIZE == 16
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
__asm__ ("add %H1,%H5\n\tadc %H0,%H3" \
: "=r" ((unsigned int)(sh)), \
"=&r" ((unsigned int)(sl)) \
: "%0" ((unsigned int)(ah)), \
"r" ((unsigned int)(bh)), \
"%1" ((unsigned int)(al)), \
"rQR" ((unsigned int)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
__asm__ ("sub %H1,%H5\n\tsbc %H0,%H3" \
: "=r" ((unsigned int)(sh)), \
"=&r" ((unsigned int)(sl)) \
: "0" ((unsigned int)(ah)), \
"r" ((unsigned int)(bh)), \
"1" ((unsigned int)(al)), \
"rQR" ((unsigned int)(bl)))
#define umul_ppmm(xh, xl, m0, m1) \
do { \
union {long int __ll; \
struct {unsigned int __h, __l;} __i; \
} __xx; \
unsigned int __m0 = (m0), __m1 = (m1); \
__asm__ ("mult %S0,%H3" \
: "=r" (__xx.__i.__h), \
"=r" (__xx.__i.__l) \
: "%1" (__m0), \
"rQR" (__m1)); \
(xh) = __xx.__i.__h; (xl) = __xx.__i.__l; \
(xh) += ((((signed int) __m0 >> 15) & __m1) \
+ (((signed int) __m1 >> 15) & __m0)); \
} while (0)
#endif /* __z8000__ */
#endif /* __GNUC__ */ #endif /* __GNUC__ */
/* If this machine has no inline assembler, use C macros. */ /* If this machine has no inline assembler, use C macros. */
@ -1136,7 +1336,7 @@ UDItype __umulsidi3 (USItype, USItype);
#if !defined (add_ssaaaa) #if !defined (add_ssaaaa)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \ #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
do { \ do { \
USItype __x; \ UWtype __x; \
__x = (al) + (bl); \ __x = (al) + (bl); \
(sh) = (ah) + (bh) + (__x < (al)); \ (sh) = (ah) + (bh) + (__x < (al)); \
(sl) = __x; \ (sl) = __x; \
@ -1146,7 +1346,7 @@ UDItype __umulsidi3 (USItype, USItype);
#if !defined (sub_ddmmss) #if !defined (sub_ddmmss)
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \ #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
do { \ do { \
USItype __x; \ UWtype __x; \
__x = (al) - (bl); \ __x = (al) - (bl); \
(sh) = (ah) - (bh) - (__x > (al)); \ (sh) = (ah) - (bh) - (__x > (al)); \
(sl) = __x; \ (sl) = __x; \
@ -1156,18 +1356,18 @@ UDItype __umulsidi3 (USItype, USItype);
#if !defined (umul_ppmm) #if !defined (umul_ppmm)
#define umul_ppmm(w1, w0, u, v) \ #define umul_ppmm(w1, w0, u, v) \
do { \ do { \
USItype __x0, __x1, __x2, __x3; \ UWtype __x0, __x1, __x2, __x3; \
USItype __ul, __vl, __uh, __vh; \ UHWtype __ul, __vl, __uh, __vh; \
\ \
__ul = __ll_lowpart (u); \ __ul = __ll_lowpart (u); \
__uh = __ll_highpart (u); \ __uh = __ll_highpart (u); \
__vl = __ll_lowpart (v); \ __vl = __ll_lowpart (v); \
__vh = __ll_highpart (v); \ __vh = __ll_highpart (v); \
\ \
__x0 = (USItype) __ul * __vl; \ __x0 = (UWtype) __ul * __vl; \
__x1 = (USItype) __ul * __vh; \ __x1 = (UWtype) __ul * __vh; \
__x2 = (USItype) __uh * __vl; \ __x2 = (UWtype) __uh * __vl; \
__x3 = (USItype) __uh * __vh; \ __x3 = (UWtype) __uh * __vh; \
\ \
__x1 += __ll_highpart (__x0);/* this can't give carry */ \ __x1 += __ll_highpart (__x0);/* this can't give carry */ \
__x1 += __x2; /* but this indeed can */ \ __x1 += __x2; /* but this indeed can */ \
@@ -1189,14 +1389,14 @@ UDItype __umulsidi3 (USItype, USItype);
 /* Define this unconditionally, so it can be used for debugging.  */
 #define __udiv_qrnnd_c(q, r, n1, n0, d) \
   do { \
-    USItype __d1, __d0, __q1, __q0; \
-    USItype __r1, __r0, __m; \
+    UWtype __d1, __d0, __q1, __q0; \
+    UWtype __r1, __r0, __m; \
     __d1 = __ll_highpart (d); \
     __d0 = __ll_lowpart (d); \
 \
     __r1 = (n1) % __d1; \
     __q1 = (n1) / __d1; \
-    __m = (USItype) __q1 * __d0; \
+    __m = (UWtype) __q1 * __d0; \
     __r1 = __r1 * __ll_B | __ll_highpart (n0); \
     if (__r1 < __m) \
       { \
@@ -1209,7 +1409,7 @@ UDItype __umulsidi3 (USItype, USItype);
 \
     __r0 = __r1 % __d1; \
     __q0 = __r1 / __d1; \
-    __m = (USItype) __q0 * __d0; \
+    __m = (UWtype) __q0 * __d0; \
     __r0 = __r0 * __ll_B | __ll_lowpart (n0); \
     if (__r0 < __m) \
       { \
@@ -1220,7 +1420,7 @@ UDItype __umulsidi3 (USItype, USItype);
       } \
     __r0 -= __m; \
 \
-    (q) = (USItype) __q1 * __ll_B | __q0; \
+    (q) = (UWtype) __q1 * __ll_B | __q0; \
     (r) = __r0; \
   } while (0)
@@ -1245,24 +1445,25 @@ UDItype __umulsidi3 (USItype, USItype);
 extern const UQItype __clz_tab[];
 #define count_leading_zeros(count, x) \
   do { \
-    USItype __xr = (x); \
-    USItype __a; \
+    UWtype __xr = (x); \
+    UWtype __a; \
 \
-    if (SI_TYPE_SIZE <= 32) \
+    if (W_TYPE_SIZE <= 32) \
       { \
-        __a = __xr < ((USItype)1<<2*__BITS4) \
-          ? (__xr < ((USItype)1<<__BITS4) ? 0 : __BITS4) \
-          : (__xr < ((USItype)1<<3*__BITS4) ? 2*__BITS4 : 3*__BITS4); \
+        __a = __xr < ((UWtype)1<<2*__BITS4) \
+          ? (__xr < ((UWtype)1<<__BITS4) ? 0 : __BITS4) \
+          : (__xr < ((UWtype)1<<3*__BITS4) ? 2*__BITS4 : 3*__BITS4); \
       } \
     else \
       { \
-        for (__a = SI_TYPE_SIZE - 8; __a > 0; __a -= 8) \
+        for (__a = W_TYPE_SIZE - 8; __a > 0; __a -= 8) \
          if (((__xr >> __a) & 0xff) != 0) \
            break; \
       } \
 \
-    (count) = SI_TYPE_SIZE - (__clz_tab[__xr >> __a] + __a); \
+    (count) = W_TYPE_SIZE - (__clz_tab[__xr >> __a] + __a); \
   } while (0)
+#define COUNT_LEADING_ZEROS_0 W_TYPE_SIZE
 #endif
 #if !defined (count_trailing_zeros)
@@ -1270,10 +1471,10 @@ extern const UQItype __clz_tab[];
    defined in asm, but if it is not, the C version above is good enough.  */
 #define count_trailing_zeros(count, x) \
   do { \
-    USItype __ctz_x = (x); \
-    USItype __ctz_c; \
+    UWtype __ctz_x = (x); \
+    UWtype __ctz_c; \
     count_leading_zeros (__ctz_c, __ctz_x & -__ctz_x); \
-    (count) = SI_TYPE_SIZE - 1 - __ctz_c; \
+    (count) = W_TYPE_SIZE - 1 - __ctz_c; \
   } while (0)
 #endif
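The W_TYPE_SIZE/__clz_tab scheme above scans down to the highest nonzero byte and finishes with a 256-entry table lookup. A self-contained sketch, assuming a 32-bit word and computing the table at startup rather than copying the real __clz_tab; x == 0 is deliberately left undefined, which is the gap the new COUNT_LEADING_ZEROS_0 definition exists to document:

#include <stdint.h>
#include <stdio.h>

static unsigned char clz_tab[256];

/* clz_tab[v] is 1 + floor(log2(v)) for v > 0, i.e. the bit width of v.  */
static void
init_tab (void)
{
  int v, bits;
  for (v = 1; v < 256; v++)
    {
      for (bits = 0; (v >> bits) != 0; bits++)
        ;
      clz_tab[v] = bits;
    }
}

static int
count_leading_zeros32 (uint32_t x)    /* undefined for x == 0 */
{
  int a;
  /* Walk down 8 bits at a time to the highest nonzero byte.  */
  for (a = 32 - 8; a > 0; a -= 8)
    if (((x >> a) & 0xff) != 0)
      break;
  return 32 - (clz_tab[x >> a] + a);
}

int
main (void)
{
  init_tab ();
  printf ("%d %d %d\n",
          count_leading_zeros32 (1),             /* 31 */
          count_leading_zeros32 (0x80000000u),   /* 0 */
          count_leading_zeros32 (0x00ffffffu));  /* 8 */
  return 0;
}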
@@ -511,6 +511,8 @@ double: 1
 idouble: 1
 Test "erfc (1.2) == 0.089686021770364619762":
 float: 1
+double: 1
+idouble: 1
 ifloat: 1
 Test "erfc (2.0) == 0.0046777349810472658379":
 double: 1
@@ -28,5 +28,5 @@ __fegetenv (fenv_t *envp)
   return 0;
 }
 strong_alias (__fegetenv, __old_fegetenv)
-symbol_version (__old_fegetenv, fegetenv, GLIBC_2.1)
-default_symbol_version (__fegetenv, fegetenv, GLIBC_2.1.3)
+symbol_version (__old_fegetenv, fegetenv, GLIBC_2.1);
+default_symbol_version (__fegetenv, fegetenv, GLIBC_2.1.3);
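The added semicolons matter because symbol_version and default_symbol_version expand to top-level constructs that must be properly terminated. Underneath they amount to GNU as .symver directives; roughly (a sketch of the effect, not the exact macro expansion):

/* fegetenv@GLIBC_2.1 keeps already-linked binaries on the old entry
   point; the "@@" form marks the default for newly linked programs.  */
__asm__ (".symver __old_fegetenv, fegetenv@GLIBC_2.1");
__asm__ (".symver __fegetenv, fegetenv@@GLIBC_2.1.3");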
@@ -36,7 +36,7 @@
 extern int errno;

 /* Function to get address of global `errno' variable.  */
-extern int *__errno_location (void) __attribute__ ((__const__)) __THROW;
+extern int *__errno_location (void) __THROW __attribute__ ((__const__));

 # if defined _LIBC
 /* We wouldn't need a special macro anymore but it is history.  */
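The reordering is needed because __THROW expands to the C++ exception specification throw () (and to nothing in plain C), and g++ requires that specification to follow the parameter list directly, before any __attribute__:

/* Accepted by g++: exception specification first, attribute after.  */
extern int *__errno_location (void) throw () __attribute__ ((__const__));

/* Rejected: an attribute may not sit between ")" and "throw ()".  */
/* extern int *__errno_location (void) __attribute__ ((__const__)) throw (); */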
@@ -13,5 +13,5 @@ CFLAGS-initfini.s += -DWEAK_GMON_START
 endif

 ifeq ($(subdir),resource)
-sysdep_routines += oldgetrlimit oldsetrlimit
+sysdep_routines += oldgetrlimit64 oldsetrlimit64
 endif
@@ -20,7 +20,6 @@
 #include <sys/uio.h>
 #include <stdio-common/_itoa.h>
-#include <asm/ptrace.h>
 #include <bits/sigcontext.h>

 /* We will print the register dump in this format:
@@ -63,25 +62,17 @@ hexvalue (unsigned long int value, char *buf, size_t len)
     *--cp = '0';
 }

-struct __siginfo_sparc64_fpu
-{
-  unsigned long si_float_regs[32];
-  unsigned long si_xfsr;
-  unsigned long si_gsr;
-  unsigned long si_fprs;
-};
-
 static void
 register_dump (int fd, SIGCONTEXT ctx)
 {
   char regs[36][16];
-  char fregs[35][8];
+  char fregs[68][8];
   struct iovec iov[150];
   size_t nr = 0;
   int i;
-  struct reg_window *r = (struct reg_window *)
-    ctx->sf_regs.u_regs[14];
-  struct __siginfo_sparc64_fpu *f;
+  unsigned long *r = (unsigned long *)
+    (ctx->sigc_regs.u_regs[14] + STACK_BIAS);
+  __siginfo_fpu_t *f;

 #define ADD_STRING(str) \
   iov[nr].iov_base = (char *) str; \
@@ -93,15 +84,15 @@ register_dump (int fd, SIGCONTEXT ctx)
   ++nr

   /* Generate strings of register contents.  */
-  hexvalue (ctx->sf_regs.tstate, regs[0], 16);
-  hexvalue (ctx->sf_regs.tpc, regs[1], 16);
-  hexvalue (ctx->sf_regs.tnpc, regs[2], 16);
-  hexvalue (ctx->sf_regs.y, regs[3], 8);
+  hexvalue (ctx->sigc_regs.tstate, regs[0], 16);
+  hexvalue (ctx->sigc_regs.tpc, regs[1], 16);
+  hexvalue (ctx->sigc_regs.tnpc, regs[2], 16);
+  hexvalue (ctx->sigc_regs.y, regs[3], 8);
   for (i = 1; i <= 15; i++)
-    hexvalue (ctx->sf_regs.u_regs[i], regs[3+i], 16);
+    hexvalue (ctx->sigc_regs.u_regs[i], regs[3+i], 16);
   for (i = 0; i <= 15; i++)
-    hexvalue (r->locals[i], regs[19+i], 16);
-  hexvalue (ctx->sf_mask, regs[35], 16);
+    hexvalue (r[i], regs[19+i], 16);
+  hexvalue (ctx->sigc_mask, regs[35], 16);

   /* Generate the output.  */
   ADD_STRING ("Register dump:\n\n TSTATE: ");
@@ -177,85 +168,85 @@ register_dump (int fd, SIGCONTEXT ctx)
   ADD_STRING ("\n\n Mask: ");
   ADD_MEM (regs[35], 16);

-  f = *(struct __siginfo_sparc64_fpu **)(ctx + 1);
+  f = ctx->sigc_fpu_save;
   if (f != NULL)
     {
-      for (i = 0; i < 32; i++)
-        hexvalue (f->si_float_regs[i], fregs[i], 16);
-      hexvalue (f->si_xfsr, fregs[32], 16);
-      hexvalue (f->si_gsr, fregs[33], 2);
-      hexvalue (f->si_fprs, fregs[34], 1);
+      for (i = 0; i < 64; i++)
+        hexvalue (f->si_float_regs[i], fregs[i], 8);
+      hexvalue (f->si_fsr, fregs[64], 16);
+      hexvalue (f->si_gsr, fregs[66], 2);
+      hexvalue (f->si_fprs, fregs[67], 1);

       ADD_STRING (" XFSR: ");
-      ADD_MEM (fregs[32], 16);
+      ADD_MEM (fregs[64], 16);
       ADD_STRING (" GSR: ");
-      ADD_MEM (fregs[33], 2);
+      ADD_MEM (fregs[66], 2);
       ADD_STRING (" FPRS: ");
-      ADD_MEM (fregs[34], 1);
+      ADD_MEM (fregs[67], 1);
       ADD_STRING ("\n f0: ");
       ADD_MEM (fregs[0], 16);
       ADD_STRING (" f2: ");
-      ADD_MEM (fregs[1], 16);
+      ADD_MEM (fregs[2], 16);
       ADD_STRING (" f4: ");
-      ADD_MEM (fregs[2], 16);
+      ADD_MEM (fregs[4], 16);
       ADD_STRING ("\n f6: ");
-      ADD_MEM (fregs[3], 16);
+      ADD_MEM (fregs[6], 16);
       ADD_STRING (" f8: ");
-      ADD_MEM (fregs[4], 16);
+      ADD_MEM (fregs[8], 16);
       ADD_STRING (" f10: ");
-      ADD_MEM (fregs[5], 16);
+      ADD_MEM (fregs[10], 16);
       ADD_STRING ("\n f12: ");
-      ADD_MEM (fregs[6], 16);
+      ADD_MEM (fregs[12], 16);
       ADD_STRING (" f14: ");
-      ADD_MEM (fregs[7], 16);
+      ADD_MEM (fregs[14], 16);
       ADD_STRING (" f16: ");
-      ADD_MEM (fregs[8], 16);
+      ADD_MEM (fregs[16], 16);
       ADD_STRING ("\n f18: ");
-      ADD_MEM (fregs[9], 16);
+      ADD_MEM (fregs[18], 16);
       ADD_STRING (" f20: ");
-      ADD_MEM (fregs[10], 16);
+      ADD_MEM (fregs[20], 16);
       ADD_STRING (" f22: ");
-      ADD_MEM (fregs[11], 16);
+      ADD_MEM (fregs[22], 16);
       ADD_STRING ("\n f24: ");
-      ADD_MEM (fregs[12], 16);
+      ADD_MEM (fregs[24], 16);
       ADD_STRING (" f26: ");
-      ADD_MEM (fregs[13], 16);
+      ADD_MEM (fregs[26], 16);
       ADD_STRING (" f28: ");
-      ADD_MEM (fregs[14], 16);
+      ADD_MEM (fregs[28], 16);
       ADD_STRING ("\n f30: ");
-      ADD_MEM (fregs[15], 16);
+      ADD_MEM (fregs[30], 16);
       ADD_STRING (" f32: ");
-      ADD_MEM (fregs[16], 16);
+      ADD_MEM (fregs[32], 16);
       ADD_STRING (" f34: ");
-      ADD_MEM (fregs[17], 16);
+      ADD_MEM (fregs[34], 16);
       ADD_STRING ("\n f36: ");
-      ADD_MEM (fregs[18], 16);
+      ADD_MEM (fregs[36], 16);
       ADD_STRING (" f38: ");
-      ADD_MEM (fregs[19], 16);
+      ADD_MEM (fregs[38], 16);
       ADD_STRING (" f40: ");
-      ADD_MEM (fregs[20], 16);
+      ADD_MEM (fregs[40], 16);
       ADD_STRING ("\n f42: ");
-      ADD_MEM (fregs[21], 16);
+      ADD_MEM (fregs[42], 16);
       ADD_STRING (" f44: ");
-      ADD_MEM (fregs[22], 16);
+      ADD_MEM (fregs[44], 16);
       ADD_STRING (" f46: ");
-      ADD_MEM (fregs[23], 16);
+      ADD_MEM (fregs[46], 16);
       ADD_STRING ("\n f48: ");
-      ADD_MEM (fregs[24], 16);
+      ADD_MEM (fregs[48], 16);
       ADD_STRING (" f50: ");
-      ADD_MEM (fregs[25], 16);
+      ADD_MEM (fregs[50], 16);
       ADD_STRING (" f52: ");
-      ADD_MEM (fregs[26], 16);
+      ADD_MEM (fregs[52], 16);
       ADD_STRING ("\n f54: ");
-      ADD_MEM (fregs[27], 16);
+      ADD_MEM (fregs[54], 16);
       ADD_STRING (" f56: ");
-      ADD_MEM (fregs[28], 16);
+      ADD_MEM (fregs[56], 16);
       ADD_STRING (" f58: ");
-      ADD_MEM (fregs[29], 16);
+      ADD_MEM (fregs[58], 16);
       ADD_STRING ("\n f60: ");
-      ADD_MEM (fregs[30], 16);
+      ADD_MEM (fregs[60], 16);
       ADD_STRING (" f62: ");
-      ADD_MEM (fregs[31], 16);
+      ADD_MEM (fregs[62], 16);
     }

   ADD_STRING ("\n");
@@ -17,26 +17,14 @@
    write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
    Boston, MA 02111-1307, USA.  */

-struct __rt_signal_frame {
-  siginfo_t sf_info;
-  struct pt_regs sf_regs;
-  __siginfo_fpu_t *fpu_save;
-  struct {
-    void *ss_sp;
-    int ss_flags;
-    size_t ss_size;
-  } sf_stack;
-  unsigned long sf_mask;
-};
-
 #ifndef STACK_BIAS
 #define STACK_BIAS 2047
 #endif

-#define SIGCONTEXT struct __rt_signal_frame *
+#define SIGCONTEXT struct sigcontext *
 #define SIGCONTEXT_EXTRA_ARGS
-#define GET_PC(__ctx)  ((void *) ((__ctx)->sf_regs.tpc))
+#define GET_PC(__ctx)  ((void *) ((__ctx)->sigc_regs.tpc))
 #define ADVANCE_STACK_FRAME(__next) \
-  ((void *) &((struct reg_window *) (((unsigned long int) (__next)) \
-                                     + STACK_BIAS))->ins[6])
-#define GET_STACK(__ctx)  ((void *) ((__ctx)->sf_regs.u_regs[14]))
+  ((void *) (((unsigned long *) (((unsigned long int) (__next)) \
+                                 + STACK_BIAS))+14))
+#define GET_STACK(__ctx)  ((void *) ((__ctx)->sigc_regs.u_regs[14]))
 #define GET_FRAME(__ctx)  ADVANCE_STACK_FRAME (GET_STACK (__ctx))
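Both rewritten macros encode the same sparc64 conventions: %sp and %fp hold the true address minus STACK_BIAS (2047), and a register window saved on the stack is laid out as eight locals followed by eight ins, so slot 14 is %i6, the saved frame pointer, which is why ADVANCE_STACK_FRAME now adds 14 words. A sketch of the unbiasing (variable names are illustrative only):

unsigned long sp = ctx->sigc_regs.u_regs[14];              /* biased %sp */
unsigned long *win = (unsigned long *) (sp + STACK_BIAS);  /* real window */
unsigned long saved_fp = win[8 + 6];                       /* %i6, slot 14 */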
@@ -150,6 +150,7 @@ enum __ptrace_request

 #if __WORDSIZE == 64
+  ,
   /* Get all floating point registers used by a processes.
      This is not supported on all machines.  */
   PTRACE_GETFPREGS = 25,
@@ -157,7 +158,7 @@ enum __ptrace_request

   /* Set all floating point registers used by a processes.
      This is not supported on all machines.  */
-  PTRACE_SETFPREGS = 26,
+  PTRACE_SETFPREGS = 26
 #define PT_SETFPREGS PTRACE_SETFPREGS
 #endif
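The comma shuffling is a C grammar fix: the enumerator before the #if block carries no trailing comma, so the old 64-bit layout started the block without a separator, and the old PTRACE_SETFPREGS line left a trailing comma after the final enumerator, which ISO C89 rejects. Supplying the separator inside the conditional keeps both configurations well-formed; schematically (hypothetical names throughout):

enum demo_request
  {
    DEMO_DETACH = 17            /* last unconditional enumerator, no comma */
#if defined DEMO_WORDSIZE_64    /* stand-in for the __WORDSIZE == 64 test */
    ,                           /* separator lives inside the block */
    DEMO_GETFPREGS = 25,
    DEMO_SETFPREGS = 26         /* no trailing comma in either build */
#endif
  };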