1
0
mirror of https://sourceware.org/git/glibc.git synced 2025-07-30 22:43:12 +03:00

* elf/dl-close.c: Include stddef.h.

(_dl_close): If called recursively, just remember GC needs to be rerun
	and decrease l_direct_opencount.  Avoid GC if l_direct_opencount
	decreased to 1.  Rerun GC at the end if any destructor unloaded some
	additional libraries.
	* elf/Makefile: Add rules to build and run unload6 test.
	* elf/unload6.c: New test.
	* elf/unload6mod1.c: New file.
	* elf/unload6mod2.c: New file.
	* elf/unload6mod3.c: New file.

	* malloc/hooks.c (mem2chunk_check): Add magic_p argument, set *magic_p
	if magic_p is not NULL.
	(top_check): Invoke MALLOC_FAILURE_ACTION if MORECORE failed.
	(malloc_check): Fail if sz == -1.
	(free_check): Adjust mem2chunk_check caller.
	(realloc_check): Likewise.  Fail if bytes == -1.  If bytes == 0 and
	oldmem != NULL, call free_check and return NULL.  If reallocating
	and returning NULL, invert magic byte again to make oldmem valid
	region for further checking.
	(memalign_check): Fail if bytes == -1.
	* malloc/Makefile: Add rules to build and run tst-mcheck.
	* malloc/tst-mcheck.c: New test.
This commit is contained in:
Ulrich Drepper
2005-04-27 01:39:11 +00:00
parent 462be6908c
commit bfc832ccf1
10 changed files with 302 additions and 26 deletions

View File

@@ -1,5 +1,5 @@
/* Malloc implementation for multiple threads without lock contention.
Copyright (C) 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
Copyright (C) 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Wolfram Gloger <wg@malloc.de>, 2001.
@@ -146,9 +146,9 @@ mem2mem_check(ptr, sz) Void_t *ptr; size_t sz;
static mchunkptr
internal_function
#if __STD_C
mem2chunk_check(Void_t* mem)
mem2chunk_check(Void_t* mem, unsigned char **magic_p)
#else
mem2chunk_check(mem) Void_t* mem;
mem2chunk_check(mem, magic_p) Void_t* mem; unsigned char **magic_p;
#endif
{
mchunkptr p;
@@ -173,7 +173,6 @@ mem2chunk_check(mem) Void_t* mem;
for(sz += SIZE_SZ-1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
}
((unsigned char*)p)[sz] ^= 0xFF;
} else {
unsigned long offset, page_mask = malloc_getpagesize-1;
@@ -193,8 +192,10 @@ mem2chunk_check(mem) Void_t* mem;
for(sz -= 1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
}
((unsigned char*)p)[sz] ^= 0xFF;
}
((unsigned char*)p)[sz] ^= 0xFF;
if (magic_p)
*magic_p = (unsigned char *)p + sz;
return p;
}
@@ -232,7 +233,11 @@ top_check()
sbrk_size = front_misalign + mp_.top_pad + MINSIZE;
sbrk_size += pagesz - ((unsigned long)(brk + sbrk_size) & (pagesz - 1));
new_brk = (char*)(MORECORE (sbrk_size));
if (new_brk == (char*)(MORECORE_FAILURE)) return -1;
if (new_brk == (char*)(MORECORE_FAILURE))
{
MALLOC_FAILURE_ACTION;
return -1;
}
/* Call the `morecore' hook if necessary. */
if (__after_morecore_hook)
(*__after_morecore_hook) ();
@@ -253,6 +258,11 @@ malloc_check(sz, caller) size_t sz; const Void_t *caller;
{
Void_t *victim;
if (sz+1 == 0) {
MALLOC_FAILURE_ACTION;
return NULL;
}
(void)mutex_lock(&main_arena.mutex);
victim = (top_check() >= 0) ? _int_malloc(&main_arena, sz+1) : NULL;
(void)mutex_unlock(&main_arena.mutex);
@@ -270,7 +280,7 @@ free_check(mem, caller) Void_t* mem; const Void_t *caller;
if(!mem) return;
(void)mutex_lock(&main_arena.mutex);
p = mem2chunk_check(mem);
p = mem2chunk_check(mem, NULL);
if(!p) {
(void)mutex_unlock(&main_arena.mutex);
@@ -302,10 +312,19 @@ realloc_check(oldmem, bytes, caller)
mchunkptr oldp;
INTERNAL_SIZE_T nb, oldsize;
Void_t* newmem = 0;
unsigned char *magic_p;
if (bytes+1 == 0) {
MALLOC_FAILURE_ACTION;
return NULL;
}
if (oldmem == 0) return malloc_check(bytes, NULL);
if (bytes == 0) {
free_check (oldmem, NULL);
return NULL;
}
(void)mutex_lock(&main_arena.mutex);
oldp = mem2chunk_check(oldmem);
oldp = mem2chunk_check(oldmem, &magic_p);
(void)mutex_unlock(&main_arena.mutex);
if(!oldp) {
malloc_printerr(check_action, "realloc(): invalid pointer", oldmem);
@@ -357,6 +376,12 @@ realloc_check(oldmem, bytes, caller)
#if HAVE_MMAP
}
#endif
/* mem2chunk_check changed the magic byte in the old chunk.
If newmem is NULL, then the old chunk will still be used though,
so we need to invert that change here. */
if (newmem == NULL) *magic_p ^= 0xFF;
(void)mutex_unlock(&main_arena.mutex);
return mem2mem_check(newmem, bytes);
@@ -376,6 +401,10 @@ memalign_check(alignment, bytes, caller)
if (alignment <= MALLOC_ALIGNMENT) return malloc_check(bytes, NULL);
if (alignment < MINSIZE) alignment = MINSIZE;
if (bytes+1 == 0) {
MALLOC_FAILURE_ACTION;
return NULL;
}
checked_request2size(bytes+1, nb);
(void)mutex_lock(&main_arena.mutex);
mem = (top_check() >= 0) ? _int_memalign(&main_arena, alignment, bytes+1) :