2000-12-28  Wolfram Gloger  <wg@malloc.de>

	* malloc/malloc.c (MALLOC_COPY): Handle case if source and
	destination overlap.  Assume dest is always below source if
	overlapping.
Committed by Ulrich Drepper on 2000-12-31 07:39:50 +00:00
commit 09f5e1635a, parent c77a447822
5 changed files with 56 additions and 39 deletions
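The reasoning behind the MALLOC_COPY change is easiest to see outside the macro. Below is a minimal standalone sketch (not part of the patch; forward_word_copy is an illustrative name): a plain low-to-high word copy already handles overlapping regions correctly as long as the destination starts below the source, which is exactly the invariant the new overlap handling relies on.

#include <stdio.h>
#include <stddef.h>

/* Copy n words from src to dest, walking upward.  When dest < src, every
   source word is read before the copy reaches (and possibly overwrites)
   it, so overlap is harmless.  */
static void forward_word_copy(size_t *dest, const size_t *src, size_t n)
{
  for (size_t i = 0; i < n; i++)
    dest[i] = src[i];
}

int main(void)
{
  size_t buf[8] = {0, 1, 2, 3, 4, 5, 6, 7};

  /* Overlapping copy with dest (buf) below src (buf + 2): the words that
     get overwritten have already been read by the time they change.  */
  forward_word_copy(buf, buf + 2, 6);

  for (int i = 0; i < 8; i++)
    printf("%zu ", buf[i]);
  printf("\n");          /* prints: 2 3 4 5 6 7 6 7 */
  return 0;
}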

ChangeLog

@@ -1,3 +1,9 @@
+2000-12-28  Wolfram Gloger  <wg@malloc.de>
+
+	* malloc/malloc.c (MALLOC_COPY): Handle case if source and
+	destination overlap.  Assume dest is always below source if
+	overlapping.
+
 2000-12-30  Ulrich Drepper  <drepper@redhat.com>
 
 	* elf/dl-close.c (_dl_close): We can ignore the NODELETE flag if the

elf/dl-open.c

@@ -269,10 +269,6 @@ dl_open_worker (void *a)
   /* Load that object's dependencies.  */
   _dl_map_object_deps (new, NULL, 0, 0);
 
-  /* Increment the open count for all dependencies.  */
-  for (i = 0; i < new->l_searchlist.r_nlist; ++i)
-    ++new->l_searchlist.r_list[i]->l_opencount;
-
   /* So far, so good.  Now check the versions.  */
   for (i = 0; i < new->l_searchlist.r_nlist; ++i)
     if (new->l_searchlist.r_list[i]->l_versions == NULL)
@@ -321,6 +317,10 @@ dl_open_worker (void *a)
           l = l->l_prev;
         }
 
+  /* Increment the open count for all dependencies.  */
+  for (i = 0; i < new->l_searchlist.r_nlist; ++i)
+    ++new->l_searchlist.r_list[i]->l_opencount;
+
   /* Run the initializer functions of new objects.  */
   _dl_init (new, __libc_argc, __libc_argv, __environ);
@@ -399,11 +399,10 @@ _dl_open (const char *file, int mode, const void *caller)
     {
       int i;
 
-      /* Increment open counters for all objects which did not get
-         correctly loaded.  */
+      /* Increment open counters for all objects since this has
+         not happened yet.  */
       for (i = 0; i < args.map->l_searchlist.r_nlist; ++i)
-        if (args.map->l_searchlist.r_list[i]->l_opencount == 0)
-          args.map->l_searchlist.r_list[i]->l_opencount = 1;
+        ++args.map->l_searchlist.r_list[i]->l_opencount;
 
       _dl_close (args.map);
     }

linuxthreads/ChangeLog

@@ -1,3 +1,8 @@
+2000-11-15  Wolfram Gloger  <wg@malloc.de>
+
+	* manager.c (pthread_free): [!FLOATING_STACKS]: Only remap the
+	stack to PROT_NONE, don't unmap it, avoiding collisions with malloc.
+
 2000-12-27  Andreas Jaeger  <aj@suse.de>
 
 	* Examples/ex13.c: Make local functions static.

linuxthreads/manager.c

@@ -418,7 +418,7 @@ static int pthread_allocate_stack(const pthread_attr_t *attr,
       new_thread_bottom = (char *) map_addr + guardsize;
       new_thread = ((pthread_descr) (new_thread_bottom + stacksize)) - 1;
-# else
+# else /* !FLOATING_STACKS */
       if (attr != NULL)
         {
           guardsize = page_roundup (attr->__guardsize, granularity);
@@ -696,23 +696,24 @@ static void pthread_free(pthread_descr th)
     {
       size_t guardsize = th->p_guardsize;
       /* Free the stack and thread descriptor area */
+      char *guardaddr = th->p_guardaddr;
+      /* Guardaddr is always set, even if guardsize is 0.  This allows
+         us to compute everything else.  */
+      size_t stacksize = (char *)(th+1) - guardaddr - guardsize;
 #ifdef NEED_SEPARATE_REGISTER_STACK
-      char *guardaddr = th->p_guardaddr;
-      /* We unmap exactly what we mapped, in case there was something
-         else in the same region.  Guardaddr is always set, eve if
-         guardsize is 0.  This allows us to compute everything else.  */
-      size_t stacksize = (char *)(th+1) - guardaddr - guardsize;
-      /* Unmap the register stack, which is below guardaddr.  */
-      munmap((caddr_t)(guardaddr-stacksize),
-             2 * stacksize + th->p_guardsize);
+      /* Take account of the register stack, which is below guardaddr.  */
+      guardaddr -= stacksize;
+      stacksize *= 2;
+#endif
+#if FLOATING_STACKS
+      /* Can unmap safely.  */
+      munmap(guardaddr, stacksize + guardsize);
 #else
-      char *guardaddr = th->p_guardaddr;
-      /* We unmap exactly what we mapped, in case there was something
-         else in the same region.  Guardaddr is always set, eve if
-         guardsize is 0.  This allows us to compute everything else.  */
-      size_t stacksize = (char *)(th+1) - guardaddr - guardsize;
-      munmap (guardaddr, stacksize + guardsize);
+      /* Only remap to PROT_NONE, so that the region is reserved in
+         case we map the stack again later.  Avoid collision with
+         other mmap()s, in particular by malloc().  */
+      mmap(guardaddr, stacksize + guardsize, PROT_NONE,
+           MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
 #endif
     }
 }
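The manager.c change above swaps munmap() for an mmap() to PROT_NONE in the !FLOATING_STACKS case. Below is a minimal standalone sketch (not part of the patch) of that reserve-instead-of-release pattern: the freed stack range stays mapped with no access rights, so malloc()'s own mmap() calls can never land inside it, yet a later stack allocation can take the range back with MAP_FIXED.

#include <stdio.h>
#include <sys/mman.h>

#ifndef MAP_ANONYMOUS
# define MAP_ANONYMOUS MAP_ANON
#endif

int main(void)
{
  size_t size = 64 * 1024;

  /* "Allocate" a stack-like region. */
  char *stk = mmap(NULL, size, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (stk == MAP_FAILED)
    return 1;

  /* Free it the way the patched pthread_free does: drop all access
     rights but keep the range mapped, so no other mapping can land
     there. */
  mmap(stk, size, PROT_NONE,
       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);

  /* Reuse the reserved range later, again with MAP_FIXED. */
  char *again = mmap(stk, size, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
  printf("reused at the same address: %s\n",
         again == stk ? "yes" : "no");
  return 0;
}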

malloc/malloc.c

@@ -423,11 +423,12 @@ Void_t* memmove();
 #endif
 #endif
 
-#if USE_MEMCPY
-
 /* The following macros are only invoked with (2n+1)-multiples of
    INTERNAL_SIZE_T units, with a positive integer n. This is exploited
-   for fast inline execution when n is small. */
+   for fast inline execution when n is small.  If the regions to be
+   copied do overlap, the destination lies always _below_ the source.  */
+
+#if USE_MEMCPY
 
 #define MALLOC_ZERO(charp, nbytes)  \
 do {  \
@@ -446,7 +447,9 @@ do { \
   } else memset((charp), 0, mzsz);  \
 } while(0)
 
-#define MALLOC_COPY(dest,src,nbytes)  \
+/* If the regions overlap, dest is always _below_ src.  */
+
+#define MALLOC_COPY(dest,src,nbytes,overlap)  \
 do {  \
   INTERNAL_SIZE_T mcsz = (nbytes);  \
   if(mcsz <= 9*sizeof(mcsz)) {  \
@@ -461,12 +464,12 @@ do { \
       *mcdst++ = *mcsrc++;  \
       *mcdst++ = *mcsrc++;  \
       *mcdst   = *mcsrc  ;  \
-  } else memcpy(dest, src, mcsz);  \
+  } else if(overlap)  \
+    memmove(dest, src, mcsz);  \
+  else  \
+    memcpy(dest, src, mcsz);  \
 } while(0)
 
-#define MALLOC_MEMMOVE(dest,src,nbytes)  \
-  memmove(dest, src, mcsz)
-
 #else /* !USE_MEMCPY */
 
 /* Use Duff's device for good zeroing/copying performance. */
@@ -488,7 +491,9 @@ do { \
   }  \
 } while(0)
 
-#define MALLOC_COPY(dest,src,nbytes)  \
+/* If the regions overlap, dest is always _below_ src.  */
+
+#define MALLOC_COPY(dest,src,nbytes,overlap)  \
 do {  \
   INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) src;  \
   INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) dest;  \
@@ -3255,7 +3260,7 @@ Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes;
       /* Must alloc, copy, free. */
       newmem = mALLOc(bytes);
       if (newmem == 0) return 0; /* propagate failure */
-      MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ);
+      MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ, 0);
       munmap_chunk(oldp);
       return newmem;
     }
@@ -3370,7 +3375,8 @@ arena* ar_ptr; mchunkptr oldp; INTERNAL_SIZE_T oldsize, nb;
         unlink(prev, bck, fwd);
         newp = prev;
         newsize += prevsize + nextsize;
-        MALLOC_COPY(BOUNDED_N(chunk2mem(newp), oldsize), oldmem, oldsize);
+        MALLOC_COPY(BOUNDED_N(chunk2mem(newp), oldsize), oldmem, oldsize,
+                    1);
         top(ar_ptr) = chunk_at_offset(newp, nb);
         set_head(top(ar_ptr), (newsize - nb) | PREV_INUSE);
         set_head_size(newp, nb);
@@ -3385,7 +3391,7 @@ arena* ar_ptr; mchunkptr oldp; INTERNAL_SIZE_T oldsize, nb;
           unlink(prev, bck, fwd);
           newp = prev;
           newsize += nextsize + prevsize;
-          MALLOC_COPY(BOUNDED_N(chunk2mem(newp), oldsize), oldmem, oldsize);
+          MALLOC_COPY(BOUNDED_N(chunk2mem(newp), oldsize), oldmem, oldsize, 1);
           goto split;
         }
       }
@@ -3396,7 +3402,7 @@ arena* ar_ptr; mchunkptr oldp; INTERNAL_SIZE_T oldsize, nb;
         unlink(prev, bck, fwd);
         newp = prev;
         newsize += prevsize;
-        MALLOC_COPY(BOUNDED_N(chunk2mem(newp), oldsize), oldmem, oldsize);
+        MALLOC_COPY(BOUNDED_N(chunk2mem(newp), oldsize), oldmem, oldsize, 1);
         goto split;
       }
     }
@@ -3436,7 +3442,7 @@ arena* ar_ptr; mchunkptr oldp; INTERNAL_SIZE_T oldsize, nb;
   }
 
   /* Otherwise copy, free, and exit */
-  MALLOC_COPY(BOUNDED_N(chunk2mem(newp), oldsize), oldmem, oldsize);
+  MALLOC_COPY(BOUNDED_N(chunk2mem(newp), oldsize), oldmem, oldsize, 0);
   chunk_free(ar_ptr, oldp);
   return newp;
 }
@@ -4605,7 +4611,7 @@ realloc_check(oldmem, bytes, caller)
     newp = (top_check() >= 0) ? chunk_alloc(&main_arena, nb) : NULL;
     if (newp) {
       MALLOC_COPY(BOUNDED_N(chunk2mem(newp), nb),
-                  oldmem, oldsize - 2*SIZE_SZ);
+                  oldmem, oldsize - 2*SIZE_SZ, 0);
       munmap_chunk(oldp);
     }
   }
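For reference, the dispatch the new fourth argument selects between, written out as a plain function rather than the macro (a sketch only; copy_bytes is a made-up name). realloc passes 1 when it slides data down into the preceding free chunk, where the old and new user areas can overlap, and 0 when it copies into a freshly allocated, disjoint chunk.

#include <string.h>

/* Sketch of the overlap dispatch: memmove tolerates overlapping regions,
   memcpy does not, so memcpy is only used when the caller knows the
   regions are disjoint. */
static void copy_bytes(void *dest, const void *src, size_t n, int overlap)
{
  if (overlap)
    memmove(dest, src, n);
  else
    memcpy(dest, src, n);
}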