Mirror of https://sourceware.org/git/glibc.git, synced 2025-07-29 11:41:21 +03:00

Revert 4248f0da6f.

Objections were raised surrounding the calloc simplification and it is better to revert the patch, continue discussions and then submit a new patch for inclusion with all issues fully addressed.
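
For context, the simplification being reverted had collapsed __libc_calloc into what is effectively malloc followed by an unconditional memset, whereas the restored code in the diff below skips the clear whenever the memory is already known to be zero (freshly mmap'd chunks, or freshly obtained top-chunk memory when MORECORE_CLEARS is set). The following is a minimal, standalone sketch of the reverted approach written against the public allocator API rather than glibc internals; sketch_calloc and its overflow check are illustrative only, not glibc code.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative sketch only: calloc expressed as malloc followed by an
   unconditional memset, the shape of the simplification this commit
   reverts.  The restored __libc_calloc avoids the clear when the
   memory is already known to be zero-filled.  */
static void *
sketch_calloc (size_t n, size_t elem_size)
{
  /* size_t multiplication wraps on overflow, so reject it explicitly.  */
  if (elem_size != 0 && n > (size_t) -1 / elem_size)
    {
      errno = ENOMEM;
      return NULL;
    }

  size_t bytes = n * elem_size;
  void *mem = malloc (bytes);
  if (mem == NULL)
    return NULL;

  /* Unconditional clear: correct, but redundant for memory the kernel
     already hands out zero-filled.  */
  return memset (mem, 0, bytes);
}

int
main (void)
{
  int *p = sketch_calloc (4, sizeof (int));
  if (p != NULL)
    printf ("%d %d %d %d\n", p[0], p[1], p[2], p[3]);  /* prints 0 0 0 0 */
  free (p);
  return 0;
}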

ChangeLog
@@ -1,3 +1,7 @@
+2014-03-03  Carlos O'Donell  <carlos@redhat.com>
+
+	* malloc/malloc.c (__libc_calloc): Revert last change.
+
 2014-03-03  Adhemerval Zanella  <azanella@linux.vnet.ibm.com>
 
 	* sysdeps/powerpc/fpu/libm-test-ulps: Update.

malloc/malloc.c (117 changed lines)
@@ -3141,8 +3141,13 @@ __libc_pvalloc (size_t bytes)
 void *
 __libc_calloc (size_t n, size_t elem_size)
 {
-  INTERNAL_SIZE_T bytes;
+  mstate av;
+  mchunkptr oldtop, p;
+  INTERNAL_SIZE_T bytes, sz, csz, oldtopsize;
   void *mem;
+  unsigned long clearsize;
+  unsigned long nclears;
+  INTERNAL_SIZE_T *d;
 
   /* size_t is unsigned so the behavior on overflow is defined.  */
   bytes = n * elem_size;
@@ -3161,15 +3166,113 @@ __libc_calloc (size_t n, size_t elem_size)
     atomic_forced_read (__malloc_hook);
   if (__builtin_expect (hook != NULL, 0))
     {
-      mem = (*hook)(bytes, RETURN_ADDRESS (0));
-    }
-  else
-    mem = __libc_malloc (bytes);
+      sz = bytes;
+      mem = (*hook)(sz, RETURN_ADDRESS (0));
+      if (mem == 0)
+        return 0;
 
-  if (mem == 0)
+      return memset (mem, 0, sz);
+    }
+
+  sz = bytes;
+
+  arena_get (av, sz);
+  if (!av)
     return 0;
 
-  return memset (mem, 0, bytes);
+  /* Check if we hand out the top chunk, in which case there may be no
+     need to clear. */
+#if MORECORE_CLEARS
+  oldtop = top (av);
+  oldtopsize = chunksize (top (av));
+# if MORECORE_CLEARS < 2
+  /* Only newly allocated memory is guaranteed to be cleared.  */
+  if (av == &main_arena &&
+      oldtopsize < mp_.sbrk_base + av->max_system_mem - (char *) oldtop)
+    oldtopsize = (mp_.sbrk_base + av->max_system_mem - (char *) oldtop);
+# endif
+  if (av != &main_arena)
+    {
+      heap_info *heap = heap_for_ptr (oldtop);
+      if (oldtopsize < (char *) heap + heap->mprotect_size - (char *) oldtop)
+        oldtopsize = (char *) heap + heap->mprotect_size - (char *) oldtop;
+    }
+#endif
+  mem = _int_malloc (av, sz);
+
+
+  assert (!mem || chunk_is_mmapped (mem2chunk (mem)) ||
+          av == arena_for_chunk (mem2chunk (mem)));
+
+  if (mem == 0)
+    {
+      LIBC_PROBE (memory_calloc_retry, 1, sz);
+      av = arena_get_retry (av, sz);
+      if (__builtin_expect (av != NULL, 1))
+        {
+          mem = _int_malloc (av, sz);
+          (void) mutex_unlock (&av->mutex);
+        }
+      if (mem == 0)
+        return 0;
+    }
+  else
+    (void) mutex_unlock (&av->mutex);
+  p = mem2chunk (mem);
+
+  /* Two optional cases in which clearing not necessary */
+  if (chunk_is_mmapped (p))
+    {
+      if (__builtin_expect (perturb_byte, 0))
+        return memset (mem, 0, sz);
+
+      return mem;
+    }
+
+  csz = chunksize (p);
+
+#if MORECORE_CLEARS
+  if (perturb_byte == 0 && (p == oldtop && csz > oldtopsize))
+    {
+      /* clear only the bytes from non-freshly-sbrked memory */
+      csz = oldtopsize;
+    }
+#endif
+
+  /* Unroll clear of <= 36 bytes (72 if 8byte sizes).  We know that
+     contents have an odd number of INTERNAL_SIZE_T-sized words;
+     minimally 3.  */
+  d = (INTERNAL_SIZE_T *) mem;
+  clearsize = csz - SIZE_SZ;
+  nclears = clearsize / sizeof (INTERNAL_SIZE_T);
+  assert (nclears >= 3);
+
+  if (nclears > 9)
+    return memset (d, 0, clearsize);
+
+  else
+    {
+      *(d + 0) = 0;
+      *(d + 1) = 0;
+      *(d + 2) = 0;
+      if (nclears > 4)
+        {
+          *(d + 3) = 0;
+          *(d + 4) = 0;
+          if (nclears > 6)
+            {
+              *(d + 5) = 0;
+              *(d + 6) = 0;
+              if (nclears > 8)
+                {
+                  *(d + 7) = 0;
+                  *(d + 8) = 0;
+                }
+            }
+        }
+    }
+
+  return mem;
 }
 
 /*
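
As a rough worked example of the clearing arithmetic in the restored code, assuming 8-byte size words as on a typical 64-bit build (SIZE_SZ and sizeof (INTERNAL_SIZE_T) both 8, which is an assumption, not something the diff states): a chunk of csz bytes has csz - SIZE_SZ clearable bytes, and the unrolled word stores are used only when that is at most 9 words (72 bytes), matching the "Unroll clear of <= 36 bytes (72 if 8byte sizes)" comment. The small program below only reproduces that arithmetic; it does not touch glibc internals.

#include <stdio.h>

/* Illustrative arithmetic only, assuming 8-byte size words as on a
   typical 64-bit build.  For a chunk of csz bytes the restored code
   clears csz - SIZE_SZ bytes, using unrolled stores unless that
   exceeds 9 words.  */
int
main (void)
{
  const unsigned long size_sz = 8;   /* assumed SIZE_SZ */
  const unsigned long word = 8;      /* assumed sizeof (INTERNAL_SIZE_T) */
  const unsigned long chunk_sizes[] = { 32, 48, 80, 112 };

  for (size_t i = 0; i < sizeof chunk_sizes / sizeof chunk_sizes[0]; i++)
    {
      unsigned long clearsize = chunk_sizes[i] - size_sz;
      unsigned long nclears = clearsize / word;
      printf ("csz=%lu -> clearsize=%lu, nclears=%lu (%s)\n",
              chunk_sizes[i], clearsize, nclears,
              nclears > 9 ? "memset" : "unrolled stores");
    }
  return 0;
}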