diff --git a/malloc/malloc-debug.c b/malloc/malloc-debug.c
@@ -150,6 +150,19 @@ memalign_hook_ini (size_t alignment, size_t sz, const void *caller)
 
 static size_t pagesize;
 
+/* These variables are used for undumping support.  Chunks are marked
+   as using mmap, but we leave them alone if they fall into this
+   range.  NB: The chunk size for these chunks only includes the
+   initial size field (of SIZE_SZ bytes), there is no trailing size
+   field (unlike with regular mmapped chunks).  */
+static mchunkptr dumped_main_arena_start; /* Inclusive.  */
+static mchunkptr dumped_main_arena_end;   /* Exclusive.  */
+
+/* True if the pointer falls into the dumped arena.  Use this after
+   chunk_is_mmapped indicates a chunk is mmapped.  */
+#define DUMPED_MAIN_ARENA_CHUNK(p) \
+  ((p) >= dumped_main_arena_start && (p) < dumped_main_arena_end)
+
 /* The allocator functions.  */
 
 static void *
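As a reading aid, here is a hedged sketch of the size accounting the comment above describes. SIZE_SZ and CHUNK_HDR_SZ are the names malloc.c uses (one size_t-sized word, and a two-word chunk header, respectively); the helper itself is hypothetical and not part of the patch.

#include <stddef.h>

/* Stand-ins for the malloc.c constants, under the assumption that
   SIZE_SZ is one size_t word and CHUNK_HDR_SZ is two of them.  */
#define SIZE_SZ      (sizeof (size_t))
#define CHUNK_HDR_SZ (2 * SIZE_SZ)

/* Usable payload of a chunk whose total size is CHUNK_SIZE: a dumped
   fake mmapped chunk pays only the single leading size field, while a
   regular mmapped chunk pays the full two-word header.  */
static size_t
usable_bytes (size_t chunk_size, int dumped)
{
  return chunk_size - (dumped ? SIZE_SZ : CHUNK_HDR_SZ);
}

This matches the malloc_usable_size hunk further down, which returns chunksize (p) - SIZE_SZ for dumped chunks.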
@@ -189,7 +202,9 @@ __debug_free (void *mem)
   if (__is_malloc_debug_enabled (MALLOC_MCHECK_HOOK))
     mem = free_mcheck (mem);
 
-  if (__is_malloc_debug_enabled (MALLOC_CHECK_HOOK))
+  if (DUMPED_MAIN_ARENA_CHUNK (mem2chunk (mem)))
+    /* Do nothing.  */;
+  else if (__is_malloc_debug_enabled (MALLOC_CHECK_HOOK))
     free_check (mem);
   else
     __libc_free (mem);
@@ -212,7 +227,32 @@ __debug_realloc (void *oldmem, size_t bytes)
   if ((!__is_malloc_debug_enabled (MALLOC_MCHECK_HOOK)
       || !realloc_mcheck_before (&oldmem, &bytes, &oldsize, &victim)))
     {
-      if (__is_malloc_debug_enabled (MALLOC_CHECK_HOOK))
+      mchunkptr oldp = mem2chunk (oldmem);
+
+      /* If this is a faked mmapped chunk from the dumped main arena,
+         always make a copy (and do not free the old chunk).  */
+      if (DUMPED_MAIN_ARENA_CHUNK (oldp))
+        {
+          if (bytes == 0 && oldmem != NULL)
+            victim = NULL;
+          else
+            {
+              const INTERNAL_SIZE_T osize = chunksize (oldp);
+              /* Must alloc, copy, free.  */
+              victim = __debug_malloc (bytes);
+              /* Copy as many bytes as are available from the old chunk
+                 and fit into the new size.  NB: The overhead for faked
+                 mmapped chunks is only SIZE_SZ, not CHUNK_HDR_SZ as for
+                 regular mmapped chunks.  */
+              if (victim != NULL)
+                {
+                  if (bytes > osize - SIZE_SZ)
+                    bytes = osize - SIZE_SZ;
+                  memcpy (victim, oldmem, bytes);
+                }
+            }
+        }
+      else if (__is_malloc_debug_enabled (MALLOC_CHECK_HOOK))
         victim = realloc_check (oldmem, bytes);
       else
         victim = __libc_realloc (oldmem, bytes);
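The combined effect of the two hunks above: free on a dumped chunk is a deliberate no-op, and realloc falls back to allocate-and-copy without ever freeing the old storage. A minimal sketch of that fallback, using only the C library; realloc_dumped_chunk is a hypothetical stand-in for the code path above, with osize playing the role of chunksize (oldp).

#include <stdlib.h>
#include <string.h>

static void *
realloc_dumped_chunk (void *oldmem, size_t osize, size_t bytes)
{
  /* Shrink-to-zero on a dumped chunk yields NULL, matching the
     bytes == 0 && oldmem != NULL case above.  */
  if (bytes == 0 && oldmem != NULL)
    return NULL;
  /* Alloc and copy, but never free: the old chunk lives in the
     dumped image and was not allocated by this malloc.  */
  void *victim = malloc (bytes);
  if (victim != NULL)
    {
      /* A faked mmapped chunk has only one size word of overhead.  */
      size_t avail = osize - sizeof (size_t);
      memcpy (victim, oldmem, bytes < avail ? bytes : avail);
    }
  return victim;
}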
@@ -374,6 +414,10 @@ malloc_usable_size (void *mem)
   if (__is_malloc_debug_enabled (MALLOC_CHECK_HOOK))
     return malloc_check_get_size (mem);
 
+  mchunkptr p = mem2chunk (mem);
+  if (DUMPED_MAIN_ARENA_CHUNK (p))
+    return chunksize (p) - SIZE_SZ;
+
   return musable (mem);
 }
 
@@ -473,10 +517,43 @@ malloc_trim (size_t s)
 
 #if SHLIB_COMPAT (libc_malloc_debug, GLIBC_2_0, GLIBC_2_25)
 
-/* Support for saving/restoring dumped heaps in old GLIBCs is no
-   longer implemented - instead we provide dummy implementations
-   which always fail.  We need to provide these symbol so that
-   existing Emacs binaries continue to work with BIND_NOW.  */
+/* Support for restoring dumped heaps contained in historic Emacs
+   executables.  The heap saving feature (malloc_get_state) is no
+   longer implemented in this version of glibc, but we have a heap
+   rewriter in malloc_set_state which transforms the heap into a
+   version compatible with current malloc.  */
+
+#define MALLOC_STATE_MAGIC   0x444c4541l
+#define MALLOC_STATE_VERSION (0 * 0x100l + 5l) /* major*0x100 + minor */
+
+struct malloc_save_state
+{
+  long magic;
+  long version;
+  mbinptr av[NBINS * 2 + 2];
+  char *sbrk_base;
+  int sbrked_mem_bytes;
+  unsigned long trim_threshold;
+  unsigned long top_pad;
+  unsigned int n_mmaps_max;
+  unsigned long mmap_threshold;
+  int check_action;
+  unsigned long max_sbrked_mem;
+  unsigned long max_total_mem;  /* Always 0, for backwards compatibility.  */
+  unsigned int n_mmaps;
+  unsigned int max_n_mmaps;
+  unsigned long mmapped_mem;
+  unsigned long max_mmapped_mem;
+  int using_malloc_checking;
+  unsigned long max_fast;
+  unsigned long arena_test;
+  unsigned long arena_max;
+  unsigned long narenas;
+};
+
+/* Dummy implementation which always fails.  We need to provide this
+   symbol so that existing Emacs binaries continue to work with
+   BIND_NOW.  */
 void *
 malloc_get_state (void)
 {
@@ -489,7 +566,81 @@ compat_symbol (libc_malloc_debug, malloc_get_state, malloc_get_state,
 int
 malloc_set_state (void *msptr)
 {
-  return -1;
+  struct malloc_save_state *ms = (struct malloc_save_state *) msptr;
+
+  if (ms->magic != MALLOC_STATE_MAGIC)
+    return -1;
+
+  /* Must fail if the major version is too high.  */
+  if ((ms->version & ~0xffl) > (MALLOC_STATE_VERSION & ~0xffl))
+    return -2;
+
+  if (debug_initialized == 1)
+    return -1;
+
+  bool check_was_enabled = __is_malloc_debug_enabled (MALLOC_CHECK_HOOK);
+
+  /* It's not too late, so disable MALLOC_CHECK_ and all of the hooks.  */
+  __malloc_hook = NULL;
+  __realloc_hook = NULL;
+  __free_hook = NULL;
+  __memalign_hook = NULL;
+  __malloc_debug_disable (MALLOC_CHECK_HOOK);
+
+  /* We do not need to perform locking here because malloc_set_state
+     must be called before the first call into the malloc subsystem
+     (usually via __malloc_initialize_hook).  pthread_create always
+     calls calloc and thus must be called only afterwards, so there
+     cannot be more than one thread when we reach this point.  Also
+     handle initialization if either we ended up being called before
+     the first malloc or through the hook when malloc-check was
+     enabled.  */
+  if (debug_initialized < 0)
+    generic_hook_ini ();
+  else if (check_was_enabled)
+    __libc_free (__libc_malloc (0));
+
+  /* Patch the dumped heap.  We no longer try to integrate into the
+     existing heap.  Instead, we mark the existing chunks as mmapped.
+     Together with the update to dumped_main_arena_start and
+     dumped_main_arena_end, realloc and free will recognize these
+     chunks as dumped fake mmapped chunks and never free them.  */
+
+  /* Find the chunk with the lowest address within the heap.  */
+  mchunkptr chunk = NULL;
+  {
+    size_t *candidate = (size_t *) ms->sbrk_base;
+    size_t *end = (size_t *) (ms->sbrk_base + ms->sbrked_mem_bytes);
+    while (candidate < end)
+      if (*candidate != 0)
+        {
+          chunk = mem2chunk ((void *) (candidate + 1));
+          break;
+        }
+      else
+        ++candidate;
+  }
+  if (chunk == NULL)
+    return 0;
+
+  /* Iterate over the dumped heap and patch the chunks so that they
+     are treated as fake mmapped chunks.  */
+  mchunkptr top = ms->av[2];
+  while (chunk < top)
+    {
+      if (inuse (chunk))
+        {
+          /* Mark chunk as mmapped, to trigger the fallback path.  */
+          size_t size = chunksize (chunk);
+          set_head (chunk, size | IS_MMAPPED);
+        }
+      chunk = next_chunk (chunk);
+    }
+
+  /* The dumped fake mmapped chunks all lie in this address range.  */
+  dumped_main_arena_start = (mchunkptr) ms->sbrk_base;
+  dumped_main_arena_end = top;
+
+  return 0;
 }
 compat_symbol (libc_malloc_debug, malloc_set_state, malloc_set_state,
                GLIBC_2_0);
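The version handshake between the two files in this patch is worth spelling out; the values below are taken directly from the hunks themselves.

/* Illustration only.  The version word packs major*0x100 + minor, so
   masking with ~0xff compares major versions and ignores minors.  */
long heap_version = 0 * 0x100l + 4l;  /* written by tst-mallocstate below */
long lib_version  = 0 * 0x100l + 5l;  /* MALLOC_STATE_VERSION above */
/* (heap_version & ~0xffl) > (lib_version & ~0xffl) evaluates to
   0 > 0, i.e. false: a minor-version-4 heap is accepted by the
   minor-version-5 rewriter.  A heap with major version 1 (0x104)
   would give 0x100 > 0 and be rejected with -2.  */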
diff --git a/malloc/tst-mallocstate.c b/malloc/tst-mallocstate.c
@@ -17,6 +17,7 @@
    <https://www.gnu.org/licenses/>.  */
 
+#include <errno.h>
 #include <stdbool.h>
 #include <stdio.h>
 #include <string.h>
 #include <libc-symbols.h>
@@ -33,8 +34,36 @@ compat_symbol_reference (libc, malloc_get_state, malloc_get_state, GLIBC_2_0);
 int malloc_set_state (void *);
 compat_symbol_reference (libc, malloc_set_state, malloc_set_state, GLIBC_2_0);
 
-#define NBINS 128
-
+/* Maximum object size in the fake heap.  */
+enum { max_size = 64 };
+
+/* Allocation actions.  These are randomized actions executed on the
+   dumped heap (see allocation_tasks below).  They are interspersed
+   with operations on the new heap (see heap_activity).  */
+enum allocation_action
+{
+  action_free,                  /* Dumped and freed.  */
+  action_realloc,               /* Dumped and realloc'ed.  */
+  action_realloc_same,          /* Dumped and realloc'ed, same size.  */
+  action_realloc_smaller,       /* Dumped and realloc'ed, shrunk.  */
+  action_count
+};
+
+/* Dumped heap.  Initialize it, so that the object is placed into the
+   .data section, for increased realism.  The size is an upper bound;
+   we use about half of the space.  */
+static size_t dumped_heap[action_count * max_size * max_size
+                          / sizeof (size_t)] = {1};
+
+/* Next free space in the dumped heap.  Also top of the heap at the
+   end of the initialization procedure.  */
+static size_t *next_heap_chunk;
+
+/* Copied from malloc.c and hooks.c.  The version is deliberately
+   lower than the final version of malloc_set_state.  */
+# define NBINS 128
+# define MALLOC_STATE_MAGIC 0x444c4541l
+# define MALLOC_STATE_VERSION (0 * 0x100l + 4l)
 static struct
 {
   long magic;
@@ -58,20 +87,407 @@ static struct
   unsigned long arena_test;
   unsigned long arena_max;
   unsigned long narenas;
-} save_state;
+} save_state =
+{
+  .magic = MALLOC_STATE_MAGIC,
+  .version = MALLOC_STATE_VERSION,
+};
+
+/* Allocate a blob in the fake heap.  */
+static void *
+dumped_heap_alloc (size_t length)
+{
+  /* malloc needs three state bits in the size field, so the minimum
+     alignment is 8 even on 32-bit architectures.  malloc_set_state
+     should be compatible with such heaps even if it currently
+     provides more alignment to applications.  */
+  enum
+  {
+    heap_alignment = 8,
+    heap_alignment_mask = heap_alignment - 1
+  };
+  _Static_assert (sizeof (size_t) <= heap_alignment,
+                  "size_t compatible with heap alignment");
+
+  /* Need at least this many bytes for metadata and application
+     data.  */
+  size_t chunk_size = sizeof (size_t) + length;
+  /* Round up the allocation size to the heap alignment.  */
+  chunk_size += heap_alignment_mask;
+  chunk_size &= ~heap_alignment_mask;
+  TEST_VERIFY_EXIT ((chunk_size & 3) == 0);
+  if (next_heap_chunk == NULL)
+    /* Initialize the top of the heap.  Add one word of zero padding,
+       to match existing practice.  */
+    {
+      dumped_heap[0] = 0;
+      next_heap_chunk = dumped_heap + 1;
+    }
+  else
+    /* The previous chunk is allocated.  */
+    chunk_size |= 1;
+  *next_heap_chunk = chunk_size;
+
+  /* User data starts after the chunk header.  */
+  void *result = next_heap_chunk + 1;
+  next_heap_chunk += chunk_size / sizeof (size_t);
+
+  /* Mark the previous chunk as used.  */
+  *next_heap_chunk = 1;
+  return result;
+}
+
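A worked example of the arithmetic in dumped_heap_alloc, together with the flag-bit names from malloc.c that motivate the 8-byte alignment; the defines are repeated here purely for illustration and are not part of the patch.

/* The low bits of a chunk's size word are flags in glibc malloc:  */
#define PREV_INUSE     0x1  /* previous chunk is allocated */
#define IS_MMAPPED     0x2  /* chunk was obtained through mmap */
#define NON_MAIN_ARENA 0x4  /* chunk belongs to a non-main arena */

/* On a 64-bit target (sizeof (size_t) == 8), dumped_heap_alloc (5)
   computes chunk_size = 8 + 5 = 13, rounds it up to
   (13 + 7) & ~7 = 16, and stores 16 | PREV_INUSE == 17 once a
   previous chunk exists.  The caller receives the bytes right after
   the size word, and next_heap_chunk advances by 16 / 8 == 2
   words.  */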
+/* Global seed variable for the random number generator.  */
+static unsigned long long global_seed;
+
+/* Simple random number generator.  The numbers are in the range from
+   0 to UINT_MAX (inclusive).  */
+static unsigned int
+rand_next (unsigned long long *seed)
+{
+  /* Linear congruential generator as used for MMIX.  */
+  *seed = *seed * 6364136223846793005ULL + 1442695040888963407ULL;
+  return *seed >> 32;
+}
+
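The test never stores expected buffer contents; everything rests on the generator being deterministic. A small sketch of that property, assuming the rand_next above:

unsigned long long s1 = 42, s2 = 42;
unsigned int a = rand_next (&s1);
unsigned int b = rand_next (&s2);
/* a == b always holds: equal seeds produce equal streams.
   randomize_buffer and check_allocation below exploit this to
   regenerate an allocation's expected bytes from its recorded seed
   instead of keeping a copy.  */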
+/* Fill LENGTH bytes at BUFFER with random contents, as determined by
+   SEED.  */
+static void
+randomize_buffer (unsigned char *buffer, size_t length,
+                  unsigned long long seed)
+{
+  for (size_t i = 0; i < length; ++i)
+    buffer[i] = rand_next (&seed);
+}
+
+/* Dumps the buffer to standard output, in hexadecimal.  */
+static void
+dump_hex (unsigned char *buffer, size_t length)
+{
+  for (int i = 0; i < length; ++i)
+    printf (" %02X", buffer[i]);
+}
+
+/* Set to true if an error is encountered.  */
+static bool errors = false;
+
+/* Keep track of object allocations.  */
+struct allocation
+{
+  unsigned char *data;
+  unsigned int size;
+  unsigned int seed;
+};
+
+/* Check that the allocation ALLOC has the expected contents.  */
+static void
+check_allocation (const struct allocation *alloc, int index)
+{
+  size_t size = alloc->size;
+  if (alloc->data == NULL)
+    {
+      printf ("error: NULL pointer for allocation of size %zu at %d, seed %u\n",
+              size, index, alloc->seed);
+      errors = true;
+      return;
+    }
+
+  unsigned char expected[4096];
+  if (size > sizeof (expected))
+    {
+      printf ("error: invalid allocation size %zu at %d, seed %u\n",
+              size, index, alloc->seed);
+      errors = true;
+      return;
+    }
+  randomize_buffer (expected, size, alloc->seed);
+  if (memcmp (alloc->data, expected, size) != 0)
+    {
+      printf ("error: allocation %d data mismatch, size %zu, seed %u\n",
+              index, size, alloc->seed);
+      printf ("  expected:");
+      dump_hex (expected, size);
+      putc ('\n', stdout);
+      printf ("  actual:");
+      dump_hex (alloc->data, size);
+      putc ('\n', stdout);
+      errors = true;
+    }
+}
+
+/* A heap allocation combined with pending actions on it.  */
+struct allocation_task
+{
+  struct allocation allocation;
+  enum allocation_action action;
+};
+
+/* Allocation tasks.  Initialized by initial_allocations below and
+   executed by do_test.  */
+enum { allocation_task_count = action_count * max_size };
+static struct allocation_task allocation_tasks[allocation_task_count];
+
+/* Fisher-Yates shuffle of allocation_tasks.  */
+static void
+shuffle_allocation_tasks (void)
+{
+  for (int i = 0; i < allocation_task_count - 1; ++i)
+    {
+      /* Pick a swap partner from the tail of the array.  */
+      int j = i + (rand_next (&global_seed)
+                   % ((unsigned) (allocation_task_count - i)));
+      TEST_VERIFY_EXIT (j >= 0 && j < allocation_task_count);
+      /* Exchange.  */
+      struct allocation_task tmp = allocation_tasks[i];
+      allocation_tasks[i] = allocation_tasks[j];
+      allocation_tasks[j] = tmp;
+    }
+}
+
+/* Set up the allocation tasks and the dumped heap.  */
+static void
+initial_allocations (void)
+{
+  /* Initialize in a position-dependent way.  */
+  for (int i = 0; i < allocation_task_count; ++i)
+    allocation_tasks[i] = (struct allocation_task)
+      {
+        .allocation =
+          {
+            .size = 1 + (i / action_count),
+            .seed = i,
+          },
+        .action = i % action_count
+      };
+
+  /* Execute the tasks in a random order.  */
+  shuffle_allocation_tasks ();
+
+  /* Initialize the contents of the dumped heap.  */
+  for (int i = 0; i < allocation_task_count; ++i)
+    {
+      struct allocation_task *task = allocation_tasks + i;
+      task->allocation.data = dumped_heap_alloc (task->allocation.size);
+      randomize_buffer (task->allocation.data, task->allocation.size,
+                        task->allocation.seed);
+    }
+
+  for (int i = 0; i < allocation_task_count; ++i)
+    check_allocation (&allocation_tasks[i].allocation, i);
+}
+
+/* Indicates whether init_heap has run.  This variable needs to be
+   volatile because malloc is declared __THROW, which implies it is a
+   leaf function, but we expect it to run our hooks.  */
+static volatile bool heap_initialized;
+
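A sketch of the optimization hazard the comment above guards against; plain_flag and leaf_assumption_demo are hypothetical, but __THROW in glibc headers does carry the leaf attribute.

#include <stdbool.h>
#include <stdlib.h>

extern bool plain_flag;   /* hypothetical non-volatile counterpart */

int
leaf_assumption_demo (void)
{
  bool before = plain_flag;
  free (malloc (1));        /* may run __malloc_initialize_hook...  */
  bool after = plain_flag;  /* ...yet this load may be folded into
                               BEFORE, because a leaf call is assumed
                               not to modify this unit's statics */
  return before == after;
}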
+/* Executed by glibc malloc, through __malloc_initialize_hook
+   below.  */
+static void
+init_heap (void)
+{
+  if (test_verbose)
+    printf ("info: performing heap initialization\n");
+  heap_initialized = true;
+
+  /* Populate the dumped heap.  */
+  initial_allocations ();
+
+  /* Complete initialization of the saved heap data structure.  */
+  save_state.sbrk_base = (void *) dumped_heap;
+  save_state.sbrked_mem_bytes = sizeof (dumped_heap);
+  /* Top pointer.  Adjust so that it points to the start of struct
+     malloc_chunk.  */
+  save_state.av[2] = (void *) (next_heap_chunk - 1);
+
+  /* Integrate the dumped heap into the process heap.  */
+  TEST_VERIFY_EXIT (malloc_set_state (&save_state) == 0);
+}
+
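For cross-reference, a hedged summary of the malloc_save_state fields the restored rewriter in malloc-debug.c actually consumes, written as a hypothetical validity check; it assumes the struct definition from the first file of this patch and is not part of either file.

static int
save_state_is_plausible (const struct malloc_save_state *ms)
{
  return ms->magic == MALLOC_STATE_MAGIC          /* identity check */
         && (ms->version & ~0xffl) <= (MALLOC_STATE_VERSION & ~0xffl)
         && ms->sbrk_base != NULL                 /* heap scan start */
         && ms->sbrked_mem_bytes > 0              /* heap scan length */
         && ms->av[2] != NULL;                    /* old top chunk */
}

init_heap above fills in exactly sbrk_base, sbrked_mem_bytes, and av[2] at run time; magic and version were set statically in the save_state initializer.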
+/* Interpose the initialization callback.  */
+void (*volatile __malloc_initialize_hook) (void) = init_heap;
+compat_symbol_reference (libc, __malloc_initialize_hook,
+                         __malloc_initialize_hook, GLIBC_2_0);
+
+/* Simulate occasional unrelated heap activity in the non-dumped
+   heap.  */
+enum { heap_activity_allocations_count = 32 };
+static struct allocation heap_activity_allocations
+  [heap_activity_allocations_count] = {};
+static int heap_activity_seed_counter = 1000 * 1000;
+
+static void
+heap_activity (void)
+{
+  /* Only do this from time to time.  */
+  if ((rand_next (&global_seed) % 4) == 0)
+    {
+      int slot = rand_next (&global_seed) % heap_activity_allocations_count;
+      struct allocation *alloc = heap_activity_allocations + slot;
+      if (alloc->data == NULL)
+        {
+          alloc->size = rand_next (&global_seed) % (4096U + 1);
+          alloc->data = xmalloc (alloc->size);
+          alloc->seed = heap_activity_seed_counter++;
+          randomize_buffer (alloc->data, alloc->size, alloc->seed);
+          check_allocation (alloc, 1000 + slot);
+        }
+      else
+        {
+          check_allocation (alloc, 1000 + slot);
+          free (alloc->data);
+          alloc->data = NULL;
+        }
+    }
+}
+
+static void
+heap_activity_deallocate (void)
+{
+  for (int i = 0; i < heap_activity_allocations_count; ++i)
+    free (heap_activity_allocations[i].data);
+}
+
+/* Perform a full heap check across the dumped heap allocation tasks,
+   and the simulated heap activity directly above.  */
+static void
+full_heap_check (void)
+{
+  /* Dumped heap.  */
+  for (int i = 0; i < allocation_task_count; ++i)
+    if (allocation_tasks[i].allocation.data != NULL)
+      check_allocation (&allocation_tasks[i].allocation, i);
+
+  /* Heap activity allocations.  */
+  for (int i = 0; i < heap_activity_allocations_count; ++i)
+    if (heap_activity_allocations[i].data != NULL)
+      check_allocation (heap_activity_allocations + i, i);
+}
+
+/* Used as an optimization barrier to force a heap allocation.  */
+__attribute_optimization_barrier__
+static void
+my_free (void *ptr)
+{
+  free (ptr);
+}
+
 static int
 do_test (void)
 {
-  /* Check the dummy implementations always fail.  */
-  TEST_VERIFY_EXIT (malloc_set_state (&save_state) == -1);
+  my_free (malloc (1));
+  TEST_VERIFY_EXIT (heap_initialized);
+
+  /* The first pass performs the randomly generated allocation
+     tasks.  */
+  if (test_verbose)
+    printf ("info: first pass through allocation tasks\n");
+  full_heap_check ();
+
+  /* Execute the post-undump tasks in a random order.  */
+  shuffle_allocation_tasks ();
+
+  for (int i = 0; i < allocation_task_count; ++i)
+    {
+      heap_activity ();
+      struct allocation_task *task = allocation_tasks + i;
+      switch (task->action)
+        {
+        case action_free:
+          check_allocation (&task->allocation, i);
+          free (task->allocation.data);
+          task->allocation.data = NULL;
+          break;
+
+        case action_realloc:
+          check_allocation (&task->allocation, i);
+          task->allocation.data = xrealloc
+            (task->allocation.data, task->allocation.size + max_size);
+          check_allocation (&task->allocation, i);
+          break;
+
+        case action_realloc_same:
+          check_allocation (&task->allocation, i);
+          task->allocation.data = xrealloc
+            (task->allocation.data, task->allocation.size);
+          check_allocation (&task->allocation, i);
+          break;
+
+        case action_realloc_smaller:
+          check_allocation (&task->allocation, i);
+          size_t new_size = task->allocation.size - 1;
+          task->allocation.data = xrealloc (task->allocation.data, new_size);
+          if (new_size == 0)
+            {
+              if (task->allocation.data != NULL)
+                {
+                  printf ("error: realloc with size zero did not deallocate\n");
+                  errors = true;
+                }
+              /* No further action on this task.  */
+              task->action = action_free;
+            }
+          else
+            {
+              task->allocation.size = new_size;
+              check_allocation (&task->allocation, i);
+            }
+          break;
+
+        case action_count:
+          FAIL_EXIT1 ("task->action should never be action_count");
+        }
+      full_heap_check ();
+    }
+
+  /* The second pass frees the objects which were allocated during the
+     first pass.  */
+  if (test_verbose)
+    printf ("info: second pass through allocation tasks\n");
+
+  shuffle_allocation_tasks ();
+  for (int i = 0; i < allocation_task_count; ++i)
+    {
+      heap_activity ();
+      struct allocation_task *task = allocation_tasks + i;
+      switch (task->action)
+        {
+        case action_free:
+          /* Already freed, nothing to do.  */
+          break;
+
+        case action_realloc:
+        case action_realloc_same:
+        case action_realloc_smaller:
+          check_allocation (&task->allocation, i);
+          free (task->allocation.data);
+          task->allocation.data = NULL;
+          break;
+
+        case action_count:
+          FAIL_EXIT1 ("task->action should never be action_count");
+        }
+      full_heap_check ();
+    }
+
+  heap_activity_deallocate ();
+
+  /* Check that the malloc_get_state stub behaves in the intended
+     way.  */
   errno = 0;
-  TEST_VERIFY_EXIT (malloc_get_state () == NULL);
+  if (malloc_get_state () != NULL)
+    {
+      printf ("error: malloc_get_state succeeded\n");
+      errors = true;
+    }
+  if (errno != ENOSYS)
+    {
+      printf ("error: malloc_get_state: %m\n");
+      errors = true;
+    }
-  TEST_VERIFY_EXIT (errno == ENOSYS);
 
-  return 0;
+  return errors;
 }
 
 #include <support/test-driver.c>