1
0
mirror of https://github.com/esp8266/Arduino.git synced 2025-04-19 23:22:16 +03:00

Sync umm_malloc style with upstream (#8426)

Upstream umm_malloc at git hash id 4dac43c3be7a7470dd669323021ba238081da18e
processed all project files with the style program uncrustify.

This PR updates our ported version of umm_malloc processed with "uncrustify".
This should make subsequent merges of upstream into this port easier.

This also makes the style more consistant through umm_malloc.
This commit is contained in:
M Hightower 2022-01-03 13:36:03 -08:00 committed by GitHub
parent f401f08aba
commit f26201e6a9
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
12 changed files with 1449 additions and 1403 deletions

View File

@ -248,4 +248,32 @@ Enhancement ideas:
save on the execution time spent with interrupts disabled.
*/
/*
Dec 29, 2021
Upstream umm_malloc at git hash id 4dac43c3be7a7470dd669323021ba238081da18e
processed all project files with the style program uncrustify.
This PR updates our ported version of umm_malloc processed with "uncrustify".
This should make subsequent merges of upstream into this port easier.
This also makes the style more consistant through umm_malloc.
Some edits to source files was needed to get uncrustify to work.
1) macros with "if"s need to be of the form "if ( blah ) { } " curley braces
are needed for it to parse correctly
2) These "#ifdef __cplusplus" also had to be commented out while running to
avoid parser confusion.
```
#ifdef __cplusplus
extern "C" {
#endif
```
and
```
#ifdef __cplusplus
}
#endif
```
*/
#endif

View File

@ -1 +1,2 @@
Downloaded from: https://github.com/rhempel/c-helper-macros/tree/develop
Applied uncrustify to be consistent with the rest of the umm_malloc files.

View File

@ -50,11 +50,11 @@
#undef DBGLOG_FORCE
#ifndef DBGLOG_LEVEL
# define DBGLOG_LEVEL 0
#define DBGLOG_LEVEL 0
#endif
#ifndef DBGLOG_FUNCTION
# define DBGLOG_FUNCTION printf
#define DBGLOG_FUNCTION printf
#endif
#define DBGLOG_32_BIT_PTR(x) ((uint32_t)(((uintptr_t)(x)) & 0xffffffff))
@ -62,39 +62,39 @@
/* ------------------------------------------------------------------------- */
#if DBGLOG_LEVEL >= 6
# define DBGLOG_TRACE(format, ...) DBGLOG_FUNCTION(format, ## __VA_ARGS__)
#define DBGLOG_TRACE(format, ...) DBGLOG_FUNCTION(format,##__VA_ARGS__)
#else
# define DBGLOG_TRACE(format, ...)
#define DBGLOG_TRACE(format, ...)
#endif
#if DBGLOG_LEVEL >= 5
# define DBGLOG_DEBUG(format, ...) DBGLOG_FUNCTION(format, ## __VA_ARGS__)
#define DBGLOG_DEBUG(format, ...) DBGLOG_FUNCTION(format,##__VA_ARGS__)
#else
# define DBGLOG_DEBUG(format, ...)
#define DBGLOG_DEBUG(format, ...)
#endif
#if DBGLOG_LEVEL >= 4
# define DBGLOG_CRITICAL(format, ...) DBGLOG_FUNCTION(format, ## __VA_ARGS__)
#define DBGLOG_CRITICAL(format, ...) DBGLOG_FUNCTION(format,##__VA_ARGS__)
#else
# define DBGLOG_CRITICAL(format, ...)
#define DBGLOG_CRITICAL(format, ...)
#endif
#if DBGLOG_LEVEL >= 3
# define DBGLOG_ERROR(format, ...) DBGLOG_FUNCTION(format, ## __VA_ARGS__)
#define DBGLOG_ERROR(format, ...) DBGLOG_FUNCTION(format,##__VA_ARGS__)
#else
# define DBGLOG_ERROR(format, ...)
#define DBGLOG_ERROR(format, ...)
#endif
#if DBGLOG_LEVEL >= 2
# define DBGLOG_WARNING(format, ...) DBGLOG_FUNCTION(format, ## __VA_ARGS__)
#define DBGLOG_WARNING(format, ...) DBGLOG_FUNCTION(format,##__VA_ARGS__)
#else
# define DBGLOG_WARNING(format, ...)
#define DBGLOG_WARNING(format, ...)
#endif
#if DBGLOG_LEVEL >= 1
# define DBGLOG_INFO(format, ...) DBGLOG_FUNCTION(format, ## __VA_ARGS__)
#define DBGLOG_INFO(format, ...) DBGLOG_FUNCTION(format,##__VA_ARGS__)
#else
# define DBGLOG_INFO(format, ...)
#define DBGLOG_INFO(format, ...)
#endif
#define DBGLOG_FORCE(force, format, ...) {if(force) {DBGLOG_FUNCTION(format, ## __VA_ARGS__);}}
#define DBGLOG_FORCE(force, format, ...) {if (force) {DBGLOG_FUNCTION(format,##__VA_ARGS__);}}

View File

@ -32,70 +32,77 @@
class HeapSelect {
public:
#if (UMM_NUM_HEAPS == 1)
MAYBE_ALWAYS_INLINE
HeapSelect(size_t id) { (void)id; }
MAYBE_ALWAYS_INLINE
~HeapSelect() {}
MAYBE_ALWAYS_INLINE
HeapSelect(size_t id) {
(void)id;
}
MAYBE_ALWAYS_INLINE
~HeapSelect() {
}
#else
MAYBE_ALWAYS_INLINE
HeapSelect(size_t id) : _heap_id(umm_get_current_heap_id()) {
MAYBE_ALWAYS_INLINE
HeapSelect(size_t id) : _heap_id(umm_get_current_heap_id()) {
umm_set_heap_by_id(id);
}
}
MAYBE_ALWAYS_INLINE
~HeapSelect() {
MAYBE_ALWAYS_INLINE
~HeapSelect() {
umm_set_heap_by_id(_heap_id);
}
}
protected:
size_t _heap_id;
size_t _heap_id;
#endif
};
class HeapSelectIram {
public:
#ifdef UMM_HEAP_IRAM
MAYBE_ALWAYS_INLINE
HeapSelectIram() : _heap_id(umm_get_current_heap_id()) {
MAYBE_ALWAYS_INLINE
HeapSelectIram() : _heap_id(umm_get_current_heap_id()) {
umm_set_heap_by_id(UMM_HEAP_IRAM);
}
}
MAYBE_ALWAYS_INLINE
~HeapSelectIram() {
MAYBE_ALWAYS_INLINE
~HeapSelectIram() {
umm_set_heap_by_id(_heap_id);
}
}
protected:
size_t _heap_id;
size_t _heap_id;
#else
MAYBE_ALWAYS_INLINE
HeapSelectIram() {}
MAYBE_ALWAYS_INLINE
~HeapSelectIram() {}
MAYBE_ALWAYS_INLINE
HeapSelectIram() {
}
MAYBE_ALWAYS_INLINE
~HeapSelectIram() {
}
#endif
};
class HeapSelectDram {
public:
#if (UMM_NUM_HEAPS == 1)
MAYBE_ALWAYS_INLINE
HeapSelectDram() {}
MAYBE_ALWAYS_INLINE
~HeapSelectDram() {}
MAYBE_ALWAYS_INLINE
HeapSelectDram() {
}
MAYBE_ALWAYS_INLINE
~HeapSelectDram() {
}
#else
MAYBE_ALWAYS_INLINE
HeapSelectDram() : _heap_id(umm_get_current_heap_id()) {
MAYBE_ALWAYS_INLINE
HeapSelectDram() : _heap_id(umm_get_current_heap_id()) {
umm_set_heap_by_id(UMM_HEAP_DRAM);
}
}
MAYBE_ALWAYS_INLINE
~HeapSelectDram() {
MAYBE_ALWAYS_INLINE
~HeapSelectDram() {
umm_set_heap_by_id(_heap_id);
}
}
protected:
size_t _heap_id;
size_t _heap_id;
#endif
};

View File

@ -25,7 +25,7 @@
// UMM_HEAP_INFO ummHeapInfo;
void *umm_info( void *ptr, bool force ) {
void *umm_info(void *ptr, bool force) {
UMM_CRITICAL_DECL(id_info);
UMM_INIT_HEAP;
@ -41,18 +41,18 @@ void *umm_info( void *ptr, bool force ) {
* Clear out all of the entries in the ummHeapInfo structure before doing
* any calculations..
*/
memset( &_context->info, 0, sizeof( _context->info ) );
memset(&_context->info, 0, sizeof(_context->info));
DBGLOG_FORCE( force, "\n" );
DBGLOG_FORCE( force, "+----------+-------+--------+--------+-------+--------+--------+\n" );
DBGLOG_FORCE( force, "|0x%08lx|B %5d|NB %5d|PB %5d|Z %5d|NF %5d|PF %5d|\n",
DBGLOG_FORCE(force, "\n");
DBGLOG_FORCE(force, "+----------+-------+--------+--------+-------+--------+--------+\n");
DBGLOG_FORCE(force, "|0x%08lx|B %5d|NB %5d|PB %5d|Z %5d|NF %5d|PF %5d|\n",
DBGLOG_32_BIT_PTR(&UMM_BLOCK(blockNo)),
blockNo,
UMM_NBLOCK(blockNo) & UMM_BLOCKNO_MASK,
UMM_PBLOCK(blockNo),
(UMM_NBLOCK(blockNo) & UMM_BLOCKNO_MASK )-blockNo,
(UMM_NBLOCK(blockNo) & UMM_BLOCKNO_MASK) - blockNo,
UMM_NFREE(blockNo),
UMM_PFREE(blockNo) );
UMM_PFREE(blockNo));
/*
* Now loop through the block lists, and keep track of the number and size
@ -62,15 +62,15 @@ void *umm_info( void *ptr, bool force ) {
blockNo = UMM_NBLOCK(blockNo) & UMM_BLOCKNO_MASK;
while( UMM_NBLOCK(blockNo) & UMM_BLOCKNO_MASK ) {
size_t curBlocks = (UMM_NBLOCK(blockNo) & UMM_BLOCKNO_MASK )-blockNo;
while (UMM_NBLOCK(blockNo) & UMM_BLOCKNO_MASK) {
size_t curBlocks = (UMM_NBLOCK(blockNo) & UMM_BLOCKNO_MASK) - blockNo;
++_context->info.totalEntries;
_context->info.totalBlocks += curBlocks;
/* Is this a free block? */
if( UMM_NBLOCK(blockNo) & UMM_FREELIST_MASK ) {
if (UMM_NBLOCK(blockNo) & UMM_FREELIST_MASK) {
++_context->info.freeEntries;
_context->info.freeBlocks += curBlocks;
_context->info.freeBlocksSquared += (curBlocks * curBlocks);
@ -79,34 +79,34 @@ void *umm_info( void *ptr, bool force ) {
_context->info.maxFreeContiguousBlocks = curBlocks;
}
DBGLOG_FORCE( force, "|0x%08lx|B %5d|NB %5d|PB %5d|Z %5u|NF %5d|PF %5d|\n",
DBGLOG_FORCE(force, "|0x%08lx|B %5d|NB %5d|PB %5d|Z %5u|NF %5d|PF %5d|\n",
DBGLOG_32_BIT_PTR(&UMM_BLOCK(blockNo)),
blockNo,
UMM_NBLOCK(blockNo) & UMM_BLOCKNO_MASK,
UMM_PBLOCK(blockNo),
(uint16_t)curBlocks,
UMM_NFREE(blockNo),
UMM_PFREE(blockNo) );
UMM_PFREE(blockNo));
/* Does this block address match the ptr we may be trying to free? */
if( ptr == &UMM_BLOCK(blockNo) ) {
if (ptr == &UMM_BLOCK(blockNo)) {
/* Release the critical section... */
UMM_CRITICAL_EXIT(id_info);
return( ptr );
return ptr;
}
} else {
++_context->info.usedEntries;
_context->info.usedBlocks += curBlocks;
DBGLOG_FORCE( force, "|0x%08lx|B %5d|NB %5d|PB %5d|Z %5u|\n",
DBGLOG_FORCE(force, "|0x%08lx|B %5d|NB %5d|PB %5d|Z %5u|\n",
DBGLOG_32_BIT_PTR(&UMM_BLOCK(blockNo)),
blockNo,
UMM_NBLOCK(blockNo) & UMM_BLOCKNO_MASK,
UMM_PBLOCK(blockNo),
(uint16_t)curBlocks );
(uint16_t)curBlocks);
}
blockNo = UMM_NBLOCK(blockNo) & UMM_BLOCKNO_MASK;
@ -119,78 +119,78 @@ void *umm_info( void *ptr, bool force ) {
* ALWAYS be exactly 1 !
*/
DBGLOG_FORCE( force, "|0x%08lx|B %5d|NB %5d|PB %5d|Z %5d|NF %5d|PF %5d|\n",
DBGLOG_FORCE(force, "|0x%08lx|B %5d|NB %5d|PB %5d|Z %5d|NF %5d|PF %5d|\n",
DBGLOG_32_BIT_PTR(&UMM_BLOCK(blockNo)),
blockNo,
UMM_NBLOCK(blockNo) & UMM_BLOCKNO_MASK,
UMM_PBLOCK(blockNo),
UMM_NUMBLOCKS-blockNo,
UMM_NUMBLOCKS - blockNo,
UMM_NFREE(blockNo),
UMM_PFREE(blockNo) );
UMM_PFREE(blockNo));
DBGLOG_FORCE( force, "+----------+-------+--------+--------+-------+--------+--------+\n" );
DBGLOG_FORCE(force, "+----------+-------+--------+--------+-------+--------+--------+\n");
DBGLOG_FORCE( force, "Total Entries %5d Used Entries %5d Free Entries %5d\n",
DBGLOG_FORCE(force, "Total Entries %5d Used Entries %5d Free Entries %5d\n",
_context->info.totalEntries,
_context->info.usedEntries,
_context->info.freeEntries );
_context->info.freeEntries);
DBGLOG_FORCE( force, "Total Blocks %5d Used Blocks %5d Free Blocks %5d\n",
DBGLOG_FORCE(force, "Total Blocks %5d Used Blocks %5d Free Blocks %5d\n",
_context->info.totalBlocks,
_context->info.usedBlocks,
_context->info.freeBlocks );
_context->info.freeBlocks);
DBGLOG_FORCE( force, "+--------------------------------------------------------------+\n" );
DBGLOG_FORCE(force, "+--------------------------------------------------------------+\n");
DBGLOG_FORCE( force, "Usage Metric: %5d\n", umm_usage_metric_core(_context));
DBGLOG_FORCE( force, "Fragmentation Metric: %5d\n", umm_fragmentation_metric_core(_context));
DBGLOG_FORCE(force, "Usage Metric: %5d\n", umm_usage_metric_core(_context));
DBGLOG_FORCE(force, "Fragmentation Metric: %5d\n", umm_fragmentation_metric_core(_context));
DBGLOG_FORCE( force, "+--------------------------------------------------------------+\n" );
DBGLOG_FORCE(force, "+--------------------------------------------------------------+\n");
#if defined(UMM_STATS) || defined(UMM_STATS_FULL)
#if !defined(UMM_INLINE_METRICS)
#if defined(UMM_STATS) || defined(UMM_STATS_FULL)
#if !defined(UMM_INLINE_METRICS)
if (_context->info.freeBlocks == _context->stats.free_blocks) {
DBGLOG_FORCE( force, "heap info Free blocks and heap statistics Free blocks match.\n");
DBGLOG_FORCE(force, "heap info Free blocks and heap statistics Free blocks match.\n");
} else {
DBGLOG_FORCE( force, "\nheap info Free blocks %5d != heap statistics Free Blocks %5d\n\n",
DBGLOG_FORCE(force, "\nheap info Free blocks %5d != heap statistics Free Blocks %5d\n\n",
_context->info.freeBlocks,
_context->stats.free_blocks );
_context->stats.free_blocks);
}
DBGLOG_FORCE( force, "+--------------------------------------------------------------+\n" );
#endif
DBGLOG_FORCE(force, "+--------------------------------------------------------------+\n");
#endif
umm_print_stats(force);
#endif
#endif
/* Release the critical section... */
UMM_CRITICAL_EXIT(id_info);
return( NULL );
return NULL;
}
/* ------------------------------------------------------------------------ */
size_t umm_free_heap_size_core( umm_heap_context_t *_context ) {
size_t umm_free_heap_size_core(umm_heap_context_t *_context) {
return (size_t)_context->info.freeBlocks * sizeof(umm_block);
}
size_t umm_free_heap_size( void ) {
#ifndef UMM_INLINE_METRICS
size_t umm_free_heap_size(void) {
#ifndef UMM_INLINE_METRICS
umm_info(NULL, false);
#endif
#endif
return umm_free_heap_size_core(umm_get_current_heap());
}
//C Breaking change in upstream umm_max_block_size() was changed to
//C umm_max_free_block_size() keeping old function name for (dot) releases.
//C TODO: update at next major release.
//C size_t umm_max_free_block_size( void ) {
size_t umm_max_block_size_core( umm_heap_context_t *_context ) {
// C Breaking change in upstream umm_max_block_size() was changed to
// C umm_max_free_block_size() keeping old function name for (dot) releases.
// C TODO: update at next major release.
// C size_t umm_max_free_block_size( void ) {
size_t umm_max_block_size_core(umm_heap_context_t *_context) {
return _context->info.maxFreeContiguousBlocks * sizeof(umm_block);
}
size_t umm_max_block_size( void ) {
size_t umm_max_block_size(void) {
umm_info(NULL, false);
return umm_max_block_size_core(umm_get_current_heap());
}
@ -200,60 +200,61 @@ size_t umm_max_block_size( void ) {
umm_fragmentation_metric() must to be preceded by a call to umm_info(NULL, false)
for updated results.
*/
int umm_usage_metric_core( umm_heap_context_t *_context ) {
//C Note, umm_metrics also appears in the upstrean w/o definition. I suspect it is suppose to be ummHeapInfo.
int umm_usage_metric_core(umm_heap_context_t *_context) {
// C Note, umm_metrics also appears in the upstrean w/o definition. I suspect it is suppose to be ummHeapInfo.
// DBGLOG_DEBUG( "usedBlocks %d totalBlocks %d\n", umm_metrics.usedBlocks, ummHeapInfo.totalBlocks);
DBGLOG_DEBUG( "usedBlocks %d totalBlocks %d\n", _context->info.usedBlocks, _context->info.totalBlocks);
if (_context->info.freeBlocks)
return (int)((_context->info.usedBlocks * 100)/(_context->info.freeBlocks));
DBGLOG_DEBUG("usedBlocks %d totalBlocks %d\n", _context->info.usedBlocks, _context->info.totalBlocks);
if (_context->info.freeBlocks) {
return (int)((_context->info.usedBlocks * 100) / (_context->info.freeBlocks));
}
return -1; // no freeBlocks
}
int umm_usage_metric( void ) {
#ifndef UMM_INLINE_METRICS
int umm_usage_metric(void) {
#ifndef UMM_INLINE_METRICS
umm_info(NULL, false);
#endif
#endif
return umm_usage_metric_core(umm_get_current_heap());
}
uint32_t sqrt32 (uint32_t n);
uint32_t sqrt32(uint32_t n);
int umm_fragmentation_metric_core( umm_heap_context_t *_context ) {
int umm_fragmentation_metric_core(umm_heap_context_t *_context) {
// DBGLOG_DEBUG( "freeBlocks %d freeBlocksSquared %d\n", umm_metrics.freeBlocks, ummHeapInfo.freeBlocksSquared);
DBGLOG_DEBUG( "freeBlocks %d freeBlocksSquared %d\n", _context->info.freeBlocks, _context->info.freeBlocksSquared);
DBGLOG_DEBUG("freeBlocks %d freeBlocksSquared %d\n", _context->info.freeBlocks, _context->info.freeBlocksSquared);
if (0 == _context->info.freeBlocks) {
return 0;
} else {
//upstream version: return (100 - (((uint32_t)(sqrtf(ummHeapInfo.freeBlocksSquared)) * 100)/(ummHeapInfo.freeBlocks)));
return (100 - (((uint32_t)(sqrt32(_context->info.freeBlocksSquared)) * 100)/(_context->info.freeBlocks)));
// upstream version: return (100 - (((uint32_t)(sqrtf(ummHeapInfo.freeBlocksSquared)) * 100)/(ummHeapInfo.freeBlocks)));
return 100 - (((uint32_t)(sqrt32(_context->info.freeBlocksSquared)) * 100) / (_context->info.freeBlocks));
}
}
int umm_fragmentation_metric( void ) {
#ifndef UMM_INLINE_METRICS
int umm_fragmentation_metric(void) {
#ifndef UMM_INLINE_METRICS
umm_info(NULL, false);
#endif
#endif
return umm_fragmentation_metric_core(umm_get_current_heap());
}
#ifdef UMM_INLINE_METRICS
static void umm_fragmentation_metric_init( umm_heap_context_t *_context ) {
static void umm_fragmentation_metric_init(umm_heap_context_t *_context) {
_context->info.freeBlocks = UMM_NUMBLOCKS - 2;
_context->info.freeBlocksSquared = _context->info.freeBlocks * _context->info.freeBlocks;
}
static void umm_fragmentation_metric_add( umm_heap_context_t *_context, uint16_t c ) {
static void umm_fragmentation_metric_add(umm_heap_context_t *_context, uint16_t c) {
uint16_t blocks = (UMM_NBLOCK(c) & UMM_BLOCKNO_MASK) - c;
DBGLOG_DEBUG( "Add block %d size %d to free metric\n", c, blocks);
DBGLOG_DEBUG("Add block %d size %d to free metric\n", c, blocks);
_context->info.freeBlocks += blocks;
_context->info.freeBlocksSquared += (blocks * blocks);
}
static void umm_fragmentation_metric_remove( umm_heap_context_t *_context, uint16_t c ) {
static void umm_fragmentation_metric_remove(umm_heap_context_t *_context, uint16_t c) {
uint16_t blocks = (UMM_NBLOCK(c) & UMM_BLOCKNO_MASK) - c;
DBGLOG_DEBUG( "Remove block %d size %d from free metric\n", c, blocks);
DBGLOG_DEBUG("Remove block %d size %d from free metric\n", c, blocks);
_context->info.freeBlocks -= blocks;
_context->info.freeBlocksSquared -= (blocks * blocks);
}

View File

@ -41,7 +41,7 @@ bool umm_integrity_check(void) {
umm_heap_context_t *_context = umm_get_current_heap();
while(1) {
while (1) {
cur = UMM_NFREE(prev);
/* Check that next free block number is valid */
@ -73,7 +73,7 @@ bool umm_integrity_check(void) {
/* Iterate through all blocks */
prev = 0;
while(1) {
while (1) {
cur = UMM_NBLOCK(prev) & UMM_BLOCKNO_MASK;
/* Check that next block number is valid */
@ -91,8 +91,7 @@ bool umm_integrity_check(void) {
/* make sure the free mark is appropriate, and unmark it */
if ((UMM_NBLOCK(cur) & UMM_FREELIST_MASK)
!= (UMM_PBLOCK(cur) & UMM_FREELIST_MASK))
{
!= (UMM_PBLOCK(cur) & UMM_FREELIST_MASK)) {
DBGLOG_FUNCTION("heap integrity broken: mask wrong at addr 0x%08x: n=0x%x, p=0x%x\n",
DBGLOG_32_BIT_PTR(&UMM_NBLOCK(cur)),
(UMM_NBLOCK(cur) & UMM_FREELIST_MASK),
@ -102,7 +101,7 @@ bool umm_integrity_check(void) {
}
/* make sure the block list is sequential */
if (cur <= prev ) {
if (cur <= prev) {
DBGLOG_FUNCTION("heap integrity broken: next block %d is before prev this one "
"(in block %d, addr 0x%08x)\n", cur, prev,
DBGLOG_32_BIT_PTR(&UMM_NBLOCK(prev)));
@ -127,7 +126,7 @@ bool umm_integrity_check(void) {
clean:
UMM_CRITICAL_EXIT(id_integrity);
if (!ok){
if (!ok) {
UMM_HEAP_CORRUPTION_CB();
}
return ok;

View File

@ -12,22 +12,21 @@ UMM_TIME_STATS time_stats = {
{0xFFFFFFFF, 0U, 0U, 0U},
{0xFFFFFFFF, 0U, 0U, 0U},
{0xFFFFFFFF, 0U, 0U, 0U},
#ifdef UMM_INFO
#ifdef UMM_INFO
{0xFFFFFFFF, 0U, 0U, 0U},
#endif
#if defined(UMM_POISON_CHECK) || defined(UMM_POISON_CHECK_LITE)
#endif
#if defined(UMM_POISON_CHECK) || defined(UMM_POISON_CHECK_LITE)
{0xFFFFFFFF, 0U, 0U, 0U},
#endif
#ifdef UMM_INTEGRITY_CHECK
#endif
#ifdef UMM_INTEGRITY_CHECK
{0xFFFFFFFF, 0U, 0U, 0U},
#endif
{0xFFFFFFFF, 0U, 0U, 0U} };
#endif
{0xFFFFFFFF, 0U, 0U, 0U}
};
bool ICACHE_FLASH_ATTR get_umm_get_perf_data(UMM_TIME_STATS *p, size_t size)
{
bool ICACHE_FLASH_ATTR get_umm_get_perf_data(UMM_TIME_STATS *p, size_t size) {
UMM_CRITICAL_DECL(id_no_tag);
if (p && sizeof(time_stats) == size)
{
if (p && sizeof(time_stats) == size) {
UMM_CRITICAL_ENTRY(id_no_tag);
memcpy(p, &time_stats, size);
UMM_CRITICAL_EXIT(id_no_tag);
@ -42,22 +41,24 @@ bool ICACHE_FLASH_ATTR get_umm_get_perf_data(UMM_TIME_STATS *p, size_t size)
#if defined(UMM_POISON_CHECK_LITE)
// We skip this when doing the full poison check.
static bool check_poison_neighbors( umm_heap_context_t *_context, uint16_t cur ) {
static bool check_poison_neighbors(umm_heap_context_t *_context, uint16_t cur) {
uint16_t c;
if ( 0 == cur )
if (0 == cur) {
return true;
}
c = UMM_PBLOCK(cur) & UMM_BLOCKNO_MASK;
while( c && (UMM_NBLOCK(c) & UMM_BLOCKNO_MASK) ) {
while (c && (UMM_NBLOCK(c) & UMM_BLOCKNO_MASK)) {
/*
There can be up to 1 free block neighbor in either direction.
This loop should self limit to 2 passes, due to heap design.
i.e. Adjacent free space is always consolidated.
*/
if ( !(UMM_NBLOCK(c) & UMM_FREELIST_MASK) ) {
if ( !check_poison_block(&UMM_BLOCK(c)) )
if (!(UMM_NBLOCK(c) & UMM_FREELIST_MASK)) {
if (!check_poison_block(&UMM_BLOCK(c))) {
return false;
}
break;
}
@ -66,10 +67,11 @@ static bool check_poison_neighbors( umm_heap_context_t *_context, uint16_t cur )
}
c = UMM_NBLOCK(cur) & UMM_BLOCKNO_MASK;
while( (UMM_NBLOCK(c) & UMM_BLOCKNO_MASK) ) {
if ( !(UMM_NBLOCK(c) & UMM_FREELIST_MASK) ) {
if ( !check_poison_block(&UMM_BLOCK(c)) )
while ((UMM_NBLOCK(c) & UMM_BLOCKNO_MASK)) {
if (!(UMM_NBLOCK(c) & UMM_FREELIST_MASK)) {
if (!check_poison_block(&UMM_BLOCK(c))) {
return false;
}
break;
}
@ -85,24 +87,24 @@ static bool check_poison_neighbors( umm_heap_context_t *_context, uint16_t cur )
/* ------------------------------------------------------------------------ */
static void *get_unpoisoned_check_neighbors( void *vptr, const char* file, int line ) {
static void *get_unpoisoned_check_neighbors(void *vptr, const char *file, int line) {
uintptr_t ptr = (uintptr_t)vptr;
if (ptr != 0) {
ptr -= (sizeof(UMM_POISONED_BLOCK_LEN_TYPE) + UMM_POISON_SIZE_BEFORE);
#if defined(UMM_POISON_CHECK_LITE)
#if defined(UMM_POISON_CHECK_LITE)
UMM_CRITICAL_DECL(id_poison);
uint16_t c;
bool poison = false;
umm_heap_context_t *_context = umm_get_ptr_context( vptr );
umm_heap_context_t *_context = umm_get_ptr_context(vptr);
if (NULL == _context) {
panic();
return NULL;
}
/* Figure out which block we're in. Note the use of truncated division... */
c = (ptr - (uintptr_t)(&(_context->heap[0])))/sizeof(umm_block);
c = (ptr - (uintptr_t)(&(_context->heap[0]))) / sizeof(umm_block);
UMM_CRITICAL_ENTRY(id_poison);
poison = check_poison_block(&UMM_BLOCK(c)) && check_poison_neighbors(_context, c);
@ -115,14 +117,14 @@ static void *get_unpoisoned_check_neighbors( void *vptr, const char* file, int l
abort();
}
}
#else
#else
/*
* No need to check poison here. POISON_CHECK() has already done a
* full heap check.
*/
(void)file;
(void)line;
#endif
#endif
}
return (void *)ptr;
@ -130,7 +132,7 @@ static void *get_unpoisoned_check_neighbors( void *vptr, const char* file, int l
/* ------------------------------------------------------------------------ */
void *umm_poison_realloc_fl(void *ptr, size_t size, const char* file, int line) {
void *umm_poison_realloc_fl(void *ptr, size_t size, const char *file, int line) {
void *ret;
ptr = get_unpoisoned_check_neighbors(ptr, file, line);
@ -145,7 +147,7 @@ void *umm_poison_realloc_fl(void *ptr, size_t size, const char* file, int line)
/* ------------------------------------------------------------------------ */
void umm_poison_free_fl(void *ptr, const char* file, int line) {
void umm_poison_free_fl(void *ptr, const char *file, int line) {
ptr = get_unpoisoned_check_neighbors(ptr, file, line);
@ -156,14 +158,14 @@ void umm_poison_free_fl(void *ptr, const char* file, int line) {
/* ------------------------------------------------------------------------ */
#if defined(UMM_STATS) || defined(UMM_STATS_FULL) || defined(UMM_INFO)
size_t umm_block_size( void ) {
size_t umm_block_size(void) {
return sizeof(umm_block);
}
#endif
#if defined(UMM_STATS) || defined(UMM_STATS_FULL)
// Keep complete call path in IRAM
size_t umm_free_heap_size_lw( void ) {
size_t umm_free_heap_size_lw(void) {
UMM_INIT_HEAP;
umm_heap_context_t *_context = umm_get_current_heap();
@ -189,17 +191,17 @@ size_t xPortGetFreeHeapSize(void) __attribute__ ((alias("umm_free_heap_size")));
void umm_print_stats(int force) {
umm_heap_context_t *_context = umm_get_current_heap();
DBGLOG_FORCE( force, "umm heap statistics:\n");
DBGLOG_FORCE( force, " Heap ID %5u\n", _context->id);
DBGLOG_FORCE( force, " Free Space %5u\n", _context->UMM_FREE_BLOCKS * sizeof(umm_block));
DBGLOG_FORCE( force, " OOM Count %5u\n", _context->UMM_OOM_COUNT);
#if defined(UMM_STATS_FULL)
DBGLOG_FORCE( force, " Low Watermark %5u\n", _context->stats.free_blocks_min * sizeof(umm_block));
DBGLOG_FORCE( force, " Low Watermark ISR %5u\n", _context->stats.free_blocks_isr_min * sizeof(umm_block));
DBGLOG_FORCE( force, " MAX Alloc Request %5u\n", _context->stats.alloc_max_size);
#endif
DBGLOG_FORCE( force, " Size of umm_block %5u\n", sizeof(umm_block));
DBGLOG_FORCE( force, "+--------------------------------------------------------------+\n" );
DBGLOG_FORCE(force, "umm heap statistics:\n");
DBGLOG_FORCE(force, " Heap ID %5u\n", _context->id);
DBGLOG_FORCE(force, " Free Space %5u\n", _context->UMM_FREE_BLOCKS * sizeof(umm_block));
DBGLOG_FORCE(force, " OOM Count %5u\n", _context->UMM_OOM_COUNT);
#if defined(UMM_STATS_FULL)
DBGLOG_FORCE(force, " Low Watermark %5u\n", _context->stats.free_blocks_min * sizeof(umm_block));
DBGLOG_FORCE(force, " Low Watermark ISR %5u\n", _context->stats.free_blocks_isr_min * sizeof(umm_block));
DBGLOG_FORCE(force, " MAX Alloc Request %5u\n", _context->stats.alloc_max_size);
#endif
DBGLOG_FORCE(force, " Size of umm_block %5u\n", sizeof(umm_block));
DBGLOG_FORCE(force, "+--------------------------------------------------------------+\n");
}
#endif
@ -214,7 +216,7 @@ int ICACHE_FLASH_ATTR umm_info_safe_printf_P(const char *fmt, ...) {
}
#if defined(UMM_STATS) || defined(UMM_STATS_FULL)
size_t ICACHE_FLASH_ATTR umm_get_oom_count( void ) {
size_t ICACHE_FLASH_ATTR umm_get_oom_count(void) {
umm_heap_context_t *_context = umm_get_current_heap();
return _context->UMM_OOM_COUNT;
}
@ -228,12 +230,12 @@ size_t ICACHE_FLASH_ATTR umm_get_oom_count( void ) {
//
// If this is correct use alias.
//
size_t ICACHE_FLASH_ATTR umm_free_heap_size_lw_min( void ) {
size_t ICACHE_FLASH_ATTR umm_free_heap_size_lw_min(void) {
umm_heap_context_t *_context = umm_get_current_heap();
return _context->stats.free_blocks_min * umm_block_size();
}
size_t ICACHE_FLASH_ATTR umm_free_heap_size_min_reset( void ) {
size_t ICACHE_FLASH_ATTR umm_free_heap_size_min_reset(void) {
umm_heap_context_t *_context = umm_get_current_heap();
_context->stats.free_blocks_min = _context->UMM_FREE_BLOCKS;
return _context->stats.free_blocks_min * umm_block_size();
@ -242,53 +244,53 @@ size_t ICACHE_FLASH_ATTR umm_free_heap_size_min_reset( void ) {
#if 0 // TODO - Don't understand this why do both umm_free_heap_size_(lw_)min exist
size_t umm_free_heap_size_min(void) __attribute__ ((alias("umm_free_heap_size_lw_min")));
#else
size_t ICACHE_FLASH_ATTR umm_free_heap_size_min( void ) {
size_t ICACHE_FLASH_ATTR umm_free_heap_size_min(void) {
umm_heap_context_t *_context = umm_get_current_heap();
return _context->stats.free_blocks_min * umm_block_size();
}
#endif
size_t ICACHE_FLASH_ATTR umm_free_heap_size_isr_min( void ) {
size_t ICACHE_FLASH_ATTR umm_free_heap_size_isr_min(void) {
umm_heap_context_t *_context = umm_get_current_heap();
return _context->stats.free_blocks_isr_min * umm_block_size();
}
size_t ICACHE_FLASH_ATTR umm_get_max_alloc_size( void ) {
size_t ICACHE_FLASH_ATTR umm_get_max_alloc_size(void) {
umm_heap_context_t *_context = umm_get_current_heap();
return _context->stats.alloc_max_size;
}
size_t ICACHE_FLASH_ATTR umm_get_last_alloc_size( void ) {
size_t ICACHE_FLASH_ATTR umm_get_last_alloc_size(void) {
umm_heap_context_t *_context = umm_get_current_heap();
return _context->stats.last_alloc_size;
}
size_t ICACHE_FLASH_ATTR umm_get_malloc_count( void ) {
size_t ICACHE_FLASH_ATTR umm_get_malloc_count(void) {
umm_heap_context_t *_context = umm_get_current_heap();
return _context->stats.id_malloc_count;
}
size_t ICACHE_FLASH_ATTR umm_get_malloc_zero_count( void ) {
size_t ICACHE_FLASH_ATTR umm_get_malloc_zero_count(void) {
umm_heap_context_t *_context = umm_get_current_heap();
return _context->stats.id_malloc_zero_count;
}
size_t ICACHE_FLASH_ATTR umm_get_realloc_count( void ) {
size_t ICACHE_FLASH_ATTR umm_get_realloc_count(void) {
umm_heap_context_t *_context = umm_get_current_heap();
return _context->stats.id_realloc_count;
}
size_t ICACHE_FLASH_ATTR umm_get_realloc_zero_count( void ) {
size_t ICACHE_FLASH_ATTR umm_get_realloc_zero_count(void) {
umm_heap_context_t *_context = umm_get_current_heap();
return _context->stats.id_realloc_zero_count;
}
size_t ICACHE_FLASH_ATTR umm_get_free_count( void ) {
size_t ICACHE_FLASH_ATTR umm_get_free_count(void) {
umm_heap_context_t *_context = umm_get_current_heap();
return _context->stats.id_free_count;
}
size_t ICACHE_FLASH_ATTR umm_get_free_null_count( void ) {
size_t ICACHE_FLASH_ATTR umm_get_free_null_count(void) {
umm_heap_context_t *_context = umm_get_current_heap();
return _context->stats.id_free_null_count;
}

View File

@ -22,7 +22,7 @@
* string while INTLEVEL is non-zero.
*/
#undef DBGLOG_FORCE
#define DBGLOG_FORCE(force, format, ...) {if(force) {UMM_INFO_PRINTF(format, ## __VA_ARGS__);}}
#define DBGLOG_FORCE(force, format, ...) {if (force) {UMM_INFO_PRINTF(format,##__VA_ARGS__);}}
// #define DBGLOG_FORCE(force, format, ...) {if(force) {::printf(PSTR(format), ## __VA_ARGS__);}}
@ -37,7 +37,7 @@
#if defined(UMM_POISON_CHECK_LITE)
static bool check_poison_neighbors( umm_heap_context_t *_context, uint16_t cur );
static bool check_poison_neighbors(umm_heap_context_t *_context, uint16_t cur);
#endif
@ -48,7 +48,7 @@ void ICACHE_FLASH_ATTR umm_print_stats(int force);
int ICACHE_FLASH_ATTR umm_info_safe_printf_P(const char *fmt, ...) __attribute__((format(printf, 1, 2)));
#define UMM_INFO_PRINTF(fmt, ...) umm_info_safe_printf_P(PSTR(fmt), ##__VA_ARGS__)
#define UMM_INFO_PRINTF(fmt, ...) umm_info_safe_printf_P(PSTR(fmt),##__VA_ARGS__)
typedef struct umm_block_t umm_block;
@ -56,12 +56,12 @@ typedef struct umm_block_t umm_block;
struct UMM_HEAP_CONTEXT {
umm_block *heap;
void *heap_end;
#if (!defined(UMM_INLINE_METRICS) && defined(UMM_STATS)) || defined(UMM_STATS_FULL)
#if (!defined(UMM_INLINE_METRICS) && defined(UMM_STATS)) || defined(UMM_STATS_FULL)
UMM_STATISTICS stats;
#endif
#ifdef UMM_INFO
#endif
#ifdef UMM_INFO
UMM_HEAP_INFO info;
#endif
#endif
unsigned short int numblocks;
unsigned char id;
};

View File

@ -70,17 +70,17 @@ extern "C" {
#include "dbglog/dbglog.h"
//C This change is new in upstream umm_malloc.I think this would have created a
//C breaking change. Keeping the old #define method in umm_malloc_cfg.h.
//C I don't see a simple way of making it work. We would have to run code before
//C the SDK has run to set a value for uint32_t UMM_MALLOC_CFG_HEAP_SIZE.
//C On the other hand, a manual call to umm_init() before anything else has had a
//C chance to run would mean that all those calls testing to see if the heap has
//C been initialized at every umm_malloc API could be removed.
//C
//C before starting the NON OS SDK
//C extern void *UMM_MALLOC_CFG_HEAP_ADDR;
//C extern uint32_t UMM_MALLOC_CFG_HEAP_SIZE;
// C This change is new in upstream umm_malloc.I think this would have created a
// C breaking change. Keeping the old #define method in umm_malloc_cfg.h.
// C I don't see a simple way of making it work. We would have to run code before
// C the SDK has run to set a value for uint32_t UMM_MALLOC_CFG_HEAP_SIZE.
// C On the other hand, a manual call to umm_init() before anything else has had a
// C chance to run would mean that all those calls testing to see if the heap has
// C been initialized at every umm_malloc API could be removed.
// C
// C before starting the NON OS SDK
// C extern void *UMM_MALLOC_CFG_HEAP_ADDR;
// C extern uint32_t UMM_MALLOC_CFG_HEAP_SIZE;
#include "umm_local.h" // target-dependent supplemental
@ -132,12 +132,12 @@ umm_heap_context_t *umm_get_current_heap(void) {
return &heap_context[0];
}
static umm_heap_context_t *umm_get_heap_by_id( size_t which ) {
static umm_heap_context_t *umm_get_heap_by_id(size_t which) {
(void)which;
return &heap_context[0];
}
umm_heap_context_t *umm_set_heap_by_id( size_t which ) {
umm_heap_context_t *umm_set_heap_by_id(size_t which) {
(void)which;
return &heap_context[0];
}
@ -151,14 +151,14 @@ umm_heap_context_t *umm_get_current_heap(void) {
return &heap_context[umm_heap_cur];
}
static umm_heap_context_t *umm_get_heap_by_id( size_t which ) {
static umm_heap_context_t *umm_get_heap_by_id(size_t which) {
if (which < UMM_NUM_HEAPS) {
return &heap_context[which];
}
return NULL;
}
umm_heap_context_t *umm_set_heap_by_id( size_t which ) {
umm_heap_context_t *umm_set_heap_by_id(size_t which) {
umm_heap_context_t *_context = umm_get_heap_by_id(which);
if (_context && _context->heap) {
umm_heap_cur = which;
@ -169,40 +169,40 @@ umm_heap_context_t *umm_set_heap_by_id( size_t which ) {
#endif
#if (UMM_NUM_HEAPS == 1)
umm_heap_context_t *umm_push_heap( size_t which ) {
umm_heap_context_t *umm_push_heap(size_t which) {
(void)which;
return &heap_context[0];
}
umm_heap_context_t *umm_pop_heap( void ) {
umm_heap_context_t *umm_pop_heap(void) {
return &heap_context[0];
}
int umm_get_heap_stack_index( void ) {
int umm_get_heap_stack_index(void) {
return 0;
}
#else
/* ------------------------------------------------------------------------ */
umm_heap_context_t *umm_push_heap( size_t which ) {
umm_heap_context_t *umm_push_heap(size_t which) {
if (umm_heap_stack_ptr < UMM_HEAP_STACK_DEPTH) {
umm_heap_stack[umm_heap_stack_ptr++] = umm_heap_cur;
return umm_set_heap_by_id( which );
return umm_set_heap_by_id(which);
}
return NULL;
}
/* ------------------------------------------------------------------------ */
umm_heap_context_t *umm_pop_heap( void ) {
if (umm_heap_stack_ptr > 0 ) {
umm_heap_context_t *umm_pop_heap(void) {
if (umm_heap_stack_ptr > 0) {
return umm_set_heap_by_id(umm_heap_stack[--umm_heap_stack_ptr]);
}
return NULL;
}
// Intended for diagnostic use
int umm_get_heap_stack_index( void ) {
int umm_get_heap_stack_index(void) {
return umm_heap_stack_ptr;
}
#endif
@ -212,7 +212,7 @@ int umm_get_heap_stack_index( void ) {
* realloc or free since you may not be in the right heap to handle it.
*
*/
static bool test_ptr_context( size_t which, void *ptr ) {
static bool test_ptr_context(size_t which, void *ptr) {
return
heap_context[which].heap &&
ptr >= (void *)heap_context[which].heap &&
@ -221,8 +221,8 @@ static bool test_ptr_context( size_t which, void *ptr ) {
static umm_heap_context_t *umm_get_ptr_context(void *ptr) {
for (size_t i = 0; i < UMM_NUM_HEAPS; i++) {
if (test_ptr_context( i, ptr ) ) {
return umm_get_heap_by_id( i );
if (test_ptr_context(i, ptr)) {
return umm_get_heap_by_id(i);
}
}
@ -343,18 +343,18 @@ static void umm_split_block(
umm_heap_context_t *_context,
uint16_t c,
uint16_t blocks,
uint16_t new_freemask ) {
uint16_t new_freemask) {
UMM_NBLOCK(c+blocks) = (UMM_NBLOCK(c) & UMM_BLOCKNO_MASK) | new_freemask;
UMM_PBLOCK(c+blocks) = c;
UMM_NBLOCK(c + blocks) = (UMM_NBLOCK(c) & UMM_BLOCKNO_MASK) | new_freemask;
UMM_PBLOCK(c + blocks) = c;
UMM_PBLOCK(UMM_NBLOCK(c) & UMM_BLOCKNO_MASK) = (c+blocks);
UMM_NBLOCK(c) = (c+blocks);
UMM_PBLOCK(UMM_NBLOCK(c) & UMM_BLOCKNO_MASK) = (c + blocks);
UMM_NBLOCK(c) = (c + blocks);
}
/* ------------------------------------------------------------------------ */
static void umm_disconnect_from_free_list( umm_heap_context_t *_context, uint16_t c ) {
static void umm_disconnect_from_free_list(umm_heap_context_t *_context, uint16_t c) {
/* Disconnect this block from the FREE list */
UMM_NFREE(UMM_PFREE(c)) = UMM_NFREE(c);
@ -371,22 +371,22 @@ static void umm_disconnect_from_free_list( umm_heap_context_t *_context, uint16_
* next block is free.
*/
static void umm_assimilate_up( umm_heap_context_t *_context, uint16_t c ) {
static void umm_assimilate_up(umm_heap_context_t *_context, uint16_t c) {
if( UMM_NBLOCK(UMM_NBLOCK(c)) & UMM_FREELIST_MASK ) {
if (UMM_NBLOCK(UMM_NBLOCK(c)) & UMM_FREELIST_MASK) {
UMM_FRAGMENTATION_METRIC_REMOVE( UMM_NBLOCK(c) );
UMM_FRAGMENTATION_METRIC_REMOVE(UMM_NBLOCK(c));
/*
* The next block is a free block, so assimilate up and remove it from
* the free list
*/
DBGLOG_DEBUG( "Assimilate up to next block, which is FREE\n" );
DBGLOG_DEBUG("Assimilate up to next block, which is FREE\n");
/* Disconnect the next block from the FREE list */
umm_disconnect_from_free_list( _context, UMM_NBLOCK(c) );
umm_disconnect_from_free_list(_context, UMM_NBLOCK(c));
/* Assimilate the next block with this one */
@ -401,7 +401,7 @@ static void umm_assimilate_up( umm_heap_context_t *_context, uint16_t c ) {
* up before assimilating down.
*/
static uint16_t umm_assimilate_down( umm_heap_context_t *_context, uint16_t c, uint16_t freemask ) {
static uint16_t umm_assimilate_down(umm_heap_context_t *_context, uint16_t c, uint16_t freemask) {
// We are going to assimilate down to the previous block because
// it was free, so remove it from the fragmentation metric
@ -421,25 +421,25 @@ static uint16_t umm_assimilate_down( umm_heap_context_t *_context, uint16_t c, u
UMM_FRAGMENTATION_METRIC_ADD(UMM_PBLOCK(c));
}
return( UMM_PBLOCK(c) );
return UMM_PBLOCK(c);
}
/* ------------------------------------------------------------------------- */
static void umm_init_stage_2( umm_heap_context_t *_context ) {
static void umm_init_stage_2(umm_heap_context_t *_context) {
/* setup initial blank heap structure */
UMM_FRAGMENTATION_METRIC_INIT();
/* init stats.free_blocks */
#if defined(UMM_STATS) || defined(UMM_STATS_FULL)
#if defined(UMM_STATS_FULL)
#if defined(UMM_STATS) || defined(UMM_STATS_FULL)
#if defined(UMM_STATS_FULL)
_context->stats.free_blocks_min =
_context->stats.free_blocks_isr_min = UMM_NUMBLOCKS - 2;
#endif
#ifndef UMM_INLINE_METRICS
#endif
#ifndef UMM_INLINE_METRICS
_context->stats.free_blocks = UMM_NUMBLOCKS - 2;
#endif
#endif
#endif
#endif
/* Set up umm_block[0], which just points to umm_block[1] */
UMM_NBLOCK(0) = 1;
@ -479,7 +479,7 @@ static void umm_init_stage_2( umm_heap_context_t *_context ) {
}
void umm_init_common( size_t id, void *start_addr, size_t size, bool zero ) {
void umm_init_common(size_t id, void *start_addr, size_t size, bool zero) {
/* Preserve internal setup */
umm_heap_context_t *_context = umm_get_heap_by_id(id);
if (NULL == start_addr || NULL == _context || _context->heap) {
@ -496,16 +496,16 @@ void umm_init_common( size_t id, void *start_addr, size_t size, bool zero ) {
// post-crash discovery.
if (zero) {
memset(_context->heap, 0x00, size);
#if (!defined(UMM_INLINE_METRICS) && defined(UMM_STATS)) || defined(UMM_STATS_FULL)
#if (!defined(UMM_INLINE_METRICS) && defined(UMM_STATS)) || defined(UMM_STATS_FULL)
memset(&_context->stats, 0x00, sizeof(_context->stats));
#endif
#endif
/* Set up internal data structures */
umm_init_stage_2(_context);
}
}
void umm_init( void ) {
void umm_init(void) {
// if (umm_heap) {
// return;
// }
@ -513,12 +513,12 @@ void umm_init( void ) {
heap_context[i].heap = NULL;
}
memset(&heap_context[0], 0, sizeof(heap_context));
umm_init_common( UMM_HEAP_DRAM, (void *)UMM_MALLOC_CFG_HEAP_ADDR, UMM_MALLOC_CFG_HEAP_SIZE, true );
umm_init_common(UMM_HEAP_DRAM, (void *)UMM_MALLOC_CFG_HEAP_ADDR, UMM_MALLOC_CFG_HEAP_SIZE, true);
// umm_heap = (void *)&heap_context;
}
#ifdef UMM_HEAP_IRAM
void umm_init_iram_ex( void *addr, unsigned int size, bool zero ) {
void umm_init_iram_ex(void *addr, unsigned int size, bool zero) {
/* We need the main, internal heap set up first */
UMM_INIT_HEAP;
@ -539,7 +539,7 @@ void umm_init_iram(void) {
#endif // #ifdef UMM_HEAP_IRAM
#ifdef UMM_HEAP_EXTERNAL
void umm_init_vm( void *vmaddr, unsigned int vmsize ) {
void umm_init_vm(void *vmaddr, unsigned int vmsize) {
/* We need the main, internal (DRAM) heap set up first */
UMM_INIT_HEAP;
@ -552,7 +552,7 @@ void umm_init_vm( void *vmaddr, unsigned int vmsize ) {
* UMM_CRITICAL_ENTRY() and UMM_CRITICAL_EXIT().
*/
static void umm_free_core( umm_heap_context_t *_context, void *ptr ) {
static void umm_free_core(umm_heap_context_t *_context, void *ptr) {
uint16_t c;
@ -573,22 +573,22 @@ static void umm_free_core( umm_heap_context_t *_context, void *ptr ) {
/* Figure out which block we're in. Note the use of truncated division... */
c = (((uintptr_t)ptr)-(uintptr_t)(&(_context->heap[0])))/sizeof(umm_block);
c = (((uintptr_t)ptr) - (uintptr_t)(&(_context->heap[0]))) / sizeof(umm_block);
DBGLOG_DEBUG( "Freeing block %6d\n", c );
DBGLOG_DEBUG("Freeing block %6d\n", c);
/* Update stats Free Block count */
STATS__FREE_BLOCKS_UPDATE(UMM_NBLOCK(c) - c);
/* Now let's assimilate this block with the next one if possible. */
umm_assimilate_up( _context, c );
umm_assimilate_up(_context, c);
/* Then assimilate with the previous block if possible */
if( UMM_NBLOCK(UMM_PBLOCK(c)) & UMM_FREELIST_MASK ) {
if (UMM_NBLOCK(UMM_PBLOCK(c)) & UMM_FREELIST_MASK) {
DBGLOG_DEBUG( "Assimilate down to previous block, which is FREE\n" );
DBGLOG_DEBUG("Assimilate down to previous block, which is FREE\n");
c = umm_assimilate_down(_context, c, UMM_FREELIST_MASK);
} else {
@ -598,7 +598,7 @@ static void umm_free_core( umm_heap_context_t *_context, void *ptr ) {
*/
UMM_FRAGMENTATION_METRIC_ADD(c);
DBGLOG_DEBUG( "Just add to head of free list\n" );
DBGLOG_DEBUG("Just add to head of free list\n");
UMM_PFREE(UMM_NFREE(0)) = c;
UMM_NFREE(c) = UMM_NFREE(0);
@ -611,15 +611,15 @@ static void umm_free_core( umm_heap_context_t *_context, void *ptr ) {
/* ------------------------------------------------------------------------ */
void umm_free( void *ptr ) {
void umm_free(void *ptr) {
UMM_CRITICAL_DECL(id_free);
UMM_INIT_HEAP;
/* If we're being asked to free a NULL pointer, well that's just silly! */
if( (void *)0 == ptr ) {
DBGLOG_DEBUG( "free a null pointer -> do nothing\n" );
if ((void *)0 == ptr) {
DBGLOG_DEBUG("free a null pointer -> do nothing\n");
STATS__NULL_FREE_REQUEST(id_free);
return;
@ -630,7 +630,7 @@ void umm_free( void *ptr ) {
UMM_CRITICAL_ENTRY(id_free);
/* Need to be in the heap in which this block lives */
umm_free_core( umm_get_ptr_context( ptr ), ptr );
umm_free_core(umm_get_ptr_context(ptr), ptr);
UMM_CRITICAL_EXIT(id_free);
}
@ -640,7 +640,7 @@ void umm_free( void *ptr ) {
* UMM_CRITICAL_ENTRY() and UMM_CRITICAL_EXIT().
*/
static void *umm_malloc_core( umm_heap_context_t *_context, size_t size ) {
static void *umm_malloc_core(umm_heap_context_t *_context, size_t size) {
uint16_t blocks;
uint16_t blockSize = 0;
@ -656,7 +656,7 @@ static void *umm_malloc_core( umm_heap_context_t *_context, size_t size ) {
return NULL;
}
blocks = umm_blocks( size );
blocks = umm_blocks(size);
/*
* Now we can scan through the free list until we find a space that's big
@ -671,35 +671,36 @@ static void *umm_malloc_core( umm_heap_context_t *_context, size_t size ) {
bestBlock = UMM_NFREE(0);
bestSize = 0x7FFF;
while( cf ) {
while (cf) {
blockSize = (UMM_NBLOCK(cf) & UMM_BLOCKNO_MASK) - cf;
DBGLOG_TRACE( "Looking at block %6d size %6d\n", cf, blockSize );
DBGLOG_TRACE("Looking at block %6d size %6d\n", cf, blockSize);
#if defined UMM_BEST_FIT
if( (blockSize >= blocks) && (blockSize < bestSize) ) {
#if defined UMM_BEST_FIT
if ((blockSize >= blocks) && (blockSize < bestSize)) {
bestBlock = cf;
bestSize = blockSize;
}
#elif defined UMM_FIRST_FIT
#elif defined UMM_FIRST_FIT
/* This is the first block that fits! */
if( (blockSize >= blocks) )
if ((blockSize >= blocks)) {
break;
#else
# error "No UMM_*_FIT is defined - check umm_malloc_cfg.h"
#endif
}
#else
#error "No UMM_*_FIT is defined - check umm_malloc_cfg.h"
#endif
cf = UMM_NFREE(cf);
}
if( 0x7FFF != bestSize ) {
if (0x7FFF != bestSize) {
cf = bestBlock;
blockSize = bestSize;
}
POISON_CHECK_NEIGHBORS(cf);
if( UMM_NBLOCK(cf) & UMM_BLOCKNO_MASK && blockSize >= blocks ) {
if (UMM_NBLOCK(cf) & UMM_BLOCKNO_MASK && blockSize >= blocks) {
UMM_FRAGMENTATION_METRIC_REMOVE(cf);
@ -710,24 +711,24 @@ static void *umm_malloc_core( umm_heap_context_t *_context, size_t size ) {
* block on the free list...
*/
if( blockSize == blocks ) {
if (blockSize == blocks) {
/* It's an exact fit and we don't need to split off a block. */
DBGLOG_DEBUG( "Allocating %6d blocks starting at %6d - exact\n", blocks, cf );
DBGLOG_DEBUG("Allocating %6d blocks starting at %6d - exact\n", blocks, cf);
/* Disconnect this block from the FREE list */
umm_disconnect_from_free_list( _context, cf );
umm_disconnect_from_free_list(_context, cf);
} else {
/* It's not an exact fit and we need to split off a block. */
DBGLOG_DEBUG( "Allocating %6d blocks starting at %6d - existing\n", blocks, cf );
DBGLOG_DEBUG("Allocating %6d blocks starting at %6d - existing\n", blocks, cf);
/*
* split current free block `cf` into two blocks. The first one will be
* returned to user, so it's not free, and the second one will be free.
*/
umm_split_block( _context, cf, blocks, UMM_FREELIST_MASK /*new block is free*/ );
umm_split_block(_context, cf, blocks, UMM_FREELIST_MASK /*new block is free*/);
UMM_FRAGMENTATION_METRIC_ADD(UMM_NBLOCK(cf));
@ -739,31 +740,31 @@ static void *umm_malloc_core( umm_heap_context_t *_context, size_t size ) {
*/
/* previous free block */
UMM_NFREE( UMM_PFREE(cf) ) = cf + blocks;
UMM_PFREE( cf + blocks ) = UMM_PFREE(cf);
UMM_NFREE(UMM_PFREE(cf)) = cf + blocks;
UMM_PFREE(cf + blocks) = UMM_PFREE(cf);
/* next free block */
UMM_PFREE( UMM_NFREE(cf) ) = cf + blocks;
UMM_NFREE( cf + blocks ) = UMM_NFREE(cf);
UMM_PFREE(UMM_NFREE(cf)) = cf + blocks;
UMM_NFREE(cf + blocks) = UMM_NFREE(cf);
}
STATS__FREE_BLOCKS_UPDATE( -blocks );
STATS__FREE_BLOCKS_UPDATE(-blocks);
STATS__FREE_BLOCKS_MIN();
} else {
/* Out of memory */
STATS__OOM_UPDATE();
DBGLOG_DEBUG( "Can't allocate %5d blocks\n", blocks );
DBGLOG_DEBUG("Can't allocate %5d blocks\n", blocks);
return( (void *)NULL );
return (void *)NULL;
}
return( (void *)&UMM_DATA(cf) );
return (void *)&UMM_DATA(cf);
}
/* ------------------------------------------------------------------------ */
void *umm_malloc( size_t size ) {
void *umm_malloc(size_t size) {
UMM_CRITICAL_DECL(id_malloc);
void *ptr = NULL;
@ -831,11 +832,11 @@ void *umm_malloc( size_t size ) {
* the number of blocks to allocate are easier...
*/
if( 0 == size ) {
DBGLOG_DEBUG( "malloc a block of 0 bytes -> do nothing\n" );
if (0 == size) {
DBGLOG_DEBUG("malloc a block of 0 bytes -> do nothing\n");
STATS__ZERO_ALLOC_REQUEST(id_malloc, size);
return( ptr );
return ptr;
}
/* Allocate the memory within a protected critical section */
@ -856,16 +857,16 @@ void *umm_malloc( size_t size ) {
_context = umm_get_heap_by_id(UMM_HEAP_DRAM);
}
ptr = umm_malloc_core( _context, size );
ptr = umm_malloc_core(_context, size);
UMM_CRITICAL_EXIT(id_malloc);
return( ptr );
return ptr;
}
/* ------------------------------------------------------------------------ */
void *umm_realloc( void *ptr, size_t size ) {
void *umm_realloc(void *ptr, size_t size) {
UMM_CRITICAL_DECL(id_realloc);
uint16_t blocks;
@ -887,10 +888,10 @@ void *umm_realloc( void *ptr, size_t size ) {
* standard is concerned.
*/
if( ((void *)NULL == ptr) ) {
DBGLOG_DEBUG( "realloc the NULL pointer - call malloc()\n" );
if (((void *)NULL == ptr)) {
DBGLOG_DEBUG("realloc the NULL pointer - call malloc()\n");
return( umm_malloc(size) );
return umm_malloc(size);
}
/*
@ -900,19 +901,19 @@ void *umm_realloc( void *ptr, size_t size ) {
*/
/* Need to be in the heap in which this block lives */
umm_heap_context_t *_context = umm_get_ptr_context( ptr );
umm_heap_context_t *_context = umm_get_ptr_context(ptr);
if (NULL == _context) {
panic();
return NULL;
}
if( 0 == size ) {
DBGLOG_DEBUG( "realloc to 0 size, just free the block\n" );
if (0 == size) {
DBGLOG_DEBUG("realloc to 0 size, just free the block\n");
STATS__ZERO_ALLOC_REQUEST(id_realloc, size);
umm_free( ptr );
umm_free(ptr);
return( (void *)NULL );
return (void *)NULL;
}
STATS__ALLOC_REQUEST(id_realloc, size);
@ -926,11 +927,11 @@ void *umm_realloc( void *ptr, size_t size ) {
* copying. So first, let's figure out how many blocks we'll need.
*/
blocks = umm_blocks( size );
blocks = umm_blocks(size);
/* Figure out which block we're in. Note the use of truncated division... */
c = (((uintptr_t)ptr)-(uintptr_t)(&(_context->heap[0])))/sizeof(umm_block);
c = (((uintptr_t)ptr) - (uintptr_t)(&(_context->heap[0]))) / sizeof(umm_block);
/* Figure out how big this block is ... the free bit is not set :-) */
@ -938,7 +939,7 @@ void *umm_realloc( void *ptr, size_t size ) {
/* Figure out how many bytes are in this block */
curSize = (blockSize*sizeof(umm_block))-(sizeof(((umm_block *)0)->header));
curSize = (blockSize * sizeof(umm_block)) - (sizeof(((umm_block *)0)->header));
/* Protect the critical section... */
UMM_CRITICAL_ENTRY(id_realloc);
@ -959,9 +960,9 @@ void *umm_realloc( void *ptr, size_t size ) {
prevBlockSize = (c - UMM_PBLOCK(c));
}
DBGLOG_DEBUG( "realloc blocks %d blockSize %d nextBlockSize %d prevBlockSize %d\n", blocks, blockSize, nextBlockSize, prevBlockSize );
DBGLOG_DEBUG("realloc blocks %d blockSize %d nextBlockSize %d prevBlockSize %d\n", blocks, blockSize, nextBlockSize, prevBlockSize);
//C With each upstream update this section should be reevaluated.
// C With each upstream update this section should be reevaluated.
/*C
*
* The `#if defined(UMM_REALLOC_MINIMIZE_COPY)` section tracks the content of
@ -976,7 +977,7 @@ void *umm_realloc( void *ptr, size_t size ) {
* confirm; however, I think this to be the best option when considering the
* amount of reallocates that can occur with the Strings library.
*/
#if defined(UMM_REALLOC_MINIMIZE_COPY)
#if defined(UMM_REALLOC_MINIMIZE_COPY)
/*
* Ok, now that we're here we know how many blocks we want and the current
* blockSize. The prevBlockSize and nextBlockSize are set and we can figure
@ -1014,69 +1015,69 @@ void *umm_realloc( void *ptr, size_t size ) {
// Case 1 - block is same size or smaller
if (blockSize >= blocks) {
DBGLOG_DEBUG( "realloc the same or smaller size block - %i, do nothing\n", blocks );
DBGLOG_DEBUG("realloc the same or smaller size block - %i, do nothing\n", blocks);
/* This space intentionally left blank */
// Case 2 - block + next block fits EXACTLY
} else if ((blockSize + nextBlockSize) == blocks) {
DBGLOG_DEBUG( "exact realloc using next block - %i\n", blocks );
umm_assimilate_up( c );
STATS__FREE_BLOCKS_UPDATE( - nextBlockSize );
DBGLOG_DEBUG("exact realloc using next block - %i\n", blocks);
umm_assimilate_up(c);
STATS__FREE_BLOCKS_UPDATE(-nextBlockSize);
blockSize += nextBlockSize;
// Case 3 - prev block NOT free and block + next block fits
} else if ((0 == prevBlockSize) && (blockSize + nextBlockSize) >= blocks) {
DBGLOG_DEBUG( "realloc using next block - %i\n", blocks );
umm_assimilate_up( _context, c );
STATS__FREE_BLOCKS_UPDATE( - nextBlockSize );
DBGLOG_DEBUG("realloc using next block - %i\n", blocks);
umm_assimilate_up(_context, c);
STATS__FREE_BLOCKS_UPDATE(-nextBlockSize);
blockSize += nextBlockSize;
// Case 4 - prev block + block fits
} else if ((prevBlockSize + blockSize) >= blocks) {
DBGLOG_DEBUG( "realloc using prev block - %i\n", blocks );
umm_disconnect_from_free_list( _context, UMM_PBLOCK(c) );
DBGLOG_DEBUG("realloc using prev block - %i\n", blocks);
umm_disconnect_from_free_list(_context, UMM_PBLOCK(c));
c = umm_assimilate_down(_context, c, 0);
STATS__FREE_BLOCKS_UPDATE( - prevBlockSize );
STATS__FREE_BLOCKS_UPDATE(-prevBlockSize);
STATS__FREE_BLOCKS_ISR_MIN();
blockSize += prevBlockSize;
UMM_CRITICAL_SUSPEND(id_realloc);
memmove( (void *)&UMM_DATA(c), ptr, curSize );
memmove((void *)&UMM_DATA(c), ptr, curSize);
ptr = (void *)&UMM_DATA(c);
UMM_CRITICAL_RESUME(id_realloc);
// Case 5 - prev block + block + next block fits
} else if ((prevBlockSize + blockSize + nextBlockSize) >= blocks) {
DBGLOG_DEBUG( "realloc using prev and next block - %d\n", blocks );
umm_assimilate_up( _context, c );
umm_disconnect_from_free_list( _context, UMM_PBLOCK(c) );
DBGLOG_DEBUG("realloc using prev and next block - %d\n", blocks);
umm_assimilate_up(_context, c);
umm_disconnect_from_free_list(_context, UMM_PBLOCK(c));
c = umm_assimilate_down(_context, c, 0);
STATS__FREE_BLOCKS_UPDATE( - prevBlockSize - nextBlockSize );
#ifdef UMM_LIGHTWEIGHT_CPU
STATS__FREE_BLOCKS_UPDATE(-prevBlockSize - nextBlockSize);
#ifdef UMM_LIGHTWEIGHT_CPU
if ((prevBlockSize + blockSize + nextBlockSize) > blocks) {
umm_split_block( _context, c, blocks, 0 );
umm_free_core( _context, (void *)&UMM_DATA(c+blocks) );
umm_split_block(_context, c, blocks, 0);
umm_free_core(_context, (void *)&UMM_DATA(c + blocks));
}
STATS__FREE_BLOCKS_ISR_MIN();
blockSize = blocks;
#else
#else
blockSize += (prevBlockSize + nextBlockSize);
#endif
#endif
UMM_CRITICAL_SUSPEND(id_realloc);
memmove( (void *)&UMM_DATA(c), ptr, curSize );
memmove((void *)&UMM_DATA(c), ptr, curSize);
ptr = (void *)&UMM_DATA(c);
UMM_CRITICAL_RESUME(id_realloc);
// Case 6 - default is we need to realloc a new block
} else {
DBGLOG_DEBUG( "realloc a completely new block %i\n", blocks );
DBGLOG_DEBUG("realloc a completely new block %i\n", blocks);
void *oldptr = ptr;
if( (ptr = umm_malloc_core( _context, size )) ) {
DBGLOG_DEBUG( "realloc %i to a bigger block %i, copy, and free the old\n", blockSize, blocks );
if ((ptr = umm_malloc_core(_context, size))) {
DBGLOG_DEBUG("realloc %i to a bigger block %i, copy, and free the old\n", blockSize, blocks);
UMM_CRITICAL_SUSPEND(id_realloc);
memcpy( ptr, oldptr, curSize );
memcpy(ptr, oldptr, curSize);
UMM_CRITICAL_RESUME(id_realloc);
umm_free_core( _context, oldptr );
umm_free_core(_context, oldptr);
} else {
DBGLOG_DEBUG( "realloc %i to a bigger block %i failed - return NULL and leave the old block!\n", blockSize, blocks );
DBGLOG_DEBUG("realloc %i to a bigger block %i failed - return NULL and leave the old block!\n", blockSize, blocks);
/* This space intentionally left blank */
/* STATS__OOM_UPDATE() has already been called by umm_malloc_core - don't duplicate count */
}
@ -1085,7 +1086,7 @@ void *umm_realloc( void *ptr, size_t size ) {
*/
blockSize = blocks;
}
#elif defined(UMM_REALLOC_DEFRAG)
#elif defined(UMM_REALLOC_DEFRAG)
/*
* Ok, now that we're here we know how many blocks we want and the current
* blockSize. The prevBlockSize and nextBlockSize are set and we can figure
@ -1112,50 +1113,50 @@ void *umm_realloc( void *ptr, size_t size ) {
* requested number of blocks and add what's left to the free list.
*/
if (prevBlockSize && (prevBlockSize + blockSize + nextBlockSize) >= blocks) { // 1
umm_disconnect_from_free_list( _context, UMM_PBLOCK(c) );
c = umm_assimilate_down( _context, c, 0 );
STATS__FREE_BLOCKS_UPDATE( - prevBlockSize );
umm_disconnect_from_free_list(_context, UMM_PBLOCK(c));
c = umm_assimilate_down(_context, c, 0);
STATS__FREE_BLOCKS_UPDATE(-prevBlockSize);
blockSize += prevBlockSize;
if (blockSize >= blocks) {
DBGLOG_DEBUG( "realloc using prev block - %d\n", blocks );
DBGLOG_DEBUG("realloc using prev block - %d\n", blocks);
STATS__FREE_BLOCKS_ISR_MIN();
} else {
DBGLOG_DEBUG( "realloc using prev and next block - %d\n", blocks );
umm_assimilate_up( _context, c );
STATS__FREE_BLOCKS_UPDATE( - nextBlockSize );
DBGLOG_DEBUG("realloc using prev and next block - %d\n", blocks);
umm_assimilate_up(_context, c);
STATS__FREE_BLOCKS_UPDATE(-nextBlockSize);
blockSize += nextBlockSize;
#ifdef UMM_LIGHTWEIGHT_CPU
#ifdef UMM_LIGHTWEIGHT_CPU
if (blockSize > blocks) {
umm_split_block( _context, c, blocks, 0 );
umm_free_core( _context, (void *)&UMM_DATA(c+blocks) );
umm_split_block(_context, c, blocks, 0);
umm_free_core(_context, (void *)&UMM_DATA(c + blocks));
}
STATS__FREE_BLOCKS_ISR_MIN();
blockSize = blocks;
#endif
#endif
}
UMM_CRITICAL_SUSPEND(id_realloc);
memmove( (void *)&UMM_DATA(c), ptr, curSize );
memmove((void *)&UMM_DATA(c), ptr, curSize);
ptr = (void *)&UMM_DATA(c);
UMM_CRITICAL_RESUME(id_realloc);
} else if (blockSize >= blocks) { // 2
DBGLOG_DEBUG( "realloc the same or smaller size block - %d, do nothing\n", blocks );
DBGLOG_DEBUG("realloc the same or smaller size block - %d, do nothing\n", blocks);
/* This space intentionally left blank */
} else if ((blockSize + nextBlockSize) >= blocks) { // 3
DBGLOG_DEBUG( "realloc using next block - %d\n", blocks );
umm_assimilate_up( _context, c );
STATS__FREE_BLOCKS_UPDATE( - nextBlockSize );
DBGLOG_DEBUG("realloc using next block - %d\n", blocks);
umm_assimilate_up(_context, c);
STATS__FREE_BLOCKS_UPDATE(-nextBlockSize);
blockSize += nextBlockSize;
} else { // 4
DBGLOG_DEBUG( "realloc a completely new block %d\n", blocks );
DBGLOG_DEBUG("realloc a completely new block %d\n", blocks);
void *oldptr = ptr;
if( (ptr = umm_malloc_core( _context, size )) ) {
DBGLOG_DEBUG( "realloc %d to a bigger block %d, copy, and free the old\n", blockSize, blocks );
if ((ptr = umm_malloc_core(_context, size))) {
DBGLOG_DEBUG("realloc %d to a bigger block %d, copy, and free the old\n", blockSize, blocks);
UMM_CRITICAL_SUSPEND(id_realloc);
memcpy( ptr, oldptr, curSize );
memcpy(ptr, oldptr, curSize);
UMM_CRITICAL_RESUME(id_realloc);
umm_free_core( _context, oldptr);
umm_free_core(_context, oldptr);
} else {
DBGLOG_DEBUG( "realloc %d to a bigger block %d failed - return NULL and leave the old block!\n", blockSize, blocks );
DBGLOG_DEBUG("realloc %d to a bigger block %d failed - return NULL and leave the old block!\n", blockSize, blocks);
/* This space intentionally left blank */
/* STATS__OOM_UPDATE() has already been called by umm_malloc_core - don't duplicate count */
}
@ -1164,23 +1165,23 @@ void *umm_realloc( void *ptr, size_t size ) {
*/
blockSize = blocks;
}
#else
#warning "Neither UMM_REALLOC_DEFRAG nor UMM_REALLOC_MINIMIZE_COPY is defined - check umm_malloc_cfg.h"
#else
#warning "Neither UMM_REALLOC_DEFRAG nor UMM_REALLOC_MINIMIZE_COPY is defined - check umm_malloc_cfg.h"
/* An always copy option just for performance/fragmentation comparison */
if (blockSize >= blocks) {
DBGLOG_DEBUG( "realloc the same or smaller size block - %d, do nothing\n", blocks );
DBGLOG_DEBUG("realloc the same or smaller size block - %d, do nothing\n", blocks);
/* This space intentionally left blank */
} else {
DBGLOG_DEBUG( "realloc a completely new block %d\n", blocks );
DBGLOG_DEBUG("realloc a completely new block %d\n", blocks);
void *oldptr = ptr;
if( (ptr = umm_malloc_core( _context, size )) ) {
DBGLOG_DEBUG( "realloc %d to a bigger block %d, copy, and free the old\n", blockSize, blocks );
if ((ptr = umm_malloc_core(_context, size))) {
DBGLOG_DEBUG("realloc %d to a bigger block %d, copy, and free the old\n", blockSize, blocks);
UMM_CRITICAL_SUSPEND(id_realloc);
memcpy( ptr, oldptr, curSize );
memcpy(ptr, oldptr, curSize);
UMM_CRITICAL_RESUME(id_realloc);
umm_free_core( _context, oldptr );
umm_free_core(_context, oldptr);
} else {
DBGLOG_DEBUG( "realloc %d to a bigger block %d failed - return NULL and leave the old block!\n", blockSize, blocks );
DBGLOG_DEBUG("realloc %d to a bigger block %d failed - return NULL and leave the old block!\n", blockSize, blocks);
/* This space intentionally left blank */
/* STATS__OOM_UPDATE() has already been called by umm_malloc_core - don't duplicate count */
}
@ -1189,15 +1190,15 @@ void *umm_realloc( void *ptr, size_t size ) {
*/
blockSize = blocks;
}
#endif
#endif
/* Now all we need to do is figure out if the block fit exactly or if we
* need to split and free ...
*/
if (blockSize > blocks ) {
DBGLOG_DEBUG( "split and free %d blocks from %d\n", blocks, blockSize );
umm_split_block( _context, c, blocks, 0 );
umm_free_core( _context, (void *)&UMM_DATA(c+blocks) );
if (blockSize > blocks) {
DBGLOG_DEBUG("split and free %d blocks from %d\n", blocks, blockSize);
umm_split_block(_context, c, blocks, 0);
umm_free_core(_context, (void *)&UMM_DATA(c + blocks));
}
STATS__FREE_BLOCKS_MIN();
@ -1205,18 +1206,19 @@ void *umm_realloc( void *ptr, size_t size ) {
/* Release the critical section... */
UMM_CRITICAL_EXIT(id_realloc);
return( ptr );
return ptr;
}
/* ------------------------------------------------------------------------ */
void *umm_calloc( size_t num, size_t item_size ) {
void *umm_calloc(size_t num, size_t item_size) {
void *ret;
ret = umm_malloc((size_t)(item_size * num));
if (ret)
if (ret) {
memset(ret, 0x00, (size_t)(item_size * num));
}
return ret;
}

View File

@ -10,7 +10,7 @@
#include <stdint.h>
//C This include is not in upstream
// C This include is not in upstream
#include "umm_malloc_cfg.h" /* user-dependent */
#ifdef __cplusplus
@ -19,28 +19,28 @@ extern "C" {
#ifdef UMM_HEAP_EXTERNAL
extern void umm_init_vm( void *vmaddr, unsigned int vmsize );
extern void umm_init_vm(void *vmaddr, unsigned int vmsize);
#endif
#ifdef UMM_HEAP_IRAM
extern void umm_init_iram(void);
extern void umm_init_iram_ex( void *addr, unsigned int size, bool zero );
extern void umm_init_iram_ex(void *addr, unsigned int size, bool zero);
#endif
/* ------------------------------------------------------------------------ */
extern void umm_init( void );
extern void *umm_malloc( size_t size );
extern void *umm_calloc( size_t num, size_t size );
extern void *umm_realloc( void *ptr, size_t size );
extern void umm_free( void *ptr );
extern void umm_init(void);
extern void *umm_malloc(size_t size);
extern void *umm_calloc(size_t num, size_t size);
extern void *umm_realloc(void *ptr, size_t size);
extern void umm_free(void *ptr);
/* ------------------------------------------------------------------------ */
extern umm_heap_context_t *umm_push_heap( size_t heap_number );
extern umm_heap_context_t *umm_pop_heap( void );
extern int umm_get_heap_stack_index( void );
extern umm_heap_context_t *umm_set_heap_by_id( size_t which );
extern size_t umm_get_current_heap_id( void );
extern umm_heap_context_t *umm_get_current_heap( void );
extern umm_heap_context_t *umm_push_heap(size_t heap_number);
extern umm_heap_context_t *umm_pop_heap(void);
extern int umm_get_heap_stack_index(void);
extern umm_heap_context_t *umm_set_heap_by_id(size_t which);
extern size_t umm_get_current_heap_id(void);
extern umm_heap_context_t *umm_get_current_heap(void);
#ifdef __cplusplus
}

View File

@ -192,13 +192,13 @@ extern char _heap_start[];
/* -------------------------------------------------------------------------- */
#ifdef UMM_BEST_FIT
#ifdef UMM_FIRST_FIT
#error Both UMM_BEST_FIT and UMM_FIRST_FIT are defined - pick one!
#endif
#ifdef UMM_FIRST_FIT
#error Both UMM_BEST_FIT and UMM_FIRST_FIT are defined - pick one!
#endif
#else /* UMM_BEST_FIT is not defined */
#ifndef UMM_FIRST_FIT
#ifndef UMM_FIRST_FIT
#define UMM_BEST_FIT
#endif
#endif
#endif
/* -------------------------------------------------------------------------- */
@ -207,9 +207,9 @@ extern char _heap_start[];
#define UMM_FRAGMENTATION_METRIC_INIT() umm_fragmentation_metric_init(_context)
#define UMM_FRAGMENTATION_METRIC_ADD(c) umm_fragmentation_metric_add(_context, c)
#define UMM_FRAGMENTATION_METRIC_REMOVE(c) umm_fragmentation_metric_remove(_context, c)
#ifndef UMM_INFO
#ifndef UMM_INFO
#define UMM_INFO
#endif
#endif
#else
#define UMM_FRAGMENTATION_METRIC_INIT()
#define UMM_FRAGMENTATION_METRIC_ADD(c)
@ -229,7 +229,7 @@ extern char _heap_start[];
// #define UMM_INFO
#ifdef UMM_INFO
typedef struct UMM_HEAP_INFO_t {
typedef struct UMM_HEAP_INFO_t {
unsigned int totalEntries;
unsigned int usedEntries;
unsigned int freeEntries;
@ -238,33 +238,33 @@ extern char _heap_start[];
unsigned int usedBlocks;
unsigned int freeBlocks;
unsigned int freeBlocksSquared;
#ifdef UMM_INLINE_METRICS
#ifdef UMM_INLINE_METRICS
size_t oom_count;
#define UMM_OOM_COUNT info.oom_count
#define UMM_FREE_BLOCKS info.freeBlocks
#endif
#endif
unsigned int maxFreeContiguousBlocks;
}
UMM_HEAP_INFO;
}
UMM_HEAP_INFO;
// extern UMM_HEAP_INFO ummHeapInfo;
// extern UMM_HEAP_INFO ummHeapInfo;
struct UMM_HEAP_CONTEXT;
typedef struct UMM_HEAP_CONTEXT umm_heap_context_t;
extern ICACHE_FLASH_ATTR void *umm_info( void *ptr, bool force );
extern ICACHE_FLASH_ATTR void *umm_info(void *ptr, bool force);
#ifdef UMM_INLINE_METRICS
extern size_t umm_free_heap_size( void );
extern size_t umm_free_heap_size(void);
#else
extern ICACHE_FLASH_ATTR size_t umm_free_heap_size( void );
extern ICACHE_FLASH_ATTR size_t umm_free_heap_size(void);
#endif
// umm_max_block_size changed to umm_max_free_block_size in upstream.
extern ICACHE_FLASH_ATTR size_t umm_max_block_size( void );
extern ICACHE_FLASH_ATTR int umm_usage_metric( void );
extern ICACHE_FLASH_ATTR int umm_fragmentation_metric( void );
extern ICACHE_FLASH_ATTR size_t umm_free_heap_size_core( umm_heap_context_t *_context );
extern ICACHE_FLASH_ATTR size_t umm_max_block_size_core( umm_heap_context_t *_context );
extern ICACHE_FLASH_ATTR int umm_usage_metric_core( umm_heap_context_t *_context );
extern ICACHE_FLASH_ATTR int umm_fragmentation_metric_core( umm_heap_context_t *_context );
// umm_max_block_size changed to umm_max_free_block_size in upstream.
extern ICACHE_FLASH_ATTR size_t umm_max_block_size(void);
extern ICACHE_FLASH_ATTR int umm_usage_metric(void);
extern ICACHE_FLASH_ATTR int umm_fragmentation_metric(void);
extern ICACHE_FLASH_ATTR size_t umm_free_heap_size_core(umm_heap_context_t *_context);
extern ICACHE_FLASH_ATTR size_t umm_max_block_size_core(umm_heap_context_t *_context);
extern ICACHE_FLASH_ATTR int umm_usage_metric_core(umm_heap_context_t *_context);
extern ICACHE_FLASH_ATTR int umm_fragmentation_metric_core(umm_heap_context_t *_context);
#else
#define umm_info(p,b)
#define umm_free_heap_size() (0)
@ -312,7 +312,7 @@ typedef struct UMM_HEAP_CONTEXT umm_heap_context_t;
#if defined(UMM_STATS) || defined(UMM_STATS_FULL)
typedef struct UMM_STATISTICS_t {
#ifndef UMM_INLINE_METRICS
#ifndef UMM_INLINE_METRICS
// If we are doing UMM_INLINE_METRICS, we can move oom_count and free_blocks to
// umm_info's structure and save a little DRAM and IRAM.
// Otherwise it is defined here.
@ -320,8 +320,8 @@ typedef struct UMM_STATISTICS_t {
size_t oom_count;
#define UMM_OOM_COUNT stats.oom_count
#define UMM_FREE_BLOCKS stats.free_blocks
#endif
#ifdef UMM_STATS_FULL
#endif
#ifdef UMM_STATS_FULL
size_t free_blocks_min;
size_t free_blocks_isr_min;
size_t alloc_max_size;
@ -332,7 +332,7 @@ typedef struct UMM_STATISTICS_t {
size_t id_realloc_zero_count;
size_t id_free_count;
size_t id_free_null_count;
#endif
#endif
}
UMM_STATISTICS;
@ -344,8 +344,8 @@ UMM_STATISTICS;
#define STATS__OOM_UPDATE() _context->UMM_OOM_COUNT += 1
extern size_t umm_free_heap_size_lw( void );
extern size_t umm_get_oom_count( void );
extern size_t umm_free_heap_size_lw(void);
extern size_t umm_get_oom_count(void);
#else // not UMM_STATS or UMM_STATS_FULL
#define STATS__FREE_BLOCKS_UPDATE(s) (void)(s)
@ -353,59 +353,62 @@ extern size_t umm_get_oom_count( void );
#endif
#if defined(UMM_STATS) || defined(UMM_STATS_FULL) || defined(UMM_INFO)
size_t ICACHE_FLASH_ATTR umm_block_size( void );
size_t ICACHE_FLASH_ATTR umm_block_size(void);
#endif
#ifdef UMM_STATS_FULL
#define STATS__FREE_BLOCKS_MIN() \
do { \
if (_context->UMM_FREE_BLOCKS < _context->stats.free_blocks_min) \
do { \
if (_context->UMM_FREE_BLOCKS < _context->stats.free_blocks_min) { \
_context->stats.free_blocks_min = _context->UMM_FREE_BLOCKS; \
} while(false)
} \
} while (false)
#define STATS__FREE_BLOCKS_ISR_MIN() \
do { \
if (_context->UMM_FREE_BLOCKS < _context->stats.free_blocks_isr_min) \
do { \
if (_context->UMM_FREE_BLOCKS < _context->stats.free_blocks_isr_min) { \
_context->stats.free_blocks_isr_min = _context->UMM_FREE_BLOCKS; \
} while(false)
} \
} while (false)
#define STATS__ALLOC_REQUEST(tag, s) \
do { \
do { \
_context->stats.tag##_count += 1; \
_context->stats.last_alloc_size = s; \
if (_context->stats.alloc_max_size < s) \
if (_context->stats.alloc_max_size < s) { \
_context->stats.alloc_max_size = s; \
} while(false)
} \
} while (false)
#define STATS__ZERO_ALLOC_REQUEST(tag, s) \
do { \
do { \
_context->stats.tag##_zero_count += 1; \
} while(false)
} while (false)
#define STATS__NULL_FREE_REQUEST(tag) \
do { \
do { \
umm_heap_context_t *_context = umm_get_current_heap(); \
_context->stats.tag##_null_count += 1; \
} while(false)
} while (false)
#define STATS__FREE_REQUEST(tag) \
do { \
do { \
_context->stats.tag##_count += 1; \
} while(false)
} while (false)
size_t umm_free_heap_size_lw_min( void );
size_t umm_free_heap_size_min_reset( void );
size_t umm_free_heap_size_min( void );
size_t umm_free_heap_size_isr_min( void );
size_t umm_get_max_alloc_size( void );
size_t umm_get_last_alloc_size( void );
size_t umm_get_malloc_count( void );
size_t umm_get_malloc_zero_count( void );
size_t umm_get_realloc_count( void );
size_t umm_get_realloc_zero_count( void );
size_t umm_get_free_count( void );
size_t umm_get_free_null_count( void );
size_t umm_free_heap_size_lw_min(void);
size_t umm_free_heap_size_min_reset(void);
size_t umm_free_heap_size_min(void);
size_t umm_free_heap_size_isr_min(void);
size_t umm_get_max_alloc_size(void);
size_t umm_get_last_alloc_size(void);
size_t umm_get_malloc_count(void);
size_t umm_get_malloc_zero_count(void);
size_t umm_get_realloc_count(void);
size_t umm_get_realloc_zero_count(void);
size_t umm_get_free_count(void);
size_t umm_get_free_null_count(void);
#else // Not UMM_STATS_FULL
#define STATS__FREE_BLOCKS_MIN() (void)0
@ -472,11 +475,13 @@ static inline void _critical_entry(UMM_TIME_STAT *p, uint32_t *saved_ps) {
static inline void _critical_exit(UMM_TIME_STAT *p, uint32_t *saved_ps) {
uint32_t elapse = esp_get_cycle_count() - p->start;
if (elapse < p->min)
if (elapse < p->min) {
p->min = elapse;
}
if (elapse > p->max)
if (elapse > p->max) {
p->max = elapse;
}
xt_wsr_ps(*saved_ps);
}
@ -495,33 +500,33 @@ static inline void _critical_exit(UMM_TIME_STAT *p, uint32_t *saved_ps) {
*/
#ifdef UMM_TEST_BUILD
extern int umm_critical_depth;
extern int umm_max_critical_depth;
#define UMM_CRITICAL_ENTRY() {\
extern int umm_critical_depth;
extern int umm_max_critical_depth;
#define UMM_CRITICAL_ENTRY() { \
++umm_critical_depth; \
if (umm_critical_depth > umm_max_critical_depth) { \
umm_max_critical_depth = umm_critical_depth; \
} \
}
}
#define UMM_CRITICAL_EXIT() (umm_critical_depth--)
#else
#if defined(UMM_CRITICAL_METRICS)
#if defined(UMM_CRITICAL_METRICS)
#define UMM_CRITICAL_DECL(tag) uint32_t _saved_ps_##tag
#define UMM_CRITICAL_ENTRY(tag)_critical_entry(&time_stats.tag, &_saved_ps_##tag)
#define UMM_CRITICAL_EXIT(tag) _critical_exit(&time_stats.tag, &_saved_ps_##tag)
#define UMM_CRITICAL_WITHINISR(tag) (0 != (_saved_ps_##tag & 0x0F))
#else // ! UMM_CRITICAL_METRICS
// This method preserves the intlevel on entry and restores the
// original intlevel at exit.
#else // ! UMM_CRITICAL_METRICS
// This method preserves the intlevel on entry and restores the
// original intlevel at exit.
#define UMM_CRITICAL_DECL(tag) uint32_t _saved_ps_##tag
#define UMM_CRITICAL_ENTRY(tag) _saved_ps_##tag = xt_rsil(DEFAULT_CRITICAL_SECTION_INTLEVEL)
#define UMM_CRITICAL_EXIT(tag) xt_wsr_ps(_saved_ps_##tag)
#define UMM_CRITICAL_WITHINISR(tag) (0 != (_saved_ps_##tag & 0x0F))
#endif
#endif
#endif
/*
/*
* -D UMM_LIGHTWEIGHT_CPU
*
* The use of this macro is hardware/application specific.
@ -550,8 +555,8 @@ static inline void _critical_exit(UMM_TIME_STAT *p, uint32_t *saved_ps) {
#define UMM_CRITICAL_SUSPEND(tag) UMM_CRITICAL_EXIT(tag)
#define UMM_CRITICAL_RESUME(tag) UMM_CRITICAL_ENTRY(tag)
#else
#define UMM_CRITICAL_SUSPEND(tag) do {} while(0)
#define UMM_CRITICAL_RESUME(tag) do {} while(0)
#define UMM_CRITICAL_SUSPEND(tag) do {} while (0)
#define UMM_CRITICAL_RESUME(tag) do {} while (0)
#endif
/*
@ -594,12 +599,12 @@ static inline void _critical_exit(UMM_TIME_STAT *p, uint32_t *saved_ps) {
*/
#ifdef UMM_INTEGRITY_CHECK
extern bool umm_integrity_check( void );
# define INTEGRITY_CHECK() umm_integrity_check()
extern void umm_corruption(void);
# define UMM_HEAP_CORRUPTION_CB() DBGLOG_FUNCTION( "Heap Corruption!" )
extern bool umm_integrity_check(void);
#define INTEGRITY_CHECK() umm_integrity_check()
extern void umm_corruption(void);
#define UMM_HEAP_CORRUPTION_CB() DBGLOG_FUNCTION("Heap Corruption!")
#else
# define INTEGRITY_CHECK() (1)
#define INTEGRITY_CHECK() (1)
#endif
/////////////////////////////////////////////////
@ -669,33 +674,33 @@ static inline void _critical_exit(UMM_TIME_STAT *p, uint32_t *saved_ps) {
#define UMM_POISONED_BLOCK_LEN_TYPE uint32_t
#if defined(UMM_POISON_CHECK) || defined(UMM_POISON_CHECK_LITE)
extern void *umm_poison_malloc( size_t size );
extern void *umm_poison_calloc( size_t num, size_t size );
extern void *umm_poison_realloc( void *ptr, size_t size );
extern void umm_poison_free( void *ptr );
extern bool umm_poison_check( void );
// Local Additions to better report location in code of the caller.
void *umm_poison_realloc_fl( void *ptr, size_t size, const char* file, int line );
void umm_poison_free_fl( void *ptr, const char* file, int line );
#if defined(UMM_POISON_CHECK_LITE)
/*
extern void *umm_poison_malloc(size_t size);
extern void *umm_poison_calloc(size_t num, size_t size);
extern void *umm_poison_realloc(void *ptr, size_t size);
extern void umm_poison_free(void *ptr);
extern bool umm_poison_check(void);
// Local Additions to better report location in code of the caller.
void *umm_poison_realloc_fl(void *ptr, size_t size, const char *file, int line);
void umm_poison_free_fl(void *ptr, const char *file, int line);
#if defined(UMM_POISON_CHECK_LITE)
/*
* We can safely do individual poison checks at free and realloc and stay
* under 10us or close.
*/
# define POISON_CHECK() 1
# define POISON_CHECK_NEIGHBORS(c) \
do {\
if(!check_poison_neighbors(_context, c)) \
panic();\
} while(false)
#else
/* Not normally enabled. A full heap poison check may exceed 10us. */
# define POISON_CHECK() umm_poison_check()
# define POISON_CHECK_NEIGHBORS(c) do{}while(false)
#endif
#define POISON_CHECK() 1
#define POISON_CHECK_NEIGHBORS(c) \
do { \
if (!check_poison_neighbors(_context, c)) \
panic(); \
} while (false)
#else
# define POISON_CHECK() 1
# define POISON_CHECK_NEIGHBORS(c) do{}while(false)
/* Not normally enabled. A full heap poison check may exceed 10us. */
#define POISON_CHECK() umm_poison_check()
#define POISON_CHECK_NEIGHBORS(c) do {} while (false)
#endif
#else
#define POISON_CHECK() 1
#define POISON_CHECK_NEIGHBORS(c) do {} while (false)
#endif
@ -705,13 +710,13 @@ static inline void _critical_exit(UMM_TIME_STAT *p, uint32_t *saved_ps) {
* that can actually be allocated.
*/
#define UMM_OVERHEAD_ADJUST ( \
umm_block_size()/2 + \
umm_block_size() / 2 + \
UMM_POISON_SIZE_BEFORE + \
UMM_POISON_SIZE_AFTER + \
sizeof(UMM_POISONED_BLOCK_LEN_TYPE))
#else
#define UMM_OVERHEAD_ADJUST (umm_block_size()/2)
#define UMM_OVERHEAD_ADJUST (umm_block_size() / 2)
#endif
@ -722,9 +727,9 @@ static inline void _critical_exit(UMM_TIME_STAT *p, uint32_t *saved_ps) {
#if defined(DEBUG_ESP_PORT) || defined(DEBUG_ESP_OOM) || \
defined(UMM_POISON_CHECK) || defined(UMM_POISON_CHECK_LITE) || \
defined(UMM_INTEGRITY_CHECK)
#define DBGLOG_FUNCTION(fmt, ...) ets_uart_printf(fmt, ##__VA_ARGS__)
#define DBGLOG_FUNCTION(fmt, ...) ets_uart_printf(fmt,##__VA_ARGS__)
#else
#define DBGLOG_FUNCTION(fmt, ...) do { (void)fmt; } while(false)
#define DBGLOG_FUNCTION(fmt, ...) do { (void)fmt; } while (false)
#endif
/////////////////////////////////////////////////
@ -742,15 +747,15 @@ struct UMM_TIME_STATS_t {
UMM_TIME_STAT id_malloc;
UMM_TIME_STAT id_realloc;
UMM_TIME_STAT id_free;
#ifdef UMM_INFO
#ifdef UMM_INFO
UMM_TIME_STAT id_info;
#endif
#if defined(UMM_POISON_CHECK) || defined(UMM_POISON_CHECK_LITE)
#endif
#if defined(UMM_POISON_CHECK) || defined(UMM_POISON_CHECK_LITE)
UMM_TIME_STAT id_poison;
#endif
#ifdef UMM_INTEGRITY_CHECK
#endif
#ifdef UMM_INTEGRITY_CHECK
UMM_TIME_STAT id_integrity;
#endif
#endif
UMM_TIME_STAT id_no_tag;
};
#endif
@ -764,17 +769,17 @@ struct UMM_TIME_STATS_t {
#define umm_zalloc(s) umm_calloc(1,s)
void* malloc_loc (size_t s, const char* file, int line);
void* calloc_loc (size_t n, size_t s, const char* file, int line);
void* realloc_loc (void* p, size_t s, const char* file, int line);
void *malloc_loc(size_t s, const char *file, int line);
void *calloc_loc(size_t n, size_t s, const char *file, int line);
void *realloc_loc(void *p, size_t s, const char *file, int line);
// *alloc are macro calling *alloc_loc calling+checking umm_*alloc()
// they are defined at the bottom of this file
/////////////////////////////////////////////////
#elif defined(UMM_POISON_CHECK)
void* realloc_loc (void* p, size_t s, const char* file, int line);
void free_loc (void* p, const char* file, int line);
void *realloc_loc(void *p, size_t s, const char *file, int line);
void free_loc(void *p, const char *file, int line);
#else // !defined(ESP_DEBUG_OOM)
#endif
@ -797,11 +802,11 @@ extern "C" {
#include <pgmspace.h>
// Reuse pvPort* calls, since they already support passing location information.
// Specifically the debug version (heap_...) that does not force DRAM heap.
void* IRAM_ATTR heap_pvPortMalloc(size_t size, const char* file, int line);
void* IRAM_ATTR heap_pvPortCalloc(size_t count, size_t size, const char* file, int line);
void* IRAM_ATTR heap_pvPortRealloc(void *ptr, size_t size, const char* file, int line);
void* IRAM_ATTR heap_pvPortZalloc(size_t size, const char* file, int line);
void IRAM_ATTR heap_vPortFree(void *ptr, const char* file, int line);
void *IRAM_ATTR heap_pvPortMalloc(size_t size, const char *file, int line);
void *IRAM_ATTR heap_pvPortCalloc(size_t count, size_t size, const char *file, int line);
void *IRAM_ATTR heap_pvPortRealloc(void *ptr, size_t size, const char *file, int line);
void *IRAM_ATTR heap_pvPortZalloc(size_t size, const char *file, int line);
void IRAM_ATTR heap_vPortFree(void *ptr, const char *file, int line);
#define malloc(s) ({ static const char mem_debug_file[] PROGMEM STORE_ATTR = __FILE__; heap_pvPortMalloc(s, mem_debug_file, __LINE__); })
#define calloc(n,s) ({ static const char mem_debug_file[] PROGMEM STORE_ATTR = __FILE__; heap_pvPortCalloc(n, s, mem_debug_file, __LINE__); })
@ -815,11 +820,11 @@ void IRAM_ATTR heap_vPortFree(void *ptr, const char* file, int line);
#elif defined(UMM_POISON_CHECK) || defined(UMM_POISON_CHECK_LITE)
#include <pgmspace.h>
void* IRAM_ATTR heap_pvPortRealloc(void *ptr, size_t size, const char* file, int line);
void *IRAM_ATTR heap_pvPortRealloc(void *ptr, size_t size, const char *file, int line);
#define realloc(p,s) ({ static const char mem_debug_file[] PROGMEM STORE_ATTR = __FILE__; heap_pvPortRealloc(p, s, mem_debug_file, __LINE__); })
void IRAM_ATTR heap_vPortFree(void *ptr, const char* file, int line);
//C - to be discussed
void IRAM_ATTR heap_vPortFree(void *ptr, const char *file, int line);
// C - to be discussed
/*
Problem, I would like to report the file and line number with the umm poison
event as close as possible to the event. The #define method works for malloc,

View File

@ -13,16 +13,16 @@
* If `s` is 0, returns 0.
*/
static size_t poison_size(size_t s) {
return(s ? (UMM_POISON_SIZE_BEFORE +
return s ? (UMM_POISON_SIZE_BEFORE +
sizeof(UMM_POISONED_BLOCK_LEN_TYPE) +
UMM_POISON_SIZE_AFTER)
: 0);
: 0;
}
/*
* Print memory contents starting from given `ptr`
*/
static void dump_mem ( const void *vptr, size_t len ) {
static void dump_mem(const void *vptr, size_t len) {
const uint8_t *ptr = (const uint8_t *)vptr;
while (len--) {
DBGLOG_ERROR(" 0x%.2x", (unsigned int)(*ptr++));
@ -32,7 +32,7 @@ static void dump_mem ( const void *vptr, size_t len ) {
/*
* Put poison data at given `ptr` and `poison_size`
*/
static void put_poison( void *ptr, size_t poison_size ) {
static void put_poison(void *ptr, size_t poison_size) {
memset(ptr, POISON_BYTE, poison_size);
}
@ -43,7 +43,7 @@ static void put_poison( void *ptr, size_t poison_size ) {
* If poison is there, returns 1.
* Otherwise, prints the appropriate message, and returns 0.
*/
static bool check_poison( const void *ptr, size_t poison_size,
static bool check_poison(const void *ptr, size_t poison_size,
const char *where) {
size_t i;
bool ok = true;
@ -56,9 +56,9 @@ static bool check_poison( const void *ptr, size_t poison_size,
}
if (!ok) {
DBGLOG_ERROR( "No poison %s block at: 0x%lx, actual data:", where, (unsigned long)ptr);
DBGLOG_ERROR("No poison %s block at: 0x%lx, actual data:", where, (unsigned long)ptr);
dump_mem(ptr, poison_size);
DBGLOG_ERROR( "\n" );
DBGLOG_ERROR("\n");
}
return ok;
@ -68,11 +68,11 @@ static bool check_poison( const void *ptr, size_t poison_size,
* Check if a block is properly poisoned. Must be called only for non-free
* blocks.
*/
static bool check_poison_block( umm_block *pblock ) {
static bool check_poison_block(umm_block *pblock) {
bool ok = true;
if (pblock->header.used.next & UMM_FREELIST_MASK) {
DBGLOG_ERROR( "check_poison_block is called for free block 0x%lx\n", (unsigned long)pblock);
DBGLOG_ERROR("check_poison_block is called for free block 0x%lx\n", (unsigned long)pblock);
} else {
/* the block is used; let's check poison */
unsigned char *pc = (unsigned char *)pblock->body.data;
@ -102,7 +102,7 @@ clean:
*
* `size_w_poison` is a size of the whole block, including a poison.
*/
static void *get_poisoned( void *vptr, size_t size_w_poison ) {
static void *get_poisoned(void *vptr, size_t size_w_poison) {
unsigned char *ptr = (unsigned char *)vptr;
if (size_w_poison != 0 && ptr != NULL) {
@ -129,7 +129,7 @@ static void *get_poisoned( void *vptr, size_t size_w_poison ) {
*
* Returns unpoisoned pointer, i.e. actual pointer to the allocated memory.
*/
static void *get_unpoisoned( void *vptr ) {
static void *get_unpoisoned(void *vptr) {
uintptr_t ptr = (uintptr_t)vptr;
if (ptr != 0) {
@ -137,13 +137,13 @@ static void *get_unpoisoned( void *vptr ) {
ptr -= (sizeof(UMM_POISONED_BLOCK_LEN_TYPE) + UMM_POISON_SIZE_BEFORE);
umm_heap_context_t *_context = umm_get_ptr_context( vptr );
umm_heap_context_t *_context = umm_get_ptr_context(vptr);
if (NULL == _context) {
panic();
return NULL;
}
/* Figure out which block we're in. Note the use of truncated division... */
c = (ptr - (uintptr_t)(&(_context->heap[0])))/sizeof(umm_block);
c = (ptr - (uintptr_t)(&(_context->heap[0]))) / sizeof(umm_block);
check_poison_block(&UMM_BLOCK(c));
}
@ -155,12 +155,12 @@ static void *get_unpoisoned( void *vptr ) {
/* ------------------------------------------------------------------------ */
void *umm_poison_malloc( size_t size ) {
void *umm_poison_malloc(size_t size) {
void *ret;
size += poison_size(size);
ret = umm_malloc( size );
ret = umm_malloc(size);
ret = get_poisoned(ret, size);
@ -169,7 +169,7 @@ void *umm_poison_malloc( size_t size ) {
/* ------------------------------------------------------------------------ */
void *umm_poison_calloc( size_t num, size_t item_size ) {
void *umm_poison_calloc(size_t num, size_t item_size) {
void *ret;
size_t size = item_size * num;
@ -177,8 +177,9 @@ void *umm_poison_calloc( size_t num, size_t item_size ) {
ret = umm_malloc(size);
if (NULL != ret)
if (NULL != ret) {
memset(ret, 0x00, size);
}
ret = get_poisoned(ret, size);
@ -187,13 +188,13 @@ void *umm_poison_calloc( size_t num, size_t item_size ) {
/* ------------------------------------------------------------------------ */
void *umm_poison_realloc( void *ptr, size_t size ) {
void *umm_poison_realloc(void *ptr, size_t size) {
void *ret;
ptr = get_unpoisoned(ptr);
size += poison_size(size);
ret = umm_realloc( ptr, size );
ret = umm_realloc(ptr, size);
ret = get_poisoned(ret, size);
@ -202,11 +203,11 @@ void *umm_poison_realloc( void *ptr, size_t size ) {
/* ------------------------------------------------------------------------ */
void umm_poison_free( void *ptr ) {
void umm_poison_free(void *ptr) {
ptr = get_unpoisoned(ptr);
umm_free( ptr );
umm_free(ptr);
}
/*
@ -227,11 +228,11 @@ bool umm_poison_check(void) {
/* Now iterate through the blocks list */
cur = UMM_NBLOCK(0) & UMM_BLOCKNO_MASK;
while( UMM_NBLOCK(cur) & UMM_BLOCKNO_MASK ) {
if ( !(UMM_NBLOCK(cur) & UMM_FREELIST_MASK) ) {
while (UMM_NBLOCK(cur) & UMM_BLOCKNO_MASK) {
if (!(UMM_NBLOCK(cur) & UMM_FREELIST_MASK)) {
/* This is a used block (not free), so, check its poison */
ok = check_poison_block(&UMM_BLOCK(cur));
if (!ok){
if (!ok) {
break;
}
}