
umm_malloc manual merge with upstream (#7337)

* umm_malloc manual merge with upstream

* Fix divide by zero, case when heap is 100% allocated.

* Removed extra line.

* Fixed block count for debug builds. This resolves OOM events for debug builds.
Corrected array overstepping when freeing.

* Handle another corner case in example HeapMetric.ino.
Comment corrections.

* Revert - ESP.getMaxFreeBlockSize() is back to indicating the size of a
contiguous block of memory before the umm_malloc overhead is removed.

* Stale code cleanup and comment improvements
This commit is contained in:
M Hightower 2020-06-07 20:00:15 -07:00 committed by GitHub
parent 0d04124b94
commit 83523c0259
12 changed files with 572 additions and 245 deletions

View File

@ -29,20 +29,28 @@ void EspClass::getHeapStats(uint32_t* hfree, uint16_t* hmax, uint8_t* hfrag)
// Having getFreeHeap()=sum(hole-size), fragmentation is given by
// 100 * (1 - sqrt(sum(hole-size²)) / sum(hole-size))
umm_info(NULL, 0);
umm_info(NULL, false);
uint8_t block_size = umm_block_size();
uint32_t fh = ummHeapInfo.freeBlocks * block_size;
if (hfree)
*hfree = fh;
*hfree = ummHeapInfo.freeBlocks * block_size;
if (hmax)
*hmax = ummHeapInfo.maxFreeContiguousBlocks * block_size;
if (hfrag)
*hfrag = 100 - (sqrt32(ummHeapInfo.freeSize2) * 100) / fh;
*hmax = (uint16_t)ummHeapInfo.maxFreeContiguousBlocks * block_size;
if (hfrag) {
if (ummHeapInfo.freeBlocks) {
*hfrag = 100 - (sqrt32(ummHeapInfo.freeBlocksSquared) * 100) / ummHeapInfo.freeBlocks;
} else {
*hfrag = 0;
}
}
}
uint8_t EspClass::getHeapFragmentation()
{
#ifdef UMM_INLINE_METRICS
return (uint8_t)umm_fragmentation_metric();
#else
uint8_t hfrag;
getHeapStats(nullptr, nullptr, &hfrag);
return hfrag;
#endif
}
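
A quick way to see what the fragmentation formula in the comment above means in practice: the following standalone sketch (not part of the commit; fragmentation() is a hypothetical stand-in for the hfrag computation) compares one large free hole against several small holes that add up to the same total.

// Hypothetical stand-alone check of the fragmentation formula (not from the commit).
#include <cstdint>
#include <cstdio>
#include <cmath>

static int fragmentation(uint32_t freeBlocks, uint32_t freeBlocksSquared) {
  if (freeBlocks == 0) {
    return 0;                 // mirrors the divide-by-zero guard added above
  }
  return 100 - (int)(std::sqrt((double)freeBlocksSquared) * 100) / (int)freeBlocks;
}

int main() {
  // One contiguous hole of 32 blocks: 100 - sqrt(1024)*100/32 = 0% fragmented.
  std::printf("single hole: %d%%\n", fragmentation(32, 32 * 32));
  // Four separate holes of 8 blocks each: 100 - sqrt(256)*100/32 = 50% fragmented.
  std::printf("four holes : %d%%\n", fragmentation(32, 4 * 8 * 8));
  return 0;
}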

View File

@ -314,7 +314,7 @@ size_t ICACHE_RAM_ATTR xPortWantedSizeAlign(size_t size)
void system_show_malloc(void)
{
umm_info(NULL, 1);
umm_info(NULL, true);
}
};

View File

@ -11,7 +11,7 @@
* ----------------------------------------------------------------------------
* NOTE WELL that this file may be included multiple times - this allows you
* to set the trace level #define DBGLOG_LEVEL x
*
*
* To update which of the DBGLOG macros are compiled in, you must redefine the
* DBGLOG_LEVEL macro and then include the dbglog.h file again, like this:
*
@ -57,6 +57,8 @@
# define DBGLOG_FUNCTION printf
#endif
#define DBGLOG_32_BIT_PTR(x) ((uint32_t)(((uintptr_t)(x)) & 0xffffffff))
/* ------------------------------------------------------------------------- */
#if DBGLOG_LEVEL >= 6
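
The multi-include note above can be used roughly like the hypothetical fragment below (not part of the commit); DBGLOG_TRACE only produces output once DBGLOG_LEVEL reaches 6, and DBGLOG_FUNCTION falls back to printf when not overridden.

#include <stdio.h>          /* DBGLOG_FUNCTION defaults to printf */

#undef  DBGLOG_LEVEL
#define DBGLOG_LEVEL 6      /* most verbose level for this translation unit */
#include "dbglog/dbglog.h"  /* re-evaluates the DBGLOG_* macros at the new level */

static void traced_example(void) {
  DBGLOG_TRACE("now compiled in at trace level %d\n", DBGLOG_LEVEL);
}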

View File

@ -2,6 +2,12 @@
#ifdef UMM_INFO
#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>
#include <math.h>
/* ----------------------------------------------------------------------------
* One of the coolest things about this little library is that it's VERY
* easy to get debug information about the memory heap by simply iterating
@ -19,15 +25,15 @@
UMM_HEAP_INFO ummHeapInfo;
void *umm_info( void *ptr, int force ) {
void *umm_info( void *ptr, bool force ) {
UMM_CRITICAL_DECL(id_info);
unsigned short int blockNo = 0;
if (umm_heap == NULL) {
if(umm_heap == NULL) {
umm_init();
}
uint16_t blockNo = 0;
/* Protect the critical section... */
UMM_CRITICAL_ENTRY(id_info);
@ -40,7 +46,7 @@ void *umm_info( void *ptr, int force ) {
DBGLOG_FORCE( force, "\n" );
DBGLOG_FORCE( force, "+----------+-------+--------+--------+-------+--------+--------+\n" );
DBGLOG_FORCE( force, "|0x%08lx|B %5d|NB %5d|PB %5d|Z %5d|NF %5d|PF %5d|\n",
(unsigned long)(&UMM_BLOCK(blockNo)),
DBGLOG_32_BIT_PTR(&UMM_BLOCK(blockNo)),
blockNo,
UMM_NBLOCK(blockNo) & UMM_BLOCKNO_MASK,
UMM_PBLOCK(blockNo),
@ -67,21 +73,18 @@ void *umm_info( void *ptr, int force ) {
if( UMM_NBLOCK(blockNo) & UMM_FREELIST_MASK ) {
++ummHeapInfo.freeEntries;
ummHeapInfo.freeBlocks += curBlocks;
ummHeapInfo.freeSize2 += (unsigned int)curBlocks
* (unsigned int)sizeof(umm_block)
* (unsigned int)curBlocks
* (unsigned int)sizeof(umm_block);
ummHeapInfo.freeBlocksSquared += (curBlocks * curBlocks);
if (ummHeapInfo.maxFreeContiguousBlocks < curBlocks) {
ummHeapInfo.maxFreeContiguousBlocks = curBlocks;
}
DBGLOG_FORCE( force, "|0x%08lx|B %5d|NB %5d|PB %5d|Z %5u|NF %5d|PF %5d|\n",
(unsigned long)(&UMM_BLOCK(blockNo)),
DBGLOG_32_BIT_PTR(&UMM_BLOCK(blockNo)),
blockNo,
UMM_NBLOCK(blockNo) & UMM_BLOCKNO_MASK,
UMM_PBLOCK(blockNo),
(unsigned int)curBlocks,
(uint16_t)curBlocks,
UMM_NFREE(blockNo),
UMM_PFREE(blockNo) );
@ -99,33 +102,25 @@ void *umm_info( void *ptr, int force ) {
ummHeapInfo.usedBlocks += curBlocks;
DBGLOG_FORCE( force, "|0x%08lx|B %5d|NB %5d|PB %5d|Z %5u|\n",
(unsigned long)(&UMM_BLOCK(blockNo)),
DBGLOG_32_BIT_PTR(&UMM_BLOCK(blockNo)),
blockNo,
UMM_NBLOCK(blockNo) & UMM_BLOCKNO_MASK,
UMM_PBLOCK(blockNo),
(unsigned int)curBlocks );
(uint16_t)curBlocks );
}
blockNo = UMM_NBLOCK(blockNo) & UMM_BLOCKNO_MASK;
}
/*
* Update the accounting totals with information from the last block, the
* rest must be free!
* The very last block is used as a placeholder to indicate that
* there are no more blocks in the heap, so it cannot be used
* for anything - at the same time, the size of this block must
* ALWAYS be exactly 1 !
*/
{
size_t curBlocks = UMM_NUMBLOCKS-blockNo;
ummHeapInfo.freeBlocks += curBlocks;
ummHeapInfo.totalBlocks += curBlocks;
if (ummHeapInfo.maxFreeContiguousBlocks < curBlocks) {
ummHeapInfo.maxFreeContiguousBlocks = curBlocks;
}
}
DBGLOG_FORCE( force, "|0x%08lx|B %5d|NB %5d|PB %5d|Z %5d|NF %5d|PF %5d|\n",
(unsigned long)(&UMM_BLOCK(blockNo)),
DBGLOG_32_BIT_PTR(&UMM_BLOCK(blockNo)),
blockNo,
UMM_NBLOCK(blockNo) & UMM_BLOCKNO_MASK,
UMM_PBLOCK(blockNo),
@ -147,7 +142,13 @@ void *umm_info( void *ptr, int force ) {
DBGLOG_FORCE( force, "+--------------------------------------------------------------+\n" );
DBGLOG_FORCE( force, "Usage Metric: %5d\n", umm_usage_metric());
DBGLOG_FORCE( force, "Fragmentation Metric: %5d\n", umm_fragmentation_metric());
DBGLOG_FORCE( force, "+--------------------------------------------------------------+\n" );
#if defined(UMM_STATS) || defined(UMM_STATS_FULL)
#if !defined(UMM_INLINE_METRICS)
if (ummHeapInfo.freeBlocks == ummStats.free_blocks) {
DBGLOG_FORCE( force, "heap info Free blocks and heap statistics Free blocks match.\n");
} else {
@ -156,6 +157,7 @@ void *umm_info( void *ptr, int force ) {
ummStats.free_blocks );
}
DBGLOG_FORCE( force, "+--------------------------------------------------------------+\n" );
#endif
print_stats(force);
#endif
@ -169,17 +171,74 @@ void *umm_info( void *ptr, int force ) {
/* ------------------------------------------------------------------------ */
size_t umm_free_heap_size( void ) {
umm_info(NULL, 0);
#ifndef UMM_INLINE_METRICS
umm_info(NULL, false);
#endif
return (size_t)ummHeapInfo.freeBlocks * sizeof(umm_block);
}
//C Breaking change in upstream: umm_max_block_size() was changed to
//C umm_max_free_block_size(). Keeping the old function name for (dot) releases.
//C TODO: update at the next major release.
//C size_t umm_max_free_block_size( void ) {
size_t umm_max_block_size( void ) {
umm_info(NULL, 0);
umm_info(NULL, false);
return ummHeapInfo.maxFreeContiguousBlocks * sizeof(umm_block);
}
/* ------------------------------------------------------------------------ */
/*
Without build option UMM_INLINE_METRICS, calls to umm_usage_metric() or
umm_fragmentation_metric() must be preceded by a call to umm_info(NULL, false)
for updated results.
*/
int umm_usage_metric( void ) {
#ifndef UMM_INLINE_METRICS
umm_info(NULL, false);
#endif
DBGLOG_DEBUG( "usedBlocks %d totalBlocks %d\n", umm_metrics.usedBlocks, ummHeapInfo.totalBlocks);
if (ummHeapInfo.freeBlocks)
return (int)((ummHeapInfo.usedBlocks * 100)/(ummHeapInfo.freeBlocks));
return -1; // no freeBlocks
}
uint32_t sqrt32 (uint32_t n);
int umm_fragmentation_metric( void ) {
#ifndef UMM_INLINE_METRICS
umm_info(NULL, false);
#endif
DBGLOG_DEBUG( "freeBlocks %d freeBlocksSquared %d\n", umm_metrics.freeBlocks, ummHeapInfo.freeBlocksSquared);
if (0 == ummHeapInfo.freeBlocks) {
return 0;
} else {
//upstream version: return (100 - (((uint32_t)(sqrtf(ummHeapInfo.freeBlocksSquared)) * 100)/(ummHeapInfo.freeBlocks)));
return (100 - (((uint32_t)(sqrt32(ummHeapInfo.freeBlocksSquared)) * 100)/(ummHeapInfo.freeBlocks)));
}
}
#ifdef UMM_INLINE_METRICS
static void umm_fragmentation_metric_init( void ) {
ummHeapInfo.freeBlocks = UMM_NUMBLOCKS - 2;
ummHeapInfo.freeBlocksSquared = ummHeapInfo.freeBlocks * ummHeapInfo.freeBlocks;
}
static void umm_fragmentation_metric_add( uint16_t c ) {
uint16_t blocks = (UMM_NBLOCK(c) & UMM_BLOCKNO_MASK) - c;
DBGLOG_DEBUG( "Add block %d size %d to free metric\n", c, blocks);
ummHeapInfo.freeBlocks += blocks;
ummHeapInfo.freeBlocksSquared += (blocks * blocks);
}
static void umm_fragmentation_metric_remove( uint16_t c ) {
uint16_t blocks = (UMM_NBLOCK(c) & UMM_BLOCKNO_MASK) - c;
DBGLOG_DEBUG( "Remove block %d size %d from free metric\n", c, blocks);
ummHeapInfo.freeBlocks -= blocks;
ummHeapInfo.freeBlocksSquared -= (blocks * blocks);
}
#endif // UMM_INLINE_METRICS
/* ------------------------------------------------------------------------ */
#endif
#endif // defined(BUILD_UMM_MALLOC_C)
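
From sketch code the two metric functions can be used as in the hypothetical fragment below (not part of the commit; reportHeapHealth() is an invented name). Without UMM_INLINE_METRICS each call walks the whole heap via umm_info(NULL, false), so they are best kept out of time-critical paths.

#include <Arduino.h>
#include <umm_malloc/umm_malloc.h>

void reportHeapHealth() {
  int usage = umm_usage_metric();         // used/free block ratio, -1 when no free blocks
  int frag  = umm_fragmentation_metric(); // 0 = one large hole, larger = more splintered
  Serial.printf("usage %d  fragmentation %d\n", usage, frag);
}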

View File

@ -1,6 +1,10 @@
#if defined(BUILD_UMM_MALLOC_C)
/* integrity check (UMM_INTEGRITY_CHECK) {{{ */
#if defined(UMM_INTEGRITY_CHECK)
#include <stdint.h>
#include <stdbool.h>
/*
* Perform integrity check of the whole heap data. Returns 1 in case of
* success, 0 otherwise.
@ -23,11 +27,11 @@
* This way, we ensure that the free flag is in sync with the free pointers
* chain.
*/
int umm_integrity_check(void) {
bool umm_integrity_check(void) {
UMM_CRITICAL_DECL(id_integrity);
int ok = 1;
unsigned short int prev;
unsigned short int cur;
bool ok = true;
uint16_t prev;
uint16_t cur;
if (umm_heap == NULL) {
umm_init();
@ -42,9 +46,9 @@ int umm_integrity_check(void) {
/* Check that next free block number is valid */
if (cur >= UMM_NUMBLOCKS) {
DBGLOG_FUNCTION("heap integrity broken: too large next free num: %d "
"(in block %d, addr 0x%lx)\n", cur, prev,
(unsigned long)&UMM_NBLOCK(prev));
ok = 0;
"(in block %d, addr 0x%08x)\n", cur, prev,
DBGLOG_32_BIT_PTR(&UMM_NBLOCK(prev)));
ok = false;
goto clean;
}
if (cur == 0) {
@ -57,7 +61,7 @@ int umm_integrity_check(void) {
DBGLOG_FUNCTION("heap integrity broken: free links don't match: "
"%d -> %d, but %d -> %d\n",
prev, cur, cur, UMM_PFREE(cur));
ok = 0;
ok = false;
goto clean;
}
@ -74,9 +78,9 @@ int umm_integrity_check(void) {
/* Check that next block number is valid */
if (cur >= UMM_NUMBLOCKS) {
DBGLOG_FUNCTION("heap integrity broken: too large next block num: %d "
"(in block %d, addr 0x%lx)\n", cur, prev,
(unsigned long)&UMM_NBLOCK(prev));
ok = 0;
"(in block %d, addr 0x%08x)\n", cur, prev,
DBGLOG_32_BIT_PTR(&UMM_NBLOCK(prev)));
ok = false;
goto clean;
}
if (cur == 0) {
@ -88,21 +92,20 @@ int umm_integrity_check(void) {
if ((UMM_NBLOCK(cur) & UMM_FREELIST_MASK)
!= (UMM_PBLOCK(cur) & UMM_FREELIST_MASK))
{
DBGLOG_FUNCTION("heap integrity broken: mask wrong at addr 0x%lx: n=0x%x, p=0x%x\n",
(unsigned long)&UMM_NBLOCK(cur),
DBGLOG_FUNCTION("heap integrity broken: mask wrong at addr 0x%08x: n=0x%x, p=0x%x\n",
DBGLOG_32_BIT_PTR(&UMM_NBLOCK(cur)),
(UMM_NBLOCK(cur) & UMM_FREELIST_MASK),
(UMM_PBLOCK(cur) & UMM_FREELIST_MASK)
);
ok = 0;
(UMM_PBLOCK(cur) & UMM_FREELIST_MASK));
ok = false;
goto clean;
}
/* make sure the block list is sequential */
if (cur <= prev ) {
DBGLOG_FUNCTION("heap integrity broken: next block %d is before prev this one "
"(in block %d, addr 0x%lx)\n", cur, prev,
(unsigned long)&UMM_NBLOCK(prev));
ok = 0;
"(in block %d, addr 0x%08x)\n", cur, prev,
DBGLOG_32_BIT_PTR(&UMM_NBLOCK(prev)));
ok = false;
goto clean;
}
@ -114,7 +117,7 @@ int umm_integrity_check(void) {
DBGLOG_FUNCTION("heap integrity broken: block links don't match: "
"%d -> %d, but %d -> %d\n",
prev, cur, cur, UMM_PBLOCK(cur));
ok = 0;
ok = false;
goto clean;
}
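
A hypothetical sketch fragment (not part of the commit; verifyHeapLinks() is an invented name) showing how the check might be called. umm_integrity_check() only exists when the core is built with -DUMM_INTEGRITY_CHECK, and with this change it returns bool instead of int.

#include <Arduino.h>
#include <umm_malloc/umm_malloc.h>

#if defined(UMM_INTEGRITY_CHECK)
void verifyHeapLinks() {
  if (!umm_integrity_check()) {   // details of the failure are printed by the check itself
    Serial.println(F("heap integrity broken"));
  }
}
#endif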

View File

@ -42,11 +42,11 @@ bool ICACHE_FLASH_ATTR get_umm_get_perf_data(UMM_TIME_STATS *p, size_t size)
#if defined(UMM_POISON_CHECK_LITE)
// We skip this when doing the full poison check.
static int check_poison_neighbors( unsigned short cur ) {
unsigned short int c;
static bool check_poison_neighbors( uint16_t cur ) {
uint16_t c;
if ( 0 == cur )
return 1;
return true;
c = UMM_PBLOCK(cur) & UMM_BLOCKNO_MASK;
while( c && (UMM_NBLOCK(c) & UMM_BLOCKNO_MASK) ) {
@ -57,7 +57,7 @@ static int check_poison_neighbors( unsigned short cur ) {
*/
if ( !(UMM_NBLOCK(c) & UMM_FREELIST_MASK) ) {
if ( !check_poison_block(&UMM_BLOCK(c)) )
return 0;
return false;
break;
}
@ -69,7 +69,7 @@ static int check_poison_neighbors( unsigned short cur ) {
while( (UMM_NBLOCK(c) & UMM_BLOCKNO_MASK) ) {
if ( !(UMM_NBLOCK(c) & UMM_FREELIST_MASK) ) {
if ( !check_poison_block(&UMM_BLOCK(c)) )
return 0;
return false;
break;
}
@ -77,7 +77,7 @@ static int check_poison_neighbors( unsigned short cur ) {
c = UMM_NBLOCK(c) & UMM_BLOCKNO_MASK;
}
return 1;
return true;
}
#endif
@ -85,20 +85,20 @@ static int check_poison_neighbors( unsigned short cur ) {
/* ------------------------------------------------------------------------ */
static void *get_unpoisoned_check_neighbors( void *v_ptr, const char* file, int line ) {
unsigned char *ptr = (unsigned char *)v_ptr;
static void *get_unpoisoned_check_neighbors( void *vptr, const char* file, int line ) {
uintptr_t ptr = (uintptr_t)vptr;
if (ptr != NULL) {
if (ptr != 0) {
ptr -= (sizeof(UMM_POISONED_BLOCK_LEN_TYPE) + UMM_POISON_SIZE_BEFORE);
#if defined(UMM_POISON_CHECK_LITE)
UMM_CRITICAL_DECL(id_poison);
unsigned short int c;
uint16_t c;
bool poison = false;
/* Figure out which block we're in. Note the use of truncated division... */
c = (((char *)ptr)-(char *)(&(umm_heap[0])))/sizeof(umm_block);
c = (ptr - (uintptr_t)(&(umm_heap[0])))/sizeof(umm_block);
UMM_CRITICAL_ENTRY(id_poison);
poison = check_poison_block(&UMM_BLOCK(c)) && check_poison_neighbors(c);
@ -157,16 +157,17 @@ size_t umm_block_size( void ) {
}
#endif
#if defined(UMM_STATS) || defined(UMM_STATS_FULL)
#if (!defined(UMM_INLINE_METRICS) && defined(UMM_STATS)) || defined(UMM_STATS_FULL)
UMM_STATISTICS ummStats;
#endif
#if defined(UMM_STATS) || defined(UMM_STATS_FULL)
// Keep complete call path in IRAM
size_t umm_free_heap_size_lw( void ) {
if (umm_heap == NULL) {
umm_init();
}
return (size_t)ummStats.free_blocks * sizeof(umm_block);
return (size_t)UMM_FREE_BLOCKS * sizeof(umm_block);
}
#endif
@ -178,15 +179,17 @@ size_t umm_free_heap_size_lw( void ) {
#if defined(UMM_STATS) || defined(UMM_STATS_FULL)
size_t xPortGetFreeHeapSize(void) __attribute__ ((alias("umm_free_heap_size_lw")));
#elif defined(UMM_INFO)
#ifndef UMM_INLINE_METRICS
#warning "No ISR safe function available to implement xPortGetFreeHeapSize()"
#endif
size_t xPortGetFreeHeapSize(void) __attribute__ ((alias("umm_free_heap_size")));
#endif
#if defined(UMM_STATS) || defined(UMM_STATS_FULL)
void print_stats(int force) {
DBGLOG_FORCE( force, "umm heap statistics:\n");
DBGLOG_FORCE( force, " Free Space %5u\n", ummStats.free_blocks * sizeof(umm_block));
DBGLOG_FORCE( force, " OOM Count %5u\n", ummStats.oom_count);
DBGLOG_FORCE( force, " Raw Free Space %5u\n", UMM_FREE_BLOCKS * sizeof(umm_block));
DBGLOG_FORCE( force, " OOM Count %5u\n", UMM_OOM_COUNT);
#if defined(UMM_STATS_FULL)
DBGLOG_FORCE( force, " Low Watermark %5u\n", ummStats.free_blocks_min * sizeof(umm_block));
DBGLOG_FORCE( force, " Low Watermark ISR %5u\n", ummStats.free_blocks_isr_min * sizeof(umm_block));
@ -197,8 +200,6 @@ void print_stats(int force) {
}
#endif
int ICACHE_FLASH_ATTR umm_info_safe_printf_P(const char *fmt, ...) {
/*
To use ets_strlen() and ets_strcpy() safely with PROGMEM, flash storage,
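
The "keep complete call path in IRAM" point above is what makes umm_free_heap_size_lw() usable where umm_info()-based queries are not; a hypothetical fragment (not part of the commit; onTimerISR() and freeAtIrq are invented names):

#include <Arduino.h>
#include <umm_malloc/umm_malloc.h>

static volatile size_t freeAtIrq;

void ICACHE_RAM_ATTR onTimerISR() {
  freeAtIrq = umm_free_heap_size_lw();   // no heap walk and no flash access in the ISR
}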

View File

@ -15,7 +15,7 @@
#define memset ets_memset
/*
/*
* This redefines DBGLOG_FORCE defined in dbglog/dbglog.h
* Just for printing from umm_info() which is assumed to always be called from
* non-ISR. Thus SPI bus is available to handle cache-miss and reading a flash
@ -37,7 +37,7 @@
#if defined(UMM_POISON_CHECK_LITE)
static int check_poison_neighbors( unsigned short cur );
static bool check_poison_neighbors( uint16_t cur );
#endif
@ -51,5 +51,4 @@ int ICACHE_FLASH_ATTR umm_info_safe_printf_P(const char *fmt, ...) __attribute__
#define UMM_INFO_PRINTF(fmt, ...) umm_info_safe_printf_P(PSTR4(fmt), ##__VA_ARGS__)
// use PSTR4() instead of PSTR() to ensure 4-bytes alignment in Flash, whatever the default alignment of PSTR_ALIGN
#endif

View File

@ -28,6 +28,10 @@
* wrappers that use critical section protection macros
* and static core functions that assume they are
* running in a protected context. Thanks @devyte
* R.Hempel 2020-01-07 - Add support for Fragmentation metric - See Issue 14
* R.Hempel 2020-01-12 - Use explicitly sized values from stdint.h - See Issue 15
* R.Hempel 2020-01-20 - Move metric functions back to umm_info - See Issue 29
* R.Hempel 2020-02-01 - Macro functions are uppercased - See Issue 34
* ----------------------------------------------------------------------------
*/
@ -41,17 +45,17 @@
/*
* Added for use with Arduino ESP8266 and handling the renaming to umm_malloc.cpp
*/
#define BUILD_UMM_MALLOC_C
extern "C" {
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include "umm_malloc.h"
#include "umm_malloc_cfg.h" /* user-dependent */
#include "umm_malloc.h"
/* Use the default DBGLOG_LEVEL and DBGLOG_FUNCTION */
@ -61,13 +65,25 @@ extern "C" {
#include "dbglog/dbglog.h"
//C This change is new in upstream umm_malloc. I think this would have created a
//C breaking change. Keeping the old #define method in umm_malloc_cfg.h.
//C I don't see a simple way of making it work. We would have to run code before
//C the SDK has run to set a value for uint32_t UMM_MALLOC_CFG_HEAP_SIZE.
//C On the other hand, a manual call to umm_init() before anything else has had a
//C chance to run would mean that all those calls testing to see if the heap has
//C been initialized at every umm_malloc API could be removed.
//C
//C before starting the NON OS SDK
//C extern void *UMM_MALLOC_CFG_HEAP_ADDR;
//C extern uint32_t UMM_MALLOC_CFG_HEAP_SIZE;
#include "umm_local.h" // target-dependent supplemental
/* ------------------------------------------------------------------------- */
UMM_H_ATTPACKPRE typedef struct umm_ptr_t {
unsigned short int next;
unsigned short int prev;
uint16_t next;
uint16_t prev;
} UMM_H_ATTPACKSUF umm_ptr;
@ -77,29 +93,36 @@ UMM_H_ATTPACKPRE typedef struct umm_block_t {
} header;
union {
umm_ptr free;
unsigned char data[4];
uint8_t data[4];
} body;
} UMM_H_ATTPACKSUF umm_block;
#define UMM_FREELIST_MASK (0x8000)
#define UMM_BLOCKNO_MASK (0x7FFF)
#define UMM_FREELIST_MASK ((uint16_t)(0x8000))
#define UMM_BLOCKNO_MASK ((uint16_t)(0x7FFF))
/* ------------------------------------------------------------------------- */
umm_block *umm_heap = NULL;
unsigned short int umm_numblocks = 0;
uint16_t umm_numblocks = 0;
#define UMM_NUMBLOCKS (umm_numblocks)
#define UMM_NUMBLOCKS (umm_numblocks)
#define UMM_BLOCK_LAST (UMM_NUMBLOCKS - 1)
/* ------------------------------------------------------------------------ */
/* -------------------------------------------------------------------------
* These macros evaluate to the address of the block and data respectively
*/
#define UMM_BLOCK(b) (umm_heap[b])
#define UMM_DATA(b) (UMM_BLOCK(b).body.data)
/* -------------------------------------------------------------------------
* These macros evaluate to the index of the block - NOT the address!!!
*/
#define UMM_NBLOCK(b) (UMM_BLOCK(b).header.used.next)
#define UMM_PBLOCK(b) (UMM_BLOCK(b).header.used.prev)
#define UMM_NFREE(b) (UMM_BLOCK(b).body.free.next)
#define UMM_PFREE(b) (UMM_BLOCK(b).body.free.prev)
#define UMM_DATA(b) (UMM_BLOCK(b).body.data)
/* -------------------------------------------------------------------------
* There are additional files that may be included here - normally it's
@ -116,7 +139,7 @@ unsigned short int umm_numblocks = 0;
/* ------------------------------------------------------------------------ */
static unsigned short int umm_blocks( size_t size ) {
static uint16_t umm_blocks( size_t size ) {
/*
* The calculation of the block size is not too difficult, but there are
@ -149,9 +172,9 @@ static unsigned short int umm_blocks( size_t size ) {
*
* Note that free pointers are NOT modified by this function.
*/
static void umm_split_block( unsigned short int c,
unsigned short int blocks,
unsigned short int new_freemask ) {
static void umm_split_block( uint16_t c,
uint16_t blocks,
uint16_t new_freemask ) {
UMM_NBLOCK(c+blocks) = (UMM_NBLOCK(c) & UMM_BLOCKNO_MASK) | new_freemask;
UMM_PBLOCK(c+blocks) = c;
@ -162,7 +185,7 @@ static void umm_split_block( unsigned short int c,
/* ------------------------------------------------------------------------ */
static void umm_disconnect_from_free_list( unsigned short int c ) {
static void umm_disconnect_from_free_list( uint16_t c ) {
/* Disconnect this block from the FREE list */
UMM_NFREE(UMM_PFREE(c)) = UMM_NFREE(c);
@ -174,13 +197,17 @@ static void umm_disconnect_from_free_list( unsigned short int c ) {
}
/* ------------------------------------------------------------------------
* The umm_assimilate_up() function assumes that UMM_NBLOCK(c) does NOT
* have the UMM_FREELIST_MASK bit set!
* The umm_assimilate_up() function does not assume that UMM_NBLOCK(c)
* has the UMM_FREELIST_MASK bit set. It only assimilates up if the
* next block is free.
*/
static void umm_assimilate_up( unsigned short int c ) {
static void umm_assimilate_up( uint16_t c ) {
if( UMM_NBLOCK(UMM_NBLOCK(c)) & UMM_FREELIST_MASK ) {
UMM_FRAGMENTATION_METRIC_REMOVE( UMM_NBLOCK(c) );
/*
* The next block is a free block, so assimilate up and remove it from
* the free list
@ -201,14 +228,30 @@ static void umm_assimilate_up( unsigned short int c ) {
/* ------------------------------------------------------------------------
* The umm_assimilate_down() function assumes that UMM_NBLOCK(c) does NOT
* have the UMM_FREELIST_MASK bit set!
* have the UMM_FREELIST_MASK bit set. In other words, try to assimilate
* up before assimilating down.
*/
static unsigned short int umm_assimilate_down( unsigned short int c, unsigned short int freemask ) {
static uint16_t umm_assimilate_down( uint16_t c, uint16_t freemask ) {
// We are going to assimilate down to the previous block because
// it was free, so remove it from the fragmentation metric
UMM_FRAGMENTATION_METRIC_REMOVE(UMM_PBLOCK(c));
UMM_NBLOCK(UMM_PBLOCK(c)) = UMM_NBLOCK(c) | freemask;
UMM_PBLOCK(UMM_NBLOCK(c)) = UMM_PBLOCK(c);
if (freemask) {
// We are going to free the entire assimilated block
// so add it to the fragmentation metric. A good
// compiler will optimize away the empty if statement
// when UMM_INFO is not defined, so don't worry about
// guarding it.
UMM_FRAGMENTATION_METRIC_ADD(UMM_PBLOCK(c));
}
return( UMM_PBLOCK(c) );
}
@ -221,62 +264,54 @@ void umm_init( void ) {
memset(umm_heap, 0x00, UMM_MALLOC_CFG_HEAP_SIZE);
/* setup initial blank heap structure */
{
/* index of the 0th `umm_block` */
const unsigned short int block_0th = 0;
/* index of the 1st `umm_block` */
const unsigned short int block_1th = 1;
/* index of the latest `umm_block` */
const unsigned short int block_last = UMM_NUMBLOCKS - 1;
UMM_FRAGMENTATION_METRIC_INIT();
/* init ummStats.free_blocks */
#if defined(UMM_STATS) || defined(UMM_STATS_FULL)
#if defined(UMM_STATS_FULL)
ummStats.free_blocks_min =
ummStats.free_blocks_isr_min =
ummStats.free_blocks_isr_min = UMM_NUMBLOCKS - 2;
#endif
#ifndef UMM_INLINE_METRICS
ummStats.free_blocks = UMM_NUMBLOCKS - 2;
#endif
ummStats.free_blocks = block_last;
#endif
/* setup the 0th `umm_block`, which just points to the 1st */
UMM_NBLOCK(block_0th) = block_1th;
UMM_NFREE(block_0th) = block_1th;
UMM_PFREE(block_0th) = block_1th;
/* Set up umm_block[0], which just points to umm_block[1] */
UMM_NBLOCK(0) = 1;
UMM_NFREE(0) = 1;
UMM_PFREE(0) = 1;
/*
* Now, we need to set the whole heap space as a huge free block. We should
* not touch the 0th `umm_block`, since it's special: the 0th `umm_block`
* is the head of the free block list. It's a part of the heap invariant.
* not touch umm_block[0], since it's special: umm_block[0] is the head of
* the free block list. It's a part of the heap invariant.
*
* See the detailed explanation at the beginning of the file.
*/
/*
* 1th `umm_block` has pointers:
*
* - next `umm_block`: the latest one
* - prev `umm_block`: the 0th
* umm_block[1] has pointers:
*
* - next `umm_block`: the last one umm_block[n]
* - prev `umm_block`: umm_block[0]
*
* Plus, it's a free `umm_block`, so we need to apply `UMM_FREELIST_MASK`
*
* And it's the last free block, so the next free block is 0.
* And it's the last free block, so the next free block is 0 which marks
* the end of the list. The previous block and free block pointer are 0
* too, there is no need to initialize these values due to the init code
* that memsets the entire umm_ space to 0.
*/
UMM_NBLOCK(block_1th) = block_last | UMM_FREELIST_MASK;
UMM_NFREE(block_1th) = 0;
UMM_PBLOCK(block_1th) = block_0th;
UMM_PFREE(block_1th) = block_0th;
UMM_NBLOCK(1) = UMM_BLOCK_LAST | UMM_FREELIST_MASK;
/*
* latest `umm_block` has pointers:
* Last umm_block[n] has the next block index at 0, meaning it's
* the end of the list, and the previous block is umm_block[1].
*
* - next `umm_block`: 0 (meaning, there are no more `umm_blocks`)
* - prev `umm_block`: the 1st
*
* It's not a free block, so we don't touch NFREE / PFREE at all.
* The last block is a special block and can never be part of the
* free list, so its pointers are left at 0 too.
*/
UMM_NBLOCK(block_last) = 0;
UMM_PBLOCK(block_last) = block_1th;
}
UMM_PBLOCK(UMM_BLOCK_LAST) = 1;
}
/* ------------------------------------------------------------------------
@ -286,7 +321,7 @@ void umm_init( void ) {
static void umm_free_core( void *ptr ) {
unsigned short int c;
uint16_t c;
STATS__FREE_REQUEST(id_free);
/*
@ -300,7 +335,7 @@ static void umm_free_core( void *ptr ) {
/* Figure out which block we're in. Note the use of truncated division... */
c = (((char *)ptr)-(char *)(&(umm_heap[0])))/sizeof(umm_block);
c = (((uintptr_t)ptr)-(uintptr_t)(&(umm_heap[0])))/sizeof(umm_block);
DBGLOG_DEBUG( "Freeing block %6d\n", c );
@ -315,7 +350,7 @@ static void umm_free_core( void *ptr ) {
if( UMM_NBLOCK(UMM_PBLOCK(c)) & UMM_FREELIST_MASK ) {
DBGLOG_DEBUG( "Assimilate down to next block, which is FREE\n" );
DBGLOG_DEBUG( "Assimilate down to previous block, which is FREE\n" );
c = umm_assimilate_down(c, UMM_FREELIST_MASK);
} else {
@ -323,6 +358,7 @@ static void umm_free_core( void *ptr ) {
* The previous block is not a free block, so add this one to the head
* of the free list
*/
UMM_FRAGMENTATION_METRIC_ADD(c);
DBGLOG_DEBUG( "Just add to head of free list\n" );
@ -368,13 +404,13 @@ void umm_free( void *ptr ) {
*/
static void *umm_malloc_core( size_t size ) {
unsigned short int blocks;
unsigned short int blockSize = 0;
uint16_t blocks;
uint16_t blockSize = 0;
unsigned short int bestSize;
unsigned short int bestBlock;
uint16_t bestSize;
uint16_t bestBlock;
unsigned short int cf;
uint16_t cf;
STATS__ALLOC_REQUEST(id_malloc, size);
@ -422,6 +458,9 @@ static void *umm_malloc_core( size_t size ) {
POISON_CHECK_NEIGHBORS(cf);
if( UMM_NBLOCK(cf) & UMM_BLOCKNO_MASK && blockSize >= blocks ) {
UMM_FRAGMENTATION_METRIC_REMOVE(cf);
/*
* This is an existing block in the memory heap, we just need to split off
* what we need, unlink it from the free list and mark it as in use, and
@ -436,8 +475,8 @@ static void *umm_malloc_core( size_t size ) {
/* Disconnect this block from the FREE list */
umm_disconnect_from_free_list( cf );
} else {
/* It's not an exact fit and we need to split off a block. */
DBGLOG_DEBUG( "Allocating %6d blocks starting at %6d - existing\n", blocks, cf );
@ -447,6 +486,8 @@ static void *umm_malloc_core( size_t size ) {
*/
umm_split_block( cf, blocks, UMM_FREELIST_MASK /*new block is free*/ );
UMM_FRAGMENTATION_METRIC_ADD(UMM_NBLOCK(cf));
/*
* `umm_split_block()` does not update the free pointers (it affects
* only free flags), but effectively we've just moved beginning of the
@ -518,12 +559,12 @@ void *umm_malloc( size_t size ) {
void *umm_realloc( void *ptr, size_t size ) {
UMM_CRITICAL_DECL(id_realloc);
unsigned short int blocks;
unsigned short int blockSize;
unsigned short int prevBlockSize = 0;
unsigned short int nextBlockSize = 0;
uint16_t blocks;
uint16_t blockSize;
uint16_t prevBlockSize = 0;
uint16_t nextBlockSize = 0;
unsigned short int c;
uint16_t c;
size_t curSize;
@ -551,7 +592,6 @@ void *umm_realloc( void *ptr, size_t size ) {
* we should operate the same as free.
*/
if( 0 == size ) {
DBGLOG_DEBUG( "realloc to 0 size, just free the block\n" );
STATS__ZERO_ALLOC_REQUEST(id_realloc, size);
@ -576,7 +616,7 @@ void *umm_realloc( void *ptr, size_t size ) {
/* Figure out which block we're in. Note the use of truncated division... */
c = (((char *)ptr)-(char *)(&(umm_heap[0])))/sizeof(umm_block);
c = (((uintptr_t)ptr)-(uintptr_t)(&(umm_heap[0])))/sizeof(umm_block);
/* Figure out how big this block is ... the free bit is not set :-) */
@ -607,6 +647,9 @@ void *umm_realloc( void *ptr, size_t size ) {
DBGLOG_DEBUG( "realloc blocks %d blockSize %d nextBlockSize %d prevBlockSize %d\n", blocks, blockSize, nextBlockSize, prevBlockSize );
//C This has changed need to review and see if UMM_REALLOC_MINIMIZE_COPY really
//C is that any more. or is it equivalent or close enough to my defrag
//C - mjh
#if defined(UMM_REALLOC_MINIMIZE_COPY)
/*
* Ok, now that we're here we know how many blocks we want and the current
@ -616,29 +659,53 @@ void *umm_realloc( void *ptr, size_t size ) {
* 1. If the new block is the same size or smaller than the current block do
* nothing.
* 2. If the next block is free and adding it to the current block gives us
* enough memory, assimilate the next block.
* 3. If the prev block is free and adding it to the current block gives us
* EXACTLY enough memory, assimilate the next block. This avoids unwanted
* fragmentation of free memory.
*
* The following cases may be better handled with memory copies to reduce
* fragmentation
*
* 3. If the previous block is NOT free and the next block is free and
* adding it to the current block gives us enough memory, assimilate
* the next block. This may introduce a bit of fragmentation.
* 4. If the prev block is free and adding it to the current block gives us
* enough memory, remove the previous block from the free list, assimilate
* it, copy to the new block.
* 4. If the prev and next blocks are free and adding them to the current
* 5. If the prev and next blocks are free and adding them to the current
* block gives us enough memory, assimilate the next block, remove the
* previous block from the free list, assimilate it, copy to the new block.
* 5. Otherwise try to allocate an entirely new block of memory. If the
* 6. Otherwise try to allocate an entirely new block of memory. If the
* allocation works free the old block and return the new pointer. If
* the allocation fails, return NULL and leave the old block intact.
*
* TODO: Add some conditional code to optimise for less fragmentation
* by simply allocating new memory if we need to copy anyways.
*
* All that's left to do is decide if the fit was exact or not. If the fit
* was not exact, then split the memory block so that we use only the requested
* number of blocks and add what's left to the free list.
*/
// Case 1 - block is same size or smaller
if (blockSize >= blocks) {
DBGLOG_DEBUG( "realloc the same or smaller size block - %i, do nothing\n", blocks );
/* This space intentionally left blank */
} else if ((blockSize + nextBlockSize) >= blocks) {
// Case 2 - block + next block fits EXACTLY
} else if ((blockSize + nextBlockSize) == blocks) {
DBGLOG_DEBUG( "exact realloc using next block - %i\n", blocks );
umm_assimilate_up( c );
STATS__FREE_BLOCKS_UPDATE( - nextBlockSize );
blockSize += nextBlockSize;
// Case 3 - prev block NOT free and block + next block fits
} else if ((0 == prevBlockSize) && (blockSize + nextBlockSize) >= blocks) {
DBGLOG_DEBUG( "realloc using next block - %i\n", blocks );
umm_assimilate_up( c );
STATS__FREE_BLOCKS_UPDATE( - nextBlockSize );
blockSize += nextBlockSize;
// Case 4 - prev block + block fits
} else if ((prevBlockSize + blockSize) >= blocks) {
DBGLOG_DEBUG( "realloc using prev block - %i\n", blocks );
umm_disconnect_from_free_list( UMM_PBLOCK(c) );
@ -650,6 +717,7 @@ void *umm_realloc( void *ptr, size_t size ) {
memmove( (void *)&UMM_DATA(c), ptr, curSize );
ptr = (void *)&UMM_DATA(c);
UMM_CRITICAL_RESUME(id_realloc);
// Case 5 - prev block + block + next block fits
} else if ((prevBlockSize + blockSize + nextBlockSize) >= blocks) {
DBGLOG_DEBUG( "realloc using prev and next block - %d\n", blocks );
umm_assimilate_up( c );
@ -670,6 +738,8 @@ void *umm_realloc( void *ptr, size_t size ) {
memmove( (void *)&UMM_DATA(c), ptr, curSize );
ptr = (void *)&UMM_DATA(c);
UMM_CRITICAL_RESUME(id_realloc);
// Case 6 - default is we need to realloc a new block
} else {
DBGLOG_DEBUG( "realloc a completely new block %i\n", blocks );
void *oldptr = ptr;

View File

@ -8,25 +8,27 @@
#ifndef UMM_MALLOC_H
#define UMM_MALLOC_H
/* ------------------------------------------------------------------------ */
#include <stdint.h>
//C This include is not in upstream neither are the #ifdef __cplusplus
//C This include is not in upstream
#include "umm_malloc_cfg.h" /* user-dependent */
#ifdef __cplusplus
extern "C" {
#endif
void umm_init( void );
void *umm_malloc( size_t size );
void *umm_calloc( size_t num, size_t size );
void *umm_realloc( void *ptr, size_t size );
void umm_free( void *ptr );
/* ------------------------------------------------------------------------ */
extern void umm_init( void );
extern void *umm_malloc( size_t size );
extern void *umm_calloc( size_t num, size_t size );
extern void *umm_realloc( void *ptr, size_t size );
extern void umm_free( void *ptr );
/* ------------------------------------------------------------------------ */
#ifdef __cplusplus
}
#endif
/* ------------------------------------------------------------------------ */
#endif /* UMM_MALLOC_H */
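
A hypothetical direct use of the C API declared above (not part of the commit; apiSmokeTest() is an invented name). Sketches normally allocate through malloc()/free(), which the core's heap wrappers route to these functions, but calling them directly behaves the same way.

#include <umm_malloc/umm_malloc.h>

void apiSmokeTest() {
  void *p = umm_malloc(128);     // 128 bytes, rounded up to whole 8-byte blocks
  void *z = umm_calloc(4, 64);   // 256 zero-filled bytes
  umm_free(z);
  umm_free(p);                   // umm_free(NULL) is also accepted if an alloc failed
}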

View File

@ -3,11 +3,20 @@
*
* Changes specific to a target platform go here.
*
* This comment section changed to below in the upstream version, keeping old method for now.
*
* Configuration for umm_malloc - DO NOT EDIT THIS FILE BY HAND!
*
* Refer to the notes below for how to configure the build at compile time
* using -D to define non-default values
*/
#ifndef _UMM_MALLOC_CFG_H
#define _UMM_MALLOC_CFG_H
#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>
#include <debug.h>
#include <pgmspace.h>
#include <esp8266_undocumented.h>
@ -25,24 +34,56 @@ extern "C" {
/*
* There are a number of defines you can set at compile time that affect how
* the memory allocator will operate.
* You can set them in your config file umm_malloc_cfg.h.
* In GNU C, you also can set these compile time defines like this:
*
* -D UMM_TEST_MAIN
* Unless otherwise noted, the default state of these values is #undef-ined!
*
* If you set them via the -D option on the command line (preferred method)
* then this file handles all the configuration automagically and warns if
* there is an incompatible configuration.
*
* UMM_TEST_BUILD
*
* Set this if you want to compile in the test suite
*
* -D UMM_BEST_FIT (defualt)
* UMM_BEST_FIT (default)
*
* Set this if you want to use a best-fit algorithm for allocating new
* blocks
* Set this if you want to use a best-fit algorithm for allocating new blocks.
* On by default, turned off by UMM_FIRST_FIT
*
* -D UMM_FIRST_FIT
* UMM_FIRST_FIT
*
* Set this if you want to use a first-fit algorithm for allocating new
* blocks
* Set this if you want to use a first-fit algorithm for allocating new blocks.
* Faster than UMM_BEST_FIT but can result in higher fragmentation.
*
* -D UMM_DBG_LOG_LEVEL=n
* UMM_INFO
*
* Enables a dump of the heap contents and a function to return the total
* heap size that is unallocated - note this is not the same as the largest
* unallocated block on the heap!
*
* Set if you want the ability to calculate metrics on demand
*
* UMM_INLINE_METRICS
*
* Set this if you want to have access to a minimal set of heap metrics that
* can be used to gauge heap health.
* Setting this at compile time will automatically set UMM_INFO.
* Note that enabling this define will add a slight runtime penalty.
*
* UMM_INTEGRITY_CHECK
*
* Set if you want to be able to verify that the heap is semantically correct
* before or after any heap operation - all of the block indexes in the heap
* make sense.
* Slows execution dramatically but catches errors really quickly.
*
* UMM_POISON_CHECK
*
* Set if you want to be able to leave a poison buffer around each allocation.
* Note this uses an extra 8 bytes per allocation, but you get the benefit of
* being able to detect if your program is writing past an allocated buffer.
*
* UMM_DBG_LOG_LEVEL=n
*
* Set n to a value from 0 to 6 depending on how verbose you want the debug
* log to be
@ -56,11 +97,36 @@ extern "C" {
* ----------------------------------------------------------------------------
*/
#ifdef TEST_BUILD
#define UMM_BEST_FIT
#define UMM_INFO
// #define UMM_INLINE_METRICS
#define UMM_STATS
/*
* To support the API call system_show_malloc(), -DUMM_INFO is required.
*
* For the ESP8266 we need an ISR safe function to call for implementing
* xPortGetFreeHeapSize(). We can get this with one of these options:
* 1) -DUMM_STATS or -DUMM_STATS_FULL
* 2) -DUMM_INLINE_METRICS (and implicitly includes -DUMM_INFO)
*
* If frequent calls are made to ESP.getHeapFragmentation(),
* -DUMM_INLINE_METRICS would reduce the long periods with interrupts disabled caused
* by frequent calls to `umm_info()`. Instead, the computations get distributed
* across each malloc, realloc, and free. This appears to require an additional
* 116 bytes of IRAM vs using `UMM_STATS` with `UMM_INFO`.
*
* When both UMM_STATS and UMM_INLINE_METRICS are defined, macros and structures
* have been optimized to reduce duplications.
*
*/
#ifdef UMM_TEST_BUILD
extern char test_umm_heap[];
#endif
#ifdef TEST_BUILD
#ifdef UMM_TEST_BUILD
/* Start addresses and the size of the heap */
#define UMM_MALLOC_CFG_HEAP_ADDR (test_umm_heap)
#define UMM_MALLOC_CFG_HEAP_SIZE 0x10000
@ -76,8 +142,34 @@ extern char _heap_start[];
#define UMM_H_ATTPACKPRE
#define UMM_H_ATTPACKSUF __attribute__((__packed__))
#define UMM_BEST_FIT
#undef UMM_FIRST_FIT
/* -------------------------------------------------------------------------- */
#ifdef UMM_BEST_FIT
#ifdef UMM_FIRST_FIT
#error Both UMM_BEST_FIT and UMM_FIRST_FIT are defined - pick one!
#endif
#else /* UMM_BEST_FIT is not defined */
#ifndef UMM_FIRST_FIT
#define UMM_BEST_FIT
#endif
#endif
/* -------------------------------------------------------------------------- */
#ifdef UMM_INLINE_METRICS
#define UMM_FRAGMENTATION_METRIC_INIT() umm_fragmentation_metric_init()
#define UMM_FRAGMENTATION_METRIC_ADD(c) umm_fragmentation_metric_add(c)
#define UMM_FRAGMENTATION_METRIC_REMOVE(c) umm_fragmentation_metric_remove(c)
#ifndef UMM_INFO
#define UMM_INFO
#endif
#else
#define UMM_FRAGMENTATION_METRIC_INIT()
#define UMM_FRAGMENTATION_METRIC_ADD(c)
#define UMM_FRAGMENTATION_METRIC_REMOVE(c)
#endif // UMM_INLINE_METRICS
/* -------------------------------------------------------------------------- */
/*
* -D UMM_INFO :
@ -87,30 +179,45 @@ extern char _heap_start[];
* unallocated block on the heap!
*/
#define UMM_INFO
// #define UMM_INFO
#ifdef UMM_INFO
typedef struct UMM_HEAP_INFO_t {
unsigned short int totalEntries;
unsigned short int usedEntries;
unsigned short int freeEntries;
unsigned int totalEntries;
unsigned int usedEntries;
unsigned int freeEntries;
unsigned short int totalBlocks;
unsigned short int usedBlocks;
unsigned short int freeBlocks;
unsigned short int maxFreeContiguousBlocks;
unsigned int freeSize2;
unsigned int totalBlocks;
unsigned int usedBlocks;
unsigned int freeBlocks;
unsigned int freeBlocksSquared;
#ifdef UMM_INLINE_METRICS
size_t oom_count;
#define UMM_OOM_COUNT ummHeapInfo.oom_count
#define UMM_FREE_BLOCKS ummHeapInfo.freeBlocks
#endif
unsigned int maxFreeContiguousBlocks;
}
UMM_HEAP_INFO;
extern UMM_HEAP_INFO ummHeapInfo;
void ICACHE_FLASH_ATTR *umm_info( void *ptr, int force );
size_t ICACHE_FLASH_ATTR umm_free_heap_size( void );
size_t ICACHE_FLASH_ATTR umm_max_block_size( void );
extern ICACHE_FLASH_ATTR void *umm_info( void *ptr, bool force );
#ifdef UMM_INLINE_METRICS
extern size_t umm_free_heap_size( void );
#else
extern ICACHE_FLASH_ATTR size_t umm_free_heap_size( void );
#endif
// umm_max_block_size changed to umm_max_free_block_size in upstream.
extern ICACHE_FLASH_ATTR size_t umm_max_block_size( void );
extern ICACHE_FLASH_ATTR int umm_usage_metric( void );
extern ICACHE_FLASH_ATTR int umm_fragmentation_metric( void );
#else
#define umm_info(p,b)
#define umm_free_heap_size() (0)
#define umm_max_block_size() (0)
#define umm_fragmentation_metric() (0)
#define umm_usage_metric() (0)
#endif
/*
@ -138,12 +245,7 @@ extern char _heap_start[];
#define UMM_STATS_FULL
*/
/*
* For the ESP8266 we want at lest UMM_STATS built, so we have an ISR safe
* function to call for implementing xPortGetFreeHeapSize(), because umm_info()
* is in flash.
*/
#if !defined(UMM_STATS) && !defined(UMM_STATS_FULL)
#if !defined(UMM_STATS) && !defined(UMM_STATS_FULL) && !defined(UMM_INLINE_METRICS)
#define UMM_STATS
#endif
@ -154,11 +256,18 @@ extern char _heap_start[];
#if defined(UMM_STATS) || defined(UMM_STATS_FULL)
typedef struct UMM_STATISTICS_t {
unsigned short int free_blocks;
#ifndef UMM_INLINE_METRICS
// If we are doing UMM_INLINE_METRICS, we can move oom_count and free_blocks to
// umm_info's structure and save a little DRAM and IRAM.
// Otherwise they are defined here.
size_t free_blocks;
size_t oom_count;
#define UMM_OOM_COUNT ummStats.oom_count
#define UMM_FREE_BLOCKS ummStats.free_blocks
#endif
#ifdef UMM_STATS_FULL
unsigned short int free_blocks_min;
unsigned short int free_blocks_isr_min;
size_t free_blocks_min;
size_t free_blocks_isr_min;
size_t alloc_max_size;
size_t last_alloc_size;
size_t id_malloc_count;
@ -172,13 +281,18 @@ typedef struct UMM_STATISTICS_t {
UMM_STATISTICS;
extern UMM_STATISTICS ummStats;
#ifdef UMM_INLINE_METRICS
#define STATS__FREE_BLOCKS_UPDATE(s) (void)(s)
#else
#define STATS__FREE_BLOCKS_UPDATE(s) ummStats.free_blocks += (s)
#define STATS__OOM_UPDATE() ummStats.oom_count += 1
#endif
size_t umm_free_heap_size_lw( void );
#define STATS__OOM_UPDATE() UMM_OOM_COUNT += 1
extern size_t umm_free_heap_size_lw( void );
static inline size_t ICACHE_FLASH_ATTR umm_get_oom_count( void ) {
return ummStats.oom_count;
return UMM_OOM_COUNT;
}
#else // not UMM_STATS or UMM_STATS_FULL
@ -193,14 +307,14 @@ size_t ICACHE_FLASH_ATTR umm_block_size( void );
#ifdef UMM_STATS_FULL
#define STATS__FREE_BLOCKS_MIN() \
do { \
if (ummStats.free_blocks < ummStats.free_blocks_min) \
ummStats.free_blocks_min = ummStats.free_blocks; \
if (UMM_FREE_BLOCKS < ummStats.free_blocks_min) \
ummStats.free_blocks_min = UMM_FREE_BLOCKS; \
} while(false)
#define STATS__FREE_BLOCKS_ISR_MIN() \
do { \
if (ummStats.free_blocks < ummStats.free_blocks_isr_min) \
ummStats.free_blocks_isr_min = ummStats.free_blocks; \
if (UMM_FREE_BLOCKS < ummStats.free_blocks_isr_min) \
ummStats.free_blocks_isr_min = UMM_FREE_BLOCKS; \
} while(false)
#define STATS__ALLOC_REQUEST(tag, s) \
@ -231,7 +345,7 @@ static inline size_t ICACHE_FLASH_ATTR umm_free_heap_size_lw_min( void ) {
}
static inline size_t ICACHE_FLASH_ATTR umm_free_heap_size_min_reset( void ) {
ummStats.free_blocks_min = ummStats.free_blocks;
ummStats.free_blocks_min = UMM_FREE_BLOCKS;
return (size_t)ummStats.free_blocks_min * umm_block_size();
}
@ -349,6 +463,8 @@ static inline void _critical_exit(UMM_TIME_STAT *p, uint32_t *saved_ps) {
xt_wsr_ps(*saved_ps);
}
#endif
//////////////////////////////////////////////////////////////////////////////////////
/*
* A couple of macros to make it easier to protect the memory allocator
@ -360,7 +476,7 @@ static inline void _critical_exit(UMM_TIME_STAT *p, uint32_t *saved_ps) {
* called from within umm_malloc()
*/
#ifdef TEST_BUILD
#ifdef UMM_TEST_BUILD
extern int umm_critical_depth;
extern int umm_max_critical_depth;
#define UMM_CRITICAL_ENTRY() {\
@ -458,12 +574,12 @@ static inline void _critical_exit(UMM_TIME_STAT *p, uint32_t *saved_ps) {
*/
#ifdef UMM_INTEGRITY_CHECK
int umm_integrity_check( void );
extern bool umm_integrity_check( void );
# define INTEGRITY_CHECK() umm_integrity_check()
extern void umm_corruption(void);
# define UMM_HEAP_CORRUPTION_CB() DBGLOG_FUNCTION( "Heap Corruption!" )
#else
# define INTEGRITY_CHECK() 0
# define INTEGRITY_CHECK() (1)
#endif
/////////////////////////////////////////////////
@ -482,11 +598,11 @@ static inline void _critical_exit(UMM_TIME_STAT *p, uint32_t *saved_ps) {
* Customizations:
*
* UMM_POISON_SIZE_BEFORE:
* Number of poison bytes before each block, e.g. 2
* Number of poison bytes before each block, e.g. 4
* UMM_POISON_SIZE_AFTER:
* Number of poison bytes after each block e.g. 2
* Number of poison bytes after each block e.g. 4
* UMM_POISONED_BLOCK_LEN_TYPE
* Type of the exact buffer length, e.g. `short`
* Type of the exact buffer length, e.g. `uint16_t`
*
* NOTE: each allocated buffer is aligned by 4 bytes. But when poisoning is
* enabled, actual pointer returned to user is shifted by
@ -528,16 +644,16 @@ static inline void _critical_exit(UMM_TIME_STAT *p, uint32_t *saved_ps) {
#endif
#endif
#define UMM_POISON_SIZE_BEFORE 4
#define UMM_POISON_SIZE_AFTER 4
#define UMM_POISON_SIZE_BEFORE (4)
#define UMM_POISON_SIZE_AFTER (4)
#define UMM_POISONED_BLOCK_LEN_TYPE uint32_t
#if defined(UMM_POISON_CHECK) || defined(UMM_POISON_CHECK_LITE)
void *umm_poison_malloc( size_t size );
void *umm_poison_calloc( size_t num, size_t size );
void *umm_poison_realloc( void *ptr, size_t size );
void umm_poison_free( void *ptr );
int umm_poison_check( void );
extern void *umm_poison_malloc( size_t size );
extern void *umm_poison_calloc( size_t num, size_t size );
extern void *umm_poison_realloc( void *ptr, size_t size );
extern void umm_poison_free( void *ptr );
extern bool umm_poison_check( void );
// Local Additions to better report location in code of the caller.
void *umm_poison_realloc_fl( void *ptr, size_t size, const char* file, int line );
void umm_poison_free_fl( void *ptr, const char* file, int line );
@ -562,6 +678,23 @@ static inline void _critical_exit(UMM_TIME_STAT *p, uint32_t *saved_ps) {
# define POISON_CHECK_NEIGHBORS(c) do{}while(false)
#endif
#if defined(UMM_POISON_CHECK) || defined(UMM_POISON_CHECK_LITE)
/*
* Overhead adjustments needed for free_blocks to express the number of bytes
* that can actually be allocated.
*/
#define UMM_OVERHEAD_ADJUST ( \
umm_block_size()/2 + \
UMM_POISON_SIZE_BEFORE + \
UMM_POISON_SIZE_AFTER + \
sizeof(UMM_POISONED_BLOCK_LEN_TYPE))
#else
#define UMM_OVERHEAD_ADJUST (umm_block_size()/2)
#endif
/////////////////////////////////////////////////
#undef DBGLOG_FUNCTION
#undef DBGLOG_FUNCTION_P
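
One way the new UMM_OVERHEAD_ADJUST macro can be used from a sketch, as a hypothetical fragment (not part of the commit; grabLargestBlock() is an invented name): it subtracts the per-allocation overhead so a single malloc() can target the largest free block reported by ESP.getMaxFreeBlockSize().

#include <Arduino.h>
#include <umm_malloc/umm_malloc.h>

void *grabLargestBlock() {
  size_t raw = ESP.getMaxFreeBlockSize();   // largest contiguous raw block, overhead included
  if (raw <= (size_t)UMM_OVERHEAD_ADJUST) {
    return nullptr;
  }
  // Can still fail if something else (e.g. an ISR) allocates between the two calls.
  return malloc(raw - UMM_OVERHEAD_ADJUST);
}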

View File

@ -4,6 +4,10 @@
#if defined(UMM_POISON_CHECK) || defined(UMM_POISON_CHECK_LITE)
#define POISON_BYTE (0xa5)
#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>
/*
* Yields a size of the poison for the block of size `s`.
* If `s` is 0, returns 0.
@ -18,7 +22,8 @@ static size_t poison_size(size_t s) {
/*
* Print memory contents starting from given `ptr`
*/
static void dump_mem ( const unsigned char *ptr, size_t len ) {
static void dump_mem ( const void *vptr, size_t len ) {
const uint8_t *ptr = (const uint8_t *)vptr;
while (len--) {
DBGLOG_ERROR(" 0x%.2x", (unsigned int)(*ptr++));
}
@ -27,7 +32,7 @@ static void dump_mem ( const unsigned char *ptr, size_t len ) {
/*
* Put poison data at given `ptr` and `poison_size`
*/
static void put_poison( unsigned char *ptr, size_t poison_size ) {
static void put_poison( void *ptr, size_t poison_size ) {
memset(ptr, POISON_BYTE, poison_size);
}
@ -38,14 +43,14 @@ static void put_poison( unsigned char *ptr, size_t poison_size ) {
* If poison is there, returns 1.
* Otherwise, prints the appropriate message, and returns 0.
*/
static int check_poison( const unsigned char *ptr, size_t poison_size,
static bool check_poison( const void *ptr, size_t poison_size,
const char *where) {
size_t i;
int ok = 1;
bool ok = true;
for (i = 0; i < poison_size; i++) {
if (ptr[i] != POISON_BYTE) {
ok = 0;
if (((const uint8_t *)ptr)[i] != POISON_BYTE) {
ok = false;
break;
}
}
@ -63,8 +68,8 @@ static int check_poison( const unsigned char *ptr, size_t poison_size,
* Check if a block is properly poisoned. Must be called only for non-free
* blocks.
*/
static int check_poison_block( umm_block *pblock ) {
int ok = 1;
static bool check_poison_block( umm_block *pblock ) {
int ok = true;
if (pblock->header.used.next & UMM_FREELIST_MASK) {
DBGLOG_ERROR( "check_poison_block is called for free block 0x%lx\n", (unsigned long)pblock);
@ -75,13 +80,13 @@ static int check_poison_block( umm_block *pblock ) {
pc_cur = pc + sizeof(UMM_POISONED_BLOCK_LEN_TYPE);
if (!check_poison(pc_cur, UMM_POISON_SIZE_BEFORE, "before")) {
ok = 0;
ok = false;
goto clean;
}
pc_cur = pc + *((UMM_POISONED_BLOCK_LEN_TYPE *)pc) - UMM_POISON_SIZE_AFTER;
if (!check_poison(pc_cur, UMM_POISON_SIZE_AFTER, "after")) {
ok = 0;
ok = false;
goto clean;
}
}
@ -97,8 +102,8 @@ clean:
*
* `size_w_poison` is a size of the whole block, including a poison.
*/
static void *get_poisoned( void *v_ptr, size_t size_w_poison ) {
unsigned char *ptr = (unsigned char *)v_ptr;
static void *get_poisoned( void *vptr, size_t size_w_poison ) {
unsigned char *ptr = (unsigned char *)vptr;
if (size_w_poison != 0 && ptr != NULL) {
@ -124,16 +129,16 @@ static void *get_poisoned( void *v_ptr, size_t size_w_poison ) {
*
* Returns unpoisoned pointer, i.e. actual pointer to the allocated memory.
*/
static void *get_unpoisoned( void *v_ptr ) {
unsigned char *ptr = (unsigned char *)v_ptr;
static void *get_unpoisoned( void *vptr ) {
uintptr_t ptr = (uintptr_t)vptr;
if (ptr != NULL) {
unsigned short int c;
if (ptr != 0) {
uint16_t c;
ptr -= (sizeof(UMM_POISONED_BLOCK_LEN_TYPE) + UMM_POISON_SIZE_BEFORE);
/* Figure out which block we're in. Note the use of truncated division... */
c = (((char *)ptr)-(char *)(&(umm_heap[0])))/sizeof(umm_block);
c = (ptr - (uintptr_t)(&(umm_heap[0])))/sizeof(umm_block);
check_poison_block(&UMM_BLOCK(c));
}
@ -204,10 +209,10 @@ void umm_poison_free( void *ptr ) {
* blocks.
*/
int umm_poison_check(void) {
bool umm_poison_check(void) {
UMM_CRITICAL_DECL(id_poison);
int ok = 1;
unsigned short int cur;
bool ok = true;
uint16_t cur;
if (umm_heap == NULL) {
umm_init();

View File

@ -3,6 +3,7 @@
// released to public domain
#include <ESP8266WiFi.h>
#include <umm_malloc/umm_malloc.h>
void stats(const char* what) {
// we could use getFreeHeap() getMaxFreeBlockSize() and getHeapFragmentation()
@ -21,9 +22,53 @@ void tryit(int blocksize) {
void** p;
int blocks;
// heap-used ~= blocks*sizeof(void*) + blocks*blocksize
blocks = ((ESP.getMaxFreeBlockSize() / (blocksize + sizeof(void*))) + 3) & ~3; // rounded up, multiple of 4
/*
heap-used ~= blocks*sizeof(void*) + blocks*blocksize
This calculation gets deep into how umm_malloc divides up memory, and
understanding it is not important for this example. However, some may find
the details useful when creating memory-restricted test cases and possibly
other manufactured failures.
Internally umm_malloc works with memory in 8-byte increments and aligns
to 8 bytes. Creating an allocation adds about 4 bytes of overhead plus
alignment to the allocation size, and more for debug builds. This
complicates the calculation of `blocks` a little.
ESP.getMaxFreeBlockSize() does not indicate the amount of memory that is
available for use in a single malloc call. It indicates the size of a
contiguous block of (raw) memory before the umm_malloc overhead is removed.
It should also be pointed out that, even if you allow for the needed overhead
in your malloc call, it could still fail in the general case. An IRQ handler
could have allocated memory between the time you call
ESP.getMaxFreeBlockSize() and your malloc call, reducing the available
memory. In this particular sketch, with "WiFi off" we are not expecting this
to be an issue.
The macro UMM_OVERHEAD_ADJUST provides a value that can be used to adjust
calculations when trying to divide up memory as we are here. However, the
calculation of multiple elements combined with the rounding up for the
8-byte alignment of each allocation can make for some tricky calculations.
*/
int rawMemoryMaxFreeBlockSize = ESP.getMaxFreeBlockSize();
// Remove the space for overhead component of the blocks*sizeof(void*) array.
int maxFreeBlockSize = rawMemoryMaxFreeBlockSize - UMM_OVERHEAD_ADJUST;
// Initial estimate to use all of the MaxFreeBlock with multiples of 8 rounding up.
blocks = maxFreeBlockSize /
(((blocksize + UMM_OVERHEAD_ADJUST + 7) & ~7) + sizeof(void*));
/*
While we allowed for the 8-byte alignment overhead for blocks*blocksize we
were unable to compensate in advance for the later 8-byte aligning needed
for the blocks*sizeof(void*) allocation. Thus blocks may be off by one count.
We now validate the estimate and adjust as needed.
*/
int rawMemoryEstimate =
blocks * ((blocksize + UMM_OVERHEAD_ADJUST + 7) & ~7) +
((blocks * sizeof(void*) + UMM_OVERHEAD_ADJUST + 7) & ~7);
if (rawMemoryMaxFreeBlockSize < rawMemoryEstimate) {
--blocks;
}
Serial.printf("\nFilling memory with blocks of %d bytes each\n", blocksize);
stats("before");
@ -41,7 +86,7 @@ void tryit(int blocksize) {
}
stats("freeing every other blocks");
for (int i = 0; i < blocks; i += 4) {
for (int i = 0; i < (blocks - 1); i += 4) {
if (p[i + 1]) {
free(p[i + 1]);
}
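
The block-count estimate explained in the comments above can be restated as a small helper for checking the arithmetic in isolation; a hypothetical sketch (not part of the commit; estimateBlocks() is an invented name). With made-up inputs of blocksize = 100 and a 20000-byte raw max free block, and UMM_OVERHEAD_ADJUST = 4 for a non-poison build, it comes out to 185 blocks.

#include <Arduino.h>
#include <umm_malloc/umm_malloc.h>

static int estimateBlocks(int blocksize, int rawMaxFreeBlockSize) {
  int overhead = UMM_OVERHEAD_ADJUST;                        // runtime value, ~4 or ~16 bytes
  int perItemData = (blocksize + overhead + 7) & ~7;         // 8-byte aligned cost per data block
  int blocks = (rawMaxFreeBlockSize - overhead) /
               (perItemData + (int)sizeof(void *));          // first estimate, as in tryit()
  int rawEstimate = blocks * perItemData +
                    ((blocks * (int)sizeof(void *) + overhead + 7) & ~7);
  if (rawMaxFreeBlockSize < rawEstimate) {
    --blocks;   // the pointer array's own alignment pushed the estimate over
  }
  return blocks;
}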