Sync umm_malloc style with upstream (#8426)
Upstream umm_malloc, at git hash id 4dac43c3be7a7470dd669323021ba238081da18e, processed all project files with the style program uncrustify. This PR runs our ported version of umm_malloc through uncrustify as well, which should make subsequent merges of upstream into this port easier. It also makes the style more consistent throughout umm_malloc.
This commit is contained in: parent f401f08aba, commit f26201e6a9
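The style changes in the diff below are mechanical. As a rough illustration of what uncrustify does to this code base (a minimal sketch, not taken from the diff; the function and variable names are invented, loosely modeled on umm_usage_metric_core):

```c
/* Before uncrustify (old port style):
 *
 *   if (total_blocks)
 *       return( (used_blocks * 100) / total_blocks );
 *   return( -1 );
 */

/* After uncrustify: braces on every "if" body, no parentheses around return values. */
static int heap_percent_used(int used_blocks, int total_blocks) {
    if (total_blocks) {
        return (used_blocks * 100) / total_blocks;
    }
    return -1; /* heap has no blocks at all */
}
```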
@@ -248,4 +248,32 @@ Enhancement ideas:
   save on the execution time spent with interrupts disabled.
 */
 
+/*
+  Dec 29, 2021
+
+  Upstream umm_malloc at git hash id 4dac43c3be7a7470dd669323021ba238081da18e
+  processed all project files with the style program uncrustify.
+
+  This PR updates our ported version of umm_malloc processed with "uncrustify".
+  This should make subsequent merges of upstream into this port easier.
+
+  This also makes the style more consistant through umm_malloc.
+
+  Some edits to source files was needed to get uncrustify to work.
+  1) macros with "if"s need to be of the form "if ( blah ) { } " curley braces
+  are needed for it to parse correctly
+  2) These "#ifdef __cplusplus" also had to be commented out while running to
+  avoid parser confusion.
+  ```
+  #ifdef __cplusplus
+  extern "C" {
+  #endif
+  ```
+  and
+  ```
+  #ifdef __cplusplus
+  }
+  #endif
+  ```
+*/
 #endif
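The workaround described in the comment added above can be illustrated as follows: a statement-like macro whose body is a bare `if` confuses uncrustify, so every `if` inside a macro body gets curly braces before the tool is run (the macro name here is invented for illustration; the port's real STATS__* macros, shown in a hunk further down, follow the same do { ... } while (false) pattern). The `extern "C"` guards were simply commented out for the duration of the uncrustify run and restored afterwards.

```c
/*
 * Problematic for uncrustify - the "if" body has no braces:
 *
 *   #define STATS__TRACK_MAX(max, v) \
 *       do { \
 *           if ((v) > (max)) \
 *               (max) = (v); \
 *       } while (false)
 */

/* Form uncrustify can parse - every "if" carries curly braces: */
#define STATS__TRACK_MAX(max, v) \
    do { \
        if ((v) > (max)) { \
            (max) = (v); \
        } \
    } while (false)
```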
@@ -1 +1,2 @@
 Downloaded from: https://github.com/rhempel/c-helper-macros/tree/develop
+Applied uncrustify to be consistent with the rest of the umm_malloc files.
@@ -33,9 +33,12 @@ class HeapSelect {
 public:
 #if (UMM_NUM_HEAPS == 1)
     MAYBE_ALWAYS_INLINE
-    HeapSelect(size_t id) { (void)id; }
+    HeapSelect(size_t id) {
+        (void)id;
+    }
     MAYBE_ALWAYS_INLINE
-    ~HeapSelect() {}
+    ~HeapSelect() {
+    }
 #else
     MAYBE_ALWAYS_INLINE
     HeapSelect(size_t id) : _heap_id(umm_get_current_heap_id()) {
@@ -70,9 +73,11 @@ protected:
 
 #else
     MAYBE_ALWAYS_INLINE
-    HeapSelectIram() {}
+    HeapSelectIram() {
+    }
     MAYBE_ALWAYS_INLINE
-    ~HeapSelectIram() {}
+    ~HeapSelectIram() {
+    }
 #endif
 };
 
@@ -80,9 +85,11 @@ class HeapSelectDram {
 public:
 #if (UMM_NUM_HEAPS == 1)
     MAYBE_ALWAYS_INLINE
-    HeapSelectDram() {}
+    HeapSelectDram() {
+    }
     MAYBE_ALWAYS_INLINE
-    ~HeapSelectDram() {}
+    ~HeapSelectDram() {
+    }
 #else
     MAYBE_ALWAYS_INLINE
     HeapSelectDram() : _heap_id(umm_get_current_heap_id()) {
@@ -95,7 +95,7 @@ void *umm_info( void *ptr, bool force ) {
             /* Release the critical section... */
             UMM_CRITICAL_EXIT(id_info);
 
-            return( ptr );
+            return ptr;
         }
     } else {
         ++_context->info.usedEntries;
@@ -165,7 +165,7 @@ void *umm_info( void *ptr, bool force ) {
     /* Release the critical section... */
     UMM_CRITICAL_EXIT(id_info);
 
-    return( NULL );
+    return NULL;
 }
 
 /* ------------------------------------------------------------------------ */
@@ -204,8 +204,9 @@ int umm_usage_metric_core( umm_heap_context_t *_context ) {
     // C Note, umm_metrics also appears in the upstrean w/o definition. I suspect it is suppose to be ummHeapInfo.
     // DBGLOG_DEBUG( "usedBlocks %d totalBlocks %d\n", umm_metrics.usedBlocks, ummHeapInfo.totalBlocks);
     DBGLOG_DEBUG("usedBlocks %d totalBlocks %d\n", _context->info.usedBlocks, _context->info.totalBlocks);
-    if (_context->info.freeBlocks)
+    if (_context->info.freeBlocks) {
         return (int)((_context->info.usedBlocks * 100) / (_context->info.freeBlocks));
+    }
 
     return -1; // no freeBlocks
 }
@@ -226,7 +227,7 @@ int umm_fragmentation_metric_core( umm_heap_context_t *_context ) {
         return 0;
     } else {
         // upstream version: return (100 - (((uint32_t)(sqrtf(ummHeapInfo.freeBlocksSquared)) * 100)/(ummHeapInfo.freeBlocks)));
-        return (100 - (((uint32_t)(sqrt32(_context->info.freeBlocksSquared)) * 100)/(_context->info.freeBlocks)));
+        return 100 - (((uint32_t)(sqrt32(_context->info.freeBlocksSquared)) * 100) / (_context->info.freeBlocks));
     }
 }
 
@@ -91,8 +91,7 @@ bool umm_integrity_check(void) {
 
         /* make sure the free mark is appropriate, and unmark it */
         if ((UMM_NBLOCK(cur) & UMM_FREELIST_MASK)
-            != (UMM_PBLOCK(cur) & UMM_FREELIST_MASK))
-        {
+            != (UMM_PBLOCK(cur) & UMM_FREELIST_MASK)) {
             DBGLOG_FUNCTION("heap integrity broken: mask wrong at addr 0x%08x: n=0x%x, p=0x%x\n",
                 DBGLOG_32_BIT_PTR(&UMM_NBLOCK(cur)),
                 (UMM_NBLOCK(cur) & UMM_FREELIST_MASK),
@@ -21,13 +21,12 @@ UMM_TIME_STATS time_stats = {
     #ifdef UMM_INTEGRITY_CHECK
     {0xFFFFFFFF, 0U, 0U, 0U},
     #endif
-    {0xFFFFFFFF, 0U, 0U, 0U} };
+    {0xFFFFFFFF, 0U, 0U, 0U}
+};
 
-bool ICACHE_FLASH_ATTR get_umm_get_perf_data(UMM_TIME_STATS *p, size_t size)
-{
+bool ICACHE_FLASH_ATTR get_umm_get_perf_data(UMM_TIME_STATS *p, size_t size) {
     UMM_CRITICAL_DECL(id_no_tag);
-    if (p && sizeof(time_stats) == size)
-    {
+    if (p && sizeof(time_stats) == size) {
         UMM_CRITICAL_ENTRY(id_no_tag);
         memcpy(p, &time_stats, size);
         UMM_CRITICAL_EXIT(id_no_tag);
@@ -45,8 +44,9 @@ bool ICACHE_FLASH_ATTR get_umm_get_perf_data(UMM_TIME_STATS *p, size_t size)
 static bool check_poison_neighbors(umm_heap_context_t *_context, uint16_t cur) {
     uint16_t c;
 
-    if ( 0 == cur )
+    if (0 == cur) {
         return true;
+    }
 
     c = UMM_PBLOCK(cur) & UMM_BLOCKNO_MASK;
     while (c && (UMM_NBLOCK(c) & UMM_BLOCKNO_MASK)) {
@@ -56,8 +56,9 @@ static bool check_poison_neighbors( umm_heap_context_t *_context, uint16_t cur )
            i.e. Adjacent free space is always consolidated.
          */
         if (!(UMM_NBLOCK(c) & UMM_FREELIST_MASK)) {
-            if ( !check_poison_block(&UMM_BLOCK(c)) )
+            if (!check_poison_block(&UMM_BLOCK(c))) {
                 return false;
+            }
 
             break;
         }
@@ -68,8 +69,9 @@ static bool check_poison_neighbors( umm_heap_context_t *_context, uint16_t cur )
     c = UMM_NBLOCK(cur) & UMM_BLOCKNO_MASK;
     while ((UMM_NBLOCK(c) & UMM_BLOCKNO_MASK)) {
         if (!(UMM_NBLOCK(c) & UMM_FREELIST_MASK)) {
-            if ( !check_poison_block(&UMM_BLOCK(c)) )
+            if (!check_poison_block(&UMM_BLOCK(c))) {
                 return false;
+            }
 
             break;
         }
@@ -421,7 +421,7 @@ static uint16_t umm_assimilate_down( umm_heap_context_t *_context, uint16_t c, u
         UMM_FRAGMENTATION_METRIC_ADD(UMM_PBLOCK(c));
     }
 
-    return( UMM_PBLOCK(c) );
+    return UMM_PBLOCK(c);
 }
 
 /* ------------------------------------------------------------------------- */
@@ -683,8 +683,9 @@ static void *umm_malloc_core( umm_heap_context_t *_context, size_t size ) {
             }
     #elif defined UMM_FIRST_FIT
             /* This is the first block that fits! */
-            if( (blockSize >= blocks) )
+            if ((blockSize >= blocks)) {
                 break;
+            }
     #else
     #error "No UMM_*_FIT is defined - check umm_malloc_cfg.h"
     #endif
@@ -755,10 +756,10 @@ static void *umm_malloc_core( umm_heap_context_t *_context, size_t size ) {
 
         DBGLOG_DEBUG("Can't allocate %5d blocks\n", blocks);
 
-        return( (void *)NULL );
+        return (void *)NULL;
     }
 
-    return( (void *)&UMM_DATA(cf) );
+    return (void *)&UMM_DATA(cf);
 }
 
 /* ------------------------------------------------------------------------ */
@@ -835,7 +836,7 @@ void *umm_malloc( size_t size ) {
         DBGLOG_DEBUG("malloc a block of 0 bytes -> do nothing\n");
         STATS__ZERO_ALLOC_REQUEST(id_malloc, size);
 
-        return( ptr );
+        return ptr;
     }
 
     /* Allocate the memory within a protected critical section */
@@ -860,7 +861,7 @@ void *umm_malloc( size_t size ) {
 
     UMM_CRITICAL_EXIT(id_malloc);
 
-    return( ptr );
+    return ptr;
 }
 
 /* ------------------------------------------------------------------------ */
@@ -890,7 +891,7 @@ void *umm_realloc( void *ptr, size_t size ) {
     if (((void *)NULL == ptr)) {
         DBGLOG_DEBUG("realloc the NULL pointer - call malloc()\n");
 
-        return( umm_malloc(size) );
+        return umm_malloc(size);
     }
 
     /*
@@ -912,7 +913,7 @@ void *umm_realloc( void *ptr, size_t size ) {
 
         umm_free(ptr);
 
-        return( (void *)NULL );
+        return (void *)NULL;
     }
 
     STATS__ALLOC_REQUEST(id_realloc, size);
@@ -1205,7 +1206,7 @@ void *umm_realloc( void *ptr, size_t size ) {
     /* Release the critical section... */
     UMM_CRITICAL_EXIT(id_realloc);
 
-    return( ptr );
+    return ptr;
 }
 
 /* ------------------------------------------------------------------------ */
@@ -1215,8 +1216,9 @@ void *umm_calloc( size_t num, size_t item_size ) {
 
     ret = umm_malloc((size_t)(item_size * num));
 
-    if (ret)
+    if (ret) {
         memset(ret, 0x00, (size_t)(item_size * num));
+    }
 
     return ret;
 }
@@ -359,22 +359,25 @@ size_t ICACHE_FLASH_ATTR umm_block_size( void );
 #ifdef UMM_STATS_FULL
 #define STATS__FREE_BLOCKS_MIN() \
     do { \
-        if (_context->UMM_FREE_BLOCKS < _context->stats.free_blocks_min) \
+        if (_context->UMM_FREE_BLOCKS < _context->stats.free_blocks_min) { \
             _context->stats.free_blocks_min = _context->UMM_FREE_BLOCKS; \
+        } \
     } while (false)
 
 #define STATS__FREE_BLOCKS_ISR_MIN() \
     do { \
-        if (_context->UMM_FREE_BLOCKS < _context->stats.free_blocks_isr_min) \
+        if (_context->UMM_FREE_BLOCKS < _context->stats.free_blocks_isr_min) { \
             _context->stats.free_blocks_isr_min = _context->UMM_FREE_BLOCKS; \
+        } \
     } while (false)
 
 #define STATS__ALLOC_REQUEST(tag, s) \
     do { \
         _context->stats.tag##_count += 1; \
         _context->stats.last_alloc_size = s; \
-        if (_context->stats.alloc_max_size < s) \
+        if (_context->stats.alloc_max_size < s) { \
             _context->stats.alloc_max_size = s; \
+        } \
     } while (false)
 
 #define STATS__ZERO_ALLOC_REQUEST(tag, s) \
@@ -472,11 +475,13 @@ static inline void _critical_entry(UMM_TIME_STAT *p, uint32_t *saved_ps) {
 
 static inline void _critical_exit(UMM_TIME_STAT *p, uint32_t *saved_ps) {
     uint32_t elapse = esp_get_cycle_count() - p->start;
-    if (elapse < p->min)
+    if (elapse < p->min) {
         p->min = elapse;
+    }
 
-    if (elapse > p->max)
+    if (elapse > p->max) {
         p->max = elapse;
+    }
 
     xt_wsr_ps(*saved_ps);
 }
@@ -13,10 +13,10 @@
  * If `s` is 0, returns 0.
  */
 static size_t poison_size(size_t s) {
-    return(s ? (UMM_POISON_SIZE_BEFORE +
+    return s ? (UMM_POISON_SIZE_BEFORE +
         sizeof(UMM_POISONED_BLOCK_LEN_TYPE) +
         UMM_POISON_SIZE_AFTER)
-        : 0);
+        : 0;
 }
 
 /*
@@ -177,8 +177,9 @@ void *umm_poison_calloc( size_t num, size_t item_size ) {
 
     ret = umm_malloc(size);
 
-    if (NULL != ret)
+    if (NULL != ret) {
         memset(ret, 0x00, size);
+    }
 
     ret = get_poisoned(ret, size);
 