From c517bfd9971c3867c289a5845b5f968c141f192d Mon Sep 17 00:00:00 2001
From: M Hightower <27247790+mhightower83@users.noreply.github.com>
Date: Fri, 28 Apr 2023 05:57:50 -0700
Subject: [PATCH] umm_poison false positive from ISR (#8914)

The umm_poison logic runs outside the UMM_CRITICAL_* umbrella. When
interrupt routines do alloc calls, it is possible to interrupt an
in-progress allocation just before the poison is set, with a new alloc
request resulting in a false "poison check fail" against the in-progress
allocation. The SDK does mallocs from ISRs. SmartConfig can illustrate
this issue, see
https://github.com/esp8266/Arduino/issues/3494#issue-247919259

Other Sketches built with Debug port "Serial" that use the SDK's
Promiscuous callbacks are also likely to encounter problems. The SDK
support for "Promiscuous Receive" allocates memory from an ISR context,
occasionally interrupting the poison wrapper code before it finishes
setting the poison fences, resulting in a false poison-check failure.
---
 cores/esp8266/umm_malloc/Notes.h          | 11 ++++++++++
 cores/esp8266/umm_malloc/umm_local.c      |  7 ++++--
 cores/esp8266/umm_malloc/umm_malloc.cpp   | 26 +++++++++++++++++------
 cores/esp8266/umm_malloc/umm_malloc_cfg.h |  8 +++++++
 cores/esp8266/umm_malloc/umm_poison.c     | 19 +++++++++++------
 5 files changed, 55 insertions(+), 16 deletions(-)

diff --git a/cores/esp8266/umm_malloc/Notes.h b/cores/esp8266/umm_malloc/Notes.h
index af0852b43..5d076c4b6 100644
--- a/cores/esp8266/umm_malloc/Notes.h
+++ b/cores/esp8266/umm_malloc/Notes.h
@@ -341,5 +341,16 @@ Enhancement ideas:
     * For multiple Heaps builds, add a dedicated function that always reports
       DRAM results.
 
+
+  April 22, 2023
+
+  The umm_poison logic runs outside the UMM_CRITICAL_* umbrella. When interrupt
+  routines do alloc calls, it is possible to interrupt an in-progress allocation
+  just before the poison is set, with a new alloc request resulting in a false
+  "poison check fail" against the in-progress allocation. The SDK does mallocs
+  from ISRs. SmartConfig can illustrate this issue.
+
+  Move get_poisoned() within UMM_CRITICAL_* in umm_malloc() and umm_realloc().
+
 */
 #endif
diff --git a/cores/esp8266/umm_malloc/umm_local.c b/cores/esp8266/umm_malloc/umm_local.c
index b02e511ce..c08e2a27c 100644
--- a/cores/esp8266/umm_malloc/umm_local.c
+++ b/cores/esp8266/umm_malloc/umm_local.c
@@ -142,8 +142,11 @@ void *umm_poison_realloc_fl(void *ptr, size_t size, const char *file, int line)
 
     add_poison_size(&size);
     ret = umm_realloc(ptr, size);
-
-    ret = get_poisoned(ret, size);
+    /*
+      "get_poisoned" is now called from umm_realloc while still in a critical
+      section. Before umm_realloc returned, the pointer offset was adjusted to
+      the start of the requested buffer.
+    */
 
     return ret;
 }
diff --git a/cores/esp8266/umm_malloc/umm_malloc.cpp b/cores/esp8266/umm_malloc/umm_malloc.cpp
index 4a1bdfc76..692308187 100644
--- a/cores/esp8266/umm_malloc/umm_malloc.cpp
+++ b/cores/esp8266/umm_malloc/umm_malloc.cpp
@@ -909,6 +909,8 @@ void *umm_malloc(size_t size) {
 
     ptr = umm_malloc_core(_context, size);
 
+    ptr = POISON_CHECK_SET_POISON(ptr, size);
+
     UMM_CRITICAL_EXIT(id_malloc);
 
     return ptr;
@@ -1068,7 +1070,7 @@ void *umm_realloc(void *ptr, size_t size) {
         // Case 2 - block + next block fits EXACTLY
     } else if ((blockSize + nextBlockSize) == blocks) {
         DBGLOG_DEBUG("exact realloc using next block - %i\n", blocks);
-        umm_assimilate_up(c);
+        umm_assimilate_up(_context, c);
         STATS__FREE_BLOCKS_UPDATE(-nextBlockSize);
         blockSize += nextBlockSize;
 
@@ -1087,8 +1089,9 @@ void *umm_realloc(void *ptr, size_t size) {
         STATS__FREE_BLOCKS_UPDATE(-prevBlockSize);
         STATS__FREE_BLOCKS_ISR_MIN();
         blockSize += prevBlockSize;
+        POISON_CHECK_SET_POISON((void *)&UMM_DATA(c), size); // Fix allocation so ISR poison check is good
         UMM_CRITICAL_SUSPEND(id_realloc);
-        memmove((void *)&UMM_DATA(c), ptr, curSize);
+        UMM_POISON_MEMMOVE((void *)&UMM_DATA(c), ptr, curSize);
         ptr = (void *)&UMM_DATA(c);
         UMM_CRITICAL_RESUME(id_realloc);
         // Case 5 - prev block + block + next block fits
@@ -1108,8 +1111,9 @@ void *umm_realloc(void *ptr, size_t size) {
         #else
         blockSize += (prevBlockSize + nextBlockSize);
         #endif
+        POISON_CHECK_SET_POISON((void *)&UMM_DATA(c), size);
         UMM_CRITICAL_SUSPEND(id_realloc);
-        memmove((void *)&UMM_DATA(c), ptr, curSize);
+        UMM_POISON_MEMMOVE((void *)&UMM_DATA(c), ptr, curSize);
         ptr = (void *)&UMM_DATA(c);
         UMM_CRITICAL_RESUME(id_realloc);
 
@@ -1119,8 +1123,9 @@ void *umm_realloc(void *ptr, size_t size) {
         void *oldptr = ptr;
         if ((ptr = umm_malloc_core(_context, size))) {
             DBGLOG_DEBUG("realloc %i to a bigger block %i, copy, and free the old\n", blockSize, blocks);
+            POISON_CHECK_SET_POISON((void *)&UMM_DATA(c), size);
             UMM_CRITICAL_SUSPEND(id_realloc);
-            memcpy(ptr, oldptr, curSize);
+            UMM_POISON_MEMCPY(ptr, oldptr, curSize);
             UMM_CRITICAL_RESUME(id_realloc);
             umm_free_core(_context, oldptr);
         } else {
@@ -1181,8 +1186,9 @@ void *umm_realloc(void *ptr, size_t size) {
             blockSize = blocks;
             #endif
         }
+        POISON_CHECK_SET_POISON((void *)&UMM_DATA(c), size);
         UMM_CRITICAL_SUSPEND(id_realloc);
-        memmove((void *)&UMM_DATA(c), ptr, curSize);
+        UMM_POISON_MEMMOVE((void *)&UMM_DATA(c), ptr, curSize);
         ptr = (void *)&UMM_DATA(c);
         UMM_CRITICAL_RESUME(id_realloc);
     } else if (blockSize >= blocks) { // 2
@@ -1198,8 +1204,9 @@ void *umm_realloc(void *ptr, size_t size) {
         void *oldptr = ptr;
         if ((ptr = umm_malloc_core(_context, size))) {
             DBGLOG_DEBUG("realloc %d to a bigger block %d, copy, and free the old\n", blockSize, blocks);
+            POISON_CHECK_SET_POISON((void *)&UMM_DATA(c), size);
             UMM_CRITICAL_SUSPEND(id_realloc);
-            memcpy(ptr, oldptr, curSize);
+            UMM_POISON_MEMCPY(ptr, oldptr, curSize);
             UMM_CRITICAL_RESUME(id_realloc);
             umm_free_core(_context, oldptr);
         } else {
@@ -1223,8 +1230,9 @@ void *umm_realloc(void *ptr, size_t size) {
         void *oldptr = ptr;
         if ((ptr = umm_malloc_core(_context, size))) {
             DBGLOG_DEBUG("realloc %d to a bigger block %d, copy, and free the old\n", blockSize, blocks);
+            POISON_CHECK_SET_POISON((void *)&UMM_DATA(c), size);
             UMM_CRITICAL_SUSPEND(id_realloc);
-            memcpy(ptr, oldptr, curSize);
+            UMM_POISON_MEMCPY(ptr, oldptr, curSize);
             UMM_CRITICAL_RESUME(id_realloc);
             umm_free_core(_context, oldptr);
         } else {
@@ -1250,6 +1258,8 @@ void *umm_realloc(void *ptr, size_t size) {
 
     STATS__FREE_BLOCKS_MIN();
 
+    ptr = POISON_CHECK_SET_POISON(ptr, size);
+
     /* Release the critical section... */
     UMM_CRITICAL_EXIT(id_realloc);
 
@@ -1258,6 +1268,7 @@ void *umm_realloc(void *ptr, size_t size) {
 
 /* ------------------------------------------------------------------------ */
 
+#if !defined(UMM_POISON_CHECK) && !defined(UMM_POISON_CHECK_LITE)
 void *umm_calloc(size_t num, size_t item_size) {
     void *ret;
 
@@ -1273,6 +1284,7 @@ void *umm_calloc(size_t num, size_t item_size) {
 
     return ret;
 }
+#endif
 
 /* ------------------------------------------------------------------------ */
 
diff --git a/cores/esp8266/umm_malloc/umm_malloc_cfg.h b/cores/esp8266/umm_malloc/umm_malloc_cfg.h
index 449d395fc..14d2055eb 100644
--- a/cores/esp8266/umm_malloc/umm_malloc_cfg.h
+++ b/cores/esp8266/umm_malloc/umm_malloc_cfg.h
@@ -618,6 +618,9 @@ extern bool umm_poison_check(void);
 // Local Additions to better report location in code of the caller.
 void *umm_poison_realloc_fl(void *ptr, size_t size, const char *file, int line);
 void umm_poison_free_fl(void *ptr, const char *file, int line);
+#define POISON_CHECK_SET_POISON(p, s) get_poisoned(p, s)
+#define UMM_POISON_SKETCH_PTR(p) ((void*)((uintptr_t)p + sizeof(UMM_POISONED_BLOCK_LEN_TYPE) + UMM_POISON_SIZE_BEFORE))
+#define UMM_POISON_SKETCH_PTRSZ(s) (s - sizeof(UMM_POISONED_BLOCK_LEN_TYPE) - UMM_POISON_SIZE_BEFORE - UMM_POISON_SIZE_AFTER)
 #if defined(UMM_POISON_CHECK_LITE)
 /*
  * We can safely do individual poison checks at free and realloc and stay
@@ -637,8 +640,13 @@ void umm_poison_free_fl(void *ptr, const char *file, int line);
 #else
 #define POISON_CHECK() 1
 #define POISON_CHECK_NEIGHBORS(c) do {} while (false)
+#define POISON_CHECK_SET_POISON(p, s) (p)
+#define UMM_POISON_SKETCH_PTR(p) (p)
+#define UMM_POISON_SKETCH_PTRSZ(s) (s)
 #endif
 
+#define UMM_POISON_MEMMOVE(t, p, s) memmove(UMM_POISON_SKETCH_PTR(t), UMM_POISON_SKETCH_PTR(p), UMM_POISON_SKETCH_PTRSZ(s))
+#define UMM_POISON_MEMCPY(t, p, s) memcpy(UMM_POISON_SKETCH_PTR(t), UMM_POISON_SKETCH_PTR(p), UMM_POISON_SKETCH_PTRSZ(s))
 #if defined(UMM_POISON_CHECK) || defined(UMM_POISON_CHECK_LITE)
 
 /*
diff --git a/cores/esp8266/umm_malloc/umm_poison.c b/cores/esp8266/umm_malloc/umm_poison.c
index 9ebaafdd7..ca41cabf4 100644
--- a/cores/esp8266/umm_malloc/umm_poison.c
+++ b/cores/esp8266/umm_malloc/umm_poison.c
@@ -163,8 +163,11 @@ void *umm_poison_malloc(size_t size) {
 
     add_poison_size(&size);
     ret = umm_malloc(size);
-
-    ret = get_poisoned(ret, size);
+    /*
+      "get_poisoned" is now called from umm_malloc while still in a critical
+      section. Before umm_malloc returned, the pointer offset was adjusted to
+      the start of the requested buffer.
+    */
 
     return ret;
 }
@@ -177,17 +180,16 @@ void *umm_poison_calloc(size_t num, size_t item_size) {
     // Use saturated multiply.
     // Rely on umm_malloc to supply the fail response as needed.
     size_t size = umm_umul_sat(num, item_size);
+    size_t request_sz = size;
 
     add_poison_size(&size);
 
     ret = umm_malloc(size);
 
     if (NULL != ret) {
-        memset(ret, 0x00, size);
+        memset(ret, 0x00, request_sz);
     }
 
-    ret = get_poisoned(ret, size);
-
     return ret;
 }
 
@@ -200,8 +202,11 @@ void *umm_poison_realloc(void *ptr, size_t size) {
 
     add_poison_size(&size);
     ret = umm_realloc(ptr, size);
-
-    ret = get_poisoned(ret, size);
+    /*
+      "get_poisoned" is now called from umm_realloc while still in a critical
+      section. Before umm_realloc returned, the pointer offset was adjusted to
+      the start of the requested buffer.
+    */
 
     return ret;
 }
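
The core of the fix is an ordering change inside umm_malloc()/umm_realloc(): the
poison fences are now written by POISON_CHECK_SET_POISON() before
UMM_CRITICAL_EXIT() rather than by the wrapper afterwards. Below is a minimal
host-side sketch of the two orderings, using plain malloc() and stub macros as
stand-ins for the umm_malloc internals; none of this is the library code.

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define UMM_CRITICAL_ENTRY(tag) printf("[%s] interrupts masked\n", #tag)
#define UMM_CRITICAL_EXIT(tag)  printf("[%s] interrupts restored\n", #tag)

static void *get_poisoned_stub(void *ptr) {
    printf("  poison fences written\n");    /* stands in for get_poisoned() */
    return ptr;
}

/* Old shape: fences are written after the critical section. An ISR-level
   alloc arriving between EXIT and get_poisoned() sees an unfenced block
   and reports a false poison failure. */
static void *poison_malloc_old(size_t size) {
    UMM_CRITICAL_ENTRY(malloc);
    void *ptr = malloc(size);               /* stands in for umm_malloc_core() */
    UMM_CRITICAL_EXIT(malloc);
    return get_poisoned_stub(ptr);          /* <-- race window is just above   */
}

/* New shape (this patch): fences are written inside the critical section,
   so that window no longer exists. */
static void *poison_malloc_new(size_t size) {
    UMM_CRITICAL_ENTRY(malloc);
    void *ptr = malloc(size);
    ptr = get_poisoned_stub(ptr);           /* POISON_CHECK_SET_POISON() */
    UMM_CRITICAL_EXIT(malloc);
    return ptr;
}

int main(void) {
    free(poison_malloc_old(16));
    free(poison_malloc_new(16));
    return 0;
}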
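
The realloc paths also switch from memmove()/memcpy() to UMM_POISON_MEMMOVE()/
UMM_POISON_MEMCPY(), which copy only the Sketch-visible window so the fences
just written at the destination by POISON_CHECK_SET_POISON() survive the move.
The next sketch is a small, self-contained illustration of that pointer/size
arithmetic; the fence sizes and length type below are stand-in assumptions, not
the configured values from umm_malloc_cfg.h.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint32_t UMM_POISONED_BLOCK_LEN_TYPE;   /* assumed size-tag type      */
#define UMM_POISON_SIZE_BEFORE 4                /* assumed front fence length */
#define UMM_POISON_SIZE_AFTER  4                /* assumed back fence length  */

/* Same shape as the macros added to umm_malloc_cfg.h */
#define UMM_POISON_SKETCH_PTR(p)   ((void *)((uintptr_t)(p) + sizeof(UMM_POISONED_BLOCK_LEN_TYPE) + UMM_POISON_SIZE_BEFORE))
#define UMM_POISON_SKETCH_PTRSZ(s) ((s) - sizeof(UMM_POISONED_BLOCK_LEN_TYPE) - UMM_POISON_SIZE_BEFORE - UMM_POISON_SIZE_AFTER)

int main(void) {
    size_t request  = 32;                       /* what the Sketch asked for  */
    size_t poisoned = request + sizeof(UMM_POISONED_BLOCK_LEN_TYPE)
        + UMM_POISON_SIZE_BEFORE + UMM_POISON_SIZE_AFTER;

    unsigned char src[64] = {0};                /* stand-ins for the old and  */
    unsigned char dst[64] = {0};                /* new heap block data areas  */

    /* Copy only the Sketch-visible payload; the size tag and both fences at
       the destination are left untouched. */
    memmove(UMM_POISON_SKETCH_PTR(dst), UMM_POISON_SKETCH_PTR(src),
        UMM_POISON_SKETCH_PTRSZ(poisoned));

    printf("poisoned block size: %u\n", (unsigned)poisoned);
    printf("payload bytes moved: %u\n", (unsigned)UMM_POISON_SKETCH_PTRSZ(poisoned));
    printf("payload offset     : %u\n",
        (unsigned)((uintptr_t)UMM_POISON_SKETCH_PTR(dst) - (uintptr_t)dst));
    return 0;
}

With the stand-in values above, a 32-byte request becomes a 44-byte poisoned
block, and only the 32 payload bytes at offset 8 are moved.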