Core: Add ASAN support to BLI_mempool

This patch adds address sanitizer support to memory pools.

When ASAN is enabled, the following happens:
* 32-byte red zones are inserted between pool elements.
* The BLI_mempool struct itself is marked as a red zone.
* Access to the pool goes through a thread mutex (except when compiling makesdna).

This is very useful for finding bugs in code that uses BMesh.
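
As an illustration (not part of the patch), here is a minimal sketch of the kind of bug this catches; in an ASAN build, both faulting writes below become immediate ASAN reports instead of silent corruption:

    /* Sketch only: assumes an ASAN build with this patch applied. */
    BLI_mempool *pool = BLI_mempool_create(sizeof(float[4]), 0, 512, BLI_MEMPOOL_NOP);
    float *vec = BLI_mempool_alloc(pool);
    vec[4] = 0.0f; /* Out-of-bounds write: lands in the trailing red zone. */
    BLI_mempool_free(pool, vec);
    vec[0] = 1.0f; /* Use-after-free: freed slots are poisoned again. */
    BLI_mempool_destroy(pool);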

Pull Request: #104668
Joseph Eagar 2023-04-26 01:32:41 -07:00
parent 536144983b
commit c1d4b6f339
3 changed files with 181 additions and 31 deletions

File: source/blender/blenlib/BLI_asan.h

@@ -7,8 +7,10 @@
# define __has_feature(x) 0
#endif
#if (defined(__SANITIZE_ADDRESS__) || __has_feature(address_sanitizer)) && !defined(_MSC_VER)
#if (defined(__SANITIZE_ADDRESS__) || __has_feature(address_sanitizer)) && \
(!defined(_MSC_VER) || _MSC_VER > 1929) /* MSVC 2019 and below doesn't ship ASAN headers. */
# include "sanitizer/asan_interface.h"
# define WITH_ASAN
#else
/* Ensure return value is used. Just using UNUSED_VARS results in a warning. */
# define ASAN_POISON_MEMORY_REGION(addr, size) (void)(0 && ((size) != 0 && (addr) != NULL))
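
For context, the mempool changes below use BLI_asan_poison/BLI_asan_unpoison wrappers whose definitions fall outside this hunk; a sketch of their assumed shape, reusing the same no-op fallback trick so the arguments are always "used":

    /* Assumed shape of the wrappers in BLI_asan.h (not shown in this hunk). */
    #ifdef WITH_ASAN
    #  define BLI_asan_poison(addr, size) ASAN_POISON_MEMORY_REGION(addr, size)
    #  define BLI_asan_unpoison(addr, size) ASAN_UNPOISON_MEMORY_REGION(addr, size)
    #else
    #  define BLI_asan_poison(addr, size) (void)(0 && ((size) != 0 && (addr) != NULL))
    #  define BLI_asan_unpoison(addr, size) (void)(0 && ((size) != 0 && (addr) != NULL))
    #endif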

File: source/blender/blenlib/intern/BLI_mempool.c

@@ -20,9 +20,14 @@
#include "BLI_utildefines.h"
#include "BLI_asan.h"
#include "BLI_mempool.h" /* own include */
#include "BLI_mempool_private.h" /* own include */
#ifdef WITH_ASAN
# include "BLI_threads.h"
#endif
#include "MEM_guardedalloc.h"
#include "BLI_strict_flags.h" /* keep last */
@@ -31,6 +36,12 @@
# include "valgrind/memcheck.h"
#endif
#ifdef WITH_ASAN
# define POISON_REDZONE_SIZE 32
#else
# define POISON_REDZONE_SIZE 0
#endif
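
With this, pool->esize includes the trailing red zone: the unpoison calls below expose only the usable part of a slot, while the poison calls cover the whole slot. Distilled into a hypothetical helper (mempool_elem_usable_size is not part of the patch):

    BLI_INLINE size_t mempool_elem_usable_size(const BLI_mempool *pool)
    {
      /* The usable size excludes the 32-byte red zone that esize includes. */
      return (size_t)pool->esize - POISON_REDZONE_SIZE;
    }
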
/* NOTE: copied from BLO_blend_defs.h, don't use here because we're in BLI. */
#ifdef __BIG_ENDIAN__
/* Big Endian */
@@ -95,6 +106,10 @@ typedef struct BLI_mempool_chunk {
* The mempool stores and tracks memory \a chunks and elements within those chunks \a free.
*/
struct BLI_mempool {
/* Serialize access to mempools when debugging with ASAN. */
#ifdef WITH_ASAN
ThreadMutex mutex;
#endif
/** Single linked list of allocated chunks. */
BLI_mempool_chunk *chunks;
/** Keep a pointer to the last, so we can append new chunks there
@@ -108,7 +123,6 @@ struct BLI_mempool {
/** Number of elements per chunk. */
uint pchunk;
uint flag;
/* keeps aligned to 16 bits */
/** Free element list. Interleaved into chunk data. */
BLI_freenode *free;
@@ -132,6 +146,20 @@ struct BLI_mempool {
/** Extra bytes implicitly used for every chunk alloc. */
#define CHUNK_OVERHEAD (uint)(MEM_SIZE_OVERHEAD + sizeof(BLI_mempool_chunk))
static void mempool_asan_unlock(BLI_mempool *pool)
{
#ifdef WITH_ASAN
BLI_mutex_unlock(&pool->mutex);
#endif
}
static void mempool_asan_lock(BLI_mempool *pool)
{
#ifdef WITH_ASAN
BLI_mutex_lock(&pool->mutex);
#endif
}
#ifdef USE_CHUNK_POW2
static uint power_of_2_max_u(uint x)
{
@@ -166,7 +194,7 @@ BLI_INLINE uint mempool_maxchunks(const uint elem_num, const uint pchunk)
static BLI_mempool_chunk *mempool_chunk_alloc(BLI_mempool *pool)
{
return MEM_mallocN(sizeof(BLI_mempool_chunk) + (size_t)pool->csize, "BLI_Mempool Chunk");
return MEM_mallocN(sizeof(BLI_mempool_chunk) + (size_t)pool->csize, "mempool chunk");
}
/**
@@ -206,22 +234,41 @@ static BLI_freenode *mempool_chunk_add(BLI_mempool *pool,
j = pool->pchunk;
if (pool->flag & BLI_MEMPOOL_ALLOW_ITER) {
while (j--) {
curnode->next = NODE_STEP_NEXT(curnode);
BLI_freenode *next;
BLI_asan_unpoison(curnode, pool->esize - POISON_REDZONE_SIZE);
curnode->next = next = NODE_STEP_NEXT(curnode);
curnode->freeword = FREEWORD;
curnode = curnode->next;
BLI_asan_poison(curnode, pool->esize);
curnode = next;
}
}
else {
while (j--) {
curnode->next = NODE_STEP_NEXT(curnode);
curnode = curnode->next;
BLI_freenode *next;
BLI_asan_unpoison(curnode, pool->esize - POISON_REDZONE_SIZE);
curnode->next = next = NODE_STEP_NEXT(curnode);
BLI_asan_poison(curnode, pool->esize);
curnode = next;
}
}
/* terminate the list (rewind one)
* will be overwritten if 'curnode' gets passed in again as 'last_tail' */
BLI_asan_unpoison(curnode, pool->esize - POISON_REDZONE_SIZE);
BLI_asan_poison(curnode, pool->esize);
curnode = NODE_STEP_PREV(curnode);
BLI_asan_unpoison(curnode, pool->esize - POISON_REDZONE_SIZE);
curnode->next = NULL;
BLI_asan_poison(curnode, pool->esize);
#ifdef USE_TOTALLOC
pool->totalloc += pool->pchunk;
@@ -229,24 +276,27 @@ static BLI_freenode *mempool_chunk_add(BLI_mempool *pool,
/* final pointer in the previously allocated chunk is wrong */
if (last_tail) {
BLI_asan_unpoison(last_tail, pool->esize - POISON_REDZONE_SIZE);
last_tail->next = CHUNK_DATA(mpchunk);
BLI_asan_poison(last_tail, pool->esize);
}
return curnode;
}
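
The unpoison/write/poison dance above is the recurring pattern of this patch; as a hypothetical helper (freenode_set_next is not part of the patch):

    static void freenode_set_next(BLI_mempool *pool, BLI_freenode *node, BLI_freenode *next)
    {
      /* Expose only the usable part of the slot, write the link, then
       * re-poison the whole slot including its red zone. */
      BLI_asan_unpoison(node, pool->esize - POISON_REDZONE_SIZE);
      node->next = next;
      BLI_asan_poison(node, pool->esize);
    }
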
static void mempool_chunk_free(BLI_mempool_chunk *mpchunk)
static void mempool_chunk_free(BLI_mempool_chunk *mpchunk, BLI_mempool *pool)
{
BLI_asan_unpoison(mpchunk, sizeof(BLI_mempool_chunk) + pool->esize * pool->csize);
MEM_freeN(mpchunk);
}
static void mempool_chunk_free_all(BLI_mempool_chunk *mpchunk)
static void mempool_chunk_free_all(BLI_mempool_chunk *mpchunk, BLI_mempool *pool)
{
BLI_mempool_chunk *mpchunk_next;
for (; mpchunk; mpchunk = mpchunk_next) {
mpchunk_next = mpchunk->next;
mempool_chunk_free(mpchunk);
mempool_chunk_free(mpchunk, pool);
}
}
@@ -259,6 +309,10 @@ BLI_mempool *BLI_mempool_create(uint esize, uint elem_num, uint pchunk, uint flag)
/* allocate the pool structure */
pool = MEM_mallocN(sizeof(BLI_mempool), "memory pool");
#ifdef WITH_ASAN
BLI_mutex_init(&pool->mutex);
#endif
/* set the elem size */
if (esize < (int)MEMPOOL_ELEM_SIZE_MIN) {
esize = (int)MEMPOOL_ELEM_SIZE_MIN;
@@ -268,6 +322,8 @@ BLI_mempool *BLI_mempool_create(uint esize, uint elem_num, uint pchunk, uint flag)
esize = MAX2(esize, (uint)sizeof(BLI_freenode));
}
esize += POISON_REDZONE_SIZE;
maxchunks = mempool_maxchunks(elem_num, pchunk);
pool->chunks = NULL;
@@ -328,6 +384,8 @@ void *BLI_mempool_alloc(BLI_mempool *pool)
free_pop = pool->free;
BLI_asan_unpoison(free_pop, pool->esize - POISON_REDZONE_SIZE);
BLI_assert(pool->chunk_tail->next == NULL);
if (pool->flag & BLI_MEMPOOL_ALLOW_ITER) {
@@ -347,10 +405,17 @@ void *BLI_mempool_alloc(BLI_mempool *pool)
void *BLI_mempool_calloc(BLI_mempool *pool)
{
void *retval = BLI_mempool_alloc(pool);
memset(retval, 0, (size_t)pool->esize);
memset(retval, 0, (size_t)pool->esize - POISON_REDZONE_SIZE);
return retval;
}
/**
* Free an element from the mempool.
*
* \note doesn't protect against double frees, take care!
*/
void BLI_mempool_free(BLI_mempool *pool, void *addr)
{
BLI_freenode *newhead = addr;
@@ -372,7 +437,7 @@ void BLI_mempool_free(BLI_mempool *pool, void *addr)
/* Enable for debugging. */
if (UNLIKELY(mempool_debug_memset)) {
memset(addr, 255, pool->esize);
memset(addr, 255, pool->esize - POISON_REDZONE_SIZE);
}
#endif
@@ -387,6 +452,8 @@ void BLI_mempool_free(BLI_mempool *pool, void *addr)
newhead->next = pool->free;
pool->free = newhead;
BLI_asan_poison(newhead, pool->esize);
pool->totused--;
#ifdef WITH_MEM_VALGRIND
@@ -401,7 +468,7 @@ void BLI_mempool_free(BLI_mempool *pool, void *addr)
BLI_mempool_chunk *first;
first = pool->chunks;
mempool_chunk_free_all(first->next);
mempool_chunk_free_all(first->next, pool);
first->next = NULL;
pool->chunk_tail = first;
@@ -419,11 +486,21 @@ void BLI_mempool_free(BLI_mempool *pool, void *addr)
j = pool->pchunk;
while (j--) {
curnode->next = NODE_STEP_NEXT(curnode);
curnode = curnode->next;
BLI_asan_unpoison(curnode, pool->esize - POISON_REDZONE_SIZE);
BLI_freenode *next = curnode->next = NODE_STEP_NEXT(curnode);
BLI_asan_poison(curnode, pool->esize);
curnode = next;
}
curnode = NODE_STEP_PREV(curnode);
BLI_asan_unpoison(curnode, pool->esize - POISON_REDZONE_SIZE);
BLI_freenode *prev = NODE_STEP_PREV(curnode);
BLI_asan_poison(curnode, pool->esize);
curnode = prev;
BLI_asan_unpoison(curnode, pool->esize - POISON_REDZONE_SIZE);
curnode->next = NULL; /* terminate the list */
BLI_asan_poison(curnode, pool->esize);
#ifdef WITH_MEM_VALGRIND
VALGRIND_MEMPOOL_FREE(pool, CHUNK_DATA(first));
@@ -433,14 +510,18 @@ void BLI_mempool_free(BLI_mempool *pool, void *addr)
int BLI_mempool_len(const BLI_mempool *pool)
{
return (int)pool->totused;
int ret = pool->totused;
return ret;
}
void *BLI_mempool_findelem(BLI_mempool *pool, uint index)
{
mempool_asan_lock(pool);
BLI_assert(pool->flag & BLI_MEMPOOL_ALLOW_ITER);
if (index < pool->totused) {
if (index < (uint)pool->totused) {
/* We could have some faster mem chunk stepping code inline. */
BLI_mempool_iter iter;
void *elem;
@@ -448,9 +529,12 @@ void *BLI_mempool_findelem(BLI_mempool *pool, uint index)
for (elem = BLI_mempool_iterstep(&iter); index-- != 0; elem = BLI_mempool_iterstep(&iter)) {
/* pass */
}
mempool_asan_unlock(pool);
return elem;
}
mempool_asan_unlock(pool);
return NULL;
}
@@ -459,12 +543,16 @@ void BLI_mempool_as_table(BLI_mempool *pool, void **data)
BLI_mempool_iter iter;
void *elem;
void **p = data;
BLI_assert(pool->flag & BLI_MEMPOOL_ALLOW_ITER);
BLI_mempool_iternew(pool, &iter);
while ((elem = BLI_mempool_iterstep(&iter))) {
*p++ = elem;
}
BLI_assert((uint)(p - data) == pool->totused);
BLI_assert((int)(p - data) == pool->totused);
}
void **BLI_mempool_as_tableN(BLI_mempool *pool, const char *allocstr)
@@ -476,21 +564,24 @@ void **BLI_mempool_as_tableN(BLI_mempool *pool, const char *allocstr)
void BLI_mempool_as_array(BLI_mempool *pool, void *data)
{
const uint esize = pool->esize;
const uint esize = pool->esize - (uint)POISON_REDZONE_SIZE;
BLI_mempool_iter iter;
char *elem, *p = data;
BLI_assert(pool->flag & BLI_MEMPOOL_ALLOW_ITER);
mempool_asan_lock(pool);
BLI_mempool_iternew(pool, &iter);
while ((elem = BLI_mempool_iterstep(&iter))) {
memcpy(p, elem, (size_t)esize);
p = NODE_STEP_NEXT(p);
}
BLI_assert((uint)(p - (char *)data) == pool->totused * esize);
mempool_asan_unlock(pool);
}
void *BLI_mempool_as_arrayN(BLI_mempool *pool, const char *allocstr)
{
char *data = MEM_malloc_arrayN(pool->totused, pool->esize, allocstr);
char *data = MEM_malloc_arrayN((size_t)pool->totused, pool->esize, allocstr);
BLI_mempool_as_array(pool, data);
return data;
}
@@ -586,6 +677,8 @@ void *BLI_mempool_iterstep(BLI_mempool_iter *iter)
do {
ret = curnode;
BLI_asan_unpoison(ret, iter->pool->esize - POISON_REDZONE_SIZE);
if (++iter->curindex != iter->pool->pchunk) {
curnode = POINTER_OFFSET(curnode, esize);
}
@@ -593,7 +686,14 @@ void *BLI_mempool_iterstep(BLI_mempool_iter *iter)
iter->curindex = 0;
iter->curchunk = iter->curchunk->next;
if (UNLIKELY(iter->curchunk == NULL)) {
return (ret->freeword == FREEWORD) ? NULL : ret;
BLI_asan_unpoison(ret, iter->pool->esize - POISON_REDZONE_SIZE);
void *ret2 = (ret->freeword == FREEWORD) ? NULL : ret;
if (ret->freeword == FREEWORD) {
BLI_asan_poison(ret, iter->pool->esize);
}
return ret2;
}
curnode = CHUNK_DATA(iter->curchunk);
}
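
The public iteration API is unchanged by all of this; a typical caller still looks like the following sketch (example_iterate is hypothetical, and the pool must be created with BLI_MEMPOOL_ALLOW_ITER):

    static void example_iterate(BLI_mempool *pool)
    {
      BLI_mempool_iter iter;
      BLI_mempool_iternew(pool, &iter);
      for (void *elem = BLI_mempool_iterstep(&iter); elem != NULL;
           elem = BLI_mempool_iterstep(&iter)) {
        /* `elem` is unpoisoned while the iterator returns it. */
      }
    }
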
@@ -609,12 +709,16 @@ void *mempool_iter_threadsafe_step(BLI_mempool_threadsafe_iter *ts_iter)
return NULL;
}
mempool_asan_lock(iter->pool);
const uint esize = iter->pool->esize;
BLI_freenode *curnode = POINTER_OFFSET(CHUNK_DATA(iter->curchunk), (esize * iter->curindex));
BLI_freenode *ret;
do {
ret = curnode;
BLI_asan_unpoison(ret, esize - POISON_REDZONE_SIZE);
if (++iter->curindex != iter->pool->pchunk) {
curnode = POINTER_OFFSET(curnode, esize);
}
@@ -630,24 +734,49 @@ void *mempool_iter_threadsafe_step(BLI_mempool_threadsafe_iter *ts_iter)
/* pass. */
}
if (UNLIKELY(iter->curchunk == NULL)) {
return (ret->freeword == FREEWORD) ? NULL : ret;
if (ret->freeword == FREEWORD) {
BLI_asan_poison(ret, esize);
mempool_asan_unlock(iter->pool);
return NULL;
}
else {
mempool_asan_unlock(iter->pool);
return ret;
}
}
/* End `threadsafe` exception. */
iter->curchunk = iter->curchunk->next;
if (UNLIKELY(iter->curchunk == NULL)) {
return (ret->freeword == FREEWORD) ? NULL : ret;
if (ret->freeword == FREEWORD) {
BLI_asan_poison(ret, iter->pool->esize);
mempool_asan_unlock(iter->pool);
return NULL;
}
else {
mempool_asan_unlock(iter->pool);
return ret;
}
}
curnode = CHUNK_DATA(iter->curchunk);
}
} while (ret->freeword == FREEWORD);
if (ret->freeword == FREEWORD) {
BLI_asan_poison(ret, iter->pool->esize);
}
else {
break;
}
} while (true);
mempool_asan_unlock(iter->pool);
return ret;
}
#endif
void BLI_mempool_clear_ex(BLI_mempool *pool, const int totelem_reserve)
void BLI_mempool_clear_ex(BLI_mempool *pool, const int elem_num_reserve)
{
BLI_mempool_chunk *mpchunk;
BLI_mempool_chunk *mpchunk_next;
@@ -661,11 +790,11 @@ void BLI_mempool_clear_ex(BLI_mempool *pool, const int totelem_reserve)
VALGRIND_CREATE_MEMPOOL(pool, 0, false);
#endif
if (totelem_reserve == -1) {
if (elem_num_reserve == -1) {
maxchunks = pool->maxchunks;
}
else {
maxchunks = mempool_maxchunks((uint)totelem_reserve, pool->pchunk);
maxchunks = mempool_maxchunks((uint)elem_num_reserve, pool->pchunk);
}
/* Free all after 'pool->maxchunks'. */
@@ -678,7 +807,7 @@ void BLI_mempool_clear_ex(BLI_mempool *pool, const int totelem_reserve)
do {
mpchunk_next = mpchunk->next;
mempool_chunk_free(mpchunk);
mempool_chunk_free(mpchunk, pool);
} while ((mpchunk = mpchunk_next));
}
@@ -706,7 +835,7 @@ void BLI_mempool_clear(BLI_mempool *pool)
void BLI_mempool_destroy(BLI_mempool *pool)
{
mempool_chunk_free_all(pool->chunks);
mempool_chunk_free_all(pool->chunks, pool);
#ifdef WITH_MEM_VALGRIND
VALGRIND_DESTROY_MEMPOOL(pool);

File: source/blender/makesdna/intern/CMakeLists.txt

@@ -60,6 +60,17 @@ set(SRC
${dna_header_string_file}
)
# BLI_mempool.c needs these files for ThreadMutex when
# compiling in address sanitizer mode.
if(WITH_COMPILER_ASAN)
list(APPEND SRC
../../blenlib/intern/gsqueue.c
../../blenlib/intern/listbase.cc
../../blenlib/intern/threads.cc
../../blenlib/intern/time.c
)
endif()
# SRC_DNA_INC is defined in the parent dir
add_cc_flags_custom_test(makesdna)
@@ -146,4 +157,12 @@ set(SRC
set(LIB
)
if(WITH_COMPILER_ASAN)
list(APPEND SRC
../../blenlib/intern/gsqueue.c
../../blenlib/intern/threads.cc
../../blenlib/intern/time.c
)
endif()
blender_add_lib(bf_dna_blenlib "${SRC}" "${INC}" "${INC_SYS}" "${LIB}")