Metal: Resolve Race Condition in Memory Manager

Fix a race condition where several competing threads insert Metal
buffers into the MTLSafeFreeList simultaneously while a new list
chunk is being created.

Also raise the size limit of an MTLSafeFreeList chunk
(MAX_NUM_BUFFERS_: 1024 -> 8192) to optimize for interactivity when
releasing large amounts of memory simultaneously.
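
For context on the hazard: the old path incremented a separate flag
(has_next_pool_) before publishing the chunk pointer (next_), so a
concurrent reader could observe the flag as set while the pointer was
still null. A minimal standalone sketch of that interleaving follows;
the names and types are illustrative, not the Blender code.

#include <atomic>
#include <thread>

/* Illustrative stand-ins for has_next_pool_ and next_. */
static std::atomic<int> has_next{0};
static std::atomic<int *> next_ptr{nullptr};

static void writer()
{
  /* Old scheme: the flag becomes visible first... */
  if (has_next.fetch_add(1) == 0) {
    /* ...and only afterwards is the pointer published. */
    next_ptr.store(new int(42));
  }
}

static void reader()
{
  if (has_next.load() > 0) {
    /* A reader can land here while next_ptr is still null, which is
     * exactly when the old BLI_assert(next_pool != nullptr) could fire. */
    int *p = next_ptr.load();
    (void)p;
  }
}

int main()
{
  std::thread a(writer);
  std::thread b(reader);
  a.join();
  b.join();
  delete next_ptr.load();
}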

Authored by Apple: Michael Parkin-White

Pull Request: https://projects.blender.org/blender/blender/pulls/105254
Jason Fielder 2023-03-16 08:25:15 +01:00 committed by Jeroen Bakker
parent d3409f2159
commit 7bdd82eca0
2 changed files with 23 additions and 19 deletions

source/blender/gpu/metal/mtl_memory.hh

@@ -287,17 +287,17 @@ class MTLSafeFreeList {
   std::atomic<bool> in_free_queue_;
   std::atomic<bool> referenced_by_workload_;
   std::recursive_mutex lock_;

   /* Linked list of next MTLSafeFreeList chunk if current chunk is full. */
-  std::atomic<int> has_next_pool_;
   std::atomic<MTLSafeFreeList *> next_;
   /* Lockless list. MAX_NUM_BUFFERS_ within a chunk based on considerations
-   * for performance and memory.
+   * for performance and memory. Higher chunk counts are preferable for efficiently
+   * performing block operations such as copying several objects simultaneously.
    *
    * MIN_BUFFER_FLUSH_COUNT refers to the minimum count of buffers in the MTLSafeFreeList
    * before buffers are returned to global memory pool. This is set at a point to reduce
    * overhead of small pool flushes, while ensuring floating memory overhead is not excessive. */
-  static const int MAX_NUM_BUFFERS_ = 1024;
+  static const int MAX_NUM_BUFFERS_ = 8192;
   static const int MIN_BUFFER_FLUSH_COUNT = 120;
   std::atomic<int> current_list_index_;
   gpu::MTLBuffer *safe_free_pool_[MAX_NUM_BUFFERS_];
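
As background for the "lockless list" comment above: writers claim a
slot with an atomic fetch-add on current_list_index_, and any index at
or past MAX_NUM_BUFFERS_ means the chunk is full and the insert must
spill into the next chunk. A minimal sketch of that slot-claiming
pattern, with hypothetical names rather than the Blender implementation:

#include <atomic>

template<typename T, int Capacity> struct FreeChunk {
  std::atomic<int> index{0};
  T *slots[Capacity] = {};

  /* Each caller atomically claims a unique slot. Returning false means
   * the chunk overflowed and the item belongs in the next chunk. */
  bool try_insert(T *item)
  {
    int slot = index.fetch_add(1);
    if (slot >= Capacity) {
      return false;
    }
    slots[slot] = item;
    return true;
  }
};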
@@ -305,8 +305,8 @@ class MTLSafeFreeList {
  public:
   MTLSafeFreeList();

-  /* Add buffer to Safe Free List, can be called from secondary threads.
-   * Performs a lockless list insert. */
+  /* Can be used from multiple threads. Performs insertion into Safe Free List with the least
+   * amount of threading synchronization. */
   void insert_buffer(gpu::MTLBuffer *buffer);

   /* Whether we need to start a new safe free list, or can carry on using the existing one. */
@@ -321,10 +321,11 @@ class MTLSafeFreeList {
   void flag_in_queue()
   {
     in_free_queue_ = true;
-    if (has_next_pool_) {
+    if (current_list_index_ >= MTLSafeFreeList::MAX_NUM_BUFFERS_) {
       MTLSafeFreeList *next_pool = next_.load();
-      BLI_assert(next_pool != nullptr);
-      next_pool->flag_in_queue();
+      if (next_pool) {
+        next_pool->flag_in_queue();
+      }
     }
   }
 };

source/blender/gpu/metal/mtl_memory.mm

@@ -257,10 +257,7 @@ void MTLBufferPool::update_memory_pools()
     }

     /* Fetch next MTLSafeFreeList chunk, if any. */
-    MTLSafeFreeList *next_list = nullptr;
-    if (current_pool->has_next_pool_ > 0) {
-      next_list = current_pool->next_.load();
-    }
+    MTLSafeFreeList *next_list = current_pool->next_.load();

     /* Delete current MTLSafeFreeList */
     current_pool->lock_.unlock();
@@ -396,7 +393,6 @@ MTLSafeFreeList::MTLSafeFreeList()
   in_free_queue_ = false;
   current_list_index_ = 0;
   next_ = nullptr;
-  has_next_pool_ = 0;
 }

 void MTLSafeFreeList::insert_buffer(gpu::MTLBuffer *buffer)
@@ -410,12 +406,19 @@ void MTLSafeFreeList::insert_buffer(gpu::MTLBuffer *buffer)
    * insert the buffer into the next available chunk. */
   if (insert_index >= MTLSafeFreeList::MAX_NUM_BUFFERS_) {
-    /* Check if first caller to generate next pool. */
-    int has_next = has_next_pool_++;
-    if (has_next == 0) {
-      next_ = new MTLSafeFreeList();
-    }
+    /* Check if first caller to generate next pool in chain.
+     * Otherwise, ensure pool exists or wait for first caller to create next pool. */
+    MTLSafeFreeList *next_list = next_.load();
+    if (!next_list) {
+      std::unique_lock lock(lock_);
+      next_list = next_.load();
+      if (!next_list) {
+        next_list = new MTLSafeFreeList();
+        next_.store(next_list);
+      }
+    }
+    BLI_assert(next_list);
+    next_list->insert_buffer(buffer);
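
The new insert path above is a double-checked locking publication: a
lock-free fast path loads next_, and only a thread that finds it null
takes lock_ and re-checks before allocating, so exactly one chunk is
created under contention and readers see either null or a fully
constructed chunk. A standalone sketch of the same pattern, using
illustrative types rather than the Blender code:

#include <atomic>
#include <cassert>
#include <mutex>
#include <thread>
#include <vector>

struct Chunk {
  std::atomic<Chunk *> next{nullptr};
  std::mutex lock;

  /* Returns the next chunk, creating it exactly once even when many
   * threads race past the capacity check simultaneously. */
  Chunk *get_or_create_next()
  {
    Chunk *n = next.load();
    if (!n) {
      std::lock_guard<std::mutex> guard(lock);
      n = next.load(); /* Re-check under the lock. */
      if (!n) {
        n = new Chunk();
        next.store(n);
      }
    }
    assert(n != nullptr);
    return n;
  }
};

int main()
{
  Chunk head;
  std::vector<std::thread> threads;
  for (int i = 0; i < 8; i++) {
    threads.emplace_back([&head] { head.get_or_create_next(); });
  }
  for (std::thread &t : threads) {
    t.join();
  }
  delete head.next.load();
}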