From 2540741dee789f752687198c6f272a995d45073e Mon Sep 17 00:00:00 2001
From: Mai Lavelle
Date: Wed, 23 Aug 2017 00:40:04 -0400
Subject: [PATCH] Fix implementation of atomic update max and move to a central location

While unlikely to have had any serious effects because of its limited use,
the previous implementation was not actually atomic: the current value was
read only once, before the loop, rather than on every iteration (a data
race), and the CAS loop was miscoded to break out when the CAS failed
instead of retrying, so a concurrent update could cause a new maximum to be
lost. We also had duplicates of this code in a few places; it has now been
moved to a single location with all the other atomic operations.
---
 intern/atomic/atomic_ops.h                         |  1 +
 intern/atomic/intern/atomic_ops_ext.h              | 11 +++++++++++
 intern/cycles/util/util_atomic.h                   | 10 ----------
 intern/cycles/util/util_stats.h                    |  2 +-
 intern/guardedalloc/intern/mallocn_lockfree_impl.c |  7 +------
 5 files changed, 14 insertions(+), 17 deletions(-)

diff --git a/intern/atomic/atomic_ops.h b/intern/atomic/atomic_ops.h
index 1e9528f9ed9..72813c39ac2 100644
--- a/intern/atomic/atomic_ops.h
+++ b/intern/atomic/atomic_ops.h
@@ -100,6 +100,7 @@ ATOMIC_INLINE size_t atomic_sub_and_fetch_z(size_t *p, size_t x);
 ATOMIC_INLINE size_t atomic_fetch_and_add_z(size_t *p, size_t x);
 ATOMIC_INLINE size_t atomic_fetch_and_sub_z(size_t *p, size_t x);
 ATOMIC_INLINE size_t atomic_cas_z(size_t *v, size_t old, size_t _new);
+ATOMIC_INLINE size_t atomic_fetch_and_update_max_z(size_t *p, size_t x); /* Uses CAS loop, see warning below. */
 
 ATOMIC_INLINE unsigned int atomic_add_and_fetch_u(unsigned int *p, unsigned int x);
 ATOMIC_INLINE unsigned int atomic_sub_and_fetch_u(unsigned int *p, unsigned int x);
diff --git a/intern/atomic/intern/atomic_ops_ext.h b/intern/atomic/intern/atomic_ops_ext.h
index b72c94563fc..8d5f2e5dad7 100644
--- a/intern/atomic/intern/atomic_ops_ext.h
+++ b/intern/atomic/intern/atomic_ops_ext.h
@@ -111,6 +111,17 @@ ATOMIC_INLINE size_t atomic_cas_z(size_t *v, size_t old, size_t _new)
 #endif
 }
 
+ATOMIC_INLINE size_t atomic_fetch_and_update_max_z(size_t *p, size_t x)
+{
+	size_t prev_value;
+	while((prev_value = *p) < x) {
+		if(atomic_cas_z(p, prev_value, x) == prev_value) {
+			break;
+		}
+	}
+	return prev_value;
+}
+
 /******************************************************************************/
 /* unsigned operations. */
 ATOMIC_INLINE unsigned int atomic_add_and_fetch_u(unsigned int *p, unsigned int x)
diff --git a/intern/cycles/util/util_atomic.h b/intern/cycles/util/util_atomic.h
index 643af87a65f..f3c7ae546a0 100644
--- a/intern/cycles/util/util_atomic.h
+++ b/intern/cycles/util/util_atomic.h
@@ -22,16 +22,6 @@
 /* Using atomic ops header from Blender. */
 #include "atomic_ops.h"
 
-ATOMIC_INLINE void atomic_update_max_z(size_t *maximum_value, size_t value)
-{
-	size_t prev_value = *maximum_value;
-	while(prev_value < value) {
-		if(atomic_cas_z(maximum_value, prev_value, value) != prev_value) {
-			break;
-		}
-	}
-}
-
 #define atomic_add_and_fetch_float(p, x) atomic_add_and_fetch_fl((p), (x))
 
 #define atomic_fetch_and_inc_uint32(p) atomic_fetch_and_add_uint32((p), 1)
diff --git a/intern/cycles/util/util_stats.h b/intern/cycles/util/util_stats.h
index baba549753d..7667f58eb7d 100644
--- a/intern/cycles/util/util_stats.h
+++ b/intern/cycles/util/util_stats.h
@@ -30,7 +30,7 @@ public:
 
 	void mem_alloc(size_t size) {
 		atomic_add_and_fetch_z(&mem_used, size);
-		atomic_update_max_z(&mem_peak, mem_used);
+		atomic_fetch_and_update_max_z(&mem_peak, mem_used);
 	}
 
 	void mem_free(size_t size) {
diff --git a/intern/guardedalloc/intern/mallocn_lockfree_impl.c b/intern/guardedalloc/intern/mallocn_lockfree_impl.c
index b4838cdca18..66573b91ace 100644
--- a/intern/guardedalloc/intern/mallocn_lockfree_impl.c
+++ b/intern/guardedalloc/intern/mallocn_lockfree_impl.c
@@ -76,12 +76,7 @@ enum {
 MEM_INLINE void update_maximum(size_t *maximum_value, size_t value)
 {
 #ifdef USE_ATOMIC_MAX
-	size_t prev_value = *maximum_value;
-	while (prev_value < value) {
-		if (atomic_cas_z(maximum_value, prev_value, value) != prev_value) {
-			break;
-		}
-	}
+	atomic_fetch_and_update_max_z(maximum_value, value);
 #else
 	*maximum_value = value > *maximum_value ? value : *maximum_value;
 #endif
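
As a self-contained illustration of the CAS-retry pattern the patch adopts,
here is a minimal sketch in portable C11. This is not Blender code: the
names fetch_and_update_max and worker are invented for the example, and it
assumes a toolchain that provides C11 <stdatomic.h> and the optional
<threads.h>. Like the fixed atomic_fetch_and_update_max_z, it re-reads the
destination on every retry, so a concurrent update cannot be silently lost:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <threads.h>

static atomic_size_t peak;

/* Same CAS-retry shape as the patched helper: keep retrying until the
 * stored value is already >= x, or until our compare-and-swap installs x. */
static size_t fetch_and_update_max(atomic_size_t *p, size_t x)
{
	size_t prev = atomic_load(p);
	while (prev < x) {
		/* On failure (including spurious failure), the weak CAS
		 * reloads *p into prev, so each retry sees a fresh value. */
		if (atomic_compare_exchange_weak(p, &prev, x)) {
			break;
		}
	}
	return prev;
}

static int worker(void *arg)
{
	fetch_and_update_max(&peak, (size_t)(uintptr_t)arg);
	return 0;
}

int main(void)
{
	thrd_t threads[8];
	for (uintptr_t i = 0; i < 8; i++) {
		thrd_create(&threads[i], worker, (void *)(i * 1000));
	}
	for (int i = 0; i < 8; i++) {
		thrd_join(threads[i], NULL);
	}
	/* Prints peak = 7000 no matter how the threads interleave. */
	printf("peak = %zu\n", atomic_load(&peak));
	return 0;
}

The detail the patch fixes is visible in atomic_compare_exchange_weak: on
failure it writes the freshly observed value back into prev, which is
exactly the per-iteration re-read the old implementation lacked, and the
loop only exits once the stored value is known to be at least x.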