Atomics: Add some extra utility functions

Also fixed the semantics of fetch-and-add in the assembler implementation,
which did not match the naming.
Sergey Sharybin 2016-11-15 13:41:08 +01:00
parent 4ee08e9533
commit a284d04093
4 changed files with 82 additions and 2 deletions
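A minimal sketch of the naming convention the message refers to, using the GCC __sync builtins that the Unix implementation wraps (not part of the commit itself): fetch_and_add returns the value held before the addition, add_and_fetch returns the value after it.

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t a = 10, b = 10;
	/* fetch_and_add: returns the old value; a becomes 15. */
	uint64_t old_value = __sync_fetch_and_add(&a, 5);
	/* add_and_fetch: returns the new value; b becomes 15. */
	uint64_t new_value = __sync_add_and_fetch(&b, 5);
	assert(old_value == 10 && a == 15);
	assert(new_value == 15 && b == 15);
	return 0;
}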

View File

@@ -79,6 +79,8 @@
 #if (LG_SIZEOF_PTR == 8 || LG_SIZEOF_INT == 8)
 ATOMIC_INLINE uint64_t atomic_add_and_fetch_uint64(uint64_t *p, uint64_t x);
 ATOMIC_INLINE uint64_t atomic_sub_and_fetch_uint64(uint64_t *p, uint64_t x);
+ATOMIC_INLINE uint64_t atomic_fetch_and_add_uint64(uint64_t *p, uint64_t x);
+ATOMIC_INLINE uint64_t atomic_fetch_and_sub_uint64(uint64_t *p, uint64_t x);
 ATOMIC_INLINE uint64_t atomic_cas_uint64(uint64_t *v, uint64_t old, uint64_t _new);
 #endif
@@ -95,10 +97,14 @@ ATOMIC_INLINE uint8_t atomic_fetch_and_and_uint8(uint8_t *p, uint8_t b);
 ATOMIC_INLINE size_t atomic_add_and_fetch_z(size_t *p, size_t x);
 ATOMIC_INLINE size_t atomic_sub_and_fetch_z(size_t *p, size_t x);
+ATOMIC_INLINE size_t atomic_fetch_and_add_z(size_t *p, size_t x);
+ATOMIC_INLINE size_t atomic_fetch_and_sub_z(size_t *p, size_t x);
 ATOMIC_INLINE size_t atomic_cas_z(size_t *v, size_t old, size_t _new);
 ATOMIC_INLINE unsigned atomic_add_and_fetch_u(unsigned *p, unsigned x);
 ATOMIC_INLINE unsigned atomic_sub_and_fetch_u(unsigned *p, unsigned x);
+ATOMIC_INLINE unsigned atomic_fetch_and_add_u(unsigned *p, unsigned x);
+ATOMIC_INLINE unsigned atomic_fetch_and_sub_u(unsigned *p, unsigned x);
 ATOMIC_INLINE unsigned atomic_cas_u(unsigned *v, unsigned old, unsigned _new);
 /* WARNING! Float 'atomics' are really faked ones, those are actually closer to some kind of spinlock-sync'ed operation,
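A hypothetical usage sketch, not part of the commit: because the new fetch-and-add variants return the value held before the addition, they can hand out non-overlapping index ranges to concurrent callers. The names reserve_and_fill, next_index and buffer below are illustrative only, and the snippet assumes the atomic header above is included.

/* Hypothetical usage sketch: atomic_fetch_and_add_z returns the previous
 * counter value, so each caller gets the start of its own private range. */
static size_t next_index = 0;  /* shared between worker threads */

static void reserve_and_fill(float *buffer, size_t count)
{
	/* The value before the addition marks the start of our range. */
	size_t start = atomic_fetch_and_add_z(&next_index, count);
	for (size_t i = 0; i < count; i++) {
		buffer[start + i] = 0.0f;
	}
}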

View File

@@ -78,6 +78,28 @@ ATOMIC_INLINE size_t atomic_sub_and_fetch_z(size_t *p, size_t x)
 #endif
 }
+
+ATOMIC_INLINE size_t atomic_fetch_and_add_z(size_t *p, size_t x)
+{
+	assert(sizeof(size_t) == LG_SIZEOF_PTR);
+
+#if (LG_SIZEOF_PTR == 8)
+	return (size_t)atomic_fetch_and_add_uint64((uint64_t *)p, (uint64_t)x);
+#elif (LG_SIZEOF_PTR == 4)
+	return (size_t)atomic_fetch_and_add_uint32((uint32_t *)p, (uint32_t)x);
+#endif
+}
+
+ATOMIC_INLINE size_t atomic_fetch_and_sub_z(size_t *p, size_t x)
+{
+	assert(sizeof(size_t) == LG_SIZEOF_PTR);
+
+#if (LG_SIZEOF_PTR == 8)
+	return (size_t)atomic_fetch_and_add_uint64((uint64_t *)p, (uint64_t)-((int64_t)x));
+#elif (LG_SIZEOF_PTR == 4)
+	return (size_t)atomic_fetch_and_add_uint32((uint32_t *)p, (uint32_t)-((int32_t)x));
+#endif
+}
+
 ATOMIC_INLINE size_t atomic_cas_z(size_t *v, size_t old, size_t _new)
 {
 	assert(sizeof(size_t) == LG_SIZEOF_PTR);
@@ -113,6 +135,28 @@ ATOMIC_INLINE unsigned atomic_sub_and_fetch_u(unsigned *p, unsigned x)
 #endif
 }
+
+ATOMIC_INLINE unsigned atomic_fetch_and_add_u(unsigned *p, unsigned x)
+{
+	assert(sizeof(unsigned) == LG_SIZEOF_INT);
+
+#if (LG_SIZEOF_INT == 8)
+	return (unsigned)atomic_fetch_and_add_uint64((uint64_t *)p, (uint64_t)x);
+#elif (LG_SIZEOF_INT == 4)
+	return (unsigned)atomic_fetch_and_add_uint32((uint32_t *)p, (uint32_t)x);
+#endif
+}
+
+ATOMIC_INLINE unsigned atomic_fetch_and_sub_u(unsigned *p, unsigned x)
+{
+	assert(sizeof(unsigned) == LG_SIZEOF_INT);
+
+#if (LG_SIZEOF_INT == 8)
+	return (unsigned)atomic_fetch_and_add_uint64((uint64_t *)p, (uint64_t)-((int64_t)x));
+#elif (LG_SIZEOF_INT == 4)
+	return (unsigned)atomic_fetch_and_add_uint32((uint32_t *)p, (uint32_t)-((int32_t)x));
+#endif
+}
+
 ATOMIC_INLINE unsigned atomic_cas_u(unsigned *v, unsigned old, unsigned _new)
 {
 	assert(sizeof(unsigned) == LG_SIZEOF_INT);
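The fetch_and_sub wrappers above reuse fetch_and_add with the negated operand; this relies on unsigned wrap-around, where adding the two's-complement encoding of x is equivalent to subtracting x. A standalone check of that identity (plain C, independent of the atomics library):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t v = 7;
	const uint32_t x = 3;
	/* (uint32_t)-((int32_t)x) is the two's-complement encoding of -x,
	 * so unsigned addition wraps around to v - x. */
	v += (uint32_t)-((int32_t)x);
	assert(v == 4);
	return 0;
}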

View File

@@ -57,6 +57,16 @@ ATOMIC_INLINE uint64_t atomic_cas_uint64(uint64_t *v, uint64_t old, uint64_t _ne
 {
 	return InterlockedCompareExchange64((int64_t *)v, _new, old);
 }
+
+ATOMIC_INLINE uint64_t atomic_fetch_and_add_uint64(uint64_t *p, uint64_t x)
+{
+	return InterlockedExchangeAdd64((int64_t *)p, (int64_t)x);
+}
+
+ATOMIC_INLINE uint64_t atomic_fetch_and_sub_uint64(uint64_t *p, uint64_t x)
+{
+	return InterlockedExchangeAdd64((int64_t *)p, -((int64_t)x));
+}
+
 #endif
 /******************************************************************************/
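For reference, InterlockedExchangeAdd64 performs the addition atomically and returns the initial (pre-addition) value of the addend, so it already has fetch-and-add semantics; the subtraction variant simply passes the negated operand, mirroring the wrappers in the portable layer above.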

View File

@@ -68,12 +68,22 @@ ATOMIC_INLINE uint64_t atomic_sub_and_fetch_uint64(uint64_t *p, uint64_t x)
 	return __sync_sub_and_fetch(p, x);
 }
+
+ATOMIC_INLINE uint64_t atomic_fetch_and_add_uint64(uint64_t *p, uint64_t x)
+{
+	return __sync_fetch_and_add(p, x);
+}
+
+ATOMIC_INLINE uint64_t atomic_fetch_and_sub_uint64(uint64_t *p, uint64_t x)
+{
+	return __sync_fetch_and_sub(p, x);
+}
+
 ATOMIC_INLINE uint64_t atomic_cas_uint64(uint64_t *v, uint64_t old, uint64_t _new)
 {
 	return __sync_val_compare_and_swap(v, old, _new);
 }
 # elif (defined(__amd64__) || defined(__x86_64__))
-ATOMIC_INLINE uint64_t atomic_add_and_fetch_uint64(uint64_t *p, uint64_t x)
+ATOMIC_INLINE uint64_t atomic_fetch_and_add_uint64(uint64_t *p, uint64_t x)
 {
 	asm volatile (
 	    "lock; xaddq %0, %1;"
@@ -83,7 +93,7 @@ ATOMIC_INLINE uint64_t atomic_add_and_fetch_uint64(uint64_t *p, uint64_t x)
 	return x;
 }
-ATOMIC_INLINE uint64_t atomic_sub_and_fetch_uint64(uint64_t *p, uint64_t x)
+ATOMIC_INLINE uint64_t atomic_fetch_and_sub_uint64(uint64_t *p, uint64_t x)
 {
 	x = (uint64_t)(-(int64_t)x);
 	asm volatile (
@@ -94,6 +104,16 @@ ATOMIC_INLINE uint64_t atomic_sub_and_fetch_uint64(uint64_t *p, uint64_t x)
 	return x;
 }
+
+ATOMIC_INLINE uint64_t atomic_add_and_fetch_uint64(uint64_t *p, uint64_t x)
+{
+	return atomic_fetch_and_add_uint64(p, x) + x;
+}
+
+ATOMIC_INLINE uint64_t atomic_sub_and_fetch_uint64(uint64_t *p, uint64_t x)
+{
+	return atomic_fetch_and_sub_uint64(p, x) - x;
+}
+
 ATOMIC_INLINE uint64_t atomic_cas_uint64(uint64_t *v, uint64_t old, uint64_t _new)
 {
 	uint64_t ret;
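Since xaddq exchanges its operands before adding, the source register ends up holding the value the destination had before the addition, which is exactly fetch-and-add; the add-and-fetch variants are then recovered by re-applying the operand to the returned value. A small sanity check of that identity, written against the __sync builtin rather than the assembly (a sketch, not the library's own tests):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t v = 100;
	const uint64_t x = 25;
	/* add_and_fetch(v, x) == fetch_and_add(v, x) + x */
	uint64_t new_value = __sync_fetch_and_add(&v, x) + x;
	assert(new_value == 125 && v == 125);
	return 0;
}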