diff --git a/rtcore.h b/rtcore.h
index 5ee11cb..607f9ef 100644
--- a/rtcore.h
+++ b/rtcore.h
@@ -297,24 +297,44 @@ RTC_API b32 WriteEntireFile(s8 path, byte *data, isize length);
 #if !defined(RTC_NO_ATOMICS)
 #if defined(__GNUC__) || defined(__clang__)
     /* Atomic add */
-    #define AtomicAdd32(_addend, _val) __atomic_add_fetch((i32 *)_addend, _val, __ATOMIC_SEQ_CST)
-    #define AtomicAdd64(_addend, _val) __atomic_add_fetch((i64 *)_addend, _val, __ATOMIC_SEQ_CST)
-    #define AtomicInc32(_addend) AtomicAdd32(_addend, 1)
-    #define AtomicInc64(_addend) AtomicAdd64(_addend, 1)
+    #define AtomicFetchAdd32(_addend, _val) __atomic_fetch_add((i32 *)_addend, _val, __ATOMIC_SEQ_CST)
+    #define AtomicFetchAdd64(_addend, _val) __atomic_fetch_add((i64 *)_addend, _val, __ATOMIC_SEQ_CST)
+    #define AtomicInc32(_addend) __atomic_add_fetch((i32 *)_addend, 1, __ATOMIC_SEQ_CST)
+    #define AtomicInc64(_addend) __atomic_add_fetch((i64 *)_addend, 1, __ATOMIC_SEQ_CST)
     #define AtomicStore(_ptr, _val) __atomic_store_n(_ptr, _val, __ATOMIC_SEQ_CST)
     #define AtomicStoreRelease(_ptr, _val) __atomic_store_n(_ptr, _val, __ATOMIC_RELEASE)
     #define AtomicLoad(_ptr) __atomic_load_n(_ptr, __ATOMIC_SEQ_CST)
     #define AtomicLoadAcquire(_ptr) __atomic_load_n(_ptr, __ATOMIC_ACQUIRE)
+/* Returns non-zero iff *ptr matched expected and was replaced by desired.
+   NOTE: third parameter is named "desired" (not "new") so this header stays includable from C++. */
+static force_inline b32
+AtomicCompareExchange32(volatile u32 *ptr, u32 expected, u32 desired)
+{
+    u32 e = expected;
+    return (b32)__atomic_compare_exchange_n(ptr, &e, desired, 0, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
+}
+static force_inline b32
+AtomicCompareExchange64(volatile u64 *ptr, u64 expected, u64 desired)
+{
+    return (b32)__atomic_compare_exchange_n(ptr, &expected, desired, 0, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
+}
 #elif defined(_MSC_VER)
-    #define AtomicAdd32(_addend, _val) _InterlockedExchangeAdd((volatile long *)_addend, _val)
-    #define AtomicAdd64(_addend, _val) _InterlockedExchangeAdd64((volatile __int64 *)_addend, _val)
-    #define AtomicInc32(_addend) AtomicAdd32(_addend, 1)
-    #define AtomicInc64(_addend) AtomicAdd64(_addend, 1)
+    #define AtomicFetchAdd32(_addend, _val) _InterlockedExchangeAdd((volatile long *)_addend, _val)
+    #define AtomicFetchAdd64(_addend, _val) _InterlockedExchangeAdd64((volatile __int64 *)_addend, _val)
+    #define AtomicInc32(_addend) _InterlockedIncrement((volatile long *)_addend)
+    #define AtomicInc64(_addend) _InterlockedIncrement64((volatile __int64 *)_addend)
     #define AtomicStore(_ptr, _val) _InterlockedExchange((volatile long *)_ptr, _val)
     #define AtomicStoreRelease(_ptr, _val) _InterlockedExchange_HLERelease(_ptr, _val)
     #define AtomicLoad(_ptr) _InterlockedOr(_ptr, 0)
     #define AtomicLoadAcquire(_ptr) _InterlockedOr_HLEAcquire(_ptr, 0)
+    /* MSVC interlocked intrinsics return the PREVIOUS value, not a success
+       flag; compare against _expected so these yield b32 like the GCC path. */
+    #define AtomicCompareExchange32(_ptr, _expected, _new) \
+        ((b32)(_InterlockedCompareExchange((volatile long *)_ptr, _new, _expected) == (long)(_expected)))
+    #define AtomicCompareExchange64(_ptr, _expected, _new) \
+        ((b32)(_InterlockedCompareExchange64((volatile __int64 *)_ptr, _new, _expected) == (__int64)(_expected)))
 #elif defined(__TINYC__)
+    /* FIXME: Kevin: These need to "return" the new value */
     #define AtomicInc32(_addend) \
     do \
     { \