atomic operations for x64 & tcc

Kevin Trogant 2026-01-04 00:40:08 +01:00
parent 3911b74a15
commit 207fd90233


@@ -257,6 +257,8 @@ RTC_API b32 WriteEntireFile(s8 path, byte *data, isize length);
/* Atomic add */
#define AtomicAdd32(_addend, _val) __atomic_add_fetch((i32 *)_addend, _val, __ATOMIC_SEQ_CST)
#define AtomicAdd64(_addend, _val) __atomic_add_fetch((i64 *)_addend, _val, __ATOMIC_SEQ_CST)
#define AtomicInc32(_addend) AtomicAdd32(_addend, 1)
#define AtomicInc64(_addend) AtomicAdd64(_addend, 1)
#define AtomicStore(_ptr, _val) __atomic_store_n(_ptr, _val, __ATOMIC_SEQ_CST)
#define AtomicStoreRelease(_ptr, _val) __atomic_store_n(_ptr, _val, __ATOMIC_RELEASE)
#define AtomicLoad(_ptr) __atomic_load_n(_ptr, __ATOMIC_SEQ_CST)
@@ -264,10 +266,59 @@ RTC_API b32 WriteEntireFile(s8 path, byte *data, isize length);
#elif defined(_MSC_VER)
#define AtomicAdd32(_addend, _val) _InterlockedExchangeAdd((volatile long *)_addend, _val)
#define AtomicAdd64(_addend, _val) _InterlockedExchangeAdd64((volatile __int64 *)_addend, _val)
#define AtomicInc32(_addend) AtomicAdd32(_addend, 1)
#define AtomicInc64(_addend) AtomicAdd64(_addend, 1)
#define AtomicStore(_ptr, _val) _InterlockedExchange((volatile long *)_ptr, _val)
#define AtomicStoreRelease(_ptr, _val) _InterlockedExchange_HLERelease(_ptr, _val)
#define AtomicLoad(_ptr) _InterlockedOr(_ptr, 0)
#define AtomicLoadAcquire(_ptr) _InterlockedOr_HLEAcquire(_ptr, 0)
#elif defined(__TINYC__)
/* NOTE: Unlike the GCC and MSVC variants above, these expand to statements,
 * not expressions, so they do not yield the updated value. */
#define AtomicInc32(_addend) do { \
__asm__ volatile( \
"lock incl %0" \
: "+m" (*(_addend)) \
); \
} while (0)
#define AtomicInc64(_addend) do { \
__asm__ volatile( \
"lock incq %0" \
: "+m" (*(_addend)) \
); \
} while (0)
#define AtomicAdd32(_addend, _val) do { \
__asm__ volatile( \
"lock addl %1, %0" \
: "+m" (*(_addend)) \
: "r" (_val) \
); \
} while (0)
#define AtomicAdd64(_addend, _val) do { \
__asm__ volatile( \
"lock addq %1, %0" \
: "+m" (*(_addend)) \
: "r" (_val) \
); \
} while (0)
/* This uses a mov followed by mfence to ensure that the store becomes
 * globally visible before any subsequent load or store. Note that movl
 * limits these store macros to 32-bit values. */
#define AtomicStore(_ptr, _val) do { \
__asm__ volatile( \
"movl %1, %0;" \
"mfence;" \
: "=m" (*(_ptr)) \
: "r" (_val) \
); \
} while (0)
/* A plain mov suffices here: ordinary stores already have release semantics on x86. */
#define AtomicStoreRelease(_ptr, _val) do { \
__asm__ volatile( \
"movl %1, %0" \
: "=m" (*(_ptr)) \
: "r" (_val) \
); \
} while (0)
/* NOTE(Kevin): This should always compile to a mov, which is what we want. */
#define AtomicLoad(_ptr) (*(_ptr))
#define AtomicLoadAcquire(_ptr) (*(_ptr))
#else
#define RTC_NO_ATOMICS
#endif
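
As a quick illustration (not part of the commit), here is a minimal usage sketch of a producer/consumer handoff built on these macros. It works with all three backends because the store and increment macros are only used as statements. The function names, the global variables, and the inline i32 typedef are assumptions for this example; in the real codebase i32 comes from the project's own headers.

/* Usage sketch, assuming the project's 32-bit signed integer typedef. */
typedef int i32; /* assumption: stands in for the codebase's i32 */

static i32 g_item_count;
static i32 g_items_ready;

/* Producer thread: write the data, then publish the flag last. */
static void PublishItems(i32 count)
{
    AtomicStore(&g_item_count, count);     /* store with full fence */
    AtomicStoreRelease(&g_items_ready, 1); /* release-publish the flag */
}

/* Consumer thread: wait for the flag, then read the data. */
static i32 ConsumeItems(void)
{
    while (AtomicLoadAcquire(&g_items_ready) == 0) {
        /* spin; a real caller would yield or pause here */
    }
    return AtomicLoad(&g_item_count);
}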