I decided to make queues explicit, to simplify handling queue ownership transfers in the renderer code. The framegraph + pass code has explicit knowledge about resource ownership, so it makes sense to handle it there. Per queue, that leaves three jobs (a sketch follows the list):

- Manage pools
- Allocate command buffers
- Submit command buffers
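Concretely, assuming a Vulkan renderer, a per-queue wrapper covering those three jobs could look roughly like the sketch below. The rt_queue type and the rtQueue* function names are hypothetical, not taken from the actual code.

#include <vulkan/vulkan.h>

typedef struct rt_queue_s {
    VkQueue queue;          /* the underlying Vulkan queue */
    uint32_t family_index;  /* needed for queue family ownership transfer barriers */
    VkCommandPool pool;     /* pool this queue's command buffers come from */
} rt_queue;

/* Create the command pool owned by this queue */
static VkResult rtQueueInit(rt_queue *q, VkDevice device, uint32_t family_index, VkQueue queue) {
    VkCommandPoolCreateInfo info = {
        .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
        .flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT,
        .queueFamilyIndex = family_index,
    };
    q->queue = queue;
    q->family_index = family_index;
    return vkCreateCommandPool(device, &info, NULL, &q->pool);
}

/* Allocate a primary command buffer from the queue's pool */
static VkResult rtQueueAllocCmdBuf(rt_queue *q, VkDevice device, VkCommandBuffer *out) {
    VkCommandBufferAllocateInfo info = {
        .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
        .commandPool = q->pool,
        .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,
        .commandBufferCount = 1,
    };
    return vkAllocateCommandBuffers(device, &info, out);
}

/* Submit a recorded command buffer on this queue */
static VkResult rtQueueSubmit(rt_queue *q, VkCommandBuffer cmd, VkFence fence) {
    VkSubmitInfo info = {
        .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
        .commandBufferCount = 1,
        .pCommandBuffers = &cmd,
    };
    return vkQueueSubmit(q->queue, 1, &info, fence);
}

Keeping the queue family index on the struct is what makes ownership transfer barriers straightforward to emit: when the framegraph decides a resource moves between queues, both the source and destination family indices are right at hand.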
rt_atomics.h:
#ifndef RT_ATOMICS_H
#define RT_ATOMICS_H

/* Macros & helpers for atomic instructions */

#ifdef _MSC_VER

#include <intrin.h> /* _Interlocked* intrinsics; LONG/LONG64 come from the Windows SDK headers */

/* Increment and decrement return the new value */
#define rtAtomic32Inc(pa) _InterlockedIncrement((volatile LONG *)(pa))
#define rtAtomic64Inc(pa) _InterlockedIncrement64((volatile LONG64 *)(pa))
#define rtAtomic32Dec(pa) _InterlockedDecrement((volatile LONG *)(pa))
#define rtAtomic64Dec(pa) _InterlockedDecrement64((volatile LONG64 *)(pa))

/* FetchAdd returns the value the target held before the addition */
#define rtAtomic32FetchAdd(pa, value) _InterlockedExchangeAdd((volatile LONG *)(pa), (LONG)(value))
#define rtAtomic64FetchAdd(pa, value) _InterlockedExchangeAdd64((volatile LONG64 *)(pa), (LONG64)(value))

#elif defined(__GNUC__) || defined(__clang__)

/* Increment and decrement return the new value */
#define rtAtomic32Inc(pa) __atomic_add_fetch((pa), 1, __ATOMIC_SEQ_CST)
#define rtAtomic64Inc(pa) __atomic_add_fetch((pa), 1LL, __ATOMIC_SEQ_CST)
#define rtAtomic32Dec(pa) __atomic_sub_fetch((pa), 1, __ATOMIC_SEQ_CST)
#define rtAtomic64Dec(pa) __atomic_sub_fetch((pa), 1LL, __ATOMIC_SEQ_CST)

/* FetchAdd returns the value the target held before the addition */
#define rtAtomic32FetchAdd(pa, value) __atomic_fetch_add((pa), (value), __ATOMIC_SEQ_CST)
#define rtAtomic64FetchAdd(pa, value) __atomic_fetch_add((pa), (value), __ATOMIC_SEQ_CST)

#else
#error "rt_atomics.h: unsupported compiler"
#endif

#endif /* RT_ATOMICS_H */
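As a quick usage sketch (the rt_ref type here is hypothetical, purely for illustration), the Inc/Dec macros map directly onto reference counting, since both compiler branches return the new value:

#include <stdint.h>

/* Hypothetical refcounted object, for illustration only */
typedef struct {
    int32_t refs;
} rt_ref;

static void rtRefAcquire(rt_ref *r) {
    rtAtomic32Inc(&r->refs);
}

/* Returns 1 when the caller dropped the last reference and should free the object */
static int rtRefRelease(rt_ref *r) {
    return rtAtomic32Dec(&r->refs) == 0;
}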