reformat
This commit is contained in:
parent 207fd90233
commit 8c226f5f31
@@ -7,11 +7,11 @@ BinPackParameters: 'false'
 BreakBeforeBraces: Allman
 ColumnLimit: '120'
 IndentPPDirectives: BeforeHash
-IndentWidth: '2'
+IndentWidth: '4'
 Language: Cpp
 PointerAlignment: Right
 SortIncludes: 'false'
-TabWidth: '2'
-UseTab: ForContinuationAndIndentation
+TabWidth: '4'
+UseTab: AlignWithSpaces
 
 ...
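Illustration (drawn from the rtcore.h hunks below, not a new change): with IndentWidth 4 and spaces instead of tabs, a definition that was previously written as

static force_inline arena_cp SaveArena(arena a)
{
  return (arena_cp){a.begin};
}

is now laid out as

static force_inline arena_cp
SaveArena(arena a)
{
    return (arena_cp){a.begin};
}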
rtcore.h
@@ -153,12 +153,14 @@ typedef struct arena_cp
     byte *cp;
 } arena_cp;
 
-static force_inline arena_cp SaveArena(arena a)
+static force_inline arena_cp
+SaveArena(arena a)
 {
     return (arena_cp){a.begin};
 }
 
-static force_inline void RestoreArena(arena *a, arena_cp cp)
+static force_inline void
+RestoreArena(arena *a, arena_cp cp)
 {
     a->begin = cp.cp;
 }
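Usage sketch (illustrative, not part of the commit): the checkpoint pair saves and restores the arena's begin pointer, so anything bump-allocated between the two calls is effectively discarded. The scratch allocation mentioned in the comment is hypothetical; only SaveArena and RestoreArena are defined in the header.

void
TemporaryWork(arena *a)
{
    arena_cp cp = SaveArena(*a);  /* remember the current begin pointer */

    /* ... bump-allocate temporaries from *a here (allocator not shown) ... */

    RestoreArena(a, cp);          /* a->begin is reset; the temporaries are gone */
}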
@@ -194,7 +196,11 @@ typedef struct s8
 #define S8(_s) \
     (s8) { .data = (u8 *)_s, .length = lengthof(_s), }
 
-typedef struct { s8 first; s8 second; } split_result;
+typedef struct
+{
+    s8 first;
+    s8 second;
+} split_result;
 
 /* constructs a string containing the bytes between begin and end (exclusive) */
 RTC_API s8 S8Span(u8 *begin, u8 *end);
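Usage sketch (illustrative; the exact split semantics are an assumption, not shown in this diff): S8Split2, visible in the next hunk header, returns both halves of a split in one split_result.

s8 line = S8("key=value");
split_result kv = S8Split2(line, '=');
/* assumed: kv.first holds the bytes before the first '=', kv.second the rest */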
@@ -230,8 +236,16 @@ RTC_API split_result S8Split2(s8 s, u8 c);
 /* Creates a clone of string s on arena a */
 RTC_API s8 S8Clone(s8 s, arena *a);
 
-typedef struct { i64 i; b32 ok; } s8_parse_i64_result;
-typedef struct { i32 i; b32 ok; } s8_parse_i32_result;
+typedef struct
+{
+    i64 i;
+    b32 ok;
+} s8_parse_i64_result;
+typedef struct
+{
+    i32 i;
+    b32 ok;
+} s8_parse_i32_result;
 
 /* Parses a integer from string s */
 RTC_API s8_parse_i64_result S8ParseI64(s8 s, int base);
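Usage sketch (illustrative): the parse helpers return the value together with a b32 success flag, so the caller branches on ok instead of checking errno; the base argument is assumed to behave like strtol's.

s8_parse_i64_result r = S8ParseI64(S8("12345"), 10);
if (r.ok)
{
    /* r.i holds the parsed value */
}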
@@ -273,48 +287,40 @@ RTC_API b32 WriteEntireFile(s8 path, byte *data, isize length);
 #define AtomicLoad(_ptr) _InterlockedOr(_ptr, 0)
 #define AtomicLoadAcquire(_ptr) _InterlockedOr_HLEAcquire(_ptr, 0)
 #elif defined(__TINYC__)
-#define AtomicInc32(_addend) do { \
-  __asm__ volatile( \
-    "lock incl %0" \
-    : "+m" (*_addend) \
-  ); \
+#define AtomicInc32(_addend) \
+    do \
+    { \
+        __asm__ volatile("lock incl %0" : "+m"(*_addend)); \
 } while (0)
-#define AtomicInc64(_addend) do { \
-  __asm__ volatile( \
-    "lock incq %0" \
-    : "+m" (*_addend) \
-  ); \
+#define AtomicInc64(_addend) \
+    do \
+    { \
+        __asm__ volatile("lock incq %0" : "+m"(*_addend)); \
 } while (0)
-#define AtomicAdd32(_addend, _val) do { \
-  __asm__ volatile( \
-    "lock addl %1, %0" \
-    : "+m" (*_addend) \
-    : "r" (_val) \
-  ); \
+#define AtomicAdd32(_addend, _val) \
+    do \
+    { \
+        __asm__ volatile("lock addl %1, %0" : "+m"(*_addend) : "r"(_val)); \
 } while (0)
-#define AtomicAdd64(_addend, _val) do { \
-  __asm__ volatile( \
-    "lock addq %1, %0" \
-    : "+m" (*_addend) \
-    : "r" (_val) \
-  ); \
+#define AtomicAdd64(_addend, _val) \
+    do \
+    { \
+        __asm__ volatile("lock addq %1, %0" : "+m"(*_addend) : "r"(_val)); \
 } while (0)
 /* This uses mov followed by mfence to ensure that
  * the store becomes globally visible to any subsequent load or store. */
-#define AtomicStore(_ptr, _val) do { \
-  __asm__ volatile( \
-    "movl %1, %0;" \
+#define AtomicStore(_ptr, _val) \
+    do \
+    { \
+        __asm__ volatile("movl %1, %0;" \
                          "mfence;" \
                          : "=m"(*_ptr) \
-    : "r" (_val) \
-  ); \
+                         : "r"(_val)); \
 } while (0)
-#define AtomicStoreRelease(_ptr, _val) do { \
-  __asm__ volatile( \
-    "movl %1, %0" \
-    : "=m" (*_ptr) \
-    : "r" (_val) \
-  ); \
+#define AtomicStoreRelease(_ptr, _val) \
+    do \
+    { \
+        __asm__ volatile("movl %1, %0" : "=m"(*_ptr) : "r"(_val)); \
 } while (0)
 /* NOTE(Kevin): This should always compile to a mov, which is what we want. */
 #define AtomicLoad(_ptr) (*(_ptr))
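The do { ... } while (0) wrapper keeps each macro usable as a single statement, e.g. inside an unbraced if. A minimal usage sketch, assuming a shared i32 counter (the counter and Worker function below are illustrative only, not part of the header):

static i32 g_counter;

void
Worker(void)
{
    AtomicInc32(&g_counter);            /* lock incl */
    AtomicAdd32(&g_counter, 10);        /* lock addl */
    AtomicStore(&g_counter, 0);         /* mov + mfence, per the comment above */
    i32 seen = AtomicLoad(&g_counter);  /* plain mov (see the NOTE above) */
    (void)seen;
}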
@@ -334,25 +340,29 @@ RTC_API b32 WriteEntireFile(s8 path, byte *data, isize length);
 #define PopCount32(_x) __builtin_popcount(_x)
 #define PopCount64(_x) __builtin_popcountl(_x)
 #elif defined(_MSC_VER)
-static force_inline unsigned int CTZ32(u32 x)
+static force_inline unsigned int
+CTZ32(u32 x)
 {
     unsigned int index;
     _BitScanReverse(&index, x);
     return index;
 }
-static force_inline unsigned int CTZ64(u64 x)
+static force_inline unsigned int
+CTZ64(u64 x)
 {
     unsigned int index;
     _BitScanReverse64(&index, x);
     return index;
 }
-static force_inline unsigned int CLZ32(u32 x)
+static force_inline unsigned int
+CLZ32(u32 x)
 {
     unsigned int index;
     _BitScanForward(&index, x);
     return index;
 }
-static force_inline unsigned int CLZ64(u64 x)
+static force_inline unsigned int
+CLZ64(u64 x)
 {
     unsigned int index;
     _BitScanForward64(&index, x);
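For reference (facts about the wrapped MSVC intrinsics, not part of the commit): the _BitScan* intrinsics write a bit index through their first argument and return nonzero when any bit of the input is set; _BitScanForward reports the lowest set bit, _BitScanReverse the highest.

#include <intrin.h>

static void
BitScanExample(void)
{
    unsigned long idx;
    _BitScanForward(&idx, 0x48u);  /* 0x48 = 0b01001000, lowest set bit  -> idx == 3 */
    _BitScanReverse(&idx, 0x48u);  /* 0x48 = 0b01001000, highest set bit -> idx == 6 */
    (void)idx;
}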
@@ -679,8 +689,7 @@ JoinThread(thread *t)
 RTC_API thread *
 StartThread(thread_fn *fn, void *param)
 {
-    HANDLE h = CreateThread(
-        NULL,
+    HANDLE h = CreateThread(NULL,
                             0, /* Use default stack size */
                             (LPTHREAD_START_ROUTINE)fn,
                             param,
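Usage sketch (illustrative): StartThread wraps CreateThread as shown, and JoinThread, from the surrounding hunk headers, waits on the returned thread. The thread_fn signature below is an assumption; it is not visible in this diff.

/* assumed: typedef void thread_fn(void *); */
static void
MyWorker(void *param)
{
    (void)param; /* ... do work ... */
}

static void
RunWorker(void)
{
    thread *t = StartThread(MyWorker, NULL);
    JoinThread(t); /* blocks until MyWorker returns */
}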
@@ -705,4 +714,3 @@ JoinThread(thread *t)
 #endif
 
 #endif
-