Test resource manager and renderer integration

Kevin Trogant 2024-02-05 01:23:31 +01:00
parent c66d5ce337
commit fc08ab08d4
19 changed files with 259 additions and 879 deletions

View File

@@ -2,7 +2,7 @@ optimization speed;
 vertex {
 vk BEGIN
-#include "test.hlsl"
+#include "common.hlsl"
 struct VSInput
 {

View File

@@ -136,6 +136,7 @@ runtime_lib = library('rt',
     'src/runtime/file_tab.c',
     'src/runtime/fsutils.c',
     'src/runtime/gfx_main.c',
+    'src/runtime/gfx_object_renderer.c',
     'src/runtime/hashing.c',
     'src/runtime/init.c',
     'src/runtime/jobs.c',
@@ -176,11 +177,12 @@ if vk_dep.found()
 vk_renderer_lib = library('rtvk',
     # Project Sources
     'src/renderer/vk/gpu.h',
+    'src/renderer/vk/pipelines.h',
     'src/renderer/vk/swapchain.h',
     'src/renderer/vk/init.c',
-    'src/renderer/vk/swapchain.c',
     'src/renderer/vk/pipelines.c',
+    'src/renderer/vk/swapchain.c',
     # Contrib Sources
     'contrib/volk/volk.h',

View File

@@ -38,6 +38,8 @@ typedef struct {
     VkPhysicalDeviceDescriptorIndexingProperties descriptor_indexing_props;
     VkPhysicalDeviceProperties phys_device_props;
+    VkPhysicalDeviceDescriptorIndexingFeatures descriptor_indexing_features;
+    VkPhysicalDeviceFeatures phys_device_features;
 } rt_vk_gpu;

 #ifndef RT_VK_DONT_DEFINE_GPU_GLOBAL

View File

@@ -242,6 +242,7 @@ static rt_queue_indices RetrieveQueueIndices(VkPhysicalDevice phys_dev, VkSurfac
 static bool CheckDeviceExtensionSupported(VkPhysicalDevice phys_dev) {
     const char *required_extensions[] = {
         VK_KHR_SWAPCHAIN_EXTENSION_NAME,
+        VK_KHR_DYNAMIC_RENDERING_EXTENSION_NAME,
     };
     uint32_t extension_count;
@@ -266,6 +267,11 @@ static bool CheckDeviceExtensionSupported(VkPhysicalDevice phys_dev) {
         }
         if (!found) {
             supported = false;
+            VkPhysicalDeviceProperties props;
+            vkGetPhysicalDeviceProperties(phys_dev, &props);
+            rtLog("Device %s does not support the required extension %s",
+                  props.deviceName,
+                  required_extensions[i]);
             goto out;
         }
     }
@@ -294,6 +300,15 @@ static rt_result ChoosePhysicalDevice(void) {
     uint32_t highscore = 0;
     uint32_t best_index = phys_device_count;
     for (uint32_t i = 0; i < phys_device_count; ++i) {
+        VkPhysicalDeviceDescriptorIndexingFeatures descriptor_indexing_features = {
+            .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES,
+        };
+        VkPhysicalDeviceFeatures2 features = {
+            .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2,
+            .pNext = &descriptor_indexing_features,
+        };
+        vkGetPhysicalDeviceFeatures2(phys_devices[i], &features);
         VkPhysicalDeviceDescriptorIndexingProperties descriptor_indexing_props = {
             .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES,
             .pNext = NULL,
@@ -312,6 +327,11 @@ static rt_result ChoosePhysicalDevice(void) {
             indices.graphics == UINT32_MAX)
             continue;
+        /* Check for bindless support */
+        if (!descriptor_indexing_features.runtimeDescriptorArray ||
+            !descriptor_indexing_features.descriptorBindingPartiallyBound)
+            continue;
         uint32_t score = 0;
         if (props.properties.deviceType == VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU)
@@ -338,7 +358,8 @@ static rt_result ChoosePhysicalDevice(void) {
         }
     }
     if (best_index < phys_device_count) {
-        g_gpu.phys_device = phys_devices[0];
+        g_gpu.phys_device = phys_devices[best_index];
         VkPhysicalDeviceDescriptorIndexingProperties descriptor_indexing_props = {
             .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES,
             .pNext = NULL,
@@ -347,9 +368,20 @@ static rt_result ChoosePhysicalDevice(void) {
             .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2,
             .pNext = &descriptor_indexing_props,
         };
-        vkGetPhysicalDeviceProperties2(phys_devices[0], &props);
+        VkPhysicalDeviceDescriptorIndexingFeatures descriptor_indexing_features = {
+            .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES,
+        };
+        VkPhysicalDeviceFeatures2 features = {
+            .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2,
+            .pNext = &descriptor_indexing_features,
+        };
+        vkGetPhysicalDeviceFeatures2(phys_devices[best_index], &features);
+        vkGetPhysicalDeviceProperties2(phys_devices[best_index], &props);
         g_gpu.phys_device_props = props.properties;
         g_gpu.descriptor_indexing_props = descriptor_indexing_props;
+        g_gpu.phys_device_features = features.features;
+        g_gpu.descriptor_indexing_features = descriptor_indexing_features;
     }
     free(phys_devices);
@@ -400,8 +432,20 @@ static rt_result CreateDevice(void) {
         queue_info[distinct_queue_count].pQueuePriorities = &priority;
     }
+    VkPhysicalDeviceDescriptorIndexingFeatures indexing_features = {
+        .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES,
+    };
+    VkPhysicalDeviceFeatures2 features = {.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2,
+                                          .pNext = &indexing_features};
+    vkGetPhysicalDeviceFeatures2(g_gpu.phys_device, &features);
+    RT_ASSERT(indexing_features.runtimeDescriptorArray &&
+                  indexing_features.descriptorBindingPartiallyBound,
+              "We require a device that supports bindless vulkan.");
     VkDeviceCreateInfo device_info = {
         .sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,
+        .pNext = &features,
         .enabledExtensionCount = RT_ARRAY_COUNT(extensions),
         .ppEnabledExtensionNames = extensions,
         .pQueueCreateInfos = queue_info,

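Note: the additions above use the standard Vulkan feature round trip: VkPhysicalDeviceFeatures2 heads a pNext chain that vkGetPhysicalDeviceFeatures2 fills in, and handing the same filled chain to VkDeviceCreateInfo.pNext (as CreateDevice now does) is what actually enables the features on the created device. A minimal self-contained sketch of that round trip, assuming only plain Vulkan entry points rather than this engine's helpers:

    VkPhysicalDeviceDescriptorIndexingFeatures indexing = {
        .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES,
    };
    VkPhysicalDeviceFeatures2 features2 = {
        .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2,
        .pNext = &indexing,
    };
    vkGetPhysicalDeviceFeatures2(phys_dev, &features2);
    if (indexing.runtimeDescriptorArray && indexing.descriptorBindingPartiallyBound) {
        VkDeviceCreateInfo device_info = {
            .sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,
            .pNext = &features2, /* enables exactly the features queried above */
            /* queue and extension setup omitted for brevity */
        };
        VkDevice device;
        vkCreateDevice(phys_dev, &device_info, NULL, &device);
    }
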
View File

@@ -1,13 +1,15 @@
-#include "runtime/renderer_api.h"
 #include "runtime/config.h"
 #include "runtime/handles.h"
+#include "runtime/mem_arena.h"
+#include "runtime/renderer_api.h"
+#include "runtime/resources.h"
 #include "runtime/threading.h"

 #include "gpu.h"
 #include "pipelines.h"

-#include <volk/volk.h>
 #include <stdlib.h>
+#include <volk/volk.h>

 RT_CVAR_I(r_VkMaxPipelineCount, "Maximum number of pipeline objects. Default: 1024", 1024);
@@ -21,6 +23,71 @@ static rt_pipeline_slot *_pipelines;
 static rt_pipeline_slot *_first_free;
 static rt_rwlock _lock;

+static void DestroyPipeline(rt_pipeline_slot *slot) {
+    if (slot->pipeline.pipeline) {
+        vkDestroyPipeline(g_gpu.device, slot->pipeline.pipeline, g_gpu.alloc_cb);
+    }
+    slot->next_free = _first_free;
+    _first_free = slot;
+}
+
+static VkShaderModule CreateShaderModuleFromResource(rt_resource_id rid) {
+    if (rid == RT_INVALID_RESOURCE_ID)
+        return VK_NULL_HANDLE;
+    rt_resource *resource = NULL;
+    size_t size = rtGetResourceSize(rid);
+    if (size == 0)
+        return VK_NULL_HANDLE;
+    rt_temp_arena temp = rtGetTemporaryArena(NULL, 0);
+    if (!temp.arena)
+        return VK_NULL_HANDLE;
+    VkShaderModule module = VK_NULL_HANDLE;
+    resource = rtArenaPush(temp.arena, size);
+    if (!resource) {
+        rtLog("VK", "Failed to allocate temporary memory for retrieving a shader resource");
+        goto out;
+    }
+    if (rtGetResource(rid, resource) != RT_SUCCESS) {
+        goto out;
+    }
+    if (resource->type != RT_RESOURCE_SHADER) {
+        rtLog("VK", "Attempted to create a shader module from a non-shader resource %llx", rid);
+        goto out;
+    }
+    rt_shader_info *info = resource->data;
+    if (!info) {
+        rtLog("VK", "Shader resource %llx has no attached shader_info", rid);
+        goto out;
+    }
+    VkShaderModuleCreateInfo module_info = {.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,
+                                            .pCode = rtResolveRelptr(&info->bytecode),
+                                            .codeSize = info->bytecode_length};
+    if (vkCreateShaderModule(g_gpu.device, &module_info, g_gpu.alloc_cb, &module) != VK_SUCCESS) {
+        rtLog("VK", "Failed to create the shader module from resource %llx", rid);
+        goto out;
+    }
+out:
+    rtReturnTemporaryArena(temp);
+    return module;
+}
+
+static bool CreateComputePipeline(VkShaderModule compute_shader,
+                                  const rt_pipeline_info *info,
+                                  rt_pipeline_slot *slot) {
+    return false;
+}
+
+static bool CreateGraphicsPipeline(VkShaderModule vertex_shader,
+                                   VkShaderModule fragment_shader,
+                                   const rt_pipeline_info *info,
+                                   rt_pipeline_slot *slot) {
+    return false;
+}
+
 rt_result InitPipelineManagement(void) {
     rt_create_rwlock_result lock_res = rtCreateRWLock();
     if (!lock_res.ok)
@@ -40,14 +107,6 @@ rt_result InitPipelineManagement(void) {
     return RT_SUCCESS;
 }

-static void DestroyPipeline(rt_pipeline_slot *slot) {
-    if (slot->pipeline.pipeline) {
-        vkDestroyPipeline(g_gpu.device, slot->pipeline.pipeline, g_gpu.alloc_cb);
-    }
-    slot->next_free = _first_free;
-    _first_free = slot;
-}
-
 void ShutdownPipelineManagement(void) {
     for (int i = 1; i < r_VkMaxPipelineCount.i; ++i) {
         DestroyPipeline(&_pipelines[i]);
@@ -68,12 +127,34 @@ rt_pipeline_handle RT_RENDERER_API_FN(CompilePipeline)(const rt_pipeline_info *i
     rt_pipeline_slot *slot = _first_free;
     _first_free = slot->next_free;
     slot->version = (slot->version + 1) & RT_GFX_HANDLE_MAX_VERSION;
-    rtUnlockWrite(&_lock);
     /* No other thread that calls compile gets the same slot.
      * Another thread accessing the slot via GetPipeline would get a version mismatch.
      * The same holds for DestroyPipeline
      */
+    rtUnlockWrite(&_lock);
+
+    VkShaderModule vertex_shader = CreateShaderModuleFromResource(info->vertex_shader);
+    VkShaderModule fragment_shader = CreateShaderModuleFromResource(info->fragment_shader);
+    VkShaderModule compute_shader = CreateShaderModuleFromResource(info->compute_shader);
+    RT_UNUSED(vertex_shader);
+    RT_UNUSED(fragment_shader);
+    RT_UNUSED(compute_shader);
+
+    bool create_success = false;
+    if (compute_shader) {
+        create_success = CreateComputePipeline(compute_shader, info, slot);
+    } else if (vertex_shader && fragment_shader) {
+        create_success = CreateGraphicsPipeline(vertex_shader, fragment_shader, info, slot);
+    } else {
+        rtLog("VK", "Invalid combination of shaders in pipeline info.");
+    }
+
+    if (create_success) {
+        handle.version = slot->version;
+        handle.index = (uint32_t)(slot - _pipelines);
+    }
     return handle;
 }

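Note: CreateComputePipeline and CreateGraphicsPipeline are left as stubs returning false in this commit, so CompilePipeline currently always hands back an invalid handle. For orientation, a minimal sketch of what the compute path could grow into; g_gpu, rt_pipeline_info, and rt_pipeline_slot come from the diff, while the empty pipeline layout and the "main" entry point are assumptions (the real implementation would presumably use the bindless descriptor layout and track the layout for destruction):

    static bool CreateComputePipeline(VkShaderModule compute_shader,
                                      const rt_pipeline_info *info,
                                      rt_pipeline_slot *slot) {
        /* Hypothetical: an empty layout stands in for the engine's real one. */
        VkPipelineLayoutCreateInfo layout_info = {
            .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
        };
        VkPipelineLayout layout;
        if (vkCreatePipelineLayout(g_gpu.device, &layout_info, g_gpu.alloc_cb, &layout) !=
            VK_SUCCESS)
            return false;
        VkComputePipelineCreateInfo pipeline_info = {
            .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
            .stage = {
                .sType  = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
                .stage  = VK_SHADER_STAGE_COMPUTE_BIT,
                .module = compute_shader,
                .pName  = "main", /* assumes the shader's entry point is "main" */
            },
            .layout = layout, /* ownership/destruction of the layout omitted here */
        };
        return vkCreateComputePipelines(g_gpu.device, VK_NULL_HANDLE, 1, &pipeline_info,
                                        g_gpu.alloc_cb, &slot->pipeline.pipeline) == VK_SUCCESS;
    }
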
View File

@@ -85,7 +85,7 @@ rtWin32Entry(HINSTANCE hInstance, HINSTANCE hPrevInstance, PWSTR pCmdLine, int n
     }

     rt_renderer_init_info renderer_info = {.hWnd = wnd, .hInstance = hInstance};
-    if (!rtInitGFX(&renderer_info)) {
+    if (rtInitGFX(&renderer_info) != RT_SUCCESS) {
         rtReportError("GFX", "Init failed.");
         return 1;
     }

View File

@@ -1,379 +0,0 @@
#include "assets.h"
#include "asset_dependencies.h"
#include "aio.h"
#include "buffer_manager.h"
#include "config.h"
#include "threading.h"
#include "uidtab.h"
#include <assert.h>
#include <stdlib.h>
RT_CVAR_I(rt_AssetCacheSize, "Number of asset cache entries. Default: 1024.", 1024);
/* asset_loading.c */
extern rt_result DecompressAsset(void *compressed_buffer,
size_t compressed_buffer_size,
void **p_decompressed,
size_t *p_decompressed_size);
typedef enum {
CACHE_ENTRY_STATE_FREE,
CACHE_ENTRY_STATE_LOADING,
CACHE_ENTRY_STATE_LOADED,
} rt_asset_cache_entry_state;
typedef struct rt_asset_cache_entry_s {
rt_asset_cache_entry_state state;
rt_aio_handle load;
void *buffer;
size_t size;
int refcount;
/* Reclaim list */
struct rt_asset_cache_entry_s *prev_reclaim;
struct rt_asset_cache_entry_s *next_reclaim;
} rt_asset_cache_entry;
static rt_uid *_uids;
static rt_asset_cache_entry *_entries;
static rt_asset_cache_entry *_first_reclaim;
static rt_asset_cache_entry *_last_reclaim;
/* Locked as writer when modifying entries, as reader when searching */
static rt_rwlock _lock;
rt_result InitAssetCache(void) {
_entries = calloc((size_t)rt_AssetCacheSize.i, sizeof(rt_asset_cache_entry));
if (!_entries) {
return RT_BUFFER_ALLOC_FAILED;
}
_uids = calloc((size_t)rt_AssetCacheSize.i, sizeof(rt_uid));
if (!_uids) {
free(_entries);
return RT_BUFFER_ALLOC_FAILED;
}
rt_create_rwlock_result lock_res = rtCreateRWLock();
if (!lock_res.ok) {
free(_entries);
free(_uids);
return RT_UNKNOWN_ERROR;
}
_lock = lock_res.lock;
return RT_SUCCESS;
}
void ShutdownAssetCache(void) {
free(_entries);
free(_uids);
rtDestroyRWLock(&_lock);
_first_reclaim = NULL;
_last_reclaim = NULL;
}
static void ReleaseEntry(rt_asset_cache_entry *entry) {
if (entry->load != RT_AIO_INVALID_HANDLE) {
rtWaitForAIOCompletion(entry->load);
rtReleaseAIO(entry->load);
entry->load = RT_AIO_INVALID_HANDLE;
}
rtReleaseBuffer(entry->buffer, entry->size);
entry->buffer = NULL;
entry->size = 0;
entry->next_reclaim = NULL;
entry->prev_reclaim = NULL;
}
static void GarbageCollect(void) {
rtLockWrite(&_lock);
rt_asset_cache_entry *entry = _first_reclaim;
while (entry) {
assert(entry->refcount == 0);
rt_asset_cache_entry *next = entry->next_reclaim;
if (entry->state == CACHE_ENTRY_STATE_LOADED) {
ReleaseEntry(entry);
_first_reclaim = next;
}
entry = next;
}
rtUnlockWrite(&_lock);
}
static rt_asset_cache_entry *GetEntry(rt_uid uid) {
/* Hash lookup */
unsigned int mod = (unsigned int)rt_AssetCacheSize.i - 1;
for (unsigned int i = 0; i < (unsigned int)rt_AssetCacheSize.i; ++i) {
unsigned int slot = (uid + i) & mod;
if (_uids[slot] == uid) {
return &_entries[slot];
} else if (_uids[slot] == RT_INVALID_UID) {
break;
}
}
return NULL;
}
static bool IsAssetLoaded(rt_uid uid) {
const rt_asset_cache_entry *entry = GetEntry(uid);
if (entry)
return entry->state == CACHE_ENTRY_STATE_LOADED ||
entry->state == CACHE_ENTRY_STATE_LOADING;
else
return false;
}
static int InsertEntry(rt_uid uid) {
unsigned int mod = (unsigned int)rt_AssetCacheSize.i - 1;
for (unsigned int i = 0; i < (unsigned int)rt_AssetCacheSize.i; ++i) {
unsigned int slot = (uid + i) & mod;
if (_uids[slot] == RT_INVALID_UID) {
    _uids[slot] = uid; /* record the uid so GetEntry() can find this slot */
    return (int)slot;
}
}
return -1;
}
static rt_result InsertAndLoadAssets(const rt_uid *uids, size_t count) {
rt_load_batch batch = {.num_loads = 0};
rt_result res = RT_SUCCESS;
count = (count < RT_LOAD_BATCH_MAX_SIZE) ? count : RT_LOAD_BATCH_MAX_SIZE;
rt_asset_cache_entry *load_entries[RT_LOAD_BATCH_MAX_SIZE];
for (size_t i = 0; i < count; ++i) {
rtLockRead(&_lock);
bool needs_load = !IsAssetLoaded(uids[i]);
rtUnlockRead(&_lock);
if (!needs_load)
continue;
rtLockWrite(&_lock);
/* It's possible that another thread loaded the asset in the meantime */
if (!IsAssetLoaded(uids[i])) {
const rt_uid_data *data = rtGetUIDData(uids[i]);
if (!data) {
rtUnlockWrite(&_lock);
rtLog("ASSET_CACHE", "Failed to get uid data for uid %u", uids[i]);
res = RT_UNKNOWN_ASSET;
continue;
}
void *compressed_data = rtAllocBuffer(data->size);
if (!compressed_data) {
/* Try again after garbage collection */
rtUnlockWrite(&_lock);
GarbageCollect();
compressed_data = rtAllocBuffer(data->size);
if (!compressed_data) {
rtLog("ASSET_CACHE",
"Failed to allocate intermediate buffer for uid %u",
uids[i]);
res = RT_BUFFER_ALLOC_FAILED;
continue;
}
rtLockWrite(&_lock);
}
int slot = InsertEntry(uids[i]);
if (slot == -1) {
rtUnlockWrite(&_lock);
rtLog("ASSET_CACHE", "Failed to insert new entry for uid %u", uids[i]);
res = RT_ASSET_CACHE_FULL;
break;
}
rt_asset_cache_entry *entry = &_entries[slot];
load_entries[batch.num_loads] = entry;
/* We set the refcount to 0, but don't insert the entry
* into the reclaim list, to ensure that its buffer does not get freed
* while the load still executes. Setting the refcount to 0 ensures
* that the count is correct, once the asset is accessed the first time. */
entry->state = CACHE_ENTRY_STATE_LOADING;
entry->refcount = 0;
entry->buffer = compressed_data;
entry->size = data->size;
entry->next_reclaim = NULL;
entry->prev_reclaim = NULL;
entry->load = RT_AIO_INVALID_HANDLE;
batch.loads[batch.num_loads].file = data->pkg_file;
batch.loads[batch.num_loads].num_bytes = data->size;
batch.loads[batch.num_loads].offset = data->offset;
batch.loads[batch.num_loads].dest = compressed_data;
++batch.num_loads;
}
/* Release the write lock taken at the top of this iteration; paths that
 * continue or break above have already released it. */
rtUnlockWrite(&_lock);
}
/* Dispatch the load */
rt_aio_handle handles[RT_LOAD_BATCH_MAX_SIZE];
if ((res = rtSubmitLoadBatch(&batch, handles)) != RT_SUCCESS) {
rtLog("ASSET_CACHE", "Failed to submit %u asset loads.", batch.num_loads);
return res;
}
/* Set the aio handles of the inserted entries */
rtLockWrite(&_lock);
for (unsigned int i = 0; i < batch.num_loads; ++i) {
load_entries[i]->load = handles[i];
}
rtUnlockWrite(&_lock);
return res;
}
static bool DecompressEntry(rt_uid uid, rt_asset_cache_entry *entry) {
rtReleaseAIO(entry->load);
entry->load = RT_AIO_INVALID_HANDLE;
void *decompressed_buffer;
size_t decompressed_size;
rt_result dec_res =
DecompressAsset(entry->buffer, entry->size, &decompressed_buffer, &decompressed_size);
if (dec_res == RT_SUCCESS) {
rtReleaseBuffer(entry->buffer, entry->size);
entry->buffer = decompressed_buffer;
entry->size = decompressed_size;
entry->state = CACHE_ENTRY_STATE_LOADED;
return true;
} else if (dec_res == RT_BUFFER_ALLOC_FAILED) {
GarbageCollect();
/* Try again */
if (DecompressAsset(entry->buffer, entry->size, &decompressed_buffer, &decompressed_size) ==
RT_SUCCESS) {
rtReleaseBuffer(entry->buffer, entry->size);
entry->buffer = decompressed_buffer;
entry->size = decompressed_size;
entry->state = CACHE_ENTRY_STATE_LOADED;
return true;
}
/* Don't do anything yet. We might be able to do this later, once some
* buffers become free. */
rtLog("ASSET_CACHE", "Failed to decompress asset %u", uid);
return false;
} else {
rtLog("ASSET_CACHE", "Failed to decompress asset %u", uid);
ReleaseEntry(entry);
ptrdiff_t idx = entry - _entries;
_uids[idx] = RT_INVALID_UID;
return false;
}
}
static void CheckCompletedLoads(const rt_uid *uids, size_t count) {
for (size_t i = 0; i < count; ++i) {
rtLockRead(&_lock);
volatile rt_asset_cache_entry *entry = (volatile rt_asset_cache_entry *)GetEntry(uids[i]);
if (!entry) {
rtUnlockRead(&_lock);
rtLog("ASSET_CACHE", "Passed unknown uid %u to CheckCompletedLoads()", uids[i]);
continue;
}
if (entry->state != CACHE_ENTRY_STATE_LOADING) {
rtUnlockRead(&_lock);
continue;
}
bool load_finished = rtGetAIOState(entry->load) == RT_AIO_STATE_FINISHED;
rtUnlockRead(&_lock);
if (load_finished) {
rtLockWrite(&_lock);
/* Ensure that no-one else handled this */
if (entry->state == CACHE_ENTRY_STATE_LOADING) {
DecompressEntry(uids[i], (rt_asset_cache_entry *)entry);
}
rtUnlockWrite(&_lock);
}
}
}
RT_DLLEXPORT rt_get_asset_result rtGetAsset(rt_uid uid) {
rt_get_asset_result result = {
.result = RT_SUCCESS,
};
rtLockRead(&_lock);
bool needs_load = !IsAssetLoaded(uid);
rtUnlockRead(&_lock);
if (needs_load) {
rt_uid load_uids[RT_LOAD_BATCH_MAX_SIZE];
size_t load_count = 1;
load_uids[0] = uid;
rt_asset_dependency_list deps = rtGetAssetDependencies(uid);
for (size_t i = 0; i < deps.count && i < RT_LOAD_BATCH_MAX_SIZE - 1; ++i) {
load_uids[i + 1] = deps.dependencies[i];
++load_count;
}
result.result = InsertAndLoadAssets(load_uids, load_count);
if (result.result == RT_SUCCESS) {
CheckCompletedLoads(load_uids, load_count);
}
}
rtLockWrite(&_lock);
rt_asset_cache_entry *entry = GetEntry(uid);
if (entry) {
if (entry->state == CACHE_ENTRY_STATE_LOADED) {
++entry->refcount;
result.data = entry->buffer;
result.size = entry->size;
} else if (entry->state == CACHE_ENTRY_STATE_LOADING) {
assert(entry->load != RT_AIO_INVALID_HANDLE);
++entry->refcount;
if (rtWaitForAIOCompletion(entry->load) == RT_AIO_STATE_FINISHED) {
if (DecompressEntry(uid, entry)) {
result.data = entry->buffer;
result.size = entry->size;
} else {
result.result = RT_LOAD_FAILED;
}
} else {
ReleaseEntry(entry);
rtLog("ASSET_CACHE", "Failed to load asset %u", uid);
result.result = RT_LOAD_FAILED;
}
}
/* Remove from the reclaim list */
if (_first_reclaim == entry)
_first_reclaim = entry->next_reclaim;
if (_last_reclaim == entry)
_last_reclaim = entry->prev_reclaim;
if (entry->next_reclaim)
entry->next_reclaim->prev_reclaim = entry->prev_reclaim;
if (entry->prev_reclaim)
entry->prev_reclaim->next_reclaim = entry->next_reclaim;
}
rtUnlockWrite(&_lock);
return result;
}
RT_DLLEXPORT void rtReleaseAsset(rt_uid uid) {
rtLockWrite(&_lock);
rt_asset_cache_entry *entry = GetEntry(uid);
if (entry && entry->refcount > 0) {
--entry->refcount;
if (entry->refcount == 0) {
/* add to the reclaim list */
if (_last_reclaim)
_last_reclaim->next_reclaim = entry;
if (!_first_reclaim)
_first_reclaim = entry;
entry->prev_reclaim = _last_reclaim;
entry->next_reclaim = NULL;
_last_reclaim = entry;
}
}
rtUnlockWrite(&_lock);
}

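Note: the removed cache's GetEntry and InsertEntry probed with (uid + i) & (rt_AssetCacheSize.i - 1), which only behaves like a modulo when the table size is a power of two; the default of 1024 satisfies this, and the uid table later in this commit enforces the same invariant explicitly via rtNextPowerOfTwo32. A self-contained sketch of the probing scheme (illustrative, not engine code):

    #include <stdint.h>

    /* Linear probing over a power-of-two table; 0 marks an empty slot,
     * matching RT_INVALID_UID == 0. */
    static int Probe(const uint32_t *keys, uint32_t capacity, uint32_t key) {
        uint32_t mask = capacity - 1; /* valid only if capacity == 2^k */
        for (uint32_t i = 0; i < capacity; ++i) {
            uint32_t slot = (key + i) & mask; /* wraps like key % capacity */
            if (keys[slot] == key)
                return (int)slot; /* found */
            if (keys[slot] == 0)
                return -1; /* an empty slot terminates the probe sequence */
        }
        return -1; /* table is full */
    }
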
View File

@@ -1,137 +0,0 @@
#define RT_DEFINE_DEPENDENCY_FILE_STRUCTURES
#include "asset_dependencies.h"
#include "aio.h"
#include "buffer_manager.h"
#include <assert.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
typedef struct {
uint32_t begin;
uint32_t count;
} rt_dep_list;
typedef struct {
rt_uid *uids;
rt_dep_list *lists;
uint32_t capacity;
} rt_dep_map;
static rt_dep_map _map;
static rt_uid *_list_mem;
rt_result LoadAssetDependencies(void) {
rt_dependency_file_header header;
rt_file_id fid = rtAddFile("data/deps.bin");
if (rtSubmitSingleLoadSync((rt_file_load){.dest = &header,
.num_bytes = sizeof(header),
.offset = 0,
.file = fid}) != RT_AIO_STATE_FINISHED) {
rtReportError("core", "Failed to load deps.bin");
return RT_UNKNOWN_ERROR;
}
void *buffer = rtAllocBuffer(header.data_size);
if (rtSubmitSingleLoadSync((rt_file_load){.dest = buffer,
.num_bytes = header.data_size,
.offset = sizeof(header),
.file = fid}) != RT_AIO_STATE_FINISHED) {
rtReportError("core", "Failed to load deps.bin");
return RT_UNKNOWN_ERROR;
}
/* We know the exact number of list entries */
uint64_t total_list_entries =
(header.data_size - header.num_lists * sizeof(rt_dependency_file_list_header)) /
sizeof(rt_uid);
_list_mem = malloc(total_list_entries * sizeof(rt_uid));
if (!_list_mem) {
rtReleaseBuffer(buffer, header.data_size);
rtReportError("core", "Failed to allocate dependency list storage.");
return RT_UNKNOWN_ERROR;
}
_map.capacity = rtNextPowerOfTwo32(header.num_lists);
_map.uids = calloc(_map.capacity, sizeof(rt_uid));
if (!_map.uids) {
free(_list_mem);
rtReleaseBuffer(buffer, header.data_size);
rtReportError("core", "Failed to allocate dependency list storage.");
return RT_UNKNOWN_ERROR;
}
_map.lists = calloc(_map.capacity, sizeof(rt_dep_list));
if (!_map.lists) {
free(_list_mem);
free(_map.uids);
rtReleaseBuffer(buffer, header.data_size);
rtReportError("core", "Failed to allocate dependency list storage.");
return RT_UNKNOWN_ERROR;
}
uint32_t storage_at = 0;
rt_dependency_file_list_header *list = buffer;
for (uint32_t i = 0; i < header.num_lists; ++i) {
const rt_uid *entries = (rt_uid *)(list + 1);
/* Validate the checksum */
XXH64_hash_t file_hash = XXH64_hashFromCanonical(&list->checksum);
XXH64_hash_t calc_hash = XXH3_64bits(entries, sizeof(rt_uid) * list->num_entries);
if (file_hash != calc_hash) {
rtReportError("core", "Checksum mismatch in list %u", i);
rtReleaseBuffer(buffer, header.data_size);
return RT_UNKNOWN_ERROR;
}
/* Store the list */
memcpy(_list_mem + storage_at, entries, sizeof(rt_uid) * list->num_entries);
bool inserted = false;
for (uint32_t j = 0; j < _map.capacity; ++j) {
uint32_t slot = (list->uid + j) % _map.capacity;
if (_map.uids[slot] == RT_INVALID_UID) {
_map.uids[slot] = list->uid;
_map.lists[slot].begin = storage_at;
_map.lists[slot].count = list->num_entries;
inserted = true;
break;
}
}
storage_at += list->num_entries;
assert(inserted);
assert(storage_at <= total_list_entries);
list = (rt_dependency_file_list_header *)(entries + list->num_entries);
}
rtReleaseBuffer(buffer, header.data_size);
return RT_SUCCESS;
}
void ReleaseAssetDependencies(void) {
free(_list_mem);
free(_map.uids);
free(_map.lists);
}
RT_DLLEXPORT rt_asset_dependency_list rtGetAssetDependencies(rt_uid asset) {
rt_asset_dependency_list result = {
.dependencies = NULL,
.count = 0,
};
for (uint32_t i = 0; i < _map.capacity; ++i) {
uint32_t slot = (asset + i) % _map.capacity;
if (_map.uids[slot] == asset) {
result.dependencies = &_list_mem[_map.lists[slot].begin];
result.count = _map.lists[slot].count;
break;
} else if (_map.uids[slot] == RT_INVALID_UID) {
break;
}
}
return result;
}

View File

@@ -1,33 +0,0 @@
#ifndef RT_ASSET_DEPENDENCIES_H
#define RT_ASSET_DEPENDENCIES_H
#include "assets.h"
#ifdef RT_DEFINE_DEPENDENCY_FILE_STRUCTURES
#include "xxhash/xxhash.h"
#pragma pack(push, 1)
typedef struct {
uint64_t data_size;
uint32_t num_lists;
} rt_dependency_file_header;
typedef struct {
rt_uid uid;
uint32_t num_entries;
XXH64_canonical_t checksum;
} rt_dependency_file_list_header;
#pragma pack(pop)
#endif
typedef struct {
const rt_uid *dependencies;
uint32_t count;
} rt_asset_dependency_list;
RT_DLLEXPORT rt_asset_dependency_list rtGetAssetDependencies(rt_uid asset);
#endif

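Note: taken together with the removed LoadAssetDependencies above, these packed structures imply the following on-disk layout for data/deps.bin. This is reconstructed from the read logic, not a documented format:

    /* deps.bin, as inferred from LoadAssetDependencies:
     *
     *   rt_dependency_file_header header;        // data_size, num_lists
     *   repeated header.num_lists times:
     *     rt_dependency_file_list_header list;   // uid, num_entries, XXH64 checksum
     *     rt_uid entries[list.num_entries];      // checksummed payload
     */
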
View File

@@ -1,74 +0,0 @@
#include "assets.h"
#include "uidtab.h"
#include "aio.h"
#include "buffer_manager.h"
#define RT_DEFINE_PACKAGE_FILE_STRUCTURES
#include "packages.h"
#include "lz4/lz4.h"
rt_result DecompressAsset(void *compressed_buffer,
size_t compressed_buffer_size,
void **p_decompressed,
size_t *p_decompressed_size) {
const rt_package_asset_header *header = compressed_buffer;
size_t compressed_size = (compressed_buffer_size) - sizeof(*header);
XXH64_hash_t calculated_hash = XXH3_64bits((header + 1), compressed_size);
XXH64_hash_t file_hash = XXH64_hashFromCanonical(&header->checksum);
if (calculated_hash != file_hash) {
rtLog("core", "Checksum mismatch for asset");
return RT_LOAD_FAILED;
}
size_t size = (size_t)header->decompressed_size;
void *decompressed_buffer = rtAllocBuffer(size);
if (!decompressed_buffer) {
return RT_BUFFER_ALLOC_FAILED;
}
if (LZ4_decompress_safe((const char *)(header + 1),
(char *)decompressed_buffer,
(int)compressed_size,
(int)size) < 0) {
return RT_UNKNOWN_ERROR;
}
*p_decompressed = decompressed_buffer;
*p_decompressed_size = size;
return RT_SUCCESS;
}
RT_DLLEXPORT rt_result rtLoadAssetDirect(rt_uid uid, void **p_buffer, size_t *p_size) {
const rt_uid_data *data = rtGetUIDData(uid);
if (!data)
return RT_UNKNOWN_ASSET;
void *compressed_buffer = rtAllocBuffer(data->size);
if (!compressed_buffer) {
return RT_BUFFER_ALLOC_FAILED;
}
if (rtSubmitSingleLoadSync((rt_file_load) {
.file = data->pkg_file,
.offset = data->offset,
.num_bytes = data->size,
.dest = compressed_buffer,
}) != RT_AIO_STATE_FINISHED) {
rtReleaseBuffer(compressed_buffer, data->size);
return RT_LOAD_FAILED;
}
void *decompressed_buffer;
size_t decompressed_size;
rt_result res = DecompressAsset(compressed_buffer, data->size, &decompressed_buffer, &decompressed_size);
rtReleaseBuffer(compressed_buffer, data->size);
*p_buffer = decompressed_buffer;
*p_size = decompressed_size;
return res;
}

View File

@@ -1,40 +0,0 @@
#ifndef RT_ASSETS_H
#define RT_ASSETS_H
#include <stdint.h>
#include "runtime.h"
/* Unique identifier for an asset. */
typedef uint32_t rt_uid;
#define RT_INVALID_UID 0
/* Used to identify renderer backend dependent assets. */
enum {
RT_INVALID_RENDERER_BACKEND_CODE = 0,
RT_RENDERER_BACKEND_CODE_VK = 1,
RT_RENDERER_BACKEND_CODE_ONE_PAST_LAST,
};
typedef uint8_t rt_renderer_backend_code;
enum {
RT_UNKNOWN_ASSET = RT_CUSTOM_ERROR_START,
RT_BUFFER_ALLOC_FAILED,
RT_LOAD_FAILED,
RT_ASSET_CACHE_FULL,
};
/* Load an asset without using the cache */
RT_DLLEXPORT rt_result rtLoadAssetDirect(rt_uid uid, void **buffer, size_t *size);
typedef struct {
void *data;
size_t size;
rt_result result;
} rt_get_asset_result;
RT_DLLEXPORT rt_get_asset_result rtGetAsset(rt_uid uid);
#endif

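Note: in the removed API, rtGetAsset and rtReleaseAsset formed a refcounted acquire/release pair; the buffer behind rt_get_asset_result stayed valid until the matching release, after which the entry could move onto the reclaim list and be garbage-collected. A caller sketch (the uid value is hypothetical):

    rt_uid uid = 0xdeadbeef; /* hypothetical asset id */
    rt_get_asset_result res = rtGetAsset(uid);
    if (res.result == RT_SUCCESS) {
        /* use res.data / res.size; the cache holds a reference for us */
        rtReleaseAsset(uid); /* refcount drops; entry becomes reclaimable at 0 */
    }
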
View File

@@ -1,121 +0,0 @@
#define RT_DEFINE_UIDTAB_FILE_STRUCTURES
#include "uidtab.h"
#include "aio.h"
#include "xxhash/xxhash.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
typedef struct {
rt_uid *uids;
rt_uid_data *data;
uint32_t slots;
} rt_uidtab;
static rt_uidtab _tab;
rt_result LoadUIDTable(void) {
/* We use stdio here, because we cannot load any asset in parallel to this.
* This is because the uidtab is what tells us which assets exist.
*/
FILE *f = fopen("data/uidtab.bin", "rb");
if (!f)
return RT_LOAD_FAILED;
rt_uidtab_header header;
if (fread(&header, sizeof(header), 1, f) != 1) {
fclose(f);
return RT_LOAD_FAILED;
}
/* TODO(Kevin): For some reason, the checksum calculation causes
 * memory access errors.
XXH3_state_t *checksum = XXH3_createState();
if (!checksum) {
fclose(f);
return RT_UNKNOWN_ERROR;
}
*/
_tab.slots = rtNextPowerOfTwo32(header.num_entries * 2);
void *mem = malloc((sizeof(rt_uid) + sizeof(rt_uid_data)) * _tab.slots);
if (!mem) {
fclose(f);
_tab.slots = 0;
return RT_OUT_OF_MEMORY;
}
_tab.uids = mem;
_tab.data = (rt_uid_data *)(_tab.uids + _tab.slots);
memset(mem, 0, (sizeof(rt_uid) + sizeof(rt_uid_data)) * _tab.slots);
uint32_t mod = _tab.slots - 1;
for (uint32_t i = 0; i < header.num_entries; ++i) {
rt_uidtab_entry entry;
if (fread(&entry, sizeof(entry), 1, f) != 1) {
free(mem);
_tab.slots = 0;
fclose(f);
return RT_LOAD_FAILED;
}
//XXH3_64bits_update(checksum, &entry, sizeof(entry));
/* Insert into hashtable */
bool inserted = false;
for (uint32_t j = 0; j < _tab.slots; ++j) {
uint32_t at = (entry.uid + j) & mod;
if (_tab.uids[at] == RT_INVALID_UID) {
_tab.uids[at] = entry.uid;
_tab.data[at].pkg_file = entry.file;
_tab.data[at].size = entry.size;
_tab.data[at].offset = entry.offset;
inserted = true;
break;
}
}
if (!inserted) {
rtReportError("core",
"Failed to insert an entry into the uid table. This should not happen.");
fclose(f);
free(mem);
_tab.slots = 0;
return RT_UNKNOWN_ERROR;
}
}
fclose(f);
/*
XXH64_hash_t checksum_hash = XXH3_64bits_digest(checksum);
XXH64_hash_t file_hash = XXH64_hashFromCanonical(&header.checksum);
XXH3_freeState(checksum);
if (checksum_hash != file_hash) {
rtLog("core",
"WARNING: uidtab.bin checksum does not match calculated checksum of loaded entries.");
}
*/
return RT_SUCCESS;
}
void ReleaseUIDTable(void) {
free(_tab.uids);
_tab.slots = 0;
}
RT_DLLEXPORT const rt_uid_data *rtGetUIDData(rt_uid uid) {
uint32_t mod = _tab.slots - 1;
for (uint32_t j = 0; j < _tab.slots; ++j) {
uint32_t at = (uid + j) & mod;
if (_tab.uids[at] == uid) {
return &_tab.data[at];
} else if (_tab.uids[at] == RT_INVALID_UID) {
break;
}
}
return NULL;
}

View File

@@ -1,36 +0,0 @@
#ifndef RT_UIDTAB_H
#define RT_UIDTAB_H
#include "runtime.h"
#include "file_tab.h"
#include "assets.h"
#include "xxhash/xxhash.h"
#ifdef RT_DEFINE_UIDTAB_FILE_STRUCTURES
#pragma pack(push, 1)
typedef struct {
XXH64_canonical_t checksum;
uint32_t num_entries;
} rt_uidtab_header;
#pragma pack(pop)
#pragma pack(push, 1)
typedef struct {
rt_file_id file;
uint64_t offset;
uint64_t size;
rt_uid uid;
} rt_uidtab_entry;
#pragma pack(pop)
#endif
/* Data associated with an uid */
typedef struct {
rt_file_id pkg_file;
uint64_t offset;
uint64_t size;
} rt_uid_data;
RT_DLLEXPORT const rt_uid_data *rtGetUIDData(rt_uid uid);
#endif

View File

@@ -13,6 +13,7 @@
 #include <stdint.h>

 #include "runtime.h"
+#include "resources.h"

 #ifdef __cplusplus
 extern "C" {
@@ -23,39 +24,10 @@ typedef struct rt_renderer_init_info_s rt_renderer_init_info;
 RT_DLLEXPORT void rtRegisterRendererCVars(void);
-RT_DLLEXPORT bool rtInitGFX(rt_renderer_init_info *renderer_info);
+RT_DLLEXPORT rt_result rtInitGFX(rt_renderer_init_info *renderer_info);
 RT_DLLEXPORT void rtShutdownGFX(void);

-/* Handles backend objects */
-#define RT_GFX_HANDLE_MAX_VERSION 255
-typedef struct {
-    uint32_t version : 8;
-    uint32_t index : 24;
-} rt_pipeline_handle;
-
-/* Attributes are used to bind buffers (or textures) to symbolic values.
- * For example, an attribute might be bound to "CELL_GRID", which would be
- * replaced with the (at the time of the invoke) grid buffer of the current
- * world cell.
- */
-typedef enum {
-    RT_ATTRIBUTE_VALUE_UNDEFINED,
-    RT_ATTRIBUTE_VALUE_MATERIAL_ALBEDO,
-    RT_ATTRIBUTE_VALUE_MATERIAL_NORMAL,
-    RT_ATTRIBUTE_VALUE_count
-} rt_attribute_value;
-
-typedef struct {
-    uint32_t index;
-    rt_attribute_value value;
-} rt_attribute_binding;
-
 #ifdef __cplusplus
 }
 #endif

View File

@@ -19,6 +19,9 @@ static bool _renderer_loaded = false;
 RT_CVAR_S(rt_Renderer, "Select the render backend. Available options: [vk], Default: vk", "vk");

+extern rt_result InitObjectRenderer(void);
+extern void ShutdownObjectRenderer(void);
+
 #ifdef RT_STATIC_LIB
 extern void RT_RENDERER_API_FN(RegisterCVars)(void);
 extern rt_result RT_RENDERER_API_FN(Init)(const rt_renderer_init_info *);
@@ -77,19 +80,24 @@ RT_DLLEXPORT void rtRegisterRendererCVars(void) {
     g_renderer.RegisterCVars();
 }

-RT_DLLEXPORT bool rtInitGFX(rt_renderer_init_info *renderer_info) {
+RT_DLLEXPORT rt_result rtInitGFX(rt_renderer_init_info *renderer_info) {
     if (!_renderer_loaded) {
         if (!LoadRenderer())
-            return false;
+            return RT_UNKNOWN_ERROR;
         g_renderer.RegisterCVars();
     }
     if (g_renderer.Init(renderer_info) != RT_SUCCESS)
-        return false;
-    return true;
+        return RT_UNKNOWN_ERROR;
+
+    rt_result result = RT_SUCCESS;
+    if ((result = InitObjectRenderer()) != RT_SUCCESS)
+        return result;
+    return RT_SUCCESS;
 }

 RT_DLLEXPORT void rtShutdownGFX(void) {
+    ShutdownObjectRenderer();
     g_renderer.Shutdown();
 }

View File

@@ -0,0 +1,59 @@
#include "renderer_api.h"
#include "mem_arena.h"
#include "handles.h"
typedef struct {
rt_pipeline_handle pipeline;
} rt_object_renderer;
static rt_object_renderer _object_renderer;
#define PIPELINE_ID 0xdee414bba9b4f5bdLL
rt_result InitObjectRenderer(void) {
rt_result result = RT_SUCCESS;
rt_temp_arena temp = rtGetTemporaryArena(NULL, 0);
if (!temp.arena) {
    /* Return directly: the "out" path calls rtReturnTemporaryArena, but
     * there is no valid temporary arena to return here. */
    return RT_OUT_OF_MEMORY;
}
/* Init the pipeline */
size_t pipeline_size = rtGetResourceSize(PIPELINE_ID);
if (pipeline_size == 0) {
rtReportError("GFX", "Failed to determine size of object pipeline %llx", PIPELINE_ID);
result = RT_INVALID_VALUE;
goto out;
}
rt_resource *pipeline_resource = rtArenaPush(temp.arena, pipeline_size);
if (!pipeline_resource) {
rtReportError("GFX", "Failed to allocate memory for object pipeline %llx", PIPELINE_ID);
result = RT_OUT_OF_MEMORY;
goto out;
}
result = rtGetResource(PIPELINE_ID, pipeline_resource);
if (result != RT_SUCCESS) {
rtReportError("GFX", "Failed to load the object pipeline %llx", PIPELINE_ID);
goto out;
}
rt_pipeline_info *info = pipeline_resource->data;
if (!info) {
rtReportError("GFX", "Malformed object pipeline %llx (missing pipeline_info)", PIPELINE_ID);
result = RT_INVALID_VALUE;
goto out;
}
_object_renderer.pipeline = g_renderer.CompilePipeline(info);
if (!RT_IS_HANDLE_VALID(_object_renderer.pipeline)) {
rtReportError("GFX", "Failed to compile the object pipeline %llx", PIPELINE_ID);
result = RT_UNKNOWN_ERROR;
goto out;
}
out:
rtReturnTemporaryArena(temp);
return result;
}
void ShutdownObjectRenderer(void) {
g_renderer.DestroyPipeline(_object_renderer.pipeline);
}

View File

@@ -32,6 +32,8 @@ RT_DLLEXPORT rt_create_arena_result rtCreateArena(void *memory, size_t size) {
 }

 RT_DLLEXPORT void *rtArenaPush(rt_arena *arena, size_t n) {
+    if (n == 0)
+        return NULL;
     n = ALIGN(n);
     if (arena->at + n > arena->size)
         return NULL;

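Note: the new guard makes rtArenaPush reject zero-byte pushes instead of handing back a pointer with zero usable bytes. For context, this feeds the temporary-arena pattern used by CreateShaderModuleFromResource and InitObjectRenderer in this commit (the 256-byte scratch size is illustrative):

    rt_temp_arena temp = rtGetTemporaryArena(NULL, 0);
    if (temp.arena) {
        void *scratch = rtArenaPush(temp.arena, 256); /* NULL on n == 0 or exhaustion */
        if (scratch) {
            /* ... fill and use scratch ... */
        }
        rtReturnTemporaryArena(temp); /* releases everything pushed since the Get */
    }
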
View File

@@ -44,6 +44,26 @@ typedef struct {
     uint16_t storage_binding_count;
 } rt_pipeline_info;

+/* Attributes are used to bind buffers (or textures) to symbolic values.
+ * For example, an attribute might be bound to "CELL_GRID", which would be
+ * replaced with the (at the time of the invoke) grid buffer of the current
+ * world cell.
+ */
+typedef enum {
+    RT_ATTRIBUTE_VALUE_UNDEFINED,
+    RT_ATTRIBUTE_VALUE_MATERIAL_ALBEDO,
+    RT_ATTRIBUTE_VALUE_MATERIAL_NORMAL,
+    RT_ATTRIBUTE_VALUE_count
+} rt_attribute_value;
+
+typedef struct {
+    uint32_t index;
+    rt_attribute_value value;
+} rt_attribute_binding;
+
 typedef enum {
     RT_SHADER_TYPE_INVALID,
     RT_SHADER_TYPE_VULKAN,
@@ -66,6 +86,16 @@ typedef struct {
     size_t bytecode_length;
 } rt_shader_info;

+/* Handles for backend objects */
+#define RT_GFX_HANDLE_MAX_VERSION 255
+
+typedef struct {
+    uint32_t version : 8;
+    uint32_t index : 24;
+} rt_pipeline_handle;
+
 typedef void rt_register_renderer_cvars_fn(void);
 typedef rt_result rt_init_renderer_fn(const rt_renderer_init_info *info);
 typedef void rt_shutdown_renderer_fn(void);
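
Note: rt_pipeline_handle packs an 8-bit version next to a 24-bit slot index; CompilePipeline bumps a slot's version each time the slot is reused, so stale handles are detectable by a version comparison on lookup. A sketch of the lookup side, assuming a slot array like _pipelines in pipelines.c (the helper itself is hypothetical):

    /* Hypothetical lookup helper; rt_pipeline_slot's layout is assumed from
     * its usage in pipelines.c. Returns NULL for stale handles: once a slot
     * is recycled, versions baked into old handles no longer match. */
    static const rt_pipeline *ResolvePipelineHandle(rt_pipeline_handle h) {
        rt_pipeline_slot *slot = &_pipelines[h.index];
        if (slot->version != h.version)
            return NULL;
        return &slot->pipeline;
    }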