/* rtengine/src/runtime/resource_manager.c */

#include "aio.h"
#include "buffer_manager.h"
#include "compression.h"
#include "config.h"
#include "ds.h"
#include "file_tab.h"
#include "fsutils.h"
#include "hashing.h"
#include "mem_arena.h"
#include "resources.h"
#include "threading.h"
#include "renderer/common/renderer_api.h"
#include <assert.h>
#include <limits.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
RT_CVAR_S(rt_ResourceDirectory, "The directory used for storing resources. Default: res", "res");
RT_CVAR_SZ(rt_ResourceCacheSize,
"The maximum amount of memory used for caching resources. Default: 512MB",
RT_MB(512));
RT_CVAR_SZ(rt_MaxCachedResources,
"The maximum number of simultaneously cached resources. Default: 1024",
1024);
RT_CVAR_SZ(rt_ResourceNamespaceSize,
"The maximum number of resources that can exist. Default: 1,048,576",
1048576);
RT_CVAR_I(rt_DisableResourceNamespaceLoad,
"Disables loading of the saved resource namespace. Default: 0 (off)",
0);
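/* Both the cache lookup table and the resource namespace are open-addressed hash tables with
* linear probing; empty slots hold RT_INVALID_RESOURCE_ID. The cache table additionally marks
* evicted entries with RT_TOMBSTONE_ID so that probe chains passing over them stay intact. */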
#define RT_TOMBSTONE_ID 1
typedef struct {
void *buffer; /* Cached resource data; still compressed while a prefetch load_aio is pending */
size_t size; /* Size of buffer in bytes */
rt_aio_handle load_aio; /* Pending prefetch load, or RT_AIO_INVALID_HANDLE */
unsigned int next_free; /* Free-list link; UINT_MAX while the entry is in use */
int usage_counter; /* Priority in the reclaim heap; bumped on every access */
} rt_cached_resource;
typedef struct {
unsigned int index;
rt_resource_id id;
} rt_cached_resource_ref;
typedef struct {
void *mem;
rt_minheap reclaim_heap;
/* Used to lookup cached resources by id */
rt_resource_id *resource_ids;
unsigned int *resource_indices;
rt_cached_resource *resources;
unsigned int first_free;
size_t current_size;
rt_rwlock resource_lock;
rt_mutex *heap_lock;
} rt_resource_cache;
typedef struct {
rt_file_id file;
size_t offset;
size_t decompressed_size;
size_t compressed_size;
} rt_resource_ref;
typedef struct {
rt_resource_id *ids;
rt_resource_ref *refs;
rt_rwlock lock;
} rt_resource_namespace;
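/* On-disk format of <rt_ResourceDirectory>/namespace.bin: a packed rt_namespace_file_header
* followed by header.num_entries packed rt_namespace_file_entry records. header.checksum is
* rtHashBytes() over the entry array and is verified by LoadNamespace(). */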
#pragma pack(push, 1)
typedef struct {
rt_hash64 checksum;
uint32_t num_entries;
} rt_namespace_file_header;
typedef struct {
rt_resource_id id;
rt_resource_ref ref;
} rt_namespace_file_entry;
#pragma pack(pop)
/* ~~~ Utilities ~~~ */
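/* NOTE: All concrete resource types are currently compiled out (#if 0) below, presumably while
* the resource formats are being reworked. Until they are re-enabled, GetResourceDataSize()
* returns 0 for every type and CopyResourceData() only logs an error. */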
static size_t GetResourceDataSize(const rt_resource *resource) {
switch (resource->type) {
#if 0
case RT_RESOURCE_SHADER: {
/* Sizeof metadata + bytecode */
const rt_shader_info *info = resource->data;
return sizeof(rt_shader_info) + ((info) ? info->bytecode_length : 0);
} break;
case RT_RESOURCE_PIPELINE:
return sizeof(rt_pipeline_info);
case RT_RESOURCE_FRAMEGRAPH: {
#if 0
const rt_framegraph_info *info = resource->data;
size_t size = sizeof(*info) + sizeof(rt_render_target_info) * info->render_target_count +
sizeof(rt_render_pass_info) * info->render_pass_count + info->names_size;
const rt_render_pass_info *passes = rtResolveConstRelptr(&info->render_passes);
for (uint32_t i = 0; i < info->render_pass_count; ++i) {
size += passes[i].read_render_target_count * sizeof(rt_render_target_read) +
passes[i].write_render_target_count * sizeof(rt_render_target_write);
}
return size;
#endif
} break;
case RT_RESOURCE_EFFECT: {
return sizeof(rt_effect_info);
} break;
#endif
default:
rtLog("RESMGR", "Tried to get size of an invalid resource type %u", resource->type);
}
return 0;
}
static void CopyResourceData(const rt_resource *resource, void *dest) {
switch (resource->type) {
#if 0
case RT_RESOURCE_SHADER: {
/* Sizeof metadata + bytecode */
const rt_shader_info *info = resource->data;
rt_shader_info *dest_info = dest;
memcpy(dest_info, info, sizeof(*info));
memcpy(dest_info + 1, rtResolveConstRelptr(&info->bytecode), info->bytecode_length);
rtSetRelptr(&dest_info->bytecode, (void *)(dest_info + 1));
} break;
case RT_RESOURCE_PIPELINE:
memcpy(dest, resource->data, sizeof(rt_pipeline_info));
break;
case RT_RESOURCE_FRAMEGRAPH: {
#if 0
const rt_framegraph_info *info = resource->data;
rt_framegraph_info *dest_info = dest;
memcpy(dest_info, info, sizeof(*info));
memcpy(dest_info + 1,
rtResolveConstRelptr(&info->render_targets),
info->render_target_count * sizeof(rt_render_target_info));
rtSetRelptr(&dest_info->render_targets, (void *)(dest_info + 1));
char *passes_begin =
(char *)(dest_info + 1) + info->render_target_count * sizeof(rt_render_target_info);
char *read_write_dest =
passes_begin + info->render_pass_count * sizeof(rt_render_pass_info);
rt_render_pass_info *passes_dest = (rt_render_pass_info *)passes_begin;
const rt_render_pass_info *passes =
(const rt_render_pass_info *)rtResolveConstRelptr(&info->render_passes);
memcpy(passes_dest, passes, info->render_pass_count * sizeof(rt_render_pass_info));
rtSetRelptr(&dest_info->render_passes, passes_dest);
for (uint32_t i = 0; i < info->render_pass_count; ++i) {
if (passes[i].read_render_target_count > 0) {
const rt_render_target_read *reads =
rtResolveConstRelptr(&passes[i].read_render_targets);
rt_render_target_read *reads_dest = (rt_render_target_read *)read_write_dest;
rtSetRelptr(&passes_dest[i].read_render_targets, reads_dest);
memcpy(reads_dest,
reads,
sizeof(rt_render_target_read) * passes[i].read_render_target_count);
read_write_dest +=
sizeof(rt_render_target_read) * passes[i].read_render_target_count;
} else {
rtSetRelptr(&passes_dest[i].read_render_targets, NULL);
}
if (passes[i].write_render_target_count > 0) {
const rt_render_target_write *writes =
rtResolveConstRelptr(&passes[i].write_render_targets);
rt_render_target_write *write_dest = (rt_render_target_write *)read_write_dest;
rtSetRelptr(&passes_dest[i].write_render_targets, write_dest);
memcpy(write_dest,
writes,
sizeof(rt_render_target_write) * passes[i].write_render_target_count);
read_write_dest +=
sizeof(rt_render_target_write) * passes[i].write_render_target_count;
} else {
rtSetRelptr(&passes_dest[i].write_render_targets, NULL);
}
}
char *names_begin = (char *)read_write_dest;
const char *src_names = rtResolveConstRelptr(&info->names);
memcpy(names_begin, src_names, info->names_size);
rtSetRelptr(&dest_info->names, names_begin);
dest_info->names_size = info->names_size;
const rt_render_target_info *src_rts = rtResolveConstRelptr(&info->render_targets);
rt_render_target_info *rts = (rt_render_target_info *)(dest_info + 1);
for (uint32_t i = 0; i < info->render_target_count; ++i) {
const char *src_name = rtResolveConstRelptr(&src_rts[i].name);
if (src_name)
rtSetRelptr(&rts[i].name, names_begin + (src_name - src_names));
}
for (uint32_t i = 0; i < info->render_pass_count; ++i) {
const char *src_name = rtResolveConstRelptr(&passes[i].name);
if (src_name)
rtSetRelptr(&passes_dest[i].name, names_begin + (src_name - src_names));
}
#endif
} break;
case RT_RESOURCE_EFFECT: {
memcpy(dest, resource->data, sizeof(rt_effect_info));
} break;
#endif
default:
rtLog("RESMGR", "Tried to copy a resource of invalid type %u", resource->type);
}
}
/* Fills the passed write struct with the necessary information to save the resource to a file */
static bool PrepareResourceFlushToFile(rt_resource_id id,
const rt_resource *resource,
rt_file_write *write,
rt_arena *arena) {
RT_ASSERT((uintptr_t)resource->data == (uintptr_t)(resource + 1),
"The resource and data must be laid out in a contiguous buffer.");
char file_path[260];
rtSPrint(file_path, 260, "%s/%llx.bin", rt_ResourceDirectory.s, id);
size_t total_size = sizeof(rt_resource) + GetResourceDataSize(resource);
size_t compression_bound = rtGetCompressionBound(total_size);
void *compressed_resource = rtArenaPush(arena, compression_bound);
if (!compressed_resource)
return false;
size_t compressed_bytes =
rtCompressData(resource, total_size, compressed_resource, compression_bound);
write->file = rtAddFile(file_path);
write->buffer = compressed_resource;
write->offset = 0;
write->num_bytes = compressed_bytes;
return true;
}
/* ~~~ Cache ~~~ */
static rt_resource_cache _cache;
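/* The cache uses a single allocation (_cache.mem) that is carved into five arrays: the
* reclaim-heap keys (int) and values (rt_cached_resource_ref), the cached resource slots
* themselves (rt_MaxCachedResources entries each), and the id -> slot lookup table
* (resource_ids plus resource_indices, each sized at twice rt_MaxCachedResources). */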
static rt_result InitResourceCache(void) {
int count = (int)rt_MaxCachedResources.sz;
if (count == 0) {
rtReportError("RESMGR", "rt_MaxCachedResources must be greater than 0.");
return RT_INVALID_VALUE;
}
size_t required_mem = (size_t)count * (sizeof(rt_cached_resource_ref) + sizeof(int) +
sizeof(rt_cached_resource)) +
2 * (size_t)count * (sizeof(rt_resource_id) + sizeof(unsigned int));
void *mem = malloc(required_mem);
if (!mem)
return RT_OUT_OF_MEMORY;
rt_create_rwlock_result resource_lock_create = rtCreateRWLock();
if (!resource_lock_create.ok) {
free(mem);
return RT_UNKNOWN_ERROR;
}
_cache.heap_lock = rtCreateMutex();
if (!_cache.heap_lock) {
free(mem);
rtDestroyRWLock(&_cache.resource_lock);
return RT_UNKNOWN_ERROR;
}
memset(mem, 0, required_mem);
_cache.mem = mem;
int *reclaim_keys = mem;
rt_cached_resource_ref *reclaim_refs = (rt_cached_resource_ref *)(reclaim_keys + count);
_cache.reclaim_heap = rtCreateMinheap(reclaim_keys,
reclaim_refs,
sizeof(rt_cached_resource_ref),
(size_t)count,
0);
_cache.current_size = 0;
_cache.resources = (rt_cached_resource *)(reclaim_refs + count);
_cache.resource_lock = resource_lock_create.lock;
for (int i = 0; i < count; ++i) {
_cache.resources[i].next_free = (i < count - 1) ? (unsigned int)i + 1 : UINT_MAX;
}
_cache.first_free = 0;
_cache.resource_ids = (rt_resource_id *)(_cache.resources + count);
_cache.resource_indices = (unsigned int *)(_cache.resource_ids + 2 * count);
return RT_SUCCESS;
}
static void ShutdownResourceCache(void) {
free(_cache.mem);
rtDestroyRWLock(&_cache.resource_lock);
rtDestroyMutex(_cache.heap_lock);
memset(&_cache, 0, sizeof(_cache));
}
/* NOTE(Kevin): Only call this while holding a write-lock on the cache.
* The function locks the reclaim heap lock itself. */
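/* Evicts cache entries in ascending usage_counter order (the reclaim min-heap priority) until at
* least `space` bytes are available or the heap is empty. Returns true if enough space is free. */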
static bool FreeCacheSpace(size_t space) {
size_t free_space = rt_ResourceCacheSize.sz - _cache.current_size;
rtLockMutex(_cache.heap_lock);
while (free_space < space && !rtMinheapIsEmpty(&_cache.reclaim_heap)) {
rt_cached_resource_ref ref;
rtMinheapPop(&_cache.reclaim_heap, &ref);
rt_cached_resource *res = &_cache.resources[ref.index];
if (res->load_aio != RT_AIO_INVALID_HANDLE) {
rtWaitForAIOCompletion(res->load_aio);
rtReleaseAIO(res->load_aio);
}
rtReleaseBuffer(res->buffer, res->size);
free_space += res->size;
_cache.current_size -= res->size;
res->next_free = _cache.first_free;
_cache.first_free = ref.index;
res->usage_counter = 0;
res->buffer = NULL;
res->size = 0;
/* Remove from lookup table */
size_t ht_size = rt_MaxCachedResources.sz * 2;
for (size_t off = 0; off < ht_size; ++off) {
size_t slot = (ref.id + off) % ht_size;
if (_cache.resource_ids[slot] == ref.id) {
_cache.resource_ids[slot] = RT_TOMBSTONE_ID;
break;
} else if (_cache.resource_ids[slot] == RT_INVALID_RESOURCE_ID) {
break;
}
}
}
rtUnlockMutex(_cache.heap_lock);
return free_space >= space;
}
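/* Returns the index into _cache.resources for the given id, or UINT_MAX if the resource is not
* cached. The lookup table is protected by _cache.resource_lock. */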
static unsigned int FindCachedResource(rt_resource_id id) {
size_t ht_size = rt_MaxCachedResources.sz * 2;
for (size_t off = 0; off < ht_size; ++off) {
size_t slot = (id + off) % ht_size;
if (_cache.resource_ids[slot] == id)
return _cache.resource_indices[slot];
else if (_cache.resource_ids[slot] == RT_INVALID_RESOURCE_ID)
return UINT_MAX;
}
return UINT_MAX;
}
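/* Inserts a deep copy of `res` into the cache (or, if `id` is already cached, just bumps its
* usage counter) and returns a pointer to the cached copy. Returns NULL if neither enough cache
* space could be reclaimed nor a buffer allocated. */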
static rt_resource *CacheResource(rt_resource_id id, const rt_resource *res) {
rt_resource *cached = NULL;
rtLockWrite(&_cache.resource_lock);
unsigned int index = FindCachedResource(id);
if (index != UINT_MAX) {
rt_cached_resource_ref ref = {.id = id, .index = index};
rt_cached_resource *cache_entry = &_cache.resources[index];
++cache_entry->usage_counter;
rtLockMutex(_cache.heap_lock);
rtMinheapUpdate(&_cache.reclaim_heap, &ref, cache_entry->usage_counter, NULL);
rtUnlockMutex(_cache.heap_lock);
cached = cache_entry->buffer;
} else {
/* Insert into cache */
size_t total_size = sizeof(rt_resource) + GetResourceDataSize(res);
if (_cache.current_size + total_size >= rt_ResourceCacheSize.sz) {
if (!FreeCacheSpace(total_size)) {
rtLog("RESMGR",
"Unable to reclaim %zu kB from the resource cache.",
total_size / 1024);
rtUnlockWrite(&_cache.resource_lock);
return cached;
}
RT_ASSERT(_cache.first_free != UINT_MAX,
"There must be a free cache entry after space was freed.");
}
void *buffer = rtAllocBuffer(total_size);
if (!buffer) {
rtLog("RESMG", "Unable to allocate %zu kB for the new resource.", total_size / 1024);
rtUnlockWrite(&_cache.resource_lock);
return cached;
}
cached = buffer;
memcpy(cached, res, sizeof(rt_resource));
cached->data = (void *)(cached + 1);
CopyResourceData(res, cached->data);
index = _cache.first_free;
_cache.first_free = _cache.resources[index].next_free;
_cache.resources[index].buffer = buffer;
_cache.resources[index].usage_counter = 1;
_cache.resources[index].size = total_size;
_cache.resources[index].next_free = UINT_MAX;
_cache.resources[index].load_aio = RT_AIO_INVALID_HANDLE;
_cache.current_size += total_size;
rt_cached_resource_ref reclaim_ref = {
.id = id,
.index = index,
};
rtLockMutex(_cache.heap_lock);
rtMinheapPush(&_cache.reclaim_heap, 1, &reclaim_ref);
rtUnlockMutex(_cache.heap_lock);
/* Insert into lookup table */
bool inserted = false;
size_t ht_size = rt_MaxCachedResources.sz * 2;
for (size_t off = 0; off < ht_size; ++off) {
size_t slot = (id + off) % ht_size;
if (_cache.resource_ids[slot] == RT_INVALID_RESOURCE_ID ||
_cache.resource_ids[slot] == RT_TOMBSTONE_ID || _cache.resource_ids[slot] == id) {
_cache.resource_ids[slot] = id;
_cache.resource_indices[slot] = index;
inserted = true;
break;
}
}
if (!inserted) {
rtReportError("RESMGR",
"Failed to insert created resource into the resource lookup table.");
}
}
rtUnlockWrite(&_cache.resource_lock);
return cached;
}
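/* Registers a prefetch that is still in flight: the cache entry keeps the load's AIO handle and
* the (still compressed) destination buffer. rtGetResource() later waits on load_aio and replaces
* the buffer with the decompressed resource. Does nothing if `id` is already cached. */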
static void InsertPrefetchResourceIntoCache(rt_resource_id id,
rt_aio_handle load_aio,
void *load_buffer,
size_t load_buffer_size) {
rtLockWrite(&_cache.resource_lock);
unsigned int index = FindCachedResource(id);
if (index != UINT_MAX) {
rtUnlockWrite(&_cache.resource_lock);
return;
}
if (_cache.current_size + load_buffer_size >= rt_ResourceCacheSize.sz) {
if (!FreeCacheSpace(load_buffer_size)) {
rtLog("RESMGR",
"Unable to reclaim %zu kB from the resource cache.",
load_buffer_size / 1024);
rtUnlockWrite(&_cache.resource_lock);
return;
}
RT_ASSERT(_cache.first_free != UINT_MAX,
"There must be a free cache entry after space was freed.");
}
index = _cache.first_free;
_cache.first_free = _cache.resources[index].next_free;
_cache.resources[index].buffer = load_buffer;
_cache.resources[index].usage_counter = 1;
_cache.resources[index].size = load_buffer_size;
_cache.resources[index].next_free = UINT_MAX;
_cache.resources[index].load_aio = load_aio;
_cache.current_size += load_buffer_size;
rt_cached_resource_ref reclaim_ref = {
.id = id,
.index = index,
};
rtLockMutex(_cache.heap_lock);
rtMinheapPush(&_cache.reclaim_heap, 1, &reclaim_ref);
rtUnlockMutex(_cache.heap_lock);
/* Insert into lookup table */
bool inserted = false;
size_t ht_size = rt_MaxCachedResources.sz * 2;
for (size_t off = 0; off < ht_size; ++off) {
size_t slot = (id + off) % ht_size;
if (_cache.resource_ids[slot] == RT_INVALID_RESOURCE_ID ||
_cache.resource_ids[slot] == RT_TOMBSTONE_ID || _cache.resource_ids[slot] == id) {
_cache.resource_ids[slot] = id;
_cache.resource_indices[slot] = index;
inserted = true;
break;
}
}
if (!inserted) {
rtReportError("RESMGR",
"Failed to insert created resource into the resource lookup table.");
}
rtUnlockWrite(&_cache.resource_lock);
}
/* ~~~ Resource Namespace ~~~ */
static rt_resource_namespace _namespace;
static rt_result InitResourceNamespace(void) {
size_t size = rt_ResourceNamespaceSize.sz;
if (size == 0) {
rtReportError("RESMGR", "rt_ResourceNamespaceSize must be greater than 0.");
return RT_INVALID_VALUE;
}
void *mem = calloc(size, sizeof(rt_resource_id) + sizeof(rt_resource_ref));
if (!mem)
return RT_OUT_OF_MEMORY;
rt_create_rwlock_result lock_create = rtCreateRWLock();
if (!lock_create.ok) {
free(mem);
return RT_UNKNOWN_ERROR;
}
_namespace.lock = lock_create.lock;
_namespace.ids = mem;
_namespace.refs = (rt_resource_ref *)(_namespace.ids + size);
return RT_SUCCESS;
}
static void ShutdownNamespace(void) {
rtDestroyRWLock(&_namespace.lock);
free(_namespace.ids);
memset(&_namespace, 0, sizeof(_namespace));
}
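/* Loads <rt_ResourceDirectory>/namespace.bin, validates the entry count against the file size and
* the checksum against the entry data, and inserts every entry into the in-memory namespace
* table. Errors are logged and leave the namespace partially filled or empty. */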
static void LoadNamespace(void) {
char path[260];
rtSPrint(path, RT_ARRAY_COUNT(path), "%s/namespace.bin", rt_ResourceDirectory.s);
rt_file_id fid = rtAddFile(path);
size_t file_size = rtGetFileSize(path);
if (file_size == 0) {
rtLog("RESMGR", "Unable to determine size of %s", path);
return;
}
rt_temp_arena temp = rtGetTemporaryArena(NULL, 0);
if (!temp.arena) {
rtLog("RESMGR", "Unable to get temporary arena for loading the namespace.");
return;
}
void *dest = rtArenaPush(temp.arena, file_size);
if (!dest) {
rtReturnTemporaryArena(temp);
rtLog("RESMGR", "Unable to allocate temporary space for loading the namespace.");
return;
}
rt_aio_state state = rtSubmitSingleLoadSync(
(rt_file_load){.file = fid, .num_bytes = file_size, .offset = 0, .dest = dest});
if (state == RT_AIO_STATE_FINISHED) {
const rt_namespace_file_header *header = dest;
const rt_namespace_file_entry *entries = (const rt_namespace_file_entry *)(header + 1);
if (file_size >= sizeof(*header) &&
(header->num_entries * sizeof(rt_namespace_file_entry)) <=
(file_size - sizeof(*header))) {
rt_hash64 entries_hash =
rtHashBytes(entries, sizeof(rt_namespace_file_entry) * header->num_entries);
if (entries_hash == header->checksum) {
size_t ns_size = rt_ResourceNamespaceSize.sz;
for (uint32_t i = 0; i < header->num_entries; ++i) {
bool inserted = false;
for (size_t j = 0; j < ns_size; ++j) {
size_t at = (entries[i].id + j) % ns_size;
if (_namespace.ids[at] == RT_INVALID_RESOURCE_ID) {
inserted = true;
_namespace.ids[at] = entries[i].id;
_namespace.refs[at] = entries[i].ref;
break;
} else if (_namespace.ids[at] == entries[i].id) {
rtReportError(
"RESMGR",
"Resource ID (%llx) collision detected in namespace file %s",
entries[i].id,
path);
rtReturnTemporaryArena(temp);
return;
}
}
if (!inserted) {
rtReportError("RESMGR",
"Failed to insert namespace entry %llx",
entries[i].id);
break;
}
}
} else {
rtLog("RESMGR", "Checksum mismatch in %s", path);
}
} else {
rtLog("RESMGR", "Number of entries in %s is inconsistent with the file size.", path);
}
} else {
rtLog("RESMGR", "Failed to load %s.", path);
}
rtReturnTemporaryArena(temp);
}
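/* Looks up where a resource lives on disk. Returns a ref with file == RT_INVALID_FILE_ID if the
* id is not present in the namespace. */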
static rt_resource_ref GetResourceRef(rt_resource_id id) {
rt_resource_ref ref = {.file = RT_INVALID_FILE_ID};
rtLockRead(&_namespace.lock);
size_t ns_size = rt_ResourceNamespaceSize.sz;
for (size_t off = 0; off < ns_size; ++off) {
size_t at = (id + off) % ns_size;
if (_namespace.ids[at] == id) {
ref = _namespace.refs[at];
break;
} else if (_namespace.ids[at] == RT_INVALID_RESOURCE_ID) {
rtLog("RESMGR", "Tried to load unknown resource %llx", id);
break;
}
}
rtUnlockRead(&_namespace.lock);
return ref;
}
/* ~~~ Public API ~~~ */
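/* Typical lifecycle: InitResourceManager() creates the resource directory, the cache and the
* namespace (optionally loading namespace.bin); rtCreateResources() and rtGetResource() populate
* and query it; rtSaveResourceNamespace() persists the namespace; ShutdownResourceManager() frees
* everything. */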
rt_result InitResourceManager(void) {
if (!rtCreateDirectory(rt_ResourceDirectory.s))
rtLog("RESMGR", "CreateDirectory(%s) failed.", rt_ResourceDirectory.s);
rt_result res;
if ((res = InitResourceCache()) != RT_SUCCESS)
return res;
if ((res = InitResourceNamespace()) != RT_SUCCESS) {
ShutdownResourceCache();
return res;
}
if (!rt_DisableResourceNamespaceLoad.i) {
LoadNamespace();
}
return RT_SUCCESS;
}
void ShutdownResourceManager(void) {
ShutdownResourceCache();
ShutdownNamespace();
}
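/* Copies the decompressed resource `id` into `dest`, which must be at least rtGetResourceSize(id)
* bytes. Cached resources are served from memory (waiting for and decompressing a pending
* prefetch if necessary); otherwise the file is loaded and decompressed synchronously, the result
* is cached, and the resource's dependencies and subresources are prefetched.
*
* A minimal usage sketch (hypothetical caller code; "my_resource" is a made-up name that would
* have to be registered via rtCreateResources() first):
*
*   rt_resource_id id = rtGetResourceID("my_resource");
*   size_t size = rtGetResourceSize(id);
*   rt_resource *res = malloc(size);
*   if (res && rtGetResource(id, res) == RT_SUCCESS) {
*       // res->data points directly behind the header inside this buffer.
*   }
*   free(res);
*/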
RT_DLLEXPORT rt_result rtGetResource(rt_resource_id id, void *dest) {
rtLockRead(&_cache.resource_lock);
unsigned int cache_index = FindCachedResource(id);
if (cache_index != UINT_MAX) {
rt_cached_resource *cached = &_cache.resources[cache_index];
/* TODO(Kevin): It's possible that the load is not finished. */
if (cached->load_aio != RT_AIO_INVALID_HANDLE) {
rtUnlockRead(&_cache.resource_lock);
rtLockWrite(&_cache.resource_lock);
if (cached->load_aio != RT_AIO_INVALID_HANDLE) {
rt_aio_state state = rtWaitForAIOCompletion(cached->load_aio);
rtReleaseAIO(cached->load_aio);
cached->load_aio = RT_AIO_INVALID_HANDLE;
if (state != RT_AIO_STATE_FINISHED) {
rtLog("RESMGR", "Failed to load resource %llx: %u", id, state);
rtUnlockWrite(&_cache.resource_lock);
return RT_UNKNOWN_ERROR;
}
/* Need to decompress the resource */
rt_resource_ref ref = GetResourceRef(id);
if (ref.file == RT_INVALID_FILE_ID) {
rtUnlockWrite(&_cache.resource_lock);
return RT_INVALID_VALUE;
}
void *decompressed = rtAllocBuffer(ref.decompressed_size);
if (!decompressed) {
rtUnlockWrite(&_cache.resource_lock);
return RT_OUT_OF_MEMORY;
}
size_t written_bytes = rtDecompressData(cached->buffer,
cached->size,
decompressed,
ref.decompressed_size);
if (written_bytes != ref.decompressed_size) {
rtLog("RESMGR",
"Corrupted resource data %llx: Result of decompression does not match "
"saved "
"metadata.",
id);
rtUnlockWrite(&_cache.resource_lock);
return RT_UNKNOWN_ERROR;
}
rt_resource *resource = decompressed;
/* Patch the data pointer */
resource->data = (resource + 1);
/* Note that we allow the cache to grow beyond its configured maximum here. */
rtReleaseBuffer(cached->buffer, cached->size);
_cache.current_size -= cached->size;
cached->size = ref.decompressed_size;
_cache.current_size += ref.decompressed_size;
cached->buffer = decompressed;
rtUnlockWrite(&_cache.resource_lock);
rtLockRead(&_cache.resource_lock);
}
}
RT_ASSERT(cached->size == rtGetResourceSize(id), "Inconsistent resource size");
memcpy(dest, cached->buffer, cached->size);
/* Patch the data pointer to point into the caller's copy instead of the cache's buffer. */
((rt_resource *)dest)->data = (rt_resource *)dest + 1;
rtLockMutex(_cache.heap_lock);
++cached->usage_counter;
rt_cached_resource_ref ref = {.id = id, .index = cache_index};
rtMinheapUpdate(&_cache.reclaim_heap, &ref, cached->usage_counter, NULL);
rtUnlockMutex(_cache.heap_lock);
rtUnlockRead(&_cache.resource_lock);
} else {
/* Load the resource file */
rtUnlockRead(&_cache.resource_lock);
rt_resource_ref ref = GetResourceRef(id);
if (ref.file == RT_INVALID_FILE_ID) {
return RT_INVALID_VALUE;
}
rt_temp_arena temp_arena = rtGetTemporaryArena(NULL, 0);
void *compressed_buffer = rtArenaPush(temp_arena.arena, ref.compressed_size);
if (!compressed_buffer) {
rtReturnTemporaryArena(temp_arena);
return RT_OUT_OF_MEMORY;
}
rt_aio_state state = rtSubmitSingleLoadSync((rt_file_load){
.file = ref.file,
.dest = compressed_buffer,
.num_bytes = ref.compressed_size,
.offset = ref.offset,
});
if (state != RT_AIO_STATE_FINISHED) {
rtLog("RESMGR", "Failed to load resource %llx: %u", id, state);
rtReturnTemporaryArena(temp_arena);
return RT_UNKNOWN_ERROR;
}
/* Decompress */
size_t written_bytes =
rtDecompressData(compressed_buffer, ref.compressed_size, dest, ref.decompressed_size);
rtReturnTemporaryArena(temp_arena);
if (written_bytes != ref.decompressed_size) {
rtLog("RESMGR",
"Corrupted resource data %llx: Result of decompression does not match saved "
"metadata.",
id);
return RT_UNKNOWN_ERROR;
}
rt_resource *resource = dest;
/* Patch the data pointer */
resource->data = (resource + 1);
CacheResource(id, resource);
rtPrefetchResources(resource->dependencies, resource->dependency_count);
rtPrefetchResources(resource->subresources, resource->subresource_count);
}
return RT_SUCCESS;
}
RT_DLLEXPORT size_t rtGetResourceSize(rt_resource_id id) {
size_t size = 0;
rtLockRead(&_namespace.lock);
size_t ns_size = rt_ResourceNamespaceSize.sz;
for (size_t off = 0; off < ns_size; ++off) {
size_t at = (id + off) % ns_size;
if (_namespace.ids[at] == id) {
size = _namespace.refs[at].decompressed_size;
break;
} else if (_namespace.ids[at] == RT_INVALID_RESOURCE_ID) {
rtLog("RESMGR", "Tried to get size of unknown resource %llx", id);
break;
}
}
rtUnlockRead(&_namespace.lock);
return size;
}
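/* Kicks off asynchronous loads of the compressed data for the given resources, in batches of up
* to RT_LOAD_BATCH_MAX_SIZE, and registers each pending load with the cache. Unknown ids, already
* cached ids and failed buffer allocations are skipped. */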
RT_DLLEXPORT void rtPrefetchResources(const rt_resource_id *ids, uint32_t count) {
rt_load_batch loads = {.num_loads = 0};
/* Remember which id each queued load belongs to, so skipped entries do not shift the mapping. */
rt_resource_id batch_ids[RT_LOAD_BATCH_MAX_SIZE];
rt_aio_handle handles[RT_LOAD_BATCH_MAX_SIZE];
for (uint32_t i = 0; i < count; ++i) {
rt_resource_ref ref = GetResourceRef(ids[i]);
if (ref.file == RT_INVALID_FILE_ID) {
rtLog("RESMGR", "Attempted to prefetch unknown resource %llx", ids[i]);
continue;
}
/* Check if the resource is already cached */
if (FindCachedResource(ids[i]) != UINT_MAX)
continue;
void *buffer = rtAllocBuffer(ref.compressed_size);
if (!buffer) {
rtLog("RESMGR",
"Could not prefetch resource %llx because a buffer allocation failed.",
ids[i]);
continue;
}
batch_ids[loads.num_loads] = ids[i];
loads.loads[loads.num_loads] = (rt_file_load){
.file = ref.file,
.num_bytes = ref.compressed_size,
.offset = ref.offset,
.dest = buffer,
};
++loads.num_loads;
if (loads.num_loads == RT_LOAD_BATCH_MAX_SIZE || i == count - 1) {
if (rtSubmitLoadBatch(&loads, handles) != RT_SUCCESS) {
rtLog("RESMGR", "Prefetch failed because the file loads could not be submitted.");
}
for (uint32_t j = 0; j < loads.num_loads; ++j) {
InsertPrefetchResourceIntoCache(batch_ids[j],
handles[j],
loads.loads[j].dest,
loads.loads[j].num_bytes);
}
loads.num_loads = 0;
}
}
if (loads.num_loads > 0) {
if (rtSubmitLoadBatch(&loads, handles) != RT_SUCCESS) {
rtLog("RESMGR", "Prefetch failed because the file loads could not be submitted.");
}
for (uint32_t j = 0; j < loads.num_loads; ++j) {
InsertPrefetchResourceIntoCache(batch_ids[j],
handles[j],
loads.loads[j].dest,
loads.loads[j].num_bytes);
}
}
}
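/* A resource id is the 64-bit hash of the resource's name; the two reserved sentinel values
* (RT_INVALID_RESOURCE_ID and RT_TOMBSTONE_ID) are remapped by inverting the bits. */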
RT_DLLEXPORT rt_resource_id rtGetResourceID(const char *name) {
size_t name_len = strlen(name);
rt_resource_id id = (rt_resource_id)rtHashBytes(name, name_len);
if (id == RT_INVALID_RESOURCE_ID || id == RT_TOMBSTONE_ID)
id = ~id;
return id;
}
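/* Registers `count` new resources: each one is hashed into the namespace, copied into the cache
* and written to <rt_ResourceDirectory>/<id in hex>.bin as a compressed blob. Writes are submitted
* in batches of RT_WRITE_BATCH_MAX_SIZE, overlapping with the preparation of the next batch. The
* generated ids are returned through `ids`. */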
RT_DLLEXPORT rt_result rtCreateResources(uint32_t count,
const char **names,
const rt_resource *resources,
rt_resource_id *ids) {
rt_result result = RT_SUCCESS;
size_t ns_size = rt_ResourceNamespaceSize.sz;
rt_write_batch writes = {.num_writes = 0};
rt_aio_handle write_handles[RT_WRITE_BATCH_MAX_SIZE];
uint32_t outstanding_writes = 0;
rt_temp_arena temp_arena = rtGetTemporaryArena(NULL, 0);
rtLockWrite(&_namespace.lock);
for (uint32_t i = 0; i < count; ++i) {
rt_resource_id id = rtGetResourceID(names[i]);
bool inserted = false;
for (size_t j = 0; j < ns_size; ++j) {
size_t at = (id + j) % ns_size;
if (_namespace.ids[at] == RT_INVALID_RESOURCE_ID) {
inserted = true;
ids[i] = id;
rt_resource *cached = CacheResource(id, &resources[i]);
if (!cached ||
!PrepareResourceFlushToFile(id,
cached,
&writes.writes[writes.num_writes],
temp_arena.arena)) {
rtReportError("RESMGR", "Failed to prepare resource %llx for writing.", id);
inserted = false;
break;
}
_namespace.ids[at] = id;
_namespace.refs[at].offset = writes.writes[writes.num_writes].offset;
_namespace.refs[at].compressed_size = writes.writes[writes.num_writes].num_bytes;
_namespace.refs[at].file = writes.writes[writes.num_writes].file;
_namespace.refs[at].decompressed_size =
sizeof(rt_resource) + GetResourceDataSize(&resources[i]);
++writes.num_writes;
rtLog("RESMGR",
"Created resource %llx: Uncompressed size: %zu bytes, compressed size: %zu "
"bytes.",
id,
_namespace.refs[at].decompressed_size,
_namespace.refs[at].compressed_size);
break;
} else if (_namespace.ids[at] == id) {
rtReportError("RESMGR",
"Resource ID collision occured with resource %s.\nID: %llx",
names[i],
id);
result = RT_INVALID_VALUE;
goto out;
}
}
if (!inserted) {
result = RT_OUT_OF_MEMORY;
goto out;
}
if (writes.num_writes == RT_WRITE_BATCH_MAX_SIZE ||
(i == count - 1 && writes.num_writes > 0)) {
if (outstanding_writes > 0) {
/* Wait until the previous batch is finished */
for (uint32_t k = 0; k < outstanding_writes; ++k) {
if (rtWaitForAIOCompletion(write_handles[k]) != RT_AIO_STATE_FINISHED) {
rtReportError("RESMGR", "Resource write failed.");
result = RT_UNKNOWN_ERROR;
goto out;
}
rtReleaseAIO(write_handles[k]);
}
}
outstanding_writes = writes.num_writes;
if (rtSubmitWriteBatch(&writes, write_handles) != RT_SUCCESS) {
rtReportError("RESMGR", "Failed to submit resource writes.");
result = RT_UNKNOWN_ERROR;
goto out;
}
/* Start the next batch from the beginning of the writes array. */
writes.num_writes = 0;
}
}
if (outstanding_writes > 0) {
/* Wait until the last batch is finished */
for (uint32_t i = 0; i < outstanding_writes; ++i) {
if (rtWaitForAIOCompletion(write_handles[i]) != RT_AIO_STATE_FINISHED) {
rtReportError("RESMGR", "Resource write failed.");
result = RT_UNKNOWN_ERROR;
}
rtReleaseAIO(write_handles[i]);
}
}
out:
rtReturnTemporaryArena(temp_arena);
rtUnlockWrite(&_namespace.lock);
return result;
}
RT_DLLEXPORT void rDebugLogResource(rt_resource_id id, const rt_resource *resource) {
static const char *type_str[RT_RESOURCE_TYPE_count] = {"Shader", "Pipeline", "Framegraph"};
rtLog("RESMGR", "Resource %llx:", id);
rtLog("RESMGR",
" type: %s",
(resource->type < RT_RESOURCE_TYPE_count) ? type_str[resource->type] : "<INVALID>");
rtLog("RESMGR", " subresources:");
for (uint32_t i = 0; i < resource->subresource_count; ++i) {
rtLog("RESMGR", " - %llx", resource->subresources[i]);
}
rtLog("RESMGR", " dependencies:");
for (uint32_t i = 0; i < resource->dependency_count; ++i) {
rtLog("RESMGR", " - %llx", resource->dependencies[i]);
}
switch (resource->type) {
#if 0
case RT_RESOURCE_SHADER: {
static const char *stype_str[RT_SHADER_TYPE_count] = {"<INVALID>", "Vulkan"};
static const char *stage_str[RT_SHADER_STAGE_count] = {"Vertex", "Fragment", "Compute"};
const rt_shader_info *shader = resource->data;
rtLog("RESMGR", " shader data:");
rtLog("RESMGR",
" type: %s",
(shader->type < RT_SHADER_TYPE_count) ? stype_str[shader->type] : "<INVALID>");
rtLog("RESMGR",
" stage: %s",
(shader->stage < RT_SHADER_STAGE_count) ? stage_str[shader->stage] : "<INVALID>");
rtLog("RESMGR", " bytecode: %zu bytes", shader->bytecode_length);
} break;
case RT_RESOURCE_PIPELINE: {
const rt_pipeline_info *pipeline = resource->data;
rtLog("RESMGR", " pipeline data:");
rtLog("RESMGR", " vertex shader: %llx", pipeline->vertex_shader);
rtLog("RESMGR", " fragment shader: %llx", pipeline->fragment_shader);
rtLog("RESMGR", " compute shader: %llx", pipeline->compute_shader);
} break;
case RT_RESOURCE_FRAMEGRAPH: {
static const char *format_str[RT_PIXEL_FORMAT_count] = {
"<INVALID>",
"R8G8B8A8_UNORM",
"B8G8R8A8_UNORM",
"R8G8B8A8_SRGB",
"B8G8R8A8_SRGB",
"R8G8B8_UNORM",
"B8G8R8_UNORM",
"R8G8B8_SRGB",
"B8G8R8_SRGB",
"DEPTH24_STENCIL8",
"DEPTH32",
"SWAPCHAIN",
};
static const char *read_mode_str[RT_RENDER_TARGET_READ_count] = {"SAMPLED",
"INPUT_ATTACHMENT"};
const rt_framegraph_info *framegraph = resource->data;
rtLog("RESMGR", " framegraph data:");
rtLog("RESMGR", " render targets:");
const rt_render_target_info *render_targets =
rtResolveConstRelptr(&framegraph->render_targets);
for (uint32_t i = 0; i < framegraph->render_target_count; ++i) {
const char *name = rtResolveConstRelptr(&render_targets[i].name);
rtLog("RESMGR", " - %s %x", name ? name : "Unnamed RT", render_targets[i].id);
if (render_targets[i].width != RT_RENDER_TARGET_SIZE_SWAPCHAIN)
rtLog("RESMGR",
" size: %u x %u",
render_targets[i].width,
render_targets[i].height);
else
rtLog("RESMGR", " size: SWAPCHAIN SIZE");
rtLog("RESMGR", " samples: %u", render_targets[i].sample_count);
rtLog("RESMGR",
" format: %s",
(render_targets[i].format < RT_PIXEL_FORMAT_count)
? format_str[render_targets[i].format]
: "<INVALID>");
}
rtLog("RESMGR", " passes:");
const rt_render_pass_info *render_passes = rtResolveConstRelptr(&framegraph->render_passes);
for (uint32_t i = 0; i < framegraph->render_pass_count; ++i) {
const char *name = rtResolveConstRelptr(&render_passes[i].name);
rtLog("RESMGR", " - %s %x", name ? name : "Unnamed Pass", render_passes[i].id);
rtLog("RESMGR", " reads:");
const rt_render_target_read *reads =
rtResolveConstRelptr(&render_passes[i].read_render_targets);
for (uint32_t j = 0; j < render_passes[i].read_render_target_count; ++j) {
rtLog("RESMGR", " - %x", reads[j].render_target);
rtLog("RESMGR",
" mode: %s",
(reads[j].mode < RT_RENDER_TARGET_READ_count) ? read_mode_str[reads[j].mode]
: "<INVALID>");
}
rtLog("RESMGR", " writes:");
const rt_render_target_write *writes =
rtResolveConstRelptr(&render_passes[i].write_render_targets);
for (uint32_t j = 0; j < render_passes[i].write_render_target_count; ++j) {
rtLog("RESMGR", " - %x", writes[j].render_target);
rtLog("RESMGR",
" clear: %s",
(writes[j].flags & RT_RENDER_TARGET_WRITE_CLEAR) ? "YES" : "NO");
rtLog("RESMGR",
" discard: %s",
(writes[j].flags & RT_RENDER_TARGET_WRITE_DISCARD) ? "YES" : "NO");
rtLog("RESMGR",
" clear_value: {rgba: {%f %f %f %f}, ds: {%f %u}}",
(double)writes[j].clear.color.r,
(double)writes[j].clear.color.g,
(double)writes[j].clear.color.b,
(double)writes[j].clear.color.a,
(double)writes[j].clear.depth_stencil.depth,
writes[j].clear.depth_stencil.stencil);
}
}
} break;
case RT_RESOURCE_EFFECT: {
const rt_effect_info *effect = resource->data;
rtLog("RESMGR", " effect data:");
for (uint32_t i = 0; i < effect->pass_count; ++i) {
rtLog("RESMGR", " pass %u:", i);
rtLog("RESMGR", " id: %llx", effect->passes[i].pass_id);
rtLog("RESMGR", " pipeline: %llx", effect->passes[i].pipeline);
}
} break;
#endif
default:
rtLog("RESMGR", " unknown data at: %llx", (uintptr_t)resource->data);
}
}
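/* Serializes every occupied namespace slot into <rt_ResourceDirectory>/namespace.bin using the
* packed file format declared above; LoadNamespace() reads this file back on startup unless
* rt_DisableResourceNamespaceLoad is set. */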
RT_DLLEXPORT void rtSaveResourceNamespace(void) {
rt_temp_arena temp = rtGetTemporaryArena(NULL, 0);
rtLockRead(&_namespace.lock);
uint32_t entry_count = 0;
for (size_t i = 0; i < rt_ResourceNamespaceSize.sz; ++i) {
if (_namespace.ids[i] != RT_INVALID_RESOURCE_ID)
++entry_count;
}
size_t buffer_size =
sizeof(rt_namespace_file_header) + entry_count * sizeof(rt_namespace_file_entry);
void *buffer = rtArenaPush(temp.arena, buffer_size);
if (!buffer) {
rtReportError(
"RESMGR",
"Failed to allocate temporary buffer (%zu bytes) for writing the namespace file.",
buffer_size);
goto out;
}
rt_namespace_file_header *header = buffer;
rt_namespace_file_entry *entries = (rt_namespace_file_entry *)(header + 1);
size_t at = 0;
for (size_t i = 0; i < rt_ResourceNamespaceSize.sz; ++i) {
if (_namespace.ids[i] != RT_INVALID_RESOURCE_ID) {
entries[at].id = _namespace.ids[i];
entries[at].ref = _namespace.refs[i];
++at;
}
}
RT_ASSERT(at == entry_count, "Mismatch between counted and written namespace entries.");
header->num_entries = entry_count;
header->checksum = rtHashBytes(entries, entry_count * sizeof(rt_namespace_file_entry));
char path[260];
rtSPrint(path, RT_ARRAY_COUNT(path), "%s/namespace.bin", rt_ResourceDirectory.s);
rt_write_batch write = {.num_writes = 1};
write.writes[0] = (rt_file_write){
.file = rtAddFile(path),
.buffer = buffer,
.num_bytes = buffer_size,
.offset = 0,
};
rt_aio_handle handle;
if (rtSubmitWriteBatch(&write, &handle) != RT_SUCCESS) {
rtReportError("RESMGR", "Failed to submit the write for %s", path);
goto out;
}
rt_aio_state state = rtWaitForAIOCompletion(handle);
rtReleaseAIO(handle);
if (state != RT_AIO_STATE_FINISHED)
rtReportError("RESMGR", "Write to %s failed: %u", path, state);
out:
rtUnlockRead(&_namespace.lock);
rtReturnTemporaryArena(temp);
}