/* rtengine/src/runtime/resource_manager.c */
#include "aio.h"
#include "buffer_manager.h"
#include "compression.h"
#include "config.h"
#include "ds.h"
#include "file_tab.h"
#include "fsutils.h"
#include "hashing.h"
#include "mem_arena.h"
#include "renderer_api.h"
#include "resources.h"
#include "threading.h"
#include <assert.h>
#include <stdlib.h>
#include <string.h>
RT_CVAR_S(rt_ResourceDirectory, "The directory used for storing resources. Default: res", "res");
RT_CVAR_I(rt_ResourceCacheSize,
"The maximum amount of memory used for caching resources. Default: 512MB",
RT_MB(512));
RT_CVAR_I(rt_MaxCachedResources,
"The maximum number of simultaneously cached resources. Default: 1024",
1024);
RT_CVAR_I(rt_ResourceNamespaceSize,
"The maximum number of resources that can exist. Default: 1.048.576",
1048576);
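/* Marks a deleted slot in the open-addressing lookup tables; empty slots hold RT_INVALID_RESOURCE_ID. */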
#define RT_TOMBSTONE_ID 1
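/* One cache slot. Free slots are linked through next_free; load_aio is valid while an
 * asynchronous prefetch of this resource is still in flight. */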
typedef struct {
void *buffer;
size_t size;
rt_aio_handle load_aio;
unsigned int next_free;
int usage_counter;
} rt_cached_resource;
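/* Pairs a cache slot index with its resource id; used as the value type of the reclaim heap. */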
typedef struct {
unsigned int index;
rt_resource_id id;
} rt_cached_resource_ref;
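/* The in-memory resource cache. Cached entries are found via an open-addressing hash
 * table (resource_ids/resource_indices, sized at twice the slot count); reclaim_heap
 * orders entries by usage counter so that rarely used resources are evicted first. */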
typedef struct {
void *mem;
rt_minheap reclaim_heap;
/* Used to lookup cached resources by id */
rt_resource_id *resource_ids;
unsigned int *resource_indices;
rt_cached_resource *resources;
unsigned int first_free;
size_t current_size;
rt_rwlock resource_lock;
rt_mutex *heap_lock;
} rt_resource_cache;
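/* Location of a compressed resource inside its file on disk. */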
typedef struct {
rt_file_id file;
size_t offset;
size_t decompressed_size;
size_t compressed_size;
} rt_resource_ref;
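/* Maps resource ids to their on-disk location, again via open addressing with linear probing. */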
typedef struct {
rt_resource_id *ids;
rt_resource_ref *refs;
rt_rwlock lock;
} rt_resource_namespace;
/* ~~~ Utilities ~~~ */
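/* Returns the size of the type-specific data that follows the rt_resource header. */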
static size_t GetResourceDataSize(const rt_resource *resource) {
switch (resource->type) {
case RT_RESOURCE_PIPELINE:
return sizeof(rt_pipeline_info);
case RT_RESOURCE_SHADER: {
/* Sizeof metadata + bytecode */
const rt_shader_info *info = resource->data;
return sizeof(rt_shader_info) + (info ? info->bytecode_length : 0);
} break;
default:
rtLog("RESMGR", "Tried to get size of an invalid resource type %u", resource->type);
}
return 0;
}
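/* Copies the type-specific resource data to dest and patches relative pointers
 * (e.g. the shader bytecode) to point into the destination buffer. */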
static void CopyResourceData(const rt_resource *resource, void *dest) {
switch (resource->type) {
case RT_RESOURCE_PIPELINE:
memcpy(dest, resource->data, sizeof(rt_pipeline_info));
break;
case RT_RESOURCE_SHADER: {
/* Sizeof metadata + bytecode */
const rt_shader_info *info = resource->data;
rt_shader_info *dest_info = dest;
memcpy(dest_info, info, sizeof(*info));
memcpy(dest_info + 1, rtResolveConstRelptr(&info->bytecode), info->bytecode_length);
rtSetRelptr(&dest_info->bytecode, (void *)(dest_info + 1));
} break;
default:
rtLog("RESMGR", "Tried to get copy a resource of invalid type %u", resource->type);
}
}
#if 0
static rt_resource_ref *GetResourceRefPtr(rt_resource_id id) {
rt_resource_ref *ref = NULL;
rtLockRead(&_namespace.lock);
size_t ns_size = (size_t)rt_ResourceNamespaceSize.i;
for (size_t j = 0; j < ns_size; ++j) {
size_t at = (id + j) % ns_size;
if (_namespace.ids[at] == RT_INVALID_RESOURCE_ID) {
break;
} else if (_namespace.ids[at] == id) {
ref = &_namespace.refs[at];
break;
}
}
rtUnlockRead(&_namespace.lock);
return ref;
}
#endif
/* Fills the passed write struct with the necessary information to save the resource to a file */
static bool PrepareResourceFlushToFile(rt_resource_id id,
const rt_resource *resource,
rt_file_write *write,
rt_arena *arena) {
RT_ASSERT((uintptr_t)resource->data == (uintptr_t)(resource + 1),
"The resource and data must be laid out in a continous buffer.");
char file_path[260];
rtSPrint(file_path, 260, "%s/%llx.bin", rt_ResourceDirectory.s, id);
size_t total_size = sizeof(rt_resource) + GetResourceDataSize(resource);
size_t compression_bound = rtGetCompressionBound(total_size);
void *compressed_resource = rtArenaPush(arena, compression_bound);
if (!compressed_resource)
return false;
size_t compressed_bytes =
rtCompressData(resource, total_size, compressed_resource, compression_bound);
write->file = rtAddFile(file_path);
write->buffer = compressed_resource;
write->offset = 0;
write->num_bytes = compressed_bytes;
return true;
}
/* ~~~ Cache ~~~ */
static rt_resource_cache _cache;
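/* The cache uses a single allocation, laid out as:
 * [reclaim keys | reclaim refs | resources | resource_ids (2x) | resource_indices (2x)] */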
static rt_result InitResourceCache(void) {
int count = rt_MaxCachedResources.i;
if (count == 0) {
rtReportError("RESMGR", "rt_MaxCachedResources must be greater than 0.");
return RT_INVALID_VALUE;
}
size_t required_mem = (size_t)count * (sizeof(rt_cached_resource_ref) + sizeof(int) +
sizeof(rt_cached_resource)) +
2 * (size_t)count * (sizeof(rt_resource_id) + sizeof(unsigned int));
void *mem = malloc(required_mem);
if (!mem)
return RT_OUT_OF_MEMORY;
rt_create_rwlock_result resource_lock_create = rtCreateRWLock();
if (!resource_lock_create.ok) {
free(mem);
return RT_UNKNOWN_ERROR;
}
_cache.heap_lock = rtCreateMutex();
if (!_cache.heap_lock) {
free(mem);
rtDestroyRWLock(&resource_lock_create.lock);
return RT_UNKNOWN_ERROR;
}
memset(mem, 0, required_mem);
_cache.mem = mem;
int *reclaim_keys = mem;
rt_cached_resource_ref *reclaim_refs = (rt_cached_resource_ref *)(reclaim_keys + count);
_cache.reclaim_heap = rtCreateMinheap(reclaim_keys,
reclaim_refs,
sizeof(rt_cached_resource_ref),
(size_t)count,
0);
_cache.current_size = 0;
_cache.resources = (rt_cached_resource *)(reclaim_refs + count);
_cache.resource_lock = resource_lock_create.lock;
for (int i = 0; i < count; ++i) {
_cache.resources[i].next_free = (i < count - 1) ? i + 1 : UINT_MAX;
}
_cache.first_free = 0;
_cache.resource_ids = (rt_resource_id *)(_cache.resources + count);
_cache.resource_indices = (unsigned int *)(_cache.resource_ids + 2 * count);
return RT_SUCCESS;
}
static void ShutdownResourceCache(void) {
free(_cache.mem);
rtDestroyRWLock(&_cache.resource_lock);
rtDestroyMutex(_cache.heap_lock);
memset(&_cache, 0, sizeof(_cache));
}
/* NOTE(Kevin): Only call this while holding a write-lock on the cache.
* The function locks the reclaim heap lock itself. */
static bool FreeCacheSpace(size_t space) {
size_t free_space = (size_t)rt_ResourceCacheSize.i - _cache.current_size;
rtLockMutex(_cache.heap_lock);
while (free_space < space && !rtMinheapIsEmpty(&_cache.reclaim_heap)) {
rt_cached_resource_ref ref;
rtMinheapPop(&_cache.reclaim_heap, &ref);
rt_cached_resource *res = &_cache.resources[ref.index];
if (res->load_aio != RT_AIO_INVALID_HANDLE) {
rtWaitForAIOCompletion(res->load_aio);
rtReleaseAIO(res->load_aio);
}
rtReleaseBuffer(res->buffer, res->size);
free_space += res->size;
_cache.current_size -= res->size;
res->next_free = _cache.first_free;
_cache.first_free = ref.index;
res->usage_counter = 0;
res->buffer = NULL;
res->size = 0;
/* Remove from lookup table */
size_t ht_size = (size_t)rt_MaxCachedResources.i * 2;
for (size_t off = 0; off < ht_size; ++off) {
size_t slot = (ref.id + off) % ht_size;
if (_cache.resource_ids[slot] == ref.id) {
_cache.resource_ids[slot] = RT_TOMBSTONE_ID;
break;
} else if (_cache.resource_ids[slot] == RT_INVALID_RESOURCE_ID) {
break;
}
}
}
rtUnlockMutex(_cache.heap_lock);
return free_space >= space;
}
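/* Returns the cache slot index of id, or UINT_MAX if the resource is not cached.
 * Must be called with the cache's resource lock held. */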
static unsigned int FindCachedResource(rt_resource_id id) {
size_t ht_size = (size_t)rt_MaxCachedResources.i * 2;
for (size_t off = 0; off < ht_size; ++off) {
size_t slot = (id + off) % ht_size;
if (_cache.resource_ids[slot] == id)
return _cache.resource_indices[slot];
else if (_cache.resource_ids[slot] == RT_INVALID_RESOURCE_ID)
return UINT_MAX;
}
return UINT_MAX;
}
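/* Inserts a copy of res into the cache, or bumps the usage counter if id is already
 * cached. Returns a pointer to the cached copy, or NULL if space could not be freed
 * or the buffer allocation failed. */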
static rt_resource *CacheResource(rt_resource_id id, const rt_resource *res) {
rt_resource *cached = NULL;
rtLockWrite(&_cache.resource_lock);
unsigned int index = FindCachedResource(id);
if (index != UINT_MAX) {
rt_cached_resource_ref ref = {.id = id, .index = index};
rt_cached_resource *cache_entry = &_cache.resources[index];
++cache_entry->usage_counter;
rtLockMutex(_cache.heap_lock);
rtMinheapUpdate(&_cache.reclaim_heap, &ref, cache_entry->usage_counter, NULL);
rtUnlockMutex(_cache.heap_lock);
cached = cache_entry->buffer;
} else {
/* Insert into cache */
size_t total_size = sizeof(rt_resource) + GetResourceDataSize(res);
if (_cache.current_size + total_size >= (size_t)rt_ResourceCacheSize.i) {
if (!FreeCacheSpace(total_size)) {
rtLog("RESMGR",
"Unable to reclaim %zu kB from the resource cache.",
total_size / 1024);
rtUnlockWrite(&_cache.resource_lock);
return cached;
}
RT_ASSERT(_cache.first_free != UINT_MAX,
"There must be a free cache entry after space was freed.");
}
void *buffer = rtAllocBuffer(total_size);
if (!buffer) {
rtLog("RESMG", "Unable to allocate %zu kB for the new resource.", total_size / 1024);
rtUnlockWrite(&_cache.resource_lock);
return cached;
}
cached = buffer;
memcpy(cached, res, sizeof(rt_resource));
cached->data = (void *)(cached + 1);
CopyResourceData(res, cached->data);
index = _cache.first_free;
_cache.first_free = _cache.resources[index].next_free;
_cache.resources[index].buffer = buffer;
_cache.resources[index].usage_counter = 1;
_cache.resources[index].size = total_size;
_cache.resources[index].next_free = UINT_MAX;
_cache.resources[index].load_aio = RT_AIO_INVALID_HANDLE;
_cache.current_size += total_size;
rt_cached_resource_ref reclaim_ref = {
.id = id,
.index = index,
};
rtLockMutex(_cache.heap_lock);
rtMinheapPush(&_cache.reclaim_heap, 1, &reclaim_ref);
rtUnlockMutex(_cache.heap_lock);
/* Insert into lookup table */
bool inserted = false;
size_t ht_size = (size_t)rt_MaxCachedResources.i * 2;
for (size_t off = 0; off < ht_size; ++off) {
size_t slot = (id + off) % ht_size;
if (_cache.resource_ids[slot] == RT_INVALID_RESOURCE_ID ||
_cache.resource_ids[slot] == RT_TOMBSTONE_ID || _cache.resource_ids[slot] == id) {
_cache.resource_ids[slot] = id;
_cache.resource_indices[slot] = index;
inserted = true;
break;
}
}
if (!inserted) {
rtReportError("RESMGR",
"Failed to insert created resource into the resource lookup table.");
}
}
rtUnlockWrite(&_cache.resource_lock);
return cached;
}
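/* Registers an in-flight prefetch load in the cache. The cached buffer still contains
 * compressed data; rtGetResource waits for the load and decompresses it on first use. */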
static void InsertPrefetchResourceIntoCache(rt_resource_id id,
rt_aio_handle load_aio,
void *load_buffer,
size_t load_buffer_size) {
rtLockWrite(&_cache.resource_lock);
unsigned int index = FindCachedResource(id);
if (index != UINT_MAX) {
rtUnlockWrite(&_cache.resource_lock);
return;
}
if (_cache.current_size + load_buffer_size >= (size_t)rt_ResourceCacheSize.i) {
if (!FreeCacheSpace(load_buffer_size)) {
rtLog("RESMGR",
"Unable to reclaim %zu kB from the resource cache.",
load_buffer_size / 1024);
rtUnlockWrite(&_cache.resource_lock);
return;
}
RT_ASSERT(_cache.first_free != UINT_MAX,
"There must be a free cache entry after space was freed.");
}
index = _cache.first_free;
_cache.first_free = _cache.resources[index].next_free;
_cache.resources[index].buffer = load_buffer;
_cache.resources[index].usage_counter = 1;
_cache.resources[index].size = load_buffer_size;
_cache.resources[index].next_free = UINT_MAX;
_cache.resources[index].load_aio = load_aio;
_cache.current_size += load_buffer_size;
rt_cached_resource_ref reclaim_ref = {
.id = id,
.index = index,
};
rtLockMutex(_cache.heap_lock);
rtMinheapPush(&_cache.reclaim_heap, 1, &reclaim_ref);
rtUnlockMutex(_cache.heap_lock);
/* Insert into lookup table */
bool inserted = false;
size_t ht_size = (size_t)rt_MaxCachedResources.i * 2;
for (size_t off = 0; off < ht_size; ++off) {
size_t slot = (id + off) % ht_size;
if (_cache.resource_ids[slot] == RT_INVALID_RESOURCE_ID ||
_cache.resource_ids[slot] == RT_TOMBSTONE_ID || _cache.resource_ids[slot] == id) {
_cache.resource_ids[slot] = id;
_cache.resource_indices[slot] = index;
inserted = true;
break;
}
}
if (!inserted) {
rtReportError("RESMGR",
"Failed to insert created resource into the resource lookup table.");
}
rtUnlockWrite(&_cache.resource_lock);
}
/* ~~~ Resource Namespace ~~~ */
static rt_resource_namespace _namespace;
static rt_result InitResourceNamespace(void) {
size_t size = (size_t)rt_ResourceNamespaceSize.i;
if (size == 0) {
rtReportError("RESMGR", "rt_ResourceNamespaceSize must be greater than 0.");
return RT_INVALID_VALUE;
}
void *mem = calloc(size, sizeof(rt_resource_id) + sizeof(rt_resource_ref));
if (!mem)
return RT_OUT_OF_MEMORY;
rt_create_rwlock_result lock_create = rtCreateRWLock();
if (!lock_create.ok) {
free(mem);
return RT_UNKNOWN_ERROR;
}
_namespace.lock = lock_create.lock;
_namespace.ids = mem;
_namespace.refs = (rt_resource_ref *)(_namespace.ids + size);
return RT_SUCCESS;
}
static void ShutdownNamespace(void) {
rtDestroyRWLock(&_namespace.lock);
free(_namespace.ids);
memset(&_namespace, 0, sizeof(_namespace));
}
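/* Looks up the on-disk location of a resource. The returned ref has
 * file == RT_INVALID_FILE_ID if the id is unknown. */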
static rt_resource_ref GetResourceRef(rt_resource_id id) {
rt_resource_ref ref = {.file = RT_INVALID_FILE_ID};
rtLockRead(&_namespace.lock);
size_t ns_size = (size_t)rt_ResourceNamespaceSize.i;
for (size_t off = 0; off < ns_size; ++off) {
size_t at = (id + off) % ns_size;
if (_namespace.ids[at] == id) {
ref = _namespace.refs[at];
break;
} else if (_namespace.ids[at] == RT_INVALID_RESOURCE_ID) {
rtLog("RESMGR", "Tried to load unknown resource %llx", id);
break;
}
}
rtUnlockRead(&_namespace.lock);
return ref;
}
/* ~~~ Public API ~~~ */
rt_result InitResourceManager(void) {
if (!rtCreateDirectory(rt_ResourceDirectory.s))
rtLog("RESMGR", "CreateDirectory(%s) failed.", rt_ResourceDirectory.s);
rt_result res;
if ((res = InitResourceCache()) != RT_SUCCESS)
return res;
if ((res = InitResourceNamespace()) != RT_SUCCESS) {
ShutdownResourceCache();
return res;
}
return RT_SUCCESS;
}
void ShutdownResourceManager(void) {
ShutdownResourceCache();
ShutdownNamespace();
}
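/* Copies the resource into dest, which must hold at least rtGetResourceSize(id) bytes.
 * Cache hits are served directly (finishing and decompressing a pending prefetch if
 * necessary); on a miss the resource is loaded synchronously, cached, and its
 * dependencies and subresources are prefetched. */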
RT_DLLEXPORT rt_result rtGetResource(rt_resource_id id, void *dest) {
rtLockRead(&_cache.resource_lock);
unsigned int cache_index = FindCachedResource(id);
if (cache_index != UINT_MAX) {
rt_cached_resource *cached = &_cache.resources[cache_index];
/* TODO(Kevin): It's possible that the load is not finished. */
if (cached->load_aio != RT_AIO_INVALID_HANDLE) {
rtUnlockRead(&_cache.resource_lock);
rtLockWrite(&_cache.resource_lock);
if (cached->load_aio != RT_AIO_INVALID_HANDLE) {
rt_aio_state state = rtWaitForAIOCompletion(cached->load_aio);
rtReleaseAIO(cached->load_aio);
cached->load_aio = RT_AIO_INVALID_HANDLE;
if (state != RT_AIO_STATE_FINISHED) {
rtLog("RESMGR", "Failed to load resource %llx: %u", id, state);
rtUnlockWrite(&_cache.resource_lock);
return RT_UNKNOWN_ERROR;
}
/* Need to decompress the resource */
rt_resource_ref ref = GetResourceRef(id);
if (ref.file == RT_INVALID_FILE_ID) {
rtUnlockWrite(&_cache.resource_lock);
return RT_INVALID_VALUE;
}
void *decompressed = rtAllocBuffer(ref.decompressed_size);
if (!decompressed) {
rtUnlockWrite(&_cache.resource_lock);
return RT_OUT_OF_MEMORY;
}
size_t written_bytes = rtDecompressData(cached->buffer,
cached->size,
decompressed,
ref.decompressed_size);
if (written_bytes != ref.decompressed_size) {
rtLog("RESMGR",
"Corrupted resource data %llx: Result of decompression does not match "
"saved "
"metadata.",
id);
rtUnlockWrite(&_cache.resource_lock);
return RT_UNKNOWN_ERROR;
}
rt_resource *resource = decompressed;
/* Patch the data pointer */
resource->data = (resource + 1);
/* Note that we allow the cache to grow beyond its configured maximum here. */
rtReleaseBuffer(cached->buffer, cached->size);
_cache.current_size -= cached->size;
cached->size = ref.decompressed_size;
_cache.current_size += ref.decompressed_size;
cached->buffer = decompressed;
rtUnlockWrite(&_cache.resource_lock);
rtLockRead(&_cache.resource_lock);
}
}
RT_ASSERT(cached->size == rtGetResourceSize(id), "Inconsistent resource size");
memcpy(dest, cached->buffer, cached->size);
rtLockMutex(_cache.heap_lock);
++cached->usage_counter;
rt_cached_resource_ref ref = {.id = id, .index = cache_index};
rtMinheapUpdate(&_cache.reclaim_heap, &ref, cached->usage_counter, NULL);
rtUnlockMutex(_cache.heap_lock);
rtUnlockRead(&_cache.resource_lock);
} else {
/* Load the resource file */
rtUnlockRead(&_cache.resource_lock);
rt_resource_ref ref = GetResourceRef(id);
if (ref.file == RT_INVALID_FILE_ID) {
return RT_INVALID_VALUE;
}
rt_temp_arena temp_arena = rtGetTemporaryArena(NULL, 0);
void *compressed_buffer = rtArenaPush(temp_arena.arena, ref.compressed_size);
if (!compressed_buffer) {
rtReturnTemporaryArena(temp_arena);
return RT_OUT_OF_MEMORY;
}
rt_aio_state state = rtSubmitSingleLoadSync((rt_file_load){
.file = ref.file,
.dest = compressed_buffer,
.num_bytes = ref.compressed_size,
.offset = ref.offset,
});
if (state != RT_AIO_STATE_FINISHED) {
rtLog("RESMGR", "Failed to load resource %llx: %u", id, state);
rtReturnTemporaryArena(temp_arena);
return RT_UNKNOWN_ERROR;
}
/* Decompress */
size_t written_bytes =
rtDecompressData(compressed_buffer, ref.compressed_size, dest, ref.decompressed_size);
rtReturnTemporaryArena(temp_arena);
if (written_bytes != ref.decompressed_size) {
rtLog("RESMGR",
"Corrupted resource data %llx: Result of decompression does not match saved "
"metadata.",
id);
return RT_UNKNOWN_ERROR;
}
rt_resource *resource = dest;
/* Patch the data pointer */
resource->data = (resource + 1);
CacheResource(id, resource);
rtPrefetchResources(resource->dependencies, resource->dependency_count);
rtPrefetchResources(resource->subresources, resource->subresource_count);
}
return RT_SUCCESS;
}
RT_DLLEXPORT size_t rtGetResourceSize(rt_resource_id id) {
size_t size = 0;
rtLockRead(&_namespace.lock);
size_t ns_size = (size_t)rt_ResourceNamespaceSize.i;
for (size_t off = 0; off < ns_size; ++off) {
size_t at = (id + off) % ns_size;
if (_namespace.ids[at] == id) {
size = _namespace.refs[at].decompressed_size;
break;
} else if (_namespace.ids[at] == RT_INVALID_RESOURCE_ID) {
rtLog("RESMGR", "Tried to get size of unknown resource %llx", id);
break;
}
}
rtUnlockRead(&_namespace.lock);
return size;
}
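/* Starts asynchronous loads for the given resources so that subsequent rtGetResource
 * calls are likely to hit the cache. Unknown or already cached ids are skipped. */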
RT_DLLEXPORT void rtPrefetchResources(const rt_resource_id *ids, uint32_t count) {
rt_load_batch loads = {.num_loads = 0};
rt_resource_id batch_ids[RT_LOAD_BATCH_MAX_SIZE];
rt_aio_handle handles[RT_LOAD_BATCH_MAX_SIZE];
for (uint32_t i = 0; i < count; ++i) {
rt_resource_ref ref = GetResourceRef(ids[i]);
if (ref.file == RT_INVALID_FILE_ID) {
rtLog("RESMGR", "Attempted to prefetch unknown resource %llx", ids[i]);
continue;
}
/* Check if the resource is already cached */
if (FindCachedResource(ids[i]) != UINT_MAX)
continue;
void *buffer = rtAllocBuffer(ref.compressed_size);
if (!buffer) {
rtLog("RESMGR",
"Could not prefetch resource %llx because a buffer allocation failed.",
ids[i]);
continue;
}
/* Remember which id this load belongs to, because skipped resources make ids[]
 * and loads.loads[] diverge. */
batch_ids[loads.num_loads] = ids[i];
loads.loads[loads.num_loads] = (rt_file_load){
.file = ref.file,
.num_bytes = ref.compressed_size,
.offset = ref.offset,
.dest = buffer,
};
++loads.num_loads;
if (loads.num_loads == RT_LOAD_BATCH_MAX_SIZE || i == count - 1) {
if (rtSubmitLoadBatch(&loads, handles) != RT_SUCCESS) {
rtLog("RESMGR", "Prefetch failed because the file loads could not be submitted.");
}
for (uint32_t j = 0; j < loads.num_loads; ++j) {
InsertPrefetchResourceIntoCache(batch_ids[j],
handles[j],
loads.loads[j].dest,
loads.loads[j].num_bytes);
}
loads.num_loads = 0;
}
}
if (loads.num_loads > 0) {
if (rtSubmitLoadBatch(&loads, handles) != RT_SUCCESS) {
rtLog("RESMGR", "Prefetch failed because the file loads could not be submitted.");
}
for (uint32_t j = 0; j < loads.num_loads; ++j) {
InsertPrefetchResourceIntoCache(batch_ids[j],
handles[j],
loads.loads[j].dest,
loads.loads[j].num_bytes);
}
}
}
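/* Registers new resources in the namespace, caches them, and writes the compressed data
 * to "<rt_ResourceDirectory>/<id>.bin". Ids are derived from hashing the resource names
 * and returned via the ids array. */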
RT_DLLEXPORT rt_result rtCreateResources(uint32_t count,
const char **names,
const rt_resource *resources,
rt_resource_id *ids) {
rt_result result = RT_SUCCESS;
size_t ns_size = (size_t)rt_ResourceNamespaceSize.i;
rt_write_batch writes = {.num_writes = 0};
rt_aio_handle write_handles[RT_WRITE_BATCH_MAX_SIZE];
uint32_t outstanding_writes = 0;
rt_temp_arena temp_arena = rtGetTemporaryArena(NULL, 0);
rtLockWrite(&_namespace.lock);
for (uint32_t i = 0; i < count; ++i) {
size_t name_len = strlen(names[i]);
rt_resource_id id = (rt_resource_id)rtHashBytes(names[i], name_len);
if (id == RT_INVALID_RESOURCE_ID || id == RT_TOMBSTONE_ID)
id = ~id;
bool inserted = false;
for (size_t j = 0; j < ns_size; ++j) {
size_t at = (id + j) % ns_size;
if (_namespace.ids[at] == RT_INVALID_RESOURCE_ID) {
inserted = true;
ids[i] = id;
rt_resource *cached = CacheResource(id, &resources[i]);
if (!PrepareResourceFlushToFile(id,
cached,
&writes.writes[writes.num_writes],
temp_arena.arena)) {
rtReportError("RESMGR", "Failed to prepare resource %llx for writing.", id);
inserted = false;
break;
}
_namespace.ids[at] = id;
_namespace.refs[at].offset = writes.writes[writes.num_writes].offset;
_namespace.refs[at].compressed_size = writes.writes[writes.num_writes].num_bytes;
_namespace.refs[at].file = writes.writes[writes.num_writes].file;
_namespace.refs[at].decompressed_size =
sizeof(rt_resource) + GetResourceDataSize(&resources[i]);
++writes.num_writes;
rtLog("RESMGR",
"Created resource %llx: Uncompressed size: %zu bytes, compressed size: %zu "
"bytes.",
id,
_namespace.refs[at].decompressed_size,
_namespace.refs[at].compressed_size);
break;
} else if (_namespace.ids[at] == id) {
rtReportError("RESMGR",
"Resource ID collision occured with resource %s.\nID: %llx",
names[i],
id);
result = RT_INVALID_VALUE;
goto out;
}
}
if (!inserted) {
result = RT_OUT_OF_MEMORY;
goto out;
}
if (writes.num_writes == RT_WRITE_BATCH_MAX_SIZE ||
(i == count - 1 && writes.num_writes > 0)) {
if (outstanding_writes > 0) {
/* Wait until the previous batch is finished */
for (uint32_t k = 0; k < outstanding_writes; ++k) {
if (rtWaitForAIOCompletion(write_handles[k]) != RT_AIO_STATE_FINISHED) {
rtReportError("RESMGR", "Resource write failed.");
result = RT_UNKNOWN_ERROR;
goto out;
}
rtReleaseAIO(write_handles[k]);
}
}
outstanding_writes = writes.num_writes;
if (rtSubmitWriteBatch(&writes, write_handles) != RT_SUCCESS) {
rtReportError("RESMGR", "Failed to submit resource writes.");
result = RT_UNKNOWN_ERROR;
goto out;
}
}
}
if (outstanding_writes > 0) {
/* Wait until the last batch is finished */
for (uint32_t i = 0; i < outstanding_writes; ++i) {
if (rtWaitForAIOCompletion(write_handles[i]) != RT_AIO_STATE_FINISHED) {
rtReportError("RESMGR", "Resource write failed.");
result = RT_UNKNOWN_ERROR;
}
rtReleaseAIO(write_handles[i]);
}
}
out:
rtReturnTemporaryArena(temp_arena);
rtUnlockWrite(&_namespace.lock);
return result;
}
RT_DLLEXPORT void rDebugLogResource(rt_resource_id id, const rt_resource *resource) {
static const char *type_str[RT_RESOURCE_TYPE_count] = {"Shader", "Pipeline"};
rtLog("RESMGR", "Resource %llx:", id);
rtLog("RESMGR",
" type: %s",
(resource->type < RT_RESOURCE_TYPE_count) ? type_str[resource->type] : "<INVALID>");
rtLog("RESMGR", " subresources:");
for (uint32_t i = 0; i < resource->subresource_count; ++i) {
rtLog("RESMGR", " - %llx", resource->subresources[i]);
}
rtLog("RESMGR", " dependencies:");
for (uint32_t i = 0; i < resource->dependency_count; ++i) {
rtLog("RESMGR", " - %llx", resource->dependencies[i]);
}
switch (resource->type) {
case RT_RESOURCE_PIPELINE: {
static const char *binding_str[RT_ATTRIBUTE_VALUE_count] = {"<UNDEFINED>",
"MaterialAlbedo",
"MaterialNormal"};
const rt_pipeline_info *pipeline = resource->data;
rtLog("RESMGR", " pipeline data:");
rtLog("RESMGR", " vertex shader: %llx", pipeline->vertex_shader);
rtLog("RESMGR", " fragment shader: %llx", pipeline->fragment_shader);
rtLog("RESMGR", " compute shader: %llx", pipeline->compute_shader);
rtLog("RESMGR", " uniform bindings:");
const rt_attribute_binding *uniform_bindings =
rtResolveConstRelptr(&pipeline->uniform_bindings);
for (uint32_t i = 0; i < pipeline->uniform_binding_count; ++i) {
rtLog("RESMGR",
" - %u : %s",
uniform_bindings[i].index,
(uniform_bindings[i].value < RT_ATTRIBUTE_VALUE_count)
? binding_str[uniform_bindings[i].value]
: "<INVALID>");
}
rtLog("RESMGR", " texture bindings:");
const rt_attribute_binding *texture_bindings =
rtResolveConstRelptr(&pipeline->texture_bindings);
for (uint32_t i = 0; i < pipeline->texture_binding_count; ++i) {
rtLog("RESMGR",
" - %u : %s",
texture_bindings[i].index,
(texture_bindings[i].value < RT_ATTRIBUTE_VALUE_count)
? binding_str[texture_bindings[i].value]
: "<INVALID>");
}
rtLog("RESMGR", " storage bindings:");
const rt_attribute_binding *storage_bindings =
rtResolveConstRelptr(&pipeline->storage_bindings);
for (uint32_t i = 0; i < pipeline->storage_binding_count; ++i) {
rtLog("RESMGR",
" - %u : %s",
storage_bindings[i].index,
(storage_bindings[i].value < RT_ATTRIBUTE_VALUE_count)
? binding_str[storage_bindings[i].value]
: "<INVALID>");
}
} break;
case RT_RESOURCE_SHADER: {
static const char *stype_str[RT_SHADER_TYPE_count] = {"<INVALID>", "Vulkan"};
static const char *stage_str[RT_SHADER_STAGE_count] = {"Vertex", "Fragment", "Compute"};
const rt_shader_info *shader = resource->data;
rtLog("RESMGR", " shader data:");
rtLog("RESMGR",
" type: %s",
(shader->type < RT_SHADER_TYPE_count) ? stype_str[shader->type] : "<INVALID>");
rtLog("RESMGR",
" stage: %s",
(shader->stage < RT_SHADER_STAGE_count) ? stage_str[shader->stage] : "<INVALID>");
rtLog("RESMGR", " bytecode: %zu bytes", shader->bytecode_length);
} break;
default:
rtLog("RESMGR", " unknown data at: %llx", (uintptr_t)resource->data);
}
}