feat(renderer): Basic command list management

This commit is contained in:
Kevin Trogant 2024-08-02 17:12:35 +02:00
parent 4febd1b3fa
commit beba96b915
10 changed files with 347 additions and 13 deletions

View File

@@ -6,16 +6,19 @@
#include "renderer.h"
#include "render_resource.h"
#include "command_list.h"
typedef struct rt_physical_resource_manager_i rt_physical_resource_manager_i;
typedef rt_physical_resource_manager_i rt_render_device_get_physical_resource_manager_fn(void *o);
typedef rt_result rt_render_device_submit_command_list_fn(void *o, rt_render_command_list *list);
/* Interface for the render device.
* The device is responsible for executing command lists. */
typedef struct {
void *o;
rt_render_device_get_physical_resource_manager_fn *GetPhysicalResourceManager;
rt_render_device_submit_command_list_fn *SubmitCommandList;
} rt_render_device_i;
typedef bool rt_physical_resource_manager_is_present_fn(void *o, rt_render_resource_handle h);
@@ -51,5 +54,6 @@ typedef struct {
} rt_render_backend_api;
extern rt_render_backend_api g_render_backend;
extern rt_render_device_i g_device_i;
#endif
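To make the contract concrete, here is a minimal sketch of how a backend is expected to fill this interface. The null-backend names below are purely illustrative (the real Vulkan wiring is in the vk_init hunk further down), and the struct is assumed to be handed back to the renderer during init, which stores it in g_device_i.

/* Illustrative null backend; assumes backend_api.h is included. */
static rt_result NullSubmitCommandList(void *o, rt_render_command_list *list) {
    RT_UNUSED(o);
    RT_UNUSED(list);
    return RT_SUCCESS; /* accept and discard every list */
}

static rt_physical_resource_manager_i NullGetPhysicalResourceManager(void *o) {
    RT_UNUSED(o);
    return (rt_physical_resource_manager_i){0};
}

/* Filled during backend init and returned to the renderer,
 * which is expected to store it in g_device_i. */
static const rt_render_device_i _null_device_i = {
    .o = NULL,
    .GetPhysicalResourceManager = NullGetPhysicalResourceManager,
    .SubmitCommandList = NullSubmitCommandList,
};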

src/renderer/command_list.c Normal file
View File

@@ -0,0 +1,103 @@
#include "command_list.h"
#include "backend_api.h"
#include <runtime/config.h>
#include <runtime/mem_arena.h>
#include <runtime/threading.h>
#include <string.h>
RT_CVAR_SZ(r_PerFrameCommandListMemory, "Amount of memory to allocate for single-frame command list management."
" The total amount of memory will be this times the maximum number of frames in flight. (Default: 16 MB)",
RT_MB(16));
static rt_arena _list_arenas[3];
static rt_arena *_current_arena = &_list_arenas[0];
static unsigned int _current_arena_idx = 0;
static rt_mutex *_mutex;
rt_result InitCommandLists(void) {
_mutex = rtCreateMutex();
if (!_mutex)
return RT_UNKNOWN_ERROR;
for (unsigned int i = 0; i < RT_ARRAY_COUNT(_list_arenas); ++i) {
rt_create_arena_result arena_res = rtCreateArena(NULL, r_PerFrameCommandListMemory.sz);
if (!arena_res.ok) {
rtDestroyMutex(_mutex);
return RT_OUT_OF_MEMORY;
}
_list_arenas[i] = arena_res.arena;
}
return RT_SUCCESS;
}
void ShutdownCommandLists(void) {
for (unsigned int i = 0; i < RT_ARRAY_COUNT(_list_arenas); ++i) {
rtReleaseArena(&_list_arenas[i]);
}
rtDestroyMutex(_mutex);
}
void CommandListsOnBeginFrame(void) {
_current_arena_idx = (_current_arena_idx + 1) % RT_ARRAY_COUNT(_list_arenas);
_current_arena = &_list_arenas[_current_arena_idx];
rtArenaClear(_current_arena);
}
#define COMMAND_LIST_CAPACITY RT_KB(512)
#define AVERAGE_COMMAND_DATA_SIZE sizeof(rt_draw_indirect_data)
#define COMMAND_LIST_MAX_LENGTH (COMMAND_LIST_CAPACITY / AVERAGE_COMMAND_DATA_SIZE)
/* Get a new render command list. */
RT_DLLEXPORT rt_begin_render_command_list_result rtBeginRenderCommandList(void) {
size_t mem_required = COMMAND_LIST_MAX_LENGTH * sizeof(rt_render_command_header) + COMMAND_LIST_CAPACITY;
rtLockMutex(_mutex);
void *mem = rtArenaPush(_current_arena, mem_required);
rtUnlockMutex(_mutex);
if (!mem) {
rtReportError("RENDERER", "Ran out of memory for command lists.");
return (rt_begin_render_command_list_result){.result = RT_OUT_OF_MEMORY};
}
rt_render_command_list list = {
.headers = mem,
.data = (void *)((rt_render_command_header *)mem + COMMAND_LIST_MAX_LENGTH),
.length = 0u,
.data_capacity = COMMAND_LIST_CAPACITY,
.data_end = 0u
};
return (rt_begin_render_command_list_result){
.result = RT_SUCCESS,
.list = list
};
}
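For a sense of scale, the fixed block that rtBeginRenderCommandList pushes onto the arena works out as follows (a back-of-the-envelope check, assuming 4-byte handles and no struct padding):

/* Back-of-the-envelope sizing, assuming sizeof(rt_render_command_header) == 8
 * and sizeof(rt_draw_indirect_data) == 16:
 *   COMMAND_LIST_MAX_LENGTH = RT_KB(512) / 16          = 32768 commands
 *   header block            = 32768 * 8                = RT_KB(256)
 *   mem_required            = RT_KB(256) + RT_KB(512)  = RT_KB(768) per list
 * so a single 16 MB per-frame arena holds roughly 21 command lists. */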
/* Low-level function that encodes a command and its data into the list. */
RT_DLLEXPORT rt_result rtEncodeRenderCommand(rt_render_command_list *list, rt_render_command_type type, rt_render_queue queue, const void *data) {
size_t data_size = 0u;
switch (type) {
case RT_RENDER_COMMAND_DRAW_INDIRECT:
data_size = sizeof(rt_draw_indirect_data);
break;
default:
rtReportError("RENDERER", "Invalid render command type %u", type);
return RT_INVALID_VALUE;
}
if (list->length == COMMAND_LIST_MAX_LENGTH || (list->data_end + data_size) > list->data_capacity) {
rtReportError("RENDERER", "Reached maximum lenght or capacity of command list.");
return RT_OUT_OF_MEMORY;
}
list->headers[list->length].type = type;
list->headers[list->length].target_queue = queue;
++list->length;
char *dest = (char *)list->data + list->data_end;
memcpy(dest, data, data_size);
list->data_end += data_size;
return RT_SUCCESS;
}
/* Submit a finished command list to the graphics device. The list must not be used after this function returns. */
RT_DLLEXPORT rt_result rtSubmitCommandList(rt_render_command_list *list) {
return g_device_i.SubmitCommandList(g_device_i.o, list);
}

View File

@@ -0,0 +1,81 @@
#ifndef RT_RENDERER_COMMAND_LIST_H
#define RT_RENDERER_COMMAND_LIST_H
#include <stdint.h>
#include <runtime/runtime.h>
#include "render_resource.h"
/* Types of render commands */
typedef enum {
RT_RENDER_COMMAND_DRAW_INDIRECT,
} rt_render_command_type;
typedef enum {
RT_RENDER_QUEUE_GRAPHICS,
RT_RENDER_QUEUE_COMPUTE,
RT_RENDER_QUEUE_TRANSFER,
} rt_render_queue;
/* Structures containing command parameters */
typedef struct {
rt_render_buffer_handle buffer;
uint32_t offset;
uint32_t draw_count;
uint32_t stride;
} rt_draw_indirect_data;
typedef struct {
uint32_t type;
uint32_t target_queue;
} rt_render_command_header;
typedef struct {
rt_render_command_header *headers;
void *data;
/* Number of encoded commands */
uint32_t length;
/* Information used while encoding.
* In the future, it would be possible to move this to another struct that is discarded after
* encoding is finished.
*/
size_t data_capacity;
size_t data_end;
} rt_render_command_list;
#ifdef __cplusplus
extern "C" {
#endif
/* *** Encoding API *** */
typedef struct {
rt_result result;
rt_render_command_list list;
} rt_begin_render_command_list_result;
/* Get a new render command list. */
RT_DLLEXPORT rt_begin_render_command_list_result rtBeginRenderCommandList(void);
/* Low-level function that encodes a command and its data into the list. */
RT_DLLEXPORT rt_result rtEncodeRenderCommand(rt_render_command_list *list, rt_render_command_type type, rt_render_queue queue, const void *data);
/* Helper functions for specific commands */
RT_INLINE rt_result rtEncodeDrawIndirect(rt_render_command_list *list, const rt_draw_indirect_data *draw_indirect_data) {
return rtEncodeRenderCommand(list, RT_RENDER_COMMAND_DRAW_INDIRECT, RT_RENDER_QUEUE_GRAPHICS, draw_indirect_data);
}
/* *** Submission *** */
/* Submit a finished command list to the graphics device. The list must not be used after this function returns. */
RT_DLLEXPORT rt_result rtSubmitCommandList(rt_render_command_list *list);
#ifdef __cplusplus
}
#endif
#endif
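For orientation, a minimal sketch of how a caller might drive this API for one frame. The function name and the stride value are illustrative assumptions, and `indirect_args` stands in for an rt_render_buffer_handle created elsewhere:

/* Illustrative only: encode one indirect draw and hand the list to the device. */
static rt_result RecordTestFrame(rt_render_buffer_handle indirect_args) {
    rt_begin_render_command_list_result begin = rtBeginRenderCommandList();
    if (begin.result != RT_SUCCESS)
        return begin.result;

    rt_draw_indirect_data draw = {
        .buffer = indirect_args,
        .offset = 0,
        .draw_count = 1,
        .stride = 16, /* assumed size of one indirect command in the buffer */
    };
    rt_result res = rtEncodeDrawIndirect(&begin.list, &draw);
    if (res != RT_SUCCESS)
        return res;

    /* After submission the list must not be touched again. */
    return rtSubmitCommandList(&begin.list);
}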

View File

@@ -5,9 +5,11 @@
#include <runtime/config.h>
extern rt_cvar r_MaxRenderResources;
extern rt_cvar r_PerFrameCommandListMemory;
RT_DLLEXPORT void rtRegisterRenderCVARs(void) {
rtRegisterCVAR(&r_MaxRenderResources);
rtRegisterCVAR(&r_PerFrameCommandListMemory);
}
RT_DLLEXPORT void rtRegisterRenderBackendCVARs(void) {
@@ -18,6 +20,8 @@ rt_render_device_i g_device_i;
extern rt_result InitVirtualResourceRegistry(void);
extern void ShutdownVirtualResourceRegistry(void);
extern rt_result InitCommandLists(void);
extern void ShutdownCommandLists(void);
RT_DLLEXPORT rt_result rtInitRenderer(const rt_renderer_window_info *info) {
rt_render_backend_init_result backend_res = g_render_backend.Init(info);
@@ -30,9 +34,16 @@ RT_DLLEXPORT rt_result rtInitRenderer(const rt_renderer_window_info *info) {
g_render_backend.Shutdown();
return res;
}
if ((res = InitCommandLists()) != RT_SUCCESS) {
ShutdownVirtualResourceRegistry();
g_render_backend.Shutdown();
return res;
}
return res;
}
RT_DLLEXPORT void rtShutdownRenderer(void) {
ShutdownCommandLists();
ShutdownVirtualResourceRegistry();
g_render_backend.Shutdown();
}

View File

@@ -17,11 +17,13 @@ endif
renderer_lib = library('renderer',
'backend_api.h',
'command_list.h',
'renderer.h',
'render_mesh.h',
'render_resource.h',
'virtual_resource_registry.h',
'command_list.c',
'init.c',
'load_stub.c',
'meshlet_pools.c',

View File

@@ -21,6 +21,11 @@ typedef struct {
uint32_t value;
} rt_render_resource_handle;
/* Aliases for rt_render_resource_handle that express the expected resource type in code. */
typedef rt_render_resource_handle rt_render_buffer_handle;
typedef rt_render_resource_handle rt_render_texture2d_handle;
#define RT_RENDER_RESOURCE_MAX_VERSION 0x3f
/* Extract the type part of a render resource handle */
@@ -84,6 +89,20 @@ typedef struct {
rt_resource_id source_resource;
} rt_render_buffer_desc;
typedef enum {
RT_TEXTURE_FORMAT_B8G8R8A8_SRGB,
RT_TEXTURE_FORMAT_MAX,
} rt_texture_format;
typedef enum {
RT_TEXTURE_USAGE_NONE = 0x00,
RT_TEXTURE_USAGE_COLOR_ATTACHMENT = 0x01,
RT_TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT = 0x02,
RT_TEXTURE_USAGE_SAMPLED_IMAGE = 0x04,
RT_TEXTURE_USAGE_STORAGE_IMAGE = 0x10,
} rt_texture_usage_flags;
/* Describes a GPU texture */
typedef struct {
/* Width in pixels */
@@ -91,6 +110,21 @@ typedef struct {
/* Height in pixels */
uint32_t height;
/* Pixel format */
rt_texture_format format;
/* Number of samples */
uint32_t samples;
/* Number of mip levels */
uint32_t mip_levels;
/* Bitmask of usages this texture needs to support */
rt_texture_usage_flags usage;
/* ResourceID of the resource that will be used to populate this texture. */
rt_resource_id source_resource;
} rt_render_texture2d_desc;
#endif
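As an example of how these fields combine, a description for a sampled color render target could look like the sketch below; the concrete values and the zero source_resource placeholder are assumptions, not values taken from the engine:

/* Illustrative only: a 1920x1080 sRGB target that is rendered to and later sampled. */
rt_render_texture2d_desc color_target_desc = {
    .width = 1920,
    .height = 1080,
    .format = RT_TEXTURE_FORMAT_B8G8R8A8_SRGB,
    .samples = 1,
    .mip_levels = 1,
    .usage = RT_TEXTURE_USAGE_COLOR_ATTACHMENT | RT_TEXTURE_USAGE_SAMPLED_IMAGE,
    .source_resource = 0, /* placeholder: no backing resource for a transient target */
};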

View File

@@ -655,4 +655,27 @@ rt_physical_resource_manager_i rtVkDevGetPhysicalResourceManager(void *o) {
.CreateTexture2D = rtVkPhysicalResourceManagerCreateTexture2D,
};
return iface;
}
rt_result rtVkDevSubmitCommandList(void *o, rt_render_command_list *list) {
/* Dummy implementation */
RT_UNUSED(o);
size_t data_off = 0;
for (uint32_t i = 0; i < list->length; ++i) {
if (list->headers[i].type != RT_RENDER_COMMAND_DRAW_INDIRECT)
break;
rtLog("VK", "DrawIndirect %u", i);
rtLog("VK", "Target queue: %u", list->headers[i].target_queue);
RT_VERIFY(data_off < list->data_end);
rt_draw_indirect_data *data = (rt_draw_indirect_data *)((char *)list->data + data_off);
rtLog("VK", "buffer %u draw_count %u offset %u stride %u",
data->buffer.value,
data->draw_count,
data->offset,
data->stride);
data_off += sizeof(rt_draw_indirect_data);
}
return RT_SUCCESS;
}

View File

@@ -89,5 +89,6 @@ void rtDestroyVkDevice(rt_vk_device *dev);
/* rt_render_device_i functions */
rt_physical_resource_manager_i rtVkDevGetPhysicalResourceManager(void *o);
rt_result rtVkDevSubmitCommandList(void *o, rt_render_command_list *list);
#endif

View File

@@ -37,6 +37,7 @@ rt_render_backend_init_result VkInit(const rt_renderer_window_info *info) {
rt_render_device_i device_i = {
.o = &_device,
.GetPhysicalResourceManager = rtVkDevGetPhysicalResourceManager,
.SubmitCommandList = rtVkDevSubmitCommandList,
};
res.device = device_i;

View File

@@ -147,6 +147,19 @@ void rtVkPhysicalResourceManagerDestroy(void *o, rt_render_resource_handle handl
rtUnlockWrite(&phys_res_mgr->lock);
}
/* Call this with a held write lock! */
static uint32_t AllocStorageSlot(rt_vk_physical_resource_manager *phys_res_mgr, rt_render_resource_handle h) {
if (phys_res_mgr->free_slot_count > 0u) {
uint32_t slot = phys_res_mgr->free_slots[--phys_res_mgr->free_slot_count];
/* The hashtable is large enough that this should never fail */
rt_result insert_res = rtHashtableInsert(&phys_res_mgr->resource_lut, (uint64_t)h.value, (uint64_t)slot);
RT_UNUSED(insert_res);
RT_VERIFY(insert_res == RT_SUCCESS);
return slot;
}
return UINT32_MAX;
}
rt_result rtVkPhysicalResourceManagerCreateBuffer(void *o, rt_render_resource_handle h, const rt_render_buffer_desc *desc) {
rt_vk_physical_resource_manager *phys_res_mgr = o;
@@ -190,28 +203,89 @@ rt_result rtVkPhysicalResourceManagerCreateBuffer(void *o, rt_render_resource_ha
/* Store */
rt_result res = RT_SUCCESS;
rtLockWrite(&phys_res_mgr->lock);
- if (phys_res_mgr->free_slot_count > 0u) {
- uint32_t slot = phys_res_mgr->free_slots[--phys_res_mgr->free_slot_count];
uint32_t slot = AllocStorageSlot(phys_res_mgr, h);
if (slot != UINT32_MAX) {
phys_res_mgr->resources[slot].type = RT_RENDER_RESOURCE_TYPE_BUFFER;
phys_res_mgr->resources[slot].buffer = buffer;
phys_res_mgr->resources[slot].allocation = allocation;
- /* The hashtable is large enough that this should never fail */
- rt_result insert_res = rtHashtableInsert(&phys_res_mgr->resource_lut, (uint64_t)h.value, (uint64_t)slot);
- RT_UNUSED(insert_res);
- RT_VERIFY(insert_res == RT_SUCCESS);
- }
- else {
- rtLog("VK", "Could not create buffer, because no free slots for storing it are available.");
- vmaDestroyBuffer(phys_res_mgr->allocator, buffer, allocation);
} else {
res = RT_OUT_OF_MEMORY;
vmaDestroyBuffer(phys_res_mgr->allocator, buffer, allocation);
rtLog("VK","Could not create buffer because no storage space is available.");
}
rtUnlockWrite(&phys_res_mgr->lock);
return res;
}
static VkFormat RtTextureFormatToVkFormat(rt_texture_format texture_format) {
RT_ASSERT(texture_format < RT_TEXTURE_FORMAT_MAX, "Invalid format");
VkFormat formats[RT_TEXTURE_FORMAT_MAX] = {
VK_FORMAT_B8G8R8A8_SRGB, // RT_TEXTURE_FORMAT_B8G8R8A8_SRGB
};
return formats[texture_format];
}
static VkImageUsageFlags RtTextureUsageToVkImageUsage(rt_texture_usage_flags usage_flags) {
VkImageUsageFlags usage = 0;
if ((usage_flags & RT_TEXTURE_USAGE_COLOR_ATTACHMENT) != 0)
usage |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
if ((usage_flags & RT_TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT) != 0)
usage |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
if ((usage_flags & RT_TEXTURE_USAGE_SAMPLED_IMAGE) != 0)
usage |= VK_IMAGE_USAGE_SAMPLED_BIT;
if ((usage_flags & RT_TEXTURE_USAGE_STORAGE_IMAGE) != 0)
usage |= VK_IMAGE_USAGE_STORAGE_BIT;
return usage;
}
rt_result rtVkPhysicalResourceManagerCreateTexture2D(void *o, rt_render_resource_handle h, const rt_render_texture2d_desc *desc) {
- RT_NOT_IMPLEMENTED;
- return RT_UNKNOWN_ERROR;
rt_vk_physical_resource_manager *phys_res_mgr = o;
VkFormat format = RtTextureFormatToVkFormat(desc->format);
VkExtent3D extent = {.width = desc->width, .height = desc->height, .depth = 1};
VkImageUsageFlags usage = RtTextureUsageToVkImageUsage(desc->usage);
VkImageCreateInfo image_info = {
.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
.extent = extent,
.format = format,
.imageType = VK_IMAGE_TYPE_2D,
.sharingMode = VK_SHARING_MODE_CONCURRENT,
.queueFamilyIndexCount = phys_res_mgr->dev->unique_family_count,
.pQueueFamilyIndices = &phys_res_mgr->dev->unique_families[0],
.mipLevels = 1,
.samples = VK_SAMPLE_COUNT_1_BIT,
.arrayLayers = 1,
.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
.usage = usage,
.tiling = VK_IMAGE_TILING_OPTIMAL,
};
VmaAllocationCreateInfo alloc_info = {
.usage = VMA_MEMORY_USAGE_AUTO,
};
VkImage image;
VmaAllocation allocation;
if (vmaCreateImage(phys_res_mgr->allocator, &image_info, &alloc_info, &image, &allocation, NULL) != VK_SUCCESS) {
rtLog("VK", "Failed to create image.");
return RT_UNKNOWN_ERROR;
}
/* Store */
rt_result res = RT_SUCCESS;
rtLockWrite(&phys_res_mgr->lock);
uint32_t slot = AllocStorageSlot(phys_res_mgr, h);
if (slot != UINT32_MAX) {
phys_res_mgr->resources[slot].type = RT_RENDER_RESOURCE_TYPE_TEXTURE2D; /* assumed texture counterpart to RT_RENDER_RESOURCE_TYPE_BUFFER */
phys_res_mgr->resources[slot].image = image;
phys_res_mgr->resources[slot].allocation = allocation;
} else {
res = RT_OUT_OF_MEMORY;
vmaDestroyImage(phys_res_mgr->allocator, image, allocation);
rtLog("VK","Could not create image because no storage space is available.");
}
rtUnlockWrite(&phys_res_mgr->lock);
return res;
}