/* rtengine/src/renderer/vk/init.c */

#ifdef _WIN32
#include <malloc.h> /* _aligned_malloc / _aligned_realloc / _aligned_free */
#endif
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#define RT_VK_DONT_DEFINE_GPU_GLOBAL
#include "gpu.h"
#include "render_targets.h"
#include "swapchain.h"
#include "runtime/config.h"
#include "runtime/runtime.h"
#include "gfx/renderer_api.h"
#define TARGET_API_VERSION VK_API_VERSION_1_3
RT_CVAR_I(r_VkEnableAPIAllocTracking,
          "Enable tracking of allocations done by the vulkan api. [0/1] Default: 0",
          0);
RT_CVAR_S(r_VkPhysDeviceName, "Name of the selected physical device. Default: \"\"", "");
RT_CVAR_I(r_VkMaxFramesInFlight, "Maximum number of frames in flight. [2/3] Default: 2", 2);
rt_vk_gpu g_gpu;
static VkAllocationCallbacks _tracking_alloc_cbs;
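/* Optional tracking callbacks for host allocations performed by the Vulkan
 * implementation itself. When r_VkEnableAPIAllocTracking is set, every
 * allocation is logged with its size, alignment and allocation scope. */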
static const char *AllocationScopeToString(VkSystemAllocationScope scope) {
    switch (scope) {
    case VK_SYSTEM_ALLOCATION_SCOPE_COMMAND:
        return "COMMAND";
    case VK_SYSTEM_ALLOCATION_SCOPE_OBJECT:
        return "OBJECT";
    case VK_SYSTEM_ALLOCATION_SCOPE_CACHE:
        return "CACHE";
    case VK_SYSTEM_ALLOCATION_SCOPE_DEVICE:
        return "DEVICE";
    case VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE:
        return "INSTANCE";
    default:
        return "UNKNOWN";
    }
}
static void *
TrackAllocation(void *userData, size_t size, size_t alignment, VkSystemAllocationScope scope) {
    rtLog("vk",
          "Allocation. Size: %zu, Alignment: %zu, Scope: %s",
          size,
          alignment,
          AllocationScopeToString(scope));
#ifdef _WIN32
    return _aligned_malloc(size, alignment);
#else
    /* C11 aligned_alloc requires size to be a multiple of alignment;
     * round up to satisfy it. (Vulkan alignments are powers of two.) */
    if (alignment != 0 && (size % alignment) != 0)
        size += alignment - (size % alignment);
    return aligned_alloc(alignment, size);
#endif
}
static void *TrackReallocation(void *userData,
                               void *original,
                               size_t size,
                               size_t alignment,
                               VkSystemAllocationScope scope) {
    rtLog("vk",
          "Reallocation. Size: %zu, Alignment: %zu, Scope: %s",
          size,
          alignment,
          AllocationScopeToString(scope));
#ifdef _WIN32
    /* Memory from _aligned_malloc must be resized with _aligned_realloc. */
    return _aligned_realloc(original, size, alignment);
#else
    /* Note: realloc only guarantees fundamental alignment; over-aligned
     * reallocations would need a manual allocate/copy/free. */
    return realloc(original, size);
#endif
}
static void TrackFree(void *userData, void *memory) {
#ifdef _WIN32
    /* Matches the _aligned_malloc in TrackAllocation. */
    _aligned_free(memory);
#else
    free(memory);
#endif
}
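/* Validation layer callback. Messages below the warning severity are
 * dropped; errors additionally trigger a debug break so the offending
 * call is still on the stack. */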
static VkBool32 VKAPI_PTR
DebugUtilsMessengerCb(VkDebugUtilsMessageSeverityFlagBitsEXT severity,
                      VkDebugUtilsMessageTypeFlagsEXT types,
                      const VkDebugUtilsMessengerCallbackDataEXT *callbackData,
                      void *userData) {
    if (severity < VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT)
        return VK_FALSE;
    const char *severity_str = "<UNKNOWN>";
    if (severity == VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT)
        severity_str = "WARNING";
    else if (severity == VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT)
        severity_str = "ERROR";
    rtLog("vk", "[%s] %s", severity_str, callbackData->pMessage);
    if (severity == VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT)
        RT_DEBUGBREAK;
    return VK_FALSE;
}
extern rt_cvar r_VkPreferredSwapchainImages;
extern rt_cvar r_VkPreferMailboxMode;
extern rt_cvar r_VkMaxPipelineCount;
void RT_RENDERER_API_FN(RegisterCVars)(void) {
    rtRegisterCVAR(&r_VkEnableAPIAllocTracking);
    rtRegisterCVAR(&r_VkPhysDeviceName);
    rtRegisterCVAR(&r_VkPreferredSwapchainImages);
    rtRegisterCVAR(&r_VkPreferMailboxMode);
    rtRegisterCVAR(&r_VkMaxFramesInFlight);
    rtRegisterCVAR(&r_VkMaxPipelineCount);
}
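/* Creates the VkInstance: initializes the loader via volk, enables the
 * platform surface extensions and, in debug builds, the Khronos validation
 * layer (if installed) plus a debug utils messenger. */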
static rt_result CreateInstance(void) {
    VkResult result = volkInitialize();
    if (result != VK_SUCCESS) {
        rtReportError("vk", "Initialization failed: volkInitialize()");
        return 1;
    }
    VkApplicationInfo app_info = {
        .sType = VK_STRUCTURE_TYPE_APPLICATION_INFO,
        .apiVersion = TARGET_API_VERSION,
        .applicationVersion = 0x00001000,
        .engineVersion = 0x00001000,
        .pEngineName = "voyageEngine",
        .pApplicationName = "Voyage",
    };
    const char *extensions[] = {
        VK_KHR_SURFACE_EXTENSION_NAME,
#ifdef _WIN32
        "VK_KHR_win32_surface",
#elif defined(RT_USE_XLIB)
        "VK_KHR_xlib_surface",
#endif
#ifdef RT_DEBUG
        VK_EXT_DEBUG_UTILS_EXTENSION_NAME,
#endif
    };
    const char *layers[1];
    unsigned int layer_count = 0;
#ifdef RT_DEBUG
    /* Search for layers we want to enable */
    uint32_t available_layer_count = 0;
    result = vkEnumerateInstanceLayerProperties(&available_layer_count, NULL);
    if (result == VK_SUCCESS) {
        VkLayerProperties *props = calloc(available_layer_count, sizeof(VkLayerProperties));
        if (props) {
            vkEnumerateInstanceLayerProperties(&available_layer_count, props);
            for (uint32_t i = 0; i < available_layer_count; ++i) {
                if (strcmp(props[i].layerName, "VK_LAYER_KHRONOS_validation") == 0) {
                    layers[0] = "VK_LAYER_KHRONOS_validation";
                    layer_count = 1;
                    break;
                }
            }
            free(props);
        } else {
            rtLog("vk", "Failed to allocate storage for instance layer properties.");
        }
    } else {
        rtLog("vk", "vkEnumerateInstanceLayerProperties failed.");
    }
#endif
    VkInstanceCreateInfo instance_info = {
        .sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,
        .pApplicationInfo = &app_info,
        .ppEnabledExtensionNames = extensions,
        .enabledExtensionCount = RT_ARRAY_COUNT(extensions),
        .ppEnabledLayerNames = layers,
        .enabledLayerCount = layer_count,
    };
    result = vkCreateInstance(&instance_info, g_gpu.alloc_cb, &g_gpu.instance);
    if (result != VK_SUCCESS) {
        rtReportError("vk", "Failed to create the vulkan instance.");
        return 1;
    }
    volkLoadInstance(g_gpu.instance);
#ifdef RT_DEBUG
    /* Create the debug utils messenger */
    VkDebugUtilsMessengerCreateInfoEXT messenger_info = {
        .sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT,
        .messageSeverity = VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT |
                           VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT,
        .messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT |
                       VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT,
        .pfnUserCallback = DebugUtilsMessengerCb,
    };
    vkCreateDebugUtilsMessengerEXT(g_gpu.instance,
                                   &messenger_info,
                                   g_gpu.alloc_cb,
                                   &g_gpu.messenger);
#endif
    return RT_SUCCESS;
}
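/* Creates the presentation surface for the platform window handles passed
 * in by the runtime. Only Win32 and Xlib are currently wired up. */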
static rt_result CreateSurface(const rt_renderer_init_info *info) {
#ifdef _WIN32
    g_gpu.native_window.hInstance = info->hInstance;
    g_gpu.native_window.hWnd = info->hWnd;
    VkWin32SurfaceCreateInfoKHR surface_info = {
        .sType = VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR,
        .hinstance = info->hInstance,
        .hwnd = info->hWnd,
    };
    if (vkCreateWin32SurfaceKHR(g_gpu.instance, &surface_info, g_gpu.alloc_cb, &g_gpu.surface) ==
        VK_SUCCESS)
        return RT_SUCCESS;
    else
        return 100;
#elif defined(RT_USE_XLIB)
    g_gpu.native_window.display = info->display;
    g_gpu.native_window.window = info->window;
    VkXlibSurfaceCreateInfoKHR surface_info = {
        .sType = VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR,
        .dpy = info->display,
        .window = info->window,
    };
    if (vkCreateXlibSurfaceKHR(g_gpu.instance, &surface_info, g_gpu.alloc_cb, &g_gpu.surface) ==
        VK_SUCCESS)
        return RT_SUCCESS;
    else
        return 100;
#else
#error "CreateSurface is not implemented for this platform."
#endif
}
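/* Queue family indices used by the renderer; UINT32_MAX means "not found". */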
typedef struct {
    uint32_t graphics;
    uint32_t compute;
    uint32_t present;
    uint32_t transfer;
} rt_queue_indices;
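/* Walks the queue family properties and picks a family for each role.
 * Later families overwrite earlier ones; if no family with the transfer
 * bit exists, the graphics (or compute) family is reused. */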
static rt_queue_indices RetrieveQueueIndices(VkPhysicalDevice phys_dev, VkSurfaceKHR surface) {
    rt_queue_indices indices = {.graphics = UINT32_MAX,
                                .compute = UINT32_MAX,
                                .present = UINT32_MAX,
                                .transfer = UINT32_MAX};
    uint32_t count = 0;
    vkGetPhysicalDeviceQueueFamilyProperties(phys_dev, &count, NULL);
    VkQueueFamilyProperties *props = calloc(count, sizeof(VkQueueFamilyProperties));
    if (!props) {
        return indices;
    }
    vkGetPhysicalDeviceQueueFamilyProperties(phys_dev, &count, props);
    for (uint32_t i = 0; i < count; ++i) {
        if (props[i].queueCount == 0)
            continue;
        if ((props[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) != 0)
            indices.graphics = i;
        if ((props[i].queueFlags & VK_QUEUE_COMPUTE_BIT) != 0)
            indices.compute = i;
        if ((props[i].queueFlags & VK_QUEUE_TRANSFER_BIT) != 0)
            indices.transfer = i;
        VkBool32 present_supported = VK_FALSE;
        vkGetPhysicalDeviceSurfaceSupportKHR(phys_dev, i, surface, &present_supported);
        if (present_supported)
            indices.present = i;
    }
    if (indices.transfer == UINT32_MAX && indices.graphics != UINT32_MAX)
        indices.transfer = indices.graphics;
    else if (indices.transfer == UINT32_MAX && indices.compute != UINT32_MAX)
        indices.transfer = indices.compute;
    free(props);
    return indices;
}
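/* Verifies that every extension in required_extensions (currently only
 * VK_KHR_swapchain) is offered by the device. */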
static bool CheckDeviceExtensionSupported(VkPhysicalDevice phys_dev) {
    const char *required_extensions[] = {
        VK_KHR_SWAPCHAIN_EXTENSION_NAME,
    };
    uint32_t extension_count;
    vkEnumerateDeviceExtensionProperties(phys_dev, NULL, &extension_count, NULL);
    VkExtensionProperties *supported_extensions =
        calloc(extension_count, sizeof(VkExtensionProperties));
    if (!supported_extensions)
        return false;
    vkEnumerateDeviceExtensionProperties(phys_dev, NULL, &extension_count, supported_extensions);
    bool supported = true;
    for (uint32_t i = 0; i < RT_ARRAY_COUNT(required_extensions); ++i) {
        bool found = false;
        for (uint32_t j = 0; j < extension_count; ++j) {
            if (strncmp(supported_extensions[j].extensionName,
                        required_extensions[i],
                        VK_MAX_EXTENSION_NAME_SIZE) == 0) {
                found = true;
                break;
            }
        }
        if (!found) {
            supported = false;
            VkPhysicalDeviceProperties props;
            vkGetPhysicalDeviceProperties(phys_dev, &props);
            rtLog("vk",
                  "Device %s does not support the required extension %s",
                  props.deviceName,
                  required_extensions[i]);
            goto out;
        }
    }
out:
    free(supported_extensions);
    return supported;
}
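/* Scores all physical devices and keeps the best one. A device must offer
 * the required extensions, graphics/compute/present queues, sync2, dynamic
 * rendering, timeline semaphores and the bindless descriptor features;
 * among those, discrete GPUs, larger framebuffer limits and native
 * non-uniform indexing score higher. Setting r_VkPhysDeviceName overrides
 * the scoring. */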
static rt_result ChoosePhysicalDevice(void) {
    g_gpu.phys_device = VK_NULL_HANDLE;
    uint32_t phys_device_count = 0;
    VkResult result = vkEnumeratePhysicalDevices(g_gpu.instance, &phys_device_count, NULL);
    if (result != VK_SUCCESS) {
        rtReportError("vk", "Failed to enumerate the physical devices.");
        return 2;
    }
    VkPhysicalDevice *phys_devices = calloc(phys_device_count, sizeof(VkPhysicalDevice));
    if (!phys_devices) {
        rtReportError("vk", "Failed to enumerate the physical devices: Out of memory.");
        return 2;
    }
    vkEnumeratePhysicalDevices(g_gpu.instance, &phys_device_count, phys_devices);
    uint32_t highscore = 0;
    uint32_t best_index = phys_device_count;
    for (uint32_t i = 0; i < phys_device_count; ++i) {
        VkPhysicalDeviceTimelineSemaphoreFeatures timeline_semaphore_features = {
            .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES,
        };
        VkPhysicalDeviceSynchronization2Features synchronization2_features = {
            .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SYNCHRONIZATION_2_FEATURES,
            .pNext = &timeline_semaphore_features,
        };
        VkPhysicalDeviceDynamicRenderingFeatures dynamic_rendering_features = {
            .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DYNAMIC_RENDERING_FEATURES,
            .pNext = &synchronization2_features,
        };
        VkPhysicalDeviceDescriptorIndexingFeatures descriptor_indexing_features = {
            .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES,
            .pNext = &dynamic_rendering_features,
        };
        VkPhysicalDeviceFeatures2 features = {
            .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2,
            .pNext = &descriptor_indexing_features,
        };
        vkGetPhysicalDeviceFeatures2(phys_devices[i], &features);
        VkPhysicalDeviceDescriptorIndexingProperties descriptor_indexing_props = {
            .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES,
            .pNext = NULL,
        };
        VkPhysicalDeviceProperties2 props = {
            .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2,
            .pNext = &descriptor_indexing_props,
        };
        vkGetPhysicalDeviceProperties2(phys_devices[i], &props);
        if (!CheckDeviceExtensionSupported(phys_devices[i]))
            continue;
        rt_queue_indices indices = RetrieveQueueIndices(phys_devices[i], g_gpu.surface);
        if (indices.compute == UINT32_MAX || indices.present == UINT32_MAX ||
            indices.graphics == UINT32_MAX)
            continue;
        if (!synchronization2_features.synchronization2 ||
            !dynamic_rendering_features.dynamicRendering ||
            !timeline_semaphore_features.timelineSemaphore)
            continue;
        /* Check for bindless support */
        if (!descriptor_indexing_features.runtimeDescriptorArray ||
            !descriptor_indexing_features.descriptorBindingPartiallyBound)
            continue;
        uint32_t score = 0;
        if (props.properties.deviceType == VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU)
            score += 100;
        score += (props.properties.limits.maxFramebufferWidth / 100) *
                 (props.properties.limits.maxFramebufferHeight / 100);
        score +=
            (descriptor_indexing_props.shaderStorageBufferArrayNonUniformIndexingNative) ? 100 : 0;
        score +=
            (descriptor_indexing_props.shaderSampledImageArrayNonUniformIndexingNative) ? 100 : 0;
        if (score > highscore) {
            highscore = score;
            best_index = i;
        }
        /* An explicit user selection overrides the score. Guard against the
         * default (empty) cvar value matching a device with an empty name. */
        if (r_VkPhysDeviceName.s[0] != '\0' &&
            strncmp(props.properties.deviceName,
                    r_VkPhysDeviceName.s,
                    VK_MAX_PHYSICAL_DEVICE_NAME_SIZE) == 0) {
            best_index = i;
            break;
        }
    }
    if (best_index < phys_device_count) {
        g_gpu.phys_device = phys_devices[best_index];
        VkPhysicalDeviceDescriptorIndexingProperties descriptor_indexing_props = {
            .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES,
            .pNext = NULL,
        };
        VkPhysicalDeviceProperties2 props = {
            .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2,
            .pNext = &descriptor_indexing_props,
        };
        VkPhysicalDeviceDescriptorIndexingFeatures descriptor_indexing_features = {
            .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES,
        };
        VkPhysicalDeviceFeatures2 features = {
            .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2,
            .pNext = &descriptor_indexing_features,
        };
        vkGetPhysicalDeviceFeatures2(phys_devices[best_index], &features);
        vkGetPhysicalDeviceProperties2(phys_devices[best_index], &props);
        g_gpu.phys_device_props = props.properties;
        g_gpu.descriptor_indexing_props = descriptor_indexing_props;
        g_gpu.phys_device_features = features.features;
        g_gpu.descriptor_indexing_features = descriptor_indexing_features;
    }
    free(phys_devices);
    if (g_gpu.phys_device == VK_NULL_HANDLE) {
        rtReportError("vk", "Failed to find a suitable physical device.");
        return 3;
    }
    return RT_SUCCESS;
}
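/* Creates the logical device. Each queue family may appear only once in
 * pQueueCreateInfos, so roles that share a family are collapsed into a
 * single entry. The feature structs queried from the device are chained
 * into VkDeviceCreateInfo::pNext to enable them. */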
static rt_result CreateDevice(void) {
    const char *extensions[] = {
        VK_KHR_SWAPCHAIN_EXTENSION_NAME,
    };
    rt_queue_indices queue_indices = RetrieveQueueIndices(g_gpu.phys_device, g_gpu.surface);
    g_gpu.compute_family = queue_indices.compute;
    g_gpu.graphics_family = queue_indices.graphics;
    g_gpu.present_family = queue_indices.present;
    g_gpu.transfer_family = queue_indices.transfer;
    float priority = 1.f;
    uint32_t distinct_queue_count = 1;
    VkDeviceQueueCreateInfo queue_info[4];
    queue_info[0].sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
    queue_info[0].pNext = NULL;
    queue_info[0].flags = 0;
    queue_info[0].queueCount = 1;
    queue_info[0].queueFamilyIndex = queue_indices.graphics;
    queue_info[0].pQueuePriorities = &priority;
    if (queue_indices.compute != queue_indices.graphics) {
        queue_info[1].sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
        queue_info[1].pNext = NULL;
        queue_info[1].flags = 0;
        queue_info[1].queueCount = 1;
        queue_info[1].queueFamilyIndex = queue_indices.compute;
        queue_info[1].pQueuePriorities = &priority;
        ++distinct_queue_count;
    }
    if (queue_indices.present != queue_indices.graphics &&
        queue_indices.present != queue_indices.compute) {
        queue_info[distinct_queue_count].sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
        queue_info[distinct_queue_count].pNext = NULL;
        queue_info[distinct_queue_count].flags = 0;
        queue_info[distinct_queue_count].queueCount = 1;
        queue_info[distinct_queue_count].queueFamilyIndex = queue_indices.present;
        queue_info[distinct_queue_count].pQueuePriorities = &priority;
        ++distinct_queue_count;
    }
    /* The transfer family must also be distinct from the present family;
     * duplicate family indices in pQueueCreateInfos are invalid. */
    if (queue_indices.transfer != queue_indices.graphics &&
        queue_indices.transfer != queue_indices.compute &&
        queue_indices.transfer != queue_indices.present) {
        queue_info[distinct_queue_count].sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
        queue_info[distinct_queue_count].pNext = NULL;
        queue_info[distinct_queue_count].flags = 0;
        queue_info[distinct_queue_count].queueCount = 1;
        queue_info[distinct_queue_count].queueFamilyIndex = queue_indices.transfer;
        queue_info[distinct_queue_count].pQueuePriorities = &priority;
        ++distinct_queue_count;
    }
    VkPhysicalDeviceTimelineSemaphoreFeatures timeline_semaphore_features = {
        .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES,
    };
    VkPhysicalDeviceSynchronization2Features synchronization2_features = {
        .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SYNCHRONIZATION_2_FEATURES,
        .pNext = &timeline_semaphore_features,
    };
    VkPhysicalDeviceDynamicRenderingFeatures dynamic_rendering_features = {
        .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DYNAMIC_RENDERING_FEATURES,
        .pNext = &synchronization2_features,
    };
    VkPhysicalDeviceDescriptorIndexingFeatures indexing_features = {
        .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES,
        .pNext = &dynamic_rendering_features,
    };
    VkPhysicalDeviceFeatures2 features = {.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2,
                                          .pNext = &indexing_features};
    vkGetPhysicalDeviceFeatures2(g_gpu.phys_device, &features);
    RT_ASSERT(indexing_features.runtimeDescriptorArray &&
                  indexing_features.descriptorBindingPartiallyBound,
              "We require a device that supports bindless vulkan.");
    VkDeviceCreateInfo device_info = {
        .sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,
        .pNext = &features,
        .enabledExtensionCount = RT_ARRAY_COUNT(extensions),
        .ppEnabledExtensionNames = extensions,
        .pQueueCreateInfos = queue_info,
        .queueCreateInfoCount = distinct_queue_count,
    };
    if (vkCreateDevice(g_gpu.phys_device, &device_info, g_gpu.alloc_cb, &g_gpu.device) !=
        VK_SUCCESS) {
        rtReportError("vk", "Device creation failed.");
        return 10;
    }
    vkGetDeviceQueue(g_gpu.device, queue_indices.graphics, 0, &g_gpu.graphics_queue);
    vkGetDeviceQueue(g_gpu.device, queue_indices.compute, 0, &g_gpu.compute_queue);
    vkGetDeviceQueue(g_gpu.device, queue_indices.present, 0, &g_gpu.present_queue);
    vkGetDeviceQueue(g_gpu.device, queue_indices.transfer, 0, &g_gpu.transfer_queue);
    return RT_SUCCESS;
}
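/* Sets up the Vulkan Memory Allocator. Because the entry points are loaded
 * dynamically through volk, VMA is handed an explicit function table
 * instead of resolving the Vulkan functions itself. */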
static rt_result CreateAllocator(void) {
#define SET_FNC(name) fncs.name = name
#define SET_KHR_FNC(name) (fncs).name##KHR = name
    VmaVulkanFunctions fncs = {NULL};
    SET_FNC(vkGetInstanceProcAddr);
    SET_FNC(vkGetDeviceProcAddr);
    SET_FNC(vkGetPhysicalDeviceProperties);
    SET_FNC(vkGetPhysicalDeviceMemoryProperties);
    SET_FNC(vkAllocateMemory);
    SET_FNC(vkFreeMemory);
    SET_FNC(vkMapMemory);
    SET_FNC(vkUnmapMemory);
    SET_FNC(vkFlushMappedMemoryRanges);
    SET_FNC(vkInvalidateMappedMemoryRanges);
    SET_FNC(vkBindBufferMemory);
    SET_FNC(vkBindImageMemory);
    SET_FNC(vkGetBufferMemoryRequirements);
    SET_FNC(vkGetImageMemoryRequirements);
    SET_FNC(vkCreateBuffer);
    SET_FNC(vkDestroyBuffer);
    SET_FNC(vkCreateImage);
    SET_FNC(vkDestroyImage);
    SET_FNC(vkCmdCopyBuffer);
    SET_KHR_FNC(vkGetBufferMemoryRequirements2);
    SET_KHR_FNC(vkGetImageMemoryRequirements2);
    SET_KHR_FNC(vkBindBufferMemory2);
    SET_KHR_FNC(vkBindImageMemory2);
    SET_KHR_FNC(vkGetPhysicalDeviceMemoryProperties2);
    SET_FNC(vkGetDeviceBufferMemoryRequirements);
    SET_FNC(vkGetDeviceImageMemoryRequirements);
#undef SET_FNC
#undef SET_KHR_FNC
    VmaAllocatorCreateInfo allocator_info = {
        .instance = g_gpu.instance,
        .physicalDevice = g_gpu.phys_device,
        .device = g_gpu.device,
        .pAllocationCallbacks = g_gpu.alloc_cb,
        .vulkanApiVersion = TARGET_API_VERSION,
        .pVulkanFunctions = &fncs,
    };
    return vmaCreateAllocator(&allocator_info, &g_gpu.allocator) == VK_SUCCESS ? RT_SUCCESS
                                                                               : RT_UNKNOWN_ERROR;
}
static void DestroyAllocator(void) {
    vmaDestroyAllocator(g_gpu.allocator);
}
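/* Per-frame synchronization objects: one semaphore each for "image
 * available", "render finished" and the swapchain layout transition, for
 * every frame in flight. In debug builds the semaphores get debug names so
 * they are identifiable in captures. */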
static rt_result CreatePerFrameObjects(void) {
    for (unsigned int i = 0; i < g_gpu.max_frames_in_flight; ++i) {
        VkSemaphoreCreateInfo semaphore_info = {
            .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
        };
        if (vkCreateSemaphore(g_gpu.device,
                              &semaphore_info,
                              g_gpu.alloc_cb,
                              &g_gpu.frames[i].render_finished) != VK_SUCCESS) {
            return RT_UNKNOWN_ERROR;
        }
        if (vkCreateSemaphore(g_gpu.device,
                              &semaphore_info,
                              g_gpu.alloc_cb,
                              &g_gpu.frames[i].image_available) != VK_SUCCESS) {
            return RT_UNKNOWN_ERROR;
        }
        if (vkCreateSemaphore(g_gpu.device,
                              &semaphore_info,
                              g_gpu.alloc_cb,
                              &g_gpu.frames[i].swapchain_transitioned) != VK_SUCCESS) {
            return RT_UNKNOWN_ERROR;
        }
#ifdef RT_DEBUG
        char name[128];
        rtSPrint(name, 128, "Render Finished Semaphore (%u)", i);
        VkDebugUtilsObjectNameInfoEXT name_info = {
            .sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT,
            .objectHandle = (uint64_t)g_gpu.frames[i].render_finished,
            .objectType = VK_OBJECT_TYPE_SEMAPHORE,
            .pObjectName = name,
        };
        vkSetDebugUtilsObjectNameEXT(g_gpu.device, &name_info);
        rtSPrint(name, 128, "Image Available Semaphore (%u)", i);
        name_info.objectHandle = (uint64_t)g_gpu.frames[i].image_available;
        vkSetDebugUtilsObjectNameEXT(g_gpu.device, &name_info);
        rtSPrint(name, 128, "Swapchain Transitioned Semaphore (%u)", i);
        name_info.objectHandle = (uint64_t)g_gpu.frames[i].swapchain_transitioned;
        vkSetDebugUtilsObjectNameEXT(g_gpu.device, &name_info);
#endif
    }
    return RT_SUCCESS;
}
void DestroyPerFrameObjects(void) {
    for (unsigned int i = 0; i < g_gpu.max_frames_in_flight; ++i) {
        vkDestroySemaphore(g_gpu.device, g_gpu.frames[i].image_available, g_gpu.alloc_cb);
        vkDestroySemaphore(g_gpu.device, g_gpu.frames[i].render_finished, g_gpu.alloc_cb);
        vkDestroySemaphore(g_gpu.device, g_gpu.frames[i].swapchain_transitioned, g_gpu.alloc_cb);
    }
}
extern rt_result InitPipelineManagement(void);
extern void ShutdownPipelineManagement(void);
extern rt_result InitRenderTargetManagement(void);
extern void ShutdownRenderTargetManagement(void);
extern rt_result InitCommandBufferManagement(void);
extern void ShutdownCommandBufferManagement(void);
extern rt_result InitializeSempahoreManagement(void);
extern void ShutdownSemaphoreManagement(void);
extern rt_result InitBufferManagement(void);
extern void ShutdownBufferManagement(void);
extern rt_result InitializeTransfers(void);
extern void ShutdownTransfers(void);
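/* Renderer entry point. Brings up the device in dependency order:
 * instance -> surface -> physical device -> device -> allocator ->
 * per-frame objects -> subsystems -> swapchain. Any failure aborts
 * initialization and returns that step's error code. */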
rt_result RT_RENDERER_API_FN(Init)(const rt_renderer_init_info *info) {
    rtLog("vk", "Init");
    _tracking_alloc_cbs.pUserData = NULL;
    _tracking_alloc_cbs.pfnAllocation = TrackAllocation;
    _tracking_alloc_cbs.pfnReallocation = TrackReallocation;
    _tracking_alloc_cbs.pfnFree = TrackFree;
    if (r_VkEnableAPIAllocTracking.i) {
        g_gpu.alloc_cb = &_tracking_alloc_cbs;
    } else {
        g_gpu.alloc_cb = NULL;
    }
    g_gpu.max_frames_in_flight = RT_RESTRICT_VALUE_TO_BOUNDS(r_VkMaxFramesInFlight.i,
                                                             RT_VK_MIN_SUPPORTED_FRAMES_IN_FLIGHT,
                                                             RT_VK_MAX_SUPPORTED_FRAMES_IN_FLIGHT);
    rt_result res = CreateInstance();
    if (res != RT_SUCCESS)
        return res;
    res = CreateSurface(info);
    if (res != RT_SUCCESS)
        return res;
    res = ChoosePhysicalDevice();
    if (res != RT_SUCCESS)
        return res;
    res = CreateDevice();
    if (res != RT_SUCCESS)
        return res;
    res = CreateAllocator();
    if (res != RT_SUCCESS)
        return res;
    res = CreatePerFrameObjects();
    if (res != RT_SUCCESS)
        return res;
    res = InitPipelineManagement();
    if (res != RT_SUCCESS)
        return res;
    res = InitRenderTargetManagement();
    if (res != RT_SUCCESS)
        return res;
    res = InitializeSempahoreManagement();
    if (res != RT_SUCCESS)
        return res;
    res = InitCommandBufferManagement();
    if (res != RT_SUCCESS)
        return res;
    res = InitBufferManagement();
    if (res != RT_SUCCESS)
        return res;
    res = InitializeTransfers();
    if (res != RT_SUCCESS)
        return res;
    res = rtCreateSwapchain();
    if (res != RT_SUCCESS)
        return res;
    rtUpdateSwapchainRenderTarget();
    return RT_SUCCESS;
}
void RT_RENDERER_API_FN(Shutdown)(void) {
    rtLog("vk", "Shutdown");
    vkDeviceWaitIdle(g_gpu.device);
    rtDestroySwapchain();
    ShutdownTransfers();
    ShutdownBufferManagement();
    ShutdownCommandBufferManagement();
    ShutdownSemaphoreManagement();
    ShutdownRenderTargetManagement();
    ShutdownPipelineManagement();
    DestroyPerFrameObjects();
    DestroyAllocator();
    vkDestroyDevice(g_gpu.device, g_gpu.alloc_cb);
    vkDestroySurfaceKHR(g_gpu.instance, g_gpu.surface, g_gpu.alloc_cb);
#ifdef RT_DEBUG
    vkDestroyDebugUtilsMessengerEXT(g_gpu.instance, g_gpu.messenger, g_gpu.alloc_cb);
#endif
    vkDestroyInstance(g_gpu.instance, g_gpu.alloc_cb);
}
unsigned int RT_RENDERER_API_FN(GetMaxFramesInFlight)(void) {
    return g_gpu.max_frames_in_flight;
}