commit 67e722c16237f935c4c7adffeb8af523efa78ccf (HEAD -> main)
tree   ad1d13da93abad3fc3452e7aa5d44d677b2c00c6
author    Clément Sibille <clements@lisible.xyz>  2025-12-08 12:45:47 +0100
committer Clément Sibille <clements@lisible.xyz>  2025-12-08 12:45:47 +0100

    Initial commit
Diffstat (limited to 'src')
-rw-r--r--  src/CMakeLists.txt    22
-rw-r--r--  src/backend/vk.c    2366
-rw-r--r--  src/lrhi.c            55
3 files changed, 2443 insertions, 0 deletions
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
new file mode 100644
index 0000000..201b3e2
--- /dev/null
+++ b/src/CMakeLists.txt
@@ -0,0 +1,22 @@
+find_package(Vulkan REQUIRED)
+
+add_library(
+ lrhi
+ lrhi.c
+ backend/vk.c
+)
+
+set_target_properties(lrhi PROPERTIES
+ C_STANDARD 17
+ C_EXTENSIONS OFF
+ RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}
+)
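+# Note: RUNTIME_OUTPUT_DIRECTORY only takes effect when lrhi is built as a
+# shared library on DLL platforms; it is harmless for static builds.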
+
+if (UNIX)
+ target_compile_options(lrhi PRIVATE -Wall -Wextra -Wpedantic)
+endif()
+
+target_include_directories(lrhi PUBLIC ${PROJECT_SOURCE_DIR}/include)
+target_include_directories(lrhi SYSTEM PRIVATE ${PROJECT_SOURCE_DIR}/vendor/vma)
+target_include_directories(lrhi PRIVATE ${Vulkan_INCLUDE_DIRS})
+target_link_libraries(lrhi PRIVATE ${Vulkan_LIBRARIES})
diff --git a/src/backend/vk.c b/src/backend/vk.c
new file mode 100644
index 0000000..d8fdb16
--- /dev/null
+++ b/src/backend/vk.c
@@ -0,0 +1,2366 @@
+#include "lrhi.h"
+#include <assert.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+#include <vk_mem_alloc.h>
+#include <vulkan/vulkan.h>
+#include <vulkan/vulkan_core.h>
+
+#include <X11/Xlib.h>
+#include <vulkan/vulkan_xlib.h>
+
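+// Number of frames the CPU may record ahead of the GPU: with 2, one frame
+// can be recorded while the previous one is still rendering.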
+enum { MAX_FRAME_IN_FLIGHT = 2 };
+
+static VKAPI_ATTR VkBool32 VKAPI_CALL
+debug_callback(VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity,
+ VkDebugUtilsMessageTypeFlagsEXT messageType,
+ const VkDebugUtilsMessengerCallbackDataEXT *pCallbackData,
+ void *pUserData) {
+ (void)messageSeverity;
+ (void)messageType;
+ (void)pUserData;
+ LRHI_LOG("validation layer: %s", pCallbackData->pMessage);
+ return VK_FALSE;
+}
+
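+// The debug-utils entry points come from VK_EXT_debug_utils, so the loader
+// does not export them statically; they have to be fetched at runtime with
+// vkGetInstanceProcAddr, which is what these two wrappers do.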
+static VkResult create_debug_utils_messenger_ext(
+ VkInstance instance, const VkDebugUtilsMessengerCreateInfoEXT *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkDebugUtilsMessengerEXT *pDebugMessenger) {
+ PFN_vkCreateDebugUtilsMessengerEXT func =
+ (PFN_vkCreateDebugUtilsMessengerEXT)vkGetInstanceProcAddr(
+ instance, "vkCreateDebugUtilsMessengerEXT");
+ if (func != NULL) {
+ return func(instance, pCreateInfo, pAllocator, pDebugMessenger);
+ } else {
+ return VK_ERROR_EXTENSION_NOT_PRESENT;
+ }
+}
+
+static void destroy_debug_utils_messenger_ext(
+ VkInstance instance, VkDebugUtilsMessengerEXT debugMessenger,
+ const VkAllocationCallbacks *pAllocator) {
+ PFN_vkDestroyDebugUtilsMessengerEXT func =
+ (PFN_vkDestroyDebugUtilsMessengerEXT)vkGetInstanceProcAddr(
+ instance, "vkDestroyDebugUtilsMessengerEXT");
+ if (func != NULL) {
+ func(instance, debugMessenger, pAllocator);
+ }
+}
+
+static bool
+are_validation_layers_supported(const char **validation_layers,
+ const uint32_t validation_layer_count) {
+ enum { AVAILABLE_LAYERS_BUF_SIZE = 128 };
+ VkLayerProperties available_layers_buf[AVAILABLE_LAYERS_BUF_SIZE] = {0};
+ uint32_t available_layer_count;
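+  // Vulkan's usual two-call enumeration: the first call returns the count,
+  // the second fills the array.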
+ vkEnumerateInstanceLayerProperties(&available_layer_count, NULL);
+ if (available_layer_count > AVAILABLE_LAYERS_BUF_SIZE) {
+ LRHI_LOG("available layer count > buf size, will be truncated");
+ available_layer_count = AVAILABLE_LAYERS_BUF_SIZE;
+ }
+
+ vkEnumerateInstanceLayerProperties(&available_layer_count,
+ available_layers_buf);
+
+ for (uint32_t layer_idx = 0; layer_idx < validation_layer_count;
+ layer_idx++) {
+ const char *required_layer = validation_layers[layer_idx];
+ bool layer_found = false;
+
+ for (uint32_t available_layer_idx = 0;
+ available_layer_idx < available_layer_count; available_layer_idx++) {
+ VkLayerProperties *available_layer =
+ &available_layers_buf[available_layer_idx];
+ if (strcmp(required_layer, available_layer->layerName) == 0) {
+ layer_found = true;
+ break;
+ }
+ }
+
+ if (!layer_found) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static void get_required_instance_extensions(
+ enum lrhi_window_backend window_backend, const char **required_extensions,
+ uint32_t required_extensions_capacity, uint32_t *required_extension_count,
+ bool enable_validation_layers) {
+  // TODO: also collect the platform layer's required instance extensions via
+  // lrhi_platform_get_vulkan_required_instance_extensions once it is wired up.
+  uint32_t ext_index = 0;
+
+ if (window_backend == LRHI_WINDOW_BACKEND_X11) {
+ required_extensions[ext_index++] = VK_KHR_SURFACE_EXTENSION_NAME;
+ required_extensions[ext_index++] = VK_KHR_XLIB_SURFACE_EXTENSION_NAME;
+ } else {
+ LRHI_PANIC("Unsupported window backend.");
+ }
+
+ if (ext_index < required_extensions_capacity && enable_validation_layers) {
+ required_extensions[ext_index++] = VK_EXT_DEBUG_UTILS_EXTENSION_NAME;
+ }
+ *required_extension_count = ext_index;
+}
+
+static const char *REQUIRED_VALIDATION_LAYERS[] = {
+ "VK_LAYER_KHRONOS_validation"};
+static const uint32_t REQUIRED_VALIDATION_LAYER_COUNT =
+ ARRAY_LENGTH(REQUIRED_VALIDATION_LAYERS);
+
+struct lrhi_instance {
+ VkInstance instance;
+ VkDebugUtilsMessengerEXT debug_messenger;
+ struct lrhi_allocator allocator;
+ bool enable_validation_layers;
+};
+
+lrhi_instance *lrhi_instance_create(struct lrhi_allocator *allocator,
+ enum lrhi_window_backend window_backend,
+ const struct lrhi_instance_desc *desc) {
+
+ struct lrhi_instance *instance =
+ lrhi_allocator_allocate(allocator, sizeof(struct lrhi_instance));
+  instance->allocator = *allocator;
+  // Default to false so lrhi_instance_destroy never reads an uninitialized
+  // flag when validation layers are disabled.
+  instance->enable_validation_layers = false;
+
+ // Creating instance and debug messenger
+ if (desc->enable_validation_layers) {
+ if (!are_validation_layers_supported(REQUIRED_VALIDATION_LAYERS,
+ REQUIRED_VALIDATION_LAYER_COUNT)) {
+ LRHI_LOG_ERR("required validation layers are not supported.");
+ goto fail;
+ }
+ }
+
+ VkApplicationInfo application_info = {
+ .sType = VK_STRUCTURE_TYPE_APPLICATION_INFO,
+ .pApplicationName = "lrhi",
+ .applicationVersion = VK_MAKE_VERSION(1, 0, 0),
+ .pEngineName = "none",
+ .engineVersion = VK_MAKE_VERSION(1, 0, 0),
+ .apiVersion = VK_API_VERSION_1_3};
+
+ VkInstanceCreateInfo instance_create_info = {
+ .sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,
+ .pApplicationInfo = &application_info};
+
+ enum { REQUIRED_EXTENSIONS_BUF_SIZE = 256 };
+ const char *required_extensions[REQUIRED_EXTENSIONS_BUF_SIZE];
+ uint32_t required_extension_count = 0;
+ get_required_instance_extensions(
+ window_backend, required_extensions, REQUIRED_EXTENSIONS_BUF_SIZE,
+ &required_extension_count, desc->enable_validation_layers);
+ instance_create_info.enabledExtensionCount = required_extension_count;
+ instance_create_info.ppEnabledExtensionNames = required_extensions;
+
+ VkDebugUtilsMessengerCreateInfoEXT debug_utils_messenger_create_info = {
+ .sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT,
+ .messageSeverity = VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT |
+ VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT |
+ VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT,
+ .messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT |
+ VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT |
+ VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT,
+ .pfnUserCallback = debug_callback,
+ };
+ if (desc->enable_validation_layers) {
+ instance_create_info.enabledLayerCount = REQUIRED_VALIDATION_LAYER_COUNT;
+ instance_create_info.ppEnabledLayerNames = REQUIRED_VALIDATION_LAYERS;
+    instance_create_info.pNext = &debug_utils_messenger_create_info;
+ } else {
+ instance_create_info.enabledLayerCount = 0;
+ instance_create_info.pNext = NULL;
+ }
+
+ if (vkCreateInstance(&instance_create_info, NULL, &instance->instance) !=
+ VK_SUCCESS) {
+ LRHI_LOG_ERR("Vulkan instance creation failed.");
+ goto fail;
+ }
+ LRHI_LOG("Vulkan instance created.");
+
+ if (desc->enable_validation_layers) {
+
+ if (create_debug_utils_messenger_ext(
+ instance->instance, &debug_utils_messenger_create_info, NULL,
+ &instance->debug_messenger) != VK_SUCCESS) {
+ LRHI_LOG_ERR("Couldn't create VkDebugUtilsMessengerCreateInfoEXT");
+ goto destroy_instance;
+ }
+
+ instance->enable_validation_layers = true;
+ }
+
+ return instance;
+destroy_instance:
+ vkDestroyInstance(instance->instance, NULL);
+fail:
+ lrhi_allocator_free(allocator, sizeof(struct lrhi_instance), instance);
+ return NULL;
+}
+void lrhi_instance_destroy(struct lrhi_allocator *allocator,
+ lrhi_instance *instance) {
+ if (instance->enable_validation_layers) {
+ destroy_debug_utils_messenger_ext(instance->instance,
+ instance->debug_messenger, NULL);
+ }
+ vkDestroyInstance(instance->instance, NULL);
+ lrhi_allocator_free(allocator, sizeof(struct lrhi_instance), instance);
+}
+
+struct lrhi_texture {
+ VkImage image;
+
+ // Note: These fields are NULL for the swapchain textures
+ VmaAllocation allocation;
+ VmaAllocationInfo allocation_info;
+};
+
+struct lrhi_texture_view {
+ VkImageView view;
+};
+
+#define VK_SWAPCHAIN_IMAGE_BUFFER_CAPACITY 32
+struct lrhi_surface {
+ VkSurfaceKHR surface;
+ VkSwapchainKHR swapchain;
+
+ lrhi_texture images[VK_SWAPCHAIN_IMAGE_BUFFER_CAPACITY];
+ lrhi_texture_view image_views[VK_SWAPCHAIN_IMAGE_BUFFER_CAPACITY];
+
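+  // image_available is indexed per frame in flight; render_finished is
+  // indexed per swapchain image, since presentation can still wait on an
+  // image's semaphore after the frame slot has been reused.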
+ VkSemaphore image_available_semaphore[MAX_FRAME_IN_FLIGHT];
+ VkSemaphore render_finished_semaphore[VK_SWAPCHAIN_IMAGE_BUFFER_CAPACITY];
+ VkExtent2D new_extent;
+ VkExtent2D extent;
+ VkFormat fmt;
+ lrhi_surface_reconfigured_callback surface_reconfigured_callback;
+ void *surface_reconfigured_user_data;
+ bool resized;
+ uint32_t img_count;
+ uint32_t image_index;
+};
+lrhi_surface *lrhi_instance_create_surface(
+ lrhi_instance *instance, struct lrhi_native_surface *native_surface,
+ lrhi_surface_reconfigured_callback cb, void *user_data) {
+ lrhi_surface *surface = lrhi_allocator_allocate(&instance->allocator,
+ sizeof(struct lrhi_surface));
+  surface->swapchain = VK_NULL_HANDLE;
+
+ if (native_surface->backend == LRHI_WINDOW_BACKEND_X11) {
+ VkXlibSurfaceCreateInfoKHR surface_create_info = {
+ .sType = VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR,
+ .dpy = (Display *)native_surface->display_hnd,
+ .window = (Window)(uintptr_t)native_surface->window_hnd};
+
+ if (vkCreateXlibSurfaceKHR(instance->instance, &surface_create_info, NULL,
+ &surface->surface) != VK_SUCCESS) {
+ LRHI_LOG_ERR("Vulkan surface creation failed.");
+ goto fail;
+ }
+ } else {
+ LRHI_PANIC("Unsupported window backend.");
+ }
+
+ surface->surface_reconfigured_callback = cb;
+ surface->surface_reconfigured_user_data = user_data;
+
+ return surface;
+fail:
+ lrhi_allocator_free(&instance->allocator, sizeof(struct lrhi_surface),
+ surface);
+ return NULL;
+}
+
+struct queue_family_indices {
+ uint32_t graphics_family;
+ uint32_t present_family;
+ bool has_graphics;
+ bool has_present;
+};
+
+static bool
+queue_family_indices_complete(const struct queue_family_indices *indices) {
+ return indices->has_graphics && indices->has_present;
+}
+
+static struct queue_family_indices
+queue_family_indices_find_for_device(VkPhysicalDevice device,
+ VkSurfaceKHR surface) {
+ struct queue_family_indices indices = {0};
+
+ enum { QUEUE_FAMILIES_BUF_SIZE = 64 };
+ VkQueueFamilyProperties queue_families[QUEUE_FAMILIES_BUF_SIZE] = {0};
+
+ uint32_t queue_family_count = 0;
+ vkGetPhysicalDeviceQueueFamilyProperties(device, &queue_family_count, NULL);
+
+ if (queue_family_count > QUEUE_FAMILIES_BUF_SIZE) {
+ LRHI_LOG("Physical device queue family count too large for the queue "
+ "families buffer, will be truncated.");
+ queue_family_count = QUEUE_FAMILIES_BUF_SIZE;
+ }
+ vkGetPhysicalDeviceQueueFamilyProperties(device, &queue_family_count,
+ queue_families);
+ for (uint32_t queue_family_idx = 0; queue_family_idx < queue_family_count;
+ queue_family_idx++) {
+ VkQueueFamilyProperties *queue_family = &queue_families[queue_family_idx];
+ if (queue_family->queueFlags & VK_QUEUE_GRAPHICS_BIT) {
+ indices.graphics_family = queue_family_idx;
+ indices.has_graphics = true;
+ }
+
+    VkBool32 present_support = VK_FALSE;
+ vkGetPhysicalDeviceSurfaceSupportKHR(device, queue_family_idx, surface,
+ &present_support);
+ if (present_support) {
+ indices.present_family = queue_family_idx;
+ indices.has_present = true;
+ }
+
+ if (queue_family_indices_complete(&indices)) {
+ break;
+ }
+ }
+
+ return indices;
+}
+
+static const char *REQUIRED_DEVICE_EXTS[] = {
+ VK_KHR_SWAPCHAIN_EXTENSION_NAME,
+#ifdef LRHI_PLATFORM_MACOS
+ "VK_KHR_portability_subset",
+#endif // LRHI_PLATFORM_MACOS
+};
+#define SUPPORTED_EXT_BUFFER_CAPACITY 1024
+static const uint32_t REQUIRED_DEVICE_EXT_COUNT =
+    ARRAY_LENGTH(REQUIRED_DEVICE_EXTS);
+static bool are_required_device_extensions_supported(VkPhysicalDevice pd) {
+ // Fetch supported extensions
+ uint32_t supported_ext_count = 0;
+ if (vkEnumerateDeviceExtensionProperties(pd, NULL, &supported_ext_count,
+ NULL) != VK_SUCCESS) {
+ LRHI_LOG("Couldn't enumerate supported device extensions");
+ goto err;
+ }
+
+ if (supported_ext_count > SUPPORTED_EXT_BUFFER_CAPACITY) {
+    LRHI_LOG("Device reports %u supported extensions, but the render backend "
+             "buffer only holds %u; the rest will be ignored",
+             supported_ext_count, SUPPORTED_EXT_BUFFER_CAPACITY);
+ supported_ext_count = SUPPORTED_EXT_BUFFER_CAPACITY;
+ }
+
+ const uint32_t supported_ext_requested_count = supported_ext_count;
+ VkExtensionProperties supported_exts[SUPPORTED_EXT_BUFFER_CAPACITY];
+ if (vkEnumerateDeviceExtensionProperties(pd, NULL, &supported_ext_count,
+ supported_exts) != VK_SUCCESS) {
+ LRHI_LOG("Couldn't enumerate supported device extensions");
+ goto err;
+ }
+
+ if (supported_ext_count < supported_ext_requested_count) {
+ LRHI_LOG("Actual supported extension count is smaller than expected");
+ }
+
+ for (uint32_t required_ext_index = 0;
+ required_ext_index < REQUIRED_DEVICE_EXT_COUNT; required_ext_index++) {
+ const char *required_ext_name = REQUIRED_DEVICE_EXTS[required_ext_index];
+ bool found = false;
+ for (uint32_t supported_ext_index = 0;
+ supported_ext_index < supported_ext_count; supported_ext_index++) {
+ const char *supported_ext_name =
+ supported_exts[supported_ext_index].extensionName;
+
+ if (strcmp(required_ext_name, supported_ext_name) == 0) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ return false;
+ }
+ }
+
+ return true;
+
+err:
+ return false;
+}
+#define SWAPCHAIN_SUPPORT_DETAILS_MAX_SURF_FMT_COUNT 256
+#define SWAPCHAIN_SUPPORT_DETAILS_MAX_PRESENT_MODE_COUNT 256
+struct swapchain_support_details {
+ VkSurfaceCapabilitiesKHR capabilities;
+ VkSurfaceFormatKHR formats[SWAPCHAIN_SUPPORT_DETAILS_MAX_SURF_FMT_COUNT];
+ VkPresentModeKHR
+ present_modes[SWAPCHAIN_SUPPORT_DETAILS_MAX_PRESENT_MODE_COUNT];
+ uint32_t format_count;
+ uint32_t present_mode_count;
+};
+static bool
+get_swapchain_support_details(VkPhysicalDevice pd, VkSurfaceKHR surf,
+ struct swapchain_support_details *details) {
+ if (vkGetPhysicalDeviceSurfaceCapabilitiesKHR(
+ pd, surf, &details->capabilities) != VK_SUCCESS) {
+ goto err;
+ }
+
+ if (vkGetPhysicalDeviceSurfaceFormatsKHR(pd, surf, &details->format_count,
+ NULL) != VK_SUCCESS) {
+ goto err;
+ }
+
+  if (details->format_count != 0 &&
+      details->format_count <= SWAPCHAIN_SUPPORT_DETAILS_MAX_SURF_FMT_COUNT) {
+ if (vkGetPhysicalDeviceSurfaceFormatsKHR(pd, surf, &details->format_count,
+ details->formats) != VK_SUCCESS) {
+ goto err;
+ }
+ }
+
+ if (vkGetPhysicalDeviceSurfacePresentModesKHR(
+ pd, surf, &details->present_mode_count, NULL) != VK_SUCCESS) {
+ goto err;
+ }
+
+  if (details->present_mode_count != 0 &&
+      details->present_mode_count <=
+          SWAPCHAIN_SUPPORT_DETAILS_MAX_PRESENT_MODE_COUNT) {
+ if (vkGetPhysicalDeviceSurfacePresentModesKHR(
+ pd, surf, &details->present_mode_count, details->present_modes) !=
+ VK_SUCCESS) {
+ goto err;
+ }
+ }
+
+ return true;
+err:
+ return false;
+}
+static bool is_physical_device_suitable(VkPhysicalDevice device,
+ VkSurfaceKHR surface) {
+ struct queue_family_indices indices =
+ queue_family_indices_find_for_device(device, surface);
+
+ VkPhysicalDeviceFeatures supported_features;
+ vkGetPhysicalDeviceFeatures(device, &supported_features);
+
+ bool extensions_supported = are_required_device_extensions_supported(device);
+ bool swapchain_adequate = false;
+ if (extensions_supported) {
+ struct swapchain_support_details swapchain_support_details = {0};
+ if (!get_swapchain_support_details(device, surface,
+ &swapchain_support_details)) {
+ LRHI_LOG("Couldn't query swapchain support details from device");
+ goto err;
+ }
+
+ swapchain_adequate = swapchain_support_details.format_count > 0 &&
+ swapchain_support_details.present_mode_count > 0;
+ }
+
+ return queue_family_indices_complete(&indices) && extensions_supported &&
+ swapchain_adequate && supported_features.samplerAnisotropy;
+err:
+ return false;
+}
+struct lrhi_command_buffer {
+ lrhi_device *device;
+ VkCommandBuffer buffer;
+};
+
+struct lrhi_descriptor_set {
+ VkDescriptorSet set;
+};
+
+enum { MAX_RENDER_PASS_COLOR_ATTACHMENT = 64 };
+struct framebuffer_cache_entry {
+ VkFramebuffer framebuffer;
+ VkRenderPass render_pass;
+ VkImageView attachments[MAX_RENDER_PASS_COLOR_ATTACHMENT];
+ uint32_t attachment_count;
+ uint32_t width;
+ uint32_t height;
+ bool used;
+};
+
+enum { FRAMEBUFFER_CACHE_CAPACITY = 64 };
+enum { DESCRIPTOR_SET_POOL_CAPACITY = 1024 };
+struct lrhi_device {
+ struct lrhi_allocator *allocator;
+ VkPhysicalDevice physical_device;
+ VkDevice device;
+ VkQueue graphics_queue;
+ VkQueue present_queue;
+ VkCommandPool command_pool;
+ VmaAllocator vma_allocator;
+ VkFence in_flight_fence[MAX_FRAME_IN_FLIGHT];
+ lrhi_command_buffer command_buffers[MAX_FRAME_IN_FLIGHT];
+ VkDescriptorPool descriptor_pool;
+ lrhi_descriptor_set descriptor_sets[DESCRIPTOR_SET_POOL_CAPACITY];
+ int used_descriptor_set_count;
+ uint32_t current_frame;
+
+  // Cleared via framebuffer_cache_clear in lrhi_instance_destroy_device.
+ struct framebuffer_cache_entry framebuffer_cache[FRAMEBUFFER_CACHE_CAPACITY];
+};
+static void framebuffer_cache_clear(lrhi_device *device) {
+ for (uint32_t i = 0; i < FRAMEBUFFER_CACHE_CAPACITY; i++) {
+ if (device->framebuffer_cache[i].used) {
+ vkDestroyFramebuffer(device->device,
+ device->framebuffer_cache[i].framebuffer, NULL);
+ device->framebuffer_cache[i].used = false;
+ }
+ }
+}
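+// A VkFramebuffer is tied to one render pass and one exact set of image
+// views, and the swapchain cycles through several images, so framebuffers
+// are memoized here instead of being recreated on every pass.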
+static VkFramebuffer framebuffer_cache_get_or_create(
+ lrhi_device *device, VkRenderPass pass, const VkImageView *attachments,
+ uint32_t attachment_count, uint32_t width, uint32_t height) {
+ // Looking for a matching framebuffer
+ for (uint32_t i = 0; i < FRAMEBUFFER_CACHE_CAPACITY; i++) {
+ struct framebuffer_cache_entry *entry = &device->framebuffer_cache[i];
+ if (!entry->used) {
+ continue;
+ }
+
+ if (entry->render_pass == pass &&
+ entry->attachment_count == attachment_count && entry->width == width &&
+ entry->height == height) {
+ bool same_attachments = true;
+      // att_idx avoids shadowing the cache-scan index i above.
+      for (uint32_t att_idx = 0; att_idx < attachment_count; att_idx++) {
+        if (attachments[att_idx] != entry->attachments[att_idx]) {
+ same_attachments = false;
+ break;
+ }
+ }
+
+ if (same_attachments) {
+ return entry->framebuffer;
+ }
+ }
+ }
+
+ // We haven't found one, so we create a new one
+ uint32_t free_slot = UINT32_MAX;
+
+ for (uint32_t i = 0; i < FRAMEBUFFER_CACHE_CAPACITY; i++) {
+ if (!device->framebuffer_cache[i].used) {
+ free_slot = i;
+ break;
+ }
+ }
+
+ if (free_slot == UINT32_MAX) {
+ LRHI_PANIC("Framebuffer cache is full, this shouldn't happen.");
+ }
+
+ struct framebuffer_cache_entry *entry = &device->framebuffer_cache[free_slot];
+ VkFramebufferCreateInfo fb_info = {
+ .sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
+ .renderPass = pass,
+ .attachmentCount = attachment_count,
+ .pAttachments = attachments,
+ .width = width,
+ .height = height,
+ .layers = 1};
+
+ if (vkCreateFramebuffer(device->device, &fb_info, NULL,
+ &entry->framebuffer) != VK_SUCCESS) {
+ LRHI_PANIC("Framebuffer creation failed!");
+ }
+
+ entry->render_pass = pass;
+ entry->attachment_count = attachment_count;
+ entry->width = width;
+ entry->height = height;
+ entry->used = true;
+
+ for (uint32_t i = 0; i < attachment_count; i++) {
+ entry->attachments[i] = attachments[i];
+ }
+
+ return entry->framebuffer;
+}
+lrhi_device *lrhi_instance_create_device(lrhi_instance *instance,
+ lrhi_surface *surface) {
+ struct lrhi_device *device =
+ lrhi_allocator_allocate(&instance->allocator, sizeof(struct lrhi_device));
+
+ device->allocator = &instance->allocator;
+
+ // Picking physical device
+ VkPhysicalDevice physical_device = VK_NULL_HANDLE;
+ uint32_t device_count;
+ vkEnumeratePhysicalDevices(instance->instance, &device_count, NULL);
+ if (device_count == 0) {
+ LRHI_LOG_ERR("Failed to find any GPU with Vulkan support.");
+ goto fail;
+ }
+
+ enum { PHYSICAL_DEVICE_BUF_SIZE = 64 };
+ VkPhysicalDevice physical_devices[PHYSICAL_DEVICE_BUF_SIZE] = {0};
+ if (device_count > PHYSICAL_DEVICE_BUF_SIZE) {
+ LRHI_LOG("Physical device count too large for the physical "
+ "devices buffer, will be truncated.");
+ device_count = PHYSICAL_DEVICE_BUF_SIZE;
+ }
+ vkEnumeratePhysicalDevices(instance->instance, &device_count,
+ physical_devices);
+
+ for (uint32_t physical_device_idx = 0; physical_device_idx < device_count;
+ physical_device_idx++) {
+ if (is_physical_device_suitable(physical_devices[physical_device_idx],
+ surface->surface)) {
+      physical_device = physical_devices[physical_device_idx];
+      break;
+    }
+ }
+
+ if (physical_device == VK_NULL_HANDLE) {
+ LRHI_LOG_ERR("Failed to find a suitable GPU.");
+ goto fail;
+ }
+ device->physical_device = physical_device;
+ LRHI_LOG("Picked physical device.");
+
+ // Create logical device
+ struct queue_family_indices queue_family_indices =
+ queue_family_indices_find_for_device(physical_device, surface->surface);
+
+ enum { UNIQUE_QUEUE_FAMILIES_BUF_SIZE = 2 };
+ uint32_t unique_queue_families[UNIQUE_QUEUE_FAMILIES_BUF_SIZE];
+ int unique_queue_family_count = 1;
+ unique_queue_families[0] = queue_family_indices.graphics_family;
+ if (queue_family_indices.present_family !=
+ queue_family_indices.graphics_family) {
+ unique_queue_families[1] = queue_family_indices.present_family;
+ unique_queue_family_count++;
+ }
+
+ VkDeviceQueueCreateInfo queue_create_infos[UNIQUE_QUEUE_FAMILIES_BUF_SIZE] = {
+ 0};
+ float queue_priority = 1.f;
+ for (int i = 0; i < unique_queue_family_count; i++) {
+ VkDeviceQueueCreateInfo *queue_create_info = &queue_create_infos[i];
+ queue_create_info->sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
+ queue_create_info->queueFamilyIndex = unique_queue_families[i];
+ queue_create_info->queueCount = 1;
+ queue_create_info->pQueuePriorities = &queue_priority;
+ }
+
+ VkPhysicalDeviceFeatures device_features = {.samplerAnisotropy = VK_TRUE};
+ VkDeviceCreateInfo device_create_info = {
+ .sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,
+ .pQueueCreateInfos = queue_create_infos,
+ .queueCreateInfoCount = unique_queue_family_count,
+ .pEnabledFeatures = &device_features,
+ .ppEnabledExtensionNames = REQUIRED_DEVICE_EXTS,
+ .enabledExtensionCount = REQUIRED_DEVICE_EXT_COUNT};
+ if (instance->enable_validation_layers) {
+ device_create_info.enabledLayerCount = REQUIRED_VALIDATION_LAYER_COUNT;
+ device_create_info.ppEnabledLayerNames = REQUIRED_VALIDATION_LAYERS;
+ } else {
+ device_create_info.enabledLayerCount = 0;
+ }
+
+ if (vkCreateDevice(physical_device, &device_create_info, NULL,
+ &device->device) != VK_SUCCESS) {
+ LRHI_LOG_ERR("Failed to create logical device.");
+ goto fail;
+ }
+ vkGetDeviceQueue(device->device, queue_family_indices.graphics_family, 0,
+ &device->graphics_queue);
+ vkGetDeviceQueue(device->device, queue_family_indices.present_family, 0,
+ &device->present_queue);
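+  // When one queue family supports both graphics and present, these two
+  // handles refer to the same VkQueue.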
+
+ if (vmaCreateAllocator(
+ &(const VmaAllocatorCreateInfo){.device = device->device,
+ .instance = instance->instance,
+ .physicalDevice =
+ device->physical_device},
+ &device->vma_allocator) != VK_SUCCESS) {
+ LRHI_LOG_ERR("Failed to create vulkan memory allocator");
+ goto destroy_device;
+ }
+
+ VkCommandPoolCreateInfo pool_info = {
+ .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
+ .flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT,
+ .queueFamilyIndex = queue_family_indices.graphics_family};
+
+ if (vkCreateCommandPool(device->device, &pool_info, NULL,
+ &device->command_pool) != VK_SUCCESS) {
+ LRHI_LOG_ERR("Failed to create command pool.");
+ goto destroy_vma;
+ }
+
+ VkCommandBufferAllocateInfo command_buffer_allocate_info = {
+ .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
+ .commandPool = device->command_pool,
+ .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,
+ .commandBufferCount = MAX_FRAME_IN_FLIGHT};
+ VkCommandBuffer cmd_bufs[MAX_FRAME_IN_FLIGHT];
+ if (vkAllocateCommandBuffers(device->device, &command_buffer_allocate_info,
+ cmd_bufs) != VK_SUCCESS) {
+ LRHI_LOG_ERR("Failed to allocate command buffers.");
+ goto destroy_command_pool;
+ }
+ for (int i = 0; i < MAX_FRAME_IN_FLIGHT; i++) {
+ device->command_buffers[i].buffer = cmd_bufs[i];
+ device->command_buffers[i].device = device;
+ }
+
+ VkFenceCreateInfo fence_infos = {.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
+ .flags = VK_FENCE_CREATE_SIGNALED_BIT};
+ uint32_t fence_idx;
+ for (fence_idx = 0; fence_idx < MAX_FRAME_IN_FLIGHT; fence_idx++) {
+ if (vkCreateFence(device->device, &fence_infos, NULL,
+ &device->in_flight_fence[fence_idx]) != VK_SUCCESS) {
+ LRHI_LOG_ERR("Failed to create fences.");
+ goto destroy_in_flight_fences;
+ }
+ }
+
+ // Creating descriptor pools
+  // Pool sizes must back the descriptor_sets array below; a maxSets smaller
+  // than DESCRIPTOR_SET_POOL_CAPACITY would make vkAllocateDescriptorSets
+  // fail long before the wrapper pool is exhausted.
+  VkDescriptorPoolSize uniform_buffer_descriptor_pool_size = {
+      .type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
+      .descriptorCount = DESCRIPTOR_SET_POOL_CAPACITY};
+
+  VkDescriptorPoolSize combined_image_sampler_pool_size = {
+      .type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
+      .descriptorCount = DESCRIPTOR_SET_POOL_CAPACITY};
+
+  VkDescriptorPoolCreateInfo descriptor_pool_info = {
+      .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
+      .poolSizeCount = 2,
+      .pPoolSizes =
+          (const VkDescriptorPoolSize[]){uniform_buffer_descriptor_pool_size,
+                                         combined_image_sampler_pool_size},
+      .maxSets = DESCRIPTOR_SET_POOL_CAPACITY};
+
+ if (vkCreateDescriptorPool(device->device, &descriptor_pool_info, NULL,
+ &device->descriptor_pool) != VK_SUCCESS) {
+ LRHI_LOG_ERR("Uniform descriptor pool creation failed.");
+ goto destroy_in_flight_fences;
+ }
+ device->used_descriptor_set_count = 0;
+
+ device->current_frame = 0;
+
+  // Zero-initialize the framebuffer cache so every .used flag starts false.
+ memset(device->framebuffer_cache, 0, sizeof(device->framebuffer_cache));
+
+ return device;
+destroy_in_flight_fences:
+ for (uint32_t i = 0; i < fence_idx; i++) {
+ vkDestroyFence(device->device, device->in_flight_fence[i], NULL);
+ }
+
+ VkCommandBuffer command_buffers_to_free[MAX_FRAME_IN_FLIGHT] = {0};
+ for (int i = 0; i < MAX_FRAME_IN_FLIGHT; i++) {
+ command_buffers_to_free[i] = device->command_buffers[i].buffer;
+ }
+ vkFreeCommandBuffers(device->device, device->command_pool,
+ MAX_FRAME_IN_FLIGHT, command_buffers_to_free);
+destroy_command_pool:
+ vkDestroyCommandPool(device->device, device->command_pool, NULL);
+destroy_vma:
+ vmaDestroyAllocator(device->vma_allocator);
+destroy_device:
+ vkDestroyDevice(device->device, NULL);
+fail:
+ lrhi_allocator_free(&instance->allocator, sizeof(struct lrhi_device), device);
+ return NULL;
+}
+void lrhi_instance_destroy_device(lrhi_instance *instance,
+ lrhi_device *device) {
+ framebuffer_cache_clear(device);
+ vkDestroyCommandPool(device->device, device->command_pool, NULL);
+ vkDestroyDescriptorPool(device->device, device->descriptor_pool, NULL);
+ for (int i = 0; i < MAX_FRAME_IN_FLIGHT; i++) {
+ vkDestroyFence(device->device, device->in_flight_fence[i], NULL);
+ }
+ vmaDestroyAllocator(device->vma_allocator);
+ vkDestroyDevice(device->device, NULL);
+ lrhi_allocator_free(&instance->allocator, sizeof(struct lrhi_device), device);
+ LRHI_LOG("Logical device destroyed.");
+}
+
+struct lrhi_shader_module {
+ VkShaderModule module;
+};
+lrhi_shader_module *
+lrhi_device_create_shader_module(lrhi_device *device,
+ const struct lrhi_shader_module_desc *desc) {
+ lrhi_shader_module *module = lrhi_allocator_allocate(
+ device->allocator, sizeof(struct lrhi_shader_module));
+
+ if (vkCreateShaderModule(
+ device->device,
+ &(const VkShaderModuleCreateInfo){
+ .sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,
+ .pCode = (const uint32_t *)desc->spirv_source,
+ .codeSize = desc->spirv_source_size},
+ NULL, &module->module) != VK_SUCCESS) {
+ if (desc->label) {
+ LRHI_LOG_ERR("Shader module \"%s\" creation failed.", desc->label);
+ } else {
+ LRHI_LOG_ERR("Shader module creation failed.");
+ }
+
+ goto fail;
+ }
+
+ return module;
+fail:
+  lrhi_allocator_free(device->allocator, sizeof(struct lrhi_shader_module),
+                      module);
+  return NULL;
+}
+void lrhi_device_wait_idle(lrhi_device *device) {
+ vkDeviceWaitIdle(device->device);
+}
+void lrhi_device_destroy_shader_module(lrhi_device *device,
+ lrhi_shader_module *module) {
+ vkDestroyShaderModule(device->device, module->module, NULL);
+ lrhi_allocator_free(device->allocator, sizeof(struct lrhi_shader_module),
+ module);
+}
+
+static VkDescriptorType
+lrhi_descriptor_type_to_vk_descriptor_type(enum lrhi_descriptor_type type) {
+ switch (type) {
+ case lrhi_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
+ return VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
+ case lrhi_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
+ default:
+ return VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+ }
+}
+
+static VkShaderStageFlags
+lrhi_shader_stages_to_vk_shader_stage_flags(lrhi_shader_stages stages) {
+ VkShaderStageFlags flags = 0;
+
+ if (stages & lrhi_SHADER_STAGE_VERTEX) {
+ flags |= VK_SHADER_STAGE_VERTEX_BIT;
+ }
+
+ if (stages & lrhi_SHADER_STAGE_FRAGMENT) {
+ flags |= VK_SHADER_STAGE_FRAGMENT_BIT;
+ }
+
+ return flags;
+}
+
+struct lrhi_descriptor_set_layout {
+ VkDescriptorSetLayout layout;
+};
+
+lrhi_descriptor_set_layout *lrhi_device_create_descriptor_set_layout(
+ lrhi_device *device, const struct lrhi_descriptor_set_layout_desc *desc) {
+
+ struct lrhi_descriptor_set_layout *layout = lrhi_allocator_allocate(
+ device->allocator, sizeof(struct lrhi_descriptor_set_layout));
+
+ enum { BINDINGS_BUF_CAP = 64 };
+  VkDescriptorSetLayoutBinding bindings[BINDINGS_BUF_CAP];
+  assert(desc->binding_count <= BINDINGS_BUF_CAP);
+  for (uint32_t i = 0; i < desc->binding_count; i++) {
+ bindings[i].descriptorCount = desc->bindings[i].descriptor_count;
+ bindings[i].descriptorType = lrhi_descriptor_type_to_vk_descriptor_type(
+ desc->bindings[i].descriptor_type);
+ bindings[i].binding = desc->bindings[i].binding;
+ bindings[i].pImmutableSamplers = NULL;
+ bindings[i].stageFlags = lrhi_shader_stages_to_vk_shader_stage_flags(
+ desc->bindings[i].visibility);
+ }
+
+ VkDescriptorSetLayoutCreateInfo layout_info = {
+ .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
+ .bindingCount = desc->binding_count,
+ .pBindings = bindings};
+
+ if (vkCreateDescriptorSetLayout(device->device, &layout_info, NULL,
+ &layout->layout) != VK_SUCCESS) {
+ LRHI_LOG_ERR("Descriptor set layout creation failed.");
+ goto fail;
+ }
+ return layout;
+
+fail:
+  lrhi_allocator_free(device->allocator,
+                      sizeof(struct lrhi_descriptor_set_layout), layout);
+  return NULL;
+}
+void lrhi_device_destroy_descriptor_set_layout(
+ lrhi_device *device, lrhi_descriptor_set_layout *layout) {
+ vkDestroyDescriptorSetLayout(device->device, layout->layout, NULL);
+ lrhi_allocator_free(device->allocator,
+ sizeof(struct lrhi_descriptor_set_layout), layout);
+}
+
+struct lrhi_buffer {
+ VkBuffer buffer;
+ VmaAllocation allocation;
+ VmaAllocationInfo allocation_info;
+};
+
+struct lrhi_sampler {
+ VkSampler sampler;
+};
+
+lrhi_descriptor_set *
+lrhi_device_create_descriptor_set(lrhi_device *device,
+ const struct lrhi_descriptor_set_desc *desc) {
+
+ if (device->used_descriptor_set_count >= DESCRIPTOR_SET_POOL_CAPACITY) {
+ LRHI_LOG_ERR("Descriptor set pool capacity exceeded.");
+ goto fail;
+ }
+  lrhi_descriptor_set *set =
+      &device->descriptor_sets[device->used_descriptor_set_count];
+
+ VkDescriptorSetLayout layouts[] = {desc->layout->layout};
+ VkDescriptorSetAllocateInfo descriptor_set_allocate_info = {
+ .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
+ .descriptorPool = device->descriptor_pool,
+ .descriptorSetCount = 1,
+ .pSetLayouts = layouts};
+
+ if (vkAllocateDescriptorSets(device->device, &descriptor_set_allocate_info,
+ &set->set) != VK_SUCCESS) {
+ LRHI_LOG_ERR("Couldn't allocate descriptor set.");
+ goto fail;
+ }
+
+  enum { DESCRIPTOR_WRITES_BUF_CAP = 64 };
+  assert(desc->entry_count <= DESCRIPTOR_WRITES_BUF_CAP);
+  VkWriteDescriptorSet descriptor_writes[DESCRIPTOR_WRITES_BUF_CAP] = {0};
+  // These infos must outlive the loop: vkUpdateDescriptorSets reads them
+  // through the pointers stored in descriptor_writes. Pointing at compound
+  // literals scoped to the if-blocks below would leave dangling pointers.
+  VkDescriptorBufferInfo buffer_infos[DESCRIPTOR_WRITES_BUF_CAP] = {0};
+  VkDescriptorImageInfo image_infos[DESCRIPTOR_WRITES_BUF_CAP] = {0};
+  for (uint32_t i = 0; i < desc->entry_count; i++) {
+    const VkDescriptorBufferInfo *buffer_info = NULL;
+    const VkDescriptorImageInfo *image_info = NULL;
+
+    if (desc->entries[i].buffer_info) {
+      buffer_infos[i] = (VkDescriptorBufferInfo){
+          .buffer = desc->entries[i].buffer_info->buffer->buffer,
+          .offset = desc->entries[i].buffer_info->offset,
+          .range = desc->entries[i].buffer_info->range};
+      buffer_info = &buffer_infos[i];
+    } else if (desc->entries[i].texture_info) {
+      image_infos[i] = (VkDescriptorImageInfo){
+          .imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
+          .imageView = desc->entries[i].texture_info->view->view,
+          .sampler = desc->entries[i].texture_info->sampler->sampler,
+      };
+      image_info = &image_infos[i];
+    }
+
+ descriptor_writes[i].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
+ descriptor_writes[i].dstSet = set->set;
+ descriptor_writes[i].dstBinding = desc->entries[i].binding;
+ descriptor_writes[i].dstArrayElement = 0;
+ descriptor_writes[i].descriptorType =
+ lrhi_descriptor_type_to_vk_descriptor_type(
+ desc->entries[i].descriptor_type);
+ descriptor_writes[i].descriptorCount = 1;
+ descriptor_writes[i].pBufferInfo = buffer_info;
+ descriptor_writes[i].pImageInfo = image_info;
+ descriptor_writes[i].pTexelBufferView = NULL;
+ }
+
+ vkUpdateDescriptorSets(device->device, desc->entry_count, descriptor_writes,
+ 0, NULL);
+
+  // Only claim the pool slot once allocation and the writes have succeeded.
+  device->used_descriptor_set_count++;
+  return set;
+
+fail:
+ return NULL;
+}
+
+struct lrhi_pipeline_layout {
+ VkPipelineLayout layout;
+};
+lrhi_pipeline_layout *lrhi_device_create_pipeline_layout(
+ lrhi_device *device, const struct lrhi_pipeline_layout_desc *desc) {
+ lrhi_pipeline_layout *layout = lrhi_allocator_allocate(
+ device->allocator, sizeof(struct lrhi_pipeline_layout));
+
+  enum { SET_LAYOUTS_BUF_CAP = 256 };
+  VkDescriptorSetLayout layouts[SET_LAYOUTS_BUF_CAP];
+  assert(desc->set_layout_count <= SET_LAYOUTS_BUF_CAP);
+ for (uint32_t i = 0; i < desc->set_layout_count; i++) {
+ layouts[i] = desc->set_layouts[i].layout;
+ }
+
+ VkPipelineLayoutCreateInfo pipeline_layout_info = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
+ .setLayoutCount = desc->set_layout_count,
+ .pSetLayouts = layouts,
+ .pushConstantRangeCount = 0};
+
+ if (vkCreatePipelineLayout(device->device, &pipeline_layout_info, NULL,
+ &layout->layout) != VK_SUCCESS) {
+ LRHI_LOG_ERR("Pipeline layout creation failed.");
+ goto fail;
+ }
+
+ return layout;
+fail:
+  lrhi_allocator_free(device->allocator, sizeof(struct lrhi_pipeline_layout),
+                      layout);
+  return NULL;
+}
+void lrhi_device_destroy_pipeline_layout(lrhi_device *device,
+ lrhi_pipeline_layout *layout) {
+ vkDestroyPipelineLayout(device->device, layout->layout, NULL);
+ lrhi_allocator_free(device->allocator, sizeof(struct lrhi_pipeline_layout),
+ layout);
+}
+
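+// Unhandled lrhi formats fall through to R32G32B32_SFLOAT via the default
+// branch, the same fallback pattern used by the other enum-mapping helpers
+// in this file.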
+static VkFormat lrhi_format_to_vk_format(enum lrhi_format format) {
+ switch (format) {
+ case lrhi_FORMAT_B8G8R8A8_SRGB:
+ return VK_FORMAT_B8G8R8A8_SRGB;
+ case lrhi_FORMAT_R8G8B8A8_SRGB:
+ return VK_FORMAT_R8G8B8A8_SRGB;
+ case lrhi_FORMAT_R32G32_SFLOAT:
+ return VK_FORMAT_R32G32_SFLOAT;
+ case lrhi_FORMAT_D32_SFLOAT:
+ return VK_FORMAT_D32_SFLOAT;
+ case lrhi_FORMAT_R32G32B32_SFLOAT:
+ default:
+ return VK_FORMAT_R32G32B32_SFLOAT;
+ }
+}
+
+struct lrhi_render_pass {
+ VkRenderPass pass;
+};
+static VkAttachmentLoadOp lrhi_attachment_load_op_to_vk_attachment_load_op(
+ enum lrhi_attachment_load_op op) {
+ switch (op) {
+ case lrhi_ATTACHMENT_LOAD_OP_CLEAR:
+ return VK_ATTACHMENT_LOAD_OP_CLEAR;
+ case lrhi_ATTACHMENT_LOAD_OP_LOAD:
+ return VK_ATTACHMENT_LOAD_OP_LOAD;
+ case lrhi_ATTACHMENT_LOAD_OP_DONT_CARE:
+ default:
+ return VK_ATTACHMENT_LOAD_OP_DONT_CARE;
+ }
+}
+static VkAttachmentStoreOp lrhi_attachment_store_op_to_vk_attachment_store_op(
+ enum lrhi_attachment_store_op op) {
+ switch (op) {
+ case lrhi_ATTACHMENT_STORE_OP_STORE:
+ return VK_ATTACHMENT_STORE_OP_STORE;
+ case lrhi_ATTACHMENT_STORE_OP_DONT_CARE:
+ default:
+ return VK_ATTACHMENT_STORE_OP_DONT_CARE;
+ }
+}
+lrhi_render_pass *
+lrhi_device_create_render_pass(lrhi_device *device,
+ const struct lrhi_render_pass_desc *desc) {
+ lrhi_render_pass *pass = lrhi_allocator_allocate(
+ device->allocator, sizeof(struct lrhi_render_pass));
+
+ VkAttachmentDescription color_attachments[MAX_RENDER_PASS_COLOR_ATTACHMENT] =
+ {0};
+ VkAttachmentReference
+ color_attachment_refs[MAX_RENDER_PASS_COLOR_ATTACHMENT] = {0};
+  assert(desc->color_attachment_count <= MAX_RENDER_PASS_COLOR_ATTACHMENT);
+ for (uint32_t i = 0; i < desc->color_attachment_count; i++) {
+ color_attachments[i].format =
+ lrhi_format_to_vk_format(desc->color_attachments[i].format);
+ color_attachments[i].samples = desc->color_attachments[i].sample_count;
+ color_attachments[i].loadOp =
+ lrhi_attachment_load_op_to_vk_attachment_load_op(
+ desc->color_attachments[i].load_op);
+ color_attachments[i].storeOp =
+ lrhi_attachment_store_op_to_vk_attachment_store_op(
+ desc->color_attachments[i].store_op);
+ color_attachments[i].stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
+ color_attachments[i].stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+
+ // NOTE This might not be sufficient
+ if (color_attachments[i].loadOp == VK_ATTACHMENT_LOAD_OP_LOAD) {
+ color_attachments[i].initialLayout =
+ VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+ color_attachments[i].finalLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
+ } else {
+ color_attachments[i].initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+ color_attachments[i].finalLayout =
+ VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+ }
+ color_attachment_refs[i].attachment = i;
+ color_attachment_refs[i].layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+ }
+
+ VkAttachmentDescription depth_attachment = {0};
+ VkAttachmentReference depth_attachment_ref = {0};
+ if (desc->depth_stencil_attachment) {
+ depth_attachment.format =
+ lrhi_format_to_vk_format(desc->depth_stencil_attachment->format);
+ depth_attachment.samples = desc->depth_stencil_attachment->sample_count;
+ depth_attachment.loadOp = lrhi_attachment_load_op_to_vk_attachment_load_op(
+ desc->depth_stencil_attachment->load_op);
+ depth_attachment.storeOp =
+ lrhi_attachment_store_op_to_vk_attachment_store_op(
+ desc->depth_stencil_attachment->store_op);
+ depth_attachment.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
+ depth_attachment.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+ depth_attachment.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+ depth_attachment.finalLayout =
+ VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
+    // The depth attachment is appended after all color attachments below.
+    depth_attachment_ref.attachment = desc->color_attachment_count;
+ depth_attachment_ref.layout =
+ VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
+ }
+
+ VkSubpassDescription subpass = {
+ .pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
+ .colorAttachmentCount = desc->color_attachment_count,
+ .pColorAttachments = color_attachment_refs};
+
+ VkSubpassDependency dependency = {
+ .srcSubpass = VK_SUBPASS_EXTERNAL,
+ .srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
+ .srcAccessMask = 0,
+ .dstSubpass = 0,
+ .dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
+ .dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT};
+
+ if (desc->depth_stencil_attachment) {
+ subpass.pDepthStencilAttachment = &depth_attachment_ref;
+ dependency.srcStageMask |= VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
+ dependency.srcAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
+ dependency.dstStageMask |= VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT;
+ dependency.dstAccessMask |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
+ }
+
+  VkAttachmentDescription
+      attachments[MAX_RENDER_PASS_COLOR_ATTACHMENT + 1] = {0};
+ uint32_t attachment_index;
+ for (attachment_index = 0; attachment_index < desc->color_attachment_count;
+ attachment_index++) {
+ attachments[attachment_index] = color_attachments[attachment_index];
+ }
+ if (desc->depth_stencil_attachment) {
+ attachments[attachment_index++] = depth_attachment;
+ }
+
+ VkRenderPassCreateInfo render_pass_info = {
+ .sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
+ .attachmentCount = attachment_index,
+ .pAttachments = attachments,
+ .subpassCount = 1,
+ .pSubpasses = &subpass,
+ .dependencyCount = 1,
+ .pDependencies = &dependency};
+
+ if (vkCreateRenderPass(device->device, &render_pass_info, NULL,
+ &pass->pass) != VK_SUCCESS) {
+ LRHI_LOG_ERR("Couldn't create render pass.");
+ goto fail;
+ }
+
+ return pass;
+
+fail:
+  lrhi_allocator_free(device->allocator, sizeof(struct lrhi_render_pass),
+                      pass);
+  return NULL;
+}
+void lrhi_device_destroy_render_pass(lrhi_device *device,
+ lrhi_render_pass *pass) {
+ vkDestroyRenderPass(device->device, pass->pass, NULL);
+ lrhi_allocator_free(device->allocator, sizeof(struct lrhi_render_pass), pass);
+}
+
+static VkVertexInputRate lrhi_vertex_input_rate_to_vk_vertex_input_rate(
+ enum lrhi_vertex_input_rate rate) {
+ switch (rate) {
+ case lrhi_VERTEX_INPUT_RATE_INSTANCE:
+ return VK_VERTEX_INPUT_RATE_INSTANCE;
+ case lrhi_VERTEX_INPUT_RATE_VERTEX:
+ default:
+ return VK_VERTEX_INPUT_RATE_VERTEX;
+ }
+}
+
+VkCompareOp
+lrhi_compare_function_to_vk_compare_op(enum lrhi_compare_function op) {
+ switch (op) {
+ case lrhi_COMPARE_FUNCTION_LESS:
+ return VK_COMPARE_OP_LESS;
+ case lrhi_COMPARE_FUNCTION_LESS_EQUAL:
+ return VK_COMPARE_OP_LESS_OR_EQUAL;
+ case lrhi_COMPARE_FUNCTION_EQUAL:
+ return VK_COMPARE_OP_EQUAL;
+ case lrhi_COMPARE_FUNCTION_NOT_EQUAL:
+ return VK_COMPARE_OP_NOT_EQUAL;
+ case lrhi_COMPARE_FUNCTION_GREATER_EQUAL:
+ return VK_COMPARE_OP_GREATER_OR_EQUAL;
+ case lrhi_COMPARE_FUNCTION_GREATER:
+ return VK_COMPARE_OP_GREATER;
+ case lrhi_COMPARE_FUNCTION_ALWAYS:
+ return VK_COMPARE_OP_ALWAYS;
+ case lrhi_COMPARE_FUNCTION_NEVER:
+ default:
+ return VK_COMPARE_OP_NEVER;
+ }
+}
+
+struct lrhi_pipeline {
+ VkPipeline pipeline;
+};
+lrhi_pipeline *
+lrhi_device_create_pipeline(lrhi_device *device,
+ const struct lrhi_pipeline_desc *desc) {
+ lrhi_pipeline *pipeline =
+ lrhi_allocator_allocate(device->allocator, sizeof(struct lrhi_pipeline));
+
+ VkPipelineShaderStageCreateInfo vertex_shader_stage_info = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
+ .stage = VK_SHADER_STAGE_VERTEX_BIT,
+ .module = desc->vertex_module->module,
+ .pName = "main"};
+
+ VkPipelineShaderStageCreateInfo fragment_shader_stage_info = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
+ .stage = VK_SHADER_STAGE_FRAGMENT_BIT,
+ .module = desc->fragment_module->module,
+ .pName = "main"};
+
+ VkPipelineShaderStageCreateInfo shader_stages[] = {
+ vertex_shader_stage_info, fragment_shader_stage_info};
+
+ VkDynamicState dynamic_states[] = {VK_DYNAMIC_STATE_VIEWPORT,
+ VK_DYNAMIC_STATE_SCISSOR};
+
+ VkPipelineDynamicStateCreateInfo dynamic_state = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO,
+ .dynamicStateCount = ARRAY_LENGTH(dynamic_states),
+ .pDynamicStates = dynamic_states};
+
+ enum { VERTEX_INPUT_BINDING_DESCRIPTION_BUFFER_CAP = 16 };
+ VkVertexInputBindingDescription vertex_input_binding_descriptions
+ [VERTEX_INPUT_BINDING_DESCRIPTION_BUFFER_CAP];
+ uint32_t vertex_input_binding_description_count =
+ desc->vertex_input->binding_count;
+ assert(vertex_input_binding_description_count <=
+ VERTEX_INPUT_BINDING_DESCRIPTION_BUFFER_CAP);
+
+  for (uint32_t i = 0; i < desc->vertex_input->binding_count; i++) {
+ vertex_input_binding_descriptions[i].binding =
+ desc->vertex_input->bindings[i].binding;
+ vertex_input_binding_descriptions[i].stride =
+ desc->vertex_input->bindings[i].stride;
+ vertex_input_binding_descriptions[i].inputRate =
+ lrhi_vertex_input_rate_to_vk_vertex_input_rate(
+ desc->vertex_input->bindings[i].input_rate);
+ }
+
+ enum { VERTEX_INPUT_ATTRIBUTE_DESCRIPTION_BUFFER_CAP = 16 };
+ VkVertexInputAttributeDescription vertex_input_attribute_descriptions
+ [VERTEX_INPUT_ATTRIBUTE_DESCRIPTION_BUFFER_CAP];
+ uint32_t vertex_input_attribute_description_count =
+ desc->vertex_input->attribute_count;
+ assert(vertex_input_attribute_description_count <=
+ VERTEX_INPUT_ATTRIBUTE_DESCRIPTION_BUFFER_CAP);
+ for (uint32_t i = 0; i < vertex_input_attribute_description_count; i++) {
+ vertex_input_attribute_descriptions[i].binding =
+ desc->vertex_input->attributes[i].binding;
+ vertex_input_attribute_descriptions[i].location =
+ desc->vertex_input->attributes[i].location;
+ vertex_input_attribute_descriptions[i].offset =
+ desc->vertex_input->attributes[i].offset;
+ vertex_input_attribute_descriptions[i].format =
+ lrhi_format_to_vk_format(desc->vertex_input->attributes[i].format);
+ }
+
+ VkPipelineVertexInputStateCreateInfo vertex_input_info = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
+ .vertexBindingDescriptionCount = vertex_input_binding_description_count,
+ .pVertexBindingDescriptions = vertex_input_binding_descriptions,
+ .vertexAttributeDescriptionCount =
+ vertex_input_attribute_description_count,
+ .pVertexAttributeDescriptions = vertex_input_attribute_descriptions};
+
+ VkPipelineInputAssemblyStateCreateInfo input_assembly = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
+ .topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST,
+ .primitiveRestartEnable = VK_FALSE,
+ };
+
+ VkPipelineViewportStateCreateInfo viewport_state = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
+ .viewportCount = 1,
+ .scissorCount = 1};
+
+ VkPipelineRasterizationStateCreateInfo rasterizer = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
+ .depthClampEnable = VK_FALSE,
+ .rasterizerDiscardEnable = VK_FALSE,
+ .polygonMode = VK_POLYGON_MODE_FILL,
+ .lineWidth = 1.f,
+ .cullMode = VK_CULL_MODE_BACK_BIT,
+ .frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE,
+ .depthBiasEnable = VK_FALSE};
+
+ VkPipelineMultisampleStateCreateInfo multisampling = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
+ .sampleShadingEnable = VK_FALSE,
+ .rasterizationSamples = VK_SAMPLE_COUNT_1_BIT};
+
+ VkPipelineColorBlendAttachmentState color_blend_attachment = {
+ .colorWriteMask = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT |
+ VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT,
+ .blendEnable = VK_FALSE};
+
+ VkPipelineColorBlendStateCreateInfo color_blending = {
+ .sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,
+ .logicOpEnable = VK_FALSE,
+ .attachmentCount = 1,
+ .pAttachments = &color_blend_attachment};
+
+  // Kept at function scope: pipeline_info and vkCreateGraphicsPipelines read
+  // this after the if-block, so a compound literal scoped to the block would
+  // dangle.
+  VkPipelineDepthStencilStateCreateInfo depth_stencil_state;
+  VkPipelineDepthStencilStateCreateInfo *depth_stencil = NULL;
+  if (desc->depth_stencil) {
+    depth_stencil_state = (VkPipelineDepthStencilStateCreateInfo){
+        .sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO,
+        .depthTestEnable = VK_TRUE,
+        .depthWriteEnable = desc->depth_stencil->depth_write_enabled,
+        .depthCompareOp = lrhi_compare_function_to_vk_compare_op(
+            desc->depth_stencil->compare_function),
+        .depthBoundsTestEnable = VK_FALSE,
+        .stencilTestEnable = VK_FALSE,
+    };
+    depth_stencil = &depth_stencil_state;
+  }
+
+ VkGraphicsPipelineCreateInfo pipeline_info = {
+ .sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
+ .stageCount = 2,
+ .pStages = shader_stages,
+ .pVertexInputState = &vertex_input_info,
+ .pInputAssemblyState = &input_assembly,
+ .pViewportState = &viewport_state,
+ .pRasterizationState = &rasterizer,
+ .pMultisampleState = &multisampling,
+ .pDepthStencilState = depth_stencil,
+ .pColorBlendState = &color_blending,
+ .pDynamicState = &dynamic_state,
+ .layout = desc->layout->layout,
+ .renderPass = desc->pass->pass,
+ .subpass = 0,
+ .basePipelineHandle = VK_NULL_HANDLE,
+ .basePipelineIndex = -1};
+
+ if (vkCreateGraphicsPipelines(device->device, VK_NULL_HANDLE, 1,
+ &pipeline_info, NULL,
+ &pipeline->pipeline) != VK_SUCCESS) {
+ LRHI_LOG_ERR("Failed to create graphics pipeline.");
+ goto fail;
+ }
+
+ return pipeline;
+
+fail:
+  lrhi_allocator_free(device->allocator, sizeof(struct lrhi_pipeline),
+                      pipeline);
+  return NULL;
+}
+void lrhi_device_destroy_pipeline(lrhi_device *device,
+ lrhi_pipeline *pipeline) {
+ vkDestroyPipeline(device->device, pipeline->pipeline, NULL);
+ lrhi_allocator_free(device->allocator, sizeof(struct lrhi_pipeline),
+ pipeline);
+}
+
+static VkBufferUsageFlags
+lrhi_buffer_usage_to_vk_buffer_usage_flags(lrhi_buffer_usage usage) {
+ VkBufferUsageFlags flags = 0;
+  // lrhi_buffer_usage is a bitmask, so each flag is tested independently
+  // instead of with an else-if chain that would drop combined usages.
+  if (usage & lrhi_BUFFER_USAGE_VERTEX) {
+    flags |= VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
+    flags |= VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+  }
+  if (usage & lrhi_BUFFER_USAGE_INDEX) {
+    flags |= VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
+    flags |= VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+  }
+  if (usage & lrhi_BUFFER_USAGE_TRANSFER) {
+    flags |= VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
+  }
+  if (usage & lrhi_BUFFER_USAGE_UNIFORM) {
+    flags |= VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
+  }
+
+ return flags;
+}
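+// Vertex and index buffers implicitly get TRANSFER_DST above so that
+// lrhi_device_create_buffer_init below can fill them from a staging buffer.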
+
+lrhi_buffer *lrhi_device_create_buffer(lrhi_device *device,
+ const struct lrhi_buffer_desc *desc) {
+ struct lrhi_buffer *buffer =
+ lrhi_allocator_allocate(device->allocator, sizeof(struct lrhi_buffer));
+
+ VkBufferCreateInfo buffer_create_info = {
+ .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
+ .size = desc->size,
+ .usage = lrhi_buffer_usage_to_vk_buffer_usage_flags(desc->usage),
+ .sharingMode = VK_SHARING_MODE_EXCLUSIVE};
+
+ VmaAllocationCreateInfo allocation_create_info = {0};
+ if (desc->usage & lrhi_BUFFER_USAGE_TRANSFER) {
+ allocation_create_info.usage = VMA_MEMORY_USAGE_CPU_ONLY;
+ allocation_create_info.flags =
+ VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT;
+ allocation_create_info.preferredFlags =
+ VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
+ VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
+ } else if (desc->usage & lrhi_BUFFER_USAGE_UNIFORM) {
+ allocation_create_info.usage = VMA_MEMORY_USAGE_AUTO;
+    allocation_create_info.flags =
+        VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT;
+ allocation_create_info.preferredFlags =
+ VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
+ VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
+ } else {
+ allocation_create_info.usage = VMA_MEMORY_USAGE_GPU_ONLY;
+ allocation_create_info.preferredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+ }
+
+ if (vmaCreateBuffer(device->vma_allocator, &buffer_create_info,
+ &allocation_create_info, &buffer->buffer,
+ &buffer->allocation,
+ &buffer->allocation_info) != VK_SUCCESS) {
+ LRHI_LOG_ERR("Buffer allocation failed.");
+ goto fail;
+ }
+
+ return buffer;
+fail:
+  lrhi_allocator_free(device->allocator, sizeof(struct lrhi_buffer), buffer);
+  return NULL;
+}
+
+lrhi_command_buffer *lrhi_device_create_command_buffer(lrhi_device *device) {
+ lrhi_command_buffer *buffer = lrhi_allocator_allocate(
+ device->allocator, sizeof(struct lrhi_command_buffer));
+
+ VkCommandBufferAllocateInfo alloc_info = {
+ .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
+ .commandPool = device->command_pool,
+ .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,
+ .commandBufferCount = 1};
+
+ if (vkAllocateCommandBuffers(device->device, &alloc_info, &buffer->buffer) !=
+ VK_SUCCESS) {
+ LRHI_LOG_ERR("Failed to allocate command buffer.");
+ goto fail;
+ }
+
+ buffer->device = device;
+
+ return buffer;
+fail:
+  lrhi_allocator_free(device->allocator, sizeof(struct lrhi_command_buffer),
+                      buffer);
+  return NULL;
+}
+void lrhi_device_destroy_command_buffer(lrhi_device *device,
+                                        lrhi_command_buffer *buffer) {
+  // Return the VkCommandBuffer to the pool; freeing only the wrapper would
+  // leak pool memory for every one-time command buffer.
+  vkFreeCommandBuffers(device->device, device->command_pool, 1,
+                       &buffer->buffer);
+  lrhi_allocator_free(device->allocator, sizeof(struct lrhi_command_buffer),
+                      buffer);
+}
+static lrhi_command_buffer *
+record_one_time_command_buffer(lrhi_device *device) {
+ lrhi_command_buffer *command_buffer = lrhi_allocator_allocate(
+ device->allocator, sizeof(struct lrhi_command_buffer));
+ VkCommandBufferAllocateInfo command_buffer_allocate_info = {
+ .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
+ .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,
+ .commandPool = device->command_pool,
+ .commandBufferCount = 1};
+ vkAllocateCommandBuffers(device->device, &command_buffer_allocate_info,
+ &command_buffer->buffer);
+ command_buffer->device = device;
+
+ VkCommandBufferBeginInfo begin_info = {
+ .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
+ .flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT};
+ vkBeginCommandBuffer(command_buffer->buffer, &begin_info);
+ return command_buffer;
+}
+static void submit_one_time_command_buffer(lrhi_device *device,
+ lrhi_command_buffer *cmd_buf) {
+ vkEndCommandBuffer(cmd_buf->buffer);
+
+ VkSubmitInfo submit_info = {.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
+ .commandBufferCount = 1,
+ .pCommandBuffers = &cmd_buf->buffer};
+ vkQueueSubmit(device->graphics_queue, 1, &submit_info, VK_NULL_HANDLE);
+ vkQueueWaitIdle(device->graphics_queue);
+ lrhi_device_destroy_command_buffer(device, cmd_buf);
+}
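+// Note: vkQueueWaitIdle makes one-time submissions fully synchronous, which
+// is fine for load-time uploads but too costly for per-frame work.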
+lrhi_buffer *
+lrhi_device_create_buffer_init(lrhi_device *device,
+ const struct lrhi_buffer_init_desc *desc) {
+ lrhi_buffer *staging_buffer = lrhi_device_create_buffer(
+ device, &(const struct lrhi_buffer_desc){
+ .usage = lrhi_BUFFER_USAGE_TRANSFER, .size = desc->size});
+ if (!staging_buffer) {
+ LRHI_LOG_ERR("Staging buffer creation failed.");
+ goto fail;
+ }
+
+ void *mapped_staging_buffer = lrhi_device_map_buffer(device, staging_buffer);
+ memcpy(mapped_staging_buffer, desc->content, desc->size);
+ lrhi_device_unmap_buffer(device, staging_buffer);
+
+ lrhi_buffer *buffer = lrhi_device_create_buffer(
+ device, &(const struct lrhi_buffer_desc){.size = desc->size,
+ .usage = desc->usage});
+ if (!buffer) {
+ LRHI_LOG_ERR("Buffer creation failed.");
+ goto destroy_staging_buffer;
+ }
+
+ lrhi_command_buffer *command_buffer = record_one_time_command_buffer(device);
+ struct lrhi_buffer_copy copy = {
+ .src = staging_buffer,
+ .dst = buffer,
+ .region_count = 1,
+ .regions = &(const struct lrhi_buffer_copy_region){.size = desc->size}};
+ lrhi_command_copy_buffer_to_buffer(command_buffer, &copy);
+ submit_one_time_command_buffer(device, command_buffer);
+ lrhi_device_destroy_buffer(device, staging_buffer);
+
+ return buffer;
+destroy_staging_buffer:
+  lrhi_device_destroy_buffer(device, staging_buffer);
+fail:
+ return NULL;
+}
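+// Typical use (sketch, assuming a local `vertices` array):
+//   lrhi_buffer *vbo = lrhi_device_create_buffer_init(
+//       device, &(const struct lrhi_buffer_init_desc){
+//                   .usage = lrhi_BUFFER_USAGE_VERTEX,
+//                   .size = sizeof(vertices),
+//                   .content = vertices});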
+void lrhi_device_destroy_buffer(lrhi_device *device, lrhi_buffer *buffer) {
+ vmaDestroyBuffer(device->vma_allocator, buffer->buffer, buffer->allocation);
+ lrhi_allocator_free(device->allocator, sizeof(struct lrhi_buffer), buffer);
+}
+void *lrhi_device_map_buffer(lrhi_device *device, lrhi_buffer *buffer) {
+ void *mapped = NULL;
+ vmaMapMemory(device->vma_allocator, buffer->allocation, &mapped);
+
+ return mapped;
+}
+void lrhi_device_unmap_buffer(lrhi_device *device, lrhi_buffer *buffer) {
+ vmaUnmapMemory(device->vma_allocator, buffer->allocation);
+}
+uint32_t lrhi_buffer_size(lrhi_buffer *buffer) {
+  return (uint32_t)buffer->allocation_info.size;
+}
+
+static VkImageUsageFlags
+lrhi_texture_usage_to_vk_image_usage_flags(lrhi_texture_usage usage) {
+ VkImageUsageFlags flags = 0;
+ if (usage & lrhi_TEXTURE_USAGE_COPY_SRC) {
+ flags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
+ }
+
+ if (usage & lrhi_TEXTURE_USAGE_COPY_DST) {
+ flags |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+ }
+
+ if (usage & lrhi_TEXTURE_USAGE_TEXTURE_BINDING) {
+ flags |= VK_IMAGE_USAGE_SAMPLED_BIT;
+ }
+
+ if (usage & lrhi_TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT) {
+ flags |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
+ }
+
+ return flags;
+}
+lrhi_texture *lrhi_device_create_texture(lrhi_device *device,
+ const struct lrhi_texture_desc *desc) {
+ lrhi_texture *texture =
+ lrhi_allocator_allocate(device->allocator, sizeof(struct lrhi_texture));
+
+ VkImageCreateInfo image_create_info = {
+ .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
+ .imageType = VK_IMAGE_TYPE_2D,
+ .extent = {.width = desc->width, .height = desc->height, .depth = 1},
+ .mipLevels = desc->mip_level_count,
+ .arrayLayers = 1,
+ .format = lrhi_format_to_vk_format(desc->format),
+ .tiling = VK_IMAGE_TILING_OPTIMAL,
+ .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
+ .usage = lrhi_texture_usage_to_vk_image_usage_flags(desc->usage),
+ .samples = desc->sample_count,
+ .sharingMode = VK_SHARING_MODE_EXCLUSIVE};
+
+ VmaAllocationCreateInfo allocation_create_info = {
+ .usage = VMA_MEMORY_USAGE_AUTO,
+ .preferredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT};
+
+ if (vmaCreateImage(device->vma_allocator, &image_create_info,
+ &allocation_create_info, &texture->image,
+ &texture->allocation,
+ &texture->allocation_info) != VK_SUCCESS) {
+ LRHI_LOG_ERR("Failed to create texture.");
+ goto fail;
+ }
+
+ return texture;
+fail:
+ lrhi_allocator_free(device->allocator, sizeof(struct lrhi_texture), texture);
+ return NULL;
+}
+void lrhi_device_destroy_texture(lrhi_device *device, lrhi_texture *texture) {
+ vmaDestroyImage(device->vma_allocator, texture->image, texture->allocation);
+ lrhi_allocator_free(device->allocator, sizeof(struct lrhi_texture), texture);
+}
+static void transition_image_layout(lrhi_device *device, VkImage image,
+ VkImageLayout old_layout,
+ VkImageLayout new_layout) {
+ lrhi_command_buffer *cmdbuf = record_one_time_command_buffer(device);
+
+ VkImageMemoryBarrier barrier = {
+ .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
+ .oldLayout = old_layout,
+ .newLayout = new_layout,
+ .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+ .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+ .image = image,
+ .subresourceRange = {
+ .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
+ .baseMipLevel = 0,
+ .levelCount = 1,
+ .baseArrayLayer = 0,
+ .layerCount = 1,
+ }};
+
+ VkPipelineStageFlags source_stage;
+ VkPipelineStageFlags destination_stage;
+ if (old_layout == VK_IMAGE_LAYOUT_UNDEFINED &&
+ new_layout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
+ barrier.srcAccessMask = 0;
+ barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
+ source_stage = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
+ destination_stage = VK_PIPELINE_STAGE_TRANSFER_BIT;
+ } else if (old_layout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL &&
+ new_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) {
+ barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
+ barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
+ source_stage = VK_PIPELINE_STAGE_TRANSFER_BIT;
+ destination_stage = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
+ } else {
+ LRHI_PANIC("Unsupported layout transition");
+ }
+
+ vkCmdPipelineBarrier(cmdbuf->buffer, source_stage, destination_stage, 0, 0,
+ NULL, 0, NULL, 1, &barrier);
+
+ submit_one_time_command_buffer(device, cmdbuf);
+}
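+/* Uploads texel data through a host-visible staging buffer: stage the bytes,
+ * transition the image to TRANSFER_DST, copy, then transition it to
+ * SHADER_READ_ONLY for sampling. */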
+void lrhi_device_write_texture(
+ lrhi_device *device, const struct lrhi_texel_copy_texture_desc *texture,
+ unsigned char *data, size_t data_size,
+ const struct lrhi_texel_copy_buffer_layout *layout, uint32_t width,
+ uint32_t height) {
+
+ lrhi_buffer *staging_buffer = lrhi_device_create_buffer(
+ device, &(const struct lrhi_buffer_desc){
+ .usage = lrhi_BUFFER_USAGE_TRANSFER, .size = data_size});
+
+ void *mapped_staging_buffer = lrhi_device_map_buffer(device, staging_buffer);
+ memcpy(mapped_staging_buffer, data, data_size);
+ lrhi_device_unmap_buffer(device, staging_buffer);
+
+ transition_image_layout(device, texture->texture->image,
+ VK_IMAGE_LAYOUT_UNDEFINED,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
+ lrhi_command_buffer *cmdbuf = record_one_time_command_buffer(device);
+ lrhi_command_copy_buffer_to_texture(
+ cmdbuf,
+ &(const struct lrhi_texel_copy_buffer_desc){.buffer = staging_buffer,
+ .layout = layout},
+ texture, width, height);
+ submit_one_time_command_buffer(device, cmdbuf);
+ transition_image_layout(device, texture->texture->image,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
+
+ lrhi_device_destroy_buffer(device, staging_buffer);
+}
+
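+/* Creates a 2D image view with identity swizzles over the image's base array
+ * layer. */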
+static bool create_image_view(lrhi_device *device, VkImage image,
+ VkFormat format, VkImageView *image_view,
+ VkImageAspectFlags aspect_flags,
+ uint32_t mip_level_count) {
+ VkImageViewCreateInfo create_info = {
+ .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
+ .image = image,
+ .viewType = VK_IMAGE_VIEW_TYPE_2D,
+ .format = format,
+ .components = {VK_COMPONENT_SWIZZLE_IDENTITY,
+ VK_COMPONENT_SWIZZLE_IDENTITY,
+ VK_COMPONENT_SWIZZLE_IDENTITY,
+ VK_COMPONENT_SWIZZLE_IDENTITY},
+ .subresourceRange = {.aspectMask = aspect_flags,
+ .levelCount = mip_level_count,
+ .layerCount = 1}};
+ if (vkCreateImageView(device->device, &create_info, NULL, image_view) !=
+ VK_SUCCESS) {
+ return false;
+ }
+
+ return true;
+}
+
+static VkImageAspectFlags
+lrhi_image_aspect_to_vk_image_aspect_flags(lrhi_image_aspect aspect) {
+ VkImageAspectFlags flags = 0;
+
+ if (aspect & lrhi_IMAGE_ASPECT_COLOR) {
+ flags |= VK_IMAGE_ASPECT_COLOR_BIT;
+ }
+
+ if (aspect & lrhi_IMAGE_ASPECT_DEPTH) {
+ flags |= VK_IMAGE_ASPECT_DEPTH_BIT;
+ }
+
+ return flags;
+}
+lrhi_texture_view *
+lrhi_device_create_texture_view(lrhi_device *device,
+ const struct lrhi_texture_view_desc *desc) {
+ lrhi_texture_view *view = lrhi_allocator_allocate(
+ device->allocator, sizeof(struct lrhi_texture_view));
+
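+  /* NOTE: views currently cover only the base mip level (level count of 1). */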
+ if (!create_image_view(
+ device, desc->texture->image, lrhi_format_to_vk_format(desc->format),
+ &view->view, lrhi_image_aspect_to_vk_image_aspect_flags(desc->aspect),
+ 1)) {
+ LRHI_LOG_ERR("Image view creation failed.");
+ goto fail;
+ }
+
+ return view;
+
+fail:
+ lrhi_allocator_free(device->allocator, sizeof(struct lrhi_texture_view),
+ view);
+ return NULL;
+}
+void lrhi_device_destroy_texture_view(lrhi_device *device,
+ lrhi_texture_view *texture_view) {
+ vkDestroyImageView(device->device, texture_view->view, NULL);
+ lrhi_allocator_free(device->allocator, sizeof(struct lrhi_texture_view),
+ texture_view);
+}
+
+static VkFilter lrhi_filter_to_vk_filter(enum lrhi_filter filter) {
+ switch (filter) {
+ case lrhi_FILTER_LINEAR:
+ default:
+ return VK_FILTER_LINEAR;
+ case lrhi_FILTER_NEAREST:
+ return VK_FILTER_NEAREST;
+ }
+}
+static VkSamplerAddressMode
+lrhi_sampler_address_mode_to_vk_sampler_address_mode(
+ enum lrhi_sampler_address_mode sampler_address_mode) {
+ switch (sampler_address_mode) {
+ case lrhi_SAMPLER_ADDRESS_MODE_REPEAT:
+ default:
+ return VK_SAMPLER_ADDRESS_MODE_REPEAT;
+ case lrhi_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT:
+ return VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT;
+ case lrhi_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE:
+ return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
+ case lrhi_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER:
+ return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER;
+ }
+}
+lrhi_sampler *lrhi_device_create_sampler(lrhi_device *device,
+ const struct lrhi_sampler_desc *desc) {
+ lrhi_sampler *sampler =
+ lrhi_allocator_allocate(device->allocator, sizeof(struct lrhi_sampler));
+
+ VkPhysicalDeviceProperties properties = {0};
+ vkGetPhysicalDeviceProperties(device->physical_device, &properties);
+
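+  /* NOTE: anisotropic filtering is enabled unconditionally; this assumes the
+   * samplerAnisotropy device feature was enabled at device creation. */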
+ VkSamplerCreateInfo sampler_info = {
+ .sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,
+ .magFilter = lrhi_filter_to_vk_filter(desc->mag_filter),
+ .minFilter = lrhi_filter_to_vk_filter(desc->min_filter),
+ .addressModeU = lrhi_sampler_address_mode_to_vk_sampler_address_mode(
+ desc->address_mode_u),
+ .addressModeV = lrhi_sampler_address_mode_to_vk_sampler_address_mode(
+ desc->address_mode_v),
+ .addressModeW = VK_SAMPLER_ADDRESS_MODE_REPEAT,
+ .anisotropyEnable = VK_TRUE,
+ .maxAnisotropy = properties.limits.maxSamplerAnisotropy,
+ .borderColor = VK_BORDER_COLOR_INT_OPAQUE_BLACK,
+ .unnormalizedCoordinates = VK_FALSE,
+ .compareEnable = VK_FALSE,
+ .compareOp = VK_COMPARE_OP_ALWAYS,
+ .mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR,
+ .mipLodBias = 0.f,
+ .minLod = 0.f,
+ .maxLod = 0.f};
+
+ if (vkCreateSampler(device->device, &sampler_info, NULL, &sampler->sampler) !=
+ VK_SUCCESS) {
+ LRHI_LOG_ERR("Texture sampler creation failed.");
+ goto fail;
+ }
+
+ return sampler;
+
+fail:
+ lrhi_allocator_free(device->allocator, sizeof(struct lrhi_sampler), sampler);
+ return NULL;
+}
+void lrhi_device_destroy_sampler(lrhi_device *device, lrhi_sampler *sampler) {
+ vkDestroySampler(device->device, sampler->sampler, NULL);
+ lrhi_allocator_free(device->allocator, sizeof(struct lrhi_sampler), sampler);
+}
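+/* Begins recording into this frame's per-frame command buffer. The frame's
+ * in-flight fence is reset here so the subsequent submit can signal it. */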
+lrhi_command_buffer *lrhi_command_buffer_begin(lrhi_device *device) {
+ lrhi_command_buffer *command_buffer =
+ &device->command_buffers[device->current_frame];
+ vkResetFences(device->device, 1,
+ &device->in_flight_fence[device->current_frame]);
+ vkResetCommandBuffer(command_buffer->buffer, 0);
+
+ VkCommandBufferBeginInfo begin_info = {
+ .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO};
+
+ if (vkBeginCommandBuffer(command_buffer->buffer, &begin_info) != VK_SUCCESS) {
+ LRHI_LOG_ERR("Failed to begin recording command buffer.");
+ goto fail;
+ }
+
+ return command_buffer;
+fail:
+ return NULL;
+}
+bool lrhi_command_buffer_end(lrhi_command_buffer *buffer) {
+ return vkEndCommandBuffer(buffer->buffer) == VK_SUCCESS;
+}
+
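+/* Submits the frame's commands: waits on the image-available semaphore at the
+ * color-output stage and signals the per-image render-finished semaphore. */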
+bool lrhi_device_submit_command_buffer(lrhi_device *device,
+ lrhi_command_buffer *buffer,
+ lrhi_surface *surface) {
+ VkSemaphore wait_semaphores[] = {
+ surface->image_available_semaphore[device->current_frame]};
+ VkPipelineStageFlags wait_stages[] = {
+ VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT};
+ VkSemaphore signal_semaphores[] = {
+ surface->render_finished_semaphore[surface->image_index]};
+ VkSubmitInfo submit = {.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
+ .waitSemaphoreCount = 1,
+ .pWaitSemaphores = wait_semaphores,
+ .pWaitDstStageMask = wait_stages,
+ .commandBufferCount = 1,
+ .pCommandBuffers = &buffer->buffer,
+ .signalSemaphoreCount = 1,
+ .pSignalSemaphores = signal_semaphores};
+ if (vkQueueSubmit(device->graphics_queue, 1, &submit,
+ device->in_flight_fence[device->current_frame]) !=
+ VK_SUCCESS) {
+ LRHI_LOG_ERR("Failed to submit draw command buffer.");
+ goto fail;
+ }
+
+ return true;
+fail:
+ return false;
+}
+
+void lrhi_command_begin_render_pass(
+ lrhi_command_buffer *buffer,
+ const struct lrhi_render_pass_begin_desc *desc) {
+
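+  /* Clear value 0 covers the color attachment, value 1 the depth/stencil
+   * attachment (depth = 1.0, stencil = 0). */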
+ VkClearValue clear_values[2] = {0};
+ clear_values[0] =
+ (VkClearValue){{{desc->clear_color.r, desc->clear_color.g,
+ desc->clear_color.b, desc->clear_color.a}}};
+ clear_values[1] = (VkClearValue){.depthStencil = {1.f, 0}};
+
+ enum { MAX_ATTACHMENT_COUNT = 64 };
+ if (desc->color_attachment_count >= MAX_ATTACHMENT_COUNT - 1) {
+ LRHI_PANIC("Too many color attachments for render pass");
+ }
+
+ VkImageView attachments[MAX_ATTACHMENT_COUNT] = {0};
+ int attachment_count = 0;
+ for (uint32_t i = 0; i < desc->color_attachment_count; i++) {
+ attachments[attachment_count++] = desc->color_attachments[i].view->view;
+ }
+
+ if (desc->depth_stencil_attachment) {
+ attachments[attachment_count++] =
+ desc->depth_stencil_attachment->view->view;
+ }
+
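+  /* Framebuffers are fetched from a cache (created on first use) for this
+   * render pass / attachment combination, sized to the surface extent. */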
+ VkFramebuffer framebuffer = framebuffer_cache_get_or_create(
+ buffer->device, desc->pass->pass, attachments, attachment_count,
+ desc->surface->extent.width, desc->surface->extent.height);
+
+ VkRenderPassBeginInfo render_pass_info = {
+ .sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
+ .renderPass = desc->pass->pass,
+ .framebuffer = framebuffer,
+      .renderArea.offset = {0, 0},
+ .renderArea.extent = desc->surface->extent,
+ .clearValueCount = 2,
+ .pClearValues = clear_values};
+
+ vkCmdBeginRenderPass(buffer->buffer, &render_pass_info,
+ VK_SUBPASS_CONTENTS_INLINE);
+}
+void lrhi_command_end_render_pass(lrhi_command_buffer *buffer) {
+ vkCmdEndRenderPass(buffer->buffer);
+}
+void lrhi_command_copy_buffer_to_buffer(lrhi_command_buffer *cmd_buf,
+ const struct lrhi_buffer_copy *copy) {
+
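+  /* Translate the API-agnostic copy regions into VkBufferCopy entries using a
+   * transient allocation, freed once the command is recorded. */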
+ VkBufferCopy *copy_regions = lrhi_allocator_allocate(
+ cmd_buf->device->allocator, copy->region_count * sizeof(VkBufferCopy));
+ for (uint32_t i = 0; i < copy->region_count; i++) {
+ copy_regions[i].size = copy->regions[i].size;
+ copy_regions[i].srcOffset = copy->regions[i].src_offset;
+ copy_regions[i].dstOffset = copy->regions[i].dst_offset;
+ }
+
+ vkCmdCopyBuffer(cmd_buf->buffer, copy->src->buffer, copy->dst->buffer,
+ copy->region_count, copy_regions);
+
+ lrhi_allocator_free(cmd_buf->device->allocator,
+ copy->region_count * sizeof(VkBufferCopy), copy_regions);
+}
+void lrhi_command_copy_buffer_to_texture(
+ lrhi_command_buffer *cmdbuf,
+ const struct lrhi_texel_copy_buffer_desc *buffer,
+ const struct lrhi_texel_copy_texture_desc *texture, uint32_t width,
+ uint32_t height) {
+ VkBufferImageCopy region = {
+ .bufferOffset = buffer->layout->offset,
+ .bufferRowLength = buffer->layout->bytes_per_row,
+ .bufferImageHeight = buffer->layout->rows_per_image,
+ .imageSubresource =
+ {
+ .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
+ .mipLevel = texture->mip_level,
+ .baseArrayLayer = 0,
+ .layerCount = 1,
+ },
+      .imageOffset = {0, 0, 0},
+ .imageExtent = {width, height, 1},
+ };
+
+ vkCmdCopyBufferToImage(cmdbuf->buffer, buffer->buffer->buffer,
+ texture->texture->image,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
+}
+void lrhi_command_bind_pipeline(lrhi_command_buffer *buffer,
+ lrhi_pipeline *pipeline) {
+ vkCmdBindPipeline(buffer->buffer, VK_PIPELINE_BIND_POINT_GRAPHICS,
+ pipeline->pipeline);
+}
+void lrhi_command_bind_vertex_buffers(lrhi_command_buffer *cmdbuf,
+ uint32_t first_binding,
+ uint32_t binding_count,
+ lrhi_buffer **buffers,
+ uint64_t *offsets) {
+
+ enum { VK_BUFFERS_BUFFER_CAP = 16 };
+ VkBuffer vk_buffers[VK_BUFFERS_BUFFER_CAP];
+ assert(binding_count <= VK_BUFFERS_BUFFER_CAP);
+ for (uint32_t i = 0; i < binding_count; i++) {
+ vk_buffers[i] = buffers[i]->buffer;
+ }
+
+ vkCmdBindVertexBuffers(cmdbuf->buffer, first_binding, binding_count,
+ vk_buffers, offsets);
+}
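+/* NOTE: the index type is hard-coded to 16-bit indices. */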
+void lrhi_command_bind_index_buffer(lrhi_command_buffer *cmdbuf,
+ lrhi_buffer *index_buffer,
+ uint64_t offset) {
+ vkCmdBindIndexBuffer(cmdbuf->buffer, index_buffer->buffer, offset,
+ VK_INDEX_TYPE_UINT16);
+}
+void lrhi_command_bind_descriptor_set(
+ lrhi_command_buffer *cmdbuf, lrhi_pipeline_layout *pipeline_layout,
+ uint32_t first_set, uint32_t descriptor_set_count,
+ const lrhi_descriptor_set *descriptor_sets, uint32_t dynamic_offset_count,
+ const uint32_t *dynamic_offsets) {
+
+  enum { DESCRIPTOR_SET_BUF_CAP = 256 };
+  VkDescriptorSet sets[DESCRIPTOR_SET_BUF_CAP];
+  assert(descriptor_set_count <= DESCRIPTOR_SET_BUF_CAP);
+  for (uint32_t i = 0; i < descriptor_set_count; i++) {
+    sets[i] = descriptor_sets[i].set;
+  }
+
+ vkCmdBindDescriptorSets(cmdbuf->buffer, VK_PIPELINE_BIND_POINT_GRAPHICS,
+ pipeline_layout->layout, first_set,
+ descriptor_set_count, sets, dynamic_offset_count,
+ dynamic_offsets);
+}
+
+void lrhi_command_set_viewport(lrhi_command_buffer *buffer,
+ const struct lrhi_viewport *viewport) {
+ vkCmdSetViewport(buffer->buffer, 0, 1,
+ &(const VkViewport){.x = viewport->x,
+ .y = viewport->y,
+ .width = viewport->width,
+ .height = viewport->height,
+ .minDepth = viewport->min_depth,
+ .maxDepth = viewport->max_depth});
+}
+void lrhi_command_set_scissor(lrhi_command_buffer *buffer,
+ const struct lrhi_scissor *scissor) {
+ vkCmdSetScissor(
+ buffer->buffer, 0, 1,
+ &(const VkRect2D){
+ .offset = {scissor->x_offset, scissor->y_offset},
+ .extent = {.width = scissor->width, .height = scissor->height}});
+}
+void lrhi_command_draw(lrhi_command_buffer *buffer, uint32_t vertex_count,
+ uint32_t instance_count, uint32_t first_vertex,
+ uint32_t first_instance) {
+ vkCmdDraw(buffer->buffer, vertex_count, instance_count, first_vertex,
+ first_instance);
+}
+void lrhi_command_draw_indexed(lrhi_command_buffer *cmdbuf,
+ uint32_t index_count, uint32_t instance_count,
+ uint32_t first_index, uint32_t vertex_offset,
+ uint32_t first_instance) {
+ vkCmdDrawIndexed(cmdbuf->buffer, index_count, instance_count, first_index,
+ vertex_offset, first_instance);
+}
+static uint32_t clamp_uint32(uint32_t min, uint32_t max, uint32_t value) {
+ return value < min ? min : value > max ? max : value;
+}
+
+static void lrhi_surface_destroy_swapchain(lrhi_surface *surface,
+ lrhi_device *device) {
+ for (uint32_t img_view_index = 0; img_view_index < surface->img_count;
+ img_view_index++) {
+ vkDestroyImageView(device->device,
+ surface->image_views[img_view_index].view, NULL);
+ }
+ vkDestroySwapchainKHR(device->device, surface->swapchain, NULL);
+}
+
+static bool create_swapchain_image_views(VkImageView *swapchain_image_views,
+ const VkImage *swapchain_images,
+ uint32_t swapchain_image_count,
+ VkFormat swapchain_fmt,
+ lrhi_device *device) {
+ uint32_t swapchain_image_index;
+ for (swapchain_image_index = 0; swapchain_image_index < swapchain_image_count;
+ swapchain_image_index++) {
+ VkImage img = swapchain_images[swapchain_image_index];
+ if (!create_image_view(device, img, swapchain_fmt,
+ &swapchain_image_views[swapchain_image_index],
+ VK_IMAGE_ASPECT_COLOR_BIT, 1)) {
+ LRHI_LOG("Image view creation failed");
+ goto err;
+ }
+ }
+
+ return true;
+err:
+ for (uint32_t to_remove_index = 0; to_remove_index < swapchain_image_index;
+ to_remove_index++) {
+ vkDestroyImageView(device->device, swapchain_image_views[to_remove_index],
+ NULL);
+ }
+ return false;
+}
+
+static bool lrhi_surface_create_swapchain(
+ lrhi_surface *surface, struct lrhi_device *device,
+ const struct lrhi_surface_configuration *configuration) {
+
+ struct swapchain_support_details support_details = {0};
+ if (!get_swapchain_support_details(device->physical_device, surface->surface,
+ &support_details)) {
+ LRHI_LOG("Couldn't query swapchain support details from device");
+ goto err;
+ }
+
+  // Pick the swapchain format: prefer BGRA8 sRGB, otherwise fall back to the
+  // first format the surface reports.
+  VkSurfaceFormatKHR surface_fmt = support_details.formats[0];
+ for (uint32_t available_format_index = 0;
+ available_format_index < support_details.format_count;
+ available_format_index++) {
+ const VkSurfaceFormatKHR *available_format =
+ &support_details.formats[available_format_index];
+ if (available_format->format == VK_FORMAT_B8G8R8A8_SRGB &&
+ available_format->colorSpace == VK_COLOR_SPACE_SRGB_NONLINEAR_KHR) {
+ surface_fmt = *available_format;
+ break;
+ }
+ }
+
+ // Pick present mode
+ VkPresentModeKHR present_mode = VK_PRESENT_MODE_FIFO_KHR;
+ for (uint32_t available_mode_index = 0;
+ available_mode_index < support_details.present_mode_count;
+ available_mode_index++) {
+ VkPresentModeKHR available_mode =
+ support_details.present_modes[available_mode_index];
+ if (available_mode == VK_PRESENT_MODE_MAILBOX_KHR) {
+ present_mode = available_mode;
+ break;
+ }
+ }
+
+ // Pick swapchain extent
+ VkExtent2D extent;
+ if (support_details.capabilities.currentExtent.width != UINT32_MAX) {
+ extent = support_details.capabilities.currentExtent;
+ } else {
+ VkExtent2D actual_extent = {configuration->width, configuration->height};
+ actual_extent.width = clamp_uint32(
+ support_details.capabilities.minImageExtent.width,
+ support_details.capabilities.maxImageExtent.width, actual_extent.width);
+ actual_extent.height =
+ clamp_uint32(support_details.capabilities.minImageExtent.height,
+ support_details.capabilities.maxImageExtent.height,
+ actual_extent.height);
+ extent = actual_extent;
+ }
+
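+  // Request one image more than the minimum to reduce driver stalls, clamped
+  // to the supported maximum (0 means "no maximum").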
+ uint32_t image_count = support_details.capabilities.minImageCount + 1;
+ if (support_details.capabilities.maxImageCount > 0 &&
+ image_count > support_details.capabilities.maxImageCount) {
+ image_count = support_details.capabilities.maxImageCount;
+ }
+
+ VkSwapchainCreateInfoKHR create_info = {
+ .sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR,
+ .surface = surface->surface,
+ .minImageCount = image_count,
+ .imageFormat = surface_fmt.format,
+ .imageColorSpace = surface_fmt.colorSpace,
+ .imageExtent = extent,
+ .imageArrayLayers = 1,
+ .imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT};
+
+ struct queue_family_indices found_queue_family_indices =
+ queue_family_indices_find_for_device(device->physical_device,
+ surface->surface);
+
+ uint32_t queue_family_indices[] = {found_queue_family_indices.graphics_family,
+ found_queue_family_indices.present_family};
+ if (found_queue_family_indices.graphics_family !=
+ found_queue_family_indices.present_family) {
+ create_info.imageSharingMode = VK_SHARING_MODE_CONCURRENT;
+ create_info.queueFamilyIndexCount = 2;
+ create_info.pQueueFamilyIndices = queue_family_indices;
+ } else {
+ create_info.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ }
+
+ create_info.preTransform = support_details.capabilities.currentTransform;
+ create_info.compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
+ create_info.presentMode = present_mode;
+ create_info.clipped = VK_TRUE;
+ create_info.oldSwapchain = VK_NULL_HANDLE;
+
+ if (vkCreateSwapchainKHR(device->device, &create_info, NULL,
+ &surface->swapchain) != VK_SUCCESS) {
+ LRHI_LOG("Swapchain creation failed!");
+ goto err;
+ }
+
+ if (vkGetSwapchainImagesKHR(device->device, surface->swapchain,
+ &surface->img_count, NULL) != VK_SUCCESS) {
+ LRHI_LOG("Couldn't get swapchain image count");
+ goto destroy_swapchain;
+ }
+
+ if (surface->img_count > VK_SWAPCHAIN_IMAGE_BUFFER_CAPACITY) {
+ LRHI_LOG("Swapchain image array cannot fit all %u swapchain images",
+ surface->img_count);
+ goto destroy_swapchain;
+ }
+
+ VkImage images[VK_SWAPCHAIN_IMAGE_BUFFER_CAPACITY] = {0};
+ if (vkGetSwapchainImagesKHR(device->device, surface->swapchain,
+ &surface->img_count, images) != VK_SUCCESS) {
+ LRHI_LOG("Couldn't get swapchain images");
+ goto destroy_swapchain;
+ }
+
+ VkImageView image_views[VK_SWAPCHAIN_IMAGE_BUFFER_CAPACITY] = {0};
+ if (!create_swapchain_image_views(image_views, images, surface->img_count,
+ surface_fmt.format, device)) {
+ LRHI_LOG("Couldn't create swapchain image views.");
+ goto destroy_swapchain;
+ }
+
+ for (uint32_t i = 0; i < surface->img_count; i++) {
+ surface->images[i].image = images[i];
+ surface->image_views[i].view = image_views[i];
+ }
+
+ surface->extent = extent;
+ surface->fmt = surface_fmt.format;
+ surface->resized = false;
+ return true;
+destroy_swapchain:
+ vkDestroySwapchainKHR(device->device, surface->swapchain, NULL);
+err:
+ return false;
+}
+void lrhi_surface_reconfigure(
+ lrhi_surface *surface, lrhi_device *device,
+ const struct lrhi_surface_configuration *surface_configuration) {
+ lrhi_surface_destroy_swapchain(surface, device);
+ lrhi_surface_create_swapchain(surface, device, surface_configuration);
+ surface->surface_reconfigured_callback(
+ surface->extent.width, surface->extent.height,
+ surface->surface_reconfigured_user_data);
+}
+
+bool lrhi_surface_configure(
+ lrhi_surface *surface, struct lrhi_device *device,
+ const struct lrhi_surface_configuration *configuration) {
+
+ if (!lrhi_surface_create_swapchain(surface, device, configuration)) {
+ LRHI_LOG_ERR("Swapchain creation failed");
+ goto fail;
+ }
+
+ VkSemaphoreCreateInfo semaphore_info = {
+ .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO};
+
+ uint32_t semaphore_frame_idx = 0;
+ for (semaphore_frame_idx = 0;
+ semaphore_frame_idx < VK_SWAPCHAIN_IMAGE_BUFFER_CAPACITY;
+ semaphore_frame_idx++) {
+ if ((semaphore_frame_idx < MAX_FRAME_IN_FLIGHT &&
+ vkCreateSemaphore(
+ device->device, &semaphore_info, NULL,
+ &surface->image_available_semaphore[semaphore_frame_idx]) !=
+ VK_SUCCESS) ||
+ vkCreateSemaphore(
+ device->device, &semaphore_info, NULL,
+ &surface->render_finished_semaphore[semaphore_frame_idx]) !=
+ VK_SUCCESS) {
+
+ LRHI_LOG_ERR("Surface semaphores creation failed");
+ goto destroy_semaphores;
+ }
+ }
+
+ return true;
+destroy_semaphores:
+  for (uint32_t i = 0; i < semaphore_frame_idx; i++) {
+    if (i < MAX_FRAME_IN_FLIGHT) {
+      vkDestroySemaphore(device->device,
+                         surface->image_available_semaphore[i], NULL);
+    }
+    vkDestroySemaphore(device->device,
+                       surface->render_finished_semaphore[i], NULL);
+  }
+ lrhi_surface_destroy_swapchain(surface, device);
+
+fail:
+ return false;
+}
+void lrhi_surface_resize(lrhi_surface *surface, int32_t width, int32_t height) {
+ surface->new_extent.width = width;
+ surface->new_extent.height = height;
+ surface->resized = true;
+}
+lrhi_texture_view *lrhi_surface_acquire_next_image(lrhi_surface *surface,
+ lrhi_device *device) {
+ vkWaitForFences(device->device, 1,
+ &device->in_flight_fence[device->current_frame], VK_TRUE,
+ UINT64_MAX);
+ VkResult acquire_result = vkAcquireNextImageKHR(
+ device->device, surface->swapchain, UINT64_MAX,
+ surface->image_available_semaphore[device->current_frame], VK_NULL_HANDLE,
+ &surface->image_index);
+
+ if (acquire_result == VK_ERROR_OUT_OF_DATE_KHR) {
+ lrhi_surface_reconfigure(
+ surface, device,
+ &(const struct lrhi_surface_configuration){
+ .width = surface->extent.width, .height = surface->extent.height});
+ } else if (acquire_result != VK_SUCCESS &&
+ acquire_result != VK_SUBOPTIMAL_KHR) {
+ LRHI_LOG_ERR("Failed to acquire swapchain image");
+ goto fail;
+ }
+
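+  /* NOTE: if the swapchain was just recreated above, the acquired image index
+   * refers to the old swapchain; a more robust implementation would retry the
+   * acquire before continuing. */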
+ vkResetFences(device->device, 1,
+ &device->in_flight_fence[device->current_frame]);
+
+ return &surface->image_views[surface->image_index];
+fail:
+ return NULL;
+}
+bool lrhi_surface_present(lrhi_device *device, lrhi_surface *surface) {
+ VkSwapchainKHR swapchains[] = {surface->swapchain};
+ VkSemaphore signal_semaphores[] = {
+ surface->render_finished_semaphore[surface->image_index]};
+
+ VkPresentInfoKHR present_info = {
+      .sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,
+ .waitSemaphoreCount = 1,
+ .pWaitSemaphores = signal_semaphores,
+ .swapchainCount = 1,
+ .pSwapchains = swapchains,
+ .pImageIndices = &surface->image_index,
+ };
+
+ device->current_frame = (device->current_frame + 1) % MAX_FRAME_IN_FLIGHT;
+ VkResult present_result =
+ vkQueuePresentKHR(device->present_queue, &present_info);
+ if (present_result == VK_ERROR_OUT_OF_DATE_KHR ||
+ present_result == VK_SUBOPTIMAL_KHR || surface->resized) {
+ surface->resized = false;
+ vkDeviceWaitIdle(device->device);
+ framebuffer_cache_clear(device);
+ lrhi_surface_reconfigure(
+ surface, device,
+ &(const struct lrhi_surface_configuration){
+ .width = surface->extent.width, .height = surface->extent.height});
+ } else if (present_result != VK_SUCCESS) {
+ LRHI_LOG_ERR("Failed to present swapchain image");
+ goto fail;
+ }
+
+ return true;
+fail:
+ return false;
+}
+
+void lrhi_instance_destroy_surface(lrhi_instance *instance, lrhi_device *device,
+ lrhi_surface *surface) {
+ for (int i = 0; i < VK_SWAPCHAIN_IMAGE_BUFFER_CAPACITY; i++) {
+ if (i < MAX_FRAME_IN_FLIGHT) {
+ vkDestroySemaphore(device->device, surface->image_available_semaphore[i],
+ NULL);
+ }
+ vkDestroySemaphore(device->device, surface->render_finished_semaphore[i],
+ NULL);
+ }
+
+  if (surface->swapchain != VK_NULL_HANDLE) {
+ lrhi_surface_destroy_swapchain(surface, device);
+ }
+
+ vkDestroySurfaceKHR(instance->instance, surface->surface, NULL);
+ lrhi_allocator_free(&instance->allocator, sizeof(struct lrhi_surface),
+ surface);
+}
diff --git a/src/lrhi.c b/src/lrhi.c
new file mode 100644
index 0000000..38eed3a
--- /dev/null
+++ b/src/lrhi.c
@@ -0,0 +1,55 @@
+#include "lrhi.h"
+
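+/* Thin wrappers dispatching through the user-provided allocator callbacks. */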
+void *lrhi_allocator_allocate(struct lrhi_allocator *allocator,
+ ptrdiff_t size) {
+ return allocator->allocate(size, allocator->ctx);
+}
+void lrhi_allocator_free(struct lrhi_allocator *allocator, ptrdiff_t size,
+ void *ptr) {
+ allocator->free(ptr, size, allocator->ctx);
+}
+
+struct lrhi_arena {
+  unsigned char *data;
+  ptrdiff_t allocated;
+  ptrdiff_t capacity;
+};
+
+lrhi_arena *lrhi_arena_create(struct lrhi_allocator *allocator,
+ ptrdiff_t arena_size) {
+ lrhi_arena *arena = lrhi_allocator_allocate(allocator, sizeof(lrhi_arena));
+ arena->data = lrhi_allocator_allocate(allocator, arena_size);
+ arena->allocated = 0;
+ arena->capacity = arena_size;
+ return arena;
+}
+void lrhi_arena_destroy(struct lrhi_allocator *allocator, lrhi_arena *arena) {
+ lrhi_allocator_free(allocator, arena->capacity, arena->data);
+ lrhi_allocator_free(allocator, sizeof(lrhi_arena), arena);
+}
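+/* Bump allocation: panics when the arena is exhausted. NOTE: no per-allocation
+ * alignment is applied; callers are assumed to tolerate byte alignment. */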
+void *lrhi_arena_allocate(lrhi_arena *arena, ptrdiff_t size) {
+ if (arena->allocated + size > arena->capacity) {
+ LRHI_PANIC("Arena is full, allocation failed");
+ }
+
+ unsigned char *ptr = arena->data + arena->allocated;
+ arena->allocated += size;
+ return ptr;
+}
+void lrhi_arena_reset(lrhi_arena *arena) { arena->allocated = 0; }
+
+static void *arena_allocator_allocate(ptrdiff_t size, void *ctx) {
+ return lrhi_arena_allocate((lrhi_arena *)ctx, size);
+}
+
+static void arena_allocator_free(void *ptr, ptrdiff_t size, void *ctx) {
+ (void)ptr;
+ (void)size;
+ (void)ctx;
+}
+
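+/* Adapts an arena to the generic allocator interface. Free is a no-op;
+ * memory is reclaimed in bulk via lrhi_arena_reset. */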
+struct lrhi_allocator lrhi_arena_allocator(lrhi_arena *arena) {
+ return (struct lrhi_allocator){.allocate = arena_allocator_allocate,
+ .free = arena_allocator_free,
+ .ctx = arena};
+}