diff options
| author | Clement Sibille <clements@lisible.xyz> | 2025-05-05 08:32:33 +0200 |
|---|---|---|
| committer | Clement Sibille <clements@lisible.xyz> | 2025-05-05 12:24:27 +0200 |
| commit | b71eac2069a30349435c192d682e865718c86a15 (patch) | |
| tree | 33754245a23533e31e6a83390bf190c11dfe2bb9 /src | |
| parent | 6017db0069977ae85e698a1234f4a2b7632ee495 (diff) | |
Add a Vulkan renderer that renders an OBJ
Diffstat (limited to 'src')
| -rw-r--r-- | src/alloc.c | 171 | ||||
| -rw-r--r-- | src/alloc.h | 40 | ||||
| -rw-r--r-- | src/engine.c | 17 | ||||
| -rw-r--r-- | src/engine.h | 14 | ||||
| -rw-r--r-- | src/hash.c | 15 | ||||
| -rw-r--r-- | src/hash.h | 9 | ||||
| -rw-r--r-- | src/image.c | 20 | ||||
| -rw-r--r-- | src/image.h | 21 | ||||
| -rw-r--r-- | src/log.c | 13 | ||||
| -rw-r--r-- | src/log.h | 21 | ||||
| -rw-r--r-- | src/main.c | 28 | ||||
| -rw-r--r-- | src/maths.c | 103 | ||||
| -rw-r--r-- | src/maths.h | 49 | ||||
| -rw-r--r-- | src/platform.c | 5 | ||||
| -rw-r--r-- | src/platform.h | 98 | ||||
| -rw-r--r-- | src/platform_sdl.c | 124 | ||||
| -rw-r--r-- | src/renderer.c | 1470 | ||||
| -rw-r--r-- | src/renderer.h | 45 | ||||
| -rw-r--r-- | src/renderer/renderer.c | 2559 | ||||
| -rw-r--r-- | src/renderer/renderer.h | 126 | ||||
| -rw-r--r-- | src/renderer/vma_usage.cpp | 4 | ||||
| -rw-r--r-- | src/renderer/vma_usage.h | 6 | ||||
| -rw-r--r-- | src/str.c | 181 | ||||
| -rw-r--r-- | src/str.h | 62 |
24 files changed, 3523 insertions, 1678 deletions
diff --git a/src/alloc.c b/src/alloc.c new file mode 100644 index 0000000..2fb7a78 --- /dev/null +++ b/src/alloc.c @@ -0,0 +1,171 @@ +#include "alloc.h" +#include "maths.h" +#include "platform.h" +#include <assert.h> +#include <stdint.h> +#include <stdlib.h> +#include <string.h> + +void *vgltf_allocator_allocate(struct vgltf_allocator *allocator, size_t size) { + assert(allocator); + return allocator->allocate(size, allocator->ctx); +} +void *vgltf_allocator_allocate_aligned(struct vgltf_allocator *allocator, + size_t alignment, size_t size) { + assert(allocator); + return allocator->allocate_aligned(alignment, size, allocator->ctx); +} +void *vgltf_allocator_allocate_array(struct vgltf_allocator *allocator, + size_t count, size_t item_size) { + assert(allocator); + return allocator->allocate_array(count, item_size, allocator->ctx); +} +void *vgltf_allocator_reallocate(struct vgltf_allocator *allocator, void *ptr, + size_t old_size, size_t new_size) { + assert(allocator); + return allocator->reallocate(ptr, old_size, new_size, allocator->ctx); +} +void vgltf_allocator_free(struct vgltf_allocator *allocator, void *ptr) { + assert(allocator); + allocator->free(ptr, allocator->ctx); +} + +static void *memory_allocate(size_t size, void *ctx) { + (void)ctx; + void *ptr = malloc(size); + if (!ptr) { + VGLTF_PANIC("Couldn't allocate memory (out of mem?)"); + } + return ptr; +} + +static void *memory_allocate_aligned(size_t alignment, size_t size, void *ctx) { + (void)ctx; +#ifdef VGLTF_PLATFORM_WINDOWS + void *ptr = _aligned_malloc(size, VGLTF_MAX(alignment, sizeof(void *))); +#else + void *ptr = aligned_alloc(VGLTF_MAX(alignment, sizeof(void *)), size); +#endif + if (!ptr) { + VGLTF_PANIC("Couldn't allocate aligned memory (out of mem?)"); + } + return ptr; +} + +static void *memory_allocate_array(size_t count, size_t item_size, void *ctx) { + (void)ctx; + void *ptr = calloc(count, item_size); + if (!ptr) { + VGLTF_PANIC("Couldn't allocate memory (out of mem?)"); + } + 
return ptr; +} + +static void *memory_reallocate(void *ptr, size_t old_size, size_t new_size, + void *ctx) { + (void)old_size; + (void)ctx; + ptr = realloc(ptr, new_size); + if (!ptr) { + VGLTF_PANIC("Couldn't allocate memory (out of mem?)"); + } + return ptr; +} + +static void memory_free(void *ptr, void *ctx) { + (void)ctx; + free(ptr); +} + +thread_local struct vgltf_allocator system_allocator = { + .allocate = memory_allocate, + .allocate_aligned = memory_allocate_aligned, + .allocate_array = memory_allocate_array, + .reallocate = memory_reallocate, + .free = memory_free}; + +void vgltf_arena_init(struct vgltf_allocator *allocator, struct vgltf_arena *arena, + size_t size) { + assert(allocator); + assert(arena); + arena->size = 0; + arena->capacity = size; + arena->data = vgltf_allocator_allocate(allocator, size); +} +void vgltf_arena_deinit(struct vgltf_allocator *allocator, + struct vgltf_arena *arena) { + assert(allocator); + assert(arena); + vgltf_allocator_free(allocator, arena->data); +} +void *vgltf_arena_allocate(struct vgltf_arena *arena, size_t size) { + assert(arena); + assert(arena->size + size <= arena->capacity); + void *ptr = arena->data + arena->size; + arena->size += size; + return ptr; +} + +void *vgltf_arena_allocate_array(struct vgltf_arena *arena, size_t count, + size_t item_size) { + assert(arena); + void *ptr = vgltf_arena_allocate(arena, count * item_size); + memset(ptr, 0, count * item_size); + return ptr; +} + +void vgltf_arena_reset(struct vgltf_arena *arena) { + assert(arena); + arena->size = 0; +} + +static void *arena_allocator_allocate(size_t size, void *ctx) { + assert(ctx); + return vgltf_arena_allocate(ctx, size); +} +static void *arena_allocator_allocate_aligned(size_t alignment, size_t size, + void *ctx) { + assert(ctx); + if (alignment < sizeof(void *) || (alignment & (alignment - 1)) != 0) { + return NULL; + } + + void *ptr = vgltf_arena_allocate(ctx, size + alignment - 1 + sizeof(void *)); + if (!ptr) { + return NULL; + } 
+ + return (void *)(((uintptr_t)ptr + sizeof(void *) + alignment - 1) & + ~(alignment - 1)); +} + +static void *arena_allocator_allocate_array(size_t count, size_t item_size, + void *ctx) { + assert(ctx); + return vgltf_arena_allocate_array(ctx, count, item_size); +} + +static void *arena_allocator_reallocate(void *ptr, size_t old_size, + size_t new_size, void *ctx) { + assert(ptr); + assert(ctx); + + void *new_ptr = vgltf_arena_allocate(ctx, new_size); + memcpy(new_ptr, ptr, old_size); + return new_ptr; +} + +static void arena_allocator_free(void *ptr, void *ctx) { + assert(ctx); + (void)ptr; +} + +struct vgltf_allocator vgltf_arena_allocator(struct vgltf_arena *arena) { + return (struct vgltf_allocator){ + .ctx = arena, + .allocate = arena_allocator_allocate, + .allocate_aligned = arena_allocator_allocate_aligned, + .allocate_array = arena_allocator_allocate_array, + .reallocate = arena_allocator_reallocate, + .free = arena_allocator_free}; +} diff --git a/src/alloc.h b/src/alloc.h new file mode 100644 index 0000000..bde1d55 --- /dev/null +++ b/src/alloc.h @@ -0,0 +1,40 @@ +#ifndef VGLTF_ALLOC_H +#define VGLTF_ALLOC_H + +#include <stddef.h> + +struct vgltf_allocator { + void *(*allocate)(size_t size, void *ctx); + void *(*allocate_aligned)(size_t alignment, size_t size, void *ctx); + void *(*allocate_array)(size_t count, size_t item_size, void *ctx); + void *(*reallocate)(void *ptr, size_t old_size, size_t new_size, void *ctx); + void (*free)(void *ptr, void *ctx); + void *ctx; +}; + +void *vgltf_allocator_allocate(struct vgltf_allocator *allocator, size_t size); +void *vgltf_allocator_allocate_aligned(struct vgltf_allocator *allocator, + size_t alignment, size_t size); +void *vgltf_allocator_allocate_array(struct vgltf_allocator *allocator, + size_t count, size_t item_size); +void *vgltf_allocator_reallocate(struct vgltf_allocator *allocator, void *ptr, + size_t old_size, size_t new_size); +void vgltf_allocator_free(struct vgltf_allocator *allocator, void *ptr); 
+ +extern thread_local struct vgltf_allocator system_allocator; + +struct vgltf_arena { + size_t capacity; + size_t size; + char *data; +}; +void vgltf_arena_init(struct vgltf_allocator *allocator, struct vgltf_arena *arena, + size_t size); +void vgltf_arena_deinit(struct vgltf_allocator *allocator, struct vgltf_arena *arena); +void *vgltf_arena_allocate(struct vgltf_arena *arena, size_t size); +void *vgltf_arena_allocate_array(struct vgltf_arena *arena, size_t count, + size_t item_size); +void vgltf_arena_reset(struct vgltf_arena *arena); +struct vgltf_allocator vgltf_arena_allocator(struct vgltf_arena *arena); + +#endif // VGLTF_ALLOC_H diff --git a/src/engine.c b/src/engine.c new file mode 100644 index 0000000..8904474 --- /dev/null +++ b/src/engine.c @@ -0,0 +1,17 @@ +#include "engine.h" + +bool vgltf_engine_init(struct vgltf_engine *engine, struct vgltf_platform *platform) { + if (!vgltf_renderer_init(&engine->renderer, platform)) { + goto err; + } + + return true; +err: + return false; +} +void vgltf_engine_deinit(struct vgltf_engine *engine) { + vgltf_renderer_deinit(&engine->renderer); +} +void vgltf_engine_run_frame(struct vgltf_engine *engine) { + vgltf_renderer_render_frame(&engine->renderer); +} diff --git a/src/engine.h b/src/engine.h new file mode 100644 index 0000000..5a7bc2d --- /dev/null +++ b/src/engine.h @@ -0,0 +1,14 @@ +#ifndef VGLTF_ENGINE_H +#define VGLTF_ENGINE_H + +#include "renderer/renderer.h" + +struct vgltf_engine { + struct vgltf_renderer renderer; +}; + +bool vgltf_engine_init(struct vgltf_engine *engine, struct vgltf_platform *platform); +void vgltf_engine_deinit(struct vgltf_engine *engine); +void vgltf_engine_run_frame(struct vgltf_engine *engine); + +#endif // VGLTF_ENGINE_H diff --git a/src/hash.c b/src/hash.c new file mode 100644 index 0000000..cfdafc3 --- /dev/null +++ b/src/hash.c @@ -0,0 +1,15 @@ +#include "hash.h" +#include <assert.h> + +uint64_t vgltf_hash_fnv_1a(const char *bytes, size_t nbytes) { + assert(bytes); + static 
const uint64_t FNV_OFFSET_BASIS = 14695981039346656037u; + static const uint64_t FNV_PRIME = 1099511628211u; + uint64_t hash = FNV_OFFSET_BASIS; + for (size_t i = 0; i < nbytes; i++) { + hash = hash ^ bytes[i]; + hash = hash * FNV_PRIME; + } + + return hash; +} diff --git a/src/hash.h b/src/hash.h new file mode 100644 index 0000000..f4f8e76 --- /dev/null +++ b/src/hash.h @@ -0,0 +1,9 @@ +#ifndef VGLTF_HASH_H +#define VGLTF_HASH_H + +#include <stddef.h> +#include <stdint.h> + +uint64_t vgltf_hash_fnv_1a(const char *bytes, size_t nbytes); + +#endif // VGLTF_HASH_H diff --git a/src/image.c b/src/image.c new file mode 100644 index 0000000..a2d29c7 --- /dev/null +++ b/src/image.c @@ -0,0 +1,20 @@ +#include "image.h" + +#define STB_IMAGE_IMPLEMENTATION +#include <stb_image.h> + +bool vgltf_image_load_from_file(struct vgltf_image *image, + struct vgltf_string_view path) { + int width; + int height; + int tex_channels; + image->data = + stbi_load(path.data, &width, &height, &tex_channels, STBI_rgb_alpha); + image->width = width; + image->height = height; + image->format = VGLTF_IMAGE_FORMAT_R8G8B8A8; + + return image->data != nullptr; +} + +void vgltf_image_deinit(struct vgltf_image *image) { stbi_image_free(image->data); } diff --git a/src/image.h b/src/image.h new file mode 100644 index 0000000..426d605 --- /dev/null +++ b/src/image.h @@ -0,0 +1,21 @@ +#ifndef VGLTF_IMAGE_H +#define VGLTF_IMAGE_H + +#include <stdint.h> +#include "str.h" + +enum vgltf_image_format { + VGLTF_IMAGE_FORMAT_R8G8B8A8, +}; + +struct vgltf_image { + unsigned char* data; + uint32_t width; + uint32_t height; + enum vgltf_image_format format; +}; + +bool vgltf_image_load_from_file(struct vgltf_image* image, struct vgltf_string_view path); +void vgltf_image_deinit(struct vgltf_image* image); + +#endif // VGLTF_IMAGE_H @@ -1,12 +1,5 @@ #include "log.h" -const char *vgltf_log_level_to_str(enum vgltf_log_level level) { - switch (level) { - case VGLTF_LOG_ERROR: - return "error"; - case VGLTF_LOG_INFO: 
- return "info"; - case VGLTF_LOG_DEBUG: - return "debug"; - } -} +const char *vgltf_log_level_str[] = {[VGLTF_LOG_LEVEL_DBG] = "debug", + [VGLTF_LOG_LEVEL_INFO] = "info", + [VGLTF_LOG_LEVEL_ERR] = "error"}; @@ -1,25 +1,26 @@ #ifndef VGLTF_LOG_H #define VGLTF_LOG_H -#include <stdio.h> +#include <stdio.h> // IWYU pragma: keep enum vgltf_log_level { - VGLTF_LOG_DEBUG, - VGLTF_LOG_INFO, - VGLTF_LOG_ERROR, + VGLTF_LOG_LEVEL_DBG, + VGLTF_LOG_LEVEL_INFO, + VGLTF_LOG_LEVEL_ERR, }; -const char *vgltf_log_level_to_str(enum vgltf_log_level level); -#define VGLTF_LOG(level, ...) \ +extern const char *vgltf_log_level_str[]; + +#define VGLTF_LOG(level, ...) \ do { \ - fprintf(stderr, "[%s %s:%d] ", vgltf_log_level_to_str(level), __FILE__, \ + fprintf(stderr, "[%s %s:%d] ", vgltf_log_level_str[level], __FILE__, \ __LINE__); \ fprintf(stderr, __VA_ARGS__); \ fprintf(stderr, "\n"); \ } while (0) -#define VGLTF_LOG_DBG(...) VGLTF_LOG(VGLTF_LOG_DEBUG, __VA_ARGS__) -#define VGLTF_LOG_INFO(...) VGLTF_LOG(VGLTF_LOG_INFO, __VA_ARGS__) -#define VGLTF_LOG_ERR(...) VGLTF_LOG(VGLTF_LOG_ERROR, __VA_ARGS__) +#define VGLTF_LOG_DBG(...) VGLTF_LOG(VGLTF_LOG_LEVEL_DBG, __VA_ARGS__) +#define VGLTF_LOG_INFO(...) VGLTF_LOG(VGLTF_LOG_LEVEL_INFO, __VA_ARGS__) +#define VGLTF_LOG_ERR(...) 
VGLTF_LOG(VGLTF_LOG_LEVEL_ERR, __VA_ARGS__) #endif // VGLTF_LOG_H @@ -1,43 +1,39 @@ +#include "engine.h" #include "log.h" #include "platform.h" -#include "renderer.h" int main(void) { struct vgltf_platform platform = {}; if (!vgltf_platform_init(&platform)) { - VGLTF_LOG_ERR("Couldn't initialize the platform layer"); + VGLTF_LOG_ERR("Platform initialization failed"); goto err; } - struct vgltf_renderer renderer = {}; - if (!vgltf_renderer_init(&renderer, &platform)) { - VGLTF_LOG_ERR("Couldn't initialize the renderer"); + struct vgltf_engine engine = {}; + if (!vgltf_engine_init(&engine, &platform)) { + VGLTF_LOG_ERR("Couldn't initialize the engine"); goto deinit_platform; } + VGLTF_LOG_INFO("Starting main loop"); while (true) { struct vgltf_event event; while (vgltf_platform_poll_event(&platform, &event)) { - if (event.type == VGLTF_EVENT_QUIT || - (event.type == VGLTF_EVENT_KEY_DOWN && - event.key.key == VGLTF_KEY_ESCAPE)) { + if (event.type == VGLTF_EVENT_QUIT || (event.type == VGLTF_EVENT_KEY_DOWN && + event.key.key == VGLTF_KEY_ESCAPE)) { goto out_main_loop; - } else if (event.type == VGLTF_EVENT_WINDOW_RESIZED) { - vgltf_renderer_on_window_resized( - &renderer, - (struct vgltf_window_size){.width = event.window_resized.width, - .height = event.window_resized.height}); } } - vgltf_renderer_triangle_pass(&renderer); + vgltf_engine_run_frame(&engine); } out_main_loop: - vgltf_renderer_deinit(&renderer); + VGLTF_LOG_INFO("Exiting main loop"); + vgltf_engine_deinit(&engine); vgltf_platform_deinit(&platform); return 0; deinit_platform: vgltf_platform_deinit(&platform); err: - return 1; + return -1; } diff --git a/src/maths.c b/src/maths.c new file mode 100644 index 0000000..a79c68f --- /dev/null +++ b/src/maths.c @@ -0,0 +1,103 @@ +#include "maths.h" +#include <math.h> +#include <string.h> + +vgltf_vec3 vgltf_vec3_sub(vgltf_vec3 lhs, vgltf_vec3 rhs) { + return (vgltf_vec3){.x = lhs.x - rhs.x, .y = lhs.y - rhs.y, .z = lhs.z - rhs.z}; +} +vgltf_vec3 
vgltf_vec3_cross(vgltf_vec3 lhs, vgltf_vec3 rhs) { + return (vgltf_vec3){.x = lhs.y * rhs.z - lhs.z * rhs.y, + .y = lhs.z * rhs.x - lhs.x * rhs.z, + .z = lhs.x * rhs.y - lhs.y * rhs.x}; +} +vgltf_vec_value_type vgltf_vec3_dot(vgltf_vec3 lhs, vgltf_vec3 rhs) { + return lhs.x * rhs.x + lhs.y * rhs.y + lhs.z * rhs.z; +} +vgltf_vec_value_type vgltf_vec3_length(vgltf_vec3 vec) { + return sqrtf(vec.x * vec.x + vec.y * vec.y + vec.z * vec.z); +} +vgltf_vec3 vgltf_vec3_normalized(vgltf_vec3 vec) { + vgltf_vec_value_type length = vgltf_vec3_length(vec); + return (vgltf_vec3){ + .x = vec.x / length, .y = vec.y / length, .z = vec.z / length}; +} +void vgltf_mat4_multiply(vgltf_mat4 out, vgltf_mat4 lhs, vgltf_mat4 rhs) { + for (int i = 0; i < 4; ++i) { + for (int j = 0; j < 4; ++j) { + out[i * 4 + j] = + lhs[i * 4 + 0] * rhs[0 * 4 + j] + lhs[i * 4 + 1] * rhs[1 * 4 + j] + + lhs[i * 4 + 2] * rhs[2 * 4 + j] + lhs[i * 4 + 3] * rhs[3 * 4 + j]; + } + } +} +void vgltf_mat4_rotate(vgltf_mat4 out, vgltf_mat4 matrix, + vgltf_mat_value_type angle_radians, vgltf_vec3 axis) { + vgltf_vec3 a = vgltf_vec3_normalized(axis); + vgltf_vec_value_type c = cosf(angle_radians); + vgltf_vec_value_type s = sinf(angle_radians); + vgltf_vec_value_type t = 1.f - c; + + vgltf_mat4 rotation_matrix = {t * a.x * a.x + c, + t * a.x * a.y - s * a.z, + t * a.x * a.z + s * a.y, + 0.f, + t * a.x * a.y + s * a.z, + t * a.y * a.y + c, + t * a.y * a.z - s * a.x, + 0.f, + t * a.x * a.z - s * a.y, + t * a.y * a.z + s * a.x, + t * a.z * a.z + c, + 0.f, + 0.f, + 0.f, + 0.f, + 1.f}; + + vgltf_mat4_multiply(out, matrix, rotation_matrix); +} +void vgltf_mat4_look_at(vgltf_mat4 out, vgltf_vec3 eye_position, + vgltf_vec3 target_position, vgltf_vec3 up_axis) { + vgltf_vec3 forward = + vgltf_vec3_normalized(vgltf_vec3_sub(target_position, eye_position)); + vgltf_vec3 right = vgltf_vec3_normalized(vgltf_vec3_cross(forward, up_axis)); + vgltf_vec3 camera_up = vgltf_vec3_cross(right, forward); + + memcpy(out, (const 
vgltf_mat4)VGLTF_MAT4_IDENTITY, sizeof(vgltf_mat4)); + out[0 * 4 + 0] = right.x; + out[1 * 4 + 0] = right.y; + out[2 * 4 + 0] = right.z; + out[0 * 4 + 1] = camera_up.x; + out[1 * 4 + 1] = camera_up.y; + out[2 * 4 + 1] = camera_up.z; + out[0 * 4 + 2] = -forward.x; + out[1 * 4 + 2] = -forward.y; + out[2 * 4 + 2] = -forward.z; + out[3 * 4 + 0] = -vgltf_vec3_dot(right, eye_position); + out[3 * 4 + 1] = -vgltf_vec3_dot(camera_up, eye_position); + out[3 * 4 + 2] = vgltf_vec3_dot(forward, eye_position); +} +void vgltf_mat4_perspective(vgltf_mat4 out, vgltf_mat_value_type fov_radians, + vgltf_mat_value_type aspect_ratio, + vgltf_mat_value_type near, vgltf_mat_value_type far) { + float tan_half_fovy = tanf(fov_radians / 2.0f); + out[0] = 1.f / (aspect_ratio * tan_half_fovy); + out[1] = 0.0f; + out[2] = 0.0f; + out[3] = 0.0f; + + out[4] = 0.0f; + out[5] = 1.f / tan_half_fovy; + out[6] = 0.0f; + out[7] = 0.0f; + + out[8] = 0.0f; + out[9] = 0.0f; + out[10] = -(far + near) / (far - near); + out[11] = -1.0f; + + out[12] = 0.0f; + out[13] = 0.0f; + out[14] = -(2.0f * far * near) / (far - near); + out[15] = 0.0f; +} diff --git a/src/maths.h b/src/maths.h new file mode 100644 index 0000000..d50f285 --- /dev/null +++ b/src/maths.h @@ -0,0 +1,49 @@ +#ifndef VGLTF_MATHS_H +#define VGLTF_MATHS_H + +typedef float vgltf_vec_value_type; + +constexpr double VGLTF_MATHS_PI = 3.14159265358979323846; +#define VGLTF_MATHS_DEG_TO_RAD(deg) (deg * VGLTF_MATHS_PI / 180.0) +#define VGLTF_MAX(x, y) ((x) > (y) ? 
(x) : (y)) + +typedef struct { + vgltf_vec_value_type x; + vgltf_vec_value_type y; +} vgltf_vec2; + +typedef struct { + vgltf_vec_value_type x; + vgltf_vec_value_type y; + vgltf_vec_value_type z; +} vgltf_vec3; +vgltf_vec3 vgltf_vec3_sub(vgltf_vec3 lhs, vgltf_vec3 rhs); +vgltf_vec3 vgltf_vec3_cross(vgltf_vec3 lhs, vgltf_vec3 rhs); +vgltf_vec_value_type vgltf_vec3_dot(vgltf_vec3 lhs, vgltf_vec3 rhs); + +vgltf_vec_value_type vgltf_vec3_length(vgltf_vec3 vec); +vgltf_vec3 vgltf_vec3_normalized(vgltf_vec3 vec); + +typedef vgltf_vec_value_type vgltf_mat_value_type; + +// row major +typedef vgltf_mat_value_type vgltf_mat4[16]; +void vgltf_mat4_multiply(vgltf_mat4 out, vgltf_mat4 lhs, vgltf_mat4 rhs); +void vgltf_mat4_rotate(vgltf_mat4 out, vgltf_mat4 matrix, + vgltf_mat_value_type angle_radians, vgltf_vec3 axis); +void vgltf_mat4_look_at(vgltf_mat4 out, vgltf_vec3 eye_position, + vgltf_vec3 target_position, vgltf_vec3 up_axis); +void vgltf_mat4_perspective(vgltf_mat4 out, vgltf_mat_value_type fov, + vgltf_mat_value_type aspect_ratio, + vgltf_mat_value_type near, vgltf_mat_value_type far); + +// clang-format off +#define VGLTF_MAT4_IDENTITY { \ + 1, 0, 0, 0, \ + 0, 1, 0, 0, \ + 0, 0, 1, 0, \ + 0, 0, 0, 1, \ +} +// clang-format on + +#endif // VGLTF_MATHS_H diff --git a/src/platform.c b/src/platform.c new file mode 100644 index 0000000..da4d7d4 --- /dev/null +++ b/src/platform.c @@ -0,0 +1,5 @@ +#include "platform.h" + +#define VGLTF_GENERATE_KEY_STRING(KEY) #KEY, +const char *vgltf_key_str[] = {VGLTF_FOREACH_KEY(VGLTF_GENERATE_KEY_STRING)}; +#undef VGLTF_GENERATE_KEY_STRING diff --git a/src/platform.h b/src/platform.h index fe719d3..aff673f 100644 --- a/src/platform.h +++ b/src/platform.h @@ -3,66 +3,61 @@ #include "log.h" #include <stdint.h> -#include <stdlib.h> +#include <stdlib.h> // IWYU pragma: keep -#define VGLTF_PANIC(...) \ +#define VGLTF_PANIC(...) 
\ do { \ - VGLTF_LOG_ERR("panic: " __VA_ARGS__); \ + VGLTF_LOG_ERR("PANIC " __VA_ARGS__); \ exit(1); \ } while (0) -enum vgltf_event_type { - VGLTF_EVENT_QUIT, - VGLTF_EVENT_KEY_DOWN, - VGLTF_EVENT_WINDOW_RESIZED, - VGLTF_EVENT_UNKNOWN, -}; +#define VGLTF_FOREACH_KEY(_M) \ + _M(A) \ + _M(B) \ + _M(C) \ + _M(D) \ + _M(E) \ + _M(F) \ + _M(G) \ + _M(H) \ + _M(I) \ + _M(J) \ + _M(K) \ + _M(L) \ + _M(M) \ + _M(N) \ + _M(O) \ + _M(P) \ + _M(Q) \ + _M(R) \ + _M(S) \ + _M(T) \ + _M(U) \ + _M(V) \ + _M(W) \ + _M(X) \ + _M(Y) \ + _M(Z) \ + _M(ESCAPE) +#define VGLTF_GENERATE_KEY_ENUM(KEY) VGLTF_KEY_##KEY, enum vgltf_key { - VGLTF_KEY_A, - VGLTF_KEY_B, - VGLTF_KEY_C, - VGLTF_KEY_D, - VGLTF_KEY_E, - VGLTF_KEY_F, - VGLTF_KEY_G, - VGLTF_KEY_H, - VGLTF_KEY_I, - VGLTF_KEY_J, - VGLTF_KEY_K, - VGLTF_KEY_L, - VGLTF_KEY_M, - VGLTF_KEY_N, - VGLTF_KEY_O, - VGLTF_KEY_P, - VGLTF_KEY_Q, - VGLTF_KEY_R, - VGLTF_KEY_S, - VGLTF_KEY_T, - VGLTF_KEY_U, - VGLTF_KEY_V, - VGLTF_KEY_W, - VGLTF_KEY_X, - VGLTF_KEY_Y, - VGLTF_KEY_Z, - VGLTF_KEY_ESCAPE, + VGLTF_FOREACH_KEY(VGLTF_GENERATE_KEY_ENUM) VGLTF_KEY_COUNT, VGLTF_KEY_UNKNOWN }; +#undef VGLTF_GENERATE_KEY_ENUM +extern const char *vgltf_key_str[]; + +enum vgltf_event_type { VGLTF_EVENT_QUIT, VGLTF_EVENT_KEY_DOWN, VGLTF_EVENT_UNKNOWN }; struct vgltf_key_event { enum vgltf_key key; }; -struct vgltf_window_resized_event { - int32_t width; - int32_t height; -}; - struct vgltf_event { enum vgltf_event_type type; union { struct vgltf_key_event key; - struct vgltf_window_resized_event window_resized; }; }; @@ -75,18 +70,19 @@ struct vgltf_platform; bool vgltf_platform_init(struct vgltf_platform *platform); void vgltf_platform_deinit(struct vgltf_platform *platform); bool vgltf_platform_poll_event(struct vgltf_platform *platform, - struct vgltf_event *event); + struct vgltf_event *event); bool vgltf_platform_get_window_size(struct vgltf_platform *platform, - struct vgltf_window_size *window_size); - -// Vulkan specifics -#include "vulkan/vulkan_core.h" -char 
const *const * + struct vgltf_window_size *window_size); +bool vgltf_platform_get_current_time_nanoseconds(long *time); +char *vgltf_platform_read_file_to_string(const char *filepath, size_t *out_size); +const char *const * vgltf_platform_get_vulkan_instance_extensions(struct vgltf_platform *platform, - uint32_t *count); + uint32_t *count); + +#include <vulkan/vulkan.h> bool vgltf_platform_create_vulkan_surface(struct vgltf_platform *platform, - VkInstance instance, - VkSurfaceKHR *surface); + VkInstance instance, + VkSurfaceKHR *surface); #include "platform_sdl.h" diff --git a/src/platform_sdl.c b/src/platform_sdl.c index 5cc6032..6593b9e 100644 --- a/src/platform_sdl.c +++ b/src/platform_sdl.c @@ -1,29 +1,25 @@ +#include "platform_sdl.h" #include "log.h" #include "platform.h" -#include "platform_sdl.h" -#include <SDL3/SDL_vulkan.h> bool vgltf_platform_init(struct vgltf_platform *platform) { + VGLTF_LOG_INFO("Initializing SDL platform..."); + if (!SDL_Init(SDL_INIT_VIDEO)) { VGLTF_LOG_ERR("SDL initialization failed: %s", SDL_GetError()); goto err; } - constexpr char WINDOW_TITLE[] = "VisibleGLTF"; - constexpr int WINDOW_WIDTH = 800; - constexpr int WINDOW_HEIGHT = 600; - SDL_Window *window = - SDL_CreateWindow(WINDOW_TITLE, WINDOW_WIDTH, WINDOW_HEIGHT, - SDL_WINDOW_VULKAN | SDL_WINDOW_RESIZABLE); - if (!window) { - VGLTF_LOG_ERR("SDL window creation failed: %s", SDL_GetError()); - goto quit_sdl; + platform->window = SDL_CreateWindow("vgltf", 800, 600, SDL_WINDOW_VULKAN); + if (!platform->window) { + VGLTF_LOG_ERR("Window creation failed: %s", SDL_GetError()); + goto deinit_sdl; } - platform->window = window; - + VGLTF_LOG_INFO("SDL platform initialized"); return true; -quit_sdl: + +deinit_sdl: SDL_Quit(); err: return false; @@ -31,67 +27,23 @@ err: void vgltf_platform_deinit(struct vgltf_platform *platform) { SDL_DestroyWindow(platform->window); SDL_Quit(); + VGLTF_LOG_INFO("SDL platform deinitialized"); } -static enum vgltf_key 
vgltf_key_from_sdl_keycode(SDL_Keycode keycode) { - switch (keycode) { - case SDLK_A: - return VGLTF_KEY_A; - case SDLK_B: - return VGLTF_KEY_B; - case SDLK_C: - return VGLTF_KEY_C; - case SDLK_D: - return VGLTF_KEY_D; - case SDLK_E: - return VGLTF_KEY_E; - case SDLK_F: - return VGLTF_KEY_F; - case SDLK_G: - return VGLTF_KEY_G; - case SDLK_H: - return VGLTF_KEY_H; - case SDLK_I: - return VGLTF_KEY_I; - case SDLK_J: - return VGLTF_KEY_J; - case SDLK_K: - return VGLTF_KEY_K; - case SDLK_L: - return VGLTF_KEY_L; - case SDLK_M: - return VGLTF_KEY_M; - case SDLK_N: - return VGLTF_KEY_N; - case SDLK_O: - return VGLTF_KEY_O; - case SDLK_P: - return VGLTF_KEY_P; - case SDLK_Q: - return VGLTF_KEY_Q; - case SDLK_R: - return VGLTF_KEY_R; - case SDLK_S: - return VGLTF_KEY_S; - case SDLK_T: - return VGLTF_KEY_T; - case SDLK_U: - return VGLTF_KEY_U; - case SDLK_V: - return VGLTF_KEY_V; - case SDLK_W: - return VGLTF_KEY_W; - case SDLK_X: - return VGLTF_KEY_X; - case SDLK_Y: - return VGLTF_KEY_Y; - case SDLK_Z: - return VGLTF_KEY_Z; - case SDLK_ESCAPE: - return VGLTF_KEY_ESCAPE; + +#define VGLTF_GENERATE_SDL_KEYCODE_MAPPING(KEY) \ + case SDLK_##KEY: \ + return VGLTF_KEY_##KEY; + +static enum vgltf_key vgltf_key_from_sdl_keycode(SDL_Keycode key_code) { + switch (key_code) { + VGLTF_FOREACH_KEY(VGLTF_GENERATE_SDL_KEYCODE_MAPPING) default: return VGLTF_KEY_UNKNOWN; } } + +#undef VGLTF_GENERATE_SDL_KEYCODE_MAPPING + bool vgltf_platform_poll_event(struct vgltf_platform *platform, struct vgltf_event *event) { (void)platform; @@ -106,16 +58,12 @@ bool vgltf_platform_poll_event(struct vgltf_platform *platform, event->type = VGLTF_EVENT_KEY_DOWN; event->key.key = vgltf_key_from_sdl_keycode(sdl_event.key.key); break; - case SDL_EVENT_WINDOW_RESIZED: - event->type = VGLTF_EVENT_WINDOW_RESIZED; - event->window_resized.width = sdl_event.display.data1; - event->window_resized.height = sdl_event.display.data2; - break; default: event->type = VGLTF_EVENT_UNKNOWN; break; } } + return 
pending_events; } bool vgltf_platform_get_window_size(struct vgltf_platform *platform, @@ -123,7 +71,31 @@ bool vgltf_platform_get_window_size(struct vgltf_platform *platform, return SDL_GetWindowSize(platform->window, &window_size->width, &window_size->height); } -char const *const * +bool vgltf_platform_get_current_time_nanoseconds(long *time) { + if (!SDL_GetCurrentTime(time)) { + VGLTF_LOG_ERR("'SDL_GetCurrentTime failed: %s", SDL_GetError()); + goto err; + } + + return true; +err: + return false; +} + +char *vgltf_platform_read_file_to_string(const char *filepath, + size_t *out_size) { + char *file_data = SDL_LoadFile(filepath, out_size); + if (!file_data) { + VGLTF_LOG_ERR("Couldn't load file: %s", SDL_GetError()); + return NULL; + } + + return file_data; +} + +#include <SDL3/SDL_vulkan.h> + +const char *const * vgltf_platform_get_vulkan_instance_extensions(struct vgltf_platform *platform, uint32_t *count) { (void)platform; diff --git a/src/renderer.c b/src/renderer.c deleted file mode 100644 index 7022af6..0000000 --- a/src/renderer.c +++ /dev/null @@ -1,1470 +0,0 @@ -#include "log.h" -#include "renderer.h" -#include "src/platform.h" -#include "vulkan/vulkan_core.h" -#include <assert.h> - -static const char *VALIDATION_LAYERS[] = {"VK_LAYER_KHRONOS_validation"}; -static constexpr int VALIDATION_LAYER_COUNT = - sizeof(VALIDATION_LAYERS) / sizeof(VALIDATION_LAYERS[0]); - -#ifdef VGLTF_DEBUG -static constexpr bool enable_validation_layers = true; -#else -static constexpr bool enable_validation_layers = false; -#endif - -static VKAPI_ATTR VkBool32 VKAPI_CALL -debug_callback(VkDebugUtilsMessageSeverityFlagBitsEXT message_severity, - VkDebugUtilsMessageTypeFlagBitsEXT message_type, - const VkDebugUtilsMessengerCallbackDataEXT *callback_data, - void *user_data) { - (void)message_severity; - (void)message_type; - (void)user_data; - VGLTF_LOG_DBG("validation layer: %s", callback_data->pMessage); - return VK_FALSE; -} - -static constexpr int 
REQUIRED_INSTANCE_EXTENSIONS_ARRAY_CAPACITY = 10; -struct required_instance_extensions { - const char *extensions[REQUIRED_INSTANCE_EXTENSIONS_ARRAY_CAPACITY]; - uint32_t count; -}; -void required_instance_extensions_push( - struct required_instance_extensions *required_instance_extensions, - const char *required_instance_extension) { - if (required_instance_extensions->count == - REQUIRED_INSTANCE_EXTENSIONS_ARRAY_CAPACITY) { - VGLTF_PANIC("required instance extensions array is full"); - } - required_instance_extensions - ->extensions[required_instance_extensions->count++] = - required_instance_extension; -} - -static constexpr int SUPPORTED_INSTANCE_EXTENSIONS_ARRAY_CAPACITY = 128; -struct supported_instance_extensions { - VkExtensionProperties - properties[SUPPORTED_INSTANCE_EXTENSIONS_ARRAY_CAPACITY]; - uint32_t count; -}; -bool supported_instance_extensions_init( - struct supported_instance_extensions *supported_instance_extensions) { - if (vkEnumerateInstanceExtensionProperties( - nullptr, &supported_instance_extensions->count, nullptr) != - VK_SUCCESS) { - goto err; - } - - if (supported_instance_extensions->count > - SUPPORTED_INSTANCE_EXTENSIONS_ARRAY_CAPACITY) { - VGLTF_LOG_ERR("supported instance extensions array cannot fit all the " - "VkExtensionProperties"); - goto err; - } - - if (vkEnumerateInstanceExtensionProperties( - nullptr, &supported_instance_extensions->count, - supported_instance_extensions->properties) != VK_SUCCESS) { - goto err; - } - return true; -err: - return false; -} -void supported_instance_extensions_debug_print( - const struct supported_instance_extensions *supported_instance_extensions) { - VGLTF_LOG_DBG("Supported instance extensions:"); - for (uint32_t i = 0; i < supported_instance_extensions->count; i++) { - VGLTF_LOG_DBG("\t- %s", - supported_instance_extensions->properties[i].extensionName); - } -} -bool supported_instance_extensions_includes( - const struct supported_instance_extensions *supported_instance_extensions, - 
const char *extension_name) { - for (uint32_t supported_instance_extension_index = 0; - supported_instance_extension_index < - supported_instance_extensions->count; - supported_instance_extension_index++) { - const VkExtensionProperties *extension_properties = - &supported_instance_extensions - ->properties[supported_instance_extension_index]; - if (strcmp(extension_properties->extensionName, extension_name) == 0) { - return true; - } - } - - return false; -} - -static constexpr uint32_t SUPPORTED_VALIDATION_LAYERS_ARRAY_CAPACITY = 64; -struct supported_validation_layers { - VkLayerProperties properties[SUPPORTED_VALIDATION_LAYERS_ARRAY_CAPACITY]; - uint32_t count; -}; -bool supported_validation_layers_init( - struct supported_validation_layers *supported_validation_layers) { - if (vkEnumerateInstanceLayerProperties(&supported_validation_layers->count, - nullptr) != VK_SUCCESS) { - goto err; - } - - if (supported_validation_layers->count > - SUPPORTED_VALIDATION_LAYERS_ARRAY_CAPACITY) { - VGLTF_LOG_ERR("supported validation layers array cannot fit all the " - "VkLayerProperties"); - goto err; - } - - if (vkEnumerateInstanceLayerProperties( - &supported_validation_layers->count, - supported_validation_layers->properties) != VK_SUCCESS) { - goto err; - } - - return true; -err: - return false; -} - -static bool are_validation_layer_supported() { - struct supported_validation_layers supported_layers = {}; - if (!supported_validation_layers_init(&supported_layers)) { - goto err; - } - - for (int requested_layer_index = 0; - requested_layer_index < VALIDATION_LAYER_COUNT; - requested_layer_index++) { - const char *requested_layer_name = VALIDATION_LAYERS[requested_layer_index]; - bool requested_layer_found = false; - for (uint32_t supported_layer_index = 0; - supported_layer_index < supported_layers.count; - supported_layer_index++) { - VkLayerProperties *supported_layer = - &supported_layers.properties[supported_layer_index]; - if (strcmp(requested_layer_name, 
supported_layer->layerName) == 0) { - requested_layer_found = true; - break; - } - } - - if (!requested_layer_found) { - goto err; - } - } - - return true; -err: - return false; -} - -static bool fetch_required_instance_extensions( - struct required_instance_extensions *required_extensions, - struct vgltf_platform *platform) { - struct supported_instance_extensions supported_extensions = {}; - if (!supported_instance_extensions_init(&supported_extensions)) { - VGLTF_LOG_ERR( - "Couldn't fetch supported instance extensions details (OOM?)"); - goto err; - } - supported_instance_extensions_debug_print(&supported_extensions); - - uint32_t platform_required_extension_count = 0; - const char *const *platform_required_extensions = - vgltf_platform_get_vulkan_instance_extensions( - platform, &platform_required_extension_count); - for (uint32_t platform_required_extension_index = 0; - platform_required_extension_index < platform_required_extension_count; - platform_required_extension_index++) { - required_instance_extensions_push( - required_extensions, - platform_required_extensions[platform_required_extension_index]); - } - required_instance_extensions_push( - required_extensions, VK_KHR_PORTABILITY_ENUMERATION_EXTENSION_NAME); - - if (enable_validation_layers) { - required_instance_extensions_push(required_extensions, - VK_EXT_DEBUG_UTILS_EXTENSION_NAME); - } - - bool all_extensions_supported = true; - for (uint32_t required_extension_index = 0; - required_extension_index < required_extensions->count; - required_extension_index++) { - const char *required_extension_name = - required_extensions->extensions[required_extension_index]; - if (!supported_instance_extensions_includes(&supported_extensions, - required_extension_name)) { - VGLTF_LOG_ERR("Unsupported instance extension: %s", - required_extension_name); - all_extensions_supported = false; - } - } - - if (!all_extensions_supported) { - VGLTF_LOG_ERR("Some required extensions are unsupported."); - goto err; - } - - 
return true; -err: - return false; -} - -static void populate_debug_messenger_create_info( - VkDebugUtilsMessengerCreateInfoEXT *create_info) { - *create_info = (VkDebugUtilsMessengerCreateInfoEXT){}; - create_info->sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT; - create_info->messageSeverity = - VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT | - VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT | - VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT; - create_info->messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT | - VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT | - VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT; - create_info->pfnUserCallback = debug_callback; -} - -static bool vgltf_renderer_create_instance(struct vgltf_renderer *renderer, - struct vgltf_platform *platform) { - VGLTF_LOG_INFO("Creating vulkan instance..."); - if (enable_validation_layers && !are_validation_layer_supported()) { - VGLTF_LOG_ERR("Requested validation layers aren't supported"); - goto err; - } - - VkApplicationInfo application_info = { - .sType = VK_STRUCTURE_TYPE_APPLICATION_INFO, - .pApplicationName = "Visible GLTF", - .applicationVersion = VK_MAKE_VERSION(0, 1, 0), - .pEngineName = "No Engine", - .engineVersion = VK_MAKE_VERSION(1, 0, 0), - .apiVersion = VK_API_VERSION_1_2}; - - struct required_instance_extensions required_extensions = {}; - fetch_required_instance_extensions(&required_extensions, platform); - - VkInstanceCreateInfo create_info = { - .sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO, - .pApplicationInfo = &application_info, - .enabledExtensionCount = required_extensions.count, - .ppEnabledExtensionNames = required_extensions.extensions, - .flags = VK_INSTANCE_CREATE_ENUMERATE_PORTABILITY_BIT_KHR}; - - VkDebugUtilsMessengerCreateInfoEXT debug_create_info; - if (enable_validation_layers) { - create_info.enabledLayerCount = VALIDATION_LAYER_COUNT; - create_info.ppEnabledLayerNames = VALIDATION_LAYERS; - 
populate_debug_messenger_create_info(&debug_create_info); - create_info.pNext = &debug_create_info; - } - - if (vkCreateInstance(&create_info, nullptr, &renderer->instance) != - VK_SUCCESS) { - VGLTF_LOG_ERR("Failed to create VkInstance"); - goto err; - } - - return true; -err: - return false; -} - -static VkResult create_debug_utils_messenger_ext( - VkInstance instance, const VkDebugUtilsMessengerCreateInfoEXT *create_info, - const VkAllocationCallbacks *allocator, - VkDebugUtilsMessengerEXT *debug_messenger) { - auto func = (PFN_vkCreateDebugUtilsMessengerEXT)vkGetInstanceProcAddr( - instance, "vkCreateDebugUtilsMessengerEXT"); - if (func != nullptr) { - return func(instance, create_info, allocator, debug_messenger); - } - - return VK_ERROR_EXTENSION_NOT_PRESENT; -} - -static void -destroy_debug_utils_messenger_ext(VkInstance instance, - VkDebugUtilsMessengerEXT debug_messenger, - const VkAllocationCallbacks *allocator) { - auto func = (PFN_vkDestroyDebugUtilsMessengerEXT)vkGetInstanceProcAddr( - instance, "vkDestroyDebugUtilsMessengerEXT"); - if (func != nullptr) { - func(instance, debug_messenger, allocator); - } -} - -static void -vgltf_renderer_setup_debug_messenger(struct vgltf_renderer *renderer) { - if (!enable_validation_layers) - return; - VkDebugUtilsMessengerCreateInfoEXT create_info; - populate_debug_messenger_create_info(&create_info); - create_debug_utils_messenger_ext(renderer->instance, &create_info, nullptr, - &renderer->debug_messenger); -} - -static constexpr int AVAILABLE_PHYSICAL_DEVICE_ARRAY_CAPACITY = 128; -struct available_physical_devices { - VkPhysicalDevice devices[AVAILABLE_PHYSICAL_DEVICE_ARRAY_CAPACITY]; - uint32_t count; -}; -static bool -available_physical_devices_init(VkInstance instance, - struct available_physical_devices *devices) { - - if (vkEnumeratePhysicalDevices(instance, &devices->count, nullptr) != - VK_SUCCESS) { - VGLTF_LOG_ERR("Couldn't enumerate physical devices"); - goto err; - } - - if (devices->count == 0) { - 
VGLTF_LOG_ERR("Failed to find any GPU with Vulkan support"); - goto err; - } - - if (devices->count > AVAILABLE_PHYSICAL_DEVICE_ARRAY_CAPACITY) { - VGLTF_LOG_ERR("available physical devices array cannot fit all available " - "physical devices"); - goto err; - } - - if (vkEnumeratePhysicalDevices(instance, &devices->count, devices->devices) != - VK_SUCCESS) { - VGLTF_LOG_ERR("Couldn't enumerate physical devices"); - goto err; - } - - return true; -err: - return false; -} - -struct queue_family_indices { - uint32_t graphics_family; - uint32_t present_family; - bool has_graphics_family; - bool has_present_family; -}; -bool queue_family_indices_is_complete( - const struct queue_family_indices *indices) { - return indices->has_graphics_family && indices->has_present_family; -} -bool queue_family_indices_for_device(struct queue_family_indices *indices, - VkPhysicalDevice device, - VkSurfaceKHR surface) { - static constexpr uint32_t QUEUE_FAMILY_PROPERTIES_ARRAY_CAPACITY = 64; - uint32_t queue_family_count = 0; - vkGetPhysicalDeviceQueueFamilyProperties(device, &queue_family_count, - nullptr); - - if (queue_family_count > QUEUE_FAMILY_PROPERTIES_ARRAY_CAPACITY) { - VGLTF_LOG_ERR( - "Queue family properties array cannot fit all queue family properties"); - goto err; - } - - VkQueueFamilyProperties - queue_family_properties[QUEUE_FAMILY_PROPERTIES_ARRAY_CAPACITY] = {}; - vkGetPhysicalDeviceQueueFamilyProperties(device, &queue_family_count, - queue_family_properties); - - for (uint32_t queue_family_index = 0; queue_family_index < queue_family_count; - queue_family_index++) { - VkQueueFamilyProperties *queue_family = - &queue_family_properties[queue_family_index]; - - VkBool32 present_support; - vkGetPhysicalDeviceSurfaceSupportKHR(device, queue_family_index, surface, - &present_support); - - if (queue_family->queueFlags & VK_QUEUE_GRAPHICS_BIT) { - indices->graphics_family = queue_family_index; - indices->has_graphics_family = true; - } - - if (present_support) { - 
indices->present_family = queue_family_index; - indices->has_present_family = true; - } - - if (queue_family_indices_is_complete(indices)) { - break; - } - } - - return true; -err: - return false; -} - -static bool is_in_array(uint32_t *array, int length, uint32_t value) { - for (int i = 0; i < length; i++) { - if (array[i] == value) { - return true; - } - } - - return false; -} - -static constexpr uint32_t SUPPORTED_EXTENSIONS_ARRAY_CAPACITY = 128; -struct supported_extensions { - VkExtensionProperties properties[SUPPORTED_EXTENSIONS_ARRAY_CAPACITY]; - uint32_t count; -}; -bool supported_extensions_init( - struct supported_extensions *supported_extensions, - VkPhysicalDevice device) { - if (vkEnumerateDeviceExtensionProperties(device, nullptr, - &supported_extensions->count, - nullptr) != VK_SUCCESS) { - goto err; - } - - if (supported_extensions->count > SUPPORTED_EXTENSIONS_ARRAY_CAPACITY) { - VGLTF_LOG_ERR( - "supported extensions aarray cannot fit all the VkExtensionProperties"); - goto err; - } - - if (vkEnumerateDeviceExtensionProperties( - device, nullptr, &supported_extensions->count, - supported_extensions->properties) != VK_SUCCESS) { - goto err; - } - - return true; -err: - return false; -} - -static bool supported_extensions_includes_extension( - struct supported_extensions *supported_extensions, - const char *extension_name) { - for (uint32_t supported_extension_index = 0; - supported_extension_index < supported_extensions->count; - supported_extension_index++) { - if (strcmp(supported_extensions->properties[supported_extension_index] - .extensionName, - extension_name) == 0) { - return true; - } - } - return false; -} - -static const char *DEVICE_EXTENSIONS[] = {VK_KHR_SWAPCHAIN_EXTENSION_NAME, - "VK_KHR_portability_subset"}; -static constexpr int DEVICE_EXTENSION_COUNT = - sizeof(DEVICE_EXTENSIONS) / sizeof(DEVICE_EXTENSIONS[0]); -static bool are_device_extensions_supported(VkPhysicalDevice device) { - struct supported_extensions 
supported_extensions = {}; - if (!supported_extensions_init(&supported_extensions, device)) { - goto err; - } - - for (uint32_t required_extension_index = 0; - required_extension_index < DEVICE_EXTENSION_COUNT; - required_extension_index++) { - if (!supported_extensions_includes_extension( - &supported_extensions, - DEVICE_EXTENSIONS[required_extension_index])) { - VGLTF_LOG_DBG("Unsupported: %s", - DEVICE_EXTENSIONS[required_extension_index]); - goto err; - } - } - - return true; - -err: - return false; -} - -static constexpr int SWAPCHAIN_SUPPORT_DETAILS_MAX_SURFACE_FORMAT_COUNT = 256; -static constexpr int SWAPCHAIN_SUPPORT_DETAILS_MAX_PRESENT_MODE_COUNT = 256; -struct swapchain_support_details { - VkSurfaceCapabilitiesKHR capabilities; - VkSurfaceFormatKHR - formats[SWAPCHAIN_SUPPORT_DETAILS_MAX_SURFACE_FORMAT_COUNT]; - VkPresentModeKHR - present_modes[SWAPCHAIN_SUPPORT_DETAILS_MAX_PRESENT_MODE_COUNT]; - uint32_t format_count; - uint32_t present_mode_count; -}; -bool swapchain_support_details_query_from_device( - struct swapchain_support_details *swapchain_support_details, - VkPhysicalDevice device, VkSurfaceKHR surface) { - if (vkGetPhysicalDeviceSurfaceCapabilitiesKHR( - device, surface, &swapchain_support_details->capabilities) != - VK_SUCCESS) { - goto err; - } - - if (vkGetPhysicalDeviceSurfaceFormatsKHR( - device, surface, &swapchain_support_details->format_count, nullptr) != - VK_SUCCESS) { - goto err; - } - - if (swapchain_support_details->format_count != 0 && - swapchain_support_details->format_count < - SWAPCHAIN_SUPPORT_DETAILS_MAX_SURFACE_FORMAT_COUNT) { - if (vkGetPhysicalDeviceSurfaceFormatsKHR( - device, surface, &swapchain_support_details->format_count, - swapchain_support_details->formats) != VK_SUCCESS) { - goto err; - } - } - - if (vkGetPhysicalDeviceSurfacePresentModesKHR( - device, surface, &swapchain_support_details->present_mode_count, - nullptr) != VK_SUCCESS) { - goto err; - } - - if (swapchain_support_details->present_mode_count != 0 
&& - swapchain_support_details->present_mode_count < - SWAPCHAIN_SUPPORT_DETAILS_MAX_PRESENT_MODE_COUNT) { - if (vkGetPhysicalDeviceSurfacePresentModesKHR( - device, surface, &swapchain_support_details->present_mode_count, - swapchain_support_details->present_modes) != VK_SUCCESS) { - goto err; - } - } - - return true; -err: - return false; -} - -static bool is_physical_device_suitable(VkPhysicalDevice device, - VkSurfaceKHR surface) { - struct queue_family_indices indices = {}; - queue_family_indices_for_device(&indices, device, surface); - - VGLTF_LOG_DBG("Checking for physical device extension support"); - bool extensions_supported = are_device_extensions_supported(device); - VGLTF_LOG_DBG("Supported: %d", extensions_supported); - - bool swapchain_adequate = false; - if (extensions_supported) { - - VGLTF_LOG_DBG("Checking for swapchain support details"); - struct swapchain_support_details swapchain_support_details = {}; - if (!swapchain_support_details_query_from_device(&swapchain_support_details, - device, surface)) { - VGLTF_LOG_ERR("Couldn't query swapchain support details from device"); - goto err; - } - - swapchain_adequate = swapchain_support_details.format_count > 0 && - swapchain_support_details.present_mode_count > 0; - } - - return queue_family_indices_is_complete(&indices) && extensions_supported && - swapchain_adequate; -err: - return false; -} - -static bool -vgltf_renderer_pick_physical_device(struct vgltf_renderer *renderer) { - VkPhysicalDevice physical_device = VK_NULL_HANDLE; - - struct available_physical_devices available_physical_devices = {}; - if (!available_physical_devices_init(renderer->instance, - &available_physical_devices)) { - VGLTF_LOG_ERR("Couldn't fetch available physical devices"); - goto err; - } - - for (uint32_t available_physical_device_index = 0; - available_physical_device_index < available_physical_devices.count; - available_physical_device_index++) { - VkPhysicalDevice available_physical_device = - 
available_physical_devices.devices[available_physical_device_index]; - if (is_physical_device_suitable(available_physical_device, - renderer->surface)) { - physical_device = available_physical_device; - break; - } - } - - if (physical_device == VK_NULL_HANDLE) { - VGLTF_LOG_ERR("Failed to find a suitable GPU"); - goto err; - } - - renderer->physical_device = physical_device; - - return true; -err: - return false; -} - -static bool -vgltf_renderer_create_logical_device(struct vgltf_renderer *renderer) { - struct queue_family_indices queue_family_indices = {}; - queue_family_indices_for_device(&queue_family_indices, - renderer->physical_device, renderer->surface); - static constexpr int MAX_QUEUE_FAMILY_COUNT = 2; - - uint32_t unique_queue_families[MAX_QUEUE_FAMILY_COUNT] = {}; - int unique_queue_family_count = 0; - - if (!is_in_array(unique_queue_families, unique_queue_family_count, - queue_family_indices.graphics_family)) { - assert(unique_queue_family_count < MAX_QUEUE_FAMILY_COUNT); - unique_queue_families[unique_queue_family_count++] = - queue_family_indices.graphics_family; - } - if (!is_in_array(unique_queue_families, unique_queue_family_count, - queue_family_indices.present_family)) { - assert(unique_queue_family_count < MAX_QUEUE_FAMILY_COUNT); - unique_queue_families[unique_queue_family_count++] = - queue_family_indices.present_family; - } - - float queue_priority = 1.f; - VkDeviceQueueCreateInfo queue_create_infos[MAX_QUEUE_FAMILY_COUNT] = {}; - int queue_create_info_count = 0; - for (int unique_queue_family_index = 0; - unique_queue_family_index < unique_queue_family_count; - unique_queue_family_index++) { - queue_create_infos[queue_create_info_count++] = (VkDeviceQueueCreateInfo){ - .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, - .queueFamilyIndex = unique_queue_families[unique_queue_family_index], - .queueCount = 1, - .pQueuePriorities = &queue_priority}; - } - - VkPhysicalDeviceFeatures device_features = {}; - VkDeviceCreateInfo create_info = { 
- .sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, - .pQueueCreateInfos = queue_create_infos, - .queueCreateInfoCount = queue_create_info_count, - .pEnabledFeatures = &device_features, - .ppEnabledExtensionNames = DEVICE_EXTENSIONS, - .enabledExtensionCount = DEVICE_EXTENSION_COUNT}; - if (vkCreateDevice(renderer->physical_device, &create_info, nullptr, - &renderer->device) != VK_SUCCESS) { - VGLTF_LOG_ERR("Failed to create logical device"); - goto err; - } - - vkGetDeviceQueue(renderer->device, queue_family_indices.graphics_family, 0, - &renderer->graphics_queue); - vkGetDeviceQueue(renderer->device, queue_family_indices.present_family, 0, - &renderer->present_queue); - - return true; -err: - return false; -} - -static bool vgltf_renderer_create_surface(struct vgltf_renderer *renderer, - struct vgltf_platform *platform) { - if (!vgltf_platform_create_vulkan_surface(platform, renderer->instance, - &renderer->surface)) { - VGLTF_LOG_ERR("Couldn't create surface"); - goto err; - } - - return true; -err: - return false; -} - -static VkSurfaceFormatKHR -choose_swapchain_surface_format(VkSurfaceFormatKHR *available_formats, - uint32_t available_format_count) { - for (uint32_t available_format_index = 0; - available_format_index < available_format_count; - available_format_index++) { - VkSurfaceFormatKHR *available_format = - &available_formats[available_format_index]; - if (available_format->format == VK_FORMAT_B8G8R8A8_SRGB && - available_format->colorSpace == VK_COLOR_SPACE_SRGB_NONLINEAR_KHR) { - return *available_format; - } - } - - return available_formats[0]; -} - -static VkPresentModeKHR -choose_swapchain_present_mode(VkPresentModeKHR *available_modes, - uint32_t available_mode_count) { - for (uint32_t available_mode_index = 0; - available_mode_index < available_mode_count; available_mode_index++) { - VkPresentModeKHR available_mode = available_modes[available_mode_index]; - if (available_mode == VK_PRESENT_MODE_MAILBOX_KHR) { - return available_mode; - } - } - - 
return VK_PRESENT_MODE_FIFO_KHR; -} - -static uint32_t clamp_uint32(uint32_t min, uint32_t max, uint32_t value) { - return value < min ? min : value > max ? max : value; -} - -static VkExtent2D -choose_swapchain_extent(const VkSurfaceCapabilitiesKHR *capabilities, int width, - int height) { - if (capabilities->currentExtent.width != UINT32_MAX) { - return capabilities->currentExtent; - } else { - VkExtent2D actual_extent = {width, height}; - actual_extent.width = - clamp_uint32(capabilities->minImageExtent.width, - capabilities->maxImageExtent.width, actual_extent.width); - actual_extent.height = - clamp_uint32(capabilities->minImageExtent.height, - capabilities->maxImageExtent.height, actual_extent.height); - return actual_extent; - } -} - -static bool vgltf_renderer_create_swapchain(struct vgltf_renderer *renderer) { - struct swapchain_support_details swapchain_support_details = {}; - swapchain_support_details_query_from_device( - &swapchain_support_details, renderer->physical_device, renderer->surface); - - VkSurfaceFormatKHR surface_format = - choose_swapchain_surface_format(swapchain_support_details.formats, - swapchain_support_details.format_count); - VkPresentModeKHR present_mode = choose_swapchain_present_mode( - swapchain_support_details.present_modes, - swapchain_support_details.present_mode_count); - - VkExtent2D extent = choose_swapchain_extent( - &swapchain_support_details.capabilities, renderer->window_size.width, - renderer->window_size.height); - uint32_t image_count = - swapchain_support_details.capabilities.minImageCount + 1; - if (swapchain_support_details.capabilities.maxImageCount > 0 && - image_count > swapchain_support_details.capabilities.maxImageCount) { - image_count = swapchain_support_details.capabilities.maxImageCount; - } - - VkSwapchainCreateInfoKHR create_info = { - .sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR, - .surface = renderer->surface, - .minImageCount = image_count, - .imageFormat = surface_format.format, - 
.imageColorSpace = surface_format.colorSpace, - .imageExtent = extent, - .imageArrayLayers = 1, - .imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT}; - struct queue_family_indices indices = {}; - queue_family_indices_for_device(&indices, renderer->physical_device, - renderer->surface); - uint32_t queue_family_indices[] = {indices.graphics_family, - indices.present_family}; - if (indices.graphics_family != indices.present_family) { - create_info.imageSharingMode = VK_SHARING_MODE_CONCURRENT; - create_info.queueFamilyIndexCount = 2; - create_info.pQueueFamilyIndices = queue_family_indices; - } else { - create_info.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE; - } - - create_info.preTransform = - swapchain_support_details.capabilities.currentTransform; - create_info.compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR; - create_info.presentMode = present_mode; - create_info.clipped = VK_TRUE; - create_info.oldSwapchain = VK_NULL_HANDLE; - - if (vkCreateSwapchainKHR(renderer->device, &create_info, nullptr, - &renderer->swapchain) != VK_SUCCESS) { - VGLTF_LOG_ERR("Swapchain creation failed!"); - goto err; - } - - if (vkGetSwapchainImagesKHR(renderer->device, renderer->swapchain, - &renderer->swapchain_image_count, - nullptr) != VK_SUCCESS) { - VGLTF_LOG_ERR("Couldn't get swapchain image count"); - goto destroy_swapchain; - } - - if (renderer->swapchain_image_count > - VGLTF_RENDERER_MAX_SWAPCHAIN_IMAGE_COUNT) { - VGLTF_LOG_ERR("Swapchain image array cannot fit all %d swapchain images", - renderer->swapchain_image_count); - goto destroy_swapchain; - } - - if (vkGetSwapchainImagesKHR(renderer->device, renderer->swapchain, - &renderer->swapchain_image_count, - renderer->swapchain_images) != VK_SUCCESS) { - VGLTF_LOG_ERR("Couldn't get swapchain images"); - goto destroy_swapchain; - } - - renderer->swapchain_image_format = surface_format.format; - renderer->swapchain_extent = extent; - - return true; -destroy_swapchain: - vkDestroySwapchainKHR(renderer->device, 
renderer->swapchain, nullptr); -err: - return false; -} - -static bool vgltf_renderer_create_image_views(struct vgltf_renderer *renderer) { - uint32_t swapchain_image_index; - for (swapchain_image_index = 0; - swapchain_image_index < renderer->swapchain_image_count; - swapchain_image_index++) { - VkImage swapchain_image = renderer->swapchain_images[swapchain_image_index]; - - VkImageViewCreateInfo create_info = { - .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, - .image = swapchain_image, - .viewType = VK_IMAGE_VIEW_TYPE_2D, - .format = renderer->swapchain_image_format, - .components = {VK_COMPONENT_SWIZZLE_IDENTITY, - VK_COMPONENT_SWIZZLE_IDENTITY, - VK_COMPONENT_SWIZZLE_IDENTITY, - VK_COMPONENT_SWIZZLE_IDENTITY}, - .subresourceRange = {.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, - .levelCount = 1, - .layerCount = 1}}; - - if (vkCreateImageView( - renderer->device, &create_info, nullptr, - &renderer->swapchain_image_views[swapchain_image_index]) != - VK_SUCCESS) { - goto err; - } - } - return true; -err: - for (uint32_t to_remove_index = 0; to_remove_index < swapchain_image_index; - to_remove_index++) { - vkDestroyImageView(renderer->device, - renderer->swapchain_image_views[to_remove_index], - nullptr); - } - return false; -} - -static bool create_shader_module(VkDevice device, const unsigned char *code, - int size, VkShaderModule *out) { - VkShaderModuleCreateInfo create_info = { - .sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO, - .codeSize = size, - .pCode = (const uint32_t *)code, - }; - if (vkCreateShaderModule(device, &create_info, nullptr, out) != VK_SUCCESS) { - VGLTF_LOG_ERR("Couldn't create shader module"); - goto err; - } - return true; -err: - return false; -} - -static bool vgltf_renderer_create_render_pass(struct vgltf_renderer *renderer) { - VkAttachmentDescription color_attachment = { - .format = renderer->swapchain_image_format, - .samples = VK_SAMPLE_COUNT_1_BIT, - .loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR, - .storeOp = 
VK_ATTACHMENT_STORE_OP_STORE, - .stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE, - .stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE, - .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED, - .finalLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR}; - VkAttachmentReference color_attachment_ref = { - .attachment = 0, - .layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, - }; - VkSubpassDescription subpass = {.pipelineBindPoint = - VK_PIPELINE_BIND_POINT_GRAPHICS, - .pColorAttachments = &color_attachment_ref, - .colorAttachmentCount = 1}; - VkSubpassDependency dependency = { - .srcSubpass = VK_SUBPASS_EXTERNAL, - .dstSubpass = 0, - .srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, - .srcAccessMask = 0, - .dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, - .dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT}; - - VkRenderPassCreateInfo render_pass_info = { - .sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, - .attachmentCount = 1, - .pAttachments = &color_attachment, - .subpassCount = 1, - .pSubpasses = &subpass, - .dependencyCount = 1, - .pDependencies = &dependency}; - - if (vkCreateRenderPass(renderer->device, &render_pass_info, nullptr, - &renderer->render_pass) != VK_SUCCESS) { - VGLTF_LOG_ERR("Failed to create render pass"); - goto err; - } - - return true; -err: - return false; -} - -static bool -vgltf_renderer_create_graphics_pipeline(struct vgltf_renderer *renderer) { - static constexpr unsigned char triangle_shader_vert_code[] = { -#embed "../compiled_shaders/triangle.vert.spv" - }; - static constexpr unsigned char triangle_shader_frag_code[] = { -#embed "../compiled_shaders/triangle.frag.spv" - }; - - VkShaderModule triangle_shader_vert_module; - if (!create_shader_module(renderer->device, triangle_shader_vert_code, - sizeof(triangle_shader_vert_code), - &triangle_shader_vert_module)) { - VGLTF_LOG_ERR("Couldn't create triangle vert shader module"); - goto err; - } - - VkShaderModule triangle_shader_frag_module; - if 
(!create_shader_module(renderer->device, triangle_shader_frag_code, - sizeof(triangle_shader_frag_code), - &triangle_shader_frag_module)) { - VGLTF_LOG_ERR("Couldn't create triangle frag shader module"); - goto destroy_vert_shader_module; - } - - VkPipelineShaderStageCreateInfo triangle_shader_vert_stage_create_info = { - .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, - .stage = VK_SHADER_STAGE_VERTEX_BIT, - .module = triangle_shader_vert_module, - .pName = "main"}; - VkPipelineShaderStageCreateInfo triangle_shader_frag_stage_create_info = { - .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, - .stage = VK_SHADER_STAGE_FRAGMENT_BIT, - .module = triangle_shader_frag_module, - .pName = "main"}; - VkPipelineShaderStageCreateInfo shader_stages[] = { - triangle_shader_vert_stage_create_info, - triangle_shader_frag_stage_create_info}; - - VkDynamicState dynamic_states[] = { - VK_DYNAMIC_STATE_VIEWPORT, - VK_DYNAMIC_STATE_SCISSOR, - }; - - VkPipelineDynamicStateCreateInfo dynamic_state = { - .sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO, - .dynamicStateCount = sizeof(dynamic_states) / sizeof(dynamic_states[0]), - .pDynamicStates = dynamic_states}; - - VkPipelineVertexInputStateCreateInfo vertex_input_info = { - .sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, - .vertexBindingDescriptionCount = 0, - .vertexAttributeDescriptionCount = 0, - }; - - VkPipelineInputAssemblyStateCreateInfo input_assembly = { - .sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, - .topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, - .primitiveRestartEnable = VK_FALSE, - }; - - VkPipelineViewportStateCreateInfo viewport_state = { - .sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO, - .viewportCount = 1, - .scissorCount = 1}; - - VkPipelineRasterizationStateCreateInfo rasterizer = { - .sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO, - .depthClampEnable = VK_FALSE, - 
.rasterizerDiscardEnable = VK_FALSE, - .polygonMode = VK_POLYGON_MODE_FILL, - .lineWidth = 1.f, - .cullMode = VK_CULL_MODE_BACK_BIT, - .frontFace = VK_FRONT_FACE_CLOCKWISE, - .depthBiasEnable = VK_FALSE}; - - VkPipelineMultisampleStateCreateInfo multisampling = { - .sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO, - .sampleShadingEnable = VK_FALSE, - .rasterizationSamples = VK_SAMPLE_COUNT_1_BIT, - }; - - VkPipelineColorBlendAttachmentState color_blend_attachment = { - .colorWriteMask = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | - VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT, - .blendEnable = VK_FALSE, - }; - - VkPipelineColorBlendStateCreateInfo color_blending = { - .sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO, - .logicOpEnable = VK_FALSE, - .attachmentCount = 1, - .pAttachments = &color_blend_attachment}; - - VkPipelineLayoutCreateInfo pipeline_layout_info = { - .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO, - }; - - if (vkCreatePipelineLayout(renderer->device, &pipeline_layout_info, nullptr, - &renderer->pipeline_layout) != VK_SUCCESS) { - VGLTF_LOG_ERR("Couldn't create pipeline layout"); - goto destroy_frag_shader_module; - } - - VkGraphicsPipelineCreateInfo pipeline_info = { - .sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO, - .stageCount = 2, - .pStages = shader_stages, - .pVertexInputState = &vertex_input_info, - .pInputAssemblyState = &input_assembly, - .pViewportState = &viewport_state, - .pRasterizationState = &rasterizer, - .pMultisampleState = &multisampling, - .pColorBlendState = &color_blending, - .pDynamicState = &dynamic_state, - .layout = renderer->pipeline_layout, - .renderPass = renderer->render_pass, - .subpass = 0, - }; - - if (vkCreateGraphicsPipelines(renderer->device, VK_NULL_HANDLE, 1, - &pipeline_info, nullptr, - &renderer->graphics_pipeline) != VK_SUCCESS) { - VGLTF_LOG_ERR("Couldn't create pipeline"); - goto destroy_pipeline_layout; - } - - 
vkDestroyShaderModule(renderer->device, triangle_shader_frag_module, nullptr); - vkDestroyShaderModule(renderer->device, triangle_shader_vert_module, nullptr); - return true; -destroy_pipeline_layout: - vkDestroyPipelineLayout(renderer->device, renderer->pipeline_layout, nullptr); -destroy_frag_shader_module: - vkDestroyShaderModule(renderer->device, triangle_shader_frag_module, nullptr); -destroy_vert_shader_module: - vkDestroyShaderModule(renderer->device, triangle_shader_vert_module, nullptr); -err: - return false; -} - -static bool -vgltf_renderer_create_framebuffers(struct vgltf_renderer *renderer) { - for (uint32_t i = 0; i < renderer->swapchain_image_count; i++) { - VkImageView attachments[] = {renderer->swapchain_image_views[i]}; - - VkFramebufferCreateInfo framebuffer_info = { - .sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, - .renderPass = renderer->render_pass, - .attachmentCount = 1, - .pAttachments = attachments, - .width = renderer->swapchain_extent.width, - .height = renderer->swapchain_extent.height, - .layers = 1}; - - if (vkCreateFramebuffer(renderer->device, &framebuffer_info, nullptr, - &renderer->swapchain_framebuffers[i]) != - VK_SUCCESS) { - VGLTF_LOG_ERR("Failed to create framebuffer"); - goto err; - } - } - - return true; -err: - return false; -} - -static bool -vgltf_renderer_create_command_pool(struct vgltf_renderer *renderer) { - struct queue_family_indices queue_family_indices = {}; - if (!queue_family_indices_for_device(&queue_family_indices, - renderer->physical_device, - renderer->surface)) { - VGLTF_LOG_ERR("Couldn't fetch queue family indices"); - goto err; - } - - VkCommandPoolCreateInfo pool_info = { - .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, - .flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, - .queueFamilyIndex = queue_family_indices.graphics_family}; - - if (vkCreateCommandPool(renderer->device, &pool_info, nullptr, - &renderer->command_pool) != VK_SUCCESS) { - VGLTF_LOG_ERR("Couldn't create command 
pool"); - goto err; - } - - return true; -err: - return false; -} - -static bool -vgltf_renderer_create_command_buffer(struct vgltf_renderer *renderer) { - VkCommandBufferAllocateInfo allocate_info = { - .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, - .commandPool = renderer->command_pool, - .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY, - .commandBufferCount = VGLTF_RENDERER_MAX_FRAME_IN_FLIGHT_COUNT}; - - if (vkAllocateCommandBuffers(renderer->device, &allocate_info, - renderer->command_buffer) != VK_SUCCESS) { - VGLTF_LOG_ERR("Couldn't allocate command buffers"); - goto err; - } - - return true; -err: - return false; -} - -static bool -vgltf_renderer_create_sync_objects(struct vgltf_renderer *renderer) { - VkSemaphoreCreateInfo semaphore_info = { - .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO, - }; - - VkFenceCreateInfo fence_info = {.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, - .flags = VK_FENCE_CREATE_SIGNALED_BIT}; - - int frame_in_flight_index = 0; - for (; frame_in_flight_index < VGLTF_RENDERER_MAX_FRAME_IN_FLIGHT_COUNT; - frame_in_flight_index++) { - if (vkCreateSemaphore( - renderer->device, &semaphore_info, nullptr, - &renderer->image_available_semaphores[frame_in_flight_index]) != - VK_SUCCESS || - vkCreateSemaphore( - renderer->device, &semaphore_info, nullptr, - &renderer->render_finished_semaphores[frame_in_flight_index]) != - VK_SUCCESS || - vkCreateFence(renderer->device, &fence_info, nullptr, - &renderer->in_flight_fences[frame_in_flight_index]) != - VK_SUCCESS) { - VGLTF_LOG_ERR("Couldn't create sync objects"); - goto err; - } - } - - return true; -err: - for (int frame_in_flight_to_delete_index = 0; - frame_in_flight_to_delete_index < frame_in_flight_index; - frame_in_flight_to_delete_index++) { - vkDestroyFence(renderer->device, - renderer->in_flight_fences[frame_in_flight_index], nullptr); - vkDestroySemaphore( - renderer->device, - renderer->render_finished_semaphores[frame_in_flight_index], nullptr); - vkDestroySemaphore( - 
renderer->device, - renderer->image_available_semaphores[frame_in_flight_index], nullptr); - } - return false; -} - -static void vgltf_renderer_cleanup_swapchain(struct vgltf_renderer *renderer) { - for (uint32_t framebuffer_index = 0; - framebuffer_index < renderer->swapchain_image_count; - framebuffer_index++) { - vkDestroyFramebuffer(renderer->device, - renderer->swapchain_framebuffers[framebuffer_index], - nullptr); - } - - for (uint32_t image_view_index = 0; - image_view_index < renderer->swapchain_image_count; image_view_index++) { - vkDestroyImageView(renderer->device, - renderer->swapchain_image_views[image_view_index], - nullptr); - } - - vkDestroySwapchainKHR(renderer->device, renderer->swapchain, nullptr); -} - -static bool vgltf_renderer_recreate_swapchain(struct vgltf_renderer *renderer) { - vkDeviceWaitIdle(renderer->device); - vgltf_renderer_cleanup_swapchain(renderer); - - // TODO add error handling - vgltf_renderer_create_swapchain(renderer); - vgltf_renderer_create_image_views(renderer); - vgltf_renderer_create_framebuffers(renderer); - return true; -} - -bool vgltf_renderer_triangle_pass(struct vgltf_renderer *renderer) { - vkWaitForFences(renderer->device, 1, - &renderer->in_flight_fences[renderer->current_frame], VK_TRUE, - UINT64_MAX); - - uint32_t image_index; - VkResult acquire_swapchain_image_result = vkAcquireNextImageKHR( - renderer->device, renderer->swapchain, UINT64_MAX, - renderer->image_available_semaphores[renderer->current_frame], - VK_NULL_HANDLE, &image_index); - if (acquire_swapchain_image_result == VK_ERROR_OUT_OF_DATE_KHR || - acquire_swapchain_image_result == VK_SUBOPTIMAL_KHR || - renderer->framebuffer_resized) { - renderer->framebuffer_resized = false; - vgltf_renderer_recreate_swapchain(renderer); - return true; - } else if (acquire_swapchain_image_result != VK_SUCCESS) { - VGLTF_LOG_ERR("Failed to acquire a swapchain image"); - goto err; - } - - vkResetFences(renderer->device, 1, - 
&renderer->in_flight_fences[renderer->current_frame]); - - vkResetCommandBuffer(renderer->command_buffer[renderer->current_frame], 0); - VkCommandBufferBeginInfo begin_info = { - .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, - }; - - if (vkBeginCommandBuffer(renderer->command_buffer[renderer->current_frame], - &begin_info) != VK_SUCCESS) { - VGLTF_LOG_ERR("Failed to begin recording command buffer"); - goto err; - } - - VkRenderPassBeginInfo render_pass_info = { - .sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, - .renderPass = renderer->render_pass, - .framebuffer = renderer->swapchain_framebuffers[image_index], - .renderArea = {.offset = {}, .extent = renderer->swapchain_extent}, - .clearValueCount = 1, - .pClearValues = - &(const VkClearValue){.color = {.float32 = {0.f, 0.f, 0.f, 1.f}}}, - - }; - - vkCmdBeginRenderPass(renderer->command_buffer[renderer->current_frame], - &render_pass_info, VK_SUBPASS_CONTENTS_INLINE); - vkCmdBindPipeline(renderer->command_buffer[renderer->current_frame], - VK_PIPELINE_BIND_POINT_GRAPHICS, - renderer->graphics_pipeline); - VkViewport viewport = {.x = 0.f, - .y = 0.f, - .width = (float)renderer->swapchain_extent.width, - .height = (float)renderer->swapchain_extent.height, - .minDepth = 0.f, - .maxDepth = 1.f}; - vkCmdSetViewport(renderer->command_buffer[renderer->current_frame], 0, 1, - &viewport); - VkRect2D scissor = {.offset = {}, .extent = renderer->swapchain_extent}; - vkCmdSetScissor(renderer->command_buffer[renderer->current_frame], 0, 1, - &scissor); - - vkCmdDraw(renderer->command_buffer[renderer->current_frame], 3, 1, 0, 0); - - vkCmdEndRenderPass(renderer->command_buffer[renderer->current_frame]); - - if (vkEndCommandBuffer(renderer->command_buffer[renderer->current_frame]) != - VK_SUCCESS) { - VGLTF_LOG_ERR("Failed to record command buffer"); - goto err; - } - - VkSubmitInfo submit_info = { - .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO, - }; - - VkSemaphore wait_semaphores[] = { - 
renderer->image_available_semaphores[renderer->current_frame]}; - VkPipelineStageFlags wait_stages[] = { - VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT}; - submit_info.waitSemaphoreCount = 1; - submit_info.pWaitSemaphores = wait_semaphores; - submit_info.pWaitDstStageMask = wait_stages; - submit_info.commandBufferCount = 1; - submit_info.pCommandBuffers = - &renderer->command_buffer[renderer->current_frame]; - - VkSemaphore signal_semaphores[] = { - renderer->render_finished_semaphores[renderer->current_frame]}; - submit_info.signalSemaphoreCount = 1; - submit_info.pSignalSemaphores = signal_semaphores; - if (vkQueueSubmit(renderer->graphics_queue, 1, &submit_info, - renderer->in_flight_fences[renderer->current_frame]) != - VK_SUCCESS) { - VGLTF_LOG_ERR("Failed to submit draw command buffer"); - goto err; - } - - VkPresentInfoKHR present_info = {.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR, - .waitSemaphoreCount = 1, - .pWaitSemaphores = signal_semaphores}; - - VkSwapchainKHR swapchains[] = {renderer->swapchain}; - present_info.swapchainCount = 1; - present_info.pSwapchains = swapchains; - present_info.pImageIndices = &image_index; - VkResult result = vkQueuePresentKHR(renderer->present_queue, &present_info); - if (result == VK_ERROR_OUT_OF_DATE_KHR || result == VK_SUBOPTIMAL_KHR) { - vgltf_renderer_recreate_swapchain(renderer); - } else if (acquire_swapchain_image_result != VK_SUCCESS) { - VGLTF_LOG_ERR("Failed to acquire a swapchain image"); - goto err; - } - renderer->current_frame = - (renderer->current_frame + 1) % VGLTF_RENDERER_MAX_FRAME_IN_FLIGHT_COUNT; - return true; -err: - return false; -} - -bool vgltf_renderer_init(struct vgltf_renderer *renderer, - struct vgltf_platform *platform) { - if (!vgltf_renderer_create_instance(renderer, platform)) { - VGLTF_LOG_ERR("instance creation failed"); - goto err; - } - vgltf_renderer_setup_debug_messenger(renderer); - if (!vgltf_renderer_create_surface(renderer, platform)) { - goto destroy_instance; - } - - if 
(!vgltf_renderer_pick_physical_device(renderer)) { - VGLTF_LOG_ERR("Couldn't pick physical device"); - goto destroy_surface; - } - if (!vgltf_renderer_create_logical_device(renderer)) { - VGLTF_LOG_ERR("Couldn't create logical device"); - goto destroy_device; - } - - struct vgltf_window_size window_size = {800, 600}; - if (!vgltf_platform_get_window_size(platform, &window_size)) { - VGLTF_LOG_ERR("Couldn't get window size"); - goto destroy_device; - } - renderer->window_size = window_size; - - if (!vgltf_renderer_create_swapchain(renderer)) { - VGLTF_LOG_ERR("Couldn't create swapchain"); - goto destroy_device; - } - - if (!vgltf_renderer_create_image_views(renderer)) { - VGLTF_LOG_ERR("Couldn't create image views"); - goto destroy_swapchain; - } - - if (!vgltf_renderer_create_render_pass(renderer)) { - VGLTF_LOG_ERR("Couldn't create render pass"); - goto destroy_image_views; - } - - if (!vgltf_renderer_create_graphics_pipeline(renderer)) { - VGLTF_LOG_ERR("Couldn't create graphics pipeline"); - goto destroy_render_pass; - } - - if (!vgltf_renderer_create_framebuffers(renderer)) { - VGLTF_LOG_ERR("Couldn't create framebuffers"); - goto destroy_graphics_pipeline; - } - - if (!vgltf_renderer_create_command_pool(renderer)) { - VGLTF_LOG_ERR("Couldn't create command pool"); - goto destroy_frame_buffers; - } - - if (!vgltf_renderer_create_command_buffer(renderer)) { - VGLTF_LOG_ERR("Couldn't create command buffer"); - goto destroy_command_pool; - } - - if (!vgltf_renderer_create_sync_objects(renderer)) { - VGLTF_LOG_ERR("Couldn't create sync objects"); - goto destroy_command_pool; - } - - return true; - -destroy_command_pool: - vkDestroyCommandPool(renderer->device, renderer->command_pool, nullptr); -destroy_frame_buffers: - for (uint32_t swapchain_framebuffer_index = 0; - swapchain_framebuffer_index < renderer->swapchain_image_count; - swapchain_framebuffer_index++) { - vkDestroyFramebuffer( - renderer->device, - 
renderer->swapchain_framebuffers[swapchain_framebuffer_index], nullptr); - } -destroy_graphics_pipeline: - vkDestroyPipeline(renderer->device, renderer->graphics_pipeline, nullptr); - vkDestroyPipelineLayout(renderer->device, renderer->pipeline_layout, nullptr); -destroy_render_pass: - vkDestroyRenderPass(renderer->device, renderer->render_pass, nullptr); -destroy_image_views: - for (uint32_t swapchain_image_view_index = 0; - swapchain_image_view_index < renderer->swapchain_image_count; - swapchain_image_view_index++) { - vkDestroyImageView( - renderer->device, - renderer->swapchain_image_views[swapchain_image_view_index], nullptr); - } -destroy_swapchain: - vkDestroySwapchainKHR(renderer->device, renderer->swapchain, nullptr); -destroy_device: - vkDestroyDevice(renderer->device, nullptr); -destroy_surface: - vkDestroySurfaceKHR(renderer->instance, renderer->surface, nullptr); -destroy_instance: - if (enable_validation_layers) { - destroy_debug_utils_messenger_ext(renderer->instance, - renderer->debug_messenger, nullptr); - } - vkDestroyInstance(renderer->instance, nullptr); -err: - return false; -} -void vgltf_renderer_deinit(struct vgltf_renderer *renderer) { - vkDeviceWaitIdle(renderer->device); - vgltf_renderer_cleanup_swapchain(renderer); - vkDestroyPipeline(renderer->device, renderer->graphics_pipeline, nullptr); - vkDestroyPipelineLayout(renderer->device, renderer->pipeline_layout, nullptr); - vkDestroyRenderPass(renderer->device, renderer->render_pass, nullptr); - for (int i = 0; i < VGLTF_RENDERER_MAX_FRAME_IN_FLIGHT_COUNT; i++) { - vkDestroySemaphore(renderer->device, - renderer->image_available_semaphores[i], nullptr); - vkDestroySemaphore(renderer->device, - renderer->render_finished_semaphores[i], nullptr); - vkDestroyFence(renderer->device, renderer->in_flight_fences[i], nullptr); - } - vkDestroyCommandPool(renderer->device, renderer->command_pool, nullptr); - vkDestroyDevice(renderer->device, nullptr); - if (enable_validation_layers) { - 
destroy_debug_utils_messenger_ext(renderer->instance, - renderer->debug_messenger, nullptr); - } - vkDestroySurfaceKHR(renderer->instance, renderer->surface, nullptr); - vkDestroyInstance(renderer->instance, nullptr); -} -void vgltf_renderer_on_window_resized(struct vgltf_renderer *renderer, - struct vgltf_window_size size) { - if (size.width > 0 && size.height > 0 && - size.width != renderer->window_size.width && - size.height != renderer->window_size.height) { - renderer->window_size = size; - renderer->framebuffer_resized = true; - } -} diff --git a/src/renderer.h b/src/renderer.h deleted file mode 100644 index a0417aa..0000000 --- a/src/renderer.h +++ /dev/null @@ -1,45 +0,0 @@ -#ifndef VGLTF_RENDERER_H -#define VGLTF_RENDERER_H - -#include "platform.h" -#include <vulkan/vulkan.h> - -constexpr int VGLTF_RENDERER_MAX_FRAME_IN_FLIGHT_COUNT = 2; -constexpr int VGLTF_RENDERER_MAX_SWAPCHAIN_IMAGE_COUNT = 32; -struct vgltf_renderer { - VkInstance instance; - VkPhysicalDevice physical_device; - VkDevice device; - VkQueue graphics_queue; - VkQueue present_queue; - VkDebugUtilsMessengerEXT debug_messenger; - VkSurfaceKHR surface; - VkSwapchainKHR swapchain; - VkImage swapchain_images[VGLTF_RENDERER_MAX_SWAPCHAIN_IMAGE_COUNT]; - VkImageView swapchain_image_views[VGLTF_RENDERER_MAX_SWAPCHAIN_IMAGE_COUNT]; - VkFormat swapchain_image_format; - VkExtent2D swapchain_extent; - uint32_t swapchain_image_count; - VkRenderPass render_pass; - VkPipelineLayout pipeline_layout; - VkPipeline graphics_pipeline; - VkFramebuffer - swapchain_framebuffers[VGLTF_RENDERER_MAX_SWAPCHAIN_IMAGE_COUNT]; - VkCommandPool command_pool; - VkCommandBuffer command_buffer[VGLTF_RENDERER_MAX_FRAME_IN_FLIGHT_COUNT]; - VkSemaphore - image_available_semaphores[VGLTF_RENDERER_MAX_FRAME_IN_FLIGHT_COUNT]; - VkSemaphore - render_finished_semaphores[VGLTF_RENDERER_MAX_FRAME_IN_FLIGHT_COUNT]; - VkFence in_flight_fences[VGLTF_RENDERER_MAX_FRAME_IN_FLIGHT_COUNT]; - struct vgltf_window_size window_size; - uint32_t 
current_frame; - bool framebuffer_resized; -}; -bool vgltf_renderer_init(struct vgltf_renderer *renderer, - struct vgltf_platform *platform); -void vgltf_renderer_deinit(struct vgltf_renderer *renderer); -bool vgltf_renderer_triangle_pass(struct vgltf_renderer *renderer); -void vgltf_renderer_on_window_resized(struct vgltf_renderer *renderer, - struct vgltf_window_size size); -#endif // VGLTF_RENDERER_H diff --git a/src/renderer/renderer.c b/src/renderer/renderer.c new file mode 100644 index 0000000..d34ef73 --- /dev/null +++ b/src/renderer/renderer.c @@ -0,0 +1,2559 @@ +#include "renderer.h" +#include "../image.h" +#include "../log.h" +#include "../maths.h" +#include "../platform.h" +#include "vma_usage.h" +#include <math.h> + +#define TINYOBJ_LOADER_C_IMPLEMENTATION +#include "vendor/tiny_obj_loader_c.h" + +#include <assert.h> +#include <vulkan/vulkan_core.h> + +static const char MODEL_PATH[] = "assets/model.obj"; +static const char TEXTURE_PATH[] = "assets/texture.png"; + +VkVertexInputBindingDescription vgltf_vertex_binding_description() { + return (VkVertexInputBindingDescription){ + .binding = 0, + .stride = sizeof(struct vgltf_vertex), + .inputRate = VK_VERTEX_INPUT_RATE_VERTEX}; +} +struct vgltf_vertex_input_attribute_descriptions +vgltf_vertex_attribute_descriptions(void) { + return (struct vgltf_vertex_input_attribute_descriptions){ + .descriptions = {(VkVertexInputAttributeDescription){ + .binding = 0, + .location = 0, + .format = VK_FORMAT_R32G32B32_SFLOAT, + .offset = offsetof(struct vgltf_vertex, position)}, + (VkVertexInputAttributeDescription){ + .binding = 0, + .location = 1, + .format = VK_FORMAT_R32G32B32_SFLOAT, + .offset = offsetof(struct vgltf_vertex, color)}, + (VkVertexInputAttributeDescription){ + .binding = 0, + .location = 2, + .format = VK_FORMAT_R32G32_SFLOAT, + .offset = offsetof(struct vgltf_vertex, + texture_coordinates)}}, + .count = 3}; +} + +static const char *VALIDATION_LAYERS[] = {"VK_LAYER_KHRONOS_validation"}; +static 
constexpr int VALIDATION_LAYER_COUNT = + sizeof(VALIDATION_LAYERS) / sizeof(VALIDATION_LAYERS[0]); + +#ifdef VGLTF_DEBUG +static constexpr bool enable_validation_layers = true; +#else +static constexpr bool enable_validation_layers = false; +#endif + +static VKAPI_ATTR VkBool32 VKAPI_CALL +debug_callback(VkDebugUtilsMessageSeverityFlagBitsEXT message_severity, + VkDebugUtilsMessageTypeFlagBitsEXT message_type, + const VkDebugUtilsMessengerCallbackDataEXT *callback_data, + void *user_data) { + (void)message_severity; + (void)message_type; + (void)user_data; + VGLTF_LOG_DBG("validation layer: %s", callback_data->pMessage); + return VK_FALSE; +} + +static constexpr int REQUIRED_INSTANCE_EXTENSIONS_ARRAY_CAPACITY = 10; +struct required_instance_extensions { + const char *extensions[REQUIRED_INSTANCE_EXTENSIONS_ARRAY_CAPACITY]; + uint32_t count; +}; +void required_instance_extensions_push( + struct required_instance_extensions *required_instance_extensions, + const char *required_instance_extension) { + if (required_instance_extensions->count == + REQUIRED_INSTANCE_EXTENSIONS_ARRAY_CAPACITY) { + VGLTF_PANIC("required instance extensions array is full"); + } + required_instance_extensions + ->extensions[required_instance_extensions->count++] = + required_instance_extension; +} + +static constexpr int SUPPORTED_INSTANCE_EXTENSIONS_ARRAY_CAPACITY = 128; +struct supported_instance_extensions { + VkExtensionProperties + properties[SUPPORTED_INSTANCE_EXTENSIONS_ARRAY_CAPACITY]; + uint32_t count; +}; +bool supported_instance_extensions_init( + struct supported_instance_extensions *supported_instance_extensions) { + if (vkEnumerateInstanceExtensionProperties( + nullptr, &supported_instance_extensions->count, nullptr) != + VK_SUCCESS) { + goto err; + } + + if (supported_instance_extensions->count > + SUPPORTED_INSTANCE_EXTENSIONS_ARRAY_CAPACITY) { + VGLTF_LOG_ERR("supported instance extensions array cannot fit all the " + "VkExtensionProperties"); + goto err; + } + + if 
(vkEnumerateInstanceExtensionProperties( + nullptr, &supported_instance_extensions->count, + supported_instance_extensions->properties) != VK_SUCCESS) { + goto err; + } + return true; +err: + return false; +} +void supported_instance_extensions_debug_print( + const struct supported_instance_extensions *supported_instance_extensions) { + VGLTF_LOG_DBG("Supported instance extensions:"); + for (uint32_t i = 0; i < supported_instance_extensions->count; i++) { + VGLTF_LOG_DBG("\t- %s", + supported_instance_extensions->properties[i].extensionName); + } +} +bool supported_instance_extensions_includes( + const struct supported_instance_extensions *supported_instance_extensions, + const char *extension_name) { + for (uint32_t supported_instance_extension_index = 0; + supported_instance_extension_index < + supported_instance_extensions->count; + supported_instance_extension_index++) { + const VkExtensionProperties *extension_properties = + &supported_instance_extensions + ->properties[supported_instance_extension_index]; + if (strcmp(extension_properties->extensionName, extension_name) == 0) { + return true; + } + } + + return false; +} + +static constexpr uint32_t SUPPORTED_VALIDATION_LAYERS_ARRAY_CAPACITY = 64; +struct supported_validation_layers { + VkLayerProperties properties[SUPPORTED_VALIDATION_LAYERS_ARRAY_CAPACITY]; + uint32_t count; +}; +bool supported_validation_layers_init( + struct supported_validation_layers *supported_validation_layers) { + if (vkEnumerateInstanceLayerProperties(&supported_validation_layers->count, + nullptr) != VK_SUCCESS) { + goto err; + } + + if (supported_validation_layers->count > + SUPPORTED_VALIDATION_LAYERS_ARRAY_CAPACITY) { + VGLTF_LOG_ERR("supported validation layers array cannot fit all the " + "VkLayerProperties"); + goto err; + } + + if (vkEnumerateInstanceLayerProperties( + &supported_validation_layers->count, + supported_validation_layers->properties) != VK_SUCCESS) { + goto err; + } + + return true; +err: + return false; +} + 
+static bool are_validation_layer_supported() { + struct supported_validation_layers supported_layers = {}; + if (!supported_validation_layers_init(&supported_layers)) { + goto err; + } + + for (int requested_layer_index = 0; + requested_layer_index < VALIDATION_LAYER_COUNT; + requested_layer_index++) { + const char *requested_layer_name = VALIDATION_LAYERS[requested_layer_index]; + bool requested_layer_found = false; + for (uint32_t supported_layer_index = 0; + supported_layer_index < supported_layers.count; + supported_layer_index++) { + VkLayerProperties *supported_layer = + &supported_layers.properties[supported_layer_index]; + if (strcmp(requested_layer_name, supported_layer->layerName) == 0) { + requested_layer_found = true; + break; + } + } + + if (!requested_layer_found) { + goto err; + } + } + + return true; +err: + return false; +} + +static bool fetch_required_instance_extensions( + struct required_instance_extensions *required_extensions, + struct vgltf_platform *platform) { + struct supported_instance_extensions supported_extensions = {}; + if (!supported_instance_extensions_init(&supported_extensions)) { + VGLTF_LOG_ERR( + "Couldn't fetch supported instance extensions details (OOM?)"); + goto err; + } + supported_instance_extensions_debug_print(&supported_extensions); + + uint32_t platform_required_extension_count = 0; + const char *const *platform_required_extensions = + vgltf_platform_get_vulkan_instance_extensions( + platform, &platform_required_extension_count); + for (uint32_t platform_required_extension_index = 0; + platform_required_extension_index < platform_required_extension_count; + platform_required_extension_index++) { + required_instance_extensions_push( + required_extensions, + platform_required_extensions[platform_required_extension_index]); + } +#ifdef VGLTF_PLATFORM_MACOS + required_instance_extensions_push( + required_extensions, VK_KHR_PORTABILITY_ENUMERATION_EXTENSION_NAME); +#endif // VGLTF_PLATFORM_MACOS + + if 
(enable_validation_layers) { + required_instance_extensions_push(required_extensions, + VK_EXT_DEBUG_UTILS_EXTENSION_NAME); + } + + bool all_extensions_supported = true; + for (uint32_t required_extension_index = 0; + required_extension_index < required_extensions->count; + required_extension_index++) { + const char *required_extension_name = + required_extensions->extensions[required_extension_index]; + if (!supported_instance_extensions_includes(&supported_extensions, + required_extension_name)) { + VGLTF_LOG_ERR("Unsupported instance extension: %s", + required_extension_name); + all_extensions_supported = false; + } + } + + if (!all_extensions_supported) { + VGLTF_LOG_ERR("Some required extensions are unsupported."); + goto err; + } + + return true; +err: + return false; +} + +static void populate_debug_messenger_create_info( + VkDebugUtilsMessengerCreateInfoEXT *create_info) { + *create_info = (VkDebugUtilsMessengerCreateInfoEXT){}; + create_info->sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT; + create_info->messageSeverity = + VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT | + VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT | + VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT; + create_info->messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT | + VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT | + VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT; + create_info->pfnUserCallback = debug_callback; +} + +static bool vgltf_vk_instance_init(struct vgltf_vk_instance *instance, + struct vgltf_platform *platform) { + VGLTF_LOG_INFO("Creating vulkan instance..."); + if (enable_validation_layers && !are_validation_layer_supported()) { + VGLTF_LOG_ERR("Requested validation layers aren't supported"); + goto err; + } + + VkApplicationInfo application_info = { + .sType = VK_STRUCTURE_TYPE_APPLICATION_INFO, + .pApplicationName = "Visible GLTF", + .applicationVersion = VK_MAKE_VERSION(0, 1, 0), + .pEngineName = "No Engine", + .engineVersion = 
VK_MAKE_VERSION(1, 0, 0), + .apiVersion = VK_API_VERSION_1_2}; + + struct required_instance_extensions required_extensions = {}; + fetch_required_instance_extensions(&required_extensions, platform); + + VkInstanceCreateFlags flags = 0; +#ifdef VGLTF_PLATFORM_MACOS + flags |= VK_INSTANCE_CREATE_ENUMERATE_PORTABILITY_BIT_KHR; +#endif // VGLTF_PLATFORM_MACOS + + VkInstanceCreateInfo create_info = { + .sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO, + .pApplicationInfo = &application_info, + .enabledExtensionCount = required_extensions.count, + .ppEnabledExtensionNames = required_extensions.extensions, + .flags = flags}; + + VkDebugUtilsMessengerCreateInfoEXT debug_create_info; + if (enable_validation_layers) { + create_info.enabledLayerCount = VALIDATION_LAYER_COUNT; + create_info.ppEnabledLayerNames = VALIDATION_LAYERS; + populate_debug_messenger_create_info(&debug_create_info); + create_info.pNext = &debug_create_info; + } + + if (vkCreateInstance(&create_info, nullptr, &instance->instance) != + VK_SUCCESS) { + VGLTF_LOG_ERR("Failed to create VkInstance"); + goto err; + } + + return true; +err: + return false; +} +static void vgltf_vk_instance_deinit(struct vgltf_vk_instance *instance) { + vkDestroyInstance(instance->instance, nullptr); +} + +static VkResult create_debug_utils_messenger_ext( + VkInstance instance, const VkDebugUtilsMessengerCreateInfoEXT *create_info, + const VkAllocationCallbacks *allocator, + VkDebugUtilsMessengerEXT *debug_messenger) { + auto func = (PFN_vkCreateDebugUtilsMessengerEXT)vkGetInstanceProcAddr( + instance, "vkCreateDebugUtilsMessengerEXT"); + if (func != nullptr) { + return func(instance, create_info, allocator, debug_messenger); + } + + return VK_ERROR_EXTENSION_NOT_PRESENT; +} + +static void +destroy_debug_utils_messenger_ext(VkInstance instance, + VkDebugUtilsMessengerEXT debug_messenger, + const VkAllocationCallbacks *allocator) { + auto func = (PFN_vkDestroyDebugUtilsMessengerEXT)vkGetInstanceProcAddr( + instance, 
"vkDestroyDebugUtilsMessengerEXT"); + if (func != nullptr) { + func(instance, debug_messenger, allocator); + } +} + +static void +vgltf_renderer_setup_debug_messenger(struct vgltf_renderer *renderer) { + if (!enable_validation_layers) + return; + VkDebugUtilsMessengerCreateInfoEXT create_info; + populate_debug_messenger_create_info(&create_info); + create_debug_utils_messenger_ext(renderer->instance.instance, &create_info, + nullptr, &renderer->debug_messenger); +} + +static constexpr int AVAILABLE_PHYSICAL_DEVICE_ARRAY_CAPACITY = 128; +struct available_physical_devices { + VkPhysicalDevice devices[AVAILABLE_PHYSICAL_DEVICE_ARRAY_CAPACITY]; + uint32_t count; +}; +static bool +available_physical_devices_init(VkInstance instance, + struct available_physical_devices *devices) { + + if (vkEnumeratePhysicalDevices(instance, &devices->count, nullptr) != + VK_SUCCESS) { + VGLTF_LOG_ERR("Couldn't enumerate physical devices"); + goto err; + } + + if (devices->count == 0) { + VGLTF_LOG_ERR("Failed to find any GPU with Vulkan support"); + goto err; + } + + if (devices->count > AVAILABLE_PHYSICAL_DEVICE_ARRAY_CAPACITY) { + VGLTF_LOG_ERR("available physical devices array cannot fit all available " + "physical devices"); + goto err; + } + + if (vkEnumeratePhysicalDevices(instance, &devices->count, devices->devices) != + VK_SUCCESS) { + VGLTF_LOG_ERR("Couldn't enumerate physical devices"); + goto err; + } + + return true; +err: + return false; +} + +struct queue_family_indices { + uint32_t graphics_family; + uint32_t present_family; + bool has_graphics_family; + bool has_present_family; +}; +bool queue_family_indices_is_complete( + const struct queue_family_indices *indices) { + return indices->has_graphics_family && indices->has_present_family; +} +bool queue_family_indices_for_device(struct queue_family_indices *indices, + VkPhysicalDevice device, + VkSurfaceKHR surface) { + static constexpr uint32_t QUEUE_FAMILY_PROPERTIES_ARRAY_CAPACITY = 64; + uint32_t queue_family_count = 
0; + vkGetPhysicalDeviceQueueFamilyProperties(device, &queue_family_count, + nullptr); + + if (queue_family_count > QUEUE_FAMILY_PROPERTIES_ARRAY_CAPACITY) { + VGLTF_LOG_ERR( + "Queue family properties array cannot fit all queue family properties"); + goto err; + } + + VkQueueFamilyProperties + queue_family_properties[QUEUE_FAMILY_PROPERTIES_ARRAY_CAPACITY] = {}; + vkGetPhysicalDeviceQueueFamilyProperties(device, &queue_family_count, + queue_family_properties); + + for (uint32_t queue_family_index = 0; queue_family_index < queue_family_count; + queue_family_index++) { + VkQueueFamilyProperties *queue_family = + &queue_family_properties[queue_family_index]; + + VkBool32 present_support; + vkGetPhysicalDeviceSurfaceSupportKHR(device, queue_family_index, surface, + &present_support); + + if (queue_family->queueFlags & VK_QUEUE_GRAPHICS_BIT) { + indices->graphics_family = queue_family_index; + indices->has_graphics_family = true; + } + + if (present_support) { + indices->present_family = queue_family_index; + indices->has_present_family = true; + } + + if (queue_family_indices_is_complete(indices)) { + break; + } + } + + return true; +err: + return false; +} + +static bool is_in_array(uint32_t *array, int length, uint32_t value) { + for (int i = 0; i < length; i++) { + if (array[i] == value) { + return true; + } + } + + return false; +} + +static constexpr uint32_t SUPPORTED_EXTENSIONS_ARRAY_CAPACITY = 1024; +struct supported_extensions { + VkExtensionProperties properties[SUPPORTED_EXTENSIONS_ARRAY_CAPACITY]; + uint32_t count; +}; +bool supported_extensions_init( + struct supported_extensions *supported_extensions, + VkPhysicalDevice device) { + if (vkEnumerateDeviceExtensionProperties(device, nullptr, + &supported_extensions->count, + nullptr) != VK_SUCCESS) { + goto err; + } + + if (supported_extensions->count > SUPPORTED_EXTENSIONS_ARRAY_CAPACITY) { + VGLTF_LOG_ERR("supported extensions array cannot fit all the supported " + "VkExtensionProperties (%u)", + 
supported_extensions->count); + goto err; + } + + if (vkEnumerateDeviceExtensionProperties( + device, nullptr, &supported_extensions->count, + supported_extensions->properties) != VK_SUCCESS) { + goto err; + } + + return true; +err: + return false; +} + +static bool supported_extensions_includes_extension( + struct supported_extensions *supported_extensions, + const char *extension_name) { + for (uint32_t supported_extension_index = 0; + supported_extension_index < supported_extensions->count; + supported_extension_index++) { + if (strcmp(supported_extensions->properties[supported_extension_index] + .extensionName, + extension_name) == 0) { + return true; + } + } + return false; +} + +static const char *DEVICE_EXTENSIONS[] = { + VK_KHR_SWAPCHAIN_EXTENSION_NAME, +#ifdef VGLTF_PLATFORM_MACOS + "VK_KHR_portability_subset", +#endif +}; +static constexpr int DEVICE_EXTENSION_COUNT = + sizeof(DEVICE_EXTENSIONS) / sizeof(DEVICE_EXTENSIONS[0]); +static bool are_device_extensions_supported(VkPhysicalDevice device) { + struct supported_extensions supported_extensions = {}; + if (!supported_extensions_init(&supported_extensions, device)) { + goto err; + } + + for (uint32_t required_extension_index = 0; + required_extension_index < DEVICE_EXTENSION_COUNT; + required_extension_index++) { + if (!supported_extensions_includes_extension( + &supported_extensions, + DEVICE_EXTENSIONS[required_extension_index])) { + VGLTF_LOG_DBG("Unsupported: %s", + DEVICE_EXTENSIONS[required_extension_index]); + goto err; + } + } + + return true; + +err: + return false; +} + +static constexpr int SWAPCHAIN_SUPPORT_DETAILS_MAX_SURFACE_FORMAT_COUNT = 256; +static constexpr int SWAPCHAIN_SUPPORT_DETAILS_MAX_PRESENT_MODE_COUNT = 256; +struct swapchain_support_details { + VkSurfaceCapabilitiesKHR capabilities; + VkSurfaceFormatKHR + formats[SWAPCHAIN_SUPPORT_DETAILS_MAX_SURFACE_FORMAT_COUNT]; + VkPresentModeKHR + present_modes[SWAPCHAIN_SUPPORT_DETAILS_MAX_PRESENT_MODE_COUNT]; + uint32_t format_count; + 
uint32_t present_mode_count; +}; +bool swapchain_support_details_query_from_device( + struct swapchain_support_details *swapchain_support_details, + VkPhysicalDevice device, VkSurfaceKHR surface) { + if (vkGetPhysicalDeviceSurfaceCapabilitiesKHR( + device, surface, &swapchain_support_details->capabilities) != + VK_SUCCESS) { + goto err; + } + + if (vkGetPhysicalDeviceSurfaceFormatsKHR( + device, surface, &swapchain_support_details->format_count, nullptr) != + VK_SUCCESS) { + goto err; + } + + if (swapchain_support_details->format_count != 0 && + swapchain_support_details->format_count < + SWAPCHAIN_SUPPORT_DETAILS_MAX_SURFACE_FORMAT_COUNT) { + if (vkGetPhysicalDeviceSurfaceFormatsKHR( + device, surface, &swapchain_support_details->format_count, + swapchain_support_details->formats) != VK_SUCCESS) { + goto err; + } + } + + if (vkGetPhysicalDeviceSurfacePresentModesKHR( + device, surface, &swapchain_support_details->present_mode_count, + nullptr) != VK_SUCCESS) { + goto err; + } + + if (swapchain_support_details->present_mode_count != 0 && + swapchain_support_details->present_mode_count < + SWAPCHAIN_SUPPORT_DETAILS_MAX_PRESENT_MODE_COUNT) { + if (vkGetPhysicalDeviceSurfacePresentModesKHR( + device, surface, &swapchain_support_details->present_mode_count, + swapchain_support_details->present_modes) != VK_SUCCESS) { + goto err; + } + } + + return true; +err: + return false; +} + +static bool is_physical_device_suitable(VkPhysicalDevice device, + VkSurfaceKHR surface) { + struct queue_family_indices indices = {}; + queue_family_indices_for_device(&indices, device, surface); + + VGLTF_LOG_DBG("Checking for physical device extension support"); + bool extensions_supported = are_device_extensions_supported(device); + VGLTF_LOG_DBG("Supported: %d", extensions_supported); + + bool swapchain_adequate = false; + if (extensions_supported) { + + VGLTF_LOG_DBG("Checking for swapchain support details"); + struct swapchain_support_details swapchain_support_details = {}; + if 
(!swapchain_support_details_query_from_device(&swapchain_support_details, + device, surface)) { + VGLTF_LOG_ERR("Couldn't query swapchain support details from device"); + goto err; + } + + swapchain_adequate = swapchain_support_details.format_count > 0 && + swapchain_support_details.present_mode_count > 0; + } + + VkPhysicalDeviceFeatures supported_features; + vkGetPhysicalDeviceFeatures(device, &supported_features); + + return queue_family_indices_is_complete(&indices) && extensions_supported && + swapchain_adequate && supported_features.samplerAnisotropy; +err: + return false; +} + +static bool pick_physical_device(VkPhysicalDevice *physical_device, + struct vgltf_vk_instance *instance, + VkSurfaceKHR surface) { + VkPhysicalDevice vk_physical_device = VK_NULL_HANDLE; + struct available_physical_devices available_physical_devices = {}; + if (!available_physical_devices_init(instance->instance, + &available_physical_devices)) { + VGLTF_LOG_ERR("Couldn't fetch available physical devices"); + goto err; + } + + for (uint32_t available_physical_device_index = 0; + available_physical_device_index < available_physical_devices.count; + available_physical_device_index++) { + VkPhysicalDevice available_physical_device = + available_physical_devices.devices[available_physical_device_index]; + if (is_physical_device_suitable(available_physical_device, surface)) { + vk_physical_device = available_physical_device; + break; + } + } + + if (vk_physical_device == VK_NULL_HANDLE) { + VGLTF_LOG_ERR("Failed to find a suitable GPU"); + goto err; + } + + *physical_device = vk_physical_device; + + return true; +err: + return false; +} + +static bool create_logical_device(VkDevice *device, VkQueue *graphics_queue, + VkQueue *present_queue, + VkPhysicalDevice physical_device, + VkSurfaceKHR surface) { + struct queue_family_indices queue_family_indices = {}; + queue_family_indices_for_device(&queue_family_indices, physical_device, + surface); + static constexpr int MAX_QUEUE_FAMILY_COUNT = 
2; + + uint32_t unique_queue_families[MAX_QUEUE_FAMILY_COUNT] = {}; + int unique_queue_family_count = 0; + + if (!is_in_array(unique_queue_families, unique_queue_family_count, + queue_family_indices.graphics_family)) { + assert(unique_queue_family_count < MAX_QUEUE_FAMILY_COUNT); + unique_queue_families[unique_queue_family_count++] = + queue_family_indices.graphics_family; + } + if (!is_in_array(unique_queue_families, unique_queue_family_count, + queue_family_indices.present_family)) { + assert(unique_queue_family_count < MAX_QUEUE_FAMILY_COUNT); + unique_queue_families[unique_queue_family_count++] = + queue_family_indices.present_family; + } + + float queue_priority = 1.f; + VkDeviceQueueCreateInfo queue_create_infos[MAX_QUEUE_FAMILY_COUNT] = {}; + int queue_create_info_count = 0; + for (int unique_queue_family_index = 0; + unique_queue_family_index < unique_queue_family_count; + unique_queue_family_index++) { + queue_create_infos[queue_create_info_count++] = (VkDeviceQueueCreateInfo){ + .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, + .queueFamilyIndex = unique_queue_families[unique_queue_family_index], + .queueCount = 1, + .pQueuePriorities = &queue_priority}; + } + + VkPhysicalDeviceFeatures device_features = { + .samplerAnisotropy = VK_TRUE, + }; + VkDeviceCreateInfo create_info = { + .sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, + .pQueueCreateInfos = queue_create_infos, + .queueCreateInfoCount = queue_create_info_count, + .pEnabledFeatures = &device_features, + .ppEnabledExtensionNames = DEVICE_EXTENSIONS, + .enabledExtensionCount = DEVICE_EXTENSION_COUNT}; + if (vkCreateDevice(physical_device, &create_info, nullptr, device) != + VK_SUCCESS) { + VGLTF_LOG_ERR("Failed to create logical device"); + goto err; + } + + vkGetDeviceQueue(*device, queue_family_indices.graphics_family, 0, + graphics_queue); + vkGetDeviceQueue(*device, queue_family_indices.present_family, 0, + present_queue); + + return true; +err: + return false; +} + +static bool 
create_allocator(VmaAllocator *allocator, + struct vgltf_vk_device *device, + struct vgltf_vk_instance *instance) { + VmaAllocatorCreateInfo create_info = {.device = device->device, + .instance = instance->instance, + .physicalDevice = + device->physical_device}; + + if (vmaCreateAllocator(&create_info, allocator) != VK_SUCCESS) { + VGLTF_LOG_ERR("Couldn't create VMA allocator"); + goto err; + } + return true; +err: + return false; +} + +static bool vgltf_vk_surface_init(struct vgltf_vk_surface *surface, + struct vgltf_vk_instance *instance, + struct vgltf_platform *platform) { + if (!vgltf_platform_create_vulkan_surface(platform, instance->instance, + &surface->surface)) { + VGLTF_LOG_ERR("Couldn't create surface"); + goto err; + } + + return true; +err: + return false; +} + +static void vgltf_vk_surface_deinit(struct vgltf_vk_surface *surface, + struct vgltf_vk_instance *instance) { + vkDestroySurfaceKHR(instance->instance, surface->surface, nullptr); +} + +static VkSurfaceFormatKHR +choose_swapchain_surface_format(VkSurfaceFormatKHR *available_formats, + uint32_t available_format_count) { + for (uint32_t available_format_index = 0; + available_format_index < available_format_count; + available_format_index++) { + VkSurfaceFormatKHR *available_format = + &available_formats[available_format_index]; + if (available_format->format == VK_FORMAT_B8G8R8A8_SRGB && + available_format->colorSpace == VK_COLOR_SPACE_SRGB_NONLINEAR_KHR) { + return *available_format; + } + } + + return available_formats[0]; +} + +static VkPresentModeKHR +choose_swapchain_present_mode(VkPresentModeKHR *available_modes, + uint32_t available_mode_count) { + for (uint32_t available_mode_index = 0; + available_mode_index < available_mode_count; available_mode_index++) { + VkPresentModeKHR available_mode = available_modes[available_mode_index]; + if (available_mode == VK_PRESENT_MODE_MAILBOX_KHR) { + return available_mode; + } + } + + return VK_PRESENT_MODE_FIFO_KHR; +} + +static uint32_t 
clamp_uint32(uint32_t min, uint32_t max, uint32_t value) { + return value < min ? min : value > max ? max : value; +} + +static VkExtent2D +choose_swapchain_extent(const VkSurfaceCapabilitiesKHR *capabilities, int width, + int height) { + if (capabilities->currentExtent.width != UINT32_MAX) { + return capabilities->currentExtent; + } else { + VkExtent2D actual_extent = {width, height}; + actual_extent.width = + clamp_uint32(capabilities->minImageExtent.width, + capabilities->maxImageExtent.width, actual_extent.width); + actual_extent.height = + clamp_uint32(capabilities->minImageExtent.height, + capabilities->maxImageExtent.height, actual_extent.height); + return actual_extent; + } +} + +static bool create_swapchain(struct vgltf_vk_swapchain *swapchain, + struct vgltf_vk_device *device, + struct vgltf_vk_surface *surface, + struct vgltf_window_size *window_size) { + struct swapchain_support_details swapchain_support_details = {}; + swapchain_support_details_query_from_device( + &swapchain_support_details, device->physical_device, surface->surface); + + VkSurfaceFormatKHR surface_format = + choose_swapchain_surface_format(swapchain_support_details.formats, + swapchain_support_details.format_count); + VkPresentModeKHR present_mode = choose_swapchain_present_mode( + swapchain_support_details.present_modes, + swapchain_support_details.present_mode_count); + + VkExtent2D extent = + choose_swapchain_extent(&swapchain_support_details.capabilities, + window_size->width, window_size->height); + uint32_t image_count = + swapchain_support_details.capabilities.minImageCount + 1; + if (swapchain_support_details.capabilities.maxImageCount > 0 && + image_count > swapchain_support_details.capabilities.maxImageCount) { + image_count = swapchain_support_details.capabilities.maxImageCount; + } + + VkSwapchainCreateInfoKHR create_info = { + .sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR, + .surface = surface->surface, + .minImageCount = image_count, + .imageFormat = 
surface_format.format, + .imageColorSpace = surface_format.colorSpace, + .imageExtent = extent, + .imageArrayLayers = 1, + .imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT}; + struct queue_family_indices indices = {}; + queue_family_indices_for_device(&indices, device->physical_device, + surface->surface); + uint32_t queue_family_indices[] = {indices.graphics_family, + indices.present_family}; + if (indices.graphics_family != indices.present_family) { + create_info.imageSharingMode = VK_SHARING_MODE_CONCURRENT; + create_info.queueFamilyIndexCount = 2; + create_info.pQueueFamilyIndices = queue_family_indices; + } else { + create_info.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE; + } + + create_info.preTransform = + swapchain_support_details.capabilities.currentTransform; + create_info.compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR; + create_info.presentMode = present_mode; + create_info.clipped = VK_TRUE; + create_info.oldSwapchain = VK_NULL_HANDLE; + + if (vkCreateSwapchainKHR(device->device, &create_info, nullptr, + &swapchain->swapchain) != VK_SUCCESS) { + VGLTF_LOG_ERR("Swapchain creation failed!"); + goto err; + } + + if (vkGetSwapchainImagesKHR(device->device, swapchain->swapchain, + &swapchain->swapchain_image_count, + nullptr) != VK_SUCCESS) { + VGLTF_LOG_ERR("Couldn't get swapchain image count"); + goto destroy_swapchain; + } + + if (swapchain->swapchain_image_count > + VGLTF_RENDERER_MAX_SWAPCHAIN_IMAGE_COUNT) { + VGLTF_LOG_ERR("Swapchain image array cannot fit all %d swapchain images", + swapchain->swapchain_image_count); + goto destroy_swapchain; + } + + if (vkGetSwapchainImagesKHR(device->device, swapchain->swapchain, + &swapchain->swapchain_image_count, + swapchain->swapchain_images) != VK_SUCCESS) { + VGLTF_LOG_ERR("Couldn't get swapchain images"); + goto destroy_swapchain; + } + + swapchain->swapchain_image_format = surface_format.format; + swapchain->swapchain_extent = extent; + + return true; +destroy_swapchain: + 
vkDestroySwapchainKHR(device->device, swapchain->swapchain, nullptr); +err: + return false; +} + +static bool create_image_view(struct vgltf_vk_device *device, VkImage image, + VkFormat format, VkImageView *image_view, + VkImageAspectFlags aspect_flags, + uint32_t mip_level_count) { + + VkImageViewCreateInfo create_info = { + .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, + .image = image, + .viewType = VK_IMAGE_VIEW_TYPE_2D, + .format = format, + .components = {VK_COMPONENT_SWIZZLE_IDENTITY, + VK_COMPONENT_SWIZZLE_IDENTITY, + VK_COMPONENT_SWIZZLE_IDENTITY, + VK_COMPONENT_SWIZZLE_IDENTITY}, + .subresourceRange = {.aspectMask = aspect_flags, + .levelCount = mip_level_count, + .layerCount = 1}}; + if (vkCreateImageView(device->device, &create_info, nullptr, image_view) != + VK_SUCCESS) { + return false; + } + + return true; +} + +static bool create_swapchain_image_views(struct vgltf_vk_swapchain *swapchain, + struct vgltf_vk_device *device) { + uint32_t swapchain_image_index; + for (swapchain_image_index = 0; + swapchain_image_index < swapchain->swapchain_image_count; + swapchain_image_index++) { + VkImage swapchain_image = + swapchain->swapchain_images[swapchain_image_index]; + + if (!create_image_view( + device, swapchain_image, swapchain->swapchain_image_format, + &swapchain->swapchain_image_views[swapchain_image_index], + VK_IMAGE_ASPECT_COLOR_BIT, 1)) { + goto err; + } + } + return true; +err: + for (uint32_t to_remove_index = 0; to_remove_index < swapchain_image_index; + to_remove_index++) { + vkDestroyImageView(device->device, + swapchain->swapchain_image_views[to_remove_index], + nullptr); + } + return false; +} + +static bool create_shader_module(VkDevice device, const unsigned char *code, + int size, VkShaderModule *out) { + VkShaderModuleCreateInfo create_info = { + .sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO, + .codeSize = size, + .pCode = (const uint32_t *)code, + }; + if (vkCreateShaderModule(device, &create_info, nullptr, out) != 
VK_SUCCESS) { + VGLTF_LOG_ERR("Couldn't create shader module"); + goto err; + } + return true; +err: + return false; +} + +static VkFormat find_supported_format(struct vgltf_renderer *renderer, + const VkFormat *candidates, + int candidate_count, VkImageTiling tiling, + VkFormatFeatureFlags features) { + for (int candidate_index = 0; candidate_index < candidate_count; + candidate_index++) { + VkFormat candidate = candidates[candidate_index]; + VkFormatProperties properties; + vkGetPhysicalDeviceFormatProperties(renderer->device.physical_device, + candidate, &properties); + if (tiling == VK_IMAGE_TILING_LINEAR && + (properties.linearTilingFeatures & features) == features) { + return candidate; + } else if (tiling == VK_IMAGE_TILING_OPTIMAL && + (properties.optimalTilingFeatures & features) == features) { + return candidate; + } + } + + return VK_FORMAT_UNDEFINED; +} + +static VkFormat find_depth_format(struct vgltf_renderer *renderer) { + return find_supported_format(renderer, + (const VkFormat[]){VK_FORMAT_D32_SFLOAT, + VK_FORMAT_D32_SFLOAT_S8_UINT, + VK_FORMAT_D24_UNORM_S8_UINT}, + 3, VK_IMAGE_TILING_OPTIMAL, + VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT); +} + +static bool vgltf_renderer_create_render_pass(struct vgltf_renderer *renderer) { + VkAttachmentDescription color_attachment = { + .format = renderer->swapchain.swapchain_image_format, + .samples = VK_SAMPLE_COUNT_1_BIT, + .loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR, + .storeOp = VK_ATTACHMENT_STORE_OP_STORE, + .stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE, + .stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE, + .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED, + .finalLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR}; + VkAttachmentReference color_attachment_ref = { + .attachment = 0, + .layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, + }; + VkAttachmentDescription depth_attachment = { + .format = find_depth_format(renderer), + .samples = VK_SAMPLE_COUNT_1_BIT, + .loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR, + .storeOp = 
VK_ATTACHMENT_STORE_OP_DONT_CARE, + .stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE, + .stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE, + .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED, + .finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL}; + VkAttachmentReference depth_attachment_ref = { + .attachment = 1, + .layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, + }; + + VkSubpassDescription subpass = { + .pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS, + .pColorAttachments = &color_attachment_ref, + .colorAttachmentCount = 1, + .pDepthStencilAttachment = &depth_attachment_ref}; + VkSubpassDependency dependency = { + .srcSubpass = VK_SUBPASS_EXTERNAL, + .dstSubpass = 0, + .srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | + VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT, + .srcAccessMask = 0, + .dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | + VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT, + .dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | + VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT}; + + VkAttachmentDescription attachments[] = {color_attachment, depth_attachment}; + int attachment_count = sizeof(attachments) / sizeof(attachments[0]); + VkRenderPassCreateInfo render_pass_info = { + .sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, + .attachmentCount = attachment_count, + .pAttachments = attachments, + .subpassCount = 1, + .pSubpasses = &subpass, + .dependencyCount = 1, + .pDependencies = &dependency}; + + if (vkCreateRenderPass(renderer->device.device, &render_pass_info, nullptr, + &renderer->render_pass) != VK_SUCCESS) { + VGLTF_LOG_ERR("Failed to create render pass"); + goto err; + } + + return true; +err: + return false; +} + +static bool +vgltf_renderer_create_descriptor_set_layout(struct vgltf_renderer *renderer) { + VkDescriptorSetLayoutBinding ubo_layout_binding = { + .binding = 0, + .descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, + .descriptorCount = 1, + .stageFlags = 
VK_SHADER_STAGE_VERTEX_BIT, + }; + VkDescriptorSetLayoutBinding sampler_layout_binding = { + .binding = 1, + .descriptorCount = 1, + .descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, + .stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT, + }; + + VkDescriptorSetLayoutBinding bindings[] = {ubo_layout_binding, + sampler_layout_binding}; + int binding_count = sizeof(bindings) / sizeof(bindings[0]); + + VkDescriptorSetLayoutCreateInfo layout_info = { + .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO, + .bindingCount = binding_count, + .pBindings = bindings}; + + if (vkCreateDescriptorSetLayout(renderer->device.device, &layout_info, + nullptr, &renderer->descriptor_set_layout) != + VK_SUCCESS) { + VGLTF_LOG_ERR("Failed to create descriptor set layout"); + goto err; + } + return true; +err: + return false; +} + +static bool +vgltf_renderer_create_graphics_pipeline(struct vgltf_renderer *renderer) { + static constexpr unsigned char triangle_shader_vert_code[] = { +#embed "../compiled_shaders/triangle.vert.spv" + }; + static constexpr unsigned char triangle_shader_frag_code[] = { +#embed "../compiled_shaders/triangle.frag.spv" + }; + + VkShaderModule triangle_shader_vert_module; + if (!create_shader_module(renderer->device.device, triangle_shader_vert_code, + sizeof(triangle_shader_vert_code), + &triangle_shader_vert_module)) { + VGLTF_LOG_ERR("Couldn't create triangle vert shader module"); + goto err; + } + + VkShaderModule triangle_shader_frag_module; + if (!create_shader_module(renderer->device.device, triangle_shader_frag_code, + sizeof(triangle_shader_frag_code), + &triangle_shader_frag_module)) { + VGLTF_LOG_ERR("Couldn't create triangle frag shader module"); + goto destroy_vert_shader_module; + } + + VkPipelineShaderStageCreateInfo triangle_shader_vert_stage_create_info = { + .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, + .stage = VK_SHADER_STAGE_VERTEX_BIT, + .module = triangle_shader_vert_module, + .pName = "main"}; + 
VkPipelineShaderStageCreateInfo triangle_shader_frag_stage_create_info = { + .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, + .stage = VK_SHADER_STAGE_FRAGMENT_BIT, + .module = triangle_shader_frag_module, + .pName = "main"}; + VkPipelineShaderStageCreateInfo shader_stages[] = { + triangle_shader_vert_stage_create_info, + triangle_shader_frag_stage_create_info}; + + VkDynamicState dynamic_states[] = { + VK_DYNAMIC_STATE_VIEWPORT, + VK_DYNAMIC_STATE_SCISSOR, + }; + + VkPipelineDynamicStateCreateInfo dynamic_state = { + .sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO, + .dynamicStateCount = sizeof(dynamic_states) / sizeof(dynamic_states[0]), + .pDynamicStates = dynamic_states}; + + VkVertexInputBindingDescription vertex_binding_description = + vgltf_vertex_binding_description(); + struct vgltf_vertex_input_attribute_descriptions + vertex_attribute_descriptions = vgltf_vertex_attribute_descriptions(); + + VkPipelineVertexInputStateCreateInfo vertex_input_info = { + .sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, + .vertexBindingDescriptionCount = 1, + .vertexAttributeDescriptionCount = vertex_attribute_descriptions.count, + .pVertexBindingDescriptions = &vertex_binding_description, + .pVertexAttributeDescriptions = + vertex_attribute_descriptions.descriptions}; + + VkPipelineInputAssemblyStateCreateInfo input_assembly = { + .sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, + .topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, + .primitiveRestartEnable = VK_FALSE, + }; + + VkPipelineViewportStateCreateInfo viewport_state = { + .sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO, + .viewportCount = 1, + .scissorCount = 1}; + + VkPipelineRasterizationStateCreateInfo rasterizer = { + .sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO, + .depthClampEnable = VK_FALSE, + .rasterizerDiscardEnable = VK_FALSE, + .polygonMode = VK_POLYGON_MODE_FILL, + .lineWidth = 1.f, + .cullMode = 
VK_CULL_MODE_BACK_BIT, + .frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE, + .depthBiasEnable = VK_FALSE}; + + VkPipelineMultisampleStateCreateInfo multisampling = { + .sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO, + .sampleShadingEnable = VK_FALSE, + .rasterizationSamples = VK_SAMPLE_COUNT_1_BIT, + }; + + VkPipelineColorBlendAttachmentState color_blend_attachment = { + .colorWriteMask = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | + VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT, + .blendEnable = VK_FALSE, + }; + + VkPipelineDepthStencilStateCreateInfo depth_stencil = { + .sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO, + .depthTestEnable = VK_TRUE, + .depthWriteEnable = VK_TRUE, + .depthCompareOp = VK_COMPARE_OP_LESS, + .depthBoundsTestEnable = VK_FALSE, + .stencilTestEnable = VK_FALSE, + }; + + VkPipelineColorBlendStateCreateInfo color_blending = { + .sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO, + .logicOpEnable = VK_FALSE, + .attachmentCount = 1, + .pAttachments = &color_blend_attachment}; + + VkPipelineLayoutCreateInfo pipeline_layout_info = { + .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO, + .setLayoutCount = 1, + .pSetLayouts = &renderer->descriptor_set_layout}; + + if (vkCreatePipelineLayout(renderer->device.device, &pipeline_layout_info, + nullptr, + &renderer->pipeline_layout) != VK_SUCCESS) { + VGLTF_LOG_ERR("Couldn't create pipeline layout"); + goto destroy_frag_shader_module; + } + + VkGraphicsPipelineCreateInfo pipeline_info = { + .sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO, + .stageCount = 2, + .pStages = shader_stages, + .pVertexInputState = &vertex_input_info, + .pInputAssemblyState = &input_assembly, + .pViewportState = &viewport_state, + .pRasterizationState = &rasterizer, + .pMultisampleState = &multisampling, + .pColorBlendState = &color_blending, + .pDepthStencilState = &depth_stencil, + .pDynamicState = &dynamic_state, + .layout = 
renderer->pipeline_layout, + .renderPass = renderer->render_pass, + .subpass = 0, + }; + + if (vkCreateGraphicsPipelines(renderer->device.device, VK_NULL_HANDLE, 1, + &pipeline_info, nullptr, + &renderer->graphics_pipeline) != VK_SUCCESS) { + VGLTF_LOG_ERR("Couldn't create pipeline"); + goto destroy_pipeline_layout; + } + + vkDestroyShaderModule(renderer->device.device, triangle_shader_frag_module, + nullptr); + vkDestroyShaderModule(renderer->device.device, triangle_shader_vert_module, + nullptr); + return true; +destroy_pipeline_layout: + vkDestroyPipelineLayout(renderer->device.device, renderer->pipeline_layout, + nullptr); +destroy_frag_shader_module: + vkDestroyShaderModule(renderer->device.device, triangle_shader_frag_module, + nullptr); +destroy_vert_shader_module: + vkDestroyShaderModule(renderer->device.device, triangle_shader_vert_module, + nullptr); +err: + return false; +} + +static bool +vgltf_renderer_create_framebuffers(struct vgltf_renderer *renderer) { + for (uint32_t i = 0; i < renderer->swapchain.swapchain_image_count; i++) { + VkImageView attachments[] = {renderer->swapchain.swapchain_image_views[i], + renderer->depth_image_view}; + int attachment_count = sizeof(attachments) / sizeof(attachments[0]); + + VkFramebufferCreateInfo framebuffer_info = { + .sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, + .renderPass = renderer->render_pass, + .attachmentCount = attachment_count, + .pAttachments = attachments, + .width = renderer->swapchain.swapchain_extent.width, + .height = renderer->swapchain.swapchain_extent.height, + .layers = 1}; + + if (vkCreateFramebuffer(renderer->device.device, &framebuffer_info, nullptr, + &renderer->swapchain_framebuffers[i]) != + VK_SUCCESS) { + VGLTF_LOG_ERR("Failed to create framebuffer"); + goto err; + } + } + + return true; +err: + return false; +} + +static bool +vgltf_renderer_create_command_pool(struct vgltf_renderer *renderer) { + struct queue_family_indices queue_family_indices = {}; + if 
(!queue_family_indices_for_device(&queue_family_indices, + renderer->device.physical_device, + renderer->surface.surface)) { + VGLTF_LOG_ERR("Couldn't fetch queue family indices"); + goto err; + } + + VkCommandPoolCreateInfo pool_info = { + .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, + .flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, + .queueFamilyIndex = queue_family_indices.graphics_family}; + + if (vkCreateCommandPool(renderer->device.device, &pool_info, nullptr, + &renderer->command_pool) != VK_SUCCESS) { + VGLTF_LOG_ERR("Couldn't create command pool"); + goto err; + } + + return true; +err: + return false; +} + +static VkCommandBuffer +begin_single_time_commands(struct vgltf_renderer *renderer) { + VkCommandBufferAllocateInfo allocate_info = { + .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, + .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY, + .commandPool = renderer->command_pool, + .commandBufferCount = 1}; + + VkCommandBuffer command_buffer; + vkAllocateCommandBuffers(renderer->device.device, &allocate_info, + &command_buffer); + + VkCommandBufferBeginInfo begin_info = { + .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, + .flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT}; + + vkBeginCommandBuffer(command_buffer, &begin_info); + + return command_buffer; +} + +static void end_single_time_commands(struct vgltf_renderer *renderer, + VkCommandBuffer command_buffer) { + vkEndCommandBuffer(command_buffer); + VkSubmitInfo submit_info = {.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO, + .commandBufferCount = 1, + .pCommandBuffers = &command_buffer}; + + vkQueueSubmit(renderer->device.graphics_queue, 1, &submit_info, + VK_NULL_HANDLE); + vkQueueWaitIdle(renderer->device.graphics_queue); + vkFreeCommandBuffers(renderer->device.device, renderer->command_pool, 1, + &command_buffer); +} + +static bool vgltf_renderer_copy_buffer(struct vgltf_renderer *renderer, + VkBuffer src_buffer, VkBuffer dst_buffer, + VkDeviceSize size) { + VkCommandBuffer 
command_buffer = begin_single_time_commands(renderer); + VkBufferCopy copy_region = {.size = size}; + vkCmdCopyBuffer(command_buffer, src_buffer, dst_buffer, 1, &copy_region); + end_single_time_commands(renderer, command_buffer); + return true; +} + +static void vgltf_renderer_create_image( + struct vgltf_renderer *renderer, uint32_t width, uint32_t height, + uint32_t mip_level_count, VkFormat format, VkImageTiling tiling, + VkImageUsageFlags usage, VkMemoryPropertyFlags properties, + struct vgltf_renderer_allocated_image *image) { + + vmaCreateImage( + renderer->device.allocator, + &(const VkImageCreateInfo){ + .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, + .imageType = VK_IMAGE_TYPE_2D, + .extent = {width, height, 1}, + .mipLevels = mip_level_count, + .arrayLayers = 1, + .format = format, + .tiling = tiling, + .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED, + .usage = usage, + .sharingMode = VK_SHARING_MODE_EXCLUSIVE, + .samples = VK_SAMPLE_COUNT_1_BIT, + }, + &(const VmaAllocationCreateInfo){.usage = VMA_MEMORY_USAGE_GPU_ONLY, + .requiredFlags = properties}, + &image->image, &image->allocation, &image->info); +} + +static bool has_stencil_component(VkFormat format) { + return format == VK_FORMAT_D32_SFLOAT_S8_UINT || + format == VK_FORMAT_D24_UNORM_S8_UINT; +} + +static bool transition_image_layout(struct vgltf_renderer *renderer, + VkImage image, VkFormat format, + VkImageLayout old_layout, + VkImageLayout new_layout, + uint32_t mip_level_count) { + (void)format; + VkCommandBuffer command_buffer = begin_single_time_commands(renderer); + VkImageMemoryBarrier barrier = { + .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, + .oldLayout = old_layout, + .newLayout = new_layout, + .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, + .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, + .image = image, + .subresourceRange = {.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, + .baseMipLevel = 0, + .levelCount = mip_level_count, + .baseArrayLayer = 0, + .layerCount = 1}, + .srcAccessMask = 0, 
+ .dstAccessMask = 0}; + + if (new_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL) { + barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; + + if (has_stencil_component(format)) { + barrier.subresourceRange.aspectMask |= VK_IMAGE_ASPECT_DEPTH_BIT; + } + } else { + barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; + } + + VkPipelineStageFlags source_stage; + VkPipelineStageFlags destination_stage; + if (old_layout == VK_IMAGE_LAYOUT_UNDEFINED && + new_layout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) { + barrier.srcAccessMask = 0; + barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT; + source_stage = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT; + destination_stage = VK_PIPELINE_STAGE_TRANSFER_BIT; + } else if (old_layout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL && + new_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) { + barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT; + barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT; + source_stage = VK_PIPELINE_STAGE_TRANSFER_BIT; + destination_stage = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT; + } else if (old_layout == VK_IMAGE_LAYOUT_UNDEFINED && + new_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL) { + barrier.srcAccessMask = 0; + barrier.dstAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | + VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT; + source_stage = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT; + destination_stage = VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT; + } else { + goto err; + } + + vkCmdPipelineBarrier(command_buffer, source_stage, destination_stage, 0, 0, + nullptr, 0, nullptr, 1, &barrier); + + end_single_time_commands(renderer, command_buffer); + return true; +err: + return false; +} + +void copy_buffer_to_image(struct vgltf_renderer *renderer, VkBuffer buffer, + VkImage image, uint32_t width, uint32_t height) { + VkCommandBuffer command_buffer = begin_single_time_commands(renderer); + VkBufferImageCopy region = { + .bufferOffset = 0, + .bufferRowLength = 0, + 
.bufferImageHeight = 0, + .imageSubresource = {.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, + .mipLevel = 0, + .baseArrayLayer = 0, + .layerCount = 1}, + .imageOffset = {0, 0, 0}, + .imageExtent = {width, height, 1}}; + + vkCmdCopyBufferToImage(command_buffer, buffer, image, + VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region); + + end_single_time_commands(renderer, command_buffer); +} + +static bool +vgltf_renderer_create_depth_resources(struct vgltf_renderer *renderer) { + VkFormat depth_format = find_depth_format(renderer); + vgltf_renderer_create_image( + renderer, renderer->swapchain.swapchain_extent.width, + renderer->swapchain.swapchain_extent.height, 1, depth_format, + VK_IMAGE_TILING_OPTIMAL, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, + VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, &renderer->depth_image); + create_image_view(&renderer->device, renderer->depth_image.image, + depth_format, &renderer->depth_image_view, + VK_IMAGE_ASPECT_DEPTH_BIT, 1); + + transition_image_layout(renderer, renderer->depth_image.image, depth_format, + VK_IMAGE_LAYOUT_UNDEFINED, + VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, 1); + return true; +} + +static bool +vgltf_renderer_create_buffer(struct vgltf_renderer *renderer, VkDeviceSize size, + VkBufferUsageFlags usage, + VkMemoryPropertyFlags properties, + struct vgltf_renderer_allocated_buffer *buffer) { + VkBufferCreateInfo buffer_info = {.sType = + VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, + .size = size, + .usage = usage, + .sharingMode = VK_SHARING_MODE_EXCLUSIVE}; + VmaAllocationCreateInfo alloc_info = { + .usage = VMA_MEMORY_USAGE_AUTO, + .flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT | + VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT, + .preferredFlags = properties}; + + if (vmaCreateBuffer(renderer->device.allocator, &buffer_info, &alloc_info, + &buffer->buffer, &buffer->allocation, + &buffer->info) != VK_SUCCESS) { + VGLTF_LOG_ERR("Failed to create buffer"); + goto err; + } + + return true; +err: + 
return false; +} + +static void generate_mipmaps(struct vgltf_renderer *renderer, VkImage image, + VkFormat image_format, int32_t texture_width, + int32_t texture_height, uint32_t mip_levels) { + VkFormatProperties format_properties; + vkGetPhysicalDeviceFormatProperties(renderer->device.physical_device, + image_format, &format_properties); + if (!(format_properties.optimalTilingFeatures & + VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT)) { + VGLTF_PANIC("Texture image format does not support linear blitting!"); + } + + VkCommandBuffer command_buffer = begin_single_time_commands(renderer); + VkImageMemoryBarrier barrier = { + .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, + .image = image, + .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, + .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, + .subresourceRange = {.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, + .baseArrayLayer = 0, + .layerCount = 1, + .levelCount = 1}}; + + int32_t mip_width = texture_width; + int32_t mip_height = texture_height; + + for (uint32_t i = 1; i < mip_levels; i++) { + barrier.subresourceRange.baseMipLevel = i - 1; + barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL; + barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL; + barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT; + barrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT; + + vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TRANSFER_BIT, + VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 0, + nullptr, 1, &barrier); + VkImageBlit blit = { + .srcOffsets = {{0, 0, 0}, {mip_width, mip_height, 1}}, + .srcSubresource = {.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, + .mipLevel = i - 1, + .baseArrayLayer = 0, + .layerCount = 1}, + .dstOffsets = {{0, 0, 0}, + {mip_width > 1 ? mip_width / 2 : 1, + mip_height > 1 ? 
mip_height / 2 : 1, 1}}, + .dstSubresource = {.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, + .mipLevel = i, + .baseArrayLayer = 0, + .layerCount = 1}, + }; + vkCmdBlitImage(command_buffer, image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, + image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &blit, + VK_FILTER_LINEAR); + barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL; + barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; + barrier.srcAccessMask = VK_ACCESS_TRANSFER_READ_BIT; + barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT; + + vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TRANSFER_BIT, + VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, 0, nullptr, + 0, nullptr, 1, &barrier); + if (mip_width > 1) + mip_width /= 2; + if (mip_height > 1) + mip_height /= 2; + } + barrier.subresourceRange.baseMipLevel = mip_levels - 1; + barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL; + barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; + barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT; + barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT; + vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TRANSFER_BIT, + VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, 0, nullptr, 0, + nullptr, 1, &barrier); + + end_single_time_commands(renderer, command_buffer); +} + +static bool +vgltf_renderer_create_texture_image(struct vgltf_renderer *renderer) { + struct vgltf_image image; + if (!vgltf_image_load_from_file(&image, SV(TEXTURE_PATH))) { + VGLTF_LOG_ERR("Couldn't load image from file"); + goto err; + } + renderer->mip_level_count = + floor(log2(VGLTF_MAX(image.width, image.height))) + 1; + + VkDeviceSize image_size = image.width * image.height * 4; + struct vgltf_renderer_allocated_buffer staging_buffer = {}; + if (!vgltf_renderer_create_buffer(renderer, image_size, + VK_BUFFER_USAGE_TRANSFER_SRC_BIT, + VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | + VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, + &staging_buffer)) { + VGLTF_LOG_ERR("Couldn't create staging buffer"); + goto 
deinit_image; + } + + void *data; + vmaMapMemory(renderer->device.allocator, staging_buffer.allocation, &data); + memcpy(data, image.data, image_size); + vmaUnmapMemory(renderer->device.allocator, staging_buffer.allocation); + + vgltf_renderer_create_image( + renderer, image.width, image.height, renderer->mip_level_count, + VK_FORMAT_R8G8B8A8_SRGB, VK_IMAGE_TILING_OPTIMAL, + VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | + VK_IMAGE_USAGE_SAMPLED_BIT, + VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, &renderer->texture_image); + + transition_image_layout(renderer, renderer->texture_image.image, + VK_FORMAT_R8G8B8A8_SRGB, VK_IMAGE_LAYOUT_UNDEFINED, + VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, + renderer->mip_level_count); + copy_buffer_to_image(renderer, staging_buffer.buffer, + renderer->texture_image.image, image.width, + image.height); + + generate_mipmaps(renderer, renderer->texture_image.image, + VK_FORMAT_R8G8B8A8_SRGB, image.width, image.height, + renderer->mip_level_count); + + vmaDestroyBuffer(renderer->device.allocator, staging_buffer.buffer, + staging_buffer.allocation); + vgltf_image_deinit(&image); + return true; +deinit_image: + vgltf_image_deinit(&image); +err: + return false; +} + +static bool +vgltf_renderer_create_texture_image_view(struct vgltf_renderer *renderer) { + return create_image_view( + &renderer->device, renderer->texture_image.image, VK_FORMAT_R8G8B8A8_SRGB, + &renderer->texture_image_view, VK_IMAGE_ASPECT_COLOR_BIT, + renderer->mip_level_count); +} + +static bool +vgltf_renderer_create_texture_sampler(struct vgltf_renderer *renderer) { + VkPhysicalDeviceProperties properties = {}; + vkGetPhysicalDeviceProperties(renderer->device.physical_device, &properties); + + VkSamplerCreateInfo sampler_info = { + .sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO, + .magFilter = VK_FILTER_LINEAR, + .minFilter = VK_FILTER_LINEAR, + .addressModeU = VK_SAMPLER_ADDRESS_MODE_REPEAT, + .addressModeV = VK_SAMPLER_ADDRESS_MODE_REPEAT, + .addressModeW = 
VK_SAMPLER_ADDRESS_MODE_REPEAT, + .anisotropyEnable = VK_TRUE, + .maxAnisotropy = properties.limits.maxSamplerAnisotropy, + .borderColor = VK_BORDER_COLOR_INT_OPAQUE_BLACK, + .unnormalizedCoordinates = VK_FALSE, + .compareEnable = VK_FALSE, + .compareOp = VK_COMPARE_OP_ALWAYS, + .mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR, + .mipLodBias = 0.f, + .minLod = 0.f, + .maxLod = renderer->mip_level_count}; + + if (vkCreateSampler(renderer->device.device, &sampler_info, nullptr, + &renderer->texture_sampler) != VK_SUCCESS) { + goto err; + } + + return true; +err: + return false; +} + +static void get_file_data(void *ctx, const char *filename, const int is_mtl, + const char *obj_filename, char **data, size_t *len) { + (void)ctx; + (void)is_mtl; + + if (!filename) { + VGLTF_LOG_ERR("Null filename"); + *data = NULL; + *len = 0; + return; + } + *data = vgltf_platform_read_file_to_string(obj_filename, len); +} + +static bool load_model(struct vgltf_renderer *renderer) { + tinyobj_attrib_t attrib; + tinyobj_shape_t *shapes = nullptr; + size_t shape_count; + tinyobj_material_t *materials = nullptr; + size_t material_count; + + if ((tinyobj_parse_obj(&attrib, &shapes, &shape_count, &materials, + &material_count, MODEL_PATH, get_file_data, nullptr, + TINYOBJ_FLAG_TRIANGULATE)) != TINYOBJ_SUCCESS) { + VGLTF_LOG_ERR("Couldn't load obj"); + return false; + } + + for (size_t shape_index = 0; shape_index < shape_count; shape_index++) { + tinyobj_shape_t *shape = &shapes[shape_index]; + unsigned int face_offset = shape->face_offset; + for (size_t face_index = face_offset; + face_index < face_offset + shape->length; face_index++) { + float v[3][3]; + float t[3][2]; + + tinyobj_vertex_index_t idx0 = attrib.faces[face_index * 3 + 0]; + tinyobj_vertex_index_t idx1 = attrib.faces[face_index * 3 + 1]; + tinyobj_vertex_index_t idx2 = attrib.faces[face_index * 3 + 2]; + + for (int k = 0; k < 3; k++) { + int f0 = idx0.v_idx; + int f1 = idx1.v_idx; + int f2 = idx2.v_idx; + + v[0][k] = 
attrib.vertices[3 * (size_t)f0 + k]; + v[1][k] = attrib.vertices[3 * (size_t)f1 + k]; + v[2][k] = attrib.vertices[3 * (size_t)f2 + k]; + } + + for (int k = 0; k < 2; k++) { + int t0 = idx0.vt_idx; + int t1 = idx1.vt_idx; + int t2 = idx2.vt_idx; + + t[0][k] = attrib.texcoords[2 * (size_t)t0 + k]; + t[1][k] = attrib.texcoords[2 * (size_t)t1 + k]; + t[2][k] = attrib.texcoords[2 * (size_t)t2 + k]; + } + + for (int k = 0; k < 3; k++) { + renderer->vertices[renderer->vertex_count++] = (struct vgltf_vertex){ + .position = {v[k][0], v[k][1], v[k][2]}, + .texture_coordinates = {t[k][0], 1.f - t[k][1]}, + .color = {1.f, 1.f, 1.f}}; + renderer->indices[renderer->index_count++] = renderer->index_count; + } + } + tinyobj_attrib_free(&attrib); + tinyobj_shapes_free(shapes, shape_count); + tinyobj_materials_free(materials, material_count); + } + return true; +} + +static bool +vgltf_renderer_create_vertex_buffer(struct vgltf_renderer *renderer) { + VkDeviceSize buffer_size = + renderer->vertex_count * sizeof(struct vgltf_vertex); + + struct vgltf_renderer_allocated_buffer staging_buffer = {}; + if (!vgltf_renderer_create_buffer(renderer, buffer_size, + VK_BUFFER_USAGE_TRANSFER_SRC_BIT, + VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | + VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, + &staging_buffer)) { + VGLTF_LOG_ERR("Failed to create transfer buffer"); + goto err; + } + + void *data; + vmaMapMemory(renderer->device.allocator, staging_buffer.allocation, &data); + memcpy(data, renderer->vertices, + renderer->vertex_count * sizeof(struct vgltf_vertex)); + vmaUnmapMemory(renderer->device.allocator, staging_buffer.allocation); + + if (!vgltf_renderer_create_buffer( + renderer, buffer_size, + VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, + VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, &renderer->vertex_buffer)) { + VGLTF_LOG_ERR("Failed to create vertex buffer"); + goto destroy_staging_buffer; + } + + vgltf_renderer_copy_buffer(renderer, staging_buffer.buffer, + 
renderer->vertex_buffer.buffer, buffer_size); + vmaDestroyBuffer(renderer->device.allocator, staging_buffer.buffer, + staging_buffer.allocation); + return true; +destroy_staging_buffer: + vmaDestroyBuffer(renderer->device.allocator, staging_buffer.buffer, + staging_buffer.allocation); +err: + return false; +} + +static bool +vgltf_renderer_create_index_buffer(struct vgltf_renderer *renderer) { + VkDeviceSize buffer_size = renderer->index_count * sizeof(uint16_t); + struct vgltf_renderer_allocated_buffer staging_buffer = {}; + if (!vgltf_renderer_create_buffer(renderer, buffer_size, + VK_BUFFER_USAGE_TRANSFER_SRC_BIT, + VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | + VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, + &staging_buffer)) { + VGLTF_LOG_ERR("Failed to create transfer buffer"); + goto err; + } + + void *data; + vmaMapMemory(renderer->device.allocator, staging_buffer.allocation, &data); + memcpy(data, renderer->indices, renderer->index_count * sizeof(uint16_t)); + vmaUnmapMemory(renderer->device.allocator, staging_buffer.allocation); + + if (!vgltf_renderer_create_buffer( + renderer, buffer_size, + VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_INDEX_BUFFER_BIT, + VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, &renderer->index_buffer)) { + VGLTF_LOG_ERR("Failed to create index buffer"); + goto destroy_staging_buffer; + } + vgltf_renderer_copy_buffer(renderer, staging_buffer.buffer, + renderer->index_buffer.buffer, buffer_size); + vmaDestroyBuffer(renderer->device.allocator, staging_buffer.buffer, + staging_buffer.allocation); + return true; + +destroy_staging_buffer: + vmaDestroyBuffer(renderer->device.allocator, staging_buffer.buffer, + staging_buffer.allocation); +err: + return false; +} + +static bool +vgltf_renderer_create_command_buffer(struct vgltf_renderer *renderer) { + VkCommandBufferAllocateInfo allocate_info = { + .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, + .commandPool = renderer->command_pool, + .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY, + 
.commandBufferCount = VGLTF_RENDERER_MAX_FRAME_IN_FLIGHT_COUNT}; + + if (vkAllocateCommandBuffers(renderer->device.device, &allocate_info, + renderer->command_buffer) != VK_SUCCESS) { + VGLTF_LOG_ERR("Couldn't allocate command buffers"); + goto err; + } + + return true; +err: + return false; +} + +static bool +vgltf_renderer_create_sync_objects(struct vgltf_renderer *renderer) { + VkSemaphoreCreateInfo semaphore_info = { + .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO, + }; + + VkFenceCreateInfo fence_info = {.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, + .flags = VK_FENCE_CREATE_SIGNALED_BIT}; + + int frame_in_flight_index = 0; + for (; frame_in_flight_index < VGLTF_RENDERER_MAX_FRAME_IN_FLIGHT_COUNT; + frame_in_flight_index++) { + if (vkCreateSemaphore( + renderer->device.device, &semaphore_info, nullptr, + &renderer->image_available_semaphores[frame_in_flight_index]) != + VK_SUCCESS || + vkCreateSemaphore( + renderer->device.device, &semaphore_info, nullptr, + &renderer->render_finished_semaphores[frame_in_flight_index]) != + VK_SUCCESS || + vkCreateFence(renderer->device.device, &fence_info, nullptr, + &renderer->in_flight_fences[frame_in_flight_index]) != + VK_SUCCESS) { + VGLTF_LOG_ERR("Couldn't create sync objects"); + goto err; + } + } + + return true; +err: + for (int frame_in_flight_to_delete_index = 0; + frame_in_flight_to_delete_index < frame_in_flight_index; + frame_in_flight_to_delete_index++) { + vkDestroyFence(renderer->device.device, + renderer->in_flight_fences[frame_in_flight_index], nullptr); + vkDestroySemaphore( + renderer->device.device, + renderer->render_finished_semaphores[frame_in_flight_index], nullptr); + vkDestroySemaphore( + renderer->device.device, + renderer->image_available_semaphores[frame_in_flight_index], nullptr); + } + return false; +} + +static bool vgltf_vk_swapchain_init(struct vgltf_vk_swapchain *swapchain, + struct vgltf_vk_device *device, + struct vgltf_vk_surface *surface, + struct vgltf_window_size *window_size) { 
+ if (!create_swapchain(swapchain, device, surface, window_size)) { + VGLTF_LOG_ERR("Couldn't create swapchain"); + goto err; + } + + if (!create_swapchain_image_views(swapchain, device)) { + VGLTF_LOG_ERR("Couldn't create image views"); + goto destroy_swapchain; + } + + return true; +destroy_swapchain: + vkDestroySwapchainKHR(device->device, swapchain->swapchain, nullptr); +err: + return false; +} + +static void vgltf_vk_swapchain_deinit(struct vgltf_vk_swapchain *swapchain, + struct vgltf_vk_device *device) { + for (uint32_t swapchain_image_view_index = 0; + swapchain_image_view_index < swapchain->swapchain_image_count; + swapchain_image_view_index++) { + vkDestroyImageView( + device->device, + swapchain->swapchain_image_views[swapchain_image_view_index], nullptr); + } + vkDestroySwapchainKHR(device->device, swapchain->swapchain, nullptr); +} + +static void vgltf_renderer_cleanup_swapchain(struct vgltf_renderer *renderer) { + vkDestroyImageView(renderer->device.device, renderer->depth_image_view, + nullptr); + vmaDestroyImage(renderer->device.allocator, renderer->depth_image.image, + renderer->depth_image.allocation); + + for (uint32_t framebuffer_index = 0; + framebuffer_index < renderer->swapchain.swapchain_image_count; + framebuffer_index++) { + vkDestroyFramebuffer(renderer->device.device, + renderer->swapchain_framebuffers[framebuffer_index], + nullptr); + } + + vgltf_vk_swapchain_deinit(&renderer->swapchain, &renderer->device); +} + +static bool vgltf_renderer_recreate_swapchain(struct vgltf_renderer *renderer) { + vkDeviceWaitIdle(renderer->device.device); + vgltf_renderer_cleanup_swapchain(renderer); + + // TODO add error handling + create_swapchain(&renderer->swapchain, &renderer->device, &renderer->surface, + &renderer->window_size); + create_swapchain_image_views(&renderer->swapchain, &renderer->device); + vgltf_renderer_create_depth_resources(renderer); + vgltf_renderer_create_framebuffers(renderer); + return true; +} + +static void 
vgltf_renderer_triangle_pass(struct vgltf_renderer *renderer, + uint32_t swapchain_image_index) { + VkRenderPassBeginInfo render_pass_info = { + .sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, + .renderPass = renderer->render_pass, + .framebuffer = renderer->swapchain_framebuffers[swapchain_image_index], + .renderArea = {.offset = {}, + .extent = renderer->swapchain.swapchain_extent}, + .clearValueCount = 2, + .pClearValues = + (const VkClearValue[]){{.color = {.float32 = {0.f, 0.f, 0.f, 1.f}}}, + {.depthStencil = {1.0f, 0}}}, + + }; + + vkCmdBeginRenderPass(renderer->command_buffer[renderer->current_frame], + &render_pass_info, VK_SUBPASS_CONTENTS_INLINE); + vkCmdBindPipeline(renderer->command_buffer[renderer->current_frame], + VK_PIPELINE_BIND_POINT_GRAPHICS, + renderer->graphics_pipeline); + VkViewport viewport = { + .x = 0.f, + .y = 0.f, + .width = (float)renderer->swapchain.swapchain_extent.width, + .height = (float)renderer->swapchain.swapchain_extent.height, + .minDepth = 0.f, + .maxDepth = 1.f}; + vkCmdSetViewport(renderer->command_buffer[renderer->current_frame], 0, 1, + &viewport); + VkRect2D scissor = {.offset = {}, + .extent = renderer->swapchain.swapchain_extent}; + vkCmdSetScissor(renderer->command_buffer[renderer->current_frame], 0, 1, + &scissor); + + VkBuffer vertex_buffers[] = {renderer->vertex_buffer.buffer}; + VkDeviceSize offsets[] = {0}; + vkCmdBindVertexBuffers(renderer->command_buffer[renderer->current_frame], 0, + 1, vertex_buffers, offsets); + vkCmdBindIndexBuffer(renderer->command_buffer[renderer->current_frame], + renderer->index_buffer.buffer, 0, VK_INDEX_TYPE_UINT16); + + vkCmdBindDescriptorSets( + renderer->command_buffer[renderer->current_frame], + VK_PIPELINE_BIND_POINT_GRAPHICS, renderer->pipeline_layout, 0, 1, + &renderer->descriptor_sets[renderer->current_frame], 0, nullptr); + vkCmdDrawIndexed(renderer->command_buffer[renderer->current_frame], + renderer->index_count, 1, 0, 0, 0); + + 
vkCmdEndRenderPass(renderer->command_buffer[renderer->current_frame]); +} + +static void update_uniform_buffer(struct vgltf_renderer *renderer, + uint32_t current_frame) { + static long long start_time_nanoseconds = 0; + if (start_time_nanoseconds == 0) { + if (!vgltf_platform_get_current_time_nanoseconds(&start_time_nanoseconds)) { + VGLTF_LOG_ERR("Couldn't get current time"); + } + } + + long long current_time_nanoseconds = 0; + if (!vgltf_platform_get_current_time_nanoseconds(&current_time_nanoseconds)) { + VGLTF_LOG_ERR("Couldn't get current time"); + } + + long elapsed_time_nanoseconds = + current_time_nanoseconds - start_time_nanoseconds; + float elapsed_time_seconds = elapsed_time_nanoseconds / 1e9f; + VGLTF_LOG_INFO("Elapsed time: %f", elapsed_time_seconds); + + vgltf_mat4 model_matrix; + vgltf_mat4_rotate(model_matrix, (vgltf_mat4)VGLTF_MAT4_IDENTITY, + elapsed_time_seconds * VGLTF_MATHS_DEG_TO_RAD(90.0f), + (vgltf_vec3){0.f, 0.f, 1.f}); + + vgltf_mat4 view_matrix; + vgltf_mat4_look_at(view_matrix, (vgltf_vec3){2.f, 2.f, 2.f}, + (vgltf_vec3){0.f, 0.f, 0.f}, (vgltf_vec3){0.f, 0.f, 1.f}); + + vgltf_mat4 projection_matrix; + vgltf_mat4_perspective(projection_matrix, VGLTF_MATHS_DEG_TO_RAD(45.f), + (float)renderer->swapchain.swapchain_extent.width / + (float)renderer->swapchain.swapchain_extent.height, + 0.1f, 10.f); + projection_matrix[1 * 4 + 1] *= -1; + + struct vgltf_renderer_uniform_buffer_object ubo = {}; + memcpy(ubo.model, model_matrix, sizeof(vgltf_mat4)); + memcpy(ubo.view, view_matrix, sizeof(vgltf_mat4)); + memcpy(ubo.projection, projection_matrix, sizeof(vgltf_mat4)); + memcpy(renderer->mapped_uniform_buffers[current_frame], &ubo, sizeof(ubo)); +} + +bool vgltf_renderer_render_frame(struct vgltf_renderer *renderer) { + vkWaitForFences(renderer->device.device, 1, + &renderer->in_flight_fences[renderer->current_frame], VK_TRUE, + UINT64_MAX); + + uint32_t image_index; + VkResult acquire_swapchain_image_result = vkAcquireNextImageKHR( + 
renderer->device.device, renderer->swapchain.swapchain, UINT64_MAX, + renderer->image_available_semaphores[renderer->current_frame], + VK_NULL_HANDLE, &image_index); + if (acquire_swapchain_image_result == VK_ERROR_OUT_OF_DATE_KHR || + acquire_swapchain_image_result == VK_SUBOPTIMAL_KHR || + renderer->framebuffer_resized) { + renderer->framebuffer_resized = false; + vgltf_renderer_recreate_swapchain(renderer); + return true; + } else if (acquire_swapchain_image_result != VK_SUCCESS) { + VGLTF_LOG_ERR("Failed to acquire a swapchain image"); + goto err; + } + + vkResetFences(renderer->device.device, 1, + &renderer->in_flight_fences[renderer->current_frame]); + + vkResetCommandBuffer(renderer->command_buffer[renderer->current_frame], 0); + VkCommandBufferBeginInfo begin_info = { + .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, + }; + + if (vkBeginCommandBuffer(renderer->command_buffer[renderer->current_frame], + &begin_info) != VK_SUCCESS) { + VGLTF_LOG_ERR("Failed to begin recording command buffer"); + goto err; + } + + vgltf_renderer_triangle_pass(renderer, image_index); + + if (vkEndCommandBuffer(renderer->command_buffer[renderer->current_frame]) != + VK_SUCCESS) { + VGLTF_LOG_ERR("Failed to record command buffer"); + goto err; + } + + update_uniform_buffer(renderer, renderer->current_frame); + + VkSubmitInfo submit_info = { + .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO, + }; + + VkSemaphore wait_semaphores[] = { + renderer->image_available_semaphores[renderer->current_frame]}; + VkPipelineStageFlags wait_stages[] = { + VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT}; + submit_info.waitSemaphoreCount = 1; + submit_info.pWaitSemaphores = wait_semaphores; + submit_info.pWaitDstStageMask = wait_stages; + submit_info.commandBufferCount = 1; + submit_info.pCommandBuffers = + &renderer->command_buffer[renderer->current_frame]; + + VkSemaphore signal_semaphores[] = { + renderer->render_finished_semaphores[renderer->current_frame]}; + submit_info.signalSemaphoreCount = 1; 
+ submit_info.pSignalSemaphores = signal_semaphores; + if (vkQueueSubmit(renderer->device.graphics_queue, 1, &submit_info, + renderer->in_flight_fences[renderer->current_frame]) != + VK_SUCCESS) { + VGLTF_LOG_ERR("Failed to submit draw command buffer"); + goto err; + } + + VkPresentInfoKHR present_info = {.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR, + .waitSemaphoreCount = 1, + .pWaitSemaphores = signal_semaphores}; + + VkSwapchainKHR swapchains[] = {renderer->swapchain.swapchain}; + present_info.swapchainCount = 1; + present_info.pSwapchains = swapchains; + present_info.pImageIndices = &image_index; + VkResult result = + vkQueuePresentKHR(renderer->device.present_queue, &present_info); + if (result == VK_ERROR_OUT_OF_DATE_KHR || result == VK_SUBOPTIMAL_KHR) { + vgltf_renderer_recreate_swapchain(renderer); + } else if (acquire_swapchain_image_result != VK_SUCCESS) { + VGLTF_LOG_ERR("Failed to acquire a swapchain image"); + goto err; + } + renderer->current_frame = + (renderer->current_frame + 1) % VGLTF_RENDERER_MAX_FRAME_IN_FLIGHT_COUNT; + return true; +err: + return false; +} +static bool +vgltf_renderer_create_uniform_buffers(struct vgltf_renderer *renderer) { + VkDeviceSize buffer_size = + sizeof(struct vgltf_renderer_uniform_buffer_object); + + for (int i = 0; i < VGLTF_RENDERER_MAX_FRAME_IN_FLIGHT_COUNT; i++) { + vgltf_renderer_create_buffer(renderer, buffer_size, + VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, + VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | + VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, + &renderer->uniform_buffers[i]); + vmaMapMemory(renderer->device.allocator, + renderer->uniform_buffers[i].allocation, + &renderer->mapped_uniform_buffers[i]); + } + + return true; +} + +static bool +vgltf_renderer_create_descriptor_pool(struct vgltf_renderer *renderer) { + VkDescriptorPoolSize pool_sizes[] = { + (VkDescriptorPoolSize){.type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, + .descriptorCount = + VGLTF_RENDERER_MAX_FRAME_IN_FLIGHT_COUNT}, + (VkDescriptorPoolSize){.type = 
VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, + .descriptorCount = + VGLTF_RENDERER_MAX_FRAME_IN_FLIGHT_COUNT}}; + int pool_size_count = sizeof(pool_sizes) / sizeof(pool_sizes[0]); + + VkDescriptorPoolCreateInfo pool_info = { + .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO, + .poolSizeCount = pool_size_count, + .pPoolSizes = pool_sizes, + .maxSets = VGLTF_RENDERER_MAX_FRAME_IN_FLIGHT_COUNT}; + + if (vkCreateDescriptorPool(renderer->device.device, &pool_info, nullptr, + &renderer->descriptor_pool) != VK_SUCCESS) { + VGLTF_LOG_ERR("Couldn't create uniform descriptor pool"); + goto err; + } + + return true; +err: + return false; +} +static bool +vgltf_renderer_create_descriptor_sets(struct vgltf_renderer *renderer) { + VkDescriptorSetLayout layouts[VGLTF_RENDERER_MAX_FRAME_IN_FLIGHT_COUNT] = {}; + for (int layout_index = 0; + layout_index < VGLTF_RENDERER_MAX_FRAME_IN_FLIGHT_COUNT; + layout_index++) { + layouts[layout_index] = renderer->descriptor_set_layout; + } + + VkDescriptorSetAllocateInfo alloc_info = { + .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO, + .descriptorPool = renderer->descriptor_pool, + .descriptorSetCount = VGLTF_RENDERER_MAX_FRAME_IN_FLIGHT_COUNT, + .pSetLayouts = layouts}; + + if (vkAllocateDescriptorSets(renderer->device.device, &alloc_info, + renderer->descriptor_sets) != VK_SUCCESS) { + VGLTF_LOG_ERR("Couldn't create descriptor sets"); + goto err; + } + + for (int set_index = 0; set_index < VGLTF_RENDERER_MAX_FRAME_IN_FLIGHT_COUNT; + set_index++) { + VkDescriptorBufferInfo buffer_info = { + .buffer = renderer->uniform_buffers[set_index].buffer, + .offset = 0, + .range = sizeof(struct vgltf_renderer_uniform_buffer_object)}; + + VkDescriptorImageInfo image_info = { + .imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, + .imageView = renderer->texture_image_view, + .sampler = renderer->texture_sampler, + }; + + VkWriteDescriptorSet descriptor_writes[] = { + (VkWriteDescriptorSet){.sType = 
VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, + .dstSet = renderer->descriptor_sets[set_index], + .dstBinding = 0, + .dstArrayElement = 0, + .descriptorType = + VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, + .descriptorCount = 1, + .pBufferInfo = &buffer_info}, + + (VkWriteDescriptorSet){.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, + .dstSet = renderer->descriptor_sets[set_index], + .dstBinding = 1, + .dstArrayElement = 0, + .descriptorType = + VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, + .descriptorCount = 1, + .pImageInfo = &image_info}}; + int descriptor_write_count = + sizeof(descriptor_writes) / sizeof(descriptor_writes[0]); + + vkUpdateDescriptorSets(renderer->device.device, descriptor_write_count, + descriptor_writes, 0, nullptr); + } + + return true; +err: + return false; +} + +static bool vgltf_vk_device_init(struct vgltf_vk_device *device, + struct vgltf_vk_instance *instance, + struct vgltf_vk_surface *surface) { + if (!pick_physical_device(&device->physical_device, instance, + surface->surface)) { + VGLTF_LOG_ERR("Couldn't pick physical device"); + goto err; + } + + if (!create_logical_device(&device->device, &device->graphics_queue, + &device->present_queue, device->physical_device, + surface->surface)) { + VGLTF_LOG_ERR("Couldn't pick logical device"); + goto err; + } + + if (!create_allocator(&device->allocator, device, instance)) { + VGLTF_LOG_ERR("Couldn't create allocator"); + goto destroy_logical_device; + } + + return true; +destroy_logical_device: + vkDestroyDevice(device->device, nullptr); +err: + return false; +} + +static void vgltf_vk_device_deinit(struct vgltf_vk_device *device) { + vmaDestroyAllocator(device->allocator); + vkDestroyDevice(device->device, nullptr); +} + +bool vgltf_renderer_init(struct vgltf_renderer *renderer, + struct vgltf_platform *platform) { + if (!vgltf_vk_instance_init(&renderer->instance, platform)) { + VGLTF_LOG_ERR("instance creation failed"); + goto err; + } + vgltf_renderer_setup_debug_messenger(renderer); + if 
(!vgltf_vk_surface_init(&renderer->surface, &renderer->instance, + platform)) { + goto destroy_instance; + } + + if (!vgltf_vk_device_init(&renderer->device, &renderer->instance, + &renderer->surface)) { + VGLTF_LOG_ERR("Device creation failed"); + goto destroy_surface; + } + + struct vgltf_window_size window_size = {800, 600}; + if (!vgltf_platform_get_window_size(platform, &window_size)) { + VGLTF_LOG_ERR("Couldn't get window size"); + goto destroy_device; + } + renderer->window_size = window_size; + + if (!vgltf_vk_swapchain_init(&renderer->swapchain, &renderer->device, + &renderer->surface, &renderer->window_size)) { + VGLTF_LOG_ERR("Couldn't create swapchain"); + goto destroy_device; + } + + if (!vgltf_renderer_create_render_pass(renderer)) { + VGLTF_LOG_ERR("Couldn't create render pass"); + goto destroy_swapchain; + } + + if (!vgltf_renderer_create_descriptor_set_layout(renderer)) { + VGLTF_LOG_ERR("Couldn't create descriptor set layout"); + goto destroy_render_pass; + } + + if (!vgltf_renderer_create_graphics_pipeline(renderer)) { + VGLTF_LOG_ERR("Couldn't create graphics pipeline"); + goto destroy_descriptor_set_layout; + } + + if (!vgltf_renderer_create_command_pool(renderer)) { + VGLTF_LOG_ERR("Couldn't create command pool"); + goto destroy_graphics_pipeline; + } + + if (!vgltf_renderer_create_depth_resources(renderer)) { + VGLTF_LOG_ERR("Couldn't create depth resources"); + goto destroy_command_pool; + } + + if (!vgltf_renderer_create_framebuffers(renderer)) { + VGLTF_LOG_ERR("Couldn't create framebuffers"); + goto destroy_depth_resources; + } + + if (!vgltf_renderer_create_texture_image(renderer)) { + VGLTF_LOG_ERR("Couldn't create texture image"); + goto destroy_frame_buffers; + } + + if (!vgltf_renderer_create_texture_image_view(renderer)) { + VGLTF_LOG_ERR("Couldn't create texture image view"); + goto destroy_texture_image; + } + + if (!vgltf_renderer_create_texture_sampler(renderer)) { + VGLTF_LOG_ERR("Couldn't create texture sampler"); + goto 
destroy_texture_image_view; + } + + if (!load_model(renderer)) { + VGLTF_LOG_ERR("Couldn't load model"); + goto destroy_texture_sampler; + } + + if (!vgltf_renderer_create_vertex_buffer(renderer)) { + VGLTF_LOG_ERR("Couldn't create vertex buffer"); + goto destroy_model; + } + + if (!vgltf_renderer_create_index_buffer(renderer)) { + VGLTF_LOG_ERR("Couldn't create index buffer"); + goto destroy_vertex_buffer; + } + + if (!vgltf_renderer_create_uniform_buffers(renderer)) { + VGLTF_LOG_ERR("Couldn't create uniform buffers"); + goto destroy_index_buffer; + } + + if (!vgltf_renderer_create_descriptor_pool(renderer)) { + VGLTF_LOG_ERR("Couldn't create descriptor pool"); + goto destroy_uniform_buffers; + } + + if (!vgltf_renderer_create_descriptor_sets(renderer)) { + VGLTF_LOG_ERR("Couldn't create descriptor sets"); + goto destroy_descriptor_pool; + } + + if (!vgltf_renderer_create_command_buffer(renderer)) { + VGLTF_LOG_ERR("Couldn't create command buffer"); + goto destroy_descriptor_pool; + } + + if (!vgltf_renderer_create_sync_objects(renderer)) { + VGLTF_LOG_ERR("Couldn't create sync objects"); + goto destroy_descriptor_pool; + } + + return true; + +destroy_descriptor_pool: + vkDestroyDescriptorPool(renderer->device.device, renderer->descriptor_pool, + nullptr); +destroy_uniform_buffers: + for (int i = 0; i < VGLTF_RENDERER_MAX_FRAME_IN_FLIGHT_COUNT; i++) { + vmaDestroyBuffer(renderer->device.allocator, + renderer->uniform_buffers[i].buffer, + renderer->uniform_buffers[i].allocation); + } +destroy_index_buffer: + vmaDestroyBuffer(renderer->device.allocator, renderer->index_buffer.buffer, + renderer->index_buffer.allocation); +destroy_vertex_buffer: + vmaDestroyBuffer(renderer->device.allocator, renderer->vertex_buffer.buffer, + renderer->vertex_buffer.allocation); +destroy_model: + // TODO +destroy_texture_sampler: + vkDestroySampler(renderer->device.device, renderer->texture_sampler, nullptr); +destroy_texture_image_view: + vkDestroyImageView(renderer->device.device, 
renderer->texture_image_view, + nullptr); +destroy_texture_image: + vmaDestroyImage(renderer->device.allocator, renderer->texture_image.image, + renderer->texture_image.allocation); +destroy_depth_resources: + vkDestroyImageView(renderer->device.device, renderer->depth_image_view, + nullptr); + vmaDestroyImage(renderer->device.allocator, renderer->depth_image.image, + renderer->depth_image.allocation); +destroy_command_pool: + vkDestroyCommandPool(renderer->device.device, renderer->command_pool, + nullptr); +destroy_frame_buffers: + for (uint32_t swapchain_framebuffer_index = 0; + swapchain_framebuffer_index < renderer->swapchain.swapchain_image_count; + swapchain_framebuffer_index++) { + vkDestroyFramebuffer( + renderer->device.device, + renderer->swapchain_framebuffers[swapchain_framebuffer_index], nullptr); + } +destroy_graphics_pipeline: + vkDestroyPipeline(renderer->device.device, renderer->graphics_pipeline, + nullptr); + vkDestroyPipelineLayout(renderer->device.device, renderer->pipeline_layout, + nullptr); +destroy_descriptor_set_layout: + vkDestroyDescriptorSetLayout(renderer->device.device, + renderer->descriptor_set_layout, nullptr); +destroy_render_pass: + vkDestroyRenderPass(renderer->device.device, renderer->render_pass, nullptr); +destroy_swapchain: + vgltf_vk_swapchain_deinit(&renderer->swapchain, &renderer->device); +destroy_device: + vgltf_vk_device_deinit(&renderer->device); +destroy_surface: + vgltf_vk_surface_deinit(&renderer->surface, &renderer->instance); +destroy_instance: + if (enable_validation_layers) { + destroy_debug_utils_messenger_ext(renderer->instance.instance, + renderer->debug_messenger, nullptr); + } + vgltf_vk_instance_deinit(&renderer->instance); +err: + return false; +} +void vgltf_renderer_deinit(struct vgltf_renderer *renderer) { + vkDeviceWaitIdle(renderer->device.device); + vgltf_renderer_cleanup_swapchain(renderer); + for (int i = 0; i < VGLTF_RENDERER_MAX_FRAME_IN_FLIGHT_COUNT; i++) { + 
vmaUnmapMemory(renderer->device.allocator, + renderer->uniform_buffers[i].allocation); + vmaDestroyBuffer(renderer->device.allocator, + renderer->uniform_buffers[i].buffer, + renderer->uniform_buffers[i].allocation); + } + vmaDestroyBuffer(renderer->device.allocator, renderer->index_buffer.buffer, + renderer->index_buffer.allocation); + vmaDestroyBuffer(renderer->device.allocator, renderer->vertex_buffer.buffer, + renderer->vertex_buffer.allocation); + vkDestroySampler(renderer->device.device, renderer->texture_sampler, nullptr); + vkDestroyImageView(renderer->device.device, renderer->texture_image_view, + nullptr); + vmaDestroyImage(renderer->device.allocator, renderer->texture_image.image, + renderer->texture_image.allocation); + vkDestroyPipeline(renderer->device.device, renderer->graphics_pipeline, + nullptr); + vkDestroyPipelineLayout(renderer->device.device, renderer->pipeline_layout, + nullptr); + vkDestroyDescriptorPool(renderer->device.device, renderer->descriptor_pool, + nullptr); + vkDestroyDescriptorSetLayout(renderer->device.device, + renderer->descriptor_set_layout, nullptr); + vkDestroyRenderPass(renderer->device.device, renderer->render_pass, nullptr); + for (int i = 0; i < VGLTF_RENDERER_MAX_FRAME_IN_FLIGHT_COUNT; i++) { + vkDestroySemaphore(renderer->device.device, + renderer->image_available_semaphores[i], nullptr); + vkDestroySemaphore(renderer->device.device, + renderer->render_finished_semaphores[i], nullptr); + vkDestroyFence(renderer->device.device, renderer->in_flight_fences[i], + nullptr); + } + vkDestroyCommandPool(renderer->device.device, renderer->command_pool, + nullptr); + vgltf_vk_device_deinit(&renderer->device); + vgltf_vk_surface_deinit(&renderer->surface, &renderer->instance); + if (enable_validation_layers) { + destroy_debug_utils_messenger_ext(renderer->instance.instance, + renderer->debug_messenger, nullptr); + } + vgltf_vk_instance_deinit(&renderer->instance); +} +void vgltf_renderer_on_window_resized(struct vgltf_renderer 
*renderer, + struct vgltf_window_size size) { + if (size.width > 0 && size.height > 0 && + size.width != renderer->window_size.width && + size.height != renderer->window_size.height) { + renderer->window_size = size; + renderer->framebuffer_resized = true; + } +} diff --git a/src/renderer/renderer.h b/src/renderer/renderer.h new file mode 100644 index 0000000..79e1f3d --- /dev/null +++ b/src/renderer/renderer.h @@ -0,0 +1,126 @@ +#ifndef VGLTF_RENDERER_H +#define VGLTF_RENDERER_H + +#include "../maths.h" +#include "../platform.h" +#include "vma_usage.h" +#include <vulkan/vulkan.h> + +struct vgltf_vertex { + vgltf_vec3 position; + vgltf_vec3 color; + vgltf_vec2 texture_coordinates; +}; +VkVertexInputBindingDescription vgltf_vertex_binding_description(void); + +struct vgltf_vertex_input_attribute_descriptions { + VkVertexInputAttributeDescription descriptions[3]; + uint32_t count; +}; +struct vgltf_vertex_input_attribute_descriptions +vgltf_vertex_attribute_descriptions(void); + +struct vgltf_renderer_uniform_buffer_object { + alignas(16) vgltf_mat4 model; + alignas(16) vgltf_mat4 view; + alignas(16) vgltf_mat4 projection; +}; + +struct vgltf_renderer_allocated_buffer { + VkBuffer buffer; + VmaAllocation allocation; + VmaAllocationInfo info; +}; + +struct vgltf_renderer_allocated_image { + VkImage image; + VmaAllocation allocation; + VmaAllocationInfo info; +}; + +struct vgltf_vk_instance { + VkInstance instance; +}; + +struct vgltf_vk_device { + VkPhysicalDevice physical_device; + VkDevice device; + VkQueue graphics_queue; + VkQueue present_queue; + VmaAllocator allocator; +}; + +struct vgltf_vk_surface { + VkSurfaceKHR surface; +}; + +constexpr int VGLTF_RENDERER_MAX_SWAPCHAIN_IMAGE_COUNT = 32; +struct vgltf_vk_swapchain { + VkSwapchainKHR swapchain; + VkFormat swapchain_image_format; + VkImage swapchain_images[VGLTF_RENDERER_MAX_SWAPCHAIN_IMAGE_COUNT]; + VkImageView swapchain_image_views[VGLTF_RENDERER_MAX_SWAPCHAIN_IMAGE_COUNT]; + VkExtent2D swapchain_extent; + 
uint32_t swapchain_image_count; +}; + +struct vgltf_vk_pipeline { + VkPipelineLayout layout; + VkPipeline pipeline; +}; + +constexpr int VGLTF_RENDERER_MAX_FRAME_IN_FLIGHT_COUNT = 2; +struct vgltf_renderer { + struct vgltf_vk_instance instance; + struct vgltf_vk_device device; + VkDebugUtilsMessengerEXT debug_messenger; + struct vgltf_vk_surface surface; + struct vgltf_vk_swapchain swapchain; + struct vgltf_renderer_allocated_image depth_image; + VkImageView depth_image_view; + + VkRenderPass render_pass; + VkDescriptorSetLayout descriptor_set_layout; + + VkDescriptorPool descriptor_pool; + VkDescriptorSet descriptor_sets[VGLTF_RENDERER_MAX_FRAME_IN_FLIGHT_COUNT]; + VkPipelineLayout pipeline_layout; + VkPipeline graphics_pipeline; + + VkFramebuffer swapchain_framebuffers[VGLTF_RENDERER_MAX_SWAPCHAIN_IMAGE_COUNT]; + + VkCommandPool command_pool; + VkCommandBuffer command_buffer[VGLTF_RENDERER_MAX_FRAME_IN_FLIGHT_COUNT]; + VkSemaphore + image_available_semaphores[VGLTF_RENDERER_MAX_FRAME_IN_FLIGHT_COUNT]; + VkSemaphore + render_finished_semaphores[VGLTF_RENDERER_MAX_FRAME_IN_FLIGHT_COUNT]; + VkFence in_flight_fences[VGLTF_RENDERER_MAX_FRAME_IN_FLIGHT_COUNT]; + + struct vgltf_renderer_allocated_buffer + uniform_buffers[VGLTF_RENDERER_MAX_FRAME_IN_FLIGHT_COUNT]; + void *mapped_uniform_buffers[VGLTF_RENDERER_MAX_FRAME_IN_FLIGHT_COUNT]; + + uint32_t mip_level_count; + struct vgltf_renderer_allocated_image texture_image; + VkImageView texture_image_view; + VkSampler texture_sampler; + struct vgltf_vertex vertices[100000]; + int vertex_count; + uint16_t indices[100000]; + int index_count; + struct vgltf_renderer_allocated_buffer vertex_buffer; + struct vgltf_renderer_allocated_buffer index_buffer; + + struct vgltf_window_size window_size; + uint32_t current_frame; + bool framebuffer_resized; +}; +bool vgltf_renderer_init(struct vgltf_renderer *renderer, + struct vgltf_platform *platform); +void vgltf_renderer_deinit(struct vgltf_renderer *renderer); +bool 
vgltf_renderer_render_frame(struct vgltf_renderer *renderer); +void vgltf_renderer_on_window_resized(struct vgltf_renderer *renderer, + struct vgltf_window_size size); + +#endif // VGLTF_RENDERER_H diff --git a/src/renderer/vma_usage.cpp b/src/renderer/vma_usage.cpp new file mode 100644 index 0000000..83006a1 --- /dev/null +++ b/src/renderer/vma_usage.cpp @@ -0,0 +1,4 @@ +#include "vma_usage.h" + +#define VMA_IMPLEMENTATION +#include <vk_mem_alloc.h> diff --git a/src/renderer/vma_usage.h b/src/renderer/vma_usage.h new file mode 100644 index 0000000..e9b5aa4 --- /dev/null +++ b/src/renderer/vma_usage.h @@ -0,0 +1,6 @@ +#ifndef VGLTF_VMA_USAGE_H +#define VGLTF_VMA_USAGE_H + +#include <vk_mem_alloc.h> + +#endif // VGLTF_VMA_USAGE_H diff --git a/src/str.c b/src/str.c new file mode 100644 index 0000000..9c68d43 --- /dev/null +++ b/src/str.c @@ -0,0 +1,181 @@ +#include "str.h" +#include "alloc.h" +#include "hash.h" +#include "platform.h" +#include <assert.h> +#include <stdarg.h> +#include <string.h> + +struct vgltf_string_view vgltf_string_view_from_literal(const char *str) { + assert(str); + size_t length = strlen(str); + return (struct vgltf_string_view){.length = length, .data = str}; +} +struct vgltf_string_view vgltf_string_view_from_string(struct vgltf_string string) { + return (struct vgltf_string_view){.length = string.length, .data = string.data}; +} +char vgltf_string_view_at(const struct vgltf_string_view *string_view, + size_t index) { + assert(string_view); + assert(index < string_view->length); + return string_view->data[index]; +} +bool vgltf_string_view_eq(struct vgltf_string_view view, + struct vgltf_string_view other) { + return view.length == other.length && + (strncmp(view.data, other.data, view.length) == 0); +} +size_t vgltf_string_view_length(const struct vgltf_string_view *string_view) { + assert(string_view); + return string_view->length; +} + +uint64_t vgltf_string_view_hash(const struct vgltf_string_view view) { + return 
vgltf_hash_fnv_1a(view.data, view.length); +} + +int vgltf_string_view_utf8_codepoint_at_offset(struct vgltf_string_view view, + size_t offset, + uint32_t *codepoint) { + assert(codepoint); + assert(offset < view.length); + + const unsigned char *s = (unsigned char *)&view.data[offset]; + + int size; + if ((*s & 0x80) == 0) { + *codepoint = *s; + size = 1; + } else if ((*s & 0xE0) == 0xC0) { + *codepoint = *s & 0x1f; + size = 2; + } else if ((*s & 0xF0) == 0xE0) { + *codepoint = *s & 0x0f; + size = 3; + } else if ((*s & 0xF8) == 0xF0) { + *codepoint = *s & 0x07; + size = 4; + } else { + VGLTF_LOG_ERR("Invalid UTF-8 sequence"); + return 0; + } + + for (int i = 1; i < size; i++) { + if ((s[i] & 0xC0) != 0x80) { + VGLTF_LOG_ERR("Invalid UTF-8 continuation byte"); + return 0; + } + + *codepoint = (*codepoint << 6) | (s[i] & 0x3F); + } + + return size; +} +int vgltf_string_utf8_encode_codepoint(uint32_t codepoint, + char encoded_codepoint[4]) { + assert(encoded_codepoint); + if (codepoint > 0x10FFFF) { + return -1; + } + + if (codepoint <= 0x7F) { + encoded_codepoint[0] = (uint8_t)codepoint; + return 1; + } else if (codepoint <= 0x7FF) { + encoded_codepoint[0] = 0xC0 | ((codepoint >> 6) & 0x1F); + encoded_codepoint[1] = 0x80 | (codepoint & 0x3F); + return 2; + } else if (codepoint <= 0xFFFF) { + encoded_codepoint[0] = 0xE0 | ((codepoint >> 12) & 0x0F); + encoded_codepoint[1] = 0x80 | ((codepoint >> 6) & 0x3F); + encoded_codepoint[2] = 0x80 | (codepoint & 0x3F); + return 3; + } else { + encoded_codepoint[0] = 0xF0 | ((codepoint >> 18) & 0x07); + encoded_codepoint[1] = 0x80 | ((codepoint >> 12) & 0x3F); + encoded_codepoint[2] = 0x80 | ((codepoint >> 6) & 0x3F); + encoded_codepoint[3] = 0x80 | (codepoint & 0x3F); + return 4; + } +} + +struct vgltf_string +vgltf_string_from_null_terminated(struct vgltf_allocator *allocator, + const char *str) { + assert(allocator); + assert(str); + struct vgltf_string string; + size_t length = strlen(str); + char *data = 
vgltf_allocator_allocate(allocator, length + 1); + if (!data) { + VGLTF_PANIC("Couldn't allocate string"); + } + strncpy(data, str, length); + string.length = length; + string.data = data; + return string; +} +struct vgltf_string vgltf_string_clone(struct vgltf_allocator *allocator, + const struct vgltf_string string) { + assert(allocator); + + size_t length = string.length; + char *data = vgltf_allocator_allocate(allocator, length + 1); + memcpy(data, string.data, length); + data[length] = '\0'; + + return (struct vgltf_string){.data = data, .length = length}; +} +struct vgltf_string vgltf_string_concatenate(struct vgltf_allocator *allocator, + struct vgltf_string_view head, + struct vgltf_string_view tail) { + assert(allocator); + size_t length = head.length + tail.length; + char *data = vgltf_allocator_allocate(allocator, length + 1); + memcpy(data, head.data, head.length); + memcpy(data + head.length, tail.data, tail.length); + data[length] = '\0'; + return (struct vgltf_string){.data = data, .length = length}; +} +struct vgltf_string vgltf_string_formatted(struct vgltf_allocator *allocator, + struct vgltf_string_view fmt, ...) 
{ + va_list args; + va_start(args, fmt); + struct vgltf_string formatted_string = + vgltf_string_vformatted(allocator, fmt, args); + va_end(args); + + return formatted_string; +} +struct vgltf_string vgltf_string_vformatted(struct vgltf_allocator *allocator, + struct vgltf_string_view fmt, + va_list args) { + assert(allocator); + char str[1024]; + size_t length = vsnprintf(str, 1024, fmt.data, args); + char *data = vgltf_allocator_allocate(allocator, length + 1); + memcpy(data, str, length); + data[length] = '\0'; + return (struct vgltf_string){.data = data, .length = length}; +} +void vgltf_string_deinit(struct vgltf_allocator *allocator, + struct vgltf_string *string) { + assert(allocator); + assert(string); + vgltf_allocator_free(allocator, string->data); +} +size_t vgltf_string_length(const struct vgltf_string *string) { + return string->length; +} +bool vgltf_string_eq_view(const struct vgltf_string string, + const struct vgltf_string_view view) { + return string.length == view.length && + (strncmp(string.data, view.data, string.length) == 0); +} +uint64_t vgltf_string_hash(const struct vgltf_string string) { + return vgltf_hash_fnv_1a(string.data, string.length); +} +bool vgltf_string_eq(struct vgltf_string string, struct vgltf_string other) { + return string.length == other.length && + (strncmp(string.data, other.data, string.length) == 0); +} diff --git a/src/str.h b/src/str.h new file mode 100644 index 0000000..c0e4e5c --- /dev/null +++ b/src/str.h @@ -0,0 +1,62 @@ +#ifndef VGLTF_STR_H +#define VGLTF_STR_H + +#include "alloc.h" +#include <stdarg.h> +#include <stdbool.h> +#include <stddef.h> +#include <stdint.h> +#include <string.h> // IWYU pragma: keep + +#define SV(str) \ + (struct vgltf_string_view) { .data = str, .length = strlen(str) } + +struct vgltf_string; +struct vgltf_string_view { + const char *data; + size_t length; +}; + +struct vgltf_string_view vgltf_string_view_from_literal(const char *str); +struct vgltf_string_view 
vgltf_string_view_from_string(struct vgltf_string string); +size_t vgltf_string_view_length(const struct vgltf_string_view *string_view); +char vgltf_string_view_at(const struct vgltf_string_view *string_view, + size_t index); +bool vgltf_string_view_eq(struct vgltf_string_view view, + struct vgltf_string_view other); +uint64_t vgltf_string_view_hash(const struct vgltf_string_view view); +// Fetches the next utf8 codepoint in the string at the given offset +// Returns the size of the codepoint in bytes, 0 in case of error +int vgltf_string_view_utf8_codepoint_at_offset(struct vgltf_string_view view, + size_t offset, + uint32_t *codepoint); +// codepoint has to be a char[4] +int vgltf_string_utf8_encode_codepoint(uint32_t codepoint, + char encoded_codepoint[4]); + +struct vgltf_string { + char *data; + size_t length; +}; +struct vgltf_string +vgltf_string_from_null_terminated(struct vgltf_allocator *allocator, + const char *str); +struct vgltf_string vgltf_string_clone(struct vgltf_allocator *allocator, + const struct vgltf_string string); +struct vgltf_string vgltf_string_concatenate(struct vgltf_allocator *allocator, + struct vgltf_string_view head, + struct vgltf_string_view tail); +struct vgltf_string vgltf_string_formatted(struct vgltf_allocator *allocator, + struct vgltf_string_view fmt, ...); +struct vgltf_string vgltf_string_vformatted(struct vgltf_allocator *allocator, + struct vgltf_string_view fmt, + va_list args); +void vgltf_string_deinit(struct vgltf_allocator *allocator, + struct vgltf_string *string); +size_t vgltf_string_length(const struct vgltf_string *string); +bool vgltf_string_eq_view(const struct vgltf_string string, + const struct vgltf_string_view view); +uint64_t vgltf_string_hash(const struct vgltf_string string); +bool vgltf_string_eq(struct vgltf_string string, struct vgltf_string other); + +#endif // VGLTF_STR_H |
