From aff66bb4820e3118f33b901ad47941c125b645ee Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Przemys=C5=82aw=20Gasi=C5=84ski?=
Date: Sat, 26 Oct 2024 00:03:04 +0200
Subject: [PATCH] Continue refactor

---
 src/Context.zig         | 340 ++++++++++++++++++++++++++++++++++++++++
 src/Mesh.zig            |  65 ++++----
 src/MeshModel.zig       |  13 ++
 src/ResourceManager.zig |  55 +++++++
 src/Texture.zig         | 200 +++++++++++++++++++++++
 src/utilities.zig       |  27 ++--
 src/vulkan_renderer.zig | 148 ++++++++++++++++-
 7 files changed, 803 insertions(+), 45 deletions(-)
 create mode 100644 src/Context.zig
 create mode 100644 src/ResourceManager.zig
 create mode 100644 src/Texture.zig

diff --git a/src/Context.zig b/src/Context.zig
new file mode 100644
index 0000000..d90f54c
--- /dev/null
+++ b/src/Context.zig
@@ -0,0 +1,340 @@
+const std = @import("std");
+const builtin = @import("builtin");
+
+const vk = @import("vulkan");
+const sdl = @import("sdl");
+const img = @import("zstbi");
+
+const validation = @import("validation_layers.zig");
+const Swapchain = @import("Swapchain.zig");
+const QueueUtils = @import("queue_utils.zig");
+
+const device_extensions = [_][*:0]const u8{vk.extensions.khr_swapchain.name};
+
+pub const apis: []const vk.ApiInfo = &.{
+    vk.features.version_1_0,
+    vk.features.version_1_1,
+    vk.features.version_1_2,
+    vk.features.version_1_3,
+    vk.extensions.khr_surface,
+    vk.extensions.khr_swapchain,
+    vk.extensions.ext_debug_utils,
+};
+
+const enable_validation_layers = builtin.mode == .Debug;
+const validation_layers = [_][*:0]const u8{"VK_LAYER_KHRONOS_validation"};
+
+const BaseDispatch = vk.BaseWrapper(apis);
+const InstanceDispatch = vk.InstanceWrapper(apis);
+const DeviceDispatch = vk.DeviceWrapper(apis);
+
+pub const Instance = vk.InstanceProxy(apis);
+pub const Device = vk.DeviceProxy(apis);
+pub const Queue = vk.QueueProxy(apis);
+
+// ---
+
+const Self = @This();
+
+allocator: std.mem.Allocator,
+
+vkb: BaseDispatch,
+
+window: sdl.Window,
+
+instance: Instance,
+physical_device: vk.PhysicalDevice,
+device: Device,
+
+command_pool: vk.CommandPool,
+graphics_queue: Queue,
+presentation_queue: Queue,
+surface: vk.SurfaceKHR,
+swapchain: Swapchain,
+
+debug_utils: ?vk.DebugUtilsMessengerEXT,
+
+pub fn init(allocator: std.mem.Allocator, window: sdl.Window) !Self {
+    var self: Self = undefined;
+
+    self.window = window;
+    self.allocator = allocator;
+    self.vkb = try BaseDispatch.load(try sdl.vulkan.getVkGetInstanceProcAddr());
+
+    img.init(allocator);
+
+    try self.createInstance();
+
+    if (enable_validation_layers) {
+        self.debug_utils = try validation.createDebugMessenger(self.instance);
+    } else {
+        self.debug_utils = null;
+    }
+
+    try self.createSurface();
+
+    try self.getPhysicalDevice();
+    try self.createLogicalDevice();
+    try self.createCommandPool();
+    self.swapchain = try Swapchain.create(allocator, self);
+
+    return self;
+}
+
+pub fn deinit(self: *Self) void {
+    if (enable_validation_layers) {
+        self.instance.destroyDebugUtilsMessengerEXT(self.debug_utils.?, null);
+    }
+
+    self.device.destroyCommandPool(self.command_pool, null);
+    self.device.destroyDevice(null);
+    self.instance.destroySurfaceKHR(self.surface, null);
+    self.instance.destroyInstance(null);
+
+    self.allocator.destroy(self.device.wrapper);
+    self.allocator.destroy(self.instance.wrapper);
+
+    img.deinit();
+}
+
+fn createInstance(self: *Self) !void {
+    if (enable_validation_layers and !self.checkValidationLayersSupport()) {
+        // TODO Better error
+        return error.LayerNotPresent;
+    }
+
+    const extensions = try self.getRequiredExtensions();
+    defer self.allocator.free(extensions);
+
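+    // In debug builds getRequiredExtensions appends VK_EXT_debug_utils, so the
+    // list printed here is exactly what is requested from vkCreateInstance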
+    std.debug.print("[Required instance extensions]\n", .{});
+    for (extensions) |ext| {
+        std.debug.print("\t- {s}\n", .{ext});
+    }
+
+    if (!try self.checkInstanceExtensions(&extensions)) {
+        return error.ExtensionNotPresent;
+    }
+
+    const app_info = vk.ApplicationInfo{
+        .p_application_name = "Vulkan SDL Test",
+        .application_version = vk.makeApiVersion(0, 0, 1, 0),
+        .p_engine_name = "Vulkan SDL Test",
+        .engine_version = vk.makeApiVersion(0, 0, 1, 0),
+        .api_version = vk.API_VERSION_1_3,
+    };
+
+    var instance_create_info: vk.InstanceCreateInfo = .{
+        .p_application_info = &app_info,
+        .enabled_extension_count = @intCast(extensions.len),
+        .pp_enabled_extension_names = extensions.ptr,
+    };
+
+    // Declared in the outer scope so the address stored in p_next is still
+    // valid when createInstance runs
+    const debug_create_info = validation.getDebugUtilsCreateInfo();
+
+    if (enable_validation_layers) {
+        instance_create_info.enabled_layer_count = @intCast(validation_layers.len);
+        instance_create_info.pp_enabled_layer_names = &validation_layers;
+        instance_create_info.p_next = &debug_create_info;
+    }
+
+    const instance_handle = try self.vkb.createInstance(&instance_create_info, null);
+    const vki = try self.allocator.create(InstanceDispatch);
+    errdefer self.allocator.destroy(vki);
+    vki.* = try InstanceDispatch.load(instance_handle, self.vkb.dispatch.vkGetInstanceProcAddr);
+
+    self.instance = Instance.init(instance_handle, vki);
+}
+
+fn createSurface(self: *Self) !void {
+    self.surface = try sdl.vulkan.createSurface(self.window, self.instance.handle);
+}
+
+fn getPhysicalDevice(self: *Self) !void {
+    var pdev_count: u32 = 0;
+    _ = try self.instance.enumeratePhysicalDevices(&pdev_count, null);
+
+    const pdevs = try self.allocator.alloc(vk.PhysicalDevice, pdev_count);
+    defer self.allocator.free(pdevs);
+
+    _ = try self.instance.enumeratePhysicalDevices(&pdev_count, pdevs.ptr);
+
+    for (pdevs) |pdev| {
+        if (self.checkDeviceSuitable(pdev)) {
+            self.physical_device = pdev;
+            break;
+        }
+    } else {
+        return error.NoSuitablePhysicalDevice;
+    }
+}
+
+fn createLogicalDevice(self: *Self) !void {
+    const indices = try QueueUtils.getQueueFamilies(self.*, self.physical_device);
+    // 1 is the highest priority
+    const priority = [_]f32{1};
+
+    const qci = [_]vk.DeviceQueueCreateInfo{
+        .{
+            .queue_family_index = indices.graphics_family.?,
+            .queue_count = 1,
+            .p_queue_priorities = &priority,
+        },
+        .{
+            .queue_family_index = indices.presentation_family.?,
+            .queue_count = 1,
+            .p_queue_priorities = &priority,
+        },
+    };
+
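+    // Vulkan disallows duplicate queue family indices in pQueueCreateInfos,
+    // so only the first entry of qci is submitted when both families match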
+    const queue_count: u32 = if (indices.graphics_family.? == indices.presentation_family.?)
+        1
+    else
+        2;
+
+    // Device features
+    const device_features: vk.PhysicalDeviceFeatures = .{
+        .sampler_anisotropy = vk.TRUE, // Enable anisotropy
+    };
+
+    const device_create_info: vk.DeviceCreateInfo = .{
+        .queue_create_info_count = queue_count,
+        .p_queue_create_infos = &qci,
+        .pp_enabled_extension_names = &device_extensions,
+        .enabled_extension_count = @intCast(device_extensions.len),
+        .p_enabled_features = &device_features,
+    };
+
+    const device_handle = try self.instance.createDevice(self.physical_device, &device_create_info, null);
+
+    const vkd = try self.allocator.create(DeviceDispatch);
+    errdefer self.allocator.destroy(vkd);
+    vkd.* = try DeviceDispatch.load(device_handle, self.instance.wrapper.dispatch.vkGetDeviceProcAddr);
+
+    self.device = Device.init(device_handle, vkd);
+
+    const queues = try QueueUtils.getDeviceQueues(self.*);
+
+    self.graphics_queue = Queue.init(queues[0], self.device.wrapper);
+    self.presentation_queue = Queue.init(queues[1], self.device.wrapper);
+}
+
+fn createCommandPool(self: *Self) !void {
+    // Get indices of queue families from device
+    const queue_family_indices = try QueueUtils.getQueueFamilies(self.*, self.physical_device);
+
+    const pool_create_info: vk.CommandPoolCreateInfo = .{
+        // Queue family type that buffers from this command pool will use
+        .queue_family_index = queue_family_indices.graphics_family.?,
+        .flags = .{ .reset_command_buffer_bit = true },
+    };
+
+    // Create a graphics queue family command pool
+    self.command_pool = try self.device.createCommandPool(&pool_create_info, null);
+}
+
+fn getRequiredExtensions(self: Self) ![][*:0]const u8 {
+    var ext_count = sdl.vulkan.getInstanceExtensionsCount(self.window);
+
+    if (enable_validation_layers) {
+        ext_count += 1;
+    }
+
+    const extensions = try self.allocator.alloc([*:0]const u8, ext_count);
+    _ = try sdl.vulkan.getInstanceExtensions(self.window, extensions);
+
+    if (enable_validation_layers) {
+        extensions[extensions.len - 1] = vk.extensions.ext_debug_utils.name;
+    }
+
+    return extensions;
+}
+
+fn checkInstanceExtensions(self: Self, required_extensions: *const [][*:0]const u8) !bool {
+    var prop_count: u32 = 0;
+    _ = try self.vkb.enumerateInstanceExtensionProperties(null, &prop_count, null);
+
+    const props = try self.allocator.alloc(vk.ExtensionProperties, prop_count);
+    defer self.allocator.free(props);
+
+    _ = try self.vkb.enumerateInstanceExtensionProperties(null, &prop_count, props.ptr);
+
+    for (required_extensions.*) |required_extension| {
+        for (props) |prop| {
+            if (std.mem.eql(u8, std.mem.sliceTo(&prop.extension_name, 0), std.mem.span(required_extension))) {
+                break;
+            }
+        } else {
+            return false;
+        }
+    }
+
+    return true;
+}
+
+fn checkDeviceExtensions(self: Self, pdev: vk.PhysicalDevice) !bool {
+    var prop_count: u32 = 0;
+    _ = try self.instance.enumerateDeviceExtensionProperties(pdev, null, &prop_count, null);
+
+    if (prop_count == 0) {
+        return false;
+    }
+
+    const props = try self.allocator.alloc(vk.ExtensionProperties, prop_count);
+    defer self.allocator.free(props);
+
+    _ = try self.instance.enumerateDeviceExtensionProperties(pdev, null, &prop_count, props.ptr);
+
+    for (device_extensions) |device_extension| {
+        for (props) |prop| {
+            if (std.mem.eql(u8, std.mem.sliceTo(&prop.extension_name, 0), std.mem.span(device_extension))) {
+                break;
+            }
+        } else {
+            return false;
+        }
+    }
+
+    return true;
+}
+
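+// A device is suitable when it is not a CPU-only implementation, exposes the
+// required queue families and extensions, can create a usable swapchain for
+// the surface and supports sampler anisotropy (enabled in createLogicalDevice)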
+fn checkDeviceSuitable(self: Self, pdev: vk.PhysicalDevice) bool {
+    const pdev_properties = self.instance.getPhysicalDeviceProperties(pdev);
+
+    if (pdev_properties.device_type == .cpu) {
+        return false;
+    }
+
+    const pdev_features = self.instance.getPhysicalDeviceFeatures(pdev);
+    const queue_family_indices = QueueUtils.getQueueFamilies(self, pdev) catch return false;
+    const extension_support = self.checkDeviceExtensions(pdev) catch return false;
+
+    const swapchain_details = Swapchain.getSwapchainDetails(
+        self.allocator,
+        self.instance,
+        pdev,
+        self.surface,
+    ) catch return false;
+    defer self.allocator.free(swapchain_details.formats);
+    defer self.allocator.free(swapchain_details.presentation_modes);
+
+    const swapchain_valid = swapchain_details.formats.len != 0 and swapchain_details.presentation_modes.len != 0;
+
+    return queue_family_indices.isValid() and extension_support and swapchain_valid and pdev_features.sampler_anisotropy == vk.TRUE;
+}
+
+fn checkValidationLayersSupport(self: Self) bool {
+    var layer_count: u32 = undefined;
+    _ = self.vkb.enumerateInstanceLayerProperties(&layer_count, null) catch return false;
+
+    const available_layers = self.allocator.alloc(vk.LayerProperties, layer_count) catch return false;
+    defer self.allocator.free(available_layers);
+
+    _ = self.vkb.enumerateInstanceLayerProperties(&layer_count, available_layers.ptr) catch return false;
+
+    for (validation_layers) |validation_layer| {
+        for (available_layers) |available_layer| {
+            if (std.mem.eql(u8, std.mem.span(validation_layer), std.mem.sliceTo(&available_layer.layer_name, 0))) {
+                return true;
+            }
+        }
+    }
+
+    return false;
+}
diff --git a/src/Mesh.zig b/src/Mesh.zig
index 74e52d4..ba0a4c3 100644
--- a/src/Mesh.zig
+++ b/src/Mesh.zig
@@ -2,6 +2,7 @@ const std = @import("std");
 const vk = @import("vulkan");
 const zm = @import("zmath");
 
+const Context = @import("Context.zig");
 const Utilities = @import("utilities.zig");
 const Vertex = Utilities.Vertex;
 const Device = @import("vulkan_renderer.zig").Device;
@@ -11,7 +12,6 @@ const Model = @import("vulkan_renderer.zig").Model;
 const Self = @This();
 
 ubo_model: Model,
-tex_id: u32,
 
 vertex_count: u32,
 vertex_buffer: vk.Buffer,
@@ -21,16 +21,20 @@ index_count: u32,
 index_buffer: vk.Buffer,
 index_buffer_memory: vk.DeviceMemory,
 
-instance: Instance,
-physical_device: vk.PhysicalDevice,
-device: Device,
+ctx: Context,
 
 allocator: std.mem.Allocator,
 
-pub fn new(
-    instance: Instance,
-    pdev: vk.PhysicalDevice,
-    device: Device,
+pub fn create(
+    allocator: std.mem.Allocator,
+    ctx: Context,
     transfer_queue: vk.Queue,
     transfer_command_pool: vk.CommandPool,
     vertices: []const Vertex,
@@ -40,13 +44,12 @@ pub fn new(
 ) !Self {
     var self: Self = undefined;
 
+    self.allocator = allocator;
+
     self.vertex_count = @intCast(vertices.len);
     self.index_count = @intCast(indices.len);
 
-    self.instance = instance;
-    self.physical_device = pdev;
-    self.device = device;
-    self.allocator = allocator;
+    self.ctx = ctx;
 
     try self.createVertexBuffer(transfer_queue, transfer_command_pool, vertices);
     try self.createIndexBuffer(transfer_queue, transfer_command_pool, indices);
@@ -57,12 +60,18 @@ pub fn new(
     return self;
 }
 
-pub fn destroyBuffers(self: Self) void {
-    self.device.destroyBuffer(self.vertex_buffer, null);
-    self.device.freeMemory(self.vertex_buffer_memory, null);
+pub fn destroy(self: Self) void {
+    self.ctx.device.destroyBuffer(self.vertex_buffer, null);
+    self.ctx.device.freeMemory(self.vertex_buffer_memory, null);
 
-    self.device.destroyBuffer(self.index_buffer, null);
-    self.device.freeMemory(self.index_buffer_memory, null);
+    self.ctx.device.destroyBuffer(self.index_buffer, null);
+    self.ctx.device.freeMemory(self.index_buffer_memory, null);
 }
 
 fn createVertexBuffer(
@@ -77,14 +86,12 @@ fn createVertexBuffer(
     // Temporary buffer to "stage" vertex data before transfering to GPU
     var staging_buffer: vk.Buffer = undefined;
     var staging_buffer_memory: vk.DeviceMemory = undefined;
-    defer self.device.destroyBuffer(staging_buffer, null);
-    defer self.device.freeMemory(staging_buffer_memory, null);
+    defer self.ctx.device.destroyBuffer(staging_buffer, null);
+    defer self.ctx.device.freeMemory(staging_buffer_memory, null);
 
     // Create buffer and allocate memory to it
     try Utilities.createBuffer(
-        self.physical_device,
-        self.instance,
-        self.device,
+        self.ctx,
         buffer_size,
         .{ .transfer_src_bit = true },
         .{ .host_visible_bit = true, .host_coherent_bit = true },
@@ -95,21 +102,19 @@ fn createVertexBuffer(
     // Map memory to vertex
     // 1. Create pointer to a point in normal memory
     // 2. Map the vertex buffer memory to that point
-    const data = try self.device.mapMemory(staging_buffer_memory, 0, buffer_size, .{});
+    const data = try self.ctx.device.mapMemory(staging_buffer_memory, 0, buffer_size, .{});
 
     // 3. Copy memory from vertices vector to the point in memory
     const gpu_vertices: [*]Vertex = @ptrCast(@alignCast(data));
     @memcpy(gpu_vertices, vertices[0..]);
 
     // 4. Unmap the vertex buffer memory
-    self.device.unmapMemory(staging_buffer_memory);
+    self.ctx.device.unmapMemory(staging_buffer_memory);
 
     // ---
 
     // Create buffer with TRANSFER_DST_BIT to mark as recipient of transfer data (also VERTEX_BUFFER)
     // Buffer memory is to be DEVICE_LOCAL_BIT meaning memory is on the GPU and only accessible by it and not CPU (host)
     try Utilities.createBuffer(
-        self.physical_device,
-        self.instance,
-        self.device,
+        self.ctx,
         buffer_size,
         .{ .transfer_dst_bit = true, .vertex_buffer_bit = true },
         .{ .device_local_bit = true },
@@ -119,7 +124,7 @@ fn createVertexBuffer(
 
     // Copy staging buffer to vertex buffer on GPU
     try Utilities.copyBuffer(
-        self.device,
+        self.ctx,
         transfer_queue,
         transfer_command_pool,
         staging_buffer,
@@ -140,13 +145,11 @@ fn createIndexBuffer(
     // Temporary buffer to "stage" vertex data before transfering to GPU
     var staging_buffer: vk.Buffer = undefined;
     var staging_buffer_memory: vk.DeviceMemory = undefined;
-    defer self.device.destroyBuffer(staging_buffer, null);
-    defer self.device.freeMemory(staging_buffer_memory, null);
+    defer self.ctx.device.destroyBuffer(staging_buffer, null);
+    defer self.ctx.device.freeMemory(staging_buffer_memory, null);
 
     try Utilities.createBuffer(
-        self.physical_device,
-        self.instance,
-        self.device,
+        self.ctx,
         buffer_size,
         .{ .transfer_src_bit = true },
         .{ .host_visible_bit = true, .host_coherent_bit = true },
@@ -155,16 +158,14 @@ fn createIndexBuffer(
     );
 
     // Map memory to index buffer
-    const data = try self.device.mapMemory(staging_buffer_memory, 0, buffer_size, .{});
+    const data = try self.ctx.device.mapMemory(staging_buffer_memory, 0, buffer_size, .{});
     const gpu_vertices: [*]u32 = @ptrCast(@alignCast(data));
     @memcpy(gpu_vertices, indices[0..]);
-    self.device.unmapMemory(staging_buffer_memory);
+    self.ctx.device.unmapMemory(staging_buffer_memory);
 
     // Create buffer for index data on GPU access only
     try Utilities.createBuffer(
-        self.physical_device,
-        self.instance,
-        self.device,
+        self.ctx,
         buffer_size,
         .{ .transfer_dst_bit = true, .index_buffer_bit = true },
         .{ .device_local_bit = true },
@@ -174,7 +175,7 @@ fn createIndexBuffer(
 
     // Copy from staging buffer to GPU access buffer
     try Utilities.copyBuffer(
-        self.device,
+        self.ctx,
         transfer_queue,
         transfer_command_pool,
         staging_buffer,
diff --git a/src/MeshModel.zig b/src/MeshModel.zig
index 4e7ca77..bdd4be9 100644
--- a/src/MeshModel.zig
+++ b/src/MeshModel.zig
@@ -15,7 +15,20 @@ allocator: std.mem.Allocator,
 mesh_list: std.ArrayList(Mesh),
 model: zm.Mat,
 
+sampler_descriptor_sets: std.ArrayList(vk.DescriptorSet),
+
-pub fn new(allocator: std.mem.Allocator, mesh_list: std.ArrayList(Mesh)) Self {
+pub fn new(
+    allocator: std.mem.Allocator,
+    ctx: Context,
+    graphics_command_pool: vk.CommandPool,
+    texture_sampler: vk.Sampler,
+    model_file: []const u8,
+) Self {
+    _ = texture_sampler;
     var new_mesh_model: Self = undefined;
 
     new_mesh_model.allocator = allocator;
diff --git a/src/ResourceManager.zig b/src/ResourceManager.zig
new file mode 100644
index 0000000..88badf3
--- /dev/null
+++ b/src/ResourceManager.zig
@@ -0,0 +1,55 @@
+const std = @import("std");
+const vk = @import("vulkan");
+
+const Context = @import("Context.zig");
+const Mesh = @import("Mesh.zig");
+const Material = @import("Material.zig");
+
+const Self = @This();
+
+allocator: std.mem.Allocator,
+
+ctx: Context,
+
+sampler_descriptor_pool: vk.DescriptorPool,
+sampler_descriptor_set_layout: vk.DescriptorSetLayout,
+
+// File-name keys are slices, so the caches use StringArrayHashMap
+// (AutoArrayHashMap cannot hash []const u8 keys)
+mesh_cache: std.StringArrayHashMap(Mesh),
+material_cache: std.StringArrayHashMap(Material),
+
+pub fn new(allocator: std.mem.Allocator, ctx: Context) Self {
+    var self: Self = undefined;
+
+    self.allocator = allocator;
+    self.ctx = ctx;
+
+    self.mesh_cache = std.StringArrayHashMap(Mesh).init(allocator);
+    self.material_cache = std.StringArrayHashMap(Material).init(allocator);
+
+    return self;
+}
+
+pub fn deinit(self: *Self) void {
+    // TODO Release resources properly
+    self.mesh_cache.deinit();
+    self.material_cache.deinit();
+}
+
+pub fn getMesh(self: *Self, file_name: []const u8) !Mesh {
+    if (self.mesh_cache.get(file_name)) |mesh| {
+        return mesh;
+    }
+
+    // TODO Load the mesh from file_name and insert it into the cache
+    return error.NotImplemented;
+}
+
+fn allocateDescriptorSet(self: *Self) !void {
+    // TODO
+}
+
+fn createDescriptorSetLayout(self: *Self) !void {
+    // TODO
+}
diff --git a/src/Texture.zig b/src/Texture.zig
new file mode 100644
index 0000000..cadc83d
--- /dev/null
+++ b/src/Texture.zig
@@ -0,0 +1,200 @@
+const std = @import("std");
+const vk = @import("vulkan");
+const img = @import("zstbi");
+
+const Context = @import("Context.zig");
+const Image = @import("image.zig");
+const Utilities = @import("utilities.zig");
+
+const Self = @This();
+
+allocator: std.mem.Allocator,
+
+ctx: Context,
+
+idx: u32,
+texture_image: vk.Image,
+texture_image_memory: vk.DeviceMemory,
+texture_image_view: vk.ImageView,
+
+sampler_descriptor_set: vk.DescriptorSet,
+
+image_file: img.Image,
+
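+// Creation flow: decode the file with zstbi, copy the pixels through a staging
+// buffer into a device-local image, transition it for shader sampling, then
+// write a combined-image-sampler descriptor set that references it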
+pub fn create(
+    allocator: std.mem.Allocator,
+    file_name: []const u8,
+    ctx: Context,
+    graphics_command_pool: vk.CommandPool,
+    texture_sampler: vk.Sampler,
+    sampler_set_layout: vk.DescriptorSetLayout,
+    sampler_descriptor_pool: vk.DescriptorPool,
+) !Self {
+    var self: Self = undefined;
+
+    // Stored for the path concatenation in loadTextureFile
+    self.allocator = allocator;
+    self.ctx = ctx;
+
+    // Create texture image
+    try self.createTextureImage(file_name, graphics_command_pool);
+
+    // Create image view
+    self.texture_image_view = try Image.createImageView(
+        ctx,
+        self.texture_image,
+        .r8g8b8a8_srgb,
+        .{ .color_bit = true },
+    );
+
+    // Create texture descriptor
+    try self.createTextureDescriptor(
+        texture_sampler,
+        sampler_set_layout,
+        sampler_descriptor_pool,
+    );
+
+    return self;
+}
+
+pub fn destroy(self: *Self) void {
+    self.image_file.deinit();
+
+    self.ctx.device.destroyImageView(self.texture_image_view, null);
+    self.ctx.device.destroyImage(self.texture_image, null);
+    self.ctx.device.freeMemory(self.texture_image_memory, null);
+}
+
+fn createTextureImage(
+    self: *Self,
+    file_name: []const u8,
+    graphics_command_pool: vk.CommandPool,
+) !void {
+    // Load image file
+    var width: u32 = undefined;
+    var height: u32 = undefined;
+    var image_size: vk.DeviceSize = undefined;
+    try self.loadTextureFile(file_name, &width, &height, &image_size);
+
+    // Create staging buffer to hold loaded data, ready to copy to device
+    var image_staging_buffer: vk.Buffer = undefined;
+    var image_staging_buffer_memory: vk.DeviceMemory = undefined;
+    defer self.ctx.device.destroyBuffer(image_staging_buffer, null);
+    defer self.ctx.device.freeMemory(image_staging_buffer_memory, null);
+
+    try Utilities.createBuffer(
+        self.ctx,
+        image_size,
+        .{ .transfer_src_bit = true },
+        .{ .host_visible_bit = true, .host_coherent_bit = true },
+        &image_staging_buffer,
+        &image_staging_buffer_memory,
+    );
+
+    // Copy data to staging buffer
+    const data = try self.ctx.device.mapMemory(image_staging_buffer_memory, 0, image_size, .{});
+    const image_data: [*]u8 = @ptrCast(@alignCast(data));
+
+    @memcpy(image_data, self.image_file.data);
+    self.ctx.device.unmapMemory(image_staging_buffer_memory);
+
+    // Create image to hold final texture
+    var tex_image_memory: vk.DeviceMemory = undefined;
+    const tex_image = try Image.createImage(
+        self.ctx,
+        width,
+        height,
+        .r8g8b8a8_srgb,
+        .optimal,
+        .{ .transfer_dst_bit = true, .sampled_bit = true },
+        .{ .device_local_bit = true },
+        &tex_image_memory,
+    );
+
+    // Transition image to be DST for copy operation
+    try Utilities.transitionImageLayout(
+        self.ctx.device,
+        self.ctx.graphics_queue.handle,
+        graphics_command_pool,
+        tex_image,
+        .undefined,
+        .transfer_dst_optimal,
+    );
+
+    // Copy data to image
+    try Utilities.copyImageBuffer(
+        self.ctx.device,
+        self.ctx.graphics_queue.handle,
+        graphics_command_pool,
+        image_staging_buffer,
+        tex_image,
+        width,
+        height,
+    );
+
+    // Transition image to be shader readable for shader usage
+    try Utilities.transitionImageLayout(
+        self.ctx.device,
+        self.ctx.graphics_queue.handle,
+        graphics_command_pool,
+        tex_image,
+        .transfer_dst_optimal,
+        .shader_read_only_optimal,
+    );
+
+    self.texture_image = tex_image;
+    self.texture_image_memory = tex_image_memory;
+}
+
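+// Allocates one set from the sampler descriptor pool and points binding 0 at
+// the texture's image view, which the fragment shader is expected to sample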
+fn createTextureDescriptor(
+    self: *Self,
+    texture_sampler: vk.Sampler,
+    sampler_set_layout: vk.DescriptorSetLayout,
+    sampler_descriptor_pool: vk.DescriptorPool,
+) !void {
+    // Descriptor set allocation info
+    const set_alloc_info: vk.DescriptorSetAllocateInfo = .{
+        .descriptor_pool = sampler_descriptor_pool,
+        .descriptor_set_count = 1,
+        .p_set_layouts = @ptrCast(&sampler_set_layout),
+    };
+
+    // Allocate descriptor sets
+    try self.ctx.device.allocateDescriptorSets(&set_alloc_info, @ptrCast(&self.sampler_descriptor_set));
+
+    const image_info: vk.DescriptorImageInfo = .{
+        .image_layout = .shader_read_only_optimal, // Image layout when in use
+        .image_view = self.texture_image_view, // Image to bind to set
+        .sampler = texture_sampler, // Sampler to use for set
+    };
+
+    // Descriptor write info
+    const descriptor_write: vk.WriteDescriptorSet = .{
+        .dst_set = self.sampler_descriptor_set,
+        .dst_binding = 0,
+        .dst_array_element = 0,
+        .descriptor_type = .combined_image_sampler,
+        .descriptor_count = 1,
+        .p_image_info = @ptrCast(&image_info),
+        .p_buffer_info = undefined,
+        .p_texel_buffer_view = undefined,
+    };
+
+    // Update the new descriptor set
+    self.ctx.device.updateDescriptorSets(1, @ptrCast(&descriptor_write), 0, null);
+}
+
+fn loadTextureFile(self: *Self, file_name: []const u8, width: *u32, height: *u32, image_size: *vk.DeviceSize) !void {
+    const path_concat = [2][]const u8{ "./assets/textures/", file_name };
+    const path = try std.mem.concatWithSentinel(self.allocator, u8, &path_concat, 0);
+    defer self.allocator.free(path);
+
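+    // Force four channels so the decoded data matches the RGBA8 formats and
+    // the width * height * 4 size computed below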
+    const image = try img.Image.loadFromFile(path, 4);
+
+    width.* = image.width;
+    height.* = image.height;
+
+    // Calculate image size using given and known data
+    image_size.* = @as(vk.DeviceSize, width.*) * height.* * 4;
+
+    self.image_file = image;
+}
diff --git a/src/utilities.zig b/src/utilities.zig
index 94eed98..80b6ea3 100644
--- a/src/utilities.zig
+++ b/src/utilities.zig
@@ -1,8 +1,14 @@
 const std = @import("std");
 const vk = @import("vulkan");
 
-const Instance = @import("vulkan_renderer.zig").Instance;
-const Device = @import("vulkan_renderer.zig").Device;
+const Context = @import("Context.zig");
+const Instance = @import("Context.zig").Instance;
+const Device = @import("Context.zig").Device;
 const CommandBuffer = @import("vulkan_renderer.zig").CommandBuffer;
 
 pub const device_extensions = [_][*:0]const u8{vk.extensions.khr_swapchain.name};
@@ -38,8 +45,11 @@ pub const SwapchainImage = struct {
 };
 
-pub fn findMemoryTypeIndex(pdev: vk.PhysicalDevice, instance: Instance, allowed_types: u32, properties: vk.MemoryPropertyFlags) u32 {
+pub fn findMemoryTypeIndex(ctx: Context, allowed_types: u32, properties: vk.MemoryPropertyFlags) u32 {
     // Get properties of physical device memory
-    const memory_properties = instance.getPhysicalDeviceMemoryProperties(pdev);
+    const memory_properties = ctx.instance.getPhysicalDeviceMemoryProperties(ctx.physical_device);
     const mem_type_count = memory_properties.memory_type_count;
 
     for (memory_properties.memory_types[0..mem_type_count], 0..mem_type_count) |mem_type, i| {
@@ -54,9 +64,7 @@ pub fn findMemoryTypeIndex(pdev: vk.PhysicalDevice, instance: Instance, allowed_
 }
 
 pub fn createBuffer(
-    pdev: vk.PhysicalDevice,
-    instance: Instance,
-    device: Device,
+    ctx: Context,
     buffer_size: vk.DeviceSize,
     buffer_usage: vk.BufferUsageFlags,
     buffer_properties: vk.MemoryPropertyFlags,
@@ -72,17 +80,16 @@ pub fn createBuffer(
         .sharing_mode = .exclusive, // Similar to swapchain images, can share vertex buffers
     };
 
-    buffer.* = try device.createBuffer(&buffer_create_info, null);
+    buffer.* = try ctx.device.createBuffer(&buffer_create_info, null);
 
     // Get buffer memory requirements
-    const mem_requirements = device.getBufferMemoryRequirements(buffer.*);
+    const mem_requirements = ctx.device.getBufferMemoryRequirements(buffer.*);
 
     // Allocate memory to buffer
     const allocate_info: vk.MemoryAllocateInfo = .{
        .allocation_size = mem_requirements.size,
        .memory_type_index = findMemoryTypeIndex(
-            pdev,
-            instance,
+            ctx,
            mem_requirements.memory_type_bits,
            // Index of memory type of physical device that has required bit flags
            // Host visible: CPU can interact with memory
            // Host coherent: Allows placement of data straight into buffer after mapping (otherwise would have to specify manually)
            buffer_properties,
        ),
     };
 
@@ -91,10 +98,10 @@ pub fn createBuffer(
     // Allocate memory to vkDeviceMemory
-    buffer_memory.* = try device.allocateMemory(&allocate_info, null);
+    buffer_memory.* = try ctx.device.allocateMemory(&allocate_info, null);
 
     // Allocate memory to given vertex buffer
-    try device.bindBufferMemory(buffer.*, buffer_memory.*, 0);
+    try ctx.device.bindBufferMemory(buffer.*, buffer_memory.*, 0);
 }
 
 fn beginCommandBuffer(device: Device, command_pool: vk.CommandPool) !CommandBuffer {
diff --git a/src/vulkan_renderer.zig b/src/vulkan_renderer.zig
index 75eabd9..b635f56 100644
--- a/src/vulkan_renderer.zig
+++ b/src/vulkan_renderer.zig
@@ -59,9 +59,13 @@ pub const VulkanRenderer = struct {
     vkb: BaseDispatch,
 
     window: sdl.Window,
 
     current_frame: u32 = 0,
 
+    ctx: Context,
+
     // Scene settings
     ubo_view_projection: UboViewProjection,
@@ -122,9 +126,6 @@ pub const VulkanRenderer = struct {
     render_pass: vk.RenderPass,
 
-    // Pools
-    graphics_command_pool: vk.CommandPool,
-
     // Utilities
     swapchain_image_format: vk.Format,
     depth_format: vk.Format,
@@ -142,8 +143,11 @@ pub const VulkanRenderer = struct {
         self.window = window;
         self.current_frame = 0;
         self.allocator = allocator;
-        self.vkb = try BaseDispatch.load(try sdl.vulkan.getVkGetInstanceProcAddr());
-
-        img.init(allocator);
+        // The Context now owns instance, device, swapchain and zstbi setup
+        self.ctx = try Context.init(allocator, window);
@@ -166,7 +170,11 @@ pub const VulkanRenderer = struct {
         try self.createFramebuffers();
         try self.createCommandPool();
 
-        self.sampler_descriptor_sets = try std.ArrayList(vk.DescriptorSet).initCapacity(self.allocator, self.swapchain_images.len);
+        self.sampler_descriptor_sets = try std.ArrayList(vk.DescriptorSet).initCapacity(self.allocator, self.ctx.swapchain.swapchain_images.len);
 
         try self.createCommandBuffers();
         try self.createTextureSampler();
@@ -182,7 +190,11 @@ pub const VulkanRenderer = struct {
         self.texture_image_views = std.ArrayList(vk.ImageView).init(self.allocator);
         self.model_list = std.ArrayList(MeshModel).init(allocator);
 
-        const aspect: f32 = @as(f32, @floatFromInt(self.extent.width)) / @as(f32, @floatFromInt(self.extent.height));
+        const aspect: f32 = @as(f32, @floatFromInt(self.ctx.swapchain.extent.width)) / @as(f32, @floatFromInt(self.ctx.swapchain.extent.height));
         self.ubo_view_projection.projection = zm.perspectiveFovRh(
             std.math.degreesToRadians(45.0),
             aspect,
@@ -226,8 +238,13 @@ pub const VulkanRenderer = struct {
         // -- Get next image
         // Get index of next image to be drawn to, and signal semaphore when ready to be drawn to
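+        // The swapchain and its handle are now owned by the Context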
-        const image_index_result = try self.device.acquireNextImageKHR(
-            self.swapchain,
+        const image_index_result = try self.ctx.device.acquireNextImageKHR(
+            self.ctx.swapchain.handle,
             std.math.maxInt(u64),
             self.image_available[self.current_frame],
             .null_handle,
         );
@@ -258,7 +275,11 @@ pub const VulkanRenderer = struct {
             .wait_semaphore_count = 1, // Number of semaphores to wait on
             .p_wait_semaphores = @ptrCast(&self.render_finished[self.current_frame]), // Semaphores to wait on
             .swapchain_count = 1, // Number of swapchains to present to
-            .p_swapchains = @ptrCast(&self.swapchain), // Swapchains to present images to
+            .p_swapchains = @ptrCast(&self.ctx.swapchain.handle), // Swapchains to present images to
             .p_image_indices = @ptrCast(&image_index_result.image_index), // Index of images in swapchains to present
         };
 
@@ -331,9 +352,15 @@ pub const VulkanRenderer = struct {
         self.sampler_descriptor_sets.deinit();
         self.allocator.free(self.input_descriptor_sets);
 
-        for (0..self.swapchain_images.len) |i| {
-            self.device.destroyBuffer(self.vp_uniform_buffer[i], null);
-            self.device.freeMemory(self.vp_uniform_buffer_memory[i], null);
+        for (0..self.ctx.swapchain.swapchain_images.len) |i| {
+            self.ctx.device.destroyBuffer(self.vp_uniform_buffer[i], null);
+            self.ctx.device.freeMemory(self.vp_uniform_buffer_memory[i], null);
         }
         self.allocator.free(self.vp_uniform_buffer);
         self.allocator.free(self.vp_uniform_buffer_memory);
@@ -352,7 +379,11 @@ pub const VulkanRenderer = struct {
             self.device.destroyFramebuffer(framebuffer, null);
         }
 
-        self.allocator.free(self.swapchain_framebuffers);
+        self.ctx.swapchain.deinit();
 
         self.device.destroyPipeline(self.second_pipeline, null);
         self.device.destroyPipelineLayout(self.second_pipeline_layout, null);
@@ -616,7 +647,11 @@ pub const VulkanRenderer = struct {
         // Colour attachment of the render pass
         const swapchain_colour_attachment: vk.AttachmentDescription = .{
-            .format = self.swapchain_image_format, // Format to use for attachment
+            .format = self.ctx.swapchain.swapchain_image_format, // Format to use for attachment
             .samples = .{ .@"1_bit" = true }, // Number of samples to write for multisampling
             .load_op = .clear, // Describes what to do with attachment before rendering
             .store_op = .store, // Describes what to do with attachment after rendering
@@ -787,9 +822,15 @@ pub const VulkanRenderer = struct {
     }
 
     fn createColourBufferImage(self: *Self) !void {
-        self.colour_buffer_image = try self.allocator.alloc(vk.Image, self.swapchain_images.len);
-        self.colour_buffer_image_memory = try self.allocator.alloc(vk.DeviceMemory, self.swapchain_images.len);
-        self.colour_buffer_image_view = try self.allocator.alloc(vk.ImageView, self.swapchain_images.len);
+        self.colour_buffer_image = try self.allocator.alloc(vk.Image, self.ctx.swapchain.swapchain_images.len);
+        self.colour_buffer_image_memory = try self.allocator.alloc(vk.DeviceMemory, self.ctx.swapchain.swapchain_images.len);
+        self.colour_buffer_image_view = try self.allocator.alloc(vk.ImageView, self.ctx.swapchain.swapchain_images.len);
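+        // One colour attachment per swapchain image, sized to the swapchain extent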
 
         // Get supported format for colour attachment
         const colour_format = chooseSupportedFormat(
@@ -802,9 +843,16 @@ pub const VulkanRenderer = struct {
         // Create colour buffers
         for (0..self.colour_buffer_image.len) |i| {
-            self.colour_buffer_image[i] = try self.createImage(
-                self.extent.width,
-                self.extent.height,
+            self.colour_buffer_image[i] = try Image.createImage(
+                self.ctx,
+                self.ctx.swapchain.extent.width,
+                self.ctx.swapchain.extent.height,
                 colour_format,
                 .optimal,
                 .{ .color_attachment_bit = true, .input_attachment_bit = true },
@@ -821,9 +869,15 @@ pub const VulkanRenderer = struct {
     }
 
     fn createDepthBufferImage(self: *Self) !void {
-        self.depth_buffer_image = try self.allocator.alloc(vk.Image, self.swapchain_images.len);
-        self.depth_buffer_image_memory = try self.allocator.alloc(vk.DeviceMemory, self.swapchain_images.len);
-        self.depth_buffer_image_view = try self.allocator.alloc(vk.ImageView, self.swapchain_images.len);
+        self.depth_buffer_image = try self.allocator.alloc(vk.Image, self.ctx.swapchain.swapchain_images.len);
+        self.depth_buffer_image_memory = try self.allocator.alloc(vk.DeviceMemory, self.ctx.swapchain.swapchain_images.len);
+        self.depth_buffer_image_view = try self.allocator.alloc(vk.ImageView, self.ctx.swapchain.swapchain_images.len);
 
         // Get supported depth buffer format
         const formats = [_]vk.Format{ .d32_sfloat_s8_uint, .d32_sfloat, .d24_unorm_s8_uint };
@@ -837,9 +891,16 @@ pub const VulkanRenderer = struct {
         for (0..self.depth_buffer_image.len) |i| {
             // Create depth buffer image
-            self.depth_buffer_image[i] = try self.createImage(
-                self.extent.width,
-                self.extent.height,
+            self.depth_buffer_image[i] = try Image.createImage(
+                self.ctx,
+                self.ctx.swapchain.extent.width,
+                self.ctx.swapchain.extent.height,
                 self.depth_format,
                 .optimal,
                 .{ .depth_stencil_attachment_bit = true, .input_attachment_bit = true },
@@ -939,15 +1000,24 @@ pub const VulkanRenderer = struct {
         self.viewport = .{
             .x = 0.0,
             .y = 0.0,
-            .width = @floatFromInt(self.extent.width),
-            .height = @floatFromInt(self.extent.height),
+            .width = @floatFromInt(self.ctx.swapchain.extent.width),
+            .height = @floatFromInt(self.ctx.swapchain.extent.height),
             .min_depth = 0.0,
             .max_depth = 1.0,
         };
 
         self.scissor = .{
             .offset = .{ .x = 0, .y = 0 },
-            .extent = self.extent,
+            .extent = self.ctx.swapchain.extent,
         };
 
         const viewport_state_create_info: vk.PipelineViewportStateCreateInfo = .{
@@ -1121,10 +1191,17 @@ pub const VulkanRenderer = struct {
     }
 
     fn createFramebuffers(self: *Self) !void {
-        self.swapchain_framebuffers = try self.allocator.alloc(vk.Framebuffer, self.swapchain_images.len);
+        self.ctx.swapchain.swapchain_framebuffers = try self.allocator.alloc(vk.Framebuffer, self.ctx.swapchain.swapchain_images.len);
 
-        // Create a frammebuffer for each swapchain image
-        for (self.swapchain_images, 0..) |swapchain_image, i| {
+        // Create a framebuffer for each swapchain image
+        for (self.ctx.swapchain.swapchain_images, 0..) |swapchain_image, i| {
             // Order matters
             const attachments = [_]vk.ImageView{
                 swapchain_image.image_view,
@@ -1136,6 +1213,7 @@ pub const VulkanRenderer = struct {
                 .render_pass = self.render_pass, // Render pass layout the frambuffer will be used with
                 .attachment_count = @intCast(attachments.len),
                 .p_attachments = &attachments, // List of attachments (1:1 with render pass)
-                .width = self.extent.width, // Framebuffer width
-                .height = self.extent.height, // Framebuffer height
+                .width = self.ctx.swapchain.extent.width, // Framebuffer width
+                .height = self.ctx.swapchain.extent.height, // Framebuffer height
                 .layers = 1, // Framebuffer layers
             };
 
-            self.swapchain_framebuffers[i] = try self.device.createFramebuffer(&framebuffer_create_info, null);
+            self.ctx.swapchain.swapchain_framebuffers[i] = try self.ctx.device.createFramebuffer(&framebuffer_create_info, null);
         }
     }
 
@@ -1162,6 +1240,20 @@ pub const VulkanRenderer = struct {
     fn createCommandBuffers(self: *Self) !void {
         // Allocate one command buffer for each framebuffer
-        const command_buffer_handles = try self.allocator.alloc(vk.CommandBuffer, self.swapchain_framebuffers.len);
+        const command_buffer_handles = try self.allocator.alloc(vk.CommandBuffer, self.ctx.swapchain.swapchain_framebuffers.len);
         defer self.allocator.free(command_buffer_handles);
 
         self.command_buffers = try self.allocator.alloc(CommandBuffer, command_buffer_handles.len);
@@ -1218,8 +1310,13 @@ pub const VulkanRenderer = struct {
         const vp_buffer_size: vk.DeviceSize = @sizeOf(UboViewProjection);
 
         // One uniform buffer for each image (and by extension, command buffer)
-        self.vp_uniform_buffer = try self.allocator.alloc(vk.Buffer, self.swapchain_images.len);
-        self.vp_uniform_buffer_memory = try self.allocator.alloc(vk.DeviceMemory, self.swapchain_images.len);
+        self.vp_uniform_buffer = try self.allocator.alloc(vk.Buffer, self.ctx.swapchain.swapchain_images.len);
+        self.vp_uniform_buffer_memory = try self.allocator.alloc(vk.DeviceMemory, self.ctx.swapchain.swapchain_images.len);
 
         // Create the uniform buffers
         for (0..self.vp_uniform_buffer.len) |i| {
@@ -1251,7 +1348,11 @@ pub const VulkanRenderer = struct {
 
         // Data to create descriptor pool
         const pool_create_info: vk.DescriptorPoolCreateInfo = .{
-            .max_sets = @intCast(self.swapchain_images.len), // Maximum number of descriptor sets that can be created from pool
+            .max_sets = @intCast(self.ctx.swapchain.swapchain_images.len), // Maximum number of descriptor sets that can be created from pool
             .pool_size_count = @intCast(descriptor_pool_sizes.len), // Amount of pool sizes being passed
             .p_pool_sizes = &descriptor_pool_sizes, // Pool sizes to create pool with
         };
@@ -1293,7 +1394,11 @@ pub const VulkanRenderer = struct {
 
         // Create input attachment pool
         const input_pool_create_info: vk.DescriptorPoolCreateInfo = .{
-            .max_sets = @intCast(self.swapchain_images.len),
+            .max_sets = @intCast(self.ctx.swapchain.swapchain_images.len),
             .pool_size_count = @intCast(input_pool_sizes.len),
             .p_pool_sizes = &input_pool_sizes,
         };
@@ -1303,9 +1408,15 @@ pub const VulkanRenderer = struct {
 
     fn createDescriptorSets(self: *Self) !void {
         // One descriptor set for every buffer
-        self.descriptor_sets = try self.allocator.alloc(vk.DescriptorSet, self.swapchain_images.len);
+        self.descriptor_sets = try self.allocator.alloc(vk.DescriptorSet, self.ctx.swapchain.swapchain_images.len);
 
-        var set_layouts = try self.allocator.alloc(vk.DescriptorSetLayout, self.swapchain_images.len);
+        var set_layouts = try self.allocator.alloc(vk.DescriptorSetLayout, self.ctx.swapchain.swapchain_images.len);
         defer self.allocator.free(set_layouts);
         for (0..set_layouts.len) |i| {
             set_layouts[i] = self.descriptor_set_layout;
         }
 
@@ -1314,7 +1425,11 @@ pub const VulkanRenderer = struct {
         // Descriptor set allocation info
         const set_alloc_info: vk.DescriptorSetAllocateInfo = .{
             .descriptor_pool = self.descriptor_pool, // Pool to allocate descriptor set from
-            .descriptor_set_count = @intCast(self.swapchain_images.len), // Number of sets to allocate
+            .descriptor_set_count = @intCast(self.ctx.swapchain.swapchain_images.len), // Number of sets to allocate
             .p_set_layouts = set_layouts.ptr, // Layouts to use to allocate sets (1:1 relationship)
         };
 
@@ -1322,7 +1437,11 @@ pub const VulkanRenderer = struct {
         try self.device.allocateDescriptorSets(&set_alloc_info, self.descriptor_sets.ptr);
 
         // Update all of descriptor set buffer bindings
-        for (0..self.swapchain_images.len) |i| {
+        for (0..self.ctx.swapchain.swapchain_images.len) |i| {
             // -- View projection descriptor
             // Buffer info and data offset info
             const vp_buffer_info: vk.DescriptorBufferInfo = .{
@@ -1352,10 +1471,17 @@ pub const VulkanRenderer = struct {
     }
 
     fn createInputDescriptorSets(self: *Self) !void {
-        self.input_descriptor_sets = try self.allocator.alloc(vk.DescriptorSet, self.swapchain_images.len);
+        self.input_descriptor_sets = try self.allocator.alloc(vk.DescriptorSet, self.ctx.swapchain.swapchain_images.len);
 
         // Fill array of layouts ready for set creation
-        var set_layouts = try self.allocator.alloc(vk.DescriptorSetLayout, self.swapchain_images.len);
+        var set_layouts = try self.allocator.alloc(vk.DescriptorSetLayout, self.ctx.swapchain.swapchain_images.len);
         defer self.allocator.free(set_layouts);
         for (0..set_layouts.len) |i| {
             set_layouts[i] = self.input_set_layout;
         }
 
@@ -1364,7 +1490,11 @@ pub const VulkanRenderer = struct {
         // Input attachment descriptor set allocation info
         const set_alloc_info: vk.DescriptorSetAllocateInfo = .{
             .descriptor_pool = self.input_descriptor_pool,
-            .descriptor_set_count = @intCast(self.swapchain_images.len),
+            .descriptor_set_count = @intCast(self.ctx.swapchain.swapchain_images.len),
             .p_set_layouts = set_layouts.ptr,
         };
 
@@ -1372,7 +1502,11 @@ pub const VulkanRenderer = struct {
         try self.device.allocateDescriptorSets(&set_alloc_info, self.input_descriptor_sets.ptr);
 
         // Update each descriptor set with input attachment
-        for (0..self.swapchain_images.len) |i| {
+        for (0..self.ctx.swapchain.swapchain_images.len) |i| {
             // Colour attachment descriptor
             const colour_attachment_descriptor: vk.DescriptorImageInfo = .{
                 .image_layout = .shader_read_only_optimal,
@@ -1451,11 +1585,19 @@ pub const VulkanRenderer = struct {
             .render_pass = self.render_pass, // Render pass to begin
             .render_area = .{
                 .offset = .{ .x = 0, .y = 0 }, // Start point of render pass in pixels
-                .extent = self.extent, // Size of region to run render pass on (starting at offset)
-            },
-            .p_clear_values = &clear_values, // List of clear values
-            .clear_value_count = @intCast(clear_values.len),
-            .framebuffer = self.swapchain_framebuffers[current_image],
+                .extent = self.ctx.swapchain.extent, // Size of region to run render pass on (starting at offset)
+            },
+            .p_clear_values = &clear_values, // List of clear values
+            .clear_value_count = @intCast(clear_values.len),
+            .framebuffer = self.ctx.swapchain.swapchain_framebuffers[current_image],
         };
 
         const command_buffer = self.command_buffers[current_image];