// vulkan-zig/src/vulkan_renderer.zig
// 1512 lines, 63 KiB, Zig

const std = @import("std");
const sdl = @import("sdl2");
const vk = @import("vulkan");
const builtin = @import("builtin");
const shaders = @import("shaders");
const zm = @import("zmath");
const Utilities = @import("utilities.zig");
const QueueFamilyIndices = Utilities.QueueFamilyIndices;
const SwapchainDetails = Utilities.SwapchainDetails;
const SwapchainImage = Utilities.SwapchainImage;
const Vertex = Utilities.Vertex;
const Mesh = @import("Mesh.zig");
// Validation layers are only active in Debug builds; release builds skip the
// debug messenger entirely.
const enable_validation_layers = builtin.mode == .Debug;
const validation_layers = [_][*:0]const u8{"VK_LAYER_KHRONOS_validation"};

// Maximum number of frames that may be in flight at once (sizes the
// per-frame semaphore/fence arrays below).
const MAX_FRAME_DRAWS: u32 = 2;
// Upper bound on scene objects; not referenced in this portion of the file.
const MAX_OBJECTS: u32 = 2;

// Vulkan API surface used to generate the dispatch tables: core 1.0-1.3 plus
// the surface, swapchain and debug-utils extensions.
const apis: []const vk.ApiInfo = &.{
    vk.features.version_1_0,
    vk.features.version_1_1,
    vk.features.version_1_2,
    vk.features.version_1_3,
    vk.extensions.khr_surface,
    vk.extensions.khr_swapchain,
    vk.extensions.ext_debug_utils,
};

// Function-pointer tables for the three Vulkan dispatch levels.
const BaseDispatch = vk.BaseWrapper(apis);
const InstanceDispatch = vk.InstanceWrapper(apis);
const DeviceDispatch = vk.DeviceWrapper(apis);

// Proxy types bundling a raw handle with its dispatch table.
pub const Instance = vk.InstanceProxy(apis);
pub const Device = vk.DeviceProxy(apis);
pub const Queue = vk.QueueProxy(apis);
pub const CommandBuffer = vk.CommandBufferProxy(apis);

/// View and projection matrices uploaded to the vertex shader via a uniform
/// buffer. align(16) keeps the layout compatible with the GPU-side block.
const UboViewProjection = struct {
    projection: zm.Mat align(16),
    view: zm.Mat align(16),
};

/// Per-object model matrix, delivered to the shader as a push constant
/// (see createPushConstantRange).
pub const Model = struct {
    model: zm.Mat align(16),
};
pub const VulkanRenderer = struct {
    const Self = @This();

    allocator: std.mem.Allocator,
    vkb: BaseDispatch,
    window: sdl.Window,
    // Index of the current in-flight frame, cycled modulo MAX_FRAME_DRAWS in draw().
    current_frame: u32 = 0,

    // Scene objects
    meshes: [2]Mesh,

    // Scene settings
    ubo_view_projection: UboViewProjection,

    // Main Vulkan handles
    instance: Instance,
    physical_device: vk.PhysicalDevice,
    device: Device,
    graphics_queue: Queue,
    presentation_queue: Queue,
    surface: vk.SurfaceKHR,
    swapchain: vk.SwapchainKHR,
    viewport: vk.Viewport,
    scissor: vk.Rect2D,
    // One entry per swapchain image; slices allocated in init, freed in deinit.
    swapchain_images: []SwapchainImage,
    swapchain_framebuffers: []vk.Framebuffer,
    command_buffers: []CommandBuffer,
    depth_buffer_image: vk.Image,
    depth_buffer_image_memory: vk.DeviceMemory,
    depth_buffer_image_view: vk.ImageView,

    // Descriptors
    descriptor_set_layout: vk.DescriptorSetLayout,
    push_constant_range: vk.PushConstantRange,
    descriptor_pool: vk.DescriptorPool,
    descriptor_sets: []vk.DescriptorSet,
    // One view/projection uniform buffer (and its memory) per swapchain image.
    vp_uniform_buffer: []vk.Buffer,
    vp_uniform_buffer_memory: []vk.DeviceMemory,

    // Pipeline
    graphics_pipeline: vk.Pipeline,
    pipeline_layout: vk.PipelineLayout,
    render_pass: vk.RenderPass,

    // Pools
    graphics_command_pool: vk.CommandPool,

    // Utilities
    swapchain_image_format: vk.Format,
    depth_format: vk.Format,
    extent: vk.Extent2D,

    // Synchronisation: one semaphore pair and one fence per in-flight frame.
    image_available: [MAX_FRAME_DRAWS]vk.Semaphore,
    render_finished: [MAX_FRAME_DRAWS]vk.Semaphore,
    draw_fences: [MAX_FRAME_DRAWS]vk.Fence,

    // Only populated when validation layers are enabled (Debug builds).
    debug_utils: ?vk.DebugUtilsMessengerEXT,
    /// Initialise the renderer: create every Vulkan object, upload the two
    /// scene meshes, and set up per-frame synchronisation primitives.
    /// `allocator` is retained for the renderer's lifetime; everything
    /// allocated here is released by deinit().
    pub fn init(window: sdl.Window, allocator: std.mem.Allocator) !Self {
        var self: Self = undefined;
        self.window = window;
        self.current_frame = 0;
        self.allocator = allocator;
        self.vkb = try BaseDispatch.load(try sdl.vulkan.getVkGetInstanceProcAddr());

        // Creation order matters: each step depends on handles from earlier steps.
        try self.createInstance();
        try self.createSurface();
        if (enable_validation_layers) {
            self.debug_utils = try createDebugMessenger(self.instance);
        }
        try self.getPhysicalDevice();
        try self.createLogicalDevice();
        try self.createSwapchain();
        try self.createDepthBufferImage();
        try self.createRenderPass();
        try self.createDescriptorSetLayout();
        try self.createPushConstantRange();
        try self.createGraphicsPipeline();
        try self.createFramebuffers();
        try self.createCommandPool();

        // Camera: 45-degree vertical FOV, aspect ratio from the swapchain extent.
        const aspect: f32 = @as(f32, @floatFromInt(self.extent.width)) / @as(f32, @floatFromInt(self.extent.height));
        self.ubo_view_projection.projection = zm.perspectiveFovRh(
            std.math.degreesToRadians(45.0),
            aspect,
            0.1,
            100.0,
        );
        self.ubo_view_projection.view = zm.lookAtRh(
            zm.Vec{ 0.0, 0.0, 2.0, 0.0 }, // eye position
            zm.Vec{ 0.0, 0.0, 0.0, 0.0 }, // focus point
            zm.Vec{ 0.0, 1.0, 0.0, 0.0 }, // up direction
        );
        // Invert y scale: Vulkan clip space has y pointing down.
        self.ubo_view_projection.projection[1][1] *= -1;

        // Create meshes
        // Vertex Data
        var mesh_vertices = [_]Vertex{
            .{ .pos = .{ -0.4, 0.4, 0.0 }, .col = .{ 1.0, 0.0, 0.0 } }, // 0
            .{ .pos = .{ -0.4, -0.4, 0.0 }, .col = .{ 1.0, 0.0, 0.0 } }, // 1
            .{ .pos = .{ 0.4, -0.4, 0.0 }, .col = .{ 1.0, 0.0, 0.0 } }, // 2
            .{ .pos = .{ 0.4, 0.4, 0.0 }, .col = .{ 1.0, 0.0, 0.0 } }, // 3
        };
        var mesh_vertices2 = [_]Vertex{
            .{ .pos = .{ -0.25, 0.6, 0.0 }, .col = .{ 0.0, 0.0, 1.0 } }, // 0
            .{ .pos = .{ -0.25, -0.6, 0.0 }, .col = .{ 0.0, 0.0, 1.0 } }, // 1
            .{ .pos = .{ 0.25, -0.6, 0.0 }, .col = .{ 0.0, 0.0, 1.0 } }, // 2
            .{ .pos = .{ 0.25, 0.6, 0.0 }, .col = .{ 0.0, 0.0, 1.0 } }, // 3
        };
        // Index Data: two triangles forming a quad, shared by both meshes.
        const mesh_indices = [_]u32{
            0, 1, 2,
            2, 3, 0,
        };
        const first_mesh = try Mesh.new(
            self.instance,
            self.physical_device,
            self.device,
            self.graphics_queue.handle,
            self.graphics_command_pool,
            &mesh_vertices,
            &mesh_indices,
            self.allocator,
        );
        const second_mesh = try Mesh.new(
            self.instance,
            self.physical_device,
            self.device,
            self.graphics_queue.handle,
            self.graphics_command_pool,
            &mesh_vertices2,
            &mesh_indices,
            self.allocator,
        );
        self.meshes = [_]Mesh{ first_mesh, second_mesh };

        try self.createCommandBuffers();
        try self.createUniformBuffers();
        try self.createDescriptorPool();
        try self.createDescriptorSets();
        try self.createSynchronisation();
        return self;
    }
pub fn updateModel(self: *Self, model_id: u32, new_model: zm.Mat) !void {
if (model_id < self.meshes.len) {
self.meshes[model_id].ubo_model.model = new_model;
}
}
    /// Render one frame: wait on this frame's fence, acquire the next
    /// swapchain image, re-record and submit its command buffer, then queue
    /// the image for presentation. Advances `current_frame` at the end.
    pub fn draw(self: *Self) !void {
        // Wait for given fence to signal (open) from last draw before continuing
        _ = try self.device.waitForFences(
            1,
            @ptrCast(&self.draw_fences[self.current_frame]),
            vk.TRUE,
            std.math.maxInt(u64), // effectively no timeout
        );
        // Manually reset (close) fence; the queueSubmit below signals it again
        try self.device.resetFences(1, @ptrCast(&self.draw_fences[self.current_frame]));

        // -- Get next image
        // Get index of next image to be drawn to, and signal semaphore when ready to be drawn to
        const image_index_result = try self.device.acquireNextImageKHR(
            self.swapchain,
            std.math.maxInt(u64),
            self.image_available[self.current_frame],
            .null_handle,
        );

        // Re-record this image's command buffer and refresh its uniform buffer.
        try self.recordCommands(image_index_result.image_index);
        try self.updateUniformBuffers(image_index_result.image_index);

        // -- Submit command buffer to render
        // Queue submission information
        const wait_stages = [_]vk.PipelineStageFlags{.{ .color_attachment_output_bit = true }};
        const submit_info: vk.SubmitInfo = .{
            .wait_semaphore_count = 1, // Number of semaphores to wait on
            .p_wait_semaphores = @ptrCast(&self.image_available[self.current_frame]), // List of semaphores to wait on
            .p_wait_dst_stage_mask = &wait_stages, // Stages to check semaphores at
            .command_buffer_count = 1, // Number of command buffers to submit
            .p_command_buffers = @ptrCast(&self.command_buffers[image_index_result.image_index]), // Command buffer to submit
            .signal_semaphore_count = 1, // Number of semaphores to signal
            .p_signal_semaphores = @ptrCast(&self.render_finished[self.current_frame]), // List of semaphores to signal when command buffer finishes
        };
        // Submit command buffer to queue; signals this frame's fence when done
        try self.device.queueSubmit(self.graphics_queue.handle, 1, @ptrCast(&submit_info), self.draw_fences[self.current_frame]);

        // -- Present rendered image to screen
        const present_info: vk.PresentInfoKHR = .{
            .wait_semaphore_count = 1, // Number of semaphores to wait on
            .p_wait_semaphores = @ptrCast(&self.render_finished[self.current_frame]), // Semaphores to wait on
            .swapchain_count = 1, // Number of swapchains to present to
            .p_swapchains = @ptrCast(&self.swapchain), // Swapchains to present images to
            .p_image_indices = @ptrCast(&image_index_result.image_index), // Index of images in swapchains to present
        };
        // Present image
        _ = try self.device.queuePresentKHR(self.presentation_queue.handle, &present_info);

        // Get next frame (use % to keep the current frame below MAX_FRAME_DRAWS)
        self.current_frame = (self.current_frame + 1) % MAX_FRAME_DRAWS;
    }
pub fn deinit(self: *Self) void {
self.device.deviceWaitIdle() catch undefined;
if (enable_validation_layers) {
self.instance.destroyDebugUtilsMessengerEXT(self.debug_utils.?, null);
}
self.device.destroyImageView(self.depth_buffer_image_view, null);
self.device.destroyImage(self.depth_buffer_image, null);
self.device.freeMemory(self.depth_buffer_image_memory, null);
self.device.destroyDescriptorPool(self.descriptor_pool, null);
self.device.destroyDescriptorSetLayout(self.descriptor_set_layout, null);
for (0..self.swapchain_images.len) |i| {
self.device.destroyBuffer(self.vp_uniform_buffer[i], null);
self.device.freeMemory(self.vp_uniform_buffer_memory[i], null);
}
self.allocator.free(self.vp_uniform_buffer);
self.allocator.free(self.vp_uniform_buffer_memory);
self.allocator.free(self.descriptor_sets);
for (self.meshes) |mesh| {
mesh.destroyBuffers();
}
for (0..MAX_FRAME_DRAWS) |i| {
self.device.destroySemaphore(self.render_finished[i], null);
self.device.destroySemaphore(self.image_available[i], null);
self.device.destroyFence(self.draw_fences[i], null);
}
self.allocator.free(self.command_buffers);
self.device.destroyCommandPool(self.graphics_command_pool, null);
for (self.swapchain_framebuffers) |framebuffer| {
self.device.destroyFramebuffer(framebuffer, null);
}
self.allocator.free(self.swapchain_framebuffers);
self.device.destroyPipeline(self.graphics_pipeline, null);
self.device.destroyPipelineLayout(self.pipeline_layout, null);
self.device.destroyRenderPass(self.render_pass, null);
for (self.swapchain_images) |swapchain_image| {
self.device.destroyImageView(swapchain_image.image_view, null);
}
self.allocator.free(self.swapchain_images);
self.device.destroySwapchainKHR(self.swapchain, null);
self.device.destroyDevice(null);
self.instance.destroySurfaceKHR(self.surface, null);
self.instance.destroyInstance(null);
self.allocator.destroy(self.device.wrapper);
self.allocator.destroy(self.instance.wrapper);
}
fn createInstance(self: *Self) !void {
if (enable_validation_layers and !self.checkValidationLayersSupport()) {
// TODO Better error
return error.LayerNotPresent;
}
const extensions = try self.getRequiredExtensions();
defer self.allocator.free(extensions);
std.debug.print("[Required instance extensions]\n", .{});
for (extensions) |ext| {
std.debug.print("\t- {s}\n", .{ext});
}
if (!try self.checkInstanceExtensions(&extensions)) {
return error.ExtensionNotPresent;
}
const app_info = vk.ApplicationInfo{
.p_application_name = "Vulkan SDL Test",
.application_version = vk.makeApiVersion(0, 0, 1, 0),
.p_engine_name = "Vulkan SDL Test",
.engine_version = vk.makeApiVersion(0, 0, 1, 0),
.api_version = vk.API_VERSION_1_3,
};
var instance_create_info: vk.InstanceCreateInfo = .{
.p_application_info = &app_info,
.enabled_extension_count = @intCast(extensions.len),
.pp_enabled_extension_names = @ptrCast(extensions),
};
if (enable_validation_layers) {
const debug_create_info = getDebugUtilsCreateInfo();
instance_create_info.enabled_layer_count = @intCast(validation_layers.len);
instance_create_info.pp_enabled_layer_names = &validation_layers;
instance_create_info.p_next = &debug_create_info;
}
const instance_handle = try self.vkb.createInstance(&instance_create_info, null);
const vki = try self.allocator.create(InstanceDispatch);
errdefer self.allocator.destroy(vki);
vki.* = try InstanceDispatch.load(instance_handle, self.vkb.dispatch.vkGetInstanceProcAddr);
self.instance = Instance.init(instance_handle, vki);
}
    /// Create the presentation surface for the SDL window on the instance.
    fn createSurface(self: *Self) !void {
        self.surface = try sdl.vulkan.createSurface(self.window, self.instance.handle);
    }
    /// Create the logical device with one queue per required family, load the
    /// heap-allocated device-level dispatch table, and fetch the graphics and
    /// presentation queue handles.
    fn createLogicalDevice(self: *Self) !void {
        const indices = try self.getQueueFamilies(self.physical_device);
        // 1 is the highest priority
        const priority = [_]f32{1};
        // Describe both queues up front; only the first entry is used when the
        // graphics and presentation families turn out to be the same.
        const qci = [_]vk.DeviceQueueCreateInfo{
            .{
                .queue_family_index = indices.graphics_family.?,
                .queue_count = 1,
                .p_queue_priorities = &priority,
            },
            .{
                .queue_family_index = indices.presentation_family.?,
                .queue_count = 1,
                .p_queue_priorities = &priority,
            },
        };
        const queue_count: u32 = if (indices.graphics_family.? == indices.presentation_family.?)
            1
        else
            2;
        const device_create_info: vk.DeviceCreateInfo = .{
            .queue_create_info_count = queue_count,
            .p_queue_create_infos = &qci,
            .pp_enabled_extension_names = &Utilities.device_extensions,
            .enabled_extension_count = @intCast(Utilities.device_extensions.len),
        };
        const device_handle = try self.instance.createDevice(self.physical_device, &device_create_info, null);
        const vkd = try self.allocator.create(DeviceDispatch);
        errdefer self.allocator.destroy(vkd);
        vkd.* = try DeviceDispatch.load(device_handle, self.instance.wrapper.dispatch.vkGetDeviceProcAddr);
        self.device = Device.init(device_handle, vkd);
        // getDeviceQueues returns the graphics queue first, presentation second.
        const queues = try self.getDeviceQueues();
        self.graphics_queue = Queue.init(queues[0], self.device.wrapper);
        self.presentation_queue = Queue.init(queues[1], self.device.wrapper);
    }
fn createSwapchain(self: *Self) !void {
const swapchain_details = try self.getSwapchainDetails(self.physical_device);
defer self.allocator.free(swapchain_details.formats);
defer self.allocator.free(swapchain_details.presentation_modes);
// 1. Choose best surface format
const surface_format = chooseBestSurfaceFormat(swapchain_details.formats);
// 2. Choose best presentation mode
const present_mode = chooseBestPresentationMode(swapchain_details.presentation_modes);
// 3. Choose swapchain image resolution
const extent = chooseSwapExtent(&self.window, swapchain_details.surface_capabilities);
// How many images are in the swapchain? Get 1 more than the minimum to allow triple buffering
var image_count: u32 = swapchain_details.surface_capabilities.min_image_count + 1;
const max_image_count = swapchain_details.surface_capabilities.max_image_count;
// Clamp down if higher
// If 0, it means it's limitless
if (max_image_count != 0 and image_count > max_image_count) {
image_count = max_image_count;
}
var swapchain_create_info: vk.SwapchainCreateInfoKHR = .{
.image_format = surface_format.format,
.image_color_space = surface_format.color_space,
.present_mode = present_mode,
.image_extent = extent,
.min_image_count = image_count,
.image_array_layers = 1, // Number of layers for each image
.image_usage = .{ .color_attachment_bit = true }, // What attachment will images be used as
.pre_transform = swapchain_details.surface_capabilities.current_transform, // Transform to perform on swapchain images
.composite_alpha = .{ .opaque_bit_khr = true }, // How to handle blending images with external graphics (e.g.: other windows)
.clipped = vk.TRUE, // Whether to clip parts of images not in view (e.g.: behind another window, off-screen, etc...)
.old_swapchain = .null_handle, // Links old one to quickly share responsibilities in case it's been destroyed and replaced
.surface = self.surface,
.image_sharing_mode = .exclusive,
};
// Get queue family indices
const family_indices = try self.getQueueFamilies(self.physical_device);
// If graphic and presentation families are different, then swapchain must let images be shared between families
if (family_indices.graphics_family.? != family_indices.presentation_family.?) {
const qfi = [_]u32{
family_indices.graphics_family.?,
family_indices.presentation_family.?,
};
swapchain_create_info.image_sharing_mode = .concurrent;
swapchain_create_info.queue_family_index_count = @intCast(qfi.len); // Number of queues to share images between
swapchain_create_info.p_queue_family_indices = &qfi;
}
self.swapchain = try self.device.createSwapchainKHR(&swapchain_create_info, null);
self.swapchain_image_format = surface_format.format;
self.extent = extent;
// Swapchain images
var swapchain_image_count: u32 = 0;
_ = try self.device.getSwapchainImagesKHR(self.swapchain, &swapchain_image_count, null);
const images = try self.allocator.alloc(vk.Image, swapchain_image_count);
defer self.allocator.free(images);
_ = try self.device.getSwapchainImagesKHR(self.swapchain, &swapchain_image_count, images.ptr);
self.swapchain_images = try self.allocator.alloc(SwapchainImage, swapchain_image_count);
for (images, 0..) |image, i| {
self.swapchain_images[i] = .{
.image = image,
.image_view = try self.createImageView(image, self.swapchain_image_format, .{ .color_bit = true }),
};
}
}
    /// Create the render pass: one subpass with a colour attachment (presented
    /// at the end) and a depth attachment, plus the two external dependencies
    /// that order the layout transitions.
    fn createRenderPass(self: *Self) !void {
        // -- Attachments --
        // Colour attachment of the render pass
        const colour_attachment: vk.AttachmentDescription = .{
            .format = self.swapchain_image_format, // Format to use for attachment
            .samples = .{ .@"1_bit" = true }, // Number of samples to write for multisampling
            .load_op = .clear, // Describes what to do with attachment before rendering
            .store_op = .store, // Describes what to do with attachment after rendering
            .stencil_load_op = .dont_care, // Describes what to do with stencil before rendering
            .stencil_store_op = .dont_care, // Describes what to do with stencil after rendering
            // Framebuffer data will be stored as an image, but images can be given different data layouts
            // to give optimal use for certain operations
            .initial_layout = vk.ImageLayout.undefined, // Image data layout before render pass starts
            .final_layout = vk.ImageLayout.present_src_khr, // Image data layout after render pass (to change to)
        };
        // Depth attachment of render pass (contents not needed after the pass)
        const depth_attachment: vk.AttachmentDescription = .{
            .format = self.depth_format,
            .samples = .{ .@"1_bit" = true },
            .load_op = .clear,
            .store_op = .dont_care,
            .stencil_load_op = .dont_care,
            .stencil_store_op = .dont_care,
            .initial_layout = .undefined,
            .final_layout = .depth_stencil_attachment_optimal,
        };

        // -- References --
        // Attachment reference uses an attachment index that refers to index in the attachment list passed to render pass create info
        const colour_attachment_reference: vk.AttachmentReference = .{
            .attachment = 0,
            .layout = vk.ImageLayout.color_attachment_optimal,
        };
        const depth_attachment_reference: vk.AttachmentReference = .{
            .attachment = 1,
            .layout = vk.ImageLayout.depth_stencil_attachment_optimal,
        };

        // Information about a particular subpass the render pass is using
        const subpass: vk.SubpassDescription = .{
            .pipeline_bind_point = .graphics, // Pipeline type subpass is to be bound to
            .color_attachment_count = 1,
            .p_color_attachments = @ptrCast(&colour_attachment_reference),
            .p_depth_stencil_attachment = &depth_attachment_reference,
        };

        // Need to determine when layout transitions occur using subpass dependencies
        const subpass_dependencies = [2]vk.SubpassDependency{
            // Conversion from VK_IMAGE_LAYOUT_UNDEFINED to VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL
            vk.SubpassDependency{
                // Transition must happen after...
                .src_subpass = vk.SUBPASS_EXTERNAL, // Subpass index (VK_SUBPASS_EXTERNAL = outside of renderpass)
                .src_stage_mask = .{ .bottom_of_pipe_bit = true }, // Pipeline stage
                .src_access_mask = .{ .memory_read_bit = true }, // Stage access mask (memory access)
                // But must happen before...
                .dst_subpass = 0,
                .dst_stage_mask = .{ .color_attachment_output_bit = true },
                .dst_access_mask = .{ .memory_read_bit = true, .memory_write_bit = true },
            },
            // Conversion from VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL to VK_IMAGE_LAYOUT_PRESENT_SRC_KHR
            vk.SubpassDependency{
                // Transition must happen after...
                .src_subpass = 0,
                .src_stage_mask = .{ .color_attachment_output_bit = true },
                .src_access_mask = .{ .memory_read_bit = true, .memory_write_bit = true },
                // But must happen before...
                .dst_subpass = vk.SUBPASS_EXTERNAL,
                .dst_stage_mask = .{ .bottom_of_pipe_bit = true },
                .dst_access_mask = .{ .memory_read_bit = true },
            },
        };

        // Order matters: indices here match the attachment references above.
        const render_pass_attachments = [_]vk.AttachmentDescription{ colour_attachment, depth_attachment };
        const render_pass_create_info: vk.RenderPassCreateInfo = .{
            .attachment_count = @intCast(render_pass_attachments.len),
            .p_attachments = &render_pass_attachments,
            .subpass_count = 1,
            .p_subpasses = @ptrCast(&subpass),
            .dependency_count = @intCast(subpass_dependencies.len),
            .p_dependencies = &subpass_dependencies,
        };
        self.render_pass = try self.device.createRenderPass(&render_pass_create_info, null);
    }
fn createDescriptorSetLayout(self: *Self) !void {
// UboViewProjection binding info
const vp_layout_binding: vk.DescriptorSetLayoutBinding = .{
.binding = 0, // Binding point in shader (designated by binding number in shader)
.descriptor_type = .uniform_buffer, // Type of descriptor (unifor, dynamic uniform, image sampler, etc)
.descriptor_count = 1, // Number of descriptors for binding
.stage_flags = .{ .vertex_bit = true }, // Shader stage to bind to
.p_immutable_samplers = null, // For texture: can make smapler data immutable by specifying in layout
};
const layout_bindings = [_]vk.DescriptorSetLayoutBinding{vp_layout_binding};
// Create descriptor set layout with given bindings
const layout_create_info: vk.DescriptorSetLayoutCreateInfo = .{
.binding_count = @intCast(layout_bindings.len), // Number of binding infos
.p_bindings = &layout_bindings, // Array of binding infos
};
// Create descriptor set layout
self.descriptor_set_layout = try self.device.createDescriptorSetLayout(&layout_create_info, null);
}
fn createPushConstantRange(self: *Self) !void {
// Define push constant values (no 'create' needed)
self.push_constant_range = .{
.stage_flags = .{ .vertex_bit = true }, // Shader stage push constant will go to
.offset = 0, // Offset into given data to pass to push constant
.size = @sizeOf(Model), // Size of data being passed
};
}
fn createDepthBufferImage(self: *Self) !void {
// Get supported depth buffer format
const formats = [_]vk.Format{ .d32_sfloat_s8_uint, .d32_sfloat, .d24_unorm_s8_uint };
self.depth_format = chooseSupportedFormat(
self.physical_device,
self.instance,
&formats,
.optimal,
.{ .depth_stencil_attachment_bit = true },
) orelse return error.UnsupportedDepthBufferFormat;
// Create depth buffer image
self.depth_buffer_image = try self.createImage(
self.extent.width,
self.extent.height,
self.depth_format,
.optimal,
.{ .depth_stencil_attachment_bit = true },
.{ .device_local_bit = true },
&self.depth_buffer_image_memory,
);
// Create depth buffer image view
self.depth_buffer_image_view = try self.createImageView(self.depth_buffer_image, self.depth_format, .{ .depth_bit = true });
}
    /// Build the single graphics pipeline: compile-time-embedded SPIR-V shader
    /// stages, fixed-function state, the pipeline layout (descriptor set +
    /// push constant range), depth testing, and dynamic viewport/scissor.
    fn createGraphicsPipeline(self: *Self) !void {
        // Create shader modules from the embedded SPIR-V blobs.
        const vert = try self.device.createShaderModule(&.{
            .code_size = shaders.shader_vert.len,
            .p_code = @ptrCast(&shaders.shader_vert),
        }, null);
        defer self.device.destroyShaderModule(vert, null);
        const frag = try self.device.createShaderModule(&.{
            .code_size = shaders.shader_frag.len,
            .p_code = @ptrCast(&shaders.shader_frag),
        }, null);
        defer self.device.destroyShaderModule(frag, null);

        // -- Shader stage creation information --
        // Vertex stage creation information
        const vertex_shader_create_info: vk.PipelineShaderStageCreateInfo = .{
            .stage = .{ .vertex_bit = true },
            .module = vert,
            .p_name = "main", // Shader entry point
        };
        // Fragment stage creation information
        const fragment_shader_create_info: vk.PipelineShaderStageCreateInfo = .{
            .stage = .{ .fragment_bit = true },
            .module = frag,
            .p_name = "main",
        };
        const shader_create_infos = [_]vk.PipelineShaderStageCreateInfo{
            vertex_shader_create_info,
            fragment_shader_create_info,
        };

        // How the data for a single vertex (including info such as position, colour, texture coords, normals, etc...) is as a whole
        const binding_description: vk.VertexInputBindingDescription = .{
            .binding = 0, // Can bind multiple streams of data, this defines which one
            .stride = @sizeOf(Vertex), // Size of simple vertex object
            .input_rate = .vertex, // How to move between data after each vertex
            // vertex: move to the next vertex
            // instance: move to a vertex for the next instance
        };
        // How the data for an attribute is defined within the vertex
        const attribute_descriptions = [_]vk.VertexInputAttributeDescription{
            // Position attribute
            .{
                .binding = 0, // Which binding the data is at (should be same as above)
                .location = 0, // Location in shader where data will be read from
                .format = vk.Format.r32g32b32_sfloat, // Format the data will take (also helps define size of data)
                .offset = @offsetOf(Vertex, "pos"), // Where this attribute is defined in data for a single vertex
            },
            // Colour attribute
            .{
                .binding = 0,
                .location = 1,
                .format = vk.Format.r32g32b32_sfloat,
                .offset = @offsetOf(Vertex, "col"),
            },
        };

        // -- Vertex input --
        const vertex_input_create_info: vk.PipelineVertexInputStateCreateInfo = .{
            .vertex_binding_description_count = 1,
            .p_vertex_binding_descriptions = @ptrCast(&binding_description), // List of vertex binding descriptions (data spacing, stride info)
            .vertex_attribute_description_count = @intCast(attribute_descriptions.len),
            .p_vertex_attribute_descriptions = &attribute_descriptions, // List of vertex attribute descriptions (data format and where to bind to/from)
        };

        // -- Input assembly --
        const assembly_create_info: vk.PipelineInputAssemblyStateCreateInfo = .{
            .topology = .triangle_list, // Primitive type to assemble vertices as
            .primitive_restart_enable = vk.FALSE, // Allow overriding of strip topology to start new primitives
        };

        // -- Viewport & scissor --
        // Stored on self because viewport/scissor are dynamic state, re-set
        // each time commands are recorded.
        self.viewport = .{
            .x = 0.0,
            .y = 0.0,
            .width = @floatFromInt(self.extent.width),
            .height = @floatFromInt(self.extent.height),
            .min_depth = 0.0,
            .max_depth = 1.0,
        };
        self.scissor = .{
            .offset = .{ .x = 0, .y = 0 },
            .extent = self.extent,
        };
        const viewport_state_create_info: vk.PipelineViewportStateCreateInfo = .{
            .viewport_count = 1,
            .p_viewports = @ptrCast(&self.viewport),
            .scissor_count = 1,
            .p_scissors = @ptrCast(&self.scissor),
        };

        // -- Dynamic states --
        // Dynamic states to enable (TODO: To investigate later)
        const dynamic_states = [_]vk.DynamicState{ .viewport, .scissor };
        const dynamic_state_create_info: vk.PipelineDynamicStateCreateInfo = .{
            .dynamic_state_count = @intCast(dynamic_states.len),
            .p_dynamic_states = &dynamic_states,
        };

        // -- Rasterizer --
        const rasterizer_create_info: vk.PipelineRasterizationStateCreateInfo = .{
            .depth_clamp_enable = vk.FALSE, // Change if fragments beyond near/far planes are clipped (default) or clamped to plane
            .rasterizer_discard_enable = vk.FALSE, // Whether to discard data and skip rasterizer (never creates fragments)
            .polygon_mode = .fill, // How to handle filling points between vertices
            .line_width = 1.0, // How thick the lines should be when drawn
            .cull_mode = .{ .back_bit = true }, // Which face of a triangle to cull
            .front_face = .counter_clockwise, // Winding to determine which side is front
            .depth_bias_enable = vk.FALSE, // Whether to add depth bias to fragments (good for stopping "shadow acne" in shadow mapping)
            .depth_bias_constant_factor = 0,
            .depth_bias_clamp = 0,
            .depth_bias_slope_factor = 0,
        };

        // -- Multisampling --
        const multisampling_create_info: vk.PipelineMultisampleStateCreateInfo = .{
            .sample_shading_enable = vk.FALSE, // Enable multisample shading or not
            .rasterization_samples = .{ .@"1_bit" = true }, // Number of samples to use per fragment
            .min_sample_shading = 1,
            .alpha_to_coverage_enable = vk.FALSE,
            .alpha_to_one_enable = vk.FALSE,
        };

        // -- Blending --
        // Blend attachment state (how blending is handled)
        const colour_state: vk.PipelineColorBlendAttachmentState = .{
            .color_write_mask = .{ .r_bit = true, .g_bit = true, .b_bit = true, .a_bit = true }, // Colours to apply blending to
            .blend_enable = vk.TRUE, // Enable blending
            .src_color_blend_factor = vk.BlendFactor.src_alpha,
            .dst_color_blend_factor = vk.BlendFactor.one_minus_src_alpha,
            .color_blend_op = vk.BlendOp.add,
            // Summary: (VK_BLEND_FACTOR_SRC_ALPHA * new colour) + (VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA * old colour)
            .src_alpha_blend_factor = vk.BlendFactor.one,
            .dst_alpha_blend_factor = vk.BlendFactor.zero,
            .alpha_blend_op = vk.BlendOp.add,
            // Summary (1 * new alpha) + (0 * old alpha) = new alpha
        };
        // Blending uses equation: (srcColorBlendFactor * new colour) colorBlendOp (dstColorBlendFactor * old colour)
        const colour_blending_create_info: vk.PipelineColorBlendStateCreateInfo = .{
            .logic_op_enable = vk.FALSE, // Alternative to calculations is to use logical operations
            .logic_op = .copy,
            .attachment_count = 1,
            .p_attachments = @ptrCast(&colour_state),
            .blend_constants = [_]f32{ 0, 0, 0, 0 },
        };

        // -- Pipeline layout --
        const pipeline_layout_create_info: vk.PipelineLayoutCreateInfo = .{
            .set_layout_count = 1,
            .p_set_layouts = @ptrCast(&self.descriptor_set_layout),
            .push_constant_range_count = 1,
            .p_push_constant_ranges = @ptrCast(&self.push_constant_range),
        };
        self.pipeline_layout = try self.device.createPipelineLayout(&pipeline_layout_create_info, null);

        // -- Depth stencil testing --
        const depth_stencil_create_info: vk.PipelineDepthStencilStateCreateInfo = .{
            .depth_test_enable = vk.TRUE, // Enable checking depth to determine fragment write
            .depth_write_enable = vk.TRUE, // Enable writing to depth buffer to replace all values
            .depth_compare_op = .less, // Comparison operation that allows an overwrite (is in front)
            .depth_bounds_test_enable = vk.FALSE, // Depth bounds test: does the depth value exist between two bounds
            .stencil_test_enable = vk.FALSE, // Enable stencil test
            .front = undefined, // Unused: stencil test is disabled
            .back = undefined,
            .min_depth_bounds = undefined, // Unused: depth bounds test is disabled
            .max_depth_bounds = undefined,
        };

        // -- Graphics pipeline creation --
        const pipeline_create_info: vk.GraphicsPipelineCreateInfo = .{
            .stage_count = @intCast(shader_create_infos.len), // Number of shader stages
            .p_stages = &shader_create_infos, // List of shader stages
            .p_vertex_input_state = &vertex_input_create_info,
            .p_input_assembly_state = &assembly_create_info,
            .p_viewport_state = &viewport_state_create_info,
            .p_dynamic_state = &dynamic_state_create_info,
            .p_rasterization_state = &rasterizer_create_info,
            .p_multisample_state = &multisampling_create_info,
            .p_color_blend_state = &colour_blending_create_info,
            .p_depth_stencil_state = &depth_stencil_create_info,
            .layout = self.pipeline_layout, // Pipeline layout the pipeline should use
            .render_pass = self.render_pass, // Renderpass description the pipeline is compatible with
            .subpass = 0, // Subpass of renderpass to use with pipeline
            // Pipeline derivatives: can create multiple pipelines that derive from one another for optimisation
            .base_pipeline_handle = .null_handle, // Existing pipeline to derive from...
            .base_pipeline_index = -1, // Or index of pipeline being created to derive from (in case creating multiple at once)
        };
        _ = try self.device.createGraphicsPipelines(
            .null_handle,
            1,
            @ptrCast(&pipeline_create_info),
            null,
            @ptrCast(&self.graphics_pipeline),
        );
    }
fn createFramebuffers(self: *Self) !void {
self.swapchain_framebuffers = try self.allocator.alloc(vk.Framebuffer, self.swapchain_images.len);
// Create a frammebuffer for each swapchain image
for (self.swapchain_images, 0..) |swapchain_image, i| {
// Order matters
const attachments = [_]vk.ImageView{ swapchain_image.image_view, self.depth_buffer_image_view };
const framebuffer_create_info: vk.FramebufferCreateInfo = .{
.render_pass = self.render_pass, // Render pass layout the frambuffer will be used with
.attachment_count = @intCast(attachments.len),
.p_attachments = &attachments, // List of attachments (1:1 with render pass)
.width = self.extent.width, // Framebuffer width
.height = self.extent.height, // Framebuffer height
.layers = 1, // Framebuffer layers
};
self.swapchain_framebuffers[i] = try self.device.createFramebuffer(&framebuffer_create_info, null);
}
}
fn createCommandPool(self: *Self) !void {
// Get indices of queue families from device
const queue_family_indices = try self.getQueueFamilies(self.physical_device);
const pool_create_info: vk.CommandPoolCreateInfo = .{
// Queue family type that buffers from this command pool will use
.queue_family_index = queue_family_indices.graphics_family.?,
.flags = .{ .reset_command_buffer_bit = true },
};
// Create a graphics queue family command pool
self.graphics_command_pool = try self.device.createCommandPool(&pool_create_info, null);
}
fn createCommandBuffers(self: *Self) !void {
// Allocate one command buffer for each framebuffer
const command_buffer_handles = try self.allocator.alloc(vk.CommandBuffer, self.swapchain_framebuffers.len);
defer self.allocator.free(command_buffer_handles);
self.command_buffers = try self.allocator.alloc(CommandBuffer, command_buffer_handles.len);
const command_buffer_allocate_info: vk.CommandBufferAllocateInfo = .{
.command_pool = self.graphics_command_pool,
.level = .primary, // primary: buffer you submit directly to queue. Can't be called by other buffers
.command_buffer_count = @intCast(command_buffer_handles.len),
};
// Allocate command buffers and place handles in array of buffers
try self.device.allocateCommandBuffers(&command_buffer_allocate_info, command_buffer_handles.ptr);
for (command_buffer_handles, 0..) |command_buffer_handle, i| {
self.command_buffers[i] = CommandBuffer.init(command_buffer_handle, self.device.wrapper);
}
}
fn createSynchronisation(self: *Self) !void {
// Fence create information
const fence_create_info: vk.FenceCreateInfo = .{ .flags = .{ .signaled_bit = true } };
// Semaphore creation information
for (0..MAX_FRAME_DRAWS) |i| {
self.image_available[i] = try self.device.createSemaphore(&.{}, null);
self.render_finished[i] = try self.device.createSemaphore(&.{}, null);
self.draw_fences[i] = try self.device.createFence(&fence_create_info, null);
}
}
fn createUniformBuffers(self: *Self) !void {
// View projection buffer size
const vp_buffer_size: vk.DeviceSize = @sizeOf(UboViewProjection);
// One uniform buffer for each image (and by extension, command buffer)
self.vp_uniform_buffer = try self.allocator.alloc(vk.Buffer, self.swapchain_images.len);
self.vp_uniform_buffer_memory = try self.allocator.alloc(vk.DeviceMemory, self.swapchain_images.len);
// Create the uniform buffers
for (0..self.vp_uniform_buffer.len) |i| {
try Utilities.createBuffer(
self.physical_device,
self.instance,
self.device,
vp_buffer_size,
.{ .uniform_buffer_bit = true },
.{ .host_visible_bit = true, .host_coherent_bit = true },
&self.vp_uniform_buffer[i],
&self.vp_uniform_buffer_memory[i],
);
}
}
fn createDescriptorPool(self: *Self) !void {
// Type of descriptors + how many descriptors (!= descriptor sets) (combined makes the pool size)
// View projection pool
const vp_pool_size: vk.DescriptorPoolSize = .{
.type = .uniform_buffer,
.descriptor_count = @intCast(self.vp_uniform_buffer.len),
};
// List of pool sizes
const descriptor_pool_sizes = [_]vk.DescriptorPoolSize{vp_pool_size};
// Data to create descriptor pool
const pool_create_info: vk.DescriptorPoolCreateInfo = .{
.max_sets = @intCast(self.swapchain_images.len), // Maximum number of descriptor sets that can be created from pool
.pool_size_count = @intCast(descriptor_pool_sizes.len), // Amount of pool sizes being passed
.p_pool_sizes = &descriptor_pool_sizes, // Pool sizes to create pool with
};
// Create descriptor pool
self.descriptor_pool = try self.device.createDescriptorPool(&pool_create_info, null);
}
fn createDescriptorSets(self: *Self) !void {
// One descriptor set for every buffer
self.descriptor_sets = try self.allocator.alloc(vk.DescriptorSet, self.swapchain_images.len);
var set_layouts = try self.allocator.alloc(vk.DescriptorSetLayout, self.swapchain_images.len);
defer self.allocator.free(set_layouts);
for (0..set_layouts.len) |i| {
set_layouts[i] = self.descriptor_set_layout;
}
// Descriptor set allocation info
const set_alloc_info: vk.DescriptorSetAllocateInfo = .{
.descriptor_pool = self.descriptor_pool, // Pool to allocate descriptor set from
.descriptor_set_count = @intCast(self.swapchain_images.len), // Number of sets to allocate
.p_set_layouts = set_layouts.ptr, // Layouts to use to allocate sets (1:1 relationship)
};
// Allocate descriptor sets (multiple)
try self.device.allocateDescriptorSets(&set_alloc_info, self.descriptor_sets.ptr);
// Update all of descriptor set buffer bindings
for (0..self.swapchain_images.len) |i| {
// -- View projection descriptor
// Buffer info and data offset info
const vp_buffer_info: vk.DescriptorBufferInfo = .{
.buffer = self.vp_uniform_buffer[i], // Bufer to get data from
.offset = 0, // Position of start of data
.range = @sizeOf(UboViewProjection), // Size of data
};
// Data about connection between binding and buffer
const vp_set_write: vk.WriteDescriptorSet = .{
.dst_set = self.descriptor_sets[i], // Descriptor set to update
.dst_binding = 0, // Binding to update (matches with binding on layout/shader)
.dst_array_element = 0, // Index in array to update
.descriptor_type = .uniform_buffer, // Type of descriptor
.descriptor_count = 1, // Amount to update
.p_buffer_info = @ptrCast(&vp_buffer_info), // Information about buffer data to bind
.p_image_info = undefined,
.p_texel_buffer_view = undefined,
};
// List of descriptor set writes
const set_writes = [_]vk.WriteDescriptorSet{vp_set_write};
// Update the descriptor sets with new buffer/binding info
self.device.updateDescriptorSets(@intCast(set_writes.len), &set_writes, 0, null);
}
}
fn updateUniformBuffers(self: *Self, image_index: u32) !void {
// Copy VP data
const data = try self.device.mapMemory(
self.vp_uniform_buffer_memory[image_index],
0,
@sizeOf(UboViewProjection),
.{},
);
const vp_data: *UboViewProjection = @ptrCast(@alignCast(data));
vp_data.* = self.ubo_view_projection;
self.device.unmapMemory(self.vp_uniform_buffer_memory[image_index]);
}
fn recordCommands(self: *Self, current_image: u32) !void {
// Information about how to begin each command
const buffer_begin_info: vk.CommandBufferBeginInfo = .{
// Buffer can be resubmitted when it has already been submitted and is awaiting execution
.flags = .{ .simultaneous_use_bit = true },
};
const clear_values = [_]vk.ClearValue{
.{ .color = .{ .float_32 = [4]f32{ 0.6, 0.65, 0.4, 1.0 } } },
.{ .depth_stencil = .{ .depth = 1.0, .stencil = 1 } },
};
// Information about how to begin a render pass (only needed for graphical application)
var render_pass_begin_info: vk.RenderPassBeginInfo = .{
.render_pass = self.render_pass, // Render pass to begin
.render_area = .{
.offset = .{ .x = 0, .y = 0 }, // Start point of render pass in pixels
.extent = self.extent, // Size of region to run render pass on (starting at offset)
},
.p_clear_values = &clear_values, // List of clear values
.clear_value_count = @intCast(clear_values.len),
.framebuffer = undefined,
};
render_pass_begin_info.framebuffer = self.swapchain_framebuffers[current_image];
const command_buffer = self.command_buffers[current_image];
// Start recording commands to command buffer
try command_buffer.beginCommandBuffer(&buffer_begin_info);
{
// Begin render pass
command_buffer.beginRenderPass(&render_pass_begin_info, vk.SubpassContents.@"inline");
// Needed when using dynamic state
command_buffer.setViewport(0, 1, @ptrCast(&self.viewport));
command_buffer.setScissor(0, 1, @ptrCast(&self.scissor));
for (self.meshes) |mesh| {
// Bind pipeline to be used in render pass
command_buffer.bindPipeline(.graphics, self.graphics_pipeline);
// Buffers to bind
const vertex_buffers = [_]vk.Buffer{mesh.vertex_buffer};
// Offsets into buffers being bound
const offsets = [_]vk.DeviceSize{0};
// Command to bind vertex buffer before drawing with them
command_buffer.bindVertexBuffers(0, 1, &vertex_buffers, &offsets);
// Bind mesh index buffer, with 0 offset and using the uint32 type
command_buffer.bindIndexBuffer(mesh.index_buffer, 0, .uint32);
// Push constants to given shader stage directly (no buffer)
command_buffer.pushConstants(
self.pipeline_layout,
.{ .vertex_bit = true }, // Stage to push constants to
0, // Offset of push constants to update
@sizeOf(Model), // Size of data being pushed
@ptrCast(&mesh.ubo_model.model), // Actual data being pushed (can be array)
);
// Bind descriptor sets
command_buffer.bindDescriptorSets(
.graphics,
self.pipeline_layout,
0,
1,
@ptrCast(&self.descriptor_sets[current_image]),
0,
null,
);
// Execute a pipeline
command_buffer.drawIndexed(mesh.index_count, 1, 0, 0, 0);
}
// End render pass
command_buffer.endRenderPass();
}
// Stop recording to command buffer
try command_buffer.endCommandBuffer();
}
fn getPhysicalDevice(self: *Self) !void {
var pdev_count: u32 = 0;
_ = try self.instance.enumeratePhysicalDevices(&pdev_count, null);
const pdevs = try self.allocator.alloc(vk.PhysicalDevice, pdev_count);
defer self.allocator.free(pdevs);
_ = try self.instance.enumeratePhysicalDevices(&pdev_count, pdevs.ptr);
for (pdevs) |pdev| {
if (self.checkDeviceSuitable(pdev)) {
self.physical_device = pdev;
break;
}
} else {
// TODO Obviously needs to be something else
unreachable;
}
}
fn getRequiredExtensions(self: Self) ![][*:0]const u8 {
var ext_count = sdl.vulkan.getInstanceExtensionsCount(self.window);
if (enable_validation_layers) {
ext_count += 1;
}
var extensions = try self.allocator.alloc([*:0]const u8, ext_count);
_ = try sdl.vulkan.getInstanceExtensions(self.window, extensions);
if (enable_validation_layers) {
extensions[extensions.len - 1] = vk.extensions.ext_debug_utils.name;
}
return extensions;
}
fn getQueueFamilies(self: Self, pdev: vk.PhysicalDevice) !QueueFamilyIndices {
var indices: QueueFamilyIndices = .{ .graphics_family = null };
var queue_family_count: u32 = 0;
self.instance.getPhysicalDeviceQueueFamilyProperties(pdev, &queue_family_count, null);
const queue_family_list = try self.allocator.alloc(vk.QueueFamilyProperties, queue_family_count);
defer self.allocator.free(queue_family_list);
self.instance.getPhysicalDeviceQueueFamilyProperties(pdev, &queue_family_count, queue_family_list.ptr);
for (queue_family_list, 0..) |queue_family, i| {
if (queue_family.queue_count > 0 and queue_family.queue_flags.graphics_bit) {
indices.graphics_family = @intCast(i);
}
const presentation_support = try self.instance.getPhysicalDeviceSurfaceSupportKHR(pdev, @intCast(i), self.surface);
if (queue_family.queue_count > 0 and presentation_support == vk.TRUE) {
indices.presentation_family = @intCast(i);
}
if (indices.isValid()) {
return indices;
}
}
unreachable;
}
fn getDeviceQueues(self: Self) ![2]vk.Queue {
const indices = try self.getQueueFamilies(self.physical_device);
const graphics_queue = self.device.getDeviceQueue(indices.graphics_family.?, 0);
const presentation_queue = self.device.getDeviceQueue(indices.presentation_family.?, 0);
return .{ graphics_queue, presentation_queue };
}
fn checkInstanceExtensions(self: Self, required_extensions: *const [][*:0]const u8) !bool {
var prop_count: u32 = 0;
_ = try self.vkb.enumerateInstanceExtensionProperties(null, &prop_count, null);
const props = try self.allocator.alloc(vk.ExtensionProperties, prop_count);
defer self.allocator.free(props);
_ = try self.vkb.enumerateInstanceExtensionProperties(null, &prop_count, props.ptr);
for (required_extensions.*) |required_extension| {
for (props) |prop| {
if (std.mem.eql(u8, std.mem.sliceTo(&prop.extension_name, 0), std.mem.span(required_extension))) {
break;
}
} else {
return false;
}
}
return true;
}
fn checkDeviceExtensions(self: Self, pdev: vk.PhysicalDevice) !bool {
var prop_count: u32 = 0;
_ = try self.instance.enumerateDeviceExtensionProperties(pdev, null, &prop_count, null);
if (prop_count == 0) {
return false;
}
const props = try self.allocator.alloc(vk.ExtensionProperties, prop_count);
defer self.allocator.free(props);
_ = try self.instance.enumerateDeviceExtensionProperties(pdev, null, &prop_count, props.ptr);
for (Utilities.device_extensions) |device_extension| {
for (props) |prop| {
if (std.mem.eql(u8, std.mem.sliceTo(&prop.extension_name, 0), std.mem.span(device_extension))) {
break;
}
} else {
return false;
}
}
return true;
}
fn checkDeviceSuitable(self: Self, pdev: vk.PhysicalDevice) bool {
const pdev_properties = self.instance.getPhysicalDeviceProperties(pdev);
if (pdev_properties.device_type == .cpu) {
return false;
}
const queue_family_indices = self.getQueueFamilies(pdev) catch return false;
const extension_support = self.checkDeviceExtensions(pdev) catch return false;
const swapchain_details = self.getSwapchainDetails(pdev) catch return false;
defer self.allocator.free(swapchain_details.formats);
defer self.allocator.free(swapchain_details.presentation_modes);
const swapchain_valid = swapchain_details.formats.len != 0 and swapchain_details.formats.len != 0;
return queue_family_indices.isValid() and extension_support and swapchain_valid;
}
fn checkValidationLayersSupport(self: Self) bool {
var layer_count: u32 = undefined;
_ = self.vkb.enumerateInstanceLayerProperties(&layer_count, null) catch return false;
const available_layers = self.allocator.alloc(vk.LayerProperties, layer_count) catch unreachable;
defer self.allocator.free(available_layers);
_ = self.vkb.enumerateInstanceLayerProperties(&layer_count, available_layers.ptr) catch return false;
for (validation_layers) |validation_layer| {
for (available_layers) |available_layer| {
if (std.mem.eql(u8, std.mem.span(validation_layer), std.mem.sliceTo(&available_layer.layer_name, 0))) {
return true;
}
}
}
return false;
}
fn getSwapchainDetails(self: Self, pdev: vk.PhysicalDevice) !SwapchainDetails {
// Capabilities
const surface_capabilities = try self.instance.getPhysicalDeviceSurfaceCapabilitiesKHR(pdev, self.surface);
// Formats
var format_count: u32 = 0;
_ = try self.instance.getPhysicalDeviceSurfaceFormatsKHR(pdev, self.surface, &format_count, null);
const formats = try self.allocator.alloc(vk.SurfaceFormatKHR, format_count);
_ = try self.instance.getPhysicalDeviceSurfaceFormatsKHR(pdev, self.surface, &format_count, formats.ptr);
// Presentation modes
var present_mode_count: u32 = 0;
_ = try self.instance.getPhysicalDeviceSurfacePresentModesKHR(pdev, self.surface, &present_mode_count, null);
const presentation_modes = try self.allocator.alloc(vk.PresentModeKHR, format_count);
_ = try self.instance.getPhysicalDeviceSurfacePresentModesKHR(pdev, self.surface, &present_mode_count, presentation_modes.ptr);
return .{
.surface_capabilities = surface_capabilities,
.formats = formats,
.presentation_modes = presentation_modes,
};
}
fn createImage(
self: *Self,
width: u32,
height: u32,
format: vk.Format,
tiling: vk.ImageTiling,
use_flags: vk.ImageUsageFlags,
prop_flags: vk.MemoryPropertyFlags,
image_memory: *vk.DeviceMemory,
) !vk.Image {
// -- Create Image --
const image_create_info: vk.ImageCreateInfo = .{
.image_type = .@"2d", // Type of image (1D, 2D or 3D)
.extent = .{
.width = width, // Width of image extent
.height = height, // Height of image extent
.depth = 1, // Depth of image (just 1, no 3D aspecct)
},
.mip_levels = 1, // Number of mipmap levels
.array_layers = 1, // Number of level in image array
.format = format, // Format type of image
.tiling = tiling, // How image data should be tiled (arranged for optimal reading)
.initial_layout = .undefined, // Layout of image data on creation
.usage = use_flags, // Bit flags defining what image will be used for
.samples = .{ .@"1_bit" = true }, // Number of samples for multi-sampling
.sharing_mode = .exclusive, // Whether image can be shared between queues
};
const image = try self.device.createImage(&image_create_info, null);
// -- Create memory for image --
// Get memory requirements for a type of image
const memory_requirements = self.device.getImageMemoryRequirements(image);
// Allocate memory using image requirements and user-defined properties
const memory_alloc_info: vk.MemoryAllocateInfo = .{
.allocation_size = memory_requirements.size,
.memory_type_index = Utilities.findMemoryTypeIndex(self.physical_device, self.instance, memory_requirements.memory_type_bits, prop_flags),
};
image_memory.* = try self.device.allocateMemory(&memory_alloc_info, null);
// Connect memory to image
try self.device.bindImageMemory(image, image_memory.*, 0);
return image;
}
fn createImageView(self: Self, image: vk.Image, format: vk.Format, aspect_flags: vk.ImageAspectFlags) !vk.ImageView {
const image_view_create_info: vk.ImageViewCreateInfo = .{
.image = image,
.format = format,
.view_type = .@"2d",
.components = .{
// Used for remapping rgba values to other rgba values
.r = .identity,
.g = .identity,
.b = .identity,
.a = .identity,
},
.subresource_range = .{
.aspect_mask = aspect_flags, // Which aspect of image to view (e.g.: colour, depth, stencil, etc...)
.base_mip_level = 0, // Start mipmap level to view from
.level_count = 1, // Number of mipmap levels to view
.base_array_layer = 0, // Start array level to view from
.layer_count = 1, // Number of array levels to view
},
};
return try self.device.createImageView(&image_view_create_info, null);
}
};
/// Pick the preferred surface format: VK_FORMAT_R8G8B8A8_UNORM
/// (VK_FORMAT_B8G8R8A8_UNORM as backup) with colour space
/// VK_COLOR_SPACE_SRGB_NONLINEAR_KHR, falling back to the first reported format.
fn chooseBestSurfaceFormat(formats: []vk.SurfaceFormatKHR) vk.SurfaceFormatKHR {
    // A single `undefined` format means every format is available
    if (formats.len == 1 and formats[0].format == vk.Format.undefined) {
        return .{
            .format = vk.Format.r8g8b8a8_unorm,
            .color_space = vk.ColorSpaceKHR.srgb_nonlinear_khr,
        };
    }

    for (formats) |format| {
        const format_ok = format.format == vk.Format.r8g8b8a8_unorm or format.format == vk.Format.b8g8r8a8_unorm;
        if (format_ok and format.color_space == vk.ColorSpaceKHR.srgb_nonlinear_khr) {
            return format;
        }
    }

    // Nothing preferred available: settle for the first format
    return formats[0];
}
/// Prefer mailbox presentation; fall back to FIFO, which the Vulkan spec
/// guarantees is always available.
fn chooseBestPresentationMode(presentation_modes: []vk.PresentModeKHR) vk.PresentModeKHR {
    return for (presentation_modes) |mode| {
        if (mode == vk.PresentModeKHR.mailbox_khr) break mode;
    } else vk.PresentModeKHR.fifo_khr;
}
/// Determine the swapchain extent: use the surface's current extent when it
/// is fixed, otherwise derive it from the drawable size clamped to the
/// surface's supported range.
fn chooseSwapExtent(window: *sdl.Window, surface_capabilities: vk.SurfaceCapabilitiesKHR) vk.Extent2D {
    // A current extent of maxInt(u32) means the extent can vary; otherwise it
    // is fixed to the window size
    if (surface_capabilities.current_extent.width != std.math.maxInt(u32)) {
        return surface_capabilities.current_extent;
    }

    // The value can vary, so set the extent manually from the drawable size
    const framebuffer_size = sdl.vulkan.getDrawableSize(window);

    // The surface also defines min and max extents, so clamp to stay in bounds
    // (std.math.clamp replaces the hand-rolled @max/@min combination)
    return .{
        .width = std.math.clamp(
            @as(u32, @intCast(framebuffer_size.width)),
            surface_capabilities.min_image_extent.width,
            surface_capabilities.max_image_extent.width,
        ),
        .height = std.math.clamp(
            @as(u32, @intCast(framebuffer_size.height)),
            surface_capabilities.min_image_extent.height,
            surface_capabilities.max_image_extent.height,
        ),
    };
}
/// Return the first candidate format whose properties (for the requested
/// tiling mode) include all of `feature_flags`, or null if none qualifies.
fn chooseSupportedFormat(
    pdev: vk.PhysicalDevice,
    instance: Instance,
    formats: []const vk.Format,
    tiling: vk.ImageTiling,
    feature_flags: vk.FormatFeatureFlags,
) ?vk.Format {
    for (formats) |format| {
        // Get properties for this format on this device, then check the
        // feature set matching the requested tiling mode
        const properties = instance.getPhysicalDeviceFormatProperties(pdev, format);
        const supported = switch (tiling) {
            .linear => properties.linear_tiling_features.contains(feature_flags),
            .optimal => properties.optimal_tiling_features.contains(feature_flags),
            else => false,
        };
        if (supported) {
            return format;
        }
    }
    return null;
}
// Validation layers stuff
/// Register the debug utils messenger on the instance using the shared
/// create info from getDebugUtilsCreateInfo.
fn createDebugMessenger(instance: Instance) !vk.DebugUtilsMessengerEXT {
    const create_info = getDebugUtilsCreateInfo();
    return instance.createDebugUtilsMessengerEXT(&create_info, null);
}
/// Build the messenger create info: verbose/warning/error severities across
/// the general, validation and performance message types.
fn getDebugUtilsCreateInfo() vk.DebugUtilsMessengerCreateInfoEXT {
    return .{
        .message_severity = .{
            .verbose_bit_ext = true,
            .warning_bit_ext = true,
            .error_bit_ext = true,
        },
        .message_type = .{
            .general_bit_ext = true,
            .validation_bit_ext = true,
            .performance_bit_ext = true,
        },
        .pfn_user_callback = debugCallback,
    };
}
/// Validation layer callback: print severity, type and message to stderr.
fn debugCallback(
    message_severity: vk.DebugUtilsMessageSeverityFlagsEXT,
    message_types: vk.DebugUtilsMessageTypeFlagsEXT,
    p_callback_data: ?*const vk.DebugUtilsMessengerCallbackDataEXT,
    p_user_data: ?*anyopaque,
) callconv(vk.vulkan_call_conv) vk.Bool32 {
    _ = p_user_data;

    const severity = getMessageSeverityLabel(message_severity);
    const message_type = getMessageTypeLabel(message_types);

    std.debug.print("[{s}] ({s}): {s}\n", .{ severity, message_type, p_callback_data.?.p_message.? });

    // BUGFIX: the VK_EXT_debug_utils spec requires applications to return
    // VK_FALSE here; returning VK_TRUE aborts the Vulkan call that triggered
    // the message with VK_ERROR_VALIDATION_FAILED_EXT
    return vk.FALSE;
}
/// Map a severity bit mask to a log label (checked in the same order as the
/// original: verbose, info, warning, error).
inline fn getMessageSeverityLabel(message_severity: vk.DebugUtilsMessageSeverityFlagsEXT) []const u8 {
    if (message_severity.verbose_bit_ext) return "VERBOSE";
    if (message_severity.info_bit_ext) return "INFO";
    if (message_severity.warning_bit_ext) return "WARNING";
    if (message_severity.error_bit_ext) return "ERROR";
    // A callback invocation always carries one of the severities above
    unreachable;
}
/// Map a message type bit mask to a lowercase label; the first matching bit
/// wins, and unrecognised masks yield "unknown".
inline fn getMessageTypeLabel(message_types: vk.DebugUtilsMessageTypeFlagsEXT) []const u8 {
    if (message_types.general_bit_ext) return "general";
    if (message_types.validation_bit_ext) return "validation";
    if (message_types.performance_bit_ext) return "performance";
    if (message_types.device_address_binding_bit_ext) return "device_address_binding";
    return "unknown";
}