vulkan-zig/src/utilities.zig

const std = @import("std");
const vk = @import("vulkan");

const Instance = @import("vulkan_renderer.zig").Instance;
const Device = @import("vulkan_renderer.zig").Device;
const CommandBuffer = @import("vulkan_renderer.zig").CommandBuffer;

pub const Vector3 = @Vector(3, f32);
pub const Vector2 = @Vector(2, f32);

/// Vertex data representation
pub const Vertex = struct {
    pos: Vector3, // Vertex position (x, y, z)
    col: Vector3, // Vertex colour (r, g, b)
    tex: Vector2, // Texture coords (u, v)
};
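
/// Find the index of a physical-device memory type that is permitted by the
/// `allowed_types` bitmask (e.g. `memory_type_bits` from the buffer's memory
/// requirements) and supports every flag in `properties`.
/// Hits `unreachable` if no suitable memory type exists.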
pub fn findMemoryTypeIndex(pdev: vk.PhysicalDevice, instance: Instance, allowed_types: u32, properties: vk.MemoryPropertyFlags) u32 {
    // Get properties of physical device memory
    const memory_properties = instance.getPhysicalDeviceMemoryProperties(pdev);
    const mem_type_count = memory_properties.memory_type_count;

    for (memory_properties.memory_types[0..mem_type_count], 0..mem_type_count) |mem_type, i| {
        // Index of memory type must match corresponding bit in allowed_types
        if (allowed_types & (@as(u32, 1) << @truncate(i)) != 0 and mem_type.property_flags.contains(properties)) {
            // Return the index of the valid memory type
            return @truncate(i);
        }
    }

    // No memory type satisfies both the allowed-type mask and the requested properties
    unreachable;
}
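
/// Create a `vk.Buffer` of `buffer_size` bytes with the given usage flags, allocate
/// device memory for it from a memory type matching `buffer_properties`, and bind the
/// memory to the buffer. The resulting handles are written to `buffer` and
/// `buffer_memory`; the caller owns them and must destroy/free them.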
pub fn createBuffer(
    pdev: vk.PhysicalDevice,
    instance: Instance,
    device: Device,
    buffer_size: vk.DeviceSize,
    buffer_usage: vk.BufferUsageFlags,
    buffer_properties: vk.MemoryPropertyFlags,
    buffer: *vk.Buffer,
    buffer_memory: *vk.DeviceMemory,
) !void {
    // Information to create the buffer (doesn't include assigning memory)
    const buffer_create_info: vk.BufferCreateInfo = .{
        .size = buffer_size, // Size of buffer in bytes (e.g. size of 1 vertex * number of vertices)
        .usage = buffer_usage, // Multiple types of buffer possible
        .sharing_mode = .exclusive, // Like swapchain images, only one queue family accesses the buffer at a time
    };
    buffer.* = try device.createBuffer(&buffer_create_info, null);

    // Get buffer memory requirements
    const mem_requirements = device.getBufferMemoryRequirements(buffer.*);

    // Allocate memory for the buffer
    const allocate_info: vk.MemoryAllocateInfo = .{
        .allocation_size = mem_requirements.size,
        .memory_type_index = findMemoryTypeIndex(
            pdev,
            instance,
            mem_requirements.memory_type_bits, // Bitmask of memory types on the physical device that are compatible with this buffer
            // Host visible: CPU can interact with memory
            // Host coherent: Allows placement of data straight into buffer after mapping (otherwise would have to flush manually)
            buffer_properties,
        ),
    };

    // Allocate the device memory
    buffer_memory.* = try device.allocateMemory(&allocate_info, null);

    // Bind the allocated memory to the buffer
    try device.bindBufferMemory(buffer.*, buffer_memory.*, 0);
}
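
/// Allocate a primary command buffer from `command_pool` and begin recording it with
/// the one-time-submit flag set.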
fn beginCommandBuffer(device: Device, command_pool: vk.CommandPool) !CommandBuffer {
    // Command buffer to hold transfer commands
    var command_buffer_handle: vk.CommandBuffer = undefined;

    // Command buffer details
    const alloc_info: vk.CommandBufferAllocateInfo = .{
        .command_pool = command_pool,
        .level = .primary,
        .command_buffer_count = 1,
    };

    // Allocate command buffer from pool
    try device.allocateCommandBuffers(&alloc_info, @ptrCast(&command_buffer_handle));
    const command_buffer = CommandBuffer.init(command_buffer_handle, device.wrapper);

    // Information to begin the command buffer record
    const begin_info: vk.CommandBufferBeginInfo = .{
        .flags = .{ .one_time_submit_bit = true }, // We're only using the command buffer once, so set to one time submit
    };

    // Begin recording transfer commands
    try command_buffer.beginCommandBuffer(&begin_info);

    return command_buffer;
}
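
/// Finish recording `command_buffer`, submit it to `queue`, block until the queue is
/// idle, then free the command buffer back to `command_pool`.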
fn endAndSubmitCommandBuffer(device: Device, command_buffer: CommandBuffer, command_pool: vk.CommandPool, queue: vk.Queue) !void {
    // End commands
    try command_buffer.endCommandBuffer();

    // Queue submission information
    const submit_info: vk.SubmitInfo = .{
        .command_buffer_count = 1,
        .p_command_buffers = @ptrCast(&command_buffer.handle),
    };

    // Submit transfer command to transfer queue and wait until it finishes
    try device.queueSubmit(queue, 1, @ptrCast(&submit_info), .null_handle);
    try device.queueWaitIdle(queue);

    // Free temporary command buffer back to pool
    device.freeCommandBuffers(command_pool, 1, @ptrCast(&command_buffer.handle));
}
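
/// Record and submit a one-off transfer command that copies `buffer_size` bytes from
/// `src_buffer` to `dst_buffer` (e.g. from a staging buffer to a device-local buffer).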
pub fn copyBuffer(
    device: Device,
    transfer_queue: vk.Queue,
    transfer_command_pool: vk.CommandPool,
    src_buffer: vk.Buffer,
    dst_buffer: vk.Buffer,
    buffer_size: vk.DeviceSize,
) !void {
    // Allocate and begin a one-off transfer command buffer
    const transfer_command_buffer = try beginCommandBuffer(device, transfer_command_pool);

    // Region of data to copy from and to
    const buffer_copy_region: vk.BufferCopy = .{
        .src_offset = 0,
        .dst_offset = 0,
        .size = buffer_size,
    };

    // Command to copy src buffer to dst buffer
    transfer_command_buffer.copyBuffer(src_buffer, dst_buffer, 1, @ptrCast(&buffer_copy_region));

    // End recording and submit the transfer command
    try endAndSubmitCommandBuffer(device, transfer_command_buffer, transfer_command_pool, transfer_queue);
}
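
/// Record and submit a one-off transfer command that copies tightly packed pixel data
/// from `src_buffer` into `image`, which must already be in the
/// `transfer_dst_optimal` layout (see `transitionImageLayout`).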
pub fn copyImageBuffer(
    device: Device,
    transfer_queue: vk.Queue,
    transfer_command_pool: vk.CommandPool,
    src_buffer: vk.Buffer,
    image: vk.Image,
    width: u32,
    height: u32,
) !void {
    const transfer_command_buffer = try beginCommandBuffer(device, transfer_command_pool);

    const image_region: vk.BufferImageCopy = .{
        .buffer_offset = 0, // Offset into data
        .buffer_row_length = 0, // Row length of data to calculate data spacing (0 = tightly packed)
        .buffer_image_height = 0, // Image height to calculate data spacing (0 = tightly packed)
        .image_subresource = .{
            .aspect_mask = .{ .color_bit = true }, // Which aspect of image to copy
            .mip_level = 0, // Mipmap level to copy
            .base_array_layer = 0, // Starting array layer (if array)
            .layer_count = 1, // Number of layers to copy starting at base_array_layer
        },
        .image_offset = .{ .x = 0, .y = 0, .z = 0 }, // Offset into image (as opposed to raw data buffer offset)
        .image_extent = .{ .width = width, .height = height, .depth = 1 }, // Size of the region to copy as x, y, z values
    };

    transfer_command_buffer.copyBufferToImage(src_buffer, image, .transfer_dst_optimal, 1, @ptrCast(&image_region));

    try endAndSubmitCommandBuffer(device, transfer_command_buffer, transfer_command_pool, transfer_queue);
}
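
/// Transition `image` between layouts with an image memory barrier recorded on a
/// one-off command buffer. Only two transitions are wired up: `undefined` ->
/// `transfer_dst_optimal` (before a copy) and `transfer_dst_optimal` ->
/// `shader_read_only_optimal` (before sampling in the fragment shader).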
pub fn transitionImageLayout(
    device: Device,
    queue: vk.Queue,
    command_pool: vk.CommandPool,
    image: vk.Image,
    old_layout: vk.ImageLayout,
    new_layout: vk.ImageLayout,
) !void {
    const command_buffer = try beginCommandBuffer(device, command_pool);

    var image_memory_barrier: vk.ImageMemoryBarrier = .{
        .old_layout = old_layout, // Layout to transition from
        .new_layout = new_layout, // Layout to transition to
        .src_queue_family_index = vk.QUEUE_FAMILY_IGNORED, // Queue family to transition from
        .dst_queue_family_index = vk.QUEUE_FAMILY_IGNORED, // Queue family to transition to
        .image = image, // Image being accessed and modified as part of barrier
        .subresource_range = .{
            .aspect_mask = .{ .color_bit = true }, // Aspect of image being altered
            .base_mip_level = 0, // First mip level to start alterations on
            .level_count = 1, // Number of mipmap levels to alter starting from base mip level
            .base_array_layer = 0, // First array layer to start alterations on
            .layer_count = 1, // Number of layers to alter starting from base array layer
        },
        .src_access_mask = .{}, // Memory accesses the transition must happen after
        .dst_access_mask = .{}, // Memory accesses the transition must happen before
    };

    var src_stage: vk.PipelineStageFlags = .{};
    var dst_stage: vk.PipelineStageFlags = .{};

    if (old_layout == vk.ImageLayout.@"undefined" and new_layout == vk.ImageLayout.transfer_dst_optimal) {
        // Transitioning from a newly created image to one ready to receive data
        image_memory_barrier.dst_access_mask.transfer_write_bit = true;
        src_stage.top_of_pipe_bit = true;
        dst_stage.transfer_bit = true;
    } else if (old_layout == vk.ImageLayout.transfer_dst_optimal and new_layout == vk.ImageLayout.shader_read_only_optimal) {
        // Transitioning from transfer destination to shader readable
        image_memory_barrier.src_access_mask.transfer_write_bit = true;
        image_memory_barrier.dst_access_mask.shader_read_bit = true;
        src_stage.transfer_bit = true;
        dst_stage.fragment_shader_bit = true;
    }
    // Note: only the two transitions above are handled; any other combination leaves
    // the access masks and stage masks empty.

    command_buffer.pipelineBarrier(
        src_stage, // Pipeline stages (match to src and dst access masks)
        dst_stage,
        .{}, // Dependency flags
        0, // Memory barrier count
        null, // Memory barrier data
        0, // Buffer memory barrier count
        null, // Buffer memory barrier data
        1, // Image memory barrier count
        @ptrCast(&image_memory_barrier), // Image memory barrier data
    );

    try endAndSubmitCommandBuffer(device, command_buffer, command_pool, queue);
}
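
// Illustrative usage sketch (an addition for documentation, not part of the original
// renderer): how the helpers above typically combine to build a device-local vertex
// buffer through a host-visible staging buffer. The function name and parameters here
// are hypothetical; the step that maps `staging_buffer_memory` and copies vertex data
// into it is left as a placeholder comment because it goes through the device wrapper
// rather than this module.
fn exampleCreateVertexBuffer(
    pdev: vk.PhysicalDevice,
    instance: Instance,
    device: Device,
    transfer_queue: vk.Queue,
    transfer_command_pool: vk.CommandPool,
    vertex_count: usize,
) !struct { buffer: vk.Buffer, memory: vk.DeviceMemory } {
    const buffer_size: vk.DeviceSize = @sizeOf(Vertex) * vertex_count;

    // Host-visible staging buffer that the CPU can write into
    var staging_buffer: vk.Buffer = undefined;
    var staging_buffer_memory: vk.DeviceMemory = undefined;
    try createBuffer(
        pdev,
        instance,
        device,
        buffer_size,
        .{ .transfer_src_bit = true },
        .{ .host_visible_bit = true, .host_coherent_bit = true },
        &staging_buffer,
        &staging_buffer_memory,
    );
    defer device.destroyBuffer(staging_buffer, null);
    defer device.freeMemory(staging_buffer_memory, null);

    // ... map `staging_buffer_memory` and copy the vertex bytes into it here ...

    // Device-local buffer that the GPU reads from when drawing; the caller owns it
    var vertex_buffer: vk.Buffer = undefined;
    var vertex_buffer_memory: vk.DeviceMemory = undefined;
    try createBuffer(
        pdev,
        instance,
        device,
        buffer_size,
        .{ .transfer_dst_bit = true, .vertex_buffer_bit = true },
        .{ .device_local_bit = true },
        &vertex_buffer,
        &vertex_buffer_memory,
    );

    // Copy staging -> device-local on the transfer queue
    try copyBuffer(device, transfer_queue, transfer_command_pool, staging_buffer, vertex_buffer, buffer_size);

    return .{ .buffer = vertex_buffer, .memory = vertex_buffer_memory };
}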