Compare commits

temp ... master

1 commit

61a18e6f0b   Clean up model   2024-09-01 19:24:00 +02:00

9 changed files with 45 additions and 972 deletions


@@ -1,24 +0,0 @@
newmtl white
Ka 0 0 0
Kd 1 1 1
Ks 0 0 0
newmtl red
Ka 0 0 0
Kd 1 0 0
Ks 0 0 0
newmtl green
Ka 0 0 0
Kd 0 1 0
Ks 0 0 0
newmtl blue
Ka 0 0 0
Kd 0 0 1
Ks 0 0 0
newmtl light
Ka 20 20 20
Kd 1 1 1
Ks 0 0 0


@@ -1,145 +0,0 @@
# cornell_box.obj and cornell_box.mtl are grabbed from Intel's embree project.
# original cornell box data
# comment
# empty line including some space
mtllib cornell_box.mtl
o floor
usemtl white
v 552.8 0.0 0.0
v 0.0 0.0 0.0
v 0.0 0.0 559.2
v 549.6 0.0 559.2
v 130.0 0.0 65.0
v 82.0 0.0 225.0
v 240.0 0.0 272.0
v 290.0 0.0 114.0
v 423.0 0.0 247.0
v 265.0 0.0 296.0
v 314.0 0.0 456.0
v 472.0 0.0 406.0
f 1 2 3 4
f 8 7 6 5
f 12 11 10 9
o light
usemtl light
v 343.0 548.0 227.0
v 343.0 548.0 332.0
v 213.0 548.0 332.0
v 213.0 548.0 227.0
f -4 -3 -2 -1
o ceiling
usemtl white
v 556.0 548.8 0.0
v 556.0 548.8 559.2
v 0.0 548.8 559.2
v 0.0 548.8 0.0
f -4 -3 -2 -1
o back_wall
usemtl white
v 549.6 0.0 559.2
v 0.0 0.0 559.2
v 0.0 548.8 559.2
v 556.0 548.8 559.2
f -4 -3 -2 -1
o front_wall
usemtl blue
v 549.6 0.0 0
v 0.0 0.0 0
v 0.0 548.8 0
v 556.0 548.8 0
#f -1 -2 -3 -4
o green_wall
usemtl green
v 0.0 0.0 559.2
v 0.0 0.0 0.0
v 0.0 548.8 0.0
v 0.0 548.8 559.2
f -4 -3 -2 -1
o red_wall
usemtl red
v 552.8 0.0 0.0
v 549.6 0.0 559.2
v 556.0 548.8 559.2
v 556.0 548.8 0.0
f -4 -3 -2 -1
o short_block
usemtl white
v 130.0 165.0 65.0
v 82.0 165.0 225.0
v 240.0 165.0 272.0
v 290.0 165.0 114.0
f -4 -3 -2 -1
v 290.0 0.0 114.0
v 290.0 165.0 114.0
v 240.0 165.0 272.0
v 240.0 0.0 272.0
f -4 -3 -2 -1
v 130.0 0.0 65.0
v 130.0 165.0 65.0
v 290.0 165.0 114.0
v 290.0 0.0 114.0
f -4 -3 -2 -1
v 82.0 0.0 225.0
v 82.0 165.0 225.0
v 130.0 165.0 65.0
v 130.0 0.0 65.0
f -4 -3 -2 -1
v 240.0 0.0 272.0
v 240.0 165.0 272.0
v 82.0 165.0 225.0
v 82.0 0.0 225.0
f -4 -3 -2 -1
o tall_block
usemtl white
v 423.0 330.0 247.0
v 265.0 330.0 296.0
v 314.0 330.0 456.0
v 472.0 330.0 406.0
f -4 -3 -2 -1
usemtl white
v 423.0 0.0 247.0
v 423.0 330.0 247.0
v 472.0 330.0 406.0
v 472.0 0.0 406.0
f -4 -3 -2 -1
v 472.0 0.0 406.0
v 472.0 330.0 406.0
v 314.0 330.0 456.0
v 314.0 0.0 456.0
f -4 -3 -2 -1
v 314.0 0.0 456.0
v 314.0 330.0 456.0
v 265.0 330.0 296.0
v 265.0 0.0 296.0
f -4 -3 -2 -1
v 265.0 0.0 296.0
v 265.0 330.0 296.0
v 423.0 330.0 247.0
v 423.0 0.0 247.0
f -4 -3 -2 -1


@@ -1,340 +0,0 @@
const std = @import("std");
const builtin = @import("builtin");
const vk = @import("vulkan");
const sdl = @import("sdl");
const img = @import("zstbi");
const validation = @import("validation_layers.zig");
const Swapchain = @import("Swapchain.zig");
const QueueUtils = @import("queue_utils.zig");
const device_extensions = [_][*:0]const u8{vk.extensions.khr_swapchain.name};
pub const apis: []const vk.ApiInfo = &.{
vk.features.version_1_0,
vk.features.version_1_1,
vk.features.version_1_2,
vk.features.version_1_3,
vk.extensions.khr_surface,
vk.extensions.khr_swapchain,
vk.extensions.ext_debug_utils,
};
const enable_validation_layers = builtin.mode == .Debug;
const validation_layers = [_][*:0]const u8{"VK_LAYER_KHRONOS_validation"};
const BaseDispatch = vk.BaseWrapper(apis);
const InstanceDispatch = vk.InstanceWrapper(apis);
const DeviceDispatch = vk.DeviceWrapper(apis);
pub const Instance = vk.InstanceProxy(apis);
pub const Device = vk.DeviceProxy(apis);
pub const Queue = vk.QueueProxy(apis);
// ---
const Self = @This();
allocator: std.mem.Allocator,
vkb: BaseDispatch,
window: sdl.Window,
instance: Instance,
physical_device: vk.PhysicalDevice,
device: Device,
command_pool: vk.CommandPool,
graphics_queue: Queue,
presentation_queue: Queue,
surface: vk.SurfaceKHR,
swapchain: Swapchain,
debug_utils: ?vk.DebugUtilsMessengerEXT,
pub fn init(allocator: std.mem.Allocator, window: sdl.Window) !Self {
var self: Self = undefined;
self.window = window;
self.allocator = allocator;
self.vkb = try BaseDispatch.load(try sdl.vulkan.getVkGetInstanceProcAddr());
img.init(allocator);
try self.createInstance();
if (enable_validation_layers) {
self.debug_utils = try validation.createDebugMessenger(self.instance);
}
try self.createSurface();
try self.getPhysicalDevice();
try self.createLogicalDevice();
self.swapchain = try Swapchain.create(allocator, self);
return self;
}
pub fn deinit(self: *Self) void {
if (enable_validation_layers) {
self.instance.destroyDebugUtilsMessengerEXT(self.debug_utils.?, null);
}
self.device.destroyDevice(null);
self.instance.destroySurfaceKHR(self.surface, null);
self.instance.destroyInstance(null);
self.allocator.destroy(self.device.wrapper);
self.allocator.destroy(self.instance.wrapper);
img.deinit();
}
fn createInstance(self: *Self) !void {
if (enable_validation_layers and !self.checkValidationLayersSupport()) {
// TODO Better error
return error.LayerNotPresent;
}
const extensions = try self.getRequiredExtensions();
defer self.allocator.free(extensions);
std.debug.print("[Required instance extensions]\n", .{});
for (extensions) |ext| {
std.debug.print("\t- {s}\n", .{ext});
}
if (!try self.checkInstanceExtensions(&extensions)) {
return error.ExtensionNotPresent;
}
const app_info = vk.ApplicationInfo{
.p_application_name = "Vulkan SDL Test",
.application_version = vk.makeApiVersion(0, 0, 1, 0),
.p_engine_name = "Vulkan SDL Test",
.engine_version = vk.makeApiVersion(0, 0, 1, 0),
.api_version = vk.API_VERSION_1_3,
};
var instance_create_info: vk.InstanceCreateInfo = .{
.p_application_info = &app_info,
.enabled_extension_count = @intCast(extensions.len),
.pp_enabled_extension_names = @ptrCast(extensions),
};
if (enable_validation_layers) {
const debug_create_info = validation.getDebugUtilsCreateInfo();
instance_create_info.enabled_layer_count = @intCast(validation_layers.len);
instance_create_info.pp_enabled_layer_names = &validation_layers;
instance_create_info.p_next = &debug_create_info;
}
const instance_handle = try self.vkb.createInstance(&instance_create_info, null);
const vki = try self.allocator.create(InstanceDispatch);
errdefer self.allocator.destroy(vki);
vki.* = try InstanceDispatch.load(instance_handle, self.vkb.dispatch.vkGetInstanceProcAddr);
self.instance = Instance.init(instance_handle, vki);
}
fn createSurface(self: *Self) !void {
self.surface = try sdl.vulkan.createSurface(self.window, self.instance.handle);
}
fn getPhysicalDevice(self: *Self) !void {
var pdev_count: u32 = 0;
_ = try self.instance.enumeratePhysicalDevices(&pdev_count, null);
const pdevs = try self.allocator.alloc(vk.PhysicalDevice, pdev_count);
defer self.allocator.free(pdevs);
_ = try self.instance.enumeratePhysicalDevices(&pdev_count, pdevs.ptr);
for (pdevs) |pdev| {
if (self.checkDeviceSuitable(pdev)) {
self.physical_device = pdev;
break;
}
} else {
// TODO Obviously needs to be something else
unreachable;
}
}
fn createLogicalDevice(self: *Self) !void {
const indices = try QueueUtils.getQueueFamilies(self.*, self.physical_device);
// 1 is the highest priority
const priority = [_]f32{1};
const qci = [_]vk.DeviceQueueCreateInfo{
.{
.queue_family_index = indices.graphics_family.?,
.queue_count = 1,
.p_queue_priorities = &priority,
},
.{
.queue_family_index = indices.presentation_family.?,
.queue_count = 1,
.p_queue_priorities = &priority,
},
};
const queue_count: u32 = if (indices.graphics_family.? == indices.presentation_family.?)
1
else
2;
// Device features
const device_features: vk.PhysicalDeviceFeatures = .{
.sampler_anisotropy = vk.TRUE, // Enable anisotropy
};
const device_create_info: vk.DeviceCreateInfo = .{
.queue_create_info_count = queue_count,
.p_queue_create_infos = &qci,
.pp_enabled_extension_names = &device_extensions,
.enabled_extension_count = @intCast(device_extensions.len),
.p_enabled_features = &device_features,
};
const device_handle = try self.instance.createDevice(self.physical_device, &device_create_info, null);
const vkd = try self.allocator.create(DeviceDispatch);
errdefer self.allocator.destroy(vkd);
vkd.* = try DeviceDispatch.load(device_handle, self.instance.wrapper.dispatch.vkGetDeviceProcAddr);
self.device = Device.init(device_handle, vkd);
const queues = try QueueUtils.getDeviceQueues(self.*);
self.graphics_queue = Queue.init(queues[0], self.device.wrapper);
self.presentation_queue = Queue.init(queues[1], self.device.wrapper);
}
fn createCommandPool(self: *Self) !void {
// Get indices of queue families from device
const queue_family_indices = try QueueUtils.getQueueFamilies(self.*, self.physical_device);
const pool_create_info: vk.CommandPoolCreateInfo = .{
// Queue family type that buffers from this command pool will use
.queue_family_index = queue_family_indices.graphics_family.?,
.flags = .{ .reset_command_buffer_bit = true },
};
// Create a graphics queue family command pool
self.command_pool = try self.device.createCommandPool(&pool_create_info, null);
}
fn getRequiredExtensions(self: Self) ![][*:0]const u8 {
var ext_count = sdl.vulkan.getInstanceExtensionsCount(self.window);
if (enable_validation_layers) {
ext_count += 1;
}
var extensions = try self.allocator.alloc([*:0]const u8, ext_count);
_ = try sdl.vulkan.getInstanceExtensions(self.window, extensions);
if (enable_validation_layers) {
extensions[extensions.len - 1] = vk.extensions.ext_debug_utils.name;
}
return extensions;
}
fn checkInstanceExtensions(self: Self, required_extensions: *const [][*:0]const u8) !bool {
var prop_count: u32 = 0;
_ = try self.vkb.enumerateInstanceExtensionProperties(null, &prop_count, null);
const props = try self.allocator.alloc(vk.ExtensionProperties, prop_count);
defer self.allocator.free(props);
_ = try self.vkb.enumerateInstanceExtensionProperties(null, &prop_count, props.ptr);
for (required_extensions.*) |required_extension| {
for (props) |prop| {
if (std.mem.eql(u8, std.mem.sliceTo(&prop.extension_name, 0), std.mem.span(required_extension))) {
break;
}
} else {
return false;
}
}
return true;
}
fn checkDeviceExtensions(self: Self, pdev: vk.PhysicalDevice) !bool {
var prop_count: u32 = 0;
_ = try self.instance.enumerateDeviceExtensionProperties(pdev, null, &prop_count, null);
if (prop_count == 0) {
return false;
}
const props = try self.allocator.alloc(vk.ExtensionProperties, prop_count);
defer self.allocator.free(props);
_ = try self.instance.enumerateDeviceExtensionProperties(pdev, null, &prop_count, props.ptr);
for (device_extensions) |device_extension| {
for (props) |prop| {
if (std.mem.eql(u8, std.mem.sliceTo(&prop.extension_name, 0), std.mem.span(device_extension))) {
break;
}
} else {
return false;
}
}
return true;
}
fn checkDeviceSuitable(self: Self, pdev: vk.PhysicalDevice) bool {
const pdev_properties = self.instance.getPhysicalDeviceProperties(pdev);
if (pdev_properties.device_type == .cpu) {
return false;
}
const pdev_features = self.instance.getPhysicalDeviceFeatures(pdev);
const queue_family_indices = QueueUtils.getQueueFamilies(self, pdev) catch return false;
const extension_support = self.checkDeviceExtensions(pdev) catch return false;
const swapchain_details = Swapchain.getSwapchainDetails(
self.allocator,
self.instance,
pdev,
self.surface,
) catch return false;
defer self.allocator.free(swapchain_details.formats);
defer self.allocator.free(swapchain_details.presentation_modes);
const swapchain_valid = swapchain_details.formats.len != 0 and swapchain_details.presentation_modes.len != 0;
return queue_family_indices.isValid() and extension_support and swapchain_valid and pdev_features.sampler_anisotropy == vk.TRUE;
}
fn checkValidationLayersSupport(self: Self) bool {
var layer_count: u32 = undefined;
_ = self.vkb.enumerateInstanceLayerProperties(&layer_count, null) catch return false;
const available_layers = self.allocator.alloc(vk.LayerProperties, layer_count) catch unreachable;
defer self.allocator.free(available_layers);
_ = self.vkb.enumerateInstanceLayerProperties(&layer_count, available_layers.ptr) catch return false;
for (validation_layers) |validation_layer| {
for (available_layers) |available_layer| {
if (std.mem.eql(u8, std.mem.span(validation_layer), std.mem.sliceTo(&available_layer.layer_name, 0))) {
return true;
}
}
}
return false;
}


@@ -2,7 +2,6 @@ const std = @import("std");
 const vk = @import("vulkan");
 const zm = @import("zmath");
-const Context = @import("Context.zig");
 const Utilities = @import("utilities.zig");
 const Vertex = Utilities.Vertex;
 const Device = @import("vulkan_renderer.zig").Device;
@@ -12,6 +11,7 @@ const Model = @import("vulkan_renderer.zig").Model;
 const Self = @This();
 ubo_model: Model,
+tex_id: u32,
 vertex_count: u32,
 vertex_buffer: vk.Buffer,
@@ -21,20 +21,16 @@ index_count: u32,
 index_buffer: vk.Buffer,
 index_buffer_memory: vk.DeviceMemory,
-ctx: Context,
+instance: Instance,
+physical_device: vk.PhysicalDevice,
+device: Device,
 allocator: std.mem.Allocator,
-<<<<<<< Updated upstream
 pub fn new(
 instance: Instance,
 pdev: vk.PhysicalDevice,
 device: Device,
-=======
-pub fn create(
-allocator: std.mem.Allocator,
-ctx: Context,
->>>>>>> Stashed changes
 transfer_queue: vk.Queue,
 transfer_command_pool: vk.CommandPool,
 vertices: []const Vertex,
@@ -44,12 +40,13 @@ pub fn create(
 ) !Self {
 var self: Self = undefined;
-self.allocator = allocator;
 self.vertex_count = @intCast(vertices.len);
 self.index_count = @intCast(indices.len);
-self.ctx = ctx;
+self.instance = instance;
+self.physical_device = pdev;
+self.device = device;
+self.allocator = allocator;
 try self.createVertexBuffer(transfer_queue, transfer_command_pool, vertices);
 try self.createIndexBuffer(transfer_queue, transfer_command_pool, indices);
@@ -60,18 +57,12 @@ pub fn create(
 return self;
 }
-<<<<<<< Updated upstream
 pub fn destroyBuffers(self: Self) void {
 self.device.destroyBuffer(self.vertex_buffer, null);
 self.device.freeMemory(self.vertex_buffer_memory, null);
-=======
-pub fn destroy(self: Self) void {
-self.ctx.device.destroyBuffer(self.vertex_buffer, null);
-self.ctx.device.freeMemory(self.vertex_buffer_memory, null);
->>>>>>> Stashed changes
-self.ctx.device.destroyBuffer(self.index_buffer, null);
-self.ctx.device.freeMemory(self.index_buffer_memory, null);
+self.device.destroyBuffer(self.index_buffer, null);
+self.device.freeMemory(self.index_buffer_memory, null);
 }
 fn createVertexBuffer(
@@ -86,12 +77,14 @@ fn createVertexBuffer(
 // Temporary buffer to "stage" vertex data before transfering to GPU
 var staging_buffer: vk.Buffer = undefined;
 var staging_buffer_memory: vk.DeviceMemory = undefined;
-defer self.ctx.device.destroyBuffer(staging_buffer, null);
-defer self.ctx.device.freeMemory(staging_buffer_memory, null);
+defer self.device.destroyBuffer(staging_buffer, null);
+defer self.device.freeMemory(staging_buffer_memory, null);
 // Create buffer and allocate memory to it
 try Utilities.createBuffer(
-self.ctx,
+self.physical_device,
+self.instance,
+self.device,
 buffer_size,
 .{ .transfer_src_bit = true },
 .{ .host_visible_bit = true, .host_coherent_bit = true },
@@ -102,19 +95,21 @@ fn createVertexBuffer(
 // Map memory to vertex
 // 1. Create pointer to a point in normal memory
 // 2. Map the vertex buffer memory to that point
-const data = try self.ctx.device.mapMemory(staging_buffer_memory, 0, buffer_size, .{});
+const data = try self.device.mapMemory(staging_buffer_memory, 0, buffer_size, .{});
 // 3. Copy memory from vertices vector to the point in memory
 const gpu_vertices: [*]Vertex = @ptrCast(@alignCast(data));
 @memcpy(gpu_vertices, vertices[0..]);
 // 4. Unmap the vertex buffer memory
-self.ctx.device.unmapMemory(staging_buffer_memory);
+self.device.unmapMemory(staging_buffer_memory);
 // ---
 // Create buffer with TRANSFER_DST_BIT to mark as recipient of transfer data (also VERTEX_BUFFER)
 // Buffer memory is to be DEVICE_LOCAL_BIT meaning memory is on the GPU and only accessible by it and not CPU (host)
 try Utilities.createBuffer(
-self.ctx,
+self.physical_device,
+self.instance,
+self.device,
 buffer_size,
 .{ .transfer_dst_bit = true, .vertex_buffer_bit = true },
 .{ .device_local_bit = true },
@@ -124,7 +119,7 @@ fn createVertexBuffer(
 // Copy staging buffer to vertex buffer on GPU
 try Utilities.copyBuffer(
-self.ctx,
+self.device,
 transfer_queue,
 transfer_command_pool,
 staging_buffer,
@@ -145,11 +140,13 @@ fn createIndexBuffer(
 // Temporary buffer to "stage" vertex data before transfering to GPU
 var staging_buffer: vk.Buffer = undefined;
 var staging_buffer_memory: vk.DeviceMemory = undefined;
-defer self.ctx.device.destroyBuffer(staging_buffer, null);
-defer self.ctx.device.freeMemory(staging_buffer_memory, null);
+defer self.device.destroyBuffer(staging_buffer, null);
+defer self.device.freeMemory(staging_buffer_memory, null);
 try Utilities.createBuffer(
-self.ctx,
+self.physical_device,
+self.instance,
+self.device,
 buffer_size,
 .{ .transfer_src_bit = true },
 .{ .host_visible_bit = true, .host_coherent_bit = true },
@@ -158,14 +155,16 @@ fn createIndexBuffer(
 );
 // Map memory to index buffer
-const data = try self.ctx.device.mapMemory(staging_buffer_memory, 0, buffer_size, .{});
+const data = try self.device.mapMemory(staging_buffer_memory, 0, buffer_size, .{});
 const gpu_vertices: [*]u32 = @ptrCast(@alignCast(data));
 @memcpy(gpu_vertices, indices[0..]);
-self.ctx.device.unmapMemory(staging_buffer_memory);
+self.device.unmapMemory(staging_buffer_memory);
 // Create buffer for index data on GPU access only
 try Utilities.createBuffer(
-self.ctx,
+self.physical_device,
+self.instance,
+self.device,
 buffer_size,
 .{ .transfer_dst_bit = true, .index_buffer_bit = true },
 .{ .device_local_bit = true },
@@ -175,7 +174,7 @@ fn createIndexBuffer(
 // Copy from staging buffer to GPU access buffer
 try Utilities.copyBuffer(
-self.ctx,
+self.device,
 transfer_queue,
 transfer_command_pool,
 staging_buffer,


@@ -15,20 +15,7 @@ allocator: std.mem.Allocator,
 mesh_list: std.ArrayList(Mesh),
 model: zm.Mat,
-<<<<<<< Updated upstream
 pub fn new(allocator: std.mem.Allocator, mesh_list: std.ArrayList(Mesh)) Self {
-=======
-sampler_descriptor_sets: std.ArrayList(vk.DescriptorSet),
-pub fn new(
-allocator: std.mem.Allocator,
-ctx: Context,
-graphics_command_pool: vk.CommandPool,
-texture_sampler: vk.Sampler,
-model_file: []const u8,
-) Self {
-_ = texture_sampler;
->>>>>>> Stashed changes
 var new_mesh_model: Self = undefined;
 new_mesh_model.allocator = allocator;


@@ -1,55 +0,0 @@
const std = @import("std");
const vk = @import("vulkan");
const Context = @import("Context.zig");
const Mesh = @import("Mesh.zig");
const Material = @import("Material.zig");
const Self = @This();
allocator: std.mem.Allocator,
ctx: Context,
sampler_descriptor_pool: vk.DescriptorPool,
sampler_descriptor_set_layout: vk.DescriptorSetLayout,
// AutoArrayHashMap cannot hash slice keys ([]const u8); use the string-keyed variant
mesh_cache: std.StringArrayHashMap(Mesh),
material_cache: std.StringArrayHashMap(Material),
pub fn new(allocator: std.mem.Allocator, ctx: Context) Self {
var self: Self = undefined;
self.allocator = allocator;
self.ctx = ctx;
self.mesh_cache = std.StringArrayHashMap(Mesh).init(allocator);
self.material_cache = std.StringArrayHashMap(Material).init(allocator);
return self;
}
pub fn deinit(self: *Self) void {
// TODO Release resources properly
self.mesh_cache.deinit();
self.material_cache.deinit();
}
pub fn getMesh(self: *Self, file_name: []const u8) !Mesh {
if (self.mesh_cache.get(file_name)) |mesh| {
return mesh;
}
// TODO Create mesh
// load mesh
return undefined;
}
fn allocateDescriptorSet(self: *Self) !void {
// TODO
}
fn createDescriptorSetLayout(self: *Self) !void {
// TODO
}


@@ -1,200 +0,0 @@
const std = @import("std");
const vk = @import("vulkan");
const img = @import("zstbi");
const Context = @import("Context.zig");
const Image = @import("image.zig");
const Utilities = @import("utilities.zig");
const Self = @This();
allocator: std.mem.Allocator,
ctx: Context,
idx: u32,
texture_image: vk.Image,
texture_image_memory: vk.DeviceMemory,
texture_image_view: vk.ImageView,
sampler_descriptor_set: vk.DescriptorSet,
image_file: img.Image,
pub fn create(
file_name: []const u8,
ctx: Context,
graphics_command_pool: vk.CommandPool,
texture_sampler: vk.Sampler,
sampler_set_layout: vk.DescriptorSetLayout,
sampler_descriptor_pool: vk.DescriptorPool,
) !Self {
var self: Self = undefined;
self.ctx = ctx;
self.allocator = ctx.allocator;
// Create texture image (stored in texture_image / texture_image_memory)
try self.createTextureImage(file_name, graphics_command_pool);
// Create image view
self.texture_image_view = try Image.createImageView(
ctx,
self.texture_image,
.r8g8b8a8_srgb,
.{ .color_bit = true },
);
// Create texture descriptor
try self.createTextureDescriptor(
texture_sampler,
sampler_set_layout,
sampler_descriptor_pool,
);
// Return location of set with texture
return self;
}
pub fn destroy(self: *Self) void {
_ = self;
}
fn createTextureImage(
self: *Self,
file_name: []const u8,
graphics_command_pool: vk.CommandPool,
) !void {
// Load image file
var width: u32 = undefined;
var height: u32 = undefined;
var image_size: vk.DeviceSize = undefined;
try self.loadTextureFile(file_name, &width, &height, &image_size);
// Create staging buffer to hold loaded data, ready to copy to device
var image_staging_buffer: vk.Buffer = undefined;
var image_staging_buffer_memory: vk.DeviceMemory = undefined;
defer self.ctx.device.destroyBuffer(image_staging_buffer, null);
defer self.ctx.device.freeMemory(image_staging_buffer_memory, null);
try Utilities.createBuffer(
self.ctx.physical_device,
self.ctx.instance,
self.ctx.device,
image_size,
.{ .transfer_src_bit = true },
.{ .host_visible_bit = true, .host_coherent_bit = true },
&image_staging_buffer,
&image_staging_buffer_memory,
);
// Copy data to staging buffer
const data = try self.ctx.device.mapMemory(image_staging_buffer_memory, 0, image_size, .{});
const image_data: [*]u8 = @ptrCast(@alignCast(data));
@memcpy(image_data, self.image_file.data); // copy pixel data loaded by loadTextureFile (assumes zstbi Image exposes .data)
self.ctx.device.unmapMemory(image_staging_buffer_memory);
// Create image to hold final texture
var tex_image_memory: vk.DeviceMemory = undefined;
const tex_image = try Image.createImage(
self.ctx,
width,
height,
.r8g8b8a8_srgb,
.optimal,
.{ .transfer_dst_bit = true, .sampled_bit = true },
.{ .device_local_bit = true },
&tex_image_memory,
);
// Transition image to be DST for copy operation
try Utilities.transitionImageLayout(
self.ctx.device,
self.ctx.graphics_queue.handle,
graphics_command_pool,
tex_image,
.undefined,
.transfer_dst_optimal,
);
// Copy data to image
try Utilities.copyImageBuffer(
self.ctx.device,
self.ctx.graphics_queue.handle,
graphics_command_pool,
image_staging_buffer,
tex_image,
width,
height,
);
// Transition image to be shader readable for shader usage
try Utilities.transitionImageLayout(
self.ctx.device,
self.ctx.graphics_queue.handle,
graphics_command_pool,
tex_image,
.transfer_dst_optimal,
.shader_read_only_optimal,
);
self.texture_image = tex_image;
self.texture_image_memory = tex_image_memory;
}
fn createTextureDescriptor(
self: *Self,
texture_sampler: vk.Sampler,
sampler_set_layout: vk.DescriptorSetLayout,
sampler_descriptor_pool: vk.DescriptorPool,
) !void {
// Descriptor set allocation info
const set_alloc_info: vk.DescriptorSetAllocateInfo = .{
.descriptor_pool = sampler_descriptor_pool,
.descriptor_set_count = 1,
.p_set_layouts = @ptrCast(&sampler_set_layout),
};
// Allocate descriptor sets
try self.ctx.device.allocateDescriptorSets(&set_alloc_info, @ptrCast(&self.sampler_descriptor_set));
const image_info: vk.DescriptorImageInfo = .{
.image_layout = .shader_read_only_optimal, // Image layout when in use
.image_view = self.texture_image_view, // Image to bind to set
.sampler = texture_sampler, // Sampler to use for set
};
// Descriptor write info
const descriptor_write: vk.WriteDescriptorSet = .{
.dst_set = self.sampler_descriptor_set,
.dst_binding = 0,
.dst_array_element = 0,
.descriptor_type = .combined_image_sampler,
.descriptor_count = 1,
.p_image_info = @ptrCast(&image_info),
.p_buffer_info = undefined,
.p_texel_buffer_view = undefined,
};
// Update the new descriptor set
self.ctx.device.updateDescriptorSets(1, @ptrCast(&descriptor_write), 0, null);
}
fn loadTextureFile(self: *Self, file_name: []const u8, width: *u32, height: *u32, image_size: *vk.DeviceSize) !void {
const path_concat = [2][]const u8{ "./assets/textures/", file_name };
const path = try std.mem.concatWithSentinel(self.allocator, u8, &path_concat, 0);
defer self.allocator.free(path);
const image = try img.Image.loadFromFile(path, 0);
width.* = image.width;
height.* = image.height;
// Calculate image size using given and known data
image_size.* = width.* * height.* * 4;
self.image_file = image;
}


@@ -1,14 +1,8 @@
 const std = @import("std");
 const vk = @import("vulkan");
-<<<<<<< Updated upstream
 const Instance = @import("vulkan_renderer.zig").Instance;
 const Device = @import("vulkan_renderer.zig").Device;
-=======
-const Context = @import("Context.zig");
-const Instance = @import("Context.zig").Instance;
-const Device = @import("Context.zig").Device;
->>>>>>> Stashed changes
 const CommandBuffer = @import("vulkan_renderer.zig").CommandBuffer;
 pub const device_extensions = [_][*:0]const u8{vk.extensions.khr_swapchain.name};
@@ -23,7 +17,6 @@ pub const Vertex = struct {
 tex: Vector2, // Texture coords (u, v)
 };
-<<<<<<< Updated upstream
 pub const QueueFamilyIndices = struct {
 graphics_family: ?u32 = null,
 presentation_family: ?u32 = null,
@@ -45,11 +38,8 @@ pub const SwapchainImage = struct {
 };
 pub fn findMemoryTypeIndex(pdev: vk.PhysicalDevice, instance: Instance, allowed_types: u32, properties: vk.MemoryPropertyFlags) u32 {
-=======
-pub fn findMemoryTypeIndex(ctx: Context, allowed_types: u32, properties: vk.MemoryPropertyFlags) u32 {
->>>>>>> Stashed changes
 // Get properties of physical device memory
-const memory_properties = ctx.instance.getPhysicalDeviceMemoryProperties(ctx.physical_device);
+const memory_properties = instance.getPhysicalDeviceMemoryProperties(pdev);
 const mem_type_count = memory_properties.memory_type_count;
 for (memory_properties.memory_types[0..mem_type_count], 0..mem_type_count) |mem_type, i| {
@@ -64,7 +54,9 @@ pub fn findMemoryTypeIndex(ctx: Context, allowed_types: u32, properties: vk.Memo
 }
 pub fn createBuffer(
-ctx: Context,
+pdev: vk.PhysicalDevice,
+instance: Instance,
+device: Device,
 buffer_size: vk.DeviceSize,
 buffer_usage: vk.BufferUsageFlags,
 buffer_properties: vk.MemoryPropertyFlags,
@@ -80,16 +72,17 @@ pub fn createBuffer(
 .sharing_mode = .exclusive, // Similar to swapchain images, can share vertex buffers
 };
-buffer.* = try ctx.device.createBuffer(&buffer_create_info, null);
+buffer.* = try device.createBuffer(&buffer_create_info, null);
 // Get buffer memory requirements
-const mem_requirements = ctx.device.getBufferMemoryRequirements(buffer.*);
+const mem_requirements = device.getBufferMemoryRequirements(buffer.*);
 // Allocate memory to buffer
 const allocate_info: vk.MemoryAllocateInfo = .{
 .allocation_size = mem_requirements.size,
 .memory_type_index = findMemoryTypeIndex(
-ctx,
+pdev,
+instance,
 mem_requirements.memory_type_bits, // Index of memory type of physical device that has required bit flags
 // Host visible: CPU can interact with memory
 // Host coherent: Allows placement of data straight into buffer after mapping (otherwise would have to specify manually)
@@ -98,10 +91,10 @@ pub fn createBuffer(
 };
 // Allocate memory to vkDeviceMemory
-buffer_memory.* = try ctx.device.allocateMemory(&allocate_info, null);
+buffer_memory.* = try device.allocateMemory(&allocate_info, null);
 // Allocate memory to given vertex buffer
-try ctx.device.bindBufferMemory(buffer.*, buffer_memory.*, 0);
+try device.bindBufferMemory(buffer.*, buffer_memory.*, 0);
 }
 fn beginCommandBuffer(device: Device, command_pool: vk.CommandPool) !CommandBuffer {


@@ -59,13 +59,9 @@ pub const VulkanRenderer = struct {
 vkb: BaseDispatch,
-<<<<<<< Updated upstream
 window: sdl.Window,
 current_frame: u32 = 0,
-=======
-ctx: Context,
->>>>>>> Stashed changes
 // Scene settings
 ubo_view_projection: UboViewProjection,
@@ -126,6 +122,9 @@ pub const VulkanRenderer = struct {
 render_pass: vk.RenderPass,
+// Pools
+graphics_command_pool: vk.CommandPool,
 // Utilities
 swapchain_image_format: vk.Format,
 depth_format: vk.Format,
@@ -143,11 +142,8 @@ pub const VulkanRenderer = struct {
 self.window = window;
 self.current_frame = 0;
-<<<<<<< Updated upstream
 self.allocator = allocator;
 self.vkb = try BaseDispatch.load(try sdl.vulkan.getVkGetInstanceProcAddr());
-=======
->>>>>>> Stashed changes
 img.init(allocator);
@@ -170,11 +166,7 @@ pub const VulkanRenderer = struct {
 try self.createFramebuffers();
 try self.createCommandPool();
-<<<<<<< Updated upstream
 self.sampler_descriptor_sets = try std.ArrayList(vk.DescriptorSet).initCapacity(self.allocator, self.swapchain_images.len);
-=======
-self.sampler_descriptor_sets = try std.ArrayList(vk.DescriptorSet).initCapacity(self.allocator, self.ctx.swapchain.swapchain_images.len);
->>>>>>> Stashed changes
 try self.createCommandBuffers();
 try self.createTextureSampler();
@@ -190,11 +182,7 @@ pub const VulkanRenderer = struct {
 self.texture_image_views = std.ArrayList(vk.ImageView).init(self.allocator);
 self.model_list = std.ArrayList(MeshModel).init(allocator);
-<<<<<<< Updated upstream
 const aspect: f32 = @as(f32, @floatFromInt(self.extent.width)) / @as(f32, @floatFromInt(self.extent.height));
-=======
-const aspect: f32 = @as(f32, @floatFromInt(self.ctx.swapchain.extent.width)) / @as(f32, @floatFromInt(self.ctx.swapchain.extent.height));
->>>>>>> Stashed changes
 self.ubo_view_projection.projection = zm.perspectiveFovRh(
 std.math.degreesToRadians(45.0),
 aspect,
@@ -238,13 +226,8 @@ pub const VulkanRenderer = struct {
 // -- Get next image
 // Get index of next image to be drawn to, and signal semaphore when ready to be drawn to
-<<<<<<< Updated upstream
 const image_index_result = try self.device.acquireNextImageKHR(
 self.swapchain,
-=======
-const image_index_result = try self.ctx.device.acquireNextImageKHR(
-self.ctx.swapchain.handle,
->>>>>>> Stashed changes
 std.math.maxInt(u64),
 self.image_available[self.current_frame],
 .null_handle,
@@ -275,11 +258,7 @@ pub const VulkanRenderer = struct {
 .wait_semaphore_count = 1, // Number of semaphores to wait on
 .p_wait_semaphores = @ptrCast(&self.render_finished[self.current_frame]), // Semaphores to wait on
 .swapchain_count = 1, // Number of swapchains to present to
-<<<<<<< Updated upstream
 .p_swapchains = @ptrCast(&self.swapchain), // Swapchains to present images to
-=======
-.p_swapchains = @ptrCast(&self.ctx.swapchain.handle), // Swapchains to present images to
->>>>>>> Stashed changes
 .p_image_indices = @ptrCast(&image_index_result.image_index), // Index of images in swapchains to present
 };
@@ -352,15 +331,9 @@ pub const VulkanRenderer = struct {
 self.sampler_descriptor_sets.deinit();
 self.allocator.free(self.input_descriptor_sets);
-<<<<<<< Updated upstream
 for (0..self.swapchain_images.len) |i| {
 self.device.destroyBuffer(self.vp_uniform_buffer[i], null);
 self.device.freeMemory(self.vp_uniform_buffer_memory[i], null);
-=======
-for (0..self.ctx.swapchain.swapchain_images.len) |i| {
-self.ctx.device.destroyBuffer(self.vp_uniform_buffer[i], null);
-self.ctx.device.freeMemory(self.vp_uniform_buffer_memory[i], null);
->>>>>>> Stashed changes
 }
 self.allocator.free(self.vp_uniform_buffer);
 self.allocator.free(self.vp_uniform_buffer_memory);
@@ -379,11 +352,7 @@ pub const VulkanRenderer = struct {
 self.device.destroyFramebuffer(framebuffer, null);
 }
-<<<<<<< Updated upstream
 self.allocator.free(self.swapchain_framebuffers);
-=======
-self.ctx.swapchain.deinit();
->>>>>>> Stashed changes
 self.device.destroyPipeline(self.second_pipeline, null);
 self.device.destroyPipelineLayout(self.second_pipeline_layout, null);
@@ -647,11 +616,7 @@ pub const VulkanRenderer = struct {
 // Colour attachment of the render pass
 const swapchain_colour_attachment: vk.AttachmentDescription = .{
-<<<<<<< Updated upstream
 .format = self.swapchain_image_format, // Format to use for attachment
-=======
-.format = self.ctx.swapchain.swapchain_image_format, // Format to use for attachment
->>>>>>> Stashed changes
 .samples = .{ .@"1_bit" = true }, // Number of samples to write for multisampling
 .load_op = .clear, // Describes what to do with attachment before rendering
 .store_op = .store, // Describes what to do with attachment after rendering
@@ -822,15 +787,9 @@ pub const VulkanRenderer = struct {
 }
 fn createColourBufferImage(self: *Self) !void {
-<<<<<<< Updated upstream
 self.colour_buffer_image = try self.allocator.alloc(vk.Image, self.swapchain_images.len);
 self.colour_buffer_image_memory = try self.allocator.alloc(vk.DeviceMemory, self.swapchain_images.len);
 self.colour_buffer_image_view = try self.allocator.alloc(vk.ImageView, self.swapchain_images.len);
-=======
-self.colour_buffer_image = try self.allocator.alloc(vk.Image, self.ctx.swapchain.swapchain_images.len);
-self.colour_buffer_image_memory = try self.allocator.alloc(vk.DeviceMemory, self.ctx.swapchain.swapchain_images.len);
-self.colour_buffer_image_view = try self.allocator.alloc(vk.ImageView, self.ctx.swapchain.swapchain_images.len);
->>>>>>> Stashed changes
 // Get supported format for colour attachment
 const colour_format = chooseSupportedFormat(
@@ -843,16 +802,9 @@ pub const VulkanRenderer = struct {
 // Create colour buffers
 for (0..self.colour_buffer_image.len) |i| {
-<<<<<<< Updated upstream
 self.colour_buffer_image[i] = try self.createImage(
 self.extent.width,
 self.extent.height,
-=======
-self.colour_buffer_image[i] = try Image.createImage(
-self.ctx,
-self.ctx.swapchain.extent.width,
-self.ctx.swapchain.extent.height,
->>>>>>> Stashed changes
 colour_format,
 .optimal,
 .{ .color_attachment_bit = true, .input_attachment_bit = true },
@@ -869,15 +821,9 @@ pub const VulkanRenderer = struct {
 }
 fn createDepthBufferImage(self: *Self) !void {
-<<<<<<< Updated upstream
 self.depth_buffer_image = try self.allocator.alloc(vk.Image, self.swapchain_images.len);
 self.depth_buffer_image_memory = try self.allocator.alloc(vk.DeviceMemory, self.swapchain_images.len);
 self.depth_buffer_image_view = try self.allocator.alloc(vk.ImageView, self.swapchain_images.len);
-=======
-self.depth_buffer_image = try self.allocator.alloc(vk.Image, self.ctx.swapchain.swapchain_images.len);
-self.depth_buffer_image_memory = try self.allocator.alloc(vk.DeviceMemory, self.ctx.swapchain.swapchain_images.len);
-self.depth_buffer_image_view = try self.allocator.alloc(vk.ImageView, self.ctx.swapchain.swapchain_images.len);
->>>>>>> Stashed changes
 // Get supported depth buffer format
 const formats = [_]vk.Format{ .d32_sfloat_s8_uint, .d32_sfloat, .d24_unorm_s8_uint };
@@ -891,16 +837,9 @@ pub const VulkanRenderer = struct {
 for (0..self.depth_buffer_image.len) |i| {
 // Create depth buffer image
-<<<<<<< Updated upstream
 self.depth_buffer_image[i] = try self.createImage(
 self.extent.width,
 self.extent.height,
-=======
-self.depth_buffer_image[i] = try Image.createImage(
-self.ctx,
-self.ctx.swapchain.extent.width,
-self.ctx.swapchain.extent.height,
->>>>>>> Stashed changes
 self.depth_format,
 .optimal,
 .{ .depth_stencil_attachment_bit = true, .input_attachment_bit = true },
@@ -1000,24 +939,15 @@ pub const VulkanRenderer = struct {
 self.viewport = .{
 .x = 0.0,
 .y = 0.0,
-<<<<<<< Updated upstream
 .width = @floatFromInt(self.extent.width),
 .height = @floatFromInt(self.extent.height),
-=======
-.width = @floatFromInt(self.ctx.swapchain.extent.width),
-.height = @floatFromInt(self.ctx.swapchain.extent.height),
->>>>>>> Stashed changes
 .min_depth = 0.0,
 .max_depth = 1.0,
 };
 self.scissor = .{
 .offset = .{ .x = 0, .y = 0 },
-<<<<<<< Updated upstream
 .extent = self.extent,
-=======
-.extent = self.ctx.swapchain.extent,
->>>>>>> Stashed changes
 };
 const viewport_state_create_info: vk.PipelineViewportStateCreateInfo = .{
@@ -1191,17 +1121,10 @@ pub const VulkanRenderer = struct {
 }
 fn createFramebuffers(self: *Self) !void {
-<<<<<<< Updated upstream
 self.swapchain_framebuffers = try self.allocator.alloc(vk.Framebuffer, self.swapchain_images.len);
 // Create a frammebuffer for each swapchain image
 for (self.swapchain_images, 0..) |swapchain_image, i| {
-=======
-self.ctx.swapchain.swapchain_framebuffers = try self.allocator.alloc(vk.Framebuffer, self.ctx.swapchain.swapchain_images.len);
-// Create a frammebuffer for each swapchain image
-for (self.ctx.swapchain.swapchain_images, 0..) |swapchain_image, i| {
->>>>>>> Stashed changes
 // Order matters
 const attachments = [_]vk.ImageView{
 swapchain_image.image_view,
@@ -1213,7 +1136,6 @@ pub const VulkanRenderer = struct {
 .render_pass = self.render_pass, // Render pass layout the frambuffer will be used with
 .attachment_count = @intCast(attachments.len),
 .p_attachments = &attachments, // List of attachments (1:1 with render pass)
-<<<<<<< Updated upstream
 .width = self.extent.width, // Framebuffer width
 .height = self.extent.height, // Framebuffer height
 .layers = 1, // Framebuffer layers
@@ -1240,20 +1162,6 @@ pub const VulkanRenderer = struct {
 fn createCommandBuffers(self: *Self) !void {
 // Allocate one command buffer for each framebuffer
 const command_buffer_handles = try self.allocator.alloc(vk.CommandBuffer, self.swapchain_framebuffers.len);
-=======
-.width = self.ctx.swapchain.extent.width, // Framebuffer width
-.height = self.ctx.swapchain.extent.height, // Framebuffer height
-.layers = 1, // Framebuffer layers
-};
-self.ctx.swapchain.swapchain_framebuffers[i] = try self.ctx.device.createFramebuffer(&framebuffer_create_info, null);
-}
-}
-fn createCommandBuffers(self: *Self) !void {
-// Allocate one command buffer for each framebuffer
-const command_buffer_handles = try self.allocator.alloc(vk.CommandBuffer, self.ctx.swapchain.swapchain_framebuffers.len);
->>>>>>> Stashed changes
 defer self.allocator.free(command_buffer_handles);
 self.command_buffers = try self.allocator.alloc(CommandBuffer, command_buffer_handles.len);
@@ -1310,13 +1218,8 @@ pub const VulkanRenderer = struct {
 const vp_buffer_size: vk.DeviceSize = @sizeOf(UboViewProjection);
 // One uniform buffer for each image (and by extension, command buffer)
-<<<<<<< Updated upstream
 self.vp_uniform_buffer = try self.allocator.alloc(vk.Buffer, self.swapchain_images.len);
 self.vp_uniform_buffer_memory = try self.allocator.alloc(vk.DeviceMemory, self.swapchain_images.len);
-=======
-self.vp_uniform_buffer = try self.allocator.alloc(vk.Buffer, self.ctx.swapchain.swapchain_images.len);
-self.vp_uniform_buffer_memory = try self.allocator.alloc(vk.DeviceMemory, self.ctx.swapchain.swapchain_images.len);
->>>>>>> Stashed changes
 // Create the uniform buffers
 for (0..self.vp_uniform_buffer.len) |i| {
@@ -1348,11 +1251,7 @@ pub const VulkanRenderer = struct {
 // Data to create descriptor pool
 const pool_create_info: vk.DescriptorPoolCreateInfo = .{
-<<<<<<< Updated upstream
 .max_sets = @intCast(self.swapchain_images.len), // Maximum number of descriptor sets that can be created from pool
-=======
-.max_sets = @intCast(self.ctx.swapchain.swapchain_images.len), // Maximum number of descriptor sets that can be created from pool
->>>>>>> Stashed changes
 .pool_size_count = @intCast(descriptor_pool_sizes.len), // Amount of pool sizes being passed
 .p_pool_sizes = &descriptor_pool_sizes, // Pool sizes to create pool with
 };
@@ -1394,11 +1293,7 @@ pub const VulkanRenderer = struct {
 // Create input attachment pool
 const input_pool_create_info: vk.DescriptorPoolCreateInfo = .{
-<<<<<<< Updated upstream
 .max_sets = @intCast(self.swapchain_images.len),
-=======
-.max_sets = @intCast(self.ctx.swapchain.swapchain_images.len),
->>>>>>> Stashed changes
 .pool_size_count = @intCast(input_pool_sizes.len),
 .p_pool_sizes = &input_pool_sizes,
 };
@@ -1408,15 +1303,9 @@ pub const VulkanRenderer = struct {
 fn createDescriptorSets(self: *Self) !void {
 // One descriptor set for every buffer
-<<<<<<< Updated upstream
 self.descriptor_sets = try self.allocator.alloc(vk.DescriptorSet, self.swapchain_images.len);
 var set_layouts = try self.allocator.alloc(vk.DescriptorSetLayout, self.swapchain_images.len);
-=======
-self.descriptor_sets = try self.allocator.alloc(vk.DescriptorSet, self.ctx.swapchain.swapchain_images.len);
-var set_layouts = try self.allocator.alloc(vk.DescriptorSetLayout, self.ctx.swapchain.swapchain_images.len);
->>>>>>> Stashed changes
 defer self.allocator.free(set_layouts);
 for (0..set_layouts.len) |i| {
 set_layouts[i] = self.descriptor_set_layout;
@@ -1425,11 +1314,7 @@ pub const VulkanRenderer = struct {
 // Descriptor set allocation info
 const set_alloc_info: vk.DescriptorSetAllocateInfo = .{
 .descriptor_pool = self.descriptor_pool, // Pool to allocate descriptor set from
-<<<<<<< Updated upstream
 .descriptor_set_count = @intCast(self.swapchain_images.len), // Number of sets to allocate
-=======
-.descriptor_set_count = @intCast(self.ctx.swapchain.swapchain_images.len), // Number of sets to allocate
->>>>>>> Stashed changes
 .p_set_layouts = set_layouts.ptr, // Layouts to use to allocate sets (1:1 relationship)
 };
@@ -1437,11 +1322,7 @@ pub const VulkanRenderer = struct {
 try self.device.allocateDescriptorSets(&set_alloc_info, self.descriptor_sets.ptr);
 // Update all of descriptor set buffer bindings
-<<<<<<< Updated upstream
 for (0..self.swapchain_images.len) |i| {
-=======
-for (0..self.ctx.swapchain.swapchain_images.len) |i| {
->>>>>>> Stashed changes
 // -- View projection descriptor
 // Buffer info and data offset info
 const vp_buffer_info: vk.DescriptorBufferInfo = .{
@@ -1471,17 +1352,10 @@ pub const VulkanRenderer = struct {
 }
 fn createInputDescriptorSets(self: *Self) !void {
-<<<<<<< Updated upstream
 self.input_descriptor_sets = try self.allocator.alloc(vk.DescriptorSet, self.swapchain_images.len);
 // Fill array of layouts ready for set creation
 var set_layouts = try self.allocator.alloc(vk.DescriptorSetLayout, self.swapchain_images.len);
-=======
-self.input_descriptor_sets = try self.allocator.alloc(vk.DescriptorSet, self.ctx.swapchain.swapchain_images.len);
-// Fill array of layouts ready for set creation
-var set_layouts = try self.allocator.alloc(vk.DescriptorSetLayout, self.ctx.swapchain.swapchain_images.len);
->>>>>>> Stashed changes
 defer self.allocator.free(set_layouts);
 for (0..set_layouts.len) |i| {
 set_layouts[i] = self.input_set_layout;
@@ -1490,11 +1364,7 @@ pub const VulkanRenderer = struct {
 // Input attachment descriptor set allocation info
 const set_alloc_info: vk.DescriptorSetAllocateInfo = .{
 .descriptor_pool = self.input_descriptor_pool,
-<<<<<<< Updated upstream
 .descriptor_set_count = @intCast(self.swapchain_images.len),
-=======
-.descriptor_set_count = @intCast(self.ctx.swapchain.swapchain_images.len),
->>>>>>> Stashed changes
 .p_set_layouts = set_layouts.ptr,
 };
@@ -1502,11 +1372,7 @@ pub const VulkanRenderer = struct {
 try self.device.allocateDescriptorSets(&set_alloc_info, self.input_descriptor_sets.ptr);
 // Update each descriptor set with input attachment
-<<<<<<< Updated upstream
 for (0..self.swapchain_images.len) |i| {
-=======
-for (0..self.ctx.swapchain.swapchain_images.len) |i| {
->>>>>>> Stashed changes
 // Colour attachment descriptor
 const colour_attachment_descriptor: vk.DescriptorImageInfo = .{
 .image_layout = .shader_read_only_optimal,
@@ -1585,19 +1451,11 @@ pub const VulkanRenderer = struct {
 .render_pass = self.render_pass, // Render pass to begin
 .render_area = .{
 .offset = .{ .x = 0, .y = 0 }, // Start point of render pass in pixels
-<<<<<<< Updated upstream
 .extent = self.extent, // Size of region to run render pass on (starting at offset)
 },
 .p_clear_values = &clear_values, // List of clear values
 .clear_value_count = @intCast(clear_values.len),
 .framebuffer = self.swapchain_framebuffers[current_image],
-=======
-.extent = self.ctx.swapchain.extent, // Size of region to run render pass on (starting at offset)
-},
-.p_clear_values = &clear_values, // List of clear values
-.clear_value_count = @intCast(clear_values.len),
-.framebuffer = self.ctx.swapchain.swapchain_framebuffers[current_image],
->>>>>>> Stashed changes
 };
 const command_buffer = self.command_buffers[current_image];