Compare commits

...

2 commits

SHA1 Message Date
e182d73edc Fix loading textures 2024-08-06 15:54:44 +02:00
c0591ecb24 Load textures 2024-08-05 17:54:40 +02:00
19 changed files with 13654 additions and 67 deletions

assets/textures/giraffe.png Normal file (binary image, 674 KiB)

assets/textures/test.png Normal file (binary image, 4.2 KiB)

build.zig

@ -41,12 +41,24 @@ pub fn build(b: *std.Build) void {
const zmath = b.dependency("zmath", .{});
exe.root_module.addImport("zmath", zmath.module("root"));
// zstbi
const zstbi = b.dependency("zstbi", .{});
exe.root_module.addImport("zstbi", zstbi.module("root"));
exe.linkLibrary(zstbi.artifact("zstbi"));
// ---
b.installArtifact(exe);
const exe_check = b.addExecutable(.{
.name = "vulkan-test",
.root_source_file = b.path("src/main.zig"),
.target = target,
.optimize = optimize,
});
const check = b.step("check", "Check if vulkan-test compiles");
check.dependOn(&exe.step);
check.dependOn(&exe_check.step);
const run_cmd = b.addRunArtifact(exe);

build.zig.zon

@ -5,14 +5,11 @@
.dependencies = .{
.zmath = .{ .path = "libs/zmath" },
.zstbi = .{ .path = "libs/zstbi" },
.vulkan_zig = .{
.url = "https://github.com/Snektron/vulkan-zig/archive/9f6e6177b1fdb3ed22231d9216a24480e84cfa5e.tar.gz",
.hash = "1220f2961df224f7d35dee774b26194b8b937cc252fa8e4023407776c58521d53e38",
},
// .sdl = .{
// .url = "https://github.com/ikskuh/SDL.zig/archive/9663dc70c19b13afcb4b9f596c928d7b2838e548.tar.gz",
// .hash = "12202141beb92d68ef5088538ff761d5c3ecd2d4e11867c89fbbdcd9f814b8cba8ee",
// },
},
.paths = .{

22
libs/zstbi/LICENSE Normal file

@ -0,0 +1,22 @@
MIT License
Copyright (c) 2021 Michal Ziulek
Copyright (c) 2024 zig-gamedev contributors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

93
libs/zstbi/README.md Normal file

@ -0,0 +1,93 @@
# zstbi v0.10.0 - stb image bindings
## Features
* Supports Zig memory allocators
* Supports decoding most popular formats
* Supports HDR images
* Supports 8-bit and 16-bit channels
* Supports image resizing
* Supports image writing (.png, .jpg)
## Getting started
Copy `zstbi` to a subdirectory of your project and add the following to the `.dependencies` section of your `build.zig.zon`:
```zig
.zstbi = .{ .path = "libs/zstbi" },
```
Then in your `build.zig` add:
```zig
pub fn build(b: *std.Build) void {
const exe = b.addExecutable(.{ ... });
const zstbi = b.dependency("zstbi", .{});
exe.root_module.addImport("zstbi", zstbi.module("root"));
exe.linkLibrary(zstbi.artifact("zstbi"));
}
```
Now you can import and use `zstbi` in your code.
Initialize the library first. `zstbi.init()` is cheap and may be called again whenever you need to change the memory allocator (call `zstbi.deinit()` first, since `init` asserts that no allocator is currently registered). It must be called from the main thread.
```zig
const zstbi = @import("zstbi");
zstbi.init(allocator);
defer zstbi.deinit();
```
```zig
pub const Image = struct {
data: []u8,
width: u32,
height: u32,
num_components: u32,
bytes_per_component: u32,
bytes_per_row: u32,
is_hdr: bool,
...
```
```zig
pub fn loadFromFile(pathname: [:0]const u8, forced_num_components: u32) !Image
pub fn loadFromMemory(data: []const u8, forced_num_components: u32) !Image
pub fn createEmpty(width: u32, height: u32, num_components: u32, args: struct {
bytes_per_component: u32 = 0,
bytes_per_row: u32 = 0,
}) !Image
pub fn info(pathname: [:0]const u8) struct {
is_supported: bool,
width: u32,
height: u32,
num_components: u32,
}
pub fn resize(image: *const Image, new_width: u32, new_height: u32) Image
pub fn writeToFile(
image: *const Image,
filename: [:0]const u8,
image_format: ImageWriteFormat,
) ImageWriteError!void
pub fn writeToFn(
image: *const Image,
write_fn: *const fn (ctx: ?*anyopaque, data: ?*anyopaque, size: c_int) callconv(.C) void,
context: ?*anyopaque,
image_format: ImageWriteFormat,
) ImageWriteError!void
```
```zig
var image = try zstbi.Image.loadFromFile("data/image.png", forced_num_components);
defer image.deinit();
const new_resized_image = image.resize(1024, 1024);
```
Misc functions:
```zig
pub fn isHdr(filename: [:0]const u8) bool
pub fn is16bit(filename: [:0]const u8) bool
pub fn setFlipVerticallyOnLoad(should_flip: bool) void
```
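For reference, a minimal sketch of a complete program tying together the calls documented above (the texture path reuses `assets/textures/test.png` added in this commit; the 64x64 size and the `thumb.png` output name are only illustrative):
```zig
const std = @import("std");
const zstbi = @import("zstbi");

pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();

    // Register the allocator; all stb allocations go through it.
    zstbi.init(gpa.allocator());
    defer zstbi.deinit();

    // Force 4 components (RGBA) so the pixel layout is predictable.
    var image = try zstbi.Image.loadFromFile("assets/textures/test.png", 4);
    defer image.deinit();

    std.debug.print("{d}x{d}, {d} components\n", .{ image.width, image.height, image.num_components });

    // Downscale and write the result back out as a PNG.
    var thumb = image.resize(64, 64);
    defer thumb.deinit();
    try thumb.writeToFile("thumb.png", .png);
}
```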

52
libs/zstbi/build.zig Normal file

@ -0,0 +1,52 @@
const std = @import("std");
pub fn build(b: *std.Build) void {
const optimize = b.standardOptimizeOption(.{});
const target = b.standardTargetOptions(.{});
_ = b.addModule("root", .{
.root_source_file = b.path("src/zstbi.zig"),
});
const zstbi_lib = b.addStaticLibrary(.{
.name = "zstbi",
.target = target,
.optimize = optimize,
});
zstbi_lib.addIncludePath(b.path("libs/stbi"));
if (optimize == .Debug) {
// TODO: Workaround for Zig bug.
zstbi_lib.addCSourceFile(.{
.file = b.path("src/zstbi.c"),
.flags = &.{
"-std=c99",
"-fno-sanitize=undefined",
"-g",
"-O0",
},
});
} else {
zstbi_lib.addCSourceFile(.{
.file = b.path("src/zstbi.c"),
.flags = &.{
"-std=c99",
"-fno-sanitize=undefined",
},
});
}
zstbi_lib.linkLibC();
b.installArtifact(zstbi_lib);
const test_step = b.step("test", "Run zstbi tests");
const tests = b.addTest(.{
.name = "zstbi-tests",
.root_source_file = b.path("src/zstbi.zig"),
.target = target,
.optimize = optimize,
});
tests.linkLibrary(zstbi_lib);
b.installArtifact(tests);
test_step.dependOn(&b.addRunArtifact(tests).step);
}

11
libs/zstbi/build.zig.zon Normal file

@ -0,0 +1,11 @@
.{
.name = "zstbi",
.version = "0.10.0",
.paths = .{
"build.zig",
"build.zig.zon",
"libs",
"src",
"README.md",
},
}

File diff suppressed because it is too large.

File diff suppressed because it is too large.

File diff suppressed because it is too large.

32
libs/zstbi/src/zstbi.c Normal file

@ -0,0 +1,32 @@
#include <stdlib.h>
void* (*zstbiMallocPtr)(size_t size) = NULL;
void* (*zstbiReallocPtr)(void* ptr, size_t size) = NULL;
void (*zstbiFreePtr)(void* ptr) = NULL;
#define STBI_MALLOC(size) zstbiMallocPtr(size)
#define STBI_REALLOC(ptr, size) zstbiReallocPtr(ptr, size)
#define STBI_FREE(ptr) zstbiFreePtr(ptr)
#define STB_IMAGE_IMPLEMENTATION
#include "stb_image.h"
void* (*zstbirMallocPtr)(size_t size, void* context) = NULL;
void (*zstbirFreePtr)(void* ptr, void* context) = NULL;
#define STBIR_MALLOC(size, context) zstbirMallocPtr(size, context)
#define STBIR_FREE(ptr, context) zstbirFreePtr(ptr, context)
#define STB_IMAGE_RESIZE_IMPLEMENTATION
#include "stb_image_resize.h"
void* (*zstbiwMallocPtr)(size_t size) = NULL;
void* (*zstbiwReallocPtr)(void* ptr, size_t size) = NULL;
void (*zstbiwFreePtr)(void* ptr) = NULL;
#define STBIW_MALLOC(size) zstbiwMallocPtr(size)
#define STBIW_REALLOC(ptr, size) zstbiwReallocPtr(ptr, size)
#define STBIW_FREE(ptr) zstbiwFreePtr(ptr)
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "stb_image_write.h"

619
libs/zstbi/src/zstbi.zig Normal file

@ -0,0 +1,619 @@
const std = @import("std");
const testing = std.testing;
const assert = std.debug.assert;
pub fn init(allocator: std.mem.Allocator) void {
assert(mem_allocator == null);
mem_allocator = allocator;
mem_allocations = std.AutoHashMap(usize, usize).init(allocator);
// stb image
zstbiMallocPtr = zstbiMalloc;
zstbiReallocPtr = zstbiRealloc;
zstbiFreePtr = zstbiFree;
// stb image resize
zstbirMallocPtr = zstbirMalloc;
zstbirFreePtr = zstbirFree;
// stb image write
zstbiwMallocPtr = zstbiMalloc;
zstbiwReallocPtr = zstbiRealloc;
zstbiwFreePtr = zstbiFree;
}
pub fn deinit() void {
assert(mem_allocator != null);
assert(mem_allocations.?.count() == 0);
setFlipVerticallyOnLoad(false);
setFlipVerticallyOnWrite(false);
mem_allocations.?.deinit();
mem_allocations = null;
mem_allocator = null;
}
pub const JpgWriteSettings = struct {
quality: u32,
};
pub const ImageWriteFormat = union(enum) {
png,
jpg: JpgWriteSettings,
};
pub const ImageWriteError = error{
CouldNotWriteImage,
};
pub const Image = struct {
data: []u8,
width: u32,
height: u32,
num_components: u32,
bytes_per_component: u32,
bytes_per_row: u32,
is_hdr: bool,
pub fn info(pathname: [:0]const u8) struct {
is_supported: bool,
width: u32,
height: u32,
num_components: u32,
} {
assert(mem_allocator != null);
var w: c_int = 0;
var h: c_int = 0;
var c: c_int = 0;
const is_supported = stbi_info(pathname, &w, &h, &c);
return .{
.is_supported = if (is_supported == 1) true else false,
.width = @as(u32, @intCast(w)),
.height = @as(u32, @intCast(h)),
.num_components = @as(u32, @intCast(c)),
};
}
pub fn loadFromFile(pathname: [:0]const u8, forced_num_components: u32) !Image {
assert(mem_allocator != null);
var width: u32 = 0;
var height: u32 = 0;
var num_components: u32 = 0;
var bytes_per_component: u32 = 0;
var bytes_per_row: u32 = 0;
var is_hdr = false;
const data = if (isHdr(pathname)) data: {
var x: c_int = undefined;
var y: c_int = undefined;
var ch: c_int = undefined;
const ptr = stbi_loadf(
pathname,
&x,
&y,
&ch,
@as(c_int, @intCast(forced_num_components)),
);
if (ptr == null) return error.ImageInitFailed;
num_components = if (forced_num_components == 0) @as(u32, @intCast(ch)) else forced_num_components;
width = @as(u32, @intCast(x));
height = @as(u32, @intCast(y));
bytes_per_component = 2;
bytes_per_row = width * num_components * bytes_per_component;
is_hdr = true;
// Convert each component from f32 to f16.
var ptr_f16 = @as([*]f16, @ptrCast(ptr.?));
const num = width * height * num_components;
var i: u32 = 0;
while (i < num) : (i += 1) {
ptr_f16[i] = @as(f16, @floatCast(ptr.?[i]));
}
break :data @as([*]u8, @ptrCast(ptr_f16))[0 .. height * bytes_per_row];
} else data: {
var x: c_int = undefined;
var y: c_int = undefined;
var ch: c_int = undefined;
const is_16bit = is16bit(pathname);
const ptr = if (is_16bit) @as(?[*]u8, @ptrCast(stbi_load_16(
pathname,
&x,
&y,
&ch,
@as(c_int, @intCast(forced_num_components)),
))) else stbi_load(
pathname,
&x,
&y,
&ch,
@as(c_int, @intCast(forced_num_components)),
);
if (ptr == null) return error.ImageInitFailed;
num_components = if (forced_num_components == 0) @as(u32, @intCast(ch)) else forced_num_components;
width = @as(u32, @intCast(x));
height = @as(u32, @intCast(y));
bytes_per_component = if (is_16bit) 2 else 1;
bytes_per_row = width * num_components * bytes_per_component;
is_hdr = false;
break :data @as([*]u8, @ptrCast(ptr))[0 .. height * bytes_per_row];
};
return Image{
.data = data,
.width = width,
.height = height,
.num_components = num_components,
.bytes_per_component = bytes_per_component,
.bytes_per_row = bytes_per_row,
.is_hdr = is_hdr,
};
}
pub fn loadFromMemory(data: []const u8, forced_num_components: u32) !Image {
assert(mem_allocator != null);
var width: u32 = 0;
var height: u32 = 0;
var num_components: u32 = 0;
var bytes_per_component: u32 = 0;
var bytes_per_row: u32 = 0;
var is_hdr = false;
const image_data = if (isHdrFromMem(data)) data: {
var x: c_int = undefined;
var y: c_int = undefined;
var ch: c_int = undefined;
const ptr = stbi_loadf_from_memory(
data.ptr,
@as(c_int, @intCast(data.len)),
&x,
&y,
&ch,
@as(c_int, @intCast(forced_num_components)),
);
if (ptr == null) return error.ImageInitFailed;
num_components = if (forced_num_components == 0) @as(u32, @intCast(ch)) else forced_num_components;
width = @as(u32, @intCast(x));
height = @as(u32, @intCast(y));
bytes_per_component = 2;
bytes_per_row = width * num_components * bytes_per_component;
is_hdr = true;
// Convert each component from f32 to f16.
var ptr_f16 = @as([*]f16, @ptrCast(ptr.?));
const num = width * height * num_components;
var i: u32 = 0;
while (i < num) : (i += 1) {
ptr_f16[i] = @as(f16, @floatCast(ptr.?[i]));
}
break :data @as([*]u8, @ptrCast(ptr_f16))[0 .. height * bytes_per_row];
} else data: {
var x: c_int = undefined;
var y: c_int = undefined;
var ch: c_int = undefined;
const ptr = stbi_load_from_memory(
data.ptr,
@as(c_int, @intCast(data.len)),
&x,
&y,
&ch,
@as(c_int, @intCast(forced_num_components)),
);
if (ptr == null) return error.ImageInitFailed;
num_components = if (forced_num_components == 0) @as(u32, @intCast(ch)) else forced_num_components;
width = @as(u32, @intCast(x));
height = @as(u32, @intCast(y));
bytes_per_component = 1;
bytes_per_row = width * num_components * bytes_per_component;
break :data @as([*]u8, @ptrCast(ptr))[0 .. height * bytes_per_row];
};
return Image{
.data = image_data,
.width = width,
.height = height,
.num_components = num_components,
.bytes_per_component = bytes_per_component,
.bytes_per_row = bytes_per_row,
.is_hdr = is_hdr,
};
}
pub fn createEmpty(width: u32, height: u32, num_components: u32, args: struct {
bytes_per_component: u32 = 0,
bytes_per_row: u32 = 0,
}) !Image {
assert(mem_allocator != null);
const bytes_per_component = if (args.bytes_per_component == 0) 1 else args.bytes_per_component;
const bytes_per_row = if (args.bytes_per_row == 0)
width * num_components * bytes_per_component
else
args.bytes_per_row;
const size = height * bytes_per_row;
const data = @as([*]u8, @ptrCast(zstbiMalloc(size)));
@memset(data[0..size], 0);
return Image{
.data = data[0..size],
.width = width,
.height = height,
.num_components = num_components,
.bytes_per_component = bytes_per_component,
.bytes_per_row = bytes_per_row,
.is_hdr = false,
};
}
pub fn resize(image: *const Image, new_width: u32, new_height: u32) Image {
assert(mem_allocator != null);
// TODO: Add support for HDR images
const new_bytes_per_row = new_width * image.num_components * image.bytes_per_component;
const new_size = new_height * new_bytes_per_row;
const new_data = @as([*]u8, @ptrCast(zstbiMalloc(new_size)));
stbir_resize_uint8(
image.data.ptr,
@as(c_int, @intCast(image.width)),
@as(c_int, @intCast(image.height)),
0,
new_data,
@as(c_int, @intCast(new_width)),
@as(c_int, @intCast(new_height)),
0,
@as(c_int, @intCast(image.num_components)),
);
return .{
.data = new_data[0..new_size],
.width = new_width,
.height = new_height,
.num_components = image.num_components,
.bytes_per_component = image.bytes_per_component,
.bytes_per_row = new_bytes_per_row,
.is_hdr = image.is_hdr,
};
}
pub fn writeToFile(
image: Image,
filename: [:0]const u8,
image_format: ImageWriteFormat,
) ImageWriteError!void {
assert(mem_allocator != null);
const w = @as(c_int, @intCast(image.width));
const h = @as(c_int, @intCast(image.height));
const comp = @as(c_int, @intCast(image.num_components));
const result = switch (image_format) {
.png => stbi_write_png(filename.ptr, w, h, comp, image.data.ptr, 0),
.jpg => |settings| stbi_write_jpg(
filename.ptr,
w,
h,
comp,
image.data.ptr,
@as(c_int, @intCast(settings.quality)),
),
};
// If the result is 0, an error occurred (per stb image write docs)
if (result == 0) {
return ImageWriteError.CouldNotWriteImage;
}
}
pub fn writeToFn(
image: Image,
write_fn: *const fn (ctx: ?*anyopaque, data: ?*anyopaque, size: c_int) callconv(.C) void,
context: ?*anyopaque,
image_format: ImageWriteFormat,
) ImageWriteError!void {
assert(mem_allocator != null);
const w = @as(c_int, @intCast(image.width));
const h = @as(c_int, @intCast(image.height));
const comp = @as(c_int, @intCast(image.num_components));
const result = switch (image_format) {
.png => stbi_write_png_to_func(write_fn, context, w, h, comp, image.data.ptr, 0),
.jpg => |settings| stbi_write_jpg_to_func(
write_fn,
context,
w,
h,
comp,
image.data.ptr,
@as(c_int, @intCast(settings.quality)),
),
};
// If the result is 0, an error occurred (per stb image write docs)
if (result == 0) {
return ImageWriteError.CouldNotWriteImage;
}
}
pub fn deinit(image: *Image) void {
stbi_image_free(image.data.ptr);
image.* = undefined;
}
};
/// `pub fn setHdrToLdrScale(scale: f32) void`
pub const setHdrToLdrScale = stbi_hdr_to_ldr_scale;
/// `pub fn setHdrToLdrGamma(gamma: f32) void`
pub const setHdrToLdrGamma = stbi_hdr_to_ldr_gamma;
/// `pub fn setLdrToHdrScale(scale: f32) void`
pub const setLdrToHdrScale = stbi_ldr_to_hdr_scale;
/// `pub fn setLdrToHdrGamma(gamma: f32) void`
pub const setLdrToHdrGamma = stbi_ldr_to_hdr_gamma;
pub fn isHdr(filename: [:0]const u8) bool {
return stbi_is_hdr(filename) != 0;
}
pub fn isHdrFromMem(buffer: []const u8) bool {
return stbi_is_hdr_from_memory(buffer.ptr, @as(c_int, @intCast(buffer.len))) != 0;
}
pub fn is16bit(filename: [:0]const u8) bool {
return stbi_is_16_bit(filename) != 0;
}
pub fn setFlipVerticallyOnLoad(should_flip: bool) void {
stbi_set_flip_vertically_on_load(if (should_flip) 1 else 0);
}
pub fn setFlipVerticallyOnWrite(should_flip: bool) void {
stbi_flip_vertically_on_write(if (should_flip) 1 else 0);
}
var mem_allocator: ?std.mem.Allocator = null;
var mem_allocations: ?std.AutoHashMap(usize, usize) = null;
var mem_mutex: std.Thread.Mutex = .{};
const mem_alignment = 16;
extern var zstbiMallocPtr: ?*const fn (size: usize) callconv(.C) ?*anyopaque;
extern var zstbiwMallocPtr: ?*const fn (size: usize) callconv(.C) ?*anyopaque;
fn zstbiMalloc(size: usize) callconv(.C) ?*anyopaque {
mem_mutex.lock();
defer mem_mutex.unlock();
const mem = mem_allocator.?.alignedAlloc(
u8,
mem_alignment,
size,
) catch @panic("zstbi: out of memory");
mem_allocations.?.put(@intFromPtr(mem.ptr), size) catch @panic("zstbi: out of memory");
return mem.ptr;
}
extern var zstbiReallocPtr: ?*const fn (ptr: ?*anyopaque, size: usize) callconv(.C) ?*anyopaque;
extern var zstbiwReallocPtr: ?*const fn (ptr: ?*anyopaque, size: usize) callconv(.C) ?*anyopaque;
fn zstbiRealloc(ptr: ?*anyopaque, size: usize) callconv(.C) ?*anyopaque {
mem_mutex.lock();
defer mem_mutex.unlock();
const old_size = if (ptr != null) mem_allocations.?.get(@intFromPtr(ptr.?)).? else 0;
const old_mem = if (old_size > 0)
@as([*]align(mem_alignment) u8, @ptrCast(@alignCast(ptr)))[0..old_size]
else
@as([*]align(mem_alignment) u8, undefined)[0..0];
const new_mem = mem_allocator.?.realloc(old_mem, size) catch @panic("zstbi: out of memory");
if (ptr != null) {
const removed = mem_allocations.?.remove(@intFromPtr(ptr.?));
std.debug.assert(removed);
}
mem_allocations.?.put(@intFromPtr(new_mem.ptr), size) catch @panic("zstbi: out of memory");
return new_mem.ptr;
}
extern var zstbiFreePtr: ?*const fn (maybe_ptr: ?*anyopaque) callconv(.C) void;
extern var zstbiwFreePtr: ?*const fn (maybe_ptr: ?*anyopaque) callconv(.C) void;
fn zstbiFree(maybe_ptr: ?*anyopaque) callconv(.C) void {
if (maybe_ptr) |ptr| {
mem_mutex.lock();
defer mem_mutex.unlock();
const size = mem_allocations.?.fetchRemove(@intFromPtr(ptr)).?.value;
const mem = @as([*]align(mem_alignment) u8, @ptrCast(@alignCast(ptr)))[0..size];
mem_allocator.?.free(mem);
}
}
extern var zstbirMallocPtr: ?*const fn (size: usize, maybe_context: ?*anyopaque) callconv(.C) ?*anyopaque;
fn zstbirMalloc(size: usize, _: ?*anyopaque) callconv(.C) ?*anyopaque {
return zstbiMalloc(size);
}
extern var zstbirFreePtr: ?*const fn (maybe_ptr: ?*anyopaque, maybe_context: ?*anyopaque) callconv(.C) void;
fn zstbirFree(maybe_ptr: ?*anyopaque, _: ?*anyopaque) callconv(.C) void {
zstbiFree(maybe_ptr);
}
extern fn stbi_info(filename: [*:0]const u8, x: *c_int, y: *c_int, comp: *c_int) c_int;
extern fn stbi_load(
filename: [*:0]const u8,
x: *c_int,
y: *c_int,
channels_in_file: *c_int,
desired_channels: c_int,
) ?[*]u8;
extern fn stbi_load_16(
filename: [*:0]const u8,
x: *c_int,
y: *c_int,
channels_in_file: *c_int,
desired_channels: c_int,
) ?[*]u16;
extern fn stbi_loadf(
filename: [*:0]const u8,
x: *c_int,
y: *c_int,
channels_in_file: *c_int,
desired_channels: c_int,
) ?[*]f32;
pub extern fn stbi_load_from_memory(
buffer: [*]const u8,
len: c_int,
x: *c_int,
y: *c_int,
channels_in_file: *c_int,
desired_channels: c_int,
) ?[*]u8;
pub extern fn stbi_loadf_from_memory(
buffer: [*]const u8,
len: c_int,
x: *c_int,
y: *c_int,
channels_in_file: *c_int,
desired_channels: c_int,
) ?[*]f32;
extern fn stbi_image_free(image_data: ?[*]u8) void;
extern fn stbi_hdr_to_ldr_scale(scale: f32) void;
extern fn stbi_hdr_to_ldr_gamma(gamma: f32) void;
extern fn stbi_ldr_to_hdr_scale(scale: f32) void;
extern fn stbi_ldr_to_hdr_gamma(gamma: f32) void;
extern fn stbi_is_16_bit(filename: [*:0]const u8) c_int;
extern fn stbi_is_hdr(filename: [*:0]const u8) c_int;
extern fn stbi_is_hdr_from_memory(buffer: [*]const u8, len: c_int) c_int;
extern fn stbi_set_flip_vertically_on_load(flag_true_if_should_flip: c_int) void;
extern fn stbi_flip_vertically_on_write(flag: c_int) void; // flag is non-zero to flip data vertically
extern fn stbir_resize_uint8(
input_pixels: [*]const u8,
input_w: c_int,
input_h: c_int,
input_stride_in_bytes: c_int,
output_pixels: [*]u8,
output_w: c_int,
output_h: c_int,
output_stride_in_bytes: c_int,
num_channels: c_int,
) void;
extern fn stbi_write_jpg(
filename: [*:0]const u8,
w: c_int,
h: c_int,
comp: c_int,
data: [*]const u8,
quality: c_int,
) c_int;
extern fn stbi_write_png(
filename: [*:0]const u8,
w: c_int,
h: c_int,
comp: c_int,
data: [*]const u8,
stride_in_bytes: c_int,
) c_int;
extern fn stbi_write_png_to_func(
func: *const fn (?*anyopaque, ?*anyopaque, c_int) callconv(.C) void,
context: ?*anyopaque,
w: c_int,
h: c_int,
comp: c_int,
data: [*]const u8,
stride_in_bytes: c_int,
) c_int;
extern fn stbi_write_jpg_to_func(
func: *const fn (?*anyopaque, ?*anyopaque, c_int) callconv(.C) void,
context: ?*anyopaque,
x: c_int,
y: c_int,
comp: c_int,
data: [*]const u8,
quality: c_int,
) c_int;
test "zstbi basic" {
init(testing.allocator);
defer deinit();
var im1 = try Image.createEmpty(8, 6, 4, .{});
defer im1.deinit();
try testing.expect(im1.width == 8);
try testing.expect(im1.height == 6);
try testing.expect(im1.num_components == 4);
}
test "zstbi resize" {
init(testing.allocator);
defer deinit();
var im1 = try Image.createEmpty(32, 32, 4, .{});
defer im1.deinit();
var im2 = im1.resize(8, 6);
defer im2.deinit();
try testing.expect(im2.width == 8);
try testing.expect(im2.height == 6);
try testing.expect(im2.num_components == 4);
}
test "zstbi write and load file" {
init(testing.allocator);
defer deinit();
const pth = try std.fs.selfExeDirPathAlloc(testing.allocator);
defer testing.allocator.free(pth);
try std.posix.chdir(pth);
var img = try Image.createEmpty(8, 6, 4, .{});
defer img.deinit();
try img.writeToFile("test_img.png", ImageWriteFormat.png);
try img.writeToFile("test_img.jpg", .{ .jpg = .{ .quality = 80 } });
var img_png = try Image.loadFromFile("test_img.png", 0);
defer img_png.deinit();
try testing.expect(img_png.width == img.width);
try testing.expect(img_png.height == img.height);
try testing.expect(img_png.num_components == img.num_components);
var img_jpg = try Image.loadFromFile("test_img.jpg", 0);
defer img_jpg.deinit();
try testing.expect(img_jpg.width == img.width);
try testing.expect(img_jpg.height == img.height);
try testing.expect(img_jpg.num_components == 3); // RGB JPEG
try std.fs.cwd().deleteFile("test_img.png");
try std.fs.cwd().deleteFile("test_img.jpg");
}

1
profile.json Normal file

File diff suppressed because one or more lines are too long

View file

@ -11,6 +11,7 @@ const Model = @import("vulkan_renderer.zig").Model;
const Self = @This();
ubo_model: Model,
tex_id: u32,
vertex_count: u32,
vertex_buffer: vk.Buffer,
@ -34,6 +35,7 @@ pub fn new(
transfer_command_pool: vk.CommandPool,
vertices: []const Vertex,
indices: []const u32,
tex_id: u32,
allocator: std.mem.Allocator,
) !Self {
var self: Self = undefined;
@ -50,6 +52,7 @@ pub fn new(
try self.createIndexBuffer(transfer_queue, transfer_command_pool, indices);
self.ubo_model = .{ .model = zm.identity() };
self.tex_id = tex_id;
return self;
}
@ -122,7 +125,6 @@ fn createVertexBuffer(
staging_buffer,
self.vertex_buffer,
buffer_size,
self.allocator,
);
}
@ -178,6 +180,5 @@ fn createIndexBuffer(
staging_buffer,
self.index_buffer,
buffer_size,
self.allocator,
);
}

View file

@ -1,10 +1,13 @@
#version 450
layout(location = 0) in vec3 fragCol;
layout(location = 1) in vec2 fragTex;
// Final output colour (must also have location)
layout(location = 0) out vec4 outColour;
layout(set = 1, binding = 0) uniform sampler2D textureSampler;
void main() {
outColour = vec4(fragCol, 1.0);
outColour = texture(textureSampler, fragTex);
}

View file

@ -2,10 +2,12 @@
layout(location = 0) in vec3 pos;
layout(location = 1) in vec3 col;
layout(location = 2) in vec2 tex;
layout(location = 0) out vec3 fragCol;
layout(location = 1) out vec2 fragTex;
layout(binding = 0) uniform UboViewProjection {
layout(set = 0, binding = 0) uniform UboViewProjection {
mat4 projection;
mat4 view;
} uboViewProjection;
@ -17,4 +19,5 @@ layout(push_constant) uniform PushModel {
void main() {
gl_Position = uboViewProjection.projection * uboViewProjection.view * pushModel.model * vec4(pos, 1.0);
fragCol = col;
fragTex = tex;
}

utilities.zig

@ -8,12 +8,13 @@ const CommandBuffer = @import("vulkan_renderer.zig").CommandBuffer;
pub const device_extensions = [_][*:0]const u8{vk.extensions.khr_swapchain.name};
pub const Vector3 = @Vector(3, f32);
pub const Vector2 = @Vector(2, f32);
// Vertex data representation
pub const Vertex = struct {
// Vertex position (x, y, z)
pos: Vector3,
col: Vector3,
pos: Vector3, // Vertex position (x, y, z)
col: Vector3, // Vertex colour (r, g, b)
tex: Vector2, // Texture coords (u, v)
};
pub const QueueFamilyIndices = struct {
@ -96,31 +97,20 @@ pub fn createBuffer(
try device.bindBufferMemory(buffer.*, buffer_memory.*, 0);
}
pub fn copyBuffer(
device: Device,
transfer_queue: vk.Queue,
transfer_command_pool: vk.CommandPool,
src_buffer: vk.Buffer,
dst_buffer: vk.Buffer,
buffer_size: vk.DeviceSize,
allocator: std.mem.Allocator,
) !void {
fn beginCommandBuffer(device: Device, command_pool: vk.CommandPool) !CommandBuffer {
// Command buffer to hold transfer commands
const transfer_command_buffer_handle = try allocator.create(vk.CommandBuffer);
defer allocator.destroy(transfer_command_buffer_handle);
// Free temporary buffer back to pool
defer device.freeCommandBuffers(transfer_command_pool, 1, @ptrCast(transfer_command_buffer_handle));
var command_buffer_handle: vk.CommandBuffer = undefined;
// Command buffer details
const alloc_info: vk.CommandBufferAllocateInfo = .{
.command_pool = transfer_command_pool,
.command_pool = command_pool,
.level = .primary,
.command_buffer_count = 1,
};
// Allocate command buffer from pool
try device.allocateCommandBuffers(&alloc_info, @ptrCast(transfer_command_buffer_handle));
const transfer_command_buffer = CommandBuffer.init(transfer_command_buffer_handle.*, device.wrapper);
try device.allocateCommandBuffers(&alloc_info, @ptrCast(&command_buffer_handle));
const command_buffer = CommandBuffer.init(command_buffer_handle, device.wrapper);
// Information to begin the command buffer record
const begin_info: vk.CommandBufferBeginInfo = .{
@ -128,7 +118,39 @@ pub fn copyBuffer(
};
// Begin recording transfer commands
try transfer_command_buffer.beginCommandBuffer(&begin_info);
try command_buffer.beginCommandBuffer(&begin_info);
return command_buffer;
}
fn endAndSubmitCommandBuffer(device: Device, command_buffer: CommandBuffer, command_pool: vk.CommandPool, queue: vk.Queue) !void {
// End commands
try command_buffer.endCommandBuffer();
// Queue submission information
const submit_info: vk.SubmitInfo = .{
.command_buffer_count = 1,
.p_command_buffers = @ptrCast(&command_buffer.handle),
};
// Submit transfer command to transfer queue and wait until it finishes
try device.queueSubmit(queue, 1, @ptrCast(&submit_info), .null_handle);
try device.queueWaitIdle(queue);
// Free temporary buffer back to pool
device.freeCommandBuffers(command_pool, 1, @ptrCast(&command_buffer.handle));
}
pub fn copyBuffer(
device: Device,
transfer_queue: vk.Queue,
transfer_command_pool: vk.CommandPool,
src_buffer: vk.Buffer,
dst_buffer: vk.Buffer,
buffer_size: vk.DeviceSize,
) !void {
// Create and begin the transfer command buffer
const transfer_command_buffer = try beginCommandBuffer(device, transfer_command_pool);
// Region of data to copy from and to
const buffer_copy_region: vk.BufferCopy = .{
@ -140,16 +162,96 @@ pub fn copyBuffer(
// Command to copy src buffer to dst buffer
transfer_command_buffer.copyBuffer(src_buffer, dst_buffer, 1, @ptrCast(&buffer_copy_region));
// End commands
try transfer_command_buffer.endCommandBuffer();
// End commands and submit them to the transfer queue
try endAndSubmitCommandBuffer(device, transfer_command_buffer, transfer_command_pool, transfer_queue);
}
// Queue submission information
const submit_info: vk.SubmitInfo = .{
.command_buffer_count = 1,
.p_command_buffers = @ptrCast(&transfer_command_buffer.handle),
pub fn copyImageBuffer(
device: Device,
transfer_queue: vk.Queue,
transfer_command_pool: vk.CommandPool,
src_buffer: vk.Buffer,
image: vk.Image,
width: u32,
height: u32,
) !void {
const transfer_command_buffer = try beginCommandBuffer(device, transfer_command_pool);
const image_region: vk.BufferImageCopy = .{
.buffer_offset = 0, // Offset into data
.buffer_row_length = 0, // Row length of data to calculate data spacing
.buffer_image_height = 0, // Image height to calculate data spacing
.image_subresource = .{
.aspect_mask = .{ .color_bit = true }, // Which aspect of image to copy
.mip_level = 0, // Mipmap level to copy
.base_array_layer = 0, // Starting array layer (if array)
.layer_count = 1, // Number of layers to copy starting at base_array_layer
},
.image_offset = .{ .x = 0, .y = 0, .z = 0 }, // Offset to image (as opposed to raw data buffer offset)
.image_extent = .{ .width = width, .height = height, .depth = 1 }, // Size of the region to copy as x, y, z values
};
// Submit transfer command to transfer queue and wait until it finishes
try device.queueSubmit(transfer_queue, 1, @ptrCast(&submit_info), .null_handle);
try device.queueWaitIdle(transfer_queue);
transfer_command_buffer.copyBufferToImage(src_buffer, image, .transfer_dst_optimal, 1, @ptrCast(&image_region));
try endAndSubmitCommandBuffer(device, transfer_command_buffer, transfer_command_pool, transfer_queue);
}
pub fn transitionImageLayout(
device: Device,
queue: vk.Queue,
command_pool: vk.CommandPool,
image: vk.Image,
old_layout: vk.ImageLayout,
new_layout: vk.ImageLayout,
) !void {
const command_buffer = try beginCommandBuffer(device, command_pool);
var image_memory_barrier: vk.ImageMemoryBarrier = .{
.old_layout = old_layout, // Layout to transition from
.new_layout = new_layout, // Layout to transition to
.src_queue_family_index = vk.QUEUE_FAMILY_IGNORED, // Queue family to transition from
.dst_queue_family_index = vk.QUEUE_FAMILY_IGNORED, // Queue family to transition to
.image = image, // Image being accessed and modified as part of barrier
.subresource_range = .{
.aspect_mask = .{ .color_bit = true }, // Aspect of image being altered
.base_mip_level = 0, // First mip level to start alterations on
.level_count = 1, // Number of mipmap levels to alter starting from base mipmap level
.base_array_layer = 0, // First layer to start alterations on
.layer_count = 1, // Number of layers to alter starting from base array layer
},
.src_access_mask = .{}, // Memory access stage transition must happen after
.dst_access_mask = .{}, // Memory access stage transition must happen before
};
var src_stage: vk.PipelineStageFlags = .{};
var dst_stage: vk.PipelineStageFlags = .{};
// If transitioning from new image to image ready to receive data
if (old_layout == vk.ImageLayout.undefined and new_layout == vk.ImageLayout.transfer_dst_optimal) {
image_memory_barrier.dst_access_mask.transfer_write_bit = true;
src_stage.top_of_pipe_bit = true;
dst_stage.transfer_bit = true;
} else if (old_layout == vk.ImageLayout.transfer_dst_optimal and new_layout == vk.ImageLayout.shader_read_only_optimal) {
// If transitioning from transfer destination to shader readable
image_memory_barrier.src_access_mask.transfer_write_bit = true;
image_memory_barrier.dst_access_mask.shader_read_bit = true;
src_stage.transfer_bit = true;
dst_stage.fragment_shader_bit = true;
}
command_buffer.pipelineBarrier(
src_stage, // Pipeline stages (match to src and dst access mask)
dst_stage,
.{}, // Dependency flags
0, // Memory barrier count
null, // Memory barrier data
0, // Buffer memory barrier count
null, // Buffer memory barrier data
1, // Image memory barrier count
@ptrCast(&image_memory_barrier), // Image memory barrier data
);
try endAndSubmitCommandBuffer(device, command_buffer, command_pool, queue);
}

vulkan_renderer.zig

@ -4,6 +4,8 @@ const vk = @import("vulkan");
const builtin = @import("builtin");
const shaders = @import("shaders");
const zm = @import("zmath");
// const img = @import("zigimg");
const img = @import("zstbi");
const Utilities = @import("utilities.zig");
const QueueFamilyIndices = Utilities.QueueFamilyIndices;
@ -74,6 +76,7 @@ pub const VulkanRenderer = struct {
swapchain: vk.SwapchainKHR,
viewport: vk.Viewport,
scissor: vk.Rect2D,
texture_sampler: vk.Sampler,
swapchain_images: []SwapchainImage,
swapchain_framebuffers: []vk.Framebuffer,
@ -85,14 +88,23 @@ pub const VulkanRenderer = struct {
// Descriptors
descriptor_set_layout: vk.DescriptorSetLayout,
sampler_set_layout: vk.DescriptorSetLayout,
push_constant_range: vk.PushConstantRange,
descriptor_pool: vk.DescriptorPool,
sampler_descriptor_pool: vk.DescriptorPool,
descriptor_sets: []vk.DescriptorSet,
sampler_descriptor_sets: std.ArrayList(vk.DescriptorSet),
vp_uniform_buffer: []vk.Buffer,
vp_uniform_buffer_memory: []vk.DeviceMemory,
// Assets
image_files: std.ArrayList(img.Image),
texture_images: std.ArrayList(vk.Image),
texture_image_memory: std.ArrayList(vk.DeviceMemory),
texture_image_views: std.ArrayList(vk.ImageView),
// Pipeline
graphics_pipeline: vk.Pipeline,
pipeline_layout: vk.PipelineLayout,
@ -121,6 +133,8 @@ pub const VulkanRenderer = struct {
self.allocator = allocator;
self.vkb = try BaseDispatch.load(try sdl.vulkan.getVkGetInstanceProcAddr());
img.init(allocator);
try self.createInstance();
try self.createSurface();
@ -139,6 +153,20 @@ pub const VulkanRenderer = struct {
try self.createFramebuffers();
try self.createCommandPool();
try self.createCommandBuffers();
try self.createTextureSampler();
try self.createUniformBuffers();
try self.createDescriptorPool();
try self.createDescriptorSets();
try self.createSynchronisation();
self.image_files = std.ArrayList(img.Image).init(self.allocator);
self.texture_images = std.ArrayList(vk.Image).init(self.allocator);
self.texture_image_memory = std.ArrayList(vk.DeviceMemory).init(self.allocator);
self.texture_image_views = std.ArrayList(vk.ImageView).init(self.allocator);
self.sampler_descriptor_sets = std.ArrayList(vk.DescriptorSet).init(self.allocator);
const aspect: f32 = @as(f32, @floatFromInt(self.extent.width)) / @as(f32, @floatFromInt(self.extent.height));
self.ubo_view_projection.projection = zm.perspectiveFovRh(
std.math.degreesToRadians(45.0),
@ -158,17 +186,16 @@ pub const VulkanRenderer = struct {
// Create meshes
// Vertex Data
var mesh_vertices = [_]Vertex{
.{ .pos = .{ -0.4, 0.4, 0.0 }, .col = .{ 1.0, 0.0, 0.0 } }, // 0
.{ .pos = .{ -0.4, -0.4, 0.0 }, .col = .{ 1.0, 0.0, 0.0 } }, // 1
.{ .pos = .{ 0.4, -0.4, 0.0 }, .col = .{ 1.0, 0.0, 0.0 } }, // 2
.{ .pos = .{ 0.4, 0.4, 0.0 }, .col = .{ 1.0, 0.0, 0.0 } }, // 3
.{ .pos = .{ -0.4, 0.4, 0.0 }, .col = .{ 1.0, 0.0, 0.0 }, .tex = .{ 1.0, 1.0 } }, // 0
.{ .pos = .{ -0.4, -0.4, 0.0 }, .col = .{ 1.0, 0.0, 0.0 }, .tex = .{ 1.0, 0.0 } }, // 1
.{ .pos = .{ 0.4, -0.4, 0.0 }, .col = .{ 1.0, 0.0, 0.0 }, .tex = .{ 0.0, 0.0 } }, // 2
.{ .pos = .{ 0.4, 0.4, 0.0 }, .col = .{ 1.0, 0.0, 0.0 }, .tex = .{ 0.0, 1.0 } }, // 3
};
var mesh_vertices2 = [_]Vertex{
.{ .pos = .{ -0.25, 0.6, 0.0 }, .col = .{ 0.0, 0.0, 1.0 } }, // 0
.{ .pos = .{ -0.25, -0.6, 0.0 }, .col = .{ 0.0, 0.0, 1.0 } }, // 1
.{ .pos = .{ 0.25, -0.6, 0.0 }, .col = .{ 0.0, 0.0, 1.0 } }, // 2
.{ .pos = .{ 0.25, 0.6, 0.0 }, .col = .{ 0.0, 0.0, 1.0 } }, // 3
.{ .pos = .{ -0.25, 0.6, 0.0 }, .col = .{ 0.0, 0.0, 1.0 }, .tex = .{ 1.0, 1.0 } }, // 0
.{ .pos = .{ -0.25, -0.6, 0.0 }, .col = .{ 0.0, 0.0, 1.0 }, .tex = .{ 1.0, 0.0 } }, // 1
.{ .pos = .{ 0.25, -0.6, 0.0 }, .col = .{ 0.0, 0.0, 1.0 }, .tex = .{ 0.0, 0.0 } }, // 2
.{ .pos = .{ 0.25, 0.6, 0.0 }, .col = .{ 0.0, 0.0, 1.0 }, .tex = .{ 0.0, 1.0 } }, // 3
};
// Index Data
@ -185,6 +212,7 @@ pub const VulkanRenderer = struct {
self.graphics_command_pool,
&mesh_vertices,
&mesh_indices,
try self.createTexture("test.png"),
self.allocator,
);
@ -196,18 +224,12 @@ pub const VulkanRenderer = struct {
self.graphics_command_pool,
&mesh_vertices2,
&mesh_indices,
try self.createTexture("giraffe.png"),
self.allocator,
);
self.meshes = [_]Mesh{ first_mesh, second_mesh };
try self.createCommandBuffers();
try self.createUniformBuffers();
try self.createDescriptorPool();
try self.createDescriptorSets();
try self.createSynchronisation();
return self;
}
@ -280,12 +302,36 @@ pub const VulkanRenderer = struct {
self.instance.destroyDebugUtilsMessengerEXT(self.debug_utils.?, null);
}
for (0..self.image_files.items.len) |i| {
self.image_files.items[i].deinit();
}
self.image_files.deinit();
self.device.destroySampler(self.texture_sampler, null);
for (
self.texture_images.items,
self.texture_image_memory.items,
self.texture_image_views.items,
) |tex_image, tex_image_memory, tex_image_view| {
self.device.destroyImage(tex_image, null);
self.device.freeMemory(tex_image_memory, null);
self.device.destroyImageView(tex_image_view, null);
}
self.texture_images.deinit();
self.texture_image_memory.deinit();
self.texture_image_views.deinit();
self.device.destroyImageView(self.depth_buffer_image_view, null);
self.device.destroyImage(self.depth_buffer_image, null);
self.device.freeMemory(self.depth_buffer_image_memory, null);
self.device.destroyDescriptorPool(self.descriptor_pool, null);
self.device.destroyDescriptorSetLayout(self.descriptor_set_layout, null);
self.device.destroyDescriptorPool(self.sampler_descriptor_pool, null);
self.device.destroyDescriptorSetLayout(self.sampler_set_layout, null);
self.sampler_descriptor_sets.deinit();
for (0..self.swapchain_images.len) |i| {
self.device.destroyBuffer(self.vp_uniform_buffer[i], null);
@ -330,6 +376,8 @@ pub const VulkanRenderer = struct {
self.allocator.destroy(self.device.wrapper);
self.allocator.destroy(self.instance.wrapper);
img.deinit();
}
fn createInstance(self: *Self) !void {
@ -407,11 +455,17 @@ pub const VulkanRenderer = struct {
else
2;
// Device features
const device_features: vk.PhysicalDeviceFeatures = .{
.sampler_anisotropy = vk.TRUE, // Enable anisotropy
};
const device_create_info: vk.DeviceCreateInfo = .{
.queue_create_info_count = queue_count,
.p_queue_create_infos = &qci,
.pp_enabled_extension_names = &Utilities.device_extensions,
.enabled_extension_count = @intCast(Utilities.device_extensions.len),
.p_enabled_features = &device_features,
};
const device_handle = try self.instance.createDevice(self.physical_device, &device_create_info, null);
@ -564,14 +618,14 @@ pub const VulkanRenderer = struct {
// But must happen before...
.dst_subpass = 0,
.dst_stage_mask = .{ .color_attachment_output_bit = true },
.dst_access_mask = .{ .memory_read_bit = true, .memory_write_bit = true },
.dst_access_mask = .{ .color_attachment_read_bit = true, .color_attachment_write_bit = true },
},
// Conversion from VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL to VK_IMAGE_LAYOUT_PRESENT_SRC_KHR
vk.SubpassDependency{
// Transition must happen after...
.src_subpass = 0,
.src_stage_mask = .{ .color_attachment_output_bit = true },
.src_access_mask = .{ .memory_read_bit = true, .memory_write_bit = true },
.src_access_mask = .{ .color_attachment_read_bit = true, .color_attachment_write_bit = true },
// But must happen before...
.dst_subpass = vk.SUBPASS_EXTERNAL,
.dst_stage_mask = .{ .bottom_of_pipe_bit = true },
@ -595,6 +649,8 @@ pub const VulkanRenderer = struct {
}
fn createDescriptorSetLayout(self: *Self) !void {
// -- Uniform values descriptor set layout --
// UboViewProjection binding info
const vp_layout_binding: vk.DescriptorSetLayoutBinding = .{
.binding = 0, // Binding point in shader (designated by binding number in shader)
@ -614,6 +670,25 @@ pub const VulkanRenderer = struct {
// Create descriptor set layout
self.descriptor_set_layout = try self.device.createDescriptorSetLayout(&layout_create_info, null);
// -- Texture sampler descriptor set layout --
// Texture binding info
const sampler_layout_binding: vk.DescriptorSetLayoutBinding = .{
.binding = 0,
.descriptor_type = .combined_image_sampler,
.descriptor_count = 1,
.stage_flags = .{ .fragment_bit = true },
.p_immutable_samplers = null,
};
// Create a descriptor set layout with given bindings for texture
const texture_layout_info: vk.DescriptorSetLayoutCreateInfo = .{
.binding_count = 1,
.p_bindings = @ptrCast(&sampler_layout_binding),
};
self.sampler_set_layout = try self.device.createDescriptorSetLayout(&texture_layout_info, null);
}
fn createPushConstantRange(self: *Self) !void {
@ -711,6 +786,13 @@ pub const VulkanRenderer = struct {
.format = vk.Format.r32g32b32_sfloat,
.offset = @offsetOf(Vertex, "col"),
},
// Texture attribute
.{
.binding = 0,
.location = 2,
.format = vk.Format.r32g32_sfloat,
.offset = @offsetOf(Vertex, "tex"),
},
};
// -- Vertex input --
@ -808,9 +890,11 @@ pub const VulkanRenderer = struct {
};
// -- Pipeline layout --
const descriptor_set_layouts = [_]vk.DescriptorSetLayout{ self.descriptor_set_layout, self.sampler_set_layout };
const pipeline_layout_create_info: vk.PipelineLayoutCreateInfo = .{
.set_layout_count = 1,
.p_set_layouts = @ptrCast(&self.descriptor_set_layout),
.set_layout_count = @intCast(descriptor_set_layouts.len),
.p_set_layouts = &descriptor_set_layouts,
.push_constant_range_count = 1,
.p_push_constant_ranges = @ptrCast(&self.push_constant_range),
};
@ -925,6 +1009,29 @@ pub const VulkanRenderer = struct {
}
}
fn createTextureSampler(self: *Self) !void {
// Sampler create info
const sampler_create_info: vk.SamplerCreateInfo = .{
.mag_filter = .linear, // How to render when image is magnified on screen
.min_filter = .linear, // How to render when image is minified on screen
.address_mode_u = .repeat, // How to handle texture wrap in U (x direction)
.address_mode_v = .repeat, // How to handle texture wrap in V (y direction)
.address_mode_w = .repeat, // How to handle texture wrap in W (z direction)
.border_color = .int_opaque_black, // Border beyond texture (only works for border clamp)
.unnormalized_coordinates = vk.FALSE, // VK_FALSE = coords are normalized (between 0 and 1)
.mipmap_mode = .linear, // Mipmap interpolation mode
.mip_lod_bias = 0.0, // Level of detail bias for mip level
.min_lod = 0.0, // Minimum lod to pick mip level
.max_lod = 0.0, // Maximum lod to pick mip level
.anisotropy_enable = vk.TRUE, // Enable anisotropy
.max_anisotropy = 16.0, // Anisotropy sample level
.compare_enable = vk.FALSE,
.compare_op = .never,
};
self.texture_sampler = try self.device.createSampler(&sampler_create_info, null);
}
fn createUniformBuffers(self: *Self) !void {
// View projection buffer size
const vp_buffer_size: vk.DeviceSize = @sizeOf(UboViewProjection);
@ -949,6 +1056,8 @@ pub const VulkanRenderer = struct {
}
fn createDescriptorPool(self: *Self) !void {
// -- Create uniform descriptor pool --
// Type of descriptors + how many descriptors (!= descriptor sets) (combined makes the pool size)
// View projection pool
const vp_pool_size: vk.DescriptorPoolSize = .{
@ -968,6 +1077,23 @@ pub const VulkanRenderer = struct {
// Create descriptor pool
self.descriptor_pool = try self.device.createDescriptorPool(&pool_create_info, null);
// -- Create sampler descriptor pool --
// Texture sampler pool
const sampler_pool_size: vk.DescriptorPoolSize = .{
.type = .combined_image_sampler,
.descriptor_count = MAX_OBJECTS,
};
// FIXME Not the best (look into array layers)
const sampler_pool_create_info: vk.DescriptorPoolCreateInfo = .{
.max_sets = MAX_OBJECTS,
.pool_size_count = 1,
.p_pool_sizes = @ptrCast(&sampler_pool_size),
};
self.sampler_descriptor_pool = try self.device.createDescriptorPool(&sampler_pool_create_info, null);
}
fn createDescriptorSets(self: *Self) !void {
@ -1055,10 +1181,9 @@ pub const VulkanRenderer = struct {
},
.p_clear_values = &clear_values, // List of clear values
.clear_value_count = @intCast(clear_values.len),
.framebuffer = undefined,
.framebuffer = self.swapchain_framebuffers[current_image],
};
render_pass_begin_info.framebuffer = self.swapchain_framebuffers[current_image];
const command_buffer = self.command_buffers[current_image];
// Start recording commands to command buffer
@ -1095,13 +1220,18 @@ pub const VulkanRenderer = struct {
@ptrCast(&mesh.ubo_model.model), // Actual data being pushed (can be array)
);
const descriptor_set_group = [_]vk.DescriptorSet{
self.descriptor_sets[current_image],
self.sampler_descriptor_sets.items[mesh.tex_id],
};
// Bind descriptor sets
command_buffer.bindDescriptorSets(
.graphics,
self.pipeline_layout,
0,
1,
@ptrCast(&self.descriptor_sets[current_image]),
@intCast(descriptor_set_group.len),
&descriptor_set_group,
0,
null,
);
@ -1247,8 +1377,8 @@ pub const VulkanRenderer = struct {
return false;
}
const pdev_features = self.instance.getPhysicalDeviceFeatures(pdev);
const queue_family_indices = self.getQueueFamilies(pdev) catch return false;
const extension_support = self.checkDeviceExtensions(pdev) catch return false;
const swapchain_details = self.getSwapchainDetails(pdev) catch return false;
@ -1257,7 +1387,7 @@ pub const VulkanRenderer = struct {
const swapchain_valid = swapchain_details.formats.len != 0 and swapchain_details.formats.len != 0;
return queue_family_indices.isValid() and extension_support and swapchain_valid;
return queue_family_indices.isValid() and extension_support and swapchain_valid and pdev_features.sampler_anisotropy == vk.TRUE;
}
fn checkValidationLayersSupport(self: Self) bool {
@ -1376,6 +1506,170 @@ pub const VulkanRenderer = struct {
return try self.device.createImageView(&image_view_create_info, null);
}
fn createTextureImage(self: *Self, file_name: []const u8) !u32 {
// Load image file
var width: u32 = undefined;
var height: u32 = undefined;
var image_size: vk.DeviceSize = undefined;
const image = try self.loadTextureFile(file_name, &width, &height, &image_size);
// Create staging buffer to hold loaded data, ready to copy to device
var image_staging_buffer: vk.Buffer = undefined;
var image_staging_buffer_memory: vk.DeviceMemory = undefined;
defer self.device.destroyBuffer(image_staging_buffer, null);
defer self.device.freeMemory(image_staging_buffer_memory, null);
try Utilities.createBuffer(
self.physical_device,
self.instance,
self.device,
image_size,
.{ .transfer_src_bit = true },
.{ .host_visible_bit = true, .host_coherent_bit = true },
&image_staging_buffer,
&image_staging_buffer_memory,
);
std.debug.print("Image size: {d}\n", .{image_size});
// Copy data to staging buffer
const data = try self.device.mapMemory(image_staging_buffer_memory, 0, image_size, .{});
const image_data: [*]u8 = @ptrCast(@alignCast(data));
// std.debug.print("Data len: {d}\tImage len:{d}\n", .{ image_data., image.len });
// image_data.* = image;
// std.mem.copyForwards(u8, image_data, image);
@memcpy(image_data, image[0..]);
self.device.unmapMemory(image_staging_buffer_memory);
// Create image to hold final texture
var tex_image_memory: vk.DeviceMemory = undefined;
const tex_image: vk.Image = try self.createImage(
width,
height,
.r8g8b8a8_srgb,
.optimal,
.{ .transfer_dst_bit = true, .sampled_bit = true },
.{ .device_local_bit = true },
&tex_image_memory,
);
// Transition image to be DST for copy operation
try Utilities.transitionImageLayout(
self.device,
self.graphics_queue.handle,
self.graphics_command_pool,
tex_image,
.undefined,
.transfer_dst_optimal,
);
// Copy data to image
try Utilities.copyImageBuffer(
self.device,
self.graphics_queue.handle,
self.graphics_command_pool,
image_staging_buffer,
tex_image,
width,
height,
);
// Transition image to be shader readable for shader usage
try Utilities.transitionImageLayout(
self.device,
self.graphics_queue.handle,
self.graphics_command_pool,
tex_image,
.transfer_dst_optimal,
.shader_read_only_optimal,
);
// Add texture data to array for reference
try self.texture_images.append(tex_image);
try self.texture_image_memory.append(tex_image_memory);
// Return index of new texture image
return @intCast(self.texture_images.items.len - 1);
}
fn createTexture(self: *Self, file_name: []const u8) !u32 {
// Create texture image and get its location in the array
const texture_image_loc = try self.createTextureImage(file_name);
// Create image view and add to list
const image_view = try self.createImageView(
self.texture_images.items[texture_image_loc],
.r8g8b8a8_srgb,
.{ .color_bit = true },
);
try self.texture_image_views.append(image_view);
// Create texture descriptor
const descriptor_loc = try self.createTextureDescriptor(image_view);
// Return location of set with texture
return descriptor_loc;
}
fn createTextureDescriptor(self: *Self, texture_image: vk.ImageView) !u32 {
var descriptor_set: vk.DescriptorSet = undefined;
// Descriptor set allocation info
const set_alloc_info: vk.DescriptorSetAllocateInfo = .{
.descriptor_pool = self.sampler_descriptor_pool,
.descriptor_set_count = 1,
.p_set_layouts = @ptrCast(&self.sampler_set_layout),
};
// Allocate descriptor sets
try self.device.allocateDescriptorSets(&set_alloc_info, @ptrCast(&descriptor_set));
const image_info: vk.DescriptorImageInfo = .{
.image_layout = .shader_read_only_optimal, // Image layout when in use
.image_view = texture_image, // Image to bind to set
.sampler = self.texture_sampler, // Sampler to use for set
};
// Descriptor write info
const descriptor_write: vk.WriteDescriptorSet = .{
.dst_set = descriptor_set,
.dst_binding = 0,
.dst_array_element = 0,
.descriptor_type = .combined_image_sampler,
.descriptor_count = 1,
.p_image_info = @ptrCast(&image_info),
.p_buffer_info = undefined,
.p_texel_buffer_view = undefined,
};
// Update the new descriptor set
self.device.updateDescriptorSets(1, @ptrCast(&descriptor_write), 0, null);
try self.sampler_descriptor_sets.append(descriptor_set);
// Return descriptor set location
return @intCast(self.sampler_descriptor_sets.items.len - 1);
}
fn loadTextureFile(self: *Self, file_name: []const u8, width: *u32, height: *u32, image_size: *vk.DeviceSize) ![]const u8 {
const path_concat = [2][]const u8{ "./assets/textures/", file_name };
const path = try std.mem.concatWithSentinel(self.allocator, u8, &path_concat, 0);
defer self.allocator.free(path);
const image = try img.Image.loadFromFile(path, 0);
try self.image_files.append(image);
width.* = image.width;
height.* = image.height;
// Calculate image size using given and known data
image_size.* = width.* * height.* * 4;
return image.data;
}
};
// Format: VK_FORMAT_R8G8B8A8_UNORM (VK_FORMAT_B8G8R8A8_UNORM as backup)
@ -1384,13 +1678,13 @@ fn chooseBestSurfaceFormat(formats: []vk.SurfaceFormatKHR) vk.SurfaceFormatKHR {
// If only one format available and is undefined, then this means all formats are available
if (formats.len == 1 and formats[0].format == vk.Format.undefined) {
return .{
.format = vk.Format.r8g8b8a8_unorm,
.format = vk.Format.r8g8b8a8_srgb,
.color_space = vk.ColorSpaceKHR.srgb_nonlinear_khr,
};
}
for (formats) |format| {
if ((format.format == vk.Format.r8g8b8a8_unorm or format.format == vk.Format.b8g8r8a8_unorm) and format.color_space == vk.ColorSpaceKHR.srgb_nonlinear_khr) {
if ((format.format == vk.Format.r8g8b8a8_srgb or format.format == vk.Format.b8g8r8a8_srgb) and format.color_space == vk.ColorSpaceKHR.srgb_nonlinear_khr) {
return format;
}
}
@ -1479,7 +1773,7 @@ fn debugCallback(
const severity = getMessageSeverityLabel(message_severity);
const message_type = getMessageTypeLabel(message_types);
std.debug.print("[{s}] ({s}): {s}\n", .{ severity, message_type, p_callback_data.?.p_message.? });
std.debug.print("[{s}] ({s}): {s}\n=====\n", .{ severity, message_type, p_callback_data.?.p_message.? });
return vk.TRUE;
}