lunar/src/VulkanRenderer.cpp
#define VULKAN_HPP_DEFAULT_DISPATCH_LOADER_DYNAMIC_STORAGE
#include "VulkanRenderer.h"
#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>
#include <format>
#include <iostream>
#include <limits>
#include <optional>
#include <print>
#include <stdexcept>
#include <type_traits>
#include <utility>
#include <SDL3/SDL_video.h>
#include <SDL3/SDL_vulkan.h>
#include <VkBootstrap.h>
#include <imgui_impl_sdl3.h>
#include <imgui_impl_vulkan.h>
#if defined(TRACY_ENABLE)
# include <tracy/Tracy.hpp>
#endif
#include "DescriptorLayoutBuilder.h"
#include "DescriptorWriter.h"
#include "GraphicsPipelineBuilder.h"
#include "Util.h"
VULKAN_HPP_DEFAULT_DISPATCH_LOADER_DYNAMIC_STORAGE;
namespace Lunar {
VulkanRenderer::GL::GL(VulkanRenderer &renderer)
: m_renderer(renderer)
{
}
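// Immediate-mode drawing helper in the spirit of classic OpenGL begin/end.
// A minimal usage sketch (illustrative only; the target names are placeholders
// and actual call sites live outside this file):
//
//   gl.begin_drawing(cmd, draw_image, &depth_image);
//   gl.begin(GeometryKind::Triangles);
//   gl.color(smath::Vec3 { 1.0f, 0.0f, 0.0f });
//   gl.vert(smath::Vec3 { 0.0f, 0.5f, 0.0f });
//   // ... more color()/uv()/normal()/vert() calls ...
//   gl.end();
//   gl.end_drawing();
//
// begin_drawing() starts dynamic rendering into the given color target
// (resolving MSAA when enabled), resets per-pass state, and sets a
// full-target viewport/scissor before binding the active pipeline.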
auto VulkanRenderer::GL::begin_drawing(vk::CommandBuffer cmd,
AllocatedImage &color_target, AllocatedImage *depth_target) -> void
{
if (m_drawing) {
end_drawing();
}
m_cmd = cmd;
m_color_target = &color_target;
m_depth_target = depth_target;
m_vertices.clear();
m_indices.clear();
m_inside_primitive = false;
m_drawing = true;
m_transform_stack.clear();
m_active_pipeline = m_culling_enabled
? &m_renderer.m_vk.mesh_pipeline_culled
: &m_renderer.m_vk.mesh_pipeline;
m_transform = smath::Mat4::identity();
m_current_color = { 1.0f, 1.0f, 1.0f, 1.0f };
m_current_normal = { 0.0f, 0.0f, 1.0f };
m_current_uv = { 0.0f, 0.0f };
m_bound_texture = &m_renderer.m_vk.error_image;
auto const extent = vk::Extent2D {
m_color_target->extent.width,
m_color_target->extent.height,
};
vk::RenderingAttachmentInfo color_att {};
vk::ClearValue clear {};
clear.color = vk::ClearColorValue {
smath::Vec4 { Colors::DARK_SLATE_GRAY, 1.0f },
};
if (m_renderer.m_vk.msaa_samples != vk::SampleCountFlagBits::e1) {
assert(m_renderer.m_vk.msaa_color_image.image_view
&& "MSAA enabled but MSAA color image is missing");
color_att = vkinit::attachment_info(
m_renderer.m_vk.msaa_color_image.image_view, &clear,
vk::ImageLayout::eColorAttachmentOptimal);
color_att.resolveMode = vk::ResolveModeFlagBits::eAverage;
color_att.resolveImageView = m_color_target->image_view;
color_att.resolveImageLayout = vk::ImageLayout::eColorAttachmentOptimal;
color_att.storeOp = vk::AttachmentStoreOp::eDontCare;
} else {
color_att = vkinit::attachment_info(m_color_target->image_view, &clear,
vk::ImageLayout::eColorAttachmentOptimal);
}
std::optional<vk::RenderingAttachmentInfo> depth_att;
if (m_depth_target) {
depth_att = vkinit::depth_attachment_info(m_depth_target->image_view,
vk::ImageLayout::eDepthAttachmentOptimal);
}
auto render_info { vkinit::render_info(
extent, &color_att, depth_att ? &*depth_att : nullptr) };
m_cmd.beginRendering(render_info);
vk::Viewport viewport {};
viewport.x = 0.0f;
viewport.y = 0.0f;
viewport.width = static_cast<float>(extent.width);
viewport.height = static_cast<float>(extent.height);
viewport.minDepth = 0.0f;
viewport.maxDepth = 1.0f;
m_cmd.setViewport(0, viewport);
vk::Rect2D scissor {};
scissor.offset.x = 0;
scissor.offset.y = 0;
scissor.extent = extent;
m_cmd.setScissor(0, scissor);
bind_pipeline_if_needed();
}
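// Finishes the pass started by begin_drawing(): closes any open primitive,
// flushes buffered geometry, ends dynamic rendering, and clears per-pass state.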
auto VulkanRenderer::GL::end_drawing() -> void
{
if (!m_drawing)
return;
if (m_inside_primitive) {
end();
}
flush();
m_cmd.endRendering();
m_cmd = nullptr;
m_color_target = nullptr;
m_depth_target = nullptr;
m_drawing = false;
m_active_pipeline = nullptr;
}
auto VulkanRenderer::GL::begin(GeometryKind kind) -> void
{
assert(m_drawing && "begin_drawing must be called first");
if (m_inside_primitive) {
end();
}
m_current_kind = kind;
m_primitive_start = m_vertices.size();
m_inside_primitive = true;
}
auto VulkanRenderer::GL::color(smath::Vec3 const &rgb) -> void
{
m_current_color = smath::Vec4 { rgb, 1.0f };
}
auto VulkanRenderer::GL::color(smath::Vec4 const &rgba) -> void
{
m_current_color = rgba;
}
auto VulkanRenderer::GL::uv(smath::Vec2 const &uv) -> void
{
m_current_uv = uv;
}
auto VulkanRenderer::GL::normal(smath::Vec3 const &normal) -> void
{
m_current_normal = normal;
}
auto VulkanRenderer::GL::set_texture(
std::optional<AllocatedImage const *> texture) -> void
{
assert(m_drawing && "begin_drawing must be called first");
flush();
m_bound_texture = texture.value_or(&m_renderer.m_vk.error_image);
}
auto VulkanRenderer::GL::set_culling(bool enabled) -> void
{
if (m_culling_enabled == enabled) {
return;
}
if (m_drawing) {
flush();
}
m_culling_enabled = enabled;
if (!m_drawing) {
return;
}
if (m_active_pipeline == &m_renderer.m_vk.mesh_pipeline
|| m_active_pipeline == &m_renderer.m_vk.mesh_pipeline_culled) {
m_active_pipeline = enabled ? &m_renderer.m_vk.mesh_pipeline_culled
: &m_renderer.m_vk.mesh_pipeline;
} else if (m_active_pipeline == &m_renderer.m_vk.triangle_pipeline
|| m_active_pipeline == &m_renderer.m_vk.triangle_pipeline_culled) {
m_active_pipeline = enabled ? &m_renderer.m_vk.triangle_pipeline_culled
: &m_renderer.m_vk.triangle_pipeline;
}
bind_pipeline_if_needed();
}
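// Closes the primitive opened by begin() and emits indices for the vertices
// recorded since then.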
auto VulkanRenderer::GL::end() -> void
{
if (!m_inside_primitive)
return;
auto const count = m_vertices.size() - m_primitive_start;
emit_indices(m_primitive_start, count);
m_inside_primitive = false;
}
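// Uploads the buffered vertices/indices through a CPU staging buffer, copies
// them into GPU-only buffers with a blocking immediate submit, binds the
// current texture descriptor and push constants (mesh pipelines only), and
// records the indexed draw into the active command buffer. The temporary
// buffers are released through the per-frame deletion queue.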
auto VulkanRenderer::GL::flush() -> void
{
if (!m_drawing || m_vertices.empty() || m_indices.empty())
return;
auto const vertex_data_size { m_vertices.size() * sizeof(Vertex) };
auto const index_data_size { m_indices.size() * sizeof(uint32_t) };
auto const staging_size { vertex_data_size + index_data_size };
auto staging = m_renderer.create_buffer(staging_size,
vk::BufferUsageFlagBits::eTransferSrc, VMA_MEMORY_USAGE_CPU_ONLY);
void *staging_dst = staging.info.pMappedData;
bool staging_mapped_here { false };
if (!staging_dst) {
[[maybe_unused]] VkResult const res = vmaMapMemory(
m_renderer.m_vk.allocator, staging.allocation, &staging_dst);
assert(res == VK_SUCCESS && "Failed to map staging buffer");
staging_mapped_here = true;
}
memcpy(staging_dst, m_vertices.data(), vertex_data_size);
memcpy(reinterpret_cast<uint8_t *>(staging_dst) + vertex_data_size,
m_indices.data(), index_data_size);
if (staging_mapped_here) {
vmaUnmapMemory(m_renderer.m_vk.allocator, staging.allocation);
}
auto vertex_buffer { m_renderer.create_buffer(vertex_data_size,
vk::BufferUsageFlagBits::eVertexBuffer
| vk::BufferUsageFlagBits::eTransferDst
| vk::BufferUsageFlagBits::eShaderDeviceAddress,
VMA_MEMORY_USAGE_GPU_ONLY) };
auto index_buffer { m_renderer.create_buffer(index_data_size,
vk::BufferUsageFlagBits::eIndexBuffer
| vk::BufferUsageFlagBits::eTransferDst,
VMA_MEMORY_USAGE_GPU_ONLY) };
m_renderer.immediate_submit(
[&](vk::CommandBuffer cmd) {
vk::BufferCopy vertex_copy {};
vertex_copy.srcOffset = 0;
vertex_copy.dstOffset = 0;
vertex_copy.size = vertex_data_size;
cmd.copyBuffer(
staging.buffer, vertex_buffer.buffer, 1, &vertex_copy);
vk::BufferCopy index_copy {};
index_copy.srcOffset = vertex_data_size;
index_copy.dstOffset = 0;
index_copy.size = index_data_size;
cmd.copyBuffer(staging.buffer, index_buffer.buffer, 1, &index_copy);
},
/*flush_frame_deletion_queue=*/false,
/*clear_frame_descriptors=*/false);
m_renderer.destroy_buffer(staging);
auto cmd { m_cmd };
bind_pipeline_if_needed();
if (m_active_pipeline == &m_renderer.m_vk.mesh_pipeline
|| m_active_pipeline == &m_renderer.m_vk.mesh_pipeline_culled) {
auto const image_set {
m_renderer.m_vk.get_current_frame().frame_descriptors.allocate(
m_renderer.m_logger, m_renderer.m_vkb.dev.device,
m_renderer.m_vk.single_image_descriptor_layout)
};
auto const *image
= m_bound_texture ? m_bound_texture : &m_renderer.m_vk.error_image;
DescriptorWriter()
.write_image(0, image->image_view,
m_renderer.m_vk.default_sampler_nearest.get(),
static_cast<VkImageLayout>(
vk::ImageLayout::eShaderReadOnlyOptimal),
VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
.update_set(m_renderer.m_vkb.dev.device, image_set);
auto vk_image_set { vk::DescriptorSet { image_set } };
cmd.bindDescriptorSets(vk::PipelineBindPoint::eGraphics,
m_active_pipeline->get_layout(), 0, vk_image_set, {});
GPUDrawPushConstants push_constants {};
push_constants.world_matrix = m_transform;
vk::BufferDeviceAddressInfo device_address_info {};
device_address_info.buffer = vertex_buffer.buffer;
push_constants.vertex_buffer
= m_renderer.m_device.getBufferAddress(device_address_info);
cmd.pushConstants(m_active_pipeline->get_layout(),
vk::ShaderStageFlagBits::eVertex, 0, sizeof(push_constants),
&push_constants);
}
cmd.bindIndexBuffer(index_buffer.buffer, 0, vk::IndexType::eUint32);
cmd.drawIndexed(static_cast<uint32_t>(m_indices.size()), 1, 0, 0, 0);
m_renderer.m_vk.get_current_frame().deletion_queue.emplace([=, this]() {
m_renderer.destroy_buffer(index_buffer);
m_renderer.destroy_buffer(vertex_buffer);
});
m_vertices.clear();
m_indices.clear();
}
auto VulkanRenderer::GL::use_pipeline(Pipeline &pipeline) -> void
{
Pipeline *resolved_pipeline = &pipeline;
if (&pipeline == &m_renderer.m_vk.mesh_pipeline
|| &pipeline == &m_renderer.m_vk.mesh_pipeline_culled) {
resolved_pipeline = m_culling_enabled
? &m_renderer.m_vk.mesh_pipeline_culled
: &m_renderer.m_vk.mesh_pipeline;
} else if (&pipeline == &m_renderer.m_vk.triangle_pipeline
|| &pipeline == &m_renderer.m_vk.triangle_pipeline_culled) {
resolved_pipeline = m_culling_enabled
? &m_renderer.m_vk.triangle_pipeline_culled
: &m_renderer.m_vk.triangle_pipeline;
}
if (resolved_pipeline == m_active_pipeline) {
return;
}
flush();
m_active_pipeline = resolved_pipeline;
bind_pipeline_if_needed();
}
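// Transform state. set_transform() flushes pending geometry so the new matrix
// only affects vertices recorded afterwards; push_transform()/pop_transform()
// maintain a simple matrix stack on top of it.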
auto VulkanRenderer::GL::set_transform(smath::Mat4 const &transform) -> void
{
flush();
m_transform = transform;
}
auto VulkanRenderer::GL::push_transform() -> void
{
m_transform_stack.push_back(m_transform);
}
auto VulkanRenderer::GL::pop_transform() -> void
{
if (m_transform_stack.empty()) {
return;
}
flush();
m_transform = m_transform_stack.back();
m_transform_stack.pop_back();
}
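// Draws a filled quad covering [pos, pos + size], rotated by `rotation`
// radians about its center. The corner UVs match the default rectangle mesh
// set up in default_data_init().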
auto VulkanRenderer::GL::draw_rectangle(smath::Vec2 pos, smath::Vec2 size,
smath::Vec4 rect_color, float rotation) -> void
{
auto const half_size = size * 0.5f;
auto const center = pos + half_size;
auto rotate = [&](smath::Vec2 const &p) {
float const c = std::cos(rotation);
float const s = std::sin(rotation);
return smath::Vec2 { c * p.x() - s * p.y(), s * p.x() + c * p.y() };
};
auto const br
= center + rotate(smath::Vec2 { half_size.x(), -half_size.y() });
auto const tr
= center + rotate(smath::Vec2 { half_size.x(), half_size.y() });
auto const bl
= center + rotate(smath::Vec2 { -half_size.x(), -half_size.y() });
auto const tl
= center + rotate(smath::Vec2 { -half_size.x(), half_size.y() });
begin(GeometryKind::Quads);
color(rect_color);
uv(smath::Vec2 { 1.0f, 1.0f });
vert(smath::Vec3 { br.x(), br.y(), 0.0f });
color(rect_color);
uv(smath::Vec2 { 1.0f, 0.0f });
vert(smath::Vec3 { tr.x(), tr.y(), 0.0f });
color(rect_color);
uv(smath::Vec2 { 0.0f, 1.0f });
vert(smath::Vec3 { bl.x(), bl.y(), 0.0f });
color(rect_color);
uv(smath::Vec2 { 0.0f, 0.0f });
vert(smath::Vec3 { tl.x(), tl.y(), 0.0f });
end();
}
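// Draws a UV sphere around `center` as a triangle list: `rings` latitude bands
// by `segments` longitude slices, two triangles per patch. Normals are the
// unit sphere directions, so they double as the offsets from the center.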
auto VulkanRenderer::GL::draw_sphere(smath::Vec3 center, float radius,
int rings, int segments, std::optional<smath::Vec4> sphere_color) -> void
{
assert(m_drawing && "begin_drawing must be called first");
if (radius <= 0.0f)
return;
if (rings < 2)
rings = 2;
if (segments < 3)
segments = 3;
float const pi = 3.14159265358979323846f;
if (sphere_color.has_value())
color(*sphere_color);
begin(GeometryKind::Triangles);
for (int y = 0; y < rings; y++) {
float const v1 = static_cast<float>(y) / static_cast<float>(rings);
float const v2 = static_cast<float>(y + 1) / static_cast<float>(rings);
float const theta1 = v1 * pi;
float const theta2 = v2 * pi;
float const s1 = std::sin(theta1);
float const c1 = std::cos(theta1);
float const s2 = std::sin(theta2);
float const c2 = std::cos(theta2);
for (int x = 0; x < segments; x++) {
float const u1
= static_cast<float>(x) / static_cast<float>(segments);
float const u2
= static_cast<float>(x + 1) / static_cast<float>(segments);
float const phi1 = u1 * (2.0f * pi);
float const phi2 = u2 * (2.0f * pi);
float const sp1 = std::sin(phi1);
float const cp1 = std::cos(phi1);
float const sp2 = std::sin(phi2);
float const cp2 = std::cos(phi2);
smath::Vec3 n1 { s1 * cp1, c1, s1 * sp1 };
smath::Vec3 n2 { s1 * cp2, c1, s1 * sp2 };
smath::Vec3 n3 { s2 * cp1, c2, s2 * sp1 };
smath::Vec3 n4 { s2 * cp2, c2, s2 * sp2 };
normal(n1);
uv(smath::Vec2 { u1, 1.0f - v1 });
vert(center + n1 * radius);
normal(n2);
uv(smath::Vec2 { u2, 1.0f - v1 });
vert(center + n2 * radius);
normal(n3);
uv(smath::Vec2 { u1, 1.0f - v2 });
vert(center + n3 * radius);
normal(n2);
uv(smath::Vec2 { u2, 1.0f - v1 });
vert(center + n2 * radius);
normal(n4);
uv(smath::Vec2 { u2, 1.0f - v2 });
vert(center + n4 * radius);
normal(n3);
uv(smath::Vec2 { u1, 1.0f - v2 });
vert(center + n3 * radius);
}
}
end();
}
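// Draws a mesh that was already uploaded to the GPU (see upload_mesh) through
// the mesh pipeline, bypassing the immediate-mode vertex buffer. Buffered
// geometry is flushed first so draw order is preserved.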
auto VulkanRenderer::GL::draw_mesh(GPUMeshBuffers const &mesh,
smath::Mat4 const &transform, uint32_t index_count, uint32_t first_index,
int32_t vertex_offset) -> void
{
assert(m_drawing && "begin_drawing must be called first");
flush();
Pipeline &mesh_pipeline = m_culling_enabled
? m_renderer.m_vk.mesh_pipeline_culled
: m_renderer.m_vk.mesh_pipeline;
use_pipeline(mesh_pipeline);
auto const image_set {
m_renderer.m_vk.get_current_frame().frame_descriptors.allocate(
m_renderer.m_logger, m_renderer.m_vkb.dev.device,
m_renderer.m_vk.single_image_descriptor_layout)
};
auto const *image
= m_bound_texture ? m_bound_texture : &m_renderer.m_vk.error_image;
DescriptorWriter()
.write_image(0, image->image_view,
m_renderer.m_vk.default_sampler_nearest.get(),
static_cast<VkImageLayout>(vk::ImageLayout::eShaderReadOnlyOptimal),
VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
.update_set(m_renderer.m_vkb.dev.device, image_set);
auto vk_image_set { vk::DescriptorSet { image_set } };
m_cmd.bindDescriptorSets(vk::PipelineBindPoint::eGraphics,
mesh_pipeline.get_layout(), 0, vk_image_set, {});
GPUDrawPushConstants push_constants {};
push_constants.world_matrix = transform;
push_constants.vertex_buffer = mesh.vertex_buffer_address;
m_cmd.pushConstants(mesh_pipeline.get_layout(),
vk::ShaderStageFlagBits::eVertex, 0, sizeof(push_constants),
&push_constants);
m_cmd.bindIndexBuffer(mesh.index_buffer.buffer, 0, vk::IndexType::eUint32);
m_cmd.drawIndexed(index_count, 1, first_index, vertex_offset, 0);
}
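// Fully explicit draw path: the caller supplies the pipeline, descriptor set,
// vertex/index buffers and raw push-constant bytes. Any open primitive is
// closed and buffered geometry flushed before recording the draw.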
auto VulkanRenderer::GL::draw_indexed(Pipeline &pipeline,
vk::DescriptorSet descriptor_set, AllocatedBuffer const &vertex_buffer,
AllocatedBuffer const &index_buffer, uint32_t index_count,
std::span<std::byte const> push_constants) -> void
{
assert(m_drawing && "begin_drawing must be called first");
if (m_inside_primitive) {
end();
}
flush();
use_pipeline(pipeline);
auto cmd { m_cmd };
cmd.bindDescriptorSets(vk::PipelineBindPoint::eGraphics,
pipeline.get_layout(), 0, descriptor_set, {});
if (!push_constants.empty()) {
cmd.pushConstants(pipeline.get_layout(),
vk::ShaderStageFlagBits::eVertex, 0,
static_cast<uint32_t>(push_constants.size()),
push_constants.data());
}
vk::DeviceSize offset { 0 };
cmd.bindVertexBuffers(0, vertex_buffer.buffer, offset);
cmd.bindIndexBuffer(index_buffer.buffer, 0, vk::IndexType::eUint32);
cmd.drawIndexed(index_count, 1, 0, 0, 0);
}
auto VulkanRenderer::GL::push_vertex(smath::Vec3 const &pos) -> void
{
assert(m_drawing && "begin_drawing must be called first");
Vertex v {};
v.position = pos;
v.u = m_current_uv.x();
v.v = m_current_uv.y();
v.normal = m_current_normal;
v.color = m_current_color;
m_vertices.emplace_back(v);
}
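// Converts the vertices recorded for the current primitive into triangle-list
// indices. Quads use the vertex order (v0, v1, v2, v3) -> triangles (0, 1, 2)
// and (2, 1, 3), matching the corner order emitted by draw_rectangle().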
auto VulkanRenderer::GL::emit_indices(size_t start, size_t count) -> void
{
switch (m_current_kind) {
case GeometryKind::Triangles: {
for (size_t i = 0; (i + 2) < count; i += 3) {
m_indices.emplace_back(static_cast<uint32_t>(start + i + 0));
m_indices.emplace_back(static_cast<uint32_t>(start + i + 1));
m_indices.emplace_back(static_cast<uint32_t>(start + i + 2));
}
break;
}
case GeometryKind::TriangleStrip: {
if (count < 3)
break;
for (size_t i = 0; (i + 2) < count; i++) {
if (i % 2 == 0) {
m_indices.emplace_back(static_cast<uint32_t>(start + i + 0));
m_indices.emplace_back(static_cast<uint32_t>(start + i + 1));
m_indices.emplace_back(static_cast<uint32_t>(start + i + 2));
} else {
m_indices.emplace_back(static_cast<uint32_t>(start + i + 1));
m_indices.emplace_back(static_cast<uint32_t>(start + i + 0));
m_indices.emplace_back(static_cast<uint32_t>(start + i + 2));
}
}
break;
}
case GeometryKind::TriangleFan: {
if (count < 3)
break;
for (size_t i = 1; (i + 1) < count; i++) {
m_indices.emplace_back(static_cast<uint32_t>(start));
m_indices.emplace_back(static_cast<uint32_t>(start + i));
m_indices.emplace_back(static_cast<uint32_t>(start + i + 1));
}
break;
}
case GeometryKind::Quads: {
if (count < 4)
break;
size_t const quad_count { count / 4 };
for (size_t q = 0; q < quad_count; q++) {
size_t const base = start + q * 4;
m_indices.emplace_back(static_cast<uint32_t>(base + 0));
m_indices.emplace_back(static_cast<uint32_t>(base + 1));
m_indices.emplace_back(static_cast<uint32_t>(base + 2));
m_indices.emplace_back(static_cast<uint32_t>(base + 2));
m_indices.emplace_back(static_cast<uint32_t>(base + 1));
m_indices.emplace_back(static_cast<uint32_t>(base + 3));
}
break;
}
}
}
auto VulkanRenderer::GL::bind_pipeline_if_needed() -> void
{
if (!m_drawing || !m_active_pipeline)
return;
m_cmd.bindPipeline(
vk::PipelineBindPoint::eGraphics, m_active_pipeline->get());
}
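// Windowed (SDL) constructor: initializes the full Vulkan stack and ImGui.
// The KMS overload below renders directly to a display and skips ImGui.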
VulkanRenderer::VulkanRenderer(SDL_Window *window, Logger &logger,
std::span<std::string const> instance_extensions,
std::span<std::string const> device_extensions)
: gl(*this)
, m_window(window)
, m_logger(logger)
, m_extra_instance_extensions(
instance_extensions.begin(), instance_extensions.end())
, m_extra_device_extensions(
device_extensions.begin(), device_extensions.end())
{
if (m_window == nullptr) {
throw std::runtime_error("VulkanRenderer requires a valid window");
}
m_use_kms = false;
m_imgui_enabled = true;
vk_init();
swapchain_init();
commands_init();
sync_init();
descriptors_init();
pipelines_init();
default_data_init();
imgui_init();
}
VulkanRenderer::VulkanRenderer(KmsSurfaceConfig /*config*/, Logger &logger,
std::span<std::string const> instance_extensions,
std::span<std::string const> device_extensions)
: gl(*this)
, m_logger(logger)
, m_extra_instance_extensions(
instance_extensions.begin(), instance_extensions.end())
, m_extra_device_extensions(
device_extensions.begin(), device_extensions.end())
{
m_use_kms = true;
m_imgui_enabled = false;
vk_init();
swapchain_init();
commands_init();
sync_init();
descriptors_init();
pipelines_init();
default_data_init();
}
VulkanRenderer::~VulkanRenderer()
{
m_device.waitIdle();
for (auto &frame_data : m_vk.frames) {
frame_data.deletion_queue.flush();
frame_data.main_command_buffer.reset();
frame_data.command_pool.reset();
frame_data.swapchain_semaphore.reset();
frame_data.render_fence.reset();
}
m_vk.present_semaphores.clear();
m_vk.swapchain_image_views.clear();
m_vk.imm_command_buffer.reset();
m_vk.imm_command_pool.reset();
m_vk.imm_fence.reset();
m_vk.triangle_pipeline.reset();
m_vk.triangle_pipeline_culled.reset();
m_vk.mesh_pipeline.reset();
m_vk.mesh_pipeline_culled.reset();
m_vk.default_sampler_linear.reset();
m_vk.default_sampler_nearest.reset();
if (m_latest_screenshot) {
destroy_image(*m_latest_screenshot);
m_latest_screenshot.reset();
}
destroy_swapchain();
destroy_draw_image();
destroy_msaa_color_image();
destroy_depth_image();
m_vk.deletion_queue.flush();
if (m_vk.allocator) {
vmaDestroyAllocator(m_vk.allocator);
m_vk.allocator = nullptr;
}
if (m_vk.surface) {
if (m_use_kms) {
m_instance.destroySurfaceKHR(m_vk.surface);
} else {
SDL_Vulkan_DestroySurface(m_vkb.instance,
static_cast<VkSurfaceKHR>(m_vk.surface), nullptr);
}
m_vk.surface = nullptr;
}
vkb::destroy_device(m_vkb.dev);
vkb::destroy_instance(m_vkb.instance);
}
auto VulkanRenderer::resize(uint32_t width, uint32_t height) -> void
{
recreate_swapchain(width, height);
}
auto VulkanRenderer::set_offscreen_extent(vk::Extent2D extent) -> void
{
if (extent.width == 0 || extent.height == 0) {
return;
}
if (m_vk.draw_image.extent.width == extent.width
&& m_vk.draw_image.extent.height == extent.height) {
return;
}
m_device.waitIdle();
destroy_draw_image();
destroy_msaa_color_image();
destroy_depth_image();
create_draw_image(extent.width, extent.height);
create_msaa_color_image(extent.width, extent.height);
create_depth_image(extent.width, extent.height);
}
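// Anti-aliasing control. set_antialiasing() defers the change to the render
// thread via the render-command queue; set_antialiasing_immediate() applies it
// right away. apply_antialiasing() clamps the request to the device's
// supported framebuffer sample counts, then rebuilds the MSAA/depth targets
// and the pipelines.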
auto VulkanRenderer::set_antialiasing(AntiAliasingKind kind) -> void
{
enqueue_render_command(RenderCommand {
RenderCommand::SetAntiAliasing { kind },
});
}
auto VulkanRenderer::set_antialiasing_immediate(AntiAliasingKind kind) -> void
{
apply_antialiasing(kind);
}
auto VulkanRenderer::apply_antialiasing(AntiAliasingKind kind) -> void
{
auto requested_samples = [&](AntiAliasingKind aa) {
switch (aa) {
case AntiAliasingKind::NONE:
return vk::SampleCountFlagBits::e1;
case AntiAliasingKind::MSAA_2X:
return vk::SampleCountFlagBits::e2;
case AntiAliasingKind::MSAA_4X:
return vk::SampleCountFlagBits::e4;
case AntiAliasingKind::MSAA_8X:
return vk::SampleCountFlagBits::e8;
}
return vk::SampleCountFlagBits::e1;
}(kind);
auto best_supported = [&](vk::SampleCountFlagBits requested) {
auto const supported = m_vk.supported_framebuffer_samples;
auto pick_if_supported = [&](vk::SampleCountFlagBits candidate) {
return (supported & candidate) == candidate;
};
if (requested >= vk::SampleCountFlagBits::e64
&& pick_if_supported(vk::SampleCountFlagBits::e64)) {
return vk::SampleCountFlagBits::e64;
}
if (requested >= vk::SampleCountFlagBits::e32
&& pick_if_supported(vk::SampleCountFlagBits::e32)) {
return vk::SampleCountFlagBits::e32;
}
if (requested >= vk::SampleCountFlagBits::e16
&& pick_if_supported(vk::SampleCountFlagBits::e16)) {
return vk::SampleCountFlagBits::e16;
}
if (requested >= vk::SampleCountFlagBits::e8
&& pick_if_supported(vk::SampleCountFlagBits::e8)) {
return vk::SampleCountFlagBits::e8;
}
if (requested >= vk::SampleCountFlagBits::e4
&& pick_if_supported(vk::SampleCountFlagBits::e4)) {
return vk::SampleCountFlagBits::e4;
}
if (requested >= vk::SampleCountFlagBits::e2
&& pick_if_supported(vk::SampleCountFlagBits::e2)) {
return vk::SampleCountFlagBits::e2;
}
return vk::SampleCountFlagBits::e1;
}(requested_samples);
auto kind_for_samples = [](vk::SampleCountFlagBits samples) {
switch (samples) {
case vk::SampleCountFlagBits::e2:
return AntiAliasingKind::MSAA_2X;
case vk::SampleCountFlagBits::e4:
return AntiAliasingKind::MSAA_4X;
case vk::SampleCountFlagBits::e8:
return AntiAliasingKind::MSAA_8X;
default:
return AntiAliasingKind::NONE;
}
};
auto const effective_kind = kind_for_samples(best_supported);
if (m_vk.antialiasing_kind == effective_kind
&& m_vk.msaa_samples == best_supported) {
return;
}
if (best_supported != requested_samples) {
m_logger.warn("Requested antialiasing {} but using {}",
static_cast<int>(kind), static_cast<int>(effective_kind));
}
m_vk.antialiasing_kind = effective_kind;
m_vk.msaa_samples = best_supported;
if (!m_vk.swapchain || m_vk.swapchain_extent.width == 0
|| m_vk.swapchain_extent.height == 0) {
return;
}
m_device.waitIdle();
create_msaa_color_image(
m_vk.swapchain_extent.width, m_vk.swapchain_extent.height);
create_depth_image(
m_vk.swapchain_extent.width, m_vk.swapchain_extent.height);
pipelines_init();
}
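// Render-command queue: commands are enqueued under m_command_mutex and
// drained early in the frame by process_render_commands().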
auto VulkanRenderer::enqueue_render_command(RenderCommand &&command) -> void
{
std::scoped_lock lock { m_command_mutex };
m_pending_render_commands.emplace_back(std::move(command));
}
auto VulkanRenderer::process_render_commands() -> void
{
std::vector<RenderCommand> commands;
{
std::scoped_lock lock { m_command_mutex };
commands.swap(m_pending_render_commands);
}
for (auto &command : commands) {
std::visit(
[&](auto &&payload) {
using Payload = std::decay_t<decltype(payload)>;
if constexpr (std::is_same_v<Payload,
RenderCommand::SetAntiAliasing>) {
apply_antialiasing(payload.kind);
}
},
command.payload);
}
}
auto VulkanRenderer::immediate_submit(
std::function<void(vk::CommandBuffer cmd)> &&function,
bool flush_frame_deletion_queue, bool clear_frame_descriptors) -> void
{
m_device.resetFences(m_vk.imm_fence.get());
m_vk.imm_command_buffer.get().reset();
auto cmd { m_vk.imm_command_buffer.get() };
vk::CommandBufferBeginInfo cmd_begin_info {};
cmd_begin_info.flags = vk::CommandBufferUsageFlagBits::eOneTimeSubmit;
cmd.begin(cmd_begin_info);
function(cmd);
cmd.end();
auto cmd_info { vkinit::command_buffer_submit_info(cmd) };
auto submit { vkinit::submit_info2(&cmd_info, nullptr, nullptr) };
m_vk.graphics_queue.submit2(submit, m_vk.imm_fence.get());
VK_CHECK(m_logger,
m_device.waitForFences(m_vk.imm_fence.get(), true, 9'999'999'999));
if (flush_frame_deletion_queue) {
m_vk.get_current_frame().deletion_queue.flush();
}
if (clear_frame_descriptors) {
m_vk.get_current_frame().frame_descriptors.clear_pools(
m_vkb.dev.device);
}
}
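// Scans all physical devices for a KMS display, picks the mode with the
// largest visible area (ties broken by refresh rate), finds a plane that
// supports that display, and creates a VK_KHR_display plane surface.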
auto VulkanRenderer::setup_kms_surface() -> void
{
auto const devices = m_instance.enumeratePhysicalDevices();
if (devices.empty()) {
m_logger.err("No Vulkan physical devices available for KMS");
throw std::runtime_error("App init fail");
}
for (auto const &device : devices) {
auto const displays = device.getDisplayPropertiesKHR();
if (displays.empty()) {
continue;
}
for (auto const &display_props : displays) {
auto const modes
= device.getDisplayModePropertiesKHR(display_props.display);
if (modes.empty()) {
continue;
}
auto const best_mode_it = std::max_element(modes.begin(),
modes.end(), [](auto const &lhs, auto const &rhs) {
auto const lhs_extent = lhs.parameters.visibleRegion;
auto const rhs_extent = rhs.parameters.visibleRegion;
auto const lhs_area
= static_cast<uint64_t>(lhs_extent.width)
* static_cast<uint64_t>(lhs_extent.height);
auto const rhs_area
= static_cast<uint64_t>(rhs_extent.width)
* static_cast<uint64_t>(rhs_extent.height);
if (lhs_area == rhs_area) {
return lhs.parameters.refreshRate
< rhs.parameters.refreshRate;
}
return lhs_area < rhs_area;
});
auto const planes = device.getDisplayPlanePropertiesKHR();
std::optional<uint32_t> plane_index;
uint32_t plane_stack_index { 0 };
for (uint32_t i = 0; i < planes.size(); ++i) {
auto const supported_displays
= device.getDisplayPlaneSupportedDisplaysKHR(i);
if (std::find(supported_displays.begin(),
supported_displays.end(), display_props.display)
!= supported_displays.end()) {
plane_index = i;
plane_stack_index = planes[i].currentStackIndex;
break;
}
}
if (!plane_index) {
continue;
}
auto const extent = best_mode_it->parameters.visibleRegion;
KmsState state {};
state.display = display_props.display;
state.mode = best_mode_it->displayMode;
state.extent = extent;
state.plane_index = *plane_index;
state.plane_stack_index = plane_stack_index;
if (display_props.displayName) {
state.display_name = display_props.displayName;
}
m_kms_state = state;
m_kms_extent = extent;
m_kms_physical_device = device;
m_kms_physical_device_set = true;
m_logger.info("Using KMS display {} ({}x{} @ {} mHz)",
state.display_name.empty() ? "unnamed" : state.display_name,
extent.width, extent.height,
best_mode_it->parameters.refreshRate);
break;
}
if (m_kms_state) {
break;
}
}
if (!m_kms_state) {
m_logger.err("No suitable KMS display found");
throw std::runtime_error("App init fail");
}
vk::DisplaySurfaceCreateInfoKHR surface_info {};
surface_info.displayMode = m_kms_state->mode;
surface_info.planeIndex = m_kms_state->plane_index;
surface_info.planeStackIndex = m_kms_state->plane_stack_index;
surface_info.transform = vk::SurfaceTransformFlagBitsKHR::eIdentity;
surface_info.alphaMode = vk::DisplayPlaneAlphaFlagBitsKHR::eOpaque;
surface_info.globalAlpha = 1.0f;
surface_info.imageExtent = m_kms_state->extent;
m_vk.surface = m_instance.createDisplayPlaneSurfaceKHR(surface_info);
}
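// Core Vulkan setup via vk-bootstrap: instance with a debug callback routed to
// the logger, surface (KMS or SDL), physical device requiring Vulkan 1.3
// synchronization2/dynamicRendering plus buffer device address, logical
// device, graphics queue, and the VMA allocator.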
auto VulkanRenderer::vk_init() -> void
{
VULKAN_HPP_DEFAULT_DISPATCHER.init(vkGetInstanceProcAddr);
vkb::InstanceBuilder instance_builder {};
instance_builder
.enable_extension(VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME)
.set_app_name("Lunar")
.set_engine_name("Lunar")
.require_api_version(1, 3, 0)
.set_debug_callback_user_data_pointer(this)
.set_debug_callback(
[](VkDebugUtilsMessageSeverityFlagBitsEXT message_severity,
VkDebugUtilsMessageTypeFlagsEXT message_type,
VkDebugUtilsMessengerCallbackDataEXT const *callback_data,
void *user_data) {
auto renderer { reinterpret_cast<VulkanRenderer *>(user_data) };
auto level { Logger::Level::Debug };
if (message_severity
& VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT) {
level = Logger::Level::Error;
} else if (message_severity
& VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT) {
level = Logger::Level::Warning;
} else if (message_severity
& VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT) {
level = Logger::Level::Info;
}
renderer->m_logger.log(level,
std::format("[Vulkan] [{}] {}",
vkb::to_string_message_type(message_type),
callback_data->pMessage));
return VK_FALSE;
});
if (m_use_kms) {
instance_builder.enable_extension(VK_KHR_DISPLAY_EXTENSION_NAME);
}
for (auto const &extension : m_extra_instance_extensions) {
instance_builder.enable_extension(extension.c_str());
}
#ifndef NDEBUG
instance_builder.request_validation_layers();
#endif
auto const instance_builder_ret { instance_builder.build() };
if (!instance_builder_ret) {
std::println(std::cerr, "Failed to create Vulkan instance. Error: {}",
instance_builder_ret.error().message());
throw std::runtime_error("App init fail");
}
m_vkb.instance = instance_builder_ret.value();
m_instance = vk::Instance { m_vkb.instance.instance };
VULKAN_HPP_DEFAULT_DISPATCHER.init(m_instance);
if (m_use_kms) {
setup_kms_surface();
} else {
VkSurfaceKHR raw_surface {};
if (!SDL_Vulkan_CreateSurface(
m_window, m_vkb.instance, nullptr, &raw_surface)) {
m_logger.err("Failed to create vulkan surface");
throw std::runtime_error("App init fail");
}
m_vk.surface = vk::SurfaceKHR { raw_surface };
}
vkb::PhysicalDeviceSelector phys_device_selector { m_vkb.instance };
VkPhysicalDeviceVulkan13Features features_13 {};
features_13.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_3_FEATURES;
features_13.pNext = nullptr;
features_13.synchronization2 = VK_TRUE;
features_13.dynamicRendering = VK_TRUE;
VkPhysicalDeviceBufferDeviceAddressFeatures
buffer_device_address_features {};
buffer_device_address_features.sType
= VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES;
buffer_device_address_features.bufferDeviceAddress = VK_TRUE;
std::vector<char const *> desired_extensions {
VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME,
VK_KHR_EXTERNAL_MEMORY_FD_EXTENSION_NAME,
VK_EXT_EXTERNAL_MEMORY_DMA_BUF_EXTENSION_NAME,
VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME,
VK_EXT_IMAGE_DRM_FORMAT_MODIFIER_EXTENSION_NAME,
VK_KHR_IMAGE_FORMAT_LIST_EXTENSION_NAME,
VK_KHR_BIND_MEMORY_2_EXTENSION_NAME,
VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME,
VK_KHR_MAINTENANCE1_EXTENSION_NAME,
VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME,
VK_KHR_COPY_COMMANDS_2_EXTENSION_NAME,
VK_KHR_BUFFER_DEVICE_ADDRESS_EXTENSION_NAME,
};
std::vector<char const *> required_extensions {};
for (auto const &extension : m_extra_device_extensions) {
required_extensions.push_back(extension.c_str());
}
phys_device_selector.set_surface(m_vk.surface)
.add_desired_extensions(desired_extensions)
.set_required_features_13(features_13)
.add_required_extension_features(buffer_device_address_features);
if (!required_extensions.empty()) {
phys_device_selector.add_required_extensions(required_extensions);
}
auto physical_device_selector_return { phys_device_selector.select() };
if (!physical_device_selector_return) {
std::println(std::cerr,
"Failed to find Vulkan physical device. Error: {}",
physical_device_selector_return.error().message());
throw std::runtime_error("App init fail");
}
m_vkb.phys_dev = physical_device_selector_return.value();
m_physical_device = vk::PhysicalDevice { m_vkb.phys_dev.physical_device };
m_logger.info("Chosen Vulkan physical device: {}",
m_vkb.phys_dev.properties.deviceName);
if (m_use_kms && m_kms_physical_device_set
&& m_physical_device != m_kms_physical_device) {
m_logger.warn("KMS display is not on the selected physical device");
}
auto const props = m_physical_device.getProperties();
m_vk.supported_framebuffer_samples
= props.limits.framebufferColorSampleCounts
& props.limits.framebufferDepthSampleCounts;
m_vk.msaa_samples = vk::SampleCountFlagBits::e1;
m_vk.antialiasing_kind = AntiAliasingKind::NONE;
vkb::DeviceBuilder device_builder { m_vkb.phys_dev };
auto dev_ret { device_builder.build() };
if (!dev_ret) {
std::println(std::cerr, "Failed to create Vulkan device. Error: {}",
dev_ret.error().message());
throw std::runtime_error("App init fail");
}
m_vkb.dev = dev_ret.value();
m_device = vk::Device { m_vkb.dev.device };
VULKAN_HPP_DEFAULT_DISPATCHER.init(m_device);
auto queue_family_ret { m_vkb.dev.get_queue_index(
vkb::QueueType::graphics) };
if (!queue_family_ret) {
std::println(std::cerr, "Failed to get graphics queue. Error: {}",
queue_family_ret.error().message());
throw std::runtime_error("App init fail");
}
m_vk.graphics_queue_family = queue_family_ret.value();
m_vk.graphics_queue = m_device.getQueue(m_vk.graphics_queue_family, 0);
if (m_use_kms) {
if (!m_physical_device.getSurfaceSupportKHR(
m_vk.graphics_queue_family, m_vk.surface)) {
m_logger.err("Selected device does not support KMS surface");
throw std::runtime_error("App init fail");
}
}
VmaAllocatorCreateInfo allocator_ci {};
allocator_ci.physicalDevice = m_vkb.phys_dev.physical_device;
allocator_ci.device = m_vkb.dev.device;
allocator_ci.instance = m_vkb.instance.instance;
allocator_ci.flags = VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT;
vmaCreateAllocator(&allocator_ci, &m_vk.allocator);
}
auto VulkanRenderer::swapchain_init() -> void
{
uint32_t width { 0 };
uint32_t height { 0 };
if (m_use_kms) {
width = m_kms_extent.width;
height = m_kms_extent.height;
} else {
int w {}, h {};
SDL_GetWindowSize(m_window, &w, &h);
width = static_cast<uint32_t>(w);
height = static_cast<uint32_t>(h);
}
create_swapchain(width, height);
create_draw_image(width, height);
create_msaa_color_image(width, height);
create_depth_image(width, height);
}
auto VulkanRenderer::commands_init() -> void
{
vk::CommandPoolCreateInfo ci {};
ci.flags = vk::CommandPoolCreateFlagBits::eResetCommandBuffer;
ci.queueFamilyIndex = m_vk.graphics_queue_family;
for (auto &frame_data : m_vk.frames) {
frame_data.command_pool = m_device.createCommandPoolUnique(ci);
vk::CommandBufferAllocateInfo ai {};
ai.commandPool = frame_data.command_pool.get();
ai.level = vk::CommandBufferLevel::ePrimary;
ai.commandBufferCount = 1;
frame_data.main_command_buffer
= std::move(m_device.allocateCommandBuffersUnique(ai).front());
}
m_vk.imm_command_pool = m_device.createCommandPoolUnique(ci);
vk::CommandBufferAllocateInfo ai {};
ai.commandPool = m_vk.imm_command_pool.get();
ai.level = vk::CommandBufferLevel::ePrimary;
ai.commandBufferCount = 1;
m_vk.imm_command_buffer
= std::move(m_device.allocateCommandBuffersUnique(ai).front());
}
auto VulkanRenderer::sync_init() -> void
{
vk::FenceCreateInfo fence_ci {};
fence_ci.flags = vk::FenceCreateFlagBits::eSignaled;
vk::SemaphoreCreateInfo semaphore_ci {};
for (auto &frame_data : m_vk.frames) {
frame_data.render_fence = m_device.createFenceUnique(fence_ci);
frame_data.swapchain_semaphore
= m_device.createSemaphoreUnique(semaphore_ci);
}
m_vk.imm_fence = m_device.createFenceUnique(fence_ci);
}
auto VulkanRenderer::descriptors_init() -> void
{
m_vk.deletion_queue.emplace([&]() {
m_device.destroyDescriptorSetLayout(
m_vk.gpu_scene_data_descriptor_layout);
m_device.destroyDescriptorSetLayout(
m_vk.single_image_descriptor_layout);
});
for (unsigned int i = 0; i < FRAME_OVERLAP; i++) {
std::vector<DescriptorAllocatorGrowable::PoolSizeRatio> frame_sizes = {
{ VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 3 },
{ VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 3 },
{ VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 3 },
{ VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 4 },
};
m_vk.frames[i].frame_descriptors = DescriptorAllocatorGrowable {};
m_vk.frames[i].frame_descriptors.init(
m_vkb.dev.device, 1000, frame_sizes);
m_vk.deletion_queue.emplace([&, i]() {
m_vk.frames[i].frame_descriptors.destroy_pools(m_vkb.dev.device);
});
}
auto scene_layout_raw
= DescriptorLayoutBuilder()
.add_binding(0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER)
.build(m_logger, m_vkb.dev.device,
VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT);
m_vk.gpu_scene_data_descriptor_layout
= vk::DescriptorSetLayout { scene_layout_raw };
auto single_layout_raw
= DescriptorLayoutBuilder()
.add_binding(0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
.build(m_logger, m_vkb.dev.device, VK_SHADER_STAGE_FRAGMENT_BIT);
m_vk.single_image_descriptor_layout
= vk::DescriptorSetLayout { single_layout_raw };
}
auto VulkanRenderer::pipelines_init() -> void
{
triangle_pipeline_init();
mesh_pipeline_init();
}
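// Pipeline creation. SPIR-V blobs are embedded at compile time with #embed.
// Each pipeline is built in a culled and a non-culled variant, and the current
// MSAA sample count is baked in, which is why pipelines_init() is re-run when
// anti-aliasing changes.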
auto VulkanRenderer::triangle_pipeline_init() -> void
{
Pipeline::Builder builder { m_device, m_logger };
uint8_t triangle_vert_shader_data[] {
#embed "triangle_vert.spv"
};
auto triangle_vert_shader = vkutil::load_shader_module(
std::span<uint8_t>(
triangle_vert_shader_data, sizeof(triangle_vert_shader_data)),
m_device);
if (!triangle_vert_shader) {
m_logger.err("Failed to load triangle vert shader");
}
uint8_t triangle_frag_shader_data[] {
#embed "triangle_frag.spv"
};
auto triangle_frag_shader = vkutil::load_shader_module(
std::span<uint8_t>(
triangle_frag_shader_data, sizeof(triangle_frag_shader_data)),
m_device);
if (!triangle_frag_shader) {
m_logger.err("Failed to load triangle frag shader");
}
m_vk.triangle_pipeline
= builder.build_graphics([&](GraphicsPipelineBuilder &pipeline_builder)
-> GraphicsPipelineBuilder & {
return pipeline_builder
.set_shaders(
triangle_vert_shader.get(), triangle_frag_shader.get())
.set_input_topology(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST)
.set_polygon_mode(VK_POLYGON_MODE_FILL)
.set_multisampling(
static_cast<VkSampleCountFlagBits>(m_vk.msaa_samples))
.enable_blending_additive()
.disable_depth_testing()
.set_color_attachment_format(
static_cast<VkFormat>(m_vk.draw_image.format))
.set_depth_format(
static_cast<VkFormat>(m_vk.depth_image.format));
});
m_vk.triangle_pipeline_culled
= builder.build_graphics([&](GraphicsPipelineBuilder &pipeline_builder)
-> GraphicsPipelineBuilder & {
return pipeline_builder
.set_shaders(
triangle_vert_shader.get(), triangle_frag_shader.get())
.set_input_topology(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST)
.set_polygon_mode(VK_POLYGON_MODE_FILL)
.set_cull_mode(
VK_CULL_MODE_BACK_BIT, VK_FRONT_FACE_COUNTER_CLOCKWISE)
.set_multisampling(
static_cast<VkSampleCountFlagBits>(m_vk.msaa_samples))
.enable_blending_additive()
.disable_depth_testing()
.set_color_attachment_format(
static_cast<VkFormat>(m_vk.draw_image.format))
.set_depth_format(
static_cast<VkFormat>(m_vk.depth_image.format));
});
}
auto VulkanRenderer::mesh_pipeline_init() -> void
{
Pipeline::Builder builder { m_device, m_logger };
uint8_t triangle_vert_shader_data[] {
#embed "triangle_mesh_vert.spv"
};
auto triangle_vert_shader = vkutil::load_shader_module(
std::span<uint8_t>(
triangle_vert_shader_data, sizeof(triangle_vert_shader_data)),
m_device);
if (!triangle_vert_shader) {
m_logger.err("Failed to load triangle vert shader");
}
uint8_t triangle_frag_shader_data[] {
#embed "tex_image_frag.spv"
};
auto triangle_frag_shader = vkutil::load_shader_module(
std::span<uint8_t>(
triangle_frag_shader_data, sizeof(triangle_frag_shader_data)),
m_device);
if (!triangle_frag_shader) {
m_logger.err("Failed to load triangle frag shader");
}
vk::PushConstantRange push_constant_range {};
push_constant_range.stageFlags = vk::ShaderStageFlagBits::eVertex;
push_constant_range.offset = 0;
push_constant_range.size = sizeof(GPUDrawPushConstants);
std::array push_constant_ranges { push_constant_range };
builder.set_push_constant_ranges(push_constant_ranges);
std::array descriptor_set_layouts { m_vk.single_image_descriptor_layout };
builder.set_descriptor_set_layouts(descriptor_set_layouts);
m_vk.mesh_pipeline
= builder.build_graphics([&](GraphicsPipelineBuilder &pipeline_builder)
-> GraphicsPipelineBuilder & {
return pipeline_builder
.set_shaders(
triangle_vert_shader.get(), triangle_frag_shader.get())
.set_input_topology(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST)
.set_polygon_mode(VK_POLYGON_MODE_FILL)
.set_cull_mode(VK_CULL_MODE_NONE, VK_FRONT_FACE_CLOCKWISE)
.set_multisampling(
static_cast<VkSampleCountFlagBits>(m_vk.msaa_samples))
.disable_blending()
.enable_depth_testing()
.set_color_attachment_format(
static_cast<VkFormat>(m_vk.draw_image.format))
.set_depth_format(
static_cast<VkFormat>(m_vk.depth_image.format));
});
m_vk.mesh_pipeline_culled
= builder.build_graphics([&](GraphicsPipelineBuilder &pipeline_builder)
-> GraphicsPipelineBuilder & {
return pipeline_builder
.set_shaders(
triangle_vert_shader.get(), triangle_frag_shader.get())
.set_input_topology(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST)
.set_polygon_mode(VK_POLYGON_MODE_FILL)
.set_cull_mode(
VK_CULL_MODE_BACK_BIT, VK_FRONT_FACE_COUNTER_CLOCKWISE)
.set_multisampling(
static_cast<VkSampleCountFlagBits>(m_vk.msaa_samples))
.disable_blending()
.enable_depth_testing()
.set_color_attachment_format(
static_cast<VkFormat>(m_vk.draw_image.format))
.set_depth_format(
static_cast<VkFormat>(m_vk.depth_image.format));
});
}
auto VulkanRenderer::imgui_init() -> void
{
VkDescriptorPoolSize pool_sizes[] = {
{ VK_DESCRIPTOR_TYPE_SAMPLER, 1000 },
{ VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1000 },
{ VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 1000 },
{ VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1000 },
{ VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, 1000 },
{ VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, 1000 },
{ VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1000 },
{ VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1000 },
{ VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, 1000 },
{ VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC, 1000 },
{ VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, 1000 },
};
VkDescriptorPoolCreateInfo pool_info {};
pool_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
pool_info.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
pool_info.maxSets = 1000;
pool_info.poolSizeCount = static_cast<uint32_t>(std::size(pool_sizes));
pool_info.pPoolSizes = pool_sizes;
m_vk.imgui_descriptor_pool = m_device.createDescriptorPoolUnique(pool_info);
ImGui::CreateContext();
ImGui_ImplSDL3_InitForVulkan(m_window);
ImGui_ImplVulkan_InitInfo init_info {};
init_info.Instance = m_vkb.instance;
init_info.PhysicalDevice = m_vkb.phys_dev.physical_device;
init_info.Device = m_vkb.dev.device;
init_info.Queue = static_cast<VkQueue>(m_vk.graphics_queue);
init_info.DescriptorPool = m_vk.imgui_descriptor_pool.get();
init_info.MinImageCount = 3;
init_info.ImageCount = 3;
init_info.UseDynamicRendering = true;
init_info.PipelineInfoMain.PipelineRenderingCreateInfo.sType
= VK_STRUCTURE_TYPE_PIPELINE_RENDERING_CREATE_INFO;
init_info.PipelineInfoMain.PipelineRenderingCreateInfo.colorAttachmentCount
= 1;
auto swapchain_format { static_cast<VkFormat>(
m_vk.swapchain_image_format) };
init_info.PipelineInfoMain.PipelineRenderingCreateInfo
.pColorAttachmentFormats
= &swapchain_format;
init_info.PipelineInfoMain.MSAASamples = VK_SAMPLE_COUNT_1_BIT;
ImGui_ImplVulkan_Init(&init_info);
m_vk.deletion_queue.emplace([this]() {
ImGui_ImplVulkan_Shutdown();
ImGui_ImplSDL3_Shutdown();
ImGui::DestroyContext();
m_vk.imgui_descriptor_pool.reset();
});
}
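// Default resources: a test rectangle mesh, 1x1 white/black/gray images, a
// 16x16 magenta/black checkerboard used as the error/fallback texture, and
// nearest/linear samplers.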
auto VulkanRenderer::default_data_init() -> void
{
std::array<Vertex, 4> rect_vertices;
rect_vertices[0].position = { 0.5, -0.5, 0 };
rect_vertices[1].position = { 0.5, 0.5, 0 };
rect_vertices[2].position = { -0.5, -0.5, 0 };
rect_vertices[3].position = { -0.5, 0.5, 0 };
rect_vertices[0].u = 1.0f;
rect_vertices[0].v = 1.0f;
rect_vertices[1].u = 1.0f;
rect_vertices[1].v = 0.0f;
rect_vertices[2].u = 0.0f;
rect_vertices[2].v = 1.0f;
rect_vertices[3].u = 0.0f;
rect_vertices[3].v = 0.0f;
for (auto &v : rect_vertices) {
v.normal = { 0.0f, 0.0f, 1.0f };
}
rect_vertices[0].color = { 0, 0, 0, 1 };
rect_vertices[1].color = { 0.5, 0.5, 0.5, 1 };
rect_vertices[2].color = { 1, 0, 0, 1 };
rect_vertices[3].color = { 0, 1, 0, 1 };
std::array<uint32_t, 6> rect_indices;
rect_indices[0] = 0;
rect_indices[1] = 1;
rect_indices[2] = 2;
rect_indices[3] = 2;
rect_indices[4] = 1;
rect_indices[5] = 3;
m_vk.rectangle = upload_mesh(rect_indices, rect_vertices);
m_vk.deletion_queue.emplace([&]() {
destroy_buffer(m_vk.rectangle.index_buffer);
destroy_buffer(m_vk.rectangle.vertex_buffer);
});
{
// Solid color images
auto const white = smath::pack_unorm4x8(smath::Vec4 { 1, 1, 1, 1 });
m_vk.white_image = create_image(&white, vk::Extent3D { 1, 1, 1 },
vk::Format::eR8G8B8A8Unorm, vk::ImageUsageFlagBits::eSampled);
auto const black = smath::pack_unorm4x8(smath::Vec4 { 0, 0, 0, 1 });
m_vk.black_image = create_image(&black, vk::Extent3D { 1, 1, 1 },
vk::Format::eR8G8B8A8Unorm, vk::ImageUsageFlagBits::eSampled);
auto const gray
= smath::pack_unorm4x8(smath::Vec4 { 0.6f, 0.6f, 0.6f, 1 });
m_vk.gray_image = create_image(&gray, vk::Extent3D { 1, 1, 1 },
vk::Format::eR8G8B8A8Unorm, vk::ImageUsageFlagBits::eSampled);
// Error checkerboard image
auto const magenta = smath::pack_unorm4x8(smath::Vec4 { 1, 0, 1, 1 });
std::array<uint32_t, 16 * 16> checkerboard;
for (int x = 0; x < 16; x++) {
for (int y = 0; y < 16; y++) {
checkerboard[y * 16 + x]
= ((x % 2) ^ (y % 2)) ? magenta : black;
}
}
m_vk.error_image
= create_image(checkerboard.data(), vk::Extent3D { 16, 16, 1 },
vk::Format::eR8G8B8A8Unorm, vk::ImageUsageFlagBits::eSampled);
}
vk::SamplerCreateInfo sampler_ci {};
sampler_ci.magFilter = vk::Filter::eNearest;
sampler_ci.minFilter = vk::Filter::eNearest;
m_vk.default_sampler_nearest = m_device.createSamplerUnique(sampler_ci);
sampler_ci.magFilter = vk::Filter::eLinear;
sampler_ci.minFilter = vk::Filter::eLinear;
m_vk.default_sampler_linear = m_device.createSamplerUnique(sampler_ci);
m_vk.deletion_queue.emplace([&]() {
m_vk.default_sampler_linear.reset();
m_vk.default_sampler_nearest.reset();
destroy_image(m_vk.error_image);
destroy_image(m_vk.gray_image);
destroy_image(m_vk.black_image);
destroy_image(m_vk.white_image);
});
}
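// Per-frame path for the swapchain: wait for the frame fence, emit any pending
// screenshot/Tracy capture, acquire a swapchain image, record the GL pass into
// the offscreen draw image, copy it to the swapchain image, draw ImGui,
// optionally copy out screenshot/Tracy data, then submit and present.
// Out-of-date or suboptimal results trigger a swapchain recreation.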
auto VulkanRenderer::render(std::function<void(GL &)> const &record) -> void
{
defer(m_vk.frame_number++);
if (!m_vk.swapchain || m_vk.swapchain_extent.width == 0
|| m_vk.swapchain_extent.height == 0) {
return;
}
process_render_commands();
auto &frame = m_vk.get_current_frame();
VK_CHECK(m_logger,
m_device.waitForFences(frame.render_fence.get(), true, 1'000'000'000));
frame.deletion_queue.flush();
frame.frame_descriptors.clear_pools(m_vkb.dev.device);
emit_frame_screenshot(frame);
#if defined(TRACY_ENABLE)
emit_tracy_frame_image(frame);
#endif
auto raw_fence { static_cast<VkFence>(frame.render_fence.get()) };
VK_CHECK(m_logger, vkResetFences(m_vkb.dev.device, 1, &raw_fence));
auto const acquire_result = m_device.acquireNextImageKHR(
m_vk.swapchain, 1'000'000'000, frame.swapchain_semaphore.get(), {});
if (acquire_result.result == vk::Result::eErrorOutOfDateKHR
|| acquire_result.result == vk::Result::eSuboptimalKHR) {
if (m_use_kms) {
recreate_swapchain(m_kms_extent.width, m_kms_extent.height);
} else {
int width {}, height {};
SDL_GetWindowSize(m_window, &width, &height);
recreate_swapchain(
static_cast<uint32_t>(width), static_cast<uint32_t>(height));
}
return;
}
VK_CHECK(m_logger, acquire_result.result);
uint32_t const swapchain_image_idx { acquire_result.value };
auto cmd { frame.main_command_buffer.get() };
cmd.reset();
m_vk.draw_extent.width = m_vk.draw_image.extent.width;
m_vk.draw_extent.height = m_vk.draw_image.extent.height;
vk::CommandBufferBeginInfo cmd_begin_info {};
cmd_begin_info.flags = vk::CommandBufferUsageFlagBits::eOneTimeSubmit;
VK_CHECK(m_logger,
vkBeginCommandBuffer(static_cast<VkCommandBuffer>(cmd),
reinterpret_cast<VkCommandBufferBeginInfo *>(&cmd_begin_info)));
bool const msaa_enabled = m_vk.msaa_samples != vk::SampleCountFlagBits::e1;
vkutil::transition_image(cmd, m_vk.draw_image.image, m_vk.draw_image_layout,
vk::ImageLayout::eColorAttachmentOptimal);
m_vk.draw_image_layout = vk::ImageLayout::eColorAttachmentOptimal;
if (msaa_enabled) {
vkutil::transition_image(cmd, m_vk.msaa_color_image.image,
m_vk.msaa_color_image_layout,
vk::ImageLayout::eColorAttachmentOptimal);
m_vk.msaa_color_image_layout = vk::ImageLayout::eColorAttachmentOptimal;
}
vkutil::transition_image(cmd, m_vk.depth_image.image,
m_vk.depth_image_layout, vk::ImageLayout::eDepthAttachmentOptimal);
m_vk.depth_image_layout = vk::ImageLayout::eDepthAttachmentOptimal;
gl.begin_drawing(cmd, m_vk.draw_image, &m_vk.depth_image);
if (record) {
record(gl);
}
gl.end_drawing();
vkutil::transition_image(cmd, m_vk.draw_image.image, m_vk.draw_image_layout,
vk::ImageLayout::eTransferSrcOptimal);
m_vk.draw_image_layout = vk::ImageLayout::eTransferSrcOptimal;
vkutil::transition_image(cmd, m_vk.swapchain_images.at(swapchain_image_idx),
vk::ImageLayout::eUndefined, vk::ImageLayout::eTransferDstOptimal);
vkutil::copy_image_to_image(cmd, m_vk.draw_image.image,
m_vk.swapchain_images.at(swapchain_image_idx), m_vk.draw_extent,
m_vk.swapchain_extent);
vkutil::transition_image(cmd, m_vk.swapchain_images[swapchain_image_idx],
vk::ImageLayout::eTransferDstOptimal,
vk::ImageLayout::eColorAttachmentOptimal);
if (m_imgui_enabled) {
draw_imgui(
cmd, m_vk.swapchain_image_views.at(swapchain_image_idx).get());
}
vkutil::transition_image(cmd, m_vk.swapchain_images.at(swapchain_image_idx),
vk::ImageLayout::eColorAttachmentOptimal,
vk::ImageLayout::eTransferSrcOptimal);
if (frame.screenshot_buffer.buffer) {
vk::BufferImageCopy screenshot_copy {};
screenshot_copy.imageSubresource.aspectMask
= vk::ImageAspectFlagBits::eColor;
screenshot_copy.imageSubresource.mipLevel = 0;
screenshot_copy.imageSubresource.baseArrayLayer = 0;
screenshot_copy.imageSubresource.layerCount = 1;
screenshot_copy.imageExtent
= vk::Extent3D { m_vk.swapchain_extent.width,
m_vk.swapchain_extent.height, 1 };
cmd.copyImageToBuffer(m_vk.swapchain_images.at(swapchain_image_idx),
vk::ImageLayout::eTransferSrcOptimal,
frame.screenshot_buffer.buffer, screenshot_copy);
frame.screenshot_ready = true;
} else {
frame.screenshot_ready = false;
}
#if defined(TRACY_ENABLE)
constexpr std::uint64_t tracy_frame_stride { 10 };
bool const tracy_capture { TracyIsConnected
&& (m_vk.frame_number % tracy_frame_stride == 0) };
frame.tracy_frame_ready = false;
frame.frame_image_ready = false;
if (tracy_capture && frame.frame_image_buffer.buffer
&& m_vk.tracy_capture_image.image) {
vkutil::transition_image(cmd, m_vk.tracy_capture_image.image,
m_vk.tracy_capture_image_layout,
vk::ImageLayout::eTransferDstOptimal);
m_vk.tracy_capture_image_layout = vk::ImageLayout::eTransferDstOptimal;
vk::ImageBlit blit_region {};
blit_region.srcSubresource.aspectMask = vk::ImageAspectFlagBits::eColor;
blit_region.srcSubresource.mipLevel = 0;
blit_region.srcSubresource.baseArrayLayer = 0;
blit_region.srcSubresource.layerCount = 1;
blit_region.srcOffsets[0] = vk::Offset3D { 0, 0, 0 };
blit_region.srcOffsets[1]
= vk::Offset3D { static_cast<int32_t>(m_vk.swapchain_extent.width),
static_cast<int32_t>(m_vk.swapchain_extent.height), 1 };
blit_region.dstSubresource.aspectMask = vk::ImageAspectFlagBits::eColor;
blit_region.dstSubresource.mipLevel = 0;
blit_region.dstSubresource.baseArrayLayer = 0;
blit_region.dstSubresource.layerCount = 1;
blit_region.dstOffsets[0] = vk::Offset3D { 0, 0, 0 };
blit_region.dstOffsets[1] = vk::Offset3D {
static_cast<int32_t>(m_vk.tracy_capture_extent.width),
static_cast<int32_t>(m_vk.tracy_capture_extent.height), 1
};
cmd.blitImage(m_vk.swapchain_images.at(swapchain_image_idx),
vk::ImageLayout::eTransferSrcOptimal,
m_vk.tracy_capture_image.image,
vk::ImageLayout::eTransferDstOptimal, blit_region,
vk::Filter::eLinear);
vkutil::transition_image(cmd, m_vk.tracy_capture_image.image,
m_vk.tracy_capture_image_layout,
vk::ImageLayout::eTransferSrcOptimal);
m_vk.tracy_capture_image_layout = vk::ImageLayout::eTransferSrcOptimal;
vk::BufferImageCopy copy_region {};
copy_region.imageSubresource.aspectMask
= vk::ImageAspectFlagBits::eColor;
copy_region.imageSubresource.mipLevel = 0;
copy_region.imageSubresource.baseArrayLayer = 0;
copy_region.imageSubresource.layerCount = 1;
copy_region.imageExtent
= vk::Extent3D { m_vk.tracy_capture_extent.width,
m_vk.tracy_capture_extent.height, 1 };
cmd.copyImageToBuffer(m_vk.tracy_capture_image.image,
vk::ImageLayout::eTransferSrcOptimal,
frame.frame_image_buffer.buffer, copy_region);
frame.frame_image_ready = true;
frame.tracy_frame_ready = true;
}
#endif
vkutil::transition_image(cmd, m_vk.swapchain_images.at(swapchain_image_idx),
vk::ImageLayout::eTransferSrcOptimal, vk::ImageLayout::ePresentSrcKHR);
cmd.end();
auto render_semaphore
= m_vk.present_semaphores.at(swapchain_image_idx).get();
vk::PipelineStageFlags2 wait_stage
= vk::PipelineStageFlagBits2::eColorAttachmentOutput;
auto wait_info { vkinit::semaphore_submit_info(
wait_stage, m_vk.get_current_frame().swapchain_semaphore.get()) };
auto command_buffer_info { vkinit::command_buffer_submit_info(cmd) };
auto signal_info { vkinit::semaphore_submit_info(
vk::PipelineStageFlagBits2::eAllCommands, render_semaphore) };
auto submit_info { vkinit::submit_info2(
&command_buffer_info, &wait_info, &signal_info) };
m_vk.graphics_queue.submit2(
submit_info, m_vk.get_current_frame().render_fence.get());
vk::PresentInfoKHR present_info {};
present_info.setSwapchains(m_vk.swapchain);
present_info.setWaitSemaphores(render_semaphore);
present_info.setImageIndices(swapchain_image_idx);
auto const present_result = m_vk.graphics_queue.presentKHR(present_info);
if (present_result == vk::Result::eErrorOutOfDateKHR
|| present_result == vk::Result::eSuboptimalKHR) {
if (m_use_kms) {
recreate_swapchain(m_kms_extent.width, m_kms_extent.height);
} else {
int width {}, height {};
SDL_GetWindowSize(m_window, &width, &height);
recreate_swapchain(
static_cast<uint32_t>(width), static_cast<uint32_t>(height));
}
return;
}
VK_CHECK(m_logger, present_result);
}
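// Offscreen variant of render(): records the same GL pass but copies the draw
// image into a caller-provided target image instead of the swapchain, then
// blocks on the render fence so the target is ready when this returns.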
auto VulkanRenderer::render_to_image(vk::Image target_image,
vk::Extent2D target_extent, std::function<void(GL &)> const &record) -> void
{
defer(m_vk.frame_number++);
if (!target_image || target_extent.width == 0
|| target_extent.height == 0) {
return;
}
process_render_commands();
auto &frame = m_vk.get_current_frame();
VK_CHECK(m_logger,
m_device.waitForFences(frame.render_fence.get(), true, 1'000'000'000));
frame.deletion_queue.flush();
frame.frame_descriptors.clear_pools(m_vkb.dev.device);
auto raw_fence { static_cast<VkFence>(frame.render_fence.get()) };
VK_CHECK(m_logger, vkResetFences(m_vkb.dev.device, 1, &raw_fence));
auto cmd { frame.main_command_buffer.get() };
cmd.reset();
m_vk.draw_extent.width = m_vk.draw_image.extent.width;
m_vk.draw_extent.height = m_vk.draw_image.extent.height;
vk::CommandBufferBeginInfo cmd_begin_info {};
cmd_begin_info.flags = vk::CommandBufferUsageFlagBits::eOneTimeSubmit;
VK_CHECK(m_logger,
vkBeginCommandBuffer(static_cast<VkCommandBuffer>(cmd),
reinterpret_cast<VkCommandBufferBeginInfo *>(&cmd_begin_info)));
bool const msaa_enabled = m_vk.msaa_samples != vk::SampleCountFlagBits::e1;
vkutil::transition_image(cmd, m_vk.draw_image.image, m_vk.draw_image_layout,
vk::ImageLayout::eColorAttachmentOptimal);
m_vk.draw_image_layout = vk::ImageLayout::eColorAttachmentOptimal;
if (msaa_enabled) {
vkutil::transition_image(cmd, m_vk.msaa_color_image.image,
m_vk.msaa_color_image_layout,
vk::ImageLayout::eColorAttachmentOptimal);
m_vk.msaa_color_image_layout = vk::ImageLayout::eColorAttachmentOptimal;
}
vkutil::transition_image(cmd, m_vk.depth_image.image,
m_vk.depth_image_layout, vk::ImageLayout::eDepthAttachmentOptimal);
m_vk.depth_image_layout = vk::ImageLayout::eDepthAttachmentOptimal;
gl.begin_drawing(cmd, m_vk.draw_image, &m_vk.depth_image);
if (record) {
record(gl);
}
gl.end_drawing();
vkutil::transition_image(cmd, m_vk.draw_image.image, m_vk.draw_image_layout,
vk::ImageLayout::eTransferSrcOptimal);
m_vk.draw_image_layout = vk::ImageLayout::eTransferSrcOptimal;
vkutil::transition_image(cmd, target_image, vk::ImageLayout::eUndefined,
vk::ImageLayout::eTransferDstOptimal);
vkutil::copy_image_to_image(cmd, m_vk.draw_image.image, target_image,
m_vk.draw_extent, target_extent);
vkutil::transition_image(cmd, target_image,
vk::ImageLayout::eTransferDstOptimal,
vk::ImageLayout::eColorAttachmentOptimal);
cmd.end();
auto command_buffer_info { vkinit::command_buffer_submit_info(cmd) };
auto submit_info { vkinit::submit_info2(
&command_buffer_info, nullptr, nullptr) };
m_vk.graphics_queue.submit2(submit_info, frame.render_fence.get());
VK_CHECK(m_logger,
m_device.waitForFences(frame.render_fence.get(), true, 1'000'000'000));
}
auto VulkanRenderer::draw_imgui(
vk::CommandBuffer cmd, vk::ImageView target_image_view) -> void
{
auto const color_attachment { vkinit::attachment_info(
target_image_view, nullptr, vk::ImageLayout::eColorAttachmentOptimal) };
auto const render_info { vkinit::render_info(
m_vk.draw_extent, &color_attachment, nullptr) };
cmd.beginRendering(render_info);
ImGui_ImplVulkan_RenderDrawData(
ImGui::GetDrawData(), static_cast<VkCommandBuffer>(cmd));
cmd.endRendering();
}
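// (Re)creates the swapchain via vk-bootstrap: BGRA8 UNORM images, FIFO (vsync)
// presentation, and transfer src/dst usage so frames can be copied to and from
// the swapchain (screenshots, frame captures).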
auto VulkanRenderer::create_swapchain(uint32_t width, uint32_t height) -> void
{
vkb::SwapchainBuilder builder { m_vkb.phys_dev, m_vkb.dev, m_vk.surface };
m_vk.swapchain_image_format = vk::Format::eB8G8R8A8Unorm;
auto const swapchain_ret { builder
.set_desired_format({
.format = static_cast<VkFormat>(m_vk.swapchain_image_format),
.colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR,
})
.set_desired_present_mode(VK_PRESENT_MODE_FIFO_KHR)
.set_desired_extent(width, height)
.set_image_usage_flags(VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT
| VK_IMAGE_USAGE_TRANSFER_DST_BIT
| VK_IMAGE_USAGE_TRANSFER_SRC_BIT)
.build() };
if (!swapchain_ret) {
std::println(std::cerr, "Failed to create swapchain. Error: {}",
swapchain_ret.error().message());
        throw std::runtime_error("Swapchain creation failed");
}
m_vkb.swapchain = swapchain_ret.value();
m_vk.swapchain = m_vkb.swapchain.swapchain;
m_vk.swapchain_extent = vk::Extent2D { m_vkb.swapchain.extent.width,
m_vkb.swapchain.extent.height };
auto images { m_vkb.swapchain.get_images().value() };
m_vk.swapchain_images.assign(images.begin(), images.end());
m_vk.swapchain_image_views.clear();
for (auto img : m_vk.swapchain_images) {
vk::ImageViewCreateInfo iv_ci {};
iv_ci.image = img;
iv_ci.viewType = vk::ImageViewType::e2D;
iv_ci.format = m_vk.swapchain_image_format;
iv_ci.subresourceRange.aspectMask = vk::ImageAspectFlagBits::eColor;
iv_ci.subresourceRange.levelCount = 1;
iv_ci.subresourceRange.layerCount = 1;
m_vk.swapchain_image_views.emplace_back(
m_device.createImageViewUnique(iv_ci));
}
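    // One presentation semaphore per swapchain image; the unique handles are
    // released again when destroy_swapchain() clears the vector.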
vk::SemaphoreCreateInfo semaphore_ci {};
m_vk.present_semaphores.resize(m_vk.swapchain_images.size());
for (auto &semaphore : m_vk.present_semaphores) {
semaphore = m_device.createSemaphoreUnique(semaphore_ci);
}
ensure_screenshot_buffers(m_vk.swapchain_extent);
#if defined(TRACY_ENABLE)
ensure_tracy_frame_buffers(m_vk.swapchain_extent);
#endif
}
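// The draw image is the offscreen 16-bit float RGBA target that rendering goes
// into before being copied to the swapchain or another destination.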
auto VulkanRenderer::create_draw_image(uint32_t width, uint32_t height) -> void
{
destroy_draw_image();
auto const flags { vk::ImageUsageFlagBits::eTransferSrc
| vk::ImageUsageFlagBits::eTransferDst
| vk::ImageUsageFlagBits::eSampled
| vk::ImageUsageFlagBits::eColorAttachment };
m_vk.draw_image = create_image(
{ width, height, 1 }, vk::Format::eR16G16B16A16Sfloat, flags);
m_vk.draw_image_layout = vk::ImageLayout::eUndefined;
}
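// Multisampled color target, only created when MSAA is enabled; single-sample
// rendering draws straight into the draw image.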
auto VulkanRenderer::create_msaa_color_image(uint32_t width, uint32_t height)
-> void
{
destroy_msaa_color_image();
if (m_vk.msaa_samples == vk::SampleCountFlagBits::e1) {
return;
}
auto const flags { vk::ImageUsageFlagBits::eColorAttachment };
m_vk.msaa_color_image = create_image(
{ width, height, 1 }, m_vk.draw_image.format, flags, m_vk.msaa_samples);
m_vk.msaa_color_image_layout = vk::ImageLayout::eUndefined;
}
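// Depth attachment created with the same sample count as the color target so
// the two can be used together when rendering.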
auto VulkanRenderer::create_depth_image(uint32_t width, uint32_t height) -> void
{
destroy_depth_image();
auto const flags { vk::ImageUsageFlagBits::eTransferSrc
| vk::ImageUsageFlagBits::eTransferDst
| vk::ImageUsageFlagBits::eDepthStencilAttachment };
m_vk.depth_image = create_image(
{ width, height, 1 }, vk::Format::eD32Sfloat, flags, m_vk.msaa_samples);
m_vk.depth_image_layout = vk::ImageLayout::eUndefined;
}
auto VulkanRenderer::destroy_depth_image() -> void
{
if (m_vk.depth_image.image) {
m_device.destroyImageView(m_vk.depth_image.image_view);
m_vk.depth_image.image_view = vk::ImageView {};
vmaDestroyImage(m_vk.allocator,
static_cast<VkImage>(m_vk.depth_image.image),
m_vk.depth_image.allocation);
m_vk.depth_image.image = vk::Image {};
m_vk.depth_image.allocation = nullptr;
m_vk.depth_image.extent = vk::Extent3D { 0, 0, 0 };
m_vk.depth_image_layout = vk::ImageLayout::eUndefined;
}
}
auto VulkanRenderer::destroy_draw_image() -> void
{
if (m_vk.draw_image.image) {
m_device.destroyImageView(m_vk.draw_image.image_view);
m_vk.draw_image.image_view = vk::ImageView {};
vmaDestroyImage(m_vk.allocator,
static_cast<VkImage>(m_vk.draw_image.image),
m_vk.draw_image.allocation);
m_vk.draw_image.image = vk::Image {};
m_vk.draw_image.allocation = nullptr;
m_vk.draw_image.extent = vk::Extent3D { 0, 0, 0 };
m_vk.draw_image_layout = vk::ImageLayout::eUndefined;
}
}
auto VulkanRenderer::destroy_msaa_color_image() -> void
{
if (m_vk.msaa_color_image.image) {
m_device.destroyImageView(m_vk.msaa_color_image.image_view);
m_vk.msaa_color_image.image_view = vk::ImageView {};
vmaDestroyImage(m_vk.allocator,
static_cast<VkImage>(m_vk.msaa_color_image.image),
m_vk.msaa_color_image.allocation);
m_vk.msaa_color_image.image = vk::Image {};
m_vk.msaa_color_image.allocation = nullptr;
m_vk.msaa_color_image.extent = vk::Extent3D { 0, 0, 0 };
m_vk.msaa_color_image_layout = vk::ImageLayout::eUndefined;
}
}
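// Tears down and rebuilds all size-dependent resources. A zero extent (for
// example a minimized window) destroys everything and leaves the swapchain
// absent until a valid size arrives.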
auto VulkanRenderer::recreate_swapchain(uint32_t width, uint32_t height) -> void
{
m_device.waitIdle();
if (width == 0 || height == 0) {
destroy_swapchain();
destroy_draw_image();
destroy_msaa_color_image();
destroy_depth_image();
m_vk.swapchain_extent = vk::Extent2D { 0, 0 };
return;
}
destroy_swapchain();
destroy_draw_image();
destroy_msaa_color_image();
destroy_depth_image();
create_swapchain(width, height);
create_draw_image(width, height);
create_msaa_color_image(width, height);
create_depth_image(width, height);
}
auto VulkanRenderer::destroy_swapchain() -> void
{
if (!m_vk.swapchain)
return;
destroy_screenshot_buffers();
#if defined(TRACY_ENABLE)
destroy_tracy_frame_buffers();
#endif
m_vk.present_semaphores.clear();
m_device.destroySwapchainKHR(m_vk.swapchain);
m_vk.swapchain = vk::SwapchainKHR {};
m_vk.swapchain_image_views.clear();
m_vk.swapchain_images.clear();
m_vk.swapchain_extent = vk::Extent2D { 0, 0 };
}
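// Per-frame host-readable buffers sized for one tightly packed RGBA8 frame at
// the given extent; existing buffers are reused until the extent changes.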
auto VulkanRenderer::ensure_screenshot_buffers(vk::Extent2D extent) -> void
{
if (extent.width == 0 || extent.height == 0) {
return;
}
auto const byte_count { static_cast<size_t>(extent.width)
* static_cast<size_t>(extent.height) * 4 };
for (auto &frame : m_vk.frames) {
auto const same_extent { frame.screenshot_buffer.buffer
&& frame.screenshot_extent.width == extent.width
&& frame.screenshot_extent.height == extent.height };
if (!same_extent && frame.screenshot_buffer.buffer) {
destroy_buffer(frame.screenshot_buffer);
frame.screenshot_buffer = AllocatedBuffer {};
}
if (!same_extent) {
frame.screenshot_buffer = create_buffer(byte_count,
vk::BufferUsageFlagBits::eTransferDst,
VMA_MEMORY_USAGE_GPU_TO_CPU);
frame.screenshot_extent = extent;
frame.screenshot_rgba.resize(byte_count);
}
frame.screenshot_ready = false;
}
}
auto VulkanRenderer::destroy_screenshot_buffers() -> void
{
for (auto &frame : m_vk.frames) {
if (frame.screenshot_buffer.buffer) {
destroy_buffer(frame.screenshot_buffer);
frame.screenshot_buffer = AllocatedBuffer {};
}
frame.screenshot_extent = vk::Extent2D {};
frame.screenshot_rgba.clear();
frame.screenshot_ready = false;
}
m_latest_screenshot_pixels.clear();
m_latest_screenshot_extent = vk::Extent2D {};
if (m_latest_screenshot) {
destroy_image(*m_latest_screenshot);
m_latest_screenshot.reset();
}
}
auto VulkanRenderer::emit_frame_screenshot(FrameData &frame) -> void
{
if (!frame.screenshot_ready) {
return;
}
auto const extent { frame.screenshot_extent };
if (extent.width == 0 || extent.height == 0) {
return;
}
auto const byte_count { static_cast<size_t>(extent.width)
* static_cast<size_t>(extent.height) * 4 };
if (!frame.screenshot_buffer.buffer) {
return;
}
VmaAllocationInfo info {};
vmaGetAllocationInfo(
m_vk.allocator, frame.screenshot_buffer.allocation, &info);
void *mapped { info.pMappedData };
bool mapped_here { false };
if (!mapped) {
auto const map_result { vmaMapMemory(
m_vk.allocator, frame.screenshot_buffer.allocation, &mapped) };
if (map_result != VK_SUCCESS) {
return;
}
mapped_here = true;
}
auto *source { static_cast<std::uint8_t *>(mapped) };
auto &destination { frame.screenshot_rgba };
if (destination.size() != byte_count) {
destination.resize(byte_count);
}
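    // The readback is BGRA (the swapchain format is eB8G8R8A8Unorm); swizzle
    // to RGBA before building the CPU copy and the screenshot texture.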
for (size_t i = 0; i < byte_count; i += 4) {
destination[i] = source[i + 2];
destination[i + 1] = source[i + 1];
destination[i + 2] = source[i];
destination[i + 3] = source[i + 3];
}
auto const screenshot_flags { vk::ImageUsageFlagBits::eSampled };
auto const screenshot_extent { vk::Extent3D {
extent.width, extent.height, 1 } };
if (m_latest_screenshot) {
destroy_image(*m_latest_screenshot);
m_latest_screenshot.reset();
}
m_latest_screenshot = create_image(destination.data(), screenshot_extent,
vk::Format::eR8G8B8A8Unorm, screenshot_flags);
m_latest_screenshot_pixels = destination;
m_latest_screenshot_extent = extent;
if (mapped_here) {
vmaUnmapMemory(m_vk.allocator, frame.screenshot_buffer.allocation);
}
frame.screenshot_ready = false;
}
#if defined(TRACY_ENABLE)
namespace {
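// Tracy frame images are small thumbnails: clamp the capture size and round
// each dimension down to a multiple of 4, as Tracy's FrameImage expects.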
[[nodiscard]] auto tracy_capture_extent(vk::Extent2D extent) -> vk::Extent2D
{
constexpr uint32_t max_width { 320 };
constexpr uint32_t max_height { 180 };
auto width { std::min(extent.width, max_width) };
auto height { std::min(extent.height, max_height) };
width -= width % 4;
height -= height % 4;
return vk::Extent2D { width, height };
}
} // namespace
auto VulkanRenderer::ensure_tracy_frame_buffers(vk::Extent2D extent) -> void
{
auto const capture_extent { tracy_capture_extent(extent) };
if (capture_extent.width == 0 || capture_extent.height == 0) {
return;
}
auto const byte_count { static_cast<size_t>(capture_extent.width)
* static_cast<size_t>(capture_extent.height) * 4 };
if (m_vk.tracy_capture_extent.width != capture_extent.width
|| m_vk.tracy_capture_extent.height != capture_extent.height) {
if (m_vk.tracy_capture_image.image) {
destroy_image(m_vk.tracy_capture_image);
}
auto const flags { vk::ImageUsageFlagBits::eTransferDst
| vk::ImageUsageFlagBits::eTransferSrc
| vk::ImageUsageFlagBits::eColorAttachment
| vk::ImageUsageFlagBits::eSampled };
auto const capture_size { vk::Extent3D {
capture_extent.width, capture_extent.height, 1 } };
m_vk.tracy_capture_image = create_image_no_view(
capture_size, m_vk.swapchain_image_format, flags);
m_vk.tracy_capture_image_layout = vk::ImageLayout::eUndefined;
m_vk.tracy_capture_extent = capture_extent;
}
for (auto &frame : m_vk.frames) {
auto const same_extent { frame.frame_image_buffer.buffer
&& frame.frame_image_extent.width == capture_extent.width
&& frame.frame_image_extent.height == capture_extent.height };
if (!same_extent && frame.frame_image_buffer.buffer) {
destroy_buffer(frame.frame_image_buffer);
frame.frame_image_buffer = AllocatedBuffer {};
}
if (!same_extent) {
frame.frame_image_buffer = create_buffer(byte_count,
vk::BufferUsageFlagBits::eTransferDst,
VMA_MEMORY_USAGE_GPU_TO_CPU);
frame.frame_image_extent = capture_extent;
frame.frame_image_rgba.resize(byte_count);
}
frame.frame_image_ready = false;
frame.tracy_frame_ready = false;
}
}
auto VulkanRenderer::destroy_tracy_frame_buffers() -> void
{
for (auto &frame : m_vk.frames) {
if (frame.frame_image_buffer.buffer) {
destroy_buffer(frame.frame_image_buffer);
frame.frame_image_buffer = AllocatedBuffer {};
}
frame.frame_image_extent = vk::Extent2D {};
frame.frame_image_rgba.clear();
frame.frame_image_ready = false;
frame.tracy_frame_ready = false;
}
if (m_vk.tracy_capture_image.image) {
destroy_image(m_vk.tracy_capture_image);
m_vk.tracy_capture_image = AllocatedImage {};
}
m_vk.tracy_capture_image_layout = vk::ImageLayout::eUndefined;
m_vk.tracy_capture_extent = vk::Extent2D {};
}
auto VulkanRenderer::emit_tracy_frame_image(FrameData &frame) -> void
{
if (!frame.frame_image_ready) {
return;
}
auto const extent { frame.frame_image_extent };
if (extent.width == 0 || extent.height == 0) {
return;
}
if (extent.width % 4 != 0 || extent.height % 4 != 0) {
return;
}
if (extent.width > std::numeric_limits<std::uint16_t>::max()
|| extent.height > std::numeric_limits<std::uint16_t>::max()) {
return;
}
auto const byte_count { static_cast<size_t>(extent.width)
* static_cast<size_t>(extent.height) * 4 };
if (!frame.frame_image_buffer.buffer) {
return;
}
VmaAllocationInfo info {};
vmaGetAllocationInfo(
m_vk.allocator, frame.frame_image_buffer.allocation, &info);
void *mapped { info.pMappedData };
bool mapped_here { false };
if (!mapped) {
auto const map_result { vmaMapMemory(
m_vk.allocator, frame.frame_image_buffer.allocation, &mapped) };
if (map_result != VK_SUCCESS) {
return;
}
mapped_here = true;
}
auto *source { static_cast<std::uint8_t *>(mapped) };
auto &destination { frame.frame_image_rgba };
if (destination.size() != byte_count) {
destination.resize(byte_count);
}
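    // Same BGRA -> RGBA swizzle as the screenshot path before handing the
    // pixels to Tracy's FrameImage.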
for (size_t i = 0; i < byte_count; i += 4) {
destination[i] = source[i + 2];
destination[i + 1] = source[i + 1];
destination[i + 2] = source[i];
destination[i + 3] = source[i + 3];
}
if (!frame.tracy_frame_ready || !TracyIsConnected) {
frame.frame_image_ready = false;
frame.tracy_frame_ready = false;
if (mapped_here) {
vmaUnmapMemory(m_vk.allocator, frame.frame_image_buffer.allocation);
}
return;
}
auto const frame_offset { static_cast<std::uint8_t>(
m_vk.frames.size() - 1) };
FrameImage(destination.data(), static_cast<std::uint16_t>(extent.width),
static_cast<std::uint16_t>(extent.height), frame_offset, false);
if (mapped_here) {
vmaUnmapMemory(m_vk.allocator, frame.frame_image_buffer.allocation);
}
frame.frame_image_ready = false;
frame.tracy_frame_ready = false;
}
#endif
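// Allocates a device-local image through VMA without creating an image view;
// callers that need a view (2D or cube) create it separately.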
auto VulkanRenderer::create_image_no_view(vk::Extent3D size, vk::Format format,
vk::ImageUsageFlags flags, vk::SampleCountFlagBits samples, bool mipmapped)
-> AllocatedImage
{
AllocatedImage new_image {};
new_image.format = format;
new_image.extent = size;
auto img_ci { vkinit::image_create_info(format, flags, size, samples) };
if (mipmapped) {
img_ci.mipLevels = static_cast<uint32_t>(std::floor(
std::log2(std::max(size.width, size.height))))
+ 1;
}
VmaAllocationCreateInfo alloc_ci {};
alloc_ci.usage = VMA_MEMORY_USAGE_GPU_ONLY;
alloc_ci.requiredFlags
= VkMemoryPropertyFlags(VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
VK_CHECK(m_logger,
vmaCreateImage(m_vk.allocator,
reinterpret_cast<VkImageCreateInfo const *>(&img_ci), &alloc_ci,
reinterpret_cast<VkImage *>(&new_image.image),
&new_image.allocation, nullptr));
return new_image;
}
auto VulkanRenderer::create_image(vk::Extent3D size, vk::Format format,
vk::ImageUsageFlags flags, vk::SampleCountFlagBits samples, bool mipmapped)
-> AllocatedImage
{
AllocatedImage new_image { create_image_no_view(
size, format, flags, samples, mipmapped) };
vk::ImageAspectFlags aspect_flag { vk::ImageAspectFlagBits::eColor };
if (format == vk::Format::eD32Sfloat) {
aspect_flag = vk::ImageAspectFlagBits::eDepth;
}
auto const view_ci { vkinit::imageview_create_info(
format, new_image.image, aspect_flag) };
new_image.image_view = m_device.createImageView(view_ci);
return new_image;
}
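// Uploads raw pixel data through a host-visible staging buffer and an
// immediate transfer submit. The size calculation assumes tightly packed
// texels of 4 bytes each, so it is only correct for 4-byte formats such as
// eR8G8B8A8Unorm.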
auto VulkanRenderer::create_image(void const *data, vk::Extent3D size,
vk::Format format, vk::ImageUsageFlags flags, bool mipmapped)
-> AllocatedImage
{
    size_t data_size {
        static_cast<size_t>(size.depth) * static_cast<size_t>(size.width)
            * static_cast<size_t>(size.height) * 4,
    };
auto const upload_buffer {
create_buffer(data_size, vk::BufferUsageFlagBits::eTransferSrc,
VMA_MEMORY_USAGE_CPU_TO_GPU),
};
VmaAllocationInfo info {};
vmaGetAllocationInfo(m_vk.allocator, upload_buffer.allocation, &info);
    void *mapped_data { info.pMappedData };
bool mapped_here { false };
if (!mapped_data) {
        VkResult res { vmaMapMemory(
            m_vk.allocator, upload_buffer.allocation, &mapped_data) };
assert(res == VK_SUCCESS);
mapped_here = true;
}
memcpy(mapped_data, data, data_size);
auto const new_image {
create_image(size, format,
flags | vk::ImageUsageFlagBits::eTransferDst
| vk::ImageUsageFlagBits::eTransferSrc,
vk::SampleCountFlagBits::e1, mipmapped),
};
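    // One-off transfer submit: transition the new image to TransferDstOptimal,
    // copy the staging buffer into mip level 0, then transition it to
    // ShaderReadOnlyOptimal for sampling.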
immediate_submit([&](vk::CommandBuffer cmd) {
vkutil::transition_image(cmd, new_image.image,
vk::ImageLayout::eUndefined, vk::ImageLayout::eTransferDstOptimal);
vk::BufferImageCopy copy_region {};
copy_region.imageSubresource.aspectMask
= vk::ImageAspectFlagBits::eColor;
copy_region.imageSubresource.mipLevel = 0;
copy_region.imageSubresource.baseArrayLayer = 0;
copy_region.imageSubresource.layerCount = 1;
copy_region.imageExtent = size;
cmd.copyBufferToImage(upload_buffer.buffer, new_image.image,
vk::ImageLayout::eTransferDstOptimal, copy_region);
vkutil::transition_image(cmd, new_image.image,
vk::ImageLayout::eTransferDstOptimal,
vk::ImageLayout::eShaderReadOnlyOptimal);
});
if (mapped_here) {
vmaUnmapMemory(m_vk.allocator, upload_buffer.allocation);
}
destroy_buffer(upload_buffer);
return new_image;
}
auto VulkanRenderer::create_image(CPUTexture const &texture,
vk::ImageUsageFlags flags, bool mipmapped) -> AllocatedImage
{
vk::Extent3D size { texture.width, texture.height, 1 };
return create_image(
texture.pixels.data(), size, texture.format, flags, mipmapped);
}
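// Builds a cube map from six tightly packed face_size x face_size RGBA faces.
// Each input face i is uploaded to array layer i; Vulkan interprets cube
// layers 0-5 as +X, -X, +Y, -Y, +Z, -Z.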
auto VulkanRenderer::create_cubemap(std::span<uint8_t const> pixels,
uint32_t face_size, vk::Format format, vk::ImageUsageFlags flags)
-> AllocatedImage
{
size_t const face_bytes = static_cast<size_t>(face_size) * face_size * 4;
if (pixels.size() < face_bytes * 6) {
m_logger.err("Cubemap data size is invalid");
return {};
}
auto const upload_buffer {
create_buffer(pixels.size(), vk::BufferUsageFlagBits::eTransferSrc,
VMA_MEMORY_USAGE_CPU_TO_GPU),
};
VmaAllocationInfo info {};
vmaGetAllocationInfo(m_vk.allocator, upload_buffer.allocation, &info);
    void *mapped_data { info.pMappedData };
bool mapped_here { false };
if (!mapped_data) {
        VkResult res { vmaMapMemory(
            m_vk.allocator, upload_buffer.allocation, &mapped_data) };
assert(res == VK_SUCCESS);
mapped_here = true;
}
memcpy(mapped_data, pixels.data(), pixels.size());
AllocatedImage new_image {};
new_image.format = format;
new_image.extent = vk::Extent3D { face_size, face_size, 1 };
auto img_ci { vkinit::image_create_info(format,
flags | vk::ImageUsageFlagBits::eTransferDst, new_image.extent,
vk::SampleCountFlagBits::e1) };
img_ci.arrayLayers = 6;
img_ci.flags = vk::ImageCreateFlagBits::eCubeCompatible;
VmaAllocationCreateInfo alloc_ci {};
alloc_ci.usage = VMA_MEMORY_USAGE_GPU_ONLY;
alloc_ci.requiredFlags
= VkMemoryPropertyFlags(VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
VK_CHECK(m_logger,
vmaCreateImage(m_vk.allocator,
reinterpret_cast<VkImageCreateInfo const *>(&img_ci), &alloc_ci,
reinterpret_cast<VkImage *>(&new_image.image),
&new_image.allocation, nullptr));
vk::ImageViewCreateInfo view_ci {};
view_ci.viewType = vk::ImageViewType::eCube;
view_ci.image = new_image.image;
view_ci.format = format;
view_ci.subresourceRange.aspectMask = vk::ImageAspectFlagBits::eColor;
view_ci.subresourceRange.baseMipLevel = 0;
view_ci.subresourceRange.levelCount = 1;
view_ci.subresourceRange.baseArrayLayer = 0;
view_ci.subresourceRange.layerCount = 6;
new_image.image_view = m_device.createImageView(view_ci);
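    // Explicit barriers cover all six array layers: undefined -> transfer dst,
    // one buffer copy per face, then transfer dst -> shader read only.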
immediate_submit([&](vk::CommandBuffer cmd) {
vk::ImageMemoryBarrier to_transfer {};
to_transfer.srcAccessMask = vk::AccessFlagBits::eNone;
to_transfer.dstAccessMask = vk::AccessFlagBits::eTransferWrite;
to_transfer.oldLayout = vk::ImageLayout::eUndefined;
to_transfer.newLayout = vk::ImageLayout::eTransferDstOptimal;
to_transfer.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
to_transfer.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
to_transfer.image = new_image.image;
to_transfer.subresourceRange.aspectMask
= vk::ImageAspectFlagBits::eColor;
to_transfer.subresourceRange.baseMipLevel = 0;
to_transfer.subresourceRange.levelCount = 1;
to_transfer.subresourceRange.baseArrayLayer = 0;
to_transfer.subresourceRange.layerCount = 6;
cmd.pipelineBarrier(vk::PipelineStageFlagBits::eTopOfPipe,
vk::PipelineStageFlagBits::eTransfer, {}, {}, {}, to_transfer);
std::array<vk::BufferImageCopy, 6> regions {};
for (uint32_t layer = 0; layer < 6; ++layer) {
vk::BufferImageCopy region {};
region.bufferOffset = face_bytes * layer;
region.imageSubresource.aspectMask
= vk::ImageAspectFlagBits::eColor;
region.imageSubresource.mipLevel = 0;
region.imageSubresource.baseArrayLayer = layer;
region.imageSubresource.layerCount = 1;
region.imageExtent = new_image.extent;
regions[layer] = region;
}
cmd.copyBufferToImage(upload_buffer.buffer, new_image.image,
vk::ImageLayout::eTransferDstOptimal, regions);
vk::ImageMemoryBarrier to_read {};
to_read.srcAccessMask = vk::AccessFlagBits::eTransferWrite;
to_read.dstAccessMask = vk::AccessFlagBits::eShaderRead;
to_read.oldLayout = vk::ImageLayout::eTransferDstOptimal;
to_read.newLayout = vk::ImageLayout::eShaderReadOnlyOptimal;
to_read.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
to_read.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
to_read.image = new_image.image;
to_read.subresourceRange.aspectMask = vk::ImageAspectFlagBits::eColor;
to_read.subresourceRange.baseMipLevel = 0;
to_read.subresourceRange.levelCount = 1;
to_read.subresourceRange.baseArrayLayer = 0;
to_read.subresourceRange.layerCount = 6;
cmd.pipelineBarrier(vk::PipelineStageFlagBits::eTransfer,
vk::PipelineStageFlagBits::eFragmentShader, {}, {}, {}, to_read);
});
if (mapped_here) {
vmaUnmapMemory(m_vk.allocator, upload_buffer.allocation);
}
destroy_buffer(upload_buffer);
return new_image;
}
auto VulkanRenderer::destroy_image(AllocatedImage const &img) -> void
{
if (img.image_view) {
m_device.destroyImageView(img.image_view);
}
vmaDestroyImage(
m_vk.allocator, static_cast<VkImage>(img.image), img.allocation);
}
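// Thin wrapper around vmaCreateBuffer. CPU-visible usages request persistently
// mapped, sequential-write allocations, so callers can usually read
// VmaAllocationInfo::pMappedData without an explicit map/unmap.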
auto VulkanRenderer::create_buffer(size_t alloc_size,
vk::BufferUsageFlags usage, VmaMemoryUsage memory_usage) -> AllocatedBuffer
{
vk::BufferCreateInfo buffer_ci {};
buffer_ci.size = alloc_size;
buffer_ci.usage = usage;
buffer_ci.sharingMode = vk::SharingMode::eExclusive;
VmaAllocationCreateInfo alloc_ci {};
alloc_ci.usage = memory_usage;
alloc_ci.flags = 0;
if (memory_usage == VMA_MEMORY_USAGE_CPU_ONLY
|| memory_usage == VMA_MEMORY_USAGE_CPU_TO_GPU) {
alloc_ci.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT
| VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT;
}
    AllocatedBuffer buffer {};
VK_CHECK(m_logger,
vmaCreateBuffer(m_vk.allocator,
reinterpret_cast<VkBufferCreateInfo const *>(&buffer_ci), &alloc_ci,
reinterpret_cast<VkBuffer *>(&buffer.buffer), &buffer.allocation,
&buffer.info));
return buffer;
}
auto VulkanRenderer::destroy_buffer(AllocatedBuffer const &buffer) -> void
{
vmaDestroyBuffer(m_vk.allocator, buffer.buffer, buffer.allocation);
}
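// Uploads mesh geometry into device-local vertex/index buffers through a
// single staging buffer (vertices first, indices appended after them). The
// vertex buffer's device address is queried so shaders can read vertices
// through buffer device address (vertex pulling).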
auto VulkanRenderer::upload_mesh(
std::span<uint32_t> indices, std::span<Vertex> vertices) -> GPUMeshBuffers
{
auto const vertex_buffer_size { vertices.size() * sizeof(Vertex) };
auto const index_buffer_size { indices.size() * sizeof(uint32_t) };
GPUMeshBuffers new_surface;
new_surface.vertex_buffer = create_buffer(vertex_buffer_size,
vk::BufferUsageFlagBits::eVertexBuffer
| vk::BufferUsageFlagBits::eTransferDst
| vk::BufferUsageFlagBits::eShaderDeviceAddress,
VMA_MEMORY_USAGE_GPU_ONLY);
vk::BufferDeviceAddressInfo device_address_info {};
device_address_info.buffer = new_surface.vertex_buffer.buffer;
new_surface.vertex_buffer_address
= m_device.getBufferAddress(device_address_info);
new_surface.index_buffer = create_buffer(index_buffer_size,
vk::BufferUsageFlagBits::eIndexBuffer
| vk::BufferUsageFlagBits::eTransferDst
| vk::BufferUsageFlagBits::eShaderDeviceAddress,
VMA_MEMORY_USAGE_GPU_ONLY);
auto staging { create_buffer(vertex_buffer_size + index_buffer_size,
vk::BufferUsageFlagBits::eTransferSrc, VMA_MEMORY_USAGE_CPU_ONLY) };
VmaAllocationInfo info {};
vmaGetAllocationInfo(m_vk.allocator, staging.allocation, &info);
void *data = info.pMappedData;
bool mapped_here { false };
if (!data) {
VkResult res { vmaMapMemory(
m_vk.allocator, staging.allocation, &data) };
assert(res == VK_SUCCESS);
mapped_here = true;
}
memcpy(data, vertices.data(), vertex_buffer_size);
    memcpy(static_cast<std::uint8_t *>(data) + vertex_buffer_size,
        indices.data(), index_buffer_size);
immediate_submit([&](vk::CommandBuffer cmd) {
vk::BufferCopy vertex_copy {};
vertex_copy.dstOffset = 0;
vertex_copy.srcOffset = 0;
vertex_copy.size = vertex_buffer_size;
cmd.copyBuffer(
staging.buffer, new_surface.vertex_buffer.buffer, vertex_copy);
vk::BufferCopy index_copy {};
index_copy.dstOffset = 0;
index_copy.srcOffset = vertex_buffer_size;
index_copy.size = index_buffer_size;
cmd.copyBuffer(
staging.buffer, new_surface.index_buffer.buffer, index_copy);
});
if (mapped_here) {
vmaUnmapMemory(m_vk.allocator, staging.allocation);
}
destroy_buffer(staging);
return new_surface;
}
} // namespace Lunar