#[derive(Clone, Copy, PartialEq, Eq, Hash)]
pub struct BindGroupLayout(Handle);
-#[derive(Clone, Copy, PartialEq, Eq, Hash)]
-pub struct BindGroup(Handle);
-
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
pub struct Pipeline(Handle);
pub max_lod: f32,
}
-#[derive(Clone, Copy, Debug, PartialEq, Eq)]
-pub enum BindingType {
- Sampler,
- Texture,
- UniformBuffer,
- StorageBuffer,
- DynamicUniformBuffer,
- DynamicStorageBuffer,
-}
-
-pub struct BindGroupLayoutEntryDesc {
- pub slot: u32,
- pub stages: ShaderStageFlags,
- pub binding_type: BindingType,
- pub count: u32,
-}
-
-pub struct BindGroupLayoutDesc<'a> {
- pub entries: &'a [BindGroupLayoutEntryDesc],
-}
-
pub struct GraphicsPipelineLayout<'a> {
pub color_attachment_formats: &'a [TextureFormat],
pub depth_attachment_format: Option<TextureFormat>,
pub stencil_attachment: Option<RenderingAttachment>,
}
+/// The kind of resource a bind-group layout entry exposes to shaders.
+///
+/// The `Dynamic*` variants denote buffer bindings whose offset is supplied
+/// at bind time rather than baked into the descriptor.
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub enum BindingType {
+ Sampler,
+ Texture,
+ UniformBuffer,
+ StorageBuffer,
+ DynamicUniformBuffer,
+ DynamicStorageBuffer,
+}
+
+/// Describes one binding slot within a bind-group layout.
+pub struct BindGroupLayoutEntryDesc {
+ /// Binding slot index within the group.
+ pub slot: u32,
+ /// Shader stages that may access this binding.
+ pub stages: ShaderStageFlags,
+ /// Kind of resource bound at this slot.
+ pub binding_type: BindingType,
+ /// Number of descriptors in the binding (for arrays of resources).
+ pub count: u32,
+}
+
+/// Description used to create a [`BindGroupLayout`]: the full list of
+/// binding slots the group contains.
+pub struct BindGroupLayoutDesc<'a> {
+ pub entries: &'a [BindGroupLayoutEntryDesc],
+}
+
+/// A single resource binding written when setting a bind group.
+pub struct Bind<'a> {
+ /// Target binding slot within the group.
+ pub binding: u32,
+ /// First array element to write within the binding.
+ pub array_element: u32,
+ /// The resource(s) to bind, tagged by type.
+ pub typed: TypedBind<'a>,
+}
+
+/// Type-tagged list of resources for a [`Bind`]; one handle per array
+/// element starting at `Bind::array_element`.
+pub enum TypedBind<'a> {
+ Sampler(&'a [Sampler]),
+ Texture(&'a [Texture]),
+ Buffer(&'a [Buffer]),
+}
+
thread_token_def!(ThreadToken, GpuConcurrent, 8);
pub struct FrameToken<'device> {
phantom: PhantomData<&'device dyn Device>,
}
-pub struct CommandBufferToken<'frame, 'thread> {
- frame_token: &'frame FrameToken<'frame>,
- thread_token: &'thread mut ThreadToken,
+pub struct CommandBufferToken {
index: usize,
raw: u64,
- phantom: PhantomUnsend,
+ phantom_unsend: PhantomUnsend,
}
pub trait Device {
);
fn destroy_pipeline(&self, frame_token: &FrameToken, pipeline: Pipeline);
+ unsafe fn map_buffer(&self, buffer: Buffer) -> *mut u8;
+ unsafe fn unmap_buffer(&self, buffer: Buffer);
+
fn acquire_swapchain(
&self,
frame_token: &FrameToken,
) -> (u32, u32, Texture);
fn destroy_window(&self, window: Window);
- fn create_command_buffer<'frame>(
- &'frame self,
- frame_token: &'frame FrameToken,
- thread_token: &'frame mut ThreadToken,
+ fn create_command_buffer(
+ &self,
+ frame_token: &FrameToken,
+ thread_token: &mut ThreadToken,
) -> CommandBufferToken;
- fn cmd_bind_pipeline(&self, command_buffer_token: &mut CommandBufferToken, pipeline: Pipeline);
+ fn cmd_set_bind_group(
+ &self,
+ frame_token: &FrameToken,
+ thread_token: &mut ThreadToken,
+ command_buffer_token: &mut CommandBufferToken,
+ pipeline: Pipeline,
+ layout: BindGroupLayout,
+ bind_group_index: u32,
+ bindings: &[Bind],
+ );
+
+ fn cmd_set_pipeline(&self, command_buffer_token: &mut CommandBufferToken, pipeline: Pipeline);
+
fn cmd_begin_rendering(
&self,
+ frame_token: &FrameToken,
+ thread_token: &mut ThreadToken,
command_buffer_token: &mut CommandBufferToken,
desc: &RenderingDesc,
);
+
fn cmd_end_rendering(&self, command_buffer_token: &mut CommandBufferToken);
+
fn cmd_set_viewports(
&self,
command_buffer_token: &mut CommandBufferToken,
viewports: &[Viewport],
);
+
fn cmd_set_scissors(&self, command_buffer_token: &mut CommandBufferToken, scissors: &[Scissor]);
+
fn cmd_draw(
&self,
command_buffer_token: &mut CommandBufferToken,
first_instance: u32,
);
- fn submit(&self, command_buffer_token: CommandBufferToken);
+ fn submit(
+ &self,
+ frame_token: &FrameToken,
+ thread_token: &mut ThreadToken,
+ command_buffer_token: CommandBufferToken,
+ );
fn begin_frame(&self) -> FrameToken;
+
fn end_frame<'device>(&'device self, frame_token: FrameToken<'device>);
}
use narcissus_app::{App, Window};
use narcissus_core::{
- cstr, default, manual_arc, manual_arc::ManualArc, Mutex, PhantomUnsend, Pool,
+ cstr, default, manual_arc, manual_arc::ManualArc, HybridArena, Mutex, PhantomUnsend, Pool,
};
-use vk::DeviceFunctions;
use vulkan_sys as vk;
use crate::{
- BindGroupLayout, BindGroupLayoutDesc, BindingType, Buffer, BufferDesc, BufferUsageFlags,
+ Bind, BindGroupLayout, BindGroupLayoutDesc, BindingType, Buffer, BufferDesc, BufferUsageFlags,
ClearValue, CommandBufferToken, ComputePipelineDesc, Device, FrameToken, GpuConcurrent,
GraphicsPipelineDesc, LoadOp, MemoryLocation, Pipeline, Sampler, SamplerAddressMode,
SamplerCompareOp, SamplerDesc, SamplerFilter, ShaderStageFlags, Texture, TextureDesc,
- TextureDimension, TextureFormat, TextureUsageFlags, TextureViewDesc, ThreadToken,
+ TextureDimension, TextureFormat, TextureUsageFlags, TextureViewDesc, ThreadToken, TypedBind,
};
const NUM_FRAMES: usize = 2;
Swapchain(VulkanTextureSwapchain),
}
+impl VulkanTextureHolder {
+ /// Returns the texture's `vk::ImageView` regardless of which ownership
+ /// variant (unique, shared, or swapchain) holds it.
+ fn image_view(&self) -> vk::ImageView {
+ match self {
+ VulkanTextureHolder::Unique(x) => x.view,
+ VulkanTextureHolder::Shared(x) => x.view,
+ VulkanTextureHolder::Swapchain(x) => x.view,
+ }
+ }
+}
+
struct VulkanSampler(vk::Sampler);
struct VulkanBindGroupLayout(vk::DescriptorSetLayout);
struct VulkanPipeline {
pipeline: vk::Pipeline,
+ pipeline_layout: vk::PipelineLayout,
pipeline_bind_point: vk::PipelineBindPoint,
}
struct VulkanMemory {
memory: vk::DeviceMemory,
offset: u64,
- _size: u64,
+ size: u64,
}
+#[derive(Default)]
struct VulkanPools {
textures: Pool<VulkanTextureHolder>,
buffers: Pool<VulkanBuffer>,
universal_queue_fence: AtomicU64,
command_buffer_pools: GpuConcurrent<VulkanCommandBufferPool>,
+ descriptor_pool_pools: GpuConcurrent<vk::DescriptorPool>,
present_swapchains: Mutex<HashMap<Window, VulkanPresentInfo>>,
destroyed_image_views: Mutex<VecDeque<vk::ImageView>>,
destroyed_samplers: Mutex<VecDeque<vk::Sampler>>,
destroyed_descriptor_set_layouts: Mutex<VecDeque<vk::DescriptorSetLayout>>,
+ destroyed_pipeline_layouts: Mutex<VecDeque<vk::PipelineLayout>>,
destroyed_pipelines: Mutex<VecDeque<vk::Pipeline>>,
recycled_semaphores: Mutex<VecDeque<vk::Semaphore>>,
+ recycled_descriptor_pools: Mutex<VecDeque<vk::DescriptorPool>>,
+}
+
+impl VulkanFrame {
+ /// Looks up the command buffer identified by `command_buffer_token` in
+ /// this frame's per-thread pool. The mutable borrow is tied to the
+ /// exclusive `ThreadToken`, not to `self`, so multiple threads can hold
+ /// their own command buffers concurrently.
+ fn command_buffer_mut<'token>(
+ &self,
+ thread_token: &'token mut ThreadToken,
+ command_buffer_token: &CommandBufferToken,
+ ) -> &'token mut VulkanCommandBuffer {
+ let command_buffer_pool = self.command_buffer_pools.get_mut(thread_token);
+ &mut command_buffer_pool.command_buffers[command_buffer_token.index]
+ }
+
+ /// Queues a semaphore to be reclaimed when this frame is recycled.
+ fn recycle_semaphore(&self, semaphore: vk::Semaphore) {
+ self.recycled_semaphores.lock().push_back(semaphore);
+ }
+
+ /// Queues a descriptor pool to be reset and reclaimed when this frame is
+ /// recycled.
+ fn recycle_descriptor_pool(&self, descriptor_pool: vk::DescriptorPool) {
+ self.recycled_descriptor_pools
+ .lock()
+ .push_back(descriptor_pool)
+ }
+}
type SwapchainDestroyQueue = DelayQueue<(
pools: Mutex<VulkanPools>,
semaphores: Mutex<VecDeque<vk::Semaphore>>,
+ descriptor_pools: Mutex<VecDeque<vk::DescriptorPool>>,
_global_fn: vk::GlobalFunctions,
instance_fn: vk::InstanceFunctions,
};
let frames = Box::new(std::array::from_fn(|_| {
- let cmd_buffer_pools = GpuConcurrent::new(|| {
+ let command_buffer_pools = GpuConcurrent::new(|| {
let pool = {
let create_info = vk::CommandPoolCreateInfo {
flags: vk::CommandPoolCreateFlags::TRANSIENT,
next_free_index: 0,
}
});
+
+ let descriptor_pool_pools = GpuConcurrent::new(|| vk::DescriptorPool::null());
+
UnsafeCell::new(VulkanFrame {
- command_buffer_pools: cmd_buffer_pools,
+ command_buffer_pools,
+ descriptor_pool_pools,
universal_queue_fence: AtomicU64::new(universal_queue_fence),
present_swapchains: Default::default(),
destroyed_allocations: Default::default(),
destroyed_image_views: Default::default(),
destroyed_samplers: Default::default(),
destroyed_descriptor_set_layouts: Default::default(),
+ destroyed_pipeline_layouts: Default::default(),
destroyed_pipelines: Default::default(),
recycled_semaphores: Default::default(),
+ recycled_descriptor_pools: Default::default(),
})
}));
swapchains: Mutex::new(HashMap::new()),
destroyed_swapchains: Mutex::new(DelayQueue::new(8)),
- pools: Mutex::new(VulkanPools {
- textures: Pool::new(),
- buffers: Pool::new(),
- samplers: Pool::new(),
- bind_group_layouts: Pool::new(),
- pipelines: Pool::new(),
- }),
+ pools: Default::default(),
- semaphores: Mutex::new(VecDeque::new()),
+ semaphores: Default::default(),
+ descriptor_pools: Default::default(),
_global_fn: global_fn,
instance_fn,
unsafe { &*self.frames[frame_token.frame_index % NUM_FRAMES].get() }
}
- fn frame_mut<'token>(
- &'token self,
- frame_token: &'token mut FrameToken,
- ) -> &'token mut VulkanFrame {
+ fn frame_mut<'token>(&self, frame_token: &'token mut FrameToken) -> &'token mut VulkanFrame {
frame_token.check_device(self);
frame_token.check_frame_counter(self.frame_counter.load());
// SAFETY: mutable reference is bound to the frame token exposed by the API. only one frame token can be valid at a time.
unsafe { &mut *self.frames[frame_token.frame_index % NUM_FRAMES].get() }
}
- fn command_buffer_mut<'token>(
- &'token self,
- command_buffer_token: &'token mut CommandBufferToken,
- ) -> &'token mut VulkanCommandBuffer {
- let frame = self.frame(command_buffer_token.frame_token);
- let command_buffer_pool = frame
- .command_buffer_pools
- .get_mut(command_buffer_token.thread_token);
- &mut command_buffer_pool.command_buffers[command_buffer_token.index]
- }
-
fn find_memory_type_index(&self, filter: u32, flags: vk::MemoryPropertyFlags) -> u32 {
(0..self.physical_device_memory_properties.memory_type_count)
.map(|memory_type_index| {
VulkanMemory {
memory,
offset: 0,
- _size: desc.requirements.size,
+ size: desc.requirements.size,
}
}
})
}
+    /// Returns a descriptor pool to allocate descriptor sets from.
+    ///
+    /// Pops a previously reset pool from the recycled queue when one is
+    /// available, otherwise creates a brand new pool.
+    fn request_descriptor_pool(&self) -> vk::DescriptorPool {
+        if let Some(descriptor_pool) = self.descriptor_pools.lock().pop_front() {
+            descriptor_pool
+        } else {
+            // Capacity heuristic for freshly created pools: one shared count
+            // for max sets and for every descriptor type, so the sizes cannot
+            // silently drift apart (the SampledImage entry previously used a
+            // separate hard-coded literal).
+            const DESCRIPTOR_COUNT: u32 = 500;
+            let descriptor_count = DESCRIPTOR_COUNT;
+            let pool_sizes = &[
+                vk::DescriptorPoolSize {
+                    descriptor_type: vk::DescriptorType::Sampler,
+                    descriptor_count,
+                },
+                vk::DescriptorPoolSize {
+                    descriptor_type: vk::DescriptorType::UniformBuffer,
+                    descriptor_count,
+                },
+                vk::DescriptorPoolSize {
+                    descriptor_type: vk::DescriptorType::UniformBufferDynamic,
+                    descriptor_count,
+                },
+                vk::DescriptorPoolSize {
+                    descriptor_type: vk::DescriptorType::StorageBuffer,
+                    descriptor_count,
+                },
+                vk::DescriptorPoolSize {
+                    descriptor_type: vk::DescriptorType::StorageBufferDynamic,
+                    descriptor_count,
+                },
+                vk::DescriptorPoolSize {
+                    descriptor_type: vk::DescriptorType::SampledImage,
+                    descriptor_count,
+                },
+            ];
+
+            let mut descriptor_pool = vk::DescriptorPool::null();
+            let create_info = vk::DescriptorPoolCreateInfo {
+                max_sets: DESCRIPTOR_COUNT,
+                pool_sizes: pool_sizes.into(),
+                ..default()
+            };
+            vk_check!(self.device_fn.create_descriptor_pool(
+                self.device,
+                &create_info,
+                None,
+                &mut descriptor_pool
+            ));
+            descriptor_pool
+        }
+    }
+
fn request_semaphore(&self) -> vk::Semaphore {
if let Some(semaphore) = self.semaphores.lock().pop_front() {
semaphore
}
}
- fn recycle_semaphore(&self, frame: &VulkanFrame, semaphore: vk::Semaphore) {
- frame.recycled_semaphores.lock().push_back(semaphore)
- }
-
fn request_transient_semaphore(&self, frame: &VulkanFrame) -> vk::Semaphore {
let semaphore = self.request_semaphore();
- self.recycle_semaphore(frame, semaphore);
+ frame.recycle_semaphore(semaphore);
semaphore
}
- fn destroy_deferred(device_fn: &DeviceFunctions, device: vk::Device, frame: &mut VulkanFrame) {
+ fn destroy_deferred(
+ device_fn: &vk::DeviceFunctions,
+ device: vk::Device,
+ frame: &mut VulkanFrame,
+ ) {
+ for pipeline_layout in frame.destroyed_pipeline_layouts.get_mut().drain(..) {
+ unsafe { device_fn.destroy_pipeline_layout(device, pipeline_layout, None) }
+ }
for pipeline in frame.destroyed_pipelines.get_mut().drain(..) {
unsafe { device_fn.destroy_pipeline(device, pipeline, None) }
}
}
fn create_graphics_pipeline(&self, desc: &GraphicsPipelineDesc) -> Pipeline {
+ let arena = HybridArena::<1024>::new();
+ let set_layouts_iter = desc.bind_group_layouts.iter().map(|bind_group_layout| {
+ self.pools
+ .lock()
+ .bind_group_layouts
+ .get(bind_group_layout.0)
+ .unwrap()
+ .0
+ });
+ let set_layouts = arena.alloc_slice_fill_iter(set_layouts_iter);
+
let layout = {
let create_info = vk::PipelineLayoutCreateInfo {
- //set_layouts: set_layouts.as_slice().into(),
+ set_layouts: set_layouts.into(),
..default()
};
let mut pipeline_layout = vk::PipelineLayout::null();
self.device_fn
.destroy_shader_module(self.device, fragment_module, None)
};
- unsafe {
- self.device_fn
- .destroy_pipeline_layout(self.device, layout, None)
- };
let handle = self.pools.lock().pipelines.insert(VulkanPipeline {
pipeline: pipelines[0],
+ pipeline_layout: layout,
pipeline_bind_point: vk::PipelineBindPoint::Graphics,
});
fn destroy_pipeline(&self, frame_token: &FrameToken, pipeline: Pipeline) {
if let Some(pipeline) = self.pools.lock().pipelines.remove(pipeline.0) {
- self.frame(frame_token)
+ let frame = self.frame(frame_token);
+ frame
+ .destroyed_pipeline_layouts
+ .lock()
+ .push_back(pipeline.pipeline_layout);
+ frame
.destroyed_pipelines
.lock()
- .push_back(pipeline.pipeline)
+ .push_back(pipeline.pipeline);
}
}
}
}
- fn create_command_buffer<'frame>(
- &'frame self,
- frame_token: &'frame FrameToken,
- thread_token: &'frame mut ThreadToken,
+ fn create_command_buffer(
+ &self,
+ frame_token: &FrameToken,
+ thread_token: &mut ThreadToken,
) -> CommandBufferToken {
let command_buffer_pool = self
.frame(frame_token)
));
CommandBufferToken {
- frame_token,
- thread_token,
index,
raw: command_buffer.as_raw(),
- phantom: PhantomUnsend {},
+ phantom_unsend: PhantomUnsend {},
}
}
- fn cmd_bind_pipeline(&self, command_buffer_token: &mut CommandBufferToken, pipeline: Pipeline) {
+    /// Allocates a transient descriptor set for `layout`, writes `bindings`
+    /// into it, and binds it at `bind_group_index` on the command buffer.
+    ///
+    /// Descriptor sets live in per-thread, per-frame pools that are reset
+    /// wholesale when the frame is recycled, so nothing here is freed
+    /// individually.
+    fn cmd_set_bind_group(
+        &self,
+        frame_token: &FrameToken,
+        thread_token: &mut ThreadToken,
+        command_buffer_token: &mut CommandBufferToken,
+        pipeline: Pipeline,
+        layout: BindGroupLayout,
+        bind_group_index: u32,
+        bindings: &[Bind],
+    ) {
+        // Scratch storage for the descriptor info arrays built below; freed
+        // when the arena drops at the end of this call.
+        let arena = HybridArena::<4096>::new();
+
+        let frame = self.frame(frame_token);
+        let pools = self.pools.lock();
+
+        let descriptor_set_layout = pools.bind_group_layouts.get(layout.0).unwrap().0;
+
+        let mut descriptor_pool = *frame.descriptor_pool_pools.get(thread_token);
+        let mut allocated_pool = false;
+        let descriptor_set = loop {
+            if descriptor_pool.is_null() {
+                // Need to fetch a new descriptor pool. Register it with the
+                // frame so it is reset and recycled when the frame completes.
+                descriptor_pool = self.request_descriptor_pool();
+                frame.recycle_descriptor_pool(descriptor_pool);
+                *frame.descriptor_pool_pools.get_mut(thread_token) = descriptor_pool;
+                allocated_pool = true;
+            }
+            let allocate_info = vk::DescriptorSetAllocateInfo {
+                descriptor_pool,
+                set_layouts: std::slice::from_ref(&descriptor_set_layout).into(),
+                ..default()
+            };
+            let mut descriptor_set = vk::DescriptorSet::null();
+            match unsafe {
+                self.device_fn.allocate_descriptor_sets(
+                    self.device,
+                    &allocate_info,
+                    &mut descriptor_set,
+                )
+            } {
+                vk::Result::Success => break descriptor_set,
+                _ => {
+                    // If we fail to allocate after just creating a new
+                    // descriptor pool, then we'll never be able to allocate
+                    // one. :'(
+                    if allocated_pool {
+                        panic!("failed to allocate descriptor set")
+                    }
+                    // The pool inherited from this thread's slot is exhausted.
+                    // Null it so the next iteration requests a fresh pool;
+                    // otherwise we would retry the same exhausted pool forever.
+                    descriptor_pool = vk::DescriptorPool::null();
+                }
+            }
+        };
+
+        // Translate each `Bind` into a vk::WriteDescriptorSet, with the
+        // per-descriptor info arrays allocated in the arena so their pointers
+        // stay valid until `update_descriptor_sets` below.
+        let write_descriptors_iter = bindings.iter().map(|bind| match bind.typed {
+            TypedBind::Sampler(samplers) => {
+                let sampler_infos_iter = samplers.iter().map(|sampler| {
+                    let sampler = pools.samplers.get(sampler.0).unwrap();
+                    vk::DescriptorImageInfo {
+                        image_layout: vk::ImageLayout::Undefined,
+                        image_view: vk::ImageView::null(),
+                        sampler: sampler.0,
+                    }
+                });
+                let image_infos = arena.alloc_slice_fill_iter(sampler_infos_iter);
+                vk::WriteDescriptorSet {
+                    dst_set: descriptor_set,
+                    dst_binding: bind.binding,
+                    dst_array_element: bind.array_element,
+                    descriptor_count: image_infos.len() as u32,
+                    descriptor_type: vk::DescriptorType::Sampler,
+                    image_info: image_infos.as_ptr(),
+                    ..default()
+                }
+            }
+            TypedBind::Texture(textures) => {
+                let image_infos_iter = textures.iter().map(|texture| {
+                    let texture = pools.textures.get(texture.0).unwrap();
+                    vk::DescriptorImageInfo {
+                        // NOTE(review): sampled images are declared in
+                        // ColorAttachmentOptimal layout — confirm this matches
+                        // the layout textures are transitioned to before
+                        // sampling.
+                        image_layout: vk::ImageLayout::ColorAttachmentOptimal,
+                        image_view: texture.image_view(),
+                        sampler: vk::Sampler::null(),
+                    }
+                });
+                let image_infos = arena.alloc_slice_fill_iter(image_infos_iter);
+                vk::WriteDescriptorSet {
+                    dst_set: descriptor_set,
+                    dst_binding: bind.binding,
+                    dst_array_element: bind.array_element,
+                    descriptor_count: image_infos.len() as u32,
+                    descriptor_type: vk::DescriptorType::SampledImage,
+                    image_info: image_infos.as_ptr(),
+                    ..default()
+                }
+            }
+            TypedBind::Buffer(buffers) => {
+                let buffer_infos_iter = buffers.iter().map(|buffer| {
+                    let buffer = pools.buffers.get(buffer.0).unwrap();
+                    vk::DescriptorBufferInfo {
+                        buffer: buffer.buffer,
+                        offset: 0,
+                        // !0 == VK_WHOLE_SIZE: bind the entire buffer.
+                        range: !0,
+                    }
+                });
+                let buffer_infos = arena.alloc_slice_fill_iter(buffer_infos_iter);
+                vk::WriteDescriptorSet {
+                    dst_set: descriptor_set,
+                    dst_binding: bind.binding,
+                    dst_array_element: bind.array_element,
+                    descriptor_count: buffer_infos.len() as u32,
+                    // NOTE(review): every buffer bind is written as a
+                    // UniformBuffer; storage / dynamic buffer bindings are not
+                    // yet distinguished here — confirm against BindingType.
+                    descriptor_type: vk::DescriptorType::UniformBuffer,
+                    buffer_info: buffer_infos.as_ptr(),
+                    ..default()
+                }
+            }
+        });
+        let write_descriptors = arena.alloc_slice_fill_iter(write_descriptors_iter);
+
+        unsafe {
+            self.device_fn
+                .update_descriptor_sets(self.device, write_descriptors, &[])
+        };
+
+        let pipeline = pools.pipelines.get(pipeline.0).unwrap();
+        let command_buffer = vk::CommandBuffer::from_raw(command_buffer_token.raw);
+        unsafe {
+            self.device_fn.cmd_bind_descriptor_sets(
+                command_buffer,
+                pipeline.pipeline_bind_point,
+                pipeline.pipeline_layout,
+                bind_group_index,
+                &[descriptor_set],
+                &[],
+            )
+        }
+    }
+
+ fn cmd_set_pipeline(&self, command_buffer_token: &mut CommandBufferToken, pipeline: Pipeline) {
let command_buffer = vk::CommandBuffer::from_raw(command_buffer_token.raw);
let VulkanPipeline {
pipeline,
+ pipeline_layout: _,
pipeline_bind_point,
} = *self.pools.lock().pipelines.get(pipeline.0).unwrap();
unsafe {
fn cmd_begin_rendering(
&self,
+ frame_token: &FrameToken,
+ thread_token: &mut ThreadToken,
command_buffer_token: &mut CommandBufferToken,
desc: &crate::RenderingDesc,
) {
- let command_buffer = self.command_buffer_mut(command_buffer_token);
+ let frame = self.frame(frame_token);
+ let command_buffer = frame.command_buffer_mut(thread_token, command_buffer_token);
let pools = self.pools.lock();
let color_attachments = desc
}
}
- fn submit(&self, mut command_buffer_token: CommandBufferToken) {
+ fn submit(
+ &self,
+ frame_token: &FrameToken,
+ thread_token: &mut ThreadToken,
+ mut command_buffer_token: CommandBufferToken,
+ ) {
let fence = self.universal_queue_fence.fetch_add(1, Ordering::SeqCst) + 1;
- let frame = self.frame(command_buffer_token.frame_token);
+ let frame = self.frame(frame_token);
frame.universal_queue_fence.store(fence, Ordering::Relaxed);
- let command_buffer = self.command_buffer_mut(&mut command_buffer_token);
+ let command_buffer = frame.command_buffer_mut(thread_token, &mut command_buffer_token);
for &(image, _) in command_buffer.swapchains_touched.values() {
// transition swapchain image from attachment optimal to present src
vk_check!(device_fn.wait_semaphores(device, &wait_info, !0));
}
+ for pool in frame.descriptor_pool_pools.slots_mut() {
+ *pool = vk::DescriptorPool::null()
+ }
+
for pool in frame.command_buffer_pools.slots_mut() {
if pool.next_free_index == 0 {
continue;
.lock()
.extend(frame.recycled_semaphores.get_mut().drain(..));
+ for descriptor_pool in frame.recycled_descriptor_pools.get_mut() {
+ vk_check!(device_fn.reset_descriptor_pool(
+ device,
+ *descriptor_pool,
+ vk::DescriptorPoolResetFlags::default()
+ ))
+ }
+
+ self.descriptor_pools
+ .lock()
+ .extend(frame.recycled_descriptor_pools.get_mut().drain(..));
+
Self::destroy_deferred(device_fn, device, frame);
self.destroyed_swapchains
self.frame_counter.release(frame_token);
}
+
+    /// Maps the buffer's backing memory and returns a host pointer to it.
+    ///
+    /// Returns a null pointer if `buffer` is not a live handle (the pointer
+    /// is only written by a successful `vkMapMemory`).
+    ///
+    /// NOTE(review): the full `memory.size` is mapped at `memory.offset`;
+    /// this assumes the backing memory type is host-visible — confirm
+    /// against the allocator's memory-type selection.
+    ///
+    /// # Safety
+    ///
+    /// The caller must not use the returned pointer after `unmap_buffer` is
+    /// called or the buffer is destroyed.
+    unsafe fn map_buffer(&self, buffer: Buffer) -> *mut u8 {
+        let mut ptr = std::ptr::null_mut();
+        if let Some(buffer) = self.pools.lock().buffers.get(buffer.0) {
+            vk_check!(self.device_fn.map_memory(
+                self.device,
+                buffer.memory.memory,
+                buffer.memory.offset,
+                buffer.memory.size,
+                vk::MemoryMapFlags::default(),
+                &mut ptr
+            ))
+        }
+        // A plain `as` cast is the idiomatic raw-pointer conversion;
+        // `transmute` between pointer types is unnecessary (clippy:
+        // transmute_ptr_to_ptr).
+        ptr as *mut u8
+    }
+
+ /// Unmaps the buffer's backing memory. Does nothing if `buffer` is not a
+ /// live handle, mirroring `map_buffer`'s null-pointer behavior.
+ ///
+ /// # Safety
+ ///
+ /// Pointers previously returned by `map_buffer` for this buffer must not
+ /// be used after this call.
+ unsafe fn unmap_buffer(&self, buffer: Buffer) {
+ if let Some(buffer) = self.pools.lock().buffers.get(buffer.0) {
+ self.device_fn
+ .unmap_memory(self.device, buffer.memory.memory)
+ }
+ }
}
impl<'app> Drop for VulkanDevice<'app> {
unsafe { device_fn.destroy_semaphore(device, *semaphore, None) }
}
+ for descriptor_pool in frame.recycled_descriptor_pools.get_mut() {
+ unsafe { device_fn.destroy_descriptor_pool(device, *descriptor_pool, None) }
+ }
+
Self::destroy_deferred(device_fn, device, frame);
for pool in frame.command_buffer_pools.slots_mut() {
}
}
- let mut image_views = Vec::new();
- let mut images = Vec::new();
- for texture in self.pools.get_mut().textures.values() {
- match texture {
- VulkanTextureHolder::Unique(texture) => {
- image_views.push(texture.view);
- images.push(texture.texture.image)
- }
- VulkanTextureHolder::Shared(texture) => {
- image_views.push(texture.view);
- }
- VulkanTextureHolder::Swapchain(texture) => {
- image_views.push(texture.view);
+ let VulkanPools {
+ textures,
+ buffers,
+ samplers,
+ bind_group_layouts,
+ pipelines,
+ } = self.pools.get_mut();
+
+ for buffer in buffers.values() {
+ unsafe { device_fn.destroy_buffer(device, buffer.buffer, None) }
+ unsafe { device_fn.free_memory(device, buffer.memory.memory, None) }
+ }
+
+ {
+ let mut image_views = Vec::new();
+ let mut images = Vec::new();
+ for texture in textures.values() {
+ match texture {
+ VulkanTextureHolder::Unique(texture) => {
+ image_views.push(texture.view);
+ images.push(texture.texture.image)
+ }
+ VulkanTextureHolder::Shared(texture) => {
+ image_views.push(texture.view);
+ }
+ VulkanTextureHolder::Swapchain(texture) => {
+ image_views.push(texture.view);
+ }
}
}
+
+ for image_view in image_views {
+ unsafe { device_fn.destroy_image_view(device, image_view, None) }
+ }
+
+ for image in images {
+ unsafe { device_fn.destroy_image(device, image, None) }
+ }
}
- for image_view in image_views {
- unsafe { device_fn.destroy_image_view(device, image_view, None) }
+ for sampler in samplers.values() {
+ unsafe { device_fn.destroy_sampler(device, sampler.0, None) }
}
- for image in images {
- unsafe { device_fn.destroy_image(device, image, None) }
+ for pipeline in pipelines.values() {
+ unsafe {
+ self.device_fn
+ .destroy_pipeline_layout(self.device, pipeline.pipeline_layout, None)
+ };
+ unsafe { device_fn.destroy_pipeline(device, pipeline.pipeline, None) }
+ }
+
+ for descriptor_set_layout in bind_group_layouts.values() {
+ unsafe {
+ device_fn.destroy_descriptor_set_layout(device, descriptor_set_layout.0, None)
+ }
}
for semaphore in self
unsafe { device_fn.destroy_semaphore(device, *semaphore, None) }
}
+ for descriptor_pool in self.descriptor_pools.get_mut() {
+ unsafe { device_fn.destroy_descriptor_pool(device, *descriptor_pool, None) }
+ }
+
{
let destroyed_swapchains = self
.destroyed_swapchains
}
}
- for pipeline in self.pools.get_mut().pipelines.values() {
- unsafe { device_fn.destroy_pipeline(device, pipeline.pipeline, None) }
- }
-
- for descriptor_set_layout in self.pools.get_mut().bind_group_layouts.values() {
- unsafe {
- device_fn.destroy_descriptor_set_layout(device, descriptor_set_layout.0, None)
- }
- }
-
unsafe { device_fn.destroy_device(device, None) }
unsafe { self.instance_fn.destroy_instance(self.instance, None) };
}
-use narcissus_app::{create_app, Event, Window, WindowDesc};
+use std::time::Instant;
+
+use narcissus_app::{create_app, Event, WindowDesc};
use narcissus_core::{cstr, obj, slice, Image};
use narcissus_gpu::{
- create_vulkan_device, BindGroupLayoutDesc, BindGroupLayoutEntryDesc, BindingType, ClearValue,
- Device, FrameToken, GraphicsPipelineDesc, GraphicsPipelineLayout, LoadOp, MemoryLocation,
- Pipeline, RenderingAttachment, RenderingDesc, Scissor, ShaderDesc, ShaderStageFlags, StoreOp,
- TextureDesc, TextureDimension, TextureFormat, TextureUsageFlags, TextureViewDesc, ThreadToken,
- Viewport,
+ create_vulkan_device, Bind, BindGroupLayoutDesc, BindGroupLayoutEntryDesc, BindingType, Buffer,
+ BufferDesc, BufferUsageFlags, ClearValue, Device, GraphicsPipelineDesc, GraphicsPipelineLayout,
+ LoadOp, MemoryLocation, RenderingAttachment, RenderingDesc, Scissor, ShaderDesc,
+ ShaderStageFlags, StoreOp, TextureDesc, TextureDimension, TextureFormat, TextureUsageFlags,
+ TextureViewDesc, ThreadToken, TypedBind, Viewport,
};
use narcissus_maths::{Vec2, Vec3};
let vert_shader_spv = Spirv(*include_bytes!("shaders/triangle.vert.spv"));
let frag_shader_spv = Spirv(*include_bytes!("shaders/triangle.frag.spv"));
- let global_layout = device.create_bind_group_layout(&BindGroupLayoutDesc {
- entries: &[
- // Global uniforms.
- BindGroupLayoutEntryDesc {
- slot: 0,
- stages: ShaderStageFlags::ALL,
- binding_type: BindingType::UniformBuffer,
- count: 1,
- },
- ],
- });
-
- let per_material_layout = device.create_bind_group_layout(&BindGroupLayoutDesc {
- entries: &[
- // Per-material uniforms.
- BindGroupLayoutEntryDesc {
- slot: 0,
- stages: ShaderStageFlags::ALL,
- binding_type: BindingType::UniformBuffer,
- count: 1,
- },
- // Per-material textures.
- BindGroupLayoutEntryDesc {
- slot: 1,
- stages: ShaderStageFlags::ALL,
- binding_type: BindingType::Texture,
- count: 1,
- },
- ],
- });
-
- let per_draw_layout = device.create_bind_group_layout(&BindGroupLayoutDesc {
- entries: &[
- // Per-draw Uniforms
- BindGroupLayoutEntryDesc {
- slot: 0,
- stages: ShaderStageFlags::ALL,
- binding_type: BindingType::DynamicUniformBuffer,
- count: 1,
- },
- ],
+ let bind_group_layout = device.create_bind_group_layout(&BindGroupLayoutDesc {
+ entries: &[BindGroupLayoutEntryDesc {
+ slot: 0,
+ stages: ShaderStageFlags::ALL,
+ binding_type: BindingType::UniformBuffer,
+ count: 1,
+ }],
});
let pipeline = device.create_graphics_pipeline(&GraphicsPipelineDesc {
entrypoint_name: cstr!("main"),
code: &frag_shader_spv.0,
},
- bind_group_layouts: &[
- // Set 0
- global_layout,
- // Set 1
- per_material_layout,
- // Set 2
- per_draw_layout,
- ],
+ bind_group_layouts: &[bind_group_layout],
layout: GraphicsPipelineLayout {
color_attachment_formats: &[TextureFormat::BGRA8_SRGB],
depth_attachment_format: None,
device.destroy_texture(&frame_token, texture2);
device.end_frame(frame_token);
+ // A uniform buffer that stays persistently host-mapped for its whole
+ // lifetime; unmapped on drop.
+ struct UniformBufferMap<'a> {
+ device: &'a dyn Device,
+ buffer: Buffer,
+ // Host view of the mapped memory, exactly `len` bytes long.
+ slice: &'a mut [u8],
+ }
+
+ impl<'a> UniformBufferMap<'a> {
+ // Creates a `len`-byte uniform buffer and maps it immediately.
+ // NOTE(review): the buffer is allocated with
+ // MemoryLocation::PreferDevice yet host-mapped right away — confirm
+ // the device's allocator guarantees host-visible memory in that case.
+ pub fn new(device: &'a dyn Device, len: usize) -> Self {
+ let buffer = device.create_buffer(&BufferDesc {
+ memory_location: MemoryLocation::PreferDevice,
+ usage: BufferUsageFlags::UNIFORM,
+ size: len,
+ });
+ unsafe {
+ let ptr = device.map_buffer(buffer);
+ let slice = std::slice::from_raw_parts_mut(ptr, len);
+ Self {
+ device,
+ buffer,
+ slice,
+ }
+ }
+ }
+
+ // The underlying GPU buffer handle, for binding.
+ pub fn buffer(&self) -> Buffer {
+ self.buffer
+ }
+
+ // Writes a single little-endian f32 over the whole mapping.
+ // Panics (copy_from_slice length mismatch) unless the buffer is
+ // exactly 4 bytes long.
+ pub fn write_f32(&mut self, value: f32) {
+ self.slice.copy_from_slice(&value.to_le_bytes());
+ }
+ }
+
+ impl<'a> Drop for UniformBufferMap<'a> {
+ fn drop(&mut self) {
+ // Safety: Make sure we don't have the slice outlive the mapping.
+ unsafe {
+ self.device.unmap_buffer(self.buffer);
+ }
+ }
+ }
+
+ let mut uniforms = UniformBufferMap::new(device.as_ref(), 4);
+
+ let start_time = Instant::now();
'main: loop {
let frame_token = device.begin_frame();
+ let frame_start = Instant::now() - start_time;
+ let frame_start = frame_start.as_secs_f32();
+
+ uniforms.write_f32(frame_start);
+
while let Some(event) = app.poll_event() {
use Event::*;
match event {
}
}
- render_window(
- device.as_ref(),
+ let (width, height, swapchain_image) =
+ device.acquire_swapchain(&frame_token, window, TextureFormat::BGRA8_SRGB);
+
+ let mut command_buffer_token =
+ device.create_command_buffer(&frame_token, &mut thread_token);
+
+ device.cmd_begin_rendering(
&frame_token,
&mut thread_token,
- pipeline,
- window,
+ &mut command_buffer_token,
+ &RenderingDesc {
+ x: 0,
+ y: 0,
+ width,
+ height,
+ color_attachments: &[RenderingAttachment {
+ texture: swapchain_image,
+ load_op: LoadOp::Clear(ClearValue::ColorF32([
+ 0.392157, 0.584314, 0.929412, 1.0,
+ ])),
+ store_op: StoreOp::Store,
+ }],
+ depth_attachment: None,
+ stencil_attachment: None,
+ },
);
- device.end_frame(frame_token);
- }
-}
+ device.cmd_set_pipeline(&mut command_buffer_token, pipeline);
+ device.cmd_set_bind_group(
+ &frame_token,
+ &mut thread_token,
+ &mut command_buffer_token,
+ pipeline,
+ bind_group_layout,
+ 0,
+ &[Bind {
+ binding: 0,
+ array_element: 0,
+ typed: TypedBind::Buffer(&[uniforms.buffer()]),
+ }],
+ );
-fn render_window(
- device: &dyn Device,
- frame_token: &FrameToken,
- thread_token: &mut ThreadToken,
- pipeline: Pipeline,
- window: Window,
-) {
- let (width, height, swapchain_image) =
- device.acquire_swapchain(frame_token, window, TextureFormat::BGRA8_SRGB);
- let mut command_buffer_token = device.create_command_buffer(frame_token, thread_token);
- device.cmd_begin_rendering(
- &mut command_buffer_token,
- &RenderingDesc {
- x: 0,
- y: 0,
- width,
- height,
- color_attachments: &[RenderingAttachment {
- texture: swapchain_image,
- load_op: LoadOp::Clear(ClearValue::ColorF32([0.392157, 0.584314, 0.929412, 1.0])),
- store_op: StoreOp::Store,
+ device.cmd_set_scissors(
+ &mut command_buffer_token,
+ &[Scissor {
+ x: 0,
+ y: 0,
+ width,
+ height,
}],
- depth_attachment: None,
- stencil_attachment: None,
- },
- );
- device.cmd_bind_pipeline(&mut command_buffer_token, pipeline);
- device.cmd_set_scissors(
- &mut command_buffer_token,
- &[Scissor {
- x: 0,
- y: 0,
- width,
- height,
- }],
- );
- device.cmd_set_viewports(
- &mut command_buffer_token,
- &[Viewport {
- x: 0.0,
- y: 0.0,
- width: width as f32,
- height: height as f32,
- min_depth: 0.0,
- max_depth: 1.0,
- }],
- );
- device.cmd_draw(&mut command_buffer_token, 3, 1, 0, 0);
- device.cmd_end_rendering(&mut command_buffer_token);
+ );
+ device.cmd_set_viewports(
+ &mut command_buffer_token,
+ &[Viewport {
+ x: 0.0,
+ y: 0.0,
+ width: width as f32,
+ height: height as f32,
+ min_depth: 0.0,
+ max_depth: 1.0,
+ }],
+ );
+ device.cmd_draw(&mut command_buffer_token, 3, 1, 0, 0);
+ device.cmd_end_rendering(&mut command_buffer_token);
- device.submit(command_buffer_token);
+ device.submit(&frame_token, &mut thread_token, command_buffer_token);
+
+ device.end_frame(frame_token);
+ }
}