From f72ba2cfc36bf8b5b455ceed80dc298bcf4aba0e Mon Sep 17 00:00:00 2001 From: Joshua Simmons Date: Sun, 13 Nov 2022 14:13:56 +0100 Subject: [PATCH] Rename `FrameToken` -> `Frame` --- narcissus-gpu/src/lib.rs | 28 ++-- narcissus-gpu/src/vulkan.rs | 289 +++++++++++++++++------------------- narcissus/src/main.rs | 16 +- 3 files changed, 154 insertions(+), 179 deletions(-) diff --git a/narcissus-gpu/src/lib.rs b/narcissus-gpu/src/lib.rs index 6fd297d..a607836 100644 --- a/narcissus-gpu/src/lib.rs +++ b/narcissus-gpu/src/lib.rs @@ -347,7 +347,7 @@ pub enum TypedBind<'a> { thread_token_def!(ThreadToken, GpuConcurrent, 8); -pub struct FrameToken<'a> { +pub struct Frame<'a> { device_addr: usize, frame_index: usize, _phantom: &'a PhantomData<()>, @@ -368,15 +368,11 @@ pub trait Device { fn create_graphics_pipeline(&self, desc: &GraphicsPipelineDesc) -> Pipeline; fn create_compute_pipeline(&self, desc: &ComputePipelineDesc) -> Pipeline; - fn destroy_buffer(&self, frame_token: &FrameToken, buffer: Buffer); - fn destroy_texture(&self, frame_token: &FrameToken, texture: Texture); - fn destroy_sampler(&self, frame_token: &FrameToken, sampler: Sampler); - fn destroy_bind_group_layout( - &self, - frame_token: &FrameToken, - bind_group_layout: BindGroupLayout, - ); - fn destroy_pipeline(&self, frame_token: &FrameToken, pipeline: Pipeline); + fn destroy_buffer(&self, frame: &Frame, buffer: Buffer); + fn destroy_texture(&self, frame: &Frame, texture: Texture); + fn destroy_sampler(&self, frame: &Frame, sampler: Sampler); + fn destroy_bind_group_layout(&self, frame: &Frame, bind_group_layout: BindGroupLayout); + fn destroy_pipeline(&self, frame: &Frame, pipeline: Pipeline); /// Map the given buffer in its entirety to system memory and return a pointer to it. /// @@ -395,7 +391,7 @@ pub trait Device { fn acquire_swapchain( &self, - frame_token: &FrameToken, + frame: &Frame, window: Window, format: TextureFormat, ) -> (u32, u32, Texture); @@ -403,13 +399,13 @@ pub trait Device { fn create_cmd_buffer<'a, 'thread>( &'a self, - frame_token: &'a FrameToken, + frame: &'a Frame, thread_token: &'thread mut ThreadToken, ) -> CmdBuffer<'a>; fn cmd_set_bind_group( &self, - frame_token: &FrameToken, + frame: &Frame, thread_token: &mut ThreadToken, cmd_buffer: &mut CmdBuffer, layout: BindGroupLayout, @@ -454,11 +450,11 @@ pub trait Device { first_instance: u32, ); - fn submit(&self, frame_token: &FrameToken, cmd_buffer_token: CmdBuffer); + fn submit(&self, frame: &Frame, cmd_buffer: CmdBuffer); - fn begin_frame(&self) -> FrameToken; + fn begin_frame(&self) -> Frame; - fn end_frame<'device>(&'device self, frame_token: FrameToken<'device>); + fn end_frame<'device>(&'device self, frame: Frame<'device>); } pub fn create_vulkan_device<'app>(app: &'app dyn App) -> Box { diff --git a/narcissus-gpu/src/vulkan.rs b/narcissus-gpu/src/vulkan.rs index 89681e5..9cfd9bb 100644 --- a/narcissus-gpu/src/vulkan.rs +++ b/narcissus-gpu/src/vulkan.rs @@ -18,8 +18,8 @@ use vulkan_sys as vk; use crate::{ delay_queue::DelayQueue, Bind, BindGroupLayout, BindGroupLayoutDesc, BindingType, Buffer, BufferDesc, BufferUsageFlags, ClearValue, CmdBuffer, CompareOp, ComputePipelineDesc, - CullingMode, Device, FrameToken, FrontFace, GpuConcurrent, GraphicsPipelineDesc, IndexType, - LoadOp, MemoryLocation, Pipeline, PolygonMode, Sampler, SamplerAddressMode, SamplerCompareOp, + CullingMode, Device, Frame, FrontFace, GpuConcurrent, GraphicsPipelineDesc, IndexType, LoadOp, + MemoryLocation, Pipeline, PolygonMode, Sampler, SamplerAddressMode, SamplerCompareOp, 
SamplerDesc, SamplerFilter, ShaderStageFlags, StencilOp, StencilOpState, StoreOp, Texture, TextureDesc, TextureDimension, TextureFormat, TextureUsageFlags, TextureViewDesc, ThreadToken, Topology, TypedBind, @@ -369,13 +369,10 @@ struct VulkanCmdBufferPool { command_buffers: Vec, } -impl<'device> FrameToken<'device> { +impl<'device> Frame<'device> { fn check_device(&self, device: &VulkanDevice) { let device_address = device as *const _ as usize; - assert_eq!( - self.device_addr, device_address, - "frame token device mismatch" - ) + assert_eq!(self.device_addr, device_address, "frame device mismatch") } fn check_frame_counter(&self, frame_counter_value: usize) { @@ -383,7 +380,7 @@ impl<'device> FrameToken<'device> { assert_eq!( self.frame_index, frame_counter_value >> 1, - "token does not match current frame" + "frame does not match device frame" ); } } @@ -404,26 +401,26 @@ impl FrameCounter { self.value.load(Ordering::Relaxed) } - fn acquire(&self, device: &VulkanDevice) -> FrameToken { + fn acquire(&self, device: &VulkanDevice) -> Frame { let old_frame_counter = self.value.fetch_add(1, Ordering::SeqCst); assert!( old_frame_counter & 1 == 1, - "acquiring a frame token before previous frame token has been released" + "acquiring a frame before previous frame has been released" ); let frame_counter = old_frame_counter + 1; let frame_index = frame_counter >> 1; - FrameToken { + Frame { device_addr: device as *const _ as usize, frame_index, _phantom: &PhantomData, } } - fn release(&self, frame_token: FrameToken) { + fn release(&self, frame: Frame) { let old_frame_counter = self.value.fetch_add(1, Ordering::SeqCst); - frame_token.check_frame_counter(old_frame_counter); + frame.check_frame_counter(old_frame_counter); } } @@ -455,18 +452,6 @@ struct VulkanFrame { } impl VulkanFrame { - // fn cmd_buffer_mut<'a>( - // &self, - // thread_token: &'a mut ThreadToken, - // cmd_buffer_token: &'a CmdBuffer, - // ) -> &'a mut VulkanCmdBuffer { - // &mut self - // .per_thread - // .get_mut(thread_token) - // .cmd_buffer_pool - // .cmd_buffers[cmd_buffer_token.index] - // } - fn recycle_semaphore(&self, semaphore: vk::Semaphore) { self.recycled_semaphores.lock().push_back(semaphore); } @@ -814,22 +799,20 @@ impl<'app> VulkanDevice<'app> { } } - fn frame<'token>(&self, frame_token: &'token FrameToken) -> &'token VulkanFrame { - frame_token.check_device(self); - frame_token.check_frame_counter(self.frame_counter.load()); - // Safety: Reference is bound to the frame token exposed by the API. only one frame token - // can be valid at a time. The returned frame is only valid so long as we have a ref on the - // token. - unsafe { &*self.frames[frame_token.frame_index % NUM_FRAMES].get() } + fn frame<'token>(&self, frame: &'token Frame) -> &'token VulkanFrame { + frame.check_device(self); + frame.check_frame_counter(self.frame_counter.load()); + // Safety: Reference is bound to the frame exposed by the API. only one frame can be valid + // at a time. The returned VulkanFrame is only valid so long as we have a ref on the frame. + unsafe { &*self.frames[frame.frame_index % NUM_FRAMES].get() } } - fn frame_mut<'token>(&self, frame_token: &'token mut FrameToken) -> &'token mut VulkanFrame { - frame_token.check_device(self); - frame_token.check_frame_counter(self.frame_counter.load()); - // Safety: Mutable reference is bound to the frame token exposed by the API. only one frame - // token can be valid at a time. The returned frame is only valid so long as we have a mut - // ref on the token. 
- unsafe { &mut *self.frames[frame_token.frame_index % NUM_FRAMES].get() } + fn frame_mut<'token>(&self, frame: &'token mut Frame) -> &'token mut VulkanFrame { + frame.check_device(self); + frame.check_frame_counter(self.frame_counter.load()); + // Safety: Reference is bound to the frame exposed by the API. only one frame can be valid + // at a time. The returned VulkanFrame is only valid so long as we have a ref on the frame. + unsafe { &mut *self.frames[frame.frame_index % NUM_FRAMES].get() } } fn cmd_buffer_mut<'a>(&self, cmd_buffer: &'a mut CmdBuffer) -> &'a mut VulkanCmdBuffer { @@ -1572,17 +1555,17 @@ impl<'driver> Device for VulkanDevice<'driver> { todo!() } - fn destroy_buffer(&self, frame_token: &FrameToken, buffer: Buffer) { + fn destroy_buffer(&self, frame: &Frame, buffer: Buffer) { if let Some(buffer) = self.buffer_pool.lock().remove(buffer.0) { - let frame = self.frame(frame_token); + let frame = self.frame(frame); frame.destroyed_buffers.lock().push_back(buffer.buffer); frame.destroyed_allocations.lock().push_back(buffer.memory); } } - fn destroy_texture(&self, frame_token: &FrameToken, texture: Texture) { + fn destroy_texture(&self, frame: &Frame, texture: Texture) { if let Some(texture) = self.texture_pool.lock().remove(texture.0) { - let frame = self.frame(frame_token); + let frame = self.frame(frame); match texture { // The texture is unique, we've never allocated a reference counted object for it. @@ -1613,35 +1596,31 @@ impl<'driver> Device for VulkanDevice<'driver> { } } - fn destroy_sampler(&self, frame_token: &FrameToken, sampler: Sampler) { + fn destroy_sampler(&self, frame: &Frame, sampler: Sampler) { if let Some(sampler) = self.sampler_pool.lock().remove(sampler.0) { - self.frame(frame_token) + self.frame(frame) .destroyed_samplers .lock() .push_back(sampler.0) } } - fn destroy_bind_group_layout( - &self, - frame_token: &FrameToken, - bind_group_layout: BindGroupLayout, - ) { + fn destroy_bind_group_layout(&self, frame: &Frame, bind_group_layout: BindGroupLayout) { if let Some(bind_group_layout) = self .bind_group_layout_pool .lock() .remove(bind_group_layout.0) { - self.frame(frame_token) + self.frame(frame) .destroyed_descriptor_set_layouts .lock() .push_back(bind_group_layout.0) } } - fn destroy_pipeline(&self, frame_token: &FrameToken, pipeline: Pipeline) { + fn destroy_pipeline(&self, frame: &Frame, pipeline: Pipeline) { if let Some(pipeline) = self.pipeline_pool.lock().remove(pipeline.0) { - let frame = self.frame(frame_token); + let frame = self.frame(frame); frame .destroyed_pipeline_layouts .lock() @@ -1698,7 +1677,7 @@ impl<'driver> Device for VulkanDevice<'driver> { fn acquire_swapchain( &self, - frame_token: &FrameToken, + frame: &Frame, window: Window, format: TextureFormat, ) -> (u32, u32, Texture) { @@ -1769,7 +1748,7 @@ impl<'driver> Device for VulkanDevice<'driver> { assert_eq!(format, vulkan_swapchain.surface_format.format); - let frame = self.frame(frame_token); + let frame = self.frame(frame); let mut texture_pool = self.texture_pool.lock(); let mut present_swapchains = frame.present_swapchains.lock(); @@ -1980,12 +1959,8 @@ impl<'driver> Device for VulkanDevice<'driver> { } } - fn create_cmd_buffer( - &self, - frame_token: &FrameToken, - thread_token: &mut ThreadToken, - ) -> CmdBuffer { - let frame = self.frame(frame_token); + fn create_cmd_buffer(&self, frame: &Frame, thread_token: &mut ThreadToken) -> CmdBuffer { + let frame = self.frame(frame); let per_thread = frame.per_thread.get_mut(thread_token); let cmd_buffer_pool = &mut 
per_thread.cmd_buffer_pool; @@ -2033,7 +2008,7 @@ impl<'driver> Device for VulkanDevice<'driver> { fn cmd_set_bind_group( &self, - frame_token: &FrameToken, + frame: &Frame, thread_token: &mut ThreadToken, cmd_buffer: &mut CmdBuffer, layout: BindGroupLayout, @@ -2044,7 +2019,7 @@ impl<'driver> Device for VulkanDevice<'driver> { let descriptor_set_layout = self.bind_group_layout_pool.lock().get(layout.0).unwrap().0; - let frame = self.frame(frame_token); + let frame = self.frame(frame); let per_thread = frame.per_thread.get_mut(thread_token); let mut descriptor_pool = per_thread.descriptor_pool; @@ -2421,10 +2396,10 @@ impl<'driver> Device for VulkanDevice<'driver> { } } - fn submit(&self, frame_token: &FrameToken, mut cmd_buffer: CmdBuffer) { + fn submit(&self, frame: &Frame, mut cmd_buffer: CmdBuffer) { let fence = self.universal_queue_fence.fetch_add(1, Ordering::SeqCst) + 1; - let frame = self.frame(frame_token); + let frame = self.frame(frame); frame.universal_queue_fence.store(fence, Ordering::Relaxed); let cmd_buffer = self.cmd_buffer_mut(&mut cmd_buffer); @@ -2513,121 +2488,125 @@ impl<'driver> Device for VulkanDevice<'driver> { )); } - fn begin_frame(&self) -> FrameToken { + fn begin_frame(&self) -> Frame { let device_fn = &self.device_fn; let device = self.device; - let mut frame_token = self.frame_counter.acquire(self); - let frame = self.frame_mut(&mut frame_token); - + let mut frame = self.frame_counter.acquire(self); { - let semaphore_fences = &[frame - .universal_queue_fence - .load(std::sync::atomic::Ordering::Relaxed)]; - let semaphores = &[self.universal_queue_semaphore]; - let wait_info = vk::SemaphoreWaitInfo { - semaphores: (semaphores, semaphore_fences).into(), - ..default() - }; - vk_check!(device_fn.wait_semaphores(device, &wait_info, !0)); - } + let frame = self.frame_mut(&mut frame); - for per_thread in frame.per_thread.slots_mut() { - per_thread.descriptor_pool = vk::DescriptorPool::null(); - if per_thread.cmd_buffer_pool.next_free_index != 0 { - vk_check!(device_fn.reset_command_pool( - device, - per_thread.cmd_buffer_pool.command_pool, - vk::CommandPoolResetFlags::default() - )); + { + let semaphore_fences = &[frame + .universal_queue_fence + .load(std::sync::atomic::Ordering::Relaxed)]; + let semaphores = &[self.universal_queue_semaphore]; + let wait_info = vk::SemaphoreWaitInfo { + semaphores: (semaphores, semaphore_fences).into(), + ..default() + }; + vk_check!(device_fn.wait_semaphores(device, &wait_info, !0)); + } - per_thread.cmd_buffer_pool.next_free_index = 0; + for per_thread in frame.per_thread.slots_mut() { + per_thread.descriptor_pool = vk::DescriptorPool::null(); + if per_thread.cmd_buffer_pool.next_free_index != 0 { + vk_check!(device_fn.reset_command_pool( + device, + per_thread.cmd_buffer_pool.command_pool, + vk::CommandPoolResetFlags::default() + )); + + per_thread.cmd_buffer_pool.next_free_index = 0; + } + per_thread.arena.reset() } - per_thread.arena.reset() - } - self.recycled_semaphores - .lock() - .extend(frame.recycled_semaphores.get_mut().drain(..)); + self.recycled_semaphores + .lock() + .extend(frame.recycled_semaphores.get_mut().drain(..)); - for descriptor_pool in frame.recycled_descriptor_pools.get_mut() { - vk_check!(device_fn.reset_descriptor_pool( - device, - *descriptor_pool, - vk::DescriptorPoolResetFlags::default() - )) - } + for descriptor_pool in frame.recycled_descriptor_pools.get_mut() { + vk_check!(device_fn.reset_descriptor_pool( + device, + *descriptor_pool, + vk::DescriptorPoolResetFlags::default() + )) + } - 
self.recycled_descriptor_pools - .lock() - .extend(frame.recycled_descriptor_pools.get_mut().drain(..)); + self.recycled_descriptor_pools + .lock() + .extend(frame.recycled_descriptor_pools.get_mut().drain(..)); - Self::destroy_deferred(device_fn, device, frame); + Self::destroy_deferred(device_fn, device, frame); - self.destroyed_swapchains - .lock() - .expire(|(window, swapchain, surface, image_views)| { - self.destroy_swapchain(window, surface, swapchain, &image_views); - }); + self.destroyed_swapchains + .lock() + .expire(|(window, swapchain, surface, image_views)| { + self.destroy_swapchain(window, surface, swapchain, &image_views); + }); + } - frame_token + frame } - fn end_frame(&self, mut frame_token: FrameToken) { + fn end_frame(&self, mut frame: Frame) { let arena = HybridArena::<512>::new(); - let frame = self.frame_mut(&mut frame_token); - - let present_swapchains = frame.present_swapchains.get_mut(); - if !present_swapchains.is_empty() { - let windows = arena.alloc_slice_fill_iter(present_swapchains.keys().copied()); - let wait_semaphores = - arena.alloc_slice_fill_iter(present_swapchains.values().map(|x| x.release)); - let swapchains = - arena.alloc_slice_fill_iter(present_swapchains.values().map(|x| x.swapchain)); - let swapchain_image_indices = - arena.alloc_slice_fill_iter(present_swapchains.values().map(|x| x.image_index)); - - present_swapchains.clear(); - - let results = arena.alloc_slice_fill_copy(swapchains.len(), vk::Result::Success); - - let present_info = vk::PresentInfoKHR { - wait_semaphores: wait_semaphores.into(), - swapchains: (swapchains, swapchain_image_indices).into(), - results: results.as_mut_ptr(), - ..default() - }; + { + let frame = self.frame_mut(&mut frame); + + let present_swapchains = frame.present_swapchains.get_mut(); + if !present_swapchains.is_empty() { + let windows = arena.alloc_slice_fill_iter(present_swapchains.keys().copied()); + let wait_semaphores = + arena.alloc_slice_fill_iter(present_swapchains.values().map(|x| x.release)); + let swapchains = + arena.alloc_slice_fill_iter(present_swapchains.values().map(|x| x.swapchain)); + let swapchain_image_indices = + arena.alloc_slice_fill_iter(present_swapchains.values().map(|x| x.image_index)); + + present_swapchains.clear(); + + let results = arena.alloc_slice_fill_copy(swapchains.len(), vk::Result::Success); + + let present_info = vk::PresentInfoKHR { + wait_semaphores: wait_semaphores.into(), + swapchains: (swapchains, swapchain_image_indices).into(), + results: results.as_mut_ptr(), + ..default() + }; - unsafe { - // check results below, so ignore this return value. - let _ = self - .swapchain_fn - .queue_present(self.universal_queue, &present_info); - }; + unsafe { + // check results below, so ignore this return value. 
+ let _ = self + .swapchain_fn + .queue_present(self.universal_queue, &present_info); + }; - for (i, &result) in results.iter().enumerate() { - match result { - vk::Result::Success => {} - vk::Result::SuboptimalKHR => { - // Yikes - if let VulkanSwapchainState::Occupied { - width: _, - height: _, - suboptimal, - swapchain: _, - image_views: _, - } = &mut self.swapchains.lock().get_mut(&windows[i]).unwrap().state - { - *suboptimal = true; + for (i, &result) in results.iter().enumerate() { + match result { + vk::Result::Success => {} + vk::Result::SuboptimalKHR => { + // Yikes + if let VulkanSwapchainState::Occupied { + width: _, + height: _, + suboptimal, + swapchain: _, + image_views: _, + } = &mut self.swapchains.lock().get_mut(&windows[i]).unwrap().state + { + *suboptimal = true; + } } + _ => vk_check!(result), } - _ => vk_check!(result), } } } - self.frame_counter.release(frame_token); + self.frame_counter.release(frame); } unsafe fn map_buffer(&self, buffer: Buffer) -> *mut u8 { diff --git a/narcissus/src/main.rs b/narcissus/src/main.rs index fe771fd..d452b2c 100644 --- a/narcissus/src/main.rs +++ b/narcissus/src/main.rs @@ -274,7 +274,7 @@ pub fn main() { let start_time = Instant::now(); 'main: loop { - let frame_token = device.begin_frame(); + let frame = device.begin_frame(); while let Some(event) = app.poll_event() { use Event::*; @@ -302,7 +302,7 @@ pub fn main() { } let (width, height, swapchain_image) = - device.acquire_swapchain(&frame_token, main_window, TextureFormat::BGRA8_SRGB); + device.acquire_swapchain(&frame, main_window, TextureFormat::BGRA8_SRGB); let frame_start = Instant::now() - start_time; let frame_start = frame_start.as_secs_f32() * 0.5; @@ -317,7 +317,7 @@ pub fn main() { uniforms.write(Uniform { clip_from_model }); if width != depth_width || height != depth_height { - device.destroy_texture(&frame_token, depth_image); + device.destroy_texture(&frame, depth_image); depth_image = device.create_texture(&TextureDesc { memory_location: MemoryLocation::PreferDevice, usage: TextureUsageFlags::DEPTH_STENCIL, @@ -333,12 +333,12 @@ pub fn main() { depth_height = height; } - let mut cmd_buffer = device.create_cmd_buffer(&frame_token, &mut thread_token); + let mut cmd_buffer = device.create_cmd_buffer(&frame, &mut thread_token); device.cmd_set_pipeline(&mut cmd_buffer, pipeline); device.cmd_set_bind_group( - &frame_token, + &frame, &mut thread_token, &mut cmd_buffer, uniform_bind_group_layout, @@ -351,7 +351,7 @@ pub fn main() { ); device.cmd_set_bind_group( - &frame_token, + &frame, &mut thread_token, &mut cmd_buffer, storage_bind_group_layout, @@ -417,8 +417,8 @@ pub fn main() { device.cmd_end_rendering(&mut cmd_buffer); - device.submit(&frame_token, cmd_buffer); + device.submit(&frame, cmd_buffer); - device.end_frame(frame_token); + device.end_frame(frame); } } -- 2.49.0
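
For context on the rename: `Frame<'a>` is a per-frame token. `begin_frame` hands one out, every per-frame call (`acquire_swapchain`, `create_cmd_buffer`, `destroy_*`, `submit`) takes it by reference, and `end_frame` consumes it, with a parity-encoded counter asserting that frames never overlap. Below is a minimal, self-contained sketch of that pattern, not code from narcissus-gpu: the names `Frame`, `begin_frame`, `end_frame`, `frame_counter`, and `frame_index` come from the patch, while the concrete `Device` struct and `Device::new` are illustrative stand-ins, and the `device_addr` check and all per-thread state are omitted.

use std::marker::PhantomData;
use std::sync::atomic::{AtomicUsize, Ordering};

/// Per-frame token; the lifetime ties it to the device that issued it.
pub struct Frame<'a> {
    frame_index: usize,
    _phantom: &'a PhantomData<()>,
}

pub struct Device {
    /// Odd while between frames, even while a frame is in flight.
    frame_counter: AtomicUsize,
}

impl Device {
    pub fn new() -> Self {
        Self {
            frame_counter: AtomicUsize::new(1),
        }
    }

    pub fn begin_frame(&self) -> Frame<'_> {
        let old = self.frame_counter.fetch_add(1, Ordering::SeqCst);
        assert!(
            old & 1 == 1,
            "acquiring a frame before previous frame has been released"
        );
        Frame {
            frame_index: (old + 1) >> 1,
            _phantom: &PhantomData,
        }
    }

    pub fn end_frame(&self, frame: Frame) {
        let old = self.frame_counter.fetch_add(1, Ordering::SeqCst);
        assert!(old & 1 == 0, "releasing a frame that was never acquired");
        assert_eq!(frame.frame_index, old >> 1, "frame does not match device frame");
    }
}

fn main() {
    let device = Device::new();
    let frame = device.begin_frame();
    // Per-frame work (command recording, destroys, presents) would take `&frame` here.
    device.end_frame(frame);
}

Because `begin_frame` only takes `&self`, the borrow checker alone cannot rule out two live frames; that is why the real device backs the token with the runtime parity assertions shown in `FrameCounter::acquire` and `FrameCounter::release` above.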