use std::{
- cell::UnsafeCell,
+ cell::{Cell, RefCell, UnsafeCell},
collections::{hash_map::Entry, HashMap, VecDeque},
marker::PhantomData,
os::raw::{c_char, c_void},
};
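+ // NOTE: Moving the per-thread pools behind `RefCell` and `Cell` trades the
+ // static exclusivity of `&mut ThreadToken` for interior mutability, so the
+ // device methods below can take the token by shared reference.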
struct VulkanPerThread {
- cmd_buffer_pool: VulkanCmdBufferPool,
- descriptor_pool: vk::DescriptorPool,
+ cmd_buffer_pool: RefCell<VulkanCmdBufferPool>,
+ descriptor_pool: Cell<vk::DescriptorPool>,
arena: Arena,
}
next_free_index: 0,
};
VulkanPerThread {
- cmd_buffer_pool,
- descriptor_pool: vk::DescriptorPool::null(),
+ cmd_buffer_pool: RefCell::new(cmd_buffer_pool),
+ descriptor_pool: Cell::new(vk::DescriptorPool::null()),
arena: Arena::new(),
}
});
}
}
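+ // `'a` ties the command buffer to the frame and `'thread` to the token,
+ // so the token cannot be moved or borrowed mutably while recording.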
- fn create_cmd_buffer(&self, frame: &Frame, thread_token: &mut ThreadToken) -> CmdBuffer {
+ fn create_cmd_buffer<'a, 'thread>(
+ &self,
+ frame: &'a Frame,
+ thread_token: &'thread ThreadToken,
+ ) -> CmdBuffer<'a, 'thread> {
let frame = self.frame(frame);
- let per_thread = frame.per_thread.get_mut(thread_token);
- let cmd_buffer_pool = &mut per_thread.cmd_buffer_pool;
+ let per_thread = frame.per_thread.get(thread_token);
+ let mut cmd_buffer_pool = per_thread.cmd_buffer_pool.borrow_mut();
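+ // The `RefCell` borrow enforces at runtime the exclusivity that the
+ // old `&mut ThreadToken` parameter guaranteed at compile time.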
// All available command buffers have been consumed; allocate a new one.
if cmd_buffer_pool.next_free_index >= cmd_buffer_pool.command_buffers.len() {
CmdBuffer {
cmd_buffer_addr: vulkan_cmd_buffer as *mut _ as usize,
_phantom: &PhantomData,
+
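+ // Stash the token so `cmd_set_bind_group` and friends can reach
+ // per-thread state without taking it as a separate parameter.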
+ thread_token,
phantom_unsend: PhantomUnsend {},
}
}
fn cmd_set_bind_group(
&self,
frame: &Frame,
- thread_token: &mut ThreadToken,
cmd_buffer: &mut CmdBuffer,
layout: BindGroupLayout,
bind_group_index: u32,
let descriptor_set_layout = self.bind_group_layout_pool.lock().get(layout.0).unwrap().0;
let frame = self.frame(frame);
- let per_thread = frame.per_thread.get_mut(thread_token);
+ let per_thread = frame.per_thread.get(cmd_buffer.thread_token);
- let mut descriptor_pool = per_thread.descriptor_pool;
+ let mut descriptor_pool = per_thread.descriptor_pool.get();
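+ // `vk::DescriptorPool` is a `Copy` handle, so a `Cell` with get/set
+ // suffices here; no borrow tracking required.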
let mut allocated_pool = false;
let descriptor_set = loop {
if descriptor_pool.is_null() {
// Need to fetch a new descriptor pool
descriptor_pool = self.request_descriptor_pool();
+ per_thread.descriptor_pool.set(descriptor_pool);
frame.recycle_descriptor_pool(descriptor_pool);
- per_thread.descriptor_pool = descriptor_pool;
allocated_pool = true;
}
let allocate_info = vk::DescriptorSetAllocateInfo {
}
for per_thread in frame.per_thread.slots_mut() {
- per_thread.descriptor_pool = vk::DescriptorPool::null();
- if per_thread.cmd_buffer_pool.next_free_index != 0 {
+ per_thread.descriptor_pool.set(vk::DescriptorPool::null());
+ let cmd_buffer_pool = per_thread.cmd_buffer_pool.get_mut();
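+ // `slots_mut` yields `&mut VulkanPerThread`, so `RefCell::get_mut`
+ // grants access without a runtime borrow check.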
+ if cmd_buffer_pool.next_free_index != 0 {
vk_check!(device_fn.reset_command_pool(
device,
- per_thread.cmd_buffer_pool.command_pool,
+ cmd_buffer_pool.command_pool,
vk::CommandPoolResetFlags::default()
));
-
- per_thread.cmd_buffer_pool.next_free_index = 0;
+ cmd_buffer_pool.next_free_index = 0;
}
per_thread.arena.reset();
}
let mut arena = HybridArena::<512>::new();
for per_thread in frame.per_thread.slots_mut() {
- if !per_thread.cmd_buffer_pool.command_buffers.is_empty() {
+ let cmd_buffer_pool = per_thread.cmd_buffer_pool.get_mut();
+ if !cmd_buffer_pool.command_buffers.is_empty() {
arena.reset();
- let command_buffers = arena.alloc_slice_fill_iter(
- per_thread.cmd_buffer_pool.command_buffers.iter().copied(),
- );
+ let command_buffers = arena
+ .alloc_slice_fill_iter(cmd_buffer_pool.command_buffers.iter().copied());
unsafe {
device_fn.free_command_buffers(
device,
- per_thread.cmd_buffer_pool.command_pool,
+ cmd_buffer_pool.command_pool,
command_buffers,
)
};
}
unsafe {
- device_fn.destroy_command_pool(
- device,
- per_thread.cmd_buffer_pool.command_pool,
- None,
- )
+ device_fn.destroy_command_pool(device, cmd_buffer_pool.command_pool, None)
}
}
}
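+ // Helpers that record commands only need a shared token now.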
fn create_image_with_data(
device: &dyn Device,
- thread_token: &mut ThreadToken,
+ thread_token: &ThreadToken,
width: u32,
height: u32,
data: &[u8],
});
let device = create_device(narcissus_gpu::DeviceBackend::Vulkan);
- let mut thread_token = ThreadToken::new();
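+ // The binding no longer needs `mut`; the token is only borrowed shared.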
+ let thread_token = ThreadToken::new();
let vert_spv = include_bytes_align!(4, "shaders/basic.vert.spv");
let frag_spv = include_bytes_align!(4, "shaders/basic.frag.spv");
let blåhaj_image = create_image_with_data(
device.as_ref(),
- &mut thread_token,
+ &thread_token,
blåhaj_image.width() as u32,
blåhaj_image.height() as u32,
blåhaj_image.as_slice(),
depth_height = height;
}
- let mut cmd_buffer = device.create_cmd_buffer(&frame, &mut thread_token);
+ let mut cmd_buffer = device.create_cmd_buffer(&frame, &thread_token);
device.cmd_set_pipeline(&mut cmd_buffer, pipeline);
device.cmd_set_bind_group(
&frame,
- &mut thread_token,
&mut cmd_buffer,
uniform_bind_group_layout,
0,
device.cmd_set_bind_group(
&frame,
- &mut thread_token,
&mut cmd_buffer,
storage_bind_group_layout,
1,