Frame, GlobalBarrier, GpuConcurrent, GraphicsPipelineDesc, Image, ImageBarrier, ImageBlit,
ImageDesc, ImageDimension, ImageLayout, ImageTiling, ImageViewDesc, IndexType, MemoryLocation,
Offset2d, Offset3d, PersistentBuffer, Pipeline, PipelineLayout, Sampler, SamplerAddressMode,
- SamplerCompareOp, SamplerDesc, SamplerFilter, ShaderStageFlags, SpecConstant,
+ SamplerCompareOp, SamplerDesc, SamplerFilter, ShaderDesc, ShaderStageFlags, SpecConstant,
SwapchainConfigurator, SwapchainImage, SwapchainOutOfDateError, ThreadToken, TransientBuffer,
- TypedBind, frame_counter::FrameCounter, mapped_buffer::TransientBindGroup,
+ TypedBind, VertexOrMeshShader, frame_counter::FrameCounter, mapped_buffer::TransientBindGroup,
};
mod allocator;
shader_module
}
+fn vulkan_pipeline_shader_stage_create_flags(
+ shader_desc: &ShaderDesc<'_>,
+) -> vk::PipelineShaderStageCreateFlags {
+ let mut flags = default();
+
+ if shader_desc.require_full_subgroups {
+ flags |= vk::PipelineShaderStageCreateFlags::REQUIRE_FULL_SUBGROUPS;
+ }
+
+ if shader_desc.allow_varying_subgroup_size {
+ flags |= vk::PipelineShaderStageCreateFlags::ALLOW_VARYING_SUBGROUP_SIZE;
+ }
+
+ flags
+}
+
+fn vulkan_specialization_info<'a>(
+ arena: &'a HybridArena<1024>,
+ spec_constants: &'a [SpecConstant],
+) -> Option<&'a vk::SpecializationInfo<'a>> {
+ if spec_constants.is_empty() {
+ return None;
+ }
+
+ let block_len = spec_constants
+ .iter()
+ .map(|spec_constant| match spec_constant {
+ SpecConstant::Bool { id: _, value: _ }
+ | SpecConstant::U32 { id: _, value: _ }
+ | SpecConstant::I32 { id: _, value: _ }
+ | SpecConstant::F32 { id: _, value: _ } => 4,
+ })
+ .sum::<usize>();
+
+ let block = arena.alloc_slice_fill_copy(block_len, 0u8);
+
+ let mut offset = 0;
+ let map_entries = arena.alloc_slice_fill_iter(spec_constants.iter().map(|spec_constant| {
+ let constant_id;
+ let value_size;
+ match *spec_constant {
+ SpecConstant::Bool { id, value } => {
+ constant_id = id;
+ let value = if value {
+ vk::Bool32::True
+ } else {
+ vk::Bool32::False
+ } as u32;
+ value_size = std::mem::size_of_val(&value);
+ block[offset..offset + value_size].copy_from_slice(&value.to_ne_bytes());
+ }
+ SpecConstant::U32 { id, value } => {
+ constant_id = id;
+ value_size = std::mem::size_of_val(&value);
+ block[offset..offset + value_size].copy_from_slice(&value.to_ne_bytes());
+ }
+ SpecConstant::I32 { id, value } => {
+ constant_id = id;
+ value_size = std::mem::size_of_val(&value);
+ block[offset..offset + value_size].copy_from_slice(&value.to_ne_bytes());
+ }
+ SpecConstant::F32 { id, value } => {
+ constant_id = id;
+ value_size = std::mem::size_of_val(&value);
+ block[offset..offset + value_size].copy_from_slice(&value.to_ne_bytes());
+ }
+ }
+
+ let map_entry = vk::SpecializationMapEntry {
+ constant_id,
+ offset: offset as u32,
+ size: value_size,
+ };
+
+ offset += value_size;
+
+ map_entry
+ }));
+
+ Some(arena.alloc(vk::SpecializationInfo {
+ data: block.into(),
+ map_entries: map_entries.into(),
+ }) as &vk::SpecializationInfo)
+}
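
Every supported constant is four bytes wide, so the packed data block and the map entries line up one-to-one with the input slice. A minimal usage sketch (the `arena` binding and constant values are hypothetical):

    let arena = HybridArena::<1024>::new();
    let spec_constants = [
        SpecConstant::U32 { id: 0, value: 64 },
        SpecConstant::Bool { id: 1, value: true },
    ];
    // Produces an 8-byte data block and two map entries:
    //   { constant_id: 0, offset: 0, size: 4 }
    //   { constant_id: 1, offset: 4, size: 4 } (Bool is encoded as a VkBool32)
    let info = vulkan_specialization_info(&arena, &spec_constants);
    assert!(info.is_some());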
+
struct VulkanBuffer {
memory: VulkanMemory,
buffer: vk::Buffer,
instance_fn: vk::InstanceFunctions,
device_fn: vk::DeviceFunctions,
+ mesh_shader_fn: vk::MeshShaderFunctions,
+
#[cfg(feature = "debug_markers")]
debug_utils_fn: Option<vk::DebugUtilsFunctions>,
}
}
physical_device_properties.api_version() >= vk::VERSION_1_3
+ && physical_device_features.mesh_shader()
&& physical_device_features.dynamic_rendering()
&& physical_device_features.subgroup_size_control()
&& physical_device_features.maintenance4()
)
});
- let mut enabled_extensions = vec![];
+ let mut enabled_extensions = vec![c"VK_EXT_mesh_shader"];
VulkanWsi::check_device_extensions(
&extension_properties,
let mut enabled_features: Box<VulkanPhysicalDeviceFeatures> = default();
+ enabled_features.set_mesh_shader(true);
enabled_features.set_buffer_device_address(true);
enabled_features.set_compute_full_subgroups(true);
enabled_features.set_descriptor_binding_partially_bound(true);
let device_fn = vk::DeviceFunctions::new(&instance_fn, device, vk::VERSION_1_3);
+ let mesh_shader_fn = vk::MeshShaderFunctions::new(&instance_fn, device);
+
#[cfg(feature = "debug_markers")]
let debug_utils_fn = if has_debug_utils {
Some(vk::DebugUtilsFunctions::new(
instance_fn,
device_fn,
+ mesh_shader_fn,
+
#[cfg(feature = "debug_markers")]
debug_utils_fn,
}
let arena = HybridArena::<1024>::new();
- let vertex_module = vulkan_shader_module(
+ let vertex_or_mesh_shader_desc = match &pipeline_desc.vertex_or_mesh_shader {
+ VertexOrMeshShader::Vertex(shader_desc) => {
+ assert_eq!(
+ shader_desc.required_subgroup_size, None,
+ "cannot use subgroup size control on vertex shaders"
+ );
+ shader_desc
+ }
+ VertexOrMeshShader::Mesh(shader_desc) => shader_desc,
+ };
+
+ let vertex_or_mesh_module = vulkan_shader_module(
&self.device_fn,
self.device,
- pipeline_desc.vertex_shader.code,
+ vertex_or_mesh_shader_desc.code,
);
+
+ let vertex_or_mesh_stage_flags = match &pipeline_desc.vertex_or_mesh_shader {
+ VertexOrMeshShader::Vertex(_) => ShaderStageFlags::VERTEX,
+ VertexOrMeshShader::Mesh(_) => ShaderStageFlags::MESH,
+ };
+
+ let vertex_or_mesh_shader_stage_create_flags =
+ vulkan_pipeline_shader_stage_create_flags(vertex_or_mesh_shader_desc);
+
+ let vertex_or_mesh_specialization_info = match &pipeline_desc.vertex_or_mesh_shader {
+ VertexOrMeshShader::Vertex(shader_desc) => {
+ assert!(
+ shader_desc.spec_constants.is_empty(),
+ "spec constants not implemented for vertex shaders"
+ );
+ None
+ }
+ VertexOrMeshShader::Mesh(shader_desc) => {
+ vulkan_specialization_info(&arena, shader_desc.spec_constants)
+ }
+ };
+
let fragment_module = vulkan_shader_module(
&self.device_fn,
self.device,
);
assert!(
- !(pipeline_desc.vertex_shader.required_subgroup_size.is_some()
- || pipeline_desc
- .fragment_shader
- .required_subgroup_size
- .is_some()
- || pipeline_desc.vertex_shader.allow_varying_subgroup_size
+ !(pipeline_desc
+ .fragment_shader
+ .required_subgroup_size
+ .is_some()
|| pipeline_desc.fragment_shader.allow_varying_subgroup_size),
- "subgroup size control features not implemented for graphics shader stages"
+ "subgroup size control features not implemented for fragment shaders"
);
- let stages = &[
+ let fragment_shader_stage_create_flags =
+ vulkan_pipeline_shader_stage_create_flags(&pipeline_desc.fragment_shader);
+
+ let fragment_specialization_info =
+ vulkan_specialization_info(&arena, pipeline_desc.fragment_shader.spec_constants);
+
+ let stages = arena.alloc([
vk::PipelineShaderStageCreateInfo {
- stage: vk::ShaderStageFlags::VERTEX,
- name: pipeline_desc.vertex_shader.entry.as_ptr(),
- module: vertex_module,
+ stage: vulkan_shader_stage_flags(vertex_or_mesh_stage_flags),
+ name: vertex_or_mesh_shader_desc.entry.as_ptr(),
+ module: vertex_or_mesh_module,
+ flags: vertex_or_mesh_shader_stage_create_flags,
+ specialization_info: vertex_or_mesh_specialization_info,
..default()
},
vk::PipelineShaderStageCreateInfo {
stage: vk::ShaderStageFlags::FRAGMENT,
name: pipeline_desc.fragment_shader.entry.as_ptr(),
module: fragment_module,
+ flags: fragment_shader_stage_create_flags,
+ specialization_info: fragment_specialization_info,
..default()
},
- ];
+ ]);
+
+ if let Some(required_subgroup_size) = vertex_or_mesh_shader_desc.required_subgroup_size {
+ assert!(
+ self.physical_device_properties
+ .required_subgroup_size_stages()
+ .contains(vk::ShaderStageFlags::MESH_EXT)
+ );
+ assert!(
+ required_subgroup_size >= self.physical_device_properties.min_subgroup_size()
+ && required_subgroup_size
+ <= self.physical_device_properties.max_subgroup_size()
+ );
+
+ let shader_stage_required_subgroup_size_create_info =
+ arena.alloc(vk::PipelineShaderStageRequiredSubgroupSizeCreateInfo {
+ required_subgroup_size,
+ ..default()
+ });
+
+ // SAFETY: Both allocated from the same arena.
+ stages[0]._next =
+ shader_stage_required_subgroup_size_create_info as *const _ as *const _;
+ }
let topology = vulkan_primitive_topology(pipeline_desc.topology);
let primitive_restart_enable = vulkan_bool32(pipeline_desc.primitive_restart);
.copied()
.map(vulkan_format),
);
- let pipeline_rendering_create_info = vk::PipelineRenderingCreateInfo {
+ let pipeline_rendering_create_info = arena.alloc(vk::PipelineRenderingCreateInfo {
view_mask: 0,
color_attachment_formats: color_attachment_formats.into(),
depth_attachment_format: pipeline_desc
.depth_attachment_format
.map_or(vk::Format::Undefined, vulkan_format),
stencil_attachment_format: pipeline_desc
.stencil_attachment_format
.map_or(vk::Format::Undefined, vulkan_format),
..default()
- };
+ });
- let create_infos = &mut [vk::GraphicsPipelineCreateInfo {
- _next: &pipeline_rendering_create_info as *const vk::PipelineRenderingCreateInfo
+ let graphics_pipeline_create_info = vk::GraphicsPipelineCreateInfo {
+ _next: pipeline_rendering_create_info as *const vk::PipelineRenderingCreateInfo
as *const _,
stages: stages.into(),
vertex_input_state: Some(&vertex_input_state),
dynamic_state: Some(&dynamic_state),
layout: pipeline_layout.pipeline_layout,
..default()
- }];
+ };
+
+ let create_infos = &[graphics_pipeline_create_info];
let mut pipelines = [vk::Pipeline::null()];
vk_check!(unsafe {
self.device_fn.create_graphics_pipelines(
unsafe {
self.device_fn
- .destroy_shader_module(self.device, vertex_module, None)
+ .destroy_shader_module(self.device, vertex_or_mesh_module, None)
};
unsafe {
self.device_fn
let pipeline_layout = self.cache_pipeline_layout(&pipeline_desc.layout);
let module = vulkan_shader_module(&self.device_fn, self.device, pipeline_desc.shader.code);
+ let flags = vulkan_pipeline_shader_stage_create_flags(&pipeline_desc.shader);
- let mut shader_stage_create_flags = default();
-
- if pipeline_desc.shader.require_full_subgroups {
- shader_stage_create_flags |= vk::PipelineShaderStageCreateFlags::REQUIRE_FULL_SUBGROUPS
- }
-
- if pipeline_desc.shader.allow_varying_subgroup_size {
- shader_stage_create_flags |=
- vk::PipelineShaderStageCreateFlags::ALLOW_VARYING_SUBGROUP_SIZE;
- }
-
- let specialization_info: Option<&vk::SpecializationInfo> =
- if !pipeline_desc.shader.spec_constants.is_empty() {
- let block_len = pipeline_desc
- .shader
- .spec_constants
- .iter()
- .map(|spec_constant| match spec_constant {
- SpecConstant::Bool { id: _, value: _ }
- | SpecConstant::U32 { id: _, value: _ }
- | SpecConstant::I32 { id: _, value: _ }
- | SpecConstant::F32 { id: _, value: _ } => 4,
- })
- .sum::<usize>();
-
- let block = arena.alloc_slice_fill_copy(block_len, 0u8);
-
- let mut offset = 0;
- let map_entries =
- arena.alloc_slice_fill_iter(pipeline_desc.shader.spec_constants.iter().map(
- |spec_constant| {
- let constant_id;
- let value_size;
- match *spec_constant {
- SpecConstant::Bool { id, value } => {
- constant_id = id;
- let value = if value {
- vk::Bool32::True
- } else {
- vk::Bool32::False
- } as u32;
- value_size = std::mem::size_of_val(&value);
- block[offset..offset + value_size]
- .copy_from_slice(&value.to_ne_bytes())
- }
- SpecConstant::U32 { id, value } => {
- constant_id = id;
- value_size = std::mem::size_of_val(&value);
- block[offset..offset + value_size]
- .copy_from_slice(&value.to_ne_bytes());
- }
- SpecConstant::I32 { id, value } => {
- constant_id = id;
- value_size = std::mem::size_of_val(&value);
- block[offset..offset + value_size]
- .copy_from_slice(&value.to_ne_bytes());
- }
- SpecConstant::F32 { id, value } => {
- constant_id = id;
- value_size = std::mem::size_of_val(&value);
- block[offset..offset + value_size]
- .copy_from_slice(&value.to_ne_bytes());
- }
- }
-
- let map_entry = vk::SpecializationMapEntry {
- constant_id,
- offset: offset as u32,
- size: value_size,
- };
-
- offset += value_size;
-
- map_entry
- },
- ));
-
- Some(arena.alloc(vk::SpecializationInfo {
- data: block.into(),
- map_entries: map_entries.into(),
- }))
- } else {
- None
- };
+ let specialization_info =
+ vulkan_specialization_info(&arena, pipeline_desc.shader.spec_constants);
let compute_pipeline_create_info = arena.alloc(vk::ComputePipelineCreateInfo {
layout: pipeline_layout.pipeline_layout,
stage: vk::ShaderStageFlags::COMPUTE,
name: pipeline_desc.shader.entry.as_ptr(),
module,
- flags: shader_stage_create_flags,
+ flags,
specialization_info,
..default()
},
}
}
+ fn cmd_draw_mesh_tasks(
+ &self,
+ cmd_encoder: &mut CmdEncoder,
+ group_count_x: u32,
+ group_count_y: u32,
+ group_count_z: u32,
+ ) {
+ let command_buffer = self.cmd_encoder_mut(cmd_encoder).command_buffer;
+ unsafe {
+ self.mesh_shader_fn.cmd_draw_mesh_tasks_ext(
+ command_buffer,
+ group_count_x,
+ group_count_y,
+ group_count_z,
+ )
+ }
+ }
+
+ fn cmd_draw_mesh_tasks_indirect(
+ &self,
+ cmd_encoder: &mut CmdEncoder,
+ buffer: BufferArg,
+ offset: u64,
+ draw_count: u32,
+ stride: u32,
+ ) {
+ let (buffer, base_offset, _range) = self.unwrap_buffer_arg(&buffer);
+ let command_buffer = self.cmd_encoder_mut(cmd_encoder).command_buffer;
+ unsafe {
+ self.mesh_shader_fn.cmd_draw_mesh_tasks_indirect_ext(
+ command_buffer,
+ buffer,
+ base_offset + offset,
+ draw_count,
+ stride,
+ )
+ }
+ }
+
+ fn cmd_draw_mesh_tasks_indirect_count(
+ &self,
+ cmd_encoder: &mut CmdEncoder,
+ buffer: BufferArg,
+ offset: u64,
+ count_buffer: BufferArg,
+ count_buffer_offset: u64,
+ max_draw_count: u32,
+ stride: u32,
+ ) {
+ let (buffer, base_offset, _range) = self.unwrap_buffer_arg(&buffer);
+ let (count_buffer, count_base_offset, _range) = self.unwrap_buffer_arg(&count_buffer);
+ let command_buffer = self.cmd_encoder_mut(cmd_encoder).command_buffer;
+ unsafe {
+ self.mesh_shader_fn.cmd_draw_mesh_tasks_indirect_count_ext(
+ command_buffer,
+ buffer,
+ base_offset + offset,
+ count_buffer,
+ count_base_offset + count_buffer_offset,
+ max_draw_count,
+ stride,
+ )
+ }
+ }
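
These three wrappers correspond to vkCmdDrawMeshTasksEXT, vkCmdDrawMeshTasksIndirectEXT, and vkCmdDrawMeshTasksIndirectCountEXT. For the indirect variants, each record in the buffer uses the extension's fixed layout of three consecutive u32 group counts. A usage sketch, with `device`, `cmd_encoder`, and `indirect_buffer` as hypothetical bindings:

    // Mirrors VkDrawMeshTasksIndirectCommandEXT from VK_EXT_mesh_shader.
    #[repr(C)]
    struct DrawMeshTasksIndirectCommand {
        group_count_x: u32,
        group_count_y: u32,
        group_count_z: u32,
    }

    // Direct dispatch of an 8x4x1 grid of task/mesh workgroups.
    device.cmd_draw_mesh_tasks(&mut cmd_encoder, 8, 4, 1);

    // Indirect dispatch reading two tightly packed records from offset 0.
    device.cmd_draw_mesh_tasks_indirect(
        &mut cmd_encoder,
        indirect_buffer,
        0,
        2,
        std::mem::size_of::<DrawMeshTasksIndirectCommand>() as u32,
    );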
+
fn cmd_dispatch(
&self,
cmd_encoder: &mut CmdEncoder,
flags_def!(ShaderStageFlags);
impl ShaderStageFlags {
pub const VERTEX: Self = Self(1 << 0);
- pub const FRAGMENT: Self = Self(1 << 1);
- pub const COMPUTE: Self = Self(1 << 2);
- pub const ALL: Self = Self(0b111); /* Self::VERTEX | Self::FRAGMENT | Self::COMPUTE */
+ pub const MESH: Self = Self(1 << 1);
+ pub const FRAGMENT: Self = Self(1 << 2);
+ pub const COMPUTE: Self = Self(1 << 3);
+ pub const ALL: Self = Self(0b1111); /* Self::VERTEX | Self::MESH | Self::FRAGMENT | Self::COMPUTE */
}
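
Note that inserting MESH renumbers the FRAGMENT and COMPUTE bits, so any flag values persisted outside the crate are invalidated; within the crate, ALL remains the union of the four stages (sketch, assuming `flags_def!` derives the usual bitwise ops and equality):

    assert_eq!(
        ShaderStageFlags::ALL,
        ShaderStageFlags::VERTEX
            | ShaderStageFlags::MESH
            | ShaderStageFlags::FRAGMENT
            | ShaderStageFlags::COMPUTE
    );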
#[derive(Clone, Copy, PartialEq, Eq)]
pub stencil_attachment_format: Option<ImageFormat>,
}
+pub enum VertexOrMeshShader<'a> {
+ Vertex(ShaderDesc<'a>),
+ Mesh(ShaderDesc<'a>),
+}
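
A construction sketch for the new mesh path; the SPIR-V slices and the elided pipeline state are placeholders, and the ShaderDesc fields shown are the ones this change exercises:

    let desc = GraphicsPipelineDesc {
        vertex_or_mesh_shader: VertexOrMeshShader::Mesh(ShaderDesc {
            entry: c"main",
            code: mesh_spirv,
            // Permitted here; asserted against for VertexOrMeshShader::Vertex.
            required_subgroup_size: Some(32),
            require_full_subgroups: true,
            allow_varying_subgroup_size: false,
            spec_constants: &[SpecConstant::U32 { id: 0, value: 128 }],
        }),
        fragment_shader: ShaderDesc {
            entry: c"main",
            code: frag_spirv,
            required_subgroup_size: None,
            require_full_subgroups: false,
            allow_varying_subgroup_size: false,
            spec_constants: &[],
        },
        // layout, attachments, topology, ... as for the vertex path.
    };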
+
pub struct GraphicsPipelineDesc<'a> {
- pub vertex_shader: ShaderDesc<'a>,
+ pub vertex_or_mesh_shader: VertexOrMeshShader<'a>,
pub fragment_shader: ShaderDesc<'a>,
pub layout: PipelineLayout<'a>,
pub attachments: GraphicsPipelineAttachments<'a>,
/// Read as any other resource in a vertex shader.
VertexShaderOtherRead,
+ /// Read as a uniform buffer in a mesh shader.
+ MeshShaderUniformBufferRead,
+ /// Read as a sampled image or uniform texel buffer in a mesh shader.
+ MeshShaderSampledImageRead,
+ /// Read as any other resource in a mesh shader.
+ MeshShaderOtherRead,
+
/// Read as a uniform buffer in a fragment shader.
FragmentShaderUniformBufferRead,
/// Read as a sampled image or uniform texel buffer in a fragment shader.
/// Written as a depth-stencil attachment during rendering.
DepthStencilAttachmentWrite,
+ /// Written as a resource in a mesh shader.
+ MeshWrite,
+
/// Written as a resource in a compute shader.
ComputeWrite,
Access::VertexShaderUniformBufferRead => true,
Access::VertexShaderSampledImageRead => true,
Access::VertexShaderOtherRead => true,
+ Access::MeshShaderUniformBufferRead => true,
+ Access::MeshShaderSampledImageRead => true,
+ Access::MeshShaderOtherRead => true,
Access::FragmentShaderUniformBufferRead => true,
Access::FragmentShaderSampledImageRead => true,
Access::FragmentShaderOtherRead => true,
Access::FragmentShaderWrite => false,
Access::ColorAttachmentWrite => false,
Access::DepthStencilAttachmentWrite => false,
+ Access::MeshWrite => false,
Access::ComputeWrite => false,
Access::ShaderWrite => false,
Access::TransferWrite => false,
first_instance: u32,
);
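+ /// Dispatch `group_count_x * group_count_y * group_count_z` workgroups of
+ /// the pipeline's task shader, or of its mesh shader when no task shader is
+ /// present.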
+ fn cmd_draw_mesh_tasks(
+ &self,
+ cmd_encoder: &mut CmdEncoder,
+ group_count_x: u32,
+ group_count_y: u32,
+ group_count_z: u32,
+ );
+
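+ /// Indirect variant of [`Self::cmd_draw_mesh_tasks`]: reads `draw_count`
+ /// dispatch records of three consecutive `u32` group counts from `buffer`,
+ /// starting at `offset` and spaced `stride` bytes apart.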
+ fn cmd_draw_mesh_tasks_indirect(
+ &self,
+ cmd_encoder: &mut CmdEncoder,
+ buffer: BufferArg,
+ offset: u64,
+ draw_count: u32,
+ stride: u32,
+ );
+
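+ /// Like [`Self::cmd_draw_mesh_tasks_indirect`], but reads the draw count
+ /// from `count_buffer` at `count_buffer_offset`, clamped to `max_draw_count`.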
+ fn cmd_draw_mesh_tasks_indirect_count(
+ &self,
+ cmd_encoder: &mut CmdEncoder,
+ buffer: BufferArg,
+ offset: u64,
+ count_buffer: BufferArg,
+ count_buffer_offset: u64,
+ max_draw_count: u32,
+ stride: u32,
+ );
+
fn cmd_dispatch(
&self,
cmd_encoder: &mut CmdEncoder,