image_barriers: &[ImageBarrier],
);
- unsafe fn cmd_push_constants_unchecked(
+ /// Incrementally update the push constants for the given shader stage flags and offset.
+ ///
+ /// # Safety
+ ///
+ /// The memory region from `ptr` through `ptr` + `len` must be valid for reads.
+ unsafe fn cmd_push_constants(
&self,
cmd_encoder: &mut CmdEncoder,
stage_flags: ShaderStageFlags,
offset: u32,
- size: u32,
- src: *const u8,
+ ptr: *const u8,
+ len: usize,
);
fn cmd_copy_buffer_to_image(
}
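The raw entry point leaves the whole pointer contract to the caller. A minimal sketch of a conforming call, assuming a `device` implementing `Device` and a live `cmd_encoder` (both placeholder names, not from this change): because `ptr` and `len` are derived from a single value that outlives the call, the region `ptr..ptr + len` stays valid for reads.

    let constants = 0.5f32.to_le_bytes();
    // Safety: `ptr` and `len` both come from `constants`, which lives past
    // the call, so `ptr..ptr + len` is valid for reads.
    unsafe {
        device.cmd_push_constants(
            cmd_encoder,
            ShaderStageFlags::VERTEX,
            0,
            constants.as_ptr(),
            constants.len(),
        );
    }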
pub trait DeviceExt: Device {
- fn cmd_push_constants<T: ?Sized>(
+ fn cmd_push_constants_with_data<T: ?Sized>(
&self,
cmd_encoder: &mut CmdEncoder,
        stage_flags: ShaderStageFlags,
        offset: usize,
        data: &T,
) {
let size = std::mem::size_of_val(data);
- let src = data as *const _ as *const u8;
+ let ptr = data as *const _ as *const u8;
// # Safety
//
- // The memory region from `src` through `src` + `size` must be valid as it's
+ // The memory region from `ptr` through `ptr` + `size` is guaranteed to be valid, as it's
// directly derived from `data`.
- //
- // This function will propagate undefined values from T, for example, padding
- // bytes, however we promise not to materialize a rust reference to any such
- // data.
unsafe {
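            // The raw call narrows `offset` to `u32`, and push constant
            // ranges are `u32`-sized in the underlying APIs, so reject
            // values the casts can't represent.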
if size >= u32::MAX as usize || offset >= u32::MAX as usize {
overflow();
}
- self.cmd_push_constants_unchecked(
- cmd_encoder,
- stage_flags,
- offset as u32,
- size as u32,
- src,
- )
+ self.cmd_push_constants(cmd_encoder, stage_flags, offset as u32, ptr, size)
}
}
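With the wrapper, call sites pass a reference and let `size_of_val` take the byte length from the value itself, which also covers unsized types such as slices. A hypothetical example (the struct name and field values are illustrative, not part of this change):

    #[repr(C)]
    struct TexturedQuadPushConstants {
        screen_width: u32,
        screen_height: u32,
    }

    gpu.cmd_push_constants_with_data(
        cmd_encoder,
        ShaderStageFlags::VERTEX,
        0,
        &TexturedQuadPushConstants {
            screen_width: 1920,
            screen_height: 1080,
        },
    );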
gpu.cmd_set_pipeline(cmd_encoder, self.pipelines.basic_pipeline);
gpu.cmd_set_bind_group(cmd_encoder, 0, &graphics_bind_group);
- gpu.cmd_push_constants(
+ gpu.cmd_push_constants_with_data(
cmd_encoder,
ShaderStageFlags::VERTEX,
0,
gpu.cmd_set_pipeline(cmd_encoder, self.pipelines.draw_2d_bin_0_clear_pipeline);
gpu.cmd_set_bind_group(cmd_encoder, 0, &compute_bind_group);
- gpu.cmd_push_constants(
+ gpu.cmd_push_constants_with_data(
cmd_encoder,
ShaderStageFlags::COMPUTE,
0,
gpu.cmd_set_pipeline(cmd_encoder, self.pipelines.draw_2d_bin_1_scatter_pipeline);
gpu.cmd_set_bind_group(cmd_encoder, 0, &compute_bind_group);
- gpu.cmd_push_constants(
+ gpu.cmd_push_constants_with_data(
cmd_encoder,
ShaderStageFlags::COMPUTE,
0,
gpu.cmd_dispatch(
cmd_encoder,
- draw_buffer_len.div_ceil(self.pipelines.draw_2d_bin_1_scatter_pipeline_workgroup_size),
+ draw_buffer_len
+ .div_ceil(self.pipelines.draw_2d_bin_1_scatter_pipeline_workgroup_size),
1,
1,
);
gpu.cmd_set_pipeline(cmd_encoder, self.pipelines.draw_2d_bin_2_sort_pipeline);
gpu.cmd_set_bind_group(cmd_encoder, 0, &compute_bind_group);
- gpu.cmd_push_constants(
+ gpu.cmd_push_constants_with_data(
cmd_encoder,
ShaderStageFlags::COMPUTE,
0,
// Upsweep
gpu.cmd_set_pipeline(cmd_encoder, self.pipelines.radix_sort_0_upsweep_pipeline);
gpu.cmd_set_bind_group(cmd_encoder, 0, &compute_bind_group);
- gpu.cmd_push_constants(
+ gpu.cmd_push_constants_with_data(
cmd_encoder,
ShaderStageFlags::COMPUTE,
0,
self.pipelines.radix_sort_1_downsweep_pipeline,
);
gpu.cmd_set_bind_group(cmd_encoder, 0, &compute_bind_group);
- gpu.cmd_push_constants(
+ gpu.cmd_push_constants_with_data(
cmd_encoder,
ShaderStageFlags::COMPUTE,
0,
gpu.cmd_set_pipeline(cmd_encoder, self.pipelines.draw_2d_bin_3_resolve_pipeline);
gpu.cmd_set_bind_group(cmd_encoder, 0, &compute_bind_group);
- gpu.cmd_push_constants(
+ gpu.cmd_push_constants_with_data(
cmd_encoder,
ShaderStageFlags::COMPUTE,
0,
gpu.cmd_set_pipeline(cmd_encoder, self.pipelines.draw_2d_rasterize_pipeline);
gpu.cmd_set_bind_group(cmd_encoder, 0, &compute_bind_group);
- gpu.cmd_push_constants(
+ gpu.cmd_push_constants_with_data(
cmd_encoder,
ShaderStageFlags::COMPUTE,
0,
gpu.cmd_set_pipeline(cmd_encoder, self.pipelines.composite_pipeline);
gpu.cmd_set_bind_group(cmd_encoder, 0, &compute_bind_group);
- gpu.cmd_push_constants(
+ gpu.cmd_push_constants_with_data(
cmd_encoder,
ShaderStageFlags::COMPUTE,
0,