use std::path::Path;
-use narcissus_core::{default, obj};
+use narcissus_core::{default, obj, Widen};
use narcissus_gpu::{
Access, Buffer, BufferDesc, BufferImageCopy, BufferUsageFlags, Device, Extent3d, Image,
ImageAspectFlags, ImageBarrier, ImageDesc, ImageDimension, ImageFormat, ImageLayout,
.flatten()
.enumerate()
.map(|(index, &(position_index, texcoord_index, normal_index))| {
- let position = visitor.positions[position_index as usize - 1];
- let normal = visitor.normals[normal_index as usize - 1];
- let texcoord = visitor.texcoords[texcoord_index as usize - 1];
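+ // OBJ face indices are 1-based, hence the `- 1` when indexing.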
+ let position = visitor.positions[position_index.widen() - 1];
+ let normal = visitor.normals[normal_index.widen() - 1];
+ let texcoord = visitor.texcoords[texcoord_index.widen() - 1];
(
Vertex {
position: vec4(position.x, position.y, position.z, 0.0).into(),
+use crate::Widen;
+
pub trait Bits: Copy + Default {
fn is_zero(self) -> bool;
/// Clear the least significant set bit and return its index.
self.base += std::mem::size_of::<T>() * 8;
}
let index = self.word.clear_least_significant_set_bit();
- Some(self.base + index as usize)
+ Some(self.base + index.widen())
}
}
mod virtual_mem;
mod virtual_vec;
mod waiter;
+mod widen;
pub use arena::{Arena, HybridArena};
pub use bitset::BitIter;
pub use finite::{FiniteF32, FiniteF64, NotFiniteError};
+pub use widen::Widen;
+
use std::{ffi::CStr, mem::MaybeUninit};
#[macro_export]
+use crate::Widen;
+
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct Bin<
// The log2 of the size of the linear bin.
#[inline(always)]
pub fn index(&self) -> usize {
- self.index as usize
+ self.index.widen()
}
#[inline(always)]
use crate::{
align_offset, mod_inverse_u32, static_assert, virtual_commit, virtual_free, virtual_reserve,
+ Widen,
};
/// Each handle uses `GEN_BITS` bits of per-slot generation counter. Looking up
#[inline(always)]
fn get(&self, slot_index: SlotIndex) -> Option<&Slot> {
- let index = slot_index.0 as usize;
+ let index = slot_index.0.widen();
if index < self.len {
Some(unsafe { self.ptr.as_ptr().add(index).as_ref().unwrap() })
} else {
#[inline(always)]
fn get_mut(&mut self, slot_index: SlotIndex) -> Option<&mut Slot> {
- let index = slot_index.0 as usize;
+ let index = slot_index.0.widen();
if index < self.len {
Some(unsafe { self.ptr.as_ptr().add(index).as_mut().unwrap() })
} else {
/// Update the lookup table for the given `ValueIndex` with a new `SlotIndex`
#[inline(always)]
fn set_slot(&mut self, value_index: ValueIndex, slot_index: SlotIndex) {
- let value_index = value_index.0 as usize;
+ let value_index = value_index.0.widen();
assert!(value_index < self.len);
unsafe {
std::ptr::write(
/// lookup table.
#[inline(always)]
fn get_slot(&mut self, value_index: ValueIndex) -> SlotIndex {
- let value_index = value_index.0 as usize;
+ let value_index = value_index.0.widen();
assert!(value_index < self.len);
// SAFETY: SlotIndex is Copy so we don't invalidate the value being read.
unsafe { std::ptr::read(self.slots_ptr.as_ptr().add(value_index).as_ref().unwrap()) }
.update_value_index(value_index);
}
- let value_index = value_index.0 as usize;
+ let value_index = value_index.0.widen();
assert!(value_index < self.len);
unsafe {
self.len -= 1;
let value = std::ptr::read(ptr.add(value_index));
- std::ptr::copy(
- ptr.add(last_value_index.0 as usize),
- ptr.add(value_index),
- 1,
- );
+ std::ptr::copy(ptr.add(last_value_index.0.widen()), ptr.add(value_index), 1);
value
}
/// Panics if `value_index` is out of bounds
#[inline(always)]
fn get(&self, value_index: ValueIndex) -> &T {
- let value_index = value_index.0 as usize;
+ let value_index = value_index.0.widen();
assert!(value_index < self.len);
let ptr = self.values_ptr.as_ptr();
unsafe { ptr.add(value_index).as_ref().unwrap() }
/// Panics if `value_index` is out of bounds
#[inline(always)]
fn get_mut(&mut self, value_index: ValueIndex) -> &mut T {
- let value_index = value_index.0 as usize;
+ let value_index = value_index.0.widen();
assert!(value_index < self.len);
let ptr = self.values_ptr.as_ptr();
unsafe { ptr.add(value_index).as_mut().unwrap() }
-use crate::mul_full_width_u64;
+use crate::{mul_full_width_u64, Widen};
#[derive(Clone, Copy, PartialEq, Eq)]
pub struct Pcg64 {
result + carry as u64
}
+ /// Generates a uniformly distributed random number in the range
+ /// `0..upper_bound`
+ ///
+ /// Always draws two 64 bit words from the PRNG.
+ ///
+ /// Based on <https://github.com/apple/swift/pull/39143/commits/87b3f607042e653a42b505442cc803ec20319c1c>
+ #[inline]
+ #[must_use]
+ pub fn next_bound_usize(&mut self, upper_bound: usize) -> usize {
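+ // Treat `upper_bound * random` as a 64.64 fixed-point product: the high
+ // word is the candidate result, the low word the fraction. A second draw
+ // extends the fraction; if adding the high word of its product carries,
+ // round the result up.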
+ let upper_bound = upper_bound as u64;
+ let (result, fraction) = mul_full_width_u64(upper_bound, self.next_u64());
+ let (hi, _) = mul_full_width_u64(upper_bound, self.next_u64());
+ let (_, carry) = fraction.overflowing_add(hi);
+ (result + carry as u64).widen()
+ }
+
/// Generates a uniformly distributed random float in the range `-1.0..1.0`
///
/// Always draws two 64 bit words from the PRNG.
if slice.is_empty() {
None
} else {
- let index = self.next_bound_u64(slice.len() as u64) as usize;
- slice.get(index)
+ slice.get(self.next_bound_usize(slice.len()))
}
}
--- /dev/null
+use crate::static_assert;
+
+/// Trait that allows explicit integer widening.
+pub trait Widen<T> {
+ /// Returns `self` "widened" to `T`; panics if the conversion would wrap.
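+ ///
+ /// # Examples
+ ///
+ /// An illustrative example (the `use` relies on the crate-root re-export of
+ /// `Widen` added in this change):
+ ///
+ /// ```
+ /// use narcissus_core::Widen;
+ ///
+ /// let index: u32 = 3;
+ /// let values = [10u8, 20, 30, 40];
+ /// assert_eq!(values[index.widen()], 40);
+ /// ```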
+ fn widen(self) -> T;
+}
+
+// Would need to further restrict widen cases for 32 bit support.
+static_assert!(
+ usize::BITS == 64,
+ "only supports machines with 64 bit usize"
+);
+
+#[cold]
+#[inline(never)]
+fn widening_failure() {
+ panic!("failed to widen type, out of bounds")
+}
+
+impl Widen<usize> for u8 {
+ #[inline(always)]
+ fn widen(self) -> usize {
+ self as usize
+ }
+}
+
+impl Widen<usize> for u16 {
+ #[inline(always)]
+ fn widen(self) -> usize {
+ self as usize
+ }
+}
+
+impl Widen<usize> for u32 {
+ #[inline(always)]
+ fn widen(self) -> usize {
+ self as usize
+ }
+}
+
+impl Widen<usize> for u64 {
+ #[inline(always)]
+ fn widen(self) -> usize {
+ self as usize
+ }
+}
+
+impl Widen<usize> for i8 {
+ #[inline(always)]
+ fn widen(self) -> usize {
+ if self < 0 {
+ widening_failure()
+ }
+ self as usize
+ }
+}
+
+impl Widen<usize> for i16 {
+ #[inline(always)]
+ fn widen(self) -> usize {
+ if self < 0 {
+ widening_failure()
+ }
+ self as usize
+ }
+}
+
+impl Widen<usize> for i32 {
+ #[inline(always)]
+ fn widen(self) -> usize {
+ if self < 0 {
+ widening_failure()
+ }
+ self as usize
+ }
+}
+
+impl Widen<usize> for i64 {
+ #[inline(always)]
+ fn widen(self) -> usize {
+ if self < 0 {
+ widening_failure()
+ }
+ self as usize
+ }
+}
use std::collections::hash_map::Entry;
use crate::{font::GlyphBitmapBox, FontCollection, GlyphIndex, Oversample, Packer};
-use narcissus_core::default;
pub use narcissus_core::FiniteF32;
+use narcissus_core::{default, Widen};
use rustc_hash::FxHashMap;
use stb_truetype_sys::rectpack::Rect;
let cached_glyph = &self.cached_glyphs[cached_glyph_index];
let rect = &self.rects[cached_glyph_index];
- let touched_glyph =
- &mut self.touched_glyphs[touched_glyph_index.0 as usize];
+ let touched_glyph = &mut self.touched_glyphs[touched_glyph_index.0.widen()];
touched_glyph.x0 = rect.x;
touched_glyph.x1 = rect.x + rect.w;
cstr, cstr_from_bytes_until_nul, default, is_aligned_to, manual_arc,
manual_arc::ManualArc,
raw_window::{AsRawWindow, RawWindow},
- Arena, HybridArena, Mutex, PhantomUnsend, Pool,
+ Arena, HybridArena, Mutex, PhantomUnsend, Pool, Widen,
};
use vulkan_sys as vk;
fn vk_vec<T, F: FnMut(&mut u32, *mut T) -> vulkan_sys::Result>(mut f: F) -> Vec<T> {
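+ // Standard Vulkan two-call enumeration: the first call queries the element
+ // count, the second fills a buffer sized to that count.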
let mut count = 0;
vk_check!(f(&mut count, std::ptr::null_mut()));
- let mut v = Vec::with_capacity(count as usize);
+ let mut v = Vec::with_capacity(count.widen());
vk_check!(f(&mut count, v.as_mut_ptr()));
- unsafe { v.set_len(count as usize) };
+ unsafe { v.set_len(count.widen()) };
v
#[must_use]
fn vulkan_bool32(b: bool) -> vk::Bool32 {
- const VALUES: [vk::Bool32; 2] = [vk::Bool32::False, vk::Bool32::True];
- VALUES[b as usize]
+ match b {
+ false => vk::Bool32::False,
+ true => vk::Bool32::True,
+ }
}
#[must_use]
}));
let allocators = std::array::from_fn(|i| {
- if i < physical_device_memory_properties.memory_type_count as usize {
+ if i < physical_device_memory_properties.memory_type_count.widen() {
Some(default())
} else {
None
.map(|memory_type_index| {
(
memory_type_index,
- self.physical_device_memory_properties.memory_types[memory_type_index as usize],
+ self.physical_device_memory_properties.memory_types[memory_type_index.widen()],
)
})
.find(|(i, memory_type)| {
let memory_type_index =
self.find_memory_type_index(desc.requirements.memory_type_bits, memory_property_flags);
- let allocator = self.allocators[memory_type_index as usize]
+ let allocator = self.allocators[memory_type_index.widen()]
.as_ref()
.expect("returned a memory type index that has no associated allocator");
allocator.dedicated.lock().insert(memory);
let mapped_ptr = if self.physical_device_memory_properties.memory_types
- [memory_type_index as usize]
- .property_flags
- .contains(vk::MemoryPropertyFlags::HOST_VISIBLE)
+ [memory_type_index.widen()]
+ .property_flags
+ .contains(vk::MemoryPropertyFlags::HOST_VISIBLE)
{
let mut data = std::ptr::null_mut();
vk_check!(self.device_fn.map_memory(
let memory_type_index =
self.find_memory_type_index(desc.requirements.memory_type_bits, memory_property_flags);
- let allocator = self.allocators[memory_type_index as usize]
+ let allocator = self.allocators[memory_type_index.widen()]
.as_ref()
.expect("returned a memory type index that has no associated allocator");
));
let mapped_ptr = if self.physical_device_memory_properties.memory_types
- [memory_type_index as usize]
- .property_flags
- .contains(vk::MemoryPropertyFlags::HOST_VISIBLE)
+ [memory_type_index.widen()]
+ .property_flags
+ .contains(vk::MemoryPropertyFlags::HOST_VISIBLE)
{
let mut data = std::ptr::null_mut();
vk_check!(self.device_fn.map_memory(
// non-null, then we can create a slice for it.
unsafe {
let dst =
- std::slice::from_raw_parts_mut(memory.mapped_ptr(), memory.size() as usize);
+ std::slice::from_raw_parts_mut(memory.mapped_ptr(), memory.size().widen());
dst.copy_from_slice(initial_data);
}
}
for allocation in frame.destroyed_allocations.get_mut().drain(..) {
match allocation {
VulkanMemory::Dedicated(dedicated) => {
- let allocator = self.allocators[dedicated.memory_type_index as usize]
+ let allocator = self.allocators[dedicated.memory_type_index.widen()]
.as_ref()
.unwrap();
allocator.dedicated.lock().remove(&dedicated.memory);
unsafe { device_fn.free_memory(device, dedicated.memory, None) }
}
VulkanMemory::SubAlloc(sub_alloc) => {
- let allocator = self.allocators[sub_alloc.memory_type_index as usize]
+ let allocator = self.allocators[sub_alloc.memory_type_index.widen()]
.as_ref()
.unwrap();
allocator.tlsf.lock().free(sub_alloc.allocation)
present_info.acquire = acquire;
present_info.image_index = image_index;
present_info.swapchain = swapchain;
- let view = image_views[image_index as usize];
+ let view = image_views[image_index.widen()];
return Ok((width, height, view));
}
ops::{Index, IndexMut},
};
-use narcissus_core::{linear_log_binning, static_assert};
+use narcissus_core::{linear_log_binning, static_assert, Widen};
// The log2 of the size of the 'linear' bin.
pub const LINEAR_LOG2: u32 = 7; // 2^7 = 128
#[inline(always)]
fn index(&self, index: BlockIndex) -> &Self::Output {
- &self[index.0.get() as usize]
+ &self[index.0.get().widen()]
}
}
impl IndexMut<BlockIndex> for Vec<Block> {
#[inline(always)]
fn index_mut(&mut self, index: BlockIndex) -> &mut Self::Output {
- &mut self[index.0.get() as usize]
+ &mut self[index.0.get().widen()]
}
}
#[inline(always)]
fn index(&self, index: SuperBlockIndex) -> &Self::Output {
- &self[index.0 as usize]
+ &self[index.0.widen()]
}
}
{
#[inline(always)]
fn index_mut(&mut self, index: SuperBlockIndex) -> &mut Self::Output {
- &mut self[index.0 as usize]
+ &mut self[index.0.widen()]
}
}
// First we scan the second-level bitmap from sub_bin, masking out the earlier
// sub-bins so we don't end up returning a bin that's too small for the
// allocation.
- let mut second_level = self.bitmap_1[bin as usize] & (!0 << sub_bin);
+ let mut second_level = self.bitmap_1[bin.widen()] & (!0 << sub_bin);
// If that search failed, then we must scan the first-level bitmap from the next
// bin forward. If we find anything here it cannot possibly be smaller than the
// Recalculate the bin from the first level bitmap.
bin = first_level.trailing_zeros();
- second_level = self.bitmap_1[bin as usize];
+ second_level = self.bitmap_1[bin.widen()];
}
// Find the sub-bin from the second level bitmap.
/// structure.
fn set_metadata_bit(&mut self, bin: Bin) {
let sub_bin = bin.sub_bin();
- let bin = bin.bin() as usize;
+ let bin = bin.bin().widen();
self.bitmap_0 |= 1 << bin;
self.bitmap_1[bin] |= 1 << sub_bin;
}
/// structure.
fn clear_metadata_bit(&mut self, bin: Bin) {
let sub_bin = bin.sub_bin();
- let bin = bin.bin() as usize;
+ let bin = bin.bin().widen();
self.bitmap_1[bin] &= !(1 << sub_bin);
if self.bitmap_1[bin] == 0 {
self.bitmap_0 &= !(1 << bin);
}
/// Inserts a block into the empty blocks lists.
+ #[inline(always)]
fn insert_block(&mut self, block_index: BlockIndex) {
debug_assert!(self.blocks[block_index].is_free());
debug_assert!(self.blocks[block_index].free_link.is_unlinked());
}
/// Removes a block from the empty blocks lists.
+ #[inline(always)]
fn extract_block(&mut self, block_index: BlockIndex) {
debug_assert!(self.blocks[block_index].is_free());
}
/// Requests a new block, and returns its `BlockIndex`.
+ #[inline(always)]
fn request_block(
&mut self,
offset: u32,
size: u32,
super_block_index: SuperBlockIndex,
) -> BlockIndex {
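+ // Growing the block pool is the uncommon path; keep it out of line so the
+ // inlined hot path stays small.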
+ #[cold]
+ fn create_block(
+ blocks: &mut Vec<Block>,
+ size: u32,
+ offset: u32,
+ super_block_index: SuperBlockIndex,
+ ) -> BlockIndex {
+ assert!(blocks.len() < i32::MAX as usize);
+ let block_index = BlockIndex(NonZeroU32::new(blocks.len() as u32).unwrap());
+ blocks.push(Block {
+ generation: 0,
+ size,
+ offset,
+ free_link: BlockLink::new(block_index),
+ phys_link: BlockLink::new(block_index),
+ super_block_index,
+ });
+ block_index
+ }
+
let block_index = if let Some(free_block_index) = self.free_block_head {
let next_index = self.blocks[free_block_index].free_link.next;
self.free_block_head = if next_index != free_block_index {
list_unlink!(self.blocks, free_link, free_block_index);
free_block_index
} else {
- assert!(self.blocks.len() < i32::MAX as usize);
- let block_index = BlockIndex(NonZeroU32::new(self.blocks.len() as u32).unwrap());
- self.blocks.push(Block {
- generation: 0,
- size,
- offset,
- free_link: BlockLink::new(block_index),
- phys_link: BlockLink::new(block_index),
- super_block_index,
- });
- block_index
+ create_block(&mut self.blocks, size, offset, super_block_index)
};
let block = &mut self.blocks[block_index];