impl<'a> Fonts<'a> {
pub fn new() -> Self {
- // Safety: Safe because Roboto-Regular.ttf is a valid ttf font embedded in the application.
+ // SAFETY: Safe because Roboto-Regular.ttf is a valid ttf font embedded
+ // in the application.
let roboto_regular =
unsafe { Font::from_bytes(include_bytes!("fonts/Roboto-Regular.ttf")) };
let noto_sans_japanese =
usage,
size: len,
});
- // Safety: T: Blittable which implies it's freely convertable to a byte slice.
+ // SAFETY: T: Blittable, which implies it's freely convertible to a byte
+ // slice.
unsafe {
let dst = std::slice::from_raw_parts_mut(device.map_buffer(buffer), len);
let src = std::slice::from_raw_parts(data.as_ptr() as *const u8, len);
impl<'a> Drop for MappedBuffer<'a> {
fn drop(&mut self) {
- // Safety: Make sure we don't have the slice outlive the mapping.
+ // SAFETY: Make sure we don't have the slice outlive the mapping.
unsafe {
self.device.unmap_buffer(self.buffer);
}
/// Wrapper around a pointer to a page footer.
///
-/// Allows us to easily borrow the least significant bit of the page pointer to keep track of
-/// whether a given page was allocated on the heap, with the global allocator. Or if it is the stack
-/// page in a HybridArena.
+/// Allows us to easily borrow the least significant bit of the page pointer to
+/// keep track of whether a given page was allocated on the heap, with the
+/// global allocator. Or if it is the stack page in a HybridArena.
#[derive(Clone, Copy)]
struct PagePointer(*mut PageFooter);
impl PagePointer {
#[inline(always)]
fn empty() -> PagePointer {
- // We pretend the empty page is a "stack" pointer, as it allows us to remove a branch from
- // the hybrid array setup.
+ // We pretend the empty page is a "stack" pointer, as it allows us to remove a
+ // branch from the hybrid array setup.
PagePointer::new_stack(&EMPTY_PAGE as *const PageFooterSync as *mut PageFooter)
}
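// Illustrative sketch of the low-bit tagging scheme described above (not part
// of the patch; the exact heap/stack bit assignment is an implementation
// detail not shown in this diff). Because `PageFooter` has alignment >= 2,
// bit 0 of a valid footer pointer is always zero and can carry a one-bit flag.
fn tag_sketch(ptr: *mut PageFooter, flag: bool) -> usize {
    (ptr as usize) | flag as usize
}
fn untag_sketch(bits: usize) -> (*mut PageFooter, bool) {
    ((bits & !1) as *mut PageFooter, bits & 1 == 1)
}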
struct PageFooter {
/// Pointer to the start of this page.
base: NonNull<u8>,
- /// Pointer to the current bump allocation cursor. Must be within the range `base..=&self`.
+ /// Pointer to the current bump allocation cursor. Must be within the range
+ /// `base..=&self`.
bump: Cell<NonNull<u8>>,
/// Page size in bytes.
size: usize,
// Cannot wrap due to guard above.
let bump = bump.wrapping_sub(layout.size());
let remainder = bump as usize & (layout.align() - 1);
- // Cannot have a remainder greater than the magnitude of the value, so this cannot wrap.
+ // Cannot have a remainder greater than the magnitude of the value, so this
+ // cannot wrap.
let bump = bump.wrapping_sub(remainder);
if bump >= base {
///
/// # Safety
///
- /// This must only be called on pages which have no outstanding references to allocations, as it
- /// allows subsequent operations to allocate the same addresses.
+ /// This must only be called on pages which have no outstanding references to
+ /// allocations, as it allows subsequent operations to allocate the same
+ /// addresses.
unsafe fn reset(&self) {
self.bump.set(NonNull::new_unchecked(
self.base.as_ptr().add(self.size - PAGE_FOOTER_SIZE),
}
});
-/// Create a new page, large enough for the given layout, and prepend it to the linked list of
-/// pages.
+/// Create a new page, large enough for the given layout, and prepend it to the
+/// linked list of pages.
///
/// Returns the new page.
///
let page_size = page.as_ref().size;
// Double each allocated page to amortize allocation cost.
let new_page_size = page_size * 2;
- // Clamp between `PAGE_MIN_SIZE` and `PAGE_MAX_SIZE` to handle the case where the existing
- // page is the empty page, and to avoid overly large allocated blocks.
+ // Clamp between `PAGE_MIN_SIZE` and `PAGE_MAX_SIZE` to handle the case where
+ // the existing page is the empty page, and to avoid overly large allocated
+ // blocks.
let new_page_size = new_page_size.max(PAGE_MIN_SIZE).min(PAGE_MAX_SIZE);
- // Ensure that after all that, the given page is large enough to hold the thing we're trying
- // to allocate.
+ // Ensure that after all that, the given page is large enough to hold the thing
+ // we're trying to allocate.
let new_page_size = new_page_size.max(layout.size() + layout.align() + PAGE_FOOTER_SIZE);
let size_without_footer = new_page_size - PAGE_FOOTER_SIZE;
debug_assert_ne!(size_without_footer, 0);
Some(PagePointer::new_heap(footer))
}
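// Worked example of the sizing logic above, using hypothetical constants
// (PAGE_MIN_SIZE = 4096, PAGE_MAX_SIZE = 262144, PAGE_FOOTER_SIZE = 32; the
// real values are not shown in this diff):
//
//   existing size 0      -> doubled 0      -> clamped 4096   (empty page case)
//   existing size 4096   -> doubled 8192   -> clamped 8192
//   existing size 262144 -> doubled 524288 -> clamped 262144 (capped at max)
//
// The final `max` against `layout.size() + layout.align() + PAGE_FOOTER_SIZE`
// only changes the result when a single allocation is larger than the clamped
// page size.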
-/// Deallocate the given page if it was allocated with the global allocator, and all the heap pages
-/// linked to it.
+/// Deallocate the given page if it was allocated with the global allocator, and
+/// all the heap pages linked to it.
///
/// # Safety
///
-/// Must not be called on any pages that hold live allocations, or pages which link to pages that
-/// hold live allocations.
+/// Must not be called on any pages that hold live allocations, or pages which
+/// link to pages that hold live allocations.
#[cold]
unsafe fn deallocate_page_list(mut page: PagePointer) {
- // Walk the linked list of pages and deallocate each one that originates from the heap.
- // The last page is either the empty page, or the hybrid page, both of which are marked as stack
- // page pointers.
+ // Walk the linked list of pages and deallocate each one that originates from
+ // the heap. The last page is either the empty page, or the hybrid page, both of
+ // which are marked as stack page pointers.
while !page.is_stack() {
let p = page;
page = page.as_ref().next.get();
///
/// Bump allocates within pages allocated from the global heap allocator.
///
-/// Objects that are allocated within the arena will never have their `Drop` function called.
+/// Objects that are allocated within the arena will never have their `Drop`
+/// function called.
#[repr(C)]
pub struct Arena {
page_list_head: Cell<PagePointer>,
/// An allocation arena with an allocation region that lives on the stack.
///
-/// Bump allocates from the stack page until it's exhausted, then behaves like a regular `Arena`.
+/// Bump allocates from the stack page until it's exhausted, then behaves like a
+/// regular `Arena`.
///
-/// Objects that are allocated within the arena will never have their `Drop` function called.
+/// Objects that are allocated within the arena will never have their `Drop`
+/// function called.
#[repr(C)]
pub struct HybridArena<const STACK_CAP: usize> {
data: MaybeUninit<[u8; STACK_CAP]>,
/// Reset the arena.
///
- /// Releases all pages to the global allocator, except for the most recently allocated one,
- /// which has its bump pointer reset.
+ /// Releases all pages to the global allocator, except for the most recently
+ /// allocated one, which has its bump pointer reset.
///
/// Does not call destructors on any objects allocated by the pool.
pub fn reset(&mut self) {
- // We don't want to write to the static empty page, so abandon here if we haven't allocated
- // any pages.
+ // We don't want to write to the static empty page, so abandon here if we
+ // haven't allocated any pages.
if self.page_list_head.get().is_empty() {
return;
}
#[inline(always)]
#[allow(clippy::mut_from_ref)]
pub fn alloc<T>(&self, value: T) -> &mut T {
- // Safety: We allocate memory for `T` and then write a `T` into that location.
+ // SAFETY: We allocate memory for `T` and then write a `T` into that location.
unsafe {
let layout = Layout::new::<T>();
let ptr = self.alloc_layout(layout);
where
F: FnOnce() -> T,
{
- // Safety: We allocate memory for `T` and then write a `T` into that location.
+ // SAFETY: We allocate memory for `T` and then write a `T` into that location.
unsafe {
let layout = Layout::new::<T>();
let ptr = self.alloc_layout(layout);
where
F: FnOnce() -> T,
{
- // Safety: We allocate memory for `T` and then write a `T` into that location.
+ // SAFETY: We allocate memory for `T` and then write a `T` into that location.
unsafe {
let layout = Layout::new::<T>();
let ptr = match self.try_alloc_layout(layout) {
let src = src.as_ptr();
let dst = self.alloc_layout(layout).cast::<T>().as_ptr();
- // Safety: We allocate dst with the same size as src before copying into it.
+ // SAFETY: We allocate dst with the same size as src before copying into it.
unsafe {
std::ptr::copy_nonoverlapping(src, dst, len);
std::slice::from_raw_parts_mut(dst, len)
let layout = Layout::for_value(src);
let dst = self.alloc_layout(layout).cast::<T>().as_ptr();
- // Safety: We allocate dst with the same size as src before copying into it.
+ // SAFETY: We allocate dst with the same size as src before copying into it.
unsafe {
for (i, value) in src.iter().cloned().enumerate() {
std::ptr::write(dst.add(i), value);
#[allow(clippy::mut_from_ref)]
pub fn alloc_str(&self, src: &str) -> &mut str {
let str = self.alloc_slice_copy(src.as_bytes());
- // Safety: We've just copied this string from a valid `&str`, so it must be valid too.
+ // SAFETY: We've just copied this string from a valid `&str`, so it must be
+ // valid too.
unsafe { std::str::from_utf8_unchecked_mut(str) }
}
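// Hypothetical usage sketch of the arena API above (not part of the patch;
// `Arena::new` is assumed from context and does not appear in this diff).
// Allocations hand out `&mut` borrows of the arena, so they must end before
// `reset`, which takes `&mut self`.
fn arena_usage_sketch() {
    let mut arena = Arena::new();
    let x: &mut u32 = arena.alloc(123u32);
    let s: &mut str = arena.alloc_str("hello");
    *x += 1;
    assert_eq!(*x, 124);
    assert_eq!(&*s, "hello");
    arena.reset(); // No destructors run; the pages are simply reused.
}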
let layout = Layout::array::<T>(len).unwrap_or_else(|_| oom());
let dst = self.alloc_layout(layout).cast::<T>();
- // Safety: We allocated an array of len elements of T above.
+ // SAFETY: We allocated an array of len elements of T above.
unsafe {
for i in 0..len {
std::ptr::write(dst.as_ptr().add(i), f(i))
impl<const STACK_CAP: usize> HybridArena<STACK_CAP> {
pub fn new() -> Self {
- // Ideally we'd pad `STACK_CAP` out to the alignment, avoiding wasting any space, but we
- // can't do maffs with constants just yet, so abort instead.
+ // Ideally we'd pad `STACK_CAP` out to the alignment, avoiding wasting any
+ // space, but we can't do maffs with constants just yet, so abort instead.
debug_assert!(STACK_CAP % std::mem::align_of::<PageFooter>() == 0);
Self {
data: MaybeUninit::uninit(),
/// Reset the arena.
///
- /// Releases all pages to the global allocator, except for the most recently allocated one,
- /// which has its bump pointer reset.
+ /// Releases all pages to the global allocator, except for the most recently
+ /// allocated one, which has its bump pointer reset.
///
/// Does not call destructors on any objects allocated by the pool.
pub fn reset(&mut self) {
let page_list_head = self.page_list_head.get();
unsafe {
- // SAFETY: We're either pointing to an empty page, or a hybrid page, but the hybrid page
- // pointer might not be up to date if the object has moved, so we must call setup in
- // that case. Since setup also resets the page, handles the empty page, and is
- // idempotent, we can always call it here when we see a stack page, then return.
+ // SAFETY: We're either pointing to an empty page, or a hybrid page, but the
+ // hybrid page pointer might not be up to date if the object has moved, so we
+ // must call setup in that case. Since setup also resets the page, handles the
+ // empty page, and is idempotent, we can always call it here when we see a stack
+ // page, then return.
if page_list_head.is_stack() {
self.setup_hybrid_page();
return;
#[inline(always)]
#[allow(clippy::mut_from_ref)]
pub fn alloc<T>(&self, value: T) -> &mut T {
- // Safety: We allocate memory for `T` and then write a `T` into that location.
+ // SAFETY: We allocate memory for `T` and then write a `T` into that location.
unsafe {
let layout = Layout::new::<T>();
let ptr = self.alloc_layout(layout);
where
F: FnOnce() -> T,
{
- // Safety: We allocate memory for `T` and then write a `T` into that location.
+ // SAFETY: We allocate memory for `T` and then write a `T` into that location.
unsafe {
let layout = Layout::new::<T>();
let ptr = self.alloc_layout(layout);
where
F: FnOnce() -> T,
{
- // Safety: We allocate memory for `T` and then write a `T` into that location.
+ // SAFETY: We allocate memory for `T` and then write a `T` into that location.
unsafe {
let layout = Layout::new::<T>();
let ptr = match self.try_alloc_layout(layout) {
#[inline(always)]
pub fn try_alloc_layout(&self, layout: Layout) -> Result<NonNull<u8>, AllocError> {
- // When the arena is in its initial state, the head points to an empty page. In this case we
- // need to "allocate" the stack page and set the page head.
+ // When the arena is in its initial state, the head points to an empty page. In
+ // this case we need to "allocate" the stack page and set the page head.
//
- // We also need to ensure that if we're allocating into a hybrid array, that no moves have
- // happened in the meantime.
+ // We also need to ensure that if we're allocating into a hybrid array, no
+ // moves have happened in the meantime.
//
// That is we need to avoid failure in the following situation.
//
// let z = arena.alloc(3);
// ```
//
- // Allocating in an arena that links to a stack page that isn't the same address as our
- // current self's page address, is a memory safety failure.
+ // Allocating in an arena that links to a stack page that isn't the same address
+ // as our current self's page address is a memory safety failure.
//
- // It's safe to reset the page in this case, becuase it's only possible to move the arena
- // while there are no references pinning it in place.
+ // It's safe to reset the page in this case, because it's only possible to move
+ // the arena while there are no references pinning it in place.
let page = self.page_list_head.get();
- // We initially point to the empty page, but mark it as a stack page so this branch is
- // sufficient to handle both empty and moved cases.
+
+ // We initially point to the empty page, but mark it as a stack page so this
+ // branch is sufficient to handle both empty and moved cases.
if page.is_stack() && page.as_ptr() != self.footer.as_ptr() {
unsafe { self.setup_hybrid_page() }
}
}
}
- /// When a hybrid array is in its default state, or when it has been moved, it's necessary to
- /// fix-up the page footer and page list head.
+ /// When a hybrid array is in its default state, or when it has been moved, it's
+ /// necessary to fix up the page footer and page list head.
///
/// # Safety
///
- /// Must not be called when there are outstanding allocations, as it will reset the hybrid page.
+ /// Must not be called when there are outstanding allocations, as it will reset
+ /// the hybrid page.
#[inline(never)]
#[cold]
unsafe fn setup_hybrid_page(&self) {
let src = src.as_ptr();
let dst = self.alloc_layout(layout).cast::<T>().as_ptr();
- // Safety: We allocate dst with the same size as src before copying into it.
+ // SAFETY: We allocate dst with the same size as src before copying into it.
unsafe {
std::ptr::copy_nonoverlapping(src, dst, len);
std::slice::from_raw_parts_mut(dst, len)
let layout = Layout::for_value(src);
let dst = self.alloc_layout(layout).cast::<T>().as_ptr();
- // Safety: We allocate dst with the same size as src before copying into it.
+ // SAFETY: We allocate dst with the same size as src before copying into it.
unsafe {
for (i, value) in src.iter().cloned().enumerate() {
std::ptr::write(dst.add(i), value);
#[allow(clippy::mut_from_ref)]
pub fn alloc_str(&self, src: &str) -> &mut str {
let str = self.alloc_slice_copy(src.as_bytes());
- // Safety: We've just copied this string from a valid `&str`, so it must be valid too.
+ // SAFETY: We've just copied this string from a valid `&str`, so it must be valid
+ // too.
unsafe { std::str::from_utf8_unchecked_mut(str) }
}
let layout = Layout::array::<T>(len).unwrap_or_else(|_| oom());
let dst = self.alloc_layout(layout).cast::<T>();
- // Safety: We allocated an array of len elements of T above.
+ // SAFETY: We allocated an array of len elements of T above.
unsafe {
for i in 0..len {
std::ptr::write(dst.as_ptr().add(i), f(i))
impl Ord for FiniteF32 {
#[inline(always)]
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
- // Safety: There are no NaNs since FiniteF32 is always finite.
+ // SAFETY: There are no NaNs since FiniteF32 is always finite.
unsafe { self.0.partial_cmp(&other.0).unwrap_unchecked() }
}
}
impl std::hash::Hash for FiniteF32 {
#[inline(always)]
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
- // Hash requires that if `a == b` then `hash(a) == hash(b)`.
- // In ieee 754 floating point `0.0 == -0.0`, so we must normalize the value before hashing.
+ // `Hash` requires that if `a == b` then `hash(a) == hash(b)`. In IEEE-754
+ // floating point `0.0 == -0.0`, so we must normalize the value before hashing.
let x = if self.0 == 0.0 { 0.0 } else { self.0 };
x.to_bits().hash(state);
}
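// Illustrative values for the normalization above: `0.0_f32 == -0.0_f32` is
// true, but `0.0_f32.to_bits()` is 0x0000_0000 while `(-0.0_f32).to_bits()` is
// 0x8000_0000, so hashing the raw bits without normalizing would violate the
// `a == b => hash(a) == hash(b)` contract.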
impl Ord for FiniteF64 {
#[inline(always)]
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
- // Safety: There are no NaNs since FiniteF32 is always finite.
+ // SAFETY: There are no NaNs since FiniteF64 is always finite.
unsafe { self.0.partial_cmp(&other.0).unwrap_unchecked() }
}
}
// visible before we call drop.
std::sync::atomic::fence(Ordering::Acquire);
- // Safety: Was created by Box::leak in the constructor, so it's valid to recreate a box.
+ // SAFETY: Was created by Box::leak in the constructor, so it's valid to recreate a box.
let mut inner = Box::from_raw(ptr.as_ptr());
// extract the value from the container so we can return it.
let value = ManuallyDrop::take(&mut inner.value);
value
}
- // Safety: `release` consumes `self` so it's impossible to call twice on the same instance,
+ // SAFETY: `release` consumes `self` so it's impossible to call twice on the same instance,
// release is also the only function able to invalidate the pointer. Hence the pointer is
// always valid here.
unsafe {
impl<T> Clone for ManualArc<T> {
fn clone(&self) -> Self {
- // Safety: Inner is valid whilever we have a valid `ManualArc`, and so long as we are outside
+ // SAFETY: Inner is valid while we have a valid `ManualArc`, and so long as we are outside
// the `release` function.
unsafe {
let ptr = self.ptr.unwrap_unchecked();
impl<T> Deref for ManualArc<T> {
type Target = T;
- // Safety: Inner is valid whilever we have a valid `ManualArc`, and so long as we are outside
+ // SAFETY: Inner is valid while we have a valid `ManualArc`, and so long as we are outside
// the `release` function.
#[inline(always)]
fn deref(&self) -> &Self::Target {
align_offset, mod_inverse_u32, static_assert, virtual_commit, virtual_free, virtual_reserve,
};
-/// Each handle uses `GEN_BITS` bits of per-slot generation counter. Looking up a handle with the
-/// correct index but an incorrect generation will yield `None`.
+/// Each handle uses `GEN_BITS` bits of per-slot generation counter. Looking up
+/// a handle with the correct index but an incorrect generation will yield
+/// `None`.
const GEN_BITS: u32 = 9;
-/// Each handle uses `IDX_BITS` bits of index used to select a slot. This limits the maximum
-/// capacity of the table to `2 ^ IDX_BITS - 1`.
+/// Each handle uses `IDX_BITS` bits of index used to select a slot. This limits
+/// the maximum capacity of the table to `2 ^ IDX_BITS - 1`.
const IDX_BITS: u32 = 23;
const MAX_IDX: usize = 1 << IDX_BITS as usize;
const PAGE_SIZE: usize = 4096;
-/// Keep at least `MIN_FREE_SLOTS` available at all times in order to ensure a minimum of
-/// `MIN_FREE_SLOTS * 2 ^ (GEN_BITS - 1)` create-delete cycles are required before a duplicate handle is
-/// generated.
+/// Keep at least `MIN_FREE_SLOTS` available at all times in order to ensure a
+/// minimum of `MIN_FREE_SLOTS * 2 ^ (GEN_BITS - 1)` create-delete cycles are
+/// required before a duplicate handle is generated.
const MIN_FREE_SLOTS: usize = 512;
static_assert!(GEN_BITS + IDX_BITS == 32);
/// A handle representing an object stored in the associated pool.
///
-/// Although the handle is mixed based on a per-pool random number, it's recommended to additionally create a newtype
-/// wrapper around this type, to provide type safety preventing the handles from separate pools from becoming confused.
+/// Although the handle is mixed based on a per-pool random number, it's
+/// recommended to additionally create a newtype wrapper around this handle, to
+/// provide type safety preventing the handles from separate pools from becoming
+/// confused.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
pub struct Handle(u32);
}
impl Handle {
- /// Create a handle from the given encode_multiplier, generation counter and slot index.
+ /// Create a handle from the given encode_multiplier, generation counter and
+ /// slot index.
///
/// # Panics
///
- /// Panics if the generation counter is even, as that would reference an empty slot.
+ /// Panics if the generation counter is even, as that would reference an empty
+ /// slot.
#[inline(always)]
fn encode(encode_multiplier: u32, generation: u32, slot_index: SlotIndex) -> Self {
assert!(generation & 1 == 1);
let value = (generation & GEN_MASK) << GEN_SHIFT | (slot_index.0 & IDX_MASK) << IDX_SHIFT;
// Invert bits so that the all bits set, the null handle, becomes zero.
let value = !value;
- // Transform by the per-pool multiplier to mix bits such that handles from different pools are unlikely to collide.
- // Note this will return 0 for the null handle due to the inversion above.
+ // Transform by the per-pool multiplier to mix bits such that handles from
+ // different pools are unlikely to collide. Note this will return 0 for the null
+ // handle due to the inversion above.
let value = value.wrapping_mul(encode_multiplier);
Self(value)
}
- /// Return a tuple containing the generation counter and slot index from an encoded handle and decode multiplier.
+ /// Return a tuple containing the generation counter and slot index from an
+ /// encoded handle and decode multiplier.
///
/// # Panics
///
- /// Panics if the generation counter is even, as that would reference an empty slot.
+ /// Panics if the generation counter is even, as that would reference an empty
+ /// slot.
fn decode(self, decode_multiplier: u32) -> (u32, SlotIndex) {
let value = self.0;
- // Undo the bit mix from the encode step by multiplying by the multiplicative inverse of the encode_multiplier.
+ // Undo the bit mix from the encode step by multiplying by the multiplicative
+ // inverse of the encode_multiplier.
let value = value.wrapping_mul(decode_multiplier);
// Invert bits so zero, the null handle, becomes all bits set.
let value = !value;
let generation = (value >> GEN_SHIFT) & GEN_MASK;
let slot_index = SlotIndex((value >> IDX_SHIFT) & IDX_MASK);
- // An invalid generation counter here means either the handle itself has been corrupted, or that it's from
- // another pool.
+ // An invalid generation counter here means either the handle itself has been
+ // corrupted, or that it's from another pool.
assert!(generation & 1 == 1, "invalid generation counter");
(generation, slot_index)
}
}
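// Standalone sketch (not part of the patch) of why decode undoes encode: the
// bit packing and the bitwise NOT are their own inverses, and multiplication
// by an odd constant is invertible modulo 2^32, with `mod_inverse_u32`
// (imported above) supplying that inverse.
fn multiplier_round_trip_sketch(packed: u32, encode_multiplier: u32) -> u32 {
    debug_assert!(encode_multiplier & 1 == 1);
    let decode_multiplier = mod_inverse_u32(encode_multiplier);
    // Returns `packed` for every value of `packed` and every odd multiplier.
    packed
        .wrapping_mul(encode_multiplier)
        .wrapping_mul(decode_multiplier)
}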
-/// Packed value storing the generation and value index for each slot in the indirection table.
+/// Packed value storing the generation and value index for each slot in the
+/// indirection table.
///
-/// The least-significant bit of the generation counter serves to indicate whether the slot is occupied. If it's 1,
-/// the slot contains a valid entry. If it's 0, the slot is invalid.
+/// The least-significant bit of the generation counter serves to indicate
+/// whether the slot is occupied. If it's 1, the slot contains a valid entry. If
+/// it's 0, the slot is invalid.
struct Slot {
value_index_and_gen: u32,
}
/// Clears the slot's value index, incrementing the generation counter.
#[inline(always)]
fn clear_value_index(&mut self) {
- // Since we're clearing we need to reset the generation to one referencing an empty slot. But we still want to
- // invalidate old handles.
+ // Since we're clearing we need to reset the generation to one referencing an
+ // empty slot. But we still want to invalidate old handles.
let new_generation = (self.generation() | 1).wrapping_add(1);
self.value_index_and_gen = (new_generation & GEN_MASK) << GEN_SHIFT | IDX_MASK << IDX_SHIFT;
}
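// Worked example of the increment in `clear_value_index` above: an occupied
// slot has an odd generation, say 5; `(5 | 1).wrapping_add(1) == 6`, which is
// even (empty) and no longer matches handles encoded with generation 5. Even
// if the current generation were somehow even, say 4, `(4 | 1) + 1 == 6` still
// lands on an even, empty-slot generation.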
#[cold]
fn grow(&mut self) {
- // Free slots must always be a power of two so that the modular arithmetic for indexing
- // works out correctly.
+ // Free slots must always be a power of two so that the modular arithmetic for
+ // indexing works out correctly.
debug_assert!(self.cap == 0 || self.cap.is_power_of_two());
assert!(self.cap <= MAX_IDX, "freelist overflow");
)
};
- // This is slightly wrong, but our freelist doesn't need correct ordering on resize and this
- // avoids moving the values around.
+ // This is slightly wrong, but our freelist doesn't need correct ordering on
+ // resize and this avoids moving the values around.
if !self.is_empty() {
debug_assert!(self.is_full());
self.tail = 0;
// Make sure the slots array always grows by a single page.
const SLOT_GROWTH_AMOUNT: usize = PAGE_SIZE / std::mem::size_of::<Slot>();
-/// Indirection table mapping slot indices stored in handles to values in the values array.
+/// Indirection table mapping slot indices stored in handles to values in the
+/// values array.
///
/// Also contains the generation counter for each slot.
struct Slots {
/// Attempts to grow the slots array.
///
- /// Returns a tuple containing the old len and new len, or None if the array was already at capacity.
+ /// Returns a tuple containing the old len and new len, or None if the array was
+ /// already at capacity.
#[cold]
fn try_grow(&mut self) -> Option<(u32, u32)> {
let len = self.len;
}
}
-/// A contiguous growable array of values as well as a reverse-lookup table for slot indices that map to those values.
+/// A contiguous growable array of values as well as a reverse-lookup table for
+/// slot indices that map to those values.
struct Values<T> {
cap: usize,
len: usize,
}
}
- /// Retreive the `SlotIndex` corresponding to the given `ValueIndex` from the lookup table.
+ /// Retrieve the `SlotIndex` corresponding to the given `ValueIndex` from the
+ /// lookup table.
#[inline(always)]
fn get_slot(&mut self, value_index: ValueIndex) -> SlotIndex {
let value_index = value_index.0 as usize;
unsafe { std::ptr::read(self.slots_ptr.as_ptr().add(value_index).as_ref().unwrap()) }
}
- /// Push a new value into the values storage. Returns the index of the added value.
+ /// Push a new value into the values storage. Returns the index of the added
+ /// value.
#[inline(always)]
fn push(&mut self, value: T) -> ValueIndex {
if self.len == self.cap {
ValueIndex(new_value_index as u32)
}
- /// Remove the element at the given `ValueIndex` and replace it with the last element. Fixup
- /// the lookup tables for the moved element.
+ /// Remove the element at the given `ValueIndex` and replace it with the last
+ /// element. Fix up the lookup tables for the moved element.
///
/// Returns the removed value.
#[inline(always)]
}
/// Retrieve a reference to the value at `value_index`
+ ///
/// Panics if `value_index` is out of bounds
#[inline(always)]
fn get(&self, value_index: ValueIndex) -> &T {
}
/// Retrieve a mutable reference to the value at `value_index`
+ ///
/// Panics if `value_index` is out of bounds
#[inline(always)]
fn get_mut(&mut self, value_index: ValueIndex) -> &mut T {
}
}
-/// A pool for allocating objects of type T and associating them with a POD `Handle`.
+/// A pool for allocating objects of type T and associating them with a POD
+/// `Handle`.
///
-/// We do a basic attempt to ensure that mixing handles from different pools with either assert or return None. However
-/// it's possible that by accident lookup using a handle from another pool will return a valid object. The pool will
-/// not have memory unsafety in this case however, as it will only return valid objects from the pool.
+/// We make a basic attempt to ensure that mixing handles from different pools
+/// will either assert or return None. However, it's possible that a lookup
+/// using a handle from another pool will by accident return a valid object.
+/// Even in that case the pool has no memory unsafety, as it will only ever
+/// return valid objects from the pool.
pub struct Pool<T> {
encode_multiplier: u32,
decode_multiplier: u32,
impl<T> Pool<T> {
/// Creates a new pool.
///
- /// This will reserve a large amount of virtual memory for the maximum size of the pool, but won't commit any of it
- /// until it is required.
+ /// This will reserve a large amount of virtual memory for the maximum size of
+ /// the pool, but won't commit any of it until it is required.
pub fn new() -> Self {
let mut mapping_size = 0;
let value_slots = unsafe { mapping_base.add(value_slots_offset) } as _;
let values = unsafe { mapping_base.add(values_offset) } as _;
- // virtual reservations are page aligned, so shift out the zeroes in the bottom of the base address.
+ // Virtual reservations are page aligned, so shift out the zeroes in the bottom
+ // of the base address.
let encode_multiplier = mapping_base as usize >> 12;
- // multiplier must be odd to calculate the mod inverse.
+
+ // Multiplier must be odd to calculate the mod inverse.
let encode_multiplier = encode_multiplier as u32 | 1;
let decode_multiplier = mod_inverse_u32(encode_multiplier);
if self.free_slots.len() < MIN_FREE_SLOTS {
// We need to grow the slots array if there are insufficient free slots.
- // This is a no-op if we're already at the max capacity of the pool, which weakens the use-after-free
- // detection.
+ // This is a no-op if we're already at the max capacity of the pool, which
+ // weakens the use-after-free detection.
if let Some((lo, hi)) = self.slots.try_grow() {
for free_slot_index in lo..hi {
self.free_slots.push(SlotIndex(free_slot_index));
Handle::encode(self.encode_multiplier, slot.generation(), slot_index)
}
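// Hypothetical usage sketch (not part of the patch; the `insert` name and the
// exact return types are assumptions, while `new`, `get` and `remove` appear
// elsewhere in this diff):
fn pool_usage_sketch() {
    let mut pool: Pool<&'static str> = Pool::new();
    let handle = pool.insert("hello");
    assert_eq!(pool.get(handle), Some(&"hello"));
    assert_eq!(pool.remove(handle), Some("hello"));
    // The handle is now stale, so looking it up yields `None`.
    assert_eq!(pool.get(handle), None);
}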
- /// Removes a value from the pool, returning the value associated with the handle if it was previously valid.
+ /// Removes a value from the pool, returning the value associated with the
+ /// handle if it was previously valid.
pub fn remove(&mut self, handle: Handle) -> Option<T> {
let (generation, slot_index) = handle.decode(self.decode_multiplier);
assert_eq!(pool.get(Handle::null()), None);
}
- // This test is based on randomness in the base address of the pool so disable it by default to
- // avoid flaky tests in CI.
- // We do a basic attempt to ensure that mixing handles from different pools with either assert or return None.
+ // This test is based on randomness in the base address of the pool so disable
+ // it by default to avoid flaky tests in CI.
+ //
+ // We do a basic attempt to ensure that mixing handles from different pools will
+ // either assert or return None.
#[test]
#[ignore]
#[should_panic]
((old_state >> 64) ^ old_state).rotate_right((old_state >> 122) as u32) as u64
}
- /// Generates a uniformly distributed random number in the range `0..upper_bound`
+ /// Generates a uniformly distributed random number in the range
+ /// `0..upper_bound`
+ ///
+ /// Always draws two 64 bit words from the PRNG.
///
/// Based on <https://github.com/apple/swift/pull/39143/commits/87b3f607042e653a42b505442cc803ec20319c1c>
#[inline]
}
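// Simplified sketch of the bounded-generation idea (not the exact algorithm;
// as noted above, the real implementation draws a second 64 bit word, per the
// linked Swift pull request, to remove the small bias a single widening
// multiply leaves behind): for a nonzero `upper_bound`, the high 64 bits of a
// 64x64 -> 128-bit product of a random word and `upper_bound` fall in
// `0..upper_bound`.
fn bounded_sketch(random_word: u64, upper_bound: u64) -> u64 {
    ((random_word as u128 * upper_bound as u128) >> 64) as u64
}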
/// Generates a uniformly distributed random float in the range `-1.0..1.0`
+ ///
+ /// Always draws two 64 bit words from the PRNG.
#[inline]
#[must_use]
pub fn next_f32(&mut self) -> f32 {
}
/// Randomly select an element from `slice` with uniform probability.
+ ///
+ /// Always draws two 64 bit words from the PRNG.
pub fn select<'a, T>(&mut self, slice: &'a [T]) -> Option<&'a T> {
if slice.is_empty() {
None
/// Shuffle the elements in `slice` in-place.
///
- /// Note that as `Pcg64` is initialized with a 128 bit seed, it's only possible to generate
- /// `2^128` permutations. This means for slices larger than 34 elements, this function can no
- /// longer produce all permutations.
+ /// Note that as `Pcg64` is initialized with a 128 bit seed, it's only possible
+ /// to generate `2^128` permutations. This means for slices larger than 34
+ /// elements, this function can no longer produce all possible permutations.
pub fn shuffle<T>(&mut self, slice: &mut [T]) {
if !slice.is_empty() {
let mut i = slice.len() - 1;
struct Inner<T: ?Sized> {
// Number of strong references in addition to the current value.
- // A negative value indicates a non-atomic reference count, counting up from i32::MIN
- // A positive value indicates an atomic reference count, counting up from 0
+ //
+ // A negative value indicates a non-atomic reference count, counting up from
+ // `i32::MIN`
+ //
+ // A positive value indicates an atomic reference count, counting up from `0`
strong: AtomicI32,
value: T,
}
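// One plausible reading of the encoding above (illustrative, not taken from
// the implementation): a non-atomic value with no additional references stores
// `i32::MIN` and one extra clone stores `i32::MIN + 1`, while an atomic value
// stores `0` and `1` respectively, so the sign alone selects the atomic or
// non-atomic code path.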
/// # Safety
///
- /// Any other [`Rc`] or [`Arc`] pointers to the same allocation must not be dereferenced for the duration of the
- /// returned borrow. This is trivially the case if no such pointers exist, for example immediately after
- /// [`Arc::new`].
+ /// Any other [`Rc`] or [`Arc`] pointers to the same allocation must not be
+ /// dereferenced for the duration of the returned borrow. This is trivially the
+ /// case if no such pointers exist, for example immediately after [`Arc::new`].
#[inline]
pub unsafe fn get_mut_unchecked(&mut self) -> &mut T {
// We are careful to *not* create a reference covering the "count" fields, as
pub fn get_mut(&mut self) -> Option<&mut T> {
if self.is_unique() {
- // This unsafety is ok because we're guaranteed that the pointer
- // returned is the *only* pointer that will ever be returned to T. Our
- // reference count is guaranteed to be 1 at this point, and we required
- // the Arc itself to be `mut`, so we're returning the only possible
- // reference to the inner data.
+ // SAFETY: We're guaranteed that the pointer returned is the *only* pointer that
+ // will ever be returned to T because our reference count is 1, and we required
+ // the Arc reference itself to be mutable.
Some(unsafe { self.get_mut_unchecked() })
} else {
None
if self.num == 0 {
return None;
}
- // SAFETY:
- // This is safe because it's indexing into a slice guaranteed to be length > N.
+ // SAFETY: Indexing into a slice guaranteed to have `len > N`.
let ret = unsafe { &*self.slice_head.cast::<[T; N]>() };
- // SAFETY: Guaranteed that there are at least 1 item remaining otherwise
- // earlier branch would've been hit
+ // SAFETY: Guaranteed that at least one item remains, otherwise the earlier
+ // branch would've returned `None`.
self.slice_head = unsafe { self.slice_head.add(1) };
self.num -= 1;
///
/// # Panics
///
-/// Panics if `N` is 0. This check will most probably get changed to a compile time
-/// error before this method gets stabilized.
+/// Panics if `N` is 0. This check will most probably get changed to a compile
+/// time error before this method gets stabilized.
///
/// # Examples
///
| h_15_0
| h_15_1;
- // only possible if any of the half-words are invalid
+ // Only possible if any of the half-words are invalid.
if bits == !0 {
return Err(ParseUuidError);
}
let align = align_of::<T>();
let page_size = page_size();
- // Allocating memory with virtual alloc for a zst seems a bit of a waste :)
+ // Allocating memory with virtual alloc for a zst seems a bit of a waste. :)
assert!(size != 0);
- // mmap gaurantees we get page aligned addresses back. So as long as our alignment
- // requirement is less than that, we're all good in the hood.
+ // mmap guarantees we get page aligned addresses back. So as long as our
+ // alignment requirement is less than that, we're all good in the hood.
assert!(align < page_size);
let max_capacity_bytes = size.checked_mul(max_capacity).unwrap();
let mut vec = Self::new(max_capacity);
unsafe {
- // we ensure that capacity is less than max_capacity, and the new function above would
- // have paniced if max_capacity * size_of::<T>() overflowed, so we're always safe here.
+ // We ensure that capacity is less than max_capacity, and the new function above
+ // would have panicked if max_capacity * size_of::<T>() overflowed, so we're
+ // always safe here.
let cap_bytes = capacity * size_of::<T>();
virtual_commit(vec.ptr.as_ptr() as *mut std::ffi::c_void, cap_bytes);
vec.cap = capacity;
let double_cap = self.cap * 2;
let new_cap = cmp::max(required_cap, cmp::min(double_cap, max_cap));
- // This can't overflow because we've already ensured that the new_cap is less than or
- // equal to the the max_cap, and the max_cap has already been checked for overflow in
- // the constructor.
+ // This can't overflow because we've already ensured that `new_cap <= max_cap`,
+ // and `max_cap` has already been checked for overflow in the constructor.
let new_cap_bytes = new_cap * size_of::<T>();
virtual_commit(self.ptr.as_ptr() as *mut std::ffi::c_void, new_cap_bytes);
fn drop(&mut self) {
unsafe {
// The preconditions here that max_cap multiplied by the size won't overflow and
- // that the pointer actually exists and is mapped are all ensured by the constructor.
+ // that the pointer actually exists and is mapped are all ensured by the
+ // constructor.
virtual_free(
self.ptr.as_ptr() as *mut std::ffi::c_void,
self.max_cap * size_of::<T>(),
}
impl<T> VirtualVec<T> {
- /// Creates a new vector backed by virtual memory. The array cannot grow beyond its original
- /// reservation
+ /// Creates a new vector backed by virtual memory. The array cannot grow beyond
+ /// its original reservation.
///
- /// Unlike a normal vector this means addresses will not be invalidated when the vector grows,
- /// nor will there be any copying.
+ /// Unlike a normal vector this means addresses will not be invalidated when the
+ /// vector grows, nor will there be any copying when a resize occurs.
///
/// # Panics
///
- /// Panics if the memory reservation fails, or if there's any overflow in the size calculations.
+ /// Panics if the memory reservation fails, or if there's any overflow in the
+ /// size calculations.
pub fn new(max_capacity: usize) -> Self {
Self {
buf: VirtualRawVec::new(max_capacity),
pub fn truncate(&mut self, len: usize) {
// This is safe because:
//
- // * the slice passed to `drop_in_place` is valid; the `len > self.len`
- // case avoids creating an invalid slice, and
- // * the `len` of the vector is shrunk before calling `drop_in_place`,
- // such that no value will be dropped twice in case `drop_in_place`
- // were to panic once (if it panics twice, the program aborts).
+ // 1) The slice passed to `drop_in_place` is valid; the `len > self.len` case
+ // avoids creating an invalid slice.
+ // 2) The `len` of the vector is shrunk before calling `drop_in_place` such
+ // that no value will be dropped twice in case `drop_in_place` were to
+ // panic once (if it panics twice, the program aborts).
unsafe {
if len > self.len {
return;
self
}
- /// Returns a raw pointer to the vector's buffer.
+ /// Returns a raw pointer to the vector's internal buffer.
///
- /// The caller must ensure that the vector outlives the pointer this
- /// function returns, or else it will end up pointing to garbage.
- ///
- /// The caller must also ensure that the memory the pointer (non-transitively) points to
- /// is never written to (except inside an `UnsafeCell`) using this pointer or any pointer
- /// derived from it. If you need to mutate the contents of the slice, use [`as_mut_ptr`].
+ /// The caller must ensure that the memory the pointer (non-transitively)
+ /// points to is never written to (except inside an `UnsafeCell`) using this
+ /// pointer or any pointer derived from it. If you need to mutate the contents
+ /// of the slice, use [`as_mut_ptr`].
///
/// [`as_mut_ptr`]: #method.as_mut_ptr
#[inline]
pub fn as_ptr(&self) -> *const T {
- // We shadow the slice method of the same name to avoid going through
- // `deref`, which creates an intermediate reference.
+ // We shadow the slice method of the same name to avoid going through `deref`,
+ // which creates an intermediate reference.
self.buf.ptr()
}
- /// Returns an unsafe mutable pointer to the vector's buffer.
- ///
- /// The caller must ensure that the vector outlives the pointer this
- /// function returns, or else it will end up pointing to garbage.
- ///
+ /// Returns a mutable raw pointer to the vector's internal buffer.
#[inline]
pub fn as_mut_ptr(&mut self) -> *mut T {
self.buf.ptr()
#[inline]
pub fn swap_remove(&mut self, index: usize) -> T {
unsafe {
- // We replace self[index] with the last element. Note that if the
- // bounds check on hole succeeds there must be a last element (which
- // can be self[index] itself).
+ // We replace `self[index]` with the last element. Note that if the bounds check
+ // on hole succeeds there must be a last element (which can be `self[index]`
+ // itself).
let hole: *mut T = &mut self[index];
let last = ptr::read(self.get_unchecked(self.len - 1));
self.len -= 1;
self.reserve(1);
unsafe {
- // infallible
- // The spot to put the new value
+ // Infallible. The spot to put the new value.
{
let p = self.as_mut_ptr().add(index);
- // Shift everything over to make space. (Duplicating the
- // `index`th element into two consecutive places.)
+ // Shift everything over to make space. Duplicating the `index`th element into
+ // two consecutive places.
ptr::copy(p, p.offset(1), len - index);
- // Write it in, overwriting the first copy of the `index`th
- // element.
+ // Write it in, overwriting the first copy of the `index`th element.
ptr::write(p, element);
}
self.len += 1;
let len = self.len();
assert!(index < len);
unsafe {
- // infallible
+ // Infallible
let ret;
{
- // the place we are taking from.
+ // The place we are taking from.
let ptr = self.as_mut_ptr().add(index);
- // copy it out, unsafely having a copy of the value on
- // the stack and in the vector at the same time.
+ // Copy it out, unsafely having a copy of the value on the stack and in the
+ // vector at the same time.
ret = ptr::read(ptr);
-
// Shift everything down to fill in that spot.
ptr::copy(ptr.offset(1), ptr, len - index - 1);
}
impl<T: Clone> VirtualVec<T> {
/// Resizes the `Vec` in-place so that `len` is equal to `new_len`.
///
- /// If `new_len` is greater than `len`, the `Vec` is extended by the
- /// difference, with each additional slot filled with `value`.
- /// If `new_len` is less than `len`, the `Vec` is simply truncated.
+ /// If `new_len >= len`, the `Vec` is extended by the difference, with each
+ /// additional slot filled with `value`.
+ ///
+ /// If `new_len < len`, the `Vec` is simply truncated.
///
- /// This method requires [`Clone`] to be able clone the passed value. If
- /// you need more flexibility (or want to rely on [`Default`] instead of
- /// [`Clone`]), use [`resize_with`].
+ /// This method requires [`Clone`] to be able to clone the passed value. If you
+ /// need more flexibility (or want to rely on [`Default`] instead of [`Clone`]),
+ /// use [`resize_with`].
///
/// [`Clone`]: ../../std/clone/trait.Clone.html
/// [`Default`]: ../../std/default/trait.Default.html
// don't alias.
let mut local_len = SetLenOnDrop::new(&mut self.len);
- // Write all elements except the last one
+ // Write all elements except the last one.
for _ in 1..n {
ptr::write(ptr, value.next());
ptr = ptr.offset(1);
- // Increment the length in every step in case next() panics
+ // Increment the length in every step in case next() panics.
local_len.increment_len(1);
}
if n > 0 {
- // We can write the last element directly without cloning needlessly
+ // We can write the last element directly without cloning needlessly.
ptr::write(ptr, value.last());
local_len.increment_len(1);
}
- // len set by scope guard
+ // `len` set by scope guard.
}
}
}
// Set the length of the vec when the `SetLenOnDrop` value goes out of scope.
//
-// The idea is: The length field in SetLenOnDrop is a local variable
-// that the optimizer will see does not alias with any stores through the Vec's data
+// The idea is: The length field in SetLenOnDrop is a local variable that the
+// optimizer will see does not alias with any stores through the Vec's data
// pointer. This is a workaround for alias analysis issue #32155
struct SetLenOnDrop<'a> {
len: &'a mut usize,
Some(libc::timespec {
// Sleep forever if the timeout is longer than fits in a timespec.
tv_sec: d.as_secs().try_into().ok()?,
- // This conversion never truncates, as subsec_nanos is always <1e9.
+ // This conversion never truncates, as subsec_nanos is always `< 1e9`.
tv_nsec: d.subsec_nanos() as _,
})
});
let mut ascent = 0;
let mut descent = 0;
let mut line_gap = 0;
- // Safety: We've just initialized the font info above.
+ // SAFETY: We've just initialized the font info above.
unsafe { stbtt_GetFontVMetrics(&info, &mut ascent, &mut descent, &mut line_gap) };
VerticalMetrics {
ascent: ascent as f32,
let width = width as i32;
let height = height as i32;
- // Safety: `nodes` must not be deleted while context lives, and `context` must not be
+ // SAFETY: `nodes` must not be deleted while context lives, and `context` must not be
// relocated.
let context = unsafe {
let mut context = uninit_box();
/// Clear all previously packed rectangle state.
pub fn clear(&mut self) {
- // Safety: `context` and `nodes` are always valid while packer exists, and width always
+ // SAFETY: `context` and `nodes` are always valid while packer exists, and width always
// matches node count.
unsafe {
stbrp_init_target(
/// Returns true if all rectangles were successfully packed.
pub fn pack(&mut self, rects: &mut [rectpack::Rect]) -> bool {
let num_rects = rects.len().try_into().expect("too many rects to pack");
- // Safety: `context` and `nodes` are always valid while packer exists.
+ // SAFETY: `context` and `nodes` are always valid while packer exists.
let ret = unsafe { stbrp_pack_rects(self.context.as_mut(), rects.as_mut_ptr(), num_rects) };
ret == 1
}
// Add visibility operations if necessary.
//
- // If the src access mask is zero, this is a Write-After-Read hazard (or for some reason, a
- // Read-After-Read), so the dst access mask can be safely zeroed as these don't need
- // visibility.
+ // If the src access mask is zero, this is a Write-After-Read hazard (or for
+ // some reason, a Read-After-Read), so the dst access mask can be safely zeroed
+ // as these don't need visibility.
if src_access_mask != default() {
dst_access_mask |= info.access;
}
}
}
- // If we found any surface extensions, we need to additionally enable VK_KHR_surface.
+ // If we found any surface extensions, we need to additionally enable
+ // `VK_KHR_surface`.
if !enabled_extensions.is_empty() {
enabled_extensions.push(cstr!("VK_KHR_surface"));
}
fn frame<'token>(&self, frame: &'token Frame) -> &'token VulkanFrame {
frame.check_device(self as *const _ as usize);
frame.check_frame_counter(self.frame_counter.load());
- // Safety: Reference is bound to the frame exposed by the API. only one frame can be valid
- // at a time. The returned VulkanFrame is only valid so long as we have a ref on the frame.
+ // SAFETY: Reference is bound to the frame exposed by the API. Only one frame
+ // can be valid at a time. The returned VulkanFrame is only valid so long as we
+ // have a ref on the frame.
unsafe { &*self.frames[frame.frame_index % NUM_FRAMES].get() }
}
fn frame_mut<'token>(&self, frame: &'token mut Frame) -> &'token mut VulkanFrame {
frame.check_device(self as *const _ as usize);
frame.check_frame_counter(self.frame_counter.load());
- // Safety: Reference is bound to the frame exposed by the API. only one frame can be valid
- // at a time. The returned VulkanFrame is only valid so long as we have a ref on the frame.
+ // SAFETY: Reference is bound to the frame exposed by the API. Only one frame
+ // can be valid at a time. The returned VulkanFrame is only valid so long as we
+ // have a ref on the frame.
unsafe { &mut *self.frames[frame.frame_index % NUM_FRAMES].get() }
}
fn cmd_buffer_mut<'a>(&self, cmd_buffer: &'a mut CmdBuffer) -> &'a mut VulkanCmdBuffer {
- // Safety: CmdBuffer's can't outlive a frame, and the memory for a cmd_buffer is reset when
- // the frame ends. So the pointer contained in the cmd_buffer is always valid while the
- // CmdBuffer is valid. They can't cloned, copied or be sent between threads, and we have a
- // mut reference.
+ // SAFETY: `CmdBuffer`s can't outlive a frame, and the memory for a cmd_buffer
+ // is reset when the frame ends. So the pointer contained in the cmd_buffer is
+ // always valid while the `CmdBuffer` is valid. They can't be cloned, copied,
+ // or sent between threads, and we have a mutable reference.
unsafe {
NonNull::new_unchecked(cmd_buffer.cmd_buffer_addr as *mut VulkanCmdBuffer).as_mut()
}
impl FrameCounter {
pub fn new() -> Self {
Self {
- // Start the frame id at 1 so that the first `begin_frame` ticks us over to a new frame index.
+ // Start the frame id at 1 so that the first `begin_frame` ticks us
+ // over to a new frame index.
value: AtomicUsize::new(1),
}
}
height: y,
components,
len,
- // Safety: We just checked that buffer is not null above.
+ // SAFETY: We just checked that buffer is not null above.
buffer: unsafe { NonNull::new_unchecked(buffer) },
})
}
/// | 3 | red, green, blue |
/// | 4 | red, green, blue, alpha |
pub fn as_slice(&self) -> &[u8] {
- // Safety: Slice size is calculated when creating `Texture`.
+ // SAFETY: Slice size is calculated when creating `Texture`.
unsafe { std::slice::from_raw_parts(self.buffer.as_ptr(), self.len) }
}
}
impl Drop for Image {
fn drop(&mut self) {
- // Safety: Always allocated by `stbi_load_xxx` functions.
+ // SAFETY: Always allocated by `stbi_load_xxx` functions.
unsafe { stbi_image_free(self.buffer.as_ptr() as *mut _) }
}
}
use crate::{Mat2, Point2, Vec2};
-/// Matrix and translation vector which together represent a 2d affine transformation.
+/// Matrix and translation vector which together represent a 2d affine
+/// transformation.
#[derive(Clone, Copy, PartialEq)]
#[repr(C)]
pub struct Affine2 {
use crate::{Mat3, Point3, Vec3};
-/// Matrix and translation vector which together represent a 3d affine transformation.
+/// Matrix and translation vector which together represent a 3d affine
+/// transformation.
#[derive(Clone, Copy, PartialEq)]
#[repr(C)]
pub struct Affine3 {
#[inline(always)]
pub const fn splat(value: $t) -> $name {
// we have to transmute here because we can't make `into()` const.
- // Safety: $name is repr(C) struct with $n elements of type $t, so the transmute is always valid.
+ // SAFETY: $name is repr(C) struct with $n elements of type $t, so the transmute is always valid.
unsafe { std::mem::transmute([value; $n]) }
}
unsafe { std::mem::transmute(rows) }
}
- /// Construct a matrix with the provided `diagonal` and all other values set to `0.0`.
+ /// Construct a matrix with the provided `diagonal` and all other values set to
+ /// `0.0`.
pub const fn from_diagonal(diagonal: Vec2) -> Mat2 {
Mat2::from_rows([[diagonal.x, 0.0], [0.0, diagonal.y]])
}
- /// Construct a transformation matrix which scales along the coordinate axis by the values given in `scale`.
+ /// Construct a transformation matrix which scales along the coordinate axis by
+ /// the values given in `scale`.
pub const fn from_scale(scale: Vec2) -> Mat2 {
Mat2::from_diagonal(scale)
}
/// Returns `true` if all elements are finite.
///
- /// If any element is `NaN`, positive infinity, or negative infinity, returns `false`.
+ /// If any element is `NaN`, positive infinity, or negative infinity, returns
+ /// `false`.
pub fn is_finite(&self) -> bool {
let mut is_finite = true;
for x in self.0 {
is_finite
}
- /// Returns `true` if any element is positive infinity, or negative infinity, and `false` otherwise.
+ /// Returns `true` if any element is positive infinity, or negative infinity,
+ /// and `false` otherwise.
pub fn is_infinite(&self) -> bool {
let mut is_infinite = false;
for x in self.0 {
unsafe { std::mem::transmute(rows) }
}
- /// Construct a matrix with the provided `diagonal` and all other values set to `0.0`.
+ /// Construct a matrix with the provided `diagonal` and all other values set to
+ /// `0.0`.
pub const fn from_diagonal(diagonal: Vec3) -> Mat3 {
Mat3::from_rows([
[diagonal.x, 0.0, 0.0],
])
}
- /// Construct a transformation matrix which scales along the coordinate axis by the values given in `scale`.
+ /// Construct a transformation matrix which scales along the coordinate axis by
+ /// the values given in `scale`.
pub const fn from_scale(scale: Vec3) -> Mat3 {
Mat3::from_diagonal(scale)
}
- /// Constructs a transformation matrix which rotates around the given `axis` by `angle`.
+ /// Constructs a transformation matrix which rotates around the given `axis` by
+ /// `angle`.
///
- /// In a right-handed coordinate system, positive angles rotate counter-clockwise around `axis`
- /// where `axis` is pointing toward the observer.
+ /// In a right-handed coordinate system, positive angles rotate
+ /// counter-clockwise around `axis` where `axis` is pointing toward the
+ /// observer.
pub fn from_axis_rotation(axis: Vec3, rotation: HalfTurn) -> Mat3 {
let (sin, cos) = sin_cos_pi_f32(rotation.as_f32());
let axis_sin = axis * sin;
/// Returns `true` if all elements are finite.
///
- /// If any element is `NaN`, positive infinity, or negative infinity, returns `false`.
+ /// If any element is `NaN`, positive infinity, or negative infinity, returns
+ /// `false`.
pub fn is_finite(&self) -> bool {
let mut is_finite = true;
for x in self.0 {
is_finite
}
- /// Returns `true` if any element is positive infinity, or negative infinity, and `false` otherwise.
+ /// Returns `true` if any element is positive infinity, or negative infinity,
+ /// and `false` otherwise.
pub fn is_infinite(&self) -> bool {
let mut is_infinite = false;
for x in self.0 {
result
}
- /// Construct a matrix with the provided `diagonal` and all other values set to `0.0`.
+ /// Construct a matrix with the provided `diagonal` and all other values set to
+ /// `0.0`.
pub const fn from_diagonal(diagonal: Vec4) -> Mat4 {
Mat4::from_rows([
[diagonal.x, 0.0, 0.0, 0.0],
])
}
- /// Construct a transformation matrix which scales along the coordinate axes by the values given in `scale`.
+ /// Construct a transformation matrix which scales along the coordinate axes by
+ /// the values given in `scale`.
pub const fn from_scale(scale: Vec3) -> Mat4 {
Mat4::from_rows([
[scale.x, 0.0, 0.0, 0.0],
])
}
- /// Construct an affine transformation matrix with the given `translation` along the coordinate axes.
+ /// Construct an affine transformation matrix with the given `translation`
+ /// along the coordinate axes.
pub const fn from_translation(translation: Vec3) -> Mat4 {
Mat4::from_rows([
[1.0, 0.0, 0.0, translation.x],
])
}
- /// Constructs a transformation matrix which rotates around the given `axis` by `angle`.
+ /// Constructs a transformation matrix which rotates around the given `axis` by
+ /// `angle`.
///
- /// In a right-handed coordinate system, positive angles rotate counter-clockwise around `axis`
- /// where `axis` is pointing toward the observer.
+ /// In a right-handed coordinate system, positive angles rotate
+ /// counter-clockwise around `axis` where `axis` is pointing toward the
+ /// observer.
pub fn from_axis_rotation(axis: Vec3, rotation: HalfTurn) -> Mat4 {
let (sin, cos) = sin_cos_pi_f32(rotation.as_f32());
let axis_sin = axis * sin;
])
}
- /// Constructs a 'look at' transformation from the given `eye` position, look at `center` point, and `up` vector.
+ /// Constructs a 'look at' transformation from the given `eye` position, look
+ /// at `center` point, and `up` vector.
///
/// Src coordinate space: right-handed, +y-up.
/// Dst coordinate space: right-handed, +y-up.
])
}
- /// Creates a perspective projection matrix with reversed infinite z and \[0,1\] depth range.
+ /// Creates a perspective projection matrix with reversed infinite z and \[0,1\]
+ /// depth range.
///
/// Destination coordinate space matches native vulkan clip space.
///
/// Returns `true` if all elements are finite.
///
- /// If any element is `NaN`, positive infinity, or negative infinity, returns `false`.
+ /// If any element is `NaN`, positive infinity, or negative infinity, returns
+ /// `false`.
pub fn is_finite(&self) -> bool {
let mut is_finite = true;
for x in self.0 {
is_finite
}
- /// Returns `true` if any element is positive infinity, or negative infinity, and `false` otherwise.
+ /// Returns `true` if any element is positive infinity, or negative infinity,
+ /// and `false` otherwise.
pub fn is_infinite(&self) -> bool {
let mut is_infinite = false;
for x in self.0 {
])
}
- // Safety: Requires SSE2.
+ // SAFETY: Requires SSE2.
#[inline]
#[target_feature(enable = "sse2")]
unsafe fn transpose_sse2(self) -> Mat4 {
)
}
- // Safety: Requires SSE4.1.
+ // SAFETY: Requires SSE4.1.
#[allow(dead_code)]
#[inline]
#[target_feature(enable = "sse4.1")]
result
}
-// Safety: Requires SSE2.
+// SAFETY: Requires SSE2.
#[allow(dead_code)]
#[inline]
#[target_feature(enable = "sse2")]
Mat4::from_m128_array([x0, x1, x2, x3])
}
-// Safety: Requires AVX2.
+// SAFETY: Requires AVX2.
#[allow(dead_code)]
#[inline]
#[target_feature(enable = "avx2")]
-/// Calculate the next representable floating-point value following x in the direction of y.
+/// Calculate the next representable floating-point value following x in the
+/// direction of y.
///
-/// If y is less than x, these functions will return the largest representable number less than x.
+/// If y is less than x, these functions will return the largest representable
+/// number less than x.
///
/// # Returns
///
-/// On success, the function returns the next representable floating-point value after x in the
+/// On success, the function returns the next representable floating-point value
+/// after x in the
/// direction of y.
///
/// * If `x` equals `y`, then `y` is returned.
/// * If `x` or `y` is a `NaN`, a `NaN` is returned.
-/// * If `x` is finite, and the result would overflow, a range error occurs, and the function
-/// returns `inf` with the correct mathematical sign.
-/// * If `x` is not equal to `y`, and the correct function result would be subnormal, zero, or
-/// underflow, a range error occurs, and either the correct value (if it can be represented),
-/// or `0.0`, is returned.
+/// * If `x` is finite, and the result would overflow, a range error occurs, and
+/// the function returns `inf` with the correct mathematical sign.
+/// * If `x` is not equal to `y`, and the correct function result would be
+/// subnormal, zero, or underflow, a range error occurs, and either the
+/// correct value (if it can be represented), or `0.0`, is returned.
/// * If x equals y, the function returns y.
pub fn next_after_f32(x: f32, y: f32) -> f32 {
if x.is_nan() || y.is_nan() {
Vec2::new(self.x, self.y)
}
- /// Returns a new [`Point2`] with the function `f` applied to each coordinate of `self` in order.
+ /// Returns a new [`Point2`] with the function `f` applied to each coordinate of
+ /// `self` in order.
#[inline(always)]
pub fn map<F>(self, mut f: F) -> Self
where
}
}
- /// Returns a new [`Point2`] with the function `f` applied to each pair of components from `self` and `rhs` in order.
+ /// Returns a new [`Point2`] with the function `f` applied to each pair of
+ /// components from `self` and `rhs` in order.
#[inline(always)]
pub fn map2<F>(self, rhs: Self, mut f: F) -> Self
where
Vec3::new(self.x, self.y, self.z)
}
- /// Returns a new [`Point3`] with the function `f` applied to each coordinate of `self` in order.
+ /// Returns a new [`Point3`] with the function `f` applied to each coordinate of
+ /// `self` in order.
#[inline(always)]
pub fn map<F>(self, mut f: F) -> Point3
where
}
}
- /// Returns a new [`Point3`] with the function `f` applied to each pair of components from `self` and `rhs` in order.
+ /// Returns a new [`Point3`] with the function `f` applied to each pair of
+ /// components from `self` and `rhs` in order.
#[inline(always)]
pub fn map2<F>(self, rhs: Point3, mut f: F) -> Point3
where
Self { a, b, c, d }
}
- /// Returns a quaternion representing a `rotation` in half turns around the given `axis`.
+ /// Returns a quaternion representing a `rotation` in half turns around the
+ /// given `axis`.
pub fn from_axis_rotation(axis: Vec3, rotation: HalfTurn) -> Self {
let (s, c) = sin_cos_pi_f32(rotation.as_f32() * 0.5);
let v = axis * s;
// Range reduction.
let r = round_ties_to_even(a + a);
- // Safety: The clamp above avoids the possibility of overflow here.
+ // SAFETY: The clamp above avoids the possibility of overflow here.
let i = unsafe { r.to_int_unchecked::<i32>() } as u32;
let r = r.mul_add(-0.5, a);
-// Based on Norbert Juffa's tanpi posted to the cuda forums. Using my own polynomial, but that might
-// be worse, todo: check whether polynomial is worse.
+// Based on Norbert Juffa's tanpi posted to the cuda forums. Using my own
+// polynomial, but that might be worse. TODO: check whether the polynomial is
+// worse.
// https://forums.developer.nvidia.com/t/an-implementation-of-single-precision-tanpi-for-cuda/48024
//
// Sollya code for generating these polynomials is in `doc/sincostan.sollya`
])
};
-/// Computes the tangent of `a` expressed in multiples of *pi* radians, or half-turns.
+/// Computes the tangent of `a` expressed in multiples of *pi* radians, or
+/// half-turns.
///
/// Returns `tan(a * pi)`
///
// Range reduction.
let r = round_ties_to_even(a + a);
- // Safety: The clamp above avoids the possibility of overflow here.
+ // SAFETY: The clamp above avoids the possibility of overflow here.
let i = unsafe { r.to_int_unchecked::<i32>() } as u32;
let r = r.mul_add(-0.5, a);
}
}
- /// Returns a new [`Vec2`] with the function `f` applied to each pair of components from `self` and `rhs` in order.
+ /// Returns a new [`Vec2`] with the function `f` applied to each pair of
+ /// components from `self` and `rhs` in order.
#[inline(always)]
pub fn map2<F>(self, rhs: Vec2, mut f: F) -> Vec2
where
}
}
- /// Returns a new [`Vec3`] with the function `f` applied to each pair of components from `self` and `rhs` in order.
+ /// Returns a new [`Vec3`] with the function `f` applied to each pair of
+ /// components from `self` and `rhs` in order.
#[inline(always)]
pub fn map2<F>(self, rhs: Self, mut f: F) -> Vec3
where
}
}
- /// Returns a new [`Vec4`] with the function `f` applied to each pair of components from `self` and `rhs` in order.
+ /// Returns a new [`Vec4`] with the function `f` applied to each pair of
+ /// components from `self` and `rhs` in order.
#[inline(always)]
pub fn map2<F>(self, rhs: Self, mut f: F) -> Vec4
where