From 8d96dc4817516ae3b6b65f0149e3b79230db0503 Mon Sep 17 00:00:00 2001
From: Joshua Simmons
Date: Sat, 4 Mar 2023 10:51:59 +0100
Subject: [PATCH] Improve comment formatting

---
 bins/narcissus/src/fonts.rs | 3 +-
 bins/narcissus/src/helpers.rs | 3 +-
 bins/narcissus/src/mapped_buffer.rs | 2 +-
 libs/narcissus-core/src/arena.rs | 140 ++++++++++--------
 libs/narcissus-core/src/finite.rs | 8 +-
 libs/narcissus-core/src/manual_arc.rs | 8 +-
 libs/narcissus-core/src/pool.rs | 122 +++++++++------
 libs/narcissus-core/src/rand.rs | 15 +-
 libs/narcissus-core/src/ref_count.rs | 21 +--
 libs/narcissus-core/src/slice.rs | 11 +-
 libs/narcissus-core/src/uuid.rs | 2 +-
 .../narcissus-core/src/virtual_vec/raw_vec.rs | 19 +--
 libs/narcissus-core/src/virtual_vec/vec.rs | 93 ++++++------
 libs/narcissus-core/src/waiter.rs | 2 +-
 libs/narcissus-font/src/font.rs | 2 +-
 libs/narcissus-font/src/packer.rs | 6 +-
 libs/narcissus-gpu/src/backend/vulkan/mod.rs | 27 ++--
 libs/narcissus-gpu/src/frame_counter.rs | 3 +-
 libs/narcissus-image/src/lib.rs | 6 +-
 libs/narcissus-maths/src/affine2.rs | 3 +-
 libs/narcissus-maths/src/affine3.rs | 3 +-
 libs/narcissus-maths/src/lib.rs | 2 +-
 libs/narcissus-maths/src/mat2.rs | 12 +-
 libs/narcissus-maths/src/mat3.rs | 20 ++-
 libs/narcissus-maths/src/mat4.rs | 37 +++--
 libs/narcissus-maths/src/next_after_f32.rs | 19 ++-
 libs/narcissus-maths/src/point2.rs | 6 +-
 libs/narcissus-maths/src/point3.rs | 6 +-
 libs/narcissus-maths/src/quat.rs | 3 +-
 libs/narcissus-maths/src/sin_cos_pi.rs | 2 +-
 libs/narcissus-maths/src/tan_pi.rs | 9 +-
 libs/narcissus-maths/src/vec2.rs | 3 +-
 libs/narcissus-maths/src/vec3.rs | 3 +-
 libs/narcissus-maths/src/vec4.rs | 3 +-
 34 files changed, 351 insertions(+), 273 deletions(-)

diff --git a/bins/narcissus/src/fonts.rs b/bins/narcissus/src/fonts.rs
index 0b107ce..f231fe9 100644
--- a/bins/narcissus/src/fonts.rs
+++ b/bins/narcissus/src/fonts.rs
@@ -13,7 +13,8 @@ pub struct Fonts<'a> {
 impl<'a> Fonts<'a> {
     pub fn new() -> Self {
-        // Safety: Safe because Roboto-Regular.ttf is a valid ttf font embedded in the application.
+        // SAFETY: Safe because Roboto-Regular.ttf is a valid ttf font embedded
+        // in the application.
         let roboto_regular =
             unsafe { Font::from_bytes(include_bytes!("fonts/Roboto-Regular.ttf")) };
         let noto_sans_japanese =
diff --git a/bins/narcissus/src/helpers.rs b/bins/narcissus/src/helpers.rs
index 08c03a3..b0612cc 100644
--- a/bins/narcissus/src/helpers.rs
+++ b/bins/narcissus/src/helpers.rs
@@ -107,7 +107,8 @@ where
         usage,
         size: len,
     });
-    // Safety: T: Blittable which implies it's freely convertable to a byte slice.
+    // SAFETY: T: Blittable which implies it's freely convertible to a byte
+    // slice.
     unsafe {
         let dst = std::slice::from_raw_parts_mut(device.map_buffer(buffer), len);
         let src = std::slice::from_raw_parts(data.as_ptr() as *const u8, len);
diff --git a/bins/narcissus/src/mapped_buffer.rs b/bins/narcissus/src/mapped_buffer.rs
index 1b7575c..60ae3ce 100644
--- a/bins/narcissus/src/mapped_buffer.rs
+++ b/bins/narcissus/src/mapped_buffer.rs
@@ -57,7 +57,7 @@ impl<'a> MappedBuffer<'a> {
 impl<'a> Drop for MappedBuffer<'a> {
     fn drop(&mut self) {
-        // Safety: Make sure we don't have the slice outlive the mapping.
+        // SAFETY: Make sure we don't have the slice outlive the mapping.
         unsafe {
             self.device.unmap_buffer(self.buffer);
         }
diff --git a/libs/narcissus-core/src/arena.rs b/libs/narcissus-core/src/arena.rs
index 9d23d21..7accadd 100644
--- a/libs/narcissus-core/src/arena.rs
+++ b/libs/narcissus-core/src/arena.rs
@@ -24,17 +24,17 @@ unsafe fn layout_from_size_align(size: usize, align: usize) -> Layout {
 /// Wrapper around a pointer to a page footer.
 ///
-/// Allows us to easily borrow the least significant bit of the page pointer to keep track of
-/// whether a given page was allocated on the heap, with the global allocator. Or if it is the stack
-/// page in a HybridArena.
+/// Allows us to easily borrow the least significant bit of the page pointer to
+/// keep track of whether a given page was allocated on the heap with the
+/// global allocator, or if it is the stack page in a HybridArena.
 #[derive(Clone, Copy)]
 struct PagePointer(*mut PageFooter);
 
 impl PagePointer {
     #[inline(always)]
     fn empty() -> PagePointer {
-        // We pretend the empty page is a "stack" pointer, as it allows us to remove a branch from
-        // the hybrid array setup.
+        // We pretend the empty page is a "stack" pointer, as it allows us to remove a
+        // branch from the hybrid arena setup.
         PagePointer::new_stack(&EMPTY_PAGE as *const PageFooterSync as *mut PageFooter)
     }
@@ -74,7 +74,8 @@ struct PageFooter {
     /// Pointer to the start of this page.
     base: NonNull<u8>,
-    /// Pointer to the current bump allocation cursor. Must be within the range `base..=&self`.
+    /// Pointer to the current bump allocation cursor. Must be within the range
+    /// `base..=&self`.
     bump: Cell<NonNull<u8>>,
     /// Page size in bytes.
     size: usize,
@@ -106,7 +107,8 @@ impl PageFooter {
         // Cannot wrap due to guard above.
         let bump = bump.wrapping_sub(layout.size());
         let remainder = bump as usize & (layout.align() - 1);
-        // Cannot have a remainder greater than the magnitude of the value, so this cannot wrap.
+        // Cannot have a remainder greater than the magnitude of the value, so this
+        // cannot wrap.
         let bump = bump.wrapping_sub(remainder);
 
         if bump >= base {
@@ -124,8 +126,9 @@
     ///
     /// # Safety
     ///
-    /// This must only be called on pages which have no outstanding references to allocations, as it
-    /// allows subsequent operations to allocate the same addresses.
+    /// This must only be called on pages which have no outstanding references to
+    /// allocations, as it allows subsequent operations to allocate the same
+    /// addresses.
     unsafe fn reset(&self) {
         self.bump.set(NonNull::new_unchecked(
             self.base.as_ptr().add(self.size - PAGE_FOOTER_SIZE),
         ))
@@ -152,8 +155,8 @@ static EMPTY_PAGE: PageFooterSync = PageFooterSync(unsafe {
 }
 });
 
-/// Create a new page, large enough for the given layout, and prepend it to the linked list of
-/// pages.
+/// Create a new page, large enough for the given layout, and prepend it to the
+/// linked list of pages.
 ///
 /// Returns the new page.
 ///
@@ -165,11 +168,12 @@ unsafe fn prepend_new_page(page: PagePointer, layout: Layout) -> Option<PagePointer> {
 Option,
@@ -227,9 +232,11 @@ pub struct Arena {
 /// An allocation arena with an allocation region that lives on the stack.
 ///
-/// Bump allocates from the stack page until it's exhausted, then behaves like a regular `Arena`.
+/// Bump allocates from the stack page until it's exhausted, then behaves like a
+/// regular `Arena`.
 ///
-/// Objects that are allocated within the arena will never have their `Drop` function called.
+/// Objects that are allocated within the arena will never have their `Drop`
+/// function called.
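The align-down bump step that `PageFooter::alloc`'s comments above describe can be shown as a standalone sketch. Everything here (the `bump_alloc` name, the integer-address signature) is illustrative rather than the crate's API; it assumes a page that grows downward from `bump` toward `base`:

```rust
// Standalone sketch of the align-down bump allocation described above.
// `base` and `bump` mirror `PageFooter::base` / `PageFooter::bump`; the
// names and simplified signature are illustrative, not the crate's API.
fn bump_alloc(base: usize, bump: usize, size: usize, align: usize) -> Option<usize> {
    debug_assert!(align.is_power_of_two());
    // Guard so the subtraction below cannot wrap past `base`.
    if size > bump.wrapping_sub(base) {
        return None;
    }
    // Move the cursor down by the allocation size, then round down to the
    // required alignment by masking off the low bits.
    let ptr = (bump - size) & !(align - 1);
    if ptr >= base {
        Some(ptr) // new bump cursor, which is also the allocation address
    } else {
        None // not enough room left in this page
    }
}

fn main() {
    // A 64-byte page spanning [0x1000, 0x1040); allocate 16 bytes, 16-aligned.
    assert_eq!(bump_alloc(0x1000, 0x1040, 16, 16), Some(0x1030));
}
```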
 #[repr(C)]
 pub struct HybridArena {
     data: MaybeUninit<[u8; STACK_CAP]>,
@@ -246,13 +253,13 @@ impl Arena {
     /// Reset the arena.
     ///
-    /// Releases all pages to the global allocator, except for the most recently allocated one,
-    /// which has its bump pointer reset.
+    /// Releases all pages to the global allocator, except for the most recently
+    /// allocated one which has its bump pointer reset.
     ///
     /// Does not call destructors on any objects allocated by the pool.
     pub fn reset(&mut self) {
-        // We don't want to write to the static empty page, so abandon here if we haven't allocated
-        // any pages.
+        // We don't want to write to the static empty page, so abandon here if we
+        // haven't allocated any pages.
         if self.page_list_head.get().is_empty() {
             return;
         }
@@ -270,7 +277,7 @@ impl Arena {
     #[inline(always)]
     #[allow(clippy::mut_from_ref)]
     pub fn alloc<T>(&self, value: T) -> &mut T {
-        // Safety: We allocate memory for `T` and then write a `T` into that location.
+        // SAFETY: We allocate memory for `T` and then write a `T` into that location.
         unsafe {
             let layout = Layout::new::<T>();
             let ptr = self.alloc_layout(layout);
@@ -286,7 +293,7 @@ impl Arena {
     where
         F: FnOnce() -> T,
     {
-        // Safety: We allocate memory for `T` and then write a `T` into that location.
+        // SAFETY: We allocate memory for `T` and then write a `T` into that location.
         unsafe {
             let layout = Layout::new::<T>();
             let ptr = self.alloc_layout(layout);
@@ -302,7 +309,7 @@ impl Arena {
     where
         F: FnOnce() -> T,
     {
-        // Safety: We allocate memory for `T` and then write a `T` into that location.
+        // SAFETY: We allocate memory for `T` and then write a `T` into that location.
         unsafe {
             let layout = Layout::new::<T>();
             let ptr = match self.try_alloc_layout(layout) {
@@ -362,7 +369,7 @@ impl Arena {
         let src = src.as_ptr();
         let dst = self.alloc_layout(layout).cast::<T>().as_ptr();
 
-        // Safety: We allocate dst with the same size as src before copying into it.
+        // SAFETY: We allocate dst with the same size as src before copying into it.
         unsafe {
             std::ptr::copy_nonoverlapping(src, dst, len);
             std::slice::from_raw_parts_mut(dst, len)
@@ -378,7 +385,7 @@ impl Arena {
         let layout = Layout::for_value(src);
         let dst = self.alloc_layout(layout).cast::<T>().as_ptr();
 
-        // Safety: We allocate dst with the same size as src before copying into it.
+        // SAFETY: We allocate dst with the same size as src before copying into it.
         unsafe {
             for (i, value) in src.iter().cloned().enumerate() {
                 std::ptr::write(dst.add(i), value);
             }
@@ -391,7 +398,8 @@ impl Arena {
     #[allow(clippy::mut_from_ref)]
     pub fn alloc_str(&self, src: &str) -> &mut str {
         let str = self.alloc_slice_copy(src.as_bytes());
-        // Safety: We've just copied this string from a valid `&str`, so it must be valid too.
+        // SAFETY: We've just copied this string from a valid `&str`, so it must be
+        // valid too.
         unsafe { std::str::from_utf8_unchecked_mut(str) }
     }
@@ -404,7 +412,7 @@ impl Arena {
         let layout = Layout::array::<T>(len).unwrap_or_else(|_| oom());
         let dst = self.alloc_layout(layout).cast::<T>();
 
-        // Safety: We allocated an array of len elements of T above.
+        // SAFETY: We allocated an array of len elements of T above.
         unsafe {
             for i in 0..len {
                 std::ptr::write(dst.as_ptr().add(i), f(i))
@@ -458,8 +466,8 @@ impl Drop for Arena {
 impl HybridArena {
     pub fn new() -> Self {
-        // Ideally we'd pad `STACK_CAP` out to the alignment, avoiding wasting any space, but we
-        // can't do maffs with constants just yet, so abort instead.
+        // Ideally we'd pad `STACK_CAP` out to the alignment, avoiding wasting any
+        // space, but we can't do maffs with constants just yet, so abort instead.
         debug_assert!(STACK_CAP % std::mem::align_of::() == 0);
         Self {
             data: MaybeUninit::uninit(),
@@ -475,18 +483,19 @@ impl HybridArena {
     /// Reset the arena.
     ///
-    /// Releases all pages to the global allocator, except for the most recently allocated one,
-    /// which has its bump pointer reset.
+    /// Releases all pages to the global allocator, except for the most recently
+    /// allocated one which has its bump pointer reset.
     ///
     /// Does not call destructors on any objects allocated by the pool.
     pub fn reset(&mut self) {
         let page_list_head = self.page_list_head.get();
 
         unsafe {
-            // SAFETY: We're either pointing to an empty page, or a hybrid page, but the hybrid page
-            // pointer might not be up to date if the object has moved, so we must call setup in
-            // that case. Since setup also resets the page, handles the empty page, and is
-            // idempotent, we can always call it here when we see a stack page, then return.
+            // SAFETY: We're either pointing to an empty page, or a hybrid page, but the
+            // hybrid page pointer might not be up to date if the object has moved, so we
+            // must call setup in that case. Since setup also resets the page, handles the
+            // empty page, and is idempotent, we can always call it here when we see a stack
+            // page, then return.
             if page_list_head.is_stack() {
                 self.setup_hybrid_page();
                 return;
             }
@@ -504,7 +513,7 @@ impl HybridArena {
     #[inline(always)]
     #[allow(clippy::mut_from_ref)]
     pub fn alloc<T>(&self, value: T) -> &mut T {
-        // Safety: We allocate memory for `T` and then write a `T` into that location.
+        // SAFETY: We allocate memory for `T` and then write a `T` into that location.
         unsafe {
             let layout = Layout::new::<T>();
             let ptr = self.alloc_layout(layout);
@@ -520,7 +529,7 @@ impl HybridArena {
     where
         F: FnOnce() -> T,
     {
-        // Safety: We allocate memory for `T` and then write a `T` into that location.
+        // SAFETY: We allocate memory for `T` and then write a `T` into that location.
         unsafe {
             let layout = Layout::new::<T>();
             let ptr = self.alloc_layout(layout);
@@ -536,7 +545,7 @@ impl HybridArena {
     where
         F: FnOnce() -> T,
     {
-        // Safety: We allocate memory for `T` and then write a `T` into that location.
+        // SAFETY: We allocate memory for `T` and then write a `T` into that location.
         unsafe {
             let layout = Layout::new::<T>();
             let ptr = match self.try_alloc_layout(layout) {
@@ -559,11 +568,11 @@ impl HybridArena {
     #[inline(always)]
     pub fn try_alloc_layout(&self, layout: Layout) -> Result<NonNull<u8>, AllocError> {
-        // When the arena is in its initial state, the head points to an empty page. In this case we
-        // need to "allocate" the stack page and set the page head.
+        // When the arena is in its initial state, the head points to an empty page. In
+        // this case we need to "allocate" the stack page and set the page head.
         //
-        // We also need to ensure that if we're allocating into a hybrid array, that no moves have
-        // happened in the meantime.
+        // We also need to ensure that if we're allocating into a hybrid arena, no
+        // moves have happened in the meantime.
         //
         // That is we need to avoid failure in the following situation.
         //
         // let z = arena.alloc(3);
         // ```
         //
-        // Allocating in an arena that links to a stack page that isn't the same address as our
-        // current self's page address, is a memory safety failure.
+        // Allocating in an arena that links to a stack page that isn't the same address
+        // as our current self's page address is a memory safety failure.
         //
-        // It's safe to reset the page in this case, becuase it's only possible to move the arena
-        // while there are no references pinning it in place.
+        // It's safe to reset the page in this case, because it's only possible to move
+        // the arena while there are no references pinning it in place.
         let page = self.page_list_head.get();
-        // We initially point to the empty page, but mark it as a stack page so this branch is
-        // sufficient to handle both empty and moved cases.
+
+        // We initially point to the empty page, but mark it as a stack page so this
+        // branch is sufficient to handle both empty and moved cases.
         if page.is_stack() && page.as_ptr() != self.footer.as_ptr() {
             unsafe { self.setup_hybrid_page() }
         }
@@ -599,12 +609,13 @@
     /// When a hybrid array is in its default state, or when it has been moved, it's necessary to
-    /// fix-up the page footer and page list head.
+    /// When a hybrid arena is in its default state, or when it has been moved, it's
+    /// necessary to fix up the page footer and page list head.
     ///
     /// # Safety
     ///
-    /// Must not be called when there are outstanding allocations, as it will reset the hybrid page.
+    /// Must not be called when there are outstanding allocations, as it will reset
+    /// the hybrid page.
     #[inline(never)]
     #[cold]
     unsafe fn setup_hybrid_page(&self) {
@@ -652,7 +663,7 @@ impl HybridArena {
         let src = src.as_ptr();
         let dst = self.alloc_layout(layout).cast::<T>().as_ptr();
 
-        // Safety: We allocate dst with the same size as src before copying into it.
+        // SAFETY: We allocate dst with the same size as src before copying into it.
         unsafe {
             std::ptr::copy_nonoverlapping(src, dst, len);
             std::slice::from_raw_parts_mut(dst, len)
@@ -668,7 +679,7 @@ impl HybridArena {
         let layout = Layout::for_value(src);
         let dst = self.alloc_layout(layout).cast::<T>().as_ptr();
 
-        // Safety: We allocate dst with the same size as src before copying into it.
+        // SAFETY: We allocate dst with the same size as src before copying into it.
         unsafe {
             for (i, value) in src.iter().cloned().enumerate() {
                 std::ptr::write(dst.add(i), value);
             }
@@ -681,7 +692,8 @@ impl HybridArena {
     #[allow(clippy::mut_from_ref)]
     pub fn alloc_str(&self, src: &str) -> &mut str {
         let str = self.alloc_slice_copy(src.as_bytes());
-        // Safety: We've just copied this string from a valid `&str`, so it must be valid too.
+        // SAFETY: We've just copied this string from a valid `&str`, so it must be valid
+        // too.
         unsafe { std::str::from_utf8_unchecked_mut(str) }
     }
@@ -694,7 +706,7 @@ impl HybridArena {
         let layout = Layout::array::<T>(len).unwrap_or_else(|_| oom());
         let dst = self.alloc_layout(layout).cast::<T>();
 
-        // Safety: We allocated an array of len elements of T above.
+        // SAFETY: We allocated an array of len elements of T above.
         unsafe {
             for i in 0..len {
                 std::ptr::write(dst.as_ptr().add(i), f(i))
diff --git a/libs/narcissus-core/src/finite.rs b/libs/narcissus-core/src/finite.rs
index 13f9c51..a5e59d2 100644
--- a/libs/narcissus-core/src/finite.rs
+++ b/libs/narcissus-core/src/finite.rs
@@ -53,7 +53,7 @@ impl PartialOrd for FiniteF32 {
 impl Ord for FiniteF32 {
     #[inline(always)]
     fn cmp(&self, other: &Self) -> std::cmp::Ordering {
-        // Safety: There are no NaNs since FiniteF32 is always finite.
+        // SAFETY: There are no NaNs since FiniteF32 is always finite.
         unsafe { self.0.partial_cmp(&other.0).unwrap_unchecked() }
     }
 }
@@ -61,8 +61,8 @@ impl std::hash::Hash for FiniteF32 {
     #[inline(always)]
     fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
-        // Hash requires that if `a == b` then `hash(a) == hash(b)`.
-        // In ieee 754 floating point `0.0 == -0.0`, so we must normalize the value before hashing.
+        // `Hash` requires that if `a == b` then `hash(a) == hash(b)`. In IEEE-754
+        // floating point `0.0 == -0.0`, so we must normalize the value before hashing.
         let x = if self.0 == 0.0 { 0.0 } else { self.0 };
         x.to_bits().hash(state);
     }
@@ -110,7 +110,7 @@ impl PartialOrd for FiniteF64 {
 impl Ord for FiniteF64 {
     #[inline(always)]
     fn cmp(&self, other: &Self) -> std::cmp::Ordering {
-        // Safety: There are no NaNs since FiniteF32 is always finite.
+        // SAFETY: There are no NaNs since FiniteF64 is always finite.
         unsafe { self.0.partial_cmp(&other.0).unwrap_unchecked() }
     }
 }
diff --git a/libs/narcissus-core/src/manual_arc.rs b/libs/narcissus-core/src/manual_arc.rs
index ea168bb..7e4a007 100644
--- a/libs/narcissus-core/src/manual_arc.rs
+++ b/libs/narcissus-core/src/manual_arc.rs
@@ -86,7 +86,7 @@ impl<T> ManualArc<T> {
                 // visible before we call drop.
                 std::sync::atomic::fence(Ordering::Acquire);
 
-                // Safety: Was created by Box::leak in the constructor, so it's valid to recreate a box.
+                // SAFETY: Was created by Box::leak in the constructor, so it's valid to recreate a box.
                 let mut inner = Box::from_raw(ptr.as_ptr());
                 // extract the value from the container so we can return it.
                 let value = ManuallyDrop::take(&mut inner.value);
@@ -96,7 +96,7 @@ impl<T> ManualArc<T> {
             value
         }
 
-        // Safety: `release` consumes `self` so it's impossible to call twice on the same instance,
+        // SAFETY: `release` consumes `self` so it's impossible to call twice on the same instance,
         // release is also the only function able to invalidate the pointer. Hence the pointer is
         // always valid here.
         unsafe {
@@ -124,7 +124,7 @@ impl<T: Default> Default for ManualArc<T> {
 impl<T> Clone for ManualArc<T> {
     fn clone(&self) -> Self {
-        // Safety: Inner is valid whilever we have a valid `ManualArc`, and so long as we are outside
+        // SAFETY: Inner is valid for as long as we have a valid `ManualArc`, and so long as we are outside
         // the `release` function.
         unsafe {
             let ptr = self.ptr.unwrap_unchecked();
@@ -145,7 +145,7 @@ impl<T> Drop for ManualArc<T> {
 impl<T> Deref for ManualArc<T> {
     type Target = T;
 
-    // Safety: Inner is valid whilever we have a valid `ManualArc`, and so long as we are outside
+    // SAFETY: Inner is valid for as long as we have a valid `ManualArc`, and so long as we are outside
     // the `release` function.
     #[inline(always)]
     fn deref(&self) -> &Self::Target {
diff --git a/libs/narcissus-core/src/pool.rs b/libs/narcissus-core/src/pool.rs
index ada1b7d..810983f 100644
--- a/libs/narcissus-core/src/pool.rs
+++ b/libs/narcissus-core/src/pool.rs
@@ -4,12 +4,13 @@ use crate::{
     align_offset, mod_inverse_u32, static_assert, virtual_commit, virtual_free, virtual_reserve,
 };
 
-/// Each handle uses `GEN_BITS` bits of per-slot generation counter. Looking up a handle with the
-/// correct index but an incorrect generation will yield `None`.
+/// Each handle uses `GEN_BITS` bits of per-slot generation counter. Looking up
+/// a handle with the correct index but an incorrect generation will yield
+/// `None`.
 const GEN_BITS: u32 = 9;
 
-/// Each handle uses `IDX_BITS` bits of index used to select a slot. This limits the maximum
-/// capacity of the table to `2 ^ IDX_BITS - 1`.
+/// Each handle uses `IDX_BITS` bits of index used to select a slot. This limits
+/// the maximum capacity of the table to `2 ^ IDX_BITS - 1`.
 const IDX_BITS: u32 = 23;
 
 const MAX_IDX: usize = 1 << IDX_BITS as usize;
 const MAX_CAP: usize = MAX_IDX - 1;
 
 const PAGE_SIZE: usize = 4096;
 
-/// Keep at least `MIN_FREE_SLOTS` available at all times in order to ensure a minimum of
-/// `MIN_FREE_SLOTS * 2 ^ (GEN_BITS - 1)` create-delete cycles are required before a duplicate handle is
-/// generated.
+/// Keep at least `MIN_FREE_SLOTS` available at all times in order to ensure a
+/// minimum of `MIN_FREE_SLOTS * 2 ^ (GEN_BITS - 1)` create-delete cycles are
+/// required before a duplicate handle is generated.
 const MIN_FREE_SLOTS: usize = 512;
 
 static_assert!(GEN_BITS + IDX_BITS == 32);
 
 const GEN_SHIFT: u32 = IDX_SHIFT + IDX_BITS;
 
 /// A handle representing an object stored in the associated pool.
 ///
-/// Although the handle is mixed based on a per-pool random number, it's recommended to additionally create a newtype
-/// wrapper around this type, to provide type safety preventing the handles from separate pools from becoming confused.
+/// Although the handle is mixed based on a per-pool random number, it's
+/// recommended to additionally create a newtype wrapper around this handle, to
+/// provide type safety preventing the handles from separate pools from becoming
+/// confused.
 #[derive(Clone, Copy, PartialEq, Eq, Hash)]
 pub struct Handle(u32);
 
 impl Default for Handle {
@@ -45,11 +48,13 @@ impl Handle {
-    /// Create a handle from the given encode_multiplier, generation counter and slot index.
+    /// Create a handle from the given encode_multiplier, generation counter and
+    /// slot index.
     ///
     /// # Panics
     ///
-    /// Panics if the generation counter is even, as that would reference an empty slot.
+    /// Panics if the generation counter is even, as that would reference an empty
+    /// slot.
     #[inline(always)]
     fn encode(encode_multiplier: u32, generation: u32, slot_index: SlotIndex) -> Self {
         assert!(generation & 1 == 1);
@@ -57,28 +62,32 @@ impl Handle {
         let value = (generation & GEN_MASK) << GEN_SHIFT | (slot_index.0 & IDX_MASK) << IDX_SHIFT;
         // Invert bits so that the all-bits-set value, the null handle, becomes zero.
         let value = !value;
-        // Transform by the per-pool multiplier to mix bits such that handles from different pools are unlikely to collide.
-        // Note this will return 0 for the null handle due to the inversion above.
+        // Transform by the per-pool multiplier to mix bits such that handles from
+        // different pools are unlikely to collide. Note this will return 0 for the null
+        // handle due to the inversion above.
         let value = value.wrapping_mul(encode_multiplier);
         Self(value)
     }
 
-    /// Return a tuple containing the generation counter and slot index from an encoded handle and decode multiplier.
+    /// Return a tuple containing the generation counter and slot index from an
+    /// encoded handle and decode multiplier.
     ///
     /// # Panics
     ///
-    /// Panics if the generation counter is even, as that would reference an empty slot.
+    /// Panics if the generation counter is even, as that would reference an empty
+    /// slot.
     fn decode(self, decode_multiplier: u32) -> (u32, SlotIndex) {
         let value = self.0;
-        // Undo the bit mix from the encode step by multiplying by the multiplicative inverse of the encode_multiplier.
+        // Undo the bit mix from the encode step by multiplying by the multiplicative
+        // inverse of the encode_multiplier.
         let value = value.wrapping_mul(decode_multiplier);
         // Invert bits so zero, the null handle, becomes all bits set.
         let value = !value;
 
         let generation = (value >> GEN_SHIFT) & GEN_MASK;
         let slot_index = SlotIndex((value >> IDX_SHIFT) & IDX_MASK);
 
-        // An invalid generation counter here means either the handle itself has been corrupted, or that it's from
-        // another pool.
+        // An invalid generation counter here means either the handle itself has been
+        // corrupted, or that it's from another pool.
         assert!(generation & 1 == 1, "invalid generation counter");
 
         (generation, slot_index)
@@ -117,10 +126,12 @@ impl ValueIndex {
     }
 }
 
-/// Packed value storing the generation and value index for each slot in the indirection table.
+/// Packed value storing the generation and value index for each slot in the
+/// indirection table.
 ///
-/// The least-significant bit of the generation counter serves to indicate whether the slot is occupied. If it's 1,
-/// the slot contains a valid entry. If it's 0, the slot is invalid.
+/// The least-significant bit of the generation counter serves to indicate
+/// whether the slot is occupied. If it's 1, the slot contains a valid entry. If
+/// it's 0, the slot is invalid.
 struct Slot {
     value_index_and_gen: u32,
 }
@@ -165,8 +176,8 @@ impl Slot {
     /// Clears the slot's value index, incrementing the generation counter.
     #[inline(always)]
     fn clear_value_index(&mut self) {
-        // Since we're clearing we need to reset the generation to one referencing an empty slot. But we still want to
-        // invalidate old handles.
+        // Since we're clearing we need to reset the generation to one referencing an
+        // empty slot. But we still want to invalidate old handles.
         let new_generation = (self.generation() | 1).wrapping_add(1);
         self.value_index_and_gen = (new_generation & GEN_MASK) << GEN_SHIFT | IDX_MASK << IDX_SHIFT;
     }
@@ -236,8 +247,8 @@ impl FreeSlots {
     #[cold]
     fn grow(&mut self) {
-        // Free slots must always be a power of two so that the modular arithmetic for indexing
-        // works out correctly.
+        // Free slots must always be a power of two so that the modular arithmetic for
+        // indexing works out correctly.
         debug_assert!(self.cap == 0 || self.cap.is_power_of_two());
         assert!(self.cap <= MAX_IDX, "freelist overflow");
@@ -249,8 +260,8 @@ impl FreeSlots {
             )
         };
 
-        // This is slightly wrong, but our freelist doesn't need correct ordering on resize and this
-        // avoids moving the values around.
+        // This is slightly wrong, but our freelist doesn't need correct ordering on
+        // resize and this avoids moving the values around.
         if !self.is_empty() {
             debug_assert!(self.is_full());
             self.tail = 0;
@@ -264,7 +275,8 @@
 
 // Make sure the slots array always grows by a single page.
 const SLOT_GROWTH_AMOUNT: usize = PAGE_SIZE / std::mem::size_of::<Slot>();
 
-/// Indirection table mapping slot indices stored in handles to values in the values array.
+/// Indirection table mapping slot indices stored in handles to values in the
+/// values array.
 ///
 /// Also contains the generation counter for each slot.
 struct Slots {
@@ -299,7 +311,8 @@ impl Slots {
     /// Attempts to grow the slots array.
     ///
-    /// Returns a tuple containing the old len and new len, or None if the array was already at capacity.
+    /// Returns a tuple containing the old len and new len, or None if the array was
+    /// already at capacity.
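The encode/decode scheme above round-trips because the per-pool multiplier is odd, and every odd number is invertible modulo 2^32. A self-contained sketch of the mixing, with illustrative constants and a Newton-iteration inverse standing in for the crate's `mod_inverse_u32`:

```rust
// Self-contained sketch of the handle bit-mixing described above. The
// constants and helpers are illustrative, not the crate's real code.
const GEN_BITS: u32 = 9;
const IDX_BITS: u32 = 23;
const GEN_MASK: u32 = (1 << GEN_BITS) - 1;
const IDX_MASK: u32 = (1 << IDX_BITS) - 1;

/// Multiplicative inverse of an odd `x` modulo 2^32. Newton's iteration
/// doubles the number of correct low bits each step, so five steps suffice.
fn mod_inverse_u32(x: u32) -> u32 {
    assert!(x & 1 == 1, "only odd numbers are invertible mod 2^32");
    let mut inv = x; // correct to 3 bits, since x * x == 1 (mod 8) for odd x
    for _ in 0..5 {
        inv = inv.wrapping_mul(2u32.wrapping_sub(x.wrapping_mul(inv)));
    }
    inv
}

fn encode(multiplier: u32, generation: u32, index: u32) -> u32 {
    let value = (generation & GEN_MASK) << IDX_BITS | (index & IDX_MASK);
    // Invert so the all-bits-set null pattern becomes zero, then mix.
    (!value).wrapping_mul(multiplier)
}

fn decode(handle: u32, inverse: u32) -> (u32, u32) {
    let value = !handle.wrapping_mul(inverse);
    ((value >> IDX_BITS) & GEN_MASK, value & IDX_MASK)
}

fn main() {
    let multiplier: u32 = 0x9E3779B1; // any odd multiplier works
    let inverse = mod_inverse_u32(multiplier);
    assert_eq!(multiplier.wrapping_mul(inverse), 1);
    assert_eq!(decode(encode(multiplier, 3, 42), inverse), (3, 42));
}
```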
     #[cold]
     fn try_grow(&mut self) -> Option<(u32, u32)> {
         let len = self.len;
@@ -323,7 +336,8 @@
     }
 }
 
-/// A contiguous growable array of values as well as a reverse-lookup table for slot indices that map to those values.
+/// A contiguous growable array of values as well as a reverse-lookup table for
+/// slot indices that map to those values.
 struct Values<T> {
     cap: usize,
     len: usize,
@@ -366,7 +380,8 @@ impl<T> Values<T> {
         }
     }
 
-    /// Retreive the `SlotIndex` corresponding to the given `ValueIndex` from the lookup table.
+    /// Retrieve the `SlotIndex` corresponding to the given `ValueIndex` from the
+    /// lookup table.
     #[inline(always)]
     fn get_slot(&mut self, value_index: ValueIndex) -> SlotIndex {
         let value_index = value_index.0 as usize;
         unsafe { std::ptr::read(self.slots_ptr.as_ptr().add(value_index).as_ref().unwrap()) }
     }
 
-    /// Push a new value into the values storage. Returns the index of the added value.
+    /// Push a new value into the values storage. Returns the index of the added
+    /// value.
     #[inline(always)]
     fn push(&mut self, value: T) -> ValueIndex {
         if self.len == self.cap {
@@ -389,8 +405,8 @@ impl<T> Values<T> {
         ValueIndex(new_value_index as u32)
     }
 
-    /// Remove the element at the given `ValueIndex` and replace it with the last element. Fixup
-    /// the lookup tables for the moved element.
+    /// Remove the element at the given `ValueIndex` and replace it with the last
+    /// element. Fix up the lookup tables for the moved element.
     ///
     /// Returns the removed value.
     #[inline(always)]
@@ -426,6 +442,7 @@
     }
 
     /// Retrieve a reference to the value at `value_index`
+    ///
     /// Panics if `value_index` is out of bounds
     #[inline(always)]
     fn get(&self, value_index: ValueIndex) -> &T {
@@ -436,6 +453,7 @@
     }
 
     /// Retrieve a mutable reference to the value at `value_index`
+    ///
     /// Panics if `value_index` is out of bounds
     #[inline(always)]
     fn get_mut(&mut self, value_index: ValueIndex) -> &mut T {
@@ -468,11 +486,14 @@ impl<T> Values<T> {
     }
 }
 
-/// A pool for allocating objects of type T and associating them with a POD `Handle`.
+/// A pool for allocating objects of type T and associating them with a POD
+/// `Handle`.
 ///
-/// We do a basic attempt to ensure that mixing handles from different pools with either assert or return None. However
-/// it's possible that by accident lookup using a handle from another pool will return a valid object. The pool will
-/// not have memory unsafety in this case however, as it will only return valid objects from the pool.
+/// We do a basic attempt to ensure that mixing handles from different pools
+/// will either assert or return None. However it's possible that by accident
+/// lookup using a handle from another pool will return a valid object. The pool
+/// will not have memory unsafety in this case however, as it will only return
+/// valid objects from the pool.
 pub struct Pool<T> {
     encode_multiplier: u32,
     decode_multiplier: u32,
@@ -486,8 +507,8 @@ pub struct Pool<T> {
 impl<T> Pool<T> {
     /// Creates a new pool.
     ///
-    /// This will reserve a large amount of virtual memory for the maximum size of the pool, but won't commit any of it
-    /// until it is required.
+    /// This will reserve a large amount of virtual memory for the maximum size of
+    /// the pool, but won't commit any of it until it is required.
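The reserve-everything-up-front approach described above maps naturally onto POSIX primitives. A minimal sketch of how `virtual_reserve` and `virtual_commit` could be implemented, assuming Linux and the `libc` crate; the crate's real implementation may well differ:

```rust
// Sketch of reserve-then-commit virtual memory, an assumption about how the
// crate's `virtual_reserve` / `virtual_commit` helpers could work (Linux,
// `libc` crate). Not the crate's actual implementation.
use std::ffi::c_void;

unsafe fn virtual_reserve(size: usize) -> *mut c_void {
    // Reserve address space with no access rights; the range is not yet
    // usable, and costs no physical memory.
    let ptr = libc::mmap(
        std::ptr::null_mut(),
        size,
        libc::PROT_NONE,
        libc::MAP_PRIVATE | libc::MAP_ANONYMOUS,
        -1,
        0,
    );
    assert!(ptr != libc::MAP_FAILED, "virtual reserve failed");
    ptr
}

unsafe fn virtual_commit(ptr: *mut c_void, size: usize) {
    // Enable access to a prefix of the reserved range; the OS commits
    // physical pages lazily on first touch.
    let ret = libc::mprotect(ptr, size, libc::PROT_READ | libc::PROT_WRITE);
    assert!(ret == 0, "virtual commit failed");
}
```

Because the reservation never moves, committing more capacity never invalidates pointers into the pool's storage.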
     pub fn new() -> Self {
         let mut mapping_size = 0;
@@ -513,9 +534,11 @@ impl<T> Pool<T> {
         let value_slots = unsafe { mapping_base.add(value_slots_offset) } as _;
         let values = unsafe { mapping_base.add(values_offset) } as _;
 
-        // virtual reservations are page aligned, so shift out the zeroes in the bottom of the base address.
+        // Virtual reservations are page aligned, so shift out the zeroes in the bottom
+        // of the base address.
         let encode_multiplier = mapping_base as usize >> 12;
-        // multiplier must be odd to calculate the mod inverse.
+
+        // Multiplier must be odd to calculate the mod inverse.
         let encode_multiplier = encode_multiplier as u32 | 1;
         let decode_multiplier = mod_inverse_u32(encode_multiplier);
@@ -560,8 +583,8 @@ impl<T> Pool<T> {
         if self.free_slots.len() < MIN_FREE_SLOTS {
             // We need to grow the slots array if there are insufficient free slots.
-            // This is a no-op if we're already at the max capacity of the pool, which weakens the use-after-free
-            // detection.
+            // This is a no-op if we're already at the max capacity of the pool, which
+            // weakens the use-after-free detection.
             if let Some((lo, hi)) = self.slots.try_grow() {
                 for free_slot_index in lo..hi {
                     self.free_slots.push(SlotIndex(free_slot_index));
@@ -577,7 +600,8 @@ impl<T> Pool<T> {
         Handle::encode(self.encode_multiplier, slot.generation(), slot_index)
     }
 
-    /// Removes a value from the pool, returning the value associated with the handle if it was previously valid.
+    /// Removes a value from the pool, returning the value associated with the
+    /// handle if it was previously valid.
     pub fn remove(&mut self, handle: Handle) -> Option<T> {
         let (generation, slot_index) = handle.decode(self.decode_multiplier);
@@ -701,9 +725,11 @@ mod tests {
         assert_eq!(pool.get(Handle::null()), None);
     }
 
-    // This test is based on randomness in the base address of the pool so disable it by default to
-    // avoid flaky tests in CI.
-    // We do a basic attempt to ensure that mixing handles from different pools with either assert or return None.
+    // This test is based on randomness in the base address of the pool so disable
+    // it by default to avoid flaky tests in CI.
+    //
+    // We do a basic attempt to ensure that mixing handles from different pools will
+    // either assert or return None.
     #[test]
     #[ignore]
     #[should_panic]
diff --git a/libs/narcissus-core/src/rand.rs b/libs/narcissus-core/src/rand.rs
index 9c2af1d..1a01910 100644
--- a/libs/narcissus-core/src/rand.rs
+++ b/libs/narcissus-core/src/rand.rs
@@ -31,7 +31,10 @@ impl Pcg64 {
         ((old_state >> 64) ^ old_state).rotate_right((old_state >> 122) as u32) as u64
     }
 
-    /// Generates a uniformly distributed random number in the range `0..upper_bound`
+    /// Generates a uniformly distributed random number in the range
+    /// `0..upper_bound`
+    ///
+    /// Always draws two 64 bit words from the PRNG.
     ///
     /// Based on
     #[inline]
@@ -44,6 +47,8 @@ impl Pcg64 {
     }
 
     /// Generates a uniformly distributed random float in the range `-1.0..1.0`
+    ///
+    /// Always draws two 64 bit words from the PRNG.
     #[inline]
     #[must_use]
     pub fn next_f32(&mut self) -> f32 {
@@ -52,6 +57,8 @@ impl Pcg64 {
     }
 
     /// Randomly select an element from `slice` with uniform probability.
+    ///
+    /// Always draws two 64 bit words from the PRNG.
     pub fn select<'a, T>(&mut self, slice: &'a [T]) -> Option<&'a T> {
         if slice.is_empty() {
             None
@@ -63,9 +70,9 @@ impl Pcg64 {
 
     /// Shuffle the elements in `slice` in-place.
     ///
-    /// Note that as `Pcg64` is initialized with a 128 bit seed, it's only possible to generate
-    /// `2^128` permutations.
-    /// This means for slices larger than 34 elements, this function can no
-    /// longer produce all permutations.
+    /// Note that as `Pcg64` is initialized with a 128 bit seed, it's only possible
+    /// to generate `2^128` permutations. This means for slices larger than 34
+    /// elements, this function can no longer produce all possible permutations.
     pub fn shuffle<T>(&mut self, slice: &mut [T]) {
         if !slice.is_empty() {
             let mut i = slice.len() - 1;
diff --git a/libs/narcissus-core/src/ref_count.rs b/libs/narcissus-core/src/ref_count.rs
index 35ea1d2..6ec4c2d 100644
--- a/libs/narcissus-core/src/ref_count.rs
+++ b/libs/narcissus-core/src/ref_count.rs
@@ -7,8 +7,11 @@ use std::{
 struct Inner<T> {
     // Number of strong references in addition to the current value.
-    // A negative value indicates a non-atomic reference count, counting up from i32::MIN
-    // A positive value indicates an atomic reference count, counting up from 0
+    //
+    // A negative value indicates a non-atomic reference count, counting up from
+    // `i32::MIN`
+    //
+    // A positive value indicates an atomic reference count, counting up from `0`
     strong: AtomicI32,
     value: T,
 }
@@ -129,9 +132,9 @@ impl<T> Rc<T> {
     /// # Safety
     ///
-    /// Any other [`Rc`] or [`Arc`] pointers to the same allocation must not be dereferenced for the duration of the
-    /// returned borrow. This is trivially the case if no such pointers exist, for example immediately after
-    /// [`Arc::new`].
+    /// Any other [`Rc`] or [`Arc`] pointers to the same allocation must not be
+    /// dereferenced for the duration of the returned borrow. This is trivially the
+    /// case if no such pointers exist, for example immediately after [`Arc::new`].
     #[inline]
     pub unsafe fn get_mut_unchecked(&mut self) -> &mut T {
         // We are careful to *not* create a reference covering the "count" fields, as
@@ -237,11 +240,9 @@ impl<T> Arc<T> {
 
     pub fn get_mut(&mut self) -> Option<&mut T> {
         if self.is_unique() {
-            // This unsafety is ok because we're guaranteed that the pointer
-            // returned is the *only* pointer that will ever be returned to T. Our
-            // reference count is guaranteed to be 1 at this point, and we required
-            // the Arc itself to be `mut`, so we're returning the only possible
-            // reference to the inner data.
+            // SAFETY: We're guaranteed that the pointer returned is the *only* pointer that
+            // will ever be returned to T because our reference count is 1, and we required
+            // the Arc reference itself to be mutable.
             Some(unsafe { self.get_mut_unchecked() })
         } else {
             None
diff --git a/libs/narcissus-core/src/slice.rs b/libs/narcissus-core/src/slice.rs
index 5c469a4..66713a2 100644
--- a/libs/narcissus-core/src/slice.rs
+++ b/libs/narcissus-core/src/slice.rs
@@ -50,11 +50,10 @@ impl<'a, T, const N: usize> Iterator for ArrayWindows<'a, T, N> {
         if self.num == 0 {
             return None;
         }
-        // SAFETY:
-        // This is safe because it's indexing into a slice guaranteed to be length > N.
+        // SAFETY: Indexing into a slice guaranteed to have `len > N`.
         let ret = unsafe { &*self.slice_head.cast::<[T; N]>() };
-        // SAFETY: Guaranteed that there are at least 1 item remaining otherwise
-        // earlier branch would've been hit
+        // SAFETY: Guaranteed that there is at least one item remaining, otherwise the
+        // earlier branch would've returned `None`.
         self.slice_head = unsafe { self.slice_head.add(1) };
         self.num -= 1;
@@ -127,8 +126,8 @@ impl<'a, T, const N: usize> DoubleEndedIterator for ArrayWindows<'a, T, N> {
 ///
 /// # Panics
 ///
-/// Panics if `N` is 0.
-/// This check will most probably get changed to a compile time
-/// error before this method gets stabilized.
+/// Panics if `N` is 0. This check will most probably get changed to a compile
+/// time error before this method gets stabilized.
 ///
 /// # Examples
 ///
diff --git a/libs/narcissus-core/src/uuid.rs b/libs/narcissus-core/src/uuid.rs
index acd1ecb..cce7875 100644
--- a/libs/narcissus-core/src/uuid.rs
+++ b/libs/narcissus-core/src/uuid.rs
@@ -124,7 +124,7 @@ impl Uuid {
             | h_15_0
             | h_15_1;
 
-        // only possible if any of the half-words are invalid
+        // Only possible if any of the half-words are invalid.
         if bits == !0 {
             return Err(ParseUuidError);
         }
diff --git a/libs/narcissus-core/src/virtual_vec/raw_vec.rs b/libs/narcissus-core/src/virtual_vec/raw_vec.rs
index 5317754..3280b6c 100644
--- a/libs/narcissus-core/src/virtual_vec/raw_vec.rs
+++ b/libs/narcissus-core/src/virtual_vec/raw_vec.rs
@@ -20,11 +20,11 @@ impl<T> VirtualRawVec<T> {
         let align = align_of::<T>();
         let page_size = page_size();
 
-        // Allocating memory with virtual alloc for a zst seems a bit of a waste :)
+        // Allocating memory with virtual alloc for a zst seems a bit of a waste. :)
         assert!(size != 0);
 
-        // mmap gaurantees we get page aligned addresses back. So as long as our alignment
-        // requirement is less than that, we're all good in the hood.
+        // mmap guarantees we get page aligned addresses back. So as long as our
+        // alignment requirement is less than that, we're all good in the hood.
         assert!(align < page_size);
 
         let max_capacity_bytes = size.checked_mul(max_capacity).unwrap();
@@ -47,8 +47,9 @@ impl<T> VirtualRawVec<T> {
         let mut vec = Self::new(max_capacity);
 
         unsafe {
-            // we ensure that capacity is less than max_capacity, and the new function above would
-            // have paniced if max_capacity * size_of::() overflowed, so we're always safe here.
+            // We ensure that capacity is less than max_capacity, and the new function above
+            // would have panicked if max_capacity * size_of::<T>() overflowed, so we're
+            // always safe here.
             let cap_bytes = capacity * size_of::<T>();
             virtual_commit(vec.ptr.as_ptr() as *mut std::ffi::c_void, cap_bytes);
             vec.cap = capacity;
@@ -80,9 +81,8 @@ impl<T> VirtualRawVec<T> {
         let double_cap = self.cap * 2;
         let new_cap = cmp::max(required_cap, cmp::min(double_cap, max_cap));
 
-        // This can't overflow because we've already ensured that the new_cap is less than or
-        // equal to the the max_cap, and the max_cap has already been checked for overflow in
-        // the constructor.
+        // This can't overflow because we've already ensured that `new_cap <= max_cap`,
+        // and `max_cap` has already been checked for overflow in the constructor.
         let new_cap_bytes = new_cap * size_of::<T>();
 
         virtual_commit(self.ptr.as_ptr() as *mut std::ffi::c_void, new_cap_bytes);
@@ -126,7 +126,8 @@ impl<T> Drop for VirtualRawVec<T> {
     fn drop(&mut self) {
         unsafe {
             // The preconditions here that max_cap multiplied by the size won't overflow and
-            // that the pointer actually exists and is mapped are all ensured by the constructor.
+            // that the pointer actually exists and is mapped are all ensured by the
+            // constructor.
             virtual_free(
                 self.ptr.as_ptr() as *mut std::ffi::c_void,
                 self.max_cap * size_of::<T>(),
diff --git a/libs/narcissus-core/src/virtual_vec/vec.rs b/libs/narcissus-core/src/virtual_vec/vec.rs
index 1e22e0b..469fb33 100644
--- a/libs/narcissus-core/src/virtual_vec/vec.rs
+++ b/libs/narcissus-core/src/virtual_vec/vec.rs
@@ -11,15 +11,16 @@ pub struct VirtualVec<T> {
 }
 
 impl<T> VirtualVec<T> {
-    /// Creates a new vector backed by virtual memory.
-    /// The array cannot grow beyond
-    /// its original reservation
+    /// Creates a new vector backed by virtual memory. The array cannot grow beyond
+    /// its original reservation.
     ///
-    /// Unlike a normal vector this means addresses will not be invalidated when the vector grows,
-    /// nor will there be any copying.
+    /// Unlike a normal vector, this means addresses will not be invalidated when the
+    /// vector grows, nor will there be any copying when a resize occurs.
     ///
    /// # Panics
     ///
-    /// Panics if the memory reservation fails, or if there's any overflow in the size calculations.
+    /// Panics if the memory reservation fails, or if there's any overflow in the
+    /// size calculations.
     pub fn new(max_capacity: usize) -> Self {
         Self {
             buf: VirtualRawVec::new(max_capacity),
@@ -80,11 +81,11 @@ impl<T> VirtualVec<T> {
     pub fn truncate(&mut self, len: usize) {
         // This is safe because:
         //
-        // * the slice passed to `drop_in_place` is valid; the `len > self.len`
-        //   case avoids creating an invalid slice, and
-        // * the `len` of the vector is shrunk before calling `drop_in_place`,
-        //   such that no value will be dropped twice in case `drop_in_place`
-        //   were to panic once (if it panics twice, the program aborts).
+        // 1) The slice passed to `drop_in_place` is valid; the `len > self.len` case
+        //    avoids creating an invalid slice.
+        // 2) The `len` of the vector is shrunk before calling `drop_in_place` such
+        //    that no value will be dropped twice in case `drop_in_place` were to
+        //    panic once (if it panics twice, the program aborts).
         unsafe {
             if len > self.len {
                 return;
             }
@@ -117,28 +118,22 @@ impl<T> VirtualVec<T> {
         self
     }
 
-    /// Returns a raw pointer to the vector's buffer.
+    /// Returns a raw pointer to the vector's internal buffer.
     ///
-    /// The caller must ensure that the vector outlives the pointer this
-    /// function returns, or else it will end up pointing to garbage.
-    ///
-    /// The caller must also ensure that the memory the pointer (non-transitively) points to
-    /// is never written to (except inside an `UnsafeCell`) using this pointer or any pointer
-    /// derived from it. If you need to mutate the contents of the slice, use [`as_mut_ptr`].
+    /// The caller must ensure that the memory the pointer (non-transitively)
+    /// points to is never written to (except inside an `UnsafeCell`) using this
+    /// pointer or any pointer derived from it. If you need to mutate the contents
+    /// of the slice, use [`as_mut_ptr`].
     ///
     /// [`as_mut_ptr`]: #method.as_mut_ptr
     #[inline]
     pub fn as_ptr(&self) -> *const T {
-        // We shadow the slice method of the same name to avoid going through
-        // `deref`, which creates an intermediate reference.
+        // We shadow the slice method of the same name to avoid going through `deref`,
+        // which creates an intermediate reference.
         self.buf.ptr()
     }
 
-    /// Returns an unsafe mutable pointer to the vector's buffer.
-    ///
-    /// The caller must ensure that the vector outlives the pointer this
-    /// function returns, or else it will end up pointing to garbage.
-    ///
+    /// Returns a mutable raw pointer to the vector's internal buffer.
     #[inline]
     pub fn as_mut_ptr(&mut self) -> *mut T {
         self.buf.ptr()
@@ -156,9 +151,9 @@ impl<T> VirtualVec<T> {
     #[inline]
     pub fn swap_remove(&mut self, index: usize) -> T {
         unsafe {
-            // We replace self[index] with the last element. Note that if the
-            // bounds check on hole succeeds there must be a last element
-            // (which can be self[index] itself).
+            // We replace `self[index]` with the last element. Note that if the bounds check
+            // on hole succeeds there must be a last element (which can be `self[index]`
+            // itself).
             let hole: *mut T = &mut self[index];
             let last = ptr::read(self.get_unchecked(self.len - 1));
             self.len -= 1;
@@ -180,15 +175,13 @@ impl<T> VirtualVec<T> {
         self.reserve(1);
 
         unsafe {
-            // infallible
-            // The spot to put the new value
+            // Infallible. The spot to put the new value.
             {
                 let p = self.as_mut_ptr().add(index);
-                // Shift everything over to make space. (Duplicating the
-                // `index`th element into two consecutive places.)
+                // Shift everything over to make space. Duplicating the `index`th element into
+                // two consecutive places.
                 ptr::copy(p, p.offset(1), len - index);
-                // Write it in, overwriting the first copy of the `index`th
-                // element.
+                // Write it in, overwriting the first copy of the `index`th element.
                 ptr::write(p, element);
             }
             self.len += 1;
@@ -205,15 +198,14 @@ impl<T> VirtualVec<T> {
         let len = self.len();
         assert!(index < len);
         unsafe {
-            // infallible
+            // Infallible
             let ret;
             {
-                // the place we are taking from.
+                // The place we are taking from.
                 let ptr = self.as_mut_ptr().add(index);
-                // copy it out, unsafely having a copy of the value on
-                // the stack and in the vector at the same time.
+                // Copy it out, unsafely having a copy of the value on the stack and in the
+                // vector at the same time.
                 ret = ptr::read(ptr);
-                // Shift everything down to fill in that spot.
                 ptr::copy(ptr.offset(1), ptr, len - index - 1);
             }
@@ -281,13 +273,14 @@ impl<T> DerefMut for VirtualVec<T> {
 impl<T: Clone> VirtualVec<T> {
     /// Resizes the `Vec` in-place so that `len` is equal to `new_len`.
     ///
-    /// If `new_len` is greater than `len`, the `Vec` is extended by the
-    /// difference, with each additional slot filled with `value`.
-    /// If `new_len` is less than `len`, the `Vec` is simply truncated.
+    /// If `new_len >= len`, the `Vec` is extended by the difference, with each
+    /// additional slot filled with `value`.
+    ///
+    /// If `new_len < len`, the `Vec` is simply truncated.
     ///
-    /// This method requires [`Clone`] to be able clone the passed value. If
-    /// you need more flexibility (or want to rely on [`Default`] instead of
-    /// [`Clone`]), use [`resize_with`].
+    /// This method requires [`Clone`] to be able to clone the passed value. If you
+    /// need more flexibility (or want to rely on [`Default`] instead of [`Clone`]),
+    /// use [`resize_with`].
     ///
     /// [`Clone`]: ../../std/clone/trait.Clone.html
     /// [`Default`]: ../../std/default/trait.Default.html
@@ -315,21 +308,21 @@ impl<T: Clone> VirtualVec<T> {
             // don't alias.
             let mut local_len = SetLenOnDrop::new(&mut self.len);
 
-            // Write all elements except the last one
+            // Write all elements except the last one.
             for _ in 1..n {
                 ptr::write(ptr, value.next());
                 ptr = ptr.offset(1);
-                // Increment the length in every step in case next() panics
+                // Increment the length in every step in case next() panics.
                 local_len.increment_len(1);
             }
 
             if n > 0 {
-                // We can write the last element directly without cloning needlessly
+                // We can write the last element directly without cloning needlessly.
                 ptr::write(ptr, value.last());
                 local_len.increment_len(1);
             }
 
-            // len set by scope guard
+            // `len` set by scope guard.
         }
     }
 }
@@ -381,8 +374,8 @@ impl<T> Drop for VirtualVec<T> {
 
 // Set the length of the vec when the `SetLenOnDrop` value goes out of scope.
 //
-// The idea is: The length field in SetLenOnDrop is a local variable
-// that the optimizer will see does not alias with any stores through the Vec's data
+// The idea is: The length field in SetLenOnDrop is a local variable that the
+// optimizer will see does not alias with any stores through the Vec's data
 // pointer. This is a workaround for alias analysis issue #32155
 struct SetLenOnDrop<'a> {
     len: &'a mut usize,
diff --git a/libs/narcissus-core/src/waiter.rs b/libs/narcissus-core/src/waiter.rs
index 6e43411..e3596b5 100644
--- a/libs/narcissus-core/src/waiter.rs
+++ b/libs/narcissus-core/src/waiter.rs
@@ -7,7 +7,7 @@ pub fn wait(futex: &AtomicI32, expected: i32, timeout: Option<Duration>) {
         Some(libc::timespec {
             // Sleep forever if the timeout is longer than fits in a timespec.
             tv_sec: d.as_secs().try_into().ok()?,
-            // This conversion never truncates, as subsec_nanos is always <1e9.
+            // This conversion never truncates, as subsec_nanos is always `< 1e9`.
             tv_nsec: d.subsec_nanos() as _,
         })
     });
diff --git a/libs/narcissus-font/src/font.rs b/libs/narcissus-font/src/font.rs
index 608ad58..3af3a89 100644
--- a/libs/narcissus-font/src/font.rs
+++ b/libs/narcissus-font/src/font.rs
@@ -113,7 +113,7 @@ impl<'a> Font<'a> {
         let mut ascent = 0;
         let mut descent = 0;
         let mut line_gap = 0;
-        // Safety: We've just initialized the font info above.
+        // SAFETY: We've just initialized the font info above.
         unsafe { stbtt_GetFontVMetrics(&info, &mut ascent, &mut descent, &mut line_gap) };
         VerticalMetrics {
             ascent: ascent as f32,
diff --git a/libs/narcissus-font/src/packer.rs b/libs/narcissus-font/src/packer.rs
index cb78791..e752ecf 100644
--- a/libs/narcissus-font/src/packer.rs
+++ b/libs/narcissus-font/src/packer.rs
@@ -23,7 +23,7 @@ impl Packer {
         let width = width as i32;
         let height = height as i32;
 
-        // Safety: `nodes` must not be deleted while context lives, and `context` must not be
+        // SAFETY: `nodes` must not be deleted while context lives, and `context` must not be
         // relocated.
         let context = unsafe {
             let mut context = uninit_box();
@@ -47,7 +47,7 @@ impl Packer {
 
     /// Clear all previously packed rectangle state.
     pub fn clear(&mut self) {
-        // Safety: `context` and `nodes` are always valid while packer exists, and width always
+        // SAFETY: `context` and `nodes` are always valid while packer exists, and width always
         // matches node count.
         unsafe {
             stbrp_init_target(
@@ -68,7 +68,7 @@ impl Packer {
     /// Returns true if all rectangles were successfully packed.
     pub fn pack(&mut self, rects: &mut [rectpack::Rect]) -> bool {
         let num_rects = rects.len().try_into().expect("too many rects to pack");
-        // Safety: `context` and `nodes` are always valid while packer exists.
+        // SAFETY: `context` and `nodes` are always valid while packer exists.
         let ret =
             unsafe { stbrp_pack_rects(self.context.as_mut(), rects.as_mut_ptr(), num_rects) };
         ret == 1
     }
diff --git a/libs/narcissus-gpu/src/backend/vulkan/mod.rs b/libs/narcissus-gpu/src/backend/vulkan/mod.rs
index 1c55190..5a33890 100644
--- a/libs/narcissus-gpu/src/backend/vulkan/mod.rs
+++ b/libs/narcissus-gpu/src/backend/vulkan/mod.rs
@@ -683,9 +683,9 @@ fn vulkan_image_memory_barrier(
     // Add visibility operations if necessary.
     //
-    // If the src access mask is zero, this is a Write-After-Read hazard (or for some reason, a
-    // Read-After-Read), so the dst access mask can be safely zeroed as these don't need
-    // visibility.
+    // If the src access mask is zero, this is a Write-After-Read hazard (or for
+    // some reason, a Read-After-Read), so the dst access mask can be safely zeroed
+    // as these don't need visibility.
     if src_access_mask != default() {
         dst_access_mask |= info.access;
     }
@@ -980,7 +980,8 @@ impl VulkanDevice {
             }
         }
 
-        // If we found any surface extensions, we need to additionally enable VK_KHR_surface.
+        // If we found any surface extensions, we need to additionally enable
+        // `VK_KHR_surface`.
         if !enabled_extensions.is_empty() {
             enabled_extensions.push(cstr!("VK_KHR_surface"));
         }
@@ -1282,24 +1283,26 @@ impl VulkanDevice {
     fn frame<'token>(&self, frame: &'token Frame) -> &'token VulkanFrame {
         frame.check_device(self as *const _ as usize);
         frame.check_frame_counter(self.frame_counter.load());
-        // Safety: Reference is bound to the frame exposed by the API. only one frame can be valid
-        // at a time. The returned VulkanFrame is only valid so long as we have a ref on the frame.
+        // SAFETY: Reference is bound to the frame exposed by the API. Only one frame
+        // can be valid at a time. The returned VulkanFrame is only valid so long as we
+        // have a ref on the frame.
         unsafe { &*self.frames[frame.frame_index % NUM_FRAMES].get() }
     }
 
     fn frame_mut<'token>(&self, frame: &'token mut Frame) -> &'token mut VulkanFrame {
         frame.check_device(self as *const _ as usize);
         frame.check_frame_counter(self.frame_counter.load());
-        // Safety: Reference is bound to the frame exposed by the API. only one frame can be valid
-        // at a time. The returned VulkanFrame is only valid so long as we have a ref on the frame.
+        // SAFETY: Reference is bound to the frame exposed by the API. Only one frame
+        // can be valid at a time. The returned VulkanFrame is only valid so long as we
+        // have a ref on the frame.
         unsafe { &mut *self.frames[frame.frame_index % NUM_FRAMES].get() }
     }
 
     fn cmd_buffer_mut<'a>(&self, cmd_buffer: &'a mut CmdBuffer) -> &'a mut VulkanCmdBuffer {
-        // Safety: CmdBuffer's can't outlive a frame, and the memory for a cmd_buffer is reset when
-        // the frame ends. So the pointer contained in the cmd_buffer is always valid while the
-        // CmdBuffer is valid. They can't cloned, copied or be sent between threads, and we have a
-        // mut reference.
+        // SAFETY: `CmdBuffer`s can't outlive a frame, and the memory for a cmd_buffer
+        // is reset when the frame ends. So the pointer contained in the cmd_buffer is
+        // always valid while the `CmdBuffer` is valid. They can't be cloned, copied, or
+        // sent between threads, and we have a mutable reference.
         unsafe {
             NonNull::new_unchecked(cmd_buffer.cmd_buffer_addr as *mut VulkanCmdBuffer).as_mut()
         }
diff --git a/libs/narcissus-gpu/src/frame_counter.rs b/libs/narcissus-gpu/src/frame_counter.rs
index a42dfec..983a097 100644
--- a/libs/narcissus-gpu/src/frame_counter.rs
+++ b/libs/narcissus-gpu/src/frame_counter.rs
@@ -12,7 +12,8 @@ pub struct FrameCounter {
 impl FrameCounter {
     pub fn new() -> Self {
         Self {
-            // Start the frame id at 1 so that the first `begin_frame` ticks us over to a new frame index.
+            // Start the frame id at 1 so that the first `begin_frame` ticks us
+            // over to a new frame index.
             value: AtomicUsize::new(1),
         }
     }
diff --git a/libs/narcissus-image/src/lib.rs b/libs/narcissus-image/src/lib.rs
index c39c469..0949655 100644
--- a/libs/narcissus-image/src/lib.rs
+++ b/libs/narcissus-image/src/lib.rs
@@ -52,7 +52,7 @@ impl Image {
             height: y,
             components,
             len,
-            // Safety: We just checked that buffer is not null above.
+            // SAFETY: We just checked that buffer is not null above.
             buffer: unsafe { NonNull::new_unchecked(buffer) },
         })
     }
@@ -90,14 +90,14 @@ impl Image {
     /// | 3          | red, green, blue        |
     /// | 4          | red, green, blue, alpha |
     pub fn as_slice(&self) -> &[u8] {
-        // Safety: Slice size is calculated when creating `Texture`.
+        // SAFETY: Slice size is calculated when creating `Image`.
         unsafe { std::slice::from_raw_parts(self.buffer.as_ptr(), self.len) }
     }
 }
 
 impl Drop for Image {
     fn drop(&mut self) {
-        // Safety: Always allocated by `stbi_load_xxx` functions.
+        // SAFETY: Always allocated by `stbi_load_xxx` functions.
         unsafe { stbi_image_free(self.buffer.as_ptr() as *mut _) }
     }
 }
diff --git a/libs/narcissus-maths/src/affine2.rs b/libs/narcissus-maths/src/affine2.rs
index cca48f7..f01b256 100644
--- a/libs/narcissus-maths/src/affine2.rs
+++ b/libs/narcissus-maths/src/affine2.rs
@@ -1,6 +1,7 @@
 use crate::{Mat2, Point2, Vec2};
 
-/// Matrix and translation vector which together represent a 2d affine transformation.
+/// Matrix and translation vector which together represent a 2d affine
+/// transformation.
 #[derive(Clone, Copy, PartialEq)]
 #[repr(C)]
 pub struct Affine2 {
diff --git a/libs/narcissus-maths/src/affine3.rs b/libs/narcissus-maths/src/affine3.rs
index e96c949..fd32104 100644
--- a/libs/narcissus-maths/src/affine3.rs
+++ b/libs/narcissus-maths/src/affine3.rs
@@ -1,6 +1,7 @@
 use crate::{Mat3, Point3, Vec3};
 
-/// Matrix and translation vector which together represent a 3d affine transformation.
+/// Matrix and translation vector which together represent a 3d affine
+/// transformation.
 #[derive(Clone, Copy, PartialEq)]
 #[repr(C)]
 pub struct Affine3 {
diff --git a/libs/narcissus-maths/src/lib.rs b/libs/narcissus-maths/src/lib.rs
index cb88a6f..496bb2f 100644
--- a/libs/narcissus-maths/src/lib.rs
+++ b/libs/narcissus-maths/src/lib.rs
@@ -227,7 +227,7 @@ macro_rules! impl_shared {
             #[inline(always)]
             pub const fn splat(value: $t) -> $name {
                 // we have to transmute here because we can't make `into()` const.
-                // Safety: $name is repr(C) struct with $n elements of type $t, so the transmute is always valid.
+                // SAFETY: $name is a repr(C) struct with $n elements of type $t,
+                // so the transmute is always valid.
                 unsafe { std::mem::transmute([value; $n]) }
             }
diff --git a/libs/narcissus-maths/src/mat2.rs b/libs/narcissus-maths/src/mat2.rs
index 196bea7..fd11c24 100644
--- a/libs/narcissus-maths/src/mat2.rs
+++ b/libs/narcissus-maths/src/mat2.rs
@@ -38,19 +38,22 @@ impl Mat2 {
         unsafe { std::mem::transmute(rows) }
     }
 
-    /// Construct a matrix with the provided `diagonal` and all other values set to `0.0`.
+    /// Construct a matrix with the provided `diagonal` and all other values set to
+    /// `0.0`.
     pub const fn from_diagonal(diagonal: Vec2) -> Mat2 {
         Mat2::from_rows([[diagonal.x, 0.0], [0.0, diagonal.y]])
     }
 
-    /// Construct a transformation matrix which scales along the coordinate axis by the values given in `scale`.
+    /// Construct a transformation matrix which scales along the coordinate axes by
+    /// the values given in `scale`.
     pub const fn from_scale(scale: Vec2) -> Mat2 {
         Mat2::from_diagonal(scale)
     }
 
     /// Returns `true` if all elements are finite.
     ///
-    /// If any element is `NaN`, positive infinity, or negative infinity, returns `false`.
+    /// If any element is `NaN`, positive infinity, or negative infinity, returns
+    /// `false`.
pub fn is_finite(&self) -> bool { let mut is_finite = true; for x in self.0 { @@ -59,7 +62,8 @@ impl Mat2 { is_finite } - /// Returns `true` if any element is positive infinity, or negative infinity, and `false` otherwise. + /// Returns `true` if any element is positive infinity or negative infinity, + /// and `false` otherwise. pub fn is_infinite(&self) -> bool { let mut is_infinite = false; for x in self.0 { diff --git a/libs/narcissus-maths/src/mat3.rs b/libs/narcissus-maths/src/mat3.rs index 520eadd..4e39f85 100644 --- a/libs/narcissus-maths/src/mat3.rs +++ b/libs/narcissus-maths/src/mat3.rs @@ -38,7 +38,8 @@ impl Mat3 { unsafe { std::mem::transmute(rows) } } - /// Construct a matrix with the provided `diagonal` and all other values set to `0.0`. + /// Construct a matrix with the provided `diagonal` and all other values set to + /// `0.0`. pub const fn from_diagonal(diagonal: Vec3) -> Mat3 { Mat3::from_rows([ [diagonal.x, 0.0, 0.0], @@ -47,15 +48,18 @@ impl Mat3 { ]) } - /// Construct a transformation matrix which scales along the coordinate axis by the values given in `scale`. + /// Construct a transformation matrix which scales along the coordinate axes by + /// the values given in `scale`. pub const fn from_scale(scale: Vec3) -> Mat3 { Mat3::from_diagonal(scale) } - /// Constructs a transformation matrix which rotates around the given `axis` by `angle`. + /// Constructs a transformation matrix which rotates around the given `axis` by + /// `rotation`. /// - /// In a right-handed coordinate system, positive angles rotate counter-clockwise around `axis` - /// where `axis` is pointing toward the observer. + /// In a right-handed coordinate system, positive angles rotate + /// counter-clockwise around `axis` where `axis` is pointing toward the + /// observer. pub fn from_axis_rotation(axis: Vec3, rotation: HalfTurn) -> Mat3 { let (sin, cos) = sin_cos_pi_f32(rotation.as_f32()); let axis_sin = axis * sin; @@ -85,7 +89,8 @@ impl Mat3 { /// Returns `true` if all elements are finite. /// - /// If any element is `NaN`, positive infinity, or negative infinity, returns `false`. + /// If any element is `NaN`, positive infinity, or negative infinity, returns + /// `false`. pub fn is_finite(&self) -> bool { let mut is_finite = true; for x in self.0 { @@ -94,7 +99,8 @@ impl Mat3 { is_finite } - /// Returns `true` if any element is positive infinity, or negative infinity, and `false` otherwise. + /// Returns `true` if any element is positive infinity or negative infinity, + /// and `false` otherwise. pub fn is_infinite(&self) -> bool { let mut is_infinite = false; for x in self.0 { diff --git a/libs/narcissus-maths/src/mat4.rs b/libs/narcissus-maths/src/mat4.rs index 9a8409e..0046f99 100644 --- a/libs/narcissus-maths/src/mat4.rs +++ b/libs/narcissus-maths/src/mat4.rs @@ -80,7 +80,8 @@ impl Mat4 { result } - /// Construct a matrix with the provided `diagonal` and all other values set to `0.0`. + /// Construct a matrix with the provided `diagonal` and all other values set to + /// `0.0`. pub const fn from_diagonal(diagonal: Vec4) -> Mat4 { Mat4::from_rows([ [diagonal.x, 0.0, 0.0, 0.0], @@ -90,7 +91,8 @@ impl Mat4 { ]) } - /// Construct a transformation matrix which scales along the coordinate axes by the values given in `scale`. + /// Construct a transformation matrix which scales along the coordinate axes by + /// the values given in `scale`.
pub const fn from_scale(scale: Vec3) -> Mat4 { Mat4::from_rows([ [scale.x, 0.0, 0.0, 0.0], @@ -100,7 +102,8 @@ impl Mat4 { ]) } - /// Construct an affine transformation matrix with the given `translation` along the coordinate axes. + /// Construct an affine transformation matrix with the given `translation` + /// along the coordinate axes. pub const fn from_translation(translation: Vec3) -> Mat4 { Mat4::from_rows([ [1.0, 0.0, 0.0, translation.x], @@ -110,10 +113,12 @@ impl Mat4 { ]) } - /// Constructs a transformation matrix which rotates around the given `axis` by `angle`. + /// Constructs a transformation matrix which rotates around the given `axis` by + /// `rotation`. /// - /// In a right-handed coordinate system, positive angles rotate counter-clockwise around `axis` - /// where `axis` is pointing toward the observer. + /// In a right-handed coordinate system, positive angles rotate + /// counter-clockwise around `axis` where `axis` is pointing toward the + /// observer. pub fn from_axis_rotation(axis: Vec3, rotation: HalfTurn) -> Mat4 { let (sin, cos) = sin_cos_pi_f32(rotation.as_f32()); let axis_sin = axis * sin; @@ -145,7 +150,8 @@ impl Mat4 { ]) } - /// Constructs a 'look at' transformation from the given `eye` position, look at `center` point, and `up` vector. + /// Constructs a 'look at' transformation from the given `eye` position, look + /// at `center` point, and `up` vector. /// /// Src coordinate space: right-handed, +y-up. /// Dst coordinate space: right-handed, +y-up. @@ -193,7 +199,8 @@ impl Mat4 { ]) } - /// Creates a perspective projection matrix with reversed infinite z and \[0,1\] depth range. + /// Creates a perspective projection matrix with reversed infinite z and \[0,1\] + /// depth range. /// /// Destination coordinate space matches native vulkan clip space. /// @@ -213,7 +220,8 @@ impl Mat4 { /// Returns `true` if all elements are finite. /// - /// If any element is `NaN`, positive infinity, or negative infinity, returns `false`. + /// If any element is `NaN`, positive infinity, or negative infinity, returns + /// `false`. pub fn is_finite(&self) -> bool { let mut is_finite = true; for x in self.0 { @@ -222,7 +230,8 @@ is_finite } - /// Returns `true` if any element is positive infinity, or negative infinity, and `false` otherwise. + /// Returns `true` if any element is positive infinity or negative infinity, + /// and `false` otherwise. pub fn is_infinite(&self) -> bool { let mut is_infinite = false; for x in self.0 { @@ -252,7 +261,7 @@ impl Mat4 { ]) } - // Safety: Requires SSE2. + // SAFETY: Requires SSE2. #[inline] #[target_feature(enable = "sse2")] unsafe fn transpose_sse2(self) -> Mat4 { @@ -324,7 +333,7 @@ impl Mat4 { ) } - // Safety: Requires SSE4.1. + // SAFETY: Requires SSE4.1. #[allow(dead_code)] #[inline] #[target_feature(enable = "sse4.1")] @@ -378,7 +387,7 @@ fn mul_mat4_base(lhs: Mat4, rhs: Mat4) -> Mat4 { result } -// Safety: Requires SSE2. +// SAFETY: Requires SSE2. #[allow(dead_code)] #[inline] #[target_feature(enable = "sse2")] @@ -406,7 +415,7 @@ unsafe fn mul_mat4_sse2(lhs: Mat4, rhs: Mat4) -> Mat4 { Mat4::from_m128_array([x0, x1, x2, x3]) } -// Safety: Requires AVX2. +// SAFETY: Requires AVX2.
#[allow(dead_code)] #[inline] #[target_feature(enable = "avx2")] diff --git a/libs/narcissus-maths/src/next_after_f32.rs b/libs/narcissus-maths/src/next_after_f32.rs index 0de54d3..7402c00 100644 --- a/libs/narcissus-maths/src/next_after_f32.rs +++ b/libs/narcissus-maths/src/next_after_f32.rs @@ -1,19 +1,22 @@ -/// Calculate the next representable floating-point value following x in the direction of y. +/// Calculate the next representable floating-point value following x in the +/// direction of y. /// -/// If y is less than x, these functions will return the largest representable number less than x. +/// If y is less than x, this function will return the largest representable +/// number less than x. /// /// # Returns /// -/// On success, the function returns the next representable floating-point value after x in the +/// On success, the function returns the next representable floating-point value +/// after x in the /// direction of y. /// /// * If `x` equals `y`, then `y` is returned. /// * If `x` or `y` is a `NaN`, a `NaN` is returned. -/// * If `x` is finite, and the result would overflow, a range error occurs, and the function -/// returns `inf` with the correct mathematical sign. -/// * If `x` is not equal to `y`, and the correct function result would be subnormal, zero, or -/// underflow, a range error occurs, and either the correct value (if it can be represented), -/// or `0.0`, is returned. +/// * If `x` is finite, and the result would overflow, a range error occurs, and +/// the function returns `inf` with the correct mathematical sign. +/// * If `x` is not equal to `y`, and the correct function result would be +/// subnormal, zero, or underflow, a range error occurs, and either the +/// correct value (if it can be represented), or `0.0`, is returned. /// * If x equals y, the function returns y. pub fn next_after_f32(x: f32, y: f32) -> f32 { if x.is_nan() || y.is_nan() { diff --git a/libs/narcissus-maths/src/point2.rs b/libs/narcissus-maths/src/point2.rs index 2d5ae71..cfb0248 100644 --- a/libs/narcissus-maths/src/point2.rs +++ b/libs/narcissus-maths/src/point2.rs @@ -28,7 +28,8 @@ impl Point2 { Vec2::new(self.x, self.y) } - /// Returns a new [`Point2`] with the function `f` applied to each coordinate of `self` in order. + /// Returns a new [`Point2`] with the function `f` applied to each coordinate of + /// `self` in order. #[inline(always)] pub fn map<F>(self, mut f: F) -> Self where @@ -40,7 +41,8 @@ impl Point2 { } } - /// Returns a new [`Point2`] with the function `f` applied to each pair of components from `self` and `rhs` in order. + /// Returns a new [`Point2`] with the function `f` applied to each pair of + /// components from `self` and `rhs` in order. #[inline(always)] pub fn map2<F>(self, rhs: Self, mut f: F) -> Self where diff --git a/libs/narcissus-maths/src/point3.rs b/libs/narcissus-maths/src/point3.rs index e6379ba..725bf9f 100644 --- a/libs/narcissus-maths/src/point3.rs +++ b/libs/narcissus-maths/src/point3.rs @@ -29,7 +29,8 @@ impl Point3 { Vec3::new(self.x, self.y, self.z) } - /// Returns a new [`Point3`] with the function `f` applied to each coordinate of `self` in order. + /// Returns a new [`Point3`] with the function `f` applied to each coordinate of + /// `self` in order. #[inline(always)] pub fn map<F>(self, mut f: F) -> Point3 where @@ -42,7 +43,8 @@ impl Point3 { } } - /// Returns a new [`Point3`] with the function `f` applied to each pair of components from `self` and `rhs` in order.
+ /// Returns a new [`Point3`] with the function `f` applied to each pair of + /// components from `self` and `rhs` in order. #[inline(always)] pub fn map2<F>(self, rhs: Point3, mut f: F) -> Point3 where diff --git a/libs/narcissus-maths/src/quat.rs b/libs/narcissus-maths/src/quat.rs index 43ec65d..870d909 100644 --- a/libs/narcissus-maths/src/quat.rs +++ b/libs/narcissus-maths/src/quat.rs @@ -37,7 +37,8 @@ impl Quat { Self { a, b, c, d } } - /// Returns a quaternion representing a `rotation` in half turns around the given `axis`. + /// Returns a quaternion representing a `rotation` in half-turns around the + /// given `axis`. pub fn from_axis_rotation(axis: Vec3, rotation: HalfTurn) -> Self { let (s, c) = sin_cos_pi_f32(rotation.as_f32() * 0.5); let v = axis * s; diff --git a/libs/narcissus-maths/src/sin_cos_pi.rs b/libs/narcissus-maths/src/sin_cos_pi.rs index 65c9aaa..95bd0f9 100644 --- a/libs/narcissus-maths/src/sin_cos_pi.rs +++ b/libs/narcissus-maths/src/sin_cos_pi.rs @@ -52,7 +52,7 @@ pub fn sin_cos_pi_f32(a: f32) -> (f32, f32) { // Range reduction. let r = round_ties_to_even(a + a); - // Safety: The clamp above avoids the possibility of overflow here. + // SAFETY: The clamp above avoids the possibility of overflow here. let i = unsafe { r.to_int_unchecked::<i32>() } as u32; let r = r.mul_add(-0.5, a); diff --git a/libs/narcissus-maths/src/tan_pi.rs b/libs/narcissus-maths/src/tan_pi.rs index 37700bc..22d68d7 100644 --- a/libs/narcissus-maths/src/tan_pi.rs +++ b/libs/narcissus-maths/src/tan_pi.rs @@ -1,5 +1,5 @@ -// Based on Norbert Juffa's tanpi posted to the cuda forums. Using my own polynomial, but that might -// be worse, todo: check whether polynomial is worse. +// Based on Norbert Juffa's tanpi posted to the CUDA forums. Using my own +// polynomial, which might be worse. TODO: check whether the polynomial is worse. // https://forums.developer.nvidia.com/t/an-implementation-of-single-precision-tanpi-for-cuda/48024 // // Sollya code for generating these polynomials is in `doc/sincostan.sollya` @@ -18,7 +18,8 @@ const F32_TAN_PI_15_K: [f32; 7] = unsafe { ]) }; -/// Computes the tangent of `a` expressed in multiples of *pi* radians, or half-turns. +/// Computes the tangent of `a` expressed in multiples of *pi* radians, or +/// half-turns. /// /// Returns `tan(a * pi)` /// @@ -35,7 +36,7 @@ pub fn tan_pi_f32(a: f32) -> f32 { // Range reduction. let r = round_ties_to_even(a + a); - // Safety: The clamp above avoids the possibility of overflow here. + // SAFETY: The clamp above avoids the possibility of overflow here. let i = unsafe { r.to_int_unchecked::<i32>() } as u32; let r = r.mul_add(-0.5, a); diff --git a/libs/narcissus-maths/src/vec2.rs b/libs/narcissus-maths/src/vec2.rs index 898fee5..746372e 100644 --- a/libs/narcissus-maths/src/vec2.rs +++ b/libs/narcissus-maths/src/vec2.rs @@ -42,7 +42,8 @@ impl Vec2 { } } - /// Returns a new [`Vec2`] with the function `f` applied to each pair of components from `self` and `rhs` in order. + /// Returns a new [`Vec2`] with the function `f` applied to each pair of + /// components from `self` and `rhs` in order. #[inline(always)] pub fn map2<F>(self, rhs: Vec2, mut f: F) -> Vec2 where diff --git a/libs/narcissus-maths/src/vec3.rs b/libs/narcissus-maths/src/vec3.rs index 975d779..4362c75 100644 --- a/libs/narcissus-maths/src/vec3.rs +++ b/libs/narcissus-maths/src/vec3.rs @@ -45,7 +45,8 @@ impl Vec3 { } } - /// Returns a new [`Vec3`] with the function `f` applied to each pair of components from `self` and `rhs` in order.
+ /// Returns a new [`Vec3`] with the function `f` applied to each pair of + /// components from `self` and `rhs` in order. #[inline(always)] pub fn map2<F>(self, rhs: Self, mut f: F) -> Vec3 where diff --git a/libs/narcissus-maths/src/vec4.rs b/libs/narcissus-maths/src/vec4.rs index 9ce2140..889da70 100644 --- a/libs/narcissus-maths/src/vec4.rs +++ b/libs/narcissus-maths/src/vec4.rs @@ -42,7 +42,8 @@ impl Vec4 { } } - /// Returns a new [`Vec4`] with the function `f` applied to each pair of components from `self` and `rhs` in order. + /// Returns a new [`Vec4`] with the function `f` applied to each pair of + /// components from `self` and `rhs` in order. #[inline(always)] pub fn map2<F>(self, rhs: Self, mut f: F) -> Vec4 where -- 2.49.0
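
Note for reviewers: several of the doc comments rewrapped above describe the half-turn angle convention shared by `sin_cos_pi_f32`, `tan_pi_f32`, `HalfTurn`, and the `from_axis_rotation` constructors: angles are given in multiples of pi radians, so `0.5` is a quarter turn and `1.0` a half turn. Below is a minimal sketch of what that convention implies, assuming `sin_cos_pi_f32` is exported from the narcissus-maths crate root; the printed values are illustrative expectations, not guaranteed by this patch.

    use narcissus_maths::sin_cos_pi_f32;

    fn main() {
        // 0.25 half-turns = pi/4 radians; expect sin and cos both ~= 0.70710678.
        let (s, c) = sin_cos_pi_f32(0.25);
        println!("sin(pi/4) = {s}, cos(pi/4) = {c}");

        // 0.5 half-turns = pi/2 radians. Because the argument is multiplied by
        // pi only conceptually (the range reduction above works on multiples of
        // 0.5 directly), a sinpi-style function can return exactly (1.0, 0.0)
        // here, with no rounding error from approximating pi.
        let (s, c) = sin_cos_pi_f32(0.5);
        println!("sin(pi/2) = {s}, cos(pi/2) = {c}");
    }

This is also why `Quat::from_axis_rotation` can pass `rotation.as_f32() * 0.5` straight to `sin_cos_pi_f32`: halving the half-turn count is exact in floating point, whereas halving an angle already expressed in radians would compound rounding from the pi multiplication.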