git.nega.tv - josh/narcissus/commitdiff
Initial Commit
author Joshua Simmons <josh@nega.tv>
Sat, 22 Jan 2022 20:36:05 +0000 (21:36 +0100)
committer Joshua Simmons <josh@nega.tv>
Sun, 28 Aug 2022 10:02:03 +0000 (12:02 +0200)
57 files changed:
.cargo/config.toml [new file with mode: 0644]
.gitignore [new file with mode: 0644]
Cargo.lock [new file with mode: 0644]
Cargo.toml [new file with mode: 0644]
README.md [new file with mode: 0644]
narcissus-app/Cargo.toml [new file with mode: 0644]
narcissus-app/src/lib.rs [new file with mode: 0644]
narcissus-app/src/sdl.rs [new file with mode: 0644]
narcissus-core/Cargo.toml [new file with mode: 0644]
narcissus-core/src/bitset.rs [new file with mode: 0644]
narcissus-core/src/fixed_vec.rs [new file with mode: 0644]
narcissus-core/src/hybrid_vec.rs [new file with mode: 0644]
narcissus-core/src/lib.rs [new file with mode: 0644]
narcissus-core/src/manual_arc.rs [new file with mode: 0644]
narcissus-core/src/mutex.rs [new file with mode: 0644]
narcissus-core/src/pool.rs [new file with mode: 0644]
narcissus-core/src/ref_count.rs [new file with mode: 0644]
narcissus-core/src/uuid.rs [new file with mode: 0644]
narcissus-core/src/virtual_mem.rs [new file with mode: 0644]
narcissus-core/src/virtual_vec/mod.rs [new file with mode: 0644]
narcissus-core/src/virtual_vec/raw_virtual_vec.rs [new file with mode: 0644]
narcissus-core/src/virtual_vec/virtual_deque.rs [new file with mode: 0644]
narcissus-core/src/virtual_vec/virtual_vec.rs [new file with mode: 0644]
narcissus-core/src/waiter.rs [new file with mode: 0644]
narcissus-gpu/Cargo.toml [new file with mode: 0644]
narcissus-gpu/src/lib.rs [new file with mode: 0644]
narcissus-gpu/src/vulkan.rs [new file with mode: 0644]
narcissus-maths/Cargo.toml [new file with mode: 0644]
narcissus-maths/src/lib.rs [new file with mode: 0644]
narcissus-world/Cargo.toml [new file with mode: 0644]
narcissus-world/src/lib.rs [new file with mode: 0644]
narcissus/Cargo.toml [new file with mode: 0644]
narcissus/src/main.rs [new file with mode: 0644]
narcissus/src/shaders/triangle.frag.glsl [new file with mode: 0644]
narcissus/src/shaders/triangle.frag.spv [new file with mode: 0644]
narcissus/src/shaders/triangle.vert.glsl [new file with mode: 0644]
narcissus/src/shaders/triangle.vert.spv [new file with mode: 0644]
renderdoc-sys/Cargo.toml [new file with mode: 0644]
renderdoc-sys/src/helpers.rs [new file with mode: 0644]
renderdoc-sys/src/lib.rs [new file with mode: 0644]
sdl2-sys/Cargo.toml [new file with mode: 0644]
sdl2-sys/build.rs [new file with mode: 0644]
sdl2-sys/src/lib.rs [new file with mode: 0644]
vulkan-sys/.gitignore [new file with mode: 0644]
vulkan-sys/Cargo.toml [new file with mode: 0644]
vulkan-sys/examples/triangle.frag.glsl [new file with mode: 0644]
vulkan-sys/examples/triangle.frag.spv [new file with mode: 0644]
vulkan-sys/examples/triangle.rs [new file with mode: 0644]
vulkan-sys/examples/triangle.vert.glsl [new file with mode: 0644]
vulkan-sys/examples/triangle.vert.spv [new file with mode: 0644]
vulkan-sys/src/enums.rs [new file with mode: 0644]
vulkan-sys/src/flags.rs [new file with mode: 0644]
vulkan-sys/src/functions.rs [new file with mode: 0644]
vulkan-sys/src/handles.rs [new file with mode: 0644]
vulkan-sys/src/helpers.rs [new file with mode: 0644]
vulkan-sys/src/lib.rs [new file with mode: 0644]
vulkan-sys/src/structs.rs [new file with mode: 0644]

diff --git a/.cargo/config.toml b/.cargo/config.toml
new file mode 100644 (file)
index 0000000..2c3dbcd
--- /dev/null
+++ b/.cargo/config.toml
@@ -0,0 +1,3 @@
+[target.x86_64-unknown-linux-gnu]
+linker = "clang"
+rustflags = ["-C", "link-arg=-fuse-ld=/usr/bin/mold"]
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
new file mode 100644 (file)
index 0000000..5d6d32f
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,4 @@
+target/
+.vscode
+perf.data
+perf.data.old
diff --git a/Cargo.lock b/Cargo.lock
new file mode 100644 (file)
index 0000000..d2584da
--- /dev/null
+++ b/Cargo.lock
@@ -0,0 +1,73 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "libc"
+version = "0.2.112"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1b03d17f364a3a042d5e5d46b053bbbf82c92c9430c592dd4c064dc6ee997125"
+
+[[package]]
+name = "narcissus"
+version = "0.1.0"
+dependencies = [
+ "libc",
+ "narcissus-app",
+ "narcissus-core",
+ "narcissus-gpu",
+]
+
+[[package]]
+name = "narcissus-app"
+version = "0.1.0"
+dependencies = [
+ "narcissus-core",
+ "sdl2-sys",
+]
+
+[[package]]
+name = "narcissus-core"
+version = "0.1.0"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "narcissus-gpu"
+version = "0.1.0"
+dependencies = [
+ "libc",
+ "narcissus-app",
+ "narcissus-core",
+ "vulkan-sys",
+]
+
+[[package]]
+name = "narcissus-math"
+version = "0.1.0"
+
+[[package]]
+name = "narcissus-world"
+version = "0.1.0"
+dependencies = [
+ "narcissus-core",
+]
+
+[[package]]
+name = "renderdoc-sys"
+version = "0.1.0"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "sdl2-sys"
+version = "0.1.0"
+
+[[package]]
+name = "vulkan-sys"
+version = "0.1.0"
+dependencies = [
+ "libc",
+]
diff --git a/Cargo.toml b/Cargo.toml
new file mode 100644 (file)
index 0000000..f52bec7
--- /dev/null
+++ b/Cargo.toml
@@ -0,0 +1,20 @@
+[workspace]
+
+members = [
+    "vulkan-sys",
+    "sdl2-sys",
+    "renderdoc-sys",
+    "narcissus",
+    "narcissus-core",
+    "narcissus-gpu",
+    "narcissus-app",
+    "narcissus-maths",
+    "narcissus-world",
+]
+
+[profile.release]
+panic = "abort"
+debug = true
+
+[profile.dev]
+panic = "abort"
diff --git a/README.md b/README.md
new file mode 100644 (file)
index 0000000..3b239ef
--- /dev/null
+++ b/README.md
@@ -0,0 +1 @@
+Vanity
diff --git a/narcissus-app/Cargo.toml b/narcissus-app/Cargo.toml
new file mode 100644 (file)
index 0000000..977182d
--- /dev/null
+++ b/narcissus-app/Cargo.toml
@@ -0,0 +1,10 @@
+[package]
+name = "narcissus-app"
+version = "0.1.0"
+edition = "2021"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+narcissus-core = { path = "../narcissus-core" }
+sdl2-sys = { path = "../sdl2-sys" }
\ No newline at end of file
diff --git a/narcissus-app/src/lib.rs b/narcissus-app/src/lib.rs
new file mode 100644 (file)
index 0000000..dc4a026
--- /dev/null
+++ b/narcissus-app/src/lib.rs
@@ -0,0 +1,52 @@
+mod sdl;
+
+use std::ffi::{c_void, CStr};
+
+use narcissus_core::Handle;
+
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Default)]
+pub struct Window(Handle);
+
+impl Window {
+    pub const fn is_null(&self) -> bool {
+        self.0.is_null()
+    }
+}
+
+pub struct WindowDesc<'a> {
+    pub title: &'a str,
+    pub width: u32,
+    pub height: u32,
+}
+
+#[non_exhaustive]
+pub enum Event {
+    Unknown,
+    Quit,
+    WindowClose(Window),
+}
+
+pub trait App {
+    fn create_window(&self, desc: &WindowDesc) -> Window;
+    fn destroy_window(&self, window: Window);
+
+    fn poll_event(&self) -> Option<Event>;
+
+    fn vk_get_loader(&self) -> *mut c_void;
+    fn vk_instance_extensions(&self) -> Vec<&'static CStr>;
+    fn vk_create_surface(&self, window: Window, instance: u64) -> u64;
+    fn vk_get_surface_extent(&self, window: Window) -> (u32, u32);
+}
+
+pub fn create_app() -> Box<dyn App> {
+    Box::new(sdl::SdlApp::new().unwrap())
+}
+
+#[cfg(test)]
+mod tests {
+    #[test]
+    fn it_works() {
+        let result = 2 + 2;
+        assert_eq!(result, 4);
+    }
+}
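
A minimal sketch, not part of this commit, of how the `App` trait above is intended to be driven. The types and methods are the ones defined in this file; the loop structure itself is illustrative:

    use narcissus_app::{create_app, Event, WindowDesc};

    fn main() {
        let app = create_app();
        let window = app.create_window(&WindowDesc {
            title: "narcissus",
            width: 800,
            height: 600,
        });

        'main: loop {
            // Drain every pending event before doing per-frame work.
            while let Some(event) = app.poll_event() {
                match event {
                    Event::Quit => break 'main,
                    Event::WindowClose(w) if w == window => break 'main,
                    _ => {}
                }
            }
            // ... per-frame work would go here ...
        }

        app.destroy_window(window);
    }
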
diff --git a/narcissus-app/src/sdl.rs b/narcissus-app/src/sdl.rs
new file mode 100644 (file)
index 0000000..385d6d2
--- /dev/null
+++ b/narcissus-app/src/sdl.rs
@@ -0,0 +1,169 @@
+use std::{
+    collections::HashMap,
+    ffi::{c_void, CStr, CString},
+    mem::MaybeUninit,
+    os::raw::c_char,
+};
+
+use crate::{App, Event, Window};
+
+use narcissus_core::{Handle, Mutex, Pool};
+use sdl2_sys as sdl;
+
+struct SdlWindow(*mut sdl::Window);
+
+pub struct SdlApp {
+    windows: Mutex<Pool<SdlWindow>>,
+    window_id_to_handle: Mutex<HashMap<u32, Window>>,
+}
+
+impl SdlApp {
+    pub fn new() -> Result<Self, ()> {
+        unsafe { sdl::SDL_Init(sdl::INIT_VIDEO) };
+        Ok(Self {
+            windows: Mutex::new(Pool::new()),
+            window_id_to_handle: Mutex::new(HashMap::new()),
+        })
+    }
+}
+
+impl Drop for SdlApp {
+    fn drop(&mut self) {
+        for window in self.windows.get_mut().values() {
+            unsafe { sdl::SDL_DestroyWindow(window.0) };
+        }
+        unsafe { sdl::SDL_Quit() };
+    }
+}
+
+impl App for SdlApp {
+    fn create_window(&self, desc: &crate::WindowDesc) -> Window {
+        let title = CString::new(desc.title).unwrap();
+        let window = unsafe {
+            sdl::SDL_CreateWindow(
+                title.as_ptr(),
+                0,
+                0,
+                desc.width as i32,
+                desc.height as i32,
+                sdl::WINDOW_VULKAN | sdl::WINDOW_SHOWN | sdl::WINDOW_RESIZABLE,
+            )
+        };
+        assert!(!window.is_null());
+        let window_id = unsafe { sdl::SDL_GetWindowID(window) };
+
+        let mut window_id_to_handle = self.window_id_to_handle.lock();
+        let mut windows = self.windows.lock();
+
+        let handle = Window(windows.insert(SdlWindow(window)));
+        window_id_to_handle.insert(window_id, handle);
+        handle
+    }
+
+    fn destroy_window(&self, window: Window) {
+        if let Some(window) = self.windows.lock().remove(window.0) {
+            unsafe { sdl::SDL_DestroyWindow(window.0) };
+        }
+    }
+
+    fn vk_get_loader(&self) -> *mut c_void {
+        unsafe {
+            sdl::SDL_Vulkan_LoadLibrary(std::ptr::null());
+            sdl::SDL_Vulkan_GetVkGetInstanceProcAddr()
+        }
+    }
+
+    fn vk_instance_extensions(&self) -> Vec<&'static CStr> {
+        let mut count: u32 = 0;
+        let ret = unsafe {
+            sdl::SDL_Vulkan_GetInstanceExtensions(
+                std::ptr::null_mut(),
+                &mut count,
+                std::ptr::null_mut(),
+            )
+        };
+        assert_eq!(ret, 1, "failed to query instance extensions");
+        if count == 0 {
+            return Vec::new();
+        }
+
+        let mut names: Vec<*const c_char> = vec![std::ptr::null(); count as usize];
+        let ret = unsafe {
+            sdl::SDL_Vulkan_GetInstanceExtensions(
+                std::ptr::null_mut(),
+                &mut count,
+                names.as_mut_ptr(),
+            )
+        };
+        assert_eq!(ret, 1, "failed to query instance extensions");
+
+        names
+            .iter()
+            .map(|&val| unsafe { CStr::from_ptr(val) })
+            .collect()
+    }
+
+    fn vk_create_surface(&self, window: Window, instance: u64) -> u64 {
+        let windows = self.windows.lock();
+        let window = windows.get(window.0).unwrap();
+        let mut surface = !0;
+        let ret = unsafe { sdl::SDL_Vulkan_CreateSurface(window.0, instance, &mut surface) };
+        assert_eq!(ret, 1, "failed to create vulkan surface");
+        surface
+    }
+
+    fn vk_get_surface_extent(&self, window: Window) -> (u32, u32) {
+        let windows = self.windows.lock();
+        let window = windows.get(window.0).unwrap();
+        let mut w = 0;
+        let mut h = 0;
+        unsafe {
+            sdl::SDL_Vulkan_GetDrawableSize(window.0, &mut w, &mut h);
+        }
+        (w as u32, h as u32)
+    }
+
+    fn poll_event(&self) -> Option<Event> {
+        let mut event = MaybeUninit::uninit();
+        if unsafe { sdl::SDL_PollEvent(event.as_mut_ptr()) } == 0 {
+            return None;
+        }
+
+        let event = unsafe { event.assume_init() };
+        let e = match unsafe { event.r#type } {
+            sdl2_sys::EventType::QUIT => Event::Quit,
+            sdl2_sys::EventType::WINDOWEVENT => match unsafe { event.window.event } {
+                sdl::WindowEventId::None => Event::Unknown,
+                sdl::WindowEventId::Shown => Event::Unknown,
+                sdl::WindowEventId::Hidden => Event::Unknown,
+                sdl::WindowEventId::Exposed => Event::Unknown,
+                sdl::WindowEventId::Moved => Event::Unknown,
+                sdl::WindowEventId::Resized => Event::Unknown,
+                sdl::WindowEventId::SizeChanged => Event::Unknown,
+                sdl::WindowEventId::Minimized => Event::Unknown,
+                sdl::WindowEventId::Maximized => Event::Unknown,
+                sdl::WindowEventId::Restored => Event::Unknown,
+                sdl::WindowEventId::Enter => Event::Unknown,
+                sdl::WindowEventId::Leave => Event::Unknown,
+                sdl::WindowEventId::FocusGained => Event::Unknown,
+                sdl::WindowEventId::FocusLost => Event::Unknown,
+                sdl::WindowEventId::Close => {
+                    let handle = self
+                        .window_id_to_handle
+                        .lock()
+                        .get(&unsafe { event.window.window_id })
+                        .copied()
+                        .unwrap_or_else(|| Window(Handle::null()));
+                    Event::WindowClose(handle)
+                }
+                sdl::WindowEventId::TakeFocus => Event::Unknown,
+                sdl::WindowEventId::HitTest => Event::Unknown,
+                sdl::WindowEventId::IccprofChanged => Event::Unknown,
+                sdl::WindowEventId::DisplayChanged => Event::Unknown,
+            },
+            _ => Event::Unknown,
+        };
+
+        Some(e)
+    }
+}
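
The `vk_*` hooks above deliberately traffic in raw `u64` handles so that `narcissus-app` need not depend on `vulkan-sys`. A sketch, under the assumption that instance creation happens elsewhere (in `narcissus-gpu` below), of the order the hooks are meant to be called in:

    use std::ffi::CStr;
    use narcissus_app::{App, Window};

    // Illustrative helper only: `instance` is a raw VkInstance created by the
    // caller after loading the library via `vk_get_loader()` and enabling the
    // extensions reported by `vk_instance_extensions()`.
    fn create_surface(app: &dyn App, window: Window, instance: u64) -> (u64, u32, u32) {
        let _required: Vec<&'static CStr> = app.vk_instance_extensions();
        // Create the surface, then size the swapchain from the drawable extent.
        let surface = app.vk_create_surface(window, instance);
        let (width, height) = app.vk_get_surface_extent(window);
        (surface, width, height)
    }
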
diff --git a/narcissus-core/Cargo.toml b/narcissus-core/Cargo.toml
new file mode 100644 (file)
index 0000000..5b9068f
--- /dev/null
+++ b/narcissus-core/Cargo.toml
@@ -0,0 +1,9 @@
+[package]
+name = "narcissus-core"
+version = "0.1.0"
+edition = "2021"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+libc = "0.2.112"
\ No newline at end of file
diff --git a/narcissus-core/src/bitset.rs b/narcissus-core/src/bitset.rs
new file mode 100644 (file)
index 0000000..34a1eb2
--- /dev/null
+++ b/narcissus-core/src/bitset.rs
@@ -0,0 +1,96 @@
+pub trait Bits: Copy + Default {
+    fn is_zero(self) -> bool;
+    /// Clear the least significant set bit and return its index.
+    fn clear_least_significant_set_bit(&mut self) -> u32;
+}
+
+#[derive(Clone)]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+pub struct BitIter<T, I> {
+    base: usize,
+    words: I,
+    word: T,
+}
+
+impl<T, I> BitIter<T, I>
+where
+    T: Bits,
+    I: Iterator<Item = T>,
+{
+    pub fn new(mut words: I) -> Self {
+        let word = words.next().unwrap_or_default();
+        Self {
+            base: 0,
+            words,
+            word,
+        }
+    }
+}
+
+impl<T, I> Iterator for BitIter<T, I>
+where
+    T: Bits,
+    I: Iterator<Item = T>,
+{
+    type Item = usize;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        while self.word.is_zero() {
+            self.word = self.words.next()?;
+            self.base += std::mem::size_of::<T>() * 8;
+        }
+        let index = self.word.clear_least_significant_set_bit();
+        Some(self.base + index as usize)
+    }
+}
+
+impl Bits for u64 {
+    fn is_zero(self) -> bool {
+        self == 0
+    }
+
+    fn clear_least_significant_set_bit(&mut self) -> u32 {
+        let b = *self;
+        let t = b & b.wrapping_neg(); // isolate least significant set bit; avoids overflow for b == 0
+        let index = b.trailing_zeros();
+        *self ^= t;
+        index
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use crate::*;
+    #[test]
+    fn iterate_bits() {
+        {
+            let bits_iter = BitIter::new(std::iter::once(
+                0b0101_0101_0101_0101_0101_0101_0101_0101_0101_0101_0101_0101_0101_0101_0101_0101,
+            ));
+            let mut i = 0;
+            for index in bits_iter {
+                assert_eq!(index, i);
+                i += 2;
+            }
+            assert_eq!(i, 64);
+        }
+
+        {
+            let bits_iter = BitIter::new(
+                std::iter::repeat(
+                    0b0101_0101_0101_0101_0101_0101_0101_0101_0101_0101_0101_0101_0101_0101_0101_0101,
+                )
+                .take(10),
+            );
+            let mut i = 0;
+            for index in bits_iter {
+                assert_eq!(index, i);
+                i += 2;
+            }
+            assert_eq!(i, 64 * 10);
+        }
+
+        assert_eq!(BitIter::new(std::iter::empty::<u64>()).next(), None);
+        assert_eq!(BitIter::new(std::iter::repeat(0).take(10)).next(), None);
+    }
+}
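
Usage is straightforward; a small example, assuming the `narcissus_core::BitIter` re-export (see `lib.rs` below). Bit `i` of word `w` maps to index `w * 64 + i`:

    use narcissus_core::BitIter;

    fn main() {
        let words: [u64; 2] = [0b1001, 1 << 3];
        // Word 0 contributes bits 0 and 3; word 1 contributes bit 64 + 3.
        let indices: Vec<usize> = BitIter::new(words.iter().copied()).collect();
        assert_eq!(indices, [0, 3, 67]);
    }
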
diff --git a/narcissus-core/src/fixed_vec.rs b/narcissus-core/src/fixed_vec.rs
new file mode 100644 (file)
index 0000000..d6f20ca
--- /dev/null
+++ b/narcissus-core/src/fixed_vec.rs
@@ -0,0 +1,554 @@
+use std::{
+    mem::MaybeUninit,
+    ops::{Index, IndexMut},
+    slice::SliceIndex,
+};
+
+pub struct FixedVec<T, const CAP: usize> {
+    len: usize,
+    buf: [MaybeUninit<T>; CAP],
+}
+
+trait ExtendWith<T> {
+    fn next(&mut self) -> T;
+    fn last(self) -> T;
+}
+
+struct ExtendElement<T>(T);
+impl<T: Clone> ExtendWith<T> for ExtendElement<T> {
+    fn next(&mut self) -> T {
+        self.0.clone()
+    }
+    fn last(self) -> T {
+        self.0
+    }
+}
+
+struct ExtendFunc<F>(F);
+impl<T, F: FnMut() -> T> ExtendWith<T> for ExtendFunc<F> {
+    fn next(&mut self) -> T {
+        (self.0)()
+    }
+    fn last(mut self) -> T {
+        (self.0)()
+    }
+}
+
+impl<T, const CAP: usize> FixedVec<T, CAP> {
+    pub const fn new() -> Self {
+        FixedVec {
+            len: 0,
+            buf: unsafe { MaybeUninit::uninit().assume_init() },
+        }
+    }
+
+    pub const fn len(&self) -> usize {
+        self.len
+    }
+
+    pub const fn capacity(&self) -> usize {
+        CAP
+    }
+
+    pub fn truncate(&mut self, len: usize) {
+        if len >= self.len {
+            return;
+        }
+
+        // This is safe because:
+        //
+        // * the slice passed to `drop_in_place` is valid; the `len > self.len`
+        //   case avoids creating an invalid slice, and
+        // * the `len` of the vector is shrunk before calling `drop_in_place`,
+        //   such that no value will be dropped twice in case `drop_in_place`
+        //   were to panic once (if it panics twice, the program aborts).
+        unsafe {
+            let remaining_len = self.len - len;
+            let s = std::ptr::slice_from_raw_parts_mut(self.as_mut_ptr().add(len), remaining_len);
+            self.len = len;
+            std::ptr::drop_in_place(s);
+        }
+    }
+
+    #[inline]
+    pub fn as_slice(&self) -> &[T] {
+        self
+    }
+
+    #[inline]
+    pub fn as_mut_slice(&mut self) -> &mut [T] {
+        self
+    }
+
+    #[inline]
+    pub fn as_ptr(&self) -> *const T {
+        // We shadow the slice method of the same name to avoid going through
+        // `deref`, which creates an intermediate reference.
+        self.buf.as_ptr() as _
+    }
+
+    #[inline]
+    pub fn as_mut_ptr(&mut self) -> *mut T {
+        self.buf.as_mut_ptr() as _
+    }
+
+    #[inline]
+    pub unsafe fn set_len(&mut self, new_len: usize) {
+        debug_assert!(new_len <= self.capacity());
+        self.len = new_len;
+    }
+
+    pub fn swap_remove(&mut self, index: usize) -> T {
+        #[cold]
+        #[inline(never)]
+        fn assert_failed(index: usize, len: usize) -> ! {
+            panic!(
+                "swap_remove index (is {}) should be < len (is {})",
+                index, len
+            );
+        }
+
+        let len = self.len();
+        if index >= len {
+            assert_failed(index, len);
+        }
+        unsafe {
+            // We replace self[index] with the last element. Note that if the
+            // bounds check above succeeds there must be a last element (which
+            // can be self[index] itself).
+            let value = std::ptr::read(self.as_ptr().add(index));
+            let base_ptr = self.as_mut_ptr();
+            std::ptr::copy(base_ptr.add(len - 1), base_ptr.add(index), 1);
+            self.set_len(len - 1);
+            value
+        }
+    }
+
+    pub fn insert(&mut self, index: usize, element: T) {
+        #[cold]
+        #[inline(never)]
+        fn assert_failed(index: usize, len: usize) -> ! {
+            panic!(
+                "insertion index (is {}) should be <= len (is {})",
+                index, len
+            );
+        }
+
+        let len = self.len();
+        if index > len {
+            assert_failed(index, len);
+        }
+
+        // space for the new element
+        if len == CAP {
+            panic!("buffer is full (capacity is {})", CAP);
+        }
+
+        unsafe {
+            // infallible
+            // The spot to put the new value
+            {
+                let p = self.as_mut_ptr().add(index);
+                // Shift everything over to make space. (Duplicating the
+                // `index`th element into two consecutive places.)
+                std::ptr::copy(p, p.offset(1), len - index);
+                // Write it in, overwriting the first copy of the `index`th
+                // element.
+                std::ptr::write(p, element);
+            }
+            self.set_len(len + 1);
+        }
+    }
+
+    #[track_caller]
+    pub fn remove(&mut self, index: usize) -> T {
+        #[cold]
+        #[inline(never)]
+        #[track_caller]
+        fn assert_failed(index: usize, len: usize) -> ! {
+            panic!("removal index (is {}) should be < len (is {})", index, len);
+        }
+
+        let len = self.len();
+        if index >= len {
+            assert_failed(index, len);
+        }
+        unsafe {
+            // infallible
+            let ret;
+            {
+                // the place we are taking from.
+                let ptr = self.as_mut_ptr().add(index);
+                // copy it out, unsafely having a copy of the value on
+                // the stack and in the vector at the same time.
+                ret = std::ptr::read(ptr);
+
+                // Shift everything down to fill in that spot.
+                std::ptr::copy(ptr.offset(1), ptr, len - index - 1);
+            }
+            self.set_len(len - 1);
+            ret
+        }
+    }
+
+    pub fn retain<F>(&mut self, mut f: F)
+    where
+        F: FnMut(&T) -> bool,
+    {
+        self.retain_mut(|elem| f(elem));
+    }
+
+    pub fn retain_mut<F>(&mut self, mut f: F)
+    where
+        F: FnMut(&mut T) -> bool,
+    {
+        let original_len = self.len();
+        // Avoid double drop if the drop guard is not executed,
+        // since we may make some holes during the process.
+        unsafe { self.set_len(0) };
+
+        // Vec: [Kept, Kept, Hole, Hole, Hole, Hole, Unchecked, Unchecked]
+        //      |<-              processed len   ->| ^- next to check
+        //                  |<-  deleted cnt     ->|
+        //      |<-              original_len                          ->|
+        // Kept: Elements which predicate returns true on.
+        // Hole: Moved or dropped element slot.
+        // Unchecked: Unchecked valid elements.
+        //
+        // This drop guard will be invoked if the predicate or an element's `drop` panics.
+        // It shifts unchecked elements to cover holes and calls `set_len` with the correct length.
+        // In cases where the predicate and `drop` never panic, it will be optimized out.
+        struct BackshiftOnDrop<'a, T, const CAP: usize> {
+            v: &'a mut FixedVec<T, CAP>,
+            processed_len: usize,
+            deleted_cnt: usize,
+            original_len: usize,
+        }
+
+        impl<T, const CAP: usize> Drop for BackshiftOnDrop<'_, T, CAP> {
+            fn drop(&mut self) {
+                if self.deleted_cnt > 0 {
+                    // SAFETY: Trailing unchecked items must be valid since we never touch them.
+                    unsafe {
+                        std::ptr::copy(
+                            self.v.as_ptr().add(self.processed_len),
+                            self.v
+                                .as_mut_ptr()
+                                .add(self.processed_len - self.deleted_cnt),
+                            self.original_len - self.processed_len,
+                        );
+                    }
+                }
+                // SAFETY: After filling holes, all items are in contiguous memory.
+                unsafe {
+                    self.v.set_len(self.original_len - self.deleted_cnt);
+                }
+            }
+        }
+
+        let mut g = BackshiftOnDrop {
+            v: self,
+            processed_len: 0,
+            deleted_cnt: 0,
+            original_len,
+        };
+
+        fn process_loop<F, T, const CAP: usize, const DELETED: bool>(
+            original_len: usize,
+            f: &mut F,
+            g: &mut BackshiftOnDrop<'_, T, CAP>,
+        ) where
+            F: FnMut(&mut T) -> bool,
+        {
+            while g.processed_len != original_len {
+                // SAFETY: Unchecked element must be valid.
+                let cur = unsafe { &mut *g.v.as_mut_ptr().add(g.processed_len) };
+                if !f(cur) {
+                    // Advance early to avoid double drop if `drop_in_place` panicked.
+                    g.processed_len += 1;
+                    g.deleted_cnt += 1;
+                    // SAFETY: We never touch this element again after dropped.
+                    unsafe { std::ptr::drop_in_place(cur) };
+                    // We already advanced the counter.
+                    if DELETED {
+                        continue;
+                    } else {
+                        break;
+                    }
+                }
+                if DELETED {
+                    // SAFETY: `deleted_cnt` > 0, so the hole slot must not overlap with current element.
+                    // We use copy for move, and never touch this element again.
+                    unsafe {
+                        let hole_slot = g.v.as_mut_ptr().add(g.processed_len - g.deleted_cnt);
+                        std::ptr::copy_nonoverlapping(cur, hole_slot, 1);
+                    }
+                }
+                g.processed_len += 1;
+            }
+        }
+
+        // Stage 1: Nothing was deleted.
+        process_loop::<F, T, CAP, false>(original_len, &mut f, &mut g);
+
+        // Stage 2: Some elements were deleted.
+        process_loop::<F, T, CAP, true>(original_len, &mut f, &mut g);
+
+        // All item are processed. This can be optimized to `set_len` by LLVM.
+        drop(g);
+    }
+
+    #[inline]
+    pub fn dedup_by_key<F, K>(&mut self, mut key: F)
+    where
+        F: FnMut(&mut T) -> K,
+        K: PartialEq,
+    {
+        self.dedup_by(|a, b| key(a) == key(b))
+    }
+
+    pub fn dedup_by<F>(&mut self, mut same_bucket: F)
+    where
+        F: FnMut(&mut T, &mut T) -> bool,
+    {
+        let len = self.len();
+        if len <= 1 {
+            return;
+        }
+
+        /* INVARIANT: vec.len() > read >= write > write-1 >= 0 */
+        struct FillGapOnDrop<'a, T, const CAP: usize> {
+            /* Offset of the element we want to check if it is duplicate */
+            read: usize,
+
+            /* Offset of the place where we want to place the non-duplicate
+             * when we find it. */
+            write: usize,
+
+            /* The Vec that would need correction if `same_bucket` panicked */
+            vec: &'a mut FixedVec<T, CAP>,
+        }
+
+        impl<'a, T, const CAP: usize> Drop for FillGapOnDrop<'a, T, CAP> {
+            fn drop(&mut self) {
+                /* This code gets executed when `same_bucket` panics */
+
+                /* SAFETY: invariant guarantees that `read - write`
+                 * and `len - read` never overflow and that the copy is always
+                 * in-bounds. */
+                unsafe {
+                    let ptr = self.vec.as_mut_ptr();
+                    let len = self.vec.len();
+
+                    /* How many items were left when `same_bucket` panicked.
+                     * Basically vec[read..].len() */
+                    let items_left = len.wrapping_sub(self.read);
+
+                    /* Pointer to first item in vec[write..write+items_left] slice */
+                    let dropped_ptr = ptr.add(self.write);
+                    /* Pointer to first item in vec[read..] slice */
+                    let valid_ptr = ptr.add(self.read);
+
+                    /* Copy `vec[read..]` to `vec[write..write+items_left]`.
+                     * The slices can overlap, so `copy_nonoverlapping` cannot be used */
+                    std::ptr::copy(valid_ptr, dropped_ptr, items_left);
+
+                    /* How many items have been already dropped
+                     * Basically vec[read..write].len() */
+                    let dropped = self.read.wrapping_sub(self.write);
+
+                    self.vec.set_len(len - dropped);
+                }
+            }
+        }
+
+        let mut gap = FillGapOnDrop {
+            read: 1,
+            write: 1,
+            vec: self,
+        };
+        let ptr = gap.vec.as_mut_ptr();
+
+        /* Drop items while going through Vec, it should be more efficient than
+         * doing slice partition_dedup + truncate */
+
+        /* SAFETY: Because of the invariant, read_ptr, prev_ptr and write_ptr
+         * are always in-bounds and read_ptr never aliases prev_ptr */
+        unsafe {
+            while gap.read < len {
+                let read_ptr = ptr.add(gap.read);
+                let prev_ptr = ptr.add(gap.write.wrapping_sub(1));
+
+                if same_bucket(&mut *read_ptr, &mut *prev_ptr) {
+                    // Increase `gap.read` now since the drop may panic.
+                    gap.read += 1;
+                    /* We have found duplicate, drop it in-place */
+                    std::ptr::drop_in_place(read_ptr);
+                } else {
+                    let write_ptr = ptr.add(gap.write);
+
+                    /* Because `read_ptr` can be equal to `write_ptr`, we either
+                     * have to use `copy` or conditional `copy_nonoverlapping`.
+                     * Looks like the first option is faster. */
+                    std::ptr::copy(read_ptr, write_ptr, 1);
+
+                    /* We have filled that place, so go further */
+                    gap.write += 1;
+                    gap.read += 1;
+                }
+            }
+
+            /* Technically we could let `gap` clean up with its Drop, but
+             * when `same_bucket` is guaranteed to not panic, this bloats a little
+             * the codegen, so we just do it manually */
+            gap.vec.set_len(gap.write);
+            std::mem::forget(gap);
+        }
+    }
+
+    fn extend_with<E: ExtendWith<T>>(&mut self, n: usize, mut value: E) {
+        if n > CAP.wrapping_sub(self.len) {
+            panic!("capacity overflow");
+        }
+
+        unsafe {
+            let mut ptr = self.as_mut_ptr().add(self.len());
+
+            // Write all elements except the last one
+            for _ in 1..n {
+                std::ptr::write(ptr, value.next());
+                ptr = ptr.offset(1);
+                // Increment the length in every step in case next() panics
+                self.len += 1;
+            }
+
+            if n > 0 {
+                // We can write the last element directly without cloning needlessly
+                std::ptr::write(ptr, value.last());
+                self.len += 1;
+            }
+        }
+    }
+
+    #[inline]
+    pub fn push(&mut self, value: T) {
+        if self.len == CAP {
+            panic!("capacity overflow (is {})", CAP);
+        }
+        unsafe {
+            let end = self.as_mut_ptr().add(self.len);
+            std::ptr::write(end, value);
+            self.len += 1;
+        }
+    }
+
+    #[inline]
+    pub fn pop(&mut self) -> Option<T> {
+        if self.len == 0 {
+            None
+        } else {
+            unsafe {
+                self.len -= 1;
+                Some(std::ptr::read(self.as_ptr().add(self.len())))
+            }
+        }
+    }
+
+    #[inline]
+    pub fn clear(&mut self) {
+        if self.len == 0 {
+            return;
+        }
+        // This is safe because:
+        //
+        // * the slice passed to `drop_in_place` is valid; the `self.len == 0`
+        //   early return above avoids creating an invalid slice, and
+        // * the `len` of the vector is shrunk before calling `drop_in_place`,
+        //   such that no value will be dropped twice in case `drop_in_place`
+        //   were to panic once (if it panics twice, the program aborts).
+        unsafe {
+            let s = std::ptr::slice_from_raw_parts_mut(self.as_mut_ptr(), self.len);
+            self.len = 0;
+            std::ptr::drop_in_place(s);
+        }
+    }
+
+    #[inline]
+    pub fn is_empty(&self) -> bool {
+        self.len() == 0
+    }
+}
+
+impl<T: Clone, const CAP: usize> FixedVec<T, CAP> {
+    pub fn resize(&mut self, new_len: usize, value: T) {
+        let len = self.len();
+
+        if new_len > len {
+            self.extend_with(new_len - len, ExtendElement(value))
+        } else {
+            self.truncate(new_len);
+        }
+    }
+}
+
+impl<T: PartialEq, const CAP: usize> FixedVec<T, CAP> {
+    #[inline]
+    pub fn dedup(&mut self) {
+        self.dedup_by(|a, b| a == b)
+    }
+}
+
+impl<T, const CAP: usize> std::ops::Deref for FixedVec<T, CAP> {
+    type Target = [T];
+
+    fn deref(&self) -> &[T] {
+        unsafe { std::slice::from_raw_parts(self.as_ptr(), self.len) }
+    }
+}
+
+impl<T, const CAP: usize> std::ops::DerefMut for FixedVec<T, CAP> {
+    fn deref_mut(&mut self) -> &mut [T] {
+        unsafe { std::slice::from_raw_parts_mut(self.as_mut_ptr(), self.len) }
+    }
+}
+
+impl<T: std::hash::Hash, const CAP: usize> std::hash::Hash for FixedVec<T, CAP> {
+    #[inline]
+    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
+        std::hash::Hash::hash(&**self, state)
+    }
+}
+
+impl<T, I: SliceIndex<[T]>, const CAP: usize> Index<I> for FixedVec<T, CAP> {
+    type Output = I::Output;
+
+    #[inline]
+    fn index(&self, index: I) -> &Self::Output {
+        Index::index(&**self, index)
+    }
+}
+
+impl<T, I: SliceIndex<[T]>, const CAP: usize> IndexMut<I> for FixedVec<T, CAP> {
+    #[inline]
+    fn index_mut(&mut self, index: I) -> &mut Self::Output {
+        IndexMut::index_mut(&mut **self, index)
+    }
+}
+
+impl<'a, T, const CAP: usize> IntoIterator for &'a FixedVec<T, CAP> {
+    type Item = &'a T;
+    type IntoIter = std::slice::Iter<'a, T>;
+
+    fn into_iter(self) -> std::slice::Iter<'a, T> {
+        self.iter()
+    }
+}
+
+impl<'a, T, const CAP: usize> IntoIterator for &'a mut FixedVec<T, CAP> {
+    type Item = &'a mut T;
+    type IntoIter = std::slice::IterMut<'a, T>;
+
+    fn into_iter(self) -> std::slice::IterMut<'a, T> {
+        self.iter_mut()
+    }
+}
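
A short sketch of the container in use, assuming the `narcissus_core::FixedVec` re-export. The key design point is that capacity is a const generic, so storage is inline and pushing past `CAP` panics instead of reallocating:

    use narcissus_core::FixedVec;

    fn main() {
        let mut v: FixedVec<i32, 4> = FixedVec::new();
        v.push(1);
        v.push(2);
        v.push(2);
        v.dedup();      // [1, 2]
        v.insert(0, 0); // [0, 1, 2]
        assert_eq!(v.as_slice(), &[0, 1, 2]);
        assert_eq!(v.pop(), Some(2));
        assert_eq!(v.capacity(), 4); // pushes beyond CAP panic, never reallocate
    }
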
diff --git a/narcissus-core/src/hybrid_vec.rs b/narcissus-core/src/hybrid_vec.rs
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/narcissus-core/src/lib.rs b/narcissus-core/src/lib.rs
new file mode 100644 (file)
index 0000000..2dbb1ab
--- /dev/null
+++ b/narcissus-core/src/lib.rs
@@ -0,0 +1,309 @@
+mod bitset;
+mod fixed_vec;
+pub mod manual_arc;
+mod mutex;
+mod pool;
+mod ref_count;
+mod uuid;
+mod virtual_mem;
+mod virtual_vec;
+mod waiter;
+
+pub use bitset::BitIter;
+pub use fixed_vec::FixedVec;
+pub use mutex::Mutex;
+pub use pool::{Handle, Pool};
+pub use ref_count::{Arc, Rc};
+pub use uuid::Uuid;
+pub use virtual_mem::{virtual_commit, virtual_free, virtual_reserve};
+pub use virtual_vec::{VirtualDeque, VirtualVec};
+
+use std::mem::MaybeUninit;
+
+#[macro_export]
+macro_rules! static_assert {
+    ($cond:expr) => {
+        $crate::static_assert!($cond, concat!("assertion failed: ", stringify!($cond)));
+    };
+    ($cond:expr, $($t:tt)+) => {
+        #[forbid(const_err)]
+        const _: () = {
+            if !$cond {
+                core::panic!($($t)+)
+            }
+        };
+    };
+}
+
+#[macro_export]
+macro_rules! thread_token_def {
+    ($token_name:ident, $container_name:ident, $max_concurrency:expr) => {
+        mod private {
+            use std::cell::UnsafeCell;
+            use std::sync::atomic::AtomicUsize;
+            use $crate::{array_assume_init, uninit_array, PhantomUnsend};
+            pub struct $token_name {
+                index: usize,
+                phantom: PhantomUnsend,
+            }
+
+            impl $token_name {
+                const MAX_CONCURRENCY: usize = $max_concurrency;
+                pub fn new() -> Self {
+                    static NEXT_THREAD_INDEX: AtomicUsize = AtomicUsize::new(0);
+                    let index =
+                        NEXT_THREAD_INDEX.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
+                    assert!(
+                        index < Self::MAX_CONCURRENCY,
+                        "number of tokens exceeds max concurrency"
+                    );
+                    Self {
+                        index,
+                        phantom: PhantomUnsend {},
+                    }
+                }
+            }
+
+            pub struct $container_name<T> {
+                slots: [UnsafeCell<T>; $token_name::MAX_CONCURRENCY],
+            }
+
+            impl<T> $container_name<T> {
+                pub fn new<F>(mut f: F) -> Self
+                where
+                    F: FnMut() -> T,
+                {
+                    let mut slots = uninit_array();
+                    for elem in &mut slots[..] {
+                        elem.write(UnsafeCell::new(f()));
+                    }
+                    Self {
+                        slots: unsafe { array_assume_init(slots) },
+                    }
+                }
+
+                pub fn get<'a>(&self, thread_token: &'a $token_name) -> &'a T {
+                    // SAFETY: Safe as long as `thread_token` cannot be shared between threads, copied, or modified.
+                    unsafe { &*self.slots[thread_token.index].get() }
+                }
+
+                pub fn get_mut<'a>(&self, thread_token: &'a mut $token_name) -> &'a mut T {
+                    // SAFETY: Safe as long as `thread_token` cannot be shared between threads, copied, or modified.
+                    unsafe { &mut *self.slots[thread_token.index].get() }
+                }
+
+                pub fn slots_mut(&mut self) -> &mut [T] {
+                    unsafe {
+                        std::mem::transmute::<
+                            &mut [UnsafeCell<T>; $token_name::MAX_CONCURRENCY],
+                            &mut [T; $token_name::MAX_CONCURRENCY],
+                        >(&mut self.slots)
+                    }
+                }
+            }
+        }
+        pub use private::$container_name;
+        pub use private::$token_name;
+    };
+}
+
+#[macro_export]
+macro_rules! flags_def {
+    ($name:ident) => {
+        pub struct $name(u32);
+
+        impl $name {
+            #[inline]
+            pub fn from_raw(value: u32) -> Self {
+                Self(value)
+            }
+
+            #[inline]
+            pub fn as_raw(self) -> u32 {
+                self.0
+            }
+
+            #[inline]
+            pub fn intersects(self, rhs: Self) -> bool {
+                self.0 & rhs.0 != 0
+            }
+
+            #[inline]
+            pub fn contains(self, rhs: Self) -> bool {
+                self.0 & rhs.0 == rhs.0
+            }
+
+            #[inline]
+            pub fn cardinality(self) -> u32 {
+                self.0.count_ones()
+            }
+        }
+
+        impl Clone for $name {
+            fn clone(&self) -> Self {
+                Self(self.0)
+            }
+        }
+
+        impl Copy for $name {}
+
+        impl Default for $name {
+            fn default() -> Self {
+                Self(0)
+            }
+        }
+
+        impl PartialEq for $name {
+            fn eq(&self, rhs: &Self) -> bool {
+                self.0 == rhs.0
+            }
+        }
+
+        impl Eq for $name {}
+
+        impl std::ops::BitOr for $name {
+            type Output = Self;
+            fn bitor(self, rhs: Self) -> Self::Output {
+                Self(self.0 | rhs.0)
+            }
+        }
+
+        impl std::ops::BitOrAssign for $name {
+            fn bitor_assign(&mut self, rhs: Self) {
+                self.0 |= rhs.0
+            }
+        }
+
+        impl std::ops::BitAnd for $name {
+            type Output = Self;
+            fn bitand(self, rhs: Self) -> Self::Output {
+                Self(self.0 & rhs.0)
+            }
+        }
+
+        impl std::ops::BitAndAssign for $name {
+            fn bitand_assign(&mut self, rhs: Self) {
+                self.0 &= rhs.0
+            }
+        }
+
+        impl std::ops::BitXor for $name {
+            type Output = Self;
+            fn bitxor(self, rhs: Self) -> Self::Output {
+                Self(self.0 ^ rhs.0)
+            }
+        }
+
+        impl std::ops::BitXorAssign for $name {
+            fn bitxor_assign(&mut self, rhs: Self) {
+                self.0 ^= rhs.0
+            }
+        }
+    };
+}
+
+/// Avoid the awful `Default::default()` spam.
+#[inline(always)]
+pub fn default<T: Default>() -> T {
+    T::default()
+}
+
+#[allow(unconditional_panic)]
+const fn illegal_null_in_string() {
+    [][0]
+}
+
+#[doc(hidden)]
+pub const fn validate_cstr_contents(bytes: &[u8]) {
+    let mut i = 0;
+    while i < bytes.len() {
+        if bytes[i] == b'\0' {
+            illegal_null_in_string();
+        }
+        i += 1;
+    }
+}
+
+#[macro_export]
+macro_rules! cstr {
+    ( $s:literal ) => {{
+        $crate::validate_cstr_contents($s.as_bytes());
+        #[allow(unused_unsafe)]
+        unsafe {
+            std::mem::transmute::<_, &std::ffi::CStr>(concat!($s, "\0"))
+        }
+    }};
+}
+
+#[allow(dead_code)]
+pub fn string_from_c_str(c_str: &[i8]) -> String {
+    let s = unsafe { std::ffi::CStr::from_ptr(c_str.as_ptr()).to_bytes() };
+    String::from_utf8_lossy(s).into_owned()
+}
+
+pub fn get_thread_id() -> i32 {
+    unsafe { libc::gettid() }
+}
+
+pub fn uninit_array<T, const N: usize>() -> [MaybeUninit<T>; N] {
+    unsafe { MaybeUninit::<[MaybeUninit<T>; N]>::uninit().assume_init() }
+}
+
+pub unsafe fn array_assume_init<T, const N: usize>(array: [MaybeUninit<T>; N]) -> [T; N] {
+    (&array as *const _ as *const [T; N]).read()
+}
+
+pub fn make_array<T, F: FnMut() -> T, const N: usize>(mut f: F) -> [T; N]
+where
+    T: Sized,
+{
+    let mut array = uninit_array();
+    for elem in &mut array[..] {
+        elem.write(f());
+    }
+    unsafe { array_assume_init(array) }
+}
+
+pub fn uninit_box<T>() -> Box<MaybeUninit<T>> {
+    let layout = std::alloc::Layout::new::<MaybeUninit<T>>();
+    unsafe {
+        let ptr = std::mem::transmute::<_, *mut MaybeUninit<T>>(std::alloc::alloc(layout));
+        Box::from_raw(ptr)
+    }
+}
+
+pub fn zeroed_box<T>() -> Box<MaybeUninit<T>> {
+    let layout = std::alloc::Layout::new::<MaybeUninit<T>>();
+    unsafe {
+        let ptr = std::mem::transmute::<_, *mut MaybeUninit<T>>(std::alloc::alloc_zeroed(layout));
+        Box::from_raw(ptr)
+    }
+}
+
+/// Negative traits aren't stable yet, so use a dummy PhantomData marker to implement !Send
+pub type PhantomUnsend = std::marker::PhantomData<*mut ()>;
+
+#[must_use]
+pub fn align_offset(x: usize, align: usize) -> usize {
+    debug_assert!(align.is_power_of_two());
+    (x + align - 1) & !(align - 1)
+}
+
+#[cold]
+#[inline(never)]
+pub fn page_size() -> usize {
+    unsafe { libc::sysconf(libc::_SC_PAGESIZE) as usize }
+}
+
+#[cfg(test)]
+mod tests {
+    use std::ffi::CStr;
+
+    #[test]
+    fn test_cstr() {
+        assert_eq!(
+            cstr!("hello"),
+            CStr::from_bytes_with_nul(b"hello\0").unwrap()
+        );
+    }
+}
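
A few of the items above in use; a sketch, with `DebugFlags` as a hypothetical flag type defined via `flags_def!` and `ThreadToken`/`PerThread` as hypothetical names for a `thread_token_def!` invocation:

    use narcissus_core::{cstr, default, flags_def, thread_token_def};

    flags_def!(DebugFlags); // hypothetical flags type, for illustration

    // One slot per thread, up to a max concurrency of 4 (also illustrative).
    thread_token_def!(ThreadToken, PerThread, 4);

    fn main() {
        // `cstr!` checks for interior NULs at compile time and appends the
        // terminator, yielding a `&'static CStr`.
        let name = cstr!("narcissus");
        assert_eq!(name.to_bytes(), b"narcissus");

        let a = DebugFlags::from_raw(0b01);
        let b = DebugFlags::from_raw(0b11);
        assert!(b.contains(a) && a.intersects(b));
        assert_eq!((a | b).cardinality(), 2);

        // Each thread mints a token once; the container indexes per-thread
        // slots by token without locking.
        let mut token = ThreadToken::new();
        let counters = PerThread::new(|| 0u32);
        *counters.get_mut(&mut token) += 1;
        assert_eq!(*counters.get(&token), 1);

        // `default()` trims the `Default::default()` spam at call sites.
        let zero: u32 = default();
        assert_eq!(zero, 0);
    }
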
diff --git a/narcissus-core/src/manual_arc.rs b/narcissus-core/src/manual_arc.rs
new file mode 100644 (file)
index 0000000..280e5a4
--- /dev/null
+++ b/narcissus-core/src/manual_arc.rs
@@ -0,0 +1,154 @@
+use std::{
+    marker::PhantomData,
+    mem::ManuallyDrop,
+    ops::Deref,
+    ptr::NonNull,
+    sync::atomic::{AtomicU32, Ordering},
+};
+
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
+pub enum Release<T> {
+    Shared,
+    Unique(T),
+}
+
+struct Inner<T> {
+    strong: AtomicU32,
+    value: ManuallyDrop<T>,
+}
+
+impl<T> Inner<T> {
+    #[inline]
+    fn new(value: T) -> Self {
+        Self {
+            strong: AtomicU32::new(1),
+            value: ManuallyDrop::new(value),
+        }
+    }
+}
+
+impl<T> Inner<T> {
+    #[inline]
+    fn incr_strong(&self) {
+        self.strong.fetch_add(1, Ordering::Relaxed);
+    }
+
+    #[inline]
+    fn decr_strong(&self) -> bool {
+        self.strong.fetch_sub(1, Ordering::Release) != 1
+    }
+}
+
+pub struct ManualArc<T> {
+    ptr: NonNull<Inner<T>>,
+    phantom: PhantomData<Inner<T>>,
+
+    #[cfg(debug_assertions)]
+    has_released: bool,
+}
+
+impl<T> ManualArc<T> {
+    pub fn new(value: T) -> Self {
+        Self::from_inner(Box::leak(Box::new(Inner::new(value))).into())
+    }
+
+    #[inline]
+    fn from_inner(ptr: NonNull<Inner<T>>) -> Self {
+        Self {
+            ptr,
+            phantom: PhantomData,
+
+            #[cfg(debug_assertions)]
+            has_released: false,
+        }
+    }
+
+    #[inline]
+    fn inner(&self) -> &Inner<T> {
+        unsafe { self.ptr.as_ref() }
+    }
+
+    #[cold]
+    #[inline(never)]
+    fn release_slow(&self) -> T {
+        std::sync::atomic::fence(Ordering::Acquire);
+        let value;
+        unsafe {
+            let mut inner = Box::from_raw(self.ptr.as_ptr());
+            // extract the value from the container.
+            value = ManuallyDrop::take(&mut inner.value);
+            // since the value is wrapped in `ManuallyDrop` it won't be dropped here.
+            drop(inner);
+        }
+        value
+    }
+
+    pub fn release(&mut self) -> Release<T> {
+        #[cfg(debug_assertions)]
+        {
+            assert!(!self.has_released);
+            self.has_released = true;
+        }
+
+        if self.inner().decr_strong() {
+            Release::Shared
+        } else {
+            Release::Unique(self.release_slow())
+        }
+    }
+}
+
+impl<T: Default> Default for ManualArc<T> {
+    fn default() -> Self {
+        Self::new(T::default())
+    }
+}
+
+impl<T> Clone for ManualArc<T> {
+    fn clone(&self) -> Self {
+        self.inner().incr_strong();
+        Self::from_inner(self.inner().into())
+    }
+}
+
+#[cfg(debug_assertions)]
+impl<T> Drop for ManualArc<T> {
+    fn drop(&mut self) {
+        if !std::thread::panicking() {
+            assert!(self.has_released, "must release manually before drop");
+        }
+    }
+}
+
+impl<T> Deref for ManualArc<T> {
+    type Target = T;
+
+    // Inner is valid for as long as we hold a valid ManualArc.
+    fn deref(&self) -> &Self::Target {
+        self.inner().value.deref()
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::{ManualArc, Release};
+
+    #[test]
+    fn basic() {
+        let mut arc1 = ManualArc::new(42);
+        let mut arc2 = arc1.clone();
+
+        assert_eq!(*arc1, 42);
+        assert_eq!(*arc2, 42);
+
+        assert_eq!(arc2.release(), Release::Shared);
+        assert_eq!(arc1.release(), Release::Unique(42));
+    }
+
+    #[test]
+    #[should_panic]
+    fn drop_without_release() {
+        let arc = ManualArc::new(32);
+        drop(arc);
+    }
+}
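
The tests above show the contract; for flavour, a sketch of the intended pattern, where the `Unique` arm is the point at which a caller would run explicit destruction (a hypothetical GPU texture stands in for a real resource):

    use narcissus_core::manual_arc::{ManualArc, Release};

    struct Texture(u64); // hypothetical explicitly-destroyed resource

    fn main() {
        let mut owner = ManualArc::new(Texture(42));
        let mut shared = owner.clone();

        // Every clone must be released exactly once (debug builds assert this).
        assert!(matches!(shared.release(), Release::Shared));
        match owner.release() {
            Release::Unique(texture) => {
                // Last reference: explicit destruction would happen here.
                assert_eq!(texture.0, 42);
            }
            Release::Shared => unreachable!(),
        }
    }
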
diff --git a/narcissus-core/src/mutex.rs b/narcissus-core/src/mutex.rs
new file mode 100644 (file)
index 0000000..603695e
--- /dev/null
+++ b/narcissus-core/src/mutex.rs
@@ -0,0 +1,287 @@
+use std::{
+    cell::UnsafeCell,
+    ops::{Deref, DerefMut},
+    sync::atomic::{AtomicI32, Ordering},
+};
+
+#[cfg(debug_assertions)]
+use crate::get_thread_id;
+
+use crate::{waiter, PhantomUnsend};
+
+const UNLOCKED: i32 = 0;
+const LOCKED: i32 = 1;
+const LOCKED_WAIT: i32 = 2;
+
+pub struct Mutex<T: ?Sized> {
+    control: AtomicI32,
+    #[cfg(debug_assertions)]
+    thread_id: AtomicI32,
+    data: UnsafeCell<T>,
+}
+
+unsafe impl<T: ?Sized + Send> Send for Mutex<T> {}
+unsafe impl<T: ?Sized + Send> Sync for Mutex<T> {}
+
+pub struct MutexGuard<'a, T: ?Sized + 'a> {
+    mutex: &'a Mutex<T>,
+    phantom: PhantomUnsend,
+}
+
+impl<'a, T: ?Sized + 'a> MutexGuard<'a, T> {
+    pub fn new(mutex: &'a Mutex<T>) -> Self {
+        MutexGuard {
+            mutex,
+            phantom: PhantomUnsend {},
+        }
+    }
+}
+
+impl<T: ?Sized> Deref for MutexGuard<'_, T> {
+    type Target = T;
+
+    fn deref(&self) -> &T {
+        unsafe { &*self.mutex.data.get() }
+    }
+}
+
+impl<T: ?Sized> DerefMut for MutexGuard<'_, T> {
+    fn deref_mut(&mut self) -> &mut T {
+        unsafe { &mut *self.mutex.data.get() }
+    }
+}
+
+impl<T: ?Sized> Drop for MutexGuard<'_, T> {
+    fn drop(&mut self) {
+        unsafe { self.mutex.raw_unlock() }
+    }
+}
+
+unsafe impl<T: ?Sized + Sync> Sync for MutexGuard<'_, T> {}
+
+impl<T> Mutex<T> {
+    pub fn new(value: T) -> Self {
+        Self {
+            control: AtomicI32::new(UNLOCKED),
+            #[cfg(debug_assertions)]
+            thread_id: AtomicI32::new(0),
+            data: UnsafeCell::new(value),
+        }
+    }
+}
+
+impl<T: ?Sized> Mutex<T> {
+    pub fn lock(&self) -> MutexGuard<'_, T> {
+        // SAFETY: `raw_lock()` will deadlock if recursive acquisition is attempted, so the
+        // following sequence cannot generate multiple mutable references.
+        // ```
+        //   let mutex = Mutex::new(1);
+        //   let mut lock1 = mutex.lock();
+        //   let mut lock2 = mutex.lock();
+        //   let a = &mut *lock1;
+        //   let b = &mut *lock2;
+        // ```
+        // In a debug configuration it will assert instead.
+        unsafe {
+            self.raw_lock();
+            MutexGuard::new(self)
+        }
+    }
+
+    pub fn try_lock(&self) -> Option<MutexGuard<'_, T>> {
+        unsafe {
+            if self.raw_try_lock() {
+                Some(MutexGuard::new(self))
+            } else {
+                None
+            }
+        }
+    }
+
+    pub fn unlock(guard: MutexGuard<'_, T>) {
+        drop(guard)
+    }
+
+    pub fn get_mut(&mut self) -> &mut T {
+        self.data.get_mut()
+    }
+
+    pub unsafe fn raw_lock(&self) {
+        #[cfg(debug_assertions)]
+        if self.thread_id.load(Ordering::Relaxed) == get_thread_id() {
+            panic!("recursion not supported")
+        }
+
+        let mut c = self.control.load(Ordering::Relaxed);
+        if c == UNLOCKED {
+            match self.control.compare_exchange_weak(
+                UNLOCKED,
+                LOCKED,
+                Ordering::Acquire,
+                Ordering::Relaxed,
+            ) {
+                Ok(_) => {
+                    #[cfg(debug_assertions)]
+                    self.thread_id.store(get_thread_id(), Ordering::Relaxed);
+                    return;
+                }
+                Err(x) => c = x,
+            }
+        }
+
+        loop {
+            if c != LOCKED_WAIT {
+                match self.control.compare_exchange_weak(
+                    LOCKED,
+                    LOCKED_WAIT,
+                    Ordering::Acquire,
+                    Ordering::Relaxed,
+                ) {
+                    Ok(x) => c = x,
+                    Err(x) => c = x,
+                }
+            }
+
+            if c == LOCKED_WAIT {
+                waiter::wait(&self.control, LOCKED_WAIT, None);
+                c = self.control.load(Ordering::Relaxed);
+            }
+
+            if c == UNLOCKED {
+                match self.control.compare_exchange_weak(
+                    UNLOCKED,
+                    LOCKED_WAIT,
+                    Ordering::Acquire,
+                    Ordering::Relaxed,
+                ) {
+                    Ok(_) => {
+                        #[cfg(debug_assertions)]
+                        self.thread_id.store(get_thread_id(), Ordering::Relaxed);
+                        return;
+                    }
+                    Err(x) => c = x,
+                }
+            }
+        }
+    }
+
+    pub unsafe fn raw_try_lock(&self) -> bool {
+        #[cfg(debug_assertions)]
+        if self.thread_id.load(Ordering::Relaxed) == get_thread_id() {
+            panic!("recursion not supported")
+        }
+
+        if self.control.load(Ordering::Relaxed) == UNLOCKED
+            && self
+                .control
+                .compare_exchange_weak(UNLOCKED, LOCKED, Ordering::Acquire, Ordering::Relaxed)
+                .is_ok()
+        {
+            #[cfg(debug_assertions)]
+            self.thread_id.store(get_thread_id(), Ordering::Relaxed);
+            true
+        } else {
+            false
+        }
+    }
+
+    pub unsafe fn raw_unlock(&self) {
+        #[cfg(debug_assertions)]
+        self.thread_id.store(0, Ordering::Relaxed);
+
+        if self.control.fetch_sub(1, Ordering::Release) != LOCKED {
+            self.control.store(UNLOCKED, Ordering::Release);
+            waiter::wake_n(&self.control, 1);
+        }
+    }
+}
+
+impl<T: ?Sized + Default> Default for Mutex<T> {
+    fn default() -> Mutex<T> {
+        Mutex::new(Default::default())
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use crate::*;
+    #[test]
+    fn lock_unlock() {
+        let mutex = Mutex::new(10);
+        {
+            let lock = mutex.lock();
+            assert_eq!(*lock, 10);
+        }
+    }
+
+    #[test]
+    fn mutual_exclusion() {
+        let barrier = Arc::new(std::sync::Barrier::new(8));
+        let mut value = Arc::new(Mutex::new(0));
+        let mut threads = (0..8)
+            .map(|_| {
+                let barrier = barrier.clone();
+                let value = value.clone();
+                std::thread::spawn(move || {
+                    barrier.wait();
+                    for _ in 0..100_000 {
+                        *value.lock() += 1;
+                    }
+                })
+            })
+            .collect::<Vec<_>>();
+
+        for thread in threads.drain(..) {
+            thread.join().unwrap();
+        }
+
+        let value = *value.get_mut().unwrap().get_mut();
+        assert_eq!(value, 800_000);
+    }
+
+    // This test will deadlock in release builds, so don't run it.
+    #[cfg(debug_assertions)]
+    #[test]
+    #[should_panic(expected = "recursion not supported")]
+    fn recursion() {
+        let mutex = Mutex::new(1);
+        let mut lock1 = mutex.lock();
+        let mut lock2 = mutex.lock();
+        // Summon Cthulhu
+        let _a = &mut *lock1;
+        let _b = &mut *lock2;
+    }
+
+    // This test will deadlock in release builds, so don't run it.
+    #[cfg(debug_assertions)]
+    #[test]
+    #[should_panic(expected = "recursion not supported")]
+    fn recursion_try() {
+        let mutex = Mutex::new(1);
+        let mut lock1;
+        loop {
+            match mutex.try_lock() {
+                Some(lock) => {
+                    lock1 = lock;
+                    break;
+                }
+                None => {}
+            }
+        }
+
+        let mut lock2;
+        loop {
+            match mutex.try_lock() {
+                Some(lock) => {
+                    lock2 = lock;
+                    break;
+                }
+                None => {}
+            }
+        }
+
+        // Summon Cthulhu
+        let _a = &mut *lock1;
+        let _b = &mut *lock2;
+    }
+}
diff --git a/narcissus-core/src/pool.rs b/narcissus-core/src/pool.rs
new file mode 100644 (file)
index 0000000..bf03c1d
--- /dev/null
+++ b/narcissus-core/src/pool.rs
@@ -0,0 +1,720 @@
+use std::{marker::PhantomData, mem::size_of, ptr::NonNull, sync::atomic::AtomicU32};
+
+use crate::{align_offset, static_assert, virtual_commit, virtual_free, virtual_reserve};
+
+/// Each handle contains `MAGIC_BITS` bits of per-pool state.
+/// This value is provided by the user to aid debugging; lookup will panic when attempting to access
+/// a table using a handle with a non-matching magic value.
+const MAGIC_BITS: u32 = 4;
+/// Each handle uses `GEN_BITS` bits of per-slot generation counter. Looking up a handle with the
+/// correct index but an incorrect generation will yield `None`.
+const GEN_BITS: u32 = 8;
+/// Each handle uses `IDX_BITS` bits of index used to select a slot. This limits the maximum
+/// capacity of the table to `2 ^ IDX_BITS - 1`.
+const IDX_BITS: u32 = 20;
+
+const MAX_CAPACITY: usize = 1 << IDX_BITS as usize;
+const PAGE_SIZE: usize = 4096;
+
+/// Keep at least `MIN_FREE_SLOTS` available at all times in order to ensure a minimum of
+/// `MIN_FREE_SLOTS * 2 ^ GEN_BITS` create-delete cycles are required before a duplicate handle is
+/// generated.
+const MIN_FREE_SLOTS: usize = 512;
+
+static_assert!(MAGIC_BITS + GEN_BITS + IDX_BITS == 32);
+
+const MAGIC_MASK: u32 = (1 << MAGIC_BITS) - 1;
+const GEN_MASK: u32 = (1 << GEN_BITS) - 1;
+const IDX_MASK: u32 = (1 << IDX_BITS) - 1;
+
+const IDX_SHIFT: u32 = 0;
+const GEN_SHIFT: u32 = IDX_SHIFT + IDX_BITS;
+const MAGIC_SHIFT: u32 = GEN_SHIFT + GEN_BITS;
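+
+// Bit layout of the value packed by `Handle::new`, before the final inversion:
+//
+//   31      28 27        20 19                   0
+//  [  magic   | generation |      slot index      ]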
+
+#[derive(Clone, Copy, PartialEq, Eq, Hash)]
+pub struct Handle(u32);
+
+impl Default for Handle {
+    fn default() -> Self {
+        Self::null()
+    }
+}
+
+impl Handle {
+    fn new(magic: u32, generation: u32, slot_index: SlotIndex) -> Self {
+        let value = (magic & MAGIC_MASK) << MAGIC_SHIFT
+            | (generation & GEN_MASK) << GEN_SHIFT
+            | (slot_index.0 & IDX_MASK) << IDX_SHIFT;
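+        // Invert the bits so that a handle for slot 0, generation 0, in a pool
+        // with magic 0 doesn't encode as all zeroes and collide with
+        // `Handle::null()`.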
+        Self(!value)
+    }
+
+    pub const fn null() -> Self {
+        Self(0)
+    }
+
+    const fn magic(self) -> u32 {
+        (!self.0 >> MAGIC_SHIFT) & MAGIC_MASK
+    }
+
+    const fn generation(self) -> u32 {
+        (!self.0 >> GEN_SHIFT) & GEN_MASK
+    }
+
+    const fn slot_index(self) -> SlotIndex {
+        SlotIndex((!self.0 >> IDX_SHIFT) & IDX_MASK)
+    }
+
+    pub const fn is_null(&self) -> bool {
+        self.0 == 0
+    }
+}
+
+impl std::fmt::Debug for Handle {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        if self.is_null() {
+            f.debug_tuple("Handle").field(&"NULL").finish()
+        } else {
+            f.debug_struct("Handle")
+                .field("magic", &self.magic())
+                .field("generation", &self.generation())
+                .field("slot_index", &self.slot_index().0)
+                .finish()
+        }
+    }
+}
+
+#[derive(Clone, Copy, PartialEq, Eq)]
+struct SlotIndex(u32);
+
+#[derive(Clone, Copy, PartialEq, Eq)]
+struct ValueIndex(u32);
+
+// Since slots don't store the magic value, we can use the upper bit as a valid flag.
+const SLOT_EMPTY_BIT: u32 = 0x8000_0000;
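+
+// Slot bit layout: [ empty:1 | unused:3 | generation:8 | value index:20 ].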
+
+struct Slot {
+    value_index_and_gen: u32,
+}
+
+impl Slot {
+    const fn new() -> Self {
+        Self {
+            value_index_and_gen: 0xffff_ffff,
+        }
+    }
+
+    fn is_empty(&self) -> bool {
+        self.value_index_and_gen & SLOT_EMPTY_BIT != 0
+    }
+
+    fn is_full(&self) -> bool {
+        self.value_index_and_gen & SLOT_EMPTY_BIT == 0
+    }
+
+    fn generation(&self) -> u32 {
+        (self.value_index_and_gen >> GEN_SHIFT) & GEN_MASK
+    }
+
+    fn value_index(&self) -> ValueIndex {
+        ValueIndex((self.value_index_and_gen >> IDX_SHIFT) & IDX_MASK)
+    }
+
+    fn set_value_index(&mut self, value_index: ValueIndex) {
+        debug_assert!(self.is_empty());
+        debug_assert!(value_index.0 & IDX_MASK == value_index.0);
+        self.value_index_and_gen =
+            self.generation() << GEN_SHIFT | (value_index.0 & IDX_MASK) << IDX_SHIFT;
+    }
+
+    fn update_value_index(&mut self, value_index: ValueIndex) {
+        debug_assert!(self.is_full());
+        debug_assert!(value_index.0 & IDX_MASK == value_index.0);
+        self.value_index_and_gen =
+            self.generation() << GEN_SHIFT | (value_index.0 & IDX_MASK) << IDX_SHIFT;
+    }
+
+    fn clear_value_index(&mut self) {
+        debug_assert!(self.is_full());
+        let new_generation = self.generation().wrapping_add(1);
+        self.value_index_and_gen =
+            SLOT_EMPTY_BIT | (new_generation & GEN_MASK) << GEN_SHIFT | IDX_MASK << IDX_SHIFT;
+        debug_assert!(self.is_empty());
+    }
+}
+
+/// FIFO free list of slot indices.
+struct FreeSlots {
+    head: usize,
+    tail: usize,
+    cap: usize,
+    ptr: NonNull<SlotIndex>,
+}
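+
+// `head` and `tail` increase monotonically and are masked by `cap - 1` (a power
+// of two) on access, so `head - tail` stays the live count even after the
+// counters wrap.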
+
+impl FreeSlots {
+    fn new(ptr: NonNull<SlotIndex>) -> Self {
+        Self {
+            head: 0,
+            tail: 0,
+            cap: 0,
+            ptr,
+        }
+    }
+
+    fn head(&self) -> usize {
+        self.head & (self.cap - 1)
+    }
+
+    fn tail(&self) -> usize {
+        self.tail & (self.cap - 1)
+    }
+
+    fn len(&self) -> usize {
+        self.head.wrapping_sub(self.tail)
+    }
+
+    fn is_full(&self) -> bool {
+        self.len() == self.cap
+    }
+
+    fn push(&mut self, free_slot_index: SlotIndex) {
+        if self.is_full() {
+            self.grow();
+        }
+
+        let head = self.head();
+        self.head = self.head.wrapping_add(1);
+        unsafe { std::ptr::write(self.ptr.as_ptr().add(head), free_slot_index) }
+    }
+
+    fn pop(&mut self) -> Option<SlotIndex> {
+        // Refuse to pop while fewer than `MIN_FREE_SLOTS` slots remain, forcing the caller to
+        // grow the slots array and replenish the free list instead.
+        if self.len() < MIN_FREE_SLOTS {
+            return None;
+        }
+        let tail = self.tail();
+        self.tail = self.tail.wrapping_add(1);
+        Some(unsafe { std::ptr::read(self.ptr.as_ptr().add(tail)) })
+    }
+
+    #[cold]
+    fn grow(&mut self) {
+        // The free slot capacity must always be a power of two so that the modular arithmetic
+        // for indexing works out correctly.
+        debug_assert!(self.cap == 0 || self.cap.is_power_of_two());
+        assert!(self.cap < MAX_CAPACITY);
+
+        let new_cap = if self.cap == 0 { 1024 } else { self.cap << 1 };
+        unsafe {
+            virtual_commit(
+                self.ptr.as_ptr().add(self.cap) as _,
+                (new_cap - self.cap) * size_of::<u32>(),
+            )
+        };
+
+        // This discards FIFO ordering across the resize, but our freelist doesn't need strict
+        // ordering and this avoids moving the values around.
+        if self.len() > 0 {
+            debug_assert!(self.is_full());
+            self.tail = 0;
+            self.head = self.cap;
+        }
+
+        self.cap = new_cap;
+    }
+}
+
+struct Slots {
+    len: usize,
+    ptr: NonNull<Slot>,
+}
+
+impl Slots {
+    fn new(ptr: NonNull<Slot>) -> Self {
+        Self { len: 0, ptr }
+    }
+
+    fn get(&self, slot_index: SlotIndex) -> Option<&Slot> {
+        let index = slot_index.0 as usize;
+        if index < self.len {
+            Some(unsafe { self.ptr.as_ptr().add(index).as_ref().unwrap() })
+        } else {
+            None
+        }
+    }
+
+    fn get_mut(&mut self, slot_index: SlotIndex) -> Option<&mut Slot> {
+        let index = slot_index.0 as usize;
+        if index < self.len {
+            Some(unsafe { self.ptr.as_ptr().add(index).as_mut().unwrap() })
+        } else {
+            None
+        }
+    }
+
+    #[cold]
+    fn grow(&mut self) -> (u32, u32) {
+        let len = self.len;
+        let new_len = std::cmp::min(len + MIN_FREE_SLOTS * 2, MAX_CAPACITY);
+        assert!(new_len > len);
+        unsafe {
+            virtual_commit(
+                self.ptr.as_ptr().add(len) as _,
+                (new_len - len) * size_of::<Slot>(),
+            );
+            for new_slot_index in len..new_len {
+                std::ptr::write(self.ptr.as_ptr().add(new_slot_index), Slot::new());
+            }
+        }
+        self.len = new_len;
+        (len as u32, new_len as u32)
+    }
+}
+
+struct Values<T> {
+    cap: usize,
+    len: usize,
+    slots_ptr: NonNull<SlotIndex>,
+    values_ptr: NonNull<T>,
+    phantom: PhantomData<T>,
+}
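+
+// `values_ptr` is the dense array of values, while `slots_ptr` is a parallel
+// reverse lookup from value index back to the owning slot; `swap_remove` uses
+// it to patch the slot of the element it moves into the vacated position.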
+
+impl<T> Values<T> {
+    fn new(slots_ptr: NonNull<SlotIndex>, values_ptr: NonNull<T>) -> Self {
+        Self {
+            cap: 0,
+            len: 0,
+            slots_ptr,
+            values_ptr,
+            phantom: PhantomData,
+        }
+    }
+
+    #[inline(always)]
+    fn as_slice(&self) -> &[T] {
+        unsafe { std::slice::from_raw_parts(self.values_ptr.as_ptr(), self.len) }
+    }
+
+    #[inline(always)]
+    fn as_mut_slice(&mut self) -> &mut [T] {
+        unsafe { std::slice::from_raw_parts_mut(self.values_ptr.as_ptr(), self.len) }
+    }
+
+    /// Update the lookup table for the given `ValueIndex` with a new `SlotIndex`
+    fn set_slot(&mut self, value_index: ValueIndex, slot_index: SlotIndex) {
+        let value_index = value_index.0 as usize;
+        assert!(value_index < self.len);
+        unsafe {
+            std::ptr::write(
+                self.slots_ptr.as_ptr().add(value_index).as_mut().unwrap(),
+                slot_index,
+            )
+        }
+    }
+
+    /// Retrieve the `SlotIndex` corresponding to the given `ValueIndex` from the lookup table.
+    fn get_slot(&mut self, value_index: ValueIndex) -> SlotIndex {
+        let value_index = value_index.0 as usize;
+        assert!(value_index < self.len);
+        // SAFETY: SlotIndex is Copy so we don't invalidate the value being read.
+        unsafe { std::ptr::read(self.slots_ptr.as_ptr().add(value_index).as_ref().unwrap()) }
+    }
+
+    /// Push a new value into the values storage. Returns the index of the added value.
+    fn push(&mut self, value: T) -> ValueIndex {
+        if self.len == self.cap {
+            self.grow();
+        }
+
+        let new_value_index = self.len;
+        self.len += 1;
+        unsafe { std::ptr::write(self.values_ptr.as_ptr().add(new_value_index), value) };
+
+        ValueIndex(new_value_index as u32)
+    }
+
+    /// Remove the element at the given `ValueIndex` and replace it with the last element. Fixup
+    /// the lookup tables for the moved element.
+    ///
+    /// Returns the removed value.
+    fn swap_remove(&mut self, value_index: ValueIndex, slots: &mut Slots) -> T {
+        let last_value_index = ValueIndex((self.len - 1) as u32);
+
+        // Update the slot lookups for the swapped value.
+        if value_index != last_value_index {
+            let last_slot_index = self.get_slot(last_value_index);
+            self.set_slot(value_index, last_slot_index);
+            slots
+                .get_mut(last_slot_index)
+                .unwrap()
+                .update_value_index(value_index);
+        }
+
+        let value_index = value_index.0 as usize;
+        assert!(value_index < self.len);
+
+        unsafe {
+            let ptr = self.values_ptr.as_ptr();
+            self.len -= 1;
+
+            let value = std::ptr::read(ptr.add(value_index));
+            std::ptr::copy(
+                ptr.add(last_value_index.0 as usize),
+                ptr.add(value_index),
+                1,
+            );
+
+            value
+        }
+    }
+
+    /// Retrieve a reference to the value at `value_index`.
+    /// Panics if `value_index` is out of bounds.
+    fn get(&self, value_index: ValueIndex) -> &T {
+        let value_index = value_index.0 as usize;
+        assert!(value_index < self.len);
+        let ptr = self.values_ptr.as_ptr();
+        unsafe { ptr.add(value_index).as_ref().unwrap() }
+    }
+
+    /// Retrieve a mutable reference to the value at `value_index`.
+    /// Panics if `value_index` is out of bounds.
+    fn get_mut(&mut self, value_index: ValueIndex) -> &mut T {
+        let value_index = value_index.0 as usize;
+        assert!(value_index < self.len);
+        let ptr = self.values_ptr.as_ptr();
+        unsafe { ptr.add(value_index).as_mut().unwrap() }
+    }
+
+    #[cold]
+    fn grow(&mut self) {
+        let new_cap = std::cmp::min(self.cap + 1024, MAX_CAPACITY);
+        assert!(new_cap > self.cap);
+        let grow_region = new_cap - self.cap;
+        unsafe {
+            virtual_commit(
+                self.values_ptr.as_ptr().add(self.len) as _,
+                grow_region * size_of::<T>(),
+            );
+            virtual_commit(
+                self.slots_ptr.as_ptr().add(self.len) as _,
+                grow_region * size_of::<SlotIndex>(),
+            );
+        }
+        self.cap = new_cap;
+    }
+}
+
+pub struct Pool<T> {
+    magic: u32,
+    free_slots: FreeSlots,
+    slots: Slots,
+    values: Values<T>,
+    mapping_base: *mut std::ffi::c_void,
+    mapping_size: usize,
+}
+
+static NEXT_MAGIC: AtomicU32 = AtomicU32::new(0);
+
+impl<T> Pool<T> {
+    pub fn new() -> Self {
+        let mut mapping_size = 0;
+
+        let free_slots_offset = mapping_size;
+        mapping_size += MAX_CAPACITY * size_of::<u32>();
+        mapping_size = align_offset(mapping_size, PAGE_SIZE);
+
+        let slots_offset = mapping_size;
+        mapping_size += MAX_CAPACITY * size_of::<Slot>();
+        mapping_size = align_offset(mapping_size, PAGE_SIZE);
+
+        let value_slots_offset = mapping_size;
+        mapping_size += MAX_CAPACITY * size_of::<u32>();
+        mapping_size = align_offset(mapping_size, PAGE_SIZE);
+
+        let values_offset = mapping_size;
+        mapping_size += MAX_CAPACITY * size_of::<T>();
+        mapping_size = align_offset(mapping_size, PAGE_SIZE);
+
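+        // One contiguous reserved mapping with page-aligned sub-regions, laid out as:
+        // [ free slot queue | slots | value -> slot lookup | values ]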
+        let mapping_base = unsafe { virtual_reserve(mapping_size) };
+        let free_slots = unsafe { mapping_base.add(free_slots_offset) } as _;
+        let slots = unsafe { mapping_base.add(slots_offset) } as _;
+        let value_slots = unsafe { mapping_base.add(value_slots_offset) } as _;
+        let values = unsafe { mapping_base.add(values_offset) } as _;
+
+        Self {
+            magic: NEXT_MAGIC.fetch_add(1, std::sync::atomic::Ordering::Relaxed),
+            free_slots: FreeSlots::new(NonNull::new(free_slots).unwrap()),
+            slots: Slots::new(NonNull::new(slots).unwrap()),
+            values: Values::new(
+                NonNull::new(value_slots).unwrap(),
+                NonNull::new(values).unwrap(),
+            ),
+            mapping_base,
+            mapping_size,
+        }
+    }
+
+    fn magic(&self) -> u32 {
+        self.magic & MAGIC_MASK
+    }
+
+    pub fn len(&self) -> usize {
+        self.values.len
+    }
+
+    pub fn values(&self) -> &[T] {
+        self.values.as_slice()
+    }
+
+    pub fn values_mut(&mut self) -> &mut [T] {
+        self.values.as_mut_slice()
+    }
+
+    pub fn insert(&mut self, value: T) -> Handle {
+        let value_index = self.values.push(value);
+
+        let slot_index = match self.free_slots.pop() {
+            Some(slot_index) => slot_index,
+            None => {
+                // We need to grow the slots array if there are insufficient free slots.
+                let (lo, hi) = self.slots.grow();
+                for free_slot_index in (lo + 1)..hi {
+                    self.free_slots.push(SlotIndex(free_slot_index));
+                }
+                SlotIndex(lo)
+            }
+        };
+
+        self.values.set_slot(value_index, slot_index);
+
+        let slot = self.slots.get_mut(slot_index).unwrap();
+        let generation = slot.generation();
+        slot.set_value_index(value_index);
+
+        Handle::new(self.magic(), generation, slot_index)
+    }
+
+    pub fn remove(&mut self, handle: Handle) -> Option<T> {
+        // Avoid checking magic on null handles, it's always all bits set.
+        if handle.is_null() {
+            return None;
+        }
+
+        assert_eq!(self.magic(), handle.magic());
+
+        let generation = handle.generation();
+        let slot_index = handle.slot_index();
+
+        if let Some(slot) = self.slots.get_mut(slot_index) {
+            if slot.generation() == generation {
+                self.free_slots.push(slot_index);
+                let value_index = slot.value_index();
+                slot.clear_value_index();
+                return Some(self.values.swap_remove(value_index, &mut self.slots));
+            }
+        }
+
+        None
+    }
+
+    pub fn get_mut(&mut self, handle: Handle) -> Option<&mut T> {
+        // Avoid checking magic on null handles, it's always all bits set.
+        if handle.is_null() {
+            return None;
+        }
+
+        assert_eq!(self.magic(), handle.magic());
+
+        let generation = handle.generation();
+        let slot_index = handle.slot_index();
+
+        if let Some(slot) = self.slots.get(slot_index) {
+            if slot.generation() == generation {
+                assert!(slot.is_full());
+                return Some(self.values.get_mut(slot.value_index()));
+            }
+        }
+
+        None
+    }
+
+    pub fn get(&self, handle: Handle) -> Option<&T> {
+        // Avoid checking magic on null handles, it's always all bits set.
+        if handle.is_null() {
+            return None;
+        }
+
+        assert_eq!(self.magic(), handle.magic());
+
+        let generation = handle.generation();
+        let slot_index = handle.slot_index();
+
+        if let Some(slot) = self.slots.get(slot_index) {
+            if slot.generation() == generation {
+                assert!(slot.is_full());
+                return Some(self.values.get(slot.value_index()));
+            }
+        }
+
+        None
+    }
+
+    pub fn clear_no_drop(&mut self) {
+        let len = self.slots.len as u32;
+        for i in 0..len {
+            let slot_index = SlotIndex(i);
+            let slot = self.slots.get_mut(slot_index).unwrap();
+            if slot.is_full() {
+                slot.clear_value_index();
+                self.free_slots.push(slot_index);
+            }
+        }
+    }
+
+    pub fn clear(&mut self) {
+        self.clear_no_drop();
+        let len = self.values.len;
+        self.values.len = 0;
+        let to_drop = std::ptr::slice_from_raw_parts_mut(self.values.values_ptr.as_ptr(), len);
+        unsafe { std::ptr::drop_in_place(to_drop) };
+    }
+}
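+
+// A minimal usage sketch of the API above (mirrors the tests below); removing
+// a value bumps the slot's generation, so the stale handle misses afterwards:
+//
+//     let mut pool: Pool<u32> = Pool::new();
+//     let handle = pool.insert(42);
+//     assert_eq!(pool.get(handle), Some(&42));
+//     assert_eq!(pool.remove(handle), Some(42));
+//     assert_eq!(pool.get(handle), None);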
+
+impl<T> Drop for Pool<T> {
+    fn drop(&mut self) {
+        unsafe {
+            let to_drop = std::ptr::slice_from_raw_parts_mut(
+                self.values.values_ptr.as_ptr(),
+                self.values.len,
+            );
+            std::ptr::drop_in_place(to_drop);
+            virtual_free(self.mapping_base, self.mapping_size);
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use std::sync::atomic::{AtomicU32, Ordering};
+
+    use super::{Handle, Pool, MAX_CAPACITY, MIN_FREE_SLOTS};
+
+    #[test]
+    fn test_pool() {
+        let mut pool = Pool::new();
+        assert_eq!(pool.get(Handle::null()), None);
+        let one = pool.insert(1);
+        let two = pool.insert(2);
+        let three = pool.insert(3);
+        for _ in 0..20 {
+            let handles = (0..300_000).map(|_| pool.insert(9)).collect::<Vec<_>>();
+            for handle in &handles {
+                assert_eq!(pool.remove(*handle), Some(9));
+            }
+        }
+        assert_eq!(pool.get(one), Some(&1));
+        assert_eq!(pool.get(two), Some(&2));
+        assert_eq!(pool.get(three), Some(&3));
+        assert_eq!(pool.remove(one), Some(1));
+        assert_eq!(pool.remove(two), Some(2));
+        assert_eq!(pool.remove(three), Some(3));
+        assert_eq!(pool.remove(one), None);
+        assert_eq!(pool.remove(two), None);
+        assert_eq!(pool.remove(three), None);
+    }
+
+    #[test]
+    fn test_pool_magic() {
+        let mut pool_1 = Pool::new();
+        let mut pool_2 = Pool::new();
+
+        let handle_1 = pool_1.insert(1);
+        let handle_2 = pool_2.insert(1);
+        assert_ne!(handle_1, handle_2);
+    }
+
+    #[test]
+    #[should_panic]
+    fn test_pool_magic_fail() {
+        let mut pool_1 = Pool::new();
+        let pool_2 = Pool::<i32>::new();
+
+        let handle_1 = pool_1.insert(1);
+        pool_2.get(handle_1);
+    }
+
+    #[test]
+    fn test_pool_capacity() {
+        #[derive(Clone, Copy)]
+        struct Chonk {
+            value: usize,
+            _pad: [u8; 4096 - std::mem::size_of::<usize>()],
+        }
+
+        impl Chonk {
+            fn new(value: usize) -> Self {
+                Self {
+                    value,
+                    _pad: [0; 4096 - std::mem::size_of::<usize>()],
+                }
+            }
+        }
+
+        impl PartialEq for Chonk {
+            fn eq(&self, rhs: &Self) -> bool {
+                self.value == rhs.value
+            }
+        }
+
+        let mut pool = Pool::new();
+
+        for i in 0..MAX_CAPACITY - MIN_FREE_SLOTS {
+            let chonk = Chonk::new(i);
+            let handle = pool.insert(chonk);
+            assert!(pool.get(handle) == Some(&chonk));
+        }
+
+        assert_eq!(pool.len(), MAX_CAPACITY - MIN_FREE_SLOTS);
+    }
+
+    #[test]
+    fn test_use_after_free() {
+        let mut pool = Pool::new();
+
+        let handle = pool.insert(1);
+        assert_eq!(pool.remove(handle), Some(1));
+
+        for _ in 0..65536 {
+            let new_handle = pool.insert(1);
+            assert_eq!(pool.remove(new_handle), Some(1));
+            assert_ne!(handle, new_handle);
+            assert_eq!(pool.get(handle), None);
+        }
+    }
+
+    #[test]
+    fn test_drop_it_like_its_hot() {
+        static DROP_COUNT: AtomicU32 = AtomicU32::new(0);
+        struct Snoop {}
+        impl Drop for Snoop {
+            fn drop(&mut self) {
+                DROP_COUNT.fetch_add(1, Ordering::Relaxed);
+            }
+        }
+        let mut pool = Pool::new();
+
+        let _ = pool.insert(Snoop {});
+        let _ = pool.insert(Snoop {});
+        let handle = pool.insert(Snoop {});
+
+        assert_eq!(DROP_COUNT.load(Ordering::Relaxed), 0);
+        pool.remove(handle);
+        assert_eq!(DROP_COUNT.load(Ordering::Relaxed), 1);
+        pool.clear();
+        assert_eq!(DROP_COUNT.load(Ordering::Relaxed), 3);
+
+        let _ = pool.insert(Snoop {});
+        drop(pool);
+        assert_eq!(DROP_COUNT.load(Ordering::Relaxed), 4);
+    }
+}
diff --git a/narcissus-core/src/ref_count.rs b/narcissus-core/src/ref_count.rs
new file mode 100644 (file)
index 0000000..413973b
--- /dev/null
@@ -0,0 +1,305 @@
+use std::{
+    marker::PhantomData,
+    ops::Deref,
+    ptr::NonNull,
+    sync::atomic::{AtomicI32, Ordering},
+};
+
+struct Inner<T: ?Sized> {
+    // Number of strong references in addition to the current value.
+    // A negative value indicates a non-atomic reference count, counting up from i32::MIN
+    // A positive value indicates an atomic reference count, counting up from 0
+    strong: AtomicI32,
+    value: T,
+}
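+
+// Concretely: a fresh `Rc` stores `i32::MIN + 1` (one non-atomic reference),
+// and `Inner::upgrade` switches to the atomic representation with a wrapping
+// add of `i32::MIN`, so `i32::MIN + 1` becomes `1`.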
+
+impl<T> Inner<T> {
+    #[inline]
+    fn new(value: T) -> Self {
+        Self {
+            strong: AtomicI32::new(i32::MIN + 1),
+            value,
+        }
+    }
+}
+
+impl<T: ?Sized> Inner<T> {
+    #[inline]
+    fn incr_strong(&self) {
+        let strong = self.strong.load(Ordering::Relaxed);
+        if strong < 0 {
+            self.strong.store(strong.wrapping_add(1), Ordering::Relaxed);
+        } else {
+            self.strong.fetch_add(1, Ordering::Relaxed);
+        }
+    }
+
+    #[inline]
+    fn decr_strong(&self) -> bool {
+        let strong = self.strong.load(Ordering::Relaxed);
+        if strong < 0 {
+            self.strong.store(strong.wrapping_sub(1), Ordering::Release);
+            strong != i32::MIN + 1
+        } else {
+            let strong = self.strong.fetch_sub(1, Ordering::Release);
+            strong != 1
+        }
+    }
+
+    #[inline]
+    fn incr_strong_atomic(&self) {
+        self.strong.fetch_add(1, Ordering::Relaxed);
+    }
+
+    #[inline]
+    fn decr_strong_atomic(&self) -> bool {
+        self.strong.fetch_sub(1, Ordering::Release) != 1
+    }
+
+    #[inline]
+    fn upgrade(&self) {
+        let strong = self.strong.load(Ordering::Relaxed);
+        if strong < 0 {
+            self.strong
+                .store(strong.wrapping_add(i32::MIN), Ordering::Relaxed);
+        }
+    }
+}
+
+pub struct Rc<T: ?Sized> {
+    ptr: NonNull<Inner<T>>,
+    phantom: PhantomData<Inner<T>>,
+}
+
+impl<T> Rc<T> {
+    pub fn new(value: T) -> Self {
+        Self::from_inner(Box::leak(Box::new(Inner::new(value))).into())
+    }
+}
+
+impl<T: Default> Default for Rc<T> {
+    fn default() -> Self {
+        Self::new(T::default())
+    }
+}
+
+impl<T: ?Sized> Rc<T> {
+    #[inline]
+    pub fn strong_count(&self) -> i32 {
+        let strong = self.inner().strong.load(Ordering::Relaxed);
+        if strong < 0 {
+            strong.wrapping_add(i32::MIN)
+        } else {
+            strong
+        }
+    }
+
+    #[inline]
+    pub fn is_unique(&mut self) -> bool {
+        let strong = self.inner().strong.load(Ordering::Relaxed);
+        strong == 1 || strong == i32::MIN + 1
+    }
+
+    #[inline]
+    pub fn ptr_eq(&self, other: &Self) -> bool {
+        self.ptr.as_ptr() == other.ptr.as_ptr()
+    }
+
+    #[inline]
+    pub fn get_mut(&mut self) -> Option<&mut T> {
+        if self.is_unique() {
+            // This unsafety is ok because we're guaranteed that the pointer
+            // returned is the *only* pointer that will ever be returned to T. Our
+            // reference count is guaranteed to be 1 at this point, and we required
+            // the Rc itself to be `mut`, so we're returning the only possible
+            // reference to the inner data.
+            Some(unsafe { self.get_mut_unchecked() })
+        } else {
+            None
+        }
+    }
+
+    #[inline]
+    pub unsafe fn get_mut_unchecked(&mut self) -> &mut T {
+        // We are careful to *not* create a reference covering the "count" fields, as
+        // this would alias with concurrent access to the reference counts.
+        &mut (*self.ptr.as_ptr()).value
+    }
+
+    #[inline]
+    fn from_inner(ptr: NonNull<Inner<T>>) -> Self {
+        Self {
+            ptr,
+            phantom: PhantomData,
+        }
+    }
+
+    #[inline]
+    fn inner(&self) -> &Inner<T> {
+        unsafe { self.ptr.as_ref() }
+    }
+
+    #[cold]
+    #[inline(never)]
+    fn drop_slow(&self) {
+        std::sync::atomic::fence(Ordering::Acquire);
+        unsafe {
+            drop(Box::from_raw(self.ptr.as_ptr()));
+        }
+    }
+}
+
+impl<T: ?Sized> Clone for Rc<T> {
+    fn clone(&self) -> Self {
+        self.inner().incr_strong();
+        Self::from_inner(self.inner().into())
+    }
+}
+
+impl<T: ?Sized> Drop for Rc<T> {
+    fn drop(&mut self) {
+        if !self.inner().decr_strong() {
+            self.drop_slow();
+        }
+    }
+}
+
+impl<T: ?Sized> Deref for Rc<T> {
+    type Target = T;
+
+    // Inner remains valid for as long as we hold a valid Rc.
+    fn deref(&self) -> &Self::Target {
+        &self.inner().value
+    }
+}
+
+pub struct Arc<T: ?Sized> {
+    ptr: NonNull<Inner<T>>,
+    phantom: PhantomData<Inner<T>>,
+}
+
+unsafe impl<T: ?Sized + Sync + Send> Send for Arc<T> {}
+unsafe impl<T: ?Sized + Sync + Send> Sync for Arc<T> {}
+
+impl<T> Arc<T> {
+    pub fn new(value: T) -> Self {
+        Self::from_inner(Box::leak(Box::new(Inner::new(value))).into())
+    }
+}
+
+impl<T: Default> Default for Arc<T> {
+    fn default() -> Self {
+        Self::new(T::default())
+    }
+}
+
+impl<T: ?Sized> Arc<T> {
+    pub fn from_rc(rc: &Rc<T>) -> Self {
+        let inner = rc.inner();
+        inner.upgrade();
+        inner.incr_strong();
+        Self::from_inner(inner.into())
+    }
+
+    #[inline]
+    pub fn ptr_eq(&self, other: &Self) -> bool {
+        self.ptr.as_ptr() == other.ptr.as_ptr()
+    }
+
+    #[inline]
+    pub fn strong_count(&self) -> i32 {
+        let strong = self.inner().strong.load(Ordering::Relaxed);
+        if strong < 0 {
+            strong.wrapping_add(i32::MIN)
+        } else {
+            strong
+        }
+    }
+
+    #[inline]
+    pub fn is_unique(&self) -> bool {
+        let strong = self.inner().strong.load(Ordering::Acquire);
+        strong == 1 || strong == i32::MIN + 1
+    }
+
+    pub fn get_mut(&mut self) -> Option<&mut T> {
+        if self.is_unique() {
+            // This unsafety is ok because we're guaranteed that the pointer
+            // returned is the *only* pointer that will ever be returned to T. Our
+            // reference count is guaranteed to be 1 at this point, and we required
+            // the Arc itself to be `mut`, so we're returning the only possible
+            // reference to the inner data.
+            Some(unsafe { self.get_mut_unchecked() })
+        } else {
+            None
+        }
+    }
+
+    pub unsafe fn get_mut_unchecked(&mut self) -> &mut T {
+        // We are careful to *not* create a reference covering the "count" fields, as
+        // this would alias with concurrent access to the reference counts.
+        &mut (*self.ptr.as_ptr()).value
+    }
+
+    fn from_inner(ptr: NonNull<Inner<T>>) -> Self {
+        Self {
+            ptr,
+            phantom: PhantomData,
+        }
+    }
+
+    #[inline]
+    fn inner(&self) -> &Inner<T> {
+        unsafe { self.ptr.as_ref() }
+    }
+
+    #[cold]
+    #[inline(never)]
+    fn drop_slow(&self) {
+        std::sync::atomic::fence(Ordering::Acquire);
+        unsafe {
+            drop(Box::from_raw(self.ptr.as_ptr()));
+        }
+    }
+}
+
+impl<T: ?Sized> Clone for Arc<T> {
+    fn clone(&self) -> Self {
+        self.inner().incr_strong_atomic();
+        Self::from_inner(self.inner().into())
+    }
+}
+
+impl<T: ?Sized> Drop for Arc<T> {
+    fn drop(&mut self) {
+        if !self.inner().decr_strong_atomic() {
+            self.drop_slow()
+        }
+    }
+}
+
+impl<T: ?Sized> Deref for Arc<T> {
+    type Target = T;
+
+    // Inner remains valid for as long as we hold a valid Arc.
+    fn deref(&self) -> &Self::Target {
+        &self.inner().value
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use crate::*;
+
+    #[test]
+    fn rc_double_upgrade() {
+        let rc1 = Rc::new(());
+        assert_eq!(rc1.strong_count(), 1);
+        let _rc2 = rc1.clone();
+        assert_eq!(rc1.strong_count(), 2);
+        let _arc1 = Arc::from_rc(&rc1);
+        assert_eq!(rc1.strong_count(), 3);
+        let _arc2 = Arc::from_rc(&rc1);
+        assert_eq!(rc1.strong_count(), 4);
+    }
+}
diff --git a/narcissus-core/src/uuid.rs b/narcissus-core/src/uuid.rs
new file mode 100644 (file)
index 0000000..acd1ecb
--- /dev/null
@@ -0,0 +1,237 @@
+use std::{error::Error, fmt::Display};
+
+#[derive(Debug, Clone, PartialEq, Eq)]
+#[non_exhaustive]
+pub struct ParseUuidError;
+
+impl Display for ParseUuidError {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        "provided string is not a valid UUID".fmt(f)
+    }
+}
+
+impl Error for ParseUuidError {}
+
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
+pub struct Uuid([u8; 16]);
+
+impl Uuid {
+    pub fn nil() -> Self {
+        Self([0; 16])
+    }
+
+    pub fn from_bytes_be(bytes: [u8; 16]) -> Self {
+        Self(bytes)
+    }
+
+    pub const fn parse_str_unwrap(uuid: &str) -> Self {
+        match Uuid::parse_str(uuid) {
+            Ok(uuid) => uuid,
+            Err(_) => panic!("provided string is not a valid UUID"),
+        }
+    }
+
+    pub const fn parse_str(uuid: &str) -> Result<Self, ParseUuidError> {
+        const LUT: [u8; 256] = [
+            !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0,
+            !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0,
+            !0, !0, !0, !0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, !0, !0, !0, !0, !0, !0, !0, 10, 11, 12,
+            13, 14, 15, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0,
+            !0, !0, !0, !0, !0, !0, !0, 10, 11, 12, 13, 14, 15, !0, !0, !0, !0, !0, !0, !0, !0, !0,
+            !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0,
+            !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0,
+            !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0,
+            !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0,
+            !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0,
+            !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0,
+            !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0, !0,
+        ];
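+        // The LUT above maps the ASCII bytes '0'..='9', 'a'..='f', and 'A'..='F'
+        // to their values 0..=15; every other byte maps to `!0`, so a single OR
+        // over all parsed digits detects invalid input below.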
+
+        let len = uuid.len();
+        let uuid = uuid.as_bytes();
+        if len != 36 || uuid[8] != b'-' || uuid[13] != b'-' || uuid[18] != b'-' || uuid[23] != b'-'
+        {
+            return Err(ParseUuidError);
+        }
+
+        let h_00_0 = LUT[uuid[0] as usize];
+        let h_00_1 = LUT[uuid[1] as usize];
+        let h_01_0 = LUT[uuid[2] as usize];
+        let h_01_1 = LUT[uuid[3] as usize];
+        let h_02_0 = LUT[uuid[4] as usize];
+        let h_02_1 = LUT[uuid[5] as usize];
+        let h_03_0 = LUT[uuid[6] as usize];
+        let h_03_1 = LUT[uuid[7] as usize];
+        // -
+        let h_04_0 = LUT[uuid[9] as usize];
+        let h_04_1 = LUT[uuid[10] as usize];
+        let h_05_0 = LUT[uuid[11] as usize];
+        let h_05_1 = LUT[uuid[12] as usize];
+        // -
+        let h_06_0 = LUT[uuid[14] as usize];
+        let h_06_1 = LUT[uuid[15] as usize];
+        let h_07_0 = LUT[uuid[16] as usize];
+        let h_07_1 = LUT[uuid[17] as usize];
+        // -
+        let h_08_0 = LUT[uuid[19] as usize];
+        let h_08_1 = LUT[uuid[20] as usize];
+        let h_09_0 = LUT[uuid[21] as usize];
+        let h_09_1 = LUT[uuid[22] as usize];
+        // -
+        let h_10_0 = LUT[uuid[24] as usize];
+        let h_10_1 = LUT[uuid[25] as usize];
+        let h_11_0 = LUT[uuid[26] as usize];
+        let h_11_1 = LUT[uuid[27] as usize];
+        let h_12_0 = LUT[uuid[28] as usize];
+        let h_12_1 = LUT[uuid[29] as usize];
+        let h_13_0 = LUT[uuid[30] as usize];
+        let h_13_1 = LUT[uuid[31] as usize];
+        let h_14_0 = LUT[uuid[32] as usize];
+        let h_14_1 = LUT[uuid[33] as usize];
+        let h_15_0 = LUT[uuid[34] as usize];
+        let h_15_1 = LUT[uuid[35] as usize];
+
+        let bits = h_00_0
+            | h_00_1
+            | h_01_0
+            | h_01_1
+            | h_02_0
+            | h_02_1
+            | h_03_0
+            | h_03_1
+            | h_04_0
+            | h_04_1
+            | h_05_0
+            | h_05_1
+            | h_06_0
+            | h_06_1
+            | h_07_0
+            | h_07_1
+            | h_08_0
+            | h_08_1
+            | h_09_0
+            | h_09_1
+            | h_10_0
+            | h_10_1
+            | h_11_0
+            | h_11_1
+            | h_12_0
+            | h_12_1
+            | h_13_0
+            | h_13_1
+            | h_14_0
+            | h_14_1
+            | h_15_0
+            | h_15_1;
+
+        // `bits` can only equal `!0` if at least one of the parsed hex digits was invalid.
+        if bits == !0 {
+            return Err(ParseUuidError);
+        }
+
+        Ok(Self([
+            h_00_0 << 4 | h_00_1,
+            h_01_0 << 4 | h_01_1,
+            h_02_0 << 4 | h_02_1,
+            h_03_0 << 4 | h_03_1,
+            // -
+            h_04_0 << 4 | h_04_1,
+            h_05_0 << 4 | h_05_1,
+            // -
+            h_06_0 << 4 | h_06_1,
+            h_07_0 << 4 | h_07_1,
+            // -
+            h_08_0 << 4 | h_08_1,
+            h_09_0 << 4 | h_09_1,
+            // -
+            h_10_0 << 4 | h_10_1,
+            h_11_0 << 4 | h_11_1,
+            h_12_0 << 4 | h_12_1,
+            h_13_0 << 4 | h_13_1,
+            h_14_0 << 4 | h_14_1,
+            h_15_0 << 4 | h_15_1,
+        ]))
+    }
+}
+
+impl Display for Uuid {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> {
+        f.write_fmt(format_args!(
+            "{:02x}{:02x}{:02x}{:02x}-{:02x}{:02x}-{:02x}{:02x}-{:02x}{:02x}-{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}",
+            self.0[0],
+            self.0[1],
+            self.0[2],
+            self.0[3],
+            self.0[4],
+            self.0[5],
+            self.0[6],
+            self.0[7],
+            self.0[8],
+            self.0[9],
+            self.0[10],
+            self.0[11],
+            self.0[12],
+            self.0[13],
+            self.0[14],
+            self.0[15],
+        ))
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use crate::uuid::ParseUuidError;
+
+    use super::Uuid;
+
+    #[test]
+    fn test_uuid() {
+        assert_eq!(
+            Uuid::parse_str("00000000-0000-0000-0000-000000000000"),
+            Ok(Uuid::nil())
+        );
+        assert_eq!(
+            format!(
+                "{}",
+                Uuid::parse_str("00000000-0000-0000-0000-000000000000").unwrap()
+            ),
+            "00000000-0000-0000-0000-000000000000"
+        );
+
+        assert_eq!(
+            Uuid::parse_str("00112233-4455-6677-8899-aabbccddeeff"),
+            Ok(Uuid::from_bytes_be([
+                0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd,
+                0xee, 0xff
+            ]))
+        );
+        assert_eq!(
+            format!(
+                "{}",
+                Uuid::parse_str("00112233-4455-6677-8899-aabbccddeeff").unwrap()
+            ),
+            "00112233-4455-6677-8899-aabbccddeeff"
+        );
+
+        assert_eq!(
+            Uuid::parse_str("01234567-89AB-CDEF-0123-456789ABCDEF"),
+            Ok(Uuid::from_bytes_be([
+                0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0x01, 0x23, 0x45, 0x67, 0x89, 0xab,
+                0xcd, 0xef
+            ]))
+        );
+        assert_eq!(
+            format!(
+                "{}",
+                Uuid::parse_str("01234567-89AB-CDEF-0123-456789ABCDEF").unwrap()
+            ),
+            "01234567-89ab-cdef-0123-456789abcdef"
+        );
+
+        assert_eq!(Uuid::parse_str(""), Err(ParseUuidError));
+        assert_eq!(
+            Uuid::parse_str("ERROR000-0000-0000-0000-000000000000"),
+            Err(ParseUuidError)
+        );
+    }
+}
diff --git a/narcissus-core/src/virtual_mem.rs b/narcissus-core/src/virtual_mem.rs
new file mode 100644 (file)
index 0000000..bd43f75
--- /dev/null
@@ -0,0 +1,30 @@
+#[cold]
+#[inline(never)]
+pub unsafe fn virtual_reserve(size: usize) -> *mut std::ffi::c_void {
+    let ptr = libc::mmap(
+        std::ptr::null_mut(),
+        size,
+        libc::PROT_NONE,
+        libc::MAP_PRIVATE | libc::MAP_ANONYMOUS,
+        -1,
+        0,
+    );
+
+    assert!(ptr != libc::MAP_FAILED && !ptr.is_null());
+
+    ptr
+}
+
+#[cold]
+#[inline(never)]
+pub unsafe fn virtual_commit(ptr: *mut std::ffi::c_void, size: usize) {
+    let result = libc::mprotect(ptr, size, libc::PROT_READ | libc::PROT_WRITE);
+    assert!(result == 0);
+}
+
+#[cold]
+#[inline(never)]
+pub unsafe fn virtual_free(ptr: *mut std::ffi::c_void, size: usize) {
+    let result = libc::munmap(ptr, size);
+    assert!(result == 0);
+}
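+
+// A minimal usage sketch of the reserve/commit protocol above: reserve a large
+// span of address space up front (PROT_NONE, so touching it faults), then
+// commit pages on demand as a structure grows.
+//
+//     unsafe {
+//         let size = 1 << 30; // 1 GiB of address space, no physical backing yet
+//         let base = virtual_reserve(size);
+//         virtual_commit(base, 4096); // back the first page
+//         virtual_free(base, size);
+//     }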
diff --git a/narcissus-core/src/virtual_vec/mod.rs b/narcissus-core/src/virtual_vec/mod.rs
new file mode 100644 (file)
index 0000000..74cbb44
--- /dev/null
@@ -0,0 +1,66 @@
+mod raw_virtual_vec;
+mod virtual_deque;
+mod virtual_vec;
+
+pub use self::raw_virtual_vec::RawVirtualVec;
+pub use self::virtual_deque::VirtualDeque;
+pub use self::virtual_vec::VirtualVec;
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn virtual_vec() {
+        let mut v = VirtualVec::new(2048);
+
+        for i in 0..2048 {
+            v.push(i);
+        }
+
+        for i in 0..2048 {
+            assert_eq!(v[i], i);
+        }
+
+        for i in (0..2048).rev() {
+            assert_eq!(v.pop(), Some(i))
+        }
+
+        assert_eq!(v.len(), 0);
+    }
+
+    #[test]
+    #[should_panic]
+    fn virtual_vec_too_large() {
+        let mut v = VirtualVec::new(2048);
+        for i in 0..2049 {
+            v.push(i);
+        }
+    }
+
+    #[test]
+    fn virtual_deque() {
+        let mut queue = VirtualDeque::new(2048);
+
+        for _ in 0..2049 {
+            for i in 0..2047 {
+                queue.push_back(i);
+            }
+
+            for i in 0..2047 {
+                assert!(queue.pop_front() == Some(i));
+            }
+        }
+
+        assert_eq!(queue.len(), 0);
+    }
+
+    #[test]
+    #[should_panic]
+    fn virtual_deque_too_large() {
+        let mut v = VirtualDeque::new(2048);
+        for i in 0..2049 {
+            v.push_back(i);
+        }
+    }
+}
diff --git a/narcissus-core/src/virtual_vec/raw_virtual_vec.rs b/narcissus-core/src/virtual_vec/raw_virtual_vec.rs
new file mode 100644 (file)
index 0000000..8101c9f
--- /dev/null
@@ -0,0 +1,135 @@
+use std::cmp;
+use std::{
+    mem::{align_of, size_of},
+    ptr::NonNull,
+};
+
+use crate::{page_size, virtual_commit, virtual_free, virtual_reserve};
+
+pub struct RawVirtualVec<T> {
+    ptr: NonNull<T>,
+    cap: usize,
+    max_cap: usize,
+}
+
+impl<T> RawVirtualVec<T> {
+    pub fn new(max_capacity: usize) -> Self {
+        assert!(max_capacity != 0);
+
+        let size = size_of::<T>();
+        let align = align_of::<T>();
+        let page_size = page_size();
+
+        // Allocating memory with virtual alloc for a zst seems a bit of a waste :)
+        assert!(size != 0);
+
+        // mmap guarantees we get page-aligned addresses back. So as long as our alignment
+        // requirement is less than that, we're all good in the hood.
+        assert!(align < page_size);
+
+        let max_capacity_bytes = size.checked_mul(max_capacity).unwrap();
+
+        // Check overflow of rounding operation.
+        assert!(max_capacity_bytes <= (std::usize::MAX - (align - 1)));
+
+        let ptr = unsafe { NonNull::new_unchecked(virtual_reserve(max_capacity_bytes) as *mut T) };
+
+        Self {
+            ptr,
+            cap: 0,
+            max_cap: max_capacity,
+        }
+    }
+
+    pub fn with_capacity(capacity: usize, max_capacity: usize) -> Self {
+        assert!(capacity <= max_capacity);
+        let mut vec = Self::new(max_capacity);
+
+        unsafe {
+            // We ensure that capacity is no greater than max_capacity, and the `new` function
+            // above would have panicked if max_capacity * size_of::<T>() overflowed, so we're
+            // always safe here.
+            let cap_bytes = capacity * size_of::<T>();
+            virtual_commit(vec.ptr.as_ptr() as *mut std::ffi::c_void, cap_bytes);
+            vec.cap = capacity;
+        }
+
+        vec
+    }
+
+    #[inline]
+    pub fn reserve(&mut self, used_capacity: usize, required_extra_capacity: usize) {
+        if self.cap.wrapping_sub(used_capacity) >= required_extra_capacity {
+            return;
+        }
+
+        self.grow(used_capacity, required_extra_capacity);
+    }
+
+    #[cold]
+    #[inline(never)]
+    pub fn grow(&mut self, used_capacity: usize, required_extra_capacity: usize) {
+        unsafe {
+            let required_cap = used_capacity.checked_add(required_extra_capacity).unwrap();
+            let max_cap = self.max_cap;
+            if required_cap > max_cap {
+                panic!("max capacity exceeded")
+            };
+
+            // cap can never be big enough that this would wrap.
+            let double_cap = self.cap * 2;
+            let new_cap = cmp::max(required_cap, cmp::min(double_cap, max_cap));
+
+            // This can't overflow because we've already ensured that the new_cap is less than or
+            // equal to the max_cap, and the max_cap has already been checked for overflow in
+            // the constructor.
+            let new_cap_bytes = new_cap * size_of::<T>();
+            virtual_commit(self.ptr.as_ptr() as *mut std::ffi::c_void, new_cap_bytes);
+
+            self.cap = new_cap;
+        }
+    }
+
+    #[cold]
+    #[inline(never)]
+    pub fn double(&mut self) {
+        unsafe {
+            let old_cap = self.cap;
+            let min_cap = 1;
+            let double_cap = old_cap.wrapping_mul(2);
+            let new_cap = cmp::max(double_cap, min_cap);
+            let new_cap = cmp::min(new_cap, self.max_cap);
+            assert_ne!(old_cap, new_cap);
+            let new_cap_bytes = new_cap * size_of::<T>();
+            virtual_commit(self.ptr.as_ptr() as *mut std::ffi::c_void, new_cap_bytes);
+            self.cap = new_cap;
+        }
+    }
+
+    #[inline(always)]
+    pub fn capacity(&self) -> usize {
+        self.cap
+    }
+
+    #[inline(always)]
+    pub fn max_capacity(&self) -> usize {
+        self.max_cap
+    }
+
+    #[inline(always)]
+    pub fn ptr(&self) -> *mut T {
+        self.ptr.as_ptr()
+    }
+}
+
+impl<T> Drop for RawVirtualVec<T> {
+    fn drop(&mut self) {
+        unsafe {
+            // The preconditions here that max_cap multiplied by the size won't overflow and
+            // that the pointer actually exists and is mapped are all ensured by the constructor.
+            virtual_free(
+                self.ptr.as_ptr() as *mut std::ffi::c_void,
+                self.max_cap * size_of::<T>(),
+            );
+        }
+    }
+}
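+
+// A small sketch of how the containers in this module drive `RawVirtualVec`:
+// reserve address space for the maximum capacity up front, then commit pages
+// as elements are appended.
+//
+//     let mut raw: RawVirtualVec<u64> = RawVirtualVec::new(1 << 20);
+//     raw.reserve(0, 16); // commit room for at least 16 elements
+//     assert!(raw.capacity() >= 16);
+//     assert_eq!(raw.max_capacity(), 1 << 20);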
diff --git a/narcissus-core/src/virtual_vec/virtual_deque.rs b/narcissus-core/src/virtual_vec/virtual_deque.rs
new file mode 100644 (file)
index 0000000..219645b
--- /dev/null
@@ -0,0 +1,1014 @@
+use std::iter::repeat_with;
+use std::ops::{Index, IndexMut};
+use std::ptr;
+use std::slice;
+
+use super::RawVirtualVec;
+
+const INITIAL_CAPACITY: usize = 7; // 2^3 - 1
+const MINIMUM_CAPACITY: usize = 1; // 2 - 1
+
+pub struct VirtualDeque<T> {
+    buf: RawVirtualVec<T>,
+    head: usize,
+    tail: usize,
+}
+
+impl<T> VirtualDeque<T> {
+    pub fn new(max_capacity: usize) -> Self {
+        Self::with_capacity(INITIAL_CAPACITY, max_capacity)
+    }
+
+    pub fn with_capacity(capacity: usize, max_capacity: usize) -> Self {
+        assert!(max_capacity.is_power_of_two());
+        assert!(max_capacity < std::isize::MAX as usize);
+        let cap = std::cmp::max(capacity + 1, MINIMUM_CAPACITY + 1).next_power_of_two();
+        Self {
+            buf: RawVirtualVec::with_capacity(cap, max_capacity),
+            head: 0,
+            tail: 0,
+        }
+    }
+
+    #[inline]
+    pub fn max_capacity(&self) -> usize {
+        self.buf.max_capacity()
+    }
+
+    #[inline]
+    fn ptr(&self) -> *mut T {
+        self.buf.ptr()
+    }
+
+    #[inline]
+    fn capacity(&self) -> usize {
+        self.buf.capacity()
+    }
+
+    #[inline]
+    fn cap(&self) -> usize {
+        self.buf.capacity()
+    }
+
+    #[inline]
+    pub fn as_slices(&self) -> (&[T], &[T]) {
+        unsafe {
+            let buf = self.buffer_as_slice();
+            RingSlices::ring_slices(buf, self.head, self.tail)
+        }
+    }
+
+    #[inline]
+    pub fn as_mut_slices(&mut self) -> (&mut [T], &mut [T]) {
+        unsafe {
+            let head = self.head;
+            let tail = self.tail;
+            let buf = self.buffer_as_mut_slice();
+            RingSlices::ring_slices(buf, head, tail)
+        }
+    }
+
+    /// Turn ptr into a slice
+    #[inline]
+    unsafe fn buffer_as_slice(&self) -> &[T] {
+        slice::from_raw_parts(self.ptr(), self.cap())
+    }
+
+    /// Turn ptr into a mut slice
+    #[inline]
+    unsafe fn buffer_as_mut_slice(&mut self) -> &mut [T] {
+        slice::from_raw_parts_mut(self.ptr(), self.cap())
+    }
+
+    /// Moves an element out of the buffer
+    #[inline]
+    unsafe fn buffer_read(&mut self, off: usize) -> T {
+        ptr::read(self.ptr().add(off))
+    }
+
+    /// Writes an element into the buffer, moving it.
+    #[inline]
+    unsafe fn buffer_write(&mut self, off: usize, value: T) {
+        ptr::write(self.ptr().add(off), value);
+    }
+
+    pub fn len(&self) -> usize {
+        count(self.tail, self.head, self.cap())
+    }
+
+    /// Returns `true` if the buffer is at full capacity.
+    #[inline]
+    fn is_full(&self) -> bool {
+        self.cap() - self.len() == 1
+    }
+
+    #[inline]
+    pub fn is_empty(&self) -> bool {
+        self.tail == self.head
+    }
+
+    /// Returns the index in the underlying buffer for a given logical element
+    /// index + addend.
+    #[inline]
+    fn wrap_add(&self, idx: usize, addend: usize) -> usize {
+        wrap_index(idx.wrapping_add(addend), self.cap())
+    }
+
+    /// Returns the index in the underlying buffer for a given logical element
+    /// index - subtrahend.
+    #[inline]
+    fn wrap_sub(&self, idx: usize, subtrahend: usize) -> usize {
+        wrap_index(idx.wrapping_sub(subtrahend), self.cap())
+    }
+
+    /// Copies a contiguous block of memory len long from src to dst
+    #[inline]
+    unsafe fn copy(&self, dst: usize, src: usize, len: usize) {
+        debug_assert!(
+            dst + len <= self.cap(),
+            "cpy dst={} src={} len={} cap={}",
+            dst,
+            src,
+            len,
+            self.cap()
+        );
+        debug_assert!(
+            src + len <= self.cap(),
+            "cpy dst={} src={} len={} cap={}",
+            dst,
+            src,
+            len,
+            self.cap()
+        );
+        ptr::copy(self.ptr().add(src), self.ptr().add(dst), len);
+    }
+
+    /// Copies a contiguous block of memory len long from src to dst
+    #[inline]
+    unsafe fn copy_nonoverlapping(&self, dst: usize, src: usize, len: usize) {
+        debug_assert!(
+            dst + len <= self.cap(),
+            "cno dst={} src={} len={} cap={}",
+            dst,
+            src,
+            len,
+            self.cap()
+        );
+        debug_assert!(
+            src + len <= self.cap(),
+            "cno dst={} src={} len={} cap={}",
+            dst,
+            src,
+            len,
+            self.cap()
+        );
+        ptr::copy_nonoverlapping(self.ptr().add(src), self.ptr().add(dst), len);
+    }
+
+    /// Frobs the head and tail sections around to handle the fact that we
+    /// just reallocated. Unsafe because it trusts old_capacity.
+    #[inline]
+    unsafe fn handle_capacity_increase(&mut self, old_capacity: usize) {
+        let new_capacity = self.cap();
+
+        // Move the shortest contiguous section of the ring buffer
+        //    T             H
+        //   [o o o o o o o . ]
+        //    T             H
+        // A [o o o o o o o . . . . . . . . . ]
+        //        H T
+        //   [o o . o o o o o ]
+        //          T             H
+        // B [. . . o o o o o o o . . . . . . ]
+        //              H T
+        //   [o o o o o . o o ]
+        //              H                 T
+        // C [o o o o o . . . . . . . . . o o ]
+
+        if self.tail <= self.head {
+            // A
+            // Nop
+        } else if self.head < old_capacity - self.tail {
+            // B
+            self.copy_nonoverlapping(old_capacity, 0, self.head);
+            self.head += old_capacity;
+            debug_assert!(self.head > self.tail);
+        } else {
+            // C
+            let new_tail = new_capacity - (old_capacity - self.tail);
+            self.copy_nonoverlapping(new_tail, self.tail, old_capacity - self.tail);
+            self.tail = new_tail;
+            debug_assert!(self.head < self.tail);
+        }
+        debug_assert!(self.head < self.cap());
+        debug_assert!(self.tail < self.cap());
+        debug_assert!(self.cap().count_ones() == 1);
+    }
+
+    pub fn reserve_exact(&mut self, additional: usize) {
+        self.reserve(additional);
+    }
+
+    pub fn reserve(&mut self, additional: usize) {
+        let old_cap = self.cap();
+        let used_cap = self.len() + 1;
+        let new_cap = used_cap
+            .checked_add(additional)
+            .and_then(|needed_cap| needed_cap.checked_next_power_of_two())
+            .expect("capacity overflow");
+
+        if new_cap > old_cap {
+            self.buf.reserve(used_cap, new_cap - used_cap);
+            unsafe {
+                self.handle_capacity_increase(old_cap);
+            }
+        }
+    }
+
+    pub fn get(&self, index: usize) -> Option<&T> {
+        if index < self.len() {
+            let idx = self.wrap_add(self.tail, index);
+            unsafe { Some(&*self.ptr().add(idx)) }
+        } else {
+            None
+        }
+    }
+
+    pub fn get_mut(&mut self, index: usize) -> Option<&mut T> {
+        if index < self.len() {
+            let idx = self.wrap_add(self.tail, index);
+            unsafe { Some(&mut *self.ptr().add(idx)) }
+        } else {
+            None
+        }
+    }
+
+    pub fn pop_front(&mut self) -> Option<T> {
+        if self.is_empty() {
+            None
+        } else {
+            let tail = self.tail;
+            self.tail = self.wrap_add(self.tail, 1);
+            unsafe { Some(self.buffer_read(tail)) }
+        }
+    }
+
+    pub fn pop_back(&mut self) -> Option<T> {
+        if self.is_empty() {
+            None
+        } else {
+            self.head = self.wrap_sub(self.head, 1);
+            let head = self.head;
+            unsafe { Some(self.buffer_read(head)) }
+        }
+    }
+
+    pub fn push_front(&mut self, value: T) {
+        self.grow_if_necessary();
+
+        self.tail = self.wrap_sub(self.tail, 1);
+        let tail = self.tail;
+        unsafe {
+            self.buffer_write(tail, value);
+        }
+    }
+
+    pub fn push_back(&mut self, value: T) {
+        self.grow_if_necessary();
+
+        let head = self.head;
+        self.head = self.wrap_add(self.head, 1);
+        unsafe { self.buffer_write(head, value) }
+    }
+
+    pub fn swap(&mut self, i: usize, j: usize) {
+        assert!(i < self.len());
+        assert!(j < self.len());
+        let ri = self.wrap_add(self.tail, i);
+        let rj = self.wrap_add(self.tail, j);
+        unsafe { ptr::swap(self.ptr().add(ri), self.ptr().add(rj)) }
+    }
+
+    pub fn swap_remove_front(&mut self, index: usize) -> Option<T> {
+        let length = self.len();
+        if length > 0 && index < length && index != 0 {
+            self.swap(index, 0);
+        } else if index >= length {
+            return None;
+        }
+        self.pop_front()
+    }
+
+    pub fn swap_remove_back(&mut self, index: usize) -> Option<T> {
+        let length = self.len();
+        if length > 0 && index < length - 1 {
+            self.swap(index, length - 1);
+        } else if index >= length {
+            return None;
+        }
+        self.pop_back()
+    }
+
+    #[inline]
+    fn is_contiguous(&self) -> bool {
+        self.tail <= self.head
+    }
+
+    pub fn insert(&mut self, index: usize, value: T) {
+        assert!(index <= self.len(), "index out of bounds");
+        self.grow_if_necessary();
+
+        // Move the least number of elements in the ring buffer and insert
+        // the given object
+        //
+        // At most len/2 - 1 elements will be moved. O(min(n, n-i))
+        //
+        // There are three main cases:
+        //  Elements are contiguous
+        //      - special case when tail is 0
+        //  Elements are discontiguous and the insert is in the tail section
+        //  Elements are discontiguous and the insert is in the head section
+        //
+        // For each of those there are two more cases:
+        //  Insert is closer to tail
+        //  Insert is closer to head
+        //
+        // Key: H - self.head
+        //      T - self.tail
+        //      o - Valid element
+        //      I - Insertion element
+        //      A - The element that should be after the insertion point
+        //      M - Indicates element was moved
+
+        let idx = self.wrap_add(self.tail, index);
+
+        let distance_to_tail = index;
+        let distance_to_head = self.len() - index;
+
+        let contiguous = self.is_contiguous();
+
+        match (
+            contiguous,
+            distance_to_tail <= distance_to_head,
+            idx >= self.tail,
+        ) {
+            (true, true, _) if index == 0 => {
+                // push_front
+                //
+                //       T
+                //       I             H
+                //      [A o o o o o o . . . . . . . . .]
+                //
+                //                       H         T
+                //      [A o o o o o o o . . . . . I]
+                //
+
+                self.tail = self.wrap_sub(self.tail, 1);
+            }
+            (true, true, _) => {
+                unsafe {
+                    // contiguous, insert closer to tail:
+                    //
+                    //             T   I         H
+                    //      [. . . o o A o o o o . . . . . .]
+                    //
+                    //           T               H
+                    //      [. . o o I A o o o o . . . . . .]
+                    //           M M
+                    //
+                    // contiguous, insert closer to tail and tail is 0:
+                    //
+                    //
+                    //       T   I         H
+                    //      [o o A o o o o . . . . . . . . .]
+                    //
+                    //                       H             T
+                    //      [o I A o o o o o . . . . . . . o]
+                    //       M                             M
+
+                    let new_tail = self.wrap_sub(self.tail, 1);
+
+                    self.copy(new_tail, self.tail, 1);
+                    // Already moved the tail, so we only copy `index - 1` elements.
+                    self.copy(self.tail, self.tail + 1, index - 1);
+
+                    self.tail = new_tail;
+                }
+            }
+            (true, false, _) => {
+                unsafe {
+                    //  contiguous, insert closer to head:
+                    //
+                    //             T       I     H
+                    //      [. . . o o o o A o o . . . . . .]
+                    //
+                    //             T               H
+                    //      [. . . o o o o I A o o . . . . .]
+                    //                       M M M
+
+                    self.copy(idx + 1, idx, self.head - idx);
+                    self.head = self.wrap_add(self.head, 1);
+                }
+            }
+            (false, true, true) => {
+                unsafe {
+                    // discontiguous, insert closer to tail, tail section:
+                    //
+                    //                   H         T   I
+                    //      [o o o o o o . . . . . o o A o o]
+                    //
+                    //                   H       T
+                    //      [o o o o o o . . . . o o I A o o]
+                    //                           M M
+
+                    self.copy(self.tail - 1, self.tail, index);
+                    self.tail -= 1;
+                }
+            }
+            (false, false, true) => {
+                unsafe {
+                    // discontiguous, insert closer to head, tail section:
+                    //
+                    //           H             T         I
+                    //      [o o . . . . . . . o o o o o A o]
+                    //
+                    //             H           T
+                    //      [o o o . . . . . . o o o o o I A]
+                    //       M M M                         M
+
+                    // copy elements up to new head
+                    self.copy(1, 0, self.head);
+
+                    // copy last element into empty spot at bottom of buffer
+                    self.copy(0, self.cap() - 1, 1);
+
+                    // move elements from idx to end forward not including ^ element
+                    self.copy(idx + 1, idx, self.cap() - 1 - idx);
+
+                    self.head += 1;
+                }
+            }
+            (false, true, false) if idx == 0 => {
+                unsafe {
+                    // discontiguous, insert is closer to tail, head section,
+                    // and is at index zero in the internal buffer:
+                    //
+                    //       I                   H     T
+                    //      [A o o o o o o o o o . . . o o o]
+                    //
+                    //                           H   T
+                    //      [A o o o o o o o o o . . o o o I]
+                    //                               M M M
+
+                    // copy elements up to new tail
+                    self.copy(self.tail - 1, self.tail, self.cap() - self.tail);
+
+                    // copy last element into empty spot at bottom of buffer
+                    self.copy(self.cap() - 1, 0, 1);
+
+                    self.tail -= 1;
+                }
+            }
+            (false, true, false) => {
+                unsafe {
+                    // discontiguous, insert closer to tail, head section:
+                    //
+                    //             I             H     T
+                    //      [o o o A o o o o o o . . . o o o]
+                    //
+                    //                           H   T
+                    //      [o o I A o o o o o o . . o o o o]
+                    //       M M                     M M M M
+
+                    // copy elements up to new tail
+                    self.copy(self.tail - 1, self.tail, self.cap() - self.tail);
+
+                    // copy last element into empty spot at bottom of buffer
+                    self.copy(self.cap() - 1, 0, 1);
+
+                    // move elements from idx-1 to end forward not including ^ element
+                    self.copy(0, 1, idx - 1);
+
+                    self.tail -= 1;
+                }
+            }
+            (false, false, false) => {
+                unsafe {
+                    // discontiguous, insert closer to head, head section:
+                    //
+                    //               I     H           T
+                    //      [o o o o A o o . . . . . . o o o]
+                    //
+                    //                     H           T
+                    //      [o o o o I A o o . . . . . o o o]
+                    //                 M M M
+
+                    self.copy(idx + 1, idx, self.head - idx);
+                    self.head += 1;
+                }
+            }
+        }
+
+        // tail might've been changed so we need to recalculate
+        let new_idx = self.wrap_add(self.tail, index);
+        unsafe {
+            self.buffer_write(new_idx, value);
+        }
+    }
+
+    /// Removes and returns the element at `index` from the `VirtualDeque`.
+    /// Whichever end is closer to the removal point will be moved to make
+    /// room, and all the affected elements will be moved to new positions.
+    /// Returns `None` if `index` is out of bounds.
+    ///
+    /// Element at index 0 is the front of the queue.
+    pub fn remove(&mut self, index: usize) -> Option<T> {
+        if self.is_empty() || self.len() <= index {
+            return None;
+        }
+
+        // There are three main cases:
+        //  Elements are contiguous
+        //  Elements are discontiguous and the removal is in the tail section
+        //  Elements are discontiguous and the removal is in the head section
+        //      - special case when elements are technically contiguous,
+        //        but self.head = 0
+        //
+        // For each of those there are two more cases:
+        //  Removal is closer to tail
+        //  Removal is closer to head
+        //
+        // Key: H - self.head
+        //      T - self.tail
+        //      o - Valid element
+        //      x - Element marked for removal
+        //      R - Indicates element that is being removed
+        //      M - Indicates element was moved
+
+        let idx = self.wrap_add(self.tail, index);
+
+        let elem = unsafe { Some(self.buffer_read(idx)) };
+
+        let distance_to_tail = index;
+        let distance_to_head = self.len() - index;
+
+        let contiguous = self.is_contiguous();
+
+        match (
+            contiguous,
+            distance_to_tail <= distance_to_head,
+            idx >= self.tail,
+        ) {
+            (true, true, _) => {
+                unsafe {
+                    // contiguous, remove closer to tail:
+                    //
+                    //             T   R         H
+                    //      [. . . o o x o o o o . . . . . .]
+                    //
+                    //               T           H
+                    //      [. . . . o o o o o o . . . . . .]
+                    //               M M
+
+                    self.copy(self.tail + 1, self.tail, index);
+                    self.tail += 1;
+                }
+            }
+            (true, false, _) => {
+                unsafe {
+                    // contiguous, remove closer to head:
+                    //
+                    //             T       R     H
+                    //      [. . . o o o o x o o . . . . . .]
+                    //
+                    //             T           H
+                    //      [. . . o o o o o o . . . . . . .]
+                    //                     M M
+
+                    self.copy(idx, idx + 1, self.head - idx - 1);
+                    self.head -= 1;
+                }
+            }
+            (false, true, true) => {
+                unsafe {
+                    // discontiguous, remove closer to tail, tail section:
+                    //
+                    //                   H         T   R
+                    //      [o o o o o o . . . . . o o x o o]
+                    //
+                    //                   H           T
+                    //      [o o o o o o . . . . . . o o o o]
+                    //                               M M
+
+                    self.copy(self.tail + 1, self.tail, index);
+                    self.tail = self.wrap_add(self.tail, 1);
+                }
+            }
+            (false, false, false) => {
+                unsafe {
+                    // discontiguous, remove closer to head, head section:
+                    //
+                    //               R     H           T
+                    //      [o o o o x o o . . . . . . o o o]
+                    //
+                    //                   H             T
+                    //      [o o o o o o . . . . . . . o o o]
+                    //               M M
+
+                    self.copy(idx, idx + 1, self.head - idx - 1);
+                    self.head -= 1;
+                }
+            }
+            (false, false, true) => {
+                unsafe {
+                    // discontiguous, remove closer to head, tail section:
+                    //
+                    //             H           T         R
+                    //      [o o o . . . . . . o o o o o x o]
+                    //
+                    //           H             T
+                    //      [o o . . . . . . . o o o o o o o]
+                    //       M M                         M M
+                    //
+                    // or quasi-discontiguous, remove next to head, tail section:
+                    //
+                    //       H                 T         R
+                    //      [. . . . . . . . . o o o o o x o]
+                    //
+                    //                         T           H
+                    //      [. . . . . . . . . o o o o o o .]
+                    //                                   M
+
+                    // draw in elements in the tail section
+                    self.copy(idx, idx + 1, self.cap() - idx - 1);
+
+                    // Prevents underflow.
+                    if self.head != 0 {
+                        // copy first element into empty spot
+                        self.copy(self.cap() - 1, 0, 1);
+
+                        // move elements in the head section backwards
+                        self.copy(0, 1, self.head - 1);
+                    }
+
+                    self.head = self.wrap_sub(self.head, 1);
+                }
+            }
+            (false, true, false) => {
+                unsafe {
+                    // discontiguous, remove closer to tail, head section:
+                    //
+                    //           R               H     T
+                    //      [o o x o o o o o o o . . . o o o]
+                    //
+                    //                           H       T
+                    //      [o o o o o o o o o o . . . . o o]
+                    //       M M M                       M M
+
+                    // draw in elements up to idx
+                    self.copy(1, 0, idx);
+
+                    // copy last element into empty spot
+                    self.copy(0, self.cap() - 1, 1);
+
+                    // move elements from tail to end forward, excluding the last one
+                    self.copy(self.tail + 1, self.tail, self.cap() - self.tail - 1);
+
+                    self.tail = self.wrap_add(self.tail, 1);
+                }
+            }
+        }
+
+        elem
+    }
+
+    pub fn truncate(&mut self, len: usize) {
+        // Safe because:
+        //
+        // * Any slice passed to `drop_in_place` is valid; the second case has
+        //   `len <= front.len()` and returning on `len > self.len()` ensures
+        //   `begin <= back.len()` in the first case
+        // * The head of the VirtualDeque is moved before calling `drop_in_place`,
+        //   so no value is dropped twice if `drop_in_place` panics
+        unsafe {
+            if len > self.len() {
+                return;
+            }
+            let num_dropped = self.len() - len;
+            let (front, back) = self.as_mut_slices();
+            if len > front.len() {
+                let begin = len - front.len();
+                let drop_back = back.get_unchecked_mut(begin..) as *mut _;
+                self.head = self.wrap_sub(self.head, num_dropped);
+                ptr::drop_in_place(drop_back);
+            } else {
+                let drop_back = back as *mut _;
+                let drop_front = front.get_unchecked_mut(len..) as *mut _;
+                self.head = self.wrap_sub(self.head, num_dropped);
+                ptr::drop_in_place(drop_front);
+                ptr::drop_in_place(drop_back);
+            }
+        }
+    }
+
+    /// Retains only the elements specified by the predicate.
+    ///
+    /// In other words, remove all elements `e` such that `f(&e)` returns false.
+    /// This method operates in place, visiting each element exactly once in the
+    /// original order, and preserves the order of the retained elements.
+    pub fn retain<F>(&mut self, mut f: F)
+    where
+        F: FnMut(&T) -> bool,
+    {
+        let len = self.len();
+        let mut del = 0;
+        for i in 0..len {
+            if !f(&self[i]) {
+                del += 1;
+            } else if del > 0 {
+                self.swap(i - del, i);
+            }
+        }
+        if del > 0 {
+            self.truncate(len - del);
+        }
+    }
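+
+    // A usage sketch for `retain`, assuming a deque already holding 1, 2, 3, 4:
+    //
+    //     deque.retain(|&x| x % 2 == 0);
+    //     assert_eq!(deque.iter().copied().collect::<Vec<_>>(), vec![2, 4]);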
+
+    // This may panic or abort
+    #[inline]
+    fn grow_if_necessary(&mut self) {
+        if self.is_full() {
+            let old_cap = self.cap();
+            self.buf.double();
+            unsafe {
+                self.handle_capacity_increase(old_cap);
+            }
+            debug_assert!(!self.is_full());
+        }
+    }
+
+    pub fn resize_with(&mut self, new_len: usize, generator: impl FnMut() -> T) {
+        let len = self.len();
+
+        if new_len > len {
+            self.extend(repeat_with(generator).take(new_len - len))
+        } else {
+            self.truncate(new_len);
+        }
+    }
+
+    pub fn iter(&self) -> Iter<'_, T> {
+        Iter {
+            tail: self.tail,
+            head: self.head,
+            ring: unsafe { self.buffer_as_slice() },
+        }
+    }
+
+    pub fn iter_mut(&mut self) -> IterMut<'_, T> {
+        IterMut {
+            tail: self.tail,
+            head: self.head,
+            ring: unsafe { self.buffer_as_mut_slice() },
+        }
+    }
+}
+
+/// Calculate the number of elements left to be read in the buffer
+#[inline]
+fn count(tail: usize, head: usize, size: usize) -> usize {
+    // size is always a power of 2
+    (head.wrapping_sub(tail)) & (size - 1)
+}
+
+#[inline]
+fn wrap_index(index: usize, size: usize) -> usize {
+    // size is always a power of 2
+    debug_assert!(size.is_power_of_two());
+    index & (size - 1)
+}
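+
+// For example, with size = 8, tail = 6, and head = 2, the deque holds
+// count(6, 2, 8) = (2 - 6) & 7 = 4 elements, and advancing past the end of
+// the buffer wraps back to the start: wrap_index(7 + 1, 8) = 0.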
+
+// Returns the two slices that cover the `VirtualDeque`'s valid range
+trait RingSlices: Sized {
+    fn slice(self, from: usize, to: usize) -> Self;
+    fn split_at(self, i: usize) -> (Self, Self);
+
+    fn ring_slices(buf: Self, head: usize, tail: usize) -> (Self, Self) {
+        let contiguous = tail <= head;
+        if contiguous {
+            let (empty, buf) = buf.split_at(0);
+            (buf.slice(tail, head), empty)
+        } else {
+            let (mid, right) = buf.split_at(tail);
+            let (left, _) = mid.split_at(head);
+            (right, left)
+        }
+    }
+}
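+
+// Continuing the example above: for a capacity-8 buffer with tail = 6 and
+// head = 2, ring_slices returns (&buf[6..8], &buf[0..2]), i.e. the wrapped
+// tail section (the front of the deque) first, then the head section.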
+
+impl<T> RingSlices for &[T] {
+    fn slice(self, from: usize, to: usize) -> Self {
+        &self[from..to]
+    }
+    fn split_at(self, i: usize) -> (Self, Self) {
+        (*self).split_at(i)
+    }
+}
+
+impl<T> RingSlices for &mut [T] {
+    fn slice(self, from: usize, to: usize) -> Self {
+        &mut self[from..to]
+    }
+    fn split_at(self, i: usize) -> (Self, Self) {
+        (*self).split_at_mut(i)
+    }
+}
+
+#[derive(Clone)]
+pub struct Iter<'a, T: 'a> {
+    ring: &'a [T],
+    tail: usize,
+    head: usize,
+}
+
+impl<'a, T> Iterator for Iter<'a, T> {
+    type Item = &'a T;
+
+    #[inline]
+    fn next(&mut self) -> Option<&'a T> {
+        if self.tail == self.head {
+            return None;
+        }
+        let tail = self.tail;
+        self.tail = wrap_index(self.tail.wrapping_add(1), self.ring.len());
+        unsafe { Some(self.ring.get_unchecked(tail)) }
+    }
+
+    #[inline]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        let len = count(self.tail, self.head, self.ring.len());
+        (len, Some(len))
+    }
+
+    fn fold<Acc, F>(self, mut accum: Acc, mut f: F) -> Acc
+    where
+        F: FnMut(Acc, Self::Item) -> Acc,
+    {
+        let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail);
+        accum = front.iter().fold(accum, &mut f);
+        back.iter().fold(accum, &mut f)
+    }
+
+    fn nth(&mut self, n: usize) -> Option<Self::Item> {
+        if n >= count(self.tail, self.head, self.ring.len()) {
+            self.tail = self.head;
+            None
+        } else {
+            self.tail = wrap_index(self.tail.wrapping_add(n), self.ring.len());
+            self.next()
+        }
+    }
+
+    #[inline]
+    fn last(mut self) -> Option<&'a T> {
+        self.next_back()
+    }
+}
+
+impl<A> Extend<A> for VirtualDeque<A> {
+    fn extend<T: IntoIterator<Item = A>>(&mut self, iter: T) {
+        // This function should be the moral equivalent of:
+        //
+        //      for item in iter.into_iter() {
+        //          self.push_back(item);
+        //      }
+        let mut iter = iter.into_iter();
+        while let Some(element) = iter.next() {
+            if self.len() == self.capacity() {
+                let (lower, _) = iter.size_hint();
+                self.reserve(lower.saturating_add(1));
+            }
+
+            let head = self.head;
+            self.head = self.wrap_add(self.head, 1);
+            unsafe {
+                self.buffer_write(head, element);
+            }
+        }
+    }
+}
+
+impl<'a, T> DoubleEndedIterator for Iter<'a, T> {
+    #[inline]
+    fn next_back(&mut self) -> Option<&'a T> {
+        if self.tail == self.head {
+            return None;
+        }
+        self.head = wrap_index(self.head.wrapping_sub(1), self.ring.len());
+        unsafe { Some(self.ring.get_unchecked(self.head)) }
+    }
+
+    fn rfold<Acc, F>(self, mut accum: Acc, mut f: F) -> Acc
+    where
+        F: FnMut(Acc, Self::Item) -> Acc,
+    {
+        let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail);
+        accum = back.iter().rfold(accum, &mut f);
+        front.iter().rfold(accum, &mut f)
+    }
+}
+
+pub struct IterMut<'a, T: 'a> {
+    ring: &'a mut [T],
+    tail: usize,
+    head: usize,
+}
+
+impl<'a, T> Iterator for IterMut<'a, T> {
+    type Item = &'a mut T;
+
+    #[inline]
+    fn next(&mut self) -> Option<&'a mut T> {
+        if self.tail == self.head {
+            return None;
+        }
+        let tail = self.tail;
+        self.tail = wrap_index(self.tail.wrapping_add(1), self.ring.len());
+
+        unsafe {
+            let elem = self.ring.get_unchecked_mut(tail);
+            Some(&mut *(elem as *mut _))
+        }
+    }
+
+    #[inline]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        let len = count(self.tail, self.head, self.ring.len());
+        (len, Some(len))
+    }
+
+    fn fold<Acc, F>(self, mut accum: Acc, mut f: F) -> Acc
+    where
+        F: FnMut(Acc, Self::Item) -> Acc,
+    {
+        let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail);
+        accum = front.iter_mut().fold(accum, &mut f);
+        back.iter_mut().fold(accum, &mut f)
+    }
+
+    fn nth(&mut self, n: usize) -> Option<Self::Item> {
+        if n >= count(self.tail, self.head, self.ring.len()) {
+            self.tail = self.head;
+            None
+        } else {
+            self.tail = wrap_index(self.tail.wrapping_add(n), self.ring.len());
+            self.next()
+        }
+    }
+
+    #[inline]
+    fn last(mut self) -> Option<&'a mut T> {
+        self.next_back()
+    }
+}
+
+impl<'a, T> DoubleEndedIterator for IterMut<'a, T> {
+    #[inline]
+    fn next_back(&mut self) -> Option<&'a mut T> {
+        if self.tail == self.head {
+            return None;
+        }
+        self.head = wrap_index(self.head.wrapping_sub(1), self.ring.len());
+
+        unsafe {
+            let elem = self.ring.get_unchecked_mut(self.head);
+            Some(&mut *(elem as *mut _))
+        }
+    }
+
+    fn rfold<Acc, F>(self, mut accum: Acc, mut f: F) -> Acc
+    where
+        F: FnMut(Acc, Self::Item) -> Acc,
+    {
+        let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail);
+        accum = back.iter_mut().rfold(accum, &mut f);
+        front.iter_mut().rfold(accum, &mut f)
+    }
+}
+
+impl<A> Index<usize> for VirtualDeque<A> {
+    type Output = A;
+
+    #[inline]
+    fn index(&self, index: usize) -> &A {
+        self.get(index).expect("Out of bounds access")
+    }
+}
+
+impl<A> IndexMut<usize> for VirtualDeque<A> {
+    #[inline]
+    fn index_mut(&mut self, index: usize) -> &mut A {
+        self.get_mut(index).expect("Out of bounds access")
+    }
+}
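+
+// A minimal end-to-end sketch, assuming a `new(max_capacity)` constructor and
+// the `push_back`/`pop_back` methods defined earlier in this file:
+//
+//     let mut deque: VirtualDeque<u32> = VirtualDeque::new(1 << 20);
+//     for i in 0..8 {
+//         deque.push_back(i);
+//     }
+//     deque.insert(3, 100);                   // shifts whichever side is shorter
+//     assert_eq!(deque.remove(3), Some(100)); // contents are 0..8 again
+//     assert_eq!(deque.pop_back(), Some(7));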
diff --git a/narcissus-core/src/virtual_vec/virtual_vec.rs b/narcissus-core/src/virtual_vec/virtual_vec.rs
new file mode 100644 (file)
index 0000000..9f2a927
--- /dev/null
@@ -0,0 +1,410 @@
+use std::ops::{Deref, DerefMut};
+use std::ptr;
+
+use super::RawVirtualVec;
+
+pub struct VirtualVec<T> {
+    buf: RawVirtualVec<T>,
+    len: usize,
+}
+
+impl<T> VirtualVec<T> {
+    /// Creates a new vector backed by virtual memory. The vector cannot grow beyond its
+    /// original reservation.
+    ///
+    /// Unlike a normal vector, this means addresses will not be invalidated when the vector
+    /// grows, nor will there be any copying.
+    ///
+    /// # Panics
+    ///
+    /// Panics if the memory reservation fails, or if there's any overflow in the size calculations.
+    pub fn new(max_capacity: usize) -> Self {
+        Self {
+            buf: RawVirtualVec::new(max_capacity),
+            len: 0,
+        }
+    }
+
+    pub fn with_capacity(capacity: usize, max_capacity: usize) -> Self {
+        Self {
+            buf: RawVirtualVec::with_capacity(capacity, max_capacity),
+            len: 0,
+        }
+    }
+
+    pub fn push(&mut self, value: T) {
+        if self.len == self.capacity() {
+            self.reserve(1);
+        }
+
+        unsafe {
+            let end = self.as_mut_ptr().add(self.len);
+            ptr::write(end, value);
+            self.len += 1;
+        }
+    }
+
+    pub fn pop(&mut self) -> Option<T> {
+        if self.len == 0 {
+            None
+        } else {
+            unsafe {
+                self.len -= 1;
+                Some(ptr::read(self.get_unchecked(self.len())))
+            }
+        }
+    }
+
+    pub fn append(&mut self, other: &mut Self) {
+        unsafe {
+            self.append_elements(other.as_slice() as _);
+            other.set_len(0);
+        }
+    }
+
+    #[inline]
+    unsafe fn append_elements(&mut self, other: *const [T]) {
+        let count = (*other).len();
+        self.reserve(count);
+        let len = self.len();
+        ptr::copy_nonoverlapping(other as *const T, self.as_mut_ptr().add(len), count);
+        self.len += count;
+    }
+
+    pub fn clear(&mut self) {
+        self.truncate(0)
+    }
+
+    pub fn truncate(&mut self, len: usize) {
+        // This is safe because:
+        //
+        // * the slice passed to `drop_in_place` is valid; the `len > self.len`
+        //   case avoids creating an invalid slice, and
+        // * the `len` of the vector is shrunk before calling `drop_in_place`,
+        //   such that no value will be dropped twice in case `drop_in_place`
+        //   were to panic once (if it panics twice, the program aborts).
+        unsafe {
+            if len > self.len {
+                return;
+            }
+            let s = self.get_unchecked_mut(len..) as *mut _;
+            self.len = len;
+            ptr::drop_in_place(s);
+        }
+    }
+
+    #[inline]
+    unsafe fn set_len(&mut self, new_len: usize) {
+        debug_assert!(new_len <= self.capacity());
+        self.len = new_len;
+    }
+
+    /// Extracts a slice containing the entire vector.
+    ///
+    /// Equivalent to `&s[..]`.
+    #[inline]
+    pub fn as_slice(&self) -> &[T] {
+        self
+    }
+
+    /// Extracts a mutable slice of the entire vector.
+    ///
+    /// Equivalent to `&mut s[..]`.
+    #[inline]
+    pub fn as_mut_slice(&mut self) -> &mut [T] {
+        self
+    }
+
+    /// Returns a raw pointer to the vector's buffer.
+    ///
+    /// The caller must ensure that the vector outlives the pointer this
+    /// function returns, or else it will end up pointing to garbage.
+    ///
+    /// The caller must also ensure that the memory the pointer (non-transitively) points to
+    /// is never written to (except inside an `UnsafeCell`) using this pointer or any pointer
+    /// derived from it. If you need to mutate the contents of the slice, use [`as_mut_ptr`].
+    ///
+    /// [`as_mut_ptr`]: #method.as_mut_ptr
+    #[inline]
+    pub fn as_ptr(&self) -> *const T {
+        // We shadow the slice method of the same name to avoid going through
+        // `deref`, which creates an intermediate reference.
+        self.buf.ptr()
+    }
+
+    /// Returns an unsafe mutable pointer to the vector's buffer.
+    ///
+    /// The caller must ensure that the vector outlives the pointer this
+    /// function returns, or else it will end up pointing to garbage.
+    ///
+    #[inline]
+    pub fn as_mut_ptr(&mut self) -> *mut T {
+        self.buf.ptr()
+    }
+
+    /// Removes an element from the vector and returns it.
+    ///
+    /// The removed element is replaced by the last element of the vector.
+    ///
+    /// This does not preserve ordering, but is O(1).
+    ///
+    /// # Panics
+    ///
+    /// Panics if `index` is out of bounds.
+    #[inline]
+    pub fn swap_remove(&mut self, index: usize) -> T {
+        unsafe {
+            // We replace self[index] with the last element. Note that if the
+            // bounds check on hole succeeds there must be a last element (which
+            // can be self[index] itself).
+            let hole: *mut T = &mut self[index];
+            let last = ptr::read(self.get_unchecked(self.len - 1));
+            self.len -= 1;
+            ptr::replace(hole, last)
+        }
+    }
+
+    /// Inserts an element at position `index` within the vector, shifting all
+    /// elements after it to the right.
+    ///
+    /// # Panics
+    ///
+    /// Panics if `index > len`.
+    pub fn insert(&mut self, index: usize, element: T) {
+        let len = self.len();
+        assert!(index <= len);
+
+        // space for the new element
+        self.reserve(1);
+
+        unsafe {
+            // infallible
+            // The spot to put the new value
+            {
+                let p = self.as_mut_ptr().add(index);
+                // Shift everything over to make space. (Duplicating the
+                // `index`th element into two consecutive places.)
+                ptr::copy(p, p.offset(1), len - index);
+                // Write it in, overwriting the first copy of the `index`th
+                // element.
+                ptr::write(p, element);
+            }
+            self.len += 1;
+        }
+    }
+
+    /// Removes and returns the element at position `index` within the vector,
+    /// shifting all elements after it to the left.
+    ///
+    /// # Panics
+    ///
+    /// Panics if `index` is out of bounds.
+    pub fn remove(&mut self, index: usize) -> T {
+        let len = self.len();
+        assert!(index < len);
+        unsafe {
+            // infallible
+            let ret;
+            {
+                // the place we are taking from.
+                let ptr = self.as_mut_ptr().add(index);
+                // copy it out, unsafely having a copy of the value on
+                // the stack and in the vector at the same time.
+                ret = ptr::read(ptr);
+
+                // Shift everything down to fill in that spot.
+                ptr::copy(ptr.offset(1), ptr, len - index - 1);
+            }
+            self.len -= 1;
+            ret
+        }
+    }
+
+    pub fn resize_with<F>(&mut self, new_len: usize, f: F)
+    where
+        F: FnMut() -> T,
+    {
+        let len = self.len();
+        if new_len > len {
+            self.extend_with(new_len - len, ExtendFunc(f));
+        } else {
+            self.truncate(new_len);
+        }
+    }
+
+    #[inline]
+    pub fn reserve(&mut self, additional: usize) {
+        self.buf.reserve(self.len, additional);
+    }
+
+    #[inline]
+    pub fn is_empty(&self) -> bool {
+        self.len() == 0
+    }
+
+    #[inline]
+    pub fn is_full(&self) -> bool {
+        self.len() == self.capacity()
+    }
+
+    #[inline]
+    pub fn len(&self) -> usize {
+        self.len
+    }
+
+    #[inline]
+    pub fn capacity(&self) -> usize {
+        self.buf.capacity()
+    }
+
+    #[inline]
+    pub fn max_capacity(&self) -> usize {
+        self.buf.max_capacity()
+    }
+}
+
+impl<T> Deref for VirtualVec<T> {
+    type Target = [T];
+    fn deref(&self) -> &[T] {
+        unsafe { ::std::slice::from_raw_parts(self.buf.ptr(), self.len) }
+    }
+}
+
+impl<T> DerefMut for VirtualVec<T> {
+    fn deref_mut(&mut self) -> &mut [T] {
+        unsafe { ::std::slice::from_raw_parts_mut(self.buf.ptr(), self.len) }
+    }
+}
+
+impl<T: Clone> VirtualVec<T> {
+    /// Resizes the `Vec` in-place so that `len` is equal to `new_len`.
+    ///
+    /// If `new_len` is greater than `len`, the `Vec` is extended by the
+    /// difference, with each additional slot filled with `value`.
+    /// If `new_len` is less than `len`, the `Vec` is simply truncated.
+    ///
+    /// This method requires [`Clone`] to be able to clone the passed value. If
+    /// you need more flexibility (or want to rely on [`Default`] instead of
+    /// [`Clone`]), use [`resize_with`].
+    ///
+    /// [`Clone`]: ../../std/clone/trait.Clone.html
+    /// [`Default`]: ../../std/default/trait.Default.html
+    /// [`resize_with`]: #method.resize_with
+    pub fn resize(&mut self, new_len: usize, value: T) {
+        let len = self.len();
+
+        if new_len > len {
+            self.extend_with(new_len - len, ExtendElement(value))
+        } else {
+            self.truncate(new_len);
+        }
+    }
+}
+
+impl<T> VirtualVec<T> {
+    /// Extend the vector by `n` values, using the given generator.
+    fn extend_with<E: ExtendWith<T>>(&mut self, n: usize, mut value: E) {
+        self.reserve(n);
+
+        unsafe {
+            let mut ptr = self.as_mut_ptr().add(self.len());
+            // Use SetLenOnDrop to work around an alias-analysis bug where the
+            // compiler may not realize that the stores through `ptr` and the
+            // length update in `self.set_len()` don't alias.
+            let mut local_len = SetLenOnDrop::new(&mut self.len);
+
+            // Write all elements except the last one
+            for _ in 1..n {
+                ptr::write(ptr, value.next());
+                ptr = ptr.offset(1);
+                // Increment the length in every step in case next() panics
+                local_len.increment_len(1);
+            }
+
+            if n > 0 {
+                // We can write the last element directly without cloning needlessly
+                ptr::write(ptr, value.last());
+                local_len.increment_len(1);
+            }
+
+            // len set by scope guard
+        }
+    }
+}
+
+trait ExtendWith<T> {
+    fn next(&mut self) -> T;
+    fn last(self) -> T;
+}
+
+struct ExtendElement<T>(T);
+impl<T: Clone> ExtendWith<T> for ExtendElement<T> {
+    fn next(&mut self) -> T {
+        self.0.clone()
+    }
+    fn last(self) -> T {
+        self.0
+    }
+}
+
+struct ExtendDefault;
+impl<T: Default> ExtendWith<T> for ExtendDefault {
+    fn next(&mut self) -> T {
+        Default::default()
+    }
+    fn last(self) -> T {
+        Default::default()
+    }
+}
+
+struct ExtendFunc<F>(F);
+impl<T, F: FnMut() -> T> ExtendWith<T> for ExtendFunc<F> {
+    fn next(&mut self) -> T {
+        (self.0)()
+    }
+    fn last(mut self) -> T {
+        (self.0)()
+    }
+}
+
+impl<T> Drop for VirtualVec<T> {
+    fn drop(&mut self) {
+        unsafe {
+            // use drop for [T]
+            ptr::drop_in_place(&mut self[..]);
+        }
+        // RawVirtualVec handles deallocation
+    }
+}
+
+// Set the length of the vec when the `SetLenOnDrop` value goes out of scope.
+//
+// The idea: the length field in `SetLenOnDrop` is a local variable that the
+// optimizer can see does not alias with any stores through the Vec's data
+// pointer. This is a workaround for rustc alias analysis issue #32155.
+struct SetLenOnDrop<'a> {
+    len: &'a mut usize,
+    local_len: usize,
+}
+
+impl<'a> SetLenOnDrop<'a> {
+    #[inline]
+    fn new(len: &'a mut usize) -> Self {
+        SetLenOnDrop {
+            local_len: *len,
+            len,
+        }
+    }
+
+    #[inline]
+    fn increment_len(&mut self, increment: usize) {
+        self.local_len += increment;
+    }
+}
+
+impl Drop for SetLenOnDrop<'_> {
+    #[inline]
+    fn drop(&mut self) {
+        *self.len = self.local_len;
+    }
+}
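+
+// A minimal usage sketch of `VirtualVec`. The pointer-stability claim from
+// `new` holds because growing only commits more pages of the up-front virtual
+// memory reservation; the buffer itself never moves:
+//
+//     let mut v: VirtualVec<u64> = VirtualVec::new(1 << 20);
+//     v.push(0);
+//     let p = v.as_ptr();
+//     for i in 1..100_000 {
+//         v.push(i);
+//     }
+//     assert_eq!(p, v.as_ptr()); // grew many times, never reallocated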
diff --git a/narcissus-core/src/waiter.rs b/narcissus-core/src/waiter.rs
new file mode 100644 (file)
index 0000000..ba09597
--- /dev/null
@@ -0,0 +1,34 @@
+use std::{sync::atomic::AtomicI32, time::Duration};
+
+pub fn wait(futex: &AtomicI32, expected: i32, timeout: Option<Duration>) {
+    let timespec = timeout.and_then(|d| {
+        Some(libc::timespec {
+            // Sleep forever if the timeout is longer than fits in a timespec.
+            tv_sec: d.as_secs().try_into().ok()?,
+            // This conversion never truncates, as subsec_nanos is always <1e9.
+            tv_nsec: d.subsec_nanos() as _,
+        })
+    });
+    unsafe {
+        libc::syscall(
+            libc::SYS_futex,
+            futex as *const AtomicI32,
+            libc::FUTEX_WAIT | libc::FUTEX_PRIVATE_FLAG,
+            expected,
+            timespec
+                .as_ref()
+                .map_or(std::ptr::null(), |d| d as *const libc::timespec),
+        );
+    }
+}
+
+pub fn wake_n(futex: &AtomicI32, num_to_wake: i32) {
+    unsafe {
+        libc::syscall(
+            libc::SYS_futex,
+            futex as *const AtomicI32,
+            libc::FUTEX_WAKE | libc::FUTEX_PRIVATE_FLAG,
+            num_to_wake,
+        );
+    }
+}
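+
+// A minimal usage sketch (Linux-only, matching the raw SYS_futex calls above).
+// The waiter only blocks while the futex word still holds the expected value,
+// so a waker that stores first and wakes second cannot be missed:
+//
+//     use std::sync::atomic::{AtomicI32, Ordering};
+//     static FUTEX: AtomicI32 = AtomicI32::new(0);
+//
+//     // thread A: sleeps until woken (or returns at once if FUTEX != 0)
+//     wait(&FUTEX, 0, None);
+//
+//     // thread B:
+//     FUTEX.store(1, Ordering::Release);
+//     wake_n(&FUTEX, 1);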
diff --git a/narcissus-gpu/Cargo.toml b/narcissus-gpu/Cargo.toml
new file mode 100644 (file)
index 0000000..41a0c97
--- /dev/null
@@ -0,0 +1,12 @@
+[package]
+name = "narcissus-gpu"
+version = "0.1.0"
+edition = "2021"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+libc = "0.2"
+narcissus-core = { path = "../narcissus-core" }
+narcissus-app = { path = "../narcissus-app" }
+vulkan-sys = { path = "../vulkan-sys" }
\ No newline at end of file
diff --git a/narcissus-gpu/src/lib.rs b/narcissus-gpu/src/lib.rs
new file mode 100644 (file)
index 0000000..27dd069
--- /dev/null
@@ -0,0 +1,280 @@
+use std::{ffi::CStr, marker::PhantomData};
+
+use narcissus_app::{App, Window};
+use narcissus_core::{flags_def, thread_token_def, Handle, PhantomUnsend};
+
+mod vulkan;
+
+#[derive(Clone, Copy, PartialEq, Eq, Hash)]
+pub struct Texture(Handle);
+
+#[derive(Clone, Copy, PartialEq, Eq, Hash)]
+pub struct Buffer(Handle);
+
+#[derive(Clone, Copy, PartialEq, Eq, Hash)]
+pub struct Sampler(Handle);
+
+#[derive(Clone, Copy, PartialEq, Eq, Hash)]
+pub struct Pipeline(Handle);
+
+#[derive(Clone, Copy, Debug)]
+pub enum MemoryLocation {
+    Auto,
+    PreferHost,
+    PreferDevice,
+}
+
+#[repr(C)]
+pub struct Viewport {
+    pub x: f32,
+    pub y: f32,
+    pub width: f32,
+    pub height: f32,
+    pub min_depth: f32,
+    pub max_depth: f32,
+}
+
+#[repr(C)]
+pub struct Scissor {
+    pub x: i32,
+    pub y: i32,
+    pub width: u32,
+    pub height: u32,
+}
+
+flags_def!(ShaderStageFlags);
+impl ShaderStageFlags {
+    pub const VERTEX: Self = Self(1 << 0);
+    pub const FRAGMENT: Self = Self(1 << 1);
+    pub const COMPUTE: Self = Self(1 << 2);
+    pub const ALL: Self = Self(0b111); /* Self::VERTEX | Self::FRAGMENT | Self::COMPUTE */
+}
+
+#[derive(Clone, Copy, PartialEq, Eq)]
+pub enum TextureDimension {
+    Type1d,
+    Type2d,
+    Type3d,
+    TypeCube,
+}
+
+#[derive(Clone, Copy, PartialEq, Eq)]
+#[allow(non_camel_case_types)]
+pub enum TextureFormat {
+    BGRA8_SRGB,
+    BGRA8_UNORM,
+    RGBA8_SRGB,
+    RGBA8_UNORM,
+}
+
+flags_def!(TextureUsageFlags);
+impl TextureUsageFlags {
+    pub const SAMPLED: Self = Self(1 << 0);
+    pub const STORAGE: Self = Self(1 << 1);
+    pub const DEPTH_STENCIL: Self = Self(1 << 2);
+    pub const RENDER_TARGET: Self = Self(1 << 3);
+    pub const TRANSFER_SRC: Self = Self(1 << 4);
+    pub const TRANSFER_DST: Self = Self(1 << 5);
+}
+
+flags_def!(BufferUsageFlags);
+impl BufferUsageFlags {
+    pub const UNIFORM: Self = Self(1 << 0);
+    pub const STORAGE: Self = Self(1 << 1);
+    pub const INDEX: Self = Self(1 << 2);
+    pub const TRANSFER_SRC: Self = Self(1 << 3);
+    pub const TRANSFER_DST: Self = Self(1 << 4);
+}
+
+pub struct BufferDesc {
+    pub memory_location: MemoryLocation,
+    pub usage: BufferUsageFlags,
+    pub size: usize,
+}
+
+pub struct TextureDesc {
+    pub memory_location: MemoryLocation,
+    pub usage: TextureUsageFlags,
+    pub dimension: TextureDimension,
+    pub format: TextureFormat,
+    pub width: u32,
+    pub height: u32,
+    pub depth: u32,
+    pub layers: u32,
+    pub mip_levels: u32,
+}
+
+pub struct TextureViewDesc {
+    pub texture: Texture,
+    pub dimension: TextureDimension,
+    pub format: TextureFormat,
+    pub base_mip: u32,
+    pub mip_count: u32,
+    pub base_layer: u32,
+    pub layer_count: u32,
+}
+
+pub struct ShaderDesc<'a> {
+    pub entrypoint_name: &'a CStr,
+    pub code: &'a [u8],
+}
+
+#[derive(Clone, Copy, PartialEq, Eq)]
+pub enum SamplerFilter {
+    Point,
+    Bilinear,
+    Trilinear,
+    Anisotropic,
+}
+
+#[derive(Clone, Copy, PartialEq, Eq)]
+pub enum SamplerCompareOp {
+    None,
+    Less,
+    LessEq,
+    Greater,
+    GreaterEq,
+}
+
+#[derive(Clone, Copy, PartialEq, Eq)]
+pub enum SamplerAddressMode {
+    Wrap,
+    Clamp,
+}
+
+pub struct SamplerDesc {
+    pub filter: SamplerFilter,
+    pub address_mode: SamplerAddressMode,
+    pub compare_op: SamplerCompareOp,
+    pub mip_lod_bias: f32,
+    pub min_lod: f32,
+    pub max_lod: f32,
+}
+
+pub struct GraphicsPipelineLayout<'a> {
+    pub color_attachment_formats: &'a [TextureFormat],
+    pub depth_attachment_format: Option<TextureFormat>,
+    pub stencil_attachment_format: Option<TextureFormat>,
+}
+
+pub struct GraphicsPipelineDesc<'a> {
+    pub vertex_shader: ShaderDesc<'a>,
+    pub fragment_shader: ShaderDesc<'a>,
+    pub layout: GraphicsPipelineLayout<'a>,
+}
+
+pub struct ComputePipelineDesc<'a> {
+    pub shader: ShaderDesc<'a>,
+}
+
+#[derive(Clone, Copy, Debug)]
+pub enum ClearValue {
+    ColorF32([f32; 4]),
+    ColorU32([u32; 4]),
+    ColorI32([i32; 4]),
+    DepthStencil { depth: f32, stencil: u32 },
+}
+
+#[derive(Clone, Copy, Debug)]
+pub enum LoadOp {
+    Load,
+    Clear(ClearValue),
+    DontCare,
+}
+
+#[derive(Clone, Copy, Debug)]
+pub enum StoreOp {
+    Store,
+    DontCare,
+}
+
+pub struct RenderingAttachment {
+    pub texture: Texture,
+    pub load_op: LoadOp,
+    pub store_op: StoreOp,
+}
+
+pub struct RenderingDesc<'a> {
+    pub x: i32,
+    pub y: i32,
+    pub width: u32,
+    pub height: u32,
+    pub color_attachments: &'a [RenderingAttachment],
+    pub depth_attachment: Option<RenderingAttachment>,
+    pub stencil_attachment: Option<RenderingAttachment>,
+}
+
+thread_token_def!(ThreadToken, GpuConcurrent, 8);
+
+pub struct FrameToken<'device> {
+    device_address: usize,
+    frame_index: usize,
+    phantom: PhantomData<&'device dyn Device>,
+}
+
+pub struct CommandBufferToken<'frame, 'thread> {
+    frame_token: &'frame FrameToken<'frame>,
+    thread_token: &'thread mut ThreadToken,
+    index: usize,
+    raw: u64,
+    phantom: PhantomUnsend,
+}
+
+pub trait Device {
+    fn create_buffer(&self, buffer_desc: &BufferDesc) -> Buffer;
+    fn create_texture(&self, texture_desc: &TextureDesc) -> Texture;
+    fn create_texture_view(&self, desc: &TextureViewDesc) -> Texture;
+    fn create_sampler(&self, desc: &SamplerDesc) -> Sampler;
+    fn create_graphics_pipeline(&self, desc: &GraphicsPipelineDesc) -> Pipeline;
+    fn create_compute_pipeline(&self, desc: &ComputePipelineDesc) -> Pipeline;
+
+    fn destroy_buffer(&self, frame_token: &FrameToken, buffer: Buffer);
+    fn destroy_texture(&self, frame_token: &FrameToken, texture: Texture);
+    fn destroy_sampler(&self, frame_token: &FrameToken, sampler: Sampler);
+    fn destroy_pipeline(&self, frame_token: &FrameToken, pipeline: Pipeline);
+
+    fn acquire_swapchain(
+        &self,
+        frame_token: &FrameToken,
+        window: Window,
+        format: TextureFormat,
+    ) -> (u32, u32, Texture);
+    fn destroy_window(&self, window: Window);
+
+    fn request_command_buffer<'frame>(
+        &'frame self,
+        frame_token: &'frame FrameToken,
+        thread_token: &'frame mut ThreadToken,
+    ) -> CommandBufferToken;
+
+    fn cmd_bind_pipeline(&self, command_buffer_token: &mut CommandBufferToken, pipeline: Pipeline);
+    fn cmd_begin_rendering(
+        &self,
+        command_buffer_token: &mut CommandBufferToken,
+        desc: &RenderingDesc,
+    );
+    fn cmd_end_rendering(&self, command_buffer_token: &mut CommandBufferToken);
+    fn cmd_set_viewports(
+        &self,
+        command_buffer_token: &mut CommandBufferToken,
+        viewports: &[Viewport],
+    );
+    fn cmd_set_scissors(&self, command_buffer_token: &mut CommandBufferToken, scissors: &[Scissor]);
+    fn cmd_draw(
+        &self,
+        command_buffer_token: &mut CommandBufferToken,
+        vertex_count: u32,
+        instance_count: u32,
+        first_vertex: u32,
+        first_instance: u32,
+    );
+
+    fn submit(&self, command_buffer_token: CommandBufferToken);
+
+    fn begin_frame<'device>(&'device self) -> FrameToken<'device>;
+    fn end_frame<'device>(&'device self, frame_token: FrameToken<'device>);
+}
+
+pub fn create_vulkan_device<'app>(app: &'app dyn App) -> Box<dyn Device + 'app> {
+    Box::new(vulkan::VulkanDevice::new(app))
+}
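+
+// A sketch of the intended per-frame flow through the `Device` trait; `app`,
+// `window`, and `pipeline` are assumed to already exist, and `ThreadToken::new`
+// is assumed to be provided by `thread_token_def!`:
+//
+//     let device = create_vulkan_device(app);
+//     let mut thread_token = ThreadToken::new();
+//     loop {
+//         let frame_token = device.begin_frame();
+//         let (_width, _height, _swapchain_texture) =
+//             device.acquire_swapchain(&frame_token, window, TextureFormat::BGRA8_SRGB);
+//         let mut cmd = device.request_command_buffer(&frame_token, &mut thread_token);
+//         device.cmd_bind_pipeline(&mut cmd, pipeline);
+//         device.cmd_draw(&mut cmd, 3, 1, 0, 0);
+//         device.submit(cmd);
+//         device.end_frame(frame_token);
+//     }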
diff --git a/narcissus-gpu/src/vulkan.rs b/narcissus-gpu/src/vulkan.rs
new file mode 100644 (file)
index 0000000..c77f098
--- /dev/null
@@ -0,0 +1,2237 @@
+use std::{
+    cell::UnsafeCell,
+    collections::{HashMap, VecDeque},
+    marker::PhantomData,
+    ops::DerefMut,
+    os::raw::{c_char, c_void},
+    sync::atomic::{AtomicU64, AtomicUsize, Ordering},
+};
+
+use narcissus_app::{App, Window};
+use narcissus_core::{
+    cstr, default, make_array, manual_arc, manual_arc::ManualArc, Mutex, PhantomUnsend, Pool,
+};
+
+use vk::{DeviceFunctions, SurfaceKHRFunctions, SwapchainKHRFunctions};
+use vulkan_sys as vk;
+
+use crate::{
+    Buffer, BufferDesc, BufferUsageFlags, ClearValue, CommandBufferToken, ComputePipelineDesc,
+    Device, FrameToken, GpuConcurrent, GraphicsPipelineDesc, LoadOp, MemoryLocation, Pipeline,
+    Sampler, SamplerAddressMode, SamplerCompareOp, SamplerDesc, SamplerFilter, Texture,
+    TextureDesc, TextureDimension, TextureFormat, TextureUsageFlags, TextureViewDesc, ThreadToken,
+};
+
+const NUM_FRAMES: usize = 2;
+
+macro_rules! vk_check {
+    ($e:expr) => ({
+        #[allow(unused_unsafe)]
+        let e = unsafe { $e };
+        if e != vulkan_sys::Result::Success {
+            panic!("assertion failed: `result == vk::Result::Success`: \n value: `{:?}`", e);
+        }
+    });
+    ($e:expr, $($msg_args:tt)+) => ({
+        #[allow(unused_unsafe)]
+        let e = unsafe { $e };
+        if e != vulkan_sys::Result::Success {
+            panic!("assertion failed: `result == vk::Result::Success`: \n value: `{:?}: {}`", e, format_args!($($msg_args)+));
+        }
+    })
+}
+
+#[must_use]
+fn vk_vec<T, F: FnMut(&mut u32, *mut T) -> vulkan_sys::Result>(mut f: F) -> Vec<T> {
+    let mut count = 0;
+    vk_check!(f(&mut count, std::ptr::null_mut()));
+    let mut v = Vec::with_capacity(count as usize);
+    vk_check!(f(&mut count, v.as_mut_ptr()));
+    unsafe { v.set_len(count as usize) };
+    v
+}
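+
+// `vk_vec` wraps the standard Vulkan two-call enumeration pattern: the first
+// call writes the element count, the second fills the buffer. For example,
+// enumerating physical devices (as done below):
+//
+//     let physical_devices = vk_vec(|count, ptr| unsafe {
+//         instance_fn.enumerate_physical_devices(instance, count, ptr)
+//     });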
+
+#[must_use]
+fn vulkan_format(format: TextureFormat) -> vk::Format {
+    match format {
+        TextureFormat::RGBA8_SRGB => vk::Format::R8G8B8A8_SRGB,
+        TextureFormat::RGBA8_UNORM => vk::Format::R8G8B8A8_UNORM,
+        TextureFormat::BGRA8_SRGB => vk::Format::B8G8R8A8_SRGB,
+        TextureFormat::BGRA8_UNORM => vk::Format::B8G8R8A8_UNORM,
+    }
+}
+
+#[must_use]
+fn vulkan_aspect(format: TextureFormat) -> vk::ImageAspectFlags {
+    match format {
+        TextureFormat::BGRA8_SRGB
+        | TextureFormat::BGRA8_UNORM
+        | TextureFormat::RGBA8_SRGB
+        | TextureFormat::RGBA8_UNORM => vk::ImageAspectFlags::COLOR,
+    }
+}
+
+#[must_use]
+fn vulkan_clear_value(clear_value: ClearValue) -> vk::ClearValue {
+    match clear_value {
+        ClearValue::ColorF32(value) => vk::ClearValue {
+            color: vk::ClearColorValue { f32: value },
+        },
+        ClearValue::ColorU32(value) => vk::ClearValue {
+            color: vk::ClearColorValue { u32: value },
+        },
+        ClearValue::ColorI32(value) => vk::ClearValue {
+            color: vk::ClearColorValue { i32: value },
+        },
+        ClearValue::DepthStencil { depth, stencil } => vk::ClearValue {
+            depth_stencil: vk::ClearDepthStencilValue { depth, stencil },
+        },
+    }
+}
+
+struct DelayQueue<T> {
+    delay: u64,
+    counter: u64,
+    values: VecDeque<(u64, T)>,
+}
+
+impl<T> DelayQueue<T> {
+    fn new(delay: u64) -> Self {
+        Self {
+            delay,
+            counter: 0,
+            values: VecDeque::new(),
+        }
+    }
+
+    fn push(&mut self, value: T) {
+        self.values.push_back((self.counter + self.delay, value))
+    }
+
+    fn expire<F: FnMut(T)>(&mut self, mut f: F) {
+        self.counter += 1;
+
+        let to_remove = self
+            .values
+            .iter()
+            .take_while(|(expiry, _)| *expiry <= self.counter)
+            .count();
+
+        for _ in 0..to_remove {
+            f(self.values.pop_front().unwrap().1);
+        }
+    }
+
+    pub fn drain<R>(&mut self, range: R) -> std::collections::vec_deque::Drain<'_, (u64, T)>
+    where
+        R: std::ops::RangeBounds<usize>,
+    {
+        self.values.drain(range)
+    }
+}
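+
+// With `delay = N`, a value pushed while `counter == c` is tagged with expiry
+// `c + N` and is handed to `expire`'s callback on the N-th `expire` call after
+// the push. This is what lets destroyed swapchains below outlive any frames
+// still in flight before the Vulkan objects are actually released.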
+
+struct VulkanBuffer {
+    memory: VulkanMemory,
+    buffer: vk::Buffer,
+}
+
+#[derive(Clone)]
+struct VulkanTexture {
+    memory: VulkanMemory,
+    image: vk::Image,
+}
+
+struct VulkanTextureUnique {
+    texture: VulkanTexture,
+    view: vk::ImageView,
+}
+
+struct VulkanTextureShared {
+    texture: ManualArc<VulkanTexture>,
+    view: vk::ImageView,
+}
+
+struct VulkanTextureSwapchain {
+    window: Window,
+    image: vk::Image,
+    view: vk::ImageView,
+}
+
+enum VulkanTextureHolder {
+    Unique(VulkanTextureUnique),
+    Shared(VulkanTextureShared),
+    Swapchain(VulkanTextureSwapchain),
+}
+
+struct VulkanSampler(vk::Sampler);
+
+struct VulkanPipeline {
+    pipeline: vk::Pipeline,
+    pipeline_bind_point: vk::PipelineBindPoint,
+}
+
+enum VulkanSwapchainState {
+    Vacant,
+    Occupied {
+        width: u32,
+        height: u32,
+        suboptimal: bool,
+        swapchain: vk::SwapchainKHR,
+        image_views: Box<[Texture]>,
+    },
+}
+
+struct VulkanSwapchain {
+    window: Window,
+    surface: vk::SurfaceKHR,
+    surface_format: vk::SurfaceFormatKHR,
+
+    state: VulkanSwapchainState,
+
+    _formats: Box<[vk::SurfaceFormatKHR]>,
+    _present_modes: Box<[vk::PresentModeKHR]>,
+    capabilities: vk::SurfaceCapabilitiesKHR,
+}
+
+#[derive(Default)]
+struct VulkanPresentInfo {
+    acquire: vk::Semaphore,
+    release: vk::Semaphore,
+    swapchain: vk::SwapchainKHR,
+    image_index: u32,
+}
+
+struct VulkanMemoryDesc {
+    requirements: vk::MemoryRequirements,
+    memory_location: MemoryLocation,
+    _linear: bool,
+}
+
+#[derive(Clone)]
+struct VulkanMemory {
+    memory: vk::DeviceMemory,
+    offset: u64,
+    _size: u64,
+}
+
+struct VulkanPools {
+    textures: Pool<VulkanTextureHolder>,
+    buffers: Pool<VulkanBuffer>,
+    samplers: Pool<VulkanSampler>,
+    pipelines: Pool<VulkanPipeline>,
+}
+
+struct VulkanCommandBuffer {
+    command_buffer: vk::CommandBuffer,
+    swapchains_touched: HashMap<Window, (vk::Image, vk::PipelineStageFlags2)>,
+}
+
+struct VulkanCommandBufferPool {
+    command_pool: vk::CommandPool,
+    next_free_index: usize,
+    command_buffers: Vec<VulkanCommandBuffer>,
+}
+
+impl<'device> FrameToken<'device> {
+    fn check_device(&self, device: &VulkanDevice) {
+        let device_address = device as *const _ as usize;
+        assert_eq!(
+            self.device_address, device_address,
+            "frame token device mismatch"
+        )
+    }
+
+    fn check_frame_counter(&self, frame_counter_value: usize) {
+        assert!(frame_counter_value & 1 == 0, "frame counter isn't acquired");
+        assert_eq!(
+            self.frame_index,
+            frame_counter_value >> 1,
+            "token does not match current frame"
+        );
+    }
+}
+
+struct FrameCounter {
+    value: AtomicUsize,
+}
+
+impl FrameCounter {
+    fn new() -> Self {
+        Self {
+            // Start the frame id at 1 so that the first `begin_frame` ticks us over to a new frame index.
+            value: AtomicUsize::new(1),
+        }
+    }
+
+    fn load(&self) -> usize {
+        self.value.load(Ordering::Relaxed)
+    }
+
+    fn acquire(&self, device: &VulkanDevice) -> FrameToken {
+        let old_frame_counter = self.value.fetch_add(1, Ordering::SeqCst);
+        assert!(
+            old_frame_counter & 1 == 1,
+            "acquiring a frame token before previous frame token has been released"
+        );
+
+        let frame_counter = old_frame_counter + 1;
+        let frame_index = frame_counter >> 1;
+
+        FrameToken {
+            device_address: device as *const _ as usize,
+            frame_index,
+            phantom: PhantomData,
+        }
+    }
+
+    fn release(&self, frame_token: FrameToken) {
+        let old_frame_counter = self.value.fetch_add(1, Ordering::SeqCst);
+        frame_token.check_frame_counter(old_frame_counter);
+    }
+}
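+
+// The counter packs frame state into its low bit: odd means released (between
+// frames), even means acquired (a frame in flight), and `value >> 1` is the
+// frame index. Starting from 1, the first `acquire` moves the counter to 2
+// (frame index 1) and the matching `release` to 3, so an unbalanced
+// begin_frame/end_frame pair trips one of the parity asserts above.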
+
+struct VulkanFrame {
+    universal_queue_fence: AtomicU64,
+
+    command_buffer_pools: GpuConcurrent<VulkanCommandBufferPool>,
+
+    present_swapchains: Mutex<HashMap<Window, VulkanPresentInfo>>,
+
+    destroyed_allocations: Mutex<VecDeque<VulkanMemory>>,
+    destroyed_buffers: Mutex<VecDeque<vk::Buffer>>,
+    destroyed_buffer_views: Mutex<VecDeque<vk::BufferView>>,
+    destroyed_images: Mutex<VecDeque<vk::Image>>,
+    destroyed_image_views: Mutex<VecDeque<vk::ImageView>>,
+    destroyed_samplers: Mutex<VecDeque<vk::Sampler>>,
+    destroyed_pipelines: Mutex<VecDeque<vk::Pipeline>>,
+
+    recycled_semaphores: Mutex<VecDeque<vk::Semaphore>>,
+}
+
+pub(crate) struct VulkanDevice<'app> {
+    app: &'app dyn App,
+
+    instance: vk::Instance,
+    physical_device: vk::PhysicalDevice,
+    physical_device_memory_properties: Box<vk::PhysicalDeviceMemoryProperties>,
+    device: vk::Device,
+
+    universal_queue: vk::Queue,
+    universal_queue_fence: AtomicU64,
+    universal_queue_semaphore: vk::Semaphore,
+    universal_queue_family_index: u32,
+
+    frame_counter: FrameCounter,
+    frames: Box<[UnsafeCell<VulkanFrame>; NUM_FRAMES]>,
+
+    swapchains: Mutex<HashMap<Window, VulkanSwapchain>>,
+    destroyed_swapchains: Mutex<
+        DelayQueue<(
+            Window,
+            vk::SwapchainKHR,
+            vk::SurfaceKHR,
+            Box<[vk::ImageView]>,
+        )>,
+    >,
+
+    pools: Mutex<VulkanPools>,
+    semaphores: Mutex<VecDeque<vk::Semaphore>>,
+
+    _global_fn: vk::GlobalFunctions,
+    instance_fn: vk::InstanceFunctions,
+    surface_fn: vk::SurfaceKHRFunctions,
+    swapchain_fn: vk::SwapchainKHRFunctions,
+    device_fn: vk::DeviceFunctions,
+}
+
+impl<'app> VulkanDevice<'app> {
+    pub(crate) fn new(app: &'app dyn App) -> Self {
+        let get_proc_addr = app.vk_get_loader();
+        let global_fn = unsafe { vk::GlobalFunctions::new(get_proc_addr) };
+
+        let api_version = {
+            let mut api_version = 0;
+            vk_check!(global_fn.enumerate_instance_version(&mut api_version));
+            api_version
+        };
+
+        if api_version < vk::VERSION_1_2 {
+            panic!("instance does not support vulkan 1.2")
+        }
+
+        #[cfg(debug_assertions)]
+        let enabled_layers = &[cstr!("VK_LAYER_KHRONOS_validation").as_ptr()];
+        #[cfg(not(debug_assertions))]
+        let enabled_layers = &[];
+
+        let enabled_extensions = app.vk_instance_extensions();
+        let enabled_extensions = enabled_extensions
+            .iter()
+            .map(|x| x.as_ptr())
+            .collect::<Vec<*const c_char>>();
+
+        let instance = {
+            let application_info = vk::ApplicationInfo {
+                application_name: cstr!("TRIANGLE").as_ptr(),
+                application_version: 0,
+                engine_name: cstr!("NARCISSUS").as_ptr(),
+                engine_version: 0,
+                api_version: vk::VERSION_1_3,
+                ..Default::default()
+            };
+            let create_info = vk::InstanceCreateInfo {
+                enabled_layers: enabled_layers.into(),
+                enabled_extension_names: enabled_extensions.as_slice().into(),
+                application_info: Some(&application_info),
+                ..Default::default()
+            };
+            let mut instance = vk::Instance::null();
+            vk_check!(global_fn.create_instance(&create_info, None, &mut instance));
+            instance
+        };
+
+        let instance_fn = vk::InstanceFunctions::new(&global_fn, instance, vk::VERSION_1_2);
+        let surface_fn = vk::SurfaceKHRFunctions::new(&global_fn, instance);
+        let swapchain_fn = vk::SwapchainKHRFunctions::new(&global_fn, instance, vk::VERSION_1_1);
+
+        let physical_devices = vk_vec(|count, ptr| unsafe {
+            instance_fn.enumerate_physical_devices(instance, count, ptr)
+        });
+
+        let physical_device = physical_devices
+            .iter()
+            .copied()
+            .find(|&physical_device| {
+                let (
+                    physical_device_properties,
+                    _physical_device_properties_11,
+                    _physical_device_properties_12,
+                    _physical_device_properties_13,
+                ) = {
+                    let mut properties_13 = vk::PhysicalDeviceVulkan13Properties::default();
+                    let mut properties_12 = vk::PhysicalDeviceVulkan12Properties::default();
+                    let mut properties_11 = vk::PhysicalDeviceVulkan11Properties::default();
+                    let mut properties = vk::PhysicalDeviceProperties2::default();
+                    unsafe {
+                        properties._next =
+                            std::mem::transmute::<_, *mut c_void>(&mut properties_11);
+                        properties_11._next =
+                            std::mem::transmute::<_, *mut c_void>(&mut properties_12);
+                        properties_12._next =
+                            std::mem::transmute::<_, *mut c_void>(&mut properties_13);
+                        instance_fn
+                            .get_physical_device_properties2(physical_device, &mut properties);
+                    }
+                    (properties, properties_11, properties_12, properties_13)
+                };
+
+                let (
+                    _physical_device_features,
+                    _physical_device_features_11,
+                    physical_device_features_12,
+                    physical_device_features_13,
+                ) = {
+                    let mut features_13 = vk::PhysicalDeviceVulkan13Features::default();
+                    let mut features_12 = vk::PhysicalDeviceVulkan12Features::default();
+                    let mut features_11 = vk::PhysicalDeviceVulkan11Features::default();
+                    let mut features = vk::PhysicalDeviceFeatures2::default();
+                    unsafe {
+                        features._next = std::mem::transmute::<_, *mut c_void>(&mut features_11);
+                        features_11._next = std::mem::transmute::<_, *mut c_void>(&mut features_12);
+                        features_12._next = std::mem::transmute::<_, *mut c_void>(&mut features_13);
+                        instance_fn.get_physical_device_features2(physical_device, &mut features);
+                    }
+                    (features.features, features_11, features_12, features_13)
+                };
+
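+                // Require Vulkan 1.3 along with the 1.2 / 1.3 features the
+                // renderer is built around.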
+                physical_device_properties.properties.api_version >= vk::VERSION_1_3
+                    && physical_device_features_13.dynamic_rendering == vk::Bool32::True
+                    && physical_device_features_12.timeline_semaphore == vk::Bool32::True
+                    && physical_device_features_12.descriptor_indexing == vk::Bool32::True
+                    && physical_device_features_12.descriptor_binding_partially_bound
+                        == vk::Bool32::True
+                    && physical_device_features_12.draw_indirect_count == vk::Bool32::True
+            })
+            .expect("no supported physical devices reported");
+
+        let physical_device_memory_properties = unsafe {
+            let mut memory_properties = vk::PhysicalDeviceMemoryProperties::default();
+            instance_fn
+                .get_physical_device_memory_properties(physical_device, &mut memory_properties);
+            memory_properties
+        };
+
+        let queue_family_properties = vk_vec(|count, ptr| unsafe {
+            instance_fn.get_physical_device_queue_family_properties(physical_device, count, ptr);
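+            // This entry point returns void, so fake a success result for vk_vec.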
+            vk::Result::Success
+        });
+
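+        // Find a "universal" queue family that supports both graphics and
+        // compute; such a family implicitly supports transfer as well.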
+        let (queue_family_index, _) = (0..)
+            .zip(queue_family_properties.iter())
+            .find(|&(_, queue_family_properties)| {
+                queue_family_properties
+                    .queue_flags
+                    .contains(vk::QueueFlags::GRAPHICS | vk::QueueFlags::COMPUTE)
+            })
+            .expect("failed to find universal queue for chosen device");
+
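+        // Create the logical device with a single universal queue, the
+        // swapchain extension, and the required features chained via `_next`.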
+        let device = unsafe {
+            let queue_priorities: &[_] = &[1.0];
+            let device_queue_create_infos: &[_] = &[vk::DeviceQueueCreateInfo {
+                queue_family_index,
+                queue_priorities: queue_priorities.into(),
+                ..Default::default()
+            }];
+            let enabled_extensions = vec![cstr!("VK_KHR_swapchain")];
+            let enabled_extensions = enabled_extensions
+                .iter()
+                .map(|x| x.as_ptr())
+                .collect::<Vec<*const c_char>>();
+            let enabled_features_13 = vk::PhysicalDeviceVulkan13Features {
+                dynamic_rendering: vk::Bool32::True,
+                synchronization2: vk::Bool32::True,
+                ..Default::default()
+            };
+            let enabled_features_12 = vk::PhysicalDeviceVulkan12Features {
+                _next: std::mem::transmute::<_, *mut c_void>(&enabled_features_13),
+                timeline_semaphore: vk::Bool32::True,
+                descriptor_indexing: vk::Bool32::True,
+                descriptor_binding_partially_bound: vk::Bool32::True,
+                draw_indirect_count: vk::Bool32::True,
+                ..Default::default()
+            };
+            let enabled_features_11 = vk::PhysicalDeviceVulkan11Features {
+                _next: std::mem::transmute::<_, *mut c_void>(&enabled_features_12),
+                ..Default::default()
+            };
+            let enabled_features = vk::PhysicalDeviceFeatures2 {
+                _next: std::mem::transmute::<_, *mut c_void>(&enabled_features_11),
+                ..Default::default()
+            };
+            let create_info = vk::DeviceCreateInfo {
+                _next: std::mem::transmute::<_, *mut c_void>(&enabled_features),
+                enabled_extension_names: enabled_extensions.as_slice().into(),
+                queue_create_infos: device_queue_create_infos.into(),
+                ..Default::default()
+            };
+            let mut device = vk::Device::null();
+            vk_check!(instance_fn.create_device(physical_device, &create_info, None, &mut device));
+            device
+        };
+
+        let device_fn = vk::DeviceFunctions::new(&instance_fn, device, vk::VERSION_1_3);
+
+        let universal_queue = unsafe {
+            let mut queue = vk::Queue::default();
+            device_fn.get_device_queue(device, queue_family_index, 0, &mut queue);
+            queue
+        };
+
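+        // The queue fence is a monotonically increasing timeline semaphore
+        // value, bumped once per submit.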
+        let universal_queue_fence = 0;
+
+        let universal_queue_semaphore = {
+            let type_create_info = vk::SemaphoreTypeCreateInfo {
+                semaphore_type: vk::SemaphoreType::Timeline,
+                initial_value: universal_queue_fence,
+                ..Default::default()
+            };
+            let create_info = vk::SemaphoreCreateInfo {
+                _next: unsafe { std::mem::transmute::<_, *mut c_void>(&type_create_info) },
+                ..Default::default()
+            };
+            let mut semaphore = vk::Semaphore::null();
+            vk_check!(device_fn.create_semaphore(device, &create_info, None, &mut semaphore));
+            semaphore
+        };
+
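+        // Per-frame state: per-thread command buffer pools plus queues of
+        // resources whose destruction is deferred until the frame's GPU work
+        // has completed.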
+        let frames = Box::new(make_array(|| {
+            let cmd_buffer_pools = GpuConcurrent::new(|| {
+                let pool = {
+                    let create_info = vk::CommandPoolCreateInfo {
+                        flags: vk::CommandPoolCreateFlags::TRANSIENT,
+                        queue_family_index,
+                        ..default()
+                    };
+                    let mut pool = vk::CommandPool::null();
+                    vk_check!(device_fn.create_command_pool(device, &create_info, None, &mut pool));
+                    pool
+                };
+                VulkanCommandBufferPool {
+                    command_pool: pool,
+                    command_buffers: Vec::new(),
+                    next_free_index: 0,
+                }
+            });
+            UnsafeCell::new(VulkanFrame {
+                command_buffer_pools: cmd_buffer_pools,
+                universal_queue_fence: AtomicU64::new(universal_queue_fence),
+                present_swapchains: Mutex::new(HashMap::new()),
+                destroyed_buffers: Mutex::new(VecDeque::new()),
+                destroyed_buffer_views: Mutex::new(VecDeque::new()),
+                destroyed_images: Mutex::new(VecDeque::new()),
+                destroyed_image_views: Mutex::new(VecDeque::new()),
+                destroyed_samplers: Mutex::new(VecDeque::new()),
+                destroyed_allocations: Mutex::new(VecDeque::new()),
+                destroyed_pipelines: Mutex::new(VecDeque::new()),
+                recycled_semaphores: Mutex::new(VecDeque::new()),
+            })
+        }));
+
+        Self {
+            app,
+
+            instance,
+            physical_device,
+            physical_device_memory_properties: Box::new(physical_device_memory_properties),
+            device,
+
+            universal_queue,
+            universal_queue_fence: AtomicU64::new(universal_queue_fence),
+            universal_queue_semaphore,
+            universal_queue_family_index: queue_family_index,
+
+            frame_counter: FrameCounter::new(),
+            frames,
+
+            swapchains: Mutex::new(HashMap::new()),
+            destroyed_swapchains: Mutex::new(DelayQueue::new(8)),
+
+            pools: Mutex::new(VulkanPools {
+                textures: Pool::new(),
+                buffers: Pool::new(),
+                samplers: Pool::new(),
+                pipelines: Pool::new(),
+            }),
+
+            semaphores: Mutex::new(VecDeque::new()),
+
+            _global_fn: global_fn,
+            instance_fn,
+            surface_fn,
+            swapchain_fn,
+            device_fn,
+        }
+    }
+
+    fn frame<'token>(&self, frame_token: &'token FrameToken) -> &'token VulkanFrame {
+        frame_token.check_device(self);
+        frame_token.check_frame_counter(self.frame_counter.load());
+        // SAFETY: The reference is bound to the frame token exposed by the API; only
+        // one frame token can be valid at a time, and the returned frame is only
+        // valid for as long as we hold a reference to the token.
+        unsafe { &*self.frames[frame_token.frame_index % NUM_FRAMES].get() }
+    }
+
+    fn frame_mut<'token>(
+        &'token self,
+        frame_token: &'token mut FrameToken,
+    ) -> &'token mut VulkanFrame {
+        frame_token.check_device(self);
+        frame_token.check_frame_counter(self.frame_counter.load());
+        // SAFETY: The mutable reference is bound to the frame token exposed by the
+        // API; only one frame token can be valid at a time, and the returned frame
+        // is only valid for as long as we hold a mutable reference to the token.
+        unsafe { &mut *self.frames[frame_token.frame_index % NUM_FRAMES].get() }
+    }
+
+    fn command_buffer_mut<'token>(
+        &'token self,
+        command_buffer_token: &'token mut CommandBufferToken,
+    ) -> &'token mut VulkanCommandBuffer {
+        let frame = self.frame(command_buffer_token.frame_token);
+        let command_buffer_pool = frame
+            .command_buffer_pools
+            .get_mut(command_buffer_token.thread_token);
+        &mut command_buffer_pool.command_buffers[command_buffer_token.index]
+    }
+
+    fn find_memory_type_index(&self, filter: u32, flags: vk::MemoryPropertyFlags) -> u32 {
+        (0..self.physical_device_memory_properties.memory_type_count)
+            .map(|memory_type_index| {
+                (
+                    memory_type_index,
+                    self.physical_device_memory_properties.memory_types[memory_type_index as usize],
+                )
+            })
+            .find(|(i, memory_type)| {
+                (filter & (1 << i)) != 0 && memory_type.property_flags.contains(flags)
+            })
+            .expect("could not find memory type matching flags")
+            .0
+    }
+
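+    // Allocates a dedicated vk::DeviceMemory block for each request; no
+    // suballocation is performed.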
+    fn allocate_memory(&self, desc: &VulkanMemoryDesc) -> VulkanMemory {
+        let memory_property_flags = match desc.memory_location {
+            MemoryLocation::Auto => vk::MemoryPropertyFlags::default(),
+            MemoryLocation::PreferHost => vk::MemoryPropertyFlags::HOST_VISIBLE,
+            MemoryLocation::PreferDevice => vk::MemoryPropertyFlags::DEVICE_LOCAL,
+        };
+
+        let memory_type_index =
+            self.find_memory_type_index(desc.requirements.memory_type_bits, memory_property_flags);
+        let allocate_info = vk::MemoryAllocateInfo {
+            allocation_size: desc.requirements.size,
+            memory_type_index,
+            ..default()
+        };
+        let mut memory = vk::DeviceMemory::null();
+        vk_check!(self
+            .device_fn
+            .allocate_memory(self.device, &allocate_info, None, &mut memory));
+
+        VulkanMemory {
+            memory,
+            offset: 0,
+            _size: desc.requirements.size,
+        }
+    }
+
+    fn allocate_memory_for_buffer(
+        &self,
+        buffer: vk::Buffer,
+        memory_location: MemoryLocation,
+    ) -> VulkanMemory {
+        let info = vk::BufferMemoryRequirementsInfo2 {
+            buffer,
+            ..default()
+        };
+        let mut memory_requirements = vk::MemoryRequirements2::default();
+        self.device_fn.get_buffer_memory_requirements2(
+            self.device,
+            &info,
+            &mut memory_requirements,
+        );
+
+        self.allocate_memory(&VulkanMemoryDesc {
+            requirements: memory_requirements.memory_requirements,
+            memory_location,
+            _linear: true,
+        })
+    }
+
+    fn allocate_memory_for_image(
+        &self,
+        image: vk::Image,
+        memory_location: MemoryLocation,
+    ) -> VulkanMemory {
+        let info = vk::ImageMemoryRequirementsInfo2 { image, ..default() };
+        let mut memory_requirements = vk::MemoryRequirements2::default();
+        self.device_fn
+            .get_image_memory_requirements2(self.device, &info, &mut memory_requirements);
+
+        self.allocate_memory(&VulkanMemoryDesc {
+            requirements: memory_requirements.memory_requirements,
+            memory_location,
+            _linear: true,
+        })
+    }
+
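+    // Reuse a semaphore from the free list if possible, otherwise create a
+    // new one.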
+    fn request_semaphore(&self) -> vk::Semaphore {
+        if let Some(semaphore) = self.semaphores.lock().pop_front() {
+            semaphore
+        } else {
+            let mut semaphore = vk::Semaphore::null();
+            let create_info = vk::SemaphoreCreateInfo::default();
+            vk_check!(self.device_fn.create_semaphore(
+                self.device,
+                &create_info,
+                None,
+                &mut semaphore
+            ));
+            semaphore
+        }
+    }
+
+    fn recycle_semaphore(&self, frame: &VulkanFrame, semaphore: vk::Semaphore) {
+        frame.recycled_semaphores.lock().push_back(semaphore)
+    }
+
+    fn request_transient_semaphore(&self, frame: &VulkanFrame) -> vk::Semaphore {
+        let semaphore = self.request_semaphore();
+        self.recycle_semaphore(frame, semaphore);
+        semaphore
+    }
+
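+    // Destroy everything queued on this frame, views before the objects they
+    // reference, with memory freed last.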
+    fn destroy_deferred(device_fn: &DeviceFunctions, device: vk::Device, frame: &mut VulkanFrame) {
+        for pipeline in frame.destroyed_pipelines.get_mut().drain(..) {
+            unsafe { device_fn.destroy_pipeline(device, pipeline, None) }
+        }
+        for sampler in frame.destroyed_samplers.get_mut().drain(..) {
+            unsafe { device_fn.destroy_sampler(device, sampler, None) }
+        }
+        for image_view in frame.destroyed_image_views.get_mut().drain(..) {
+            unsafe { device_fn.destroy_image_view(device, image_view, None) }
+        }
+        for image in frame.destroyed_images.get_mut().drain(..) {
+            unsafe { device_fn.destroy_image(device, image, None) }
+        }
+        for buffer_view in frame.destroyed_buffer_views.get_mut().drain(..) {
+            unsafe { device_fn.destroy_buffer_view(device, buffer_view, None) }
+        }
+        for buffer in frame.destroyed_buffers.get_mut().drain(..) {
+            unsafe { device_fn.destroy_buffer(device, buffer, None) }
+        }
+        for memory in frame.destroyed_allocations.get_mut().drain(..) {
+            unsafe { device_fn.free_memory(device, memory.memory, None) };
+        }
+    }
+
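+    // Tear down a swapchain in reverse creation order: image views, the
+    // swapchain, the surface, and finally the window.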
+    fn destroy_swapchain(
+        app: &dyn App,
+        device_fn: &DeviceFunctions,
+        swapchain_fn: &SwapchainKHRFunctions,
+        surface_fn: &SurfaceKHRFunctions,
+        instance: vk::Instance,
+        device: vk::Device,
+        window: Window,
+        surface: vk::SurfaceKHR,
+        swapchain: vk::SwapchainKHR,
+        image_views: &[vk::ImageView],
+    ) {
+        if !image_views.is_empty() {
+            for &image_view in image_views {
+                unsafe { device_fn.destroy_image_view(device, image_view, None) }
+            }
+        }
+        if !swapchain.is_null() {
+            unsafe { swapchain_fn.destroy_swapchain(device, swapchain, None) }
+        }
+        if !surface.is_null() {
+            unsafe { surface_fn.destroy_surface(instance, surface, None) }
+        }
+        if !window.is_null() {
+            app.destroy_window(window);
+        }
+    }
+}
+
+impl<'driver> Device for VulkanDevice<'driver> {
+    fn create_buffer(&self, desc: &BufferDesc) -> Buffer {
+        let mut usage = vk::BufferUsageFlags::default();
+        if desc.usage.contains(BufferUsageFlags::UNIFORM) {
+            usage |= vk::BufferUsageFlags::UNIFORM_BUFFER;
+        }
+        if desc.usage.contains(BufferUsageFlags::STORAGE) {
+            usage |= vk::BufferUsageFlags::STORAGE_BUFFER;
+        }
+        if desc.usage.contains(BufferUsageFlags::INDEX) {
+            usage |= vk::BufferUsageFlags::INDEX_BUFFER;
+        }
+        if desc.usage.contains(BufferUsageFlags::TRANSFER_SRC) {
+            usage |= vk::BufferUsageFlags::TRANSFER_SRC;
+        }
+        if desc.usage.contains(BufferUsageFlags::TRANSFER_DST) {
+            usage |= vk::BufferUsageFlags::TRANSFER_DST;
+        }
+
+        let queue_family_indices = &[self.universal_queue_family_index];
+
+        let create_info = vk::BufferCreateInfo {
+            size: desc.size as u64,
+            usage,
+            queue_family_indices: queue_family_indices.into(),
+            sharing_mode: vk::SharingMode::Exclusive,
+            ..default()
+        };
+        let mut buffer = vk::Buffer::null();
+        vk_check!(self
+            .device_fn
+            .create_buffer(self.device, &create_info, None, &mut buffer));
+
+        let memory = self.allocate_memory_for_buffer(buffer, desc.memory_location);
+
+        unsafe {
+            self.device_fn.bind_buffer_memory2(
+                self.device,
+                &[vk::BindBufferMemoryInfo {
+                    buffer,
+                    memory: memory.memory,
+                    offset: memory.offset,
+                    ..default()
+                }],
+            )
+        };
+
+        let handle = self
+            .pools
+            .lock()
+            .buffers
+            .insert(VulkanBuffer { memory, buffer });
+
+        Buffer(handle)
+    }
+
+    fn create_texture(&self, desc: &TextureDesc) -> Texture {
+        debug_assert_ne!(desc.layers, 0, "layers must be at least one");
+        debug_assert_ne!(desc.width, 0, "width must be at least one");
+        debug_assert_ne!(desc.height, 0, "height must be at least one");
+        debug_assert_ne!(desc.depth, 0, "depth must be at least one");
+
+        if desc.dimension == TextureDimension::Type3d {
+            debug_assert_eq!(desc.layers, 1, "3d image arrays are illegal");
+        }
+
+        if desc.dimension == TextureDimension::TypeCube {
+            debug_assert!(desc.layers % 6 == 0, "cubemap layers must be a multiple of six");
+            debug_assert_eq!(desc.depth, 1, "cubemap faces must be 2d");
+        }
+
+        let mut flags = vk::ImageCreateFlags::default();
+        if desc.dimension == TextureDimension::TypeCube {
+            flags |= vk::ImageCreateFlags::CUBE_COMPATIBLE
+        }
+
+        let image_type = match desc.dimension {
+            TextureDimension::Type1d => vk::ImageType::Type1d,
+            TextureDimension::Type2d => vk::ImageType::Type2d,
+            TextureDimension::Type3d => vk::ImageType::Type3d,
+            TextureDimension::TypeCube => vk::ImageType::Type2d,
+        };
+        let format = vulkan_format(desc.format);
+        let extent = vk::Extent3d {
+            width: desc.width,
+            height: desc.height,
+            depth: desc.depth,
+        };
+
+        let mut usage = vk::ImageUsageFlags::default();
+        if desc.usage.contains(TextureUsageFlags::SAMPLED) {
+            usage |= vk::ImageUsageFlags::SAMPLED;
+        }
+        if desc.usage.contains(TextureUsageFlags::STORAGE) {
+            usage |= vk::ImageUsageFlags::STORAGE;
+        }
+        if desc.usage.contains(TextureUsageFlags::DEPTH_STENCIL) {
+            usage |= vk::ImageUsageFlags::DEPTH_STENCIL_ATTACHMENT;
+        }
+        if desc.usage.contains(TextureUsageFlags::RENDER_TARGET) {
+            usage |= vk::ImageUsageFlags::COLOR_ATTACHMENT;
+        }
+        if desc.usage.contains(TextureUsageFlags::TRANSFER_DST) {
+            usage |= vk::ImageUsageFlags::TRANSFER_DST;
+        }
+        if desc.usage.contains(TextureUsageFlags::TRANSFER_SRC) {
+            usage |= vk::ImageUsageFlags::TRANSFER_SRC;
+        }
+
+        let queue_family_indices = &[self.universal_queue_family_index];
+        let create_info = vk::ImageCreateInfo {
+            flags,
+            image_type,
+            format,
+            extent,
+            mip_levels: desc.mip_levels,
+            array_layers: desc.layers,
+            samples: vk::SampleCountFlags::SAMPLE_COUNT_1,
+            tiling: vk::ImageTiling::OPTIMAL,
+            usage,
+            sharing_mode: vk::SharingMode::Exclusive,
+            queue_family_indices: queue_family_indices.into(),
+            initial_layout: vk::ImageLayout::Undefined,
+            ..default()
+        };
+
+        let mut image = vk::Image::null();
+        vk_check!(self
+            .device_fn
+            .create_image(self.device, &create_info, None, &mut image));
+
+        let memory = self.allocate_memory_for_image(image, desc.memory_location);
+
+        unsafe {
+            self.device_fn.bind_image_memory2(
+                self.device,
+                &[vk::BindImageMemoryInfo {
+                    image,
+                    memory: memory.memory,
+                    offset: memory.offset,
+                    ..default()
+                }],
+            )
+        };
+
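+        // Derive the image view type from the layer count and dimension;
+        // multi-layer resources get the array view types.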
+        let view_type = match (desc.layers, desc.dimension) {
+            (1, TextureDimension::Type1d) => vk::ImageViewType::Type1d,
+            (1, TextureDimension::Type2d) => vk::ImageViewType::Type2d,
+            (1, TextureDimension::Type3d) => vk::ImageViewType::Type3d,
+            (6, TextureDimension::TypeCube) => vk::ImageViewType::TypeCube,
+            (_, TextureDimension::Type1d) => vk::ImageViewType::Type1dArray,
+            (_, TextureDimension::Type2d) => vk::ImageViewType::Type2dArray,
+            (_, TextureDimension::TypeCube) => vk::ImageViewType::TypeCubeArray,
+            _ => panic!("unsupported view type"),
+        };
+
+        let aspect_mask = vulkan_aspect(desc.format);
+        let create_info = vk::ImageViewCreateInfo {
+            image,
+            view_type,
+            format,
+            subresource_range: vk::ImageSubresourceRange {
+                aspect_mask,
+                base_mip_level: 0,
+                level_count: desc.mip_levels,
+                base_array_layer: 0,
+                layer_count: desc.layers,
+            },
+            ..default()
+        };
+
+        let mut view = vk::ImageView::null();
+        vk_check!(self
+            .device_fn
+            .create_image_view(self.device, &create_info, None, &mut view));
+
+        let texture = VulkanTextureUnique {
+            texture: VulkanTexture { image, memory },
+            view,
+        };
+
+        let handle = self
+            .pools
+            .lock()
+            .textures
+            .insert(VulkanTextureHolder::Unique(texture));
+
+        Texture(handle)
+    }
+
+    fn create_texture_view(&self, desc: &TextureViewDesc) -> Texture {
+        let mut pools = self.pools.lock();
+        let texture = pools.textures.get_mut(desc.texture.0).unwrap();
+
+        // Creating a view aliases the underlying image, so promote a unique
+        // texture to a shared, reference-counted one first.
+        let arc_texture = match texture {
+            VulkanTextureHolder::Shared(shared) => shared.texture.clone(),
+            VulkanTextureHolder::Unique(unique) => {
+                let shared_texture = ManualArc::new(unique.texture.clone());
+                let arc_texture = shared_texture.clone();
+                let view = unique.view;
+                *texture = VulkanTextureHolder::Shared(VulkanTextureShared {
+                    texture: shared_texture,
+                    view,
+                });
+                arc_texture
+            }
+            VulkanTextureHolder::Swapchain(_) => {
+                panic!("unable to create additional views of swapchain images")
+            }
+        };
+
+        let view_type = match (desc.layer_count, desc.dimension) {
+            (1, TextureDimension::Type1d) => vk::ImageViewType::Type1d,
+            (1, TextureDimension::Type2d) => vk::ImageViewType::Type2d,
+            (1, TextureDimension::Type3d) => vk::ImageViewType::Type3d,
+            (6, TextureDimension::TypeCube) => vk::ImageViewType::TypeCube,
+            (_, TextureDimension::Type1d) => vk::ImageViewType::Type1dArray,
+            (_, TextureDimension::Type2d) => vk::ImageViewType::Type2dArray,
+            (_, TextureDimension::TypeCube) => vk::ImageViewType::TypeCubeArray,
+            _ => panic!("unsupported view type"),
+        };
+
+        let format = vulkan_format(desc.format);
+        let aspect_mask = vulkan_aspect(desc.format);
+
+        let create_info = vk::ImageViewCreateInfo {
+            image: arc_texture.image,
+            view_type,
+            format,
+            subresource_range: vk::ImageSubresourceRange {
+                aspect_mask,
+                base_mip_level: desc.base_mip,
+                level_count: desc.mip_count,
+                base_array_layer: desc.base_layer,
+                layer_count: desc.layer_count,
+            },
+            ..default()
+        };
+
+        let mut view = vk::ImageView::null();
+        vk_check!(self
+            .device_fn
+            .create_image_view(self.device, &create_info, None, &mut view));
+
+        let handle = pools
+            .textures
+            .insert(VulkanTextureHolder::Shared(VulkanTextureShared {
+                texture: arc_texture,
+                view,
+            }));
+
+        Texture(handle)
+    }
+
+    fn create_sampler(&self, desc: &SamplerDesc) -> Sampler {
+        let (filter, mipmap_mode, anisotropy_enable) = match desc.filter {
+            SamplerFilter::Point => (
+                vk::Filter::Nearest,
+                vk::SamplerMipmapMode::Nearest,
+                vk::Bool32::False,
+            ),
+            SamplerFilter::Bilinear => (
+                vk::Filter::Linear,
+                vk::SamplerMipmapMode::Nearest,
+                vk::Bool32::False,
+            ),
+            SamplerFilter::Trilinear => (
+                vk::Filter::Linear,
+                vk::SamplerMipmapMode::Linear,
+                vk::Bool32::False,
+            ),
+            SamplerFilter::Anisotropic => (
+                vk::Filter::Linear,
+                vk::SamplerMipmapMode::Linear,
+                vk::Bool32::True,
+            ),
+        };
+
+        let address_mode = match desc.address_mode {
+            SamplerAddressMode::Wrap => vk::SamplerAddressMode::Repeat,
+            SamplerAddressMode::Clamp => vk::SamplerAddressMode::ClampToEdge,
+        };
+
+        let (compare_enable, compare_op) = match desc.compare_op {
+            SamplerCompareOp::None => (vk::Bool32::False, vk::CompareOp::Always),
+            SamplerCompareOp::Less => (vk::Bool32::True, vk::CompareOp::Less),
+            SamplerCompareOp::LessEq => (vk::Bool32::True, vk::CompareOp::LessOrEqual),
+            SamplerCompareOp::Greater => (vk::Bool32::True, vk::CompareOp::Greater),
+            SamplerCompareOp::GreaterEq => (vk::Bool32::True, vk::CompareOp::GreaterOrEqual),
+        };
+
+        let mut sampler = vk::Sampler::null();
+        vk_check!(self.device_fn.create_sampler(
+            self.device,
+            &vk::SamplerCreateInfo {
+                max_lod: desc.max_lod,
+                min_lod: desc.min_lod,
+                mip_lod_bias: desc.mip_lod_bias,
+                min_filter: filter,
+                mag_filter: filter,
+                mipmap_mode,
+                anisotropy_enable,
+                max_anisotropy: 16.0, // TODO: check maxSamplerAnisotropy
+                address_mode_u: address_mode,
+                address_mode_v: address_mode,
+                address_mode_w: address_mode,
+                compare_enable,
+                compare_op,
+                ..Default::default()
+            },
+            None,
+            &mut sampler,
+        ));
+
+        let handle = self.pools.lock().samplers.insert(VulkanSampler(sampler));
+        Sampler(handle)
+    }
+
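+    // Pipelines target dynamic rendering: attachment formats are supplied via
+    // PipelineRenderingCreateInfo instead of a render pass object.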
+    fn create_graphics_pipeline(&self, desc: &GraphicsPipelineDesc) -> Pipeline {
+        let layout = {
+            let create_info = vk::PipelineLayoutCreateInfo::default();
+            let mut pipeline_layout = vk::PipelineLayout::null();
+            vk_check!(unsafe {
+                self.device_fn.create_pipeline_layout(
+                    self.device,
+                    &create_info,
+                    None,
+                    &mut pipeline_layout,
+                )
+            });
+            pipeline_layout
+        };
+
+        let shader_module = |code: &[u8]| {
+            let create_info = vk::ShaderModuleCreateInfo {
+                code: code.into(),
+                ..default()
+            };
+            let mut shader_module = vk::ShaderModule::null();
+            vk_check!(self.device_fn.create_shader_module(
+                self.device,
+                &create_info,
+                None,
+                &mut shader_module
+            ));
+            shader_module
+        };
+
+        let vertex_module = shader_module(desc.vertex_shader.code);
+        let fragment_module = shader_module(desc.fragment_shader.code);
+
+        let stages = &[
+            vk::PipelineShaderStageCreateInfo {
+                stage: vk::ShaderStageFlags::VERTEX,
+                name: desc.vertex_shader.entrypoint_name.as_ptr(),
+                module: vertex_module,
+                ..default()
+            },
+            vk::PipelineShaderStageCreateInfo {
+                stage: vk::ShaderStageFlags::FRAGMENT,
+                name: desc.fragment_shader.entrypoint_name.as_ptr(),
+                module: fragment_module,
+                ..default()
+            },
+        ];
+
+        let vertex_input_state = vk::PipelineVertexInputStateCreateInfo::default();
+        let input_assembly_state = vk::PipelineInputAssemblyStateCreateInfo {
+            topology: vk::PrimitiveTopology::TriangleList,
+            ..default()
+        };
+        let viewport_state = vk::PipelineViewportStateCreateInfo::default();
+        let rasterization_state = vk::PipelineRasterizationStateCreateInfo {
+            line_width: 1.0,
+            ..default()
+        };
+        let multisample_state = vk::PipelineMultisampleStateCreateInfo {
+            rasterization_samples: vk::SampleCountFlags::SAMPLE_COUNT_1,
+            ..default()
+        };
+        let color_blend_attachments = &[vk::PipelineColorBlendAttachmentState {
+            color_write_mask: vk::ColorComponentFlags::R
+                | vk::ColorComponentFlags::G
+                | vk::ColorComponentFlags::B
+                | vk::ColorComponentFlags::A,
+            ..default()
+        }];
+        let color_blend_state = vk::PipelineColorBlendStateCreateInfo {
+            attachments: color_blend_attachments.into(),
+            ..default()
+        };
+        let dynamic_states = &[
+            vk::DynamicState::ViewportWithCount,
+            vk::DynamicState::ScissorWithCount,
+        ];
+        let dynamic_state = vk::PipelineDynamicStateCreateInfo {
+            dynamic_states: dynamic_states.into(),
+            ..default()
+        };
+        let color_attachment_formats = desc
+            .layout
+            .color_attachment_formats
+            .iter()
+            .copied()
+            .map(vulkan_format)
+            .collect::<Vec<_>>();
+
+        let pipeline_rendering_create_info = vk::PipelineRenderingCreateInfo {
+            view_mask: 0,
+            color_attachment_formats: color_attachment_formats.as_slice().into(),
+            depth_attachment_format: desc
+                .layout
+                .depth_attachment_format
+                .map_or(vk::Format::Undefined, vulkan_format),
+            stencil_attachment_format: desc
+                .layout
+                .stencil_attachment_format
+                .map_or(vk::Format::Undefined, vulkan_format),
+            ..default()
+        };
+
+        let create_infos = &mut [vk::GraphicsPipelineCreateInfo {
+            _next: unsafe {
+                std::mem::transmute::<_, *mut c_void>(&pipeline_rendering_create_info)
+            },
+            stages: stages.into(),
+            vertex_input_state: Some(&vertex_input_state),
+            input_assembly_state: Some(&input_assembly_state),
+            tessellation_state: None,
+            viewport_state: Some(&viewport_state),
+            rasterization_state: Some(&rasterization_state),
+            multisample_state: Some(&multisample_state),
+            color_blend_state: Some(&color_blend_state),
+            dynamic_state: Some(&dynamic_state),
+            layout,
+            ..default()
+        }];
+        let mut pipelines = [vk::Pipeline::null()];
+        vk_check!(self.device_fn.create_graphics_pipelines(
+            self.device,
+            vk::PipelineCache::null(),
+            create_infos,
+            None,
+            &mut pipelines
+        ));
+
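+        // The shader modules and the (empty) pipeline layout are only needed
+        // for pipeline creation, so destroy them immediately.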
+        unsafe {
+            self.device_fn
+                .destroy_shader_module(self.device, vertex_module, None)
+        };
+        unsafe {
+            self.device_fn
+                .destroy_shader_module(self.device, fragment_module, None)
+        };
+        unsafe {
+            self.device_fn
+                .destroy_pipeline_layout(self.device, layout, None)
+        };
+
+        let handle = self.pools.lock().pipelines.insert(VulkanPipeline {
+            pipeline: pipelines[0],
+            pipeline_bind_point: vk::PipelineBindPoint::Graphics,
+        });
+
+        Pipeline(handle)
+    }
+
+    fn create_compute_pipeline(&self, _desc: &ComputePipelineDesc) -> Pipeline {
+        todo!()
+    }
+
+    fn destroy_buffer(&self, frame_token: &FrameToken, buffer: Buffer) {
+        if let Some(buffer) = self.pools.lock().buffers.remove(buffer.0) {
+            let frame = self.frame(frame_token);
+            frame.destroyed_buffers.lock().push_back(buffer.buffer);
+            frame.destroyed_allocations.lock().push_back(buffer.memory);
+        }
+    }
+
+    fn destroy_texture(&self, frame_token: &FrameToken, texture: Texture) {
+        if let Some(texture) = self.pools.lock().textures.remove(texture.0) {
+            let frame = self.frame(frame_token);
+
+            match texture {
+                // The texture is unique; no reference-counted object was ever
+                // allocated for it.
+                VulkanTextureHolder::Unique(texture) => {
+                    frame.destroyed_image_views.lock().push_back(texture.view);
+                    frame
+                        .destroyed_images
+                        .lock()
+                        .push_back(texture.texture.image);
+                    frame
+                        .destroyed_allocations
+                        .lock()
+                        .push_back(texture.texture.memory);
+                }
+                // The texture was shared at some point, so we may or may not
+                // hold the last reference.
+                VulkanTextureHolder::Shared(mut texture) => {
+                    frame.destroyed_image_views.lock().push_back(texture.view);
+                    // If we held the last reference, destroy the image and memory too.
+                    if let manual_arc::Release::Unique(texture) = texture.texture.release() {
+                        frame.destroyed_images.lock().push_back(texture.image);
+                        frame.destroyed_allocations.lock().push_back(texture.memory);
+                    }
+                }
+                VulkanTextureHolder::Swapchain(_) => {
+                    panic!("cannot directly destroy swapchain images")
+                }
+            }
+        }
+    }
+
+    fn destroy_sampler(&self, frame_token: &FrameToken, sampler: Sampler) {
+        if let Some(sampler) = self.pools.lock().samplers.remove(sampler.0) {
+            self.frame(frame_token)
+                .destroyed_samplers
+                .lock()
+                .push_back(sampler.0)
+        }
+    }
+
+    fn destroy_pipeline(&self, frame_token: &FrameToken, pipeline: Pipeline) {
+        if let Some(pipeline) = self.pools.lock().pipelines.remove(pipeline.0) {
+            self.frame(frame_token)
+                .destroyed_pipelines
+                .lock()
+                .push_back(pipeline.pipeline)
+        }
+    }
+
+    fn destroy_window(&self, window: Window) {
+        if let Some(VulkanSwapchain {
+            window: _,
+            surface,
+            surface_format: _,
+            state,
+            _formats: _,
+            _present_modes: _,
+            capabilities: _,
+        }) = self.swapchains.lock().remove(&window)
+        {
+            let mut pools = self.pools.lock();
+            let VulkanPools {
+                textures,
+                buffers: _,
+                samplers: _,
+                pipelines: _,
+            } = pools.deref_mut();
+
+            if let VulkanSwapchainState::Occupied {
+                width: _,
+                height: _,
+                suboptimal: _,
+                swapchain,
+                image_views,
+            } = state
+            {
+                let mut vulkan_image_views = Vec::new();
+                for &image_view in image_views.iter() {
+                    match textures.remove(image_view.0) {
+                        Some(VulkanTextureHolder::Swapchain(VulkanTextureSwapchain {
+                            window: _,
+                            image: _,
+                            view,
+                        })) => vulkan_image_views.push(view),
+                        _ => panic!("swapchain texture in wrong state"),
+                    }
+                }
+
+                self.destroyed_swapchains.lock().push((
+                    window,
+                    swapchain,
+                    surface,
+                    vulkan_image_views.into_boxed_slice(),
+                ));
+            }
+        }
+    }
+
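+    // Returns the swapchain extent and the texture to render into, creating
+    // or re-creating the swapchain for this window as required.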
+    fn acquire_swapchain(
+        &self,
+        frame_token: &FrameToken,
+        window: Window,
+        format: TextureFormat,
+    ) -> (u32, u32, Texture) {
+        let format = vulkan_format(format);
+
+        let mut swapchains = self.swapchains.lock();
+        let mut vulkan_swapchain = swapchains.entry(window).or_insert_with(|| {
+            let surface = self.app.vk_create_surface(window, self.instance.as_raw());
+            let surface = vk::SurfaceKHR::from_raw(surface);
+
+            let mut supported = vk::Bool32::False;
+            vk_check!(self.surface_fn.get_physical_device_surface_support(
+                self.physical_device,
+                self.universal_queue_family_index,
+                surface,
+                &mut supported
+            ));
+
+            assert_eq!(
+                supported,
+                vk::Bool32::True,
+                "universal queue does not support presenting this surface"
+            );
+
+            let formats = vk_vec(|count, ptr| unsafe {
+                self.surface_fn.get_physical_device_surface_formats(
+                    self.physical_device,
+                    surface,
+                    count,
+                    ptr,
+                )
+            })
+            .into_boxed_slice();
+
+            let present_modes = vk_vec(|count, ptr| unsafe {
+                self.surface_fn.get_physical_device_surface_present_modes(
+                    self.physical_device,
+                    surface,
+                    count,
+                    ptr,
+                )
+            })
+            .into_boxed_slice();
+
+            let mut capabilities = vk::SurfaceCapabilitiesKHR::default();
+            vk_check!(self.surface_fn.get_physical_device_surface_capabilities(
+                self.physical_device,
+                surface,
+                &mut capabilities
+            ));
+
+            let surface_format = formats
+                .iter()
+                .copied()
+                .find(|&x| x.format == format)
+                .expect("failed to find matching surface format");
+
+            VulkanSwapchain {
+                window,
+                surface,
+                surface_format,
+                state: VulkanSwapchainState::Vacant,
+                _formats: formats,
+                _present_modes: present_modes,
+                capabilities,
+            }
+        });
+
+        assert_eq!(format, vulkan_swapchain.surface_format.format);
+
+        let frame = self.frame(frame_token);
+        let mut pools = self.pools.lock();
+        let VulkanPools {
+            textures,
+            buffers: _,
+            samplers: _,
+            pipelines: _,
+        } = pools.deref_mut();
+
+        let mut present_swapchains = frame.present_swapchains.lock();
+        let present_info = match present_swapchains.entry(window) {
+            std::collections::hash_map::Entry::Occupied(_) => {
+                panic!("attempting to acquire the same swapchain multiple times in a frame")
+            }
+            std::collections::hash_map::Entry::Vacant(entry) => entry.insert(default()),
+        };
+
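+        // Loop until an image is acquired: a vacant swapchain is (re)created,
+        // while resized or out-of-date swapchains are retired and rebuilt.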
+        let mut old_swapchain = vk::SwapchainKHR::null();
+        let mut iters = 0;
+
+        loop {
+            iters += 1;
+            if iters > 10 {
+                panic!("acquiring swapchain image took more than 10 tries");
+            }
+
+            let (desired_width, desired_height) =
+                self.app.vk_get_surface_extent(vulkan_swapchain.window);
+
+            vk_check!(self.surface_fn.get_physical_device_surface_capabilities(
+                self.physical_device,
+                vulkan_swapchain.surface,
+                &mut vulkan_swapchain.capabilities
+            ));
+
+            let desired_width = desired_width.clamp(
+                vulkan_swapchain.capabilities.min_image_extent.width,
+                vulkan_swapchain.capabilities.max_image_extent.width,
+            );
+            let desired_height = desired_height.clamp(
+                vulkan_swapchain.capabilities.min_image_extent.height,
+                vulkan_swapchain.capabilities.max_image_extent.height,
+            );
+
+            match &mut vulkan_swapchain.state {
+                VulkanSwapchainState::Vacant => {
+                    let image_extent = vk::Extent2d {
+                        width: desired_width,
+                        height: desired_height,
+                    };
+                    let mut new_swapchain = vk::SwapchainKHR::null();
+                    let create_info = vk::SwapchainCreateInfoKHR {
+                        surface: vulkan_swapchain.surface,
+                        min_image_count: vulkan_swapchain.capabilities.min_image_count,
+                        image_format: vulkan_swapchain.surface_format.format,
+                        image_color_space: vulkan_swapchain.surface_format.color_space,
+                        image_extent,
+                        image_usage: vk::ImageUsageFlags::COLOR_ATTACHMENT,
+                        image_array_layers: 1,
+                        image_sharing_mode: vk::SharingMode::Exclusive,
+                        pre_transform: vk::SurfaceTransformFlagsKHR::IDENTITY,
+                        composite_alpha: vk::CompositeAlphaFlagsKHR::OPAQUE,
+                        present_mode: vk::PresentModeKHR::Fifo,
+                        clipped: vk::Bool32::True,
+                        old_swapchain,
+                        ..default()
+                    };
+                    vk_check!(self.swapchain_fn.create_swapchain(
+                        self.device,
+                        &create_info,
+                        None,
+                        &mut new_swapchain
+                    ));
+                    assert!(!new_swapchain.is_null());
+
+                    let images = vk_vec(|count, ptr| unsafe {
+                        self.swapchain_fn.get_swapchain_images(
+                            self.device,
+                            new_swapchain,
+                            count,
+                            ptr,
+                        )
+                    });
+
+                    let image_views = images
+                        .iter()
+                        .map(|&image| {
+                            let create_info = vk::ImageViewCreateInfo {
+                                image,
+                                view_type: vk::ImageViewType::Type2d,
+                                format: vulkan_swapchain.surface_format.format,
+                                subresource_range: vk::ImageSubresourceRange {
+                                    aspect_mask: vk::ImageAspectFlags::COLOR,
+                                    base_mip_level: 0,
+                                    level_count: 1,
+                                    base_array_layer: 0,
+                                    layer_count: 1,
+                                },
+                                ..default()
+                            };
+                            let mut view = vk::ImageView::null();
+                            vk_check!(self.device_fn.create_image_view(
+                                self.device,
+                                &create_info,
+                                None,
+                                &mut view,
+                            ));
+
+                            let handle = textures.insert(VulkanTextureHolder::Swapchain(
+                                VulkanTextureSwapchain {
+                                    window,
+                                    image,
+                                    view,
+                                },
+                            ));
+                            Texture(handle)
+                        })
+                        .collect::<Box<_>>();
+
+                    vulkan_swapchain.state = VulkanSwapchainState::Occupied {
+                        width: image_extent.width,
+                        height: image_extent.height,
+                        suboptimal: false,
+                        swapchain: new_swapchain,
+                        image_views,
+                    };
+
+                    continue;
+                }
+                VulkanSwapchainState::Occupied {
+                    width,
+                    height,
+                    suboptimal,
+                    swapchain,
+                    image_views,
+                } => {
+                    let destroy_image_views =
+                        |textures: &mut Pool<VulkanTextureHolder>| -> Box<[vk::ImageView]> {
+                            let mut vulkan_image_views = Vec::new();
+                            for &image_view in image_views.iter() {
+                                match textures.remove(image_view.0) {
+                                    Some(VulkanTextureHolder::Swapchain(
+                                        VulkanTextureSwapchain {
+                                            window: _,
+                                            image: _,
+                                            view,
+                                        },
+                                    )) => vulkan_image_views.push(view),
+                                    _ => panic!("swapchain texture in wrong state"),
+                                }
+                            }
+                            vulkan_image_views.into_boxed_slice()
+                        };
+
+                    if *width != desired_width || *height != desired_height || *suboptimal {
+                        let image_views = destroy_image_views(textures);
+                        old_swapchain = *swapchain;
+                        if !old_swapchain.is_null() {
+                            self.destroyed_swapchains.lock().push((
+                                Window::default(),
+                                old_swapchain,
+                                vk::SurfaceKHR::null(),
+                                image_views,
+                            ));
+                        }
+                        vulkan_swapchain.state = VulkanSwapchainState::Vacant;
+                        continue;
+                    }
+
+                    let acquire = self.request_transient_semaphore(frame);
+                    let mut image_index = 0;
+                    match unsafe {
+                        self.swapchain_fn.acquire_next_image2(
+                            self.device,
+                            &vk::AcquireNextImageInfoKHR {
+                                swapchain: *swapchain,
+                                timeout: !0,
+                                semaphore: acquire,
+                                fence: vk::Fence::null(),
+                                device_mask: 1,
+                                ..default()
+                            },
+                            &mut image_index,
+                        )
+                    } {
+                        vk::Result::Success => {}
+                        vk::Result::SuboptimalKHR => {
+                            *suboptimal = true;
+                        }
+                        vk::Result::ErrorOutOfDateKHR => {
+                            old_swapchain = *swapchain;
+                            let image_views = destroy_image_views(textures);
+                            if !old_swapchain.is_null() {
+                                self.destroyed_swapchains.lock().push((
+                                    Window::default(),
+                                    old_swapchain,
+                                    vk::SurfaceKHR::null(),
+                                    image_views,
+                                ));
+                            }
+                            vulkan_swapchain.state = VulkanSwapchainState::Vacant;
+                            continue;
+                        }
+                        result => vk_check!(result),
+                    }
+
+                    present_info.acquire = acquire;
+                    present_info.image_index = image_index;
+                    present_info.swapchain = *swapchain;
+                    let view = image_views[image_index as usize];
+
+                    return (*width, *height, view);
+                }
+            }
+        }
+    }
+
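+    // Command buffers are handed out from a per-thread pool, allocating a new
+    // batch whenever the pool runs dry.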
+    fn request_command_buffer<'frame>(
+        &'frame self,
+        frame_token: &'frame FrameToken,
+        thread_token: &'frame mut ThreadToken,
+    ) -> CommandBufferToken {
+        let command_buffer_pool = self
+            .frame(frame_token)
+            .command_buffer_pools
+            .get_mut(thread_token);
+
+        // All command buffers in the pool are in use; allocate a fresh batch.
+        if command_buffer_pool.next_free_index >= command_buffer_pool.command_buffers.len() {
+            let mut command_buffers = [vk::CommandBuffer::null(); 4];
+            let allocate_info = vk::CommandBufferAllocateInfo {
+                command_pool: command_buffer_pool.command_pool,
+                level: vk::CommandBufferLevel::Primary,
+                command_buffer_count: command_buffers.len() as u32,
+                ..default()
+            };
+            vk_check!(self.device_fn.allocate_command_buffers(
+                self.device,
+                &allocate_info,
+                command_buffers.as_mut_ptr()
+            ));
+            command_buffer_pool
+                .command_buffers
+                .extend(command_buffers.iter().copied().map(|command_buffer| {
+                    VulkanCommandBuffer {
+                        command_buffer,
+                        swapchains_touched: HashMap::new(),
+                    }
+                }));
+        }
+
+        let index = command_buffer_pool.next_free_index;
+        command_buffer_pool.next_free_index += 1;
+
+        let command_buffer = command_buffer_pool.command_buffers[index].command_buffer;
+
+        vk_check!(self.device_fn.begin_command_buffer(
+            command_buffer,
+            &vk::CommandBufferBeginInfo {
+                flags: vk::CommandBufferUsageFlags::ONE_TIME_SUBMIT,
+                ..default()
+            }
+        ));
+
+        CommandBufferToken {
+            frame_token,
+            thread_token,
+            index,
+            raw: command_buffer.as_raw(),
+            phantom: PhantomUnsend {},
+        }
+    }
+
+    fn cmd_bind_pipeline(&self, command_buffer_token: &mut CommandBufferToken, pipeline: Pipeline) {
+        let command_buffer = vk::CommandBuffer::from_raw(command_buffer_token.raw);
+        let VulkanPipeline {
+            pipeline,
+            pipeline_bind_point,
+        } = *self.pools.lock().pipelines.get(pipeline.0).unwrap();
+        unsafe {
+            self.device_fn
+                .cmd_bind_pipeline(command_buffer, pipeline_bind_point, pipeline)
+        };
+    }
+
+    fn cmd_begin_rendering(
+        &self,
+        command_buffer_token: &mut CommandBufferToken,
+        desc: &crate::RenderingDesc,
+    ) {
+        let command_buffer = self.command_buffer_mut(command_buffer_token);
+        let pools = self.pools.lock();
+
+        let color_attachments = desc
+            .color_attachments
+            .iter()
+            .map(|attachment| {
+                let image_view = match pools.textures.get(attachment.texture.0).unwrap() {
+                    VulkanTextureHolder::Unique(texture) => texture.view,
+                    VulkanTextureHolder::Shared(texture) => texture.view,
+                    VulkanTextureHolder::Swapchain(texture) => {
+                        assert!(
+                            !command_buffer
+                                .swapchains_touched
+                                .contains_key(&texture.window),
+                            "swapchain attached multiple times in a command buffer"
+                        );
+                        command_buffer.swapchains_touched.insert(
+                            texture.window,
+                            (
+                                texture.image,
+                                vk::PipelineStageFlags2::COLOR_ATTACHMENT_OUTPUT,
+                            ),
+                        );
+
+                        // Transition the swapchain image from undefined to
+                        // attachment optimal layout.
+                        let image_memory_barriers = &[vk::ImageMemoryBarrier2 {
+                            src_stage_mask: vk::PipelineStageFlags2::TOP_OF_PIPE,
+                            src_access_mask: vk::AccessFlags2::NONE,
+                            dst_stage_mask: vk::PipelineStageFlags2::COLOR_ATTACHMENT_OUTPUT,
+                            dst_access_mask: vk::AccessFlags2::COLOR_ATTACHMENT_WRITE,
+                            src_queue_family_index: self.universal_queue_family_index,
+                            dst_queue_family_index: self.universal_queue_family_index,
+                            old_layout: vk::ImageLayout::Undefined,
+                            new_layout: vk::ImageLayout::AttachmentOptimal,
+                            image: texture.image,
+                            subresource_range: vk::ImageSubresourceRange {
+                                aspect_mask: vk::ImageAspectFlags::COLOR,
+                                base_mip_level: 0,
+                                level_count: 1,
+                                base_array_layer: 0,
+                                layer_count: 1,
+                            },
+                            ..default()
+                        }];
+
+                        let dependency_info = vk::DependencyInfo {
+                            image_memory_barriers: image_memory_barriers.into(),
+                            ..default()
+                        };
+
+                        unsafe {
+                            self.device_fn.cmd_pipeline_barrier2(
+                                command_buffer.command_buffer,
+                                &dependency_info,
+                            )
+                        };
+
+                        texture.view
+                    }
+                };
+
+                let (load_op, clear_value) = match attachment.load_op {
+                    LoadOp::Load => (vk::AttachmentLoadOp::Load, vk::ClearValue::default()),
+                    LoadOp::Clear(clear_value) => {
+                        (vk::AttachmentLoadOp::Clear, vulkan_clear_value(clear_value))
+                    }
+                    LoadOp::DontCare => (vk::AttachmentLoadOp::DontCare, vk::ClearValue::default()),
+                };
+
+                let store_op = match attachment.store_op {
+                    crate::StoreOp::Store => vk::AttachmentStoreOp::Store,
+                    crate::StoreOp::DontCare => vk::AttachmentStoreOp::DontCare,
+                };
+
+                vk::RenderingAttachmentInfo {
+                    image_view,
+                    image_layout: vk::ImageLayout::ColorAttachmentOptimal,
+                    load_op,
+                    store_op,
+                    clear_value,
+                    ..default()
+                }
+            })
+            .collect::<Vec<_>>();
+
+        let rendering_info = vk::RenderingInfo {
+            flags: vk::RenderingFlags::default(),
+            render_area: vk::Rect2d {
+                offset: vk::Offset2d {
+                    x: desc.x,
+                    y: desc.y,
+                },
+                extent: vk::Extent2d {
+                    width: desc.width,
+                    height: desc.height,
+                },
+            },
+            layer_count: 1,
+            view_mask: 0,
+            color_attachments: color_attachments.as_slice().into(),
+            depth_attachment: None,
+            stencil_attachment: None,
+            ..default()
+        };
+        unsafe {
+            self.device_fn
+                .cmd_begin_rendering(command_buffer.command_buffer, &rendering_info)
+        }
+    }
+
+    fn cmd_end_rendering(&self, command_buffer_token: &mut CommandBufferToken) {
+        let command_buffer = vk::CommandBuffer::from_raw(command_buffer_token.raw);
+        unsafe { self.device_fn.cmd_end_rendering(command_buffer) }
+    }
+
+    fn cmd_set_viewports(
+        &self,
+        command_buffer_token: &mut CommandBufferToken,
+        viewports: &[crate::Viewport],
+    ) {
+        let command_buffer = vk::CommandBuffer::from_raw(command_buffer_token.raw);
+        unsafe {
+            self.device_fn.cmd_set_viewport_with_count(
+                command_buffer,
+                // SAFETY: assumes `crate::Viewport` and `vk::Viewport` share an
+                // identical memory layout.
+                std::mem::transmute::<_, &[vk::Viewport]>(viewports),
+            );
+        }
+    }
+
+    fn cmd_set_scissors(
+        &self,
+        command_buffer_token: &mut CommandBufferToken,
+        scissors: &[crate::Scissor],
+    ) {
+        let command_buffer = vk::CommandBuffer::from_raw(command_buffer_token.raw);
+        unsafe {
+            self.device_fn.cmd_set_scissor_with_count(
+                command_buffer,
+                // SAFETY: assumes `crate::Scissor` and `vk::Rect2d` share an
+                // identical memory layout.
+                std::mem::transmute::<_, &[vk::Rect2d]>(scissors),
+            );
+        }
+    }
+
+    fn cmd_draw(
+        &self,
+        command_buffer_token: &mut CommandBufferToken,
+        vertex_count: u32,
+        instance_count: u32,
+        first_vertex: u32,
+        first_instance: u32,
+    ) {
+        let command_buffer = vk::CommandBuffer::from_raw(command_buffer_token.raw);
+        unsafe {
+            self.device_fn.cmd_draw(
+                command_buffer,
+                vertex_count,
+                instance_count,
+                first_vertex,
+                first_instance,
+            )
+        }
+    }
+
+    fn submit(&self, mut command_buffer_token: CommandBufferToken) {
+        let fence = self.universal_queue_fence.fetch_add(1, Ordering::SeqCst) + 1;
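+        // This submission will signal the universal queue's timeline semaphore
+        // with `fence`; `begin_frame` later waits on that value before reusing
+        // this frame's resources.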
+
+        let frame = self.frame(command_buffer_token.frame_token);
+        frame.universal_queue_fence.store(fence, Ordering::Relaxed);
+
+        let command_buffer = self.command_buffer_mut(&mut command_buffer_token);
+
+        for (_, &(image, _)) in &command_buffer.swapchains_touched {
+            // transition swapchain image from attachment optimal to present src
+            let image_memory_barriers = &[vk::ImageMemoryBarrier2 {
+                src_stage_mask: vk::PipelineStageFlags2::COLOR_ATTACHMENT_OUTPUT,
+                src_access_mask: vk::AccessFlags2::COLOR_ATTACHMENT_WRITE,
+                dst_stage_mask: vk::PipelineStageFlags2::BOTTOM_OF_PIPE,
+                dst_access_mask: vk::AccessFlags2::NONE,
+                src_queue_family_index: self.universal_queue_family_index,
+                dst_queue_family_index: self.universal_queue_family_index,
+                old_layout: vk::ImageLayout::AttachmentOptimal,
+                new_layout: vk::ImageLayout::PresentSrcKhr,
+                image,
+                subresource_range: vk::ImageSubresourceRange {
+                    aspect_mask: vk::ImageAspectFlags::COLOR,
+                    base_mip_level: 0,
+                    level_count: 1,
+                    base_array_layer: 0,
+                    layer_count: 1,
+                },
+                ..default()
+            }];
+            let dependency_info = vk::DependencyInfo {
+                image_memory_barriers: image_memory_barriers.into(),
+                ..default()
+            };
+            unsafe {
+                self.device_fn
+                    .cmd_pipeline_barrier2(command_buffer.command_buffer, &dependency_info)
+            };
+        }
+
+        vk_check!(self
+            .device_fn
+            .end_command_buffer(command_buffer.command_buffer));
+
+        let mut wait_semaphores = Vec::new();
+        let mut signal_semaphores = Vec::new();
+
+        if !command_buffer.swapchains_touched.is_empty() {
+            let mut present_swapchains = frame.present_swapchains.lock();
+
+            for (swapchain, (_, stage_mask)) in command_buffer.swapchains_touched.drain() {
+                let present_swapchain = present_swapchains
+                    .get_mut(&swapchain)
+                    .expect("presenting a swapchain that hasn't been acquired this frame");
+
+                assert!(!present_swapchain.acquire.is_null());
+                present_swapchain.release = self.request_transient_semaphore(frame);
+
+                wait_semaphores.push(vk::SemaphoreSubmitInfo {
+                    semaphore: present_swapchain.acquire,
+                    stage_mask,
+                    ..default()
+                });
+                signal_semaphores.push(vk::SemaphoreSubmitInfo {
+                    semaphore: present_swapchain.release,
+                    ..default()
+                });
+            }
+        }
+
+        signal_semaphores.push(vk::SemaphoreSubmitInfo {
+            semaphore: self.universal_queue_semaphore,
+            semaphore_value: fence,
+            stage_mask: vk::PipelineStageFlags2::ALL_GRAPHICS,
+            ..default()
+        });
+
+        let command_buffer_infos = &[vk::CommandBufferSubmitInfo {
+            command_buffer: command_buffer.command_buffer,
+            device_mask: 1,
+            ..default()
+        }];
+
+        vk_check!(self.device_fn.queue_submit2(
+            self.universal_queue,
+            &[vk::SubmitInfo2 {
+                wait_semaphore_infos: wait_semaphores.as_slice().into(),
+                command_buffer_infos: command_buffer_infos.into(),
+                signal_semaphore_infos: signal_semaphores.as_slice().into(),
+                ..default()
+            }],
+            vk::Fence::null()
+        ));
+    }
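+
+    // Typical per-frame flow (sketch; mirrors `narcissus/src/main.rs`):
+    //
+    //     let frame_token = device.begin_frame();
+    //     let mut cmd = device.request_command_buffer(&frame_token, &mut thread_token);
+    //     // ...record commands...
+    //     device.submit(cmd);
+    //     device.end_frame(frame_token);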
+
+    fn begin_frame(&self) -> FrameToken {
+        let device_fn = &self.device_fn;
+        let device = self.device;
+
+        let mut frame_token = self.frame_counter.acquire(self);
+        let frame = self.frame_mut(&mut frame_token);
+
+        {
+            let semaphore_fences = &[frame
+                .universal_queue_fence
+                .load(Ordering::Relaxed)];
+            let semaphores = &[self.universal_queue_semaphore];
+            let wait_info = vk::SemaphoreWaitInfo {
+                semaphores: (semaphores, semaphore_fences).into(),
+                ..default()
+            };
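+            // Wait (with an effectively infinite `!0` timeout) until the
+            // previous submission for this frame slot has signalled.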
+            vk_check!(device_fn.wait_semaphores(device, &wait_info, !0));
+        }
+
+        for pool in frame.command_buffer_pools.slots_mut() {
+            if pool.next_free_index == 0 {
+                continue;
+            }
+
+            vk_check!(device_fn.reset_command_pool(
+                device,
+                pool.command_pool,
+                vk::CommandPoolResetFlags::default()
+            ));
+
+            pool.next_free_index = 0;
+        }
+
+        self.semaphores
+            .lock()
+            .extend(frame.recycled_semaphores.get_mut().drain(..));
+
+        Self::destroy_deferred(device_fn, device, frame);
+
+        self.destroyed_swapchains
+            .lock()
+            .expire(|(window, swapchain, surface, image_views)| {
+                Self::destroy_swapchain(
+                    self.app,
+                    device_fn,
+                    &self.swapchain_fn,
+                    &self.surface_fn,
+                    self.instance,
+                    device,
+                    window,
+                    surface,
+                    swapchain,
+                    &image_views,
+                );
+            });
+
+        frame_token
+    }
+
+    fn end_frame(&self, mut frame_token: FrameToken) {
+        let frame = self.frame_mut(&mut frame_token);
+
+        let present_swapchains = frame.present_swapchains.get_mut();
+        if !present_swapchains.is_empty() {
+            let mut windows = Vec::new();
+            let mut wait_semaphores = Vec::new();
+            let mut swapchains = Vec::new();
+            let mut swapchain_image_indices = Vec::new();
+            let mut results = Vec::new();
+
+            for (window, present_info) in present_swapchains.drain() {
+                windows.push(window);
+                wait_semaphores.push(present_info.release);
+                swapchains.push(present_info.swapchain);
+                swapchain_image_indices.push(present_info.image_index);
+            }
+
+            results.resize_with(swapchains.len(), || vk::Result::Success);
+
+            let present_info = vk::PresentInfoKHR {
+                wait_semaphores: wait_semaphores.as_slice().into(),
+                swapchains: (swapchains.as_slice(), swapchain_image_indices.as_slice()).into(),
+                results: results.as_mut_ptr(),
+                ..default()
+            };
+
+            unsafe {
+                // check results below, so ignore this return value.
+                let _ = self
+                    .swapchain_fn
+                    .queue_present(self.universal_queue, &present_info);
+            };
+
+            for (i, &result) in results.iter().enumerate() {
+                match result {
+                    vk::Result::Success => {}
+                    vk::Result::SuboptimalKHR => {
+                        // The swapchain presented successfully but no longer
+                        // matches the surface exactly; mark it so it can be
+                        // recreated later.
+                        if let VulkanSwapchainState::Occupied { suboptimal, .. } =
+                            &mut self.swapchains.lock().get_mut(&windows[i]).unwrap().state
+                        {
+                            *suboptimal = true;
+                        }
+                    }
+                    _ => vk_check!(result),
+                }
+            }
+        }
+
+        self.frame_counter.release(frame_token);
+    }
+}
+
+impl<'app> Drop for VulkanDevice<'app> {
+    fn drop(&mut self) {
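+        // Wait for all in-flight GPU work to finish before tearing anything down.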
+        vk_check!(self.device_fn.device_wait_idle(self.device));
+
+        let device_fn = &self.device_fn;
+        let instance = self.instance;
+        let device = self.device;
+
+        for frame in self.frames.as_mut() {
+            let frame = frame.get_mut();
+
+            for semaphore in frame.recycled_semaphores.get_mut() {
+                unsafe { device_fn.destroy_semaphore(device, *semaphore, None) }
+            }
+
+            Self::destroy_deferred(device_fn, device, frame);
+
+            for pool in frame.command_buffer_pools.slots_mut() {
+                if !pool.command_buffers.is_empty() {
+                    let command_buffers = pool
+                        .command_buffers
+                        .iter()
+                        .map(|x| x.command_buffer)
+                        .collect::<Vec<_>>();
+                    unsafe {
+                        device_fn.free_command_buffers(
+                            device,
+                            pool.command_pool,
+                            command_buffers.as_slice(),
+                        )
+                    };
+                }
+
+                unsafe { device_fn.destroy_command_pool(device, pool.command_pool, None) }
+            }
+        }
+
+        let mut image_views = Vec::new();
+        let mut images = Vec::new();
+        for texture in self.pools.get_mut().textures.values() {
+            match texture {
+                VulkanTextureHolder::Unique(texture) => {
+                    image_views.push(texture.view);
+                    images.push(texture.texture.image)
+                }
+                VulkanTextureHolder::Shared(texture) => {
+                    image_views.push(texture.view);
+                }
+                VulkanTextureHolder::Swapchain(texture) => {
+                    image_views.push(texture.view);
+                }
+            }
+        }
+
+        for image_view in image_views {
+            unsafe { device_fn.destroy_image_view(device, image_view, None) }
+        }
+
+        for image in images {
+            unsafe { device_fn.destroy_image(device, image, None) }
+        }
+
+        for semaphore in self
+            .semaphores
+            .get_mut()
+            .iter()
+            .chain(std::iter::once(&self.universal_queue_semaphore))
+        {
+            unsafe { device_fn.destroy_semaphore(device, *semaphore, None) }
+        }
+
+        for (_, (window, swapchain, surface, image_views)) in
+            self.destroyed_swapchains.get_mut().drain(..)
+        {
+            Self::destroy_swapchain(
+                self.app,
+                &self.device_fn,
+                &self.swapchain_fn,
+                &self.surface_fn,
+                self.instance,
+                self.device,
+                window,
+                surface,
+                swapchain,
+                &image_views,
+            );
+        }
+
+        for (_, swapchain) in self.swapchains.get_mut().iter() {
+            if let VulkanSwapchainState::Occupied { swapchain, .. } = swapchain.state
+            {
+                unsafe { self.swapchain_fn.destroy_swapchain(device, swapchain, None) }
+            }
+            unsafe {
+                self.surface_fn
+                    .destroy_surface(instance, swapchain.surface, None)
+            }
+        }
+
+        for pipeline in self.pools.get_mut().pipelines.values() {
+            unsafe { device_fn.destroy_pipeline(device, pipeline.pipeline, None) }
+        }
+
+        unsafe { device_fn.destroy_device(device, None) }
+        unsafe { self.instance_fn.destroy_instance(self.instance, None) };
+    }
+}
diff --git a/narcissus-maths/Cargo.toml b/narcissus-maths/Cargo.toml
new file mode 100644 (file)
index 0000000..e993f6c
--- /dev/null
@@ -0,0 +1,8 @@
+[package]
+name = "narcissus-maths"
+version = "0.1.0"
+edition = "2021"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
diff --git a/narcissus-maths/src/lib.rs b/narcissus-maths/src/lib.rs
new file mode 100644 (file)
index 0000000..77b7ef0
--- /dev/null
@@ -0,0 +1,46 @@
+#[derive(Clone, Copy, PartialEq, PartialOrd)]
+#[repr(C)]
+pub struct Vec2 {
+    pub x: f32,
+    pub y: f32,
+}
+
+#[derive(Clone, Copy, PartialEq, PartialOrd)]
+#[repr(C)]
+pub struct Vec3 {
+    pub x: f32,
+    pub y: f32,
+    pub z: f32,
+}
+
+#[derive(Clone, Copy, PartialEq, PartialOrd)]
+#[repr(C)]
+pub struct Vec4 {
+    pub x: f32,
+    pub y: f32,
+    pub z: f32,
+    pub w: f32,
+}
+
+#[derive(Clone, Copy, PartialEq)]
+#[repr(C)]
+pub struct Quat {
+    pub a: f32,
+    pub b: f32,
+    pub c: f32,
+    pub d: f32,
+}
+
+#[derive(Clone, Copy, PartialEq)]
+#[repr(C)]
+pub struct Mat44([f32; 16]);
+
+#[derive(Clone, Copy, PartialEq)]
+#[repr(C)]
+pub struct Mat43([f32; 12]);
+
+#[cfg(test)]
+mod tests {
+    #[test]
+    fn it_works() {}
+}
diff --git a/narcissus-world/Cargo.toml b/narcissus-world/Cargo.toml
new file mode 100644 (file)
index 0000000..14eff5d
--- /dev/null
@@ -0,0 +1,9 @@
+[package]
+name = "narcissus-world"
+version = "0.1.0"
+edition = "2021"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+narcissus-core = { path = "../narcissus-core" }
\ No newline at end of file
diff --git a/narcissus-world/src/lib.rs b/narcissus-world/src/lib.rs
new file mode 100644 (file)
index 0000000..1717223
--- /dev/null
@@ -0,0 +1,700 @@
+pub struct PageCache {}
+
+/*use std::{
+    collections::{hash_map::Entry, HashMap, VecDeque},
+    hash::{Hash, Hasher},
+    ptr::NonNull,
+};
+
+use narcissus_core::{align_offset, virtual_commit, virtual_free, virtual_reserve, Uuid};
+use std::ffi::c_void;
+
+const ID_INDEX_BITS: u32 = 22;
+const ID_GEN_BITS: u32 = 10;
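+// Together these pack a 22-bit slot index and a 10-bit generation into the
+// 32-bit `Id` below, so a stale handle is detectable when its slot is reused.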
+
+const PAGE_SIZE: usize = 4096;
+const MAX_FIELD_TYPES: usize = 256;
+
+// SAFETY: Implementors must be blittable: `#[repr(C)]`, no padding, and any
+// bit pattern (including all zeroes) must be a valid value.
+pub unsafe trait FieldType {
+    const UUID: Uuid;
+    fn version() -> u32;
+    fn name() -> &'static str;
+}
+
+#[derive(Clone, Copy)]
+pub struct Id(u32);
+
+impl Id {
+    pub const fn null() -> Self {
+        Self(0)
+    }
+}
+
+#[derive(Clone, Copy)]
+pub struct Block(u32);
+
+impl Block {
+    pub const fn null() -> Self {
+        Self(0)
+    }
+}
+
+pub struct Config {
+    version: u32,
+    block_size: u32,
+    block_cap: u32,
+    id_cap: u32,
+}
+
+impl Config {
+    fn calculate_mapping(&self) -> Mapping {
+        const NUM_GUARD_PAGES: usize = 4;
+
+        let mut size = 0;
+
+        let id_info_offset = size;
+        let id_info_len = self.id_cap as usize * std::mem::size_of::<IdInfo>();
+
+        size += id_info_len;
+        size = align_offset(size, PAGE_SIZE);
+        size += NUM_GUARD_PAGES * PAGE_SIZE;
+
+        let block_info_offset = size;
+        let block_info_len = self.block_cap as usize * std::mem::size_of::<BlockInfo>();
+
+        size += block_info_len;
+        size = align_offset(size, PAGE_SIZE);
+        size += NUM_GUARD_PAGES * PAGE_SIZE;
+
+        let block_storage_offset = size;
+        let block_storage_len = self.block_cap as usize * self.block_size as usize;
+
+        size += block_storage_len;
+        size = align_offset(size, PAGE_SIZE);
+        size += NUM_GUARD_PAGES * PAGE_SIZE;
+
+        Mapping {
+            size,
+            id_info_offset,
+            id_info_len,
+            block_info_offset,
+            block_info_len,
+            block_storage_offset,
+            block_storage_len,
+        }
+    }
+}
+
+impl Default for Config {
+    fn default() -> Self {
+        Self {
+            version: 0,
+            block_size: 16 * 1024, // 16KiB blocks
+            block_cap: 262_144,    // 4GiB in total
+            id_cap: 4_194_304,
+        }
+    }
+}
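+
+// With the default `Config`, `calculate_mapping` describes a single contiguous
+// virtual reservation (sketch):
+//
+//   [ id_info | guard | block_info | guard | block storage (~4 GiB) | guard ]
+//
+// Each region is page aligned and separated by reserved-but-uncommitted guard
+// pages, so an out-of-bounds write faults instead of corrupting a neighbour.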
+
+struct Mapping {
+    size: usize,
+    id_info_offset: usize,
+    id_info_len: usize,
+    block_info_offset: usize,
+    block_info_len: usize,
+    block_storage_offset: usize,
+    block_storage_len: usize,
+}
+
+struct IdInfo {
+    index_and_generation: u32,
+    index_in_block: u32,
+    block: Block,
+}
+
+struct BlockInfo {
+    num_things: u32,
+}
+
+#[derive(Clone, Hash, PartialEq, Eq)]
+struct FieldTypes([u64; MAX_FIELD_TYPES / 64]);
+
+impl FieldTypes {
+    pub fn new() -> Self {
+        Self([0; MAX_FIELD_TYPES / 64])
+    }
+
+    pub fn set(&mut self, field_type_index: FieldTypeIndex) {
+        let field_type_index = field_type_index.0;
+        let index = field_type_index / 64;
+        self.0[index] |= 1 << (field_type_index % 64);
+    }
+}
+
+#[derive(Clone, Hash, PartialEq, Eq)]
+struct Descriptor {
+    shift: u32,
+    offset: u32,
+    stride: u32,
+    width: u32,
+}
+
+#[derive(Clone, PartialEq, Eq)]
+pub struct Schema {
+    hash: [u8; 32],
+    field_types: FieldTypes,
+    cap: usize,
+    descriptors: Box<[Descriptor]>,
+}
+
+impl Schema {
+    pub fn capacity(&self) -> usize {
+        self.cap
+    }
+}
+
+impl Hash for Schema {
+    fn hash<H: Hasher>(&self, state: &mut H) {
+        self.hash.hash(state);
+    }
+}
+
+enum SchemaFieldMode {
+    Scalar,
+    Vector,
+}
+
+struct SchemaField {
+    uuid: Uuid,
+    size: usize,
+    align: usize,
+    mode: SchemaFieldMode,
+}
+
+pub struct SchemaBuilder<'cfg, 'reg> {
+    config: &'cfg Config,
+    registry: &'reg Registry,
+    indexed: bool,
+    fields: Vec<SchemaField>,
+}
+
+impl<'cfg, 'reg> SchemaBuilder<'cfg, 'reg> {
+    pub fn new(config: &'cfg Config, registry: &'reg Registry) -> Self {
+        Self {
+            config,
+            registry,
+            indexed: false,
+            fields: Vec::new(),
+        }
+    }
+
+    pub fn indexed(mut self) -> Self {
+        self.indexed = true;
+        self
+    }
+
+    pub fn add_vector_field<T: FieldType>(mut self) -> Self {
+        self.fields.push(SchemaField {
+            uuid: T::UUID,
+            size: std::mem::size_of::<T>(),
+            align: std::mem::align_of::<T>(),
+            mode: SchemaFieldMode::Vector,
+        });
+        self
+    }
+
+    pub fn add_scalar_field<T: FieldType>(mut self) -> Self {
+        self.fields.push(SchemaField {
+            uuid: T::UUID,
+            size: std::mem::size_of::<T>(),
+            align: std::mem::align_of::<T>(),
+            mode: SchemaFieldMode::Scalar,
+        });
+        self
+    }
+
+    // fn push(&mut self, type_id: TypeId, field: &SchemaField) {
+    //     self.descriptor_indices
+    //         .insert(type_id, DescriptorIndex(self.descriptors.len()));
+    //     self.fields.push(Descriptor {
+    //         shift: 0,
+    //         offset: self.offset,
+    //         stride: 0,
+    //         width: field.size,
+    //     });
+    //     self.offset += field.size;
+    // }
+
+    // fn push_shift(&mut self, type_id: TypeId, part_desc: &PartDesc, shift: usize) {
+    //     self.descriptor_indices
+    //         .insert(type_id, DescriptorIndex(self.descriptors.len()));
+    //     self.descriptors.push(Descriptor {
+    //         shift,
+    //         offset: self.offset,
+    //         stride: part_desc.size,
+    //         width: part_desc.size,
+    //     });
+    //     self.offset += part_desc.size << shift;
+    // }
+
+    // fn push_vector(&mut self, type_id: TypeId, part_desc: &PartDesc, count: usize) {
+    //     self.descriptor_indices
+    //         .insert(type_id, DescriptorIndex(self.descriptors.len()));
+    //     self.descriptors.push(Descriptor {
+    //         shift: 0,
+    //         offset: self.offset,
+    //         stride: part_desc.size,
+    //         width: part_desc.size,
+    //     });
+    //     self.offset += part_desc.size * count;
+    // }
+
+    fn build(&self) -> Schema {
+        fn align_offset(x: usize, align: usize) -> usize {
+            debug_assert!(align.is_power_of_two());
+            (x + align - 1) & !(align - 1)
+        }
+
+        let mut field_types = FieldTypes::new();
+        let mut offset = 0;
+        let mut descriptors = Vec::new();
+
+        for field in &self.fields {
+            let field_type_index = self.registry.get_field_type_index_val(&field.uuid);
+        }
+
+        let field_type_indices = self
+            .fields
+            .iter()
+            .map(|field| self.registry.get_field_type_index_val(&field.uuid))
+            .collect::<Vec<_>>();
+
+        let fields = self
+            .fields
+            .iter()
+            .filter(|field| field.size != 0)
+            .collect::<Vec<_>>();
+
+        Schema {
+            hash: [0; 32],
+            field_types: FieldTypes([0; MAX_FIELD_TYPES / 64]),
+            cap: 0,
+            descriptors: descriptors.into_boxed_slice(),
+        }
+    }
+
+    pub fn build_aos(self) -> Schema {
+        Schema {
+            hash: [0; 32],
+            field_types: FieldTypes([0; MAX_FIELD_TYPES / 64]),
+            cap: 0,
+            descriptors: Box::new([]),
+        }
+    }
+
+    pub fn build_soa(self) -> Schema {
+        Schema {
+            hash: [0; 32],
+            field_types: FieldTypes([0; MAX_FIELD_TYPES / 64]),
+            cap: 0,
+            descriptors: Box::new([]),
+        }
+    }
+
+    pub fn build_aosoa(self, stride: usize) -> Schema {
+        assert!(stride.is_power_of_two());
+        Schema {
+            hash: [0; 32],
+            field_types: FieldTypes([0; MAX_FIELD_TYPES / 64]),
+            cap: 0,
+            descriptors: Box::new([]),
+        }
+    }
+}
+
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
+struct FieldTypeIndex(usize);
+
+impl FieldTypeIndex {
+    fn new(index: usize) -> Self {
+        assert!(index < MAX_FIELD_TYPES);
+        Self(index)
+    }
+}
+
+struct FieldTypeInfo {
+    debug_name: &'static str,
+    align: usize,
+    size: usize,
+}
+
+pub struct Registry {
+    field_lookup: HashMap<Uuid, FieldTypeIndex>,
+    field_types: Vec<FieldTypeInfo>,
+}
+
+impl Registry {
+    pub fn new() -> Self {
+        Self {
+            field_lookup: HashMap::new(),
+            field_types: Vec::new(),
+        }
+    }
+
+    pub fn register_field_type<T: FieldType>(&mut self) {
+        let type_uuid = T::UUID;
+        let debug_name = T::name();
+        let size = std::mem::size_of::<T>();
+        let align = std::mem::align_of::<T>();
+
+        if let Entry::Vacant(entry) = self.field_lookup.entry(type_uuid) {
+            let next_index = self.field_types.len();
+            entry.insert(FieldTypeIndex::new(next_index));
+            self.field_types.push(FieldTypeInfo {
+                debug_name,
+                align,
+                size,
+            });
+        } else {
+            panic!("don't register field types more than once");
+        }
+    }
+
+    fn get_field_type_index<T: FieldType>(&self) -> FieldTypeIndex {
+        *self
+            .field_lookup
+            .get(&T::UUID)
+            .expect("failed to find field type")
+    }
+
+    fn get_field_type_index_val(&self, uuid: &Uuid) -> FieldTypeIndex {
+        *self
+            .field_lookup
+            .get(uuid)
+            .expect("failed to find field type")
+    }
+}
+
+pub struct World<'cfg, 'reg> {
+    config: &'cfg Config,
+    registry: &'reg Registry,
+
+    schemas: Vec<Schema>,
+
+    free_ids: VecDeque<u32>,
+    id_info_len: usize,
+    id_info: NonNull<IdInfo>,
+
+    free_blocks: Vec<u32>,
+    block_info_len: usize,
+    block_info: NonNull<BlockInfo>,
+    block_storage: NonNull<u8>,
+
+    map: *mut c_void,
+    map_size: usize,
+}
+
+impl<'cfg, 'reg> World<'cfg, 'reg> {
+    pub fn new(config: &'cfg Config, registry: &'reg Registry) -> Self {
+        let mapping = config.calculate_mapping();
+
+        let map = unsafe { virtual_reserve(mapping.size) };
+        assert!(!map.is_null());
+
+        let id_info = NonNull::new(unsafe {
+            std::mem::transmute::<*mut c_void, *mut IdInfo>(
+                map.offset(mapping.id_info_offset as isize),
+            )
+        })
+        .unwrap();
+
+        let block_info = NonNull::new(unsafe {
+            std::mem::transmute::<*mut c_void, *mut BlockInfo>(
+                map.offset(mapping.block_info_offset as isize),
+            )
+        })
+        .unwrap();
+
+        // Always commit the entire info area, it's not all that much memory.
+        unsafe { virtual_commit(block_info.as_ptr() as *mut c_void, mapping.block_info_len) };
+
+        let block_storage = NonNull::new(unsafe {
+            std::mem::transmute::<*mut c_void, *mut u8>(
+                map.offset(mapping.block_storage_offset as isize),
+            )
+        })
+        .unwrap();
+
+        Self {
+            config,
+            registry,
+            schemas: Vec::new(),
+            free_ids: VecDeque::new(),
+            id_info_len: 0,
+            id_info,
+            free_blocks: Vec::new(),
+            block_info_len: 0,
+            block_info,
+            block_storage,
+            map,
+            map_size: mapping.size,
+        }
+    }
+
+    #[inline]
+    fn id_infos(&self) -> &[IdInfo] {
+        unsafe { std::slice::from_raw_parts(self.id_info.as_ptr(), self.id_info_len) }
+    }
+
+    #[inline]
+    fn id_infos_mut(&mut self) -> &mut [IdInfo] {
+        unsafe { std::slice::from_raw_parts_mut(self.id_info.as_ptr(), self.id_info_len) }
+    }
+
+    #[inline]
+    fn block_infos(&self) -> &[BlockInfo] {
+        unsafe { std::slice::from_raw_parts(self.block_info.as_ptr(), self.block_info_len) }
+    }
+
+    #[inline]
+    fn block_infos_mut(&mut self) -> &mut [BlockInfo] {
+        unsafe { std::slice::from_raw_parts_mut(self.block_info.as_ptr(), self.block_info_len) }
+    }
+
+    #[inline]
+    fn block_storage(&self, block: Block) -> &[u8] {
+        let block_index = block.0 as usize;
+        assert!(block_index < self.block_info_len);
+        let block_size = self.config.block_size as usize;
+        unsafe {
+            std::slice::from_raw_parts(
+                self.block_storage
+                    .as_ptr()
+                    .offset((block_index * block_size) as isize),
+                block_size,
+            )
+        }
+    }
+
+    #[inline]
+    fn block_storage_mut(&mut self, block: Block) -> &mut [u8] {
+        let block_index = block.0 as usize;
+        assert!(block_index < self.block_info_len);
+        let block_size = self.config.block_size as usize;
+        unsafe {
+            std::slice::from_raw_parts_mut(
+                self.block_storage
+                    .as_ptr()
+                    .offset((block_index * block_size) as isize),
+                block_size,
+            )
+        }
+    }
+
+    pub fn allocate_ids(&self, ids: &mut [Id]) {}
+    pub fn release_ids(&self, ids: &[Id]) {}
+
+    pub fn allocate_blocks(&self, blocks: &mut [Block]) {}
+    pub fn release_blocks(&self, blocks: &[Block]) {}
+
+    pub fn insert_blocks(&self, schema: &Schema, blocks: &[Block]) {}
+    pub fn insert_blocks_indexed(&self, schema: &Schema, blocks: &[Block], ids: &[Id]) {}
+}
+
+impl<'cfg, 'reg> Drop for World<'cfg, 'reg> {
+    fn drop(&mut self) {
+        unsafe { virtual_free(self.map, self.map_size) };
+    }
+}
+
+// mod app;
+// //mod gfx;
+// mod world;
+
+// use app::App;
+// use narcissus_core::{FixedVec, Uuid};
+// use world::{Block, Config, FieldType, Id, Registry, SchemaBuilder, World};
+
+// // Units
+
+// #[derive(Clone, Copy)]
+// struct WorldX(f32);
+// unsafe impl FieldType for WorldX {
+//     const UUID: Uuid = Uuid::parse_str_unwrap("fa565a43-4ec0-460d-84bb-32ef861ff48b");
+
+//     fn version() -> u32 {
+//         0
+//     }
+
+//     fn name() -> &'static str {
+//         "World Position X"
+//     }
+// }
+
+// #[derive(Clone, Copy)]
+// struct WorldY(f32);
+// unsafe impl FieldType for WorldY {
+//     const UUID: Uuid = Uuid::parse_str_unwrap("b7e3ccbf-d839-4ee0-8be4-b068a25a299f");
+
+//     fn version() -> u32 {
+//         0
+//     }
+
+//     fn name() -> &'static str {
+//         "World Position Y"
+//     }
+// }
+
+// #[derive(Clone, Copy)]
+// struct WorldZ(f32);
+// unsafe impl FieldType for WorldZ {
+//     const UUID: Uuid = Uuid::parse_str_unwrap("a6bcf557-3117-4664-ae99-8c8ceb96467c");
+
+//     fn version() -> u32 {
+//         0
+//     }
+
+//     fn name() -> &'static str {
+//         "World Position Z"
+//     }
+// }
+
+// #[derive(Clone, Copy)]
+// struct Health(f32);
+// unsafe impl FieldType for Health {
+//     const UUID: Uuid = Uuid::parse_str_unwrap("f3c7be8f-2120-42bd-a0be-bfe26d95198b");
+
+//     fn version() -> u32 {
+//         0
+//     }
+
+//     fn name() -> &'static str {
+//         "Health"
+//     }
+// }
+
+// #[derive(Clone, Copy)]
+// struct Armor(f32);
+// unsafe impl FieldType for Armor {
+//     const UUID: Uuid = Uuid::parse_str_unwrap("42fde8e0-7576-4039-8169-769e68aafe8b");
+
+//     fn version() -> u32 {
+//         0
+//     }
+
+//     fn name() -> &'static str {
+//         "Armor"
+//     }
+// }
+
+// struct Ship();
+// unsafe impl FieldType for Ship {
+//     const UUID: Uuid = Uuid::parse_str_unwrap("02024fee-ef95-42c3-877b-aa9f6afbf0a2");
+
+//     fn version() -> u32 {
+//         0
+//     }
+
+//     fn name() -> &'static str {
+//         "Ship"
+//     }
+// }
+
+// struct Asteroid();
+// unsafe impl FieldType for Asteroid {
+//     const UUID: Uuid = Uuid::parse_str_unwrap("22e8e546-0aeb-4e23-beee-89075522fb57");
+
+//     fn version() -> u32 {
+//         0
+//     }
+
+//     fn name() -> &'static str {
+//         "Asteroid"
+//     }
+// }
+
+// pub fn main() {
+//     let app = App::new();
+
+//     let window = app.create_window();
+
+//     let mut registry = Registry::new();
+//     registry.register_field_type::<WorldX>();
+//     registry.register_field_type::<WorldY>();
+//     registry.register_field_type::<WorldZ>();
+//     registry.register_field_type::<Health>();
+//     registry.register_field_type::<Armor>();
+//     registry.register_field_type::<Ship>();
+//     registry.register_field_type::<Asteroid>();
+
+//     let config = Config::default();
+//     let world = World::new(&config, &registry);
+
+//     const NUM_SHIPS: usize = 1_000;
+//     const NUM_ASTEROIDS: usize = 100_000;
+
+//     let ships_schema = SchemaBuilder::new(&config, &registry)
+//         .indexed()
+//         .add_vector_field::<WorldX>()
+//         .add_vector_field::<WorldY>()
+//         .add_vector_field::<WorldZ>()
+//         .add_vector_field::<Health>()
+//         .add_vector_field::<Armor>()
+//         .add_scalar_field::<Ship>()
+//         .build_aosoa(4);
+
+//     let mut ship_blocks = FixedVec::<_, 16>::new();
+//     ship_blocks.resize(NUM_SHIPS / ships_schema.capacity(), Block::null());
+//     world.allocate_blocks(&mut ship_blocks);
+
+//     let mut ship_ids = vec![Id::null(); NUM_SHIPS];
+//     world.allocate_ids(&mut ship_ids);
+
+//     let make_ship_scalars = |ship: &mut Ship| {};
+
+//     let make_ship = |world_x: &mut WorldX,
+//                      world_y: &mut WorldY,
+//                      world_z: &mut WorldZ,
+//                      health: &mut Health,
+//                      armor: &mut Armor| {};
+
+//     {
+//         let mut block_iter = ship_blocks.iter();
+//         let mut ids_iter = ship_ids.chunks(ships_schema.capacity());
+//         for (block, ids) in block_iter.zip(ids_iter) {
+//             //make_ship_scalars();
+//             for id in ids {
+//                 //make_ship();
+//             }
+//         }
+//     }
+
+//     world.insert_blocks_indexed(&ships_schema, &ship_blocks, &ship_ids);
+
+//     let asteroids_schema = SchemaBuilder::new(&config, &registry)
+//         .add_vector_field::<WorldX>()
+//         .add_vector_field::<WorldY>()
+//         .add_vector_field::<WorldZ>()
+//         .add_vector_field::<Health>()
+//         .add_scalar_field::<Asteroid>()
+//         .build_aosoa(4);
+
+//     let mut asteroid_blocks = vec![Block::null(); NUM_ASTEROIDS / asteroids_schema.capacity()];
+//     world.allocate_blocks(&mut asteroid_blocks);
+
+//     world.insert_blocks(&asteroids_schema, &asteroid_blocks);
+
+//     app.destroy_window(window);
+// }
+
+#[cfg(test)]
+mod tests {
+    #[test]
+    fn it_works() {
+        let result = 2 + 2;
+        assert_eq!(result, 4);
+    }
+}
+
+*/
diff --git a/narcissus/Cargo.toml b/narcissus/Cargo.toml
new file mode 100644 (file)
index 0000000..526c19e
--- /dev/null
@@ -0,0 +1,12 @@
+[package]
+name = "narcissus"
+version = "0.1.0"
+edition = "2021"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+libc = "0.2"
+narcissus-core = { path = "../narcissus-core" }
+narcissus-app = { path = "../narcissus-app" }
+narcissus-gpu = { path = "../narcissus-gpu" }
\ No newline at end of file
diff --git a/narcissus/src/main.rs b/narcissus/src/main.rs
new file mode 100644 (file)
index 0000000..993f700
--- /dev/null
@@ -0,0 +1,164 @@
+use narcissus_app::{create_app, Event, Window, WindowDesc};
+use narcissus_core::cstr;
+use narcissus_gpu::{
+    create_vulkan_device, ClearValue, Device, FrameToken, GraphicsPipelineDesc,
+    GraphicsPipelineLayout, LoadOp, MemoryLocation, Pipeline, RenderingAttachment, RenderingDesc,
+    Scissor, ShaderDesc, StoreOp, TextureDesc, TextureDimension, TextureFormat, TextureUsageFlags,
+    TextureViewDesc, ThreadToken, Viewport,
+};
+
+fn render_window(
+    device: &dyn Device,
+    frame_token: &FrameToken,
+    thread_token: &mut ThreadToken,
+    pipeline: Pipeline,
+    window: Window,
+) {
+    let (width, height, swapchain_image) =
+        device.acquire_swapchain(frame_token, window, TextureFormat::BGRA8_SRGB);
+
+    let mut command_buffer_token = device.request_command_buffer(frame_token, thread_token);
+    device.cmd_begin_rendering(
+        &mut command_buffer_token,
+        &RenderingDesc {
+            x: 0,
+            y: 0,
+            width,
+            height,
+            color_attachments: &[RenderingAttachment {
+                texture: swapchain_image,
+                load_op: LoadOp::Clear(ClearValue::ColorF32([0.392157, 0.584314, 0.929412, 1.0])),
+                store_op: StoreOp::Store,
+            }],
+            depth_attachment: None,
+            stencil_attachment: None,
+        },
+    );
+    device.cmd_bind_pipeline(&mut command_buffer_token, pipeline);
+    device.cmd_set_scissors(
+        &mut command_buffer_token,
+        &[Scissor {
+            x: 0,
+            y: 0,
+            width,
+            height,
+        }],
+    );
+    device.cmd_set_viewports(
+        &mut command_buffer_token,
+        &[Viewport {
+            x: 0.0,
+            y: 0.0,
+            width: width as f32,
+            height: height as f32,
+            min_depth: 0.0,
+            max_depth: 1.0,
+        }],
+    );
+    device.cmd_draw(&mut command_buffer_token, 3, 1, 0, 0);
+    device.cmd_end_rendering(&mut command_buffer_token);
+
+    device.submit(command_buffer_token);
+}
+
+pub fn main() {
+    let app = create_app();
+
+    let device = create_vulkan_device(app.as_ref());
+    let mut thread_token = ThreadToken::new();
+
+    #[repr(align(4))]
+    struct Spirv<const LEN: usize>([u8; LEN]);
+
+    let vert_shader_spv = Spirv(*include_bytes!("shaders/triangle.vert.spv"));
+    let frag_shader_spv = Spirv(*include_bytes!("shaders/triangle.frag.spv"));
+
+    let pipeline = device.create_graphics_pipeline(&GraphicsPipelineDesc {
+        vertex_shader: ShaderDesc {
+            entrypoint_name: cstr!("main"),
+            code: &vert_shader_spv.0,
+        },
+        fragment_shader: ShaderDesc {
+            entrypoint_name: cstr!("main"),
+            code: &frag_shader_spv.0,
+        },
+        layout: GraphicsPipelineLayout {
+            color_attachment_formats: &[TextureFormat::BGRA8_SRGB],
+            depth_attachment_format: None,
+            stencil_attachment_format: None,
+        },
+    });
+
+    let mut windows = (0..4)
+        .map(|i| {
+            let title = format!("Narcissus {}", i);
+            app.create_window(&WindowDesc {
+                title: title.as_str(),
+                width: 800,
+                height: 600,
+            })
+        })
+        .collect::<Vec<_>>();
+
+    let texture = device.create_texture(&TextureDesc {
+        memory_location: MemoryLocation::PreferDevice,
+        usage: TextureUsageFlags::SAMPLED,
+        dimension: TextureDimension::Type2d,
+        format: TextureFormat::BGRA8_SRGB,
+        width: 800,
+        height: 600,
+        depth: 1,
+        layers: 1,
+        mip_levels: 1,
+    });
+
+    let texture2 = device.create_texture_view(&TextureViewDesc {
+        texture,
+        dimension: TextureDimension::Type2d,
+        format: TextureFormat::BGRA8_SRGB,
+        base_mip: 0,
+        mip_count: 1,
+        base_layer: 0,
+        layer_count: 1,
+    });
+
+    let frame_token = device.begin_frame();
+    device.destroy_texture(&frame_token, texture);
+    device.destroy_texture(&frame_token, texture2);
+    device.end_frame(frame_token);
+
+    let mut should_quit = false;
+
+    while !should_quit {
+        let frame_token = device.begin_frame();
+
+        while let Some(event) = app.poll_event() {
+            use Event::*;
+            match event {
+                Quit => {
+                    should_quit = true;
+                    break;
+                }
+                WindowClose(window) => {
+                    if let Some(index) = windows.iter().position(|&w| window == w) {
+                        device.destroy_window(windows.swap_remove(index));
+                    }
+                }
+                _ => {}
+            }
+        }
+
+        for &window in windows.iter() {
+            render_window(
+                device.as_ref(),
+                &frame_token,
+                &mut thread_token,
+                pipeline,
+                window,
+            );
+        }
+
+        device.end_frame(frame_token);
+    }
+}
diff --git a/narcissus/src/shaders/triangle.frag.glsl b/narcissus/src/shaders/triangle.frag.glsl
new file mode 100644 (file)
index 0000000..e948fd6
--- /dev/null
@@ -0,0 +1,8 @@
+#version 450
+
+layout(location = 0) in vec3 fragColor;
+layout(location = 0) out vec4 outColor;
+
+void main() {
+    outColor = vec4(fragColor, 1.0);
+}
\ No newline at end of file
diff --git a/narcissus/src/shaders/triangle.frag.spv b/narcissus/src/shaders/triangle.frag.spv
new file mode 100644 (file)
index 0000000..8a4cc40
Binary files /dev/null and b/narcissus/src/shaders/triangle.frag.spv differ
diff --git a/narcissus/src/shaders/triangle.vert.glsl b/narcissus/src/shaders/triangle.vert.glsl
new file mode 100644 (file)
index 0000000..66d6766
--- /dev/null
@@ -0,0 +1,20 @@
+#version 450
+
+layout(location = 0) out vec3 fragColor;
+
+vec2 positions[3] = vec2[](
+    vec2(0.0, -0.5),
+    vec2(0.5, 0.5),
+    vec2(-0.5, 0.5)
+);
+
+vec3 colors[3] = vec3[](
+    vec3(1.0, 0.0, 0.0),
+    vec3(0.0, 1.0, 0.0),
+    vec3(0.0, 0.0, 1.0)
+);
+
+void main() {
+    gl_Position = vec4(positions[gl_VertexIndex], 0.0, 1.0);
+    fragColor = colors[gl_VertexIndex];
+}
\ No newline at end of file
diff --git a/narcissus/src/shaders/triangle.vert.spv b/narcissus/src/shaders/triangle.vert.spv
new file mode 100644 (file)
index 0000000..ef8a465
Binary files /dev/null and b/narcissus/src/shaders/triangle.vert.spv differ
diff --git a/renderdoc-sys/Cargo.toml b/renderdoc-sys/Cargo.toml
new file mode 100644 (file)
index 0000000..0a2275c
--- /dev/null
@@ -0,0 +1,9 @@
+[package]
+name = "renderdoc-sys"
+version = "0.1.0"
+edition = "2021"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+libc = "0.2"
\ No newline at end of file
diff --git a/renderdoc-sys/src/helpers.rs b/renderdoc-sys/src/helpers.rs
new file mode 100644 (file)
index 0000000..6de9823
--- /dev/null
@@ -0,0 +1,45 @@
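+// `[][0]` below indexes an empty array whose element type is inferred as `()`
+// from the function's return type; evaluating it in a const context becomes a
+// compile-time error, standing in for `panic!` in const fns.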
+#[allow(unconditional_panic)]
+const fn illegal_null_in_string() {
+    [][0]
+}
+
+#[doc(hidden)]
+pub const fn validate_cstr_contents(bytes: &[u8]) {
+    let mut i = 0;
+    while i < bytes.len() {
+        if bytes[i] == b'\0' {
+            illegal_null_in_string();
+        }
+        i += 1;
+    }
+}
+
+#[macro_export]
+macro_rules! cstr {
+    ( $s:literal ) => {{
+        $crate::helpers::validate_cstr_contents($s.as_bytes());
+        #[allow(unused_unsafe)]
+        unsafe {
+            std::mem::transmute::<_, &std::ffi::CStr>(concat!($s, "\0"))
+        }
+    }};
+}
+
+#[allow(dead_code)]
+pub fn string_from_c_str(c_str: &[i8]) -> String {
+    let s = unsafe { std::ffi::CStr::from_ptr(c_str.as_ptr()).to_bytes() };
+    String::from_utf8_lossy(s).into_owned()
+}
+
+#[cfg(test)]
+mod tests {
+    use std::ffi::CStr;
+
+    #[test]
+    fn test_cstr() {
+        assert_eq!(
+            cstr!("hello"),
+            CStr::from_bytes_with_nul(b"hello\0").unwrap()
+        );
+    }
+}
diff --git a/renderdoc-sys/src/lib.rs b/renderdoc-sys/src/lib.rs
new file mode 100644 (file)
index 0000000..436b5e8
--- /dev/null
@@ -0,0 +1,629 @@
+use std::{
+    ffi::{c_void, CStr, CString},
+    mem::MaybeUninit,
+    os::raw::c_char,
+};
+
+mod helpers;
+
+#[repr(C)]
+pub enum Version {
+    Version1_0_0 = 10000,
+    Version1_0_1 = 10001,
+    Version1_0_2 = 10002,
+    Version1_1_0 = 10100,
+    Version1_1_1 = 10101,
+    Version1_1_2 = 10102,
+    Version1_2_0 = 10200,
+    Version1_3_0 = 10300,
+    Version1_4_0 = 10400,
+    Version1_4_1 = 10401,
+    Version1_4_2 = 10402,
+    Version1_5_0 = 10500,
+}
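+
+// Discriminants encode the version as major * 10000 + minor * 100 + patch;
+// for example, `Version1_4_2` is 10402.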
+
+#[repr(C)]
+pub enum CaptureOption {
+    // Allow the application to enable vsync
+    //
+    // Default - enabled
+    //
+    // 1 - The application can enable or disable vsync at will
+    // 0 - vsync is force disabled
+    AllowVSync = 0,
+
+    // Allow the application to enable fullscreen
+    //
+    // Default - enabled
+    //
+    // 1 - The application can enable or disable fullscreen at will
+    // 0 - fullscreen is force disabled
+    AllowFullscreen = 1,
+
+    // Record API debugging events and messages
+    //
+    // Default - disabled
+    //
+    // 1 - Enable built-in API debugging features and records the results into
+    //     the capture, which is matched up with events on replay
+    // 0 - no API debugging is forcibly enabled
+    APIValidation = 2,
+
+    // Capture CPU callstacks for API events
+    //
+    // Default - disabled
+    //
+    // 1 - Enables capturing of callstacks
+    // 0 - no callstacks are captured
+    CaptureCallstacks = 3,
+
+    // When capturing CPU callstacks, only capture them from actions.
+    // This option does nothing without the above option being enabled
+    //
+    // Default - disabled
+    //
+    // 1 - Only captures callstacks for actions.
+    //     Ignored if CaptureCallstacks is disabled
+    // 0 - Callstacks, if enabled, are captured for every event.
+    CaptureCallstacksOnlyDraws = 4,
+
+    // Specify a delay in seconds to wait for a debugger to attach, after
+    // creating or injecting into a process, before continuing to allow it to run.
+    //
+    // 0 indicates no delay, and the process will run immediately after injection
+    //
+    // Default - 0 seconds
+    //
+    DelayForDebugger = 5,
+
+    // Verify buffer access. This includes checking the memory returned by a Map() call to
+    // detect any out-of-bounds modification, as well as initialising buffers with undefined contents
+    // to a marker value to catch use of uninitialised memory.
+    //
+    // NOTE: This option is only valid for OpenGL and D3D11. Explicit APIs such as D3D12 and Vulkan do
+    // not do the same kind of interception & checking and undefined contents are really undefined.
+    //
+    // Default - disabled
+    //
+    // 1 - Verify buffer access
+    // 0 - No verification is performed, and overwriting bounds may cause crashes or corruption in
+    //     RenderDoc.
+    VerifyBufferAccess = 6,
+
+    // Hooks any system API calls that create child processes, and injects
+    // RenderDoc into them recursively with the same options.
+    //
+    // Default - disabled
+    //
+    // 1 - Hooks into spawned child processes
+    // 0 - Child processes are not hooked by RenderDoc
+    HookIntoChildren = 7,
+
+    // By default RenderDoc only includes resources in the final capture necessary
+    // for that frame, this allows you to override that behaviour.
+    //
+    // Default - disabled
+    //
+    // 1 - all live resources at the time of capture are included in the capture
+    //     and available for inspection
+    // 0 - only the resources referenced by the captured frame are included
+    RefAllResources = 8,
+
+    // **NOTE**: As of RenderDoc v1.1 this option has been deprecated. Setting or
+    // getting it will be ignored, to allow compatibility with older versions.
+    // In v1.1 the option acts as if it's always enabled.
+    //
+    // By default RenderDoc skips saving initial states for resources where the
+    // previous contents don't appear to be used, assuming that writes before
+    // reads indicate previous contents aren't used.
+    //
+    // Default - disabled
+    //
+    // 1 - initial contents at the start of each captured frame are saved, even if
+    //     they are later overwritten or cleared before being used.
+    // 0 - unless a read is detected, initial contents will not be saved and will
+    //     appear as black or empty data.
+    SaveAllInitials = 9,
+
+    // In APIs that allow for the recording of command lists to be replayed later,
+    // RenderDoc may choose to not capture command lists before a frame capture is
+    // triggered, to reduce overheads. This means any command lists recorded once
+    // and replayed many times will not be available and may cause a failure to
+    // capture.
+    //
+    // NOTE: This is only true for APIs where multithreading is difficult or
+    // discouraged. Newer APIs like Vulkan and D3D12 will ignore this option
+    // and always capture all command lists since the API is heavily oriented
+    // around it and the overheads have been reduced by API design.
+    //
+    // 1 - All command lists are captured from the start of the application
+    // 0 - Command lists are only captured if their recording begins during
+    //     the period when a frame capture is in progress.
+    CaptureAllCmdLists = 10,
+
+    // Mute API debugging output when the API validation mode option is enabled
+    //
+    // Default - enabled
+    //
+    // 1 - Mute any API debug messages from being displayed or passed through
+    // 0 - API debugging is displayed as normal
+    DebugOutputMute = 11,
+
+    // Option to allow vendor extensions to be used even when they may be
+    // incompatible with RenderDoc and cause corrupted replays or crashes.
+    //
+    // Default - inactive
+    //
+    // No values are documented, this option should only be used when absolutely
+    // necessary as directed by a RenderDoc developer.
+    AllowUnsupportedVendorExtensions = 12,
+}
+
+#[repr(C)]
+pub enum InputButton {
+    // '0' - '9' matches ASCII values
+    Key0 = 0x30,
+    Key1 = 0x31,
+    Key2 = 0x32,
+    Key3 = 0x33,
+    Key4 = 0x34,
+    Key5 = 0x35,
+    Key6 = 0x36,
+    Key7 = 0x37,
+    Key8 = 0x38,
+    Key9 = 0x39,
+
+    // 'A' - 'Z' matches ASCII values
+    A = 0x41,
+    B = 0x42,
+    C = 0x43,
+    D = 0x44,
+    E = 0x45,
+    F = 0x46,
+    G = 0x47,
+    H = 0x48,
+    I = 0x49,
+    J = 0x4A,
+    K = 0x4B,
+    L = 0x4C,
+    M = 0x4D,
+    N = 0x4E,
+    O = 0x4F,
+    P = 0x50,
+    Q = 0x51,
+    R = 0x52,
+    S = 0x53,
+    T = 0x54,
+    U = 0x55,
+    V = 0x56,
+    W = 0x57,
+    X = 0x58,
+    Y = 0x59,
+    Z = 0x5A,
+
+    // leave the rest of the ASCII range free
+    // in case we want to use it later
+    NonPrintable = 0x100,
+
+    Divide,
+    Multiply,
+    Subtract,
+    Plus,
+
+    F1,
+    F2,
+    F3,
+    F4,
+    F5,
+    F6,
+    F7,
+    F8,
+    F9,
+    F10,
+    F11,
+    F12,
+
+    Home,
+    End,
+    Insert,
+    Delete,
+    PageUp,
+    PageDn,
+
+    Backspace,
+    Tab,
+    PrtScrn,
+    Pause,
+
+    Max,
+}
+
+#[repr(C)]
+pub struct OverlayBits(u32);
+
+impl OverlayBits {
+    // This single bit controls whether the overlay is enabled or disabled globally
+    pub const ENABLED: Self = Self(0x1);
+
+    // Show the average framerate over several seconds as well as min/max
+    pub const FRAME_RATE: Self = Self(0x2);
+
+    // Show the current frame number
+    pub const FRAME_NUMBER: Self = Self(0x4);
+
+    // Show a list of recent captures, and how many captures have been made
+    pub const CAPTURE_LIST: Self = Self(0x8);
+
+    // Default values for the overlay mask
+    pub const DEFAULT: Self =
+        Self(Self::ENABLED.0 | Self::FRAME_RATE.0 | Self::FRAME_NUMBER.0 | Self::CAPTURE_LIST.0);
+
+    // Enable all bits
+    pub const ALL: Self = Self(!0);
+
+    // Disable all bits
+    pub const NONE: Self = Self(0);
+}
+
+impl std::ops::BitOr for OverlayBits {
+    type Output = Self;
+
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
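+
+// Example (sketch): compose an overlay mask from individual bits.
+//
+//     let mask = OverlayBits::ENABLED | OverlayBits::FRAME_RATE;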
+
+pub type DevicePointer = *mut c_void;
+pub type WindowHandle = *mut c_void;
+
+pub type FnGetApi = extern "system" fn(version: Version, out_pointers: *mut *mut c_void) -> i32;
+pub type FnGetApiVersion = extern "system" fn(major: &mut i32, minor: &mut i32, patch: &mut i32);
+pub type FnSetCaptureOptionU32 =
+    extern "system" fn(capture_option: CaptureOption, value: u32) -> i32;
+pub type FnSetCaptureOptionF32 =
+    extern "system" fn(capture_option: CaptureOption, value: f32) -> i32;
+pub type FnGetCaptureOptionU32 = extern "system" fn(capture_option: CaptureOption) -> u32;
+pub type FnGetCaptureOptionF32 = extern "system" fn(capture_option: CaptureOption) -> f32;
+pub type FnSetFocusToggleKeys = extern "system" fn(keys: *const InputButton, num_keys: i32);
+pub type FnSetCaptureKeys = extern "system" fn(keys: *const InputButton, num_keys: i32);
+pub type FnGetOverlayBits = extern "system" fn() -> OverlayBits;
+pub type FnMaskOverlayBits = extern "system" fn(and: OverlayBits, or: OverlayBits);
+pub type FnRemoveHooks = extern "system" fn();
+pub type FnUnloadCrashHandler = extern "system" fn();
+pub type FnSetCaptureFilePathTemplate = extern "system" fn(path_template: *const c_char);
+pub type FnGetCaptureFilePathTemplate = extern "system" fn() -> *const c_char;
+pub type FnGetNumCaptures = extern "system" fn() -> u32;
+pub type FnGetCapture = extern "system" fn(
+    index: u32,
+    filename: *mut c_char,
+    path_length: Option<&mut u32>,
+    timestamp: Option<&mut u64>,
+) -> u32;
+pub type FnTriggerCapture = extern "system" fn();
+pub type FnTriggerMultiFrameCapture = extern "system" fn(num_frames: u32);
+pub type FnIsTargetControlConnected = extern "system" fn() -> u32;
+pub type FnLaunchReplayUI =
+    extern "system" fn(connect_target_control: u32, command_line: *const c_char) -> u32;
+pub type FnSetActiveWindow = extern "system" fn(device: DevicePointer, window: WindowHandle);
+pub type FnStartFrameCapture = extern "system" fn(device: DevicePointer, window: WindowHandle);
+pub type FnIsFrameCapturing = extern "system" fn() -> u32;
+pub type FnEndFrameCapture = extern "system" fn(device: DevicePointer, window: WindowHandle) -> u32;
+pub type FnDiscardFrameCapture =
+    extern "system" fn(device: DevicePointer, window: WindowHandle) -> u32;
+pub type FnSetCaptureFileComments =
+    extern "system" fn(filepath: *const c_char, comments: *const c_char);
+pub type FnShowReplayUI = extern "system" fn() -> u32;
+
+#[repr(C)]
+pub struct RenderdocApi1_5_0 {
+    get_api_version: FnGetApiVersion,
+    set_capture_option_u32: FnSetCaptureOptionU32,
+    set_capture_option_f32: FnSetCaptureOptionF32,
+    get_capture_option_u32: FnGetCaptureOptionU32,
+    get_capture_option_f32: FnGetCaptureOptionF32,
+    set_focus_toggle_keys: FnSetFocusToggleKeys,
+    set_capture_keys: FnSetCaptureKeys,
+    get_overlay_bits: FnGetOverlayBits,
+    mask_overlay_bits: FnMaskOverlayBits,
+    remove_hooks: FnRemoveHooks,
+    unload_crash_handler: FnUnloadCrashHandler,
+    set_capture_file_path_template: FnSetCaptureFilePathTemplate,
+    get_capture_file_path_template: FnGetCaptureFilePathTemplate,
+    get_num_captures: FnGetNumCaptures,
+    get_capture: FnGetCapture,
+    trigger_capture: FnTriggerCapture,
+    is_target_control_connected: FnIsTargetControlConnected,
+    launch_replay_ui: FnLaunchReplayUI,
+    set_active_window: FnSetActiveWindow,
+    start_frame_capture: FnStartFrameCapture,
+    is_frame_capturing: FnIsFrameCapturing,
+    end_frame_capture: FnEndFrameCapture,
+    trigger_multi_frame_capture: FnTriggerMultiFrameCapture,
+    set_capture_file_comments: FnSetCaptureFileComments,
+    discard_frame_capture: FnDiscardFrameCapture,
+    show_replay_ui: FnShowReplayUI,
+}
+
+impl RenderdocApi1_5_0 {
+    pub fn load() -> Option<Self> {
+        unsafe {
+            let module = libc::dlopen(
+                cstr!("librenderdoc.so").as_ptr(),
+                libc::RTLD_NOW | libc::RTLD_NOLOAD,
+            );
+            if module.is_null() {
+                return None;
+            }
+            let get_api_ptr = libc::dlsym(module, cstr!("RENDERDOC_GetAPI").as_ptr());
+            if get_api_ptr.is_null() {
+                return None;
+            }
+            let get_api = std::mem::transmute::<_, FnGetApi>(get_api_ptr);
+
+            let mut rdoc_api = MaybeUninit::<Self>::uninit();
+            let ret = get_api(
+                Version::Version1_5_0,
+                rdoc_api.as_mut_ptr() as *mut *mut c_void,
+            );
+            if ret == 0 {
+                return None;
+            }
+            Some(rdoc_api.assume_init())
+        }
+    }
+
+    /// RenderDoc can return a higher version than requested if it's backwards compatible,
+    /// this function returns the actual version returned.
+    pub fn get_api_version(&self) -> (i32, i32, i32) {
+        let mut major = 0;
+        let mut minor = 0;
+        let mut patch = 0;
+        (self.get_api_version)(&mut major, &mut minor, &mut patch);
+        (major, minor, patch)
+    }
+
+    /// Sets an option that controls how RenderDoc behaves on capture.
+    ///
+    /// Returns true if the option and value are valid.
+    /// Returns false if either is invalid, in which case the option is left unchanged.
+    pub fn set_capture_option_f32(&self, option: CaptureOption, value: f32) -> bool {
+        (self.set_capture_option_f32)(option, value) == 1
+    }
+
+    /// Sets an option that controls how RenderDoc behaves on capture.
+    ///
+    /// Returns true if the option and value are valid.
+    /// Returns false if either is invalid, in which case the option is left unchanged.
+    pub fn set_capture_option_u32(&self, option: CaptureOption, value: u32) -> bool {
+        (self.set_capture_option_u32)(option, value) == 1
+    }
+
+    /// Gets an option that controls how RenderDoc behaves on capture.
+    ///
+    /// If the option is invalid, -FLT_MAX is returned
+    pub fn get_capture_option_f32(&self, option: CaptureOption) -> f32 {
+        (self.get_capture_option_f32)(option)
+    }
+
+    /// Gets an option that controls how RenderDoc behaves on capture.
+    ///
+    /// If the option is invalid, 0xffffffff is returned
+    pub fn get_capture_option_u32(&self, option: CaptureOption) -> u32 {
+        (self.get_capture_option_u32)(option)
+    }
+
+    /// Sets which key or keys can be used to toggle focus between multiple windows.
+    ///
+    /// If the slice is empty, toggle keys will be disabled
+    pub fn set_focus_toggle_keys(&self, keys: &[InputButton]) {
+        (self.set_focus_toggle_keys)(keys.as_ptr(), keys.len() as i32)
+    }
+
+    /// Sets which key or keys can be used to capture the next frame.
+    ///
+    /// If the slice is empty, capture keys will be disabled
+    pub fn set_capture_keys(&self, keys: &[InputButton]) {
+        (self.set_capture_keys)(keys.as_ptr(), keys.len() as i32)
+    }
+
+    /// Returns the overlay bits that have been set
+    pub fn get_overlay_bits(&self) -> OverlayBits {
+        (self.get_overlay_bits)()
+    }
+
+    /// Sets the overlay bits by applying an AND mask followed by an OR mask
+    pub fn mask_overlay_bits(&self, and: OverlayBits, or: OverlayBits) {
+        (self.mask_overlay_bits)(and, or)
+    }
+
+    /// Attempt to remove RenderDoc's hooks in the application.
+    ///
+    /// Note that this can only work correctly if done immediately after
+    /// the module is loaded, before any API work happens. RenderDoc will remove its
+    /// injected hooks and shut down. Behaviour is undefined if this is called
+    /// after any API functions have been called, and there is still no guarantee of
+    /// success.
+    pub fn remove_hooks(&self) {
+        (self.remove_hooks)()
+    }
+
+    /// Unload RenderDoc's crash handler.
+    ///
+    /// If you use your own crash handler and don't want RenderDoc's handler to
+    /// intercede, you can call this function to unload it and any unhandled
+    /// exceptions will pass to the next handler.
+    pub fn unload_crash_handler(&self) {
+        (self.unload_crash_handler)()
+    }
+
+    /// Sets the capture file path template
+    ///
+    /// `path_template` is a UTF-8 string that gives a template for how captures will be named
+    /// and where they will be saved.
+    ///
+    /// Any extension is stripped off the path, and captures are saved in the directory
+    /// specified, named with the filename and the frame number appended. If the
+    /// directory does not exist it will be created, including any parent directories.
+    ///
+    /// In the underlying C API a NULL template leaves the current template unchanged;
+    /// this wrapper always passes a valid string.
+    ///
+    /// Example:
+    ///
+    /// SetCaptureFilePathTemplate("my_captures/example");
+    ///
+    /// Capture #1 -> my_captures/example_frame123.rdc
+    /// Capture #2 -> my_captures/example_frame456.rdc
+    pub fn set_capture_file_path_template(&self, path_template: &str) {
+        let path_template = CString::new(path_template).unwrap();
+        (self.set_capture_file_path_template)(path_template.as_ptr())
+    }
+
+    /// Gets the capture file path template
+    pub fn get_capture_file_path_template(&self) -> String {
+        let ptr = (self.get_capture_file_path_template)();
+        let str = unsafe { CStr::from_ptr(ptr) };
+        str.to_owned().into_string().unwrap()
+    }
+
+    /// Returns the number of captures that have been made
+    pub fn get_num_captures(&self) -> u32 {
+        (self.get_num_captures)()
+    }
+
+    /// This function returns the details of a capture, by index. New captures are added
+    /// to the end of the list.
+    ///
+    /// Returns the absolute path of the capture file and the time of capture in seconds since the
+    /// unix epoch, or None, if the capture index is invalid.
+    ///
+    /// Note: when captures are deleted in the UI they will remain in this list, so the
+    /// capture path may not exist anymore.
+    pub fn get_capture(&self, index: u32) -> Option<(String, u64)> {
+        let mut path_length = 0;
+        let mut timestamp = 0;
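+        // Two-call pattern: the first call passes a null filename to query the
+        // path length (which includes the trailing NUL), the second fills the buffer.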
+        let ret = (self.get_capture)(
+            index,
+            std::ptr::null_mut(),
+            Some(&mut path_length),
+            Some(&mut timestamp),
+        );
+        if ret == 0 || path_length <= 1 {
+            return None;
+        }
+        // Reserve room for the trailing NUL as well; the string itself is path_length - 1 bytes.
+        let mut bytes: Vec<u8> = Vec::with_capacity(path_length as usize);
+        (self.get_capture)(index, bytes.as_mut_ptr() as *mut c_char, None, None);
+        unsafe { bytes.set_len(path_length as usize - 1) };
+        Some((String::from_utf8(bytes).unwrap(), timestamp))
+    }
+
+    /// Capture the next frame on whichever window and API is currently considered active.
+    pub fn trigger_capture(&self) {
+        (self.trigger_capture)()
+    }
+
+    /// Capture the next N frames on whichever window and API is currently considered active.
+    pub fn trigger_multi_frame_capture(&self, num_frames: u32) {
+        (self.trigger_multi_frame_capture)(num_frames)
+    }
+
+    /// Returns true if the RenderDoc UI is connected to this application, false otherwise
+    pub fn is_target_control_connected(&self) -> bool {
+        (self.is_target_control_connected)() == 1
+    }
+
+    /// This function will launch the Replay UI associated with the RenderDoc library injected
+    /// into the running application.
+    ///
+    /// If connect_target_control is true, the Replay UI will be launched with a command line
+    /// parameter telling it to connect to this application.
+    /// command_line is an optional string containing the rest of the command line, e.g. a
+    /// capture file to open.
+    ///
+    /// Returns the PID of the replay UI if successful, 0 if not successful.
+    pub fn launch_replay_ui(
+        &self,
+        connect_target_control: bool,
+        command_line: Option<&str>,
+    ) -> u32 {
+        let command_line = command_line.map(|s| CString::new(s).unwrap());
+        (self.launch_replay_ui)(
+            connect_target_control as u32,
+            // Borrow the CString (as_ref) so the pointer stays valid for the call;
+            // map_or by value would drop the CString inside the closure.
+            command_line.as_ref().map_or(std::ptr::null(), |s| s.as_ptr()),
+        )
+    }
+
+    /// Sets the given API/window pair as 'active': the RenderDoc in-app overlay renders there
+    /// and it will respond to keypresses. Neither parameter can be NULL.
+    pub fn set_active_window(&self, device: DevicePointer, window: WindowHandle) {
+        (self.set_active_window)(device, window)
+    }
+
+    /// Immediately starts capturing API calls on the specified device pointer and window handle.
+    ///
+    /// When choosing either a device pointer or a window handle to capture, you can pass NULL.
+    /// Passing NULL specifies a 'wildcard' match against anything. This allows you to specify
+    /// any API rendering to a specific window, or a specific API instance rendering to any window,
+    /// or in the simplest case of one window and one API, you can just pass NULL for both.
+    ///
+    /// In either case, if there are two or more possible matching (device, window) pairs it
+    /// is undefined which one will be captured.
+    ///
+    /// Note: for headless rendering you can pass NULL for the window handle and either specify
+    /// a device pointer or leave it NULL as above.
+    ///
+    /// If there is no matching thing to capture (e.g. no supported API has been initialised),
+    /// this will do nothing.
+    ///
+    /// The results are undefined (including crashes) if two overlapping captures are started,
+    /// even on separate devices and/or windows.
+    pub fn start_frame_capture(&self, device: DevicePointer, window: WindowHandle) {
+        (self.start_frame_capture)(device, window)
+    }
+
+    /// Returns whether or not a frame capture is currently ongoing anywhere.
+    pub fn is_frame_capturing(&self) -> bool {
+        (self.is_frame_capturing)() == 1
+    }
+
+    /// Ends capturing immediately.
+    ///
+    /// This will return true if the capture succeeded, and false if there was an error capturing.
+    pub fn end_frame_capture(&self, device: DevicePointer, window: WindowHandle) -> bool {
+        (self.end_frame_capture)(device, window) == 1
+    }
+
+    /// Ends capturing immediately and discards any stored data without saving it to disk.
+    ///
+    /// This will return true if the capture was discarded, and false if there was an error or no
+    /// capture was in progress.
+    pub fn discard_frame_capture(&self, device: DevicePointer, window: WindowHandle) -> bool {
+        (self.discard_frame_capture)(device, window) == 1
+    }
+
+    /// Sets the comments associated with a capture file. These comments are displayed in the
+    /// UI program when opening.
+    ///
+    /// file_path should be a path to the capture file to add comments to. If None, the most
+    /// recently created capture file will be used instead.
+    /// comments should be a string to add as comments.
+    ///
+    /// Any existing comments will be overwritten.
+    pub fn set_capture_file_comments(&self, file_path: Option<&str>, comments: &str) {
+        let file_path = file_path.map(|s| CString::new(s).unwrap());
+        let comments = CString::new(comments).unwrap();
+
+        (self.set_capture_file_comments)(
+            // Borrow the CString (as_ref) so the pointer stays valid for the call.
+            file_path.as_ref().map_or(std::ptr::null(), |s| s.as_ptr()),
+            comments.as_ptr(),
+        )
+    }
+
+    /// Requests that the replay UI show itself (if hidden or not the current top window). This can be
+    /// used in conjunction with IsTargetControlConnected and LaunchReplayUI to intelligently handle
+    /// showing the UI after making a capture.
+    ///
+    /// This will return true if the request was successfully passed on, though it's not guaranteed that
+    /// the UI will be on top in all cases depending on OS rules. It will return false if there is no
+    /// current target control connection to make such a request, or if there was another error.
+    pub fn show_replay_ui(&self) -> bool {
+        (self.show_replay_ui)() == 1
+    }
+}
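+
+// A minimal usage sketch (not part of the API above; schematic, untested):
+// load the API if the application is running under RenderDoc, then queue a
+// capture of the next frame around some work.
+//
+//     if let Some(rdoc) = RenderdocApi1_5_0::load() {
+//         let (major, minor, patch) = rdoc.get_api_version();
+//         println!("running under RenderDoc {}.{}.{}", major, minor, patch);
+//         rdoc.trigger_capture();
+//     }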
diff --git a/sdl2-sys/Cargo.toml b/sdl2-sys/Cargo.toml
new file mode 100644 (file)
index 0000000..91ea693
--- /dev/null
@@ -0,0 +1,11 @@
+[package]
+name = "sdl2-sys"
+version = "0.1.0"
+edition = "2021"
+links = "SDL2"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+
+[build-dependencies]
diff --git a/sdl2-sys/build.rs b/sdl2-sys/build.rs
new file mode 100644 (file)
index 0000000..a5d2729
--- /dev/null
@@ -0,0 +1,4 @@
+fn main() {
+    println!("cargo:rerun-if-changed=build.rs");
+    println!("cargo:rustc-link-lib=SDL2");
+}
diff --git a/sdl2-sys/src/lib.rs b/sdl2-sys/src/lib.rs
new file mode 100644 (file)
index 0000000..4f48ff6
--- /dev/null
@@ -0,0 +1,946 @@
+#![allow(non_camel_case_types)]
+use std::{ffi::c_void, os::raw::c_char};
+
+#[repr(C)]
+pub struct Window {
+    _unused: [u8; 0],
+}
+
+pub type JoystickID = i32;
+pub type TouchID = i64;
+pub type FingerID = i64;
+pub type GestureID = i64;
+pub type Keycode = i32;
+
+#[repr(C)]
+#[derive(Clone, Copy, PartialEq, Eq)]
+pub enum Scancode {
+    UNKNOWN = 0,
+
+    /**
+     *  \name Usage page 0x07
+     *
+     *  These values are from usage page 0x07 (USB keyboard page).
+     */
+    /* @{ */
+    A = 4,
+    B = 5,
+    C = 6,
+    D = 7,
+    E = 8,
+    F = 9,
+    G = 10,
+    H = 11,
+    I = 12,
+    J = 13,
+    K = 14,
+    L = 15,
+    M = 16,
+    N = 17,
+    O = 18,
+    P = 19,
+    Q = 20,
+    R = 21,
+    S = 22,
+    T = 23,
+    U = 24,
+    V = 25,
+    W = 26,
+    X = 27,
+    Y = 28,
+    Z = 29,
+
+    SCANCODE_1 = 30,
+    SCANCODE_2 = 31,
+    SCANCODE_3 = 32,
+    SCANCODE_4 = 33,
+    SCANCODE_5 = 34,
+    SCANCODE_6 = 35,
+    SCANCODE_7 = 36,
+    SCANCODE_8 = 37,
+    SCANCODE_9 = 38,
+    SCANCODE_0 = 39,
+
+    RETURN = 40,
+    ESCAPE = 41,
+    BACKSPACE = 42,
+    TAB = 43,
+    SPACE = 44,
+
+    MINUS = 45,
+    EQUALS = 46,
+    LEFTBRACKET = 47,
+    RIGHTBRACKET = 48,
+    BACKSLASH = 49,
+    /**< Located at the lower left of the return
+     *   key on ISO keyboards and at the right end
+     *   of the QWERTY row on ANSI keyboards.
+     *   Produces REVERSE SOLIDUS (backslash) and
+     *   VERTICAL LINE in a US layout, REVERSE
+     *   SOLIDUS and VERTICAL LINE in a UK Mac
+     *   layout, NUMBER SIGN and TILDE in a UK
+     *   Windows layout, DOLLAR SIGN and POUND SIGN
+     *   in a Swiss German layout, NUMBER SIGN and
+     *   APOSTROPHE in a German layout, GRAVE
+     *   ACCENT and POUND SIGN in a French Mac
+     *   layout, and ASTERISK and MICRO SIGN in a
+     *   French Windows layout.
+     */
+    NONUSHASH = 50,
+    /**< ISO USB keyboards actually use this code
+     *   instead of 49 for the same key, but all
+     *   OSes I've seen treat the two codes
+     *   identically. So, as an implementor, unless
+     *   your keyboard generates both of those
+     *   codes and your OS treats them differently,
+     *   you should generate BACKSLASH
+     *   instead of this code. As a user, you
+     *   should not rely on this code because SDL
+     *   will never generate it with most (all?)
+     *   keyboards.
+     */
+    SEMICOLON = 51,
+    APOSTROPHE = 52,
+    GRAVE = 53,
+    /**< Located in the top left corner (on both ANSI
+     *   and ISO keyboards). Produces GRAVE ACCENT and
+     *   TILDE in a US Windows layout and in US and UK
+     *   Mac layouts on ANSI keyboards, GRAVE ACCENT
+     *   and NOT SIGN in a UK Windows layout, SECTION
+     *   SIGN and PLUS-MINUS SIGN in US and UK Mac
+     *   layouts on ISO keyboards, SECTION SIGN and
+     *   DEGREE SIGN in a Swiss German layout (Mac:
+     *   only on ISO keyboards), CIRCUMFLEX ACCENT and
+     *   DEGREE SIGN in a German layout (Mac: only on
+     *   ISO keyboards), SUPERSCRIPT TWO and TILDE in a
+     *   French Windows layout, COMMERCIAL AT and
+     *   NUMBER SIGN in a French Mac layout on ISO
+     *   keyboards, and LESS-THAN SIGN and GREATER-THAN
+     *   SIGN in a Swiss German, German, or French Mac
+     *   layout on ANSI keyboards.
+     */
+    COMMA = 54,
+    PERIOD = 55,
+    SLASH = 56,
+
+    CAPSLOCK = 57,
+
+    F1 = 58,
+    F2 = 59,
+    F3 = 60,
+    F4 = 61,
+    F5 = 62,
+    F6 = 63,
+    F7 = 64,
+    F8 = 65,
+    F9 = 66,
+    F10 = 67,
+    F11 = 68,
+    F12 = 69,
+
+    PRINTSCREEN = 70,
+    SCROLLLOCK = 71,
+    PAUSE = 72,
+    INSERT = 73,
+    /**< insert on PC, help on some Mac keyboards (but
+    does send code 73, not 117) */
+    HOME = 74,
+    PAGEUP = 75,
+    DELETE = 76,
+    END = 77,
+    PAGEDOWN = 78,
+    RIGHT = 79,
+    LEFT = 80,
+    DOWN = 81,
+    UP = 82,
+
+    NUMLOCKCLEAR = 83,
+    /**< num lock on PC, clear on Mac keyboards
+     */
+    KP_DIVIDE = 84,
+    KP_MULTIPLY = 85,
+    KP_MINUS = 86,
+    KP_PLUS = 87,
+    KP_ENTER = 88,
+    KP_1 = 89,
+    KP_2 = 90,
+    KP_3 = 91,
+    KP_4 = 92,
+    KP_5 = 93,
+    KP_6 = 94,
+    KP_7 = 95,
+    KP_8 = 96,
+    KP_9 = 97,
+    KP_0 = 98,
+    KP_PERIOD = 99,
+
+    NONUSBACKSLASH = 100,
+    /**< This is the additional key that ISO
+     *   keyboards have over ANSI ones,
+     *   located between left shift and Y.
+     *   Produces GRAVE ACCENT and TILDE in a
+     *   US or UK Mac layout, REVERSE SOLIDUS
+     *   (backslash) and VERTICAL LINE in a
+     *   US or UK Windows layout, and
+     *   LESS-THAN SIGN and GREATER-THAN SIGN
+     *   in a Swiss German, German, or French
+     *   layout. */
+    APPLICATION = 101,
+    /**< windows contextual menu, compose */
+    POWER = 102,
+    /**< The USB document says this is a status flag,
+     *   not a physical key - but some Mac keyboards
+     *   do have a power key. */
+    KP_EQUALS = 103,
+    F13 = 104,
+    F14 = 105,
+    F15 = 106,
+    F16 = 107,
+    F17 = 108,
+    F18 = 109,
+    F19 = 110,
+    F20 = 111,
+    F21 = 112,
+    F22 = 113,
+    F23 = 114,
+    F24 = 115,
+    EXECUTE = 116,
+    HELP = 117,
+    MENU = 118,
+    SELECT = 119,
+    STOP = 120,
+    AGAIN = 121,
+    /**< redo */
+    UNDO = 122,
+    CUT = 123,
+    COPY = 124,
+    PASTE = 125,
+    FIND = 126,
+    MUTE = 127,
+    VOLUMEUP = 128,
+    VOLUMEDOWN = 129,
+    /* not sure whether there's a reason to enable these */
+    /*     LOCKINGCAPSLOCK = 130,  */
+    /*     LOCKINGNUMLOCK = 131, */
+    /*     LOCKINGSCROLLLOCK = 132, */
+    KP_COMMA = 133,
+    KP_EQUALSAS400 = 134,
+
+    INTERNATIONAL1 = 135,
+    /**< used on Asian keyboards, see
+    footnotes in USB doc */
+    INTERNATIONAL2 = 136,
+    INTERNATIONAL3 = 137,
+    /**< Yen */
+    INTERNATIONAL4 = 138,
+    INTERNATIONAL5 = 139,
+    INTERNATIONAL6 = 140,
+    INTERNATIONAL7 = 141,
+    INTERNATIONAL8 = 142,
+    INTERNATIONAL9 = 143,
+    LANG1 = 144,
+    /**< Hangul/English toggle */
+    LANG2 = 145,
+    /**< Hanja conversion */
+    LANG3 = 146,
+    /**< Katakana */
+    LANG4 = 147,
+    /**< Hiragana */
+    LANG5 = 148,
+    /**< Zenkaku/Hankaku */
+    LANG6 = 149,
+    /**< reserved */
+    LANG7 = 150,
+    /**< reserved */
+    LANG8 = 151,
+    /**< reserved */
+    LANG9 = 152,
+    /**< reserved */
+    ALTERASE = 153,
+    /**< Erase-Eaze */
+    SYSREQ = 154,
+    CANCEL = 155,
+    CLEAR = 156,
+    PRIOR = 157,
+    RETURN2 = 158,
+    SEPARATOR = 159,
+    OUT = 160,
+    OPER = 161,
+    CLEARAGAIN = 162,
+    CRSEL = 163,
+    EXSEL = 164,
+
+    KP_00 = 176,
+    KP_000 = 177,
+    THOUSANDSSEPARATOR = 178,
+    DECIMALSEPARATOR = 179,
+    CURRENCYUNIT = 180,
+    CURRENCYSUBUNIT = 181,
+    KP_LEFTPAREN = 182,
+    KP_RIGHTPAREN = 183,
+    KP_LEFTBRACE = 184,
+    KP_RIGHTBRACE = 185,
+    KP_TAB = 186,
+    KP_BACKSPACE = 187,
+    KP_A = 188,
+    KP_B = 189,
+    KP_C = 190,
+    KP_D = 191,
+    KP_E = 192,
+    KP_F = 193,
+    KP_XOR = 194,
+    KP_POWER = 195,
+    KP_PERCENT = 196,
+    KP_LESS = 197,
+    KP_GREATER = 198,
+    KP_AMPERSAND = 199,
+    KP_DBLAMPERSAND = 200,
+    KP_VERTICALBAR = 201,
+    KP_DBLVERTICALBAR = 202,
+    KP_COLON = 203,
+    KP_HASH = 204,
+    KP_SPACE = 205,
+    KP_AT = 206,
+    KP_EXCLAM = 207,
+    KP_MEMSTORE = 208,
+    KP_MEMRECALL = 209,
+    KP_MEMCLEAR = 210,
+    KP_MEMADD = 211,
+    KP_MEMSUBTRACT = 212,
+    KP_MEMMULTIPLY = 213,
+    KP_MEMDIVIDE = 214,
+    KP_PLUSMINUS = 215,
+    KP_CLEAR = 216,
+    KP_CLEARENTRY = 217,
+    KP_BINARY = 218,
+    KP_OCTAL = 219,
+    KP_DECIMAL = 220,
+    KP_HEXADECIMAL = 221,
+
+    LCTRL = 224,
+    LSHIFT = 225,
+    LALT = 226,
+    /**< alt, option */
+    LGUI = 227,
+    /**< windows, command (apple), meta */
+    RCTRL = 228,
+    RSHIFT = 229,
+    RALT = 230,
+    /**< alt gr, option */
+    RGUI = 231,
+    /**< windows, command (apple), meta */
+    MODE = 257,
+    /**< I'm not sure if this is really not covered
+     *   by any of the above, but since there's a
+     *   special KMOD_MODE for it I'm adding it here
+     */
+
+    /* @} *//* Usage page 0x07 */
+
+    /**
+     *  \name Usage page 0x0C
+     *
+     *  These values are mapped from usage page 0x0C (USB consumer page).
+     */
+    /* @{ */
+    AUDIONEXT = 258,
+    AUDIOPREV = 259,
+    AUDIOSTOP = 260,
+    AUDIOPLAY = 261,
+    AUDIOMUTE = 262,
+    MEDIASELECT = 263,
+    WWW = 264,
+    MAIL = 265,
+    CALCULATOR = 266,
+    COMPUTER = 267,
+    AC_SEARCH = 268,
+    AC_HOME = 269,
+    AC_BACK = 270,
+    AC_FORWARD = 271,
+    AC_STOP = 272,
+    AC_REFRESH = 273,
+    AC_BOOKMARKS = 274,
+
+    /* @} *//* Usage page 0x0C */
+    /**
+     *  \name Walther keys
+     *
+     *  These are values that Christian Walther added (for mac keyboard?).
+     */
+    /* @{ */
+    BRIGHTNESSDOWN = 275,
+    BRIGHTNESSUP = 276,
+    DISPLAYSWITCH = 277,
+    /**< display mirroring/dual display
+    switch, video mode switch */
+    KBDILLUMTOGGLE = 278,
+    KBDILLUMDOWN = 279,
+    KBDILLUMUP = 280,
+    EJECT = 281,
+    SLEEP = 282,
+
+    APP1 = 283,
+    APP2 = 284,
+
+    /* @} *//* Walther keys */
+    /**
+     *  \name Usage page 0x0C (additional media keys)
+     *
+     *  These values are mapped from usage page 0x0C (USB consumer page).
+     */
+    /* @{ */
+    AUDIOREWIND = 285,
+    AUDIOFASTFORWARD = 286,
+
+    /* @} *//* Usage page 0x0C (additional media keys) */
+
+    /* Add any other keys here. */
+    NUM_SCANCODES = 512,
+}
+
+#[repr(C)]
+#[derive(Clone, Copy)]
+pub struct Keysym {
+    pub scancode: Scancode,
+    pub sym: Keycode,
+    pub modifiers: u16,
+    pub _unused: u32,
+}
+
+#[repr(C)]
+#[derive(Clone, Copy, PartialEq, Eq)]
+pub enum EventType {
+    FIRSTEVENT = 0,
+    QUIT = 0x100,
+
+    APP_TERMINATING,
+    APP_LOWMEMORY,
+    APP_WILLENTERBACKGROUND,
+    APP_DIDENTERBACKGROUND,
+    APP_WILLENTERFOREGROUND,
+    APP_DIDENTERFOREGROUND,
+
+    LOCALECHANGED,
+
+    DISPLAYEVENT = 0x150,
+
+    WINDOWEVENT = 0x200,
+    SYSWMEVENT,
+
+    KEYDOWN = 0x300,
+    KEYUP,
+    TEXTEDITING,
+    TEXTINPUT,
+    KEYMAPCHANGED,
+
+    MOUSEMOTION = 0x400,
+    MOUSEBUTTONDOWN,
+    MOUSEBUTTONUP,
+    MOUSEWHEEL,
+
+    JOYAXISMOTION = 0x600,
+    JOYBALLMOTION,
+    JOYHATMOTION,
+    JOYBUTTONDOWN,
+    JOYBUTTONUP,
+    JOYDEVICEADDED,
+    JOYDEVICEREMOVED,
+
+    CONTROLLERAXISMOTION = 0x650,
+    CONTROLLERBUTTONDOWN,
+    CONTROLLERBUTTONUP,
+    CONTROLLERDEVICEADDED,
+    CONTROLLERDEVICEREMOVED,
+    CONTROLLERDEVICEREMAPPED,
+    CONTROLLERTOUCHPADDOWN,
+    CONTROLLERTOUCHPADMOTION,
+    CONTROLLERTOUCHPADUP,
+    CONTROLLERSENSORUPDATE,
+
+    FINGERDOWN = 0x700,
+    FINGERUP,
+    FINGERMOTION,
+
+    DOLLARGESTURE = 0x800,
+    DOLLARRECORD,
+    MULTIGESTURE,
+
+    CLIPBOARDUPDATE = 0x900,
+
+    DROPFILE = 0x1000,
+    DROPTEXT,
+    DROPBEGIN,
+    DROPCOMPLETE,
+
+    AUDIODEVICEADDED = 0x1100,
+    AUDIODEVICEREMOVED,
+
+    SENSORUPDATE = 0x1200,
+    RENDER_TARGETS_RESET = 0x2000,
+    RENDER_DEVICE_RESET,
+
+    POLLSENTINEL = 0x7F00,
+
+    USEREVENT = 0x8000,
+    LASTEVENT = 0xFFFF,
+}
+
+#[repr(C)]
+#[derive(Clone, Copy)]
+pub struct CommonEvent {
+    pub r#type: EventType,
+    pub timestamp: u32,
+}
+
+#[repr(C)]
+#[derive(Clone, Copy)]
+pub struct DisplayEvent {
+    pub r#type: EventType,
+    pub timestamp: u32,
+    pub display: u32,
+    pub event: u8,
+    pub _padding1: u8,
+    pub _padding2: u8,
+    pub _padding3: u8,
+    pub data1: i32,
+}
+
+#[repr(C)]
+#[derive(Clone, Copy)]
+pub struct WindowEvent {
+    pub r#type: EventType,
+    pub timestamp: u32,
+    pub window_id: u32,
+    pub event: WindowEventId,
+    pub padding1: u8,
+    pub padding2: u8,
+    pub padding3: u8,
+    pub data1: i32,
+    pub data2: i32,
+}
+
+#[repr(u8)]
+#[derive(Clone, Copy, PartialEq, Eq)]
+pub enum WindowEventId {
+    /// Never used
+    None,
+    /// Window has been shown
+    Shown,
+    /// Window has been hidden
+    Hidden,
+    /// Window has been exposed and should be redrawn
+    Exposed,
+    /// Window has been moved to data1, data2
+    Moved,
+    /// Window has been resized to data1xdata2
+    Resized,
+    /// The window size has changed, either as a result of an API call or through the
+    /// system or user changing the window size.
+    SizeChanged,
+    /// Window has been minimized
+    Minimized,
+    /// Window has been maximized
+    Maximized,
+    /// Window has been restored to normal size and position
+    Restored,
+    /// Window has gained mouse focus
+    Enter,
+    /// Window has lost mouse focus
+    Leave,
+    /// Window has gained keyboard focus
+    FocusGained,
+    /// Window has lost keyboard focus
+    FocusLost,
+    /// The window manager requests that the window be closed
+    Close,
+    /// Window is being offered a focus (should SetWindowInputFocus() on itself or a
+    /// subwindow, or ignore)
+    TakeFocus,
+    /// Window had a hit test that wasn't SDL_HITTEST_NORMAL.
+    HitTest,
+    /// The ICC profile of the window's display has changed.
+    IccprofChanged,
+    /// Window has been moved to display data1.
+    DisplayChanged,
+}
+
+#[repr(C)]
+#[derive(Clone, Copy)]
+pub struct KeyboardEvent {
+    pub r#type: EventType,
+    pub timestamp: u32,
+    pub window_id: u32,
+    pub state: u8,
+    pub repeat: u8,
+    pub _padding2: u8,
+    pub _padding3: u8,
+    pub keysym: Keysym,
+}
+
+const TEXTEDITINGEVENT_TEXT_SIZE: usize = 32;
+
+#[repr(C)]
+#[derive(Clone, Copy)]
+pub struct TextEditingEvent {
+    pub r#type: EventType,
+    pub timestamp: u32,
+    pub window_id: u32,
+    pub text: [u8; TEXTEDITINGEVENT_TEXT_SIZE],
+    pub start: i32,
+    pub length: i32,
+}
+
+const TEXTINPUTEVENT_TEXT_SIZE: usize = 32;
+
+#[repr(C)]
+#[derive(Clone, Copy)]
+pub struct TextInputEvent {
+    pub r#type: EventType,
+    pub timestamp: u32,
+    pub window_id: u32,
+    pub text: [u8; TEXTINPUTEVENT_TEXT_SIZE],
+}
+
+#[repr(C)]
+#[derive(Clone, Copy)]
+pub struct MouseMotionEvent {
+    pub r#type: EventType,
+    pub timestamp: u32,
+    pub window_id: u32,
+    pub which: u32,
+    pub state: u32,
+    pub x: i32,
+    pub y: i32,
+    pub xrel: i32,
+    pub yrel: i32,
+}
+
+#[repr(C)]
+#[derive(Clone, Copy)]
+pub struct MouseButtonEvent {
+    pub r#type: EventType,
+    pub timestamp: u32,
+    pub window_id: u32,
+    pub which: u32,
+    pub button: u8,
+    pub state: u8,
+    pub clicks: u8,
+    pub padding1: u8,
+    pub x: i32,
+    pub y: i32,
+}
+
+#[repr(C)]
+#[derive(Clone, Copy)]
+pub struct MouseWheelEvent {
+    pub r#type: EventType,
+    pub timestamp: u32,
+    pub window_id: u32,
+    pub which: u32,
+    pub x: i32,
+    pub y: i32,
+    pub direction: u32,
+}
+
+#[repr(C)]
+#[derive(Clone, Copy)]
+pub struct JoyAxisEvent {
+    pub r#type: EventType,
+    pub timestamp: u32,
+    pub which: JoystickID,
+    pub axis: u8,
+    pub padding1: u8,
+    pub padding2: u8,
+    pub padding3: u8,
+    pub value: i16,
+    pub padding4: u16,
+}
+
+#[repr(C)]
+#[derive(Clone, Copy)]
+pub struct JoyBallEvent {
+    pub r#type: EventType,
+    pub timestamp: u32,
+    pub which: JoystickID,
+    pub ball: u8,
+    pub padding1: u8,
+    pub padding2: u8,
+    pub padding3: u8,
+    pub xrel: i16,
+    pub yrel: i16,
+}
+
+#[repr(C)]
+#[derive(Clone, Copy)]
+pub struct JoyHatEvent {
+    pub r#type: EventType,
+    pub timestamp: u32,
+    pub which: JoystickID,
+    pub hat: u8,
+    pub value: u8,
+    pub padding1: u8,
+    pub padding2: u8,
+}
+
+#[repr(C)]
+#[derive(Clone, Copy)]
+pub struct JoyButtonEvent {
+    pub r#type: EventType,
+    pub timestamp: u32,
+    pub which: JoystickID,
+    pub button: u8,
+    pub state: u8,
+    pub padding1: u8,
+    pub padding2: u8,
+}
+
+#[repr(C)]
+#[derive(Clone, Copy)]
+pub struct JoyDeviceEvent {
+    pub r#type: EventType,
+    pub timestamp: u32,
+    pub which: i32,
+}
+
+#[repr(C)]
+#[derive(Clone, Copy)]
+pub struct ControllerAxisEvent {
+    pub r#type: EventType,
+    pub timestamp: u32,
+    pub which: JoystickID,
+    pub axis: u8,
+    pub padding1: u8,
+    pub padding2: u8,
+    pub padding3: u8,
+    pub value: i16,
+    pub padding4: u16,
+}
+
+#[repr(C)]
+#[derive(Clone, Copy)]
+pub struct ControllerButtonEvent {
+    pub r#type: EventType,
+    pub timestamp: u32,
+    pub which: JoystickID,
+    pub button: u8,
+    pub state: u8,
+    pub padding1: u8,
+    pub padding2: u8,
+}
+
+#[repr(C)]
+#[derive(Clone, Copy)]
+pub struct ControllerDeviceEvent {
+    pub r#type: EventType,
+    pub timestamp: u32,
+    pub which: i32,
+}
+
+#[repr(C)]
+#[derive(Clone, Copy)]
+pub struct ControllerTouchpadEvent {
+    pub r#type: EventType,
+    pub timestamp: u32,
+    pub which: JoystickID,
+    pub touchpad: i32,
+    pub finger: i32,
+    pub x: f32,
+    pub y: f32,
+    pub pressure: f32,
+}
+
+#[repr(C)]
+#[derive(Clone, Copy)]
+pub struct ControllerSensorEvent {
+    pub r#type: EventType,
+    pub timestamp: u32,
+    pub which: JoystickID,
+    pub sensor: i32,
+    pub data: [f32; 3],
+}
+
+#[repr(C)]
+#[derive(Clone, Copy)]
+pub struct AudioDeviceEvent {
+    pub r#type: EventType,
+    pub timestamp: u32,
+    pub which: u32,
+    pub iscapture: u8,
+    pub padding1: u8,
+    pub padding2: u8,
+    pub padding3: u8,
+}
+
+#[repr(C)]
+#[derive(Clone, Copy)]
+pub struct TouchFingerEvent {
+    pub r#type: EventType,
+    pub timestamp: u32,
+    pub touch_id: TouchID,
+    pub finger_id: FingerID,
+    pub x: f32,
+    pub y: f32,
+    pub dx: f32,
+    pub dy: f32,
+    pub pressure: f32,
+    pub window_id: u32,
+}
+
+#[repr(C)]
+#[derive(Clone, Copy)]
+pub struct MultiGestureEvent {
+    pub r#type: EventType,
+    pub timestamp: u32,
+    pub touch_id: TouchID,
+    pub d_theta: f32,
+    pub d_dist: f32,
+    pub x: f32,
+    pub y: f32,
+    pub num_fingers: u16,
+    pub padding: u16,
+}
+
+#[repr(C)]
+#[derive(Clone, Copy)]
+pub struct DollarGestureEvent {
+    pub r#type: EventType,
+    pub timestamp: u32,
+    pub touch_id: TouchID,
+    pub gesture_id: GestureID,
+    pub num_fingers: u32,
+    pub error: f32,
+    pub x: f32,
+    pub y: f32,
+}
+
+#[repr(C)]
+#[derive(Clone, Copy)]
+pub struct DropEvent {
+    pub r#type: EventType,
+    pub timestamp: u32,
+    pub file: *const c_char,
+    pub window_id: u32,
+}
+
+#[repr(C)]
+#[derive(Clone, Copy)]
+pub struct SensorEvent {
+    pub r#type: EventType,
+    pub timestamp: u32,
+    pub which: i32,
+    pub data: [f32; 6],
+}
+
+#[repr(C)]
+#[derive(Clone, Copy)]
+pub struct QuitEvent {
+    pub r#type: EventType,
+    pub timestamp: u32,
+}
+
+#[repr(C)]
+#[derive(Clone, Copy)]
+pub struct OSEvent {
+    pub r#type: EventType,
+    pub timestamp: u32,
+}
+
+#[repr(C)]
+#[derive(Clone, Copy)]
+pub struct UserEvent {
+    pub r#type: EventType,
+    pub timestamp: u32,
+    pub window_id: u32,
+    pub code: i32,
+    pub data1: *mut c_void,
+    pub data2: *mut c_void,
+}
+
+#[repr(C)]
+pub struct SysWMmsg {
+    _unused: [u8; 0],
+}
+
+#[repr(C)]
+#[derive(Clone, Copy)]
+pub struct SysWMEvent {
+    pub r#type: EventType,
+    pub timestamp: u32,
+    pub msg: *mut SysWMmsg,
+}
+
+#[repr(C)]
+#[derive(Clone, Copy)]
+pub union Event {
+    pub r#type: EventType,
+    pub common: CommonEvent,
+    pub display: DisplayEvent,
+    pub window: WindowEvent,
+    pub key: KeyboardEvent,
+    pub edit: TextEditingEvent,
+    pub text: TextInputEvent,
+    pub motion: MouseMotionEvent,
+    pub button: MouseButtonEvent,
+    pub wheel: MouseWheelEvent,
+    pub jaxis: JoyAxisEvent,
+    pub jball: JoyBallEvent,
+    pub jhat: JoyHatEvent,
+    pub jbutton: JoyButtonEvent,
+    pub jdevice: JoyDeviceEvent,
+    pub caxis: ControllerAxisEvent,
+    pub cbutton: ControllerButtonEvent,
+    pub cdevice: ControllerDeviceEvent,
+    pub ctouchpad: ControllerTouchpadEvent,
+    pub csensor: ControllerSensorEvent,
+    pub adevice: AudioDeviceEvent,
+    pub sensor: SensorEvent,
+    pub quit: QuitEvent,
+    pub user: UserEvent,
+    pub syswm: SysWMEvent,
+    pub tfinger: TouchFingerEvent,
+    pub mgesture: MultiGestureEvent,
+    pub dgesture: DollarGestureEvent,
+    pub r#drop: DropEvent,
+}
+
+extern "C" {
+    pub fn SDL_Init(flags: u32) -> i32;
+    pub fn SDL_Quit();
+
+    pub fn SDL_CreateWindow(
+        title: *const c_char,
+        x: i32,
+        y: i32,
+        w: i32,
+        h: i32,
+        flags: u32,
+    ) -> *mut Window;
+    pub fn SDL_DestroyWindow(window: *mut Window);
+
+    pub fn SDL_GetWindowID(window: *mut Window) -> u32;
+    pub fn SDL_GetWindowFromID(id: u32) -> *mut Window;
+
+    pub fn SDL_PollEvent(event: *mut Event) -> i32;
+
+    pub fn SDL_Vulkan_LoadLibrary(path: *const c_char) -> i32;
+    pub fn SDL_Vulkan_GetInstanceExtensions(
+        window: *mut Window,
+        count: &mut u32,
+        names: *mut *const c_char,
+    ) -> i32;
+    pub fn SDL_Vulkan_GetVkGetInstanceProcAddr() -> *mut c_void;
+    pub fn SDL_Vulkan_CreateSurface(window: *mut Window, instance: u64, surface: *mut u64) -> i32;
+    pub fn SDL_Vulkan_GetDrawableSize(window: *mut Window, w: *mut i32, h: *mut i32);
+}
+
+pub const INIT_VIDEO: u32 = 0x0000_0020;
+pub const WINDOW_SHOWN: u32 = 0x0000_0004;
+pub const WINDOW_RESIZABLE: u32 = 0x0000_0020;
+pub const WINDOW_VULKAN: u32 = 0x1000_0000;
+
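+// A minimal polling sketch (not part of this file; schematic, untested). SDL must
+// have been initialised and a window created first; reading any union field of
+// `Event` is unsafe:
+//
+//     unsafe {
+//         let mut event = std::mem::zeroed::<Event>();
+//         while SDL_PollEvent(&mut event) != 0 {
+//             if event.r#type == EventType::QUIT {
+//                 // tear down and exit
+//             }
+//         }
+//     }
+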
+#[cfg(test)]
+mod tests {
+    #[test]
+    fn it_works() {
+        let result = 2 + 2;
+        assert_eq!(result, 4);
+    }
+}
diff --git a/vulkan-sys/.gitignore b/vulkan-sys/.gitignore
new file mode 100644 (file)
index 0000000..96ef6c0
--- /dev/null
@@ -0,0 +1,2 @@
+/target
+Cargo.lock
diff --git a/vulkan-sys/Cargo.toml b/vulkan-sys/Cargo.toml
new file mode 100644 (file)
index 0000000..882f86d
--- /dev/null
@@ -0,0 +1,11 @@
+[package]
+name = "vulkan-sys"
+version = "0.1.0"
+edition = "2021"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+
+[dev-dependencies]
+libc = "0.2"
\ No newline at end of file
diff --git a/vulkan-sys/examples/triangle.frag.glsl b/vulkan-sys/examples/triangle.frag.glsl
new file mode 100644 (file)
index 0000000..e948fd6
--- /dev/null
@@ -0,0 +1,8 @@
+#version 450
+
+layout(location = 0) in vec3 fragColor;
+layout(location = 0) out vec4 outColor;
+
+void main() {
+    outColor = vec4(fragColor, 1.0);
+}
\ No newline at end of file
diff --git a/vulkan-sys/examples/triangle.frag.spv b/vulkan-sys/examples/triangle.frag.spv
new file mode 100644 (file)
index 0000000..8a4cc40
Binary files /dev/null and b/vulkan-sys/examples/triangle.frag.spv differ
diff --git a/vulkan-sys/examples/triangle.rs b/vulkan-sys/examples/triangle.rs
new file mode 100644 (file)
index 0000000..f7115c7
--- /dev/null
@@ -0,0 +1,793 @@
+use libc::c_void;
+
+use vulkan_sys as vk;
+use vulkan_sys::cstr;
+
+#[macro_export]
+macro_rules! vk_check {
+    ($e:expr) => ({
+        let e = $e;
+        if e != vk::Result::Success {
+            panic!("assertion failed: `result == vk::Result::Success`: \n value: `{:?}`", e);
+        }
+    });
+    ($e:expr, ) => ({
+        vk_check!($e);
+    });
+    ($e:expr, $($msg_args:tt)+) => ({
+        let e = $e;
+        if e != vk::Result::Success {
+            panic!("assertion failed: `result == vk::Result::Success`: \n value: `{:?}: {}`", e, format_args!($($msg_args)+));
+        }
+    })
+}
+
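+/// Implements the standard Vulkan two-call enumeration idiom: call `f` once with
+/// a null pointer to query the element count, then again to fill the buffer.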
+fn vk_vec<T, F: FnMut(&mut u32, *mut T) -> vk::Result>(mut f: F) -> Vec<T> {
+    let mut count = 0;
+    vk_check!(f(&mut count, std::ptr::null_mut()));
+    let mut v = Vec::with_capacity(count as usize);
+    vk_check!(f(&mut count, v.as_mut_ptr()));
+    unsafe { v.set_len(count as usize) };
+    v
+}
+
+/// Avoid the awful `..Default::default()` spam.
+#[inline(always)]
+fn default<T: Default>() -> T {
+    T::default()
+}
+
+pub fn main() {
+    let get_proc_addr = unsafe {
+        let module = libc::dlopen(
+            cstr!("libvulkan.so.1").as_ptr(),
+            libc::RTLD_NOW | libc::RTLD_LOCAL,
+        );
+        libc::dlsym(module, cstr!("vkGetInstanceProcAddr").as_ptr())
+    };
+
+    let global_fn = unsafe { vk::GlobalFunctions::new(get_proc_addr) };
+
+    let api_version = {
+        let mut api_version = 0;
+        vk_check!(global_fn.enumerate_instance_version(&mut api_version));
+        api_version
+    };
+
+    if api_version < vk::VERSION_1_2 {
+        panic!("instance does not support vulkan 1.2")
+    }
+
+    #[cfg(debug_assertions)]
+    let enabled_layers = &[cstr!("VK_LAYER_KHRONOS_validation").as_ptr()];
+    #[cfg(not(debug_assertions))]
+    let enabled_layers = &[];
+
+    let instance = {
+        let application_info = vk::ApplicationInfo {
+            application_name: cstr!("TRIANGLE").as_ptr(),
+            application_version: 0,
+            engine_name: cstr!("TRIANGLE").as_ptr(),
+            engine_version: 0,
+            api_version: vk::VERSION_1_3,
+            ..default()
+        };
+        let create_info = vk::InstanceCreateInfo {
+            enabled_layers: enabled_layers.into(),
+            application_info: Some(&application_info),
+            ..default()
+        };
+        let mut instance = vk::Instance::null();
+        vk_check!(unsafe { global_fn.create_instance(&create_info, None, &mut instance) });
+        instance
+    };
+
+    let instance_fn = vk::InstanceFunctions::new(&global_fn, instance, vk::VERSION_1_2);
+
+    let physical_devices = vk_vec(|count, ptr| unsafe {
+        instance_fn.enumerate_physical_devices(instance, count, ptr)
+    });
+
+    let physical_device = physical_devices
+        .iter()
+        .copied()
+        .find(|&physical_device| {
+            let (
+                physical_device_properties,
+                _physical_device_properties_11,
+                _physical_device_properties_12,
+                _physical_device_properties_13,
+            ) = {
+                let mut properties_13 = vk::PhysicalDeviceVulkan13Properties::default();
+                let mut properties_12 = vk::PhysicalDeviceVulkan12Properties::default();
+                let mut properties_11 = vk::PhysicalDeviceVulkan11Properties::default();
+                let mut properties = vk::PhysicalDeviceProperties2::default();
+                unsafe {
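+                    // Link the structs into a pNext chain (properties -> 1.1 -> 1.2
+                    // -> 1.3) so a single query fills all four.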
+                    properties._next = std::mem::transmute::<_, *mut c_void>(&mut properties_11);
+                    properties_11._next = std::mem::transmute::<_, *mut c_void>(&mut properties_12);
+                    properties_12._next = std::mem::transmute::<_, *mut c_void>(&mut properties_13);
+                    instance_fn.get_physical_device_properties2(physical_device, &mut properties);
+                }
+                (properties, properties_11, properties_12, properties_13)
+            };
+
+            let (
+                _physical_device_features,
+                _physical_device_features_11,
+                physical_device_features_12,
+                physical_device_features_13,
+            ) = {
+                let mut features_13 = vk::PhysicalDeviceVulkan13Features::default();
+                let mut features_12 = vk::PhysicalDeviceVulkan12Features::default();
+                let mut features_11 = vk::PhysicalDeviceVulkan11Features::default();
+                let mut features = vk::PhysicalDeviceFeatures2::default();
+                unsafe {
+                    features._next = std::mem::transmute::<_, *mut c_void>(&mut features_11);
+                    features_11._next = std::mem::transmute::<_, *mut c_void>(&mut features_12);
+                    features_12._next = std::mem::transmute::<_, *mut c_void>(&mut features_13);
+                    instance_fn.get_physical_device_features2(physical_device, &mut features);
+                }
+                (features.features, features_11, features_12, features_13)
+            };
+
+            physical_device_properties.properties.api_version >= vk::VERSION_1_3
+                && physical_device_features_13.dynamic_rendering == vk::Bool32::True
+                && physical_device_features_12.timeline_semaphore == vk::Bool32::True
+        })
+        .expect("no supported physical devices reported");
+
+    let physical_device_memory_properties = unsafe {
+        let mut memory_properties = vk::PhysicalDeviceMemoryProperties::default();
+        instance_fn.get_physical_device_memory_properties(physical_device, &mut memory_properties);
+        memory_properties
+    };
+
+    let queue_family_properties = vk_vec(|count, ptr| unsafe {
+        instance_fn.get_physical_device_queue_family_properties(physical_device, count, ptr);
+        vk::Result::Success
+    });
+
+    let (queue_family_index, _) = (0..)
+        .zip(queue_family_properties.iter())
+        .find(|&(_, queue_family_properties)| {
+            queue_family_properties
+                .queue_flags
+                .contains(vk::QueueFlags::GRAPHICS | vk::QueueFlags::COMPUTE)
+        })
+        .expect("failed to find universal queue for chosen device");
+
+    let device = unsafe {
+        let queue_priorities = &[1.0];
+        let device_queue_create_infos = &[vk::DeviceQueueCreateInfo {
+            queue_family_index,
+            queue_priorities: queue_priorities.into(),
+            ..default()
+        }];
+        let enabled_features_13 = vk::PhysicalDeviceVulkan13Features {
+            dynamic_rendering: vk::Bool32::True,
+            synchronization2: vk::Bool32::True,
+            ..default()
+        };
+        let enabled_features_12 = vk::PhysicalDeviceVulkan12Features {
+            _next: std::mem::transmute::<_, *mut c_void>(&enabled_features_13),
+            timeline_semaphore: vk::Bool32::True,
+            ..default()
+        };
+        let enabled_features_11 = vk::PhysicalDeviceVulkan11Features {
+            _next: std::mem::transmute::<_, *mut c_void>(&enabled_features_12),
+            ..default()
+        };
+        let enabled_features = vk::PhysicalDeviceFeatures2 {
+            _next: std::mem::transmute::<_, *mut c_void>(&enabled_features_11),
+            ..default()
+        };
+        let create_info = vk::DeviceCreateInfo {
+            _next: std::mem::transmute::<_, *mut c_void>(&enabled_features),
+            queue_create_infos: device_queue_create_infos.into(),
+            ..default()
+        };
+        let mut device = vk::Device::null();
+        vk_check!(instance_fn.create_device(physical_device, &create_info, None, &mut device));
+        device
+    };
+
+    let device_fn = vk::DeviceFunctions::new(&instance_fn, device, vk::VERSION_1_3);
+
+    let queue = unsafe {
+        let mut queue = vk::Queue::default();
+        device_fn.get_device_queue(device, queue_family_index, 0, &mut queue);
+        queue
+    };
+
+    let mut semaphore_value = 0;
+    let semaphore = unsafe {
+        let type_create_info = vk::SemaphoreTypeCreateInfo {
+            semaphore_type: vk::SemaphoreType::Timeline,
+            initial_value: semaphore_value,
+            ..default()
+        };
+        let create_info = vk::SemaphoreCreateInfo {
+            _next: std::mem::transmute::<_, _>(&type_create_info),
+            ..default()
+        };
+        let mut semaphore = vk::Semaphore::null();
+        vk_check!(device_fn.create_semaphore(device, &create_info, None, &mut semaphore));
+        semaphore
+    };
+
+    let create_shader_module = |code: &[u8]| {
+        // SPIR-V code is a stream of u32 words, so it needs 4-byte alignment.
+        debug_assert!((code.as_ptr() as usize) & ((1 << 2) - 1) == 0);
+        let create_info = vk::ShaderModuleCreateInfo {
+            code: code.into(),
+            ..default()
+        };
+        let mut shader_module = vk::ShaderModule::null();
+        vk_check!(unsafe {
+            device_fn.create_shader_module(device, &create_info, None, &mut shader_module)
+        });
+        shader_module
+    };
+
+    let create_graphics_pipeline = |vert, frag| {
+        let vert_shader_module = create_shader_module(vert);
+        let frag_shader_module = create_shader_module(frag);
+
+        let layout = {
+            let create_info = vk::PipelineLayoutCreateInfo::default();
+            let mut pipeline_layout = vk::PipelineLayout::null();
+            vk_check!(unsafe {
+                device_fn.create_pipeline_layout(device, &create_info, None, &mut pipeline_layout)
+            });
+            pipeline_layout
+        };
+
+        let mut pipeline = vk::Pipeline::null();
+
+        unsafe {
+            let stages = &[
+                vk::PipelineShaderStageCreateInfo {
+                    stage: vk::ShaderStageFlags::VERTEX,
+                    module: vert_shader_module,
+                    name: cstr!("main").as_ptr(),
+                    ..default()
+                },
+                vk::PipelineShaderStageCreateInfo {
+                    stage: vk::ShaderStageFlags::FRAGMENT,
+                    module: frag_shader_module,
+                    name: cstr!("main").as_ptr(),
+                    ..default()
+                },
+            ];
+            let vertex_input_state = vk::PipelineVertexInputStateCreateInfo::default();
+            let input_assembly_state = vk::PipelineInputAssemblyStateCreateInfo {
+                topology: vk::PrimitiveTopology::TriangleList,
+                ..default()
+            };
+            let viewport_state = vk::PipelineViewportStateCreateInfo::default();
+            let rasterization_state = vk::PipelineRasterizationStateCreateInfo {
+                line_width: 1.0,
+                ..default()
+            };
+            let multisample_state = vk::PipelineMultisampleStateCreateInfo {
+                rasterization_samples: vk::SampleCountFlags::SAMPLE_COUNT_1,
+                ..default()
+            };
+            let color_blend_attachments = &[vk::PipelineColorBlendAttachmentState {
+                color_write_mask: vk::ColorComponentFlags::R
+                    | vk::ColorComponentFlags::G
+                    | vk::ColorComponentFlags::B
+                    | vk::ColorComponentFlags::A,
+                ..default()
+            }];
+            let color_blend_state = vk::PipelineColorBlendStateCreateInfo {
+                attachments: color_blend_attachments.into(),
+                ..default()
+            };
+            let dynamic_states = &[
+                vk::DynamicState::ViewportWithCount,
+                vk::DynamicState::ScissorWithCount,
+            ];
+            let dynamic_state = vk::PipelineDynamicStateCreateInfo {
+                dynamic_states: dynamic_states.into(),
+                ..default()
+            };
+            let color_attachment_formats = &[vk::Format::R8G8B8A8_SRGB];
+            let pipeline_rendering_create_info = vk::PipelineRenderingCreateInfo {
+                color_attachment_formats: color_attachment_formats.into(),
+                ..default()
+            };
+            let create_info = vk::GraphicsPipelineCreateInfo {
+                _next: std::mem::transmute::<_, _>(&pipeline_rendering_create_info),
+                stages: stages.into(),
+                vertex_input_state: Some(&vertex_input_state),
+                input_assembly_state: Some(&input_assembly_state),
+                viewport_state: Some(&viewport_state),
+                rasterization_state: Some(&rasterization_state),
+                multisample_state: Some(&multisample_state),
+                color_blend_state: Some(&color_blend_state),
+                dynamic_state: Some(&dynamic_state),
+                layout,
+                ..default()
+            };
+            vk_check!(device_fn.create_graphics_pipelines(
+                device,
+                vk::PipelineCache::null(),
+                &[create_info],
+                None,
+                std::slice::from_mut(&mut pipeline),
+            ));
+        }
+
+        unsafe { device_fn.destroy_shader_module(device, vert_shader_module, None) };
+        unsafe { device_fn.destroy_shader_module(device, frag_shader_module, None) };
+
+        (layout, pipeline)
+    };
+
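+    // include_bytes! only guarantees byte alignment, but Vulkan expects SPIR-V
+    // words (u32s) to be 4-byte aligned; this wrapper type forces that alignment.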
+    #[repr(align(4))]
+    struct Spirv<const LEN: usize>([u8; LEN]);
+
+    let vert_shader_spv = Spirv(*include_bytes!("triangle.vert.spv"));
+    let frag_shader_spv = Spirv(*include_bytes!("triangle.frag.spv"));
+
+    let (pipeline_layout, pipeline) =
+        create_graphics_pipeline(&vert_shader_spv.0, &frag_shader_spv.0);
+
+    let command_pool = {
+        let create_info = vk::CommandPoolCreateInfo {
+            flags: vk::CommandPoolCreateFlags::TRANSIENT,
+            queue_family_index,
+            ..default()
+        };
+        let mut command_pool = vk::CommandPool::default();
+        vk_check!(unsafe {
+            device_fn.create_command_pool(device, &create_info, None, &mut command_pool)
+        });
+        command_pool
+    };
+
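+    // `filter` is the memory_type_bits mask from VkMemoryRequirements: bit i set
+    // means memory type i is acceptable. Pick the first acceptable type that also
+    // has all the requested property flags.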
+    let find_memory_type = |filter, flags| {
+        (0..physical_device_memory_properties.memory_type_count)
+            .map(|memory_type_index| {
+                (
+                    memory_type_index,
+                    physical_device_memory_properties.memory_types[memory_type_index as usize],
+                )
+            })
+            .find(|(i, memory_type)| {
+                (filter & (1 << i)) != 0 && memory_type.property_flags.contains(flags)
+            })
+            .expect("could not find memory type matching flags")
+            .0
+    };
+
+    let create_image = |width, height, format, tiling, usage, memory_properties| {
+        let queue_family_indices = &[queue_family_index];
+        let create_info = vk::ImageCreateInfo {
+            image_type: vk::ImageType::Type2d,
+            extent: vk::Extent3d {
+                width,
+                height,
+                depth: 1,
+            },
+            mip_levels: 1,
+            array_layers: 1,
+            format,
+            tiling,
+            usage,
+            sharing_mode: vk::SharingMode::Exclusive,
+            samples: vk::SampleCountFlags::SAMPLE_COUNT_1,
+            queue_family_indices: queue_family_indices.into(),
+            initial_layout: vk::ImageLayout::Undefined,
+            ..default()
+        };
+        let mut image = vk::Image::null();
+        vk_check!(unsafe { device_fn.create_image(device, &create_info, None, &mut image) });
+
+        let memory_requirements = {
+            let mut memory_requirements = vk::MemoryRequirements2::default();
+            device_fn.get_image_memory_requirements2(
+                device,
+                &vk::ImageMemoryRequirementsInfo2 { image, ..default() },
+                &mut memory_requirements,
+            );
+            memory_requirements
+        };
+
+        let memory_type_index = find_memory_type(
+            memory_requirements.memory_requirements.memory_type_bits,
+            memory_properties,
+        );
+
+        let mut memory = vk::DeviceMemory::null();
+        vk_check!(unsafe {
+            device_fn.allocate_memory(
+                device,
+                &vk::MemoryAllocateInfo {
+                    allocation_size: memory_requirements.memory_requirements.size,
+                    memory_type_index,
+                    ..default()
+                },
+                None,
+                &mut memory,
+            )
+        });
+        unsafe {
+            device_fn.bind_image_memory2(
+                device,
+                &[vk::BindImageMemoryInfo {
+                    image,
+                    memory,
+                    offset: 0,
+                    ..default()
+                }],
+            )
+        };
+
+        (image, memory)
+    };
+
+    let create_image_and_view = |width, height, format, tiling, usage, memory_properties| {
+        let (image, memory) = create_image(width, height, format, tiling, usage, memory_properties);
+        let mut view = vk::ImageView::null();
+        let create_info = vk::ImageViewCreateInfo {
+            image,
+            view_type: vk::ImageViewType::Type2d,
+            format,
+            components: vk::ComponentMapping {
+                r: vk::ComponentSwizzle::R,
+                g: vk::ComponentSwizzle::G,
+                b: vk::ComponentSwizzle::B,
+                a: vk::ComponentSwizzle::A,
+            },
+            subresource_range: vk::ImageSubresourceRange {
+                aspect_mask: vk::ImageAspectFlags::COLOR,
+                base_mip_level: 0,
+                level_count: 1,
+                base_array_layer: 0,
+                layer_count: 1,
+            },
+            ..default()
+        };
+        vk_check!(unsafe { device_fn.create_image_view(device, &create_info, None, &mut view) });
+        (image, view, memory)
+    };
+
+    let width = 120;
+    let height = 40;
+    let viewport = vk::Viewport {
+        x: 0.0,
+        y: 0.0,
+        width: width as f32,
+        height: height as f32,
+        min_depth: 0.0,
+        max_depth: 1.0,
+    };
+
+    let scissor = vk::Rect2d {
+        offset: vk::Offset2d { x: 0, y: 0 },
+        extent: vk::Extent2d { width, height },
+    };
+
+    let (image, image_view, image_memory) = create_image_and_view(
+        width,
+        height,
+        vk::Format::R8G8B8A8_SRGB,
+        vk::ImageTiling::OPTIMAL,
+        vk::ImageUsageFlags::COLOR_ATTACHMENT | vk::ImageUsageFlags::TRANSFER_SRC,
+        vk::MemoryPropertyFlags::DEVICE_LOCAL,
+    );
+
+    let (host_image, host_image_memory) = create_image(
+        width,
+        height,
+        vk::Format::R8G8B8A8_SRGB,
+        vk::ImageTiling::LINEAR,
+        vk::ImageUsageFlags::TRANSFER_DST,
+        vk::MemoryPropertyFlags::HOST_VISIBLE | vk::MemoryPropertyFlags::HOST_COHERENT,
+    );
+
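+    // LINEAR tiling makes the image's memory layout queryable: the returned
+    // SubresourceLayout provides the offset and row pitch needed to read the
+    // rendered pixels back through the mapped pointer below.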
+    let host_subresource_layout = unsafe {
+        let mut layout = vk::SubresourceLayout::default();
+        device_fn.get_image_subresource_layout(
+            device,
+            host_image,
+            &vk::ImageSubresource {
+                aspect_mask: vk::ImageAspectFlags::COLOR,
+                mip_level: 0,
+                array_layer: 0,
+            },
+            &mut layout,
+        );
+        layout
+    };
+
+    let mut data = std::ptr::null_mut();
+    vk_check!(unsafe {
+        device_fn.map_memory(
+            device,
+            host_image_memory,
+            0,
+            vk::WHOLE_SIZE,
+            vk::MemoryMapFlags::default(),
+            &mut data,
+        )
+    });
+
+    // Do the rendering!
+    let command_buffer = {
+        let mut command_buffers = [vk::CommandBuffer::default()];
+        let allocate_info = vk::CommandBufferAllocateInfo {
+            command_pool,
+            command_buffer_count: command_buffers.len() as u32,
+            ..default()
+        };
+        vk_check!(unsafe {
+            device_fn.allocate_command_buffers(device, &allocate_info, command_buffers.as_mut_ptr())
+        });
+        command_buffers[0]
+    };
+
+    {
+        let begin_info = vk::CommandBufferBeginInfo {
+            flags: vk::CommandBufferUsageFlags::ONE_TIME_SUBMIT,
+            ..default()
+        };
+        vk_check!(unsafe { device_fn.begin_command_buffer(command_buffer, &begin_info) });
+    }
+
+    unsafe {
+        device_fn.cmd_bind_pipeline(command_buffer, vk::PipelineBindPoint::Graphics, pipeline)
+    };
+
+    unsafe { device_fn.cmd_set_viewport_with_count(command_buffer, &[viewport]) };
+    unsafe { device_fn.cmd_set_scissor_with_count(command_buffer, &[scissor]) };
+
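+    // Transition the render target from Undefined to ColorAttachmentOptimal
+    // before rendering; the old contents are discarded, which is fine since the
+    // attachment is cleared on load.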
+    let image_memory_barriers = &[vk::ImageMemoryBarrier2 {
+        src_stage_mask: vk::PipelineStageFlags2::TOP_OF_PIPE,
+        src_access_mask: vk::AccessFlags2::NONE,
+        dst_stage_mask: vk::PipelineStageFlags2::COLOR_ATTACHMENT_OUTPUT,
+        dst_access_mask: vk::AccessFlags2::COLOR_ATTACHMENT_WRITE,
+        old_layout: vk::ImageLayout::Undefined,
+        new_layout: vk::ImageLayout::ColorAttachmentOptimal,
+        image,
+        subresource_range: vk::ImageSubresourceRange {
+            aspect_mask: vk::ImageAspectFlags::COLOR,
+            base_mip_level: 0,
+            level_count: 1,
+            base_array_layer: 0,
+            layer_count: 1,
+        },
+        ..default()
+    }];
+    unsafe {
+        device_fn.cmd_pipeline_barrier2(
+            command_buffer,
+            &vk::DependencyInfo {
+                image_memory_barriers: image_memory_barriers.into(),
+                ..default()
+            },
+        )
+    };
+
+    unsafe {
+        device_fn.cmd_begin_rendering(
+            command_buffer,
+            &vk::RenderingInfo {
+                render_area: scissor,
+                layer_count: 1,
+                color_attachments: [vk::RenderingAttachmentInfo {
+                    image_view,
+                    image_layout: vk::ImageLayout::AttachmentOptimal,
+                    resolve_image_layout: vk::ImageLayout::AttachmentOptimal,
+                    load_op: vk::AttachmentLoadOp::Clear,
+                    store_op: vk::AttachmentStoreOp::Store,
+                    clear_value: vk::ClearValue {
+                        color: vk::ClearColorValue {
+                            f32: [0.392157, 0.584314, 0.929412, 1.0],
+                        },
+                    },
+                    ..default()
+                }]
+                .as_ref()
+                .into(),
+                ..default()
+            },
+        );
+        device_fn.cmd_draw(command_buffer, 3, 1, 0, 0);
+        device_fn.cmd_end_rendering(command_buffer);
+    };
+
+    let image_memory_barriers = &[
+        // transition color attachment to transfer src
+        vk::ImageMemoryBarrier2 {
+            src_stage_mask: vk::PipelineStageFlags2::COLOR_ATTACHMENT_OUTPUT,
+            src_access_mask: vk::AccessFlags2::COLOR_ATTACHMENT_WRITE,
+            dst_stage_mask: vk::PipelineStageFlags2::TRANSFER,
+            dst_access_mask: vk::AccessFlags2::TRANSFER_READ,
+            old_layout: vk::ImageLayout::ColorAttachmentOptimal,
+            new_layout: vk::ImageLayout::TransferSrcOptimal,
+            image,
+            subresource_range: vk::ImageSubresourceRange {
+                aspect_mask: vk::ImageAspectFlags::COLOR,
+                base_mip_level: 0,
+                level_count: 1,
+                base_array_layer: 0,
+                layer_count: 1,
+            },
+            ..default()
+        },
+        // transition host image to transfer dst
+        vk::ImageMemoryBarrier2 {
+            src_stage_mask: vk::PipelineStageFlags2::TRANSFER,
+            src_access_mask: vk::AccessFlags2::NONE,
+            dst_stage_mask: vk::PipelineStageFlags2::TRANSFER,
+            dst_access_mask: vk::AccessFlags2::TRANSFER_WRITE,
+            old_layout: vk::ImageLayout::Undefined,
+            new_layout: vk::ImageLayout::TransferDstOptimal,
+            image: host_image,
+            subresource_range: vk::ImageSubresourceRange {
+                aspect_mask: vk::ImageAspectFlags::COLOR,
+                base_mip_level: 0,
+                level_count: 1,
+                base_array_layer: 0,
+                layer_count: 1,
+            },
+            ..default()
+        },
+    ];
+    unsafe {
+        device_fn.cmd_pipeline_barrier2(
+            command_buffer,
+            &vk::DependencyInfo {
+                image_memory_barriers: image_memory_barriers.into(),
+                ..default()
+            },
+        )
+    };
+
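+    // Copy the full extent of the rendered image into the linear host image.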
+    let regions = &[vk::ImageCopy {
+        src_subresource: vk::ImageSubresourceLayers {
+            aspect_mask: vk::ImageAspectFlags::COLOR,
+            base_array_layer: 0,
+            layer_count: 1,
+            mip_level: 0,
+        },
+        src_offset: vk::Offset3d { x: 0, y: 0, z: 0 },
+        dst_subresource: vk::ImageSubresourceLayers {
+            aspect_mask: vk::ImageAspectFlags::COLOR,
+            base_array_layer: 0,
+            layer_count: 1,
+            mip_level: 0,
+        },
+        dst_offset: vk::Offset3d { x: 0, y: 0, z: 0 },
+        extent: vk::Extent3d {
+            width,
+            height,
+            depth: 1,
+        },
+    }];
+    unsafe {
+        device_fn.cmd_copy_image(
+            command_buffer,
+            image,
+            vk::ImageLayout::TransferSrcOptimal,
+            host_image,
+            vk::ImageLayout::TransferDstOptimal,
+            regions,
+        )
+    };
+
+    unsafe {
+        device_fn.cmd_pipeline_barrier2(
+            command_buffer,
+            &vk::DependencyInfo {
+                image_memory_barriers: [
+                    // transition host image to general so we can read it
+                    vk::ImageMemoryBarrier2 {
+                        src_stage_mask: vk::PipelineStageFlags2::TRANSFER,
+                        src_access_mask: vk::AccessFlags2::TRANSFER_WRITE,
+                        dst_stage_mask: vk::PipelineStageFlags2::TRANSFER,
+                        dst_access_mask: vk::AccessFlags2::MEMORY_READ,
+                        old_layout: vk::ImageLayout::TransferDstOptimal,
+                        new_layout: vk::ImageLayout::General,
+                        image: host_image,
+                        subresource_range: vk::ImageSubresourceRange {
+                            aspect_mask: vk::ImageAspectFlags::COLOR,
+                            base_mip_level: 0,
+                            level_count: 1,
+                            base_array_layer: 0,
+                            layer_count: 1,
+                        },
+                        ..default()
+                    },
+                ]
+                .as_ref()
+                .into(),
+                ..default()
+            },
+        )
+    };
+
+    vk_check!(unsafe { device_fn.end_command_buffer(command_buffer) });
+
+    // SUBMIT!
+    semaphore_value += 1;
+
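+    // The submit signals the timeline semaphore with the incremented value once the
+    // command buffer finishes executing.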
+    let command_buffer_infos = &[vk::CommandBufferSubmitInfo {
+        command_buffer,
+        ..default()
+    }];
+    let signal_semaphore_infos = &[vk::SemaphoreSubmitInfo {
+        semaphore,
+        semaphore_value,
+        stage_mask: vk::PipelineStageFlags2::BOTTOM_OF_PIPE,
+        ..default()
+    }];
+    let submit = vk::SubmitInfo2 {
+        command_buffer_infos: command_buffer_infos.into(),
+        signal_semaphore_infos: signal_semaphore_infos.into(),
+        ..default()
+    };
+    vk_check!(unsafe { device_fn.queue_submit2(queue, &[submit], vk::Fence::null()) });
+
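+    // Block until the timeline semaphore reaches the signaled value; !0 is u64::MAX,
+    // i.e. wait with no timeout.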
+    vk_check!(unsafe {
+        device_fn.wait_semaphores(
+            device,
+            &vk::SemaphoreWaitInfo {
+                semaphores: (&[semaphore], &[semaphore_value]).into(),
+                ..default()
+            },
+            !0,
+        )
+    });
+
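+    // View the mapped allocation as a byte slice, starting at the subresource offset
+    // reported for the linear image.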
+    let data = data as *const u8;
+    let image_bytes = unsafe {
+        std::slice::from_raw_parts(
+            data.offset(host_subresource_layout.offset as isize),
+            host_subresource_layout.size as usize,
+        )
+    };
+
+    #[inline]
+    unsafe fn as_chunks_unchecked<T, const N: usize>(slice: &[T]) -> &[[T; N]] {
+        debug_assert_ne!(N, 0);
+        debug_assert_eq!(slice.len() % N, 0);
+        let new_len = slice.len() / N;
+        // SAFETY: We cast a slice of `new_len * N` elements into
+        // a slice of `new_len` many `N` elements chunks.
+        std::slice::from_raw_parts(slice.as_ptr().cast(), new_len)
+    }
+
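+    // Walk the image row by row (row_pitch may be wider than width * 4 bytes due to
+    // alignment) and print each pixel as a 24-bit ANSI color block, after clearing the
+    // terminal.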
+    print!("\x1b[2J");
+    for row in image_bytes.chunks_exact(host_subresource_layout.row_pitch as usize) {
+        let pixels = unsafe { as_chunks_unchecked(row) };
+        for [r, g, b, _a] in &pixels[0..width as usize] {
+            print!("\x1b[38;2;{r};{g};{b}mâ–ˆ");
+        }
+        println!();
+    }
+    print!("\x1b[0m");
+
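+    // Tear everything down in roughly reverse creation order.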
+    unsafe { device_fn.free_command_buffers(device, command_pool, &[command_buffer]) };
+
+    unsafe { device_fn.destroy_image_view(device, image_view, None) };
+    unsafe { device_fn.destroy_image(device, image, None) };
+    unsafe { device_fn.free_memory(device, image_memory, None) };
+
+    unsafe { device_fn.destroy_image(device, host_image, None) };
+    unsafe { device_fn.free_memory(device, host_image_memory, None) };
+
+    unsafe { device_fn.destroy_pipeline(device, pipeline, None) };
+    unsafe { device_fn.destroy_pipeline_layout(device, pipeline_layout, None) };
+
+    unsafe { device_fn.destroy_command_pool(device, command_pool, None) };
+
+    unsafe { device_fn.destroy_semaphore(device, semaphore, None) };
+
+    unsafe { device_fn.destroy_device(device, None) };
+    unsafe { instance_fn.destroy_instance(instance, None) };
+}
diff --git a/vulkan-sys/examples/triangle.vert.glsl b/vulkan-sys/examples/triangle.vert.glsl
new file mode 100644 (file)
index 0000000..66d6766
--- /dev/null
@@ -0,0 +1,20 @@
+#version 450
+
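+// Positions and colors are hardcoded in the shader and selected by gl_VertexIndex,
+// so the pipeline needs no vertex buffers.
+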
+layout(location = 0) out vec3 fragColor;
+
+vec2 positions[3] = vec2[](
+    vec2(0.0, -0.5),
+    vec2(0.5, 0.5),
+    vec2(-0.5, 0.5)
+);
+
+vec3 colors[3] = vec3[](
+    vec3(1.0, 0.0, 0.0),
+    vec3(0.0, 1.0, 0.0),
+    vec3(0.0, 0.0, 1.0)
+);
+
+void main() {
+    gl_Position = vec4(positions[gl_VertexIndex], 0.0, 1.0);
+    fragColor = colors[gl_VertexIndex];
+}
\ No newline at end of file
diff --git a/vulkan-sys/examples/triangle.vert.spv b/vulkan-sys/examples/triangle.vert.spv
new file mode 100644 (file)
index 0000000..ef8a465
Binary files /dev/null and b/vulkan-sys/examples/triangle.vert.spv differ
diff --git a/vulkan-sys/src/enums.rs b/vulkan-sys/src/enums.rs
new file mode 100644 (file)
index 0000000..e4a1e61
--- /dev/null
@@ -0,0 +1,1413 @@
+#[repr(i32)]
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+#[non_exhaustive]
+#[must_use]
+pub enum Result {
+    ///  Command completed successfully
+    Success = 0,
+    ///  A fence or query has not yet completed
+    NotReady = 1,
+    ///  A wait operation has not completed in the specified time
+    Timeout = 2,
+    ///  An event is signaled
+    EventSet = 3,
+    ///  An event is unsignaled
+    EventReset = 4,
+    ///  A return array was too small for the result
+    Incomplete = 5,
+    ///  A host memory allocation has failed
+    ErrorOutOfHostMemory = -1,
+    ///  A device memory allocation has failed
+    ErrorOutOfDeviceMemory = -2,
+    ///  Initialization of an object has failed
+    ErrorInitializationFailed = -3,
+    ///  The logical device has been lost (see the "Lost Device" section of the Vulkan spec)
+    ErrorDeviceLost = -4,
+    ///  Mapping of a memory object has failed
+    ErrorMemoryMapFailed = -5,
+    ///  Layer specified does not exist
+    ErrorLayerNotPresent = -6,
+    ///  Extension specified does not exist
+    ErrorExtensionNotPresent = -7,
+    ///  Requested feature is not available on this device
+    ErrorFeatureNotPresent = -8,
+    ///  Unable to find a Vulkan driver
+    ErrorIncompatibleDriver = -9,
+    ///  Too many objects of the type have already been created
+    ErrorTooManyObjects = -10,
+    ///  Requested format is not supported on this device
+    ErrorFormatNotSupported = -11,
+    ///  A requested pool allocation has failed due to fragmentation of the pool's memory
+    ErrorFragmentedPool = -12,
+    ///  An unknown error has occurred, due to an implementation or application bug
+    ErrorUnknown = -13,
+    ErrorOutOfPoolMemory = -1000069000,
+    ErrorInvalidExternalHandle = -1000072003,
+    ErrorFragmentation = -1000161000,
+    ErrorInvalidOpaqueCaptureAddress = -1000257000,
+    ErrorSurfaceLostKHR = -1000000000,
+    ErrorNativeWindowInUseKHR = -1000000001,
+    SuboptimalKHR = 1000001003,
+    ErrorOutOfDateKHR = -1000001004,
+    ErrorIncompatibleDisplayKHR = -1000003001,
+    ErrorValidationFailedEXT = -1000011001,
+    ErrorInvalidShaderNV = -1000012000,
+    ErrorInvalidDrmFormatModifierPlaneLayoutEXT = -1000158000,
+    ErrorNotPermittedEXT = -1000174001,
+    ThreadIdleKHR = 1000268000,
+    ThreadDoneKHR = 1000268001,
+    OperationDeferredKHR = 1000268002,
+    OperationNotDeferredKHR = 1000268003,
+    PipelineCompileRequiredEXT = 1000297000,
+}
+
+#[repr(i32)]
+#[derive(Clone, Copy)]
+pub enum PhysicalDeviceType {
+    Other = 0,
+    IntegratedGpu = 1,
+    DiscreteGpu = 2,
+    VirtualGpu = 3,
+    Cpu = 4,
+}
+
+#[repr(i32)]
+#[derive(Copy, Clone, PartialEq, Eq)]
+#[non_exhaustive]
+pub enum InternalAllocationType {
+    Executable = 0,
+}
+
+#[repr(i32)]
+#[derive(Copy, Clone, PartialEq, Eq)]
+#[non_exhaustive]
+pub enum SystemAllocationScope {
+    Command = 0,
+    Object = 1,
+    Cache = 2,
+    Device = 3,
+    Instance = 4,
+}
+
+#[repr(i32)]
+#[derive(Copy, Clone, PartialEq, Eq)]
+pub enum ComponentSwizzle {
+    Identity = 0,
+    Zero = 1,
+    One = 2,
+    R = 3,
+    G = 4,
+    B = 5,
+    A = 6,
+}
+
+#[repr(i32)]
+#[derive(Copy, Clone, PartialEq, Eq)]
+pub enum ImageType {
+    Type1d = 0,
+    Type2d = 1,
+    Type3d = 2,
+}
+
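+// Unlike its neighbors, ImageTiling is a newtype over i32 rather than a Rust enum,
+// presumably so values outside the listed constants remain representable across FFI.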
+#[repr(C)]
+#[derive(Copy, Clone, PartialEq, Eq)]
+pub struct ImageTiling(i32);
+
+impl ImageTiling {
+    pub const OPTIMAL: Self = Self(0);
+    pub const LINEAR: Self = Self(1);
+    pub const DRM_FORMAT_MODIFIER_EXT: Self = Self(1000158000);
+}
+
+#[repr(i32)]
+#[derive(Copy, Clone, PartialEq, Eq)]
+pub enum ImageViewType {
+    Type1d = 0,
+    Type2d = 1,
+    Type3d = 2,
+    TypeCube = 3,
+    Type1dArray = 4,
+    Type2dArray = 5,
+    TypeCubeArray = 6,
+}
+
+#[repr(i32)]
+#[derive(Copy, Clone, PartialEq, Eq)]
+pub enum PresentModeKHR {
+    Immediate = 0,
+    Mailbox = 1,
+    Fifo = 2,
+    FifoRelaxed = 3,
+    SharedDemandRefresh = 1000111000,
+    SharedContinuousRefresh = 1000111001,
+}
+
+impl Default for PresentModeKHR {
+    fn default() -> Self {
+        Self::Fifo
+    }
+}
+
+#[repr(i32)]
+#[derive(Copy, Clone, PartialEq, Eq)]
+pub enum CommandBufferLevel {
+    Primary = 0,
+    Secondary = 1,
+}
+
+#[repr(i32)]
+#[derive(Clone, Copy, PartialEq, Eq)]
+pub enum ImageLayout {
+    /// The implicit layout of an image whose contents are undefined (e.g. right after creation)
+    Undefined = 0,
+    /// General layout when image can be used for any kind of access
+    General = 1,
+    /// Optimal layout when image is only used for color attachment read/write
+    ColorAttachmentOptimal = 2,
+    /// Optimal layout when image is only used for depth/stencil attachment read/write
+    DepthStencilAttachmentOptimal = 3,
+    /// Optimal layout when image is used for read only depth/stencil attachment and shader access
+    DepthStencilReadOnlyOptimal = 4,
+    /// Optimal layout when image is used for read only shader access
+    ShaderReadOnlyOptimal = 5,
+    /// Optimal layout when image is used only as source of transfer operations
+    TransferSrcOptimal = 6,
+    /// Optimal layout when image is used only as destination of transfer operations
+    TransferDstOptimal = 7,
+    /// Initial layout used when the data is populated by the CPU
+    Preinitialized = 8,
+    DepthReadOnlyStencilAttachmentOptimal = 1000117000,
+    DepthAttachmentStencilReadOnlyOptimal = 1000117001,
+    DepthAttachmentOptimal = 1000241000,
+    DepthReadOnlyOptimal = 1000241001,
+    StencilAttachmentOptimal = 1000241002,
+    StencilReadOnlyOptimal = 1000241003,
+    PresentSrcKhr = 1000001002,
+    VideoDecodeDstKhr = 1000024000,
+    VideoDecodeSrcKhr = 1000024001,
+    VideoDecodeDpbKhr = 1000024002,
+    SharedPresentKhr = 1000111000,
+    FragmentDensityMapOptimalExt = 1000218000,
+    FragmentShadingRateAttachmentOptimalKhr = 1000164003,
+    VideoEncodeDstKhr = 1000299000,
+    VideoEncodeSrcKhr = 1000299001,
+    VideoEncodeDpbKhr = 1000299002,
+    ReadOnlyOptimal = 1000314000,
+    AttachmentOptimal = 1000314001,
+}
+
+#[repr(i32)]
+#[derive(Clone, Copy, PartialEq, Eq)]
+pub enum AttachmentLoadOp {
+    Load = 0,
+    Clear = 1,
+    DontCare = 2,
+    NoneExt = 1000400000,
+}
+
+#[repr(i32)]
+#[derive(Clone, Copy, PartialEq, Eq)]
+pub enum AttachmentStoreOp {
+    Store = 0,
+    DontCare = 1,
+    NoneExt = 1000301000,
+}
+
+#[repr(i32)]
+#[derive(Clone, Copy, PartialEq, Eq)]
+pub enum PipelineBindPoint {
+    Graphics = 0,
+    Compute = 1,
+    RayTracingKhr = 1000165000,
+    SubpassShadingHuawei = 1000369003,
+}
+
+#[repr(i32)]
+#[derive(Copy, Clone, PartialEq, Eq)]
+pub enum ColorSpaceKHR {
+    SrgbNonlinearKhr = 0,
+    DisplayP3NonlinearExt = 1000104001,
+    ExtendedSrgbLinearExt = 1000104002,
+    DisplayP3LinearExt = 1000104003,
+    DciP3NonlinearExt = 1000104004,
+    Bt709LinearExt = 1000104005,
+    Bt709NonlinearExt = 1000104006,
+    Bt2020LinearExt = 1000104007,
+    Hdr10St2084Ext = 1000104008,
+    DolbyvisionExt = 1000104009,
+    Hdr10HlgExt = 1000104010,
+    AdobergbLinearExt = 1000104011,
+    AdobergbNonlinearExt = 1000104012,
+    PassThroughExt = 1000104013,
+    ExtendedSrgbNonlinearExt = 1000104014,
+    DisplayNativeAmd = 1000213000,
+}
+
+#[repr(i32)]
+#[derive(Copy, Clone, PartialEq, Eq)]
+pub enum PrimitiveTopology {
+    PointList = 0,
+    LineList = 1,
+    LineStrip = 2,
+    TriangleList = 3,
+    TriangleStrip = 4,
+    TriangleFan = 5,
+    LineListWithAdjacency = 6,
+    LineStripWithAdjacency = 7,
+    TriangleListWithAdjacency = 8,
+    TriangleStripWithAdjacency = 9,
+    PatchList = 10,
+}
+
+#[repr(i32)]
+#[derive(Copy, Clone, PartialEq, Eq)]
+pub enum SharingMode {
+    Exclusive = 0,
+    Concurrent = 1,
+}
+
+#[repr(i32)]
+#[derive(Copy, Clone, PartialEq, Eq)]
+pub enum DescriptorType {
+    Sampler = 0,
+    CombinedImageSampler = 1,
+    SampledImage = 2,
+    StorageImage = 3,
+    UniformTexelBuffer = 4,
+    StorageTexelBuffer = 5,
+    UniformBuffer = 6,
+    StorageBuffer = 7,
+    UniformBufferDynamic = 8,
+    StorageBufferDynamic = 9,
+    InputAttachment = 10,
+    InlineUniformBlockExt = 1000138000,
+    AccelerationStructureKhr = 1000150000,
+    AccelerationStructureNv = 1000165000,
+    MutableValve = 1000351000,
+}
+
+#[repr(i32)]
+#[derive(Copy, Clone, PartialEq, Eq)]
+pub enum VertexInputRate {
+    Vertex = 0,
+    Instance = 1,
+}
+
+#[repr(i32)]
+#[derive(Copy, Clone, PartialEq, Eq)]
+pub enum CompareOp {
+    Never = 0,
+    Less = 1,
+    Equal = 2,
+    LessOrEqual = 3,
+    Greater = 4,
+    NotEqual = 5,
+    GreaterOrEqual = 6,
+    Always = 7,
+}
+
+#[repr(i32)]
+#[derive(Copy, Clone, PartialEq, Eq)]
+pub enum PolygonMode {
+    Fill = 0,
+    Line = 1,
+    Point = 2,
+    FillRectangleNv = 1000153000,
+}
+
+#[repr(i32)]
+#[derive(Copy, Clone, PartialEq, Eq)]
+pub enum FrontFace {
+    CounterClockwise = 0,
+    Clockwise = 1,
+}
+
+#[repr(i32)]
+#[derive(Copy, Clone, PartialEq, Eq)]
+pub enum BlendFactor {
+    Zero = 0,
+    One = 1,
+    SrcColor = 2,
+    OneMinusSrcColor = 3,
+    DstColor = 4,
+    OneMinusDstColor = 5,
+    SrcAlpha = 6,
+    OneMinusSrcAlpha = 7,
+    DstAlpha = 8,
+    OneMinusDstAlpha = 9,
+    ConstantColor = 10,
+    OneMinusConstantColor = 11,
+    ConstantAlpha = 12,
+    OneMinusConstantAlpha = 13,
+    SrcAlphaSaturate = 14,
+    Src1Color = 15,
+    OneMinusSrc1Color = 16,
+    Src1Alpha = 17,
+    OneMinusSrc1Alpha = 18,
+}
+
+#[repr(i32)]
+#[derive(Copy, Clone, PartialEq, Eq)]
+pub enum BlendOp {
+    Add = 0,
+    Subtract = 1,
+    ReverseSubtract = 2,
+    Min = 3,
+    Max = 4,
+    ZeroExt = 1000148000,
+    SrcExt = 1000148001,
+    DstExt = 1000148002,
+    SrcOverExt = 1000148003,
+    DstOverExt = 1000148004,
+    SrcInExt = 1000148005,
+    DstInExt = 1000148006,
+    SrcOutExt = 1000148007,
+    DstOutExt = 1000148008,
+    SrcAtopExt = 1000148009,
+    DstAtopExt = 1000148010,
+    XorExt = 1000148011,
+    MultiplyExt = 1000148012,
+    ScreenExt = 1000148013,
+    OverlayExt = 1000148014,
+    DarkenExt = 1000148015,
+    LightenExt = 1000148016,
+    ColordodgeExt = 1000148017,
+    ColorburnExt = 1000148018,
+    HardlightExt = 1000148019,
+    SoftlightExt = 1000148020,
+    DifferenceExt = 1000148021,
+    ExclusionExt = 1000148022,
+    InvertExt = 1000148023,
+    InvertRgbExt = 1000148024,
+    LineardodgeExt = 1000148025,
+    LinearburnExt = 1000148026,
+    VividlightExt = 1000148027,
+    LinearlightExt = 1000148028,
+    PinlightExt = 1000148029,
+    HardmixExt = 1000148030,
+    HslHueExt = 1000148031,
+    HslSaturationExt = 1000148032,
+    HslColorExt = 1000148033,
+    HslLuminosityExt = 1000148034,
+    PlusExt = 1000148035,
+    PlusClampedExt = 1000148036,
+    PlusClampedAlphaExt = 1000148037,
+    PlusDarkerExt = 1000148038,
+    MinusExt = 1000148039,
+    MinusClampedExt = 1000148040,
+    ContrastExt = 1000148041,
+    InvertOvgExt = 1000148042,
+    RedExt = 1000148043,
+    GreenExt = 1000148044,
+    BlueExt = 1000148045,
+}
+
+#[repr(i32)]
+#[derive(Copy, Clone, PartialEq, Eq)]
+pub enum StencilOp {
+    Keep = 0,
+    Zero = 1,
+    Replace = 2,
+    IncrementAndClamp = 3,
+    DecrementAndClamp = 4,
+    Invert = 5,
+    IncrementAndWrap = 6,
+    DecrementAndWrap = 7,
+}
+
+#[repr(i32)]
+#[derive(Copy, Clone, PartialEq, Eq)]
+pub enum LogicOp {
+    Clear = 0,
+    And = 1,
+    AndReverse = 2,
+    Copy = 3,
+    AndInverted = 4,
+    NoOp = 5,
+    Xor = 6,
+    Or = 7,
+    Nor = 8,
+    Equivalent = 9,
+    Invert = 10,
+    OrReverse = 11,
+    CopyInverted = 12,
+    OrInverted = 13,
+    Nand = 14,
+    Set = 15,
+}
+
+#[repr(i32)]
+#[derive(Copy, Clone, PartialEq, Eq)]
+pub enum DynamicState {
+    Viewport = 0,
+    Scissor = 1,
+    LineWidth = 2,
+    DepthBias = 3,
+    BlendConstants = 4,
+    DepthBounds = 5,
+    StencilCompareMask = 6,
+    StencilWriteMask = 7,
+    StencilReference = 8,
+    ViewportWScalingNv = 1000087000,
+    DiscardRectangleExt = 1000099000,
+    SampleLocationsExt = 1000143000,
+    RayTracingPipelineStackSizeKhr = 1000347000,
+    ViewportShadingRatePaletteNv = 1000164004,
+    ViewportCoarseSampleOrderNv = 1000164006,
+    ExclusiveScissorNv = 1000205001,
+    FragmentShadingRateKhr = 1000226000,
+    LineStippleExt = 1000259000,
+    CullMode = 1000267000,
+    FrontFaceExt = 1000267001,
+    PrimitiveTopology = 1000267002,
+    ViewportWithCount = 1000267003,
+    ScissorWithCount = 1000267004,
+    VertexInputBindingStride = 1000267005,
+    DepthTestEnable = 1000267006,
+    DepthWriteEnable = 1000267007,
+    DepthCompareOp = 1000267008,
+    DepthBoundsTestEnable = 1000267009,
+    StencilTestEnable = 1000267010,
+    StencilOp = 1000267011,
+    VertexInputExt = 1000352000,
+    PatchControlPointsExt = 1000377000,
+    RasterizerDiscardEnableExt = 1000377001,
+    DepthBiasEnableExt = 1000377002,
+    LogicOpExt = 1000377003,
+    PrimitiveRestartEnableExt = 1000377004,
+    ColorWriteEnableExt = 1000381000,
+}
+
+#[repr(i32)]
+#[derive(Copy, Clone, PartialEq, Eq)]
+pub enum PipelineCacheHeaderVersion {
+    One = 1,
+}
+
+#[repr(i32)]
+#[derive(Copy, Clone, PartialEq, Eq)]
+pub enum Filter {
+    Nearest = 0,
+    Linear = 1,
+    CubicExt = 1000015000,
+}
+
+#[repr(i32)]
+#[derive(Copy, Clone, PartialEq, Eq)]
+pub enum SamplerMipmapMode {
+    Nearest = 0, // Choose nearest mip level
+    Linear = 1,  // Linear filter between mip levels
+}
+
+#[repr(i32)]
+#[derive(Copy, Clone, PartialEq, Eq)]
+pub enum SamplerAddressMode {
+    Repeat = 0,
+    MirroredRepeat = 1,
+    ClampToEdge = 2,
+    ClampToBorder = 3,
+    MirrorClampToEdge = 4, // Promoted to core with a plain value rather than an extension-offset value
+}
+
+#[repr(i32)]
+#[derive(Copy, Clone, PartialEq, Eq)]
+pub enum BorderColor {
+    FloatTransparentBlack = 0,
+    IntTransparentBlack = 1,
+    FloatOpaqueBlack = 2,
+    IntOpaqueBlack = 3,
+    FloatOpaqueWhite = 4,
+    IntOpaqueWhite = 5,
+    FloatCustomExt = 1000287003,
+    IntCustomExt = 1000287004,
+}
+
+#[repr(i32)]
+#[derive(Copy, Clone, PartialEq, Eq)]
+pub enum SubpassContents {
+    Inline = 0,
+    SecondaryCommandBuffers = 1,
+}
+
+#[repr(i32)]
+#[derive(Copy, Clone, PartialEq, Eq)]
+pub enum IndexType {
+    Uint16 = 0,
+    Uint32 = 1,
+    NoneKhr = 1000165000,
+    Uint8Ext = 1000265000,
+}
+
+#[repr(i32)]
+#[derive(Copy, Clone, PartialEq, Eq)]
+pub enum SemaphoreType {
+    Binary = 0,
+    Timeline = 1,
+}
+
+#[repr(i32)]
+#[derive(Copy, Clone, PartialEq, Eq)]
+pub enum PointClippingBehavior {
+    AllClipPlanes = 0,
+    UserClipPlanesOnly = 1,
+}
+
+#[repr(i32)]
+#[derive(Copy, Clone, PartialEq, Eq)]
+pub enum DriverId {
+    AmdProprietary = 1,
+    AmdOpenSource = 2,
+    MesaRadv = 3,
+    NvidiaProprietary = 4,
+    IntelProprietaryWindows = 5,
+    IntelOpenSourceMesa = 6,
+    ImaginationProprietary = 7,
+    QualcommProprietary = 8,
+    ArmProprietary = 9,
+    GoogleSwiftshader = 10,
+    GgpProprietary = 11,
+    BroadcomProprietary = 12,
+    MesaLlvmpipe = 13,
+    MoltenVk = 14,
+    CoreaviProprietary = 15,
+    JuiceProprietary = 16,
+    VerisiliconProprietary = 17,
+    MesaTurnip = 18,
+    MesaV3dv = 19,
+    MesaPanvk = 20,
+    SamsungProprietary = 21,
+    MesaVenus = 22,
+}
+
+#[repr(i32)]
+#[derive(Copy, Clone, PartialEq, Eq)]
+pub enum ShaderFloatControlsIndependence {
+    Only32Bit = 0,
+    All = 1,
+    None = 2,
+}
+
+#[repr(i32)]
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+#[allow(non_camel_case_types)]
+pub enum Format {
+    Undefined = 0,
+    R4G4_UNORM_PACK8 = 1,
+    R4G4B4A4_UNORM_PACK16 = 2,
+    B4G4R4A4_UNORM_PACK16 = 3,
+    R5G6B5_UNORM_PACK16 = 4,
+    B5G6R5_UNORM_PACK16 = 5,
+    R5G5B5A1_UNORM_PACK16 = 6,
+    B5G5R5A1_UNORM_PACK16 = 7,
+    A1R5G5B5_UNORM_PACK16 = 8,
+    R8_UNORM = 9,
+    R8_SNORM = 10,
+    R8_USCALED = 11,
+    R8_SSCALED = 12,
+    R8_UINT = 13,
+    R8_SINT = 14,
+    R8_SRGB = 15,
+    R8G8_UNORM = 16,
+    R8G8_SNORM = 17,
+    R8G8_USCALED = 18,
+    R8G8_SSCALED = 19,
+    R8G8_UINT = 20,
+    R8G8_SINT = 21,
+    R8G8_SRGB = 22,
+    R8G8B8_UNORM = 23,
+    R8G8B8_SNORM = 24,
+    R8G8B8_USCALED = 25,
+    R8G8B8_SSCALED = 26,
+    R8G8B8_UINT = 27,
+    R8G8B8_SINT = 28,
+    R8G8B8_SRGB = 29,
+    B8G8R8_UNORM = 30,
+    B8G8R8_SNORM = 31,
+    B8G8R8_USCALED = 32,
+    B8G8R8_SSCALED = 33,
+    B8G8R8_UINT = 34,
+    B8G8R8_SINT = 35,
+    B8G8R8_SRGB = 36,
+    R8G8B8A8_UNORM = 37,
+    R8G8B8A8_SNORM = 38,
+    R8G8B8A8_USCALED = 39,
+    R8G8B8A8_SSCALED = 40,
+    R8G8B8A8_UINT = 41,
+    R8G8B8A8_SINT = 42,
+    R8G8B8A8_SRGB = 43,
+    B8G8R8A8_UNORM = 44,
+    B8G8R8A8_SNORM = 45,
+    B8G8R8A8_USCALED = 46,
+    B8G8R8A8_SSCALED = 47,
+    B8G8R8A8_UINT = 48,
+    B8G8R8A8_SINT = 49,
+    B8G8R8A8_SRGB = 50,
+    A8B8G8R8_UNORM_PACK32 = 51,
+    A8B8G8R8_SNORM_PACK32 = 52,
+    A8B8G8R8_USCALED_PACK32 = 53,
+    A8B8G8R8_SSCALED_PACK32 = 54,
+    A8B8G8R8_UINT_PACK32 = 55,
+    A8B8G8R8_SINT_PACK32 = 56,
+    A8B8G8R8_SRGB_PACK32 = 57,
+    A2R10G10B10_UNORM_PACK32 = 58,
+    A2R10G10B10_SNORM_PACK32 = 59,
+    A2R10G10B10_USCALED_PACK32 = 60,
+    A2R10G10B10_SSCALED_PACK32 = 61,
+    A2R10G10B10_UINT_PACK32 = 62,
+    A2R10G10B10_SINT_PACK32 = 63,
+    A2B10G10R10_UNORM_PACK32 = 64,
+    A2B10G10R10_SNORM_PACK32 = 65,
+    A2B10G10R10_USCALED_PACK32 = 66,
+    A2B10G10R10_SSCALED_PACK32 = 67,
+    A2B10G10R10_UINT_PACK32 = 68,
+    A2B10G10R10_SINT_PACK32 = 69,
+    R16_UNORM = 70,
+    R16_SNORM = 71,
+    R16_USCALED = 72,
+    R16_SSCALED = 73,
+    R16_UINT = 74,
+    R16_SINT = 75,
+    R16_SFLOAT = 76,
+    R16G16_UNORM = 77,
+    R16G16_SNORM = 78,
+    R16G16_USCALED = 79,
+    R16G16_SSCALED = 80,
+    R16G16_UINT = 81,
+    R16G16_SINT = 82,
+    R16G16_SFLOAT = 83,
+    R16G16B16_UNORM = 84,
+    R16G16B16_SNORM = 85,
+    R16G16B16_USCALED = 86,
+    R16G16B16_SSCALED = 87,
+    R16G16B16_UINT = 88,
+    R16G16B16_SINT = 89,
+    R16G16B16_SFLOAT = 90,
+    R16G16B16A16_UNORM = 91,
+    R16G16B16A16_SNORM = 92,
+    R16G16B16A16_USCALED = 93,
+    R16G16B16A16_SSCALED = 94,
+    R16G16B16A16_UINT = 95,
+    R16G16B16A16_SINT = 96,
+    R16G16B16A16_SFLOAT = 97,
+    R32_UINT = 98,
+    R32_SINT = 99,
+    R32_SFLOAT = 100,
+    R32G32_UINT = 101,
+    R32G32_SINT = 102,
+    R32G32_SFLOAT = 103,
+    R32G32B32_UINT = 104,
+    R32G32B32_SINT = 105,
+    R32G32B32_SFLOAT = 106,
+    R32G32B32A32_UINT = 107,
+    R32G32B32A32_SINT = 108,
+    R32G32B32A32_SFLOAT = 109,
+    R64_UINT = 110,
+    R64_SINT = 111,
+    R64_SFLOAT = 112,
+    R64G64_UINT = 113,
+    R64G64_SINT = 114,
+    R64G64_SFLOAT = 115,
+    R64G64B64_UINT = 116,
+    R64G64B64_SINT = 117,
+    R64G64B64_SFLOAT = 118,
+    R64G64B64A64_UINT = 119,
+    R64G64B64A64_SINT = 120,
+    R64G64B64A64_SFLOAT = 121,
+    B10G11R11_UFLOAT_PACK32 = 122,
+    E5B9G9R9_UFLOAT_PACK32 = 123,
+    D16_UNORM = 124,
+    X8_D24_UNORM_PACK32 = 125,
+    D32_SFLOAT = 126,
+    S8_UINT = 127,
+    D16_UNORM_S8_UINT = 128,
+    D24_UNORM_S8_UINT = 129,
+    D32_SFLOAT_S8_UINT = 130,
+    BC1_RGB_UNORM_BLOCK = 131,
+    BC1_RGB_SRGB_BLOCK = 132,
+    BC1_RGBA_UNORM_BLOCK = 133,
+    BC1_RGBA_SRGB_BLOCK = 134,
+    BC2_UNORM_BLOCK = 135,
+    BC2_SRGB_BLOCK = 136,
+    BC3_UNORM_BLOCK = 137,
+    BC3_SRGB_BLOCK = 138,
+    BC4_UNORM_BLOCK = 139,
+    BC4_SNORM_BLOCK = 140,
+    BC5_UNORM_BLOCK = 141,
+    BC5_SNORM_BLOCK = 142,
+    BC6H_UFLOAT_BLOCK = 143,
+    BC6H_SFLOAT_BLOCK = 144,
+    BC7_UNORM_BLOCK = 145,
+    BC7_SRGB_BLOCK = 146,
+    ETC2_R8G8B8_UNORM_BLOCK = 147,
+    ETC2_R8G8B8_SRGB_BLOCK = 148,
+    ETC2_R8G8B8A1_UNORM_BLOCK = 149,
+    ETC2_R8G8B8A1_SRGB_BLOCK = 150,
+    ETC2_R8G8B8A8_UNORM_BLOCK = 151,
+    ETC2_R8G8B8A8_SRGB_BLOCK = 152,
+    EAC_R11_UNORM_BLOCK = 153,
+    EAC_R11_SNORM_BLOCK = 154,
+    EAC_R11G11_UNORM_BLOCK = 155,
+    EAC_R11G11_SNORM_BLOCK = 156,
+    ASTC_4X4_UNORM_BLOCK = 157,
+    ASTC_4X4_SRGB_BLOCK = 158,
+    ASTC_5X4_UNORM_BLOCK = 159,
+    ASTC_5X4_SRGB_BLOCK = 160,
+    ASTC_5X5_UNORM_BLOCK = 161,
+    ASTC_5X5_SRGB_BLOCK = 162,
+    ASTC_6X5_UNORM_BLOCK = 163,
+    ASTC_6X5_SRGB_BLOCK = 164,
+    ASTC_6X6_UNORM_BLOCK = 165,
+    ASTC_6X6_SRGB_BLOCK = 166,
+    ASTC_8X5_UNORM_BLOCK = 167,
+    ASTC_8X5_SRGB_BLOCK = 168,
+    ASTC_8X6_UNORM_BLOCK = 169,
+    ASTC_8X6_SRGB_BLOCK = 170,
+    ASTC_8X8_UNORM_BLOCK = 171,
+    ASTC_8X8_SRGB_BLOCK = 172,
+    ASTC_10X5_UNORM_BLOCK = 173,
+    ASTC_10X5_SRGB_BLOCK = 174,
+    ASTC_10X6_UNORM_BLOCK = 175,
+    ASTC_10X6_SRGB_BLOCK = 176,
+    ASTC_10X8_UNORM_BLOCK = 177,
+    ASTC_10X8_SRGB_BLOCK = 178,
+    ASTC_10X10_UNORM_BLOCK = 179,
+    ASTC_10X10_SRGB_BLOCK = 180,
+    ASTC_12X10_UNORM_BLOCK = 181,
+    ASTC_12X10_SRGB_BLOCK = 182,
+    ASTC_12X12_UNORM_BLOCK = 183,
+    ASTC_12X12_SRGB_BLOCK = 184,
+    G8B8G8R8_422_UNORM = 1000156000,
+    B8G8R8G8_422_UNORM = 1000156001,
+    G8_B8_R8_3PLANE_420_UNORM = 1000156002,
+    G8_B8R8_2PLANE_420_UNORM = 1000156003,
+    G8_B8_R8_3PLANE_422_UNORM = 1000156004,
+    G8_B8R8_2PLANE_422_UNORM = 1000156005,
+    G8_B8_R8_3PLANE_444_UNORM = 1000156006,
+    R10X6_UNORM_PACK16 = 1000156007,
+    R10X6G10X6_UNORM_2PACK16 = 1000156008,
+    R10X6G10X6B10X6A10X6_UNORM_4PACK16 = 1000156009,
+    G10X6B10X6G10X6R10X6_422_UNORM_4PACK16 = 1000156010,
+    B10X6G10X6R10X6G10X6_422_UNORM_4PACK16 = 1000156011,
+    G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16 = 1000156012,
+    G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16 = 1000156013,
+    G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16 = 1000156014,
+    G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16 = 1000156015,
+    G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16 = 1000156016,
+    R12X4_UNORM_PACK16 = 1000156017,
+    R12X4G12X4_UNORM_2PACK16 = 1000156018,
+    R12X4G12X4B12X4A12X4_UNORM_4PACK16 = 1000156019,
+    G12X4B12X4G12X4R12X4_422_UNORM_4PACK16 = 1000156020,
+    B12X4G12X4R12X4G12X4_422_UNORM_4PACK16 = 1000156021,
+    G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16 = 1000156022,
+    G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16 = 1000156023,
+    G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16 = 1000156024,
+    G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16 = 1000156025,
+    G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16 = 1000156026,
+    G16B16G16R16_422_UNORM = 1000156027,
+    B16G16R16G16_422_UNORM = 1000156028,
+    G16_B16_R16_3PLANE_420_UNORM = 1000156029,
+    G16_B16R16_2PLANE_420_UNORM = 1000156030,
+    G16_B16_R16_3PLANE_422_UNORM = 1000156031,
+    G16_B16R16_2PLANE_422_UNORM = 1000156032,
+    G16_B16_R16_3PLANE_444_UNORM = 1000156033,
+    PVRTC1_2BPP_UNORM_BLOCK_IMG = 1000054000,
+    PVRTC1_4BPP_UNORM_BLOCK_IMG = 1000054001,
+    PVRTC2_2BPP_UNORM_BLOCK_IMG = 1000054002,
+    PVRTC2_4BPP_UNORM_BLOCK_IMG = 1000054003,
+    PVRTC1_2BPP_SRGB_BLOCK_IMG = 1000054004,
+    PVRTC1_4BPP_SRGB_BLOCK_IMG = 1000054005,
+    PVRTC2_2BPP_SRGB_BLOCK_IMG = 1000054006,
+    PVRTC2_4BPP_SRGB_BLOCK_IMG = 1000054007,
+    ASTC_4X4_SFLOAT_BLOCK_EXT = 1000066000,
+    ASTC_5X4_SFLOAT_BLOCK_EXT = 1000066001,
+    ASTC_5X5_SFLOAT_BLOCK_EXT = 1000066002,
+    ASTC_6X5_SFLOAT_BLOCK_EXT = 1000066003,
+    ASTC_6X6_SFLOAT_BLOCK_EXT = 1000066004,
+    ASTC_8X5_SFLOAT_BLOCK_EXT = 1000066005,
+    ASTC_8X6_SFLOAT_BLOCK_EXT = 1000066006,
+    ASTC_8X8_SFLOAT_BLOCK_EXT = 1000066007,
+    ASTC_10X5_SFLOAT_BLOCK_EXT = 1000066008,
+    ASTC_10X6_SFLOAT_BLOCK_EXT = 1000066009,
+    ASTC_10X8_SFLOAT_BLOCK_EXT = 1000066010,
+    ASTC_10X10_SFLOAT_BLOCK_EXT = 1000066011,
+    ASTC_12X10_SFLOAT_BLOCK_EXT = 1000066012,
+    ASTC_12X12_SFLOAT_BLOCK_EXT = 1000066013,
+    G8_B8R8_2PLANE_444_UNORM_EXT = 1000330000,
+    G10X6_B10X6R10X6_2PLANE_444_UNORM_3PACK16_EXT = 1000330001,
+    G12X4_B12X4R12X4_2PLANE_444_UNORM_3PACK16_EXT = 1000330002,
+    G16_B16R16_2PLANE_444_UNORM_EXT = 1000330003,
+    A4R4G4B4_UNORM_PACK16_EXT = 1000340000,
+    A4B4G4R4_UNORM_PACK16_EXT = 1000340001,
+}
+
+impl Default for Format {
+    fn default() -> Self {
+        Self::Undefined
+    }
+}
+
+#[repr(i32)]
+#[derive(Copy, Clone, PartialEq, Eq)]
+pub enum StructureType {
+    ApplicationInfo = 0,
+    InstanceCreateInfo = 1,
+    DeviceQueueCreateInfo = 2,
+    DeviceCreateInfo = 3,
+    SubmitInfo = 4,
+    MemoryAllocateInfo = 5,
+    MappedMemoryRange = 6,
+    BindSparseInfo = 7,
+    FenceCreateInfo = 8,
+    SemaphoreCreateInfo = 9,
+    EventCreateInfo = 10,
+    QueryPoolCreateInfo = 11,
+    BufferCreateInfo = 12,
+    BufferViewCreateInfo = 13,
+    ImageCreateInfo = 14,
+    ImageViewCreateInfo = 15,
+    ShaderModuleCreateInfo = 16,
+    PipelineCacheCreateInfo = 17,
+    PipelineShaderStageCreateInfo = 18,
+    PipelineVertexInputStateCreateInfo = 19,
+    PipelineInputAssemblyStateCreateInfo = 20,
+    PipelineTessellationStateCreateInfo = 21,
+    PipelineViewportStateCreateInfo = 22,
+    PipelineRasterizationStateCreateInfo = 23,
+    PipelineMultisampleStateCreateInfo = 24,
+    PipelineDepthStencilStateCreateInfo = 25,
+    PipelineColorBlendStateCreateInfo = 26,
+    PipelineDynamicStateCreateInfo = 27,
+    GraphicsPipelineCreateInfo = 28,
+    ComputePipelineCreateInfo = 29,
+    PipelineLayoutCreateInfo = 30,
+    SamplerCreateInfo = 31,
+    DescriptorSetLayoutCreateInfo = 32,
+    DescriptorPoolCreateInfo = 33,
+    DescriptorSetAllocateInfo = 34,
+    WriteDescriptorSet = 35,
+    CopyDescriptorSet = 36,
+    FramebufferCreateInfo = 37,
+    RenderPassCreateInfo = 38,
+    CommandPoolCreateInfo = 39,
+    CommandBufferAllocateInfo = 40,
+    CommandBufferInheritanceInfo = 41,
+    CommandBufferBeginInfo = 42,
+    RenderPassBeginInfo = 43,
+    BufferMemoryBarrier = 44,
+    ImageMemoryBarrier = 45,
+    MemoryBarrier = 46,
+    LoaderInstanceCreateInfo = 47,
+    LoaderDeviceCreateInfo = 48,
+    PhysicalDeviceSubgroupProperties = 1000094000,
+    BindBufferMemoryInfo = 1000157000,
+    BindImageMemoryInfo = 1000157001,
+    PhysicalDevice16BitStorageFeatures = 1000083000,
+    MemoryDedicatedRequirements = 1000127000,
+    MemoryDedicatedAllocateInfo = 1000127001,
+    MemoryAllocateFlagsInfo = 1000060000,
+    DeviceGroupRenderPassBeginInfo = 1000060003,
+    DeviceGroupCommandBufferBeginInfo = 1000060004,
+    DeviceGroupSubmitInfo = 1000060005,
+    DeviceGroupBindSparseInfo = 1000060006,
+    BindBufferMemoryDeviceGroupInfo = 1000060013,
+    BindImageMemoryDeviceGroupInfo = 1000060014,
+    PhysicalDeviceGroupProperties = 1000070000,
+    DeviceGroupDeviceCreateInfo = 1000070001,
+    BufferMemoryRequirementsInfo2 = 1000146000,
+    ImageMemoryRequirementsInfo2 = 1000146001,
+    ImageSparseMemoryRequirementsInfo2 = 1000146002,
+    MemoryRequirements2 = 1000146003,
+    SparseImageMemoryRequirements2 = 1000146004,
+    PhysicalDeviceFeatures2 = 1000059000,
+    PhysicalDeviceProperties2 = 1000059001,
+    FormatProperties2 = 1000059002,
+    ImageFormatProperties2 = 1000059003,
+    PhysicalDeviceImageFormatInfo2 = 1000059004,
+    QueueFamilyProperties2 = 1000059005,
+    PhysicalDeviceMemoryProperties2 = 1000059006,
+    SparseImageFormatProperties2 = 1000059007,
+    PhysicalDeviceSparseImageFormatInfo2 = 1000059008,
+    PhysicalDevicePointClippingProperties = 1000117000,
+    RenderPassInputAttachmentAspectCreateInfo = 1000117001,
+    ImageViewUsageCreateInfo = 1000117002,
+    PipelineTessellationDomainOriginStateCreateInfo = 1000117003,
+    RenderPassMultiviewCreateInfo = 1000053000,
+    PhysicalDeviceMultiviewFeatures = 1000053001,
+    PhysicalDeviceMultiviewProperties = 1000053002,
+    PhysicalDeviceVariablePointersFeatures = 1000120000,
+    ProtectedSubmitInfo = 1000145000,
+    PhysicalDeviceProtectedMemoryFeatures = 1000145001,
+    PhysicalDeviceProtectedMemoryProperties = 1000145002,
+    DeviceQueueInfo2 = 1000145003,
+    SamplerYcbcrConversionCreateInfo = 1000156000,
+    SamplerYcbcrConversionInfo = 1000156001,
+    BindImagePlaneMemoryInfo = 1000156002,
+    ImagePlaneMemoryRequirementsInfo = 1000156003,
+    PhysicalDeviceSamplerYcbcrConversionFeatures = 1000156004,
+    SamplerYcbcrConversionImageFormatProperties = 1000156005,
+    DescriptorUpdateTemplateCreateInfo = 1000085000,
+    PhysicalDeviceExternalImageFormatInfo = 1000071000,
+    ExternalImageFormatProperties = 1000071001,
+    PhysicalDeviceExternalBufferInfo = 1000071002,
+    ExternalBufferProperties = 1000071003,
+    PhysicalDeviceIdProperties = 1000071004,
+    ExternalMemoryBufferCreateInfo = 1000072000,
+    ExternalMemoryImageCreateInfo = 1000072001,
+    ExportMemoryAllocateInfo = 1000072002,
+    PhysicalDeviceExternalFenceInfo = 1000112000,
+    ExternalFenceProperties = 1000112001,
+    ExportFenceCreateInfo = 1000113000,
+    ExportSemaphoreCreateInfo = 1000077000,
+    PhysicalDeviceExternalSemaphoreInfo = 1000076000,
+    ExternalSemaphoreProperties = 1000076001,
+    PhysicalDeviceMaintenance3Properties = 1000168000,
+    DescriptorSetLayoutSupport = 1000168001,
+    PhysicalDeviceShaderDrawParametersFeatures = 1000063000,
+    PhysicalDeviceVulkan11Features = 49,
+    PhysicalDeviceVulkan11Properties = 50,
+    PhysicalDeviceVulkan12Features = 51,
+    PhysicalDeviceVulkan12Properties = 52,
+    PhysicalDeviceVulkan13Features = 53,
+    PhysicalDeviceVulkan13Properties = 54,
+    ImageFormatListCreateInfo = 1000147000,
+    AttachmentDescription2 = 1000109000,
+    AttachmentReference2 = 1000109001,
+    SubpassDescription2 = 1000109002,
+    SubpassDependency2 = 1000109003,
+    RenderPassCreateInfo2 = 1000109004,
+    SubpassBeginInfo = 1000109005,
+    SubpassEndInfo = 1000109006,
+    PhysicalDevice8BitStorageFeatures = 1000177000,
+    PhysicalDeviceDriverProperties = 1000196000,
+    PhysicalDeviceShaderAtomicInt64Features = 1000180000,
+    PhysicalDeviceShaderFloat16Int8Features = 1000082000,
+    PhysicalDeviceFloatControlsProperties = 1000197000,
+    DescriptorSetLayoutBindingFlagsCreateInfo = 1000161000,
+    PhysicalDeviceDescriptorIndexingFeatures = 1000161001,
+    PhysicalDeviceDescriptorIndexingProperties = 1000161002,
+    DescriptorSetVariableDescriptorCountAllocateInfo = 1000161003,
+    DescriptorSetVariableDescriptorCountLayoutSupport = 1000161004,
+    PhysicalDeviceDepthStencilResolveProperties = 1000199000,
+    SubpassDescriptionDepthStencilResolve = 1000199001,
+    PhysicalDeviceScalarBlockLayoutFeatures = 1000221000,
+    ImageStencilUsageCreateInfo = 1000246000,
+    PhysicalDeviceSamplerFilterMinmaxProperties = 1000130000,
+    SamplerReductionModeCreateInfo = 1000130001,
+    PhysicalDeviceVulkanMemoryModelFeatures = 1000211000,
+    PhysicalDeviceImagelessFramebufferFeatures = 1000108000,
+    FramebufferAttachmentsCreateInfo = 1000108001,
+    FramebufferAttachmentImageInfo = 1000108002,
+    RenderPassAttachmentBeginInfo = 1000108003,
+    PhysicalDeviceUniformBufferStandardLayoutFeatures = 1000253000,
+    PhysicalDeviceShaderSubgroupExtendedTypesFeatures = 1000175000,
+    PhysicalDeviceSeparateDepthStencilLayoutsFeatures = 1000241000,
+    AttachmentReferenceStencilLayout = 1000241001,
+    AttachmentDescriptionStencilLayout = 1000241002,
+    PhysicalDeviceHostQueryResetFeatures = 1000261000,
+    PhysicalDeviceTimelineSemaphoreFeatures = 1000207000,
+    PhysicalDeviceTimelineSemaphoreProperties = 1000207001,
+    SemaphoreTypeCreateInfo = 1000207002,
+    TimelineSemaphoreSubmitInfo = 1000207003,
+    SemaphoreWaitInfo = 1000207004,
+    SemaphoreSignalInfo = 1000207005,
+    PhysicalDeviceBufferDeviceAddressFeatures = 1000257000,
+    BufferDeviceAddressInfo = 1000244001,
+    BufferOpaqueCaptureAddressCreateInfo = 1000257002,
+    MemoryOpaqueCaptureAddressAllocateInfo = 1000257003,
+    DeviceMemoryOpaqueCaptureAddressInfo = 1000257004,
+    SwapchainCreateInfoKhr = 1000001000,
+    PresentInfoKhr = 1000001001,
+    DeviceGroupPresentCapabilitiesKhr = 1000060007,
+    ImageSwapchainCreateInfoKhr = 1000060008,
+    BindImageMemorySwapchainInfoKhr = 1000060009,
+    AcquireNextImageInfoKhr = 1000060010,
+    DeviceGroupPresentInfoKhr = 1000060011,
+    DeviceGroupSwapchainCreateInfoKhr = 1000060012,
+    DisplayModeCreateInfoKhr = 1000002000,
+    DisplaySurfaceCreateInfoKhr = 1000002001,
+    DisplayPresentInfoKhr = 1000003000,
+    DebugReportCallbackCreateInfoExt = 1000011000,
+    PipelineRasterizationStateRasterizationOrderAmd = 1000018000,
+    DebugMarkerObjectNameInfoExt = 1000022000,
+    DebugMarkerObjectTagInfoExt = 1000022001,
+    DebugMarkerMarkerInfoExt = 1000022002,
+    VideoProfileKhr = 1000023000,
+    VideoCapabilitiesKhr = 1000023001,
+    VideoPictureResourceKhr = 1000023002,
+    VideoGetMemoryPropertiesKhr = 1000023003,
+    VideoBindMemoryKhr = 1000023004,
+    VideoSessionCreateInfoKhr = 1000023005,
+    VideoSessionParametersCreateInfoKhr = 1000023006,
+    VideoSessionParametersUpdateInfoKhr = 1000023007,
+    VideoBeginCodingInfoKhr = 1000023008,
+    VideoEndCodingInfoKhr = 1000023009,
+    VideoCodingControlInfoKhr = 1000023010,
+    VideoReferenceSlotKhr = 1000023011,
+    VideoQueueFamilyProperties2Khr = 1000023012,
+    VideoProfilesKhr = 1000023013,
+    PhysicalDeviceVideoFormatInfoKhr = 1000023014,
+    VideoFormatPropertiesKhr = 1000023015,
+    VideoDecodeInfoKhr = 1000024000,
+    DedicatedAllocationImageCreateInfoNv = 1000026000,
+    DedicatedAllocationBufferCreateInfoNv = 1000026001,
+    DedicatedAllocationMemoryAllocateInfoNv = 1000026002,
+    PhysicalDeviceTransformFeedbackFeaturesExt = 1000028000,
+    PhysicalDeviceTransformFeedbackPropertiesExt = 1000028001,
+    PipelineRasterizationStateStreamCreateInfoExt = 1000028002,
+    CuModuleCreateInfoNvx = 1000029000,
+    CuFunctionCreateInfoNvx = 1000029001,
+    CuLaunchInfoNvx = 1000029002,
+    ImageViewHandleInfoNvx = 1000030000,
+    ImageViewAddressPropertiesNvx = 1000030001,
+    TextureLodGatherFormatPropertiesAmd = 1000041000,
+    PhysicalDeviceCornerSampledImageFeaturesNv = 1000050000,
+    ExternalMemoryImageCreateInfoNv = 1000056000,
+    ExportMemoryAllocateInfoNv = 1000056001,
+    ValidationFlagsExt = 1000061000,
+    PhysicalDeviceTextureCompressionAstcHdrFeaturesExt = 1000066000,
+    ImageViewAstcDecodeModeExt = 1000067000,
+    PhysicalDeviceAstcDecodeFeaturesExt = 1000067001,
+    ImportMemoryFdInfoKhr = 1000074000,
+    MemoryFdPropertiesKhr = 1000074001,
+    MemoryGetFdInfoKhr = 1000074002,
+    ImportSemaphoreFdInfoKhr = 1000079000,
+    SemaphoreGetFdInfoKhr = 1000079001,
+    PhysicalDevicePushDescriptorPropertiesKhr = 1000080000,
+    CommandBufferInheritanceConditionalRenderingInfoExt = 1000081000,
+    PhysicalDeviceConditionalRenderingFeaturesExt = 1000081001,
+    ConditionalRenderingBeginInfoExt = 1000081002,
+    PresentRegionsKhr = 1000084000,
+    PipelineViewportWScalingStateCreateInfoNv = 1000087000,
+    SurfaceCapabilities2Ext = 1000090000,
+    DisplayPowerInfoExt = 1000091000,
+    DeviceEventInfoExt = 1000091001,
+    DisplayEventInfoExt = 1000091002,
+    SwapchainCounterCreateInfoExt = 1000091003,
+    PresentTimesInfoGoogle = 1000092000,
+    PhysicalDeviceMultiviewPerViewAttributesPropertiesNvx = 1000097000,
+    PipelineViewportSwizzleStateCreateInfoNv = 1000098000,
+    PhysicalDeviceDiscardRectanglePropertiesExt = 1000099000,
+    PipelineDiscardRectangleStateCreateInfoExt = 1000099001,
+    PhysicalDeviceConservativeRasterizationPropertiesExt = 1000101000,
+    PipelineRasterizationConservativeStateCreateInfoExt = 1000101001,
+    PhysicalDeviceDepthClipEnableFeaturesExt = 1000102000,
+    PipelineRasterizationDepthClipStateCreateInfoExt = 1000102001,
+    HdrMetadataExt = 1000105000,
+    SharedPresentSurfaceCapabilitiesKhr = 1000111000,
+    ImportFenceFdInfoKhr = 1000115000,
+    FenceGetFdInfoKhr = 1000115001,
+    PhysicalDevicePerformanceQueryFeaturesKhr = 1000116000,
+    PhysicalDevicePerformanceQueryPropertiesKhr = 1000116001,
+    QueryPoolPerformanceCreateInfoKhr = 1000116002,
+    PerformanceQuerySubmitInfoKhr = 1000116003,
+    AcquireProfilingLockInfoKhr = 1000116004,
+    PerformanceCounterKhr = 1000116005,
+    PerformanceCounterDescriptionKhr = 1000116006,
+    PhysicalDeviceSurfaceInfo2Khr = 1000119000,
+    SurfaceCapabilities2Khr = 1000119001,
+    SurfaceFormat2Khr = 1000119002,
+    DisplayProperties2Khr = 1000121000,
+    DisplayPlaneProperties2Khr = 1000121001,
+    DisplayModeProperties2Khr = 1000121002,
+    DisplayPlaneInfo2Khr = 1000121003,
+    DisplayPlaneCapabilities2Khr = 1000121004,
+    DebugUtilsObjectNameInfoExt = 1000128000,
+    DebugUtilsObjectTagInfoExt = 1000128001,
+    DebugUtilsLabelExt = 1000128002,
+    DebugUtilsMessengerCallbackDataExt = 1000128003,
+    DebugUtilsMessengerCreateInfoExt = 1000128004,
+    PhysicalDeviceInlineUniformBlockFeaturesExt = 1000138000,
+    PhysicalDeviceInlineUniformBlockPropertiesExt = 1000138001,
+    WriteDescriptorSetInlineUniformBlockExt = 1000138002,
+    DescriptorPoolInlineUniformBlockCreateInfoExt = 1000138003,
+    SampleLocationsInfoExt = 1000143000,
+    RenderPassSampleLocationsBeginInfoExt = 1000143001,
+    PipelineSampleLocationsStateCreateInfoExt = 1000143002,
+    PhysicalDeviceSampleLocationsPropertiesExt = 1000143003,
+    MultisamplePropertiesExt = 1000143004,
+    PhysicalDeviceBlendOperationAdvancedFeaturesExt = 1000148000,
+    PhysicalDeviceBlendOperationAdvancedPropertiesExt = 1000148001,
+    PipelineColorBlendAdvancedStateCreateInfoExt = 1000148002,
+    PipelineCoverageToColorStateCreateInfoNv = 1000149000,
+    WriteDescriptorSetAccelerationStructureKhr = 1000150007,
+    AccelerationStructureBuildGeometryInfoKhr = 1000150000,
+    AccelerationStructureDeviceAddressInfoKhr = 1000150002,
+    AccelerationStructureGeometryAabbsDataKhr = 1000150003,
+    AccelerationStructureGeometryInstancesDataKhr = 1000150004,
+    AccelerationStructureGeometryTrianglesDataKhr = 1000150005,
+    AccelerationStructureGeometryKhr = 1000150006,
+    AccelerationStructureVersionInfoKhr = 1000150009,
+    CopyAccelerationStructureInfoKhr = 1000150010,
+    CopyAccelerationStructureToMemoryInfoKhr = 1000150011,
+    CopyMemoryToAccelerationStructureInfoKhr = 1000150012,
+    PhysicalDeviceAccelerationStructureFeaturesKhr = 1000150013,
+    PhysicalDeviceAccelerationStructurePropertiesKhr = 1000150014,
+    AccelerationStructureCreateInfoKhr = 1000150017,
+    AccelerationStructureBuildSizesInfoKhr = 1000150020,
+    PhysicalDeviceRayTracingPipelineFeaturesKhr = 1000347000,
+    PhysicalDeviceRayTracingPipelinePropertiesKhr = 1000347001,
+    RayTracingPipelineCreateInfoKhr = 1000150015,
+    RayTracingShaderGroupCreateInfoKhr = 1000150016,
+    RayTracingPipelineInterfaceCreateInfoKhr = 1000150018,
+    PhysicalDeviceRayQueryFeaturesKhr = 1000348013,
+    PipelineCoverageModulationStateCreateInfoNv = 1000152000,
+    PhysicalDeviceShaderSmBuiltinsFeaturesNv = 1000154000,
+    PhysicalDeviceShaderSmBuiltinsPropertiesNv = 1000154001,
+    DrmFormatModifierPropertiesListExt = 1000158000,
+    PhysicalDeviceImageDrmFormatModifierInfoExt = 1000158002,
+    ImageDrmFormatModifierListCreateInfoExt = 1000158003,
+    ImageDrmFormatModifierExplicitCreateInfoExt = 1000158004,
+    ImageDrmFormatModifierPropertiesExt = 1000158005,
+    ValidationCacheCreateInfoExt = 1000160000,
+    ShaderModuleValidationCacheCreateInfoExt = 1000160001,
+    PhysicalDevicePortabilitySubsetFeaturesKhr = 1000163000,
+    PhysicalDevicePortabilitySubsetPropertiesKhr = 1000163001,
+    PipelineViewportShadingRateImageStateCreateInfoNv = 1000164000,
+    PhysicalDeviceShadingRateImageFeaturesNv = 1000164001,
+    PhysicalDeviceShadingRateImagePropertiesNv = 1000164002,
+    PipelineViewportCoarseSampleOrderStateCreateInfoNv = 1000164005,
+    RayTracingPipelineCreateInfoNv = 1000165000,
+    AccelerationStructureCreateInfoNv = 1000165001,
+    GeometryNv = 1000165003,
+    GeometryTrianglesNv = 1000165004,
+    GeometryAabbNv = 1000165005,
+    BindAccelerationStructureMemoryInfoNv = 1000165006,
+    WriteDescriptorSetAccelerationStructureNv = 1000165007,
+    AccelerationStructureMemoryRequirementsInfoNv = 1000165008,
+    PhysicalDeviceRayTracingPropertiesNv = 1000165009,
+    RayTracingShaderGroupCreateInfoNv = 1000165011,
+    AccelerationStructureInfoNv = 1000165012,
+    PhysicalDeviceRepresentativeFragmentTestFeaturesNv = 1000166000,
+    PipelineRepresentativeFragmentTestStateCreateInfoNv = 1000166001,
+    PhysicalDeviceImageViewImageFormatInfoExt = 1000170000,
+    FilterCubicImageViewImageFormatPropertiesExt = 1000170001,
+    DeviceQueueGlobalPriorityCreateInfoExt = 1000174000,
+    ImportMemoryHostPointerInfoExt = 1000178000,
+    MemoryHostPointerPropertiesExt = 1000178001,
+    PhysicalDeviceExternalMemoryHostPropertiesExt = 1000178002,
+    PhysicalDeviceShaderClockFeaturesKhr = 1000181000,
+    PipelineCompilerControlCreateInfoAmd = 1000183000,
+    CalibratedTimestampInfoExt = 1000184000,
+    PhysicalDeviceShaderCorePropertiesAmd = 1000185000,
+    DeviceMemoryOverallocationCreateInfoAmd = 1000189000,
+    PhysicalDeviceVertexAttributeDivisorPropertiesExt = 1000190000,
+    PipelineVertexInputDivisorStateCreateInfoExt = 1000190001,
+    PhysicalDeviceVertexAttributeDivisorFeaturesExt = 1000190002,
+    PipelineCreationFeedbackCreateInfoExt = 1000192000,
+    PhysicalDeviceComputeShaderDerivativesFeaturesNv = 1000201000,
+    PhysicalDeviceMeshShaderFeaturesNv = 1000202000,
+    PhysicalDeviceMeshShaderPropertiesNv = 1000202001,
+    PhysicalDeviceFragmentShaderBarycentricFeaturesNv = 1000203000,
+    PhysicalDeviceShaderImageFootprintFeaturesNv = 1000204000,
+    PipelineViewportExclusiveScissorStateCreateInfoNv = 1000205000,
+    PhysicalDeviceExclusiveScissorFeaturesNv = 1000205002,
+    CheckpointDataNv = 1000206000,
+    QueueFamilyCheckpointPropertiesNv = 1000206001,
+    PhysicalDeviceShaderIntegerFunctions2FeaturesIntel = 1000209000,
+    QueryPoolPerformanceQueryCreateInfoIntel = 1000210000,
+    InitializePerformanceApiInfoIntel = 1000210001,
+    PerformanceMarkerInfoIntel = 1000210002,
+    PerformanceStreamMarkerInfoIntel = 1000210003,
+    PerformanceOverrideInfoIntel = 1000210004,
+    PerformanceConfigurationAcquireInfoIntel = 1000210005,
+    PhysicalDevicePciBusInfoPropertiesExt = 1000212000,
+    DisplayNativeHdrSurfaceCapabilitiesAmd = 1000213000,
+    SwapchainDisplayNativeHdrCreateInfoAmd = 1000213001,
+    PhysicalDeviceShaderTerminateInvocationFeaturesKhr = 1000215000,
+    PhysicalDeviceFragmentDensityMapFeaturesExt = 1000218000,
+    PhysicalDeviceFragmentDensityMapPropertiesExt = 1000218001,
+    RenderPassFragmentDensityMapCreateInfoExt = 1000218002,
+    PhysicalDeviceSubgroupSizeControlPropertiesExt = 1000225000,
+    PipelineShaderStageRequiredSubgroupSizeCreateInfoExt = 1000225001,
+    PhysicalDeviceSubgroupSizeControlFeaturesExt = 1000225002,
+    FragmentShadingRateAttachmentInfoKhr = 1000226000,
+    PipelineFragmentShadingRateStateCreateInfoKhr = 1000226001,
+    PhysicalDeviceFragmentShadingRatePropertiesKhr = 1000226002,
+    PhysicalDeviceFragmentShadingRateFeaturesKhr = 1000226003,
+    PhysicalDeviceFragmentShadingRateKhr = 1000226004,
+    PhysicalDeviceShaderCoreProperties2Amd = 1000227000,
+    PhysicalDeviceCoherentMemoryFeaturesAmd = 1000229000,
+    PhysicalDeviceShaderImageAtomicInt64FeaturesExt = 1000234000,
+    PhysicalDeviceMemoryBudgetPropertiesExt = 1000237000,
+    PhysicalDeviceMemoryPriorityFeaturesExt = 1000238000,
+    MemoryPriorityAllocateInfoExt = 1000238001,
+    SurfaceProtectedCapabilitiesKhr = 1000239000,
+    PhysicalDeviceDedicatedAllocationImageAliasingFeaturesNv = 1000240000,
+    PhysicalDeviceBufferDeviceAddressFeaturesExt = 1000244000,
+    BufferDeviceAddressCreateInfoExt = 1000244002,
+    PhysicalDeviceToolPropertiesExt = 1000245000,
+    ValidationFeaturesExt = 1000247000,
+    PhysicalDevicePresentWaitFeaturesKhr = 1000248000,
+    PhysicalDeviceCooperativeMatrixFeaturesNv = 1000249000,
+    CooperativeMatrixPropertiesNv = 1000249001,
+    PhysicalDeviceCooperativeMatrixPropertiesNv = 1000249002,
+    PhysicalDeviceCoverageReductionModeFeaturesNv = 1000250000,
+    PipelineCoverageReductionStateCreateInfoNv = 1000250001,
+    FramebufferMixedSamplesCombinationNv = 1000250002,
+    PhysicalDeviceFragmentShaderInterlockFeaturesExt = 1000251000,
+    PhysicalDeviceYcbcrImageArraysFeaturesExt = 1000252000,
+    PhysicalDeviceProvokingVertexFeaturesExt = 1000254000,
+    PipelineRasterizationProvokingVertexStateCreateInfoExt = 1000254001,
+    PhysicalDeviceProvokingVertexPropertiesExt = 1000254002,
+    HeadlessSurfaceCreateInfoExt = 1000256000,
+    PhysicalDeviceLineRasterizationFeaturesExt = 1000259000,
+    PipelineRasterizationLineStateCreateInfoExt = 1000259001,
+    PhysicalDeviceLineRasterizationPropertiesExt = 1000259002,
+    PhysicalDeviceShaderAtomicFloatFeaturesExt = 1000260000,
+    PhysicalDeviceIndexTypeUint8FeaturesExt = 1000265000,
+    PhysicalDeviceExtendedDynamicStateFeaturesExt = 1000267000,
+    PhysicalDevicePipelineExecutablePropertiesFeaturesKhr = 1000269000,
+    PipelineInfoKhr = 1000269001,
+    PipelineExecutablePropertiesKhr = 1000269002,
+    PipelineExecutableInfoKhr = 1000269003,
+    PipelineExecutableStatisticKhr = 1000269004,
+    PipelineExecutableInternalRepresentationKhr = 1000269005,
+    PhysicalDeviceShaderAtomicFloat2FeaturesExt = 1000273000,
+    PhysicalDeviceShaderDemoteToHelperInvocationFeaturesExt = 1000276000,
+    PhysicalDeviceDeviceGeneratedCommandsPropertiesNv = 1000277000,
+    GraphicsShaderGroupCreateInfoNv = 1000277001,
+    GraphicsPipelineShaderGroupsCreateInfoNv = 1000277002,
+    IndirectCommandsLayoutTokenNv = 1000277003,
+    IndirectCommandsLayoutCreateInfoNv = 1000277004,
+    GeneratedCommandsInfoNv = 1000277005,
+    GeneratedCommandsMemoryRequirementsInfoNv = 1000277006,
+    PhysicalDeviceDeviceGeneratedCommandsFeaturesNv = 1000277007,
+    PhysicalDeviceInheritedViewportScissorFeaturesNv = 1000278000,
+    CommandBufferInheritanceViewportScissorInfoNv = 1000278001,
+    PhysicalDeviceShaderIntegerDotProductFeaturesKhr = 1000280000,
+    PhysicalDeviceShaderIntegerDotProductPropertiesKhr = 1000280001,
+    PhysicalDeviceTexelBufferAlignmentFeaturesExt = 1000281000,
+    PhysicalDeviceTexelBufferAlignmentPropertiesExt = 1000281001,
+    CommandBufferInheritanceRenderPassTransformInfoQcom = 1000282000,
+    RenderPassTransformBeginInfoQcom = 1000282001,
+    PhysicalDeviceDeviceMemoryReportFeaturesExt = 1000284000,
+    DeviceDeviceMemoryReportCreateInfoExt = 1000284001,
+    DeviceMemoryReportCallbackDataExt = 1000284002,
+    PhysicalDeviceRobustness2FeaturesExt = 1000286000,
+    PhysicalDeviceRobustness2PropertiesExt = 1000286001,
+    SamplerCustomBorderColorCreateInfoExt = 1000287000,
+    PhysicalDeviceCustomBorderColorPropertiesExt = 1000287001,
+    PhysicalDeviceCustomBorderColorFeaturesExt = 1000287002,
+    PipelineLibraryCreateInfoKhr = 1000290000,
+    PresentIdKhr = 1000294000,
+    PhysicalDevicePresentIdFeaturesKhr = 1000294001,
+    PhysicalDevicePrivateDataFeaturesExt = 1000295000,
+    DevicePrivateDataCreateInfoExt = 1000295001,
+    PrivateDataSlotCreateInfoExt = 1000295002,
+    PhysicalDevicePipelineCreationCacheControlFeaturesExt = 1000297000,
+    VideoEncodeInfoKhr = 1000299000,
+    VideoEncodeRateControlInfoKhr = 1000299001,
+    PhysicalDeviceDiagnosticsConfigFeaturesNv = 1000300000,
+    DeviceDiagnosticsConfigCreateInfoNv = 1000300001,
+    MemoryBarrier2 = 1000314000,
+    BufferMemoryBarrier2 = 1000314001,
+    ImageMemoryBarrier2 = 1000314002,
+    DependencyInfo = 1000314003,
+    SubmitInfo2 = 1000314004,
+    SemaphoreSubmitInfo = 1000314005,
+    CommandBufferSubmitInfo = 1000314006,
+    PhysicalDeviceSynchronization2FeaturesKhr = 1000314007,
+    QueueFamilyCheckpointProperties2Nv = 1000314008,
+    CheckpointData2Nv = 1000314009,
+    PhysicalDeviceShaderSubgroupUniformControlFlowFeaturesKhr = 1000323000,
+    PhysicalDeviceZeroInitializeWorkgroupMemoryFeaturesKhr = 1000325000,
+    PhysicalDeviceFragmentShadingRateEnumsPropertiesNv = 1000326000,
+    PhysicalDeviceFragmentShadingRateEnumsFeaturesNv = 1000326001,
+    PipelineFragmentShadingRateEnumStateCreateInfoNv = 1000326002,
+    AccelerationStructureGeometryMotionTrianglesDataNv = 1000327000,
+    PhysicalDeviceRayTracingMotionBlurFeaturesNv = 1000327001,
+    AccelerationStructureMotionInfoNv = 1000327002,
+    PhysicalDeviceYcbcr2Plane444FormatsFeaturesExt = 1000330000,
+    PhysicalDeviceFragmentDensityMap2FeaturesExt = 1000332000,
+    PhysicalDeviceFragmentDensityMap2PropertiesExt = 1000332001,
+    CopyCommandTransformInfoQcom = 1000333000,
+    PhysicalDeviceImageRobustnessFeaturesExt = 1000335000,
+    PhysicalDeviceWorkgroupMemoryExplicitLayoutFeaturesKhr = 1000336000,
+    CopyBufferInfo2Khr = 1000337000,
+    CopyImageInfo2Khr = 1000337001,
+    CopyBufferToImageInfo2Khr = 1000337002,
+    CopyImageToBufferInfo2Khr = 1000337003,
+    BlitImageInfo2Khr = 1000337004,
+    ResolveImageInfo2Khr = 1000337005,
+    BufferCopy2Khr = 1000337006,
+    ImageCopy2Khr = 1000337007,
+    ImageBlit2Khr = 1000337008,
+    BufferImageCopy2Khr = 1000337009,
+    ImageResolve2Khr = 1000337010,
+    PhysicalDevice4444FormatsFeaturesExt = 1000340000,
+    PhysicalDeviceMutableDescriptorTypeFeaturesValve = 1000351000,
+    MutableDescriptorTypeCreateInfoValve = 1000351002,
+    PhysicalDeviceVertexInputDynamicStateFeaturesExt = 1000352000,
+    VertexInputBindingDescription2Ext = 1000352001,
+    VertexInputAttributeDescription2Ext = 1000352002,
+    PhysicalDeviceDrmPropertiesExt = 1000353000,
+    PhysicalDevicePrimitiveTopologyListRestartFeaturesExt = 1000356000,
+    SubpassShadingPipelineCreateInfoHuawei = 1000369000,
+    PhysicalDeviceSubpassShadingFeaturesHuawei = 1000369001,
+    PhysicalDeviceSubpassShadingPropertiesHuawei = 1000369002,
+    PhysicalDeviceInvocationMaskFeaturesHuawei = 1000370000,
+    MemoryGetRemoteAddressInfoNv = 1000371000,
+    PhysicalDeviceExternalMemoryRdmaFeaturesNv = 1000371001,
+    PhysicalDeviceExtendedDynamicState2FeaturesExt = 1000377000,
+    PhysicalDeviceColorWriteEnableFeaturesExt = 1000381000,
+    PipelineColorWriteCreateInfoExt = 1000381001,
+    PhysicalDeviceGlobalPriorityQueryFeaturesExt = 1000388000,
+    QueueFamilyGlobalPriorityPropertiesExt = 1000388001,
+    PhysicalDeviceMultiDrawFeaturesExt = 1000392000,
+    PhysicalDeviceMultiDrawPropertiesExt = 1000392001,
+    RenderingInfo = 1000044000,
+    RenderingAttachmentInfo = 1000044001,
+    PipelineRenderingCreateInfo = 1000044002,
+    PhysicalDeviceDynamicRenderingFeatures = 1000044003,
+    CommandBufferInheritanceRenderingInfo = 1000044004,
+}
+
+#[repr(i32)]
+#[derive(Copy, Clone, PartialEq, Eq)]
+pub enum ObjectType {
+    Unknown = 0,
+    Instance = 1,
+    PhysicalDevice = 2,
+    Device = 3,
+    Queue = 4,
+    Semaphore = 5,
+    CommandBuffer = 6,
+    Fence = 7,
+    DeviceMemory = 8,
+    Buffer = 9,
+    Image = 10,
+    Event = 11,
+    QueryPool = 12,
+    BufferView = 13,
+    ImageView = 14,
+    ShaderModule = 15,
+    PipelineCache = 16,
+    PipelineLayout = 17,
+    RenderPass = 18,
+    Pipeline = 19,
+    DescriptorSetLayout = 20,
+    Sampler = 21,
+    DescriptorPool = 22,
+    DescriptorSet = 23,
+    Framebuffer = 24,
+    CommandPool = 25,
+    SamplerYcbcrConversion = 1000156000,
+    DescriptorUpdateTemplate = 1000085000,
+    SurfaceKhr = 1000000000,
+    SwapchainKhr = 1000001000,
+    DisplayKhr = 1000002000,
+    DisplayModeKhr = 1000002001,
+    DebugReportCallbackExt = 1000011000,
+    VideoSessionKhr = 1000023000,
+    VideoSessionParametersKhr = 1000023001,
+    CuModuleNvx = 1000029000,
+    CuFunctionNvx = 1000029001,
+    DebugUtilsMessengerExt = 1000128000,
+    AccelerationStructureKhr = 1000150000,
+    ValidationCacheExt = 1000160000,
+    AccelerationStructureNv = 1000165000,
+    PerformanceConfigurationIntel = 1000210000,
+    DeferredOperationKhr = 1000268000,
+    IndirectCommandsLayoutNv = 1000277000,
+    PrivateDataSlotExt = 1000295000,
+}
diff --git a/vulkan-sys/src/flags.rs b/vulkan-sys/src/flags.rs
new file mode 100644 (file)
index 0000000..9b7afb1
--- /dev/null
@@ -0,0 +1,6780 @@
+#[repr(C)]
+pub struct InstanceCreateFlags(u32);
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct DeviceCreateFlags(u32);
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct DeviceQueueCreateFlags(u32);
+impl DeviceQueueCreateFlags {
+    pub const PROTECTED: Self = Self(1);
+}
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct SurfaceTransformFlagsKHR(u32);
+impl SurfaceTransformFlagsKHR {
+    pub const IDENTITY: Self = Self(1);
+    pub const ROTATE_90: Self = Self(2);
+    pub const ROTATE_180: Self = Self(4);
+    pub const ROTATE_270: Self = Self(8);
+    pub const HORIZONTAL_MIRROR: Self = Self(16);
+    pub const HORIZONTAL_MIRROR_ROTATE_90: Self = Self(32);
+    pub const HORIZONTAL_MIRROR_ROTATE_180: Self = Self(64);
+    pub const HORIZONTAL_MIRROR_ROTATE_270: Self = Self(128);
+    pub const INHERIT: Self = Self(256);
+}
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct SwapchainCreateFlagsKHR(u32);
+impl SwapchainCreateFlagsKHR {
+    /// Allow images with VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT
+    pub const SPLIT_INSTANCE_BIND_REGIONS: Self = Self(1);
+    /// Swapchain is protected
+    pub const PROTECTED: Self = Self(2);
+    pub const MUTABLE_FORMAT: Self = Self(4);
+}
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct CompositeAlphaFlagsKHR(u32);
+impl CompositeAlphaFlagsKHR {
+    pub const OPAQUE: Self = Self(1);
+    pub const PRE_MULTIPLIED: Self = Self(2);
+    pub const POST_MULTIPLIED: Self = Self(4);
+    pub const INHERIT: Self = Self(8);
+}
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct SampleCountFlags(u32);
+impl SampleCountFlags {
+    /// Sample count 1 supported
+    pub const SAMPLE_COUNT_1: Self = Self(1);
+    /// Sample count 2 supported
+    pub const SAMPLE_COUNT_2: Self = Self(2);
+    /// Sample count 4 supported
+    pub const SAMPLE_COUNT_4: Self = Self(4);
+    /// Sample count 8 supported
+    pub const SAMPLE_COUNT_8: Self = Self(8);
+    /// Sample count 16 supported
+    pub const SAMPLE_COUNT_16: Self = Self(16);
+    /// Sample count 32 supported
+    pub const SAMPLE_COUNT_32: Self = Self(32);
+    /// Sample count 64 supported
+    pub const SAMPLE_COUNT_64: Self = Self(64);
+}
+
+#[repr(C)]
+pub struct MemoryPropertyFlags(u32);
+impl MemoryPropertyFlags {
+    /// Unless otherwise stated, allocate memory on the device
+    pub const DEVICE_LOCAL: Self = Self(1);
+    /// Memory is mappable by host
+    pub const HOST_VISIBLE: Self = Self(2);
+    /// Memory will have I/O coherency. If not set, the application may need to use vkFlushMappedMemoryRanges and vkInvalidateMappedMemoryRanges to flush/invalidate the host cache
+    pub const HOST_COHERENT: Self = Self(4);
+    /// Memory will be cached by the host
+    pub const HOST_CACHED: Self = Self(8);
+    /// Memory may be allocated by the driver when it is required
+    pub const LAZILY_ALLOCATED: Self = Self(16);
+    /// Memory is protected
+    pub const PROTECTED: Self = Self(32);
+    pub const DEVICE_COHERENT_AMD: Self = Self(64);
+    pub const DEVICE_UNCACHED_AMD: Self = Self(128);
+    pub const RDMA_CAPABLE_NV: Self = Self(256);
+}
+
+#[repr(C)]
+pub struct MemoryHeapFlags(u32);
+impl MemoryHeapFlags {
+    /// If set, heap represents device memory
+    pub const DEVICE_LOCAL: Self = Self(1);
+    /// If set, heap allocations allocate multiple instances by default
+    pub const MULTI_INSTANCE: Self = Self(2);
+}
+
+#[repr(C)]
+pub struct MemoryMapFlags(u32);
+
+#[repr(C)]
+pub struct ImageAspectFlags(u32);
+impl ImageAspectFlags {
+    pub const COLOR: Self = Self(1);
+    pub const DEPTH: Self = Self(2);
+    pub const STENCIL: Self = Self(4);
+    pub const METADATA: Self = Self(8);
+    pub const PLANE_0: Self = Self(16);
+    pub const PLANE_1: Self = Self(32);
+    pub const PLANE_2: Self = Self(64);
+    pub const MEMORY_PLANE_0_EXT: Self = Self(128);
+    pub const MEMORY_PLANE_1_EXT: Self = Self(256);
+    pub const MEMORY_PLANE_2_EXT: Self = Self(512);
+    pub const MEMORY_PLANE_3_EXT: Self = Self(1024);
+}
+
+#[repr(C)]
+pub struct ImageViewCreateFlags(u32);
+impl ImageViewCreateFlags {
+    pub const FRAGMENT_DENSITY_MAP_DYNAMIC_EXT: Self = Self(1);
+    pub const FRAGMENT_DENSITY_MAP_DEFERRED_EXT: Self = Self(2);
+}
+
+#[repr(C)]
+pub struct SparseMemoryBindFlags(u32);
+impl SparseMemoryBindFlags {
+    /// Operation binds resource metadata to memory
+    pub const METADATA: Self = Self(1);
+}
+
+#[repr(C)]
+pub struct SparseImageFormatFlags(u32);
+impl SparseImageFormatFlags {
+    /// Image uses a single mip tail region for all array layers
+    pub const SINGLE_MIPTAIL: Self = Self(1);
+    /// Image requires mip level dimensions to be an integer multiple of the sparse image block dimensions for non-tail mip levels.
+    pub const ALIGNED_MIP_SIZE: Self = Self(2);
+    /// Image uses non-standard sparse image block dimensions
+    pub const NONSTANDARD_BLOCK_SIZE: Self = Self(4);
+}
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct QueueFlags(u32);
+impl QueueFlags {
+    /// Queue supports graphics operations
+    pub const GRAPHICS: Self = Self(1);
+    /// Queue supports compute operations
+    pub const COMPUTE: Self = Self(2);
+    /// Queue supports transfer operations
+    pub const TRANSFER: Self = Self(4);
+    /// Queue supports sparse resource memory management operations
+    pub const SPARSE_BINDING: Self = Self(8);
+    /// Queues may support protected operations
+    pub const PROTECTED: Self = Self(16);
+    pub const VIDEO_DECODE_KHR: Self = Self(32);
+    pub const VIDEO_ENCODE_KHR: Self = Self(64);
+}
+
+#[repr(C)]
+pub struct ImageUsageFlags(u32);
+impl ImageUsageFlags {
+    /// Can be used as a source of transfer operations
+    pub const TRANSFER_SRC: Self = Self(1);
+    /// Can be used as a destination of transfer operations
+    pub const TRANSFER_DST: Self = Self(2);
+    /// Can be sampled from (SAMPLED_IMAGE and COMBINED_IMAGE_SAMPLER descriptor types)
+    pub const SAMPLED: Self = Self(4);
+    /// Can be used as storage image (STORAGE_IMAGE descriptor type)
+    pub const STORAGE: Self = Self(8);
+    /// Can be used as framebuffer color attachment
+    pub const COLOR_ATTACHMENT: Self = Self(16);
+    /// Can be used as framebuffer depth/stencil attachment
+    pub const DEPTH_STENCIL_ATTACHMENT: Self = Self(32);
+    /// Image data not needed outside of rendering
+    pub const TRANSIENT_ATTACHMENT: Self = Self(64);
+    /// Can be used as framebuffer input attachment
+    pub const INPUT_ATTACHMENT: Self = Self(128);
+    pub const VIDEO_DECODE_DST_KHR: Self = Self(1024);
+    pub const VIDEO_DECODE_SRC_KHR: Self = Self(2048);
+    pub const VIDEO_DECODE_DPB_KHR: Self = Self(4096);
+    pub const FRAGMENT_DENSITY_MAP_EXT: Self = Self(512);
+    pub const FRAGMENT_SHADING_RATE_ATTACHMENT_KHR: Self = Self(256);
+    pub const VIDEO_ENCODE_DST_KHR: Self = Self(8192);
+    pub const VIDEO_ENCODE_SRC_KHR: Self = Self(16384);
+    pub const VIDEO_ENCODE_DPB_KHR: Self = Self(32768);
+    pub const INVOCATION_MASK_HUAWEI: Self = Self(262144);
+}
+
+#[repr(C)]
+pub struct ImageCreateFlags(u32);
+
+impl ImageCreateFlags {
+    /// Image should support sparse backing
+    pub const SPARSE_BINDING: Self = Self(1);
+    /// Image should support sparse backing with partial residency
+    pub const SPARSE_RESIDENCY: Self = Self(2);
+    /// Image should support constant data access to physical memory ranges mapped into multiple locations of sparse images
+    pub const SPARSE_ALIASED: Self = Self(4);
+    /// Allows image views to have a different format than the base image
+    pub const MUTABLE_FORMAT: Self = Self(8);
+    /// Allows creating image views with cube type from the created image
+    pub const CUBE_COMPATIBLE: Self = Self(16);
+    pub const ALIAS: Self = Self(1024);
+    /// Allows using VkBindImageMemoryDeviceGroupInfo::pSplitInstanceBindRegions when binding memory to the image
+    pub const SPLIT_INSTANCE_BIND_REGIONS: Self = Self(64);
+    /// The 3D image can be viewed as a 2D or 2D array image
+    pub const IMAGE_CREATE_2D_ARRAY_COMPATIBLE: Self = Self(32);
+    pub const BLOCK_TEXEL_VIEW_COMPATIBLE: Self = Self(128);
+    pub const EXTENDED_USAGE: Self = Self(256);
+    /// Image requires protected memory
+    pub const PROTECTED: Self = Self(2048);
+    pub const DISJOINT: Self = Self(512);
+    pub const CORNER_SAMPLED_NV: Self = Self(8192);
+    pub const SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_EXT: Self = Self(4096);
+    pub const SUBSAMPLED_EXT: Self = Self(16384);
+}
+
+#[repr(C)]
+pub struct FormatFeatureFlags(u32);
+
+impl FormatFeatureFlags {
+    /// Format can be used for sampled images (SAMPLED_IMAGE and COMBINED_IMAGE_SAMPLER descriptor types)
+    pub const SAMPLED_IMAGE: Self = Self(1);
+    /// Format can be used for storage images (STORAGE_IMAGE descriptor type)
+    pub const STORAGE_IMAGE: Self = Self(2);
+    /// Format supports atomic operations in case it is used for storage images
+    pub const STORAGE_IMAGE_ATOMIC: Self = Self(4);
+    /// Format can be used for uniform texel buffers (TBOs)
+    pub const UNIFORM_TEXEL_BUFFER: Self = Self(8);
+    /// Format can be used for storage texel buffers (IBOs)
+    pub const STORAGE_TEXEL_BUFFER: Self = Self(16);
+    /// Format supports atomic operations in case it is used for storage texel buffers
+    pub const STORAGE_TEXEL_BUFFER_ATOMIC: Self = Self(32);
+    /// Format can be used for vertex buffers (VBOs)
+    pub const VERTEX_BUFFER: Self = Self(64);
+    /// Format can be used for color attachment images
+    pub const COLOR_ATTACHMENT: Self = Self(128);
+    /// Format supports blending in case it is used for color attachment images
+    pub const COLOR_ATTACHMENT_BLEND: Self = Self(256);
+    /// Format can be used for depth/stencil attachment images
+    pub const DEPTH_STENCIL_ATTACHMENT: Self = Self(512);
+    /// Format can be used as the source image of blits with vkCmdBlitImage
+    pub const BLIT_SRC: Self = Self(1024);
+    /// Format can be used as the destination image of blits with vkCmdBlitImage
+    pub const BLIT_DST: Self = Self(2048);
+    /// Format can be filtered with VK_FILTER_LINEAR when being sampled
+    pub const SAMPLED_IMAGE_FILTER_LINEAR: Self = Self(4096);
+    /// Format can be used as the source image of image transfer commands
+    pub const TRANSFER_SRC: Self = Self(16384);
+    /// Format can be used as the destination image of image transfer commands
+    pub const TRANSFER_DST: Self = Self(32768);
+    /// Format can have midpoint rather than cosited chroma samples
+    pub const MIDPOINT_CHROMA_SAMPLES: Self = Self(131072);
+    /// Format can be used with linear filtering whilst color conversion is enabled
+    pub const SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER: Self = Self(262144);
+    /// Format can have different chroma, min and mag filters
+    pub const SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER: Self = Self(524288);
+    pub const SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT: Self = Self(1048576);
+    pub const SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE: Self =
+        Self(2097152);
+    /// Format supports disjoint planes
+    pub const DISJOINT: Self = Self(4194304);
+    /// Format can have cosited rather than midpoint chroma samples
+    pub const COSITED_CHROMA_SAMPLES: Self = Self(8388608);
+    /// Format can be used with min/max reduction filtering
+    pub const SAMPLED_IMAGE_FILTER_MINMAX: Self = Self(65536);
+    /// Format can be filtered with VK_FILTER_CUBIC_IMG when being sampled
+    pub const SAMPLED_IMAGE_FILTER_CUBIC_IMG: Self = Self(8192);
+    pub const VIDEO_DECODE_OUTPUT_KHR: Self = Self(33554432);
+    pub const VIDEO_DECODE_DPB_KHR: Self = Self(67108864);
+    pub const ACCELERATION_STRUCTURE_VERTEX_BUFFER_KHR: Self = Self(536870912);
+    pub const DISJOINT_KHR: Self = Self::DISJOINT;
+    pub const FRAGMENT_DENSITY_MAP_EXT: Self = Self(16777216);
+    pub const FRAGMENT_SHADING_RATE_ATTACHMENT_KHR: Self = Self(1073741824);
+    pub const VIDEO_ENCODE_INPUT_KHR: Self = Self(134217728);
+    pub const VIDEO_ENCODE_DPB_KHR: Self = Self(268435456);
+}
+
+#[repr(C)]
+pub struct PipelineStageFlags(u32);
+impl PipelineStageFlags {
+    /// Before subsequent commands are processed
+    pub const TOP_OF_PIPE: Self = Self(1);
+    /// Draw/DispatchIndirect command fetch
+    pub const DRAW_INDIRECT: Self = Self(2);
+    /// Vertex/index fetch
+    pub const VERTEX_INPUT: Self = Self(4);
+    /// Vertex shading
+    pub const VERTEX_SHADER: Self = Self(8);
+    /// Tessellation control shading
+    pub const TESSELLATION_CONTROL_SHADER: Self = Self(16);
+    /// Tessellation evaluation shading
+    pub const TESSELLATION_EVALUATION_SHADER: Self = Self(32);
+    /// Geometry shading
+    pub const GEOMETRY_SHADER: Self = Self(64);
+    /// Fragment shading
+    pub const FRAGMENT_SHADER: Self = Self(128);
+    /// Early fragment (depth and stencil) tests
+    pub const EARLY_FRAGMENT_TESTS: Self = Self(256);
+    /// Late fragment (depth and stencil) tests
+    pub const LATE_FRAGMENT_TESTS: Self = Self(512);
+    /// Color attachment writes
+    pub const COLOR_ATTACHMENT_OUTPUT: Self = Self(1024);
+    /// Compute shading
+    pub const COMPUTE_SHADER: Self = Self(2048);
+    /// Transfer/copy operations
+    pub const TRANSFER: Self = Self(4096);
+    /// After previous commands have completed
+    pub const BOTTOM_OF_PIPE: Self = Self(8192);
+    /// Indicates host (CPU) is a source/sink of the dependency
+    pub const HOST: Self = Self(16384);
+    /// All stages of the graphics pipeline
+    pub const ALL_GRAPHICS: Self = Self(32768);
+    /// All stages supported on the queue
+    pub const ALL_COMMANDS: Self = Self(65536);
+    pub const TRANSFORM_FEEDBACK_EXT: Self = Self(16777216);
+    /// A pipeline stage for conditional rendering predicate fetch
+    pub const CONDITIONAL_RENDERING_EXT: Self = Self(262144);
+    pub const ACCELERATION_STRUCTURE_BUILD_KHR: Self = Self(33554432);
+    pub const RAY_TRACING_SHADER_KHR: Self = Self(2097152);
+    pub const TASK_SHADER_NV: Self = Self(524288);
+    pub const MESH_SHADER_NV: Self = Self(1048576);
+    pub const FRAGMENT_DENSITY_PROCESS_EXT: Self = Self(8388608);
+    pub const FRAGMENT_SHADING_RATE_ATTACHMENT_KHR: Self = Self(4194304);
+    pub const COMMAND_PREPROCESS_NV: Self = Self(131072);
+}
+
+#[repr(C)]
+pub struct CommandPoolCreateFlags(u32);
+impl CommandPoolCreateFlags {
+    /// Command buffers have a short lifetime
+    pub const TRANSIENT: Self = Self(1);
+    /// Command buffers may release their memory individually
+    pub const RESET_COMMAND_BUFFER: Self = Self(2);
+    /// Command buffers allocated from pool are protected command buffers
+    pub const PROTECTED: Self = Self(4);
+}
+
+#[repr(C)]
+pub struct CommandPoolResetFlags(u32);
+impl CommandPoolResetFlags {
+    /// Release resources owned by the pool
+    pub const RELEASE_RESOURCES: Self = Self(1);
+}
+
+#[repr(C)]
+pub struct CommandBufferResetFlags(u32);
+impl CommandBufferResetFlags {
+    /// Release resources owned by the buffer
+    pub const RELEASE_RESOURCES: Self = Self(1);
+}
+
+#[repr(C)]
+pub struct CommandBufferUsageFlags(u32);
+impl CommandBufferUsageFlags {
+    pub const ONE_TIME_SUBMIT: Self = Self(1);
+    pub const RENDER_PASS_CONTINUE: Self = Self(2);
+    /// Command buffer may be submitted/executed more than once simultaneously
+    pub const SIMULTANEOUS_USE: Self = Self(4);
+}
+
+#[repr(C)]
+pub struct QueryControlFlags(u32);
+impl QueryControlFlags {
+    /// Require precise results to be collected by the query
+    pub const PRECISE: Self = Self(1);
+}
+
+#[repr(C)]
+pub struct QueryResultFlags(u32);
+impl QueryResultFlags {
+    /// Results of the queries are written to the destination buffer as 64-bit values
+    pub const QUERY_RESULT_64: Self = Self(1);
+    /// Results of the queries are waited on before proceeding with the result copy
+    pub const WAIT: Self = Self(2);
+    /// Besides the results of the query, the availability of the results is also written
+    pub const WITH_AVAILABILITY: Self = Self(4);
+    /// Copy the partial results of the query even if the final results are not available
+    pub const PARTIAL: Self = Self(8);
+    pub const WITH_STATUS_KHR: Self = Self(16);
+}
+
+#[repr(C)]
+pub struct QueryPipelineStatisticFlags(u32);
+impl QueryPipelineStatisticFlags {
+    pub const INPUT_ASSEMBLY_VERTICES: Self = Self(1);
+    pub const INPUT_ASSEMBLY_PRIMITIVES: Self = Self(2);
+    pub const VERTEX_SHADER_INVOCATIONS: Self = Self(4);
+    pub const GEOMETRY_SHADER_INVOCATIONS: Self = Self(8);
+    pub const GEOMETRY_SHADER_PRIMITIVES: Self = Self(16);
+    pub const CLIPPING_INVOCATIONS: Self = Self(32);
+    pub const CLIPPING_PRIMITIVES: Self = Self(64);
+    pub const FRAGMENT_SHADER_INVOCATIONS: Self = Self(128);
+    pub const TESSELLATION_CONTROL_SHADER_PATCHES: Self = Self(256);
+    pub const TESSELLATION_EVALUATION_SHADER_INVOCATIONS: Self = Self(512);
+    pub const COMPUTE_SHADER_INVOCATIONS: Self = Self(1024);
+}
+
+#[repr(C)]
+pub struct AttachmentDescriptionFlags(u32);
+impl AttachmentDescriptionFlags {
+    /// The attachment may alias physical memory of another attachment in the same render pass
+    pub const MAY_ALIAS: Self = Self(1);
+}
+
+#[repr(C)]
+pub struct AccessFlags(u32);
+impl AccessFlags {
+    /// Controls coherency of indirect command reads
+    pub const INDIRECT_COMMAND_READ: Self = Self(1);
+    /// Controls coherency of index reads
+    pub const INDEX_READ: Self = Self(2);
+    /// Controls coherency of vertex attribute reads
+    pub const VERTEX_ATTRIBUTE_READ: Self = Self(4);
+    /// Controls coherency of uniform buffer reads
+    pub const UNIFORM_READ: Self = Self(8);
+    /// Controls coherency of input attachment reads
+    pub const INPUT_ATTACHMENT_READ: Self = Self(16);
+    /// Controls coherency of shader reads
+    pub const SHADER_READ: Self = Self(32);
+    /// Controls coherency of shader writes
+    pub const SHADER_WRITE: Self = Self(64);
+    /// Controls coherency of color attachment reads
+    pub const COLOR_ATTACHMENT_READ: Self = Self(128);
+    /// Controls coherency of color attachment writes
+    pub const COLOR_ATTACHMENT_WRITE: Self = Self(256);
+    /// Controls coherency of depth/stencil attachment reads
+    pub const DEPTH_STENCIL_ATTACHMENT_READ: Self = Self(512);
+    /// Controls coherency of depth/stencil attachment writes
+    pub const DEPTH_STENCIL_ATTACHMENT_WRITE: Self = Self(1024);
+    /// Controls coherency of transfer reads
+    pub const TRANSFER_READ: Self = Self(2048);
+    /// Controls coherency of transfer writes
+    pub const TRANSFER_WRITE: Self = Self(4096);
+    /// Controls coherency of host reads
+    pub const HOST_READ: Self = Self(8192);
+    /// Controls coherency of host writes
+    pub const HOST_WRITE: Self = Self(16384);
+    /// Controls coherency of memory reads
+    pub const MEMORY_READ: Self = Self(32768);
+    /// Controls coherency of memory writes
+    pub const MEMORY_WRITE: Self = Self(65536);
+    pub const TRANSFORM_FEEDBACK_WRITE_EXT: Self = Self(33554432);
+    pub const TRANSFORM_FEEDBACK_COUNTER_READ_EXT: Self = Self(67108864);
+    pub const TRANSFORM_FEEDBACK_COUNTER_WRITE_EXT: Self = Self(134217728);
+    /// read access flag for reading conditional rendering predicate
+    pub const CONDITIONAL_RENDERING_READ_EXT: Self = Self(1048576);
+    pub const COLOR_ATTACHMENT_READ_NONCOHERENT_EXT: Self = Self(524288);
+    pub const ACCELERATION_STRUCTURE_READ_KHR: Self = Self(2097152);
+    pub const ACCELERATION_STRUCTURE_WRITE_KHR: Self = Self(4194304);
+    pub const FRAGMENT_DENSITY_MAP_READ_EXT: Self = Self(16777216);
+    pub const FRAGMENT_SHADING_RATE_ATTACHMENT_READ_KHR: Self = Self(8388608);
+    pub const COMMAND_PREPROCESS_READ_NV: Self = Self(131072);
+    pub const COMMAND_PREPROCESS_WRITE_NV: Self = Self(262144);
+}
+
+#[repr(C)]
+pub struct DependencyFlags(u32);
+impl DependencyFlags {
+    /// Dependency is per pixel region
+    pub const BY_REGION: Self = Self(1);
+    /// Dependency is across devices
+    pub const DEVICE_GROUP: Self = Self(4);
+    pub const VIEW_LOCAL: Self = Self(2);
+}
+
+#[repr(C)]
+pub struct SubpassDescriptionFlags(u32);
+impl SubpassDescriptionFlags {
+    pub const PER_VIEW_ATTRIBUTES_NVX: Self = Self(1);
+    pub const PER_VIEW_POSITION_X_ONLY_NVX: Self = Self(2);
+    pub const FRAGMENT_REGION_QCOM: Self = Self(4);
+    pub const SHADER_RESOLVE_QCOM: Self = Self(8);
+}
+
+#[repr(C)]
+pub struct RenderPassCreateFlags(u32);
+
+#[repr(C)]
+pub struct FramebufferCreateFlags(u32);
+
+impl FramebufferCreateFlags {
+    pub const IMAGELESS: Self = Self(1);
+}
+
+#[repr(C)]
+pub struct FenceCreateFlags(u32);
+impl FenceCreateFlags {
+    pub const SIGNALED: Self = Self(1);
+}
+
+#[repr(C)]
+pub struct SemaphoreCreateFlags(u32);
+
+#[repr(C)]
+pub struct ShaderModuleCreateFlags(u32);
+
+#[repr(C)]
+pub struct ShaderStageFlags(u32);
+
+impl ShaderStageFlags {
+    pub const VERTEX: Self = Self(1);
+    pub const TESSELLATION_CONTROL: Self = Self(2);
+    pub const TESSELLATION_EVALUATION: Self = Self(4);
+    pub const GEOMETRY: Self = Self(8);
+    pub const FRAGMENT: Self = Self(16);
+    pub const COMPUTE: Self = Self(32);
+    pub const ALL_GRAPHICS: Self = Self(0x0000001F);
+    pub const ALL: Self = Self(0x7FFFFFFF);
+    pub const RAYGEN_KHR: Self = Self(256);
+    pub const ANY_HIT_KHR: Self = Self(512);
+    pub const CLOSEST_HIT_KHR: Self = Self(1024);
+    pub const MISS_KHR: Self = Self(2048);
+    pub const INTERSECTION_KHR: Self = Self(4096);
+    pub const CALLABLE_KHR: Self = Self(8192);
+    pub const TASK_NV: Self = Self(64);
+    pub const MESH_NV: Self = Self(128);
+    pub const SUBPASS_SHADING_HUAWEI: Self = Self(16384);
+}
+
+#[repr(C)]
+pub struct DescriptorSetLayoutCreateFlags(u32);
+impl DescriptorSetLayoutCreateFlags {
+    pub const UPDATE_AFTER_BIND_POOL: Self = Self(2);
+    /// Descriptors are pushed via vkCmdPushDescriptorSetKHR
+    pub const PUSH_DESCRIPTOR_KHR: Self = Self(1);
+    pub const HOST_ONLY_POOL_VALVE: Self = Self(4);
+}
+
+#[repr(C)]
+pub struct StencilFaceFlags(u32);
+impl StencilFaceFlags {
+    /// Front face
+    pub const FRONT: Self = Self(1);
+    /// Back face
+    pub const BACK: Self = Self(2);
+    /// Front and back faces
+    pub const FRONT_AND_BACK: Self = Self(0x00000003);
+}
+
+#[repr(C)]
+pub struct CullModeFlags(u32);
+impl CullModeFlags {
+    pub const NONE: Self = Self(0);
+    pub const FRONT: Self = Self(1);
+    pub const BACK: Self = Self(2);
+    pub const FRONT_AND_BACK: Self = Self(0x00000003);
+}
+
+#[repr(C)]
+pub struct DescriptorPoolCreateFlags(u32);
+impl DescriptorPoolCreateFlags {
+    /// Descriptor sets may be freed individually
+    pub const FREE_DESCRIPTOR_SET: Self = Self(1);
+    pub const UPDATE_AFTER_BIND: Self = Self(2);
+    pub const HOST_ONLY_VALVE: Self = Self(4);
+}
+
+#[repr(C)]
+pub struct DescriptorPoolResetFlags(u32);
+
+#[repr(C)]
+pub struct SamplerCreateFlags(u32);
+impl SamplerCreateFlags {
+    pub const SUBSAMPLED_EXT: Self = Self(1);
+    pub const SUBSAMPLED_COARSE_RECONSTRUCTION_EXT: Self = Self(2);
+}
+
+#[repr(C)]
+pub struct PipelineLayoutCreateFlags(u32);
+
+#[repr(C)]
+pub struct PipelineCacheCreateFlags(u32);
+impl PipelineCacheCreateFlags {
+    pub const EXTERNALLY_SYNCHRONIZED_EXT: Self = Self(1);
+}
+
+#[repr(C)]
+pub struct PipelineDepthStencilStateCreateFlags(u32);
+
+#[repr(C)]
+pub struct PipelineDynamicStateCreateFlags(u32);
+
+#[repr(C)]
+pub struct PipelineColorBlendStateCreateFlags(u32);
+
+#[repr(C)]
+pub struct PipelineMultisampleStateCreateFlags(u32);
+
+#[repr(C)]
+pub struct PipelineRasterizationStateCreateFlags(u32);
+
+#[repr(C)]
+pub struct PipelineViewportStateCreateFlags(u32);
+
+#[repr(C)]
+pub struct PipelineTessellationStateCreateFlags(u32);
+
+#[repr(C)]
+pub struct PipelineInputAssemblyStateCreateFlags(u32);
+
+#[repr(C)]
+pub struct PipelineVertexInputStateCreateFlags(u32);
+
+#[repr(C)]
+pub struct PipelineShaderStageCreateFlags(u32);
+impl PipelineShaderStageCreateFlags {
+    pub const ALLOW_VARYING_SUBGROUP_SIZE_EXT: Self = Self(1);
+    pub const REQUIRE_FULL_SUBGROUPS_EXT: Self = Self(2);
+}
+
+#[repr(C)]
+pub struct PipelineCreateFlags(u32);
+impl PipelineCreateFlags {
+    pub const DISABLE_OPTIMIZATION: Self = Self(1);
+    pub const ALLOW_DERIVATIVES: Self = Self(2);
+    pub const DERIVATIVE: Self = Self(4);
+    pub const VIEW_INDEX_FROM_DEVICE_INDEX: Self = Self(8);
+    pub const DISPATCH_BASE: Self = Self(16);
+    pub const RAY_TRACING_NO_NULL_ANY_HIT_SHADERS_KHR: Self = Self(16384);
+    pub const RAY_TRACING_NO_NULL_CLOSEST_HIT_SHADERS_KHR: Self = Self(32768);
+    pub const RAY_TRACING_NO_NULL_MISS_SHADERS_KHR: Self = Self(65536);
+    pub const RAY_TRACING_NO_NULL_INTERSECTION_SHADERS_KHR: Self = Self(131072);
+    pub const RAY_TRACING_SKIP_TRIANGLES_KHR: Self = Self(4096);
+    pub const RAY_TRACING_SKIP_AABBS_KHR: Self = Self(8192);
+    pub const RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_KHR: Self = Self(524288);
+    pub const DEFER_COMPILE_NV: Self = Self(32);
+    pub const CAPTURE_STATISTICS_KHR: Self = Self(64);
+    pub const CAPTURE_INTERNAL_REPRESENTATIONS_KHR: Self = Self(128);
+    pub const INDIRECT_BINDABLE_NV: Self = Self(262144);
+    pub const LIBRARY_KHR: Self = Self(2048);
+    pub const FAIL_ON_PIPELINE_COMPILE_REQUIRED_EXT: Self = Self(256);
+    pub const EARLY_RETURN_ON_FAILURE_EXT: Self = Self(512);
+    pub const RAY_TRACING_ALLOW_MOTION_NV: Self = Self(1048576);
+}
+
+#[repr(C)]
+pub struct ColorComponentFlags(u32);
+impl ColorComponentFlags {
+    pub const R: Self = Self(1);
+    pub const G: Self = Self(2);
+    pub const B: Self = Self(4);
+    pub const A: Self = Self(8);
+}
+
+#[repr(C)]
+pub struct BufferUsageFlags(u32);
+impl BufferUsageFlags {
+    /// Can be used as a source of transfer operations
+    pub const TRANSFER_SRC: Self = Self(1);
+    /// Can be used as a destination of transfer operations
+    pub const TRANSFER_DST: Self = Self(2);
+    /// Can be used as TBO
+    pub const UNIFORM_TEXEL_BUFFER: Self = Self(4);
+    /// Can be used as IBO
+    pub const STORAGE_TEXEL_BUFFER: Self = Self(8);
+    /// Can be used as UBO
+    pub const UNIFORM_BUFFER: Self = Self(16);
+    /// Can be used as SSBO
+    pub const STORAGE_BUFFER: Self = Self(32);
+    /// Can be used as source of fixed-function index fetch (index buffer)
+    pub const INDEX_BUFFER: Self = Self(64);
+    /// Can be used as source of fixed-function vertex fetch (VBO)
+    pub const VERTEX_BUFFER: Self = Self(128);
+    /// Can be the source of indirect parameters (e.g. indirect buffer, parameter buffer)
+    pub const INDIRECT_BUFFER: Self = Self(256);
+    pub const SHADER_DEVICE_ADDRESS: Self = Self(131072);
+    pub const VIDEO_DECODE_SRC_KHR: Self = Self(8192);
+    pub const VIDEO_DECODE_DST_KHR: Self = Self(16384);
+    pub const TRANSFORM_FEEDBACK_BUFFER_EXT: Self = Self(2048);
+    pub const TRANSFORM_FEEDBACK_COUNTER_BUFFER_EXT: Self = Self(4096);
+    /// Specifies the buffer can be used as a predicate in conditional rendering
+    pub const CONDITIONAL_RENDERING_EXT: Self = Self(512);
+    pub const ACCELERATION_STRUCTURE_BUILD_INPUT_READ_ONLY_KHR: Self = Self(524288);
+    pub const ACCELERATION_STRUCTURE_STORAGE_KHR: Self = Self(1048576);
+    pub const SHADER_BINDING_TABLE_KHR: Self = Self(1024);
+    pub const VIDEO_ENCODE_DST_KHR: Self = Self(32768);
+    pub const VIDEO_ENCODE_SRC_KHR: Self = Self(65536);
+}
+
+#[repr(C)]
+pub struct BufferCreateFlags(u32);
+impl BufferCreateFlags {
+    /// Buffer should support sparse backing
+    pub const SPARSE_BINDING: Self = Self(1);
+    /// Buffer should support sparse backing with partial residency
+    pub const SPARSE_RESIDENCY: Self = Self(2);
+    /// Buffer should support constant data access to physical memory ranges mapped into multiple locations of sparse buffers
+    pub const SPARSE_ALIASED: Self = Self(4);
+    /// Buffer requires protected memory
+    pub const PROTECTED: Self = Self(8);
+    pub const DEVICE_ADDRESS_CAPTURE_REPLAY: Self = Self(16);
+}
+
+#[repr(C)]
+pub struct BufferViewCreateFlags(u32);
+
+#[repr(C)]
+pub struct SemaphoreWaitFlags(u32);
+
+impl SemaphoreWaitFlags {
+    pub const ANY: Self = Self(1);
+}
+
+#[repr(C)]
+pub struct ResolveModeFlags(u32);
+
+impl ResolveModeFlags {
+    pub const RESOLVE_MODE_NONE: Self = Self(0);
+    pub const RESOLVE_MODE_SAMPLE_ZERO: Self = Self(0x00000001);
+    pub const RESOLVE_MODE_AVERAGE: Self = Self(0x00000002);
+    pub const RESOLVE_MODE_MIN: Self = Self(0x00000004);
+    pub const RESOLVE_MODE_MAX: Self = Self(0x00000008);
+}
+
+#[repr(C)]
+pub struct RenderingFlags(u32);
+
+impl RenderingFlags {
+    pub const RENDERING_CONTENTS_SECONDARY_COMMAND_BUFFERS: Self = Self(0x00000001);
+    pub const RENDERING_SUSPENDING: Self = Self(0x00000002);
+    pub const RENDERING_RESUMING: Self = Self(0x00000004);
+}
+
+#[repr(C)]
+pub struct SubgroupFeatureFlags(u32);
+
+impl SubgroupFeatureFlags {
+    pub const BASIC: Self = Self(0x00000001);
+    pub const VOTE: Self = Self(0x00000002);
+    pub const ARITHMETIC: Self = Self(0x00000004);
+    pub const BALLOT: Self = Self(0x00000008);
+    pub const SHUFFLE: Self = Self(0x00000010);
+    pub const SHUFFLE_RELATIVE: Self = Self(0x00000020);
+    pub const CLUSTERED: Self = Self(0x00000040);
+    pub const QUAD: Self = Self(0x00000080);
+    // Provided by VK_NV_shader_subgroup_partitioned
+    pub const PARTITIONED_NV: Self = Self(0x00000100);
+}
+
+#[repr(C)]
+pub struct SubmitFlags(u32);
+
+impl SubmitFlags {
+    pub const PROTECTED: Self = Self(1);
+}
+
+#[repr(C)]
+pub struct AccessFlags2(u64);
+
+impl AccessFlags2 {
+    pub const NONE: Self = Self(0);
+    pub const INDIRECT_COMMAND_READ: Self = Self(0x00000001);
+    pub const INDEX_READ: Self = Self(0x00000002);
+    pub const VERTEX_ATTRIBUTE_READ: Self = Self(0x00000004);
+    pub const UNIFORM_READ: Self = Self(0x00000008);
+    pub const INPUT_ATTACHMENT_READ: Self = Self(0x00000010);
+    pub const SHADER_READ: Self = Self(0x00000020);
+    pub const SHADER_WRITE: Self = Self(0x00000040);
+    pub const COLOR_ATTACHMENT_READ: Self = Self(0x00000080);
+    pub const COLOR_ATTACHMENT_WRITE: Self = Self(0x00000100);
+    pub const DEPTH_STENCIL_ATTACHMENT_READ: Self = Self(0x00000200);
+    pub const DEPTH_STENCIL_ATTACHMENT_WRITE: Self = Self(0x00000400);
+    pub const TRANSFER_READ: Self = Self(0x00000800);
+    pub const TRANSFER_WRITE: Self = Self(0x00001000);
+    pub const HOST_READ: Self = Self(0x00002000);
+    pub const HOST_WRITE: Self = Self(0x00004000);
+    pub const MEMORY_READ: Self = Self(0x00008000);
+    pub const MEMORY_WRITE: Self = Self(0x00010000);
+    pub const SHADER_SAMPLED_READ: Self = Self(0x100000000);
+    pub const SHADER_STORAGE_READ: Self = Self(0x200000000);
+    pub const SHADER_STORAGE_WRITE: Self = Self(0x400000000);
+    // Provided by VK_KHR_synchronization2 with VK_EXT_transform_feedback
+    pub const TRANSFORM_FEEDBACK_WRITE_EXT: Self = Self(0x02000000);
+    // Provided by VK_KHR_synchronization2 with VK_EXT_transform_feedback
+    pub const TRANSFORM_FEEDBACK_COUNTER_READ_EXT: Self = Self(0x04000000);
+    // Provided by VK_KHR_synchronization2 with VK_EXT_transform_feedback
+    pub const TRANSFORM_FEEDBACK_COUNTER_WRITE_EXT: Self = Self(0x08000000);
+    // Provided by VK_KHR_synchronization2 with VK_EXT_conditional_rendering
+    pub const CONDITIONAL_RENDERING_READ_EXT: Self = Self(0x00100000);
+    // Provided by VK_KHR_synchronization2 with VK_NV_device_generated_commands
+    pub const COMMAND_PREPROCESS_READ_NV: Self = Self(0x00020000);
+    // Provided by VK_KHR_synchronization2 with VK_NV_device_generated_commands
+    pub const COMMAND_PREPROCESS_WRITE_NV: Self = Self(0x00040000);
+    // Provided by VK_KHR_fragment_shading_rate with VK_KHR_synchronization2
+    pub const FRAGMENT_SHADING_RATE_ATTACHMENT_READ_KHR: Self = Self(0x00800000);
+    // Provided by VK_KHR_synchronization2 with VK_NV_shading_rate_image
+    pub const SHADING_RATE_IMAGE_READ_NV: Self = Self(0x00800000);
+    // Provided by VK_KHR_acceleration_structure with VK_KHR_synchronization2
+    pub const ACCELERATION_STRUCTURE_READ_KHR: Self = Self(0x00200000);
+    // Provided by VK_KHR_acceleration_structure with VK_KHR_synchronization2
+    pub const ACCELERATION_STRUCTURE_WRITE_KHR: Self = Self(0x00400000);
+    // Provided by VK_KHR_synchronization2 with VK_NV_ray_tracing
+    pub const ACCELERATION_STRUCTURE_READ_NV: Self = Self(0x00200000);
+    // Provided by VK_KHR_synchronization2 with VK_NV_ray_tracing
+    pub const ACCELERATION_STRUCTURE_WRITE_NV: Self = Self(0x00400000);
+    // Provided by VK_KHR_synchronization2 with VK_EXT_fragment_density_map
+    pub const FRAGMENT_DENSITY_MAP_READ_EXT: Self = Self(0x01000000);
+    // Provided by VK_KHR_synchronization2 with VK_EXT_blend_operation_advanced
+    pub const COLOR_ATTACHMENT_READ_NONCOHERENT_EXT: Self = Self(0x00080000);
+    // Provided by VK_HUAWEI_invocation_mask
+    pub const INVOCATION_MASK_READ_HUAWEI: Self = Self(0x8000000000);
+}
+
+#[repr(C)]
+pub struct PipelineStageFlags2(u64);
+
+impl PipelineStageFlags2 {
+    pub const NONE: Self = Self(0);
+    pub const TOP_OF_PIPE: Self = Self(0x00000001);
+    pub const DRAW_INDIRECT: Self = Self(0x00000002);
+    pub const VERTEX_INPUT: Self = Self(0x00000004);
+    pub const VERTEX_SHADER: Self = Self(0x00000008);
+    pub const TESSELLATION_CONTROL_SHADER: Self = Self(0x00000010);
+    pub const TESSELLATION_EVALUATION_SHADER: Self = Self(0x00000020);
+    pub const GEOMETRY_SHADER: Self = Self(0x00000040);
+    pub const FRAGMENT_SHADER: Self = Self(0x00000080);
+    pub const EARLY_FRAGMENT_TESTS: Self = Self(0x00000100);
+    pub const LATE_FRAGMENT_TESTS: Self = Self(0x00000200);
+    pub const COLOR_ATTACHMENT_OUTPUT: Self = Self(0x00000400);
+    pub const COMPUTE_SHADER: Self = Self(0x00000800);
+    pub const ALL_TRANSFER: Self = Self(0x00001000);
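+    // Alias of ALL_TRANSFER above, kept under the Vulkan 1.0-style stage name.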
+    pub const TRANSFER: Self = Self(0x00001000);
+    pub const BOTTOM_OF_PIPE: Self = Self(0x00002000);
+    pub const HOST: Self = Self(0x00004000);
+    pub const ALL_GRAPHICS: Self = Self(0x00008000);
+    pub const ALL_COMMANDS: Self = Self(0x00010000);
+    pub const COPY: Self = Self(0x100000000);
+    pub const RESOLVE: Self = Self(0x200000000);
+    pub const BLIT: Self = Self(0x400000000);
+    pub const CLEAR: Self = Self(0x800000000);
+    pub const INDEX_INPUT: Self = Self(0x1000000000);
+    pub const VERTEX_ATTRIBUTE_INPUT: Self = Self(0x2000000000);
+    pub const PRE_RASTERIZATION_SHADERS: Self = Self(0x4000000000);
+    // Provided by VK_KHR_synchronization2 with VK_EXT_transform_feedback
+    pub const TRANSFORM_FEEDBACK_EXT: Self = Self(0x01000000);
+    // Provided by VK_KHR_synchronization2 with VK_EXT_conditional_rendering
+    pub const CONDITIONAL_RENDERING_EXT: Self = Self(0x00040000);
+    // Provided by VK_KHR_synchronization2 with VK_NV_device_generated_commands
+    pub const COMMAND_PREPROCESS_NV: Self = Self(0x00020000);
+    // Provided by VK_KHR_fragment_shading_rate with VK_KHR_synchronization2
+    pub const FRAGMENT_SHADING_RATE_ATTACHMENT_KHR: Self = Self(0x00400000);
+    // Provided by VK_KHR_synchronization2 with VK_NV_shading_rate_image
+    pub const SHADING_RATE_IMAGE_NV: Self = Self(0x00400000);
+    // Provided by VK_KHR_acceleration_structure with VK_KHR_synchronization2
+    pub const ACCELERATION_STRUCTURE_BUILD_KHR: Self = Self(0x02000000);
+    // Provided by VK_KHR_ray_tracing_pipeline with VK_KHR_synchronization2
+    pub const RAY_TRACING_SHADER_KHR: Self = Self(0x00200000);
+    // Provided by VK_KHR_synchronization2 with VK_NV_ray_tracing
+    pub const RAY_TRACING_SHADER_NV: Self = Self(0x00200000);
+    // Provided by VK_KHR_synchronization2 with VK_NV_ray_tracing
+    pub const ACCELERATION_STRUCTURE_BUILD_NV: Self = Self(0x02000000);
+    // Provided by VK_KHR_synchronization2 with VK_EXT_fragment_density_map
+    pub const FRAGMENT_DENSITY_PROCESS_EXT: Self = Self(0x00800000);
+    // Provided by VK_KHR_synchronization2 with VK_NV_mesh_shader
+    pub const TASK_SHADER_NV: Self = Self(0x00080000);
+    // Provided by VK_KHR_synchronization2 with VK_NV_mesh_shader
+    pub const MESH_SHADER_NV: Self = Self(0x00100000);
+    // Provided by VK_HUAWEI_subpass_shading
+    pub const SUBPASS_SHADING_HUAWEI: Self = Self(0x8000000000);
+    // Provided by VK_HUAWEI_invocation_mask
+    pub const INVOCATION_MASK_HUAWEI: Self = Self(0x10000000000);
+}
+
+// Impls
+
+// InstanceCreateFlags
+// SampleCountFlags
+// MemoryPropertyFlags
+// MemoryHeapFlags
+// MemoryMapFlags
+// ImageAspectFlags
+// SparseMemoryBindFlags
+// SparseImageFormatFlags
+// QueueFlags
+// ImageUsageFlags
+// ImageCreateFlags
+// FormatFeatureFlags
+// PipelineStageFlags
+// SurfaceTransformFlagsKHR
+// SwapchainCreateFlagsKHR
+// CompositeAlphaFlagsKHR
+// ImageViewCreateFlags
+// CommandPoolCreateFlags
+// CommandPoolResetFlags
+// CommandBufferResetFlags
+// CommandBufferUsageFlags
+// QueryControlFlags
+// QueryResultFlags
+// QueryPipelineStatisticFlags
+// AttachmentDescriptionFlags
+// AccessFlags
+// DependencyFlags
+// SubpassDescriptionFlags
+// RenderPassCreateFlags
+// FramebufferCreateFlags
+// FenceCreateFlags
+// SemaphoreCreateFlags
+// ShaderModuleCreateFlags
+// ShaderStageFlags
+// DescriptorSetLayoutCreateFlags
+// StencilFaceFlags
+// CullModeFlags
+// DescriptorPoolCreateFlags
+// DescriptorPoolResetFlags
+// SamplerCreateFlags
+// PipelineLayoutCreateFlags
+// PipelineCacheCreateFlags
+// PipelineDepthStencilStateCreateFlags
+// PipelineDynamicStateCreateFlags
+// PipelineColorBlendStateCreateFlags
+// PipelineMultisampleStateCreateFlags
+// PipelineRasterizationStateCreateFlags
+// PipelineViewportStateCreateFlags
+// PipelineTessellationStateCreateFlags
+// PipelineInputAssemblyStateCreateFlags
+// PipelineVertexInputStateCreateFlags
+// PipelineShaderStageCreateFlags
+// PipelineCreateFlags
+// ColorComponentFlags
+// BufferCreateFlags
+// BufferUsageFlags
+// BufferViewCreateFlags
+// SemaphoreWaitFlags
+// ResolveModeFlags
+// RenderingFlags
+// SubgroupFeatureFlags
+// SubmitFlags
+// AccessFlags2
+// PipelineStageFlags2
+
+// Reference implementation for the flags types; each type in the checklist above repeats this pattern.
+
+// impl Flags {
+//     #[inline]
+//     pub fn from_raw(value: u32) -> Self {
+//         Self(value)
+//     }
+
+//     #[inline]
+//     pub fn as_raw(self) -> u32 {
+//         self.0
+//     }
+
+//     #[inline]
+//     pub fn intersects(self, rhs: Self) -> bool {
+//         self.0 & rhs.0 != 0
+//     }
+
+//     #[inline]
+//     pub fn contains(self, rhs: Self) -> bool {
+//         self.0 & rhs.0 == rhs.0
+//     }
+
+//     #[inline]
+//     pub fn cardinality(self) -> u32 {
+//         self.0.count_ones()
+//     }
+// }
+
+// impl Clone for Flags {
+//     fn clone(&self) -> Self {
+//         Self(self.0)
+//     }
+// }
+
+// impl Copy for Flags {}
+
+// impl Default for Flags {
+//     fn default() -> Self {
+//         Self(0)
+//     }
+// }
+
+// impl PartialEq for Flags {
+//     fn eq(&self, rhs: &Self) -> bool {
+//         self.0 == rhs.0
+//     }
+// }
+
+// impl Eq for Flags {}
+
+// impl std::ops::BitOr for Flags {
+//     type Output = Self;
+//     fn bitor(self, rhs: Self) -> Self::Output {
+//         Self(self.0 | rhs.0)
+//     }
+// }
+
+// impl std::ops::BitOrAssign for Flags {
+//     fn bitor_assign(&mut self, rhs: Self) {
+//         self.0 |= rhs.0
+//     }
+// }
+
+// impl std::ops::BitAnd for Flags {
+//     type Output = Self;
+//     fn bitand(self, rhs: Self) -> Self::Output {
+//         Self(self.0 & rhs.0)
+//     }
+// }
+
+// impl std::ops::BitAndAssign for Flags {
+//     fn bitand_assign(&mut self, rhs: Self) {
+//         self.0 &= rhs.0
+//     }
+// }
+
+// impl std::ops::BitXor for Flags {
+//     type Output = Self;
+//     fn bitxor(self, rhs: Self) -> Self::Output {
+//         Self(self.0 ^ rhs.0)
+//     }
+// }
+
+// impl std::ops::BitXorAssign for Flags {
+//     fn bitxor_assign(&mut self, rhs: Self) {
+//         self.0 ^= rhs.0
+//     }
+// }
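+
+// The concrete impls that follow are mechanical copies of the reference
+// implementation above, one per flags type in the checklist. A declarative
+// macro could stamp them out instead; the sketch below is hypothetical and
+// unused, so it is left commented out like the reference implementation.
+//
+// macro_rules! impl_flags {
+//     ($($name:ident),+ $(,)?) => {
+//         $(
+//             impl $name {
+//                 #[inline]
+//                 pub fn from_raw(value: u32) -> Self {
+//                     Self(value)
+//                 }
+//                 #[inline]
+//                 pub fn as_raw(self) -> u32 {
+//                     self.0
+//                 }
+//                 #[inline]
+//                 pub fn intersects(self, rhs: Self) -> bool {
+//                     self.0 & rhs.0 != 0
+//                 }
+//                 #[inline]
+//                 pub fn contains(self, rhs: Self) -> bool {
+//                     self.0 & rhs.0 == rhs.0
+//                 }
+//                 #[inline]
+//                 pub fn cardinality(self) -> u32 {
+//                     self.0.count_ones()
+//                 }
+//             }
+//
+//             impl Clone for $name {
+//                 fn clone(&self) -> Self {
+//                     Self(self.0)
+//                 }
+//             }
+//
+//             impl Copy for $name {}
+//
+//             impl Default for $name {
+//                 fn default() -> Self {
+//                     Self(0)
+//                 }
+//             }
+//
+//             impl PartialEq for $name {
+//                 fn eq(&self, rhs: &Self) -> bool {
+//                     self.0 == rhs.0
+//                 }
+//             }
+//
+//             impl Eq for $name {}
+//
+//             impl std::ops::BitOr for $name {
+//                 type Output = Self;
+//                 fn bitor(self, rhs: Self) -> Self::Output {
+//                     Self(self.0 | rhs.0)
+//                 }
+//             }
+//
+//             impl std::ops::BitOrAssign for $name {
+//                 fn bitor_assign(&mut self, rhs: Self) {
+//                     self.0 |= rhs.0
+//                 }
+//             }
+//
+//             impl std::ops::BitAnd for $name {
+//                 type Output = Self;
+//                 fn bitand(self, rhs: Self) -> Self::Output {
+//                     Self(self.0 & rhs.0)
+//                 }
+//             }
+//
+//             impl std::ops::BitAndAssign for $name {
+//                 fn bitand_assign(&mut self, rhs: Self) {
+//                     self.0 &= rhs.0
+//                 }
+//             }
+//
+//             impl std::ops::BitXor for $name {
+//                 type Output = Self;
+//                 fn bitxor(self, rhs: Self) -> Self::Output {
+//                     Self(self.0 ^ rhs.0)
+//                 }
+//             }
+//
+//             impl std::ops::BitXorAssign for $name {
+//                 fn bitxor_assign(&mut self, rhs: Self) {
+//                     self.0 ^= rhs.0
+//                 }
+//             }
+//         )+
+//     };
+// }
+//
+// impl_flags!(InstanceCreateFlags, DeviceCreateFlags, DeviceQueueCreateFlags);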
+
+impl PipelineStageFlags {
+    #[inline]
+    pub fn from_raw(value: u32) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for PipelineStageFlags {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for PipelineStageFlags {}
+
+impl Default for PipelineStageFlags {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for PipelineStageFlags {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for PipelineStageFlags {}
+
+impl std::ops::BitOr for PipelineStageFlags {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for PipelineStageFlags {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for PipelineStageFlags {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for PipelineStageFlags {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for PipelineStageFlags {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for PipelineStageFlags {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
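+
+// A minimal usage sketch of the flags interface above, written as a unit
+// test so `cargo test` can check it. It exercises only items defined in
+// this file; the constant names and values are the ones declared above.
+#[cfg(test)]
+mod pipeline_stage_flags_tests {
+    use super::PipelineStageFlags;
+
+    #[test]
+    fn bit_ops_and_queries() {
+        // Combine two stages with bitwise or.
+        let mut stages = PipelineStageFlags::VERTEX_SHADER | PipelineStageFlags::FRAGMENT_SHADER;
+
+        // `contains` requires every bit of its argument to be set.
+        assert!(stages.contains(PipelineStageFlags::VERTEX_SHADER));
+        assert!(!stages.contains(PipelineStageFlags::COMPUTE_SHADER));
+
+        // `intersects` requires any shared bit.
+        assert!(stages.intersects(PipelineStageFlags::FRAGMENT_SHADER | PipelineStageFlags::HOST));
+
+        // Two bits are set in total.
+        assert_eq!(stages.cardinality(), 2);
+
+        // Clear a bit by and-assigning the complement via the raw value.
+        stages &= PipelineStageFlags::from_raw(!PipelineStageFlags::VERTEX_SHADER.as_raw());
+        assert!(stages == PipelineStageFlags::FRAGMENT_SHADER);
+    }
+}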
+
+impl InstanceCreateFlags {
+    #[inline]
+    pub fn from_raw(value: u32) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for InstanceCreateFlags {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for InstanceCreateFlags {}
+
+impl Default for InstanceCreateFlags {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for InstanceCreateFlags {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for InstanceCreateFlags {}
+
+impl std::ops::BitOr for InstanceCreateFlags {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for InstanceCreateFlags {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for InstanceCreateFlags {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for InstanceCreateFlags {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for InstanceCreateFlags {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for InstanceCreateFlags {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
+
+impl DeviceCreateFlags {
+    #[inline]
+    pub fn from_raw(value: u32) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for DeviceCreateFlags {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for DeviceCreateFlags {}
+
+impl Default for DeviceCreateFlags {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for DeviceCreateFlags {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for DeviceCreateFlags {}
+
+impl std::ops::BitOr for DeviceCreateFlags {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for DeviceCreateFlags {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for DeviceCreateFlags {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for DeviceCreateFlags {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for DeviceCreateFlags {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for DeviceCreateFlags {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
+
+impl DeviceQueueCreateFlags {
+    #[inline]
+    pub fn from_raw(value: u32) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for DeviceQueueCreateFlags {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for DeviceQueueCreateFlags {}
+
+impl Default for DeviceQueueCreateFlags {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for DeviceQueueCreateFlags {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for DeviceQueueCreateFlags {}
+
+impl std::ops::BitOr for DeviceQueueCreateFlags {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for DeviceQueueCreateFlags {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for DeviceQueueCreateFlags {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for DeviceQueueCreateFlags {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for DeviceQueueCreateFlags {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for DeviceQueueCreateFlags {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
+
+impl SampleCountFlags {
+    #[inline]
+    pub fn from_raw(value: u32) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for SampleCountFlags {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for SampleCountFlags {}
+
+impl Default for SampleCountFlags {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for SampleCountFlags {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for SampleCountFlags {}
+
+impl std::ops::BitOr for SampleCountFlags {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for SampleCountFlags {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for SampleCountFlags {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for SampleCountFlags {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for SampleCountFlags {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for SampleCountFlags {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
+
+impl MemoryPropertyFlags {
+    #[inline]
+    pub fn from_raw(value: u32) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for MemoryPropertyFlags {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for MemoryPropertyFlags {}
+
+impl Default for MemoryPropertyFlags {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for MemoryPropertyFlags {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for MemoryPropertyFlags {}
+
+impl std::ops::BitOr for MemoryPropertyFlags {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for MemoryPropertyFlags {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for MemoryPropertyFlags {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for MemoryPropertyFlags {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for MemoryPropertyFlags {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for MemoryPropertyFlags {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
+
+impl MemoryHeapFlags {
+    #[inline]
+    pub fn from_raw(value: u32) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for MemoryHeapFlags {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for MemoryHeapFlags {}
+
+impl Default for MemoryHeapFlags {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for MemoryHeapFlags {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for MemoryHeapFlags {}
+
+impl std::ops::BitOr for MemoryHeapFlags {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for MemoryHeapFlags {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for MemoryHeapFlags {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for MemoryHeapFlags {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for MemoryHeapFlags {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for MemoryHeapFlags {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
+
+impl MemoryMapFlags {
+    #[inline]
+    pub fn from_raw(value: u32) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for MemoryMapFlags {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for MemoryMapFlags {}
+
+impl Default for MemoryMapFlags {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for MemoryMapFlags {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for MemoryMapFlags {}
+
+impl std::ops::BitOr for MemoryMapFlags {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for MemoryMapFlags {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for MemoryMapFlags {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for MemoryMapFlags {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for MemoryMapFlags {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for MemoryMapFlags {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
+
+impl ImageAspectFlags {
+    #[inline]
+    pub fn from_raw(value: u32) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for ImageAspectFlags {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for ImageAspectFlags {}
+
+impl Default for ImageAspectFlags {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for ImageAspectFlags {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for ImageAspectFlags {}
+
+impl std::ops::BitOr for ImageAspectFlags {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for ImageAspectFlags {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for ImageAspectFlags {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for ImageAspectFlags {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for ImageAspectFlags {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for ImageAspectFlags {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
+
+impl SparseMemoryBindFlags {
+    #[inline]
+    pub fn from_raw(value: u32) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for SparseMemoryBindFlags {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for SparseMemoryBindFlags {}
+
+impl Default for SparseMemoryBindFlags {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for SparseMemoryBindFlags {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for SparseMemoryBindFlags {}
+
+impl std::ops::BitOr for SparseMemoryBindFlags {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for SparseMemoryBindFlags {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for SparseMemoryBindFlags {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for SparseMemoryBindFlags {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for SparseMemoryBindFlags {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for SparseMemoryBindFlags {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
+
+impl SparseImageFormatFlags {
+    #[inline]
+    pub fn from_raw(value: u32) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for SparseImageFormatFlags {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for SparseImageFormatFlags {}
+
+impl Default for SparseImageFormatFlags {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for SparseImageFormatFlags {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for SparseImageFormatFlags {}
+
+impl std::ops::BitOr for SparseImageFormatFlags {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for SparseImageFormatFlags {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for SparseImageFormatFlags {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for SparseImageFormatFlags {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for SparseImageFormatFlags {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for SparseImageFormatFlags {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
+
+impl QueueFlags {
+    #[inline]
+    pub fn from_raw(value: u32) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for QueueFlags {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for QueueFlags {}
+
+impl Default for QueueFlags {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for QueueFlags {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for QueueFlags {}
+
+impl std::ops::BitOr for QueueFlags {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for QueueFlags {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for QueueFlags {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for QueueFlags {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for QueueFlags {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for QueueFlags {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
+
+impl ImageUsageFlags {
+    #[inline]
+    pub fn from_raw(value: u32) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for ImageUsageFlags {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for ImageUsageFlags {}
+
+impl Default for ImageUsageFlags {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for ImageUsageFlags {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for ImageUsageFlags {}
+
+impl std::ops::BitOr for ImageUsageFlags {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for ImageUsageFlags {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for ImageUsageFlags {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for ImageUsageFlags {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for ImageUsageFlags {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for ImageUsageFlags {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
+
+impl ImageCreateFlags {
+    #[inline]
+    pub fn from_raw(value: u32) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for ImageCreateFlags {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for ImageCreateFlags {}
+
+impl Default for ImageCreateFlags {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for ImageCreateFlags {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for ImageCreateFlags {}
+
+impl std::ops::BitOr for ImageCreateFlags {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for ImageCreateFlags {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for ImageCreateFlags {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for ImageCreateFlags {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for ImageCreateFlags {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for ImageCreateFlags {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
+
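+// A minimal usage sketch (hypothetical bit values, applicable to any flags
+// type in this file):
+//
+//     let a = QueueFlags::from_raw(0b01);
+//     let b = QueueFlags::from_raw(0b10);
+//     let both = a | b;
+//     assert!(both.intersects(a));
+//     assert!(both.contains(a | b));
+//     assert_eq!(both.cardinality(), 2);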
+impl FormatFeatureFlags {
+    #[inline]
+    pub fn from_raw(value: u32) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for FormatFeatureFlags {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for FormatFeatureFlags {}
+
+impl Default for FormatFeatureFlags {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for FormatFeatureFlags {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for FormatFeatureFlags {}
+
+impl std::ops::BitOr for FormatFeatureFlags {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for FormatFeatureFlags {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for FormatFeatureFlags {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for FormatFeatureFlags {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for FormatFeatureFlags {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for FormatFeatureFlags {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
+
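+// The `KHR`-suffixed types below (`SurfaceTransformFlagsKHR`,
+// `SwapchainCreateFlagsKHR`, `CompositeAlphaFlagsKHR`) mirror bitmasks from
+// the `VK_KHR_surface` and `VK_KHR_swapchain` extensions rather than core
+// Vulkan; they follow the exact same pattern.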
+impl SurfaceTransformFlagsKHR {
+    #[inline]
+    pub fn from_raw(value: u32) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for SurfaceTransformFlagsKHR {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for SurfaceTransformFlagsKHR {}
+
+impl Default for SurfaceTransformFlagsKHR {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for SurfaceTransformFlagsKHR {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for SurfaceTransformFlagsKHR {}
+
+impl std::ops::BitOr for SurfaceTransformFlagsKHR {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for SurfaceTransformFlagsKHR {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for SurfaceTransformFlagsKHR {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for SurfaceTransformFlagsKHR {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for SurfaceTransformFlagsKHR {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for SurfaceTransformFlagsKHR {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
+
+impl SwapchainCreateFlagsKHR {
+    #[inline]
+    pub fn from_raw(value: u32) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for SwapchainCreateFlagsKHR {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for SwapchainCreateFlagsKHR {}
+
+impl Default for SwapchainCreateFlagsKHR {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for SwapchainCreateFlagsKHR {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for SwapchainCreateFlagsKHR {}
+
+impl std::ops::BitOr for SwapchainCreateFlagsKHR {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for SwapchainCreateFlagsKHR {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for SwapchainCreateFlagsKHR {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for SwapchainCreateFlagsKHR {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for SwapchainCreateFlagsKHR {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for SwapchainCreateFlagsKHR {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
+
+impl CompositeAlphaFlagsKHR {
+    #[inline]
+    pub fn from_raw(value: u32) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for CompositeAlphaFlagsKHR {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for CompositeAlphaFlagsKHR {}
+
+impl Default for CompositeAlphaFlagsKHR {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for CompositeAlphaFlagsKHR {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for CompositeAlphaFlagsKHR {}
+
+impl std::ops::BitOr for CompositeAlphaFlagsKHR {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for CompositeAlphaFlagsKHR {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for CompositeAlphaFlagsKHR {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for CompositeAlphaFlagsKHR {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for CompositeAlphaFlagsKHR {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for CompositeAlphaFlagsKHR {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
+
+impl ImageViewCreateFlags {
+    #[inline]
+    pub fn from_raw(value: u32) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for ImageViewCreateFlags {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for ImageViewCreateFlags {}
+
+impl Default for ImageViewCreateFlags {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for ImageViewCreateFlags {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for ImageViewCreateFlags {}
+
+impl std::ops::BitOr for ImageViewCreateFlags {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for ImageViewCreateFlags {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for ImageViewCreateFlags {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for ImageViewCreateFlags {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for ImageViewCreateFlags {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for ImageViewCreateFlags {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
+
+impl CommandPoolCreateFlags {
+    #[inline]
+    pub fn from_raw(value: u32) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for CommandPoolCreateFlags {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for CommandPoolCreateFlags {}
+
+impl Default for CommandPoolCreateFlags {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for CommandPoolCreateFlags {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for CommandPoolCreateFlags {}
+
+impl std::ops::BitOr for CommandPoolCreateFlags {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for CommandPoolCreateFlags {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for CommandPoolCreateFlags {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for CommandPoolCreateFlags {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for CommandPoolCreateFlags {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for CommandPoolCreateFlags {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
+
+impl CommandPoolResetFlags {
+    #[inline]
+    pub fn from_raw(value: u32) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for CommandPoolResetFlags {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for CommandPoolResetFlags {}
+
+impl Default for CommandPoolResetFlags {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for CommandPoolResetFlags {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for CommandPoolResetFlags {}
+
+impl std::ops::BitOr for CommandPoolResetFlags {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for CommandPoolResetFlags {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for CommandPoolResetFlags {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for CommandPoolResetFlags {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for CommandPoolResetFlags {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for CommandPoolResetFlags {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
+
+impl CommandBufferResetFlags {
+    #[inline]
+    pub fn from_raw(value: u32) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for CommandBufferResetFlags {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for CommandBufferResetFlags {}
+
+impl Default for CommandBufferResetFlags {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for CommandBufferResetFlags {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for CommandBufferResetFlags {}
+
+impl std::ops::BitOr for CommandBufferResetFlags {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for CommandBufferResetFlags {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for CommandBufferResetFlags {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for CommandBufferResetFlags {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for CommandBufferResetFlags {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for CommandBufferResetFlags {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
+
+impl CommandBufferUsageFlags {
+    #[inline]
+    pub fn from_raw(value: u32) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for CommandBufferUsageFlags {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for CommandBufferUsageFlags {}
+
+impl Default for CommandBufferUsageFlags {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for CommandBufferUsageFlags {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for CommandBufferUsageFlags {}
+
+impl std::ops::BitOr for CommandBufferUsageFlags {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for CommandBufferUsageFlags {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for CommandBufferUsageFlags {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for CommandBufferUsageFlags {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for CommandBufferUsageFlags {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for CommandBufferUsageFlags {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
+
+impl QueryControlFlags {
+    #[inline]
+    pub fn from_raw(value: u32) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for QueryControlFlags {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for QueryControlFlags {}
+
+impl Default for QueryControlFlags {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for QueryControlFlags {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for QueryControlFlags {}
+
+impl std::ops::BitOr for QueryControlFlags {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for QueryControlFlags {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for QueryControlFlags {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for QueryControlFlags {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for QueryControlFlags {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for QueryControlFlags {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
+
+impl QueryResultFlags {
+    #[inline]
+    pub fn from_raw(value: u32) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for QueryResultFlags {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for QueryResultFlags {}
+
+impl Default for QueryResultFlags {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for QueryResultFlags {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for QueryResultFlags {}
+
+impl std::ops::BitOr for QueryResultFlags {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for QueryResultFlags {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for QueryResultFlags {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for QueryResultFlags {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for QueryResultFlags {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for QueryResultFlags {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
+
+impl QueryPipelineStatisticFlags {
+    #[inline]
+    pub fn from_raw(value: u32) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for QueryPipelineStatisticFlags {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for QueryPipelineStatisticFlags {}
+
+impl Default for QueryPipelineStatisticFlags {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for QueryPipelineStatisticFlags {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for QueryPipelineStatisticFlags {}
+
+impl std::ops::BitOr for QueryPipelineStatisticFlags {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for QueryPipelineStatisticFlags {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for QueryPipelineStatisticFlags {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for QueryPipelineStatisticFlags {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for QueryPipelineStatisticFlags {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for QueryPipelineStatisticFlags {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
+
+impl AttachmentDescriptionFlags {
+    #[inline]
+    pub fn from_raw(value: u32) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for AttachmentDescriptionFlags {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for AttachmentDescriptionFlags {}
+
+impl Default for AttachmentDescriptionFlags {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for AttachmentDescriptionFlags {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for AttachmentDescriptionFlags {}
+
+impl std::ops::BitOr for AttachmentDescriptionFlags {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for AttachmentDescriptionFlags {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for AttachmentDescriptionFlags {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for AttachmentDescriptionFlags {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for AttachmentDescriptionFlags {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for AttachmentDescriptionFlags {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
+
+impl AccessFlags {
+    #[inline]
+    pub fn from_raw(value: u32) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for AccessFlags {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for AccessFlags {}
+
+impl Default for AccessFlags {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for AccessFlags {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for AccessFlags {}
+
+impl std::ops::BitOr for AccessFlags {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for AccessFlags {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for AccessFlags {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for AccessFlags {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for AccessFlags {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for AccessFlags {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
+
+impl DependencyFlags {
+    #[inline]
+    pub fn from_raw(value: u32) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for DependencyFlags {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for DependencyFlags {}
+
+impl Default for DependencyFlags {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for DependencyFlags {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for DependencyFlags {}
+
+impl std::ops::BitOr for DependencyFlags {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for DependencyFlags {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for DependencyFlags {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for DependencyFlags {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for DependencyFlags {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for DependencyFlags {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
+
+impl SubpassDescriptionFlags {
+    #[inline]
+    pub fn from_raw(value: u32) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for SubpassDescriptionFlags {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for SubpassDescriptionFlags {}
+
+impl Default for SubpassDescriptionFlags {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for SubpassDescriptionFlags {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for SubpassDescriptionFlags {}
+
+impl std::ops::BitOr for SubpassDescriptionFlags {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for SubpassDescriptionFlags {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for SubpassDescriptionFlags {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for SubpassDescriptionFlags {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for SubpassDescriptionFlags {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for SubpassDescriptionFlags {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
+
+impl RenderPassCreateFlags {
+    #[inline]
+    pub fn from_raw(value: u32) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for RenderPassCreateFlags {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for RenderPassCreateFlags {}
+
+impl Default for RenderPassCreateFlags {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for RenderPassCreateFlags {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for RenderPassCreateFlags {}
+
+impl std::ops::BitOr for RenderPassCreateFlags {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for RenderPassCreateFlags {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for RenderPassCreateFlags {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for RenderPassCreateFlags {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for RenderPassCreateFlags {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for RenderPassCreateFlags {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
+
+impl FramebufferCreateFlags {
+    #[inline]
+    pub fn from_raw(value: u32) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for FramebufferCreateFlags {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for FramebufferCreateFlags {}
+
+impl Default for FramebufferCreateFlags {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for FramebufferCreateFlags {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for FramebufferCreateFlags {}
+
+impl std::ops::BitOr for FramebufferCreateFlags {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for FramebufferCreateFlags {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for FramebufferCreateFlags {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for FramebufferCreateFlags {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for FramebufferCreateFlags {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for FramebufferCreateFlags {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
+
+impl FenceCreateFlags {
+    #[inline]
+    pub fn from_raw(value: u32) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for FenceCreateFlags {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for FenceCreateFlags {}
+
+impl Default for FenceCreateFlags {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for FenceCreateFlags {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for FenceCreateFlags {}
+
+impl std::ops::BitOr for FenceCreateFlags {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for FenceCreateFlags {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for FenceCreateFlags {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for FenceCreateFlags {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for FenceCreateFlags {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for FenceCreateFlags {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
+
+impl SemaphoreCreateFlags {
+    #[inline]
+    pub fn from_raw(value: u32) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for SemaphoreCreateFlags {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for SemaphoreCreateFlags {}
+
+impl Default for SemaphoreCreateFlags {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for SemaphoreCreateFlags {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for SemaphoreCreateFlags {}
+
+impl std::ops::BitOr for SemaphoreCreateFlags {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for SemaphoreCreateFlags {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for SemaphoreCreateFlags {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for SemaphoreCreateFlags {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for SemaphoreCreateFlags {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for SemaphoreCreateFlags {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
+
+impl ShaderModuleCreateFlags {
+    #[inline]
+    pub fn from_raw(value: u32) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for ShaderModuleCreateFlags {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for ShaderModuleCreateFlags {}
+
+impl Default for ShaderModuleCreateFlags {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for ShaderModuleCreateFlags {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for ShaderModuleCreateFlags {}
+
+impl std::ops::BitOr for ShaderModuleCreateFlags {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for ShaderModuleCreateFlags {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for ShaderModuleCreateFlags {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for ShaderModuleCreateFlags {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for ShaderModuleCreateFlags {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for ShaderModuleCreateFlags {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
+
+impl ShaderStageFlags {
+    #[inline]
+    pub fn from_raw(value: u32) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for ShaderStageFlags {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for ShaderStageFlags {}
+
+impl Default for ShaderStageFlags {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for ShaderStageFlags {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for ShaderStageFlags {}
+
+impl std::ops::BitOr for ShaderStageFlags {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for ShaderStageFlags {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for ShaderStageFlags {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for ShaderStageFlags {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for ShaderStageFlags {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for ShaderStageFlags {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
+
+impl DescriptorSetLayoutCreateFlags {
+    #[inline]
+    pub fn from_raw(value: u32) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for DescriptorSetLayoutCreateFlags {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for DescriptorSetLayoutCreateFlags {}
+
+impl Default for DescriptorSetLayoutCreateFlags {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for DescriptorSetLayoutCreateFlags {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for DescriptorSetLayoutCreateFlags {}
+
+impl std::ops::BitOr for DescriptorSetLayoutCreateFlags {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for DescriptorSetLayoutCreateFlags {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for DescriptorSetLayoutCreateFlags {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for DescriptorSetLayoutCreateFlags {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for DescriptorSetLayoutCreateFlags {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for DescriptorSetLayoutCreateFlags {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
+
+impl StencilFaceFlags {
+    #[inline]
+    pub fn from_raw(value: u32) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for StencilFaceFlags {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for StencilFaceFlags {}
+
+impl Default for StencilFaceFlags {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for StencilFaceFlags {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for StencilFaceFlags {}
+
+impl std::ops::BitOr for StencilFaceFlags {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for StencilFaceFlags {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for StencilFaceFlags {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for StencilFaceFlags {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for StencilFaceFlags {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for StencilFaceFlags {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
+
+impl CullModeFlags {
+    #[inline]
+    pub fn from_raw(value: u32) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for CullModeFlags {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for CullModeFlags {}
+
+impl Default for CullModeFlags {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for CullModeFlags {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for CullModeFlags {}
+
+impl std::ops::BitOr for CullModeFlags {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for CullModeFlags {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for CullModeFlags {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for CullModeFlags {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for CullModeFlags {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for CullModeFlags {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
+
+impl DescriptorPoolCreateFlags {
+    #[inline]
+    pub fn from_raw(value: u32) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for DescriptorPoolCreateFlags {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for DescriptorPoolCreateFlags {}
+
+impl Default for DescriptorPoolCreateFlags {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for DescriptorPoolCreateFlags {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for DescriptorPoolCreateFlags {}
+
+impl std::ops::BitOr for DescriptorPoolCreateFlags {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for DescriptorPoolCreateFlags {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for DescriptorPoolCreateFlags {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for DescriptorPoolCreateFlags {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for DescriptorPoolCreateFlags {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for DescriptorPoolCreateFlags {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
+
+impl DescriptorPoolResetFlags {
+    #[inline]
+    pub fn from_raw(value: u32) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for DescriptorPoolResetFlags {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for DescriptorPoolResetFlags {}
+
+impl Default for DescriptorPoolResetFlags {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for DescriptorPoolResetFlags {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for DescriptorPoolResetFlags {}
+
+impl std::ops::BitOr for DescriptorPoolResetFlags {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for DescriptorPoolResetFlags {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for DescriptorPoolResetFlags {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for DescriptorPoolResetFlags {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for DescriptorPoolResetFlags {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for DescriptorPoolResetFlags {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
+
+impl SamplerCreateFlags {
+    #[inline]
+    pub fn from_raw(value: u32) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for SamplerCreateFlags {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for SamplerCreateFlags {}
+
+impl Default for SamplerCreateFlags {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for SamplerCreateFlags {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for SamplerCreateFlags {}
+
+impl std::ops::BitOr for SamplerCreateFlags {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for SamplerCreateFlags {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for SamplerCreateFlags {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for SamplerCreateFlags {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for SamplerCreateFlags {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for SamplerCreateFlags {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
+
+impl PipelineLayoutCreateFlags {
+    #[inline]
+    pub fn from_raw(value: u32) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for PipelineLayoutCreateFlags {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for PipelineLayoutCreateFlags {}
+
+impl Default for PipelineLayoutCreateFlags {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for PipelineLayoutCreateFlags {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for PipelineLayoutCreateFlags {}
+
+impl std::ops::BitOr for PipelineLayoutCreateFlags {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for PipelineLayoutCreateFlags {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for PipelineLayoutCreateFlags {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for PipelineLayoutCreateFlags {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for PipelineLayoutCreateFlags {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for PipelineLayoutCreateFlags {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
+
+impl PipelineCacheCreateFlags {
+    #[inline]
+    pub fn from_raw(value: u32) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for PipelineCacheCreateFlags {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for PipelineCacheCreateFlags {}
+
+impl Default for PipelineCacheCreateFlags {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for PipelineCacheCreateFlags {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for PipelineCacheCreateFlags {}
+
+impl std::ops::BitOr for PipelineCacheCreateFlags {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for PipelineCacheCreateFlags {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for PipelineCacheCreateFlags {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for PipelineCacheCreateFlags {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for PipelineCacheCreateFlags {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for PipelineCacheCreateFlags {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
+
+impl PipelineDepthStencilStateCreateFlags {
+    #[inline]
+    pub fn from_raw(value: u32) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for PipelineDepthStencilStateCreateFlags {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for PipelineDepthStencilStateCreateFlags {}
+
+impl Default for PipelineDepthStencilStateCreateFlags {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for PipelineDepthStencilStateCreateFlags {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for PipelineDepthStencilStateCreateFlags {}
+
+impl std::ops::BitOr for PipelineDepthStencilStateCreateFlags {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for PipelineDepthStencilStateCreateFlags {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for PipelineDepthStencilStateCreateFlags {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for PipelineDepthStencilStateCreateFlags {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for PipelineDepthStencilStateCreateFlags {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for PipelineDepthStencilStateCreateFlags {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
+
+impl PipelineDynamicStateCreateFlags {
+    #[inline]
+    pub fn from_raw(value: u32) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for PipelineDynamicStateCreateFlags {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for PipelineDynamicStateCreateFlags {}
+
+impl Default for PipelineDynamicStateCreateFlags {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for PipelineDynamicStateCreateFlags {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for PipelineDynamicStateCreateFlags {}
+
+impl std::ops::BitOr for PipelineDynamicStateCreateFlags {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for PipelineDynamicStateCreateFlags {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for PipelineDynamicStateCreateFlags {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for PipelineDynamicStateCreateFlags {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for PipelineDynamicStateCreateFlags {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for PipelineDynamicStateCreateFlags {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
+
+impl PipelineColorBlendStateCreateFlags {
+    #[inline]
+    pub fn from_raw(value: u32) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for PipelineColorBlendStateCreateFlags {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for PipelineColorBlendStateCreateFlags {}
+
+impl Default for PipelineColorBlendStateCreateFlags {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for PipelineColorBlendStateCreateFlags {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for PipelineColorBlendStateCreateFlags {}
+
+impl std::ops::BitOr for PipelineColorBlendStateCreateFlags {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for PipelineColorBlendStateCreateFlags {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for PipelineColorBlendStateCreateFlags {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for PipelineColorBlendStateCreateFlags {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for PipelineColorBlendStateCreateFlags {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for PipelineColorBlendStateCreateFlags {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
+
+impl PipelineMultisampleStateCreateFlags {
+    #[inline]
+    pub fn from_raw(value: u32) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for PipelineMultisampleStateCreateFlags {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for PipelineMultisampleStateCreateFlags {}
+
+impl Default for PipelineMultisampleStateCreateFlags {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for PipelineMultisampleStateCreateFlags {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for PipelineMultisampleStateCreateFlags {}
+
+impl std::ops::BitOr for PipelineMultisampleStateCreateFlags {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for PipelineMultisampleStateCreateFlags {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for PipelineMultisampleStateCreateFlags {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for PipelineMultisampleStateCreateFlags {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for PipelineMultisampleStateCreateFlags {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for PipelineMultisampleStateCreateFlags {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
+
+impl PipelineRasterizationStateCreateFlags {
+    #[inline]
+    pub fn from_raw(value: u32) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for PipelineRasterizationStateCreateFlags {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for PipelineRasterizationStateCreateFlags {}
+
+impl Default for PipelineRasterizationStateCreateFlags {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for PipelineRasterizationStateCreateFlags {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for PipelineRasterizationStateCreateFlags {}
+
+impl std::ops::BitOr for PipelineRasterizationStateCreateFlags {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for PipelineRasterizationStateCreateFlags {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for PipelineRasterizationStateCreateFlags {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for PipelineRasterizationStateCreateFlags {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for PipelineRasterizationStateCreateFlags {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for PipelineRasterizationStateCreateFlags {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
+
+impl PipelineViewportStateCreateFlags {
+    #[inline]
+    pub fn from_raw(value: u32) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for PipelineViewportStateCreateFlags {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for PipelineViewportStateCreateFlags {}
+
+impl Default for PipelineViewportStateCreateFlags {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for PipelineViewportStateCreateFlags {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for PipelineViewportStateCreateFlags {}
+
+impl std::ops::BitOr for PipelineViewportStateCreateFlags {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for PipelineViewportStateCreateFlags {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for PipelineViewportStateCreateFlags {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for PipelineViewportStateCreateFlags {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for PipelineViewportStateCreateFlags {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for PipelineViewportStateCreateFlags {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
+
+impl PipelineTessellationStateCreateFlags {
+    #[inline]
+    pub fn from_raw(value: u32) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for PipelineTessellationStateCreateFlags {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for PipelineTessellationStateCreateFlags {}
+
+impl Default for PipelineTessellationStateCreateFlags {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for PipelineTessellationStateCreateFlags {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for PipelineTessellationStateCreateFlags {}
+
+impl std::ops::BitOr for PipelineTessellationStateCreateFlags {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for PipelineTessellationStateCreateFlags {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for PipelineTessellationStateCreateFlags {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for PipelineTessellationStateCreateFlags {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for PipelineTessellationStateCreateFlags {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for PipelineTessellationStateCreateFlags {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
+
+impl PipelineInputAssemblyStateCreateFlags {
+    #[inline]
+    pub fn from_raw(value: u32) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for PipelineInputAssemblyStateCreateFlags {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for PipelineInputAssemblyStateCreateFlags {}
+
+impl Default for PipelineInputAssemblyStateCreateFlags {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for PipelineInputAssemblyStateCreateFlags {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for PipelineInputAssemblyStateCreateFlags {}
+
+impl std::ops::BitOr for PipelineInputAssemblyStateCreateFlags {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for PipelineInputAssemblyStateCreateFlags {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for PipelineInputAssemblyStateCreateFlags {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for PipelineInputAssemblyStateCreateFlags {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for PipelineInputAssemblyStateCreateFlags {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for PipelineInputAssemblyStateCreateFlags {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
+
+impl PipelineVertexInputStateCreateFlags {
+    #[inline]
+    pub fn from_raw(value: u32) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for PipelineVertexInputStateCreateFlags {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for PipelineVertexInputStateCreateFlags {}
+
+impl Default for PipelineVertexInputStateCreateFlags {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for PipelineVertexInputStateCreateFlags {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for PipelineVertexInputStateCreateFlags {}
+
+impl std::ops::BitOr for PipelineVertexInputStateCreateFlags {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for PipelineVertexInputStateCreateFlags {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for PipelineVertexInputStateCreateFlags {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for PipelineVertexInputStateCreateFlags {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for PipelineVertexInputStateCreateFlags {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for PipelineVertexInputStateCreateFlags {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
+
+impl PipelineShaderStageCreateFlags {
+    #[inline]
+    pub fn from_raw(value: u32) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for PipelineShaderStageCreateFlags {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for PipelineShaderStageCreateFlags {}
+
+impl Default for PipelineShaderStageCreateFlags {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for PipelineShaderStageCreateFlags {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for PipelineShaderStageCreateFlags {}
+
+impl std::ops::BitOr for PipelineShaderStageCreateFlags {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for PipelineShaderStageCreateFlags {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for PipelineShaderStageCreateFlags {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for PipelineShaderStageCreateFlags {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for PipelineShaderStageCreateFlags {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for PipelineShaderStageCreateFlags {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
+
+impl PipelineCreateFlags {
+    #[inline]
+    pub fn from_raw(value: u32) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for PipelineCreateFlags {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for PipelineCreateFlags {}
+
+impl Default for PipelineCreateFlags {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for PipelineCreateFlags {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for PipelineCreateFlags {}
+
+impl std::ops::BitOr for PipelineCreateFlags {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for PipelineCreateFlags {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for PipelineCreateFlags {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for PipelineCreateFlags {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for PipelineCreateFlags {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for PipelineCreateFlags {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
+
+impl ColorComponentFlags {
+    #[inline]
+    pub fn from_raw(value: u32) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for ColorComponentFlags {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for ColorComponentFlags {}
+
+impl Default for ColorComponentFlags {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for ColorComponentFlags {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for ColorComponentFlags {}
+
+impl std::ops::BitOr for ColorComponentFlags {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for ColorComponentFlags {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for ColorComponentFlags {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for ColorComponentFlags {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for ColorComponentFlags {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for ColorComponentFlags {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
+
+impl BufferCreateFlags {
+    #[inline]
+    pub fn from_raw(value: u32) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for BufferCreateFlags {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for BufferCreateFlags {}
+
+impl Default for BufferCreateFlags {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for BufferCreateFlags {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for BufferCreateFlags {}
+
+impl std::ops::BitOr for BufferCreateFlags {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for BufferCreateFlags {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for BufferCreateFlags {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for BufferCreateFlags {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for BufferCreateFlags {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for BufferCreateFlags {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
+
+impl BufferUsageFlags {
+    #[inline]
+    pub fn from_raw(value: u32) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for BufferUsageFlags {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for BufferUsageFlags {}
+
+impl Default for BufferUsageFlags {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for BufferUsageFlags {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for BufferUsageFlags {}
+
+impl std::ops::BitOr for BufferUsageFlags {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for BufferUsageFlags {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for BufferUsageFlags {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for BufferUsageFlags {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for BufferUsageFlags {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for BufferUsageFlags {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
+
+impl BufferViewCreateFlags {
+    #[inline]
+    pub fn from_raw(value: u32) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for BufferViewCreateFlags {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for BufferViewCreateFlags {}
+
+impl Default for BufferViewCreateFlags {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for BufferViewCreateFlags {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for BufferViewCreateFlags {}
+
+impl std::ops::BitOr for BufferViewCreateFlags {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for BufferViewCreateFlags {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for BufferViewCreateFlags {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for BufferViewCreateFlags {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for BufferViewCreateFlags {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for BufferViewCreateFlags {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
+
+impl SemaphoreWaitFlags {
+    #[inline]
+    pub fn from_raw(value: u32) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for SemaphoreWaitFlags {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for SemaphoreWaitFlags {}
+
+impl Default for SemaphoreWaitFlags {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for SemaphoreWaitFlags {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for SemaphoreWaitFlags {}
+
+impl std::ops::BitOr for SemaphoreWaitFlags {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for SemaphoreWaitFlags {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for SemaphoreWaitFlags {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for SemaphoreWaitFlags {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for SemaphoreWaitFlags {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for SemaphoreWaitFlags {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
+
+impl ResolveModeFlags {
+    #[inline]
+    pub fn from_raw(value: u32) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for ResolveModeFlags {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for ResolveModeFlags {}
+
+impl Default for ResolveModeFlags {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for ResolveModeFlags {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for ResolveModeFlags {}
+
+impl std::ops::BitOr for ResolveModeFlags {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for ResolveModeFlags {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for ResolveModeFlags {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for ResolveModeFlags {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for ResolveModeFlags {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for ResolveModeFlags {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
+
+impl RenderingFlags {
+    #[inline]
+    pub fn from_raw(value: u32) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for RenderingFlags {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for RenderingFlags {}
+
+impl Default for RenderingFlags {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for RenderingFlags {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for RenderingFlags {}
+
+impl std::ops::BitOr for RenderingFlags {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for RenderingFlags {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for RenderingFlags {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for RenderingFlags {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for RenderingFlags {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for RenderingFlags {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
+
+impl SubgroupFeatureFlags {
+    #[inline]
+    pub fn from_raw(value: u32) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for SubgroupFeatureFlags {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for SubgroupFeatureFlags {}
+
+impl Default for SubgroupFeatureFlags {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for SubgroupFeatureFlags {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for SubgroupFeatureFlags {}
+
+impl std::ops::BitOr for SubgroupFeatureFlags {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for SubgroupFeatureFlags {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for SubgroupFeatureFlags {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for SubgroupFeatureFlags {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for SubgroupFeatureFlags {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for SubgroupFeatureFlags {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
+
+impl SubmitFlags {
+    #[inline]
+    pub fn from_raw(value: u32) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u32 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for SubmitFlags {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for SubmitFlags {}
+
+impl Default for SubmitFlags {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for SubmitFlags {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for SubmitFlags {}
+
+impl std::ops::BitOr for SubmitFlags {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for SubmitFlags {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for SubmitFlags {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for SubmitFlags {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for SubmitFlags {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for SubmitFlags {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
+
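+// The two flag types below (`PipelineStageFlags2`, `AccessFlags2`) wrap a `u64`
+// rather than a `u32`; Vulkan's synchronization2 API widened these to 64-bit
+// flags, which is why `from_raw`/`as_raw` take and return `u64` here.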
+impl PipelineStageFlags2 {
+    #[inline]
+    pub fn from_raw(value: u64) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u64 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for PipelineStageFlags2 {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for PipelineStageFlags2 {}
+
+impl Default for PipelineStageFlags2 {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for PipelineStageFlags2 {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for PipelineStageFlags2 {}
+
+impl std::ops::BitOr for PipelineStageFlags2 {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for PipelineStageFlags2 {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for PipelineStageFlags2 {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for PipelineStageFlags2 {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for PipelineStageFlags2 {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for PipelineStageFlags2 {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
+
+impl AccessFlags2 {
+    #[inline]
+    pub fn from_raw(value: u64) -> Self {
+        Self(value)
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u64 {
+        self.0
+    }
+
+    #[inline]
+    pub fn intersects(self, rhs: Self) -> bool {
+        self.0 & rhs.0 != 0
+    }
+
+    #[inline]
+    pub fn contains(self, rhs: Self) -> bool {
+        self.0 & rhs.0 == rhs.0
+    }
+
+    #[inline]
+    pub fn cardinality(self) -> u32 {
+        self.0.count_ones()
+    }
+}
+
+impl Clone for AccessFlags2 {
+    fn clone(&self) -> Self {
+        Self(self.0)
+    }
+}
+
+impl Copy for AccessFlags2 {}
+
+impl Default for AccessFlags2 {
+    fn default() -> Self {
+        Self(0)
+    }
+}
+
+impl PartialEq for AccessFlags2 {
+    fn eq(&self, rhs: &Self) -> bool {
+        self.0 == rhs.0
+    }
+}
+
+impl Eq for AccessFlags2 {}
+
+impl std::ops::BitOr for AccessFlags2 {
+    type Output = Self;
+    fn bitor(self, rhs: Self) -> Self::Output {
+        Self(self.0 | rhs.0)
+    }
+}
+
+impl std::ops::BitOrAssign for AccessFlags2 {
+    fn bitor_assign(&mut self, rhs: Self) {
+        self.0 |= rhs.0
+    }
+}
+
+impl std::ops::BitAnd for AccessFlags2 {
+    type Output = Self;
+    fn bitand(self, rhs: Self) -> Self::Output {
+        Self(self.0 & rhs.0)
+    }
+}
+
+impl std::ops::BitAndAssign for AccessFlags2 {
+    fn bitand_assign(&mut self, rhs: Self) {
+        self.0 &= rhs.0
+    }
+}
+
+impl std::ops::BitXor for AccessFlags2 {
+    type Output = Self;
+    fn bitxor(self, rhs: Self) -> Self::Output {
+        Self(self.0 ^ rhs.0)
+    }
+}
+
+impl std::ops::BitXorAssign for AccessFlags2 {
+    fn bitxor_assign(&mut self, rhs: Self) {
+        self.0 ^= rhs.0
+    }
+}
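+
+// Every flag type above follows the same newtype-over-`u32`/`u64` pattern, so a
+// single usage sketch covers them all. The bit patterns below are made-up
+// placeholder values for illustration, not real Vulkan constants:
+//
+//     let mut mask = ColorComponentFlags::from_raw(0b0011);
+//     mask |= ColorComponentFlags::from_raw(0b0100);
+//     assert!(mask.contains(ColorComponentFlags::from_raw(0b0001)));   // subset test
+//     assert!(mask.intersects(ColorComponentFlags::from_raw(0b0110))); // any-bit test
+//     assert_eq!(mask.cardinality(), 3);                               // popcount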
diff --git a/vulkan-sys/src/functions.rs b/vulkan-sys/src/functions.rs
new file mode 100644 (file)
index 0000000..f68490a
--- /dev/null
@@ -0,0 +1,894 @@
+use std::{ffi::c_void, os::raw::c_char};
+
+use super::*;
+
+pub type FnVoidFunction = extern "system" fn();
+
+pub type FnAllocationFunction = extern "system" fn(
+    user_data: *mut c_void,
+    size: usize,
+    alignment: usize,
+    allocation_scope: SystemAllocationScope,
+) -> *mut c_void;
+
+pub type FnReallocationFunction = extern "system" fn(
+    user_data: *mut c_void,
+    original: *mut c_void,
+    size: usize,
+    alignment: usize,
+    allocation_scope: SystemAllocationScope,
+) -> *mut c_void;
+
+pub type FnFreeFunction = extern "system" fn(user_data: *mut c_void, memory: *mut c_void);
+
+pub type FnInternalAllocationNotification = extern "system" fn(
+    user_data: *mut c_void,
+    size: usize,
+    allocation_type: InternalAllocationType,
+    allocation_scope: SystemAllocationScope,
+);
+
+pub type FnInternalFreeNotification = extern "system" fn(
+    user_data: *mut c_void,
+    size: usize,
+    allocation_type: InternalAllocationType,
+    allocation_scope: SystemAllocationScope,
+);
+
+pub type FnGetInstanceProcAddr =
+    extern "system" fn(instance: Instance, name: *const c_char) -> Option<FnVoidFunction>;
+
+pub type FnEnumerateInstanceVersion = extern "system" fn(api_version: &mut u32) -> Result;
+
+pub type FnCreateInstance = extern "system" fn(
+    create_info: &InstanceCreateInfo,
+    allocator: Option<&AllocationCallbacks>,
+    instance: &mut Instance,
+) -> Result;
+
+pub type FnDestroyInstance =
+    extern "system" fn(instance: Instance, allocator: Option<&AllocationCallbacks>);
+
+pub type FnEnumeratePhysicalDevices = extern "system" fn(
+    instance: Instance,
+    physical_device_count: &mut u32,
+    physical_devices: *mut PhysicalDevice,
+) -> Result;
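+
+// Like most `vkEnumerate*` queries, this follows Vulkan's two-call idiom: pass a
+// null `physical_devices` pointer to read the count, allocate storage, then call
+// again to fill it. A sketch, with `enumerate_physical_devices` and
+// `physical_devices` as hypothetical values:
+//
+//     let mut count: u32 = 0;
+//     enumerate_physical_devices(instance, &mut count, std::ptr::null_mut());
+//     // ...allocate space for `count` handles, then:
+//     enumerate_physical_devices(instance, &mut count, physical_devices.as_mut_ptr());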
+
+pub type FnGetPhysicalDeviceFeatures =
+    extern "system" fn(physical_device: PhysicalDevice, features: *mut PhysicalDeviceFeatures);
+
+pub type FnGetPhysicalDeviceFeatures2 =
+    extern "system" fn(physical_device: PhysicalDevice, features: *mut PhysicalDeviceFeatures2);
+
+pub type FnGetPhysicalDeviceFormatProperties = extern "system" fn(
+    physical_device: PhysicalDevice,
+    format: Format,
+    format_properties: &mut FormatProperties,
+);
+
+pub type FnGetPhysicalDeviceImageFormatProperties = extern "system" fn(
+    physical_device: PhysicalDevice,
+    format: Format,
+    r#type: ImageType,
+    tiling: ImageTiling,
+    usage: ImageUsageFlags,
+    flags: ImageCreateFlags,
+    image_format_properties: &mut ImageFormatProperties,
+) -> Result;
+
+pub type FnGetPhysicalDeviceProperties =
+    extern "system" fn(physicalDevice: PhysicalDevice, properties: *mut PhysicalDeviceProperties);
+
+pub type FnGetPhysicalDeviceProperties2 =
+    extern "system" fn(physicalDevice: PhysicalDevice, properties: *mut PhysicalDeviceProperties2);
+
+pub type FnGetPhysicalDeviceQueueFamilyProperties = extern "system" fn(
+    physical_device: PhysicalDevice,
+    queue_family_property_count: &mut u32,
+    queue_family_properties: *mut QueueFamilyProperties,
+);
+
+pub type FnGetPhysicalDeviceMemoryProperties = extern "system" fn(
+    physical_device: PhysicalDevice,
+    memory_properties: *mut PhysicalDeviceMemoryProperties,
+);
+
+pub type FnDestroySurfaceKHR = extern "system" fn(
+    instance: Instance,
+    surface: SurfaceKHR,
+    allocator: Option<&AllocationCallbacks>,
+);
+
+pub type FnGetPhysicalDeviceSurfaceSupportKHR = extern "system" fn(
+    physical_device: PhysicalDevice,
+    queue_family_index: u32,
+    surface: SurfaceKHR,
+    supported: &mut Bool32,
+) -> Result;
+
+pub type FnGetPhysicalDeviceSurfaceCapabilitiesKHR = extern "system" fn(
+    physical_device: PhysicalDevice,
+    surface: SurfaceKHR,
+    surface_capabilities: &mut SurfaceCapabilitiesKHR,
+) -> Result;
+
+pub type FnGetPhysicalDeviceSurfaceFormatsKHR = extern "system" fn(
+    physical_device: PhysicalDevice,
+    surface: SurfaceKHR,
+    surface_format_count: &mut u32,
+    surface_formats: *mut SurfaceFormatKHR,
+) -> Result;
+
+pub type FnGetPhysicalDeviceSurfacePresentModesKHR = extern "system" fn(
+    physical_device: PhysicalDevice,
+    surface: SurfaceKHR,
+    present_mode_count: &mut u32,
+    present_modes: *mut PresentModeKHR,
+) -> Result;
+
+pub type FnCreateDevice = extern "system" fn(
+    physical_device: PhysicalDevice,
+    create_info: &DeviceCreateInfo,
+    allocator: Option<&AllocationCallbacks>,
+    device: &mut Device,
+) -> Result;
+
+pub type FnGetDeviceProcAddr =
+    extern "system" fn(device: Device, name: *const c_char) -> Option<FnVoidFunction>;
+
+pub type FnDestroyDevice =
+    extern "system" fn(device: Device, allocator: Option<&AllocationCallbacks>);
+
+pub type FnGetDeviceQueue = extern "system" fn(
+    device: Device,
+    queue_family_index: u32,
+    queue_index: u32,
+    queue: &mut Queue,
+);
+
+pub type FnQueueSubmit = extern "system" fn(
+    queue: Queue,
+    submit_count: u32,
+    submits: *const SubmitInfo,
+    fence: Fence,
+) -> Result;
+
+pub type FnQueueSubmit2 = extern "system" fn(
+    queue: Queue,
+    submit_count: u32,
+    submits: *const SubmitInfo2,
+    fence: Fence,
+) -> Result;
+
+pub type FnQueueWaitIdle = extern "system" fn(queue: Queue) -> Result;
+
+pub type FnDeviceWaitIdle = extern "system" fn(device: Device) -> Result;
+
+pub type FnAllocateMemory = extern "system" fn(
+    device: Device,
+    allocate_info: &MemoryAllocateInfo,
+    allocator: Option<&AllocationCallbacks>,
+    memory: *mut DeviceMemory,
+) -> Result;
+
+pub type FnFreeMemory = extern "system" fn(
+    device: Device,
+    memory: DeviceMemory,
+    allocator: Option<&AllocationCallbacks>,
+);
+
+pub type FnMapMemory = extern "system" fn(
+    device: Device,
+    memory: DeviceMemory,
+    offset: DeviceSize,
+    size: DeviceSize,
+    flags: MemoryMapFlags,
+    data: &mut *mut c_void,
+) -> Result;
+
+pub type FnUnmapMemory = extern "system" fn(device: Device, memory: DeviceMemory);
+
+pub type FnCreateImageView = extern "system" fn(
+    device: Device,
+    create_info: &ImageViewCreateInfo,
+    allocator: Option<&AllocationCallbacks>,
+    view: &mut ImageView,
+) -> Result;
+
+pub type FnDestroyImageView = extern "system" fn(
+    device: Device,
+    image_view: ImageView,
+    allocator: Option<&AllocationCallbacks>,
+);
+
+pub type FnCreateShaderModule = extern "system" fn(
+    device: Device,
+    create_info: &ShaderModuleCreateInfo,
+    allocator: Option<&AllocationCallbacks>,
+    shader_module: &mut ShaderModule,
+) -> Result;
+
+pub type FnDestroyShaderModule = extern "system" fn(
+    device: Device,
+    shader_module: ShaderModule,
+    allocator: Option<&AllocationCallbacks>,
+);
+
+pub type FnCreatePipelineCache = extern "system" fn(
+    device: Device,
+    create_info: &PipelineCacheCreateInfo,
+    allocator: Option<&AllocationCallbacks>,
+    pipeline_cache: &mut PipelineCache,
+) -> Result;
+
+pub type FnDestroyPipelineCache = extern "system" fn(
+    device: Device,
+    pipeline_cache: PipelineCache,
+    allocator: Option<&AllocationCallbacks>,
+);
+
+pub type FnGetPipelineCacheData = extern "system" fn(
+    device: Device,
+    pipeline_cache: PipelineCache,
+    data_size: &mut usize,
+    data: *mut c_void,
+) -> Result;
+
+pub type FnMergePipelineCaches = extern "system" fn(
+    device: Device,
+    dst_cache: PipelineCache,
+    src_cache_count: u32,
+    src_caches: *const PipelineCache,
+) -> Result;
+
+pub type FnCreateGraphicsPipelines = extern "system" fn(
+    device: Device,
+    pipeline_cache: PipelineCache,
+    create_info_count: u32,
+    create_infos: *const GraphicsPipelineCreateInfo,
+    allocator: Option<&AllocationCallbacks>,
+    pipelines: *mut Pipeline,
+) -> Result;
+
+pub type FnCreateComputePipelines = extern "system" fn(
+    device: Device,
+    pipeline_cache: PipelineCache,
+    create_info_count: u32,
+    create_infos: *const ComputePipelineCreateInfo,
+    allocator: Option<&AllocationCallbacks>,
+    pipelines: *mut Pipeline,
+) -> Result;
+
+pub type FnDestroyPipeline =
+    extern "system" fn(device: Device, pipeline: Pipeline, allocator: Option<&AllocationCallbacks>);
+
+pub type FnCreatePipelineLayout = extern "system" fn(
+    device: Device,
+    create_info: &PipelineLayoutCreateInfo,
+    allocator: Option<&AllocationCallbacks>,
+    pipeline_layout: &mut PipelineLayout,
+) -> Result;
+
+pub type FnDestroyPipelineLayout = extern "system" fn(
+    device: Device,
+    pipeline_layout: PipelineLayout,
+    allocator: Option<&AllocationCallbacks>,
+);
+
+pub type FnCreateSampler = extern "system" fn(
+    device: Device,
+    create_info: &SamplerCreateInfo,
+    allocator: Option<&AllocationCallbacks>,
+    sampler: &mut Sampler,
+) -> Result;
+
+pub type FnDestroySampler =
+    extern "system" fn(device: Device, sampler: Sampler, allocator: Option<&AllocationCallbacks>);
+
+pub type FnCreateDescriptorSetLayout = extern "system" fn(
+    device: Device,
+    create_info: &DescriptorSetLayoutCreateInfo,
+    allocator: Option<&AllocationCallbacks>,
+    set_layout: &mut DescriptorSetLayout,
+) -> Result;
+
+pub type FnDestroyDescriptorSetLayout = extern "system" fn(
+    device: Device,
+    descriptor_set_layout: DescriptorSetLayout,
+    allocator: Option<&AllocationCallbacks>,
+);
+
+pub type FnCreateDescriptorPool = extern "system" fn(
+    device: Device,
+    create_info: &DescriptorPoolCreateInfo,
+    allocator: Option<&AllocationCallbacks>,
+    descriptor_pool: &mut DescriptorPool,
+) -> Result;
+
+pub type FnDestroyDescriptorPool = extern "system" fn(
+    device: Device,
+    descriptor_pool: DescriptorPool,
+    allocator: Option<&AllocationCallbacks>,
+);
+
+pub type FnResetDescriptorPool = extern "system" fn(
+    device: Device,
+    descriptor_pool: DescriptorPool,
+    flags: DescriptorPoolResetFlags,
+) -> Result;
+
+pub type FnAllocateDescriptorSets = extern "system" fn(
+    device: Device,
+    allocate_info: &DescriptorSetAllocateInfo,
+    descriptor_sets: *mut DescriptorSet,
+) -> Result;
+
+pub type FnFreeDescriptorSets = extern "system" fn(
+    device: Device,
+    descriptor_pool: DescriptorPool,
+    descriptor_set_count: u32,
+    descriptor_sets: *const DescriptorSet,
+) -> Result;
+
+pub type FnUpdateDescriptorSets = extern "system" fn(
+    device: Device,
+    descriptor_write_count: u32,
+    descriptor_writes: *const WriteDescriptorSet,
+    descriptor_copy_count: u32,
+    descriptor_copies: *const CopyDescriptorSet,
+);
+
+pub type FnCreateFramebuffer = extern "system" fn(
+    device: Device,
+    create_info: &FramebufferCreateInfo,
+    allocator: Option<&AllocationCallbacks>,
+    framebuffer: &mut Framebuffer,
+) -> Result;
+
+pub type FnDestroyFramebuffer = extern "system" fn(
+    device: Device,
+    framebuffer: Framebuffer,
+    allocator: Option<&AllocationCallbacks>,
+);
+
+pub type FnCreateRenderPass = extern "system" fn(
+    device: Device,
+    create_info: &RenderPassCreateInfo,
+    allocator: Option<&AllocationCallbacks>,
+    render_pass: &mut RenderPass,
+) -> Result;
+
+pub type FnDestroyRenderPass = extern "system" fn(
+    device: Device,
+    render_pass: RenderPass,
+    allocator: Option<&AllocationCallbacks>,
+);
+
+pub type FnCreateCommandPool = extern "system" fn(
+    device: Device,
+    create_info: &CommandPoolCreateInfo,
+    allocator: Option<&AllocationCallbacks>,
+    command_pool: &mut CommandPool,
+) -> Result;
+
+pub type FnDestroyCommandPool = extern "system" fn(
+    device: Device,
+    command_pool: CommandPool,
+    allocator: Option<&AllocationCallbacks>,
+);
+
+pub type FnResetCommandPool = extern "system" fn(
+    device: Device,
+    command_pool: CommandPool,
+    flags: CommandPoolResetFlags,
+) -> Result;
+
+pub type FnAllocateCommandBuffers = extern "system" fn(
+    device: Device,
+    allocate_info: &CommandBufferAllocateInfo,
+    command_buffers: *mut CommandBuffer,
+) -> Result;
+
+pub type FnFreeCommandBuffers = extern "system" fn(
+    device: Device,
+    command_pool: CommandPool,
+    command_buffer_count: u32,
+    command_buffers: *const CommandBuffer,
+);
+
+pub type FnBeginCommandBuffer = extern "system" fn(
+    command_buffer: CommandBuffer,
+    begin_info: &CommandBufferBeginInfo,
+) -> Result;
+
+pub type FnEndCommandBuffer = extern "system" fn(command_buffer: CommandBuffer) -> Result;
+
+pub type FnResetCommandBuffer =
+    extern "system" fn(command_buffer: CommandBuffer, flags: CommandBufferResetFlags) -> Result;
+
+pub type FnCmdBindPipeline = extern "system" fn(
+    command_buffer: CommandBuffer,
+    pipeline_bind_point: PipelineBindPoint,
+    pipeline: Pipeline,
+);
+
+pub type FnCmdSetViewport = extern "system" fn(
+    command_buffer: CommandBuffer,
+    first_viewport: u32,
+    viewport_count: u32,
+    viewports: *const Viewport,
+);
+
+pub type FnCmdSetScissor = extern "system" fn(
+    command_buffer: CommandBuffer,
+    first_scissor: u32,
+    scissor_count: u32,
+    scissors: *const Rect2d,
+);
+
+pub type FnCmdSetLineWidth = extern "system" fn(command_buffer: CommandBuffer, line_width: f32);
+
+pub type FnCmdSetDepthBias = extern "system" fn(
+    command_buffer: CommandBuffer,
+    depth_bias_constant_factor: f32,
+    depth_bias_clamp: f32,
+    depth_bias_slope_factor: f32,
+);
+
+pub type FnCmdSetBlendConstants =
+    extern "system" fn(command_buffer: CommandBuffer, blend_constants: [f32; 4]);
+
+pub type FnCmdSetDepthBounds =
+    extern "system" fn(command_buffer: CommandBuffer, min_depth_bounds: f32, max_depth_bounds: f32);
+
+pub type FnCmdSetStencilCompareMask = extern "system" fn(
+    command_buffer: CommandBuffer,
+    face_mask: StencilFaceFlags,
+    compare_mask: u32,
+);
+
+pub type FnCmdSetStencilWriteMask =
+    extern "system" fn(command_buffer: CommandBuffer, face_mask: StencilFaceFlags, write_mask: u32);
+
+pub type FnCmdSetStencilReference =
+    extern "system" fn(command_buffer: CommandBuffer, face_mask: StencilFaceFlags, reference: u32);
+
+pub type FnCmdBindDescriptorSets = extern "system" fn(
+    command_buffer: CommandBuffer,
+    pipeline_bind_point: PipelineBindPoint,
+    layout: PipelineLayout,
+    first_set: u32,
+    descriptor_set_count: u32,
+    descriptor_sets: *const DescriptorSet,
+    dynamic_offset_count: u32,
+    dynamic_offsets: *const u32,
+);
+
+pub type FnCmdBindIndexBuffer = extern "system" fn(
+    command_buffer: CommandBuffer,
+    buffer: Buffer,
+    offset: DeviceSize,
+    index_type: IndexType,
+);
+
+pub type FnCmdBindVertexBuffers = extern "system" fn(
+    command_buffer: CommandBuffer,
+    first_binding: u32,
+    binding_count: u32,
+    buffers: *const Buffer,
+    offsets: *const DeviceSize,
+);
+
+pub type FnCmdDraw = extern "system" fn(
+    command_buffer: CommandBuffer,
+    vertex_count: u32,
+    instance_count: u32,
+    first_vertex: u32,
+    first_instance: u32,
+);
+
+pub type FnCmdDrawIndexed = extern "system" fn(
+    command_buffer: CommandBuffer,
+    index_count: u32,
+    instance_count: u32,
+    first_index: u32,
+    vertex_offset: i32,
+    first_instance: u32,
+);
+
+pub type FnCmdDrawIndirect = extern "system" fn(
+    command_buffer: CommandBuffer,
+    buffer: Buffer,
+    offset: DeviceSize,
+    draw_count: u32,
+    stride: u32,
+);
+
+pub type FnCmdDrawIndexedIndirect = extern "system" fn(
+    command_buffer: CommandBuffer,
+    buffer: Buffer,
+    offset: DeviceSize,
+    draw_count: u32,
+    stride: u32,
+);
+
+pub type FnCmdDispatch = extern "system" fn(
+    command_buffer: CommandBuffer,
+    group_count_x: u32,
+    group_count_y: u32,
+    group_count_z: u32,
+);
+
+pub type FnCmdDispatchIndirect =
+    extern "system" fn(command_buffer: CommandBuffer, buffer: Buffer, offset: DeviceSize);
+
+pub type FnCmdCopyBuffer = extern "system" fn(
+    command_buffer: CommandBuffer,
+    src_buffer: Buffer,
+    dst_buffer: Buffer,
+    region_count: u32,
+    regions: *const BufferCopy,
+);
+
+pub type FnCmdCopyImage = extern "system" fn(
+    command_buffer: CommandBuffer,
+    src_image: Image,
+    src_image_layout: ImageLayout,
+    dst_image: Image,
+    dst_image_layout: ImageLayout,
+    region_count: u32,
+    regions: *const ImageCopy,
+);
+
+pub type FnCmdBlitImage = extern "system" fn(
+    command_buffer: CommandBuffer,
+    src_image: Image,
+    src_image_layout: ImageLayout,
+    dst_image: Image,
+    dst_image_layout: ImageLayout,
+    region_count: u32,
+    regions: *const ImageBlit,
+    filter: Filter,
+);
+
+pub type FnCmdCopyBufferToImage = extern "system" fn(
+    command_buffer: CommandBuffer,
+    src_buffer: Buffer,
+    dst_image: Image,
+    dst_image_layout: ImageLayout,
+    region_count: u32,
+    regions: *const BufferImageCopy,
+);
+
+pub type FnCmdCopyImageToBuffer = extern "system" fn(
+    command_buffer: CommandBuffer,
+    src_image: Image,
+    src_image_layout: ImageLayout,
+    dst_buffer: Buffer,
+    region_count: u32,
+    regions: *const BufferImageCopy,
+);
+
+pub type FnCmdUpdateBuffer = extern "system" fn(
+    command_buffer: CommandBuffer,
+    dst_buffer: Buffer,
+    dst_offset: DeviceSize,
+    data_size: DeviceSize,
+    data: *const c_void,
+);
+
+pub type FnCmdFillBuffer = extern "system" fn(
+    command_buffer: CommandBuffer,
+    dst_buffer: Buffer,
+    dst_offset: DeviceSize,
+    size: DeviceSize,
+    data: u32,
+);
+
+pub type FnCmdClearColorImage = extern "system" fn(
+    command_buffer: CommandBuffer,
+    image: Image,
+    image_layout: ImageLayout,
+    color: &ClearColorValue,
+    range_count: u32,
+    ranges: *const ImageSubresourceRange,
+);
+
+pub type FnCmdClearDepthStencilImage = extern "system" fn(
+    command_buffer: CommandBuffer,
+    image: Image,
+    image_layout: ImageLayout,
+    depth_stencil: &ClearDepthStencilValue,
+    range_count: u32,
+    ranges: *const ImageSubresourceRange,
+);
+
+pub type FnCmdClearAttachments = extern "system" fn(
+    command_buffer: CommandBuffer,
+    attachment_count: u32,
+    attachments: *const ClearAttachment,
+    rect_count: u32,
+    rects: *const ClearRect,
+);
+
+pub type FnCmdResolveImage = extern "system" fn(
+    command_buffer: CommandBuffer,
+    src_image: Image,
+    src_image_layout: ImageLayout,
+    dst_image: Image,
+    dst_image_layout: ImageLayout,
+    region_count: u32,
+    regions: *const ImageResolve,
+);
+
+pub type FnCmdSetEvent =
+    extern "system" fn(command_buffer: CommandBuffer, event: Event, stage_mask: PipelineStageFlags);
+
+pub type FnCmdResetEvent =
+    extern "system" fn(command_buffer: CommandBuffer, event: Event, stage_mask: PipelineStageFlags);
+
+pub type FnCmdWaitEvents = extern "system" fn(
+    command_buffer: CommandBuffer,
+    event_count: u32,
+    events: *const Event,
+    src_stage_mask: PipelineStageFlags,
+    dst_stage_mask: PipelineStageFlags,
+    memory_barrier_count: u32,
+    memory_barriers: *const MemoryBarrier,
+    buffer_memory_barrier_count: u32,
+    buffer_memory_barriers: *const BufferMemoryBarrier,
+    image_memory_barrier_count: u32,
+    image_memory_barriers: *const ImageMemoryBarrier,
+);
+
+pub type FnCmdPipelineBarrier = extern "system" fn(
+    command_buffer: CommandBuffer,
+    src_stage_mask: PipelineStageFlags,
+    dst_stage_mask: PipelineStageFlags,
+    dependency_flags: DependencyFlags,
+    memory_barrier_count: u32,
+    memory_barriers: *const MemoryBarrier,
+    buffer_memory_barrier_count: u32,
+    buffer_memory_barriers: *const BufferMemoryBarrier,
+    image_memory_barrier_count: u32,
+    image_memory_barriers: *const ImageMemoryBarrier,
+);
+
+pub type FnCmdPipelineBarrier2 =
+    extern "system" fn(command_buffer: CommandBuffer, dependency_info: &DependencyInfo);
+
+pub type FnCmdWaitEvents2 = extern "system" fn(
+    command_buffer: CommandBuffer,
+    event_count: u32,
+    events: *const Event,
+    dependency_infos: *const DependencyInfo,
+);
+
+pub type FnCmdSetEvent2 = extern "system" fn(
+    command_buffer: CommandBuffer,
+    event: Event,
+    dependency_info: &DependencyInfo,
+);
+
+pub type FnCmdBeginQuery = extern "system" fn(
+    command_buffer: CommandBuffer,
+    query_pool: QueryPool,
+    query: u32,
+    flags: QueryControlFlags,
+);
+
+pub type FnCmdEndQuery =
+    extern "system" fn(command_buffer: CommandBuffer, query_pool: QueryPool, query: u32);
+
+pub type FnCmdResetQueryPool = extern "system" fn(
+    command_buffer: CommandBuffer,
+    query_pool: QueryPool,
+    first_query: u32,
+    query_count: u32,
+);
+
+pub type FnCmdSetViewportWithCount = extern "system" fn(
+    command_buffer: CommandBuffer,
+    viewport_count: u32,
+    viewports: *const Viewport,
+);
+
+pub type FnCmdSetScissorWithCount =
+    extern "system" fn(command_buffer: CommandBuffer, scissors_count: u32, scissors: *const Rect2d);
+
+pub type FnCreateBuffer = extern "system" fn(
+    device: Device,
+    create_info: &BufferCreateInfo,
+    allocator: Option<&AllocationCallbacks>,
+    buffer: &mut Buffer,
+) -> Result;
+
+pub type FnDestroyBuffer =
+    extern "system" fn(device: Device, buffer: Buffer, allocator: Option<&AllocationCallbacks>);
+
+pub type FnCreateBufferView = extern "system" fn(
+    device: Device,
+    create_info: &BufferViewCreateInfo,
+    allocator: Option<&AllocationCallbacks>,
+    view: &mut BufferView,
+) -> Result;
+
+pub type FnDestroyBufferView = extern "system" fn(
+    device: Device,
+    buffer_view: BufferView,
+    allocator: Option<&AllocationCallbacks>,
+);
+
+pub type FnCreateImage = extern "system" fn(
+    device: Device,
+    create_info: &ImageCreateInfo,
+    allocator: Option<&AllocationCallbacks>,
+    image: &mut Image,
+) -> Result;
+
+pub type FnDestroyImage =
+    extern "system" fn(device: Device, image: Image, allocator: Option<&AllocationCallbacks>);
+
+pub type FnGetImageSubresourceLayout = extern "system" fn(
+    device: Device,
+    image: Image,
+    subresource: &ImageSubresource,
+    layout: &mut SubresourceLayout,
+);
+
+pub type FnGetImageMemoryRequirements2 = extern "system" fn(
+    device: Device,
+    info: &ImageMemoryRequirementsInfo2,
+    memory_requirements: &mut MemoryRequirements2,
+);
+
+pub type FnBindImageMemory2 = extern "system" fn(
+    device: Device,
+    bind_info_count: u32,
+    bind_infos: *const BindImageMemoryInfo,
+) -> Result;
+
+pub type FnGetBufferMemoryRequirements2 = extern "system" fn(
+    device: Device,
+    info: &BufferMemoryRequirementsInfo2,
+    memory_requirements: &mut MemoryRequirements2,
+);
+
+pub type FnBindBufferMemory2 = extern "system" fn(
+    device: Device,
+    bind_info_count: u32,
+    bind_infos: *const BindBufferMemoryInfo,
+) -> Result;
+
+pub type FnCmdWriteTimestamp = extern "system" fn(
+    command_buffer: CommandBuffer,
+    pipeline_stage: PipelineStageFlags,
+    query_pool: QueryPool,
+    query: u32,
+);
+
+pub type FnCmdCopyQueryPoolResults = extern "system" fn(
+    command_buffer: CommandBuffer,
+    query_pool: QueryPool,
+    first_query: u32,
+    query_count: u32,
+    dst_buffer: Buffer,
+    dst_offset: DeviceSize,
+    stride: DeviceSize,
+    flags: QueryResultFlags,
+);
+
+pub type FnCmdPushConstants = extern "system" fn(
+    command_buffer: CommandBuffer,
+    layout: PipelineLayout,
+    stage_flags: ShaderStageFlags,
+    offset: u32,
+    size: u32,
+    values: *const c_void,
+);
+
+pub type FnCmdBeginRenderPass = extern "system" fn(
+    command_buffer: CommandBuffer,
+    render_pass_begin: &RenderPassBeginInfo,
+    contents: SubpassContents,
+);
+
+pub type FnCmdNextSubpass =
+    extern "system" fn(command_buffer: CommandBuffer, contents: SubpassContents);
+
+pub type FnCmdEndRenderPass = extern "system" fn(command_buffer: CommandBuffer);
+
+pub type FnCmdExecuteCommands = extern "system" fn(
+    command_buffer: CommandBuffer,
+    command_buffer_count: u32,
+    command_buffers: *const CommandBuffer,
+);
+
+pub type FnCmdBeginRendering =
+    extern "system" fn(command_buffer: CommandBuffer, rendering_info: &RenderingInfo);
+
+pub type FnCmdEndRendering = extern "system" fn(command_buffer: CommandBuffer);
+
+pub type FnCreateFence = extern "system" fn(
+    device: Device,
+    create_info: &FenceCreateInfo,
+    allocator: Option<&AllocationCallbacks>,
+    fence: &mut Fence,
+) -> Result;
+
+pub type FnDestroyFence =
+    extern "system" fn(device: Device, fence: Fence, allocator: Option<&AllocationCallbacks>);
+
+pub type FnResetFences =
+    extern "system" fn(device: Device, fence_count: u32, fences: *const Fence) -> Result;
+
+pub type FnGetFenceStatus = extern "system" fn(device: Device, fence: Fence) -> Result;
+
+pub type FnWaitForFences = extern "system" fn(
+    device: Device,
+    fence_count: u32,
+    fences: *const Fence,
+    wait_all: Bool32,
+    timeout: u64,
+) -> Result;
+
+pub type FnInvalidateMappedMemoryRanges = extern "system" fn(
+    device: Device,
+    memory_range_count: u32,
+    memory_ranges: *const MappedMemoryRange,
+) -> Result;
+
+pub type FnCreateSemaphore = extern "system" fn(
+    device: Device,
+    create_info: &SemaphoreCreateInfo,
+    allocator: Option<&AllocationCallbacks>,
+    semaphore: &mut Semaphore,
+) -> Result;
+
+pub type FnDestroySemaphore = extern "system" fn(
+    device: Device,
+    semaphore: Semaphore,
+    allocator: Option<&AllocationCallbacks>,
+);
+
+pub type FnGetSemaphoreCounterValue =
+    extern "system" fn(device: Device, semaphore: Semaphore, value: &mut u64) -> Result;
+
+pub type FnWaitSemaphores =
+    extern "system" fn(device: Device, wait_info: &SemaphoreWaitInfo, timeout: u64) -> Result;
+
+pub type FnSignalSemaphore =
+    extern "system" fn(device: Device, signal_info: &SemaphoreSignalInfo) -> Result;
+
+pub type FnCreateSwapchainKHR = extern "system" fn(
+    device: Device,
+    create_info: &SwapchainCreateInfoKHR,
+    allocator: Option<&AllocationCallbacks>,
+    swapchain: &mut SwapchainKHR,
+) -> Result;
+
+pub type FnDestroySwapchainKHR = extern "system" fn(
+    device: Device,
+    swapchain: SwapchainKHR,
+    allocator: Option<&AllocationCallbacks>,
+);
+
+pub type FnGetSwapchainImagesKHR = extern "system" fn(
+    device: Device,
+    swapchain: SwapchainKHR,
+    swapchain_image_count: &mut u32,
+    swapchain_images: *mut Image,
+) -> Result;
+
+pub type FnAcquireNextImageKHR = extern "system" fn(
+    device: Device,
+    swapchain: SwapchainKHR,
+    timeout: u64,
+    semaphore: Semaphore,
+    fence: Fence,
+    image_index: &mut u32,
+) -> Result;
+
+pub type FnQueuePresentKHR =
+    extern "system" fn(queue: Queue, present_info: &PresentInfoKHR) -> Result;
+
+pub type FnAcquireNextImage2KHR = extern "system" fn(
+    device: Device,
+    acquire_info: &AcquireNextImageInfoKHR,
+    image_index: &mut u32,
+) -> Result;
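The `Fn*` aliases above mirror the raw `PFN_vk*` pointer types one-to-one; on their own they do nothing until a loader fetches an entry point and casts it. A minimal sketch of that step, assuming `device`, `queue_family_index`, and a previously loaded `get_device_proc_addr` pointer already exist (the wrapper types later in this commit perform exactly this dance for every entry point):

    // Hypothetical standalone use of the raw type aliases.
    let get_device_queue: FnGetDeviceQueue = unsafe {
        std::mem::transmute(
            get_device_proc_addr(device, cstr!("vkGetDeviceQueue").as_ptr())
                .expect("vkGetDeviceQueue is core 1.0 and must be present"),
        )
    };
    let mut queue = Queue::null();
    get_device_queue(device, queue_family_index, 0, &mut queue);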
diff --git a/vulkan-sys/src/handles.rs b/vulkan-sys/src/handles.rs
new file mode 100644 (file)
index 0000000..478d7d4
--- /dev/null
@@ -0,0 +1,798 @@
+#[repr(C)]
+#[derive(Copy, Clone, PartialEq, Eq, Default, Debug)]
+pub struct Instance(u64);
+
+#[repr(C)]
+#[derive(Copy, Clone, PartialEq, Eq, Default, Debug)]
+pub struct PhysicalDevice(u64);
+
+#[repr(C)]
+#[derive(Copy, Clone, PartialEq, Eq, Default, Debug)]
+pub struct Device(u64);
+
+#[repr(C)]
+#[derive(Copy, Clone, PartialEq, Eq, Default, Debug)]
+pub struct Queue(u64);
+
+#[repr(C)]
+#[derive(Copy, Clone, PartialEq, Eq, Default, Debug)]
+pub struct CommandBuffer(u64);
+
+#[repr(C)]
+#[derive(Copy, Clone, PartialEq, Eq, Default, Debug)]
+pub struct DeviceMemory(u64);
+
+#[repr(C)]
+#[derive(Copy, Clone, PartialEq, Eq, Default, Debug)]
+pub struct CommandPool(u64);
+
+#[repr(C)]
+#[derive(Copy, Clone, PartialEq, Eq, Default, Debug)]
+pub struct Buffer(u64);
+
+#[repr(C)]
+#[derive(Copy, Clone, PartialEq, Eq, Default, Debug)]
+pub struct BufferView(u64);
+
+#[repr(C)]
+#[derive(Copy, Clone, PartialEq, Eq, Default, Debug)]
+pub struct Image(u64);
+
+#[repr(C)]
+#[derive(Copy, Clone, PartialEq, Eq, Default, Debug)]
+pub struct ImageView(u64);
+
+#[repr(C)]
+#[derive(Copy, Clone, PartialEq, Eq, Default, Debug)]
+pub struct ShaderModule(u64);
+
+#[repr(C)]
+#[derive(Copy, Clone, PartialEq, Eq, Default, Debug)]
+pub struct Pipeline(u64);
+
+#[repr(C)]
+#[derive(Copy, Clone, PartialEq, Eq, Default, Debug)]
+pub struct PipelineLayout(u64);
+
+#[repr(C)]
+#[derive(Copy, Clone, PartialEq, Eq, Default, Debug)]
+pub struct Sampler(u64);
+
+#[repr(C)]
+#[derive(Copy, Clone, PartialEq, Eq, Default, Debug)]
+pub struct DescriptorSet(u64);
+
+#[repr(C)]
+#[derive(Copy, Clone, PartialEq, Eq, Default, Debug)]
+pub struct DescriptorSetLayout(u64);
+
+#[repr(C)]
+#[derive(Copy, Clone, PartialEq, Eq, Default, Debug)]
+pub struct DescriptorPool(u64);
+
+#[repr(C)]
+#[derive(Copy, Clone, PartialEq, Eq, Default, Debug)]
+pub struct Fence(u64);
+
+#[repr(C)]
+#[derive(Copy, Clone, PartialEq, Eq, Default, Debug)]
+pub struct Semaphore(u64);
+
+#[repr(C)]
+#[derive(Copy, Clone, PartialEq, Eq, Default, Debug)]
+pub struct Event(u64);
+
+#[repr(C)]
+#[derive(Copy, Clone, PartialEq, Eq, Default, Debug)]
+pub struct QueryPool(u64);
+
+#[repr(C)]
+#[derive(Copy, Clone, PartialEq, Eq, Default, Debug)]
+pub struct Framebuffer(u64);
+
+#[repr(C)]
+#[derive(Copy, Clone, PartialEq, Eq, Default, Debug)]
+pub struct RenderPass(u64);
+
+#[repr(C)]
+#[derive(Copy, Clone, PartialEq, Eq, Default, Debug)]
+pub struct PipelineCache(u64);
+
+#[repr(C)]
+#[derive(Copy, Clone, PartialEq, Eq, Default, Debug)]
+pub struct DescriptorUpdateTemplate(u64);
+
+#[repr(C)]
+#[derive(Copy, Clone, PartialEq, Eq, Default, Debug)]
+pub struct DisplayKHR(u64);
+
+#[repr(C)]
+#[derive(Copy, Clone, PartialEq, Eq, Default, Debug)]
+pub struct DisplayModeKHR(u64);
+
+#[repr(C)]
+#[derive(Copy, Clone, PartialEq, Eq, Default, Debug)]
+pub struct SurfaceKHR(u64);
+
+#[repr(C)]
+#[derive(Copy, Clone, PartialEq, Eq, Default, Debug)]
+pub struct SwapchainKHR(u64);
+
+impl Instance {
+    #[inline]
+    pub const fn null() -> Self {
+        Self(0)
+    }
+
+    #[inline]
+    pub const fn is_null(self) -> bool {
+        self.0 == 0
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u64 {
+        self.0
+    }
+
+    #[inline]
+    pub fn from_raw(value: u64) -> Self {
+        Self(value)
+    }
+}
+impl PhysicalDevice {
+    #[inline]
+    pub const fn null() -> Self {
+        Self(0)
+    }
+
+    #[inline]
+    pub const fn is_null(self) -> bool {
+        self.0 == 0
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u64 {
+        self.0
+    }
+
+    #[inline]
+    pub fn from_raw(value: u64) -> Self {
+        Self(value)
+    }
+}
+impl Device {
+    #[inline]
+    pub const fn null() -> Self {
+        Self(0)
+    }
+
+    #[inline]
+    pub const fn is_null(self) -> bool {
+        self.0 == 0
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u64 {
+        self.0
+    }
+
+    #[inline]
+    pub fn from_raw(value: u64) -> Self {
+        Self(value)
+    }
+}
+impl Queue {
+    #[inline]
+    pub const fn null() -> Self {
+        Self(0)
+    }
+
+    #[inline]
+    pub const fn is_null(self) -> bool {
+        self.0 == 0
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u64 {
+        self.0
+    }
+
+    #[inline]
+    pub fn from_raw(value: u64) -> Self {
+        Self(value)
+    }
+}
+impl CommandBuffer {
+    #[inline]
+    pub const fn null() -> Self {
+        Self(0)
+    }
+
+    #[inline]
+    pub const fn is_null(self) -> bool {
+        self.0 == 0
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u64 {
+        self.0
+    }
+
+    #[inline]
+    pub fn from_raw(value: u64) -> Self {
+        Self(value)
+    }
+}
+impl DeviceMemory {
+    #[inline]
+    pub const fn null() -> Self {
+        Self(0)
+    }
+
+    #[inline]
+    pub const fn is_null(self) -> bool {
+        self.0 == 0
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u64 {
+        self.0
+    }
+
+    #[inline]
+    pub fn from_raw(value: u64) -> Self {
+        Self(value)
+    }
+}
+impl CommandPool {
+    #[inline]
+    pub const fn null() -> Self {
+        Self(0)
+    }
+
+    #[inline]
+    pub const fn is_null(self) -> bool {
+        self.0 == 0
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u64 {
+        self.0
+    }
+
+    #[inline]
+    pub fn from_raw(value: u64) -> Self {
+        Self(value)
+    }
+}
+impl Buffer {
+    #[inline]
+    pub const fn null() -> Self {
+        Self(0)
+    }
+
+    #[inline]
+    pub const fn is_null(self) -> bool {
+        self.0 == 0
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u64 {
+        self.0
+    }
+
+    #[inline]
+    pub fn from_raw(value: u64) -> Self {
+        Self(value)
+    }
+}
+impl BufferView {
+    #[inline]
+    pub const fn null() -> Self {
+        Self(0)
+    }
+
+    #[inline]
+    pub const fn is_null(self) -> bool {
+        self.0 == 0
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u64 {
+        self.0
+    }
+
+    #[inline]
+    pub fn from_raw(value: u64) -> Self {
+        Self(value)
+    }
+}
+impl Image {
+    #[inline]
+    pub const fn null() -> Self {
+        Self(0)
+    }
+
+    #[inline]
+    pub const fn is_null(self) -> bool {
+        self.0 == 0
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u64 {
+        self.0
+    }
+
+    #[inline]
+    pub fn from_raw(value: u64) -> Self {
+        Self(value)
+    }
+}
+impl ImageView {
+    #[inline]
+    pub const fn null() -> Self {
+        Self(0)
+    }
+
+    #[inline]
+    pub const fn is_null(self) -> bool {
+        self.0 == 0
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u64 {
+        self.0
+    }
+
+    #[inline]
+    pub fn from_raw(value: u64) -> Self {
+        Self(value)
+    }
+}
+impl ShaderModule {
+    #[inline]
+    pub const fn null() -> Self {
+        Self(0)
+    }
+
+    #[inline]
+    pub const fn is_null(self) -> bool {
+        self.0 == 0
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u64 {
+        self.0
+    }
+
+    #[inline]
+    pub fn from_raw(value: u64) -> Self {
+        Self(value)
+    }
+}
+impl Pipeline {
+    #[inline]
+    pub const fn null() -> Self {
+        Self(0)
+    }
+
+    #[inline]
+    pub const fn is_null(self) -> bool {
+        self.0 == 0
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u64 {
+        self.0
+    }
+
+    #[inline]
+    pub fn from_raw(value: u64) -> Self {
+        Self(value)
+    }
+}
+impl PipelineLayout {
+    #[inline]
+    pub const fn null() -> Self {
+        Self(0)
+    }
+
+    #[inline]
+    pub const fn is_null(self) -> bool {
+        self.0 == 0
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u64 {
+        self.0
+    }
+
+    #[inline]
+    pub fn from_raw(value: u64) -> Self {
+        Self(value)
+    }
+}
+impl Sampler {
+    #[inline]
+    pub const fn null() -> Self {
+        Self(0)
+    }
+
+    #[inline]
+    pub const fn is_null(self) -> bool {
+        self.0 == 0
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u64 {
+        self.0
+    }
+
+    #[inline]
+    pub fn from_raw(value: u64) -> Self {
+        Self(value)
+    }
+}
+impl DescriptorSet {
+    #[inline]
+    pub const fn null() -> Self {
+        Self(0)
+    }
+
+    #[inline]
+    pub const fn is_null(self) -> bool {
+        self.0 == 0
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u64 {
+        self.0
+    }
+
+    #[inline]
+    pub fn from_raw(value: u64) -> Self {
+        Self(value)
+    }
+}
+impl DescriptorSetLayout {
+    #[inline]
+    pub const fn null() -> Self {
+        Self(0)
+    }
+
+    #[inline]
+    pub const fn is_null(self) -> bool {
+        self.0 == 0
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u64 {
+        self.0
+    }
+
+    #[inline]
+    pub fn from_raw(value: u64) -> Self {
+        Self(value)
+    }
+}
+impl DescriptorPool {
+    #[inline]
+    pub const fn null() -> Self {
+        Self(0)
+    }
+
+    #[inline]
+    pub const fn is_null(self) -> bool {
+        self.0 == 0
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u64 {
+        self.0
+    }
+
+    #[inline]
+    pub fn from_raw(value: u64) -> Self {
+        Self(value)
+    }
+}
+impl Fence {
+    #[inline]
+    pub const fn null() -> Self {
+        Self(0)
+    }
+
+    #[inline]
+    pub const fn is_null(self) -> bool {
+        self.0 == 0
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u64 {
+        self.0
+    }
+
+    #[inline]
+    pub fn from_raw(value: u64) -> Self {
+        Self(value)
+    }
+}
+impl Semaphore {
+    #[inline]
+    pub const fn null() -> Self {
+        Self(0)
+    }
+
+    #[inline]
+    pub const fn is_null(self) -> bool {
+        self.0 == 0
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u64 {
+        self.0
+    }
+
+    #[inline]
+    pub fn from_raw(value: u64) -> Self {
+        Self(value)
+    }
+}
+impl Event {
+    #[inline]
+    pub const fn null() -> Self {
+        Self(0)
+    }
+
+    #[inline]
+    pub const fn is_null(self) -> bool {
+        self.0 == 0
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u64 {
+        self.0
+    }
+
+    #[inline]
+    pub fn from_raw(value: u64) -> Self {
+        Self(value)
+    }
+}
+impl QueryPool {
+    #[inline]
+    pub const fn null() -> Self {
+        Self(0)
+    }
+
+    #[inline]
+    pub const fn is_null(self) -> bool {
+        self.0 == 0
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u64 {
+        self.0
+    }
+
+    #[inline]
+    pub fn from_raw(value: u64) -> Self {
+        Self(value)
+    }
+}
+impl Framebuffer {
+    #[inline]
+    pub const fn null() -> Self {
+        Self(0)
+    }
+
+    #[inline]
+    pub const fn is_null(self) -> bool {
+        self.0 == 0
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u64 {
+        self.0
+    }
+
+    #[inline]
+    pub fn from_raw(value: u64) -> Self {
+        Self(value)
+    }
+}
+impl RenderPass {
+    #[inline]
+    pub const fn null() -> Self {
+        Self(0)
+    }
+
+    #[inline]
+    pub const fn is_null(self) -> bool {
+        self.0 == 0
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u64 {
+        self.0
+    }
+
+    #[inline]
+    pub fn from_raw(value: u64) -> Self {
+        Self(value)
+    }
+}
+impl PipelineCache {
+    #[inline]
+    pub const fn null() -> Self {
+        Self(0)
+    }
+
+    #[inline]
+    pub const fn is_null(self) -> bool {
+        self.0 == 0
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u64 {
+        self.0
+    }
+
+    #[inline]
+    pub fn from_raw(value: u64) -> Self {
+        Self(value)
+    }
+}
+impl DescriptorUpdateTemplate {
+    #[inline]
+    pub const fn null() -> Self {
+        Self(0)
+    }
+
+    #[inline]
+    pub const fn is_null(self) -> bool {
+        self.0 == 0
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u64 {
+        self.0
+    }
+
+    #[inline]
+    pub fn from_raw(value: u64) -> Self {
+        Self(value)
+    }
+}
+impl DisplayKHR {
+    #[inline]
+    pub const fn null() -> Self {
+        Self(0)
+    }
+
+    #[inline]
+    pub const fn is_null(self) -> bool {
+        self.0 == 0
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u64 {
+        self.0
+    }
+
+    #[inline]
+    pub fn from_raw(value: u64) -> Self {
+        Self(value)
+    }
+}
+impl DisplayModeKHR {
+    #[inline]
+    pub const fn null() -> Self {
+        Self(0)
+    }
+
+    #[inline]
+    pub const fn is_null(self) -> bool {
+        self.0 == 0
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u64 {
+        self.0
+    }
+
+    #[inline]
+    pub fn from_raw(value: u64) -> Self {
+        Self(value)
+    }
+}
+impl SurfaceKHR {
+    #[inline]
+    pub const fn null() -> Self {
+        Self(0)
+    }
+
+    #[inline]
+    pub const fn is_null(self) -> bool {
+        self.0 == 0
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u64 {
+        self.0
+    }
+
+    #[inline]
+    pub fn from_raw(value: u64) -> Self {
+        Self(value)
+    }
+}
+impl SwapchainKHR {
+    #[inline]
+    pub const fn null() -> Self {
+        Self(0)
+    }
+
+    #[inline]
+    pub const fn is_null(self) -> bool {
+        self.0 == 0
+    }
+
+    #[inline]
+    pub fn as_raw(self) -> u64 {
+        self.0
+    }
+
+    #[inline]
+    pub fn from_raw(value: u64) -> Self {
+        Self(value)
+    }
+}
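Every handle gets the same four methods, so the per-type `impl` blocks above are thirty copies of one pattern. A `macro_rules!` helper could generate them instead; a sketch, shown only to illustrate the pattern (this macro is not part of the commit):

    macro_rules! define_handle {
        ($name:ident) => {
            impl $name {
                #[inline]
                pub const fn null() -> Self {
                    Self(0)
                }

                #[inline]
                pub const fn is_null(self) -> bool {
                    self.0 == 0
                }

                #[inline]
                pub fn as_raw(self) -> u64 {
                    self.0
                }

                #[inline]
                pub fn from_raw(value: u64) -> Self {
                    Self(value)
                }
            }
        };
    }

    define_handle!(Instance);
    define_handle!(Device);
    // ...and so on for the remaining handle types.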
diff --git a/vulkan-sys/src/helpers.rs b/vulkan-sys/src/helpers.rs
new file mode 100644 (file)
index 0000000..6de9823
--- /dev/null
@@ -0,0 +1,45 @@
+// Indexing an empty array always panics; when the `cstr!` expansion site is
+// const-evaluated this surfaces as a compile-time error, which is the point:
+// a string literal with an interior NUL byte must never reach the transmute.
+#[allow(unconditional_panic)]
+const fn illegal_null_in_string() {
+    [][0]
+}
+
+#[doc(hidden)]
+pub const fn validate_cstr_contents(bytes: &[u8]) {
+    let mut i = 0;
+    while i < bytes.len() {
+        if bytes[i] == b'\0' {
+            illegal_null_in_string();
+        }
+        i += 1;
+    }
+}
+
+#[macro_export]
+macro_rules! cstr {
+    ( $s:literal ) => {{
+        $crate::helpers::validate_cstr_contents($s.as_bytes());
+        #[allow(unused_unsafe)]
+        unsafe {
+            std::mem::transmute::<_, &std::ffi::CStr>(concat!($s, "\0"))
+        }
+    }};
+}
+
+/// Converts a NUL-terminated C string buffer into an owned `String`.
+///
+/// The slice must contain a NUL terminator; otherwise `CStr::from_ptr` will
+/// read past the end of the buffer.
+#[allow(dead_code)]
+pub fn string_from_c_str(c_str: &[i8]) -> String {
+    let s = unsafe { std::ffi::CStr::from_ptr(c_str.as_ptr()).to_bytes() };
+    String::from_utf8_lossy(s).into_owned()
+}
+
+#[cfg(test)]
+mod tests {
+    use std::ffi::CStr;
+
+    #[test]
+    fn test_cstr() {
+        assert_eq!(
+            cstr!("hello"),
+            CStr::from_bytes_with_nul(b"hello\0").unwrap()
+        );
+    }
+}
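The `cstr!` macro appends the NUL terminator itself and rejects literals that already contain one: `validate_cstr_contents` walks the bytes and, on finding a NUL, calls `illegal_null_in_string`, whose empty-array index panics (at compile time when the expansion site is const-evaluated, at runtime otherwise). A sketch of both sides:

    let ok = cstr!("vkCreateDevice"); // validates, then appends the terminator
    // let bad = cstr!("vk\0Oops");   // would panic: interior NUL byte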
diff --git a/vulkan-sys/src/lib.rs b/vulkan-sys/src/lib.rs
new file mode 100644 (file)
index 0000000..0c902de
--- /dev/null
@@ -0,0 +1,2401 @@
+#![allow(unused)]
+#![allow(clippy::missing_safety_doc)]
+#![allow(clippy::too_many_arguments)]
+#![allow(clippy::derivable_impls)]
+
+mod enums;
+mod flags;
+mod functions;
+mod handles;
+pub mod helpers;
+mod structs;
+
+pub use enums::*;
+pub use flags::*;
+pub use functions::*;
+pub use handles::*;
+pub use structs::*;
+
+use std::{
+    convert::{TryFrom, TryInto},
+    ffi::{c_void, CStr},
+    marker::PhantomData,
+    mem::{transmute, MaybeUninit},
+    os::raw::c_char,
+};
+
+pub const fn make_version(major: u32, minor: u32, patch: u32) -> u32 {
+    (major << 22) | (minor << 12) | patch
+}
+
+pub const fn get_version(ver: u32) -> (u32, u32, u32) {
+    (ver >> 22, (ver >> 12) & 0x3ff, ver & 0xfff)
+}
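These two helpers pack and unpack the core Vulkan version encoding: 10 bits of major, 10 bits of minor, and 12 bits of patch (the Vulkan 1.3-era variant bits are not modelled here). A round trip, as a quick sanity check:

    let v = make_version(1, 3, 215);
    assert_eq!(v, (1 << 22) | (3 << 12) | 215);
    assert_eq!(get_version(v), (1, 3, 215));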
+
+pub const VERSION_1_0: u32 = make_version(1, 0, 0);
+pub const VERSION_1_1: u32 = make_version(1, 1, 0);
+pub const VERSION_1_2: u32 = make_version(1, 2, 0);
+pub const VERSION_1_3: u32 = make_version(1, 3, 0);
+
+pub const MAX_PHYSICAL_DEVICE_NAME_SIZE: u32 = 256;
+pub const UUID_SIZE: u32 = 16;
+pub const LUID_SIZE: u32 = 8;
+pub const MAX_EXTENSION_NAME_SIZE: u32 = 256;
+pub const MAX_DESCRIPTION_SIZE: u32 = 256;
+pub const MAX_MEMORY_TYPES: u32 = 32;
+pub const MAX_MEMORY_HEAPS: u32 = 16;
+pub const LOD_CLAMP_NONE: f32 = 1000.0;
+pub const REMAINING_MIP_LEVELS: u32 = !0u32;
+pub const REMAINING_ARRAY_LAYERS: u32 = !0u32;
+pub const WHOLE_SIZE: u64 = !0u64;
+pub const ATTACHMENT_UNUSED: u32 = !0u32;
+pub const TRUE: u32 = 1;
+pub const FALSE: u32 = 0;
+pub const QUEUE_FAMILY_IGNORED: u32 = !0u32;
+pub const QUEUE_FAMILY_EXTERNAL: u32 = !1u32;
+pub const QUEUE_FAMILY_EXTERNAL_KHR: u32 = QUEUE_FAMILY_EXTERNAL;
+pub const QUEUE_FAMILY_FOREIGN_EXT: u32 = !2u32;
+pub const SUBPASS_EXTERNAL: u32 = !0u32;
+pub const MAX_DEVICE_GROUP_SIZE: u32 = 32;
+pub const MAX_DEVICE_GROUP_SIZE_KHR: u32 = MAX_DEVICE_GROUP_SIZE;
+pub const MAX_DRIVER_NAME_SIZE: u32 = 256;
+pub const MAX_DRIVER_NAME_SIZE_KHR: u32 = MAX_DRIVER_NAME_SIZE;
+pub const MAX_DRIVER_INFO_SIZE: u32 = 256;
+pub const MAX_DRIVER_INFO_SIZE_KHR: u32 = MAX_DRIVER_INFO_SIZE;
+pub const SHADER_UNUSED_KHR: u32 = !0u32;
+pub const SHADER_UNUSED_NV: u32 = SHADER_UNUSED_KHR;
+pub const MAX_GLOBAL_PRIORITY_SIZE_EXT: u32 = 16;
+
+pub type SampleMask = u32;
+
+#[repr(u32)]
+#[derive(Clone, Copy, PartialEq, Eq, Debug)]
+pub enum Bool32 {
+    False = 0,
+    True = 1,
+}
+
+impl Default for Bool32 {
+    fn default() -> Self {
+        Bool32::False
+    }
+}
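Modelling `VkBool32` as a two-variant `repr(u32)` enum keeps the ABI right, but it also means that materialising a `Bool32` from any value other than 0 or 1 would be undefined behaviour, so conversions are best routed through `bool`. A sketch of the kind of helpers one might add (these impls are not part of this commit):

    impl From<bool> for Bool32 {
        fn from(value: bool) -> Self {
            if value { Bool32::True } else { Bool32::False }
        }
    }

    impl From<Bool32> for bool {
        fn from(value: Bool32) -> Self {
            value == Bool32::True
        }
    }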
+
+pub type DeviceSize = u64;
+pub type DeviceAddress = u64;
+
+#[repr(C)]
+#[repr(packed(4))]
+pub struct VulkanSlice1<'a, I, T, const PAD: usize> {
+    len: I,
+    #[doc(hidden)]
+    _pad: MaybeUninit<[u8; PAD]>,
+    ptr: *const T,
+    phantom: PhantomData<&'a T>,
+}
+
+impl<'a, I, T, const PAD: usize> std::fmt::Debug for VulkanSlice1<'a, I, T, PAD>
+where
+    I: TryInto<usize> + Copy,
+    T: std::fmt::Debug,
+{
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        let len = self.len.try_into().unwrap_or(0);
+        // `from_raw_parts` requires a non-null pointer even when `len` is zero,
+        // so treat the null pointer from `default`/`dangling` as an empty list.
+        if self.ptr.is_null() {
+            return f.debug_list().finish();
+        }
+        let slice = unsafe { std::slice::from_raw_parts(self.ptr, len) };
+        f.debug_list().entries(slice).finish()
+    }
+}
+
+impl<'a, I: Default, T, const PAD: usize> Default for VulkanSlice1<'a, I, T, PAD> {
+    fn default() -> Self {
+        Self {
+            len: Default::default(),
+            _pad: MaybeUninit::uninit(),
+            ptr: std::ptr::null(),
+            phantom: PhantomData,
+        }
+    }
+}
+
+impl<'a, I, T, const PAD: usize> VulkanSlice1<'a, I, T, PAD> {
+    pub const fn dangling(len: I) -> Self {
+        Self {
+            len,
+            _pad: MaybeUninit::uninit(),
+            ptr: std::ptr::null(),
+            phantom: PhantomData,
+        }
+    }
+}
+
+impl<'a, I, T, const PAD: usize> From<&'a [T]> for VulkanSlice1<'a, I, T, PAD>
+where
+    I: TryFrom<usize>,
+{
+    fn from(x: &'a [T]) -> Self {
+        let len = match I::try_from(x.len()) {
+            Ok(x) => x,
+            Err(_) => panic!("invalid slice length"),
+        };
+        let ptr = x.as_ptr();
+        Self {
+            len,
+            _pad: MaybeUninit::uninit(),
+            ptr,
+            phantom: PhantomData,
+        }
+    }
+}
+
+impl<'a, I, T, const N: usize, const PAD: usize> From<&'a [T; N]> for VulkanSlice1<'a, I, T, PAD>
+where
+    I: TryFrom<usize>,
+{
+    fn from(x: &'a [T; N]) -> Self {
+        let len = match I::try_from(N) {
+            Ok(x) => x,
+            Err(_) => panic!("invalid slice length"),
+        };
+        let ptr = x.as_ptr();
+        Self {
+            len,
+            _pad: MaybeUninit::uninit(),
+            ptr,
+            phantom: PhantomData,
+        }
+    }
+}
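`VulkanSlice1` lets a borrowed Rust slice stand in for Vulkan's ubiquitous count-plus-pointer field pairs while keeping the exact C struct layout, hence the `repr(packed(4))` and the explicit `PAD` bytes between a 32-bit count and a 64-bit pointer. A quick illustration of the conversion path, with arbitrarily chosen index and element types:

    let values = [1.0f32, 2.0, 3.0];
    // I = u32 count field, PAD = 4 padding bytes before the pointer on 64-bit.
    let slice: VulkanSlice1<'_, u32, f32, 4> = (&values[..]).into();
    println!("{:?}", slice); // [1.0, 2.0, 3.0]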
+
+#[repr(C)]
+#[repr(packed(4))]
+pub struct VulkanSlice2<'a, I, T0, T1, const PAD: usize> {
+    len: I,
+    #[doc(hidden)]
+    _pad: MaybeUninit<[u8; PAD]>,
+    ptr0: *const T0,
+    ptr1: *const T1,
+    phantom0: PhantomData<&'a T0>,
+    phantom1: PhantomData<&'a T1>,
+}
+
+impl<'a, I, T0, T1, const PAD: usize> std::fmt::Debug for VulkanSlice2<'a, I, T0, T1, PAD>
+where
+    I: TryInto<usize> + Copy,
+    T0: std::fmt::Debug,
+    T1: std::fmt::Debug,
+{
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        let len = self.len.try_into().unwrap_or(0);
+        // As with `VulkanSlice1`, null pointers mark the empty default state and
+        // must not be passed to `from_raw_parts`.
+        if self.ptr0.is_null() || self.ptr1.is_null() {
+            return f.debug_list().finish();
+        }
+        let slice = unsafe { std::slice::from_raw_parts(self.ptr0, len) };
+        f.debug_list().entries(slice).finish()?;
+        let slice = unsafe { std::slice::from_raw_parts(self.ptr1, len) };
+        f.debug_list().entries(slice).finish()
+    }
+}
+
+impl<'a, I: Default, T0, T1, const PAD: usize> Default for VulkanSlice2<'a, I, T0, T1, PAD> {
+    fn default() -> Self {
+        Self {
+            len: Default::default(),
+            _pad: MaybeUninit::uninit(),
+            ptr0: std::ptr::null(),
+            ptr1: std::ptr::null(),
+            phantom0: PhantomData,
+            phantom1: PhantomData,
+        }
+    }
+}
+
+impl<'a, I, T0, T1, const PAD: usize> From<(&'a [T0], &'a [T1])>
+    for VulkanSlice2<'a, I, T0, T1, PAD>
+where
+    I: TryFrom<usize>,
+{
+    fn from(x: (&'a [T0], &'a [T1])) -> Self {
+        debug_assert!(x.0.len() == x.1.len());
+        let len = match I::try_from(x.0.len()) {
+            Ok(x) => x,
+            Err(_) => panic!("invalid slice length"),
+        };
+        let ptr0 = x.0.as_ptr();
+        let ptr1 = x.1.as_ptr();
+        Self {
+            len,
+            _pad: MaybeUninit::uninit(),
+            ptr0,
+            ptr1,
+            phantom0: PhantomData,
+            phantom1: PhantomData,
+        }
+    }
+}
+
+impl<'a, I, T0, T1, const N: usize, const PAD: usize> From<(&'a [T0; N], &'a [T1; N])>
+    for VulkanSlice2<'a, I, T0, T1, PAD>
+where
+    I: TryFrom<usize>,
+{
+    fn from(x: (&'a [T0; N], &'a [T1; N])) -> Self {
+        let len = match I::try_from(N) {
+            Ok(x) => x,
+            Err(_) => panic!("invalid slice length"),
+        };
+        let ptr0 = x.0.as_ptr();
+        let ptr1 = x.1.as_ptr();
+        Self {
+            len,
+            _pad: MaybeUninit::uninit(),
+            ptr0,
+            ptr1,
+            phantom0: PhantomData,
+            phantom1: PhantomData,
+        }
+    }
+}
+
+fn vulkan_instance_version_not_supported() {
+    panic!("calling an instance function not supported by the version requested in `InstanceFunctions::new`")
+}
+
+fn vulkan_device_version_not_supported() {
+    panic!("calling a device function not supported by the version requested in `DeviceFunctions::new`")
+}
+
+pub struct GlobalFunctions {
+    get_instance_proc_addr: FnGetInstanceProcAddr,
+    enumerate_instance_version: Option<FnEnumerateInstanceVersion>,
+    create_instance: FnCreateInstance,
+}
+
+impl GlobalFunctions {
+    pub unsafe fn new(get_proc_addr: *mut c_void) -> Self {
+        let get_instance_proc_addr = transmute::<_, FnGetInstanceProcAddr>(get_proc_addr);
+        Self {
+            get_instance_proc_addr,
+            enumerate_instance_version: transmute::<_, _>(get_instance_proc_addr(
+                Instance::null(),
+                cstr!("vkEnumerateInstanceVersion").as_ptr(),
+            )),
+            create_instance: transmute::<_, _>(
+                get_instance_proc_addr(Instance::null(), cstr!("vkCreateInstance").as_ptr())
+                    .expect("failed to load vkCreateInstance"),
+            ),
+        }
+    }
+
+    #[inline]
+    pub unsafe fn get_instance_proc_addr(
+        &self,
+        instance: Instance,
+        name: *const c_char,
+    ) -> Option<FnVoidFunction> {
+        (self.get_instance_proc_addr)(instance, name)
+    }
+
+    #[inline]
+    pub fn enumerate_instance_version(&self, api_version: &mut u32) -> Result {
+        if let Some(enumerate_instance_version) = self.enumerate_instance_version {
+            enumerate_instance_version(api_version)
+        } else {
+            *api_version = VERSION_1_0;
+            Result::Success
+        }
+    }
+
+    #[inline]
+    pub unsafe fn create_instance(
+        &self,
+        create_info: &InstanceCreateInfo,
+        allocator: Option<&AllocationCallbacks>,
+        instance: &mut Instance,
+    ) -> Result {
+        (self.create_instance)(create_info, allocator, instance)
+    }
+}
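`GlobalFunctions` is the bootstrap step: it needs only a pointer to `vkGetInstanceProcAddr` and pulls the two pre-instance entry points from it. A usage sketch, assuming `proc_addr` was obtained from the platform loader (for example by dlopen-ing `libvulkan.so.1`, which this crate leaves to the caller):

    let global_fn = unsafe { GlobalFunctions::new(proc_addr) };
    let mut api_version = 0;
    let _ = global_fn.enumerate_instance_version(&mut api_version);
    let (major, minor, _patch) = get_version(api_version);
    assert!((major, minor) >= (1, 1), "need at least Vulkan 1.1");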
+
+pub struct InstanceFunctions {
+    destroy_instance: FnDestroyInstance,
+    enumerate_physical_devices: FnEnumeratePhysicalDevices,
+    get_physical_device_features: FnGetPhysicalDeviceFeatures,
+    get_physical_device_properties: FnGetPhysicalDeviceProperties,
+    get_physical_device_queue_family_properties: FnGetPhysicalDeviceQueueFamilyProperties,
+    get_physical_device_memory_properties: FnGetPhysicalDeviceMemoryProperties,
+    create_device: FnCreateDevice,
+    get_device_proc_addr: FnGetDeviceProcAddr,
+
+    // VERSION_1_1
+    get_physical_device_features2: FnGetPhysicalDeviceFeatures2,
+    get_physical_device_properties2: FnGetPhysicalDeviceProperties2,
+}
+
+impl InstanceFunctions {
+    pub fn new(global_functions: &GlobalFunctions, instance: Instance, api_version: u32) -> Self {
+        unsafe {
+            let load = |name: &CStr, function_version| {
+                if api_version >= function_version {
+                    global_functions
+                        .get_instance_proc_addr(instance, name.as_ptr())
+                        .unwrap_or_else(
+                            #[cold]
+                            || {
+                                panic!(
+                                    "failed to load instance function {}",
+                                    name.to_string_lossy()
+                                )
+                            },
+                        )
+                } else {
+                    transmute::<_, _>(vulkan_instance_version_not_supported as fn())
+                }
+            };
+
+            Self {
+                destroy_instance: transmute::<_, _>(load(cstr!("vkDestroyInstance"), VERSION_1_0)),
+                enumerate_physical_devices: transmute::<_, _>(load(
+                    cstr!("vkEnumeratePhysicalDevices"),
+                    VERSION_1_0,
+                )),
+                get_physical_device_features: transmute::<_, _>(load(
+                    cstr!("vkGetPhysicalDeviceFeatures"),
+                    VERSION_1_0,
+                )),
+                get_physical_device_properties: transmute::<_, _>(load(
+                    cstr!("vkGetPhysicalDeviceProperties"),
+                    VERSION_1_0,
+                )),
+                get_physical_device_queue_family_properties: transmute::<_, _>(load(
+                    cstr!("vkGetPhysicalDeviceQueueFamilyProperties"),
+                    VERSION_1_0,
+                )),
+                get_physical_device_memory_properties: transmute::<_, _>(load(
+                    cstr!("vkGetPhysicalDeviceMemoryProperties"),
+                    VERSION_1_0,
+                )),
+                create_device: transmute::<_, _>(load(cstr!("vkCreateDevice"), VERSION_1_0)),
+                get_device_proc_addr: transmute::<_, _>(load(
+                    cstr!("vkGetDeviceProcAddr"),
+                    VERSION_1_0,
+                )),
+
+                // VERSION_1_1
+                get_physical_device_features2: transmute::<_, _>(load(
+                    cstr!("vkGetPhysicalDeviceFeatures2"),
+                    VERSION_1_1,
+                )),
+                get_physical_device_properties2: transmute::<_, _>(load(
+                    cstr!("vkGetPhysicalDeviceProperties2"),
+                    VERSION_1_1,
+                )),
+            }
+        }
+    }
+
+    #[inline]
+    pub unsafe fn destroy_instance(
+        &self,
+        instance: Instance,
+        allocator: Option<&AllocationCallbacks>,
+    ) {
+        (self.destroy_instance)(instance, allocator)
+    }
+
+    #[inline]
+    pub unsafe fn enumerate_physical_devices(
+        &self,
+        instance: Instance,
+        physical_device_count: &mut u32,
+        physical_devices: *mut PhysicalDevice,
+    ) -> Result {
+        (self.enumerate_physical_devices)(instance, physical_device_count, physical_devices)
+    }
+
+    #[inline]
+    pub unsafe fn get_physical_device_features(
+        &self,
+        physical_device: PhysicalDevice,
+        features: *mut PhysicalDeviceFeatures,
+    ) {
+        (self.get_physical_device_features)(physical_device, features)
+    }
+
+    #[inline]
+    pub unsafe fn get_physical_device_features2(
+        &self,
+        physical_device: PhysicalDevice,
+        features: *mut PhysicalDeviceFeatures2,
+    ) {
+        (self.get_physical_device_features2)(physical_device, features)
+    }
+
+    #[inline]
+    pub unsafe fn get_physical_device_properties(
+        &self,
+        physical_device: PhysicalDevice,
+        properties: *mut PhysicalDeviceProperties,
+    ) {
+        (self.get_physical_device_properties)(physical_device, properties)
+    }
+
+    #[inline]
+    pub unsafe fn get_physical_device_properties2(
+        &self,
+        physical_device: PhysicalDevice,
+        properties: *mut PhysicalDeviceProperties2,
+    ) {
+        (self.get_physical_device_properties2)(physical_device, properties)
+    }
+
+    #[inline]
+    pub unsafe fn get_physical_device_queue_family_properties(
+        &self,
+        physical_device: PhysicalDevice,
+        queue_family_property_count: &mut u32,
+        queue_family_properties: *mut QueueFamilyProperties,
+    ) {
+        (self.get_physical_device_queue_family_properties)(
+            physical_device,
+            queue_family_property_count,
+            queue_family_properties,
+        )
+    }
+
+    #[inline]
+    pub unsafe fn get_physical_device_memory_properties(
+        &self,
+        physical_device: PhysicalDevice,
+        memory_properties: *mut PhysicalDeviceMemoryProperties,
+    ) {
+        (self.get_physical_device_memory_properties)(physical_device, memory_properties)
+    }
+
+    #[inline]
+    pub unsafe fn create_device(
+        &self,
+        physical_device: PhysicalDevice,
+        create_info: &DeviceCreateInfo,
+        allocator: Option<&AllocationCallbacks>,
+        device: &mut Device,
+    ) -> Result {
+        (self.create_device)(physical_device, create_info, allocator, device)
+    }
+
+    #[inline]
+    pub unsafe fn get_device_proc_addr(
+        &self,
+        device: Device,
+        name: *const c_char,
+    ) -> Option<FnVoidFunction> {
+        (self.get_device_proc_addr)(device, name)
+    }
+}
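Functions that enumerate, such as `enumerate_physical_devices`, follow Vulkan's two-call convention: pass a null pointer to query the count, size a buffer, then call again to fill it. A sketch against the wrapper above, where `instance_fn` and `instance` are assumed to exist:

    let mut count = 0;
    unsafe {
        instance_fn.enumerate_physical_devices(instance, &mut count, std::ptr::null_mut());
    }
    let mut physical_devices = vec![PhysicalDevice::null(); count as usize];
    unsafe {
        instance_fn.enumerate_physical_devices(
            instance,
            &mut count,
            physical_devices.as_mut_ptr(),
        );
    }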
+
+pub struct DeviceFunctions {
+    destroy_device: FnDestroyDevice,
+    get_device_queue: FnGetDeviceQueue,
+    queue_submit: FnQueueSubmit,
+    queue_wait_idle: FnQueueWaitIdle,
+    device_wait_idle: FnDeviceWaitIdle,
+    allocate_memory: FnAllocateMemory,
+    free_memory: FnFreeMemory,
+    map_memory: FnMapMemory,
+    unmap_memory: FnUnmapMemory,
+    create_buffer: FnCreateBuffer,
+    destroy_buffer: FnDestroyBuffer,
+    create_buffer_view: FnCreateBufferView,
+    destroy_buffer_view: FnDestroyBufferView,
+    create_image: FnCreateImage,
+    destroy_image: FnDestroyImage,
+    get_image_subresource_layout: FnGetImageSubresourceLayout,
+    create_image_view: FnCreateImageView,
+    destroy_image_view: FnDestroyImageView,
+    create_command_pool: FnCreateCommandPool,
+    destroy_command_pool: FnDestroyCommandPool,
+    reset_command_pool: FnResetCommandPool,
+    allocate_command_buffers: FnAllocateCommandBuffers,
+    free_command_buffers: FnFreeCommandBuffers,
+    begin_command_buffer: FnBeginCommandBuffer,
+    end_command_buffer: FnEndCommandBuffer,
+    reset_command_buffer: FnResetCommandBuffer,
+    create_framebuffer: FnCreateFramebuffer,
+    destroy_framebuffer: FnDestroyFramebuffer,
+    create_render_pass: FnCreateRenderPass,
+    destroy_render_pass: FnDestroyRenderPass,
+    create_semaphore: FnCreateSemaphore,
+    destroy_semaphore: FnDestroySemaphore,
+    get_semaphore_counter_value: FnGetSemaphoreCounterValue,
+    wait_semaphores: FnWaitSemaphores,
+    signal_semaphore: FnSignalSemaphore,
+    create_fence: FnCreateFence,
+    destroy_fence: FnDestroyFence,
+    reset_fences: FnResetFences,
+    get_fence_status: FnGetFenceStatus,
+    wait_for_fences: FnWaitForFences,
+    invalidate_mapped_memory_ranges: FnInvalidateMappedMemoryRanges,
+    create_shader_module: FnCreateShaderModule,
+    destroy_shader_module: FnDestroyShaderModule,
+    create_sampler: FnCreateSampler,
+    destroy_sampler: FnDestroySampler,
+    create_descriptor_set_layout: FnCreateDescriptorSetLayout,
+    destroy_descriptor_set_layout: FnDestroyDescriptorSetLayout,
+    create_pipeline_layout: FnCreatePipelineLayout,
+    destroy_pipeline_layout: FnDestroyPipelineLayout,
+    create_graphics_pipelines: FnCreateGraphicsPipelines,
+    create_compute_pipelines: FnCreateComputePipelines,
+    destroy_pipeline: FnDestroyPipeline,
+    cmd_bind_pipeline: FnCmdBindPipeline,
+    cmd_set_viewport: FnCmdSetViewport,
+    cmd_set_scissor: FnCmdSetScissor,
+    cmd_set_line_width: FnCmdSetLineWidth,
+    cmd_set_depth_bias: FnCmdSetDepthBias,
+    cmd_set_blend_constants: FnCmdSetBlendConstants,
+    cmd_set_depth_bounds: FnCmdSetDepthBounds,
+    cmd_set_stencil_compare_mask: FnCmdSetStencilCompareMask,
+    cmd_set_stencil_write_mask: FnCmdSetStencilWriteMask,
+    cmd_set_stencil_reference: FnCmdSetStencilReference,
+    cmd_bind_descriptor_sets: FnCmdBindDescriptorSets,
+    cmd_bind_index_buffer: FnCmdBindIndexBuffer,
+    cmd_bind_vertex_buffers: FnCmdBindVertexBuffers,
+    cmd_draw: FnCmdDraw,
+    cmd_draw_indexed: FnCmdDrawIndexed,
+    cmd_draw_indirect: FnCmdDrawIndirect,
+    cmd_draw_indexed_indirect: FnCmdDrawIndexedIndirect,
+    cmd_dispatch: FnCmdDispatch,
+    cmd_dispatch_indirect: FnCmdDispatchIndirect,
+    cmd_copy_buffer: FnCmdCopyBuffer,
+    cmd_copy_image: FnCmdCopyImage,
+    cmd_blit_image: FnCmdBlitImage,
+    cmd_copy_buffer_to_image: FnCmdCopyBufferToImage,
+    cmd_copy_image_to_buffer: FnCmdCopyImageToBuffer,
+    cmd_update_buffer: FnCmdUpdateBuffer,
+    cmd_fill_buffer: FnCmdFillBuffer,
+    cmd_clear_color_image: FnCmdClearColorImage,
+    cmd_clear_depth_stencil_image: FnCmdClearDepthStencilImage,
+    cmd_clear_attachments: FnCmdClearAttachments,
+    cmd_resolve_image: FnCmdResolveImage,
+    cmd_set_event: FnCmdSetEvent,
+    cmd_reset_event: FnCmdResetEvent,
+    cmd_wait_events: FnCmdWaitEvents,
+    cmd_pipeline_barrier: FnCmdPipelineBarrier,
+    cmd_begin_query: FnCmdBeginQuery,
+    cmd_end_query: FnCmdEndQuery,
+    cmd_reset_query_pool: FnCmdResetQueryPool,
+    cmd_write_timestamp: FnCmdWriteTimestamp,
+    cmd_copy_query_pool_results: FnCmdCopyQueryPoolResults,
+    cmd_push_constants: FnCmdPushConstants,
+    cmd_begin_render_pass: FnCmdBeginRenderPass,
+    cmd_next_subpass: FnCmdNextSubpass,
+    cmd_end_render_pass: FnCmdEndRenderPass,
+    cmd_execute_commands: FnCmdExecuteCommands,
+
+    // VERSION_1_1
+    get_image_memory_requirements2: FnGetImageMemoryRequirements2,
+    bind_image_memory2: FnBindImageMemory2,
+    get_buffer_memory_requirements2: FnGetBufferMemoryRequirements2,
+    bind_buffer_memory2: FnBindBufferMemory2,
+
+    // VERSION_1_3
+    cmd_pipeline_barrier2: FnCmdPipelineBarrier2,
+    cmd_wait_events2: FnCmdWaitEvents2,
+    cmd_set_event2: FnCmdSetEvent2,
+    cmd_begin_rendering: FnCmdBeginRendering,
+    cmd_end_rendering: FnCmdEndRendering,
+    cmd_set_viewport_with_count: FnCmdSetViewportWithCount,
+    cmd_set_scissor_with_count: FnCmdSetScissorWithCount,
+    queue_submit2: FnQueueSubmit2,
+}
+
+impl DeviceFunctions {
+    pub fn new(instance_functions: &InstanceFunctions, device: Device, api_version: u32) -> Self {
+        unsafe {
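+            // Entry points newer than the device's `api_version` are stubbed
+            // with `vulkan_device_version_not_supported`, which is expected
+            // to panic if ever invoked, so every field can be populated
+            // unconditionally.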
+            let load = |name: &CStr, function_version| {
+                if api_version >= function_version {
+                    instance_functions
+                        .get_device_proc_addr(device, name.as_ptr())
+                        .unwrap_or_else(
+                            #[cold]
+                            || panic!("failed to load device function {}", name.to_string_lossy()),
+                        )
+                } else {
+                    transmute::<_, _>(vulkan_device_version_not_supported as fn())
+                }
+            };
+
+            Self {
+                destroy_device: transmute::<_, _>(load(cstr!("vkDestroyDevice"), VERSION_1_0)),
+                get_device_queue: transmute::<_, _>(load(cstr!("vkGetDeviceQueue"), VERSION_1_0)),
+                queue_submit: transmute::<_, _>(load(cstr!("vkQueueSubmit"), VERSION_1_0)),
+                queue_wait_idle: transmute::<_, _>(load(cstr!("vkQueueWaitIdle"), VERSION_1_0)),
+                device_wait_idle: transmute::<_, _>(load(cstr!("vkDeviceWaitIdle"), VERSION_1_0)),
+                allocate_memory: transmute::<_, _>(load(cstr!("vkAllocateMemory"), VERSION_1_0)),
+                free_memory: transmute::<_, _>(load(cstr!("vkFreeMemory"), VERSION_1_0)),
+                map_memory: transmute::<_, _>(load(cstr!("vkMapMemory"), VERSION_1_0)),
+                unmap_memory: transmute::<_, _>(load(cstr!("vkUnmapMemory"), VERSION_1_0)),
+                create_buffer: transmute::<_, _>(load(cstr!("vkCreateBuffer"), VERSION_1_0)),
+                destroy_buffer: transmute::<_, _>(load(cstr!("vkDestroyBuffer"), VERSION_1_0)),
+                create_buffer_view: transmute::<_, _>(load(
+                    cstr!("vkCreateBufferView"),
+                    VERSION_1_0,
+                )),
+                destroy_buffer_view: transmute::<_, _>(load(
+                    cstr!("vkDestroyBufferView"),
+                    VERSION_1_0,
+                )),
+                create_image: transmute::<_, _>(load(cstr!("vkCreateImage"), VERSION_1_0)),
+                destroy_image: transmute::<_, _>(load(cstr!("vkDestroyImage"), VERSION_1_0)),
+                get_image_subresource_layout: transmute::<_, _>(load(
+                    cstr!("vkGetImageSubresourceLayout"),
+                    VERSION_1_0,
+                )),
+                create_image_view: transmute::<_, _>(load(cstr!("vkCreateImageView"), VERSION_1_0)),
+                destroy_image_view: transmute::<_, _>(load(
+                    cstr!("vkDestroyImageView"),
+                    VERSION_1_0,
+                )),
+                create_command_pool: transmute::<_, _>(load(
+                    cstr!("vkCreateCommandPool"),
+                    VERSION_1_0,
+                )),
+                destroy_command_pool: transmute::<_, _>(load(
+                    cstr!("vkDestroyCommandPool"),
+                    VERSION_1_0,
+                )),
+                reset_command_pool: transmute::<_, _>(load(
+                    cstr!("vkResetCommandPool"),
+                    VERSION_1_0,
+                )),
+                allocate_command_buffers: transmute::<_, _>(load(
+                    cstr!("vkAllocateCommandBuffers"),
+                    VERSION_1_0,
+                )),
+                free_command_buffers: transmute::<_, _>(load(
+                    cstr!("vkFreeCommandBuffers"),
+                    VERSION_1_0,
+                )),
+                begin_command_buffer: transmute::<_, _>(load(
+                    cstr!("vkBeginCommandBuffer"),
+                    VERSION_1_0,
+                )),
+                end_command_buffer: transmute::<_, _>(load(
+                    cstr!("vkEndCommandBuffer"),
+                    VERSION_1_0,
+                )),
+                reset_command_buffer: transmute::<_, _>(load(
+                    cstr!("vkResetCommandBuffer"),
+                    VERSION_1_0,
+                )),
+                create_framebuffer: transmute::<_, _>(load(
+                    cstr!("vkCreateFramebuffer"),
+                    VERSION_1_0,
+                )),
+                destroy_framebuffer: transmute::<_, _>(load(
+                    cstr!("vkDestroyFramebuffer"),
+                    VERSION_1_0,
+                )),
+                create_render_pass: transmute::<_, _>(load(
+                    cstr!("vkCreateRenderPass"),
+                    VERSION_1_0,
+                )),
+                destroy_render_pass: transmute::<_, _>(load(
+                    cstr!("vkDestroyRenderPass"),
+                    VERSION_1_0,
+                )),
+                create_semaphore: transmute::<_, _>(load(cstr!("vkCreateSemaphore"), VERSION_1_0)),
+                destroy_semaphore: transmute::<_, _>(load(
+                    cstr!("vkDestroySemaphore"),
+                    VERSION_1_0,
+                )),
+
+                wait_semaphores: transmute::<_, _>(load(cstr!("vkWaitSemaphores"), VERSION_1_2)),
+                signal_semaphore: transmute::<_, _>(load(cstr!("vkSignalSemaphore"), VERSION_1_2)),
+                create_fence: transmute::<_, _>(load(cstr!("vkCreateFence"), VERSION_1_0)),
+                destroy_fence: transmute::<_, _>(load(cstr!("vkDestroyFence"), VERSION_1_0)),
+                reset_fences: transmute::<_, _>(load(cstr!("vkResetFences"), VERSION_1_0)),
+                get_fence_status: transmute::<_, _>(load(cstr!("vkGetFenceStatus"), VERSION_1_0)),
+                wait_for_fences: transmute::<_, _>(load(cstr!("vkWaitForFences"), VERSION_1_0)),
+                invalidate_mapped_memory_ranges: transmute::<_, _>(load(
+                    cstr!("vkInvalidateMappedMemoryRanges"),
+                    VERSION_1_0,
+                )),
+                create_shader_module: transmute::<_, _>(load(
+                    cstr!("vkCreateShaderModule"),
+                    VERSION_1_0,
+                )),
+                destroy_shader_module: transmute::<_, _>(load(
+                    cstr!("vkDestroyShaderModule"),
+                    VERSION_1_0,
+                )),
+                create_sampler: transmute::<_, _>(load(cstr!("vkCreateSampler"), VERSION_1_0)),
+                destroy_sampler: transmute::<_, _>(load(cstr!("vkDestroySampler"), VERSION_1_0)),
+                create_descriptor_set_layout: transmute::<_, _>(load(
+                    cstr!("vkCreateDescriptorSetLayout"),
+                    VERSION_1_0,
+                )),
+                destroy_descriptor_set_layout: transmute::<_, _>(load(
+                    cstr!("vkDestroyDescriptorSetLayout"),
+                    VERSION_1_0,
+                )),
+                create_pipeline_layout: transmute::<_, _>(load(
+                    cstr!("vkCreatePipelineLayout"),
+                    VERSION_1_0,
+                )),
+                destroy_pipeline_layout: transmute::<_, _>(load(
+                    cstr!("vkDestroyPipelineLayout"),
+                    VERSION_1_0,
+                )),
+                create_graphics_pipelines: transmute::<_, _>(load(
+                    cstr!("vkCreateGraphicsPipelines"),
+                    VERSION_1_0,
+                )),
+                create_compute_pipelines: transmute::<_, _>(load(
+                    cstr!("vkCreateComputePipelines"),
+                    VERSION_1_0,
+                )),
+                destroy_pipeline: transmute::<_, _>(load(cstr!("vkDestroyPipeline"), VERSION_1_0)),
+                cmd_bind_pipeline: transmute::<_, _>(load(cstr!("vkCmdBindPipeline"), VERSION_1_0)),
+                cmd_set_viewport: transmute::<_, _>(load(cstr!("vkCmdSetViewport"), VERSION_1_0)),
+                cmd_set_scissor: transmute::<_, _>(load(cstr!("vkCmdSetScissor"), VERSION_1_0)),
+                cmd_set_line_width: transmute::<_, _>(load(
+                    cstr!("vkCmdSetLineWidth"),
+                    VERSION_1_0,
+                )),
+                cmd_set_depth_bias: transmute::<_, _>(load(
+                    cstr!("vkCmdSetDepthBias"),
+                    VERSION_1_0,
+                )),
+                cmd_set_blend_constants: transmute::<_, _>(load(
+                    cstr!("vkCmdSetBlendConstants"),
+                    VERSION_1_0,
+                )),
+                cmd_set_depth_bounds: transmute::<_, _>(load(
+                    cstr!("vkCmdSetDepthBounds"),
+                    VERSION_1_0,
+                )),
+                cmd_set_stencil_compare_mask: transmute::<_, _>(load(
+                    cstr!("vkCmdSetStencilCompareMask"),
+                    VERSION_1_0,
+                )),
+                cmd_set_stencil_write_mask: transmute::<_, _>(load(
+                    cstr!("vkCmdSetStencilWriteMask"),
+                    VERSION_1_0,
+                )),
+                cmd_set_stencil_reference: transmute::<_, _>(load(
+                    cstr!("vkCmdSetStencilReference"),
+                    VERSION_1_0,
+                )),
+                cmd_bind_descriptor_sets: transmute::<_, _>(load(
+                    cstr!("vkCmdBindDescriptorSets"),
+                    VERSION_1_0,
+                )),
+                cmd_bind_index_buffer: transmute::<_, _>(load(
+                    cstr!("vkCmdBindIndexBuffer"),
+                    VERSION_1_0,
+                )),
+                cmd_bind_vertex_buffers: transmute::<_, _>(load(
+                    cstr!("vkCmdBindVertexBuffers"),
+                    VERSION_1_0,
+                )),
+                cmd_draw: transmute::<_, _>(load(cstr!("vkCmdDraw"), VERSION_1_0)),
+                cmd_draw_indexed: transmute::<_, _>(load(cstr!("vkCmdDrawIndexed"), VERSION_1_0)),
+                cmd_draw_indirect: transmute::<_, _>(load(cstr!("vkCmdDrawIndirect"), VERSION_1_0)),
+                cmd_draw_indexed_indirect: transmute::<_, _>(load(
+                    cstr!("vkCmdDrawIndexedIndirect"),
+                    VERSION_1_0,
+                )),
+                cmd_dispatch: transmute::<_, _>(load(cstr!("vkCmdDispatch"), VERSION_1_0)),
+                cmd_dispatch_indirect: transmute::<_, _>(load(
+                    cstr!("vkCmdDispatchIndirect"),
+                    VERSION_1_0,
+                )),
+                cmd_copy_buffer: transmute::<_, _>(load(cstr!("vkCmdCopyBuffer"), VERSION_1_0)),
+                cmd_copy_image: transmute::<_, _>(load(cstr!("vkCmdCopyImage"), VERSION_1_0)),
+                cmd_blit_image: transmute::<_, _>(load(cstr!("vkCmdBlitImage"), VERSION_1_0)),
+                cmd_copy_buffer_to_image: transmute::<_, _>(load(
+                    cstr!("vkCmdCopyBufferToImage"),
+                    VERSION_1_0,
+                )),
+                cmd_copy_image_to_buffer: transmute::<_, _>(load(
+                    cstr!("vkCmdCopyImageToBuffer"),
+                    VERSION_1_0,
+                )),
+                cmd_update_buffer: transmute::<_, _>(load(cstr!("vkCmdUpdateBuffer"), VERSION_1_0)),
+                cmd_fill_buffer: transmute::<_, _>(load(cstr!("vkCmdFillBuffer"), VERSION_1_0)),
+                cmd_clear_color_image: transmute::<_, _>(load(
+                    cstr!("vkCmdClearColorImage"),
+                    VERSION_1_0,
+                )),
+                cmd_clear_depth_stencil_image: transmute::<_, _>(load(
+                    cstr!("vkCmdClearDepthStencilImage"),
+                    VERSION_1_0,
+                )),
+                cmd_clear_attachments: transmute::<_, _>(load(
+                    cstr!("vkCmdClearAttachments"),
+                    VERSION_1_0,
+                )),
+                cmd_resolve_image: transmute::<_, _>(load(cstr!("vkCmdResolveImage"), VERSION_1_0)),
+                cmd_set_event: transmute::<_, _>(load(cstr!("vkCmdSetEvent"), VERSION_1_0)),
+                cmd_reset_event: transmute::<_, _>(load(cstr!("vkCmdResetEvent"), VERSION_1_0)),
+                cmd_wait_events: transmute::<_, _>(load(cstr!("vkCmdWaitEvents"), VERSION_1_0)),
+                cmd_pipeline_barrier: transmute::<_, _>(load(
+                    cstr!("vkCmdPipelineBarrier"),
+                    VERSION_1_0,
+                )),
+                cmd_begin_query: transmute::<_, _>(load(cstr!("vkCmdBeginQuery"), VERSION_1_0)),
+                cmd_end_query: transmute::<_, _>(load(cstr!("vkCmdEndQuery"), VERSION_1_0)),
+                cmd_reset_query_pool: transmute::<_, _>(load(
+                    cstr!("vkCmdResetQueryPool"),
+                    VERSION_1_0,
+                )),
+                cmd_write_timestamp: transmute::<_, _>(load(
+                    cstr!("vkCmdWriteTimestamp"),
+                    VERSION_1_0,
+                )),
+                cmd_copy_query_pool_results: transmute::<_, _>(load(
+                    cstr!("vkCmdCopyQueryPoolResults"),
+                    VERSION_1_0,
+                )),
+                cmd_push_constants: transmute::<_, _>(load(
+                    cstr!("vkCmdPushConstants"),
+                    VERSION_1_0,
+                )),
+                cmd_begin_render_pass: transmute::<_, _>(load(
+                    cstr!("vkCmdBeginRenderPass"),
+                    VERSION_1_0,
+                )),
+                cmd_next_subpass: transmute::<_, _>(load(cstr!("vkCmdNextSubpass"), VERSION_1_0)),
+                cmd_end_render_pass: transmute::<_, _>(load(
+                    cstr!("vkCmdEndRenderPass"),
+                    VERSION_1_0,
+                )),
+                cmd_execute_commands: transmute::<_, _>(load(
+                    cstr!("vkCmdExecuteCommands"),
+                    VERSION_1_0,
+                )),
+
+                // VERSION_1_1
+                get_image_memory_requirements2: transmute::<_, _>(load(
+                    cstr!("vkGetImageMemoryRequirements2"),
+                    VERSION_1_1,
+                )),
+                bind_image_memory2: transmute::<_, _>(load(
+                    cstr!("vkBindImageMemory2"),
+                    VERSION_1_1,
+                )),
+                get_buffer_memory_requirements2: transmute::<_, _>(load(
+                    cstr!("vkGetBufferMemoryRequirements2"),
+                    VERSION_1_1,
+                )),
+                bind_buffer_memory2: transmute::<_, _>(load(
+                    cstr!("vkBindBufferMemory2"),
+                    VERSION_1_1,
+                )),
+
+                // VERSION_1_2
+                get_semaphore_counter_value: transmute::<_, _>(load(
+                    cstr!("vkGetSemaphoreCounterValue"),
+                    VERSION_1_2,
+                )),
+
+                // VERSION_1_3
+                cmd_pipeline_barrier2: transmute::<_, _>(load(
+                    cstr!("vkCmdPipelineBarrier2"),
+                    VERSION_1_3,
+                )),
+                cmd_wait_events2: transmute::<_, _>(load(cstr!("vkCmdWaitEvents2"), VERSION_1_3)),
+                cmd_set_event2: transmute::<_, _>(load(cstr!("vkCmdSetEvent2"), VERSION_1_3)),
+
+                cmd_begin_rendering: transmute::<_, _>(load(
+                    cstr!("vkCmdBeginRendering"),
+                    VERSION_1_3,
+                )),
+                cmd_end_rendering: transmute::<_, _>(load(cstr!("vkCmdEndRendering"), VERSION_1_3)),
+                cmd_set_viewport_with_count: transmute::<_, _>(load(
+                    cstr!("vkCmdSetViewportWithCount"),
+                    VERSION_1_3,
+                )),
+                cmd_set_scissor_with_count: transmute::<_, _>(load(
+                    cstr!("vkCmdSetScissorWithCount"),
+                    VERSION_1_3,
+                )),
+                queue_submit2: transmute::<_, _>(load(cstr!("vkQueueSubmit2"), VERSION_1_3)),
+            }
+        }
+    }
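+
+    // Construction sketch (illustrative): wrap the raw device handle once and
+    // reuse the table for every call; `instance_functions` and `device` are
+    // assumed to come from instance and device creation elsewhere.
+    //
+    //     let device_fn = DeviceFunctions::new(&instance_functions, device, VERSION_1_3);
+    //     let result = unsafe { device_fn.device_wait_idle(device) };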
+
+    #[inline]
+    pub unsafe fn destroy_device(&self, device: Device, allocator: Option<&AllocationCallbacks>) {
+        (self.destroy_device)(device, allocator)
+    }
+
+    #[inline]
+    pub unsafe fn get_device_queue(
+        &self,
+        device: Device,
+        queue_family_index: u32,
+        queue_index: u32,
+        queue: &mut Queue,
+    ) {
+        (self.get_device_queue)(device, queue_family_index, queue_index, queue)
+    }
+
+    #[inline]
+    pub unsafe fn queue_submit(
+        &self,
+        queue: Queue,
+        submits: &[SubmitInfo],
+        fence: Fence,
+    ) -> Result {
+        (self.queue_submit)(queue, submits.len() as u32, submits.as_ptr(), fence)
+    }
+
+    #[inline]
+    pub unsafe fn queue_submit2(
+        &self,
+        queue: Queue,
+        submits: &[SubmitInfo2],
+        fence: Fence,
+    ) -> Result {
+        (self.queue_submit2)(queue, submits.len() as u32, submits.as_ptr(), fence)
+    }
+
+    #[inline]
+    pub unsafe fn allocate_memory(
+        &self,
+        device: Device,
+        allocate_info: &MemoryAllocateInfo,
+        allocator: Option<&AllocationCallbacks>,
+        memory: &mut DeviceMemory,
+    ) -> Result {
+        (self.allocate_memory)(device, allocate_info, allocator, memory)
+    }
+
+    #[inline]
+    pub unsafe fn free_memory(
+        &self,
+        device: Device,
+        memory: DeviceMemory,
+        allocator: Option<&AllocationCallbacks>,
+    ) {
+        (self.free_memory)(device, memory, allocator)
+    }
+
+    #[inline]
+    pub unsafe fn map_memory(
+        &self,
+        device: Device,
+        memory: DeviceMemory,
+        offset: DeviceSize,
+        size: DeviceSize,
+        flags: MemoryMapFlags,
+        data: &mut *mut c_void,
+    ) -> Result {
+        (self.map_memory)(device, memory, offset, size, flags, data)
+    }
+
+    #[inline]
+    pub unsafe fn unmap_memory(&self, device: Device, memory: DeviceMemory) {
+        (self.unmap_memory)(device, memory)
+    }
+
+    #[inline]
+    pub unsafe fn create_buffer(
+        &self,
+        device: Device,
+        create_info: &BufferCreateInfo,
+        allocator: Option<&AllocationCallbacks>,
+        buffer: &mut Buffer,
+    ) -> Result {
+        (self.create_buffer)(device, create_info, allocator, buffer)
+    }
+
+    #[inline]
+    pub unsafe fn destroy_buffer(
+        &self,
+        device: Device,
+        buffer: Buffer,
+        allocator: Option<&AllocationCallbacks>,
+    ) {
+        (self.destroy_buffer)(device, buffer, allocator)
+    }
+
+    #[inline]
+    pub unsafe fn create_buffer_view(
+        &self,
+        device: Device,
+        create_info: &BufferViewCreateInfo,
+        allocator: Option<&AllocationCallbacks>,
+        view: &mut BufferView,
+    ) -> Result {
+        (self.create_buffer_view)(device, create_info, allocator, view)
+    }
+
+    #[inline]
+    pub unsafe fn destroy_buffer_view(
+        &self,
+        device: Device,
+        buffer_view: BufferView,
+        allocator: Option<&AllocationCallbacks>,
+    ) {
+        (self.destroy_buffer_view)(device, buffer_view, allocator)
+    }
+
+    #[inline]
+    pub unsafe fn create_image(
+        &self,
+        device: Device,
+        create_info: &ImageCreateInfo,
+        allocator: Option<&AllocationCallbacks>,
+        image: &mut Image,
+    ) -> Result {
+        (self.create_image)(device, create_info, allocator, image)
+    }
+
+    #[inline]
+    pub unsafe fn destroy_image(
+        &self,
+        device: Device,
+        image: Image,
+        allocator: Option<&AllocationCallbacks>,
+    ) {
+        (self.destroy_image)(device, image, allocator)
+    }
+
+    #[inline]
+    pub unsafe fn get_image_subresource_layout(
+        &self,
+        device: Device,
+        image: Image,
+        subresource: &ImageSubresource,
+        layout: &mut SubresourceLayout,
+    ) {
+        (self.get_image_subresource_layout)(device, image, subresource, layout)
+    }
+
+    #[inline]
+    pub unsafe fn create_image_view(
+        &self,
+        device: Device,
+        create_info: &ImageViewCreateInfo,
+        allocator: Option<&AllocationCallbacks>,
+        view: &mut ImageView,
+    ) -> Result {
+        (self.create_image_view)(device, create_info, allocator, view)
+    }
+
+    #[inline]
+    pub unsafe fn destroy_image_view(
+        &self,
+        device: Device,
+        image_view: ImageView,
+        allocator: Option<&AllocationCallbacks>,
+    ) {
+        (self.destroy_image_view)(device, image_view, allocator)
+    }
+
+    #[inline]
+    pub unsafe fn create_render_pass(
+        &self,
+        device: Device,
+        create_info: &RenderPassCreateInfo,
+        allocator: Option<&AllocationCallbacks>,
+        render_pass: &mut RenderPass,
+    ) -> Result {
+        (self.create_render_pass)(device, create_info, allocator, render_pass)
+    }
+
+    #[inline]
+    pub unsafe fn destroy_render_pass(
+        &self,
+        device: Device,
+        render_pass: RenderPass,
+        allocator: Option<&AllocationCallbacks>,
+    ) {
+        (self.destroy_render_pass)(device, render_pass, allocator)
+    }
+
+    #[inline]
+    pub unsafe fn create_framebuffer(
+        &self,
+        device: Device,
+        create_info: &FramebufferCreateInfo,
+        allocator: Option<&AllocationCallbacks>,
+        framebuffer: &mut Framebuffer,
+    ) -> Result {
+        (self.create_framebuffer)(device, create_info, allocator, framebuffer)
+    }
+
+    #[inline]
+    pub unsafe fn destroy_framebuffer(
+        &self,
+        device: Device,
+        framebuffer: Framebuffer,
+        allocator: Option<&AllocationCallbacks>,
+    ) {
+        (self.destroy_framebuffer)(device, framebuffer, allocator)
+    }
+
+    #[inline]
+    pub unsafe fn create_command_pool(
+        &self,
+        device: Device,
+        create_info: &CommandPoolCreateInfo,
+        allocator: Option<&AllocationCallbacks>,
+        command_pool: &mut CommandPool,
+    ) -> Result {
+        (self.create_command_pool)(device, create_info, allocator, command_pool)
+    }
+
+    #[inline]
+    pub unsafe fn destroy_command_pool(
+        &self,
+        device: Device,
+        command_pool: CommandPool,
+        allocator: Option<&AllocationCallbacks>,
+    ) {
+        (self.destroy_command_pool)(device, command_pool, allocator)
+    }
+
+    #[inline]
+    pub unsafe fn reset_command_pool(
+        &self,
+        device: Device,
+        command_pool: CommandPool,
+        flags: CommandPoolResetFlags,
+    ) -> Result {
+        (self.reset_command_pool)(device, command_pool, flags)
+    }
+
+    #[inline]
+    pub unsafe fn allocate_command_buffers(
+        &self,
+        device: Device,
+        allocate_info: &CommandBufferAllocateInfo,
+        command_buffers: *mut CommandBuffer,
+    ) -> Result {
+        (self.allocate_command_buffers)(device, allocate_info, command_buffers)
+    }
+
+    #[inline]
+    pub unsafe fn free_command_buffers(
+        &self,
+        device: Device,
+        command_pool: CommandPool,
+        command_buffers: &[CommandBuffer],
+    ) {
+        (self.free_command_buffers)(
+            device,
+            command_pool,
+            command_buffers.len() as u32,
+            command_buffers.as_ptr(),
+        )
+    }
+
+    #[inline]
+    pub unsafe fn begin_command_buffer(
+        &self,
+        command_buffer: CommandBuffer,
+        begin_info: &CommandBufferBeginInfo,
+    ) -> Result {
+        (self.begin_command_buffer)(command_buffer, begin_info)
+    }
+
+    #[inline]
+    pub unsafe fn end_command_buffer(&self, command_buffer: CommandBuffer) -> Result {
+        (self.end_command_buffer)(command_buffer)
+    }
+
+    #[inline]
+    pub unsafe fn reset_command_buffer(
+        &self,
+        command_buffer: CommandBuffer,
+        flags: CommandBufferResetFlags,
+    ) -> Result {
+        (self.reset_command_buffer)(command_buffer, flags)
+    }
+
+    #[inline]
+    pub unsafe fn create_semaphore(
+        &self,
+        device: Device,
+        create_info: &SemaphoreCreateInfo,
+        allocator: Option<&AllocationCallbacks>,
+        semaphore: &mut Semaphore,
+    ) -> Result {
+        (self.create_semaphore)(device, create_info, allocator, semaphore)
+    }
+
+    #[inline]
+    pub unsafe fn destroy_semaphore(
+        &self,
+        device: Device,
+        semaphore: Semaphore,
+        allocator: Option<&AllocationCallbacks>,
+    ) {
+        (self.destroy_semaphore)(device, semaphore, allocator)
+    }
+
+    #[inline]
+    pub unsafe fn get_semaphore_counter_value(
+        &self,
+        device: Device,
+        semaphore: Semaphore,
+        value: &mut u64,
+    ) -> Result {
+        (self.get_semaphore_counter_value)(device, semaphore, value)
+    }
+
+    #[inline]
+    pub unsafe fn wait_semaphores(
+        &self,
+        device: Device,
+        wait_info: &SemaphoreWaitInfo,
+        timeout: u64,
+    ) -> Result {
+        (self.wait_semaphores)(device, wait_info, timeout)
+    }
+
+    #[inline]
+    pub unsafe fn signal_semaphore(
+        &self,
+        device: Device,
+        signal_info: &SemaphoreSignalInfo,
+    ) -> Result {
+        (self.signal_semaphore)(device, signal_info)
+    }
+
+    #[inline]
+    pub unsafe fn create_fence(
+        &self,
+        device: Device,
+        create_info: &FenceCreateInfo,
+        allocator: Option<&AllocationCallbacks>,
+        fence: &mut Fence,
+    ) -> Result {
+        (self.create_fence)(device, create_info, allocator, fence)
+    }
+
+    #[inline]
+    pub unsafe fn destroy_fence(
+        &self,
+        device: Device,
+        fence: Fence,
+        allocator: Option<&AllocationCallbacks>,
+    ) {
+        (self.destroy_fence)(device, fence, allocator)
+    }
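+
+    // Expose the `get_fence_status` pointer loaded above; vkGetFenceStatus
+    // polls a fence's state without blocking.
+    #[inline]
+    pub unsafe fn get_fence_status(&self, device: Device, fence: Fence) -> Result {
+        (self.get_fence_status)(device, fence)
+    }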
+
+    #[inline]
+    pub unsafe fn reset_fences(&self, device: Device, fences: &[Fence]) -> Result {
+        (self.reset_fences)(device, fences.len() as u32, fences.as_ptr())
+    }
+
+    #[inline]
+    pub unsafe fn wait_for_fences(
+        &self,
+        device: Device,
+        fences: &[Fence],
+        wait_all: Bool32,
+        timeout: u64,
+    ) -> Result {
+        (self.wait_for_fences)(
+            device,
+            fences.len() as u32,
+            fences.as_ptr(),
+            wait_all,
+            timeout,
+        )
+    }
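+
+    // Frame-pacing sketch (illustrative; `frame_fence` is hypothetical and
+    // `TRUE` stands in for a Bool32 true value):
+    //
+    //     device_fn.wait_for_fences(device, &[frame_fence], /* wait_all */ TRUE, u64::MAX);
+    //     device_fn.reset_fences(device, &[frame_fence]);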
+
+    #[inline]
+    pub unsafe fn invalidate_mapped_memory_ranges(
+        &self,
+        device: Device,
+        memory_ranges: &[MappedMemoryRange],
+    ) -> Result {
+        (self.invalidate_mapped_memory_ranges)(
+            device,
+            memory_ranges.len() as u32,
+            memory_ranges.as_ptr(),
+        )
+    }
+
+    #[inline]
+    pub unsafe fn create_shader_module(
+        &self,
+        device: Device,
+        create_info: &ShaderModuleCreateInfo,
+        allocator: Option<&AllocationCallbacks>,
+        shader_module: &mut ShaderModule,
+    ) -> Result {
+        (self.create_shader_module)(device, create_info, allocator, shader_module)
+    }
+
+    #[inline]
+    pub unsafe fn destroy_shader_module(
+        &self,
+        device: Device,
+        shader_module: ShaderModule,
+        allocator: Option<&AllocationCallbacks>,
+    ) {
+        (self.destroy_shader_module)(device, shader_module, allocator)
+    }
+
+    #[inline]
+    pub unsafe fn create_sampler(
+        &self,
+        device: Device,
+        create_info: &SamplerCreateInfo,
+        allocator: Option<&AllocationCallbacks>,
+        sampler: &mut Sampler,
+    ) -> Result {
+        (self.create_sampler)(device, create_info, allocator, sampler)
+    }
+
+    #[inline]
+    pub unsafe fn destroy_sampler(
+        &self,
+        device: Device,
+        sampler: Sampler,
+        allocator: Option<&AllocationCallbacks>,
+    ) {
+        (self.destroy_sampler)(device, sampler, allocator)
+    }
+
+    #[inline]
+    pub unsafe fn create_descriptor_set_layout(
+        &self,
+        device: Device,
+        create_info: &DescriptorSetLayoutCreateInfo,
+        allocator: Option<&AllocationCallbacks>,
+        set_layout: &mut DescriptorSetLayout,
+    ) -> Result {
+        (self.create_descriptor_set_layout)(device, create_info, allocator, set_layout)
+    }
+
+    #[inline]
+    pub unsafe fn destroy_descriptor_set_layout(
+        &self,
+        device: Device,
+        descriptor_set_layout: DescriptorSetLayout,
+        allocator: Option<&AllocationCallbacks>,
+    ) {
+        (self.destroy_descriptor_set_layout)(device, descriptor_set_layout, allocator)
+    }
+
+    #[inline]
+    pub unsafe fn create_pipeline_layout(
+        &self,
+        device: Device,
+        create_info: &PipelineLayoutCreateInfo,
+        allocator: Option<&AllocationCallbacks>,
+        pipeline_layout: &mut PipelineLayout,
+    ) -> Result {
+        (self.create_pipeline_layout)(device, create_info, allocator, pipeline_layout)
+    }
+
+    #[inline]
+    pub unsafe fn destroy_pipeline_layout(
+        &self,
+        device: Device,
+        pipeline_layout: PipelineLayout,
+        allocator: Option<&AllocationCallbacks>,
+    ) {
+        (self.destroy_pipeline_layout)(device, pipeline_layout, allocator)
+    }
+
+    #[inline]
+    pub unsafe fn create_graphics_pipelines(
+        &self,
+        device: Device,
+        pipeline_cache: PipelineCache,
+        create_infos: &[GraphicsPipelineCreateInfo],
+        allocator: Option<&AllocationCallbacks>,
+        pipelines: &mut [Pipeline],
+    ) -> Result {
+        debug_assert_eq!(create_infos.len(), pipelines.len());
+        (self.create_graphics_pipelines)(
+            device,
+            pipeline_cache,
+            create_infos.len() as u32,
+            create_infos.as_ptr(),
+            allocator,
+            pipelines.as_mut_ptr(),
+        )
+    }
+
+    #[inline]
+    pub unsafe fn create_compute_pipelines(
+        &self,
+        device: Device,
+        pipeline_cache: PipelineCache,
+        create_infos: &[ComputePipelineCreateInfo],
+        allocator: Option<&AllocationCallbacks>,
+        pipelines: &mut [Pipeline],
+    ) -> Result {
+        debug_assert_eq!(create_infos.len(), pipelines.len());
+        (self.create_compute_pipelines)(
+            device,
+            pipeline_cache,
+            create_infos.len() as u32,
+            create_infos.as_ptr(),
+            allocator,
+            pipelines.as_mut_ptr(),
+        )
+    }
+
+    #[inline]
+    pub unsafe fn destroy_pipeline(
+        &self,
+        device: Device,
+        pipeline: Pipeline,
+        allocator: Option<&AllocationCallbacks>,
+    ) {
+        (self.destroy_pipeline)(device, pipeline, allocator)
+    }
+
+    #[inline]
+    pub unsafe fn cmd_bind_pipeline(
+        &self,
+        command_buffer: CommandBuffer,
+        pipeline_bind_point: PipelineBindPoint,
+        pipeline: Pipeline,
+    ) {
+        (self.cmd_bind_pipeline)(command_buffer, pipeline_bind_point, pipeline)
+    }
+
+    #[inline]
+    pub unsafe fn cmd_set_viewport(
+        &self,
+        command_buffer: CommandBuffer,
+        first_viewport: u32,
+        viewports: &[Viewport],
+    ) {
+        (self.cmd_set_viewport)(
+            command_buffer,
+            first_viewport,
+            viewports.len() as u32,
+            viewports.as_ptr(),
+        )
+    }
+
+    #[inline]
+    pub unsafe fn cmd_set_scissor(
+        &self,
+        command_buffer: CommandBuffer,
+        first_scissor: u32,
+        scissors: &[Rect2d],
+    ) {
+        (self.cmd_set_scissor)(
+            command_buffer,
+            first_scissor,
+            scissors.len() as u32,
+            scissors.as_ptr(),
+        )
+    }
+
+    #[inline]
+    pub unsafe fn cmd_set_line_width(&self, command_buffer: CommandBuffer, line_width: f32) {
+        (self.cmd_set_line_width)(command_buffer, line_width)
+    }
+
+    #[inline]
+    pub unsafe fn cmd_set_depth_bias(
+        &self,
+        command_buffer: CommandBuffer,
+        depth_bias_constant_factor: f32,
+        depth_bias_clamp: f32,
+        depth_bias_slope_factor: f32,
+    ) {
+        (self.cmd_set_depth_bias)(
+            command_buffer,
+            depth_bias_constant_factor,
+            depth_bias_clamp,
+            depth_bias_slope_factor,
+        )
+    }
+
+    #[inline]
+    pub unsafe fn cmd_set_blend_constants(
+        &self,
+        command_buffer: CommandBuffer,
+        blend_constants: [f32; 4],
+    ) {
+        (self.cmd_set_blend_constants)(command_buffer, blend_constants)
+    }
+
+    #[inline]
+    pub unsafe fn cmd_set_depth_bounds(
+        &self,
+        command_buffer: CommandBuffer,
+        min_depth_bounds: f32,
+        max_depth_bounds: f32,
+    ) {
+        (self.cmd_set_depth_bounds)(command_buffer, min_depth_bounds, max_depth_bounds)
+    }
+
+    #[inline]
+    pub unsafe fn cmd_set_stencil_compare_mask(
+        &self,
+        command_buffer: CommandBuffer,
+        face_mask: StencilFaceFlags,
+        compare_mask: u32,
+    ) {
+        (self.cmd_set_stencil_compare_mask)(command_buffer, face_mask, compare_mask)
+    }
+
+    #[inline]
+    pub unsafe fn cmd_set_stencil_write_mask(
+        &self,
+        command_buffer: CommandBuffer,
+        face_mask: StencilFaceFlags,
+        write_mask: u32,
+    ) {
+        (self.cmd_set_stencil_write_mask)(command_buffer, face_mask, write_mask)
+    }
+
+    #[inline]
+    pub unsafe fn cmd_set_stencil_reference(
+        &self,
+        command_buffer: CommandBuffer,
+        face_mask: StencilFaceFlags,
+        reference: u32,
+    ) {
+        (self.cmd_set_stencil_reference)(command_buffer, face_mask, reference)
+    }
+
+    #[inline]
+    pub unsafe fn cmd_bind_descriptor_sets(
+        &self,
+        command_buffer: CommandBuffer,
+        pipeline_bind_point: PipelineBindPoint,
+        layout: PipelineLayout,
+        first_set: u32,
+        descriptor_sets: &[DescriptorSet],
+        dynamic_offsets: &[u32],
+    ) {
+        (self.cmd_bind_descriptor_sets)(
+            command_buffer,
+            pipeline_bind_point,
+            layout,
+            first_set,
+            descriptor_sets.len() as u32,
+            descriptor_sets.as_ptr(),
+            dynamic_offsets.len() as u32,
+            dynamic_offsets.as_ptr(),
+        )
+    }
+
+    #[inline]
+    pub unsafe fn cmd_bind_index_buffer(
+        &self,
+        command_buffer: CommandBuffer,
+        buffer: Buffer,
+        offset: DeviceSize,
+        index_type: IndexType,
+    ) {
+        (self.cmd_bind_index_buffer)(command_buffer, buffer, offset, index_type)
+    }
+
+    #[inline]
+    pub unsafe fn cmd_bind_vertex_buffers(
+        &self,
+        command_buffer: CommandBuffer,
+        first_binding: u32,
+        binding_count: u32,
+        buffers: *const Buffer,
+        offsets: *const DeviceSize,
+    ) {
+        (self.cmd_bind_vertex_buffers)(
+            command_buffer,
+            first_binding,
+            binding_count,
+            buffers,
+            offsets,
+        )
+    }
+
+    #[inline]
+    pub unsafe fn cmd_draw(
+        &self,
+        command_buffer: CommandBuffer,
+        vertex_count: u32,
+        instance_count: u32,
+        first_vertex: u32,
+        first_instance: u32,
+    ) {
+        (self.cmd_draw)(
+            command_buffer,
+            vertex_count,
+            instance_count,
+            first_vertex,
+            first_instance,
+        )
+    }
+
+    #[inline]
+    pub unsafe fn cmd_draw_indexed(
+        &self,
+        command_buffer: CommandBuffer,
+        index_count: u32,
+        instance_count: u32,
+        first_index: u32,
+        vertex_offset: i32,
+        first_instance: u32,
+    ) {
+        (self.cmd_draw_indexed)(
+            command_buffer,
+            index_count,
+            instance_count,
+            first_index,
+            vertex_offset,
+            first_instance,
+        )
+    }
+
+    #[inline]
+    pub unsafe fn cmd_draw_indirect(
+        &self,
+        command_buffer: CommandBuffer,
+        buffer: Buffer,
+        offset: DeviceSize,
+        draw_count: u32,
+        stride: u32,
+    ) {
+        (self.cmd_draw_indirect)(command_buffer, buffer, offset, draw_count, stride)
+    }
+
+    #[inline]
+    pub unsafe fn cmd_draw_indexed_indirect(
+        &self,
+        command_buffer: CommandBuffer,
+        buffer: Buffer,
+        offset: DeviceSize,
+        draw_count: u32,
+        stride: u32,
+    ) {
+        (self.cmd_draw_indexed_indirect)(command_buffer, buffer, offset, draw_count, stride)
+    }
+
+    #[inline]
+    pub unsafe fn cmd_dispatch(
+        &self,
+        command_buffer: CommandBuffer,
+        group_count_x: u32,
+        group_count_y: u32,
+        group_count_z: u32,
+    ) {
+        (self.cmd_dispatch)(command_buffer, group_count_x, group_count_y, group_count_z)
+    }
+
+    #[inline]
+    pub unsafe fn cmd_dispatch_indirect(
+        &self,
+        command_buffer: CommandBuffer,
+        buffer: Buffer,
+        offset: DeviceSize,
+    ) {
+        (self.cmd_dispatch_indirect)(command_buffer, buffer, offset)
+    }
+
+    #[inline]
+    pub unsafe fn cmd_copy_buffer(
+        &self,
+        command_buffer: CommandBuffer,
+        src_buffer: Buffer,
+        dst_buffer: Buffer,
+        regions: &[BufferCopy],
+    ) {
+        (self.cmd_copy_buffer)(
+            command_buffer,
+            src_buffer,
+            dst_buffer,
+            regions.len() as u32,
+            regions.as_ptr(),
+        )
+    }
+
+    #[inline]
+    pub unsafe fn cmd_copy_image(
+        &self,
+        command_buffer: CommandBuffer,
+        src_image: Image,
+        src_image_layout: ImageLayout,
+        dst_image: Image,
+        dst_image_layout: ImageLayout,
+        regions: &[ImageCopy],
+    ) {
+        (self.cmd_copy_image)(
+            command_buffer,
+            src_image,
+            src_image_layout,
+            dst_image,
+            dst_image_layout,
+            regions.len() as u32,
+            regions.as_ptr(),
+        )
+    }
+
+    #[inline]
+    pub unsafe fn cmd_blit_image(
+        &self,
+        command_buffer: CommandBuffer,
+        src_image: Image,
+        src_image_layout: ImageLayout,
+        dst_image: Image,
+        dst_image_layout: ImageLayout,
+        regions: &[ImageBlit],
+        filter: Filter,
+    ) {
+        (self.cmd_blit_image)(
+            command_buffer,
+            src_image,
+            src_image_layout,
+            dst_image,
+            dst_image_layout,
+            regions.len() as u32,
+            regions.as_ptr(),
+            filter,
+        )
+    }
+
+    #[inline]
+    pub unsafe fn cmd_copy_buffer_to_image(
+        &self,
+        command_buffer: CommandBuffer,
+        src_buffer: Buffer,
+        dst_image: Image,
+        dst_image_layout: ImageLayout,
+        regions: &[BufferImageCopy],
+    ) {
+        (self.cmd_copy_buffer_to_image)(
+            command_buffer,
+            src_buffer,
+            dst_image,
+            dst_image_layout,
+            regions.len() as u32,
+            regions.as_ptr(),
+        )
+    }
+
+    #[inline]
+    pub unsafe fn cmd_copy_image_to_buffer(
+        &self,
+        command_buffer: CommandBuffer,
+        src_image: Image,
+        src_image_layout: ImageLayout,
+        dst_buffer: Buffer,
+        regions: &[BufferImageCopy],
+    ) {
+        (self.cmd_copy_image_to_buffer)(
+            command_buffer,
+            src_image,
+            src_image_layout,
+            dst_buffer,
+            regions.len() as u32,
+            regions.as_ptr(),
+        )
+    }
+
+    #[inline]
+    pub unsafe fn cmd_update_buffer(
+        &self,
+        command_buffer: CommandBuffer,
+        dst_buffer: Buffer,
+        dst_offset: DeviceSize,
+        data_size: DeviceSize,
+        data: *const c_void,
+    ) {
+        (self.cmd_update_buffer)(command_buffer, dst_buffer, dst_offset, data_size, data)
+    }
+
+    #[inline]
+    pub unsafe fn cmd_fill_buffer(
+        &self,
+        command_buffer: CommandBuffer,
+        dst_buffer: Buffer,
+        dst_offset: DeviceSize,
+        size: DeviceSize,
+        data: u32,
+    ) {
+        (self.cmd_fill_buffer)(command_buffer, dst_buffer, dst_offset, size, data)
+    }
+
+    #[inline]
+    pub unsafe fn cmd_clear_color_image(
+        &self,
+        command_buffer: CommandBuffer,
+        image: Image,
+        image_layout: ImageLayout,
+        color: &ClearColorValue,
+        ranges: &[ImageSubresourceRange],
+    ) {
+        (self.cmd_clear_color_image)(
+            command_buffer,
+            image,
+            image_layout,
+            color,
+            ranges.len() as u32,
+            ranges.as_ptr(),
+        )
+    }
+
+    #[inline]
+    pub unsafe fn cmd_clear_depth_stencil_image(
+        &self,
+        command_buffer: CommandBuffer,
+        image: Image,
+        image_layout: ImageLayout,
+        depth_stencil: &ClearDepthStencilValue,
+        ranges: &[ImageSubresourceRange],
+    ) {
+        (self.cmd_clear_depth_stencil_image)(
+            command_buffer,
+            image,
+            image_layout,
+            depth_stencil,
+            ranges.len() as u32,
+            ranges.as_ptr(),
+        )
+    }
+
+    #[inline]
+    pub unsafe fn cmd_clear_attachments(
+        &self,
+        command_buffer: CommandBuffer,
+        attachments: &[ClearAttachment],
+        rects: &[ClearRect],
+    ) {
+        (self.cmd_clear_attachments)(
+            command_buffer,
+            attachments.len() as u32,
+            attachments.as_ptr(),
+            rects.len() as u32,
+            rects.as_ptr(),
+        )
+    }
+
+    #[inline]
+    pub unsafe fn cmd_resolve_image(
+        &self,
+        command_buffer: CommandBuffer,
+        src_image: Image,
+        src_image_layout: ImageLayout,
+        dst_image: Image,
+        dst_image_layout: ImageLayout,
+        regions: &[ImageResolve],
+    ) {
+        (self.cmd_resolve_image)(
+            command_buffer,
+            src_image,
+            src_image_layout,
+            dst_image,
+            dst_image_layout,
+            regions.len() as u32,
+            regions.as_ptr(),
+        )
+    }
+
+    #[inline]
+    pub unsafe fn cmd_set_event(
+        &self,
+        command_buffer: CommandBuffer,
+        event: Event,
+        stage_mask: PipelineStageFlags,
+    ) {
+        (self.cmd_set_event)(command_buffer, event, stage_mask)
+    }
+
+    #[inline]
+    pub unsafe fn cmd_reset_event(
+        &self,
+        command_buffer: CommandBuffer,
+        event: Event,
+        stage_mask: PipelineStageFlags,
+    ) {
+        (self.cmd_reset_event)(command_buffer, event, stage_mask)
+    }
+
+    #[inline]
+    pub unsafe fn cmd_wait_events(
+        &self,
+        command_buffer: CommandBuffer,
+        events: &[Event],
+        src_stage_mask: PipelineStageFlags,
+        dst_stage_mask: PipelineStageFlags,
+        memory_barriers: &[MemoryBarrier],
+        buffer_memory_barriers: &[BufferMemoryBarrier],
+        image_memory_barriers: &[ImageMemoryBarrier],
+    ) {
+        (self.cmd_wait_events)(
+            command_buffer,
+            events.len() as u32,
+            events.as_ptr(),
+            src_stage_mask,
+            dst_stage_mask,
+            memory_barriers.len() as u32,
+            memory_barriers.as_ptr(),
+            buffer_memory_barriers.len() as u32,
+            buffer_memory_barriers.as_ptr(),
+            image_memory_barriers.len() as u32,
+            image_memory_barriers.as_ptr(),
+        )
+    }
+
+    #[inline]
+    pub unsafe fn cmd_pipeline_barrier(
+        &self,
+        command_buffer: CommandBuffer,
+        src_stage_mask: PipelineStageFlags,
+        dst_stage_mask: PipelineStageFlags,
+        dependency_flags: DependencyFlags,
+        memory_barriers: &[MemoryBarrier],
+        buffer_memory_barriers: &[BufferMemoryBarrier],
+        image_memory_barriers: &[ImageMemoryBarrier],
+    ) {
+        (self.cmd_pipeline_barrier)(
+            command_buffer,
+            src_stage_mask,
+            dst_stage_mask,
+            dependency_flags,
+            memory_barriers.len() as u32,
+            memory_barriers.as_ptr(),
+            buffer_memory_barriers.len() as u32,
+            buffer_memory_barriers.as_ptr(),
+            image_memory_barriers.len() as u32,
+            image_memory_barriers.as_ptr(),
+        )
+    }
+
+    #[inline]
+    pub unsafe fn cmd_begin_query(
+        &self,
+        command_buffer: CommandBuffer,
+        query_pool: QueryPool,
+        query: u32,
+        flags: QueryControlFlags,
+    ) {
+        (self.cmd_begin_query)(command_buffer, query_pool, query, flags)
+    }
+
+    #[inline]
+    pub unsafe fn cmd_end_query(
+        &self,
+        command_buffer: CommandBuffer,
+        query_pool: QueryPool,
+        query: u32,
+    ) {
+        (self.cmd_end_query)(command_buffer, query_pool, query)
+    }
+
+    #[inline]
+    pub unsafe fn cmd_reset_query_pool(
+        &self,
+        command_buffer: CommandBuffer,
+        query_pool: QueryPool,
+        first_query: u32,
+        query_count: u32,
+    ) {
+        (self.cmd_reset_query_pool)(command_buffer, query_pool, first_query, query_count)
+    }
+
+    #[inline]
+    pub unsafe fn cmd_write_timestamp(
+        &self,
+        command_buffer: CommandBuffer,
+        pipeline_stage: PipelineStageFlags,
+        query_pool: QueryPool,
+        query: u32,
+    ) {
+        (self.cmd_write_timestamp)(command_buffer, pipeline_stage, query_pool, query)
+    }
+
+    #[inline]
+    pub unsafe fn cmd_copy_query_pool_results(
+        &self,
+        command_buffer: CommandBuffer,
+        query_pool: QueryPool,
+        first_query: u32,
+        query_count: u32,
+        dst_buffer: Buffer,
+        dst_offset: DeviceSize,
+        stride: DeviceSize,
+        flags: QueryResultFlags,
+    ) {
+        (self.cmd_copy_query_pool_results)(
+            command_buffer,
+            query_pool,
+            first_query,
+            query_count,
+            dst_buffer,
+            dst_offset,
+            stride,
+            flags,
+        )
+    }
+
+    #[inline]
+    pub unsafe fn cmd_push_constants(
+        &self,
+        command_buffer: CommandBuffer,
+        layout: PipelineLayout,
+        stage_flags: ShaderStageFlags,
+        offset: u32,
+        size: u32,
+        values: *const c_void,
+    ) {
+        (self.cmd_push_constants)(command_buffer, layout, stage_flags, offset, size, values)
+    }
+
+    #[inline]
+    pub unsafe fn cmd_begin_render_pass(
+        &self,
+        command_buffer: CommandBuffer,
+        render_pass_begin: &RenderPassBeginInfo,
+        contents: SubpassContents,
+    ) {
+        (self.cmd_begin_render_pass)(command_buffer, render_pass_begin, contents)
+    }
+
+    #[inline]
+    pub unsafe fn cmd_next_subpass(
+        &self,
+        command_buffer: CommandBuffer,
+        contents: SubpassContents,
+    ) {
+        (self.cmd_next_subpass)(command_buffer, contents)
+    }
+
+    #[inline]
+    pub unsafe fn cmd_end_render_pass(&self, command_buffer: CommandBuffer) {
+        (self.cmd_end_render_pass)(command_buffer)
+    }
+
+    #[inline]
+    pub unsafe fn cmd_pipeline_barrier2(
+        &self,
+        command_buffer: CommandBuffer,
+        dependency_info: &DependencyInfo,
+    ) {
+        (self.cmd_pipeline_barrier2)(command_buffer, dependency_info)
+    }
+
+    #[inline]
+    pub unsafe fn cmd_wait_events2(
+        &self,
+        command_buffer: CommandBuffer,
+        event_count: u32,
+        events: *const Event,
+        dependency_infos: *const DependencyInfo,
+    ) {
+        (self.cmd_wait_events2)(command_buffer, event_count, events, dependency_infos)
+    }
+
+    #[inline]
+    pub unsafe fn cmd_set_event2(
+        &self,
+        command_buffer: CommandBuffer,
+        event: Event,
+        dependency_info: &DependencyInfo,
+    ) {
+        (self.cmd_set_event2)(command_buffer, event, dependency_info)
+    }
+
+    #[inline]
+    pub unsafe fn cmd_begin_rendering(
+        &self,
+        command_buffer: CommandBuffer,
+        rendering_info: &RenderingInfo,
+    ) {
+        (self.cmd_begin_rendering)(command_buffer, rendering_info)
+    }
+
+    #[inline]
+    pub unsafe fn cmd_end_rendering(&self, command_buffer: CommandBuffer) {
+        (self.cmd_end_rendering)(command_buffer)
+    }
+
+    #[inline]
+    pub unsafe fn cmd_execute_commands(
+        &self,
+        command_buffer: CommandBuffer,
+        command_buffers: &[CommandBuffer],
+    ) {
+        (self.cmd_execute_commands)(
+            command_buffer,
+            command_buffers.len() as u32,
+            command_buffers.as_ptr(),
+        )
+    }
+
+    #[inline]
+    pub unsafe fn cmd_set_viewport_with_count(
+        &self,
+        command_buffer: CommandBuffer,
+        viewports: &[Viewport],
+    ) {
+        (self.cmd_set_viewport_with_count)(
+            command_buffer,
+            viewports.len() as u32,
+            viewports.as_ptr(),
+        )
+    }
+
+    #[inline]
+    pub unsafe fn cmd_set_scissor_with_count(
+        &self,
+        command_buffer: CommandBuffer,
+        scissors: &[Rect2d],
+    ) {
+        (self.cmd_set_scissor_with_count)(command_buffer, scissors.len() as u32, scissors.as_ptr())
+    }
+
+    #[inline]
+    pub unsafe fn get_image_memory_requirements2(
+        &self,
+        device: Device,
+        info: &ImageMemoryRequirementsInfo2,
+        memory_requirements: &mut MemoryRequirements2,
+    ) {
+        (self.get_image_memory_requirements2)(device, info, memory_requirements)
+    }
+
+    #[inline]
+    pub unsafe fn bind_image_memory2(
+        &self,
+        device: Device,
+        bind_infos: &[BindImageMemoryInfo],
+    ) -> Result {
+        (self.bind_image_memory2)(device, bind_infos.len() as u32, bind_infos.as_ptr())
+    }
+
+    #[inline]
+    pub unsafe fn get_buffer_memory_requirements2(
+        &self,
+        device: Device,
+        info: &BufferMemoryRequirementsInfo2,
+        memory_requirements: &mut MemoryRequirements2,
+    ) {
+        (self.get_buffer_memory_requirements2)(device, info, memory_requirements)
+    }
+
+    #[inline]
+    pub unsafe fn bind_buffer_memory2(
+        &self,
+        device: Device,
+        bind_infos: &[BindBufferMemoryInfo],
+    ) -> Result {
+        (self.bind_buffer_memory2)(device, bind_infos.len() as u32, bind_infos.as_ptr())
+    }
+
+    #[inline]
+    pub unsafe fn queue_wait_idle(&self, queue: Queue) -> Result {
+        (self.queue_wait_idle)(queue)
+    }
+
+    #[inline]
+    pub unsafe fn device_wait_idle(&self, device: Device) -> Result {
+        (self.device_wait_idle)(device)
+    }
+}
+
+pub struct SurfaceKHRFunctions {
+    destroy_surface: FnDestroySurfaceKHR,
+    get_physical_device_surface_support: FnGetPhysicalDeviceSurfaceSupportKHR,
+    get_physical_device_surface_capabilities: FnGetPhysicalDeviceSurfaceCapabilitiesKHR,
+    get_physical_device_surface_formats: FnGetPhysicalDeviceSurfaceFormatsKHR,
+    get_physical_device_surface_present_modes: FnGetPhysicalDeviceSurfacePresentModesKHR,
+}
+
+impl SurfaceKHRFunctions {
+    pub fn new(global_functions: &GlobalFunctions, instance: Instance) -> Self {
+        unsafe {
+            let load = |name: &CStr| {
+                global_functions
+                    .get_instance_proc_addr(instance, name.as_ptr())
+                    .unwrap_or_else(
+                        #[cold]
+                        || panic!("failed to load instance function {}", name.to_string_lossy()),
+                    )
+            };
+            Self {
+                destroy_surface: transmute::<_, _>(load(cstr!("vkDestroySurfaceKHR"))),
+                get_physical_device_surface_support: transmute::<_, _>(load(cstr!(
+                    "vkGetPhysicalDeviceSurfaceSupportKHR"
+                ))),
+                get_physical_device_surface_capabilities: transmute::<_, _>(load(cstr!(
+                    "vkGetPhysicalDeviceSurfaceCapabilitiesKHR"
+                ))),
+                get_physical_device_surface_formats: transmute::<_, _>(load(cstr!(
+                    "vkGetPhysicalDeviceSurfaceFormatsKHR"
+                ))),
+                get_physical_device_surface_present_modes: transmute::<_, _>(load(cstr!(
+                    "vkGetPhysicalDeviceSurfacePresentModesKHR"
+                ))),
+            }
+        }
+    }
+
+    pub unsafe fn destroy_surface(
+        &self,
+        instance: Instance,
+        surface: SurfaceKHR,
+        allocator: Option<&AllocationCallbacks>,
+    ) {
+        (self.destroy_surface)(instance, surface, allocator)
+    }
+
+    pub unsafe fn get_physical_device_surface_support(
+        &self,
+        physical_device: PhysicalDevice,
+        queue_family_index: u32,
+        surface: SurfaceKHR,
+        supported: &mut Bool32,
+    ) -> Result {
+        (self.get_physical_device_surface_support)(
+            physical_device,
+            queue_family_index,
+            surface,
+            supported,
+        )
+    }
+
+    pub unsafe fn get_physical_device_surface_capabilities(
+        &self,
+        physical_device: PhysicalDevice,
+        surface: SurfaceKHR,
+        surface_capabilities: &mut SurfaceCapabilitiesKHR,
+    ) -> Result {
+        (self.get_physical_device_surface_capabilities)(
+            physical_device,
+            surface,
+            surface_capabilities,
+        )
+    }
+
+    pub unsafe fn get_physical_device_surface_formats(
+        &self,
+        physical_device: PhysicalDevice,
+        surface: SurfaceKHR,
+        surface_format_count: &mut u32,
+        surface_formats: *mut SurfaceFormatKHR,
+    ) -> Result {
+        (self.get_physical_device_surface_formats)(
+            physical_device,
+            surface,
+            surface_format_count,
+            surface_formats,
+        )
+    }
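+
+    // The count/pointer pair above follows Vulkan's two-call enumeration
+    // pattern: query the count with a null pointer, then fill a buffer of
+    // that size (sketch; assumes `SurfaceFormatKHR: Default`):
+    //
+    //     let mut count = 0;
+    //     surface_fn.get_physical_device_surface_formats(pd, surface, &mut count, std::ptr::null_mut());
+    //     let mut formats = vec![SurfaceFormatKHR::default(); count as usize];
+    //     surface_fn.get_physical_device_surface_formats(pd, surface, &mut count, formats.as_mut_ptr());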
+
+    pub unsafe fn get_physical_device_surface_present_modes(
+        &self,
+        physical_device: PhysicalDevice,
+        surface: SurfaceKHR,
+        present_mode_count: &mut u32,
+        present_modes: *mut PresentModeKHR,
+    ) -> Result {
+        (self.get_physical_device_surface_present_modes)(
+            physical_device,
+            surface,
+            present_mode_count,
+            present_modes,
+        )
+    }
+}
+
+pub struct SwapchainKHRFunctions {
+    create_swapchain: FnCreateSwapchainKHR,
+    destroy_swapchain: FnDestroySwapchainKHR,
+    get_swapchain_images: FnGetSwapchainImagesKHR,
+    acquire_next_image: FnAcquireNextImageKHR,
+    queue_present: FnQueuePresentKHR,
+
+    acquire_next_image2: FnAcquireNextImage2KHR,
+}
+
+impl SwapchainKHRFunctions {
+    pub fn new(global_functions: &GlobalFunctions, instance: Instance, api_version: u32) -> Self {
+        unsafe {
+            let load = |name: &CStr, function_version: u32| {
+                if api_version >= function_version {
+                    global_functions
+                        .get_instance_proc_addr(instance, name.as_ptr())
+                        .unwrap_or_else(
+                            #[cold]
+                            || panic!("failed to load device function {}", name.to_string_lossy()),
+                        )
+                } else {
+                    transmute::<_, _>(vulkan_instance_version_not_supported as fn())
+                }
+            };
+            Self {
+                create_swapchain: transmute::<_, _>(load(
+                    cstr!("vkCreateSwapchainKHR"),
+                    VERSION_1_0,
+                )),
+                destroy_swapchain: transmute::<_, _>(load(
+                    cstr!("vkDestroySwapchainKHR"),
+                    VERSION_1_0,
+                )),
+                get_swapchain_images: transmute::<_, _>(load(
+                    cstr!("vkGetSwapchainImagesKHR"),
+                    VERSION_1_0,
+                )),
+                acquire_next_image: transmute::<_, _>(load(
+                    cstr!("vkAcquireNextImageKHR"),
+                    VERSION_1_0,
+                )),
+                queue_present: transmute::<_, _>(load(cstr!("vkQueuePresentKHR"), VERSION_1_0)),
+
+                acquire_next_image2: transmute::<_, _>(load(
+                    cstr!("vkAcquireNextImage2KHR"),
+                    VERSION_1_1,
+                )),
+            }
+        }
+    }
+
+    pub unsafe fn create_swapchain(
+        &self,
+        device: Device,
+        create_info: &SwapchainCreateInfoKHR,
+        allocator: Option<&AllocationCallbacks>,
+        swapchain: &mut SwapchainKHR,
+    ) -> Result {
+        (self.create_swapchain)(device, create_info, allocator, swapchain)
+    }
+
+    pub unsafe fn destroy_swapchain(
+        &self,
+        device: Device,
+        swapchain: SwapchainKHR,
+        allocator: Option<&AllocationCallbacks>,
+    ) {
+        (self.destroy_swapchain)(device, swapchain, allocator)
+    }
+
+    pub unsafe fn get_swapchain_images(
+        &self,
+        device: Device,
+        swapchain: SwapchainKHR,
+        swapchain_image_count: &mut u32,
+        swapchain_images: *mut Image,
+    ) -> Result {
+        (self.get_swapchain_images)(device, swapchain, swapchain_image_count, swapchain_images)
+    }
+
+    pub unsafe fn acquire_next_image(
+        &self,
+        device: Device,
+        swapchain: SwapchainKHR,
+        timeout: u64,
+        semaphore: Semaphore,
+        fence: Fence,
+        image_index: &mut u32,
+    ) -> Result {
+        (self.acquire_next_image)(device, swapchain, timeout, semaphore, fence, image_index)
+    }
+
+    pub unsafe fn acquire_next_image2(
+        &self,
+        device: Device,
+        acquire_info: &AcquireNextImageInfoKHR,
+        image_index: &mut u32,
+    ) -> Result {
+        (self.acquire_next_image2)(device, acquire_info, image_index)
+    }
+
+    pub unsafe fn queue_present(&self, queue: Queue, present_info: &PresentInfoKHR) -> Result {
+        (self.queue_present)(queue, present_info)
+    }
+}
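+
+// Usage sketch (hypothetical caller; `device`, `swapchain`, `queue`,
+// `acquire_semaphore`, `fence`, and `present_info` are assumed to already
+// exist): a minimal per-frame flow through these entry points.
+//
+//     let mut image_index = 0;
+//     unsafe {
+//         swapchain_fns.acquire_next_image(
+//             device, swapchain, u64::MAX, acquire_semaphore, fence, &mut image_index,
+//         );
+//         // ... record and submit rendering work for `image_index` ...
+//         swapchain_fns.queue_present(queue, &present_info);
+//     }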
diff --git a/vulkan-sys/src/structs.rs b/vulkan-sys/src/structs.rs
new file mode 100644 (file)
index 0000000..830acbb
--- /dev/null
@@ -0,0 +1,2570 @@
+use std::{ffi::c_void, mem::MaybeUninit, os::raw::c_char};
+
+use super::*;
+
+#[repr(C)]
+#[derive(Copy, Clone, Debug, Default)]
+pub struct Offset2d {
+    pub x: i32,
+    pub y: i32,
+}
+
+#[repr(C)]
+#[derive(Copy, Clone, Debug, Default)]
+pub struct Offset3d {
+    pub x: i32,
+    pub y: i32,
+    pub z: i32,
+}
+
+#[repr(C)]
+#[derive(Copy, Clone, PartialEq, Eq, Debug, Default)]
+pub struct Extent2d {
+    pub width: u32,
+    pub height: u32,
+}
+
+#[repr(C)]
+#[derive(Copy, Clone, PartialEq, Eq, Debug, Default)]
+pub struct Extent3d {
+    pub width: u32,
+    pub height: u32,
+    pub depth: u32,
+}
+
+#[repr(C)]
+#[derive(Copy, Clone, Debug)]
+pub struct Viewport {
+    pub x: f32,
+    pub y: f32,
+    pub width: f32,
+    pub height: f32,
+    pub min_depth: f32,
+    pub max_depth: f32,
+}
+
+#[repr(C)]
+#[derive(Copy, Clone, Default, Debug)]
+pub struct Rect2d {
+    pub offset: Offset2d,
+    pub extent: Extent2d,
+}
+
+#[repr(C)]
+#[derive(Copy, Clone, Debug)]
+pub struct ClearRect {
+    pub rect: Rect2d,
+    pub base_array_layer: u32,
+    pub layer_count: u32,
+}
+
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct ComponentMapping {
+    pub r: ComponentSwizzle,
+    pub g: ComponentSwizzle,
+    pub b: ComponentSwizzle,
+    pub a: ComponentSwizzle,
+}
+
+#[repr(C)]
+pub struct AllocationCallbacks {
+    user_data: *mut c_void,
+    allocation: Option<FnAllocationFunction>,
+    reallocation: Option<FnReallocationFunction>,
+    free: Option<FnFreeFunction>,
+    internal_allocation: Option<FnInternalAllocationNotification>,
+    internal_free: Option<FnInternalFreeNotification>,
+}
+
+#[repr(C)]
+pub struct DeviceQueueCreateInfo<'a> {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    pub flags: DeviceQueueCreateFlags,
+    pub queue_family_index: u32,
+    pub queue_priorities: VulkanSlice1<'a, u32, f32, 4>,
+}
+
+impl<'a> Default for DeviceQueueCreateInfo<'a> {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::DeviceQueueCreateInfo;
+        x
+    }
+}
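+
+// Note: the `Default` impls in this file zero the struct via
+// `MaybeUninit::zeroed` and then patch in the matching `StructureType`,
+// mirroring C's `VkX info = {0}; info.sType = ...;` convention. This assumes
+// the all-zero bit pattern is an acceptable default for every field (null
+// pointers, empty `VulkanSlice`s, zero-valued flags).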
+
+#[repr(C)]
+pub struct DeviceCreateInfo<'a> {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    pub flags: DeviceCreateFlags,
+    pub queue_create_infos: VulkanSlice1<'a, u32, DeviceQueueCreateInfo<'a>, 0>,
+    pub enabled_layers: VulkanSlice1<'a, u32, *const c_char, 4>,
+    pub enabled_extension_names: VulkanSlice1<'a, u32, *const c_char, 4>,
+    pub enabled_features: *const PhysicalDeviceFeatures,
+}
+
+impl<'a> Default for DeviceCreateInfo<'a> {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::DeviceCreateInfo;
+        x
+    }
+}
+
+#[repr(C)]
+pub struct ApplicationInfo {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    pub application_name: *const c_char,
+    pub application_version: u32,
+    pub engine_name: *const c_char,
+    pub engine_version: u32,
+    pub api_version: u32,
+}
+
+impl Default for ApplicationInfo {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::ApplicationInfo;
+        x
+    }
+}
+
+#[repr(C)]
+pub struct InstanceCreateInfo<'a> {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    pub flags: InstanceCreateFlags,
+    pub application_info: Option<&'a ApplicationInfo>,
+    pub enabled_layers: VulkanSlice1<'a, u32, *const c_char, 4>,
+    pub enabled_extension_names: VulkanSlice1<'a, u32, *const c_char, 4>,
+}
+
+impl<'a> Default for InstanceCreateInfo<'a> {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::InstanceCreateInfo;
+        x
+    }
+}
+
+#[repr(C)]
+#[derive(Copy, Clone, Debug)]
+pub struct QueueFamilyProperties {
+    pub queue_flags: QueueFlags,
+    pub queue_count: u32,
+    pub timestamp_valid_bits: u32,
+    /// Minimum alignment requirement for image transfers
+    pub min_image_transfer_granularity: Extent3d,
+}
+
+#[repr(C)]
+pub struct PhysicalDeviceMemoryProperties {
+    pub memory_type_count: u32,
+    pub memory_types: [MemoryType; MAX_MEMORY_TYPES as usize],
+    pub memory_heap_count: u32,
+    pub memory_heaps: [MemoryHeap; MAX_MEMORY_HEAPS as usize],
+}
+
+impl Default for PhysicalDeviceMemoryProperties {
+    fn default() -> Self {
+        unsafe { MaybeUninit::zeroed().assume_init() }
+    }
+}
+
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct MemoryAllocateInfo {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    pub allocation_size: DeviceSize, // Size of memory allocation
+    pub memory_type_index: u32,      // Index of the memory type to allocate from
+}
+
+impl Default for MemoryAllocateInfo {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::MemoryAllocateInfo;
+        x
+    }
+}
+
+#[repr(C)]
+pub struct MemoryRequirements {
+    pub size: DeviceSize,      // Specified in bytes
+    pub alignment: DeviceSize, // Specified in bytes
+    pub memory_type_bits: u32, // Bitmask of the allowed memory type indices into memoryTypes[] for this object
+}
+
+impl Default for MemoryRequirements {
+    fn default() -> Self {
+        unsafe { MaybeUninit::zeroed().assume_init() }
+    }
+}
+
+#[repr(C)]
+pub struct MemoryRequirements2 {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    pub memory_requirements: MemoryRequirements,
+}
+
+impl Default for MemoryRequirements2 {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::MemoryRequirements2;
+        x
+    }
+}
+
+#[repr(C)]
+pub struct ImageMemoryRequirementsInfo2 {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    pub image: Image,
+}
+
+impl Default for ImageMemoryRequirementsInfo2 {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::ImageMemoryRequirementsInfo2;
+        x
+    }
+}
+
+#[repr(C)]
+pub struct BufferMemoryRequirementsInfo2 {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    pub buffer: Buffer,
+}
+
+impl Default for BufferMemoryRequirementsInfo2 {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::BufferMemoryRequirementsInfo2;
+        x
+    }
+}
+
+#[repr(C)]
+pub struct BindImageMemoryInfo {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    pub image: Image,
+    pub memory: DeviceMemory,
+    pub offset: DeviceSize,
+}
+
+impl Default for BindImageMemoryInfo {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::BindImageMemoryInfo;
+        x
+    }
+}
+
+#[repr(C)]
+pub struct BindBufferMemoryInfo {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    pub buffer: Buffer,
+    pub memory: DeviceMemory,
+    pub offset: DeviceSize,
+}
+
+impl Default for BindBufferMemoryInfo {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::BindBufferMemoryInfo;
+        x
+    }
+}
+
+#[repr(C)]
+pub struct SparseImageFormatProperties {
+    pub aspect_mask: ImageAspectFlags,
+    pub image_granularity: Extent3d,
+    pub flags: SparseImageFormatFlags,
+}
+
+#[repr(C)]
+pub struct SparseImageMemoryRequirements {
+    pub format_properties: SparseImageFormatProperties,
+    pub image_mip_tail_first_lod: u32,
+    pub image_mip_tail_size: DeviceSize, // Specified in bytes, must be a multiple of sparse block size in bytes / alignment
+    pub image_mip_tail_offset: DeviceSize, // Specified in bytes, must be a multiple of sparse block size in bytes / alignment
+    pub image_mip_tail_stride: DeviceSize, // Specified in bytes, must be a multiple of sparse block size in bytes / alignment
+}
+
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct MemoryType {
+    pub property_flags: MemoryPropertyFlags, // Memory properties of this memory type
+    pub heap_index: u32, // Index of the memory heap this memory type's allocations are taken from
+}
+
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct MemoryHeap {
+    pub size: DeviceSize,       // Available memory in the heap
+    pub flags: MemoryHeapFlags, // Flags for the heap
+}
+
+#[repr(C)]
+pub struct SubmitInfo<'a> {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    pub wait_semaphores: VulkanSlice2<'a, u32, Semaphore, PipelineStageFlags, 4>,
+    pub command_buffers: VulkanSlice1<'a, u32, CommandBuffer, 4>,
+    pub signal_semaphores: VulkanSlice1<'a, u32, Semaphore, 4>,
+}
+
+impl<'a> Default for SubmitInfo<'a> {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::SubmitInfo;
+        x
+    }
+}
+
+#[repr(C)]
+pub struct SubmitInfo2<'a> {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    pub flags: SubmitFlags,
+    pub wait_semaphore_infos: VulkanSlice1<'a, u32, SemaphoreSubmitInfo, 0>,
+    pub command_buffer_infos: VulkanSlice1<'a, u32, CommandBufferSubmitInfo, 4>,
+    pub signal_semaphore_infos: VulkanSlice1<'a, u32, SemaphoreSubmitInfo, 4>,
+}
+
+impl<'a> Default for SubmitInfo2<'a> {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::SubmitInfo2;
+        x
+    }
+}
+
+#[repr(C)]
+pub struct SemaphoreSubmitInfo {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    pub semaphore: Semaphore,
+    pub semaphore_value: u64,
+    pub stage_mask: PipelineStageFlags2,
+    pub device_index: u32,
+}
+
+impl Default for SemaphoreSubmitInfo {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::SemaphoreSubmitInfo;
+        x
+    }
+}
+
+#[repr(C)]
+pub struct CommandBufferSubmitInfo {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    pub command_buffer: CommandBuffer,
+    pub device_mask: u32,
+}
+
+impl Default for CommandBufferSubmitInfo {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::CommandBufferSubmitInfo;
+        x
+    }
+}
+
+#[repr(C)]
+pub struct MappedMemoryRange {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    pub memory: DeviceMemory, // Mapped memory object
+    pub offset: DeviceSize,   // Offset within the memory object where the range starts
+    pub size: DeviceSize,     // Size of the range within the memory object
+}
+
+impl Default for MappedMemoryRange {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::MappedMemoryRange;
+        x
+    }
+}
+
+#[repr(C)]
+#[derive(Clone)]
+pub struct FormatProperties {
+    pub linear_tiling_features: FormatFeatureFlags, // Format features in case of linear tiling
+    pub optimal_tiling_features: FormatFeatureFlags, // Format features in case of optimal tiling
+    pub buffer_features: FormatFeatureFlags,        // Format features supported by buffers
+}
+
+#[repr(C)]
+#[derive(Clone)]
+pub struct ImageFormatProperties {
+    pub max_extent: Extent3d,  // max image dimensions for this resource type
+    pub max_mip_levels: u32,   // max number of mipmap levels for this resource type
+    pub max_array_layers: u32, // max array size for this resource type
+    pub sample_counts: SampleCountFlags, // supported sample counts for this resource type
+    pub max_resource_size: DeviceSize, // max size (in bytes) of this resource type
+}
+
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct SurfaceCapabilitiesKHR {
+    pub min_image_count: u32, // Supported minimum number of images for the surface
+    pub max_image_count: u32, // Supported maximum number of images for the surface, 0 for unlimited
+    pub current_extent: Extent2d, // Current image width and height for the surface, (0xFFFFFFFF, 0xFFFFFFFF) if undefined
+    pub min_image_extent: Extent2d, // Supported minimum image width and height for the surface
+    pub max_image_extent: Extent2d, // Supported maximum image width and height for the surface
+    pub max_image_array_layers: u32, // Supported maximum number of image layers for the surface
+    pub supported_transforms: SurfaceTransformFlagsKHR, // 1 or more bits representing the transforms supported
+    pub current_transform: SurfaceTransformFlagsKHR, // The surface's current transform relative to the device's natural orientation
+    pub supported_composite_alpha: CompositeAlphaFlagsKHR, // 1 or more bits representing the alpha compositing modes supported
+    pub supported_usage_flags: ImageUsageFlags, // Supported image usage flags for the surface
+}
+
+impl Default for SurfaceCapabilitiesKHR {
+    fn default() -> Self {
+        unsafe { MaybeUninit::<Self>::zeroed().assume_init() }
+    }
+}
+
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct SurfaceFormatKHR {
+    pub format: Format,             // Supported rendering format for the surface
+    pub color_space: ColorSpaceKHR, // Supported color space for the surface
+}
+
+impl Default for SurfaceFormatKHR {
+    fn default() -> Self {
+        Self {
+            format: Format::Undefined,
+            color_space: ColorSpaceKHR::SrgbNonlinearKhr,
+        }
+    }
+}
+
+#[repr(C)]
+pub struct SwapchainCreateInfoKHR<'a> {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    pub flags: SwapchainCreateFlagsKHR,
+    ///  The swapchain's target surface
+    pub surface: SurfaceKHR,
+    ///  Minimum number of presentation images the application needs
+    pub min_image_count: u32,
+    ///  Format of the presentation images
+    pub image_format: Format,
+    ///  Colorspace of the presentation images
+    pub image_color_space: ColorSpaceKHR,
+    ///  Dimensions of the presentation images
+    pub image_extent: Extent2d,
+    ///  Determines the number of views for multiview/stereo presentation
+    pub image_array_layers: u32,
+    ///  Bits indicating how the presentation images will be used
+    pub image_usage: ImageUsageFlags,
+    ///  Sharing mode used for the presentation images
+    pub image_sharing_mode: SharingMode,
+    ///  Array of queue family indices having access to the images in case of concurrent sharing mode
+    pub queue_family_indices: VulkanSlice1<'a, u32, u32, 4>,
+    ///  The transform, relative to the device's natural orientation, applied to the image content prior to presentation
+    pub pre_transform: SurfaceTransformFlagsKHR,
+    ///  The alpha blending mode used when compositing this surface with other surfaces in the window system
+    pub composite_alpha: CompositeAlphaFlagsKHR,
+    ///  Which presentation mode to use for presents on this swap chain
+    pub present_mode: PresentModeKHR,
+    ///  Specifies whether presentable images may be affected by window clip regions
+    pub clipped: Bool32,
+    ///  Existing swap chain to replace, if any
+    pub old_swapchain: SwapchainKHR,
+}
+
+impl<'a> Default for SwapchainCreateInfoKHR<'a> {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::SwapchainCreateInfoKhr;
+        x
+    }
+}
+
+#[repr(C)]
+pub struct PresentInfoKHR<'a> {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    /// Semaphores to wait for before presenting
+    pub wait_semaphores: VulkanSlice1<'a, u32, Semaphore, 4>,
+    /// Swapchains and swapchain image indices to present
+    pub swapchains: VulkanSlice2<'a, u32, SwapchainKHR, u32, 4>,
+    /// Optional (i.e. if non-NULL) VkResult for each swapchain
+    pub results: *mut Result,
+}
+
+impl<'a> Default for PresentInfoKHR<'a> {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::PresentInfoKhr;
+        x
+    }
+}
+
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct AcquireNextImageInfoKHR {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    pub swapchain: SwapchainKHR,
+    pub timeout: u64,
+    pub semaphore: Semaphore,
+    pub fence: Fence,
+    pub device_mask: u32,
+}
+
+impl Default for AcquireNextImageInfoKHR {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::AcquireNextImageInfoKhr;
+        x
+    }
+}
+
+#[repr(C)]
+pub struct ImageCreateInfo<'a> {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    pub flags: ImageCreateFlags, // Image creation flags
+    pub image_type: ImageType,
+    pub format: Format,
+    pub extent: Extent3d,
+    pub mip_levels: u32,
+    pub array_layers: u32,
+    pub samples: SampleCountFlags,
+    pub tiling: ImageTiling,
+    /// Image usage flags
+    pub usage: ImageUsageFlags,
+    /// Cross-queue-family sharing mode
+    pub sharing_mode: SharingMode,
+    /// Array of queue family indices to share across
+    pub queue_family_indices: VulkanSlice1<'a, u32, u32, 4>,
+    /// Initial image layout for all subresources
+    pub initial_layout: ImageLayout,
+}
+
+impl<'a> Default for ImageCreateInfo<'a> {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::ImageCreateInfo;
+        x
+    }
+}
+
+#[repr(C)]
+pub struct SubresourceLayout {
+    pub offset: DeviceSize,
+    pub size: DeviceSize,
+    pub row_pitch: DeviceSize,
+    pub array_pitch: DeviceSize,
+    pub depth_pitch: DeviceSize,
+}
+
+impl Default for SubresourceLayout {
+    fn default() -> Self {
+        unsafe { MaybeUninit::zeroed().assume_init() }
+    }
+}
+
+#[repr(C)]
+pub struct ImageViewCreateInfo {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    pub flags: ImageViewCreateFlags,
+    pub image: Image,
+    pub view_type: ImageViewType,
+    pub format: Format,
+    pub components: ComponentMapping,
+    pub subresource_range: ImageSubresourceRange,
+}
+
+impl Default for ImageViewCreateInfo {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::ImageViewCreateInfo;
+        x
+    }
+}
+
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct CommandPoolCreateInfo {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    pub flags: CommandPoolCreateFlags,
+    pub queue_family_index: u32,
+}
+
+impl Default for CommandPoolCreateInfo {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::CommandPoolCreateInfo;
+        x
+    }
+}
+
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct CommandBufferAllocateInfo {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    pub command_pool: CommandPool,
+    pub level: CommandBufferLevel,
+    pub command_buffer_count: u32,
+}
+
+impl Default for CommandBufferAllocateInfo {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::CommandBufferAllocateInfo;
+        x
+    }
+}
+
+#[repr(C)]
+pub struct CommandBufferInheritanceInfo {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    pub render_pass: RenderPass, // Render pass for secondary command buffers
+    pub subpass: u32,
+    pub framebuffer: Framebuffer, // Framebuffer for secondary command buffers
+    pub occlusion_query_enable: Bool32, // Whether this secondary command buffer may be executed during an occlusion query
+    pub query_flags: QueryControlFlags, // Query flags used by this secondary command buffer, if executed during an occlusion query
+    pub pipeline_statistics: QueryPipelineStatisticFlags, // Pipeline statistics that may be counted for this secondary command buffer
+}
+
+impl Default for CommandBufferInheritanceInfo {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::CommandBufferInheritanceInfo;
+        x
+    }
+}
+
+#[repr(C)]
+pub struct CommandBufferBeginInfo<'a> {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    pub flags: CommandBufferUsageFlags,
+    /// Inheritance info for secondary command buffers
+    pub inheritance_info: Option<&'a CommandBufferInheritanceInfo>,
+}
+
+impl<'a> Default for CommandBufferBeginInfo<'a> {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::CommandBufferBeginInfo;
+        x
+    }
+}
+
+#[repr(C)]
+pub struct RenderingAttachmentInfo {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    pub image_view: ImageView,
+    pub image_layout: ImageLayout,
+    pub resolve_mode: ResolveModeFlags,
+    pub resolve_image_view: ImageView,
+    pub resolve_image_layout: ImageLayout,
+    pub load_op: AttachmentLoadOp,
+    pub store_op: AttachmentStoreOp,
+    pub clear_value: ClearValue,
+}
+
+impl Default for RenderingAttachmentInfo {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::RenderingAttachmentInfo;
+        x
+    }
+}
+
+#[repr(C)]
+pub struct RenderingInfo<'a> {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    pub flags: RenderingFlags,
+    pub render_area: Rect2d,
+    pub layer_count: u32,
+    pub view_mask: u32,
+    pub color_attachments: VulkanSlice1<'a, u32, RenderingAttachmentInfo, 0>,
+    pub depth_attachment: Option<&'a RenderingAttachmentInfo>,
+    pub stencil_attachment: Option<&'a RenderingAttachmentInfo>,
+}
+
+impl<'a> Default for RenderingInfo<'a> {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::RenderingInfo;
+        x
+    }
+}
+
+#[repr(C)]
+pub struct PipelineRenderingCreateInfo<'a> {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    pub view_mask: u32,
+    pub color_attachment_formats: VulkanSlice1<'a, u32, Format, 0>,
+    pub depth_attachment_format: Format,
+    pub stencil_attachment_format: Format,
+}
+
+impl<'a> Default for PipelineRenderingCreateInfo<'a> {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::PipelineRenderingCreateInfo;
+        x
+    }
+}
+
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct BufferCopy {
+    /// Specified in bytes
+    pub src_offset: DeviceSize,
+    /// Specified in bytes
+    pub dst_offset: DeviceSize,
+    /// Specified in bytes
+    pub size: DeviceSize,
+}
+
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct ImageSubresourceLayers {
+    pub aspect_mask: ImageAspectFlags,
+    pub mip_level: u32,
+    pub base_array_layer: u32,
+    pub layer_count: u32,
+}
+
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct ImageCopy {
+    pub src_subresource: ImageSubresourceLayers,
+    /// Specified in pixels for both compressed and uncompressed images
+    pub src_offset: Offset3d,
+    pub dst_subresource: ImageSubresourceLayers,
+    /// Specified in pixels for both compressed and uncompressed images
+    pub dst_offset: Offset3d,
+    /// Specified in pixels for both compressed and uncompressed images
+    pub extent: Extent3d,
+}
+
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct ImageBlit {
+    pub src_subresource: ImageSubresourceLayers,
+    /// Specified in pixels for both compressed and uncompressed images
+    pub src_offsets: [Offset3d; 2],
+    pub dst_subresource: ImageSubresourceLayers,
+    /// Specified in pixels for both compressed and uncompressed images
+    pub dst_offsets: [Offset3d; 2],
+}
+
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct BufferImageCopy {
+    /// Specified in bytes
+    pub buffer_offset: DeviceSize,
+    /// Specified in texels
+    pub buffer_row_length: u32,
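+    /// Specified in texels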
+    pub buffer_image_height: u32,
+    pub image_subresource: ImageSubresourceLayers,
+    /// Specified in pixels for both compressed and uncompressed images
+    pub image_offset: Offset3d,
+    /// Specified in pixels for both compressed and uncompressed images
+    pub image_extent: Extent3d,
+}
+
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct ImageResolve {
+    pub src_subresource: ImageSubresourceLayers,
+    pub src_offset: Offset3d,
+    pub dst_subresource: ImageSubresourceLayers,
+    pub dst_offset: Offset3d,
+    pub extent: Extent3d,
+}
+
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub union ClearColorValue {
+    pub f32: [f32; 4],
+    pub i32: [i32; 4],
+    pub u32: [u32; 4],
+}
+
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct ClearDepthStencilValue {
+    pub depth: f32,
+    pub stencil: u32,
+}
+
+#[repr(C)]
+#[derive(Copy, Clone)]
+/// Union allowing specification of color or depth and stencil values. Actual value selected is based on attachment being cleared.
+pub union ClearValue {
+    pub color: ClearColorValue,
+    pub depth_stencil: ClearDepthStencilValue,
+}
+
+impl Default for ClearValue {
+    fn default() -> Self {
+        unsafe { MaybeUninit::zeroed().assume_init() }
+    }
+}
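+
+// Example (hypothetical clear values): a union is written through exactly one
+// named field, so color and depth/stencil clears are built as:
+//
+//     let clear_color = ClearValue {
+//         color: ClearColorValue { f32: [0.0, 0.0, 0.0, 1.0] },
+//     };
+//     let clear_depth = ClearValue {
+//         depth_stencil: ClearDepthStencilValue { depth: 1.0, stencil: 0 },
+//     };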
+
+#[repr(C)]
+pub struct RenderPassBeginInfo<'a> {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    pub render_pass: RenderPass,
+    pub framebuffer: Framebuffer,
+    pub render_area: Rect2d,
+    pub clear_values: VulkanSlice1<'a, u32, ClearValue, 4>,
+}
+
+impl<'a> Default for RenderPassBeginInfo<'a> {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::RenderPassBeginInfo;
+        x
+    }
+}
+
+#[repr(C)]
+pub struct MemoryBarrier2 {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    pub src_stage_mask: PipelineStageFlags2,
+    pub src_access_mask: AccessFlags2,
+    pub dst_stage_mask: PipelineStageFlags2,
+    pub dst_access_mask: AccessFlags2,
+}
+
+impl Default for MemoryBarrier2 {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::MemoryBarrier2;
+        x
+    }
+}
+
+#[repr(C)]
+pub struct BufferMemoryBarrier2 {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    pub src_stage_mask: PipelineStageFlags2,
+    pub src_access_mask: AccessFlags2,
+    pub dst_stage_mask: PipelineStageFlags2,
+    pub dst_access_mask: AccessFlags2,
+    pub src_queue_family_index: u32,
+    pub dst_queue_family_index: u32,
+    pub buffer: Buffer,
+    pub offset: DeviceSize,
+    pub size: DeviceSize,
+}
+
+impl Default for BufferMemoryBarrier2 {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::BufferMemoryBarrier2;
+        x
+    }
+}
+
+#[repr(C)]
+pub struct ImageMemoryBarrier2 {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    pub src_stage_mask: PipelineStageFlags2,
+    pub src_access_mask: AccessFlags2,
+    pub dst_stage_mask: PipelineStageFlags2,
+    pub dst_access_mask: AccessFlags2,
+    pub old_layout: ImageLayout,
+    pub new_layout: ImageLayout,
+    pub src_queue_family_index: u32,
+    pub dst_queue_family_index: u32,
+    pub image: Image,
+    pub subresource_range: ImageSubresourceRange,
+}
+
+impl Default for ImageMemoryBarrier2 {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::ImageMemoryBarrier2;
+        x
+    }
+}
+
+#[repr(C)]
+pub struct DependencyInfo<'a> {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    pub flags: DependencyFlags,
+    pub memory_barriers: VulkanSlice1<'a, u32, MemoryBarrier2, 0>,
+    pub buffer_memory_barriers: VulkanSlice1<'a, u32, BufferMemoryBarrier2, 4>,
+    pub image_memory_barriers: VulkanSlice1<'a, u32, ImageMemoryBarrier2, 4>,
+}
+
+impl<'a> Default for DependencyInfo<'a> {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::DependencyInfo;
+        x
+    }
+}
+
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct ClearAttachment {
+    pub aspect_mask: ImageAspectFlags,
+    pub color_attachment: u32,
+    pub clear_value: ClearValue,
+}
+
+#[repr(C)]
+pub struct AttachmentDescription {
+    pub flags: AttachmentDescriptionFlags,
+    pub format: Format,
+    pub samples: SampleCountFlags,
+    pub load_op: AttachmentLoadOp, // Load operation for color or depth data
+    pub store_op: AttachmentStoreOp, // Store operation for color or depth data
+    pub stencil_load_op: AttachmentLoadOp, // Load operation for stencil data
+    pub stencil_store_op: AttachmentStoreOp, // Store operation for stencil data
+    pub initial_layout: ImageLayout,
+    pub final_layout: ImageLayout,
+}
+
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct AttachmentReference {
+    pub attachment: u32,
+    pub layout: ImageLayout,
+}
+
+#[repr(C)]
+pub struct SubpassDescription<'a> {
+    pub flags: SubpassDescriptionFlags,
+    /// Must be VK_PIPELINE_BIND_POINT_GRAPHICS for now
+    pub pipeline_bind_point: PipelineBindPoint,
+    pub input_attachments: VulkanSlice1<'a, u32, AttachmentReference, 4>,
+    pub color_attachments: VulkanSlice1<'a, u32, AttachmentReference, 4>,
+    pub resolve_attachments: *const AttachmentReference,
+    pub depth_stencil_attachment: Option<&'a AttachmentReference>,
+    pub preserve_attachments: VulkanSlice1<'a, u32, u32, 4>,
+}
+
+impl<'a> Default for SubpassDescription<'a> {
+    fn default() -> Self {
+        unsafe { MaybeUninit::zeroed().assume_init() }
+    }
+}
+
+#[repr(C)]
+pub struct SubpassDependency {
+    pub src_subpass: u32,
+    pub dst_subpass: u32,
+    pub src_stage_mask: PipelineStageFlags,
+    pub dst_stage_mask: PipelineStageFlags,
+    /// Memory accesses from the source of the dependency to synchronize
+    pub src_access_mask: AccessFlags,
+    /// Memory accesses from the destination of the dependency to synchronize
+    pub dst_access_mask: AccessFlags,
+    pub dependency_flags: DependencyFlags,
+}
+
+#[repr(C)]
+pub struct RenderPassCreateInfo<'a> {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    pub flags: RenderPassCreateFlags,
+    pub attachments: VulkanSlice1<'a, u32, AttachmentDescription, 0>,
+    pub subpasses: VulkanSlice1<'a, u32, SubpassDescription<'a>, 4>,
+    pub dependencies: VulkanSlice1<'a, u32, SubpassDependency, 4>,
+}
+
+impl<'a> Default for RenderPassCreateInfo<'a> {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::RenderPassCreateInfo;
+        x
+    }
+}
+
+#[repr(C)]
+pub struct ShaderModuleCreateInfo<'a> {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    pub flags: ShaderModuleCreateFlags,
+    pub _pad: [u8; 4],
+    pub code: VulkanSlice1<'a, usize, u8, 0>,
+}
+
+impl<'a> Default for ShaderModuleCreateInfo<'a> {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::ShaderModuleCreateInfo;
+        x
+    }
+}
+
+#[repr(C)]
+pub struct DescriptorSetLayoutBinding {
+    ///  Binding number for this entry
+    pub binding: u32,
+    ///  Type of the descriptors in this binding
+    pub descriptor_type: DescriptorType,
+    ///  Number of descriptors in this binding
+    pub descriptor_count: u32,
+    ///  Shader stages this binding is visible to
+    pub stage_flags: ShaderStageFlags,
+    ///  Immutable samplers (used if descriptor type is SAMPLER or COMBINED_IMAGE_SAMPLER; either null or a pointer to descriptor_count samplers)
+    pub immutable_samplers: *const Sampler,
+}
+
+#[repr(C)]
+pub struct DescriptorSetLayoutCreateInfo<'a> {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    pub flags: DescriptorSetLayoutCreateFlags,
+    ///  Array of descriptor set layout bindings
+    pub bindings: VulkanSlice1<'a, u32, DescriptorSetLayoutBinding, 4>,
+}
+
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct DescriptorPoolSize {
+    pub r#type: DescriptorType,
+    pub descriptor_count: u32,
+}
+
+#[repr(C)]
+pub struct DescriptorPoolCreateInfo<'a> {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    pub flags: DescriptorPoolCreateFlags,
+    pub max_sets: u32,
+    pub pool_sizes: VulkanSlice1<'a, u32, DescriptorPoolSize, 0>,
+}
+
+#[repr(C)]
+pub struct DescriptorSetAllocateInfo<'a> {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    pub descriptor_pool: DescriptorPool,
+    pub set_layouts: VulkanSlice1<'a, u32, DescriptorSetLayout, 4>,
+}
+
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct SpecializationMapEntry {
+    ///  The SpecConstant ID specified in the SPIR-V module
+    pub constant_id: u32,
+    ///  Offset of the value in the data block
+    pub offset: u32,
+    ///  Size in bytes of the SpecConstant
+    pub size: usize,
+}
+
+#[repr(C)]
+pub struct SpecializationInfo<'a> {
+    pub map_entries: VulkanSlice1<'a, u32, SpecializationMapEntry, 4>,
+    ///  Size in bytes of pData
+    pub data_size: usize,
+    ///  Pointer to SpecConstant data
+    pub data: *const c_void,
+}
+
+#[repr(C)]
+pub struct PipelineShaderStageCreateInfo<'a> {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    pub flags: PipelineShaderStageCreateFlags,
+    ///  Shader stage
+    pub stage: ShaderStageFlags,
+    ///  Module containing entry point
+    pub module: ShaderModule,
+    ///  Null-terminated entry point name
+    pub name: *const c_char,
+    pub specialization_info: Option<&'a SpecializationInfo<'a>>,
+}
+
+impl<'a> Default for PipelineShaderStageCreateInfo<'a> {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::PipelineShaderStageCreateInfo;
+        x
+    }
+}
+
+#[repr(C)]
+pub struct ComputePipelineCreateInfo<'a> {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    pub flags: PipelineCreateFlags,
+    pub stage: PipelineShaderStageCreateInfo<'a>,
+    ///  Interface layout of the pipeline
+    pub layout: PipelineLayout,
+    ///  If VK_PIPELINE_CREATE_DERIVATIVE_BIT is set and this value is nonzero, it specifies the handle of the base pipeline this is a derivative of
+    pub base_pipeline_handle: Pipeline,
+    ///  If VK_PIPELINE_CREATE_DERIVATIVE_BIT is set and this value is not -1, it specifies an index into pCreateInfos of the base pipeline this is a derivative of
+    pub base_pipeline_index: i32,
+}
+
+impl<'a> Default for ComputePipelineCreateInfo<'a> {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::ComputePipelineCreateInfo;
+        x
+    }
+}
+
+#[repr(C)]
+pub struct VertexInputBindingDescription {
+    ///  Vertex buffer binding id
+    pub binding: u32,
+    ///  Distance between vertices in bytes (0 = no advancement)
+    pub stride: u32,
+    ///  The rate at which the vertex data is consumed
+    pub input_rate: VertexInputRate,
+}
+
+#[repr(C)]
+pub struct VertexInputAttributeDescription {
+    ///  location of the shader vertex attrib
+    pub location: u32,
+    ///  Vertex buffer binding id
+    pub binding: u32,
+    ///  format of source data
+    pub format: Format,
+    ///  Offset of first element in bytes from base of vertex
+    pub offset: u32,
+}
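+
+// Example (hypothetical vertex layout; the `Format` and `VertexInputRate`
+// variant names below are assumed, not taken from this crate): a packed
+// 20-byte position + uv vertex could be described as:
+//
+//     let binding = VertexInputBindingDescription {
+//         binding: 0,
+//         stride: 20,
+//         input_rate: VertexInputRate::Vertex,
+//     };
+//     let attributes = [
+//         VertexInputAttributeDescription {
+//             location: 0, binding: 0, format: Format::R32g32b32Sfloat, offset: 0,
+//         },
+//         VertexInputAttributeDescription {
+//             location: 1, binding: 0, format: Format::R32g32Sfloat, offset: 12,
+//         },
+//     ];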
+
+#[repr(C)]
+pub struct PipelineVertexInputStateCreateInfo<'a> {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    pub flags: PipelineVertexInputStateCreateFlags,
+    pub vertex_binding_descriptions: VulkanSlice1<'a, u32, VertexInputBindingDescription, 0>,
+    pub vertex_attribute_descriptions: VulkanSlice1<'a, u32, VertexInputAttributeDescription, 4>,
+}
+
+impl<'a> Default for PipelineVertexInputStateCreateInfo<'a> {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::PipelineVertexInputStateCreateInfo;
+        x
+    }
+}
+
+#[repr(C)]
+pub struct PipelineInputAssemblyStateCreateInfo {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    pub flags: PipelineInputAssemblyStateCreateFlags,
+    pub topology: PrimitiveTopology,
+    pub primitive_restart_enable: Bool32,
+}
+
+impl Default for PipelineInputAssemblyStateCreateInfo {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::PipelineInputAssemblyStateCreateInfo;
+        x
+    }
+}
+
+#[repr(C)]
+pub struct PipelineTessellationStateCreateInfo {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    pub flags: PipelineTessellationStateCreateFlags,
+    pub patch_control_points: u32,
+}
+
+impl Default for PipelineTessellationStateCreateInfo {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::PipelineTessellationStateCreateInfo;
+        x
+    }
+}
+
+#[repr(C)]
+pub struct PipelineViewportStateCreateInfo<'a> {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    pub flags: PipelineViewportStateCreateFlags,
+    pub viewports: VulkanSlice1<'a, u32, Viewport, 0>,
+    pub scissors: VulkanSlice1<'a, u32, Rect2d, 4>,
+}
+
+impl<'a> Default for PipelineViewportStateCreateInfo<'a> {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::PipelineViewportStateCreateInfo;
+        x
+    }
+}
+
+#[repr(C)]
+pub struct PipelineRasterizationStateCreateInfo {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    pub flags: PipelineRasterizationStateCreateFlags,
+    pub depth_clamp_enable: Bool32,
+    pub rasterizer_discard_enable: Bool32,
+    pub polygon_mode: PolygonMode,
+    pub cull_mode: CullModeFlags,
+    pub front_face: FrontFace,
+    pub depth_bias_enable: Bool32,
+    pub depth_bias_constant_factor: f32,
+    pub depth_bias_clamp: f32,
+    pub depth_bias_slope_factor: f32,
+    pub line_width: f32,
+}
+
+impl Default for PipelineRasterizationStateCreateInfo {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::PipelineRasterizationStateCreateInfo;
+        x
+    }
+}
+
+#[repr(C)]
+pub struct PipelineMultisampleStateCreateInfo {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    pub flags: PipelineMultisampleStateCreateFlags,
+    pub rasterization_samples: SampleCountFlags,
+    pub sample_shading_enable: Bool32,
+    pub min_sample_shading: f32,
+    pub sample_mask: *const SampleMask,
+    pub alpha_to_coverage_enable: Bool32,
+    pub alpha_to_one_enable: Bool32,
+}
+
+impl Default for PipelineMultisampleStateCreateInfo {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::PipelineMultisampleStateCreateInfo;
+        x
+    }
+}
+
+#[repr(C)]
+pub struct PipelineColorBlendAttachmentState {
+    pub blend_enable: Bool32,
+    pub src_color_blend_factor: BlendFactor,
+    pub dst_color_blend_factor: BlendFactor,
+    pub color_blend_op: BlendOp,
+    pub src_alpha_blend_factor: BlendFactor,
+    pub dst_alpha_blend_factor: BlendFactor,
+    pub alpha_blend_op: BlendOp,
+    pub color_write_mask: ColorComponentFlags,
+}
+
+impl Default for PipelineColorBlendAttachmentState {
+    fn default() -> Self {
+        unsafe { MaybeUninit::<Self>::zeroed().assume_init() }
+    }
+}
+
+#[repr(C)]
+pub struct PipelineColorBlendStateCreateInfo<'a> {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    pub flags: PipelineColorBlendStateCreateFlags,
+    pub logic_op_enable: Bool32,
+    pub logic_op: LogicOp,
+    pub attachments: VulkanSlice1<'a, u32, PipelineColorBlendAttachmentState, 0>,
+    pub blend_constants: [f32; 4],
+}
+
+impl<'a> Default for PipelineColorBlendStateCreateInfo<'a> {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::PipelineColorBlendStateCreateInfo;
+        x
+    }
+}
+
+#[repr(C)]
+pub struct PipelineDynamicStateCreateInfo<'a> {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    pub flags: PipelineDynamicStateCreateFlags,
+    pub dynamic_states: VulkanSlice1<'a, u32, DynamicState, 0>,
+}
+
+impl<'a> Default for PipelineDynamicStateCreateInfo<'a> {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::PipelineDynamicStateCreateInfo;
+        x
+    }
+}
+
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct StencilOpState {
+    pub fail_op: StencilOp,
+    pub pass_op: StencilOp,
+    pub depth_fail_op: StencilOp,
+    pub compare_op: CompareOp,
+    pub compare_mask: u32,
+    pub write_mask: u32,
+    pub reference: u32,
+}
+
+#[repr(C)]
+pub struct PipelineDepthStencilStateCreateInfo {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    pub flags: PipelineDepthStencilStateCreateFlags,
+    pub depth_test_enable: Bool32,
+    pub depth_write_enable: Bool32,
+    pub depth_compare_op: CompareOp,
+    pub depth_bounds_test_enable: Bool32, // optional (depth_bounds_test)
+    pub stencil_test_enable: Bool32,
+    pub front: StencilOpState,
+    pub back: StencilOpState,
+    pub min_depth_bounds: f32,
+    pub max_depth_bounds: f32,
+}
+
+impl Default for PipelineDepthStencilStateCreateInfo {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::PipelineDepthStencilStateCreateInfo;
+        x
+    }
+}
+
+#[repr(C)]
+pub struct GraphicsPipelineCreateInfo<'a> {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    pub flags: PipelineCreateFlags,
+    pub stages: VulkanSlice1<'a, u32, PipelineShaderStageCreateInfo<'a>, 0>,
+    pub vertex_input_state: Option<&'a PipelineVertexInputStateCreateInfo<'a>>,
+    pub input_assembly_state: Option<&'a PipelineInputAssemblyStateCreateInfo>,
+    pub tessellation_state: Option<&'a PipelineTessellationStateCreateInfo>,
+    pub viewport_state: Option<&'a PipelineViewportStateCreateInfo<'a>>,
+    pub rasterization_state: Option<&'a PipelineRasterizationStateCreateInfo>,
+    pub multisample_state: Option<&'a PipelineMultisampleStateCreateInfo>,
+    pub depth_stencil_state: Option<&'a PipelineDepthStencilStateCreateInfo>,
+    pub color_blend_state: Option<&'a PipelineColorBlendStateCreateInfo<'a>>,
+    pub dynamic_state: Option<&'a PipelineDynamicStateCreateInfo<'a>>,
+    ///  Interface layout of the pipeline
+    pub layout: PipelineLayout,
+    pub render_pass: RenderPass,
+    pub subpass: u32,
+    ///  If VK_PIPELINE_CREATE_DERIVATIVE_BIT is set and this value is nonzero, it specifies the handle of the base pipeline this is a derivative of
+    pub base_pipeline_handle: Pipeline,
+    ///  If VK_PIPELINE_CREATE_DERIVATIVE_BIT is set and this value is not -1, it specifies an index into pCreateInfos of the base pipeline this is a derivative of
+    pub base_pipeline_index: i32,
+}
+
+impl<'a> Default for GraphicsPipelineCreateInfo<'a> {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::GraphicsPipelineCreateInfo;
+        x
+    }
+}
+
+#[repr(C)]
+pub struct PipelineCacheCreateInfo {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    pub flags: PipelineCacheCreateFlags,
+    ///  Size of initial data to populate cache, in bytes
+    pub initial_data_size: usize,
+    ///  Initial data to populate cache
+    pub initial_data: *const c_void,
+}
+
+impl Default for PipelineCacheCreateInfo {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::PipelineCacheCreateInfo;
+        x
+    }
+}
+
+#[repr(C)]
+pub struct PipelineCacheHeaderVersionOne {
+    // The fields in this structure are non-normative since structure packing is implementation-defined in C. The specification defines the normative layout.
+    pub header_size: u32,
+    pub header_version: PipelineCacheHeaderVersion,
+    pub vendor_id: u32,
+    pub device_id: u32,
+    pub pipeline_cache_uuid: [u8; UUID_SIZE as usize],
+}
+
+#[repr(C)]
+pub struct PushConstantRange {
+    ///  Which stages use the range
+    pub stage_flags: ShaderStageFlags,
+    ///  Start of the range, in bytes
+    pub offset: u32,
+    ///  Size of the range, in bytes
+    pub size: u32,
+}
+
+#[repr(C)]
+pub struct PipelineLayoutCreateInfo<'a> {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    pub flags: PipelineLayoutCreateFlags,
+    pub set_layouts: VulkanSlice1<'a, u32, DescriptorSetLayout, 0>,
+    pub push_constant_ranges: VulkanSlice1<'a, u32, PushConstantRange, 4>,
+}
+
+impl<'a> Default for PipelineLayoutCreateInfo<'a> {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::PipelineLayoutCreateInfo;
+        x
+    }
+}
+
+#[repr(C)]
+pub struct SamplerCreateInfo {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    pub flags: SamplerCreateFlags,
+    pub mag_filter: Filter,
+    pub min_filter: Filter,
+    pub mipmap_mode: SamplerMipmapMode,
+    pub address_mode_u: SamplerAddressMode,
+    pub address_mode_v: SamplerAddressMode,
+    pub address_mode_w: SamplerAddressMode,
+    pub mip_lod_bias: f32,
+    pub anisotropy_enable: Bool32,
+    pub max_anisotropy: f32,
+    pub compare_enable: Bool32,
+    pub compare_op: CompareOp,
+    pub min_lod: f32,
+    pub max_lod: f32,
+    pub border_color: BorderColor,
+    pub unnormalized_coordinates: Bool32,
+}
+
+impl Default for SamplerCreateInfo {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::SamplerCreateInfo;
+        x
+    }
+}
+
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct DescriptorBufferInfo {
+    /// Buffer used for this descriptor slot.
+    pub buffer: Buffer,
+    /// Base offset from buffer start in bytes to update in the descriptor set.
+    pub offset: DeviceSize,
+    /// Size in bytes of the buffer resource for this descriptor update.
+    pub range: DeviceSize,
+}
+
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct DescriptorImageInfo {
+    /// Sampler to write to the descriptor in case it is a SAMPLER or COMBINED_IMAGE_SAMPLER descriptor. Ignored otherwise.
+    pub sampler: Sampler,
+    /// Image view to write to the descriptor in case it is a SAMPLED_IMAGE, STORAGE_IMAGE, COMBINED_IMAGE_SAMPLER, or INPUT_ATTACHMENT descriptor. Ignored otherwise.
+    pub image_view: ImageView,
+    /// Layout the image is expected to be in when accessed using this descriptor (only used if imageView is not VK_NULL_HANDLE).
+    pub image_layout: ImageLayout,
+}
+
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct WriteDescriptorSet<'a> {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    /// Destination descriptor set
+    pub dst_set: DescriptorSet,
+    /// Binding within the destination descriptor set to write
+    pub dst_binding: u32,
+    /// Array element within the destination binding to write
+    pub dst_array_element: u32,
+    /// Number of descriptors to write (determines the size of the array pointed by pDescriptors)
+    pub descriptor_count: u32,
+    /// Descriptor type to write (determines which members of the array pointed by pDescriptors are going to be used)
+    pub descriptor_type: DescriptorType,
+    /// Sampler, image view, and layout for SAMPLER, COMBINED_IMAGE_SAMPLER, {SAMPLED,STORAGE}_IMAGE, and INPUT_ATTACHMENT descriptor types.
+    pub image_info: Option<&'a DescriptorImageInfo>,
+    /// Raw buffer, size, and offset for {UNIFORM,STORAGE}_BUFFER[_DYNAMIC] descriptor types.
+    pub buffer_info: Option<&'a DescriptorBufferInfo>,
+    /// Buffer view to write to the descriptor for {UNIFORM,STORAGE}_TEXEL_BUFFER descriptor types.
+    pub texel_buffer_view: Option<&'a BufferView>,
+}
+
+impl<'a> Default for WriteDescriptorSet<'a> {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::WriteDescriptorSet;
+        x
+    }
+}
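+
+// Example (hypothetical handles; the `DescriptorType` variant name is
+// assumed): writing one uniform-buffer descriptor might look like:
+//
+//     let buffer_info = DescriptorBufferInfo { buffer, offset: 0, range: 256 };
+//     let write = WriteDescriptorSet {
+//         dst_set: descriptor_set,
+//         dst_binding: 0,
+//         dst_array_element: 0,
+//         descriptor_count: 1,
+//         descriptor_type: DescriptorType::UniformBuffer,
+//         buffer_info: Some(&buffer_info),
+//         ..Default::default()
+//     };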
+
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct CopyDescriptorSet {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    /// Source descriptor set
+    pub src_set: DescriptorSet,
+    /// Binding within the source descriptor set to copy from
+    pub src_binding: u32,
+    /// Array element within the source binding to copy from
+    pub src_array_element: u32,
+    /// Destination descriptor set
+    pub dst_set: DescriptorSet,
+    /// Binding within the destination descriptor set to copy to
+    pub dst_binding: u32,
+    /// Array element within the destination binding to copy to
+    pub dst_array_element: u32,
+    /// Number of descriptors to copy
+    pub descriptor_count: u32,
+}
+
+impl Default for CopyDescriptorSet {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::CopyDescriptorSet;
+        x
+    }
+}
+
+#[repr(C)]
+pub struct BufferCreateInfo<'a> {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    /// Buffer creation flags
+    pub flags: BufferCreateFlags,
+    /// Specified in bytes
+    pub size: DeviceSize,
+    /// Buffer usage flags
+    pub usage: BufferUsageFlags,
+    pub sharing_mode: SharingMode,
+    pub queue_family_indices: VulkanSlice1<'a, u32, u32, 4>,
+}
+
+impl<'a> Default for BufferCreateInfo<'a> {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::BufferCreateInfo;
+        x
+    }
+}
+
+#[repr(C)]
+pub struct BufferViewCreateInfo {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    pub flags: BufferViewCreateFlags,
+    pub buffer: Buffer,
+    /// Optionally specifies format of elements
+    pub format: Format,
+    /// Specified in bytes
+    pub offset: DeviceSize,
+    /// View size specified in bytes
+    pub range: DeviceSize,
+}
+
+impl Default for BufferViewCreateInfo {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::BufferViewCreateInfo;
+        x
+    }
+}
+
+#[repr(C)]
+pub struct ImageSubresource {
+    pub aspect_mask: ImageAspectFlags,
+    pub mip_level: u32,
+    pub array_layer: u32,
+}
+
+#[repr(C)]
+pub struct ImageSubresourceRange {
+    pub aspect_mask: ImageAspectFlags,
+    pub base_mip_level: u32,
+    pub level_count: u32,
+    pub base_array_layer: u32,
+    pub layer_count: u32,
+}
+
+impl Default for ImageSubresourceRange {
+    fn default() -> Self {
+        Self {
+            aspect_mask: Default::default(),
+            base_mip_level: Default::default(),
+            level_count: Default::default(),
+            base_array_layer: Default::default(),
+            layer_count: Default::default(),
+        }
+    }
+}
+
+#[repr(C)]
+pub struct FramebufferCreateInfo<'a> {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    pub flags: FramebufferCreateFlags,
+    pub render_pass: RenderPass,
+    pub attachments: VulkanSlice1<'a, u32, ImageView, 4>,
+    pub width: u32,
+    pub height: u32,
+    pub layers: u32,
+}
+
+impl<'a> Default for FramebufferCreateInfo<'a> {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::FramebufferCreateInfo;
+        x
+    }
+}
+
+#[repr(C)]
+pub struct FramebufferAttachmentImageInfo<'a> {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    pub flags: ImageCreateFlags,
+    pub usage: ImageUsageFlags,
+    pub width: u32,
+    pub height: u32,
+    pub layer_count: u32,
+    pub view_formats: VulkanSlice1<'a, u32, Format, 0>,
+}
+
+impl<'a> Default for FramebufferAttachmentImageInfo<'a> {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::FramebufferAttachmentImageInfo;
+        x
+    }
+}
+
+#[repr(C)]
+pub struct FramebufferAttachmentsCreateInfo<'a> {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    pub attachment_image_infos: VulkanSlice1<'a, u32, FramebufferAttachmentImageInfo<'a>, 4>,
+}
+
+impl<'a> Default for FramebufferAttachmentsCreateInfo<'a> {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::FramebufferAttachmentsCreateInfo;
+        x
+    }
+}
+
+#[repr(C)]
+pub struct FenceCreateInfo {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    pub flags: FenceCreateFlags,
+}
+
+impl Default for FenceCreateInfo {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::FenceCreateInfo;
+        x
+    }
+}
+
+#[repr(C)]
+pub struct SemaphoreCreateInfo {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    pub flags: SemaphoreCreateFlags, // Semaphore creation flags
+}
+
+impl Default for SemaphoreCreateInfo {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::SemaphoreCreateInfo;
+        x
+    }
+}
+
+#[repr(C)]
+pub struct SemaphoreTypeCreateInfo {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    pub semaphore_type: SemaphoreType,
+    pub initial_value: u64,
+}
+
+impl Default for SemaphoreTypeCreateInfo {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::SemaphoreTypeCreateInfo;
+        x
+    }
+}
+
+#[repr(C)]
+pub struct TimelineSemaphoreSubmitInfo<'a> {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    pub wait_semaphore_values: VulkanSlice1<'a, u32, u64, 4>,
+    pub signal_semaphore_values: VulkanSlice1<'a, u32, u64, 4>,
+}
+
+impl<'a> Default for TimelineSemaphoreSubmitInfo<'a> {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::TimelineSemaphoreSubmitInfo;
+        x
+    }
+}
+
+#[repr(C)]
+pub struct SemaphoreWaitInfo<'a> {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    pub flags: SemaphoreWaitFlags,
+    pub semaphores: VulkanSlice2<'a, u32, Semaphore, u64, 0>,
+}
+
+impl<'a> Default for SemaphoreWaitInfo<'a> {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::SemaphoreWaitInfo;
+        x
+    }
+}
+
+#[repr(C)]
+pub struct SemaphoreSignalInfo {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    pub semaphore: Semaphore,
+    pub value: u64,
+}
+
+impl Default for SemaphoreSignalInfo {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::SemaphoreSignalInfo;
+        x
+    }
+}
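+// A minimal usage sketch for the timeline-semaphore structs above, assuming a
+// `semaphore` handle obtained elsewhere; the zeroed `Default` plus struct
+// update syntax plays the role of C designated initializers:
+//
+//     let signal_info = SemaphoreSignalInfo {
+//         semaphore,
+//         value: 42,
+//         ..Default::default()
+//     };
+//     // then pass &signal_info to vkSignalSemaphore.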
+
+#[repr(C)]
+pub struct MemoryBarrier {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    /// Memory accesses from the source of the dependency to synchronize
+    pub src_access_mask: AccessFlags,
+    /// Memory accesses from the destination of the dependency to synchronize
+    pub dst_access_mask: AccessFlags,
+}
+
+impl Default for MemoryBarrier {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::MemoryBarrier;
+        x
+    }
+}
+
+#[repr(C)]
+pub struct BufferMemoryBarrier {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    /// Memory accesses from the source of the dependency to synchronize
+    pub src_access_mask: AccessFlags,
+    /// Memory accesses from the destination of the dependency to synchronize
+    pub dst_access_mask: AccessFlags,
+    /// Queue family to transition ownership from
+    pub src_queue_family_index: u32,
+    /// Queue family to transition ownership to
+    pub dst_queue_family_index: u32,
+    /// Buffer to sync
+    pub buffer: Buffer,
+    /// Offset within the buffer to sync
+    pub offset: DeviceSize,
+    /// Amount of bytes to sync
+    pub size: DeviceSize,
+}
+
+impl Default for BufferMemoryBarrier {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::BufferMemoryBarrier;
+        x
+    }
+}
+
+#[repr(C)]
+pub struct ConformanceVersion {
+    pub major: u8,
+    pub minor: u8,
+    pub subminor: u8,
+    pub patch: u8,
+}
+
+#[repr(C)]
+pub struct ImageMemoryBarrier {
+    pub _type: StructureType,
+    pub _next: *const c_void,
+    /// Memory accesses from the source of the dependency to synchronize
+    pub src_access_mask: AccessFlags,
+    /// Memory accesses from the destination of the dependency to synchronize
+    pub dst_access_mask: AccessFlags,
+    /// Current layout of the image
+    pub old_layout: ImageLayout,
+    /// New layout to transition the image to
+    pub new_layout: ImageLayout,
+    /// Queue family to transition ownership from
+    pub src_queue_family_index: u32,
+    /// Queue family to transition ownership to
+    pub dst_queue_family_index: u32,
+    /// Image to sync
+    pub image: Image,
+    /// Subresource range to sync
+    pub subresource_range: ImageSubresourceRange,
+}
+
+impl Default for ImageMemoryBarrier {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::ImageMemoryBarrier;
+        x
+    }
+}
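+// A minimal sketch of a layout-transition barrier, assuming an `image` handle
+// and flag/variant names from this crate's `AccessFlags`, `ImageLayout`, and
+// `ImageAspectFlags` definitions; `!0` is VK_QUEUE_FAMILY_IGNORED, i.e. no
+// queue family ownership transfer:
+//
+//     let barrier = ImageMemoryBarrier {
+//         src_access_mask: AccessFlags::TRANSFER_WRITE,
+//         dst_access_mask: AccessFlags::SHADER_READ,
+//         old_layout: ImageLayout::TransferDstOptimal,
+//         new_layout: ImageLayout::ShaderReadOnlyOptimal,
+//         src_queue_family_index: !0,
+//         dst_queue_family_index: !0,
+//         image,
+//         subresource_range: ImageSubresourceRange {
+//             aspect_mask: ImageAspectFlags::COLOR,
+//             level_count: 1,
+//             layer_count: 1,
+//             ..Default::default()
+//         },
+//         ..Default::default()
+//     };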
+
+#[repr(C)]
+pub struct PhysicalDeviceSparseProperties {
+    ///  Sparse resources support: GPU will access all 2D (single sample) sparse resources using the standard sparse image block shapes (based on pixel format)
+    pub residency_standard_2d_block_shape: Bool32,
+    ///  Sparse resources support: GPU will access all 2D (multisample) sparse resources using the standard sparse image block shapes (based on pixel format)
+    pub residency_standard_2d_multisample_block_shape: Bool32,
+    ///  Sparse resources support: GPU will access all 3D sparse resources using the standard sparse image block shapes (based on pixel format)
+    pub residency_standard_3d_block_shape: Bool32,
+    ///  Sparse resources support: Images with mip level dimensions that are NOT a multiple of the sparse image block dimensions will be placed in the mip tail
+    pub residency_aligned_mip_size: Bool32,
+    ///  Sparse resources support: GPU can consistently access non-resident regions of a resource, all reads return as if data is 0, writes are discarded
+    pub residency_non_resident_strict: Bool32,
+}
+
+#[repr(C)]
+pub struct PhysicalDeviceLimits {
+    ///  max 1D image dimension
+    pub max_image_dimension_1d: u32,
+    ///  max 2D image dimension
+    pub max_image_dimension_2d: u32,
+    ///  max 3D image dimension
+    pub max_image_dimension_3d: u32,
+    ///  max cubemap image dimension
+    pub max_image_dimension_cube: u32,
+    ///  max layers for image arrays
+    pub max_image_array_layers: u32,
+    ///  max texel buffer size (texels)
+    pub max_texel_buffer_elements: u32,
+    ///  max uniform buffer range (bytes)
+    pub max_uniform_buffer_range: u32,
+    ///  max storage buffer range (bytes)
+    pub max_storage_buffer_range: u32,
+    ///  max size of the push constants pool (bytes)
+    pub max_push_constants_size: u32,
+
+    ///  max number of device memory allocations supported
+    pub max_memory_allocation_count: u32,
+    ///  max number of samplers that can be allocated on a device
+    pub max_sampler_allocation_count: u32,
+    ///  Granularity (in bytes) at which buffers and images can be bound to adjacent memory for simultaneous usage
+    pub buffer_image_granularity: DeviceSize,
+    ///  Total address space available for sparse allocations (bytes)
+    pub sparse_address_space_size: DeviceSize,
+
+    ///  max number of descriptor sets that can be bound to a pipeline
+    pub max_bound_descriptor_sets: u32,
+    ///  max number of samplers allowed per-stage in a descriptor set
+    pub max_per_stage_descriptor_samplers: u32,
+    ///  max number of uniform buffers allowed per-stage in a descriptor set
+    pub max_per_stage_descriptor_uniform_buffers: u32,
+    ///  max number of storage buffers allowed per-stage in a descriptor set
+    pub max_per_stage_descriptor_storage_buffers: u32,
+    ///  max number of sampled images allowed per-stage in a descriptor set
+    pub max_per_stage_descriptor_sampled_images: u32,
+    ///  max number of storage images allowed per-stage in a descriptor set
+    pub max_per_stage_descriptor_storage_images: u32,
+    ///  max number of input attachments allowed per-stage in a descriptor set
+    pub max_per_stage_descriptor_input_attachments: u32,
+    ///  max number of resources allowed by a single stage
+    pub max_per_stage_resources: u32,
+    ///  max number of samplers allowed in all stages in a descriptor set
+    pub max_descriptor_set_samplers: u32,
+    ///  max number of uniform buffers allowed in all stages in a descriptor set
+    pub max_descriptor_set_uniform_buffers: u32,
+    ///  max number of dynamic uniform buffers allowed in all stages in a descriptor set
+    pub max_descriptor_set_uniform_buffers_dynamic: u32,
+    ///  max number of storage buffers allowed in all stages in a descriptor set
+    pub max_descriptor_set_storage_buffers: u32,
+    ///  max number of dynamic storage buffers allowed in all stages in a descriptor set
+    pub max_descriptor_set_storage_buffers_dynamic: u32,
+    ///  max number of sampled images allowed in all stages in a descriptor set
+    pub max_descriptor_set_sampled_images: u32,
+    ///  max number of storage images allowed in all stages in a descriptor set
+    pub max_descriptor_set_storage_images: u32,
+    ///  max number of input attachments allowed in all stages in a descriptor set
+    pub max_descriptor_set_input_attachments: u32,
+
+    ///  max number of vertex input attribute slots
+    pub max_vertex_input_attributes: u32,
+    ///  max number of vertex input binding slots
+    pub max_vertex_input_bindings: u32,
+    ///  max vertex input attribute offset added to vertex buffer offset
+    pub max_vertex_input_attribute_offset: u32,
+    ///  max vertex input binding stride
+    pub max_vertex_input_binding_stride: u32,
+    ///  max number of output components written by vertex shader
+    pub max_vertex_output_components: u32,
+
+    ///  max level supported by tessellation primitive generator
+    pub max_tessellation_generation_level: u32,
+    ///  max patch size (vertices)
+    pub max_tessellation_patch_size: u32,
+    ///  max number of input components per-vertex in TCS
+    pub max_tessellation_control_per_vertex_input_components: u32,
+    ///  max number of output components per-vertex in TCS
+    pub max_tessellation_control_per_vertex_output_components: u32,
+    ///  max number of output components per-patch in TCS
+    pub max_tessellation_control_per_patch_output_components: u32,
+    ///  max total number of per-vertex and per-patch output components in TCS
+    pub max_tessellation_control_total_output_components: u32,
+
+    // Tessellation evaluation stage limits
+    ///  max number of input components per vertex in TES
+    pub max_tessellation_evaluation_input_components: u32,
+    ///  max number of output components per vertex in TES
+    pub max_tessellation_evaluation_output_components: u32,
+
+    ///  max invocation count supported in geometry shader
+    pub max_geometry_shader_invocations: u32,
+    ///  max number of input components read in geometry stage
+    pub max_geometry_input_components: u32,
+    ///  max number of output components written in geometry stage
+    pub max_geometry_output_components: u32,
+    ///  max number of vertices that can be emitted in geometry stage
+    pub max_geometry_output_vertices: u32,
+    ///  max total number of components (all vertices) written in geometry stage
+    pub max_geometry_total_output_components: u32,
+
+    ///  max number of input components read in fragment stage
+    pub max_fragment_input_components: u32,
+    ///  max number of output attachments written in fragment stage
+    pub max_fragment_output_attachments: u32,
+    ///  max number of output attachments written when using dual source blending
+    pub max_fragment_dual_src_attachments: u32,
+    ///  max total number of storage buffers, storage images and output buffers
+    pub max_fragment_combined_output_resources: u32,
+
+    ///  max total storage size of work group local storage (bytes)
+    pub max_compute_shared_memory_size: u32,
+    ///  max number of compute work groups that may be dispatched by a single command (x,y,z)
+    pub max_compute_work_group_count: [u32; 3],
+    ///  max total compute invocations in a single local work group
+    pub max_compute_work_group_invocations: u32,
+    ///  max local size of a compute work group (x,y,z)
+    pub max_compute_work_group_size: [u32; 3],
+    ///  number of bits of subpixel precision in screen x and y
+    pub sub_pixel_precision_bits: u32,
+    ///  number of bits of precision for selecting texel weights
+    pub sub_texel_precision_bits: u32,
+    ///  number of bits of precision for selecting mipmap weights
+    pub mipmap_precision_bits: u32,
+    ///  max index value for indexed draw calls (for 32-bit indices)
+    pub max_draw_indexed_index_value: u32,
+    ///  max draw count for indirect draw calls
+    pub max_draw_indirect_count: u32,
+    ///  max absolute sampler LOD bias
+    pub max_sampler_lod_bias: f32,
+    ///  max degree of sampler anisotropy
+    pub max_sampler_anisotropy: f32,
+    ///  max number of active viewports
+    pub max_viewports: u32,
+    ///  max viewport dimensions (x,y)
+    pub max_viewport_dimensions: [u32; 2],
+    ///  viewport bounds range (min,max)
+    pub viewport_bounds_range: [f32; 2],
+    ///  number of bits of subpixel precision for viewport
+    pub viewport_sub_pixel_bits: u32,
+    ///  min required alignment of pointers returned by MapMemory (bytes)
+    pub min_memory_map_alignment: usize,
+    ///  min required alignment for texel buffer offsets (bytes)
+    pub min_texel_buffer_offset_alignment: DeviceSize,
+    ///  min required alignment for uniform buffer sizes and offsets (bytes)
+    pub min_uniform_buffer_offset_alignment: DeviceSize,
+    ///  min required alignment for storage buffer offsets (bytes)
+    pub min_storage_buffer_offset_alignment: DeviceSize,
+    ///  min texel offset for OpTextureSampleOffset
+    pub min_texel_offset: i32,
+    ///  max texel offset for OpTextureSampleOffset
+    pub max_texel_offset: u32,
+    ///  min texel offset for OpTextureGatherOffset
+    pub min_texel_gather_offset: i32,
+    ///  max texel offset for OpTextureGatherOffset
+    pub max_texel_gather_offset: u32,
+    ///  furthest negative offset for interpolateAtOffset
+    pub min_interpolation_offset: f32,
+    ///  furthest positive offset for interpolateAtOffset
+    pub max_interpolation_offset: f32,
+    ///  number of subpixel bits for interpolateAtOffset
+    pub sub_pixel_interpolation_offset_bits: u32,
+    ///  max width for a framebuffer
+    pub max_framebuffer_width: u32,
+    ///  max height for a framebuffer
+    pub max_framebuffer_height: u32,
+    ///  max layer count for a layered framebuffer
+    pub max_framebuffer_layers: u32,
+    ///  supported color sample counts for a framebuffer
+    pub framebuffer_color_sample_counts: SampleCountFlags,
+    ///  supported depth sample counts for a framebuffer
+    pub framebuffer_depth_sample_counts: SampleCountFlags,
+    ///  supported stencil sample counts for a framebuffer
+    pub framebuffer_stencil_sample_counts: SampleCountFlags,
+    ///  supported sample counts for a subpass which uses no attachments
+    pub framebuffer_no_attachments_sample_counts: SampleCountFlags,
+    ///  max number of color attachments per subpass
+    pub max_color_attachments: u32,
+    ///  supported color sample counts for a non-integer sampled image
+    pub sampled_image_color_sample_counts: SampleCountFlags,
+    ///  supported sample counts for an integer image
+    pub sampled_image_integer_sample_counts: SampleCountFlags,
+    ///  supported depth sample counts for a sampled image
+    pub sampled_image_depth_sample_counts: SampleCountFlags,
+    ///  supported stencil sample counts for a sampled image
+    pub sampled_image_stencil_sample_counts: SampleCountFlags,
+    ///  supported sample counts for a storage image
+    pub storage_image_sample_counts: SampleCountFlags,
+    ///  max number of sample mask words
+    pub max_sample_mask_words: u32,
+    ///  timestamps on graphics and compute queues
+    pub timestamp_compute_and_graphics: Bool32,
+    ///  number of nanoseconds it takes for timestamp query value to increment by 1
+    pub timestamp_period: f32,
+    ///  max number of clip distances
+    pub max_clip_distances: u32,
+    ///  max number of cull distances
+    pub max_cull_distances: u32,
+    ///  max combined number of user clip and cull distances
+    pub max_combined_clip_and_cull_distances: u32,
+    ///  distinct queue priorities available
+    pub discrete_queue_priorities: u32,
+    ///  range (min,max) of supported point sizes
+    pub point_size_range: [f32; 2],
+    ///  range (min,max) of supported line widths
+    pub line_width_range: [f32; 2],
+    ///  granularity of supported point sizes
+    pub point_size_granularity: f32,
+    ///  granularity of supported line widths
+    pub line_width_granularity: f32,
+    ///  line rasterization follows preferred rules
+    pub strict_lines: Bool32,
+    ///  supports standard sample locations for all supported sample counts
+    pub standard_sample_locations: Bool32,
+    ///  optimal offset of buffer copies
+    pub optimal_buffer_copy_offset_alignment: DeviceSize,
+    ///  optimal pitch of buffer copies
+    pub optimal_buffer_copy_row_pitch_alignment: DeviceSize,
+    ///  minimum size and alignment for non-coherent host-mapped device memory access
+    pub non_coherent_atom_size: DeviceSize,
+}
+
+#[repr(C)]
+pub struct PhysicalDeviceFeatures {
+    /// out of bounds buffer accesses are well defined
+    pub robust_buffer_access: Bool32,
+    /// full 32-bit range of indices for indexed draw calls
+    pub full_draw_index_uint32: Bool32,
+    /// image views which are arrays of cube maps
+    pub image_cube_array: Bool32,
+    /// blending operations are controlled per-attachment
+    pub independent_blend: Bool32,
+    /// geometry stage
+    pub geometry_shader: Bool32,
+    /// tessellation control and evaluation stage
+    pub tessellation_shader: Bool32,
+    /// per-sample shading and interpolation
+    pub sample_rate_shading: Bool32,
+    /// blend operations which take two sources
+    pub dual_src_blend: Bool32,
+    /// logic operations
+    pub logic_op: Bool32,
+    /// multi draw indirect
+    pub multi_draw_indirect: Bool32,
+    /// indirect draws can use non-zero firstInstance
+    pub draw_indirect_first_instance: Bool32,
+    /// depth clamping
+    pub depth_clamp: Bool32,
+    /// depth bias clamping
+    pub depth_bias_clamp: Bool32,
+    /// point and wireframe fill modes
+    pub fill_mode_non_solid: Bool32,
+    /// depth bounds test
+    pub depth_bounds: Bool32,
+    /// lines with width greater than 1
+    pub wide_lines: Bool32,
+    /// points with size greater than 1
+    pub large_points: Bool32,
+    /// the fragment alpha component can be forced to maximum representable alpha value
+    pub alpha_to_one: Bool32,
+    /// viewport arrays
+    pub multi_viewport: Bool32,
+    /// anisotropic sampler filtering
+    pub sampler_anisotropy: Bool32,
+    /// ETC texture compression formats
+    pub texture_compression_etc2: Bool32,
+    /// ASTC LDR texture compression formats
+    pub texture_compression_astc_ldr: Bool32,
+    /// BC1-7 texture compressed formats
+    pub texture_compression_bc: Bool32,
+    /// precise occlusion queries returning actual sample counts
+    pub occlusion_query_precise: Bool32,
+    /// pipeline statistics query
+    pub pipeline_statistics_query: Bool32,
+    /// stores and atomic ops on storage buffers and images are supported in vertex, tessellation, and geometry stages
+    pub vertex_pipeline_stores_and_atomics: Bool32,
+    /// stores and atomic ops on storage buffers and images are supported in the fragment stage
+    pub fragment_stores_and_atomics: Bool32,
+    /// tessellation and geometry stages can export point size
+    pub shader_tessellation_and_geometry_point_size: Bool32,
+    /// image gather with run-time values and independent offsets
+    pub shader_image_gather_extended: Bool32,
+    /// the extended set of formats can be used for storage images
+    pub shader_storage_image_extended_formats: Bool32,
+    /// multisample images can be used for storage images
+    pub shader_storage_image_multisample: Bool32,
+    /// read from storage image does not require format qualifier
+    pub shader_storage_image_read_without_format: Bool32,
+    /// write to storage image does not require format qualifier
+    pub shader_storage_image_write_without_format: Bool32,
+    /// arrays of uniform buffers can be accessed with dynamically uniform indices
+    pub shader_uniform_buffer_array_dynamic_indexing: Bool32,
+    /// arrays of sampled images can be accessed with dynamically uniform indices
+    pub shader_sampled_image_array_dynamic_indexing: Bool32,
+    /// arrays of storage buffers can be accessed with dynamically uniform indices
+    pub shader_storage_buffer_array_dynamic_indexing: Bool32,
+    /// arrays of storage images can be accessed with dynamically uniform indices
+    pub shader_storage_image_array_dynamic_indexing: Bool32,
+    /// clip distance in shaders
+    pub shader_clip_distance: Bool32,
+    /// cull distance in shaders
+    pub shader_cull_distance: Bool32,
+    /// 64-bit floats (doubles) in shaders
+    pub shader_float64: Bool32,
+    /// 64-bit integers in shaders
+    pub shader_int64: Bool32,
+    /// 16-bit integers in shaders
+    pub shader_int16: Bool32,
+    /// shader can use texture operations that return resource residency information (requires sparseNonResident support)
+    pub shader_resource_residency: Bool32,
+    /// shader can use texture operations that specify minimum resource LOD
+    pub shader_resource_min_lod: Bool32,
+    /// Sparse resources support: Resource memory can be managed at opaque page level rather than object level
+    pub sparse_binding: Bool32,
+    /// Sparse resources support: GPU can access partially resident buffers
+    pub sparse_residency_buffer: Bool32,
+    /// Sparse resources support: GPU can access partially resident 2D (non-MSAA non-depth/stencil) images
+    pub sparse_residency_image_2d: Bool32,
+    /// Sparse resources support: GPU can access partially resident 3D images
+    pub sparse_residency_image_3d: Bool32,
+    /// Sparse resources support: GPU can access partially resident MSAA 2D images with 2 samples
+    pub sparse_residency_2_samples: Bool32,
+    /// Sparse resources support: GPU can access partially resident MSAA 2D images with 4 samples
+    pub sparse_residency_4_samples: Bool32,
+    /// Sparse resources support: GPU can access partially resident MSAA 2D images with 8 samples
+    pub sparse_residency_8_samples: Bool32,
+    /// Sparse resources support: GPU can access partially resident MSAA 2D images with 16 samples
+    pub sparse_residency_16_samples: Bool32,
+    /// Sparse resources support: GPU can correctly access data aliased into multiple locations (opt-in)
+    pub sparse_residency_aliased: Bool32,
+    /// multisample rate must be the same for all pipelines in a subpass
+    pub variable_multisample_rate: Bool32,
+    /// Queries may be inherited from primary to secondary command buffers
+    pub inherited_queries: Bool32,
+}
+
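+// A zeroed `PhysicalDeviceFeatures` sets every feature to `VK_FALSE`, i.e. the
+// "request no optional features" value; enable features by flipping fields.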
+impl Default for PhysicalDeviceFeatures {
+    fn default() -> Self {
+        unsafe { MaybeUninit::zeroed().assume_init() }
+    }
+}
+
+#[repr(C)]
+pub struct PhysicalDeviceFeatures2 {
+    pub _type: StructureType,
+    pub _next: *mut c_void,
+    pub features: PhysicalDeviceFeatures,
+}
+
+impl Default for PhysicalDeviceFeatures2 {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::PhysicalDeviceFeatures2;
+        x
+    }
+}
+
+#[repr(C)]
+pub struct PhysicalDeviceVulkan11Features {
+    pub _type: StructureType,
+    pub _next: *mut c_void,
+    pub storage_buffer_16bit_access: Bool32,
+    pub uniform_and_storage_buffer_16bit_access: Bool32,
+    pub storage_push_constant16: Bool32,
+    pub storage_input_output16: Bool32,
+    pub multiview: Bool32,
+    pub multiview_geometry_shader: Bool32,
+    pub multiview_tessellation_shader: Bool32,
+    pub variable_pointers_storage_buffer: Bool32,
+    pub variable_pointers: Bool32,
+    pub protected_memory: Bool32,
+    pub sampler_ycbcr_conversion: Bool32,
+    pub shader_draw_parameters: Bool32,
+}
+
+impl Default for PhysicalDeviceVulkan11Features {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::PhysicalDeviceVulkan11Features;
+        x
+    }
+}
+
+#[repr(C)]
+pub struct PhysicalDeviceVulkan12Features {
+    pub _type: StructureType,
+    pub _next: *mut c_void,
+    pub sampler_mirror_clamp_to_edge: Bool32,
+    pub draw_indirect_count: Bool32,
+    pub storage_buffer_8bit_access: Bool32,
+    pub uniform_and_storage_buffer_8bit_access: Bool32,
+    pub storage_push_constant8: Bool32,
+    pub shader_buffer_int64_atomics: Bool32,
+    pub shader_shared_int64_atomics: Bool32,
+    pub shader_float16: Bool32,
+    pub shader_int8: Bool32,
+    pub descriptor_indexing: Bool32,
+    pub shader_input_attachment_array_dynamic_indexing: Bool32,
+    pub shader_uniform_texel_buffer_array_dynamic_indexing: Bool32,
+    pub shader_storage_texel_buffer_array_dynamic_indexing: Bool32,
+    pub shader_uniform_buffer_array_non_uniform_indexing: Bool32,
+    pub shader_sampled_image_array_non_uniform_indexing: Bool32,
+    pub shader_storage_buffer_array_non_uniform_indexing: Bool32,
+    pub shader_storage_image_array_non_uniform_indexing: Bool32,
+    pub shader_input_attachment_array_non_uniform_indexing: Bool32,
+    pub shader_uniform_texel_buffer_array_non_uniform_indexing: Bool32,
+    pub shader_storage_texel_buffer_array_non_uniform_indexing: Bool32,
+    pub descriptor_binding_uniform_buffer_update_after_bind: Bool32,
+    pub descriptor_binding_sampled_image_update_after_bind: Bool32,
+    pub descriptor_binding_storage_image_update_after_bind: Bool32,
+    pub descriptor_binding_storage_buffer_update_after_bind: Bool32,
+    pub descriptor_binding_uniform_texel_buffer_update_after_bind: Bool32,
+    pub descriptor_binding_storage_texel_buffer_update_after_bind: Bool32,
+    pub descriptor_binding_update_unused_while_pending: Bool32,
+    pub descriptor_binding_partially_bound: Bool32,
+    pub descriptor_binding_variable_descriptor_count: Bool32,
+    pub runtime_descriptor_array: Bool32,
+    pub sampler_filter_minmax: Bool32,
+    pub scalar_block_layout: Bool32,
+    pub imageless_framebuffer: Bool32,
+    pub uniform_buffer_standard_layout: Bool32,
+    pub shader_subgroup_extended_types: Bool32,
+    pub separate_depth_stencil_layouts: Bool32,
+    pub host_query_reset: Bool32,
+    pub timeline_semaphore: Bool32,
+    pub buffer_device_address: Bool32,
+    pub buffer_device_address_capture_replay: Bool32,
+    pub buffer_device_address_multi_device: Bool32,
+    pub vulkan_memory_model: Bool32,
+    pub vulkan_memory_model_device_scope: Bool32,
+    pub vulkan_memory_model_availability_visibility_chains: Bool32,
+    pub shader_output_viewport_index: Bool32,
+    pub shader_output_layer: Bool32,
+    pub subgroup_broadcast_dynamic_id: Bool32,
+}
+
+impl Default for PhysicalDeviceVulkan12Features {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::PhysicalDeviceVulkan12Features;
+        x
+    }
+}
+
+#[repr(C)]
+pub struct PhysicalDeviceVulkan13Features {
+    pub _type: StructureType,
+    pub _next: *mut c_void,
+    pub robust_image_access: Bool32,
+    pub inline_uniform_block: Bool32,
+    pub descriptor_binding_inline_uniform_block_update_after_bind: Bool32,
+    pub pipeline_creation_cache_control: Bool32,
+    pub private_data: Bool32,
+    pub shader_demote_to_helper_invocation: Bool32,
+    pub shader_terminate_invocation: Bool32,
+    pub subgroup_size_control: Bool32,
+    pub compute_full_subgroups: Bool32,
+    pub synchronization2: Bool32,
+    pub texture_compression_astc_hdr: Bool32,
+    pub shader_zero_initialize_workgroup_memory: Bool32,
+    pub dynamic_rendering: Bool32,
+    pub shader_integer_dot_product: Bool32,
+    pub maintenance4: Bool32,
+}
+
+impl Default for PhysicalDeviceVulkan13Features {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::PhysicalDeviceVulkan13Features;
+        x
+    }
+}
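+// A minimal sketch of chaining the feature structs through `_next` when
+// querying device support (the query call itself is whatever wrapper this
+// crate exposes around vkGetPhysicalDeviceFeatures2):
+//
+//     let mut features13 = PhysicalDeviceVulkan13Features::default();
+//     let mut features12 = PhysicalDeviceVulkan12Features {
+//         _next: &mut features13 as *mut _ as *mut c_void,
+//         ..Default::default()
+//     };
+//     let mut features2 = PhysicalDeviceFeatures2 {
+//         _next: &mut features12 as *mut _ as *mut c_void,
+//         ..Default::default()
+//     };
+//     // pass &mut features2 to vkGetPhysicalDeviceFeatures2, then read flags.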
+
+#[repr(C)]
+pub struct PhysicalDeviceProperties {
+    pub api_version: u32,
+    pub driver_version: u32,
+    pub vendor_id: u32,
+    pub device_id: u32,
+    pub device_type: PhysicalDeviceType,
+    pub device_name: [c_char; MAX_PHYSICAL_DEVICE_NAME_SIZE as usize],
+    pub pipeline_cache_uuid: [u8; UUID_SIZE as usize],
+    pub limits: PhysicalDeviceLimits,
+    pub sparse_properties: PhysicalDeviceSparseProperties,
+}
+
+impl Default for PhysicalDeviceProperties {
+    fn default() -> Self {
+        unsafe { MaybeUninit::zeroed().assume_init() }
+    }
+}
+
+#[repr(C)]
+pub struct PhysicalDeviceProperties2 {
+    pub _type: StructureType,
+    pub _next: *mut c_void,
+    pub properties: PhysicalDeviceProperties,
+}
+
+impl Default for PhysicalDeviceProperties2 {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::PhysicalDeviceProperties2;
+        x
+    }
+}
+
+#[repr(C)]
+pub struct PhysicalDeviceVulkan11Properties {
+    pub _type: StructureType,
+    pub _next: *mut c_void,
+    pub device_uuid: [u8; UUID_SIZE as usize],
+    pub driver_uuid: [u8; UUID_SIZE as usize],
+    pub device_luid: [u8; LUID_SIZE as usize],
+    pub device_node_mask: u32,
+    pub device_luid_valid: Bool32,
+    pub subgroup_size: u32,
+    pub subgroup_supported_stages: ShaderStageFlags,
+    pub subgroup_supported_operations: SubgroupFeatureFlags,
+    pub subgroup_quad_operations_in_all_stages: Bool32,
+    pub point_clipping_behavior: PointClippingBehavior,
+    pub max_multiview_view_count: u32,
+    pub max_multiview_instance_index: u32,
+    pub protected_no_fault: Bool32,
+    pub max_per_set_descriptors: u32,
+    pub max_memory_allocation_size: DeviceSize,
+}
+
+impl Default for PhysicalDeviceVulkan11Properties {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::PhysicalDeviceVulkan11Properties;
+        x
+    }
+}
+
+#[repr(C)]
+pub struct PhysicalDeviceVulkan12Properties {
+    pub _type: StructureType,
+    pub _next: *mut c_void,
+    pub driver_id: DriverId,
+    pub driver_name: [u8; MAX_DRIVER_NAME_SIZE as usize],
+    pub driver_info: [u8; MAX_DRIVER_INFO_SIZE as usize],
+    pub conformance_version: ConformanceVersion,
+    pub denorm_behavior_independence: ShaderFloatControlsIndependence,
+    pub rounding_mode_independence: ShaderFloatControlsIndependence,
+    pub shader_signed_zero_inf_nan_preserve_float16: Bool32,
+    pub shader_signed_zero_inf_nan_preserve_float32: Bool32,
+    pub shader_signed_zero_inf_nan_preserve_float64: Bool32,
+    pub shader_denorm_preserve_float16: Bool32,
+    pub shader_denorm_preserve_float32: Bool32,
+    pub shader_denorm_preserve_float64: Bool32,
+    pub shader_denorm_flush_to_zero_float16: Bool32,
+    pub shader_denorm_flush_to_zero_float32: Bool32,
+    pub shader_denorm_flush_to_zero_float64: Bool32,
+    pub shader_rounding_mode_rte_float16: Bool32,
+    pub shader_rounding_mode_rte_float32: Bool32,
+    pub shader_rounding_mode_rte_float64: Bool32,
+    pub shader_rounding_mode_rtz_float16: Bool32,
+    pub shader_rounding_mode_rtz_float32: Bool32,
+    pub shader_rounding_mode_rtz_float64: Bool32,
+    pub max_update_after_bind_descriptors_in_all_pools: u32,
+    pub shader_uniform_buffer_array_non_uniform_indexing_native: Bool32,
+    pub shader_sampled_image_array_non_uniform_indexing_native: Bool32,
+    pub shader_storage_buffer_array_non_uniform_indexing_native: Bool32,
+    pub shader_storage_image_array_non_uniform_indexing_native: Bool32,
+    pub shader_input_attachment_array_non_uniform_indexing_native: Bool32,
+    pub robust_buffer_access_update_after_bind: Bool32,
+    pub quad_divergent_implicit_lod: Bool32,
+    pub max_per_stage_descriptor_update_after_bind_samplers: u32,
+    pub max_per_stage_descriptor_update_after_bind_uniform_buffers: u32,
+    pub max_per_stage_descriptor_update_after_bind_storage_buffers: u32,
+    pub max_per_stage_descriptor_update_after_bind_sampled_images: u32,
+    pub max_per_stage_descriptor_update_after_bind_storage_images: u32,
+    pub max_per_stage_descriptor_update_after_bind_input_attachments: u32,
+    pub max_per_stage_update_after_bind_resources: u32,
+    pub max_descriptor_set_update_after_bind_samplers: u32,
+    pub max_descriptor_set_update_after_bind_uniform_buffers: u32,
+    pub max_descriptor_set_update_after_bind_uniform_buffers_dynamic: u32,
+    pub max_descriptor_set_update_after_bind_storage_buffers: u32,
+    pub max_descriptor_set_update_after_bind_storage_buffers_dynamic: u32,
+    pub max_descriptor_set_update_after_bind_sampled_images: u32,
+    pub max_descriptor_set_update_after_bind_storage_images: u32,
+    pub max_descriptor_set_update_after_bind_input_attachments: u32,
+    pub supported_depth_resolve_modes: ResolveModeFlags,
+    pub supported_stencil_resolve_modes: ResolveModeFlags,
+    pub independent_resolve_none: Bool32,
+    pub independent_resolve: Bool32,
+    pub filter_minmax_single_component_formats: Bool32,
+    pub filter_minmax_image_component_mapping: Bool32,
+    pub max_timeline_semaphore_value_difference: u64,
+    pub framebuffer_integer_color_sample_counts: SampleCountFlags,
+}
+
+impl Default for PhysicalDeviceVulkan12Properties {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::PhysicalDeviceVulkan12Properties;
+        x
+    }
+}
+
+#[repr(C)]
+pub struct PhysicalDeviceVulkan13Properties {
+    pub _type: StructureType,
+    pub _next: *mut c_void,
+    pub min_subgroup_size: u32,
+    pub max_subgroup_size: u32,
+    pub max_compute_workgroup_subgroups: u32,
+    pub required_subgroup_size_stages: ShaderStageFlags,
+    pub max_inline_uniform_block_size: u32,
+    pub max_per_stage_descriptor_inline_uniform_blocks: u32,
+    pub max_per_stage_descriptor_update_after_bind_inline_uniform_blocks: u32,
+    pub max_descriptor_set_inline_uniform_blocks: u32,
+    pub max_descriptor_set_update_after_bind_inline_uniform_blocks: u32,
+    pub max_inline_uniform_total_size: u32,
+    pub integer_dot_product_8bit_unsigned_accelerated: Bool32,
+    pub integer_dot_product_8bit_signed_accelerated: Bool32,
+    pub integer_dot_product_8bit_mixed_signedness_accelerated: Bool32,
+    pub integer_dot_product_4x8bit_packed_unsigned_accelerated: Bool32,
+    pub integer_dot_product_4x8bit_packed_signed_accelerated: Bool32,
+    pub integer_dot_product_4x8bit_packed_mixed_signedness_accelerated: Bool32,
+    pub integer_dot_product_16bit_unsigned_accelerated: Bool32,
+    pub integer_dot_product_16bit_signed_accelerated: Bool32,
+    pub integer_dot_product_16bit_mixed_signedness_accelerated: Bool32,
+    pub integer_dot_product_32bit_unsigned_accelerated: Bool32,
+    pub integer_dot_product_32bit_signed_accelerated: Bool32,
+    pub integer_dot_product_32bit_mixed_signedness_accelerated: Bool32,
+    pub integer_dot_product_64bit_unsigned_accelerated: Bool32,
+    pub integer_dot_product_64bit_signed_accelerated: Bool32,
+    pub integer_dot_product_64bit_mixed_signedness_accelerated: Bool32,
+    pub integer_dot_product_accumulating_saturating_8bit_unsigned_accelerated: Bool32,
+    pub integer_dot_product_accumulating_saturating_8bit_signed_accelerated: Bool32,
+    pub integer_dot_product_accumulating_saturating_8bit_mixed_signedness_accelerated: Bool32,
+    pub integer_dot_product_accumulating_saturating_4x8bit_packed_unsigned_accelerated: Bool32,
+    pub integer_dot_product_accumulating_saturating_4x8bit_packed_signed_accelerated: Bool32,
+    pub integer_dot_product_accumulating_saturating_4x8bit_packed_mixed_signedness_accelerated:
+        Bool32,
+    pub integer_dot_product_accumulating_saturating_16bit_unsigned_accelerated: Bool32,
+    pub integer_dot_product_accumulating_saturating_16bit_signed_accelerated: Bool32,
+    pub integer_dot_product_accumulating_saturating_16bit_mixed_signedness_accelerated: Bool32,
+    pub integer_dot_product_accumulating_saturating_32bit_unsigned_accelerated: Bool32,
+    pub integer_dot_product_accumulating_saturating_32bit_signed_accelerated: Bool32,
+    pub integer_dot_product_accumulating_saturating_32bit_mixed_signedness_accelerated: Bool32,
+    pub integer_dot_product_accumulating_saturating_64bit_unsigned_accelerated: Bool32,
+    pub integer_dot_product_accumulating_saturating_64bit_signed_accelerated: Bool32,
+    pub integer_dot_product_accumulating_saturating_64bit_mixed_signedness_accelerated: Bool32,
+    pub storage_texel_buffer_offset_alignment_bytes: DeviceSize,
+    pub storage_texel_buffer_offset_single_texel_alignment: Bool32,
+    pub uniform_texel_buffer_offset_alignment_bytes: DeviceSize,
+    pub uniform_texel_buffer_offset_single_texel_alignment: Bool32,
+    pub max_buffer_size: DeviceSize,
+}
+
+impl Default for PhysicalDeviceVulkan13Properties {
+    fn default() -> Self {
+        let mut x = unsafe { MaybeUninit::<Self>::zeroed().assume_init() };
+        x._type = StructureType::PhysicalDeviceVulkan13Properties;
+        x
+    }
+}