diff --git a/Cargo.toml b/Cargo.toml index 9d4a58c..017fdee 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -10,3 +10,18 @@ keywords = ["header", "heap", "vec", "vector", "graph"] categories = ["no-std"] license = "MIT" readme = "README.md" + +[features] +default = ["std", "atomic_append"] +std = [] +atomic_append = [] + +[dev-dependencies] +xmacro = "0.2.0" + +# include debug info for flamgraph and other profiling tools +[profile.bench] +debug = true + +[dependencies] +mutants = "0.0.3" diff --git a/README.md b/README.md index 909497e..da6227e 100644 --- a/README.md +++ b/README.md @@ -3,3 +3,13 @@ Allows one to store a header struct and a vector all inline in the same memory on the heap and share weak versions for minimizing random lookups in data structures If you use this without creating a weak ptr, it is safe. It is unsafe to create a weak pointer because you now have aliasing. + +## Features + +* `std` + Enables API's that requires stdlib features. Provides more compatibility to `Vec`. + This feature is enabled by default. +* `atomic_append` + Enables the `atomic_push` API's that allows extending a `HeaderVec` with interior + mutability from a single thread by a immutable handle. + This feature is enabled by default. diff --git a/benches/compare_std_vec.rs b/benches/compare_std_vec.rs index 0d84314..b4b11a5 100644 --- a/benches/compare_std_vec.rs +++ b/benches/compare_std_vec.rs @@ -2,6 +2,7 @@ extern crate std; extern crate test; +use xmacro::xmacro; use header_vec::*; use test::Bencher; @@ -126,3 +127,135 @@ fn test_regular_vec_create(b: &mut Bencher) { // acc // }); // } + +xmacro! { + $[ + benchfunc: type: + hv_create_bench (HeaderVec::<(), _>) + vec_create_bench (Vec) + ] + + fn $benchfunc(b: &mut Bencher, init: &[T]) + where + T: Clone + Default, + { + b.iter(|| { + let v = $type::from(init); + v + }); + } +} + +xmacro! { + // benching construction times. + $[ + bench: init: + small [123; 1] + middle [123; 1000] + large [[123;32]; 100000] + ] + + #[bench] + fn $+bench_hv_create_$bench(b: &mut Bencher) { + hv_create_bench(b, &$init); + } + + #[bench] + fn $+bench_vec_create_$bench(b: &mut Bencher) { + vec_create_bench(b, &$init); + } +} + +#[cfg(feature = "std")] +mod stdbench { + use super::*; + use std::ops::RangeBounds; + + xmacro! { + $[ + benchfunc: type: + hv_drain_bench (HeaderVec::<(), _>) + vec_drain_bench (Vec) + ] + + fn $benchfunc(b: &mut Bencher, init: &[T], range: R) + where + T: Clone + Default, + R: RangeBounds + Clone, + { + b.iter(|| { + let mut v = $type::from(init); + v.drain(range.clone()); + v + }); + } + } + + xmacro! { + $[ + bench: init: range: + begin [123; 10000] (..5000) + middle [123; 10000] (1000..5000) + end [123; 10000] (1000..) + ] + + #[bench] + fn $+bench_hv_drain_$bench(b: &mut Bencher) { + hv_drain_bench(b, &$init, $range); + } + + #[bench] + fn $+bench_vec_drain_$bench(b: &mut Bencher) { + vec_drain_bench(b, &$init, $range); + } + } + + xmacro! { + $[ + benchfunc: type: + hv_splice_bench (HeaderVec::<(), _>) + vec_splice_bench (Vec) + ] + + fn $benchfunc(b: &mut Bencher, init: &[T], range: R, replace_with: I) + where + T: Clone, + R: RangeBounds + Clone, + I: IntoIterator + Clone, + { + b.iter(|| { + let mut v = $type::from(init); + v.splice(range.clone(), replace_with.clone()); + v + }); + } + } + + xmacro! 
{ + $[ + bench: init: range: replace_with: + nop [123; 10000] (0..0) [] + insert [123; 10000] (1000..1000) [123; 5000] + insert_big [[123;64]; 10000] (1000..1000) [[123; 64]; 5000] + remove [123; 10000] (1000..6000) [] + middle_shorter [123; 10000] (4000..5000) [234; 500] + middle_longer [123; 10000] (4000..5000) [345; 2000] + middle_same [123; 10000] (4000..5000) [456; 1000] + end_shorter [123; 10000] (9000..) [234; 500] + end_longer [123; 10000] (9000..) [345; 2000] + end_same [123; 10000] (9000..) [456; 1000] + append_big [[123;64]; 10000] (10000..) [[456; 64]; 5000] + append_front_big [[123;64]; 100000] (0..0) [[456; 64]; 1] + ] + + #[bench] + fn $+bench_hv_splice_$bench(b: &mut Bencher) { + hv_splice_bench(b, &$init, $range, $replace_with) + } + + #[bench] + fn $+bench_vec_splice_$bench(b: &mut Bencher) { + vec_splice_bench(b, &$init, $range, $replace_with) + } + } +} diff --git a/src/drain.rs b/src/drain.rs new file mode 100644 index 0000000..8a720e6 --- /dev/null +++ b/src/drain.rs @@ -0,0 +1,226 @@ +#![cfg(feature = "std")] + +use core::{ + any::type_name, + fmt, + mem::{self}, + ptr::{self, NonNull}, +}; + +use std::{iter::FusedIterator, mem::ManuallyDrop, slice}; + +use crate::HeaderVec; + +/// A draining iterator for `HeaderVec`. +/// +/// This `struct` is created by [`HeaderVec::drain`]. +/// See its documentation for more. +/// +/// # Feature compatibility +/// +/// The `drain()` API and [`Drain`] iterator are only available when the `std` feature is +/// enabled. +/// +/// # Example +/// +/// ``` +/// # use header_vec::HeaderVec; +/// let mut hv: HeaderVec<(), _> = HeaderVec::from([0, 1, 2]); +/// let iter: header_vec::Drain<'_, _, _> = hv.drain(..); +/// ``` +pub struct Drain<'a, H, T> { + /// Index of tail to preserve + pub(super) tail_start: usize, + /// End index of tail to preserve + pub(super) tail_len: usize, + /// Current remaining range to remove + pub(super) iter: slice::Iter<'a, T>, + pub(super) vec: NonNull>, +} + +impl fmt::Debug for Drain<'_, H, T> { + #[mutants::skip] + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct(&format!( + "Drain<{}, {}>", + type_name::(), + type_name::() + )) + .field("header", unsafe { self.vec.as_ref() }) + .field("iter", &self.iter.as_slice()) + .finish() + } +} + +impl Drain<'_, H, T> { + /// Returns the remaining items of this iterator as a slice. + /// + /// # Examples + /// + /// ``` + /// # use header_vec::HeaderVec; + /// let mut hv: HeaderVec<(), _> = HeaderVec::from(['a', 'b', 'c']); + /// let mut drain = hv.drain(..); + /// assert_eq!(drain.as_slice(), &['a', 'b', 'c']); + /// let _ = drain.next().unwrap(); + /// assert_eq!(drain.as_slice(), &['b', 'c']); + /// ``` + #[must_use] + pub fn as_slice(&self) -> &[T] { + self.iter.as_slice() + } + + /// Keep unyielded elements in the source `HeaderVec`. + /// + /// # Examples + /// + /// ``` + /// # use header_vec::HeaderVec; + /// let mut hv: HeaderVec<(), _> = HeaderVec::from(['a', 'b', 'c']); + /// let mut drain = hv.drain(..); + /// + /// assert_eq!(drain.next().unwrap(), 'a'); + /// + /// // This call keeps 'b' and 'c' in the vec. + /// drain.keep_rest(); + /// + /// // If we wouldn't call `keep_rest()`, + /// // `hv` would be empty. 
+ /// assert_eq!(hv.as_slice(), ['b', 'c']); + /// ``` + pub fn keep_rest(self) { + let mut this = ManuallyDrop::new(self); + + unsafe { + let source_vec = this.vec.as_mut(); + + let start = source_vec.len(); + let tail = this.tail_start; + + let unyielded_len = this.iter.len(); + let unyielded_ptr = this.iter.as_slice().as_ptr(); + + let start_ptr = source_vec.as_mut_ptr().add(start); + + // memmove back unyielded elements + if unyielded_ptr != start_ptr { + let src = unyielded_ptr; + let dst = start_ptr; + + ptr::copy(src, dst, unyielded_len); + } + + // memmove back untouched tail + if tail != (start + unyielded_len) { + let src = source_vec.as_ptr().add(tail); + let dst = start_ptr.add(unyielded_len); + ptr::copy(src, dst, this.tail_len); + } + + source_vec.set_len(start + unyielded_len + this.tail_len); + } + } +} + +impl AsRef<[T]> for Drain<'_, H, T> { + fn as_ref(&self) -> &[T] { + self.as_slice() + } +} + +unsafe impl Sync for Drain<'_, H, T> {} +unsafe impl Send for Drain<'_, H, T> {} + +impl Iterator for Drain<'_, H, T> { + type Item = T; + + #[inline] + fn next(&mut self) -> Option { + self.iter + .next() + .map(|elt| unsafe { ptr::read(elt as *const _) }) + } + + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } +} + +impl DoubleEndedIterator for Drain<'_, H, T> { + #[inline] + fn next_back(&mut self) -> Option { + self.iter + .next_back() + .map(|elt| unsafe { ptr::read(elt as *const _) }) + } +} + +impl Drop for Drain<'_, H, T> { + fn drop(&mut self) { + /// Moves back the un-`Drain`ed elements to restore the original `Vec`. + struct DropGuard<'r, 'a, H, T>(&'r mut Drain<'a, H, T>); + + impl Drop for DropGuard<'_, '_, H, T> { + fn drop(&mut self) { + if self.0.tail_len > 0 { + unsafe { + let source_vec = self.0.vec.as_mut(); + // memmove back untouched tail, update to new length + let start = source_vec.len(); + let tail = self.0.tail_start; + if tail != start { + let src = source_vec.as_ptr().add(tail); + let dst = source_vec.as_mut_ptr().add(start); + ptr::copy(src, dst, self.0.tail_len); + } + source_vec.set_len(start + self.0.tail_len); + } + } + } + } + + let iter = mem::take(&mut self.iter); + let drop_len = iter.len(); + + let mut vec = self.vec; + + // ensure elements are moved back into their appropriate places, even when drop_in_place panics + let _guard = DropGuard(self); + + if drop_len == 0 { + return; + } + + // as_slice() must only be called when iter.len() is > 0 because + // it also gets touched by vec::Splice which may turn it into a dangling pointer + // which would make it and the vec pointer point to different allocations which would + // lead to invalid pointer arithmetic below. + let drop_ptr = iter.as_slice().as_ptr(); + + unsafe { + // drop_ptr comes from a slice::Iter which only gives us a &[T] but for drop_in_place + // a pointer with mutable provenance is necessary. Therefore we must reconstruct + // it from the original vec but also avoid creating a &mut to the front since that could + // invalidate raw pointers to it which some unsafe code might rely on. 
+ let vec_ptr = vec.as_mut().as_mut_ptr(); + + // PLANNED: let drop_offset = drop_ptr.sub_ptr(vec_ptr); is in nightly + let drop_offset = usize::try_from(drop_ptr.offset_from(vec_ptr)).unwrap_unchecked(); + let to_drop = ptr::slice_from_raw_parts_mut(vec_ptr.add(drop_offset), drop_len); + ptr::drop_in_place(to_drop); + } + } +} + +impl FusedIterator for Drain<'_, H, T> {} + +// PLANNED: unstable features +// impl ExactSizeIterator for Drain<'_, H, T> { +// fn is_empty(&self) -> bool { +// self.iter.is_empty() +// } +// } +// +// #[unstable(feature = "trusted_len", issue = "37572")] +// unsafe impl TrustedLen for Drain<'_, H, T> {} +// diff --git a/src/future_slice.rs b/src/future_slice.rs new file mode 100644 index 0000000..b6311a0 --- /dev/null +++ b/src/future_slice.rs @@ -0,0 +1,61 @@ +//! This module re-implements a unstable slice functions, these should be removed once they +//! are stabilized. These is copy-pasted with slight modifications from std::slice for +//! functions that do not need language magic. + +use std::ops; + +#[track_caller] +#[must_use] +pub(crate) fn range(range: R, bounds: ops::RangeTo) -> ops::Range +where + R: ops::RangeBounds, +{ + let len = bounds.end; + + let start = match range.start_bound() { + ops::Bound::Included(&start) => start, + ops::Bound::Excluded(start) => start + .checked_add(1) + .unwrap_or_else(|| slice_start_index_overflow_fail()), + ops::Bound::Unbounded => 0, + }; + + let end = match range.end_bound() { + ops::Bound::Included(end) => end + .checked_add(1) + .unwrap_or_else(|| slice_end_index_overflow_fail()), + ops::Bound::Excluded(&end) => end, + ops::Bound::Unbounded => len, + }; + + if start > end { + slice_index_order_fail(start, end); + } + if end > len { + slice_end_index_len_fail(end, len); + } + + ops::Range { start, end } +} + +#[track_caller] +const fn slice_start_index_overflow_fail() -> ! { + panic!("attempted to index slice from after maximum usize"); +} + +#[track_caller] +const fn slice_end_index_overflow_fail() -> ! { + panic!("attempted to index slice up to maximum usize"); +} + +#[track_caller] +fn slice_index_order_fail(index: usize, end: usize) -> ! { + panic!("slice index start is larger than end, slice index starts at {index} but ends at {end}") +} + +#[track_caller] +fn slice_end_index_len_fail(index: usize, len: usize) -> ! { + panic!( + "slice end index is out of range for slice, range end index {index} out of range for slice of length {len}" + ) +} diff --git a/src/lib.rs b/src/lib.rs index 56968dd..7293d9a 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,23 +1,62 @@ -#![no_std] +#![cfg_attr(not(feature = "std"), no_std)] extern crate alloc; use core::{ - cmp, + convert::{AsRef, From}, fmt::Debug, - marker::PhantomData, - mem::{self, ManuallyDrop}, + mem::{self, ManuallyDrop, MaybeUninit}, ops::{Deref, DerefMut, Index, IndexMut}, ptr, + ptr::NonNull, + slice, slice::SliceIndex, }; +#[cfg(feature = "std")] +use std::{ + // core::range::RangeBounds is unstable, we have to rely on std + ops::{Range, RangeBounds}, +}; + +mod weak; +pub use weak::HeaderVecWeak; + +mod drain; +#[cfg(feature = "std")] +pub use drain::Drain; + +mod splice; +#[cfg(feature = "std")] +pub use splice::Splice; + +// To implement std/Vec compatibility we would need a few nightly features. +// For the time being we just reimplement them here until they become stabilized. 
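For illustration only (not part of the patch): the observable effect of the `Drain` drop logic added in `src/drain.rs` above. Items not yielded before the iterator is dropped are still removed, and the preserved tail is shifted back into place. The sketch uses only APIs introduced in this diff and assumes the default `std` feature.

```rust
use header_vec::HeaderVec;

fn main() {
    let mut hv: HeaderVec<(), i32> = HeaderVec::from([0, 1, 2, 3, 4, 5]);
    {
        let mut d = hv.drain(1..4);
        assert_eq!(d.next(), Some(1)); // consume only one of the three drained items
        // dropping `d` here still removes 2 and 3, then moves the tail (4, 5) back
    }
    assert_eq!(hv.as_slice(), &[0, 4, 5]);
}
```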
+#[cfg(feature = "std")]
+mod future_slice;
+
+#[cfg(feature = "atomic_append")]
+use core::sync::atomic::{AtomicUsize, Ordering};
+
+/// A closure that is called when a `HeaderVec` is reallocated.
+/// This closure is responsible for updating weak nodes.
+pub type WeakFixupFn<'a> = &'a mut dyn FnMut(*const ());
+
 struct HeaderVecHeader<H> {
     head: H,
     capacity: usize,
+    #[cfg(feature = "atomic_append")]
+    len: AtomicUsize,
+    #[cfg(not(feature = "atomic_append"))]
     len: usize,
 }
 
+// This union will be properly aligned and sized to store headers followed by T's.
+union AlignedHeader<H, T> {
+    _header: ManuallyDrop<HeaderVecHeader<H>>,
+    _data: ManuallyDrop<[T; 0]>,
+}
+
 /// A vector with a header of your choosing behind a thin pointer
 ///
 /// # Example
@@ -37,12 +76,11 @@ struct HeaderVecHeader<H> {
 /// hv.push('z');
 /// ```
 ///
-/// [`HeaderVec`] itself consists solely of a pointer, it's only 8 bytes big.
+/// [`HeaderVec`] itself consists solely of a non-null pointer; it's only 8 bytes big.
 /// All of the data, like our header `OurHeaderType { a: 2 }`, the length of the vector: `2`,
 /// and the contents of the vector `['x', 'z']` resides on the other side of the pointer.
 pub struct HeaderVec<H, T> {
-    ptr: *mut T,
-    _phantom: PhantomData<H>,
+    ptr: NonNull<AlignedHeader<H, T>>,
 }
 
 impl<H, T> HeaderVec<H, T> {
@@ -51,21 +89,19 @@ impl<H, T> HeaderVec<H, T> {
     }
 
     pub fn with_capacity(capacity: usize, head: H) -> Self {
-        assert!(capacity > 0, "HeaderVec capacity cannot be 0");
-        // Allocate the initial memory, which is unititialized.
+        const { assert!(mem::size_of::<T>() > 0, "HeaderVec does not support ZST's") };
+
+        // Allocate the initial memory, which is uninitialized.
         let layout = Self::layout(capacity);
-        let ptr = unsafe { alloc::alloc::alloc(layout) } as *mut T;
+        let ptr = unsafe { alloc::alloc::alloc(layout) } as *mut AlignedHeader<H, T>;
 
-        // Handle out-of-memory.
-        if ptr.is_null() {
+        let Some(ptr) = NonNull::new(ptr) else {
+            // Handle out-of-memory.
             alloc::alloc::handle_alloc_error(layout);
-        }
+        };
 
         // Create self.
-        let mut this = Self {
-            ptr,
-            _phantom: PhantomData,
-        };
+        let mut this = Self { ptr };
 
         // Set the header.
         let header = this.header_mut();
@@ -74,47 +110,132 @@ impl<H, T> HeaderVec<H, T> {
         unsafe { core::ptr::write(&mut header.head, head) };
         // These primitive types don't have drop implementations.
         header.capacity = capacity;
-        header.len = 0;
+        header.len = 0usize.into();
 
         this
     }
 
+    /// Creates a new `HeaderVec` with the given header from owned elements.
+    /// This function consumes elements from an `IntoIterator` and creates
+    /// a `HeaderVec` from them. See [`from_header_slice()`], which creates a `HeaderVec`
+    /// by cloning elements from a slice.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// # use header_vec::HeaderVec;
+    /// let hv = HeaderVec::from_header_elements(42, [1, 2, 3]);
+    /// assert_eq!(hv.as_slice(), [1, 2, 3]);
+    /// ```
+    pub fn from_header_elements(header: H, elements: impl IntoIterator<Item = T>) -> Self {
+        let iter = elements.into_iter();
+        let mut hv = HeaderVec::with_capacity(iter.size_hint().0, header);
+        hv.extend(iter);
+        hv
+    }
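For illustration only (not part of the patch): how `from_header_elements` above differs from `from_header_slice`, which is added further down in this diff. The former consumes an iterator of owned items; the latter clones out of anything that is `AsRef<[T]>`, leaving the source intact.

```rust
use header_vec::HeaderVec;

fn main() {
    // `from_header_elements` takes ownership of the items it is given.
    let owned: HeaderVec<&str, String> =
        HeaderVec::from_header_elements("names", vec![String::from("a"), String::from("b")]);
    assert_eq!(owned.len(), 2);
    assert_eq!(owned[0], "a");

    // `from_header_slice` clones from a borrowed slice; the source stays usable.
    let source = [1, 2, 3];
    let cloned: HeaderVec<(), i32> = HeaderVec::from_header_slice((), &source);
    assert_eq!(cloned.as_slice(), &[1, 2, 3]);
    assert_eq!(source, [1, 2, 3]);
}
```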
+
+    /// Get the length of the vector from a mutable reference. When one has a `&mut HeaderVec`,
+    /// this method is always exact and can be slightly faster than the non-mutable `len()`.
+    #[cfg(feature = "atomic_append")]
+    #[mutants::skip]
+    #[inline(always)]
+    pub fn len_exact(&mut self) -> usize {
+        *self.header_mut().len.get_mut()
+    }
+    #[cfg(not(feature = "atomic_append"))]
+    #[mutants::skip]
+    #[inline(always)]
+    pub fn len_exact(&mut self) -> usize {
+        self.header_mut().len
+    }
+
+    /// This gives the length of the `HeaderVec`. This is the non-synchronized variant; it may
+    /// produce racy results in case another thread atomically appended to
+    /// `&self`. Nevertheless it is always safe to use.
+    #[cfg(feature = "atomic_append")]
+    #[mutants::skip]
     #[inline(always)]
     pub fn len(&self) -> usize {
+        self.len_atomic_relaxed()
+    }
+    #[cfg(not(feature = "atomic_append"))]
+    #[mutants::skip]
+    #[inline(always)]
+    pub fn len(&self) -> usize {
+        self.header().len
+    }
+
+    /// This gives the length of the `HeaderVec`. With `atomic_append` enabled this gives an
+    /// exact result *after* another thread atomically appended to this `HeaderVec`. It still
+    /// requires synchronization because the length may be invalidated when another thread
+    /// atomically appends data to this `HeaderVec` while we still work with the result of
+    /// this method.
+    #[cfg(not(feature = "atomic_append"))]
+    #[mutants::skip]
+    #[inline(always)]
+    pub fn len_strict(&self) -> usize {
         self.header().len
     }
+    #[cfg(feature = "atomic_append")]
+    #[mutants::skip]
+    #[inline(always)]
+    pub fn len_strict(&self) -> usize {
+        self.len_atomic_acquire()
+    }
+
+    /// Check whether a `HeaderVec` is empty. This uses a `&mut self` reference; it is
+    /// always exact and may be slightly faster than the non-mutable variant.
+    #[inline(always)]
+    pub fn is_empty_exact(&mut self) -> bool {
+        self.len_exact() == 0
+    }
+
+    /// Check whether a `HeaderVec` is empty. This uses a `&self` reference and may be racy
+    /// when another thread atomically appended to this `HeaderVec`.
     #[inline(always)]
     pub fn is_empty(&self) -> bool {
         self.len() == 0
     }
 
+    /// Check whether a `HeaderVec` is empty. See [`len_strict()`] for the exactness guarantees.
+    #[inline(always)]
+    pub fn is_empty_strict(&self) -> bool {
+        self.len_strict() == 0
+    }
+
     #[inline(always)]
     pub fn capacity(&self) -> usize {
         self.header().capacity
     }
 
+    /// This is the number of elements that can be added to the `HeaderVec` without reallocation.
+    #[inline(always)]
+    pub fn spare_capacity(&self) -> usize {
+        self.header().capacity - self.len_strict()
+    }
+
     #[inline(always)]
     pub fn as_slice(&self) -> &[T] {
-        unsafe { core::slice::from_raw_parts(self.start_ptr(), self.len()) }
+        unsafe { core::slice::from_raw_parts(self.as_ptr(), self.len_strict()) }
     }
 
     #[inline(always)]
     pub fn as_mut_slice(&mut self) -> &mut [T] {
-        unsafe { core::slice::from_raw_parts_mut(self.start_ptr_mut(), self.len()) }
+        unsafe { core::slice::from_raw_parts_mut(self.as_mut_ptr(), self.len_exact()) }
     }
 
     /// This is useful to check if two nodes are the same. Use it with [`HeaderVec::is`].
     #[inline(always)]
     pub fn ptr(&self) -> *const () {
-        self.ptr as *const ()
+        self.ptr.as_ptr() as *const ()
     }
 
     /// This is used to check if this is the `HeaderVec` that corresponds to the given pointer.
     /// This is useful for updating weak references after [`HeaderVec::push`] returns the pointer.
     #[inline(always)]
     pub fn is(&self, ptr: *const ()) -> bool {
-        self.ptr as *const () == ptr
+        self.ptr() == ptr
     }
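For illustration only (not part of the patch): how `ptr()` and `is()` above can be used to detect that a reallocation moved the `HeaderVec`. This is exactly the situation the `*_with_weakfix` methods below hand to the fixup closure, which receives the old pointer.

```rust
use header_vec::HeaderVec;

fn main() {
    let mut hv: HeaderVec<(), i32> = HeaderVec::with_capacity(1, ());
    let before = hv.ptr();

    hv.push(1);             // fits into the reserved capacity
    assert!(hv.is(before)); // still the same allocation

    hv.push(2); // needs more room, so the buffer may be reallocated and moved
    if !hv.is(before) {
        // The allocation moved: weak references created from the old pointer
        // would now be dangling and would need to be fixed up.
    }
}
```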
 
     /// Create a (dangerous) weak reference to the `HeaderVec`. This is useful to be able
@@ -138,10 +259,7 @@ impl<H, T> HeaderVec<H, T> {
     #[inline(always)]
     pub unsafe fn weak(&self) -> HeaderVecWeak<H, T> {
         HeaderVecWeak {
-            header_vec: ManuallyDrop::new(Self {
-                ptr: self.ptr,
-                _phantom: PhantomData,
-            }),
+            header_vec: ManuallyDrop::new(Self { ptr: self.ptr }),
         }
     }
 
@@ -156,56 +274,188 @@ impl<H, T> HeaderVec<H, T> {
         self.ptr = weak.ptr;
     }
 
+    /// Reserves capacity for at least `additional` more elements to be inserted in the given `HeaderVec`.
+    #[inline]
+    pub fn reserve(&mut self, additional: usize) {
+        self.reserve_intern(additional, false, &mut None);
+    }
+
+    /// Reserves capacity for at least `additional` more elements to be inserted in the given `HeaderVec`.
+    /// This method must be used when `HeaderVecWeak` are used. It takes a closure that is responsible for
+    /// updating the weak references as additional parameter.
+    #[inline]
+    pub fn reserve_with_weakfix(&mut self, additional: usize, weak_fixup: WeakFixupFn) {
+        self.reserve_intern(additional, false, &mut Some(weak_fixup));
+    }
+
+    /// Reserves capacity for exactly `additional` more elements to be inserted in the given `HeaderVec`.
+    #[mutants::skip]
+    #[inline]
+    pub fn reserve_exact(&mut self, additional: usize) {
+        self.reserve_intern(additional, true, &mut None);
+    }
+
+    /// Reserves capacity for exactly `additional` more elements to be inserted in the given `HeaderVec`.
+    /// This method must be used when `HeaderVecWeak` are used. It takes a closure that is responsible for
+    /// updating the weak references as additional parameter.
+    #[mutants::skip]
+    #[inline]
+    pub fn reserve_exact_with_weakfix(&mut self, additional: usize, weak_fixup: WeakFixupFn) {
+        self.reserve_intern(additional, true, &mut Some(weak_fixup));
+    }
+
+    /// Reserves capacity for at least `additional` more elements to be inserted in the given `HeaderVec`.
+    #[inline(always)]
+    pub(crate) fn reserve_intern(
+        &mut self,
+        additional: usize,
+        exact: bool,
+        weak_fixup: &mut Option<WeakFixupFn>,
+    ) {
+        if self.spare_capacity() < additional {
+            let len = self.len_exact();
+            // using saturating_add here ensures that we get an allocation error instead of
+            // wrapping around and allocating a totally wrong size
+            unsafe { self.resize_cold(len.saturating_add(additional), exact, weak_fixup) };
+        }
+    }
+
+    /// Shrinks the capacity of the `HeaderVec` to `min_capacity` or `self.len()`, whichever is larger.
+    #[inline]
+    pub fn shrink_to(&mut self, min_capacity: usize) {
+        let requested_capacity = self.len_exact().max(min_capacity);
+        unsafe { self.resize_cold(requested_capacity, true, &mut None) };
+    }
+
+    /// Shrinks the capacity of the `HeaderVec` to `min_capacity` or `self.len()`, whichever is larger.
+    /// This method must be used when `HeaderVecWeak` are used. It takes a closure that is responsible for
+    /// updating the weak references as additional parameter.
+    #[inline]
+    pub fn shrink_to_with_weakfix(&mut self, min_capacity: usize, weak_fixup: WeakFixupFn) {
+        let requested_capacity = self.len_exact().max(min_capacity);
+        unsafe { self.resize_cold(requested_capacity, true, &mut Some(weak_fixup)) };
+    }
+
+    /// Resizes the vector to hold exactly `self.len()` elements.
+    #[mutants::skip]
+    #[inline(always)]
+    pub fn shrink_to_fit(&mut self) {
+        self.shrink_to(0);
+    }
+
+    /// Resizes the vector to hold exactly `self.len()` elements.
+    /// This method must be used when `HeaderVecWeak` are used. It takes a closure that is responsible for
+    /// updating the weak references as additional parameter.
+ #[mutants::skip] + #[inline(always)] + pub fn shrink_to_fit_with_weakfix(&mut self, weak_fixup: WeakFixupFn) { + self.shrink_to_with_weakfix(0, weak_fixup); + } + + /// Resize the vector to least `requested_capacity` elements. + /// Does exact resizing if `exact` is true. + /// + /// Returns `Some(*const ())` if the memory was moved to a new location. + /// + /// # Safety + /// + /// `requested_capacity` must be greater or equal than `self.len()` #[cold] - fn resize_insert(&mut self) -> Option<*const ()> { + unsafe fn resize_cold( + &mut self, + requested_capacity: usize, + exact: bool, + weak_fixup: &mut Option, + ) { + // For efficiency we do only a debug_assert here, this is a internal unsafe function + // it's contract should be already enforced by the caller which is under our control + debug_assert!( + self.len_exact() <= requested_capacity, + "requested capacity is less than current length" + ); let old_capacity = self.capacity(); - let new_capacity = old_capacity * 2; - // Set the new capacity. - self.header_mut().capacity = new_capacity; + + // Shortcut when nothing is to be done. + if requested_capacity == old_capacity { + return; + } + + let new_capacity = if requested_capacity > old_capacity { + if exact { + // exact growing + requested_capacity + } else if requested_capacity <= old_capacity * 2 { + // doubling the capacity is sufficient + old_capacity * 2 + } else if old_capacity > 0 { + // requested more than twice as much space, reserve the next multiple of + // old_capacity that is greater than the requested capacity. This gives headroom + // for new inserts while not doubling the memory requirement with bulk requests + (requested_capacity / old_capacity + 1).saturating_mul(old_capacity) + } else { + // special case when we start at capacity 0 + requested_capacity + } + } else if exact { + // exact shrinking + requested_capacity + } else { + unimplemented!() + // or: (has no public API yet) + // // shrink to the next power of two or self.capacity, whichever is smaller + // requested_capacity.next_power_of_two().min(self.capacity()) + }; // Reallocate the pointer. let ptr = unsafe { alloc::alloc::realloc( - self.ptr as *mut u8, + self.ptr() as *mut u8, Self::layout(old_capacity), Self::elems_to_mem_bytes(new_capacity), - ) as *mut T + ) as *mut AlignedHeader }; - // Handle out-of-memory. - if ptr.is_null() { + + let Some(ptr) = NonNull::new(ptr) else { + // Handle out-of-memory. alloc::alloc::handle_alloc_error(Self::layout(new_capacity)); - } + }; + // Check if the new pointer is different than the old one. let previous_pointer = if ptr != self.ptr { - // Give the user the old pointer so they can update everything. - Some(self.ptr as *const ()) + // Store old pointer for weak_fixup. + Some(self.ptr()) } else { None }; // Assign the new pointer. self.ptr = ptr; + // And set the new capacity. + self.header_mut().capacity = new_capacity; - previous_pointer + // Finally run the weak_fixup closure when provided + previous_pointer.map(|ptr| weak_fixup.as_mut().map(|weak_fixup| weak_fixup(ptr))); } /// Adds an item to the end of the list. - /// - /// Returns `true` if the memory was moved to a new location. - /// In this case, you are responsible for updating the weak nodes. - pub fn push(&mut self, item: T) -> Option<*const ()> { - let old_len = self.len(); + pub fn push(&mut self, item: T) { + self.push_intern(item, &mut None); + } + + /// Adds an item to the end of the list. + /// This method must be used when `HeaderVecWeak` are used. 
It takes a closure that is responsible for + /// updating the weak references as additional parameter. + pub fn push_with_weakfix(&mut self, item: T, weak_fixup: WeakFixupFn) { + self.push_intern(item, &mut Some(weak_fixup)); + } + + #[inline(always)] + fn push_intern(&mut self, item: T, weak_fixup: &mut Option) { + let old_len = self.len_exact(); let new_len = old_len + 1; - let old_capacity = self.capacity(); - // If it isn't big enough. - let previous_pointer = if new_len > old_capacity { - self.resize_insert() - } else { - None - }; + self.reserve_intern(1, false, weak_fixup); unsafe { - core::ptr::write(self.start_ptr_mut().add(old_len), item); + core::ptr::write(self.as_mut_ptr().add(old_len), item); } - self.header_mut().len = new_len; - previous_pointer + self.header_mut().len = new_len.into(); } /// Retains only the elements specified by the predicate. @@ -217,9 +467,9 @@ impl HeaderVec { // This keeps track of the length (and next position) of the contiguous retained elements // at the beginning of the vector. let mut head = 0; - let original_len = self.len(); + let original_len = self.len_exact(); // Get the offset of the beginning of the slice. - let start_ptr = self.start_ptr_mut(); + let start_ptr = self.as_mut_ptr(); // Go through each index. for index in 0..original_len { unsafe { @@ -239,15 +489,143 @@ impl HeaderVec { } } // The head now represents the new length of the vector. - self.header_mut().len = head; + self.header_mut().len = head.into(); + } + + /// Returns the remaining spare capacity of the vector as a slice of + /// `MaybeUninit`. + /// + /// The returned slice can be used to fill the vector with data (e.g. by + /// reading from a file) before marking the data as initialized using the + /// [`set_len`] method. + /// + pub fn spare_capacity_mut(&mut self) -> &mut [MaybeUninit] { + unsafe { + core::slice::from_raw_parts_mut( + self.end_ptr_mut() as *mut MaybeUninit, + self.spare_capacity(), + ) + } + } + + /// Forces the length of the headervec to `new_len`. + /// + /// This is a low-level operation that maintains none of the normal + /// invariants of the type. Normally changing the length of a vector + /// is done using one of the safe operations instead. Noteworthy is that + /// this method does not drop any of the elements that are removed when + /// shrinking the vector. + /// + /// # Safety + /// + /// - `new_len` must be less than or equal to [`capacity()`]. + /// - The elements at `old_len..new_len` must be initialized. + pub unsafe fn set_len(&mut self, new_len: usize) { + debug_assert!( + new_len <= self.capacity(), + "new_len [{new_len}] is greater than capacity [{}]", + self.capacity() + ); + self.header_mut().len = new_len.into(); + } + + /// Shortens a `HeaderVec`, keeping the first `len` elements and dropping + /// the rest. + /// + /// If `len` is greater or equal to the vector's current length, this has + /// no effect. + /// + /// The [`drain`] method can emulate `truncate`, but causes the excess + /// elements to be returned instead of dropped. + /// + /// Note that this method has no effect on the allocated capacity + /// of the vector. 
+ /// + /// # Examples + /// + /// Truncating a five element `HeaderVec` to two elements: + /// + /// ``` + /// use header_vec::HeaderVec; + /// let mut hv: HeaderVec<(), _> = HeaderVec::from([1, 2, 3, 4, 5]); + /// hv.truncate(2); + /// assert_eq!(hv.as_slice(), [1, 2]); + /// ``` + /// + /// No truncation occurs when `len` is greater than the vector's current + /// length: + /// + /// ``` + /// use header_vec::HeaderVec; + /// let mut hv: HeaderVec<(), _> = HeaderVec::from([1, 2, 3]); + /// hv.truncate(8); + /// assert_eq!(hv.as_slice(), [1, 2, 3]); + /// ``` + /// + /// Truncating when `len == 0` is equivalent to calling the [`clear`] + /// method. + /// + /// ``` + /// use header_vec::HeaderVec; + /// let mut hv: HeaderVec<(), _> = HeaderVec::from([1, 2, 3]); + /// hv.truncate(0); + /// assert_eq!(hv.as_slice(), []); + /// ``` + /// + /// [`clear`]: HeaderVec::clear + /// [`drain`]: HeaderVec::drain + #[mutants::skip] + pub fn truncate(&mut self, len: usize) { + unsafe { + let old_len = self.len_exact(); + if len > old_len { + return; + } + let remaining_len = old_len - len; + let s = ptr::slice_from_raw_parts_mut(self.as_mut_ptr().add(len), remaining_len); + self.header_mut().len = len.into(); + ptr::drop_in_place(s); + } + } + + /// Clears a `HeaderVec`, removing all values. + /// + /// Note that this method has no effect on the allocated capacity + /// of the vector. + /// + /// # Examples + /// + /// ``` + /// use header_vec::HeaderVec; + /// let mut hv: HeaderVec<(), _> = HeaderVec::from([1, 2, 3]); + /// + /// hv.clear(); + /// + /// assert!(hv.is_empty()); + /// ``` + #[inline] + pub fn clear(&mut self) { + let elems: *mut [T] = self.as_mut_slice(); + + // SAFETY: + // - `elems` comes directly from `as_mut_slice` and is therefore valid. + // - Setting the length before calling `drop_in_place` means that, + // if an element's `Drop` impl panics, the vector's `Drop` impl will + // do nothing (leaking the rest of the elements) instead of dropping + // some twice. + unsafe { + self.set_len(0); + ptr::drop_in_place(elems); + } } /// Gives the offset in units of T (as if the pointer started at an array of T) that the slice actually starts at. + #[mutants::skip] #[inline(always)] - fn offset() -> usize { + const fn offset() -> usize { // The first location, in units of size_of::(), that is after the header // It's the end of the header, rounded up to the nearest size_of::() - (mem::size_of::>() + mem::size_of::() - 1) / mem::size_of::() + (mem::size_of::>() - 1) / mem::size_of::() + 1 } /// Compute the number of elements (in units of T) to allocate for a given capacity. @@ -267,44 +645,356 @@ impl HeaderVec { fn layout(capacity: usize) -> alloc::alloc::Layout { alloc::alloc::Layout::from_size_align( Self::elems_to_mem_bytes(capacity), - cmp::max(mem::align_of::(), mem::align_of::()), + mem::align_of::>() ) .expect("unable to produce memory layout with Hrc key type (is it a zero sized type? they are not permitted)") } /// Gets the pointer to the start of the slice. #[inline(always)] - fn start_ptr(&self) -> *const T { - unsafe { self.ptr.add(Self::offset()) } + pub fn as_ptr(&self) -> *const T { + unsafe { (self.ptr() as *const T).add(Self::offset()) } } /// Gets the pointer to the start of the slice. #[inline(always)] - fn start_ptr_mut(&mut self) -> *mut T { - unsafe { self.ptr.add(Self::offset()) } + pub fn as_mut_ptr(&mut self) -> *mut T { + unsafe { (self.ptr() as *mut T).add(Self::offset()) } + } + + /// Gets the pointer to the end of the slice. 
This returns a mutable pointer to + /// uninitialized memory behind the last element. + #[inline(always)] + fn end_ptr_mut(&mut self) -> *mut T { + unsafe { self.as_mut_ptr().add(self.len_exact()) } } #[inline(always)] fn header(&self) -> &HeaderVecHeader { // The beginning of the memory is always the header. - unsafe { &*(self.ptr as *const HeaderVecHeader) } + unsafe { &*(self.ptr() as *const HeaderVecHeader) } } #[inline(always)] fn header_mut(&mut self) -> &mut HeaderVecHeader { // The beginning of the memory is always the header. - unsafe { &mut *(self.ptr as *mut HeaderVecHeader) } + unsafe { &mut *(self.ptr() as *mut HeaderVecHeader) } + } +} + +impl HeaderVec { + /// Creates a new `HeaderVec` with the given header from some data. + /// The data cloned from a `AsRef<[T]>`, see [`from_header_elements()`] for + /// constructing a `HeaderVec` from owned elements. + pub fn from_header_slice(header: H, slice: impl AsRef<[T]>) -> Self { + let slice = slice.as_ref(); + let mut hv = Self::with_capacity(slice.len(), header); + hv.extend_from_slice_intern(slice, &mut None); + hv + } + + /// Adds items from a slice to the end of the list. + pub fn extend_from_slice(&mut self, slice: impl AsRef<[T]>) { + self.extend_from_slice_intern(slice.as_ref(), &mut None) + } + + /// Adds items from a slice to the end of the list. + /// This method must be used when `HeaderVecWeak` are used. It takes a closure that is responsible for + /// updating the weak references as additional parameter. + pub fn extend_from_slice_with_weakfix( + &mut self, + slice: impl AsRef<[T]>, + weak_fixup: WeakFixupFn, + ) { + self.extend_from_slice_intern(slice.as_ref(), &mut Some(weak_fixup)); + } + + #[inline(always)] + fn extend_from_slice_intern(&mut self, slice: &[T], weak_fixup: &mut Option) { + self.reserve_intern(slice.len(), false, weak_fixup); + + // copy data + let end_ptr = self.end_ptr_mut(); + for (index, item) in slice.iter().enumerate() { + unsafe { + core::ptr::write(end_ptr.add(index), item.clone()); + } + } + // correct the len + self.header_mut().len = (self.len_exact() + slice.len()).into(); + } +} + +#[cfg(feature = "atomic_append")] +/// The atomic append API is only enabled when the `atomic_append` feature flag is set (which +/// is the default). The [`push_atomic()`] or [`extend_from_slice_atomic()`] methods then +/// become available and some internals using atomic operations. +/// +/// This API implements interior-mutable appending to a shared `HeaderVec`. To other threads +/// the appended elements are either not seen or all seen at once. Without additional +/// synchronization these appends are racy but memory safe. The intention behind this API is to +/// provide facilities for building other container abstractions the benefit from the shared +/// non blocking nature while being unaffected from the racy semantics or provide synchronization +/// on their own (Eg: reference counted data, interners, streaming parsers, etc). Since the +/// `HeaderVec` is a shared object and we have only a `&self`, it can not be reallocated and moved, +/// therefore appending can only be done within the reserved capacity. +/// +/// # Safety +/// +/// Only one single thread must try to [`push_atomic()`] or [`extend_from_slice_atomic()`] the +/// `HeaderVec` at at time using the atomic append API's. The actual implementations of this +/// restriction is left to the caller. This can be done by mutexes or guard objects. 
Or +/// simply by staying single threaded or ensuring somehow else that there is only a single +/// thread using the atomic_appending API. +impl HeaderVec { + /// Atomically adds an item to the end of the list without reallocation. + /// + /// # Errors + /// + /// If the vector is full, the item is returned. + /// + /// # Safety + /// + /// There must be only one thread calling this method at any time. Synchronization has to + /// be provided by the user. + pub unsafe fn push_atomic(&self, item: T) -> Result<(), T> { + // relaxed is good enough here because this should be the only thread calling this method. + let len = self.len_atomic_relaxed(); + if len < self.capacity() { + unsafe { + core::ptr::write(self.end_ptr_atomic_mut(), item); + }; + let len_again = self.len_atomic_add_release(1); + // in debug builds we check for races, the chance to catch these are still pretty minimal + debug_assert_eq!(len_again, len, "len was updated by another thread"); + Ok(()) + } else { + Err(item) + } + } + + /// Get the length of the vector with `Ordering::Acquire`. This ensures that the length is + /// properly synchronized after it got atomically updated. + #[inline(always)] + fn len_atomic_acquire(&self) -> usize { + self.header().len.load(Ordering::Acquire) + } + + /// Get the length of the vector with `Ordering::Relaxed`. This is useful for when you don't + /// need exact synchronization semantic. + #[inline(always)] + fn len_atomic_relaxed(&self) -> usize { + self.header().len.load(Ordering::Relaxed) + } + + /// Add `n` to the length of the vector atomically with `Ordering::Release`. + /// + /// # Safety + /// + /// Before incrementing the length of the vector, you must ensure that new elements are + /// properly initialized. + #[inline(always)] + unsafe fn len_atomic_add_release(&self, n: usize) -> usize { + self.header().len.fetch_add(n, Ordering::Release) + } + + /// Gets the pointer to the end of the slice. This returns a mutable pointer to + /// uninitialized memory behind the last element. + #[inline(always)] + fn end_ptr_atomic_mut(&self) -> *mut T { + unsafe { self.as_ptr().add(self.len_atomic_acquire()) as *mut T } + } +} + +#[cfg(feature = "atomic_append")] +impl HeaderVec { + /// Atomically add items from a slice to the end of the list. without reallocation + /// + /// # Errors + /// + /// If the vector is full, the item is returned. + /// + /// # Safety + /// + /// There must be only one thread calling this method at any time. Synchronization has to + /// be provided by the user. + pub unsafe fn extend_from_slice_atomic<'a>(&self, slice: &'a [T]) -> Result<(), &'a [T]> { + #[cfg(debug_assertions)] // only for the race check later + let len = self.len_atomic_relaxed(); + if self.spare_capacity() >= slice.len() { + // copy data + let end_ptr = self.end_ptr_atomic_mut(); + for (index, item) in slice.iter().enumerate() { + unsafe { + core::ptr::write(end_ptr.add(index), item.clone()); + } + } + // correct the len + let _len_again = self.len_atomic_add_release(slice.len()); + // in debug builds we check for races, the chance to catch these are still pretty minimal + #[cfg(debug_assertions)] + debug_assert_eq!(_len_again, len, "len was updated by another thread"); + Ok(()) + } else { + Err(slice) + } + } +} + +#[cfg(feature = "std")] +/// The methods that depend on stdlib features. +impl HeaderVec { + /// Removes the specified range from a `HeaderVec` in bulk, returning all + /// removed elements as an iterator. 
If the iterator is dropped before + /// being fully consumed, it drops the remaining removed elements. + /// + /// The returned iterator keeps a mutable borrow on the `HeaderVec` to optimize + /// its implementation. + /// + /// # Feature compatibility + /// + /// The `drain()` API and `Drain` iterator are only available when the `std` feature is + /// enabled. + /// + /// # Panics + /// + /// Panics if the starting point is greater than the end point or if + /// the end point is greater than the length of the vector. + /// + /// # Leaking + /// + /// If the returned iterator goes out of scope without being dropped (due to + /// [`mem::forget`], for example), the vector may have lost and leaked + /// elements arbitrarily, including elements outside the range. + /// + /// # Examples + /// + /// ``` + /// use header_vec::HeaderVec; + /// let mut v: HeaderVec<(), _> = HeaderVec::from(&[1, 2, 3]); + /// let u: Vec<_> = v.drain(1..).collect(); + /// assert_eq!(v.as_slice(), &[1]); + /// assert_eq!(u.as_slice(), &[2, 3]); + /// + /// // A full range clears the vector, like `clear()` does + /// v.drain(..); + /// assert_eq!(v.as_slice(), &[]); + /// ``` + pub fn drain(&mut self, range: R) -> Drain<'_, H, T> + where + R: RangeBounds, + { + // Memory safety + // + // When the Drain is first created, it shortens the length of + // the source vector to make sure no uninitialized or moved-from elements + // are accessible at all if the Drain's destructor never gets to run. + // + // Drain will ptr::read out the values to remove. + // When finished, remaining tail of the vec is copied back to cover + // the hole, and the vector length is restored to the new length. + // + let len = self.len(); + let Range { start, end } = future_slice::range(range, ..len); + + unsafe { + // set self.vec length's to start, to be safe in case Drain is leaked + self.set_len(start); + let range_slice = slice::from_raw_parts(self.as_ptr().add(start), end - start); + Drain { + tail_start: end, + tail_len: len - end, + iter: range_slice.iter(), + vec: NonNull::from(self), + } + } + } + + /// Creates a splicing iterator that replaces the specified range in the vector + /// with the given `replace_with` iterator and yields the removed items. + /// `replace_with` does not need to be the same length as `range`. + /// + /// `range` is removed even if the iterator is not consumed until the end. + /// + /// It is unspecified how many elements are removed from the vector + /// if the `Splice` value is leaked. + /// + /// The input iterator `replace_with` is only consumed when the `Splice` value is dropped. + /// + /// This is optimal if: + /// + /// * The tail (elements in the vector after `range`) is empty, + /// * or `replace_with` yields fewer or equal elements than `range`’s length + /// * or the lower bound of its `size_hint()` is exact. + /// + /// Otherwise, a temporary vector is allocated to store the tail elements which are in the way. + /// + /// # Panics + /// + /// Panics if the starting point is greater than the end point or if + /// the end point is greater than the length of the vector. 
+ /// + /// # Examples + /// + /// ``` + /// use header_vec::HeaderVec; + /// let mut hv: HeaderVec<(), i32> = HeaderVec::from([1, 2, 3, 4]); + /// let new = [7, 8, 9]; + /// let u: Vec<_> = hv.splice(1..3, new).collect(); + /// assert_eq!(hv.as_slice(), [1, 7, 8, 9, 4]); + /// assert_eq!(u, [2, 3]); + /// ``` + #[inline] + pub fn splice(&mut self, range: R, replace_with: I) -> Splice<'_, H, I::IntoIter> + where + R: RangeBounds, + I: IntoIterator, + { + self.splice_internal(range, replace_with, None) + } + + /// Creates a splicing iterator like [`splice()`]. + /// This method must be used when `HeaderVecWeak` are used. It takes a closure that is responsible for + /// updating the weak references as additional parameter. + #[inline] + pub fn splice_with_weakfix<'a, R, I>( + &'a mut self, + range: R, + replace_with: I, + weak_fixup: WeakFixupFn<'a>, + ) -> Splice<'a, H, I::IntoIter> + where + R: RangeBounds, + I: IntoIterator, + { + self.splice_internal(range, replace_with, Some(weak_fixup)) + } + + #[inline(always)] + fn splice_internal<'a, R, I>( + &'a mut self, + range: R, + replace_with: I, + weak_fixup: Option>, + ) -> Splice<'a, H, I::IntoIter> + where + R: RangeBounds, + I: IntoIterator, + { + Splice { + drain: self.drain(range), + replace_with: replace_with.into_iter(), + weak_fixup, + } } } impl Drop for HeaderVec { fn drop(&mut self) { unsafe { + ptr::drop_in_place(self.as_mut_slice()); ptr::drop_in_place(&mut self.header_mut().head); - for ix in 0..self.len() { - ptr::drop_in_place(self.start_ptr_mut().add(ix)); - } - alloc::alloc::dealloc(self.ptr as *mut u8, Self::layout(self.capacity())); + alloc::alloc::dealloc(self.ptr() as *mut u8, Self::layout(self.capacity())); } } } @@ -363,7 +1053,7 @@ where T: Clone, { fn clone(&self) -> Self { - let mut new_vec = Self::with_capacity(self.len(), self.header().head.clone()); + let mut new_vec = Self::with_capacity(self.len_strict(), self.header().head.clone()); for e in self.as_slice() { new_vec.push(e.clone()); } @@ -376,6 +1066,7 @@ where H: Debug, T: Debug, { + #[mutants::skip] fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { f.debug_struct("HeaderVec") .field("header", &self.header().head) @@ -384,26 +1075,59 @@ where } } -pub struct HeaderVecWeak { - header_vec: ManuallyDrop>, +impl From for HeaderVec +where + U: AsRef<[T]>, +{ + fn from(from: U) -> Self { + HeaderVec::from_header_slice(H::default(), from) + } +} + +impl HeaderVec { + pub fn iter(&self) -> slice::Iter<'_, T> { + self.as_slice().iter() + } + + pub fn iter_mut(&mut self) -> slice::IterMut<'_, T> { + self.as_mut_slice().iter_mut() + } } -impl Deref for HeaderVecWeak { - type Target = HeaderVec; +impl<'a, H, T> IntoIterator for &'a HeaderVec { + type Item = &'a T; + type IntoIter = slice::Iter<'a, T>; - fn deref(&self) -> &Self::Target { - &self.header_vec + fn into_iter(self) -> Self::IntoIter { + self.iter() } } -impl DerefMut for HeaderVecWeak { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.header_vec +impl<'a, H, T> IntoIterator for &'a mut HeaderVec { + type Item = &'a mut T; + type IntoIter = slice::IterMut<'a, T>; + + fn into_iter(self) -> Self::IntoIter { + self.iter_mut() } } -impl Debug for HeaderVecWeak { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - f.debug_struct("HeaderVecWeak").finish() +impl Extend for HeaderVec { + #[inline] + #[track_caller] + fn extend>(&mut self, iter: I) { + let iter = iter.into_iter(); + self.reserve(iter.size_hint().0); + iter.for_each(|item| self.push(item)); + } 
+} + +/// Extend implementation that copies elements out of references before pushing them onto the Vec. +impl<'a, H, T: Copy + 'a> Extend<&'a T> for HeaderVec { + #[track_caller] + fn extend>(&mut self, iter: I) { + let iter = iter.into_iter(); + self.reserve(iter.size_hint().0); + iter.for_each(|item| self.push(*item)); } } diff --git a/src/splice.rs b/src/splice.rs new file mode 100644 index 0000000..e3b0290 --- /dev/null +++ b/src/splice.rs @@ -0,0 +1,165 @@ +#![cfg(feature = "std")] + +use core::{any::type_name, fmt, ptr, slice}; + +use crate::{Drain, WeakFixupFn}; + +/// A splicing iterator for a `HeaderVec`. +/// +/// This struct is created by [`Vec::splice()`]. +/// See its documentation for more. +/// +/// # Example +/// +/// ``` +/// # use header_vec::HeaderVec; +/// let mut hv: HeaderVec<(), _> = HeaderVec::from([0, 1, 2]); +/// let new = [7, 8]; +/// let iter = hv.splice(1.., new); +/// ``` +pub struct Splice<'a, H, I: Iterator + 'a> { + pub(super) drain: Drain<'a, H, I::Item>, + pub(super) replace_with: I, + pub(super) weak_fixup: Option>, +} + +impl Iterator for Splice<'_, H, I> { + type Item = I::Item; + + fn next(&mut self) -> Option { + self.drain.next() + } + + fn size_hint(&self) -> (usize, Option) { + self.drain.size_hint() + } +} + +impl Splice<'_, H, I> { + /// Not a standard function, might be useful nevertheless, we use it in tests. + pub fn drained_slice(&self) -> &[I::Item] { + self.drain.as_slice() + } +} + +impl DoubleEndedIterator for Splice<'_, H, I> { + fn next_back(&mut self) -> Option { + self.drain.next_back() + } +} + +impl ExactSizeIterator for Splice<'_, H, I> {} + +impl fmt::Debug for Splice<'_, H, I> +where + I: Iterator + fmt::Debug, + I::Item: fmt::Debug, +{ + #[mutants::skip] + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct(&format!( + "Splice<{}, {}>", + type_name::(), + type_name::() + )) + .field("drain", &self.drain.as_slice()) + .field("replace_with", &self.replace_with) + .field("weak_fixup", &self.weak_fixup.is_some()) + .finish() + } +} + +impl Drop for Splice<'_, H, I> { + #[track_caller] + #[mutants::skip] + fn drop(&mut self) { + self.drain.by_ref().for_each(drop); + // At this point draining is done and the only remaining tasks are splicing + // and moving things into the final place. + // Which means we can replace the slice::Iter with pointers that won't point to deallocated + // memory, so that Drain::drop is still allowed to call iter.len(), otherwise it would break + // the ptr.sub_ptr contract. + + unsafe { + let vec = self.drain.vec.as_mut(); + + if self.drain.tail_len == 0 { + vec.extend(self.replace_with.by_ref()); + return; + } + + // First fill the range left by drain(). + if !self.drain.fill(&mut self.replace_with) { + return; + } + + // There may be more elements. Use the lower bound as an estimate. + // FIXME: Is the upper bound a better guess? Or something else? + let (lower_bound, _upper_bound) = self.replace_with.size_hint(); + if lower_bound > 0 { + self.drain.move_tail(lower_bound, &mut self.weak_fixup); + if !self.drain.fill(&mut self.replace_with) { + return; + } + } + + // Collect any remaining elements. + // This is a zero-length vector which does not allocate if `lower_bound` was exact. + let mut collected = self + .replace_with + .by_ref() + .collect::>() + .into_iter(); + // Now we have an exact count. 
+ if collected.len() > 0 { + self.drain.move_tail(collected.len(), &mut self.weak_fixup); + let filled = self.drain.fill(&mut collected); + debug_assert!(filled); + debug_assert_eq!(collected.len(), 0); + } + } + } +} + +/// Private helper methods for `Splice::drop` +impl Drain<'_, H, T> { + /// The range from `self.vec.len` to `self.tail_start` contains elements + /// that have been moved out. + /// Fill that range as much as possible with new elements from the `replace_with` iterator. + /// Returns `true` if we filled the entire range. (`replace_with.next()` didn’t return `None`.) + unsafe fn fill>(&mut self, replace_with: &mut I) -> bool { + let vec = unsafe { self.vec.as_mut() }; + let range_start = vec.len_exact(); + let range_end = self.tail_start; + let range_slice = unsafe { + slice::from_raw_parts_mut(vec.as_mut_ptr().add(range_start), range_end - range_start) + }; + + for place in range_slice { + if let Some(new_item) = replace_with.next() { + unsafe { ptr::write(place, new_item) }; + let len = vec.len_exact(); + vec.set_len(len + 1); + } else { + return false; + } + } + true + } + + /// Makes room for inserting more elements before the tail. + #[track_caller] + unsafe fn move_tail(&mut self, additional: usize, weak_fixup: &mut Option>) { + let vec = unsafe { self.vec.as_mut() }; + let len = self.tail_start + self.tail_len; + vec.reserve_intern(len + additional, false, weak_fixup); + + let new_tail_start = self.tail_start + additional; + unsafe { + let src = vec.as_ptr().add(self.tail_start); + let dst = vec.as_mut_ptr().add(new_tail_start); + ptr::copy(src, dst, self.tail_len); + } + self.tail_start = new_tail_start; + } +} diff --git a/src/weak.rs b/src/weak.rs new file mode 100644 index 0000000..6d1dd1f --- /dev/null +++ b/src/weak.rs @@ -0,0 +1,33 @@ +//! Weak reference to a `HeaderVec`. + +use core::{ + fmt::Debug, + mem::ManuallyDrop, + ops::{Deref, DerefMut}, +}; + +use crate::HeaderVec; + +pub struct HeaderVecWeak { + pub(crate) header_vec: ManuallyDrop>, +} + +impl Deref for HeaderVecWeak { + type Target = HeaderVec; + + fn deref(&self) -> &Self::Target { + &self.header_vec + } +} + +impl DerefMut for HeaderVecWeak { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.header_vec + } +} + +impl Debug for HeaderVecWeak { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct("HeaderVecWeak").finish() + } +} diff --git a/tests/atomic_append.rs b/tests/atomic_append.rs new file mode 100644 index 0000000..42f5535 --- /dev/null +++ b/tests/atomic_append.rs @@ -0,0 +1,27 @@ +#![cfg(feature = "atomic_append")] +extern crate std; + +use header_vec::*; + +#[test] +fn test_atomic_append() { + let mut hv = HeaderVec::with_capacity(10, ()); + + hv.push(1); + unsafe { hv.push_atomic(2).unwrap() }; + hv.push(3); + + assert_eq!(hv.len(), 3); + assert_eq!(hv.as_slice(), [1, 2, 3]); +} + +#[test] +fn test_extend_from_slice() { + let hv = HeaderVec::with_capacity(6, ()); + + unsafe { + hv.extend_from_slice_atomic(&[0, 1, 2]).unwrap(); + hv.extend_from_slice_atomic(&[3, 4, 5]).unwrap(); + } + assert_eq!(hv.as_slice(), &[0, 1, 2, 3, 4, 5]); +} diff --git a/tests/mutants.rs b/tests/mutants.rs new file mode 100644 index 0000000..e39d8cc --- /dev/null +++ b/tests/mutants.rs @@ -0,0 +1,377 @@ +//! This file is partially is generated with github copilot assistance. +//! The only objective here is to generate coverage over all code paths to +//! make 'cargo mutants' pass. Then the test suite should pass +//! 'cargo +nightly miri test'. 
Unless otherwise noted the tests here are +//! not extensively reviewed for semantic correctness. Eventually human +//! reviewed tests here should be moved to other unit tests. + +use header_vec::HeaderVec; + +#[test] +fn test_drain_size_hint() { + let mut vec: HeaderVec<(), i32> = HeaderVec::from([1, 2, 3, 4, 5]); + let drain = vec.drain(1..4); + let (lower, upper) = drain.size_hint(); + assert_eq!(lower, 3); + assert_eq!(upper, Some(3)); +} + +#[test] +fn test_is_empty_exact() { + let mut vec: HeaderVec<(), i32> = HeaderVec::new(()); + assert!(vec.is_empty_exact()); + + vec.push(1); + assert!(!vec.is_empty_exact()); + + vec.truncate(0); + assert!(vec.is_empty_exact()); +} + +#[test] +fn test_push_with_weakfix() { + let mut vec: HeaderVec<(), i32> = HeaderVec::new(()); + let mut called = false; + vec.push_with_weakfix(1, &mut |_| called = true); + assert_eq!(vec.as_slice(), &[1]); + + // Test that push_with_weakfix actually pushes the value + assert_eq!(vec.len(), 1); + assert_eq!(vec[0], 1); + + // Test that multiple values can be pushed + vec.push_with_weakfix(2, &mut |_| {}); + assert_eq!(vec.len(), 2); + assert_eq!(vec[1], 2); +} + +#[test] +fn test_splice_size_hint() { + let mut vec: HeaderVec<(), i32> = HeaderVec::from([1, 2, 3, 4, 5]); + let splice = vec.splice(1..4, [8, 9]); + let (lower, upper) = splice.size_hint(); + assert_eq!(lower, 3); + assert_eq!(upper, Some(3)); +} + +#[test] +fn test_reserve() { + let mut vec: HeaderVec<(), i32> = HeaderVec::new(()); + let initial_cap = vec.capacity(); + vec.reserve(100); + assert!(vec.capacity() >= initial_cap + 100); +} + +#[test] +fn test_shrink_to() { + let mut vec: HeaderVec<(), i32> = HeaderVec::from([1, 2, 3]); + vec.reserve(100); + let big_cap = vec.capacity(); + vec.shrink_to(10); + assert!(vec.capacity() < big_cap); + assert!(vec.capacity() >= 10); +} + +#[test] +fn test_splice_drop() { + let mut vec: HeaderVec<(), i32> = HeaderVec::from([1, 2, 3, 4, 5]); + { + let _splice = vec.splice(1..4, [8, 9]); + // Let splice drop here + } + assert_eq!(vec.as_slice(), &[1, 8, 9, 5]); +} + +#[test] +fn test_drain_debug() { + let mut vec: HeaderVec<(), i32> = HeaderVec::from([1, 2, 3]); + let drain = vec.drain(1..); + assert!(format!("{:?}", drain).contains("Drain")); +} + +#[test] +fn test_is() { + let vec: HeaderVec<(), i32> = HeaderVec::from([1, 2, 3]); + let ptr = vec.ptr(); + assert!(vec.is(ptr)); + assert!(!vec.is(std::ptr::null())); +} + +#[test] +fn test_shrink_to_fit_with_weakfix() { + let mut vec: HeaderVec<(), i32> = HeaderVec::from([1, 2, 3]); + vec.reserve(100); + let big_cap = vec.capacity(); + let mut called = false; + vec.shrink_to_fit_with_weakfix(&mut |_| called = true); + assert!(vec.capacity() < big_cap); + assert_eq!(vec.capacity(), vec.len()); +} + +#[test] +fn test_into_iter() { + let vec: HeaderVec<(), i32> = HeaderVec::from([1, 2, 3]); + let mut iter = (&vec).into_iter(); + assert_eq!(iter.next(), Some(&1)); + assert_eq!(iter.next(), Some(&2)); + assert_eq!(iter.next(), Some(&3)); + assert_eq!(iter.next(), None); +} + +#[test] +fn test_len_strict() { + let vec: HeaderVec<(), i32> = HeaderVec::from([1, 2, 3]); + assert_eq!(vec.len_strict(), 3); + assert_eq!(vec.len_strict(), vec.len()); +} + +#[test] +fn test_drain_as_ref() { + let mut vec: HeaderVec<(), i32> = HeaderVec::from([1, 2, 3]); + let drain = vec.drain(..); + assert_eq!(drain.as_ref(), &[1, 2, 3]); +} + +#[test] +fn test_drain_keep_rest() { + let mut vec: HeaderVec<(), i32> = HeaderVec::from([1, 2, 3, 4]); + { + let mut drain = vec.drain(..); + 
assert_eq!(drain.next(), Some(1)); + drain.keep_rest(); + } + assert_eq!(vec.as_slice(), &[2, 3, 4]); +} + +#[test] +fn test_partial_eq() { + let vec1: HeaderVec<&str, i32> = HeaderVec::from_header_slice("header1", [1, 2]); + let vec2: HeaderVec<&str, i32> = HeaderVec::from_header_slice("header1", [1, 2]); + let vec3: HeaderVec<&str, i32> = HeaderVec::from_header_slice("header2", [1, 2]); + let vec4: HeaderVec<&str, i32> = HeaderVec::from_header_slice("header1", [1, 3]); + + assert_eq!(vec1, vec2); + assert_ne!(vec1, vec3); // Different header + assert_ne!(vec1, vec4); // Different elements +} + +#[test] +fn test_splice_next_back() { + let mut vec: HeaderVec<(), i32> = HeaderVec::from([1, 2, 3, 4]); + let mut splice = vec.splice(1..3, [5, 6]); + assert_eq!(splice.next_back(), Some(3)); + assert_eq!(splice.next_back(), Some(2)); + assert_eq!(splice.next_back(), None); +} + +#[test] +fn test_drain_keep_rest_tail_len() { + let mut vec: HeaderVec<(), i32> = HeaderVec::from([1, 2, 3, 4, 5]); + { + let mut drain = vec.drain(1..3); + drain.next(); + drain.keep_rest(); + } + assert_eq!(vec.as_slice(), &[1, 3, 4, 5]); +} + +#[test] +fn test_header_vec_basics() { + let mut vec: HeaderVec<(), i32> = HeaderVec::new(()); + + // Test empty state + assert!(vec.is_empty()); + assert_eq!(vec.len(), 0); + assert_eq!(vec.len_strict(), 0); + assert_eq!(vec.as_slice().len(), 0); + + // Test non-empty state + vec.push(1); + assert!(!vec.is_empty()); + assert_eq!(vec.len(), 1); + assert_eq!(vec.len_strict(), 1); + assert_eq!(vec.as_mut_slice(), &mut [1]); + + // Test reserve_exact_with_weakfix + let initial_cap = vec.capacity(); + let mut called = false; + vec.reserve_exact_with_weakfix(10, &mut |_| called = true); + assert!(vec.capacity() >= initial_cap + 10); + + // Test offset calculation implicitly through indexing + vec.push(2); + vec.push(3); + assert_eq!(vec[0], 1); + assert_eq!(vec[1], 2); + assert_eq!(vec[2], 3); +} + +#[test] +fn test_splice_drop_behavior() { + let mut vec: HeaderVec<(), i32> = HeaderVec::from([1, 2, 3]); + { + let mut splice = vec.splice(1..3, vec![4, 5, 6]); + assert_eq!(splice.next(), Some(2)); + // Let splice drop here with remaining elements + } + assert_eq!(vec.as_slice(), &[1, 4, 5, 6]); +} + +#[test] +fn test_truncate_minus() { + let mut vec: HeaderVec<(), i32> = HeaderVec::from([1, 2, 3, 4, 5]); + vec.truncate(2); + assert_eq!(vec.len(), 2); + assert_eq!(vec.as_slice(), &[1, 2]); +} + +#[test] +fn test_is_empty_strict() { + let mut vec: HeaderVec<(), i32> = HeaderVec::new(()); + assert!(vec.is_empty_strict()); + vec.push(1); + assert!(!vec.is_empty_strict()); +} + +#[test] +fn test_spare_capacity_mut() { + let mut vec: HeaderVec<(), i32> = HeaderVec::with_capacity(10, ()); + vec.push(1); + let spare = vec.spare_capacity_mut(); + assert!(!spare.is_empty()); + assert_eq!(spare.len(), 9); +} + +#[test] +fn test_weak_debug() { + let vec: HeaderVec<(), i32> = HeaderVec::from([1, 2, 3]); + let weak = unsafe { vec.weak() }; + assert!(!format!("{:?}", weak).is_empty()); +} + +#[test] +fn test_offset_alignment() { + let mut vec: HeaderVec<(), i32> = HeaderVec::with_capacity(1, ()); + vec.push(42); + assert_eq!(vec[0], 42); // Tests correct memory layout/offset +} + +#[test] +fn test_reserve_with_weakfix() { + let mut vec: HeaderVec<(), i32> = HeaderVec::new(()); + let mut called = false; + vec.reserve_with_weakfix(100, &mut |_| called = true); + assert!(vec.capacity() >= 100); +} + +#[test] +fn test_drop_behavior() { + struct DropCheck(std::rc::Rc>); + impl Drop for DropCheck { + fn 
drop(&mut self) { + *self.0.borrow_mut() = true; + } + } + + let dropped = std::rc::Rc::new(std::cell::RefCell::new(false)); + { + let mut vec: HeaderVec<(), _> = HeaderVec::new(()); + vec.push(DropCheck(dropped.clone())); + } + assert!(*dropped.borrow()); +} + +#[test] +fn test_offset_arithmetic() { + let mut vec: HeaderVec<(), u64> = HeaderVec::with_capacity(2, ()); + vec.push(123); + vec.push(456); + assert_eq!(vec[0], 123); + assert_eq!(vec[1], 456); +} + +#[test] +fn test_splice_debug() { + let mut vec: HeaderVec<(), i32> = HeaderVec::from([1, 2, 3]); + let splice = vec.splice(1..3, vec![4, 5]); + assert!(format!("{:?}", splice).contains("Splice")); +} + +#[test] +fn test_extend_from_slice_with_weakfix() { + let mut vec: HeaderVec<(), i32> = HeaderVec::new(()); + let mut called = false; + vec.extend_from_slice_with_weakfix([1, 2, 3], &mut |_| called = true); + assert_eq!(vec.as_slice(), &[1, 2, 3]); +} + +#[test] +fn test_truncate_plus() { + let mut vec: HeaderVec<(), i32> = HeaderVec::from([1, 2, 3, 4, 5]); + vec.truncate(2); + assert_eq!(vec.len(), 2); + assert_eq!(vec.as_slice(), &[1, 2]); + + // Test truncating to larger size (should have no effect) + vec.truncate(10); + assert_eq!(vec.len(), 2); + assert_eq!(vec.as_slice(), &[1, 2]); +} + +#[test] +fn test_into_iter_mut() { + let mut vec: HeaderVec<(), i32> = HeaderVec::from([1, 2, 3]); + let mut iter = (&mut vec).into_iter(); + + // Verify we can mutate through the iterator + if let Some(first) = iter.next() { + *first = 100; + } + + // Verify we get mutable references to all elements in correct order + let mut collected: Vec<&mut i32> = iter.collect(); + *collected[0] = 200; + *collected[1] = 300; + + // Verify mutations happened + assert_eq!(vec.as_slice(), &[100, 200, 300]); +} + +#[test] +fn test_len_exact() { + let mut vec: HeaderVec<(), i32> = HeaderVec::from([1, 2, 3]); + assert_eq!(vec.len_exact(), 3); + + vec.push(4); + assert_eq!(vec.len_exact(), 4); + + vec.truncate(2); + assert_eq!(vec.len_exact(), 2); + + // Additional checks that depend on len_exact + assert_eq!(vec.capacity(), vec.len_exact() + vec.spare_capacity()); + assert_eq!(vec.len_exact(), vec.as_slice().len()); + assert_eq!(vec.is_empty_exact(), vec.len_exact() == 0); +} + +#[test] +fn test_drain_keep_rest_advanced() { + let mut vec: HeaderVec<(), i32> = HeaderVec::from([1, 2, 3, 4, 5]); + { + let mut drain = vec.drain(1..3); + // Take one item to test non-zero start position + assert_eq!(drain.next(), Some(2)); + drain.keep_rest(); + } + // Verify that items are correctly placed when keeping rest with non-zero start position + assert_eq!(vec.as_slice(), &[1, 3, 4, 5]); + + // Test with larger gaps to verify addition vs multiplication difference + let mut vec: HeaderVec<(), i32> = HeaderVec::from([1, 2, 3, 4, 5, 6, 7, 8]); + { + let mut drain = vec.drain(2..6); + assert_eq!(drain.next(), Some(3)); + drain.keep_rest(); + } + assert_eq!(vec.as_slice(), &[1, 2, 4, 5, 6, 7, 8]); +} diff --git a/tests/simple.rs b/tests/simple.rs index 2b42d85..4f39f1e 100644 --- a/tests/simple.rs +++ b/tests/simple.rs @@ -11,6 +11,31 @@ struct TestA { c: usize, } +#[test] +fn test_sizeof() { + // assert that HeaderVec is really a single lean pointer + assert_eq!( + core::mem::size_of::>(), + core::mem::size_of::<*mut ()>() + ); + // and has space for niche optimization + assert_eq!( + core::mem::size_of::>(), + core::mem::size_of::>>() + ); +} + +#[test] +fn test_empty() { + let mut v_empty = HeaderVec::with_capacity(0, TestA { a: 4, b: !0, c: 66 }); + + assert_eq!(0, 
v_empty.len()); + assert_eq!(0, v_empty.capacity()); + assert_eq!(0, v_empty.as_slice().len()); + + v_empty.extend_from_slice("the quick brown fox jumps over the lazy dog".as_bytes()); +} + #[test] fn test_head_array() { let mut v_orig = HeaderVec::new(TestA { a: 4, b: !0, c: 66 }); @@ -44,3 +69,34 @@ fn test_head_array() { v_orig.as_slice().iter().copied().collect::() ); } + +// This shown a miri error +#[test] +fn test_push() { + let mut hv = HeaderVec::with_capacity(10, ()); + + hv.push(123); + assert_eq!(hv[0], 123); +} + +#[test] +fn test_extend_from_slice() { + let mut hv = HeaderVec::new(()); + + hv.extend_from_slice([0, 1, 2]); + hv.extend_from_slice([3, 4, 5]); + assert_eq!(hv.as_slice(), [0, 1, 2, 3, 4, 5]); +} + +#[test] +fn test_from() { + assert_eq!(HeaderVec::<(), i32>::from(&[1, 2, 3]).as_slice(), [1, 2, 3]); +} + +#[test] +fn test_from_str() { + assert_eq!( + HeaderVec::<(), u8>::from("test").as_slice(), + "test".as_bytes() + ); +} diff --git a/tests/std.rs b/tests/std.rs new file mode 100644 index 0000000..721902d --- /dev/null +++ b/tests/std.rs @@ -0,0 +1,65 @@ +//! Tests for the `std` feature of the `header_vec` crate. +#![cfg(feature = "std")] + +use header_vec::*; +use xmacro::xmacro; + +#[test] +fn test_extend() { + let mut hv = HeaderVec::new(()); + hv.extend([1, 2, 3]); + assert_eq!(hv.as_slice(), [1, 2, 3]); +} + +#[test] +fn test_extend_ref() { + let mut hv = HeaderVec::<(), i32>::new(()); + hv.extend([&1, &2, &3]); + assert_eq!(hv.as_slice(), [1, 2, 3]); +} + +#[test] +fn test_drain() { + let mut hv = HeaderVec::from_header_slice((), [1, 2, 3, 4, 5, 6]); + + let drain = hv.drain(1..4); + assert_eq!(drain.as_slice(), [2, 3, 4]); + drop(drain); + assert_eq!(hv.as_slice(), [1, 5, 6]); +} + +xmacro! { + $[ + // tests with simple i32 lists + name: init: range: replace: drained: result: + nop_begin [1, 2, 3, 4, 5, 6] (0..0) [] [] [1, 2, 3, 4, 5, 6] + nop_middle [1, 2, 3, 4, 5, 6] (3..3) [] [] [1, 2, 3, 4, 5, 6] + nop_end [1, 2, 3, 4, 5, 6] (6..6) [] [] [1, 2, 3, 4, 5, 6] + insert_begin [1, 2, 3, 4, 5, 6] (0..0) [-1, 0] [] [-1, 0, 1, 2, 3, 4, 5, 6] + insert_middle [1, 2, 3, 4, 5, 6] (3..3) [33, 34] [] [1, 2, 3, 33, 34, 4, 5, 6] + insert_end [1, 2, 3, 4, 5, 6] (6..6) [7, 8] [] [1, 2, 3, 4, 5, 6, 7, 8] + remove_begin [1, 2, 3, 4, 5, 6] (0..2) [] [1, 2] [3, 4, 5, 6] + remove_middle [1, 2, 3, 4, 5, 6] (3..5) [] [4, 5] [1, 2, 3, 6] + remove_end [1, 2, 3, 4, 5, 6] (4..) [] [5, 6] [1, 2, 3, 4] + replace_begin_shorter [1, 2, 3, 4, 5, 6] (0..2) [11] [1, 2] [11,3, 4, 5, 6] + replace_middle_shorter [1, 2, 3, 4, 5, 6] (3..5) [44] [4, 5] [1, 2, 3, 44, 6] + replace_end_shorter [1, 2, 3, 4, 5, 6] (4..) [55] [5, 6] [1, 2, 3, 4, 55] + replace_begin_same [1, 2, 3, 4, 5, 6] (0..2) [11, 22] [1, 2] [11, 22, 3, 4, 5, 6] + replace_middle_same [1, 2, 3, 4, 5, 6] (3..5) [44, 55] [4, 5] [1, 2, 3, 44,55, 6] + replace_end_same [1, 2, 3, 4, 5, 6] (4..) [55, 66] [5, 6] [1, 2, 3, 4, 55, 66] + replace_begin_longer [1, 2, 3, 4, 5, 6] (0..2) [11, 22, 33] [1, 2] [11, 22, 33, 3, 4, 5, 6] + replace_middle_longer [1, 2, 3, 4, 5, 6] (3..5) [44, 55, 66] [4, 5] [1, 2, 3, 44, 55, 66, 6] + replace_end_longer [1, 2, 3, 4, 5, 6] (4..) [66, 77, 88] [5, 6] [1, 2, 3, 4, 66, 77, 88] + big_nop [[1; 64]; 64] (0..0) [[0; 64]; 0] [[0; 64]; 0] [[1; 64]; 64] + ] + + #[test] + fn $+test_splice_$name() { + let mut hv = HeaderVec::from_header_slice((), $init); + let splice = hv.splice($range, $replace); + + assert_eq!(splice.drained_slice(), $drained); + drop(splice); + assert_eq!(hv.as_slice(), $result); + } +}
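+
+// For readers unfamiliar with `xmacro`: each row of the table above is expanded
+// through the template at the end of the block into one plain #[test] function.
+// As an illustration, the `replace_middle_same` row corresponds roughly to the
+// hand-expanded sketch below (kept in a comment so it cannot collide with the
+// macro-generated test of the same name):
+//
+//     #[test]
+//     fn test_splice_replace_middle_same() {
+//         let mut hv = HeaderVec::from_header_slice((), [1, 2, 3, 4, 5, 6]);
+//         let splice = hv.splice(3..5, [44, 55]);
+//
+//         assert_eq!(splice.drained_slice(), [4, 5]);
+//         drop(splice);
+//         assert_eq!(hv.as_slice(), [1, 2, 3, 44, 55, 6]);
+//     }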