ecow/vec.rs
1//! A clone-on-write alternative to [`Vec`].
2
3use core::alloc::Layout;
4use core::array;
5use core::borrow::Borrow;
6use core::cmp::Ordering;
7use core::fmt::{self, Debug, Formatter};
8use core::hash::{Hash, Hasher};
9use core::marker::PhantomData;
10use core::mem;
11use core::ops::Deref;
12use core::ptr::{self, NonNull};
13
14#[cfg(not(feature = "std"))]
15use alloc::vec::Vec;
16
17use crate::sync::atomic::{self, AtomicUsize, Ordering::*};
18
/// Create a new [`EcoVec`] with the given elements.
/// ```
/// # use ecow::eco_vec;
/// assert_eq!(eco_vec![1; 4], [1; 4]);
/// assert_eq!(eco_vec![1, 2, 3], [1, 2, 3]);
/// ```
#[macro_export]
macro_rules! eco_vec {
    // Empty invocation: `eco_vec![]` — no allocation.
    () => { $crate::EcoVec::new() };
    // Repeat syntax: `eco_vec![value; count]`, mirroring `vec![x; n]`.
    ($elem:expr; $n:expr) => { $crate::EcoVec::from_elem($elem, $n) };
    // List syntax: `eco_vec![a, b, c]`, with optional trailing comma.
    ($($value:expr),+ $(,)?) => { $crate::EcoVec::from([$($value),+]) };
}
31
/// An economical vector with clone-on-write semantics.
///
/// This type has the same layout as a slice `&[T]`: It consists of a pointer
/// and a length. The pointer is null-pointer optimized (meaning that
/// [`Option<EcoVec<T>>`] has the same size as `EcoVec<T>`). Dereferencing an
/// `EcoVec` to a slice is a no-op.
///
/// Within its allocation, an `EcoVec` stores a reference count and its
/// capacity. In contrast to an [`Arc<Vec<T>>`](alloc::sync::Arc), it only
/// requires a single allocation for both the reference count and the elements.
/// The internal reference counter is atomic, making this type [`Sync`] and
/// [`Send`].
///
/// Note that most mutating methods require [`T: Clone`](Clone) due to
/// clone-on-write semantics.
///
/// # Example
/// ```
/// use ecow::EcoVec;
///
/// // Empty vector does not allocate, but first push does.
/// let mut first = EcoVec::new();
/// first.push(1);
/// first.push(2);
/// assert_eq!(first, [1, 2]);
///
/// // This clone is cheap, it references the same allocation.
/// let mut second = first.clone();
///
/// // This makes a real copy (clone-on-write).
/// second.push(3);
/// assert_eq!(second, [1, 2, 3]);
///
/// // As `second` was cloned upon mutation, this iterator can
/// // move the elements. If the allocation was still shared with
/// // `first`, this would clone lazily.
/// assert_eq!(second.into_iter().collect::<Vec<_>>(), vec![1, 2, 3]);
/// ```
#[repr(C)]
pub struct EcoVec<T> {
    /// Is `Self::dangling()` when the vector is unallocated.
    ///
    /// Otherwise, points `Self::offset()` bytes after a valid allocation and
    /// header, to the start of the vector's elements. It is then aligned to the
    /// maximum of the header's alignment and T's alignment. The pointer is
    /// valid for `len` reads and `capacity` writes of T. The elements may only
    /// be accessed mutably if the reference-count is `1`.
    ptr: NonNull<T>,
    /// The number of elements in the vector.
    ///
    /// Invariant: `len <= capacity`.
    len: usize,
    /// See Vec's impl for more details.
    phantom: PhantomData<T>,
}
87
/// The start of the backing allocation.
///
/// This is followed by padding, if necessary, and then the actual data.
#[derive(Debug)]
struct Header {
    /// The vector's reference count. Starts at 1 and only drops to zero
    /// when the last vector is dropped.
    ///
    /// Invariant: `refs <= isize::MAX`.
    refs: AtomicUsize,
    /// The number of elements the backing allocation can hold. Zero if there
    /// is no backing allocation.
    ///
    /// May only be mutated if `refs == 1`.
    ///
    /// Invariant: `capacity <= isize::MAX`.
    capacity: usize,
}
106
impl<T> EcoVec<T> {
    /// Create a new, empty vector.
    ///
    /// This does not allocate.
    #[inline]
    pub const fn new() -> Self {
        Self {
            ptr: Self::dangling(),
            len: 0,
            phantom: PhantomData,
        }
    }

    /// Create a new, empty vector with at least the specified capacity.
    #[inline]
    pub fn with_capacity(capacity: usize) -> Self {
        let mut vec = Self::new();
        if capacity > 0 {
            unsafe {
                // Safety:
                // - The reference count starts at 1.
                // - The capacity starts at 0 and the target capacity is checked
                //   to be `> 0`.
                vec.grow(capacity);
            }
        }
        vec
    }

    /// Returns `true` if the vector contains no elements.
    #[inline]
    pub const fn is_empty(&self) -> bool {
        self.len == 0
    }

    /// The number of elements in the vector.
    #[inline]
    pub const fn len(&self) -> usize {
        self.len
    }

    /// How many elements the vector's backing allocation can hold.
    ///
    /// Even if `len < capacity`, pushing into the vector may still
    /// allocate if the reference count is larger than one.
    #[inline]
    pub fn capacity(&self) -> usize {
        // Unallocated vectors have no header and capacity zero.
        self.header().map_or(0, |header| header.capacity)
    }

    /// Extracts a slice containing the entire vector.
    #[inline]
    pub fn as_slice(&self) -> &[T] {
        // Safety:
        // - The pointer returned by `data()` is non-null, well-aligned, and
        //   valid for `len` reads of `T`.
        // - We have the invariant `len <= capacity <= isize::MAX`.
        // - The memory referenced by the slice isn't mutated for the returned
        //   slice's lifetime, because `self` becomes borrowed and even if there
        //   are other vectors referencing the same backing allocation, they are
        //   now allowed to mutate the slice since then the ref-count is larger
        //   than one.
        unsafe { core::slice::from_raw_parts(self.data(), self.len) }
    }

    /// Removes all values from the vector.
    pub fn clear(&mut self) {
        // Nothing to do if it's empty.
        if self.is_empty() {
            return;
        }

        // If there are other vectors that reference the same backing
        // allocation, we just create a new, empty vector.
        if !self.is_unique() {
            // If another vector was dropped in the meantime, this vector could
            // have become unique, but we don't care, creating a new one
            // is safe nonetheless. Note that this runs the vector's drop
            // impl and reduces the ref-count.
            *self = Self::new();
            return;
        }

        unsafe {
            let prev = self.len;
            self.len = 0;

            // Safety:
            // - We set the length to zero first in case a drop panics, so we
            //   leak rather than double dropping.
            // - We have unique ownership of the backing allocation, so we can
            //   keep it and clear it. In particular, no other vector can have
            //   gained shared ownership in the meantime since `is_unique()`,
            //   as this is the only live vector available for cloning and we
            //   hold a mutable reference to it.
            // - The pointer returned by `data_mut()` is valid for `capacity`
            //   writes, we have the invariant `prev <= capacity` and thus,
            //   `data_mut()` is valid for `prev` writes.
            ptr::drop_in_place(ptr::slice_from_raw_parts_mut(self.data_mut(), prev));
        }
    }
}
209
impl<T: Clone> EcoVec<T> {
    /// Create a new vector with `n` copies of `value`.
    pub fn from_elem(value: T, n: usize) -> Self {
        let mut vec = Self::with_capacity(n);
        for _ in 0..n {
            // Safety: we just called `EcoVec::with_capacity()`
            unsafe { vec.push_unchecked(value.clone()) }
        }
        vec
    }

    /// Produce a mutable slice containing the entire vector.
    ///
    /// Clones the vector if its reference count is larger than 1.
    pub fn make_mut(&mut self) -> &mut [T] {
        // To provide mutable access, we must have unique ownership over the
        // backing allocation.
        self.make_unique();

        // Safety:
        // The reference count is `1` because of `make_unique`.
        // For more details, see `Self::as_slice()`.
        unsafe { core::slice::from_raw_parts_mut(self.data_mut(), self.len) }
    }

    /// Add a value at the end of the vector.
    ///
    /// Clones the vector if its reference count is larger than 1.
    #[inline]
    pub fn push(&mut self, value: T) {
        // Ensure unique ownership and grow the vector if necessary.
        // Reserves 1 slot only when full; reserving 0 still makes us unique.
        self.reserve((self.len == self.capacity()) as usize);

        // Safety: we just called `EcoVec::reserve()`
        unsafe {
            self.push_unchecked(value);
        }
    }

    /// Add a value at the end of the vector, without reallocating.
    ///
    /// You must ensure that `self.is_unique()` and `self.len < self.capacity()`
    /// hold, by calling `EcoVec::with_capacity()` or `EcoVec::reserve()`.
    #[inline]
    unsafe fn push_unchecked(&mut self, value: T) {
        debug_assert!(self.is_unique());
        debug_assert!(self.len < self.capacity());

        unsafe {
            // Safety:
            // - The caller must ensure that the reference count is `1`.
            // - The pointer returned by `data_mut()` is valid for `capacity`
            //   writes.
            // - The caller must ensure that `len < capacity`.
            // - Thus, `data_mut() + len` is valid for one write.
            ptr::write(self.data_mut().add(self.len), value);

            // Safety:
            // Since we reserved space, we maintain `len <= capacity`.
            self.len += 1;
        }
    }

    /// Removes the last element from a vector and returns it, or `None` if the
    /// vector is empty.
    ///
    /// Clones the vector if its reference count is larger than 1.
    #[inline]
    pub fn pop(&mut self) -> Option<T> {
        if self.is_empty() {
            return None;
        }

        self.make_unique();

        unsafe {
            // Safety:
            // Cannot underflow because `is_empty` returned `false`.
            self.len -= 1;

            // Safety:
            // - The reference count is `1` because of `make_unique`.
            // - The pointer returned by `data()` is valid for `len` reads and
            //   thus `data() + new_len` is valid for one read.
            Some(ptr::read(self.data().add(self.len)))
        }
    }

    /// Inserts an element at an index within the vector, shifting all elements
    /// after it to the right.
    ///
    /// Clones the vector if its reference count is larger than 1.
    ///
    /// Panics if `index > len`.
    pub fn insert(&mut self, index: usize, value: T) {
        if index > self.len {
            out_of_bounds(index, self.len);
        }

        // Ensure unique ownership and grow the vector if necessary.
        self.reserve((self.len == self.capacity()) as usize);

        unsafe {
            // Safety:
            // - The reference count is `1` because of `reserve`.
            // - The pointer returned by `data_mut()` is valid for `len`
            //   reads and `capacity` writes of `T`.
            // - Thus, `at` is valid for `len - index` reads of `T`
            // - And `at` is valid for `capacity - index` writes of `T`.
            //   Because of the `reserve` call, we have `len < capacity` and
            //   thus `at + 1` is valid for `len - index` writes of `T`.
            let at = self.data_mut().add(index);
            ptr::copy(at, at.add(1), self.len - index);

            // Safety:
            // - The pointer returned by `data_mut()` is valid for `capacity`
            //   writes.
            // - Due to the bounds check above, `index <= len`
            // - Due to the reserve check, `len < capacity`.
            // - Thus, `data() + index` is valid for one write.
            ptr::write(at, value);

            // Safety:
            // Since we reserved space, we maintain `len <= capacity`.
            self.len += 1;
        }
    }

    /// Removes and returns the element at position index within the vector,
    /// shifting all elements after it to the left.
    ///
    /// Clones the vector if its reference count is larger than 1.
    ///
    /// Panics if `index >= len`.
    pub fn remove(&mut self, index: usize) -> T {
        if index >= self.len {
            out_of_bounds(index, self.len);
        }

        self.make_unique();

        unsafe {
            // Safety:
            // - The reference count is `1` because of `make_unique`.
            // - The pointer returned by `data()` is valid for `len` reads.
            // - Due to the check above, `index < len`.
            // - Thus, `at` is valid for one read.
            let at = self.data_mut().add(index);
            let value = ptr::read(at);

            // Safety:
            // - The pointer returned by `data()` is valid for `len` reads and
            //   `capacity` writes.
            // - Thus, `at + 1` is valid for `len - index - 1` reads.
            // - Thus, `at` is valid for `capacity - index` writes.
            // - Due to the invariant `len <= capacity`, `at` is also valid
            //   for `len - index - 1` writes.
            ptr::copy(at.add(1), at, self.len - index - 1);

            // Safety:
            // Cannot underflow because `index < len` and thus `len > 0`.
            self.len -= 1;

            value
        }
    }

    /// Retains only the elements specified by the predicate.
    ///
    /// Clones the vector if its reference count is larger than 1.
    ///
    /// Note that this clones the vector even if `f` always returns `false`. To
    /// prevent that, you can first iterate over the vector yourself and then
    /// only call `retain` if your condition is `false` for some element.
    pub fn retain<F>(&mut self, mut f: F)
    where
        F: FnMut(&mut T) -> bool,
    {
        // Modified from: https://github.com/servo/rust-smallvec
        // Copyright (c) 2018 The Servo Project Developers
        let len = self.len;
        let values = self.make_mut();

        // `del` counts removed elements; survivors are swapped `del` slots
        // to the left so the kept prefix stays contiguous and in order.
        let mut del = 0;
        for i in 0..len {
            if !f(&mut values[i]) {
                del += 1;
            } else if del > 0 {
                values.swap(i - del, i);
            }
        }

        if del > 0 {
            self.truncate(len - del);
        }
    }

    /// Shortens the vector, keeping the first `target` elements and dropping
    /// the rest.
    ///
    /// Clones the vector if its reference count is larger than 1 and
    /// `target < len`.
    pub fn truncate(&mut self, target: usize) {
        if target >= self.len {
            return;
        }

        if !self.is_unique() {
            // Safety: Just checked bounds.
            *self = Self::from(unsafe { self.get_unchecked(..target) });
            return;
        }

        let rest = self.len - target;
        unsafe {
            // Safety:
            // - Since `target < len`, we maintain `len <= capacity`.
            self.len = target;

            // Safety:
            // - The reference count is `1` because of the `is_unique` check
            //   above (and no clone can occur in between since we hold a
            //   mutable reference).
            // - The pointer returned by `data_mut()` is valid for `capacity`
            //   writes.
            // - We have the invariant `len <= capacity`.
            // - Thus, `data_mut() + target` is valid for `len - target` writes.
            ptr::drop_in_place(ptr::slice_from_raw_parts_mut(
                self.data_mut().add(target),
                rest,
            ));
        }
    }

    /// Reserve space for at least `additional` more elements.
    ///
    /// Guarantees that the resulting vector has reference count `1` and space
    /// for `additional` more elements.
    pub fn reserve(&mut self, additional: usize) {
        let capacity = self.capacity();
        let mut target = capacity;

        // `capacity - self.len` cannot underflow due to `len <= capacity`.
        if additional > capacity - self.len {
            // Reserve at least the `additional` capacity, but also at least
            // double the capacity to ensure exponential growth and finally
            // jump directly to a minimum capacity to prevent frequent
            // reallocation for small vectors.
            target = self
                .len
                .checked_add(additional)
                .unwrap_or_else(|| capacity_overflow())
                .max(2 * capacity)
                .max(Self::min_cap());
        }

        if !self.is_unique() {
            // Shared: copy everything into a fresh, uniquely owned allocation
            // of the target size.
            let mut vec = Self::with_capacity(target);
            vec.extend(self.iter().cloned());
            *self = vec;
        } else if target > capacity {
            unsafe {
                // Safety:
                // - The reference count is `1` because of the `is_unique`
                //   check above.
                // - The `target` capacity is greater than the current capacity
                //   (just checked).
                self.grow(target);
            }
        }
    }

    /// Clones and pushes all elements in a slice to the vector.
    pub fn extend_from_slice(&mut self, slice: &[T]) {
        if slice.is_empty() {
            return;
        }

        self.reserve(slice.len());

        for value in slice {
            // Safety:
            // - The reference count is `1` because of `reserve`.
            // - `self.len < self.capacity()` because we reserved space for
            //   `slice.len()` more elements.
            unsafe {
                self.push_unchecked(value.clone());
            }
        }
    }

    /// Pushes all elements in a trusted-len iterator to the vector.
    ///
    /// # Safety
    /// We can't use `TrustedLen` because it is unstable. Still, the
    /// `ExactSizeIterator::len` must return the exact length of the iterator
    /// for this to be safe.
    pub unsafe fn extend_from_trusted<I>(&mut self, iter: I)
    where
        I: IntoIterator<Item = T>,
        I::IntoIter: ExactSizeIterator,
    {
        let iter = iter.into_iter();
        let count = iter.len();

        if count == 0 {
            return;
        }

        self.reserve(count);

        for value in iter {
            // Safety:
            // - The reference count is `1` because of `reserve`.
            // - `self.len < self.capacity()` because we reserved space for
            //   `iter.len()` more elements.
            unsafe {
                self.push_unchecked(value);
            }
        }
    }
}
528
impl<T> EcoVec<T> {
    /// Grow the capacity to at least the `target` size.
    ///
    /// May only be called if:
    /// - the reference count is `1`, and
    /// - `target > capacity` (i.e., this method grows, it doesn't shrink).
    unsafe fn grow(&mut self, mut target: usize) {
        debug_assert!(self.is_unique());
        debug_assert!(target > self.capacity());

        // Maintain the `capacity <= isize::MAX` invariant.
        if target > isize::MAX as usize {
            capacity_overflow();
        }

        // Directly go to maximum capacity for ZSTs.
        if mem::size_of::<T>() == 0 {
            target = isize::MAX as usize;
        }

        let layout = Self::layout(target);
        let allocation = if !self.is_allocated() {
            // Safety:
            // The layout has non-zero size because `target > 0`.
            alloc::alloc::alloc(layout)
        } else {
            // Safety:
            // - `self.ptr` was allocated before (just checked)
            // - the old block was allocated with the current capacity
            // - `Self::size()` guarantees to return a value that is `> 0`
            //   and rounded up to the nearest multiple of `Self::align()`
            //   does not overflow `isize::MAX`.
            alloc::alloc::realloc(
                self.allocation_mut(),
                Self::layout(self.capacity()),
                Self::size(target),
            )
        };

        if allocation.is_null() {
            alloc::alloc::handle_alloc_error(layout);
        }

        // Construct data pointer by offsetting.
        //
        // Safety:
        // Just checked for null and adding only increases the size. Can't
        // overflow because the `allocation` is a valid pointer to
        // `Self::size(target)` bytes and `Self::offset() < Self::size(target)`.
        self.ptr = NonNull::new_unchecked(allocation.add(Self::offset()).cast());
        debug_assert_ne!(self.ptr, Self::dangling());

        // Safety:
        // The freshly allocated pointer is valid for a write of the header.
        ptr::write(
            allocation.cast::<Header>(),
            Header { refs: AtomicUsize::new(1), capacity: target },
        );
    }

    /// Whether this vector has a backing allocation.
    #[inline]
    fn is_allocated(&self) -> bool {
        // Compare against the sentinel; allocated pointers are always distinct
        // from `dangling()` (see its docs).
        !ptr::eq(self.ptr.as_ptr(), Self::dangling().as_ptr())
    }

    /// An immutable pointer to the backing allocation.
    ///
    /// May only be called if `is_allocated` returns `true`.
    #[inline]
    unsafe fn allocation(&self) -> *const u8 {
        debug_assert!(self.is_allocated());
        self.ptr.as_ptr().cast::<u8>().sub(Self::offset())
    }

    /// A mutable pointer to the backing allocation.
    ///
    /// May only be called if `is_allocated` returns `true`.
    #[inline]
    unsafe fn allocation_mut(&mut self) -> *mut u8 {
        debug_assert!(self.is_allocated());
        self.ptr.as_ptr().cast::<u8>().sub(Self::offset())
    }

    /// A reference to the header.
    #[inline]
    fn header(&self) -> Option<&Header> {
        // Safety:
        // If the vector is allocated, there is always a valid header.
        self.is_allocated()
            .then(|| unsafe { &*self.allocation().cast::<Header>() })
    }

    /// The data pointer.
    ///
    /// Returns a pointer that is non-null, well-aligned, and valid for `len`
    /// reads of `T`.
    #[inline]
    fn data(&self) -> *const T {
        self.ptr.as_ptr()
    }

    /// The data pointer, mutably.
    ///
    /// Returns a pointer that is non-null, well-aligned, and valid for
    /// `capacity` writes of `T`.
    ///
    /// May only be called if the reference count is 1.
    #[inline]
    unsafe fn data_mut(&mut self) -> *mut T {
        self.ptr.as_ptr()
    }

    /// The layout of a backing allocation for the given capacity.
    #[inline]
    fn layout(capacity: usize) -> Layout {
        // Safety:
        // - `Self::size(capacity)` guarantees that it rounded up the alignment
        //   does not overflow `isize::MAX`.
        // - Since `Self::align()` is the header's alignment or T's alignment,
        //   it fulfills the requirements of a valid alignment.
        unsafe { Layout::from_size_align_unchecked(Self::size(capacity), Self::align()) }
    }

    /// The size of a backing allocation for the given capacity.
    ///
    /// Always `> 0`. When rounded up to the next multiple of `Self::align()` is
    /// guaranteed to be `<= isize::MAX`.
    #[inline]
    fn size(capacity: usize) -> usize {
        mem::size_of::<T>()
            .checked_mul(capacity)
            .and_then(|size| Self::offset().checked_add(size))
            .filter(|&size| {
                // See `Layout::max_size_for_align` for details.
                size < isize::MAX as usize - Self::align()
            })
            .unwrap_or_else(|| capacity_overflow())
    }

    /// The alignment of the backing allocation.
    #[inline]
    const fn align() -> usize {
        max(mem::align_of::<Header>(), mem::align_of::<T>())
    }

    /// The offset of the data in the backing allocation.
    ///
    /// Always `> 0`. `self.ptr` points to the data and `self.ptr - offset` to
    /// the header.
    #[inline]
    const fn offset() -> usize {
        max(mem::size_of::<Header>(), Self::align())
    }

    /// The sentinel value of `self.ptr`, used to indicate an uninitialized,
    /// unallocated vector. It is dangling (does not point to valid memory) and
    /// has no provenance. As such, it must not be used to read/write/offset.
    /// However, it is well-aligned, so it can be used to create 0-length
    /// slices.
    ///
    /// All pointers to allocated vector elements will be distinct from this
    /// value, because allocated vector elements start `Self::offset()` bytes
    /// into a heap allocation and heap allocations cannot start at 0 (null).
    #[inline]
    const fn dangling() -> NonNull<T> {
        unsafe {
            // Safety: This is the stable equivalent of `core::ptr::invalid_mut`.
            // The pointer we create has no provenance and may not be
            // read/write/offset.
            #[allow(clippy::useless_transmute)]
            let ptr = mem::transmute::<usize, *mut T>(Self::offset());

            // Safety: `Self::offset()` is never 0.
            NonNull::new_unchecked(ptr)
        }
    }

    /// The minimum non-zero capacity.
    #[inline]
    const fn min_cap() -> usize {
        // In the spirit of the `EcoVec`, we choose the cutoff size of T from
        // which 1 is the minimum capacity a bit lower than a standard `Vec`.
        if mem::size_of::<T>() == 1 {
            8
        } else if mem::size_of::<T>() <= 32 {
            4
        } else {
            1
        }
    }
}
721
722impl<T: Clone> EcoVec<T> {
723 /// Ensure that this vector has a unique backing allocation.
724 ///
725 /// May change the capacity.
726 fn make_unique(&mut self) {
727 if !self.is_unique() {
728 *self = Self::from(self.as_slice());
729 }
730 }
731}
732
impl EcoVec<u8> {
    /// Copies from a byte slice.
    #[inline]
    pub(crate) fn extend_from_byte_slice(&mut self, bytes: &[u8]) {
        if bytes.is_empty() {
            return;
        }

        // Ensures unique ownership and `capacity >= len + bytes.len()`.
        self.reserve(bytes.len());

        unsafe {
            // Safety:
            // - The source slice is valid for `bytes.len()` reads.
            // - The destination is valid for `bytes.len()` more writes due to
            //   the `reserve` call.
            // - The two ranges are non-overlapping because we hold a mutable
            //   reference to `self` and an immutable one to `bytes`.
            ptr::copy_nonoverlapping(
                bytes.as_ptr(),
                self.data_mut().add(self.len),
                bytes.len(),
            );
        }

        // Maintains `len <= capacity` thanks to the `reserve` call above.
        self.len += bytes.len();
    }
}
760
// Safety: Works like `Arc`. The backing allocation is shared across clones
// and only mutated when the ref-count is 1, so the same `T: Sync + Send`
// bounds as for `Arc<[T]>` apply.
unsafe impl<T: Sync + Send> Sync for EcoVec<T> {}
unsafe impl<T: Sync + Send> Send for EcoVec<T> {}
764
impl<T> EcoVec<T> {
    /// Whether no other vector is pointing to the same backing allocation.
    ///
    /// This takes a mutable reference because only callers with ownership or a
    /// mutable reference can ensure that the result stays relevant. Potential
    /// callers with a shared reference could read `true` while another shared
    /// reference is cloned on a different thread, bumping the ref-count. By
    /// restricting this to callers with mutable access, we ensure that no
    /// uncontrolled cloning is happening in the time between the `is_unique`
    /// call and any subsequent mutation.
    #[inline]
    pub fn is_unique(&mut self) -> bool {
        // See Arc's is_unique() method. Unallocated vectors are trivially
        // unique.
        self.header().map_or(true, |header| header.refs.load(Acquire) == 1)
    }
}
781
impl<T: Clone> Clone for EcoVec<T> {
    #[inline]
    fn clone(&self) -> Self {
        // If the vector has a backing allocation, bump the ref-count.
        if let Some(header) = self.header() {
            // See Arc's clone impl for details about memory ordering.
            let prev = header.refs.fetch_add(1, Relaxed);

            // See Arc's clone impl for details about guarding against
            // incredibly degenerate programs that overflow the ref-count.
            if prev > isize::MAX as usize {
                ref_count_overflow(self.ptr, self.len);
            }
        }

        // Share the same pointer and length; this is the cheap O(1) clone.
        Self { ptr: self.ptr, len: self.len, phantom: PhantomData }
    }
}
799
impl<T> Drop for EcoVec<T> {
    fn drop(&mut self) {
        // Drop our ref-count. If there was more than one vector before
        // (including this one), we shouldn't deallocate. Nothing to do if there
        // is no header and thus no backing allocation. See Arc's drop impl for
        // details about memory ordering.
        if self
            .header()
            .map_or(true, |header| header.refs.fetch_sub(1, Release) != 1)
        {
            return;
        }

        // See Arc's drop impl for details.
        atomic::fence(Acquire);

        // Ensures that the backing storage is deallocated even if one of the
        // element drops panics.
        struct Dealloc(*mut u8, Layout);

        impl Drop for Dealloc {
            fn drop(&mut self) {
                // Safety: See below.
                unsafe {
                    alloc::alloc::dealloc(self.0, self.1);
                }
            }
        }

        // Safety:
        // The vector has a header, so `self.allocation()` points to an
        // allocation with the layout of current capacity.
        let _dealloc =
            unsafe { Dealloc(self.allocation_mut(), Self::layout(self.capacity())) };

        unsafe {
            // Safety:
            // No other vector references the backing allocation (just checked).
            // For more details, see `Self::as_slice()`.
            ptr::drop_in_place(ptr::slice_from_raw_parts_mut(self.data_mut(), self.len));
        }
    }
}
843
844impl<T> Deref for EcoVec<T> {
845 type Target = [T];
846
847 #[inline]
848 fn deref(&self) -> &Self::Target {
849 self.as_slice()
850 }
851}
852
853impl<T> Borrow<[T]> for EcoVec<T> {
854 #[inline]
855 fn borrow(&self) -> &[T] {
856 self.as_slice()
857 }
858}
859
860impl<T> AsRef<[T]> for EcoVec<T> {
861 #[inline]
862 fn as_ref(&self) -> &[T] {
863 self.as_slice()
864 }
865}
866
867impl<T> Default for EcoVec<T> {
868 #[inline]
869 fn default() -> Self {
870 Self::new()
871 }
872}
873
874impl<T: Debug> Debug for EcoVec<T> {
875 #[inline]
876 fn fmt(&self, f: &mut Formatter) -> fmt::Result {
877 self.as_slice().fmt(f)
878 }
879}
880
881impl<T: Hash> Hash for EcoVec<T> {
882 #[inline]
883 fn hash<H: Hasher>(&self, state: &mut H) {
884 self.as_slice().hash(state);
885 }
886}
887
888impl<T: Eq> Eq for EcoVec<T> {}
889
890impl<T: PartialEq> PartialEq for EcoVec<T> {
891 #[inline]
892 fn eq(&self, other: &Self) -> bool {
893 self.as_slice() == other.as_slice()
894 }
895}
896
897impl<T: PartialEq> PartialEq<[T]> for EcoVec<T> {
898 #[inline]
899 fn eq(&self, other: &[T]) -> bool {
900 self.as_slice() == other
901 }
902}
903
904impl<T: PartialEq> PartialEq<&[T]> for EcoVec<T> {
905 #[inline]
906 fn eq(&self, other: &&[T]) -> bool {
907 self.as_slice() == *other
908 }
909}
910
911impl<T: PartialEq, const N: usize> PartialEq<[T; N]> for EcoVec<T> {
912 #[inline]
913 fn eq(&self, other: &[T; N]) -> bool {
914 self.as_slice() == other
915 }
916}
917
918impl<T: PartialEq, const N: usize> PartialEq<&[T; N]> for EcoVec<T> {
919 #[inline]
920 fn eq(&self, other: &&[T; N]) -> bool {
921 self.as_slice() == *other
922 }
923}
924
925impl<T: PartialEq> PartialEq<Vec<T>> for EcoVec<T> {
926 #[inline]
927 fn eq(&self, other: &Vec<T>) -> bool {
928 self.as_slice() == other
929 }
930}
931
932impl<T: PartialEq> PartialEq<EcoVec<T>> for [T] {
933 #[inline]
934 fn eq(&self, other: &EcoVec<T>) -> bool {
935 self == other.as_slice()
936 }
937}
938
939impl<T: PartialEq, const N: usize> PartialEq<EcoVec<T>> for [T; N] {
940 #[inline]
941 fn eq(&self, other: &EcoVec<T>) -> bool {
942 self == other.as_slice()
943 }
944}
945
946impl<T: PartialEq> PartialEq<EcoVec<T>> for Vec<T> {
947 #[inline]
948 fn eq(&self, other: &EcoVec<T>) -> bool {
949 self == other.as_slice()
950 }
951}
952
953impl<T: Ord> Ord for EcoVec<T> {
954 #[inline]
955 fn cmp(&self, other: &Self) -> Ordering {
956 self.as_slice().cmp(other.as_slice())
957 }
958}
959
960impl<T: PartialOrd> PartialOrd for EcoVec<T> {
961 #[inline]
962 fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
963 self.as_slice().partial_cmp(other.as_slice())
964 }
965}
966
967impl<T: Clone> From<&[T]> for EcoVec<T> {
968 fn from(slice: &[T]) -> Self {
969 let mut vec = Self::new();
970 vec.extend_from_slice(slice);
971 vec
972 }
973}
974
impl<T: Clone, const N: usize> From<[T; N]> for EcoVec<T> {
    /// Moves the array's elements into a new vector.
    fn from(array: [T; N]) -> Self {
        let mut vec = Self::new();
        unsafe {
            // Safety: Array's IntoIter implements `TrustedLen`, so its
            // `ExactSizeIterator::len` is exact, as `extend_from_trusted`
            // requires.
            vec.extend_from_trusted(array);
        }
        vec
    }
}
985
impl<T: Clone> From<Vec<T>> for EcoVec<T> {
    /// This needs to allocate to change the layout.
    fn from(other: Vec<T>) -> Self {
        let mut vec = Self::new();
        unsafe {
            // Safety: Vec's IntoIter implements `TrustedLen`, so its
            // `ExactSizeIterator::len` is exact, as `extend_from_trusted`
            // requires.
            vec.extend_from_trusted(other);
        }
        vec
    }
}
997
impl<T: Clone, const N: usize> TryFrom<EcoVec<T>> for [T; N] {
    type Error = EcoVec<T>;

    /// Converts into an array of exactly `N` elements, returning the vector
    /// unchanged as the error if the length doesn't match.
    fn try_from(mut vec: EcoVec<T>) -> Result<Self, Self::Error> {
        if vec.len() != N {
            return Err(vec);
        }

        Ok(if vec.is_unique() {
            // Set the length to zero to prevent double drop.
            vec.len = 0;

            // Safety:
            // - We have unique ownership over the underlying allocation.
            // - The pointer returned by `data()` is valid for `N` reads.
            // - We take ownership of the value and don't drop it again
            //   in our drop impl.
            unsafe { ptr::read(vec.data() as *const [T; N]) }
        } else {
            // Shared: clone each element instead of moving.
            // Safety: We know that the length is correct.
            unsafe { array::from_fn(|i| vec.get_unchecked(i).clone()) }
        })
    }
}
1022
1023impl<T: Clone> FromIterator<T> for EcoVec<T> {
1024 fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
1025 let iter = iter.into_iter();
1026 let hint = iter.size_hint().0;
1027 let mut vec = Self::with_capacity(hint);
1028 vec.extend(iter);
1029 vec
1030 }
1031}
1032
1033impl<T: Clone> Extend<T> for EcoVec<T> {
1034 fn extend<I>(&mut self, iter: I)
1035 where
1036 I: IntoIterator<Item = T>,
1037 {
1038 let iter = iter.into_iter();
1039 let hint = iter.size_hint().0;
1040 if hint > 0 {
1041 self.reserve(hint);
1042 }
1043 for value in iter {
1044 self.push(value);
1045 }
1046 }
1047}
1048
1049impl<'a, T> IntoIterator for &'a EcoVec<T> {
1050 type IntoIter = core::slice::Iter<'a, T>;
1051 type Item = &'a T;
1052
1053 #[inline]
1054 fn into_iter(self) -> Self::IntoIter {
1055 self.as_slice().iter()
1056 }
1057}
1058
1059impl<T: Clone> IntoIterator for EcoVec<T> {
1060 type IntoIter = IntoIter<T>;
1061 type Item = T;
1062
1063 #[inline]
1064 fn into_iter(mut self) -> Self::IntoIter {
1065 IntoIter {
1066 unique: self.is_unique(),
1067 front: 0,
1068 back: self.len,
1069 vec: self,
1070 }
1071 }
1072}
1073
/// An owned iterator over an [`EcoVec`].
///
/// If the vector had a reference count of 1, this moves out of the vector,
/// otherwise it lazily clones.
pub struct IntoIter<T> {
    /// The underlying vector. Keeps the allocation (and, in the shared
    /// case, the original elements) alive while iterating.
    vec: EcoVec<T>,
    /// Whether we have unique ownership over the underlying allocation.
    /// Decides between moving elements out (`true`) and cloning (`false`).
    unique: bool,
    /// Index of the next element to yield from the front; everything in
    /// `..front` has already been read.
    /// If `unique` is true, these must not be dropped in our drop impl!
    ///
    /// Invariant: `0 <= front <= back`.
    front: usize,
    /// Exclusive end index of the remaining elements; everything in
    /// `back..len` has already been read from the back.
    /// If `unique` is true, these must not be dropped in our drop impl!
    ///
    /// Invariant: `0 <= back <= len`.
    back: usize,
}
1094
1095impl<T> IntoIter<T> {
1096 /// Returns the remaining items of this iterator as a slice.
1097 #[inline]
1098 pub fn as_slice(&self) -> &[T] {
1099 unsafe {
1100 // Safety:
1101 // - The pointer returned by `data()` is valid for `len` reads.
1102 // - Since `front <= back <= len`, `data() + front` is valid for
1103 // `back - front` reads.
1104 // - For more details, see `EcoVec::as_slice`.
1105 core::slice::from_raw_parts(
1106 self.vec.data().add(self.front),
1107 self.back - self.front,
1108 )
1109 }
1110 }
1111}
1112
impl<T: Clone> Iterator for IntoIter<T> {
    type Item = T;

    /// Yields the next front element: moved out if the allocation is
    /// uniquely owned, cloned otherwise.
    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        (self.front < self.back).then(|| {
            // Remember the index to read, then step past it.
            let prev = self.front;
            self.front += 1;
            if self.unique {
                // Safety:
                // - We have unique ownership over the underlying allocation.
                // - The pointer returned by `data()` is valid for `len` reads.
                // - We know that `prev < self.back <= len`.
                // - We take ownership of the value and don't drop it again
                //   in our drop impl.
                unsafe { ptr::read(self.vec.data().add(prev)) }
            } else {
                // Safety:
                // - We know that `prev < self.back <= len`.
                unsafe { self.vec.get_unchecked(prev).clone() }
            }
        })
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // Exact hint: the number of remaining elements is always known.
        let len = self.back - self.front;
        (len, Some(len))
    }

    #[inline]
    fn count(self) -> usize {
        // `len()` from `ExactSizeIterator`; no need to walk the elements.
        self.len()
    }
}
1148
impl<T: Clone> DoubleEndedIterator for IntoIter<T> {
    /// Yields the next back element: moved out if the allocation is
    /// uniquely owned, cloned otherwise.
    #[inline]
    fn next_back(&mut self) -> Option<Self::Item> {
        (self.back > self.front).then(|| {
            // Shrink the remaining range first; the element at the new
            // `back` is the one we yield.
            self.back -= 1;
            if self.unique {
                // Safety:
                // - We have unique ownership over the underlying allocation.
                // - The pointer returned by `data()` is valid for `len` reads.
                // - We know that `self.back < len` at this point.
                // - We take ownership of the value and don't drop it again
                //   in our drop impl.
                unsafe { ptr::read(self.vec.data().add(self.back)) }
            } else {
                // Safety:
                // - Due to the subtraction, `self.back < len` at this point.
                unsafe { self.vec.get_unchecked(self.back).clone() }
            }
        })
    }
}
1170
1171impl<T: Clone> ExactSizeIterator for IntoIter<T> {}
1172
impl<T> Drop for IntoIter<T> {
    /// Drops the not-yet-yielded elements if (and only if) this iterator
    /// was moving out of a uniquely owned allocation.
    fn drop(&mut self) {
        // If the allocation is shared, elements were only cloned out, so
        // the `EcoVec` held in `self.vec` stays responsible for them; and
        // without an allocation there is nothing to drop at all.
        if !self.unique || !self.vec.is_allocated() {
            return;
        }

        // Safety:
        // We have unique ownership over the underlying allocation.
        unsafe {
            // Set len to zero before dropping to prevent double dropping in
            // EcoVec's drop impl in case of panic.
            self.vec.len = 0;

            // Safety:
            // - The elements in `..self.front` and `self.back..` have
            //   already been moved out of the vector. Thus, we only drop
            //   the elements that remain in the middle.
            // - For details about the slicing, see `Self::as_slice()`.
            ptr::drop_in_place(ptr::slice_from_raw_parts_mut(
                self.vec.data_mut().add(self.front),
                self.back - self.front,
            ));
        }
    }
}
1199
1200impl<T: Debug> Debug for IntoIter<T> {
1201 #[inline]
1202 fn fmt(&self, f: &mut Formatter) -> fmt::Result {
1203 f.debug_tuple("IntoIter").field(&self.as_slice()).finish()
1204 }
1205}
1206
/// Panics because a requested capacity exceeded the allowed maximum.
///
/// Kept out-of-line (`#[cold]`) so the panic path does not bloat callers.
#[cold]
fn capacity_overflow() -> ! {
    panic!("capacity overflow");
}
1211
/// Panics after undoing a reference-count increment.
///
/// Reassembles an `EcoVec` from the raw parts purely so that its drop impl
/// runs and decrements the count again.
#[cold]
fn ref_count_overflow<T>(ptr: NonNull<T>, len: usize) -> ! {
    // Drop to decrement the ref count to counter the increment in `clone()`
    drop(EcoVec { ptr, len, phantom: PhantomData });
    panic!("reference count overflow");
}
1218
/// Panics with an out-of-bounds message for the given index and length.
///
/// Kept out-of-line (`#[cold]`) so the panic path does not bloat callers.
#[cold]
fn out_of_bounds(index: usize, len: usize) -> ! {
    // Message wording fixed: it previously read "index is out bounds".
    panic!("index is out of bounds (index: {index}, len: {len})");
}
1223
/// Copy of `std::cmp::max::<usize>()` that is callable in `const` contexts.
#[inline]
const fn max(x: usize, y: usize) -> usize {
    if x < y {
        y
    } else {
        x
    }
}
1233
#[cfg(feature = "std")]
impl std::io::Write for EcoVec<u8> {
    /// Appends the whole buffer to the vector; this never fails.
    #[inline]
    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
        let amount = buf.len();
        self.extend_from_byte_slice(buf);
        Ok(amount)
    }

    /// Nothing is buffered, so flushing is a no-op.
    #[inline]
    fn flush(&mut self) -> std::io::Result<()> {
        Ok(())
    }
}
1247
#[cfg(feature = "serde")]
mod serde {
    use crate::EcoVec;
    use core::{fmt, marker::PhantomData};
    use serde::de::{Deserializer, Visitor};

    impl<T: serde::Serialize> serde::Serialize for EcoVec<T> {
        /// Serializes the vector as a sequence of its elements.
        fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
        where
            S: serde::Serializer,
        {
            self.as_slice().serialize(serializer)
        }
    }

    /// Visitor that collects a serde sequence into an [`EcoVec`].
    struct EcoVecVisitor<T>(PhantomData<T>);

    impl<'a, T: serde::Deserialize<'a> + Clone> Visitor<'a> for EcoVecVisitor<T> {
        type Value = EcoVec<T>;

        fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
            formatter.write_str("a sequence")
        }

        fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
        where
            A: serde::de::SeqAccess<'a>,
        {
            // The size hint stems from untrusted input, so cap the upfront
            // allocation: a malicious hint in a tiny payload must not be
            // able to trigger a huge allocation before any elements have
            // actually been deserialized. The vector still grows as needed
            // while real elements arrive.
            let hint = seq.size_hint().unwrap_or(0).min(4096);
            let mut values = EcoVec::with_capacity(hint);
            while let Some(value) = seq.next_element()? {
                values.push(value);
            }
            Ok(values)
        }
    }

    impl<'de, T: serde::Deserialize<'de> + Clone> serde::Deserialize<'de> for EcoVec<T> {
        fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
        where
            D: Deserializer<'de>,
        {
            deserializer.deserialize_seq(EcoVecVisitor(PhantomData))
        }
    }
}