diff --git a/library/alloc/src/raw_rc.rs b/library/alloc/src/raw_rc.rs
index 3d3cdce64a79b..884d2be3910f7 100644
--- a/library/alloc/src/raw_rc.rs
+++ b/library/alloc/src/raw_rc.rs
@@ -1,3 +1,24 @@
+//! Base implementation for `rc::{Rc, Weak}` and `sync::{Arc, Weak}`.
+//!
+//! The memory layout of a reference counted allocation is designed so that the reference counts
+//! have fixed offsets to the value. In this way, many operations on reference counted pointers can
+//! share the same code in order to reduce binary size.
+//!
+//! This is done by using a layout like the following structure:
+//!
+//! ```ignore (illustrative)
+//! #[repr(C)]
+//! struct RcAllocation<T> {
+//!     padding: MaybeUninit<[u8; const { align_of::<T>().saturating_sub(size_of::<RefCounts>()) }]>,
+//!     ref_counts: RefCounts,
+//!     value: T,
+//! }
+//! ```
+//!
+//! In this way, for all `T`:
+//! `mem::offset_of!(RcAllocation<T>, value) - mem::offset_of!(RcAllocation<T>, ref_counts)`
+//! always equals `size_of::<RefCounts>()`.
+
 use core::alloc::{AllocError, Allocator, Layout, LayoutError};
 use core::any::Any;
 use core::cell::UnsafeCell;
@@ -27,55 +48,124 @@ use crate::string::String;
 #[cfg(not(no_global_oom_handling))]
 use crate::vec::Vec;

-pub trait RcOps {
+/// A trait for the `rc` and `sync` modules to inject their concrete implementations of reference
+/// count operations.
+pub unsafe trait RcOps: Sized {
+    /// Increment a strong or weak reference count. Used by `{RawRc,RawWeak}::clone`.
+    ///
+    /// # Safety
+    ///
+    /// - `count` should only be handled by the same `RcOps` implementation.
+    /// - The value of `count` should be non-zero.
     unsafe fn increment_ref_count(count: &UnsafeCell<usize>);
+
+    /// Decrement a strong or weak reference count, returning whether the reference count becomes
+    /// zero after decrementing. Used by `{RawRc,RawWeak}::drop`.
+    ///
+    /// # Safety
+    ///
+    /// - `count` should only be handled by the same `RcOps` implementation.
+    /// - The value of `count` should be non-zero.
     unsafe fn decrement_ref_count(count: &UnsafeCell<usize>) -> bool;

+    /// Increment `strong_count` if and only if `strong_count` is non-zero, returning whether the
+    /// increment was performed. Used by `RawWeak::upgrade`.
+    ///
+    /// # Safety
+    ///
+    /// - `strong_count` should only be handled by the same `RcOps` implementation.
     unsafe fn upgrade(strong_count: &UnsafeCell<usize>) -> bool;
+
+    /// Increment `weak_count`. This is required instead of `increment_ref_count` because `Arc`
+    /// requires additional synchronization with `is_unique`.
+    ///
+    /// # Safety
+    ///
+    /// - `weak_count` should only be handled by the same `RcOps` implementation.
+    /// - The caller should provide a `weak_count` value from a `RawRc` object.
     unsafe fn downgrade(weak_count: &UnsafeCell<usize>);

+    /// Decrement `strong_count` if and only if `strong_count` is 1, returning `true` if the
+    /// decrement was performed. Used by `RawRc::try_unwrap`.
+    ///
+    /// # Safety
+    ///
+    /// - `strong_count` should only be handled by the same `RcOps` implementation.
     unsafe fn lock_strong_count(strong_count: &UnsafeCell<usize>) -> bool;
+
+    /// Set `strong_count` to 1.
+    ///
+    /// # Safety
+    ///
+    /// - `strong_count` should only be handled by the same `RcOps` implementation.
+    /// - `strong_count` is 0.
     unsafe fn unlock_strong_count(strong_count: &UnsafeCell<usize>);

+    /// Returns whether both `strong_count` and `weak_count` are 1. Used by `RawRc::get_mut`.
+    ///
+    /// # Safety
+    ///
+    /// - Both `strong_count` and `weak_count` should only be handled by the same `RcOps`
+    ///   implementation.
     unsafe fn is_unique(strong_count: &UnsafeCell<usize>, weak_count: &UnsafeCell<usize>) -> bool;

+    /// Makes the `RawRc` wrapped by `make_mut` the sole owner of its value:
+    ///
+    /// - If both strong count and weak count are 1, nothing will be done because the caller is
+    ///   already the sole owner of the value.
+    /// - If strong count is 1 and weak count is greater than 1, the implementor will first
+    ///   decrement both strong count and weak count, then `MakeMut::by_move` will be called
+    ///   to notify the caller that moving is needed in order to make the caller the sole owner.
+    /// - If strong count is greater than 1, `MakeMut::by_clone` will be called to notify the
+    ///   caller that cloning is needed in order to make the caller the sole owner.
+    ///
+    /// # Safety
+    ///
+    /// - The reference counts in `MakeMut` should only be handled by the same `RcOps`
+    ///   implementation.
     #[cfg(not(no_global_oom_handling))]
-    unsafe fn make_unique(rc: &mut RawRc, by_clone: F, by_move: G)
+    unsafe fn make_mut<T, A>(make_mut: MakeMut<'_, T, A, Self>)
     where
-        T: ?Sized,
-        F: FnOnce(&mut RawRc),
-        G: FnOnce(&mut RawRc);
+        T: CloneToUninit + ?Sized,
+        A: Allocator;
 }

+/// Stores the strong and weak reference counts of a shared value.
 pub struct RefCounts {
+    /// Weak reference count (plus one if the strong reference count is non-zero).
     pub weak: UnsafeCell<usize>,
+    /// Strong reference count.
     pub strong: UnsafeCell<usize>,
 }

 impl RefCounts {
+    /// Creates a `RefCounts` with a weak count of `1` and a strong count of `strong_count`.
     pub const fn new(strong_cont: usize) -> Self {
         Self { weak: UnsafeCell::new(1), strong: UnsafeCell::new(strong_cont) }
     }
 }

-const _: () = assert!(RefCounts::LAYOUT.size().is_power_of_two());
-
+/// Describes the allocation of a reference counted value.
 struct RcLayout {
+    /// The layout of the allocation.
     allocation_layout: Layout,
-    allocation_offset_bytes: usize,
+    /// The offset of the value from the beginning of the allocation.
+    value_offset_bytes: usize,
 }

 impl RcLayout {
     const fn from_value_layout(value_layout: Layout) -> Result<Self, LayoutError> {
         match RefCounts::LAYOUT.extend(value_layout) {
-            Ok((unaligned_allocation_layout, allocation_offset_bytes)) => Ok(Self {
-                allocation_layout: unaligned_allocation_layout.pad_to_align(),
-                allocation_offset_bytes,
-            }),
+            Ok((unaligned_allocation_layout, value_offset_bytes)) => {
+                Ok(Self { allocation_layout: unaligned_allocation_layout, value_offset_bytes })
+            }
             Err(error) => Err(error),
         }
     }

+    /// # Safety
+    ///
+    /// - `RcLayout::from_value_layout(value_layout)` must return `Ok(...)`.
     const unsafe fn from_value_layout_unchecked(value_layout: Layout) -> Self {
         match Self::from_value_layout(value_layout) {
             Ok(rc_layout) => rc_layout,
@@ -91,6 +181,10 @@ impl RcLayout {
         Self::from_value_layout(Layout::for_value(value_ref))
     }

+    /// # Safety
+    ///
+    /// - `value_ptr` points to a value that is contained in a reference counted allocation.
+    /// - `value_ptr` contains correct metadata for the memory layout of `T`.
     const unsafe fn from_value_ptr_unchecked<T>(value_ptr: NonNull<T>) -> Self
     where
         T: ?Sized,
@@ -147,7 +241,7 @@ unsafe fn init_rc_allocation(
     rc_layout: &RcLayout,
 ) -> NonNull<()> {
     let allocation_ptr = allocation_ptr.cast::<()>();
-    let value_ptr = unsafe { allocation_ptr.byte_add(rc_layout.allocation_offset_bytes) };
+    let value_ptr = unsafe { allocation_ptr.byte_add(rc_layout.value_offset_bytes) };
     let ref_counts = const { RefCounts::new(STRONG_COUNT) };

     unsafe { ref_counts_ptr_from_value_ptr(value_ptr).write(ref_counts) };
@@ -239,6 +333,8 @@ where
     }
 }

+/// Allocate a memory block for storing a reference counted value according to `rc_layout` and
+/// initialize the value with `f`. If `f` panics, the allocated memory will be deallocated.
 #[cfg(not(no_global_oom_handling))]
 fn allocate_for_rc_with(
     alloc: &A,
@@ -277,6 +373,10 @@ where
     ptr
 }

+/// # Safety
+///
+/// - The memory starting at `ptr` must be valid for reading `size` bytes.
+/// - `rc_layout` must have enough space for storing a value of `size` bytes.
 #[cfg(not(no_global_oom_handling))]
 unsafe fn allocate_for_rc_with_bytes(
     alloc: &A,
@@ -341,12 +441,16 @@ where
 {
     unsafe {
         alloc.deallocate(
-            ptr.cast().byte_sub(rc_layout.allocation_offset_bytes),
+            ptr.cast().byte_sub(rc_layout.value_offset_bytes),
             rc_layout.allocation_layout,
         );
     }
 }

+fn is_dangling(ptr: NonNull<()>) -> bool {
+    ptr.addr() == NonZeroUsize::MAX
+}
+
 struct GuardedWeak<'a, T, A, R>
 where
     T: ?Sized,
@@ -411,6 +515,69 @@ where
     }
 }

+#[cfg(not(no_global_oom_handling))]
+pub struct MakeMut<'a, T, A, R>
+where
+    T: ?Sized,
+{
+    rc: &'a mut RawRc<T, A>,
+    _phantom_data: PhantomData<R>,
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl<'a, T, A, R> MakeMut<'a, T, A, R>
+where
+    T: ?Sized,
+{
+    pub fn ref_counts(&self) -> &RefCounts {
+        self.rc.ref_counts()
+    }
+
+    /// # Safety
+    ///
+    /// - The strong count in the `RefCounts` has been set to 0.
+ pub unsafe fn by_move(self) + where + A: Allocator, + R: RcOps, + { + let (ptr_ref, alloc) = self.rc.borrow_raw_parts(); + let old_ptr = *ptr_ref; + + unsafe { + let mut weak = RawWeak::from_raw_parts(old_ptr, &*alloc); + let guard = GuardedWeak::::new(&mut weak); + let new_ptr = allocate_for_rc_with_value_unchecked::(alloc, old_ptr.as_ref()); + + *ptr_ref = new_ptr; + + drop(guard); + } + } + + pub fn by_clone(self) + where + T: CloneToUninit, + A: Allocator, + R: RcOps, + { + let (ptr_ref, alloc) = self.rc.borrow_raw_parts(); + let old_ptr = *ptr_ref; + + unsafe { + let rc_layout = RcLayout::from_value_ptr_unchecked(old_ptr); + + let new_ptr = allocate_for_rc_with::(alloc, &rc_layout, |new_ptr| { + T::clone_to_uninit(old_ptr.as_ref(), new_ptr.as_ptr().cast()); + }); + + *ptr_ref = NonNull::new_unchecked(new_ptr.as_ptr().with_metadata_of(old_ptr.as_ptr())); + + RawRc::from_raw_parts(old_ptr, &*alloc).drop::(); + } + } +} + pub struct RawWeak where T: ?Sized, @@ -480,20 +647,23 @@ where A: Clone, R: RcOps, { - unsafe { - if !self.is_dangling() { - R::increment_ref_count(self.weak_count_unchecked()); + unsafe fn inner(ptr: NonNull<()>, alloc: &A) -> A + where + A: Clone, + R: RcOps, + { + if !is_dangling(ptr) { + unsafe { R::increment_ref_count(weak_count_ptr_from_value_ptr(ptr).as_ref()) }; } - self.clone_without_increment_weak_count() + alloc.clone() } - } - unsafe fn clone_without_increment_weak_count(&self) -> Self - where - A: Clone, - { - unsafe { Self::from_raw_parts(self.ptr, self.alloc.clone()) } + unsafe { + let alloc = inner::(self.ptr.cast(), &self.alloc); + + Self::from_raw_parts(self.ptr, alloc) + } } pub unsafe fn drop(&mut self) @@ -501,7 +671,7 @@ where A: Allocator, R: RcOps, { - if !self.is_dangling() { + if !is_dangling(self.ptr.cast()) { unsafe { self.drop_unchecked::() }; } } @@ -532,10 +702,6 @@ where (self.ptr, self.alloc) } - pub fn is_dangling(&self) -> bool { - self.ptr.addr() == NonZeroUsize::MAX - } - pub fn ptr_eq(&self, other: &Self) -> bool { ptr::addr_eq(self.ptr.as_ptr(), other.ptr.as_ptr()) } @@ -546,7 +712,7 @@ where #[cfg(not(no_sync))] pub fn ref_counts(&self) -> Option<&RefCounts> { - (!self.is_dangling()).then(|| unsafe { self.ref_counts_unchecked() }) + (!is_dangling(self.ptr.cast())).then(|| unsafe { self.ref_counts_unchecked() }) } #[cfg(not(no_sync))] @@ -555,7 +721,7 @@ where } pub fn strong_count(&self) -> Option<&UnsafeCell> { - (!self.is_dangling()).then(|| unsafe { self.strong_count_unchecked() }) + (!is_dangling(self.ptr.cast())).then(|| unsafe { self.strong_count_unchecked() }) } unsafe fn strong_count_unchecked(&self) -> &UnsafeCell { @@ -563,7 +729,7 @@ where } pub fn weak_count(&self) -> Option<&UnsafeCell> { - (!self.is_dangling()).then(|| unsafe { self.weak_count_unchecked() }) + (!is_dangling(self.ptr.cast())).then(|| unsafe { self.weak_count_unchecked() }) } unsafe fn weak_count_unchecked(&self) -> &UnsafeCell { @@ -575,7 +741,7 @@ where A: Clone, R: RcOps, { - if self.is_dangling() { None } else { unsafe { self.upgrade_unchecked::() } } + if is_dangling(self.ptr.cast()) { None } else { unsafe { self.upgrade_unchecked::() } } } unsafe fn upgrade_unchecked(&self) -> Option> @@ -830,10 +996,21 @@ where A: Clone, R: RcOps, { + unsafe fn inner(ptr: NonNull<()>, alloc: &A) -> A + where + A: Clone, + R: RcOps, + { + unsafe { R::increment_ref_count(strong_count_ptr_from_value_ptr(ptr).as_ref()) }; + + alloc.clone() + } + unsafe { - R::increment_ref_count(self.strong_count()); + let ptr = self.as_ptr(); + let alloc = inner::(ptr.cast(), 
self.allocator()); - Self::from_weak(self.weak.clone_without_increment_weak_count()) + Self::from_raw_parts(ptr, alloc) } } @@ -860,10 +1037,21 @@ where A: Clone, R: RcOps, { + unsafe fn inner(ptr: NonNull<()>, alloc: &A) -> A + where + A: Clone, + R: RcOps, + { + unsafe { R::downgrade(weak_count_ptr_from_value_ptr(ptr).as_ref()) }; + + alloc.clone() + } + unsafe { - R::downgrade(self.weak_count()); + let ptr = self.as_ptr(); + let alloc = inner::(ptr.cast(), self.allocator()); - self.weak.clone_without_increment_weak_count() + RawWeak::from_raw_parts(ptr, alloc) } } @@ -909,47 +1097,6 @@ where self.weak.into_raw_parts() } - #[cfg(not(no_global_oom_handling))] - unsafe fn make_unique_by_clone(&mut self) - where - T: CloneToUninit, - A: Allocator, - R: RcOps, - { - let (ptr_ref, alloc) = self.borrow_raw_parts(); - let old_ptr = *ptr_ref; - - unsafe { - let rc_layout = RcLayout::from_value_ptr_unchecked(old_ptr); - - let new_ptr = allocate_for_rc_with::(alloc, &rc_layout, |new_ptr| { - T::clone_to_uninit(old_ptr.as_ref(), new_ptr.as_ptr().cast()); - }); - - *ptr_ref = NonNull::new_unchecked(new_ptr.as_ptr().with_metadata_of(old_ptr.as_ptr())); - - RawRc::from_raw_parts(old_ptr, &*alloc).drop::(); - } - } - - #[cfg(not(no_global_oom_handling))] - unsafe fn make_unique_by_move(&mut self) - where - A: Allocator, - R: RcOps, - { - let (ptr_ref, alloc) = self.borrow_raw_parts(); - let old_ptr = *ptr_ref; - - unsafe { - let new_ptr = allocate_for_rc_with_value_unchecked::(alloc, old_ptr.as_ref()); - - *ptr_ref = new_ptr; - - RawWeak::from_raw_parts(old_ptr, &*alloc).drop_unchecked::(); - } - } - #[cfg(not(no_global_oom_handling))] pub unsafe fn make_mut(&mut self) -> &mut T where @@ -958,11 +1105,7 @@ where R: RcOps, { unsafe { - R::make_unique( - self, - |this| this.make_unique_by_clone::(), - |this| this.make_unique_by_move::(), - ); + R::make_mut(MakeMut { rc: self, _phantom_data: PhantomData }); self.get_mut_unchecked() } diff --git a/library/alloc/src/rc.rs b/library/alloc/src/rc.rs index b0d6711233028..701a2262571f7 100644 --- a/library/alloc/src/rc.rs +++ b/library/alloc/src/rc.rs @@ -261,6 +261,8 @@ use crate::alloc::{AllocError, Allocator, Global}; use crate::borrow::{Cow, ToOwned}; #[cfg(not(no_global_oom_handling))] use crate::boxed::Box; +#[cfg(not(no_global_oom_handling))] +use crate::raw_rc::MakeMut; use crate::raw_rc::{self, RawRc, RawUniqueRc, RawWeak}; #[cfg(not(no_global_oom_handling))] use crate::string::String; @@ -281,7 +283,7 @@ where enum RcOps {} -impl raw_rc::RcOps for RcOps { +unsafe impl raw_rc::RcOps for RcOps { unsafe fn increment_ref_count(count: &UnsafeCell) { let count = unsafe { &mut *count.get() }; let strong = *count; @@ -349,22 +351,23 @@ impl raw_rc::RcOps for RcOps { } #[cfg(not(no_global_oom_handling))] - unsafe fn make_unique(rc: &mut RawRc, by_clone: F, by_move: G) + unsafe fn make_mut(make_mut: MakeMut<'_, T, A, Self>) where - T: ?Sized, - F: FnOnce(&mut RawRc), - G: FnOnce(&mut RawRc), + T: CloneToUninit + ?Sized, + A: Allocator, { - let strong_count = unsafe { &mut *rc.strong_count().get() }; + let ref_counts = make_mut.ref_counts(); + let strong_count = unsafe { &mut *ref_counts.strong.get() }; if *strong_count == 1 { - if unsafe { *rc.weak_count().get() } != 1 { + if unsafe { *ref_counts.weak.get() } != 1 { *strong_count = 0; - by_move(rc); + // SAFETY: We have set strong count to 0. 
+ unsafe { make_mut.by_move() }; } } else { - by_clone(rc); + make_mut.by_clone(); } } } diff --git a/library/alloc/src/sync.rs b/library/alloc/src/sync.rs index b51c7d009b9fd..7073686ebebe5 100644 --- a/library/alloc/src/sync.rs +++ b/library/alloc/src/sync.rs @@ -26,13 +26,13 @@ use core::sync::atomic::Ordering::{Acquire, Relaxed, Release}; use core::sync::atomic::{self, AtomicUsize}; use core::{borrow, fmt, hint, intrinsics}; -#[cfg(not(no_global_oom_handling))] -use crate::alloc::Layout; use crate::alloc::{AllocError, Allocator, Global}; use crate::borrow::{Cow, ToOwned}; #[cfg(not(no_global_oom_handling))] use crate::boxed::Box; #[cfg(not(no_global_oom_handling))] +use crate::raw_rc::MakeMut; +#[cfg(not(no_global_oom_handling))] use crate::raw_rc::RefCounts; use crate::raw_rc::{self, RawRc, RawWeak}; #[cfg(not(no_global_oom_handling))] @@ -82,7 +82,7 @@ macro_rules! acquire { enum RcOps {} -impl raw_rc::RcOps for RcOps { +unsafe impl raw_rc::RcOps for RcOps { unsafe fn increment_ref_count(count: &UnsafeCell) { let count = unsafe { AtomicUsize::from_ptr(count.get()) }; @@ -236,13 +236,12 @@ impl raw_rc::RcOps for RcOps { } #[cfg(not(no_global_oom_handling))] - unsafe fn make_unique(rc: &mut raw_rc::RawRc, by_clone: F, by_move: G) + unsafe fn make_mut(make_mut: MakeMut<'_, T, A, Self>) where - T: ?Sized, - F: FnOnce(&mut raw_rc::RawRc), - G: FnOnce(&mut raw_rc::RawRc), + T: CloneToUninit + ?Sized, + A: Allocator, { - let ref_counts = rc.ref_counts(); + let ref_counts = make_mut.ref_counts(); let strong_count = unsafe { AtomicUsize::from_ptr(ref_counts.strong.get()) }; let weak_count = unsafe { AtomicUsize::from_ptr(ref_counts.weak.get()) }; @@ -272,10 +271,11 @@ impl raw_rc::RcOps for RcOps { // usize::MAX (i.e., locked), since the weak count can only be // locked by a thread with a strong reference. - by_move(rc); + // SAFETY: We have set strong count to 0. + unsafe { make_mut.by_move() }; } } else { - by_clone(rc); + make_mut.by_clone(); } } } @@ -2832,10 +2832,6 @@ impl Default for Arc { #[cfg(not(no_global_oom_handling))] const MAX_STATIC_INNER_SLICE_ALIGNMENT: usize = 16; -#[cfg(not(no_global_oom_handling))] -const STATIC_INNER_PADDING: usize = - Layout::new::().padding_needed_for(MAX_STATIC_INNER_SLICE_ALIGNMENT); - /// Struct to hold the static `ArcInner` used for empty `Arc` as /// returned by `Default::default`. /// @@ -2846,7 +2842,8 @@ const STATIC_INNER_PADDING: usize = #[cfg(not(no_global_oom_handling))] #[repr(C, align(16))] struct SliceArcInnerForStatic { - padding: MaybeUninit<[RefCounts; STATIC_INNER_PADDING / mem::size_of::()]>, + padding: + MaybeUninit<[u8; MAX_STATIC_INNER_SLICE_ALIGNMENT.saturating_sub(size_of::())]>, ref_counts: RefCounts, value: [u8; 1], }
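
The sketch below is a standalone illustration of the fixed-offset layout described in the new `raw_rc` module docs; it is not part of the patch. `DemoRefCounts`, `DemoAllocation`, and `counts_from_value_ptr` are hypothetical stand-ins for `RefCounts`, the illustrative `RcAllocation`, and `ref_counts_ptr_from_value_ptr`.

```rust
use std::cell::UnsafeCell;
use std::mem::{offset_of, size_of};
use std::ptr::NonNull;

/// Hypothetical stand-in for `RefCounts`: two counters placed directly in front of the value.
#[repr(C)]
struct DemoRefCounts {
    weak: UnsafeCell<usize>,
    strong: UnsafeCell<usize>,
}

/// Illustrative allocation layout for a `T` whose alignment does not exceed
/// `size_of::<DemoRefCounts>()`, so no leading padding is needed. The real layout computes the
/// padding as `align_of::<T>().saturating_sub(size_of::<RefCounts>())`.
#[repr(C)]
struct DemoAllocation<T> {
    ref_counts: DemoRefCounts,
    value: T,
}

/// Because the value always sits exactly `size_of::<DemoRefCounts>()` bytes after the counts,
/// a type-erased helper can recover the counts from a value pointer alone.
unsafe fn counts_from_value_ptr(value_ptr: NonNull<()>) -> NonNull<DemoRefCounts> {
    unsafe { value_ptr.cast::<DemoRefCounts>().byte_sub(size_of::<DemoRefCounts>()) }
}

fn main() {
    let allocation = DemoAllocation {
        ref_counts: DemoRefCounts { weak: UnsafeCell::new(1), strong: UnsafeCell::new(1) },
        value: 42u64,
    };

    // The offset between the counts and the value is independent of `T`.
    assert_eq!(
        offset_of!(DemoAllocation<u64>, value) - offset_of!(DemoAllocation<u64>, ref_counts),
        size_of::<DemoRefCounts>(),
    );

    let value_ptr = NonNull::from(&allocation.value).cast::<()>();
    let counts = unsafe { counts_from_value_ptr(value_ptr) };
    assert!(std::ptr::eq(counts.as_ptr(), &allocation.ref_counts));
}
```

This constant offset is what lets type-erased helpers such as `strong_count_ptr_from_value_ptr` and `weak_count_ptr_from_value_ptr` in the patch operate on `NonNull<()>` and share one code path for every `T`.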
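The three-way decision described in the `make_mut` documentation, and followed by both the `rc` and `sync` `RcOps` implementations, can be summarized by this small illustrative function (the enum and function names are invented for the example, not part of the patch):

```rust
/// Illustrative outcome of `RcOps::make_mut`; not part of the patch.
#[derive(Debug, PartialEq)]
enum MakeMutAction {
    /// Strong and weak counts are both 1: the caller already owns the value exclusively.
    AlreadyUnique,
    /// Strong count is 1 but outstanding weak references exist: move the value into a fresh
    /// allocation, leaving the old one to the weak references.
    MoveValue,
    /// Other strong references exist: clone the value into a fresh allocation.
    CloneValue,
}

fn decide_make_mut(strong: usize, weak: usize) -> MakeMutAction {
    if strong == 1 {
        if weak == 1 { MakeMutAction::AlreadyUnique } else { MakeMutAction::MoveValue }
    } else {
        MakeMutAction::CloneValue
    }
}

fn main() {
    assert_eq!(decide_make_mut(1, 1), MakeMutAction::AlreadyUnique);
    assert_eq!(decide_make_mut(1, 3), MakeMutAction::MoveValue);
    assert_eq!(decide_make_mut(2, 1), MakeMutAction::CloneValue);
}
```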