netstack3_sync/
rc.rs

// Copyright 2022 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! Synchronized reference counting primitives.
//!
//! This module introduces a family of reference counted types that allow
//! marking the underlying data for destruction before all strong references
//! to the data are dropped. This enables the following features:
//!   * Upgrading a weak reference to a strong reference succeeds iff at least
//!     one strong reference exists _and_ the data has not been marked for
//!     destruction.
//!   * Allow waiting for all strongly-held references to be dropped after
//!     marking the data for destruction.
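//!
//! A minimal sketch of the first feature (assuming this module is exported
//! as `netstack3_sync::rc`; mirrors the `zombie_weak` test below):
//!
//! ```
//! use netstack3_sync::rc::{Primary, Strong};
//!
//! let primary = Primary::new(42u32);
//! let weak = {
//!     let strong = Primary::clone_strong(&primary);
//!     Strong::downgrade(&strong)
//! };
//! // A primary reference exists and the data isn't marked for destruction,
//! // so upgrading succeeds.
//! assert!(weak.upgrade().is_some());
//! core::mem::drop(primary);
//! // Dropping the primary reference marks the data for destruction, so
//! // upgrading now fails.
//! assert!(weak.upgrade().is_none());
//! ```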

use core::fmt::Debug;
use core::hash::{Hash, Hasher};
use core::ops::Deref;
use core::panic::Location;
use core::sync::atomic::{AtomicBool, Ordering};

use derivative::Derivative;

mod caller {
    //! Provides tracking of instances via tracked caller location.
    //!
    //! Callers are only tracked in debug builds. All operations and types
    //! are no-ops and empty unless the `rc-debug-names` feature is enabled.

    use core::fmt::Debug;
    use core::panic::Location;

    /// Records reference-counted names of instances.
    #[derive(Default)]
    pub(super) struct Callers {
        /// The names that were inserted and aren't known to be gone.
        ///
        /// This holds weak references to allow callers to drop without
        /// synchronizing. Invalid weak pointers are cleaned up periodically but
        /// are not logically present.
        ///
        /// Note that using [`std::sync::Mutex`] here is intentional to opt this
        /// out of loom checking, which makes testing with `rc-debug-names`
        /// impossibly slow.
        #[cfg(feature = "rc-debug-names")]
        pub(super) callers: std::sync::Mutex<std::collections::HashMap<Location<'static>, usize>>,
    }

    impl Debug for Callers {
        #[cfg(not(feature = "rc-debug-names"))]
        fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
            write!(f, "(Not Tracked)")
        }
        #[cfg(feature = "rc-debug-names")]
        fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
            let Self { callers } = self;
            let callers = callers.lock().unwrap();
            write!(f, "[\n")?;
            for (l, c) in callers.iter() {
                write!(f, "   {l} => {c},\n")?;
            }
            write!(f, "]")
        }
    }

    impl Callers {
        /// Creates a new [`TrackedCaller`] for the given [`Location`] and
        /// records it.
        ///
        /// Unless the `rc-debug-names` feature is enabled, this is a no-op.
        pub(super) fn insert(&self, caller: &Location<'static>) -> TrackedCaller {
            #[cfg(not(feature = "rc-debug-names"))]
            {
                let _ = caller;
                TrackedCaller {}
            }
            #[cfg(feature = "rc-debug-names")]
            {
                let Self { callers } = self;
                let mut callers = callers.lock().unwrap();
                let count = callers.entry(caller.clone()).or_insert(0);
                *count += 1;
                TrackedCaller { location: caller.clone() }
            }
        }
    }

    #[derive(Debug)]
    pub(super) struct TrackedCaller {
        #[cfg(feature = "rc-debug-names")]
        pub(super) location: Location<'static>,
    }

    impl TrackedCaller {
        #[cfg(not(feature = "rc-debug-names"))]
        pub(super) fn release(&mut self, Callers {}: &Callers) {
            let Self {} = self;
        }

        #[cfg(feature = "rc-debug-names")]
        pub(super) fn release(&mut self, Callers { callers }: &Callers) {
            let Self { location } = self;
            let mut callers = callers.lock().unwrap();
            let mut entry = match callers.entry(location.clone()) {
                std::collections::hash_map::Entry::Vacant(_) => {
                    panic!("location {location:?} was not in the callers map")
                }
                std::collections::hash_map::Entry::Occupied(o) => o,
            };

            let sub = entry
                .get()
                .checked_sub(1)
                .unwrap_or_else(|| panic!("zero-count location {location:?} in map"));
            if sub == 0 {
                let _: usize = entry.remove();
            } else {
                *entry.get_mut() = sub;
            }
        }
    }
}

mod resource_token {
    use core::fmt::Debug;
    use core::sync::atomic::{AtomicU64, Ordering};
    use std::marker::PhantomData;
    use std::num::NonZeroU64;

    /// An opaque token associated with a resource.
    ///
    /// It can be used to create debug and trace identifiers for the resource,
    /// but it should not be used as a unique identifier of the resource inside
    /// the netstack.
    ///
    /// By default the lifetime of a token is bound to the resource the token
    /// belongs to, but it can be extended by calling
    /// [`ResourceToken::extend_lifetime`].
    ///
    /// Internally the value is stored as a `NonZeroU64`. This is an
    /// optimization to save memory when the value is wrapped in `Option` (it
    /// allows the compiler to fit `Option<ResourceToken>` in 8 bytes).
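    ///
    /// A small sketch of vending, detaching, and exporting a token (the
    /// `netstack3_sync::rc` import path is assumed):
    ///
    /// ```
    /// use netstack3_sync::rc::{ResourceToken, ResourceTokenValue};
    ///
    /// let holder = ResourceTokenValue::default();
    /// let token: ResourceToken<'_> = holder.token();
    /// // Detach the token from the holder's borrow...
    /// let detached: ResourceToken<'static> = token.extend_lifetime();
    /// // ...and export the raw value, e.g. as a trace identifier.
    /// let _id: u64 = detached.export_value();
    /// ```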
    #[cfg_attr(any(test, feature = "testutils"), derive(PartialEq, Eq, PartialOrd, Ord))]
    #[derive(Clone)]
    pub struct ResourceToken<'a> {
        value: NonZeroU64,
        _marker: PhantomData<&'a ()>,
    }

    impl<'a> ResourceToken<'a> {
        /// Extends the lifetime of the token.
        ///
        /// # Discussion
        ///
        /// It's generally okay to extend the lifetime of the token, but prefer
        /// to use tokens bound to the resource's lifetime whenever possible,
        /// since that provides guardrails against identifiers that outlive the
        /// resource itself.
        pub fn extend_lifetime(self) -> ResourceToken<'static> {
            ResourceToken { value: self.value, _marker: PhantomData }
        }

        /// Returns the internal value, consuming `self`.
        ///
        /// # Discussion
        ///
        /// Export to `u64` when a representation is needed for interaction with
        /// other processes or components such as trace identifiers and eBPF
        /// socket cookies.
        ///
        /// Refrain from using the returned value within the netstack otherwise.
        pub fn export_value(self) -> u64 {
            self.value.get()
        }
    }

    impl<'a> Debug for ResourceToken<'a> {
        fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
            write!(f, "{}", self.value)
        }
    }

    /// Holder of a value for `ResourceToken`. Vends `ResourceToken` instances
    /// with the same value and a lifetime bound to the lifetime of the holder.
    ///
    /// The [`Default`] implementation generates a new unique value.
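    ///
    /// A sketch of the uniqueness guarantee (import path assumed):
    ///
    /// ```
    /// use netstack3_sync::rc::ResourceTokenValue;
    ///
    /// let (a, b) = (ResourceTokenValue::default(), ResourceTokenValue::default());
    /// // Distinct holders vend tokens with distinct values.
    /// assert_ne!(a.token().export_value(), b.token().export_value());
    /// ```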
    pub struct ResourceTokenValue(NonZeroU64);

    impl ResourceTokenValue {
        /// Creates a new token.
        pub fn token(&self) -> ResourceToken<'_> {
            let ResourceTokenValue(value) = self;
            ResourceToken { value: *value, _marker: PhantomData }
        }
    }

    impl core::fmt::Debug for ResourceTokenValue {
        fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
            let ResourceTokenValue(value) = self;
            write!(f, "{}", value)
        }
    }

    impl Default for ResourceTokenValue {
        fn default() -> Self {
            static NEXT_TOKEN: AtomicU64 = AtomicU64::new(1);
            // NB: `fetch_add` will cause the counter to wrap back to 0 if we
            // happen to exceed `u64::MAX` instantiations. In practice, that's
            // an impossibility (at 1 billion instantiations per second, the
            // counter is valid for > 500 years). Spare the CPU cycles and don't
            // bother attempting to handle overflow.
            Self(NonZeroU64::new(NEXT_TOKEN.fetch_add(1, Ordering::Relaxed)).unwrap())
        }
    }
}

pub use resource_token::{ResourceToken, ResourceTokenValue};

mod debug_id {
    use super::ResourceToken;
    use core::fmt::Debug;

    /// A debug identifier for the RC types exposed in the parent module.
    ///
    /// Encompasses the underlying pointer for the RC type, as well as
    /// (optionally) the globally unique [`ResourceToken`].
    pub(super) enum DebugId<T> {
        /// Used in contexts that have access to the [`ResourceToken`], e.g.
        /// [`Primary`], [`Strong`], and sometimes [`Weak`] RC types.
        WithToken { ptr: *const T, token: ResourceToken<'static> },
        /// Used in contexts that don't have access to the [`ResourceToken`],
        /// e.g. [`Weak`] RC types that cannot be upgraded.
        WithoutToken { ptr: *const T },
    }

    impl<T> Debug for DebugId<T> {
        fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
            match self {
                DebugId::WithToken { ptr, token } => write!(f, "{:?}:{:?}", token, ptr),
                DebugId::WithoutToken { ptr } => write!(f, "?:{:?}", ptr),
            }
        }
    }
}

#[derive(Derivative)]
#[derivative(Debug)]
struct Inner<T> {
    marked_for_destruction: AtomicBool,
    callers: caller::Callers,
    data: core::mem::ManuallyDrop<T>,
    // NB: Notifier could be an atomic pointer or atomic box, but this mutex is
    // never contended and using it means we don't have to import new code into
    // the repository (e.g. atomicbox) or write unsafe code.
    #[derivative(Debug = "ignore")]
    notifier: crate::Mutex<Option<Box<dyn Notifier<T>>>>,
    resource_token: ResourceTokenValue,
}

impl<T> Inner<T> {
    fn pre_drop_check(marked_for_destruction: &AtomicBool) {
        // `Ordering::Acquire` because we want to synchronize with the
        // `Ordering::Release` write to `marked_for_destruction` so that all
        // memory writes before the reference was marked for destruction are
        // visible here.
        assert!(marked_for_destruction.load(Ordering::Acquire), "Must be marked for destruction");
    }

    fn unwrap(mut self) -> T {
        // We cannot destructure `self` by value since `Inner` implements
        // `Drop`. So we must manually drop all the fields but `data` and then
        // forget `self`.
        let Inner { marked_for_destruction, data, callers: holders, notifier, resource_token } =
            &mut self;

        // Make sure that `inner` is in a valid state for destruction.
        //
        // Note that we do not actually destroy all of `self` here; we decompose
        // it into its parts, keeping what we need & throwing away what we
        // don't. Regardless, we perform the same checks.
        Inner::<T>::pre_drop_check(marked_for_destruction);

        // SAFETY: Safe since we own `self` and `self` is immediately forgotten
        // below, so its destructor (and those of its fields) will not be run
        // as a result of `self` being dropped.
        let data = unsafe {
            // Explicitly drop since we do not need these anymore.
            core::ptr::drop_in_place(marked_for_destruction);
            core::ptr::drop_in_place(holders);
            core::ptr::drop_in_place(notifier);
            core::ptr::drop_in_place(resource_token);

            core::mem::ManuallyDrop::take(data)
        };
        // Forget `self` now to prevent its `Drop::drop` impl from being run,
        // which would attempt to destroy `data` and re-run the pre-drop checks
        // on `Inner`'s state.
        core::mem::forget(self);

        data
    }

    /// Sets the notifier for this `Inner`.
    ///
    /// Panics if a notifier is already set.
    fn set_notifier<N: Notifier<T> + 'static>(&self, notifier: N) {
        let Self { notifier: slot, .. } = self;

        // Using dynamic dispatch to notify allows us to not have to know the
        // notifier that will be used from creation and spread the type on all
        // reference types in this crate. The assumption is that the allocation
        // and dynamic dispatch costs here are tiny compared to the overall work
        // of destroying the resources this module is targeting.
        let boxed: Box<dyn Notifier<T>> = Box::new(notifier);
        let prev_notifier = { slot.lock().replace(boxed) };
        // Uphold the invariant that this can only be done from `Primary`.
        assert!(prev_notifier.is_none(), "can't have a notifier already installed");
    }
}

impl<T> Drop for Inner<T> {
    fn drop(&mut self) {
        let Inner { marked_for_destruction, data, callers: _, notifier, resource_token: _ } = self;
        // Take data out of ManuallyDrop in case we panic in pre_drop_check.
        // That'll ensure data is dropped if we hit the panic.
        //
        // SAFETY: Safe because ManuallyDrop is not referenced again after
        // taking.
        let data = unsafe { core::mem::ManuallyDrop::take(data) };
        Self::pre_drop_check(marked_for_destruction);
        if let Some(mut notifier) = notifier.lock().take() {
            notifier.notify(data);
        }
    }
}

/// A primary reference.
///
/// Note that only one `Primary` may be associated with data. This is
/// enforced by not implementing [`Clone`].
///
/// For now, this reference is no different than a [`Strong`] but later changes
/// will enable blocking the destruction of a primary reference until all
/// strongly held references are dropped.
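///
/// A minimal sketch of the expected teardown discipline (import path
/// assumed): all [`Strong`] references must be dropped before the `Primary`
/// is dropped or unwrapped, otherwise those operations panic.
///
/// ```
/// use netstack3_sync::rc::Primary;
///
/// let primary = Primary::new("data");
/// let strong = Primary::clone_strong(&primary);
/// // Dropping `strong` first upholds `Primary`'s drop-time invariant.
/// core::mem::drop(strong);
/// assert_eq!(Primary::unwrap(primary), "data");
/// ```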
#[derive(Debug)]
pub struct Primary<T> {
    inner: core::mem::ManuallyDrop<alloc::sync::Arc<Inner<T>>>,
}

impl<T> Drop for Primary<T> {
    fn drop(&mut self) {
        let was_marked = self.mark_for_destruction();
        let Self { inner } = self;
        // Take the inner out of ManuallyDrop early so its Drop impl will run in
        // case we panic here.
        // SAFETY: Safe because we don't reference ManuallyDrop again.
        let inner = unsafe { core::mem::ManuallyDrop::take(inner) };

        // Make debugging easier: don't panic if a panic is already happening
        // since double-panics are annoying to debug. This means that the
        // invariants provided by Primary are possibly violated during an
        // unwind, but we're sidestepping that problem because Fuchsia is our
        // only audience here.
        if !std::thread::panicking() {
            assert_eq!(was_marked, false, "Must not be marked for destruction yet");

            let Inner {
                marked_for_destruction: _,
                callers,
                data: _,
                notifier: _,
                resource_token: _,
            } = &*inner;

            // Make sure that this `Primary` is the last thing to hold a strong
            // reference to the underlying data when it is being dropped.
            let refs = alloc::sync::Arc::strong_count(&inner).checked_sub(1).unwrap();
            assert!(
                refs == 0,
                "dropped Primary with {refs} strong refs remaining, \
                            Callers={callers:?}"
            );
        }
    }
}

impl<T> AsRef<T> for Primary<T> {
    fn as_ref(&self) -> &T {
        self.deref()
    }
}

impl<T> Deref for Primary<T> {
    type Target = T;

    fn deref(&self) -> &T {
        let Self { inner } = self;
        let Inner { marked_for_destruction: _, data, callers: _, notifier: _, resource_token: _ } =
            &***inner;
        data
    }
}

impl<T> Primary<T> {
    // Marks this primary reference as ready for destruction. Used by all
    // dropping flows. We take `&mut self` here to ensure we have the only
    // possible reference to this `Primary`. Returns whether it was already
    // marked for destruction.
    fn mark_for_destruction(&mut self) -> bool {
        let Self { inner } = self;
        // `Ordering::Release` because we want to make sure that all memory
        // writes before dropping this `Primary` synchronize with later attempts
        // to upgrade weak pointers and the `Drop::drop` impl of `Inner`.
        inner.marked_for_destruction.swap(true, Ordering::Release)
    }

    /// Creates a new primary reference to `data`.
    pub fn new(data: T) -> Primary<T> {
        Primary {
            inner: core::mem::ManuallyDrop::new(alloc::sync::Arc::new(Inner {
                marked_for_destruction: AtomicBool::new(false),
                callers: caller::Callers::default(),
                data: core::mem::ManuallyDrop::new(data),
                notifier: crate::Mutex::new(None),
                resource_token: ResourceTokenValue::default(),
            })),
        }
    }

    /// Constructs a new `Primary<T>` while giving you a `Weak<T>` to the
    /// allocation, to allow you to construct a `T` which holds a weak pointer
    /// to itself.
    ///
    /// As with [`alloc::sync::Arc::new_cyclic`], the `Weak` reference provided
    /// to `data_fn` cannot be upgraded until the [`Primary`] is constructed.
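    ///
    /// A sketch (mirroring the `new_cyclic` test below; import path assumed):
    ///
    /// ```
    /// use netstack3_sync::rc::{Primary, Weak};
    ///
    /// struct Node {
    ///     // Hypothetical self-referential field for illustration.
    ///     me: Weak<Node>,
    /// }
    ///
    /// let primary = Primary::new_cyclic(|me| Node { me });
    /// // Once constructed, the stored weak reference upgrades to the node
    /// // itself.
    /// let strong = primary.me.upgrade().unwrap();
    /// assert!(Primary::ptr_eq(&primary, &strong));
    /// ```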
    pub fn new_cyclic(data_fn: impl FnOnce(Weak<T>) -> T) -> Primary<T> {
        Primary {
            inner: core::mem::ManuallyDrop::new(alloc::sync::Arc::new_cyclic(move |weak| Inner {
                marked_for_destruction: AtomicBool::new(false),
                callers: caller::Callers::default(),
                data: core::mem::ManuallyDrop::new(data_fn(Weak(weak.clone()))),
                notifier: crate::Mutex::new(None),
                resource_token: ResourceTokenValue::default(),
            })),
        }
    }

    /// Returns a new strongly-held reference to the same data.
    #[cfg_attr(feature = "rc-debug-names", track_caller)]
    pub fn clone_strong(Self { inner }: &Self) -> Strong<T> {
        let Inner { data: _, callers, marked_for_destruction: _, notifier: _, resource_token: _ } =
            &***inner;
        let caller = callers.insert(Location::caller());
        Strong { inner: alloc::sync::Arc::clone(inner), caller }
    }

    /// Returns a weak reference pointing to the same underlying data.
    pub fn downgrade(Self { inner }: &Self) -> Weak<T> {
        Weak(alloc::sync::Arc::downgrade(inner))
    }

    /// Returns true if the two pointers point to the same allocation.
    pub fn ptr_eq(
        Self { inner: this }: &Self,
        Strong { inner: other, caller: _ }: &Strong<T>,
    ) -> bool {
        alloc::sync::Arc::ptr_eq(this, other)
    }

    /// Returns a [`Debug`] implementation that is stable and unique
    /// for the data held behind this [`Primary`].
    pub fn debug_id(&self) -> impl Debug + '_ {
        let Self { inner } = self;

        // The lifetime of the returned `DebugId` is bound to the lifetime
        // of `self`.
        let token = inner.resource_token.token().extend_lifetime();

        debug_id::DebugId::WithToken { ptr: alloc::sync::Arc::as_ptr(inner), token }
    }

    fn mark_for_destruction_and_take_inner(mut this: Self) -> alloc::sync::Arc<Inner<T>> {
        // Prepare for destruction.
        assert!(!this.mark_for_destruction());
        let Self { inner } = &mut this;
        // SAFETY: Safe because `inner` can't be used after this. We forget
        // our `Primary` reference to prevent its Drop impl from running.
        let inner = unsafe { core::mem::ManuallyDrop::take(inner) };
        core::mem::forget(this);
        inner
    }

    fn try_unwrap(this: Self) -> Result<T, alloc::sync::Arc<Inner<T>>> {
        let inner = Self::mark_for_destruction_and_take_inner(this);
        alloc::sync::Arc::try_unwrap(inner).map(Inner::unwrap)
    }

    /// Returns the inner value if no [`Strong`] references are held.
    ///
    /// # Panics
    ///
    /// Panics if [`Strong`] references are held when this function is called.
    pub fn unwrap(this: Self) -> T {
        Self::try_unwrap(this).unwrap_or_else(|inner| {
            let callers = &inner.callers;
            let refs = alloc::sync::Arc::strong_count(&inner).checked_sub(1).unwrap();
            panic!("can't unwrap, still had {refs} strong refs: {callers:?}");
        })
    }

    /// Marks this [`Primary`] for destruction and uses `notifier` as a signaler
    /// for when the last strong reference is dropped. After calling
    /// `unwrap_with_notifier`, [`Weak`] references can no longer be upgraded.
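    ///
    /// A sketch using [`ArcNotifier`] (mirroring the tests below; import path
    /// assumed):
    ///
    /// ```
    /// use netstack3_sync::rc::{ArcNotifier, Primary};
    ///
    /// let primary = Primary::new(10);
    /// let strong = Primary::clone_strong(&primary);
    /// let notifier = ArcNotifier::new();
    /// Primary::unwrap_with_notifier(primary, notifier.clone());
    /// // A strong reference is still alive, so nothing has been notified yet.
    /// assert_eq!(notifier.take(), None);
    /// core::mem::drop(strong);
    /// // The last strong reference is gone; the data is handed to the
    /// // notifier.
    /// assert_eq!(notifier.take(), Some(10));
    /// ```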
    pub fn unwrap_with_notifier<N: Notifier<T> + 'static>(this: Self, notifier: N) {
        let inner = Self::mark_for_destruction_and_take_inner(this);
        inner.set_notifier(notifier);
        // Now we can drop our inner reference; if ours was the last one, this
        // triggers the notifier.
        core::mem::drop(inner);
    }

    /// Marks this [`Primary`] for destruction and returns `Ok` if this was the
    /// last strong reference to the data. Otherwise `new_notifier` is called
    /// to create a new notifier to observe deferred destruction.
    ///
    /// Like [`Primary::unwrap_with_notifier`], [`Weak`] references can no
    /// longer be upgraded after calling `unwrap_or_notify_with`.
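    ///
    /// A sketch of the immediate-unwrap path (mirroring the tests below;
    /// import path assumed):
    ///
    /// ```
    /// use netstack3_sync::rc::{ArcNotifier, Primary};
    ///
    /// let primary = Primary::new(10);
    /// let result = Primary::unwrap_or_notify_with(primary, || {
    ///     // No strong references exist, so this is never called.
    ///     (ArcNotifier::<i32>::new(), ())
    /// });
    /// assert_eq!(result, Ok(10));
    /// ```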
    pub fn unwrap_or_notify_with<N: Notifier<T> + 'static, O, F: FnOnce() -> (N, O)>(
        this: Self,
        new_notifier: F,
    ) -> Result<T, O> {
        Self::try_unwrap(this).map_err(move |inner| {
            let (notifier, output) = new_notifier();
            inner.set_notifier(notifier);
            output
        })
    }

    /// Creates a [`DebugReferences`] instance.
    pub fn debug_references(this: &Self) -> DebugReferences<T> {
        let Self { inner } = this;
        DebugReferences(alloc::sync::Arc::downgrade(&*inner))
    }
}

/// A strongly-held reference.
///
/// Similar to an [`alloc::sync::Arc`], but holding a `Strong` acts as a
/// witness to the liveness of the underlying data. That is, holding a `Strong`
/// implies that the underlying data has not yet been destroyed.
///
/// Note that `Strong`'s implementations of [`Hash`] and [`PartialEq`] operate
/// on the pointer itself and not the underlying data.
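///
/// A sketch of the pointer-based equality semantics (import path assumed):
///
/// ```
/// use netstack3_sync::rc::Primary;
///
/// let primary_a = Primary::new(1);
/// let primary_b = Primary::new(1);
/// let strong_a = Primary::clone_strong(&primary_a);
/// let strong_b = Primary::clone_strong(&primary_b);
/// // Clones of the same allocation compare equal...
/// assert_eq!(strong_a, strong_a.clone());
/// // ...but equal data behind different allocations does not.
/// assert_ne!(strong_a, strong_b);
/// ```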
#[derive(Debug, Derivative)]
pub struct Strong<T> {
    inner: alloc::sync::Arc<Inner<T>>,
    caller: caller::TrackedCaller,
}

impl<T> Drop for Strong<T> {
    fn drop(&mut self) {
        let Self { inner, caller } = self;
        let Inner { marked_for_destruction: _, callers, data: _, notifier: _, resource_token: _ } =
            &**inner;
        caller.release(callers);
    }
}

impl<T> AsRef<T> for Strong<T> {
    fn as_ref(&self) -> &T {
        self.deref()
    }
}

impl<T> Deref for Strong<T> {
    type Target = T;

    fn deref(&self) -> &T {
        let Self { inner, caller: _ } = self;
        let Inner { marked_for_destruction: _, data, callers: _, notifier: _, resource_token: _ } =
            inner.deref();
        data
    }
}

impl<T> core::cmp::Eq for Strong<T> {}

impl<T> core::cmp::PartialEq for Strong<T> {
    fn eq(&self, other: &Self) -> bool {
        Self::ptr_eq(self, other)
    }
}

impl<T> Hash for Strong<T> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        let Self { inner, caller: _ } = self;
        alloc::sync::Arc::as_ptr(inner).hash(state)
    }
}

impl<T> Clone for Strong<T> {
    #[cfg_attr(feature = "rc-debug-names", track_caller)]
    fn clone(&self) -> Self {
        let Self { inner, caller: _ } = self;
        let Inner { data: _, marked_for_destruction: _, callers, notifier: _, resource_token: _ } =
            &**inner;
        let caller = callers.insert(Location::caller());
        Self { inner: alloc::sync::Arc::clone(inner), caller }
    }
}

impl<T> Strong<T> {
    /// Returns a weak reference pointing to the same underlying data.
    pub fn downgrade(Self { inner, caller: _ }: &Self) -> Weak<T> {
        Weak(alloc::sync::Arc::downgrade(inner))
    }

    /// Returns a [`Debug`] implementation that is stable and unique
    /// for the data held behind this [`Strong`].
    pub fn debug_id(&self) -> impl Debug + '_ {
        let Self { inner, caller: _ } = self;

        // The lifetime of the returned `DebugId` is bound to the lifetime
        // of `self`.
        let token = inner.resource_token.token().extend_lifetime();

        debug_id::DebugId::WithToken { ptr: alloc::sync::Arc::as_ptr(inner), token }
    }

    /// Returns a [`ResourceToken`] that corresponds to this object.
    pub fn resource_token(&self) -> ResourceToken<'_> {
        self.inner.resource_token.token()
    }

    /// Returns true if the inner value has since been marked for destruction.
    pub fn marked_for_destruction(Self { inner, caller: _ }: &Self) -> bool {
        let Inner { marked_for_destruction, data: _, callers: _, notifier: _, resource_token: _ } =
            inner.as_ref();
        // `Ordering::Acquire` because we want to synchronize with the
        // `Ordering::Release` write to `marked_for_destruction` so that all
        // memory writes before the reference was marked for destruction are
        // visible here.
        marked_for_destruction.load(Ordering::Acquire)
    }

    /// Returns true if the two pointers point to the same allocation.
    pub fn weak_ptr_eq(Self { inner: this, caller: _ }: &Self, Weak(other): &Weak<T>) -> bool {
        core::ptr::eq(alloc::sync::Arc::as_ptr(this), other.as_ptr())
    }

    /// Returns true if the two pointers point to the same allocation.
    pub fn ptr_eq(
        Self { inner: this, caller: _ }: &Self,
        Self { inner: other, caller: _ }: &Self,
    ) -> bool {
        alloc::sync::Arc::ptr_eq(this, other)
    }

    /// Compares the two pointers.
    pub fn ptr_cmp(
        Self { inner: this, caller: _ }: &Self,
        Self { inner: other, caller: _ }: &Self,
    ) -> core::cmp::Ordering {
        let this = alloc::sync::Arc::as_ptr(this);
        let other = alloc::sync::Arc::as_ptr(other);
        this.cmp(&other)
    }

    /// Creates a [`DebugReferences`] instance.
    pub fn debug_references(this: &Self) -> DebugReferences<T> {
        let Self { inner, caller: _ } = this;
        DebugReferences(alloc::sync::Arc::downgrade(inner))
    }
}

/// A weakly-held reference.
///
/// Similar to an [`alloc::sync::Weak`].
///
/// A `Weak` does not make any claim to the liveness of the underlying data.
/// Holders of a `Weak` must attempt to upgrade to a [`Strong`] through
/// [`Weak::upgrade`] to access the underlying data.
///
/// Note that `Weak`'s implementations of [`Hash`] and [`PartialEq`] operate
/// on the pointer itself and not the underlying data.
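///
/// A sketch of the upgrade protocol (import path assumed):
///
/// ```
/// use netstack3_sync::rc::{Primary, Strong};
///
/// let primary = Primary::new(7);
/// let weak = Primary::downgrade(&primary);
/// // A primary reference exists and the data isn't marked for destruction,
/// // so upgrading succeeds.
/// let strong: Strong<i32> = weak.upgrade().unwrap();
/// assert_eq!(*strong, 7);
/// drop(strong);
/// drop(primary);
/// // The data has been marked for destruction; upgrading now fails.
/// assert!(weak.upgrade().is_none());
/// ```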
#[derive(Debug)]
pub struct Weak<T>(alloc::sync::Weak<Inner<T>>);

impl<T> core::cmp::Eq for Weak<T> {}

impl<T> core::cmp::PartialEq for Weak<T> {
    fn eq(&self, other: &Self) -> bool {
        Self::ptr_eq(self, other)
    }
}

impl<T> Hash for Weak<T> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        let Self(this) = self;
        this.as_ptr().hash(state)
    }
}

impl<T> Clone for Weak<T> {
    fn clone(&self) -> Self {
        let Self(this) = self;
        Weak(this.clone())
    }
}

impl<T> Weak<T> {
    /// Returns true if the two pointers point to the same allocation.
    pub fn ptr_eq(&self, Self(other): &Self) -> bool {
        let Self(this) = self;
        this.ptr_eq(other)
    }

    /// Returns a [`Debug`] implementation that is stable and unique
    /// for the data held behind this [`Weak`].
    pub fn debug_id(&self) -> impl Debug + '_ {
        match self.upgrade() {
            Some(strong) => {
                let Strong { inner, caller: _ } = &strong;

                // The lifetime of the returned `DebugId` is still bound to the
                // lifetime of `self`.
                let token = inner.resource_token.token().extend_lifetime();

                debug_id::DebugId::WithToken { ptr: alloc::sync::Arc::as_ptr(&inner), token }
            }
            None => {
                let Self(this) = self;
                // NB: If we can't upgrade the reference, we can't know the
                // token.
                debug_id::DebugId::WithoutToken { ptr: this.as_ptr() }
            }
        }
    }

    /// Attempts to upgrade to a [`Strong`].
    ///
    /// Returns `None` if the inner value has since been marked for destruction.
    #[cfg_attr(feature = "rc-debug-names", track_caller)]
    pub fn upgrade(&self) -> Option<Strong<T>> {
        let Self(weak) = self;
        let arc = weak.upgrade()?;
        let Inner { marked_for_destruction, data: _, callers, notifier: _, resource_token: _ } =
            arc.deref();

        // `Ordering::Acquire` because we want to synchronize with the
        // `Ordering::Release` write to `marked_for_destruction` so that all
        // memory writes before the reference was marked for destruction are
        // visible here.
        if !marked_for_destruction.load(Ordering::Acquire) {
            let caller = callers.insert(Location::caller());
            Some(Strong { inner: arc, caller })
        } else {
            None
        }
    }

    /// Gets the number of [`Primary`] and [`Strong`] references to this allocation.
    pub fn strong_count(&self) -> usize {
        let Self(weak) = self;
        weak.strong_count()
    }

    /// Creates a [`DebugReferences`] instance.
    pub fn debug_references(&self) -> DebugReferences<T> {
        let Self(inner) = self;
        DebugReferences(inner.clone())
    }
}

fn debug_refs(
    refs: Option<(usize, &AtomicBool, &caller::Callers)>,
    name: &'static str,
    f: &mut core::fmt::Formatter<'_>,
) -> core::fmt::Result {
    let mut f = f.debug_struct(name);
    match refs {
        Some((strong_count, marked_for_destruction, callers)) => f
            .field("strong_count", &strong_count)
            .field("marked_for_destruction", marked_for_destruction)
            .field("callers", callers)
            .finish(),
        None => {
            let strong_count = 0_usize;
            f.field("strong_count", &strong_count).finish_non_exhaustive()
        }
    }
}

/// Provides a [`Debug`] implementation that contains information helpful for
/// debugging dangling references.
#[derive(Clone)]
pub struct DebugReferences<T>(alloc::sync::Weak<Inner<T>>);

impl<T> Debug for DebugReferences<T> {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        let Self(inner) = self;
        let inner = inner.upgrade();
        let refs = inner.as_ref().map(|inner| {
            (alloc::sync::Arc::strong_count(inner), &inner.marked_for_destruction, &inner.callers)
        });
        debug_refs(refs, "DebugReferences", f)
    }
}

impl<T: Send + Sync + 'static> DebugReferences<T> {
    /// Transforms this `DebugReferences` into a [`DynDebugReferences`].
    pub fn into_dyn(self) -> DynDebugReferences {
        let Self(w) = self;
        DynDebugReferences(w)
    }
}

/// Like [`DebugReferences`], but type-erases the contained type.
#[derive(Clone)]
pub struct DynDebugReferences(alloc::sync::Weak<dyn ExposeRefs>);

impl Debug for DynDebugReferences {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        let Self(inner) = self;
        let inner = inner.upgrade();
        let refs = inner.as_ref().map(|inner| {
            let (marked_for_destruction, callers) = inner.refs_info();
            (alloc::sync::Arc::strong_count(inner), marked_for_destruction, callers)
        });
        debug_refs(refs, "DynDebugReferences", f)
    }
}

/// A trait allowing [`DynDebugReferences`] to erase the `T` type on [`Inner`].
trait ExposeRefs: Send + Sync + 'static {
    fn refs_info(&self) -> (&AtomicBool, &caller::Callers);
}

impl<T: Send + Sync + 'static> ExposeRefs for Inner<T> {
    fn refs_info(&self) -> (&AtomicBool, &caller::Callers) {
        (&self.marked_for_destruction, &self.callers)
    }
}

/// Provides delegated notification of all strong references of a [`Primary`]
/// being dropped.
///
/// See [`Primary::unwrap_with_notifier`].
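///
/// A sketch of a custom implementation that forwards the unwrapped data on a
/// channel (the channel-based design is purely illustrative; import path
/// assumed):
///
/// ```
/// use netstack3_sync::rc::Notifier;
///
/// struct SendNotifier<T>(std::sync::mpsc::Sender<T>);
///
/// impl<T: Send> Notifier<T> for SendNotifier<T> {
///     fn notify(&mut self, data: T) {
///         let SendNotifier(sender) = self;
///         // Ignore send errors: the receiver may already be gone.
///         let _ = sender.send(data);
///     }
/// }
/// ```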
pub trait Notifier<T>: Send {
    /// Called when the data contained in the [`Primary`] reference can be
    /// extracted out because there are no more strong references to it.
    fn notify(&mut self, data: T);
}

/// An implementation of [`Notifier`] that stores the unwrapped data in a
/// `Clone` type.
///
/// Useful for tests that assert on destruction having completed.
#[derive(Debug, Derivative)]
#[derivative(Clone(bound = ""))]
pub struct ArcNotifier<T>(alloc::sync::Arc<crate::Mutex<Option<T>>>);

impl<T> ArcNotifier<T> {
    /// Creates a new `ArcNotifier`.
    pub fn new() -> Self {
        Self(alloc::sync::Arc::new(crate::Mutex::new(None)))
    }

    /// Takes the notified value, if any.
    pub fn take(&self) -> Option<T> {
        let Self(inner) = self;
        inner.lock().take()
    }
}

impl<T: Send> Notifier<T> for ArcNotifier<T> {
    fn notify(&mut self, data: T) {
        let Self(inner) = self;
        assert!(inner.lock().replace(data).is_none(), "notified twice");
    }
}

/// An implementation of [`Notifier`] that wraps another `Notifier` and applies
/// a function to notified objects.
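///
/// A sketch combining it with [`ArcNotifier`] (mirroring the `map_notifier`
/// test below; import path assumed):
///
/// ```
/// use netstack3_sync::rc::{ArcNotifier, MapNotifier, Primary};
///
/// let primary = Primary::new(10);
/// let notifier = ArcNotifier::new();
/// // Map the unwrapped value before handing it to the inner notifier.
/// let map_notifier = MapNotifier::new(notifier.clone(), |data| data + 1);
/// Primary::unwrap_with_notifier(primary, map_notifier);
/// assert_eq!(notifier.take(), Some(11));
/// ```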
pub struct MapNotifier<N, F> {
    inner: N,
    map: Option<F>,
}

impl<N, F> MapNotifier<N, F> {
    /// Creates a new [`MapNotifier`] that wraps `notifier` with a mapping
    /// function `F`.
    pub fn new(notifier: N, map: F) -> Self {
        Self { inner: notifier, map: Some(map) }
    }
}

impl<A, B, N: Notifier<B>, F: FnOnce(A) -> B> Notifier<A> for MapNotifier<N, F>
where
    Self: Send,
{
    fn notify(&mut self, data: A) {
        let Self { inner, map } = self;
        let map = map.take().expect("notified twice");
        inner.notify(map(data))
    }
}

/// A handy implementation for the common `Infallible` "never" type.
impl<T> Notifier<T> for core::convert::Infallible {
    fn notify(&mut self, _data: T) {
        match *self {}
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn zombie_weak() {
        let primary = Primary::new(());
        let weak = {
            let strong = Primary::clone_strong(&primary);
            Strong::downgrade(&strong)
        };
        core::mem::drop(primary);

        assert!(weak.upgrade().is_none());
    }

    #[test]
    fn rcs() {
        const INITIAL_VAL: u8 = 1;
        const NEW_VAL: u8 = 2;

        let primary = Primary::new(crate::sync::Mutex::new(INITIAL_VAL));
        let strong = Primary::clone_strong(&primary);
        let weak = Strong::downgrade(&strong);

        *primary.lock().unwrap() = NEW_VAL;
        assert_eq!(*primary.deref().lock().unwrap(), NEW_VAL);
        assert_eq!(*strong.deref().lock().unwrap(), NEW_VAL);
        assert_eq!(*weak.upgrade().unwrap().deref().lock().unwrap(), NEW_VAL);
    }

    #[test]
    fn unwrap_primary_without_strong_held() {
        const VAL: u16 = 6;
        let primary = Primary::new(VAL);
        assert_eq!(Primary::unwrap(primary), VAL);
    }

    #[test]
    #[should_panic(expected = "can't unwrap, still had 1 strong refs")]
    fn unwrap_primary_with_strong_held() {
        let primary = Primary::new(8);
        let _strong: Strong<_> = Primary::clone_strong(&primary);
        let _: u16 = Primary::unwrap(primary);
    }

    #[test]
    #[should_panic(expected = "dropped Primary with 1 strong refs remaining")]
    fn drop_primary_with_strong_held() {
        let primary = Primary::new(9);
        let _strong: Strong<_> = Primary::clone_strong(&primary);
        core::mem::drop(primary);
    }

    // This test trips LSAN on Fuchsia for some unknown reason. The host-side
    // test should be enough to protect us against regressing on the panicking
    // check.
    #[cfg(not(target_os = "fuchsia"))]
    #[test]
    #[should_panic(expected = "oopsie")]
    fn double_panic_protect() {
        let primary = Primary::new(9);
        let strong = Primary::clone_strong(&primary);
        // This will cause primary to be dropped before strong and would yield a
        // double panic if we didn't protect against it in Primary's Drop impl.
        let _tuple_to_invert_drop_order = (primary, strong);
        panic!("oopsie");
    }

    #[cfg(feature = "rc-debug-names")]
    #[test]
    fn tracked_callers() {
        let primary = Primary::new(10);
        // Mark this position so we ensure all track_caller marks are correct in
        // the methods that support it.
        let here = Location::caller();
        let strong1 = Primary::clone_strong(&primary);
        let strong2 = strong1.clone();
        let weak = Strong::downgrade(&strong2);
        let strong3 = weak.upgrade().unwrap();

        let Primary { inner } = &primary;
        let Inner { marked_for_destruction: _, callers, data: _, notifier: _, resource_token: _ } =
            &***inner;

        let strongs = [strong1, strong2, strong3];
        let _: &Location<'_> = strongs.iter().enumerate().fold(here, |prev, (i, cur)| {
            let Strong { inner: _, caller: caller::TrackedCaller { location: cur } } = cur;
            assert_eq!(prev.file(), cur.file(), "{i}");
            assert!(prev.line() < cur.line(), "{prev} < {cur}, {i}");
            {
                let callers = callers.callers.lock().unwrap();
                assert_eq!(callers.get(cur).copied(), Some(1));
            }

            cur
        });

        // All callers must be removed from the callers map on drop.
        std::mem::drop(strongs);
        {
            let callers = callers.callers.lock().unwrap();
            let callers = callers.deref();
            assert!(callers.is_empty(), "{callers:?}");
        }
    }

    #[cfg(feature = "rc-debug-names")]
    #[test]
    fn same_location_caller_tracking() {
        fn clone_in_fn<T>(p: &Primary<T>) -> Strong<T> {
            Primary::clone_strong(p)
        }

        let primary = Primary::new(10);
        let strong1 = clone_in_fn(&primary);
        let strong2 = clone_in_fn(&primary);
        assert_eq!(strong1.caller.location, strong2.caller.location);

        let Primary { inner } = &primary;
        let Inner { marked_for_destruction: _, callers, data: _, notifier: _, resource_token: _ } =
            &***inner;

        {
            let callers = callers.callers.lock().unwrap();
            assert_eq!(callers.get(&strong1.caller.location).copied(), Some(2));
        }

        std::mem::drop(strong1);
        std::mem::drop(strong2);

        {
            let callers = callers.callers.lock().unwrap();
            let callers = callers.deref();
            assert!(callers.is_empty(), "{callers:?}");
        }
    }

    #[cfg(feature = "rc-debug-names")]
    #[test]
    #[should_panic(expected = "core/sync/src/rc.rs")]
    fn callers_in_panic() {
        let primary = Primary::new(10);
        let _strong = Primary::clone_strong(&primary);
        drop(primary);
    }

    #[test]
    fn unwrap_with_notifier() {
        let primary = Primary::new(10);
        let strong = Primary::clone_strong(&primary);
        let notifier = ArcNotifier::new();
        Primary::unwrap_with_notifier(primary, notifier.clone());
        // Strong reference is still alive.
        assert_eq!(notifier.take(), None);
        core::mem::drop(strong);
        assert_eq!(notifier.take(), Some(10));
    }

    #[test]
    fn unwrap_or_notify_with_immediate() {
        let primary = Primary::new(10);
        let result = Primary::unwrap_or_notify_with::<ArcNotifier<_>, (), _>(primary, || {
            panic!("should not try to create notifier")
        });
        assert_eq!(result, Ok(10));
    }

    #[test]
    fn unwrap_or_notify_with_deferred() {
        let primary = Primary::new(10);
        let strong = Primary::clone_strong(&primary);
        let result = Primary::unwrap_or_notify_with(primary, || {
            let notifier = ArcNotifier::new();
            (notifier.clone(), notifier)
        });
        let notifier = result.unwrap_err();
        assert_eq!(notifier.take(), None);
        core::mem::drop(strong);
        assert_eq!(notifier.take(), Some(10));
    }

    #[test]
    fn map_notifier() {
        let primary = Primary::new(10);
        let notifier = ArcNotifier::new();
        let map_notifier = MapNotifier::new(notifier.clone(), |data| (data, data + 1));
        Primary::unwrap_with_notifier(primary, map_notifier);
        assert_eq!(notifier.take(), Some((10, 11)));
    }

    #[test]
    fn new_cyclic() {
        #[derive(Debug)]
        struct Data {
            value: i32,
            weak: Weak<Data>,
        }

        let primary = Primary::new_cyclic(|weak| Data { value: 2, weak });
        assert_eq!(primary.value, 2);
        let strong = primary.weak.upgrade().unwrap();
        assert_eq!(strong.value, 2);
        assert!(Primary::ptr_eq(&primary, &strong));
    }

    macro_rules! assert_debug_id_eq {
        ($id1:expr, $id2:expr) => {
            assert_eq!(alloc::format!("{:?}", $id1), alloc::format!("{:?}", $id2))
        };
    }

    macro_rules! assert_debug_id_ne {
        ($id1:expr, $id2:expr) => {
            assert_ne!(alloc::format!("{:?}", $id1), alloc::format!("{:?}", $id2))
        };
    }

    #[test]
    fn debug_ids_are_stable() {
        // Verify that transforming a given RC doesn't change its debug_id.
        let primary = Primary::new(1);
        let strong = Primary::clone_strong(&primary);
        let weak_p = Primary::downgrade(&primary);
        let weak_s = Strong::downgrade(&strong);
        let weak_c = weak_p.clone();
        assert_debug_id_eq!(&primary.debug_id(), &strong.debug_id());
        assert_debug_id_eq!(&primary.debug_id(), &weak_p.debug_id());
        assert_debug_id_eq!(&primary.debug_id(), &weak_s.debug_id());
        assert_debug_id_eq!(&primary.debug_id(), &weak_c.debug_id());
    }

    #[test]
    fn debug_ids_are_unique() {
        // Verify that RCs to different data have different debug_ids.
        let primary1 = Primary::new(1);
        let primary2 = Primary::new(1);
        assert_debug_id_ne!(&primary1.debug_id(), &primary2.debug_id());

        // Verify that dropping an RC does not allow its debug_id to be reused.
        let id1 = format!("{:?}", primary1.debug_id());
        std::mem::drop(primary1);
        let primary3 = Primary::new(1);
        assert_ne!(id1, format!("{:?}", primary3.debug_id()));
    }
}