netstack3_sync/
rc.rs

// Copyright 2022 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! Synchronized reference counting primitives.
//!
//! This module introduces a family of reference counted types that allow
//! marking the underlying data for destruction before all strong references
//! to the data are dropped. This enables the following features:
//!   * Upgrading a weak reference to a strong reference succeeds iff at least
//!     one strong reference exists _and_ the data has not been marked for
//!     destruction.
//!   * Waiting for all strongly-held references to be dropped after marking
//!     the data for destruction.
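//!
//! # Example
//!
//! A minimal sketch of the intended flow, using the types defined below
//! (`ignore`d since doctests need the full crate context):
//!
//! ```ignore
//! let primary = Primary::new(1u32);
//! let strong = Primary::clone_strong(&primary);
//! let weak = Strong::downgrade(&strong);
//!
//! // Upgrading succeeds while the data is alive and not marked.
//! assert!(weak.upgrade().is_some());
//!
//! // Dropping all strong references and then the `Primary` marks the data
//! // for destruction; upgrades fail from then on.
//! core::mem::drop(strong);
//! core::mem::drop(primary);
//! assert!(weak.upgrade().is_none());
//! ```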

use core::fmt::Debug;
use core::hash::{Hash, Hasher};
use core::ops::Deref;
use core::panic::Location;
use core::sync::atomic::{AtomicBool, Ordering};

use derivative::Derivative;

mod caller {
    //! Provides tracking of instances via tracked caller location.
    //!
    //! Callers are only tracked when the `rc-debug-names` feature is enabled;
    //! otherwise all operations and types here are no-ops and empty.

    use core::fmt::Debug;
    use core::panic::Location;

    /// Records reference-counted names of instances.
    #[derive(Default)]
    pub(super) struct Callers {
        /// Maps the caller locations that were inserted and aren't known to
        /// be gone to the number of live references created at each location.
        /// Entries are removed when their count drops to zero.
        ///
        /// Note that using [`std::sync::Mutex`] here is intentional to opt this
        /// out of loom checking, which makes testing with `rc-debug-names`
        /// impossibly slow.
        #[cfg(feature = "rc-debug-names")]
        pub(super) callers: std::sync::Mutex<std::collections::HashMap<Location<'static>, usize>>,
    }

    impl Debug for Callers {
        #[cfg(not(feature = "rc-debug-names"))]
        fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
            write!(f, "(Not Tracked)")
        }
        #[cfg(feature = "rc-debug-names")]
        fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
            let Self { callers } = self;
            let callers = callers.lock().unwrap();
            write!(f, "[\n")?;
            for (l, c) in callers.iter() {
                write!(f, "   {l} => {c},\n")?;
            }
            write!(f, "]")
        }
    }

    impl Callers {
        /// Records the given [`Location`] and returns a [`TrackedCaller`]
        /// entry for it.
        ///
        /// Without the `rc-debug-names` feature, this is a no-op.
        pub(super) fn insert(&self, caller: &Location<'static>) -> TrackedCaller {
            #[cfg(not(feature = "rc-debug-names"))]
            {
                let _ = caller;
                TrackedCaller {}
            }
            #[cfg(feature = "rc-debug-names")]
            {
                let Self { callers } = self;
                let mut callers = callers.lock().unwrap();
                let count = callers.entry(caller.clone()).or_insert(0);
                *count += 1;
                TrackedCaller { location: caller.clone() }
            }
        }
    }

    #[derive(Debug)]
    pub(super) struct TrackedCaller {
        #[cfg(feature = "rc-debug-names")]
        pub(super) location: Location<'static>,
    }

    impl TrackedCaller {
        #[cfg(not(feature = "rc-debug-names"))]
        pub(super) fn release(&mut self, Callers {}: &Callers) {
            let Self {} = self;
        }

        #[cfg(feature = "rc-debug-names")]
        pub(super) fn release(&mut self, Callers { callers }: &Callers) {
            let Self { location } = self;
            let mut callers = callers.lock().unwrap();
            let mut entry = match callers.entry(location.clone()) {
                std::collections::hash_map::Entry::Vacant(_) => {
                    panic!("location {location:?} was not in the callers map")
                }
                std::collections::hash_map::Entry::Occupied(o) => o,
            };

            let sub = entry
                .get()
                .checked_sub(1)
                .unwrap_or_else(|| panic!("zero-count location {location:?} in map"));
            if sub == 0 {
                let _: usize = entry.remove();
            } else {
                *entry.get_mut() = sub;
            }
        }
    }
}

mod resource_token {
    use core::fmt::Debug;
    use core::sync::atomic::{AtomicU64, Ordering};
    use std::marker::PhantomData;

    /// An opaque token associated with a resource.
    ///
    /// It can be used to create debug and trace identifiers for the resource,
    /// but it should not be used as a unique identifier of the resource inside
    /// the netstack.
    ///
    /// By default the lifetime of a token is bound to the resource the token
    /// belongs to, but it can be extended by calling
    /// [`ResourceToken::extend_lifetime`].
    #[cfg_attr(any(test, feature = "testutils"), derive(PartialEq, Eq, PartialOrd, Ord))]
    pub struct ResourceToken<'a> {
        value: u64,
        _marker: PhantomData<&'a ()>,
    }

    impl<'a> ResourceToken<'a> {
        /// Extends the lifetime of the token.
        ///
        /// # Discussion
        ///
        /// It's generally okay to extend the lifetime of the token, but prefer
        /// to use tokens bound to the resource's lifetime whenever possible,
        /// since they provide guardrails against identifiers that outlive the
        /// resource itself.
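        ///
        /// # Example
        ///
        /// A minimal sketch, assuming a [`ResourceTokenValue`] holder as
        /// defined below (`ignore`d since doctests need crate context):
        ///
        /// ```ignore
        /// let holder = ResourceTokenValue::default();
        /// // Trades the borrow of `holder` for a 'static token.
        /// let token: ResourceToken<'static> = holder.token().extend_lifetime();
        /// ```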
        pub fn extend_lifetime(self) -> ResourceToken<'static> {
            ResourceToken { value: self.value, _marker: PhantomData }
        }

        /// Returns the internal value, consuming `self`.
        ///
        /// # Discussion
        ///
        /// Export to a `u64` when a representation is needed for interaction
        /// with other processes or components, such as trace identifiers and
        /// eBPF socket cookies.
        ///
        /// Otherwise, refrain from using the returned value within the
        /// netstack.
        pub fn export_value(self) -> u64 {
            self.value
        }
    }

    impl<'a> Debug for ResourceToken<'a> {
        fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
            write!(f, "{}", self.value)
        }
    }

    /// Holder of a value for `ResourceToken`. Vends `ResourceToken` instances
    /// with the same value and a lifetime bound to the lifetime of the holder.
    ///
    /// The [`Default`] implementation generates a new unique value.
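    ///
    /// # Example
    ///
    /// A minimal sketch (`ignore`d since doctests need crate context):
    ///
    /// ```ignore
    /// let value = ResourceTokenValue::default();
    /// // All tokens vended by one holder expose the same underlying value.
    /// assert_eq!(value.token().export_value(), value.token().export_value());
    /// ```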
    pub struct ResourceTokenValue(u64);

    impl ResourceTokenValue {
        /// Creates a new token.
        pub fn token(&self) -> ResourceToken<'_> {
            let ResourceTokenValue(value) = self;
            ResourceToken { value: *value, _marker: PhantomData }
        }
    }

    impl core::fmt::Debug for ResourceTokenValue {
        fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
            let ResourceTokenValue(value) = self;
            write!(f, "{}", value)
        }
    }

    impl Default for ResourceTokenValue {
        fn default() -> Self {
            static NEXT_TOKEN: AtomicU64 = AtomicU64::new(0);
            // NB: Fetch-add will cause the counter to roll over to 0 if we
            // happen to exceed `u64::MAX` instantiations. In practice, that's
            // an impossibility (at 1 billion instantiations per second, the
            // counter is valid for > 500 years). Spare the CPU cycles and don't
            // bother attempting to detect/handle overflow.
            Self(NEXT_TOKEN.fetch_add(1, Ordering::Relaxed))
        }
    }
}

pub use resource_token::{ResourceToken, ResourceTokenValue};

mod debug_id {
    use super::ResourceToken;
    use core::fmt::Debug;

    /// A debug identifier for the RC types exposed in the parent module.
    ///
    /// Encompasses the underlying pointer for the RC type, as well as
    /// (optionally) the globally unique [`ResourceToken`].
    pub(super) enum DebugId<T> {
        /// Used in contexts that have access to the [`ResourceToken`], e.g.
        /// [`Primary`], [`Strong`], and sometimes [`Weak`] RC types.
        WithToken { ptr: *const T, token: ResourceToken<'static> },
        /// Used in contexts that don't have access to the [`ResourceToken`], e.g.
        /// [`Weak`] RC types that cannot be upgraded.
        WithoutToken { ptr: *const T },
    }

    impl<T> Debug for DebugId<T> {
        fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
            match self {
                DebugId::WithToken { ptr, token } => write!(f, "{:?}:{:?}", token, ptr),
                DebugId::WithoutToken { ptr } => write!(f, "?:{:?}", ptr),
            }
        }
    }
}

#[derive(Derivative)]
#[derivative(Debug)]
struct Inner<T> {
    marked_for_destruction: AtomicBool,
    callers: caller::Callers,
    data: core::mem::ManuallyDrop<T>,
    // NB: Notifier could be an atomic pointer or atomic box, but this mutex is
    // never contended and we don't have to import new code into the repository
    // (i.e., `atomicbox`) or write unsafe code.
    #[derivative(Debug = "ignore")]
    notifier: crate::Mutex<Option<Box<dyn Notifier<T>>>>,
    resource_token: ResourceTokenValue,
}

impl<T> Inner<T> {
    fn pre_drop_check(marked_for_destruction: &AtomicBool) {
        // `Ordering::Acquire` because we want to synchronize with the
        // `Ordering::Release` write to `marked_for_destruction` so that all
        // memory writes before the reference was marked for destruction are
        // visible here.
        assert!(marked_for_destruction.load(Ordering::Acquire), "Must be marked for destruction");
    }

    fn unwrap(mut self) -> T {
        // We cannot destructure `self` by value since `Inner` implements
        // `Drop`. So we must manually drop all the fields but `data` and then
        // forget `self`.
        let Inner { marked_for_destruction, data, callers: holders, notifier, resource_token } =
            &mut self;

        // Make sure that `inner` is in a valid state for destruction.
        //
        // Note that we do not actually destroy all of `self` here; we
        // decompose it into its parts, keeping what we need and throwing away
        // what we don't. Regardless, we perform the same checks.
        Inner::<T>::pre_drop_check(marked_for_destruction);

        // SAFETY: Safe since we own `self` and `self` is immediately forgotten
        // below, so its destructor (and those of its fields) will not be run
        // as a result of `self` being dropped.
        let data = unsafe {
            // Explicitly drop since we do not need these anymore.
            core::ptr::drop_in_place(marked_for_destruction);
            core::ptr::drop_in_place(holders);
            core::ptr::drop_in_place(notifier);
            core::ptr::drop_in_place(resource_token);

            core::mem::ManuallyDrop::take(data)
        };
        // Forget `self` now to prevent its `Drop::drop` impl from being run;
        // it would attempt to destroy `data`, which we just took. The pre-drop
        // checks above already validated `Inner`'s state.
        core::mem::forget(self);

        data
    }

    /// Sets the notifier for this `Inner`.
    ///
    /// # Panics
    ///
    /// Panics if a notifier is already installed.
    fn set_notifier<N: Notifier<T> + 'static>(&self, notifier: N) {
        let Self { notifier: slot, .. } = self;

        // Using dynamic dispatch to notify allows us to avoid naming the
        // concrete notifier type at creation time and spreading that type
        // parameter across all reference types in this crate. The assumption
        // is that the allocation and dynamic dispatch costs here are tiny
        // compared to the overall work of destroying the resources this module
        // is targeting.
        let boxed: Box<dyn Notifier<T>> = Box::new(notifier);
        let prev_notifier = { slot.lock().replace(boxed) };
        // Uphold the invariant that this can only be done from `Primary`.
        assert!(prev_notifier.is_none(), "can't have a notifier already installed");
    }
}

impl<T> Drop for Inner<T> {
    fn drop(&mut self) {
        let Inner { marked_for_destruction, data, callers: _, notifier, resource_token: _ } = self;
        // Take `data` out of `ManuallyDrop` in case we panic in
        // `pre_drop_check`; that ensures `data` is dropped if we hit the
        // panic.
        //
        // SAFETY: Safe because `ManuallyDrop` is not referenced again after
        // taking.
        let data = unsafe { core::mem::ManuallyDrop::take(data) };
        Self::pre_drop_check(marked_for_destruction);
        if let Some(mut notifier) = notifier.lock().take() {
            notifier.notify(data);
        }
    }
}

/// A primary reference.
///
/// Note that only one `Primary` may be associated with the underlying data.
/// This is enforced by not implementing [`Clone`].
///
/// Unlike a [`Strong`], a `Primary` can mark the underlying data for
/// destruction (on drop or via the `unwrap` family of methods) and observe,
/// through a [`Notifier`], when all strongly-held references are dropped.
#[derive(Debug)]
pub struct Primary<T> {
    inner: core::mem::ManuallyDrop<alloc::sync::Arc<Inner<T>>>,
}

impl<T> Drop for Primary<T> {
    fn drop(&mut self) {
        let was_marked = self.mark_for_destruction();
        let Self { inner } = self;
        // Take the inner out of ManuallyDrop early so its Drop impl will run in
        // case we panic here.
        // SAFETY: Safe because we don't reference ManuallyDrop again.
        let inner = unsafe { core::mem::ManuallyDrop::take(inner) };

        // Make debugging easier: don't panic if a panic is already happening
        // since double-panics are annoying to debug. This means that the
        // invariants provided by Primary are possibly violated during an
        // unwind, but we're sidestepping that problem because Fuchsia is our
        // only audience here.
        if !std::thread::panicking() {
            assert_eq!(was_marked, false, "Must not be marked for destruction yet");

            let Inner {
                marked_for_destruction: _,
                callers,
                data: _,
                notifier: _,
                resource_token: _,
            } = &*inner;

            // Make sure that this `Primary` is the last thing to hold a strong
            // reference to the underlying data when it is being dropped.
            let refs = alloc::sync::Arc::strong_count(&inner).checked_sub(1).unwrap();
            assert!(
                refs == 0,
                "dropped Primary with {refs} strong refs remaining, \
                Callers={callers:?}"
            );
        }
    }
}

impl<T> AsRef<T> for Primary<T> {
    fn as_ref(&self) -> &T {
        self.deref()
    }
}

impl<T> Deref for Primary<T> {
    type Target = T;

    fn deref(&self) -> &T {
        let Self { inner } = self;
        let Inner { marked_for_destruction: _, data, callers: _, notifier: _, resource_token: _ } =
            &***inner;
        data
    }
}

impl<T> Primary<T> {
    // Marks this primary reference as ready for destruction. Used by all
    // dropping flows. We take `&mut self` here to ensure we have the only
    // possible reference to this `Primary`. Returns whether it was already
    // marked for destruction.
    fn mark_for_destruction(&mut self) -> bool {
        let Self { inner } = self;
        // `Ordering::Release` because we want to make sure that all memory
        // writes before dropping this `Primary` synchronize with later
        // attempts to upgrade weak pointers and the `Drop::drop` impl of
        // `Inner`.
        inner.marked_for_destruction.swap(true, Ordering::Release)
    }

    /// Returns a new [`Primary`] reference holding `data`.
    pub fn new(data: T) -> Primary<T> {
        Primary {
            inner: core::mem::ManuallyDrop::new(alloc::sync::Arc::new(Inner {
                marked_for_destruction: AtomicBool::new(false),
                callers: caller::Callers::default(),
                data: core::mem::ManuallyDrop::new(data),
                notifier: crate::Mutex::new(None),
                resource_token: ResourceTokenValue::default(),
            })),
        }
    }

    /// Constructs a new `Primary<T>` while giving you a `Weak<T>` to the
    /// allocation, to allow you to construct a `T` which holds a weak pointer
    /// to itself.
    ///
    /// As with [`Arc::new_cyclic`], the `Weak` reference provided to `data_fn`
    /// cannot be upgraded until the [`Primary`] is constructed.
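    ///
    /// # Example
    ///
    /// A sketch mirroring the `new_cyclic` test below (`ignore`d since
    /// doctests need crate context):
    ///
    /// ```ignore
    /// struct Node {
    ///     me: Weak<Node>,
    /// }
    ///
    /// let primary = Primary::new_cyclic(|me| Node { me });
    /// // The weak self-reference upgrades to the same allocation.
    /// assert!(Primary::ptr_eq(&primary, &primary.me.upgrade().unwrap()));
    /// ```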
    pub fn new_cyclic(data_fn: impl FnOnce(Weak<T>) -> T) -> Primary<T> {
        Primary {
            inner: core::mem::ManuallyDrop::new(alloc::sync::Arc::new_cyclic(move |weak| Inner {
                marked_for_destruction: AtomicBool::new(false),
                callers: caller::Callers::default(),
                data: core::mem::ManuallyDrop::new(data_fn(Weak(weak.clone()))),
                notifier: crate::Mutex::new(None),
                resource_token: ResourceTokenValue::default(),
            })),
        }
    }

    /// Clones a strongly-held reference.
    #[cfg_attr(feature = "rc-debug-names", track_caller)]
    pub fn clone_strong(Self { inner }: &Self) -> Strong<T> {
        let Inner { data: _, callers, marked_for_destruction: _, notifier: _, resource_token: _ } =
            &***inner;
        let caller = callers.insert(Location::caller());
        Strong { inner: alloc::sync::Arc::clone(inner), caller }
    }

    /// Returns a weak reference pointing to the same underlying data.
    pub fn downgrade(Self { inner }: &Self) -> Weak<T> {
        Weak(alloc::sync::Arc::downgrade(inner))
    }

    /// Returns true if the two pointers point to the same allocation.
    pub fn ptr_eq(
        Self { inner: this }: &Self,
        Strong { inner: other, caller: _ }: &Strong<T>,
    ) -> bool {
        alloc::sync::Arc::ptr_eq(this, other)
    }

    /// Returns a [`Debug`] implementation that is stable and unique
    /// for the data held behind this [`Primary`].
    pub fn debug_id(&self) -> impl Debug + '_ {
        let Self { inner } = self;

        // The lifetime of the returned `DebugId` is bound to the lifetime
        // of `self`.
        let token = inner.resource_token.token().extend_lifetime();

        debug_id::DebugId::WithToken { ptr: alloc::sync::Arc::as_ptr(inner), token }
    }

    fn mark_for_destruction_and_take_inner(mut this: Self) -> alloc::sync::Arc<Inner<T>> {
        // Prepare for destruction.
        assert!(!this.mark_for_destruction());
        let Self { inner } = &mut this;
        // SAFETY: Safe because `inner` can't be used after this. We forget
        // our `Primary` reference to prevent its Drop impl from running.
        let inner = unsafe { core::mem::ManuallyDrop::take(inner) };
        core::mem::forget(this);
        inner
    }

    fn try_unwrap(this: Self) -> Result<T, alloc::sync::Arc<Inner<T>>> {
        let inner = Self::mark_for_destruction_and_take_inner(this);
        alloc::sync::Arc::try_unwrap(inner).map(Inner::unwrap)
    }

    /// Returns the inner value if no [`Strong`] references are held.
    ///
    /// # Panics
    ///
    /// Panics if [`Strong`] references are held when this function is called.
    pub fn unwrap(this: Self) -> T {
        Self::try_unwrap(this).unwrap_or_else(|inner| {
            let callers = &inner.callers;
            let refs = alloc::sync::Arc::strong_count(&inner).checked_sub(1).unwrap();
            panic!("can't unwrap, still had {refs} strong refs: {callers:?}");
        })
    }

    /// Marks this [`Primary`] for destruction and uses `notifier` as a
    /// signaler for when all strong references have been dropped. After
    /// calling `unwrap_with_notifier`, [`Weak`] references can no longer be
    /// upgraded.
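    ///
    /// # Example
    ///
    /// A usage sketch with the [`ArcNotifier`] defined below, mirroring the
    /// `unwrap_with_notifier` test (`ignore`d since doctests need crate
    /// context):
    ///
    /// ```ignore
    /// let primary = Primary::new(10);
    /// let strong = Primary::clone_strong(&primary);
    /// let notifier = ArcNotifier::new();
    /// Primary::unwrap_with_notifier(primary, notifier.clone());
    /// // The value is only delivered once the last Strong is dropped.
    /// assert_eq!(notifier.take(), None);
    /// core::mem::drop(strong);
    /// assert_eq!(notifier.take(), Some(10));
    /// ```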
    pub fn unwrap_with_notifier<N: Notifier<T> + 'static>(this: Self, notifier: N) {
        let inner = Self::mark_for_destruction_and_take_inner(this);
        inner.set_notifier(notifier);
        // Now we can drop our inner reference; if it was the last one, this
        // will trigger the notifier.
        core::mem::drop(inner);
    }

    /// Marks this [`Primary`] for destruction and returns `Ok` if this was the
    /// last strong reference standing for it. Otherwise `new_notifier` is
    /// called to create a new notifier to observe deferred destruction.
    ///
    /// Like [`Primary::unwrap_with_notifier`], [`Weak`] references can no
    /// longer be upgraded after calling `unwrap_or_notify_with`.
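    ///
    /// # Example
    ///
    /// A sketch mirroring the `unwrap_or_notify_with_immediate` test below
    /// (`ignore`d since doctests need crate context):
    ///
    /// ```ignore
    /// let primary = Primary::new(10);
    /// // No Strong references exist, so the value is returned immediately
    /// // and no notifier is ever created.
    /// let result = Primary::unwrap_or_notify_with::<ArcNotifier<_>, (), _>(primary, || {
    ///     panic!("should not try to create notifier")
    /// });
    /// assert_eq!(result, Ok(10));
    /// ```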
    pub fn unwrap_or_notify_with<N: Notifier<T> + 'static, O, F: FnOnce() -> (N, O)>(
        this: Self,
        new_notifier: F,
    ) -> Result<T, O> {
        Self::try_unwrap(this).map_err(move |inner| {
            let (notifier, output) = new_notifier();
            inner.set_notifier(notifier);
            output
        })
    }

    /// Creates a [`DebugReferences`] instance.
    pub fn debug_references(this: &Self) -> DebugReferences<T> {
        let Self { inner } = this;
        DebugReferences(alloc::sync::Arc::downgrade(&*inner))
    }
}

/// A strongly-held reference.
///
/// Similar to an [`alloc::sync::Arc`], but holding a `Strong` acts as a
/// witness to the liveness of the underlying data. That is, holding a `Strong`
/// implies that the underlying data has not yet been destroyed.
///
/// Note that `Strong`'s implementations of [`Hash`] and [`PartialEq`] operate
/// on the pointer itself and not the underlying data.
#[derive(Debug, Derivative)]
pub struct Strong<T> {
    inner: alloc::sync::Arc<Inner<T>>,
    caller: caller::TrackedCaller,
}

impl<T> Drop for Strong<T> {
    fn drop(&mut self) {
        let Self { inner, caller } = self;
        let Inner { marked_for_destruction: _, callers, data: _, notifier: _, resource_token: _ } =
            &**inner;
        caller.release(callers);
    }
}

impl<T> AsRef<T> for Strong<T> {
    fn as_ref(&self) -> &T {
        self.deref()
    }
}

impl<T> Deref for Strong<T> {
    type Target = T;

    fn deref(&self) -> &T {
        let Self { inner, caller: _ } = self;
        let Inner { marked_for_destruction: _, data, callers: _, notifier: _, resource_token: _ } =
            inner.deref();
        data
    }
}

impl<T> core::cmp::Eq for Strong<T> {}

impl<T> core::cmp::PartialEq for Strong<T> {
    fn eq(&self, other: &Self) -> bool {
        Self::ptr_eq(self, other)
    }
}

impl<T> Hash for Strong<T> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        let Self { inner, caller: _ } = self;
        alloc::sync::Arc::as_ptr(inner).hash(state)
    }
}

impl<T> Clone for Strong<T> {
    #[cfg_attr(feature = "rc-debug-names", track_caller)]
    fn clone(&self) -> Self {
        let Self { inner, caller: _ } = self;
        let Inner { data: _, marked_for_destruction: _, callers, notifier: _, resource_token: _ } =
            &**inner;
        let caller = callers.insert(Location::caller());
        Self { inner: alloc::sync::Arc::clone(inner), caller }
    }
}

impl<T> Strong<T> {
    /// Returns a weak reference pointing to the same underlying data.
    pub fn downgrade(Self { inner, caller: _ }: &Self) -> Weak<T> {
        Weak(alloc::sync::Arc::downgrade(inner))
    }

    /// Returns a [`Debug`] implementation that is stable and unique
    /// for the data held behind this [`Strong`].
    pub fn debug_id(&self) -> impl Debug + '_ {
        let Self { inner, caller: _ } = self;

        // The lifetime of the returned `DebugId` is bound to the lifetime
        // of `self`.
        let token = inner.resource_token.token().extend_lifetime();

        debug_id::DebugId::WithToken { ptr: alloc::sync::Arc::as_ptr(inner), token }
    }

    /// Returns a [`ResourceToken`] that corresponds to this object.
    pub fn resource_token(&self) -> ResourceToken<'_> {
        self.inner.resource_token.token()
    }

    /// Returns true if the inner value has since been marked for destruction.
    pub fn marked_for_destruction(Self { inner, caller: _ }: &Self) -> bool {
        let Inner { marked_for_destruction, data: _, callers: _, notifier: _, resource_token: _ } =
            inner.as_ref();
        // `Ordering::Acquire` because we want to synchronize with the
        // `Ordering::Release` write to `marked_for_destruction` so that all
        // memory writes before the reference was marked for destruction are
        // visible here.
        marked_for_destruction.load(Ordering::Acquire)
    }

    /// Returns true if the two pointers point to the same allocation.
    pub fn weak_ptr_eq(Self { inner: this, caller: _ }: &Self, Weak(other): &Weak<T>) -> bool {
        core::ptr::eq(alloc::sync::Arc::as_ptr(this), other.as_ptr())
    }

    /// Returns true if the two pointers point to the same allocation.
    pub fn ptr_eq(
        Self { inner: this, caller: _ }: &Self,
        Self { inner: other, caller: _ }: &Self,
    ) -> bool {
        alloc::sync::Arc::ptr_eq(this, other)
    }

    /// Compares the two pointers.
    pub fn ptr_cmp(
        Self { inner: this, caller: _ }: &Self,
        Self { inner: other, caller: _ }: &Self,
    ) -> core::cmp::Ordering {
        let this = alloc::sync::Arc::as_ptr(this);
        let other = alloc::sync::Arc::as_ptr(other);
        this.cmp(&other)
    }

    /// Creates a [`DebugReferences`] instance.
    pub fn debug_references(this: &Self) -> DebugReferences<T> {
        let Self { inner, caller: _ } = this;
        DebugReferences(alloc::sync::Arc::downgrade(inner))
    }
}

/// A weakly-held reference.
///
/// Similar to an [`alloc::sync::Weak`].
///
/// A `Weak` does not make any claim to the liveness of the underlying data.
/// Holders of a `Weak` must attempt to upgrade to a [`Strong`] through
/// [`Weak::upgrade`] to access the underlying data.
///
/// Note that `Weak`'s implementations of [`Hash`] and [`PartialEq`] operate on
/// the pointer itself and not the underlying data.
#[derive(Debug)]
pub struct Weak<T>(alloc::sync::Weak<Inner<T>>);

impl<T> core::cmp::Eq for Weak<T> {}

impl<T> core::cmp::PartialEq for Weak<T> {
    fn eq(&self, other: &Self) -> bool {
        Self::ptr_eq(self, other)
    }
}

impl<T> Hash for Weak<T> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        let Self(this) = self;
        this.as_ptr().hash(state)
    }
}

impl<T> Clone for Weak<T> {
    fn clone(&self) -> Self {
        let Self(this) = self;
        Weak(this.clone())
    }
}

impl<T> Weak<T> {
    /// Returns true if the two pointers point to the same allocation.
    pub fn ptr_eq(&self, Self(other): &Self) -> bool {
        let Self(this) = self;
        this.ptr_eq(other)
    }

    /// Returns a [`Debug`] implementation that is stable and unique
    /// for the data held behind this [`Weak`].
    pub fn debug_id(&self) -> impl Debug + '_ {
        match self.upgrade() {
            Some(strong) => {
                let Strong { inner, caller: _ } = &strong;

                // The lifetime of the returned `DebugId` is still bound to the
                // lifetime of `self`.
                let token = inner.resource_token.token().extend_lifetime();

                debug_id::DebugId::WithToken { ptr: alloc::sync::Arc::as_ptr(&inner), token }
            }
            None => {
                let Self(this) = self;
                // NB: If we can't upgrade the reference, we can't know the token.
                debug_id::DebugId::WithoutToken { ptr: this.as_ptr() }
            }
        }
    }

    /// Attempts to upgrade to a [`Strong`].
    ///
    /// Returns `None` if the inner value has since been marked for destruction.
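    ///
    /// # Example
    ///
    /// A sketch of the expected behavior, mirroring the tests below
    /// (`ignore`d since doctests need crate context):
    ///
    /// ```ignore
    /// let primary = Primary::new(());
    /// let weak = Primary::downgrade(&primary);
    /// assert!(weak.upgrade().is_some());
    /// // Dropping the Primary marks the data for destruction, so upgrading
    /// // fails even though the Weak itself is still alive.
    /// core::mem::drop(primary);
    /// assert!(weak.upgrade().is_none());
    /// ```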
    #[cfg_attr(feature = "rc-debug-names", track_caller)]
    pub fn upgrade(&self) -> Option<Strong<T>> {
        let Self(weak) = self;
        let arc = weak.upgrade()?;
        let Inner { marked_for_destruction, data: _, callers, notifier: _, resource_token: _ } =
            arc.deref();

        // `Ordering::Acquire` because we want to synchronize with the
        // `Ordering::Release` write to `marked_for_destruction` so that all
        // memory writes before the reference was marked for destruction are
        // visible here.
        if !marked_for_destruction.load(Ordering::Acquire) {
            let caller = callers.insert(Location::caller());
            Some(Strong { inner: arc, caller })
        } else {
            None
        }
    }

    /// Gets the number of [`Primary`] and [`Strong`] references to this allocation.
    pub fn strong_count(&self) -> usize {
        let Self(weak) = self;
        weak.strong_count()
    }

    /// Creates a [`DebugReferences`] instance.
    pub fn debug_references(&self) -> DebugReferences<T> {
        let Self(inner) = self;
        DebugReferences(inner.clone())
    }
}

fn debug_refs(
    refs: Option<(usize, &AtomicBool, &caller::Callers)>,
    name: &'static str,
    f: &mut core::fmt::Formatter<'_>,
) -> core::fmt::Result {
    let mut f = f.debug_struct(name);
    match refs {
        Some((strong_count, marked_for_destruction, callers)) => f
            .field("strong_count", &strong_count)
            .field("marked_for_destruction", marked_for_destruction)
            .field("callers", callers)
            .finish(),
        None => {
            let strong_count = 0_usize;
            f.field("strong_count", &strong_count).finish_non_exhaustive()
        }
    }
}

/// Provides a [`Debug`] implementation that contains information helpful for
/// debugging dangling references.
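///
/// # Example
///
/// A usage sketch (`ignore`d since doctests need crate context):
///
/// ```ignore
/// let primary = Primary::new(());
/// let refs = Primary::debug_references(&primary);
/// // Renders the strong count, destruction mark, and (when the
/// // `rc-debug-names` feature is enabled) tracked caller locations.
/// println!("{refs:?}");
/// ```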
#[derive(Clone)]
pub struct DebugReferences<T>(alloc::sync::Weak<Inner<T>>);

impl<T> Debug for DebugReferences<T> {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        let Self(inner) = self;
        let inner = inner.upgrade();
        let refs = inner.as_ref().map(|inner| {
            (alloc::sync::Arc::strong_count(inner), &inner.marked_for_destruction, &inner.callers)
        });
        debug_refs(refs, "DebugReferences", f)
    }
}

impl<T: Send + Sync + 'static> DebugReferences<T> {
    /// Transforms this `DebugReferences` into a [`DynDebugReferences`].
    pub fn into_dyn(self) -> DynDebugReferences {
        let Self(w) = self;
        DynDebugReferences(w)
    }
}

/// Like [`DebugReferences`], but type-erases the contained type.
#[derive(Clone)]
pub struct DynDebugReferences(alloc::sync::Weak<dyn ExposeRefs>);

impl Debug for DynDebugReferences {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        let Self(inner) = self;
        let inner = inner.upgrade();
        let refs = inner.as_ref().map(|inner| {
            let (marked_for_destruction, callers) = inner.refs_info();
            (alloc::sync::Arc::strong_count(inner), marked_for_destruction, callers)
        });
        debug_refs(refs, "DynDebugReferences", f)
    }
}

/// A trait allowing [`DynDebugReferences`] to erase the `T` type on [`Inner`].
trait ExposeRefs: Send + Sync + 'static {
    fn refs_info(&self) -> (&AtomicBool, &caller::Callers);
}

impl<T: Send + Sync + 'static> ExposeRefs for Inner<T> {
    fn refs_info(&self) -> (&AtomicBool, &caller::Callers) {
        (&self.marked_for_destruction, &self.callers)
    }
}

/// Provides delegated notification of all strong references of a [`Primary`]
/// being dropped.
///
/// See [`Primary::unwrap_with_notifier`].
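///
/// # Example
///
/// A hedged sketch of a custom notifier that forwards the unwrapped value on
/// a channel; `SendNotifier` and the use of `std::sync::mpsc` are
/// illustrative, not part of this module (`ignore`d since doctests need crate
/// context):
///
/// ```ignore
/// struct SendNotifier<T>(std::sync::mpsc::Sender<T>);
///
/// impl<T: Send> Notifier<T> for SendNotifier<T> {
///     fn notify(&mut self, data: T) {
///         // Ignore send errors: the receiver may already be gone.
///         let _ = self.0.send(data);
///     }
/// }
/// ```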
pub trait Notifier<T>: Send {
    /// Called when the data contained in the [`Primary`] reference can be
    /// extracted out because there are no more strong references to it.
    fn notify(&mut self, data: T);
}

/// An implementation of [`Notifier`] that stores the unwrapped data in a
/// `Clone` type.
///
/// Useful for tests where completion assertions are possible and useful.
#[derive(Debug, Derivative)]
#[derivative(Clone(bound = ""))]
pub struct ArcNotifier<T>(alloc::sync::Arc<crate::Mutex<Option<T>>>);

impl<T> ArcNotifier<T> {
    /// Creates a new `ArcNotifier`.
    pub fn new() -> Self {
        Self(alloc::sync::Arc::new(crate::Mutex::new(None)))
    }

    /// Takes the notified value, if any.
    pub fn take(&self) -> Option<T> {
        let Self(inner) = self;
        inner.lock().take()
    }
}

impl<T: Send> Notifier<T> for ArcNotifier<T> {
    fn notify(&mut self, data: T) {
        let Self(inner) = self;
        assert!(inner.lock().replace(data).is_none(), "notified twice");
    }
}

/// An implementation of [`Notifier`] that wraps another `Notifier` and applies
/// a function on notified objects.
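///
/// # Example
///
/// A usage sketch with [`ArcNotifier`], adapted from the `map_notifier` test
/// below (`ignore`d since doctests need crate context):
///
/// ```ignore
/// let primary = Primary::new(10);
/// let notifier = ArcNotifier::new();
/// // The mapping function runs once, when the value is finally unwrapped.
/// let map_notifier = MapNotifier::new(notifier.clone(), |data| data + 1);
/// Primary::unwrap_with_notifier(primary, map_notifier);
/// assert_eq!(notifier.take(), Some(11));
/// ```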
pub struct MapNotifier<N, F> {
    inner: N,
    map: Option<F>,
}

impl<N, F> MapNotifier<N, F> {
    /// Creates a new [`MapNotifier`] that wraps `notifier` with a mapping
    /// function `F`.
    pub fn new(notifier: N, map: F) -> Self {
        Self { inner: notifier, map: Some(map) }
    }
}

impl<A, B, N: Notifier<B>, F: FnOnce(A) -> B> Notifier<A> for MapNotifier<N, F>
where
    Self: Send,
{
    fn notify(&mut self, data: A) {
        let Self { inner, map } = self;
        let map = map.take().expect("notified twice");
        inner.notify(map(data))
    }
}

/// A handy implementation for the common `Infallible` "never" type.
impl<T> Notifier<T> for core::convert::Infallible {
    fn notify(&mut self, _data: T) {
        match *self {}
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn zombie_weak() {
        let primary = Primary::new(());
        let weak = {
            let strong = Primary::clone_strong(&primary);
            Strong::downgrade(&strong)
        };
        core::mem::drop(primary);

        assert!(weak.upgrade().is_none());
    }

    #[test]
    fn rcs() {
        const INITIAL_VAL: u8 = 1;
        const NEW_VAL: u8 = 2;

        let primary = Primary::new(crate::sync::Mutex::new(INITIAL_VAL));
        let strong = Primary::clone_strong(&primary);
        let weak = Strong::downgrade(&strong);

        *primary.lock().unwrap() = NEW_VAL;
        assert_eq!(*primary.deref().lock().unwrap(), NEW_VAL);
        assert_eq!(*strong.deref().lock().unwrap(), NEW_VAL);
        assert_eq!(*weak.upgrade().unwrap().deref().lock().unwrap(), NEW_VAL);
    }

    #[test]
    fn unwrap_primary_without_strong_held() {
        const VAL: u16 = 6;
        let primary = Primary::new(VAL);
        assert_eq!(Primary::unwrap(primary), VAL);
    }

    #[test]
    #[should_panic(expected = "can't unwrap, still had 1 strong refs")]
    fn unwrap_primary_with_strong_held() {
        let primary = Primary::new(8);
        let _strong: Strong<_> = Primary::clone_strong(&primary);
        let _: u16 = Primary::unwrap(primary);
    }

    #[test]
    #[should_panic(expected = "dropped Primary with 1 strong refs remaining")]
    fn drop_primary_with_strong_held() {
        let primary = Primary::new(9);
        let _strong: Strong<_> = Primary::clone_strong(&primary);
        core::mem::drop(primary);
    }

    // This test trips LSAN on Fuchsia for some unknown reason. The host-side
    // test should be enough to protect us against regressing on the panicking
    // check.
    #[cfg(not(target_os = "fuchsia"))]
    #[test]
    #[should_panic(expected = "oopsie")]
    fn double_panic_protect() {
        let primary = Primary::new(9);
        let strong = Primary::clone_strong(&primary);
        // This will cause primary to be dropped before strong and would yield a
        // double panic if we didn't protect against it in Primary's Drop impl.
        let _tuple_to_invert_drop_order = (primary, strong);
        panic!("oopsie");
    }

    #[cfg(feature = "rc-debug-names")]
    #[test]
    fn tracked_callers() {
        let primary = Primary::new(10);
        // Mark this position so we ensure all track_caller marks are correct in
        // the methods that support it.
        let here = Location::caller();
        let strong1 = Primary::clone_strong(&primary);
        let strong2 = strong1.clone();
        let weak = Strong::downgrade(&strong2);
        let strong3 = weak.upgrade().unwrap();

        let Primary { inner } = &primary;
        let Inner { marked_for_destruction: _, callers, data: _, notifier: _, resource_token: _ } =
            &***inner;

        let strongs = [strong1, strong2, strong3];
        let _: &Location<'_> = strongs.iter().enumerate().fold(here, |prev, (i, cur)| {
            let Strong { inner: _, caller: caller::TrackedCaller { location: cur } } = cur;
            assert_eq!(prev.file(), cur.file(), "{i}");
            assert!(prev.line() < cur.line(), "{prev} < {cur}, {i}");
            {
                let callers = callers.callers.lock().unwrap();
                assert_eq!(callers.get(cur).copied(), Some(1));
            }

            cur
        });

        // All callers must be removed from the callers map on drop.
        std::mem::drop(strongs);
        {
            let callers = callers.callers.lock().unwrap();
            let callers = callers.deref();
            assert!(callers.is_empty(), "{callers:?}");
        }
    }

    #[cfg(feature = "rc-debug-names")]
    #[test]
    fn same_location_caller_tracking() {
        fn clone_in_fn<T>(p: &Primary<T>) -> Strong<T> {
            Primary::clone_strong(p)
        }

        let primary = Primary::new(10);
        let strong1 = clone_in_fn(&primary);
        let strong2 = clone_in_fn(&primary);
        assert_eq!(strong1.caller.location, strong2.caller.location);

        let Primary { inner } = &primary;
        let Inner { marked_for_destruction: _, callers, data: _, notifier: _, resource_token: _ } =
            &***inner;

        {
            let callers = callers.callers.lock().unwrap();
            assert_eq!(callers.get(&strong1.caller.location).copied(), Some(2));
        }

        std::mem::drop(strong1);
        std::mem::drop(strong2);

        {
            let callers = callers.callers.lock().unwrap();
            let callers = callers.deref();
            assert!(callers.is_empty(), "{callers:?}");
        }
    }

    #[cfg(feature = "rc-debug-names")]
    #[test]
    #[should_panic(expected = "core/sync/src/rc.rs")]
    fn callers_in_panic() {
        let primary = Primary::new(10);
        let _strong = Primary::clone_strong(&primary);
        drop(primary);
    }

    #[test]
    fn unwrap_with_notifier() {
        let primary = Primary::new(10);
        let strong = Primary::clone_strong(&primary);
        let notifier = ArcNotifier::new();
        Primary::unwrap_with_notifier(primary, notifier.clone());
        // Strong reference is still alive.
        assert_eq!(notifier.take(), None);
        core::mem::drop(strong);
        assert_eq!(notifier.take(), Some(10));
    }

    #[test]
    fn unwrap_or_notify_with_immediate() {
        let primary = Primary::new(10);
        let result = Primary::unwrap_or_notify_with::<ArcNotifier<_>, (), _>(primary, || {
            panic!("should not try to create notifier")
        });
        assert_eq!(result, Ok(10));
    }

    #[test]
    fn unwrap_or_notify_with_deferred() {
        let primary = Primary::new(10);
        let strong = Primary::clone_strong(&primary);
        let result = Primary::unwrap_or_notify_with(primary, || {
            let notifier = ArcNotifier::new();
            (notifier.clone(), notifier)
        });
        let notifier = result.unwrap_err();
        assert_eq!(notifier.take(), None);
        core::mem::drop(strong);
        assert_eq!(notifier.take(), Some(10));
    }

    #[test]
    fn map_notifier() {
        let primary = Primary::new(10);
        let notifier = ArcNotifier::new();
        let map_notifier = MapNotifier::new(notifier.clone(), |data| (data, data + 1));
        Primary::unwrap_with_notifier(primary, map_notifier);
        assert_eq!(notifier.take(), Some((10, 11)));
    }

    #[test]
    fn new_cyclic() {
        #[derive(Debug)]
        struct Data {
            value: i32,
            weak: Weak<Data>,
        }

        let primary = Primary::new_cyclic(|weak| Data { value: 2, weak });
        assert_eq!(primary.value, 2);
        let strong = primary.weak.upgrade().unwrap();
        assert_eq!(strong.value, 2);
        assert!(Primary::ptr_eq(&primary, &strong));
    }

    macro_rules! assert_debug_id_eq {
        ($id1:expr, $id2:expr) => {
            assert_eq!(alloc::format!("{:?}", $id1), alloc::format!("{:?}", $id2))
        };
    }
    macro_rules! assert_debug_id_ne {
        ($id1:expr, $id2:expr) => {
            assert_ne!(alloc::format!("{:?}", $id1), alloc::format!("{:?}", $id2))
        };
    }

    #[test]
    fn debug_ids_are_stable() {
        // Verify that transforming a given RC doesn't change its debug_id.
        let primary = Primary::new(1);
        let strong = Primary::clone_strong(&primary);
        let weak_p = Primary::downgrade(&primary);
        let weak_s = Strong::downgrade(&strong);
        let weak_c = weak_p.clone();
        assert_debug_id_eq!(&primary.debug_id(), &strong.debug_id());
        assert_debug_id_eq!(&primary.debug_id(), &weak_p.debug_id());
        assert_debug_id_eq!(&primary.debug_id(), &weak_s.debug_id());
        assert_debug_id_eq!(&primary.debug_id(), &weak_c.debug_id());
    }

    #[test]
    fn debug_ids_are_unique() {
        // Verify that RCs to different data have different debug_ids.
        let primary1 = Primary::new(1);
        let primary2 = Primary::new(1);
        assert_debug_id_ne!(&primary1.debug_id(), &primary2.debug_id());

        // Verify that dropping an RC does not allow its debug_id to be reused.
        let id1 = format!("{:?}", primary1.debug_id());
        std::mem::drop(primary1);
        let primary3 = Primary::new(1);
        assert_ne!(id1, format!("{:?}", primary3.debug_id()));
    }
}