starnix_types/
ownership.rs

1// Copyright 2023 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
//! This crate introduces a framework to handle explicit ownership.
6//!
//! Explicit ownership is used for objects that need to be cleaned up, but cannot use `Drop`
//! because the release operation requires a context. For example, when using the rust types to
//! ensure the locking order, taking a lock requires knowing what locks are already held at this
//! point, and uses an explicit object to represent this. If the object needs to take a lock
//! during the release operation, `Drop` cannot provide it.
12//!
//! An object that uses explicit ownership implements the `Releasable` trait. The user must call
//! the `release` method on it before it goes out of scope.
15//!
//! A shared object that uses explicit ownership uses the `OwnedRef`/`WeakRef`/`TempRef`
//! containers.
//! The meanings are the following:
//! - Objects that own the shared object use `OwnedRef`. They are responsible for calling
//! `release` before dropping the reference.
//! - Objects that do not own the shared object use `WeakRef`. This acts as a weak reference to
//! the object. They can convert it to a strong reference using the `upgrade` method. The returned
//! value is an `Option<TempRef>`. The `TempRef` allows access to the object. Because this doesn't
//! represent ownership, the `TempRef` must not be kept; in particular, the user should not do any
//! blocking operation while having a `TempRef`.
26
27// Not all instance of OwnedRef and Releasable are used in non test code yet.
28#![allow(dead_code)]
29
30// TODO(https://fxbug.dev/42081310): Create a linter to ensure TempRef is not held while calling any blocking
31// operation.
32
33use core::hash::Hasher;
34
35use std::hash::Hash;
36use std::ops::Deref;
37use std::sync::atomic::{AtomicUsize, Ordering, fence};
38use std::sync::{Arc, Weak};
39
40/// The base trait for explicit ownership. Any `Releasable` object must call `release` before
41/// being dropped.
/// The base trait for explicit ownership. Any `Releasable` object must call `release` before
/// being dropped.
pub trait Releasable {
    /// The context required by `release` (e.g. lock-ordering state), borrowed for the duration
    /// of the call.
    type Context<'a>;

    // TODO(https://fxbug.dev/42081308): Only the `self` version should exist, but this is
    // problematic with Task and CurrentTask at this point.
    fn release<'a>(self: Self, c: Self::Context<'a>);
}
49
50/// Releasing an option calls release if the option is not empty.
51impl<T: Releasable> Releasable for Option<T> {
52    type Context<'a> = T::Context<'a>;
53
54    fn release<'a>(self: Self, c: Self::Context<'a>) {
55        if let Some(v) = self {
56            v.release(c);
57        }
58    }
59}
60
61/// Releasing a vec calls release on each element
62impl<T: Releasable> Releasable for Vec<T>
63where
64    for<'a> T::Context<'a>: Clone,
65{
66    type Context<'a> = T::Context<'a>;
67
68    fn release<'a>(self: Self, c: Self::Context<'a>) {
69        for v in self {
70            v.release(c.clone());
71        }
72    }
73}
74
75/// Releasing a result calls release on the value if the result is ok.
76impl<T: Releasable, E> Releasable for Result<T, E> {
77    type Context<'a> = T::Context<'a>;
78
79    fn release<'a>(self: Self, c: Self::Context<'a>) {
80        if let Ok(v) = self {
81            v.release(c);
82        }
83    }
84}
85
/// Releasing a `ReleaseGuard` disarms its drop-check, then releases the wrapped value.
impl<T: Releasable> Releasable for ReleaseGuard<T> {
    type Context<'a> = T::Context<'a>;

    fn release<'a>(self: Self, c: Self::Context<'a>) {
        // Disarm first: once the release is underway, the guard's drop-time panic must not fire.
        self.drop_guard.disarm();
        self.value.release(c);
    }
}
94
/// Trait for object that can be shared. This is an equivalent of `Clone` for objects that require
/// to be released.
pub trait Share {
    /// Returns a new handle to the same underlying object. The new handle must also be released.
    fn share(&self) -> Self;
}
100
101impl<T: Share> Share for Option<T> {
102    fn share(&self) -> Self {
103        match self {
104            None => None,
105            Some(t) => Some(t.share()),
106        }
107    }
108}
109
/// An owning reference to a shared owned object. Each instance must call `release` before being
/// dropped.
/// `OwnedRef` will panic on Drop in debug builds if it has not been released.
#[must_use = "OwnedRef must be released"]
pub struct OwnedRef<T> {
    /// The shared data. `None` only after `take` has extracted the value.
    inner: Option<Arc<RefInner<T>>>,

    /// A guard that will ensure a panic on drop if the ref has not been released.
    drop_guard: DropGuard,
}

impl<T> OwnedRef<T> {
    /// Wraps `value` into a new shared object with a single owner.
    pub fn new(value: T) -> Self {
        Self { inner: Some(Arc::new(RefInner::new(value))), drop_guard: Default::default() }
    }

    /// Like `Arc::new_cyclic`: `data_fn` receives a `WeakRef` to the object being constructed,
    /// allowing self-referential data.
    pub fn new_cyclic<F>(data_fn: F) -> Self
    where
        F: FnOnce(WeakRef<T>) -> T,
    {
        let inner = Arc::new_cyclic(|weak_inner| {
            let weak = WeakRef(weak_inner.clone());
            RefInner::new(data_fn(weak))
        });
        Self { inner: Some(inner), drop_guard: Default::default() }
    }

    /// Provides a raw pointer to the data.
    ///
    /// See `Arc::as_ptr`
    pub fn as_ptr(this: &Self) -> *const T {
        &Self::inner(this).value.value as *const T
    }

    /// Returns true if the two objects point to the same allocation
    pub fn ptr_eq(this: &Self, other: &Self) -> bool {
        Self::as_ptr(this) == Self::as_ptr(other)
    }

    /// Produce a `WeakRef` from a `OwnedRef`.
    pub fn downgrade(this: &Self) -> WeakRef<T> {
        WeakRef(Arc::downgrade(Self::inner(this)))
    }

    /// Produce a `TempRef` from a `OwnedRef`. As an `OwnedRef` exists at the time of the creation,
    /// this cannot fail.
    pub fn temp(this: &Self) -> TempRef<'_, T> {
        TempRef::new(Arc::clone(Self::inner(this)))
    }

    /// Access the shared state, panicking if this `OwnedRef` has already been released.
    fn inner(this: &Self) -> &Arc<RefInner<T>> {
        this.inner.as_ref().unwrap_or_else(|| {
            panic!("OwnedRef<{}> has been released.", std::any::type_name::<T>())
        })
    }

    /// Try to mint a new `OwnedRef` from the shared state. Returns `None` once the owner count
    /// has reached 0, because it must never increase again at that point.
    fn re_own(inner: Arc<RefInner<T>>) -> Option<Self> {
        let mut owned_refs = inner.owned_refs_count.load(Ordering::Relaxed);
        // CAS loop: only increment the count while it is still non-zero, so a concurrently
        // released object cannot be resurrected.
        loop {
            if owned_refs == 0 {
                return None;
            }
            match inner.owned_refs_count.compare_exchange(
                owned_refs,
                owned_refs + 1,
                Ordering::Acquire,
                Ordering::Relaxed,
            ) {
                Ok(_) => {
                    return Some(Self { inner: Some(inner), drop_guard: Default::default() });
                }
                Err(v) => {
                    // Another thread changed the count; retry with the freshly observed value.
                    owned_refs = v;
                }
            }
        }
    }
}

impl<T: Releasable> OwnedRef<T> {
    /// Take the releasable from the `OwnedRef`. Returns None if the `OwnedRef` is not the last
    /// reference to the data.
    pub fn take(this: &mut Self) -> Option<ReleaseGuard<T>> {
        this.drop_guard.disarm();
        let inner = this.inner.take().unwrap_or_else(|| {
            panic!("OwnedRef<{}> has been released.", std::any::type_name::<T>())
        });
        let previous_count = inner.owned_refs_count.fetch_sub(1, Ordering::Release);
        if previous_count == 1 {
            // Last owner: pair the Release decrement with an Acquire fence so writes made by the
            // other (now gone) owners are visible before the value is extracted.
            fence(Ordering::Acquire);
            Some(Self::wait_and_take_value(inner))
        } else {
            None
        }
    }

    /// Wait for this `OwnedRef` to be the only left reference to the data. This should only be
    /// called once the last `OwnedRef` has been released. This will wait for all existing
    /// `TempRef` to be dropped before returning.
    fn wait_and_take_value(mut inner: Arc<RefInner<T>>) -> ReleaseGuard<T> {
        loop {
            // Ensure no more `OwnedRef` exists.
            debug_assert_eq!(inner.owned_refs_count.load(Ordering::Acquire), 0);
            match Arc::try_unwrap(inner) {
                Ok(value) => return value.value,
                Err(value) => inner = value,
            }
            // Some `TempRef` still holds a strong Arc: block on the futex until the temp-ref
            // count drops to 0, then retry the unwrap.
            inner.wait_for_no_ref_once();
        }
    }
}
222
223impl<T: std::fmt::Debug> std::fmt::Debug for OwnedRef<T> {
224    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
225        Self::inner(self).value.fmt(f)
226    }
227}
228
impl<T: Releasable> Share for OwnedRef<T> {
    /// Clone the `OwnedRef`. Both the current and the new reference needs to be `release`d.
    fn share(&self) -> Self {
        let inner = Self::inner(self);
        // Relaxed is sufficient here: the caller already holds an `OwnedRef`, so the count
        // cannot concurrently drop to 0 while this increment is in flight.
        let previous_count = inner.owned_refs_count.fetch_add(1, Ordering::Relaxed);
        debug_assert!(previous_count > 0, "OwnedRef should not be used after being released.");
        Self { inner: Some(Arc::clone(inner)), drop_guard: Default::default() }
    }
}
238
impl<T: Releasable> Releasable for OwnedRef<T> {
    type Context<'a> = T::Context<'a>;

    /// Release the `OwnedRef`. If this is the last instance, this method will block until all
    /// `TempRef` instances are dropped, and will release the underlying object.
    #[allow(unused_mut)]
    fn release<'a>(mut self, c: Self::Context<'a>) {
        // `take` disarms the drop guard and yields the value only for the last owner;
        // releasing the resulting `Option<ReleaseGuard<T>>` is a no-op for every other owner.
        OwnedRef::take(&mut self).release(c);
    }
}
249
250impl<T: Default> Default for OwnedRef<T> {
251    fn default() -> Self {
252        Self::new(T::default())
253    }
254}
255
256impl<T> std::ops::Deref for OwnedRef<T> {
257    type Target = T;
258
259    fn deref(&self) -> &Self::Target {
260        &Self::inner(self).deref().value
261    }
262}
263
264impl<T> std::borrow::Borrow<T> for OwnedRef<T> {
265    fn borrow(&self) -> &T {
266        self.deref()
267    }
268}
269
270impl<T> std::convert::AsRef<T> for OwnedRef<T> {
271    fn as_ref(&self) -> &T {
272        self.deref()
273    }
274}
275
276impl<T: PartialEq> PartialEq<TempRef<'_, T>> for OwnedRef<T> {
277    fn eq(&self, other: &TempRef<'_, T>) -> bool {
278        Arc::ptr_eq(Self::inner(self), &other.0)
279    }
280}
281
282impl<T: PartialEq> PartialEq for OwnedRef<T> {
283    fn eq(&self, other: &OwnedRef<T>) -> bool {
284        Arc::ptr_eq(Self::inner(self), Self::inner(other)) || **self == **other
285    }
286}
287
288impl<T: Eq> Eq for OwnedRef<T> {}
289
290impl<T: PartialOrd> PartialOrd for OwnedRef<T> {
291    fn partial_cmp(&self, other: &OwnedRef<T>) -> Option<std::cmp::Ordering> {
292        (**self).partial_cmp(&**other)
293    }
294}
295
296impl<T: Ord> Ord for OwnedRef<T> {
297    fn cmp(&self, other: &OwnedRef<T>) -> std::cmp::Ordering {
298        (**self).cmp(&**other)
299    }
300}
301
302impl<T: Hash> Hash for OwnedRef<T> {
303    fn hash<H: Hasher>(&self, state: &mut H) {
304        (**self).hash(state)
305    }
306}
307
308impl<T> From<&OwnedRef<T>> for WeakRef<T> {
309    fn from(owner: &OwnedRef<T>) -> Self {
310        OwnedRef::downgrade(owner)
311    }
312}
313
314impl<'a, T> From<&'a OwnedRef<T>> for TempRef<'a, T> {
315    fn from(owner: &'a OwnedRef<T>) -> Self {
316        OwnedRef::temp(owner)
317    }
318}
319
320impl<'a, T> From<&'a mut OwnedRef<T>> for TempRef<'a, T> {
321    fn from(owner: &'a mut OwnedRef<T>) -> Self {
322        OwnedRef::temp(owner)
323    }
324}
325
/// A weak reference to a shared owned object. The `upgrade` method try to build a `TempRef` from a
/// `WeakRef` and will fail if there is no `OwnedRef` left.
#[derive(Debug)]
pub struct WeakRef<T>(Weak<RefInner<T>>);

impl<T> WeakRef<T> {
    /// Builds a dangling `WeakRef` that can never be upgraded.
    pub fn new() -> Self {
        Self(Weak::new())
    }

    /// Try to upgrade the `WeakRef` into a `TempRef`. This will fail as soon as the last
    /// `OwnedRef` is released, even if some `TempRef` still exist at that time. The returned
    /// `TempRef` must be dropped as soon as possible. In particular, it must not be kept across
    /// blocking calls.
    pub fn upgrade(&self) -> Option<TempRef<'_, T>> {
        if let Some(value) = self.0.upgrade() {
            // As soon as the Arc has been upgraded, creates a `TempRef` to ensure the futex is woken
            // up in case `upgrade` and `release` are racing.
            let temp_ref = TempRef::new(value);
            // Only returns a valid `TempRef` if there are still some un-released `OwnedRef`. As
            // soon as `release` is called, no more `TempRef` can be acquired.
            if temp_ref.0.owned_refs_count.load(Ordering::Acquire) > 0 {
                return Some(temp_ref);
            }
        }
        None
    }

    /// Try to upgrade the `WeakRef` into a `OwnedRef`. This will fail as soon as the last
    /// `OwnedRef` is released.
    pub fn re_own(&self) -> Option<OwnedRef<T>> {
        self.0.upgrade().and_then(OwnedRef::re_own)
    }

    /// Returns a raw pointer to the object T pointed to by this WeakRef<T>.
    ///
    /// See `Weak::as_ptr`
    pub fn as_ptr(&self) -> *const T {
        // Computed field-by-field with `raw_field!` (no dereference), because the allocation
        // behind a `Weak` may already be dead and must not be read.
        let base = self.0.as_ptr();
        let value = memoffset::raw_field!(base, RefInner<T>, value);
        memoffset::raw_field!(value, ReleaseGuard<T>, value)
    }

    /// Returns true if the two objects point to the same allocation
    pub fn ptr_eq(this: &Self, other: &Self) -> bool {
        Self::as_ptr(this) == Self::as_ptr(other)
    }
}
374
375impl<T> Default for WeakRef<T> {
376    fn default() -> Self {
377        Self::new()
378    }
379}
380
381impl<T> Clone for WeakRef<T> {
382    fn clone(&self) -> Self {
383        Self(self.0.clone())
384    }
385}
386
387/// Wrapper around `WeakRef` allowing to use it in a Set or as a key of a Map.
388pub struct WeakRefKey<T>(pub WeakRef<T>);
389impl<T> PartialEq for WeakRefKey<T> {
390    fn eq(&self, other: &Self) -> bool {
391        WeakRef::ptr_eq(&self.0, &other.0)
392    }
393}
394impl<T> PartialOrd for WeakRefKey<T> {
395    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
396        Some(self.cmp(other))
397    }
398}
399impl<T> Ord for WeakRefKey<T> {
400    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
401        WeakRef::as_ptr(&self.0).cmp(&WeakRef::as_ptr(&other.0))
402    }
403}
404impl<T> From<WeakRef<T>> for WeakRefKey<T> {
405    fn from(weak_ref: WeakRef<T>) -> Self {
406        Self(weak_ref)
407    }
408}
409impl<'a, T> From<&TempRef<'a, T>> for WeakRefKey<T> {
410    fn from(temp_ref: &TempRef<'a, T>) -> Self {
411        Self(WeakRef::from(temp_ref))
412    }
413}
414impl<'a, T> From<&OwnedRef<T>> for WeakRefKey<T> {
415    fn from(owned_ref: &OwnedRef<T>) -> Self {
416        Self(WeakRef::from(owned_ref))
417    }
418}
419impl<T> Clone for WeakRefKey<T> {
420    fn clone(&self) -> Self {
421        Self(self.0.clone())
422    }
423}
424impl<T> Eq for WeakRefKey<T> {}
425impl<T> Hash for WeakRefKey<T> {
426    fn hash<H: Hasher>(&self, state: &mut H) {
427        WeakRef::as_ptr(&self.0).hash(state);
428    }
429}
430impl<T> std::ops::Deref for WeakRefKey<T> {
431    type Target = WeakRef<T>;
432    fn deref(&self) -> &Self::Target {
433        &self.0
434    }
435}
436impl<T> std::fmt::Debug for WeakRefKey<T> {
437    fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> {
438        fmt.debug_tuple(std::any::type_name::<Self>()).field(&self.0.as_ptr()).finish()
439    }
440}
441
/// A temporary reference to a shared owned object. This permits access to the shared object, but
/// will block any thread trying to release the last `OwnedRef`. As such, such reference must be
/// released as soon as possible. In particular, one must not do any blocking operation while
/// owning such a reference.
// Until negative trait bound are implemented, using `*mut u8` to prevent transferring TempRef
// across threads.
pub struct TempRef<'a, T>(Arc<RefInner<T>>, std::marker::PhantomData<(&'a T, *mut u8)>);
449
450impl<'a, T> std::fmt::Debug for TempRef<'a, T>
451where
452    T: std::fmt::Debug,
453{
454    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
455        self.deref().fmt(f)
456    }
457}
458
impl<'a, T> Drop for TempRef<'a, T> {
    fn drop(&mut self) {
        // Keeps `temp_refs_count` accurate and wakes any releaser waiting for it to reach 0.
        self.0.dec_temp_ref();
    }
}

impl<'a, T> TempRef<'a, T> {
    /// Build a new TempRef. Ensures `temp_refs_count` is correctly updated.
    fn new(inner: Arc<RefInner<T>>) -> Self {
        inner.inc_temp_ref();
        Self(inner, Default::default())
    }

    /// Provides a raw pointer to the data.
    ///
    /// See `Arc::as_ptr`
    pub fn as_ptr(this: &Self) -> *const T {
        &this.0.value.value as *const T
    }

    /// Returns true if the two objects point to the same allocation
    pub fn ptr_eq(this: &Self, other: &Self) -> bool {
        Self::as_ptr(this) == Self::as_ptr(other)
    }

    /// This allows to change the lifetime annotation of a `TempRef` to static.
    ///
    /// As `TempRef` must be dropped as soon as possible, this provided the way to block the release
    /// of the related `OwnedRef`s and as such is considered sensitive. Any caller must ensure that
    /// the returned `TempRef` is not kept around while doing blocking calls.
    pub fn into_static(this: Self) -> TempRef<'static, T> {
        // `TempRef::new` increments the count for the returned reference; `this` is dropped at
        // the end of this function and decrements it back, so the net count is unchanged.
        TempRef::new(this.0.clone())
    }

    /// Try to upgrade the `WeakRef` into a `OwnedRef`. This will fail as soon as the last
    /// `OwnedRef` is released.
    pub fn re_own(&self) -> Option<OwnedRef<T>> {
        OwnedRef::re_own(Arc::clone(&self.0))
    }
}
499
500impl<'a, T> From<&TempRef<'a, T>> for WeakRef<T> {
501    fn from(temp_ref: &TempRef<'a, T>) -> Self {
502        Self(Arc::downgrade(&temp_ref.0))
503    }
504}
505
506impl<'a, T> From<TempRef<'a, T>> for WeakRef<T> {
507    fn from(temp_ref: TempRef<'a, T>) -> Self {
508        Self(Arc::downgrade(&temp_ref.0))
509    }
510}
511
512impl<'a, T> std::ops::Deref for TempRef<'a, T> {
513    type Target = T;
514
515    fn deref(&self) -> &Self::Target {
516        &self.0.deref().value
517    }
518}
519
520impl<'a, T> std::borrow::Borrow<T> for TempRef<'a, T> {
521    fn borrow(&self) -> &T {
522        &self.0.deref().value
523    }
524}
525
526impl<'a, T> std::convert::AsRef<T> for TempRef<'a, T> {
527    fn as_ref(&self) -> &T {
528        &self.0.deref().value
529    }
530}
531
532impl<'a, T: PartialEq> PartialEq for TempRef<'a, T> {
533    fn eq(&self, other: &TempRef<'_, T>) -> bool {
534        Arc::ptr_eq(&self.0, &other.0) || **self == **other
535    }
536}
537
538impl<'a, T: Eq> Eq for TempRef<'a, T> {}
539
540impl<'a, T: PartialOrd> PartialOrd for TempRef<'a, T> {
541    fn partial_cmp(&self, other: &TempRef<'_, T>) -> Option<std::cmp::Ordering> {
542        (**self).partial_cmp(&**other)
543    }
544}
545
546impl<'a, T: Ord> Ord for TempRef<'a, T> {
547    fn cmp(&self, other: &TempRef<'_, T>) -> std::cmp::Ordering {
548        (**self).cmp(&**other)
549    }
550}
551
552impl<'a, T: Hash> Hash for TempRef<'a, T> {
553    fn hash<H: Hasher>(&self, state: &mut H) {
554        (**self).hash(state)
555    }
556}
557
558/// Wrapper around `TempRef` allowing to use it in a Set or as a key of a Map.
559pub struct TempRefKey<'a, T>(pub TempRef<'a, T>);
560impl<'a, T> PartialEq for TempRefKey<'a, T> {
561    fn eq(&self, other: &Self) -> bool {
562        TempRef::ptr_eq(&self.0, &other.0)
563    }
564}
565impl<'a, T> Eq for TempRefKey<'a, T> {}
566impl<'a, T> Hash for TempRefKey<'a, T> {
567    fn hash<H: Hasher>(&self, state: &mut H) {
568        TempRef::as_ptr(&self.0).hash(state);
569    }
570}
571impl<'a, T> std::ops::Deref for TempRefKey<'a, T> {
572    type Target = T;
573    fn deref(&self) -> &Self::Target {
574        self.0.deref()
575    }
576}
577
/// A wrapper around a Releasable object that will check, in test and when assertions are enabled,
/// that the value has been released before being dropped.
#[must_use = "ReleaseGuard must be released"]
pub struct ReleaseGuard<T> {
    /// The wrapped value.
    value: T,

    /// A guard that will ensure a panic on drop if the ref has not been released.
    drop_guard: DropGuard,
}
588
#[cfg(test)]
impl<T> ReleaseGuard<T> {
    /// Builds a guard that is already disarmed, for tests that never release the value.
    pub fn new_released(value: T) -> Self {
        let guard = ReleaseGuard::from(value);
        guard.drop_guard.disarm();
        guard
    }
}
597
598impl<T> ReleaseGuard<T> {
599    /// Disarm this release guard.
600    ///
601    /// This will prevent any runtime check that the `value` has been correctly released.
602    pub fn take(this: ReleaseGuard<T>) -> T {
603        this.drop_guard.disarm();
604        this.value
605    }
606}
607
#[cfg(test)]
impl<T: Default> ReleaseGuard<T> {
    /// Shorthand for `new_released(T::default())`.
    pub fn default_released() -> Self {
        ReleaseGuard::new_released(Default::default())
    }
}
614
615impl<T: std::fmt::Debug> std::fmt::Debug for ReleaseGuard<T> {
616    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
617        self.value.fmt(f)
618    }
619}
620
621impl<T: Default> Default for ReleaseGuard<T> {
622    fn default() -> Self {
623        T::default().into()
624    }
625}
626
627impl<T: Clone> Clone for ReleaseGuard<T> {
628    fn clone(&self) -> Self {
629        self.value.clone().into()
630    }
631}
632
633impl<T> From<T> for ReleaseGuard<T> {
634    fn from(value: T) -> Self {
635        Self { value, drop_guard: Default::default() }
636    }
637}
638
639impl<T> std::ops::Deref for ReleaseGuard<T> {
640    type Target = T;
641
642    fn deref(&self) -> &Self::Target {
643        &self.value
644    }
645}
646
647impl<T> std::ops::DerefMut for ReleaseGuard<T> {
648    fn deref_mut(&mut self) -> &mut Self::Target {
649        &mut self.value
650    }
651}
652
653impl<T> std::borrow::Borrow<T> for ReleaseGuard<T> {
654    fn borrow(&self) -> &T {
655        self.deref()
656    }
657}
658
659impl<T> std::convert::AsRef<T> for ReleaseGuard<T> {
660    fn as_ref(&self) -> &T {
661        self.deref()
662    }
663}
664
665impl<T: PartialEq> PartialEq for ReleaseGuard<T> {
666    fn eq(&self, other: &ReleaseGuard<T>) -> bool {
667        **self == **other
668    }
669}
670
671impl<T: Eq> Eq for ReleaseGuard<T> {}
672
673impl<T: PartialOrd> PartialOrd for ReleaseGuard<T> {
674    fn partial_cmp(&self, other: &ReleaseGuard<T>) -> Option<std::cmp::Ordering> {
675        (**self).partial_cmp(&**other)
676    }
677}
678
679impl<T: Ord> Ord for ReleaseGuard<T> {
680    fn cmp(&self, other: &ReleaseGuard<T>) -> std::cmp::Ordering {
681        (**self).cmp(&**other)
682    }
683}
684
685impl<T: Hash> Hash for ReleaseGuard<T> {
686    fn hash<H: Hasher>(&self, state: &mut H) {
687        (**self).hash(state)
688    }
689}
690
/// Debug-only marker that panics on drop unless `disarm` was called first.
///
/// In builds without the `test` or `debug_assertions` cfgs, this is a zero-sized no-op.
#[derive(Default, Debug)]
pub struct DropGuard {
    #[cfg(any(test, debug_assertions))]
    released: std::sync::atomic::AtomicBool,
}

impl DropGuard {
    /// Mark the guard as released, making its eventual drop silent.
    ///
    /// Panics if called twice on the same guard.
    #[inline(always)]
    pub fn disarm(&self) {
        #[cfg(any(test, debug_assertions))]
        {
            // compare_exchange guarantees exactly one caller can flip false -> true.
            if self
                .released
                .compare_exchange(false, true, Ordering::AcqRel, Ordering::Acquire)
                .is_err()
            {
                panic!("Guard was disarmed twice");
            }
        }
    }
}

#[cfg(any(test, debug_assertions))]
impl Drop for DropGuard {
    fn drop(&mut self) {
        // Dropping an armed guard means the owner forgot to call `release`.
        assert!(*self.released.get_mut());
    }
}
#[cfg(any(test, debug_assertions))]
thread_local! {
    /// Number of `TempRef` in the current thread. This is used to ensure there is no `TempRef`
    /// while doing a blocking operation.
    static TEMP_REF_LOCAL_COUNT: std::cell::RefCell<usize> = const { std::cell::RefCell::new(0) };
}
725
726/// Assert that no temp ref exist on the current thread. This is used before executing a blocking
727/// operation to ensure it will not prevent a OwnedRef release.
728pub fn debug_assert_no_local_temp_ref() {
729    #[cfg(any(test, debug_assertions))]
730    {
731        TEMP_REF_LOCAL_COUNT.with(|count| {
732            assert_eq!(*count.borrow(), 0, "Current threads owns {} TempRef", *count.borrow());
733        });
734    }
735}
736
/// The internal data of `OwnedRef`/`WeakRef`/`TempRef`.
///
/// To ensure that `wait_for_no_ref_once` is correct, the following constraints must apply:
/// - Once `owned_refs_count` reaches 0, it must never increase again.
/// - The strong count of `Arc<Self>` must always be incremented before `temp_refs_count` is
///   incremented.
/// - Whenever the strong count of `Arc<Self>` is incremented, `temp_refs_count` must be
///   increased.
/// This ensures that `wait_for_no_ref_once` will always be notified when it is waiting on the
/// `temp_refs_count` futex and the number of `TempRef` reaches 0.
struct RefInner<T> {
    /// The underlying value.
    value: ReleaseGuard<T>,
    /// The number of `OwnedRef` sharing this data.
    owned_refs_count: AtomicUsize,
    /// The number of `TempRef` sharing this data.
    // This is close to a duplicate of the Arc strong_count, and could be replaced by it if this
    // module reimplemented all of Arc/Weak. This can be changed without changing the API if this
    // becomes a performance issue.
    temp_refs_count: zx::Futex,
}

impl<T> RefInner<T> {
    /// Builds the shared state for a single initial `OwnedRef` and no `TempRef`.
    fn new(value: T) -> Self {
        Self {
            value: value.into(),
            owned_refs_count: AtomicUsize::new(1),
            temp_refs_count: zx::Futex::new(0),
        }
    }

    /// Increase `temp_refs_count`. Must be called each time a new `TempRef` is built.
    fn inc_temp_ref(&self) {
        // Relaxed is enough: the caller already holds a strong Arc reference, so the data
        // cannot be reclaimed concurrently.
        self.temp_refs_count.fetch_add(1, Ordering::Relaxed);
        #[cfg(any(test, debug_assertions))]
        {
            // Also track a per-thread count so `debug_assert_no_local_temp_ref` can detect
            // TempRefs held across blocking operations.
            TEMP_REF_LOCAL_COUNT.with(|count| {
                *count.borrow_mut() += 1;
            });
        }
    }

    /// Decrease `temp_refs_count`. Must be called each time a new `TempRef` is dropped.
    ///
    /// This will wake the futex on `temp_refs_count` when it reaches 0.
    fn dec_temp_ref(&self) {
        let previous_count = self.temp_refs_count.fetch_sub(1, Ordering::Release);
        if previous_count == 1 {
            // Pair the Release decrement with an Acquire fence so the woken waiter observes all
            // writes made while this TempRef was alive, then wake it.
            fence(Ordering::Acquire);
            self.temp_refs_count.wake_single_owner();
        }
        #[cfg(any(test, debug_assertions))]
        {
            TEMP_REF_LOCAL_COUNT.with(|count| {
                *count.borrow_mut() -= 1;
            });
        }
    }

    /// Wait for `temp_refs_count` to reach 0 once using the futex.
    fn wait_for_no_ref_once(self: &Arc<Self>) {
        // Compute the current number of temp refs, and wait for it to drop to 0.
        let current_value = self.temp_refs_count.load(Ordering::Acquire);
        if current_value == 0 {
            // It is already 0, return.
            return;
        }
        // Otherwise, wait on the futex that will be woken up when the number of temp_ref drops
        // to 0. BAD_STATE means the futex value changed before the wait started, which is an
        // acceptable spurious wakeup: the caller loops.
        let result = self.temp_refs_count.wait(current_value, None, zx::MonotonicInstant::INFINITE);
        debug_assert!(
            result == Ok(()) || result == Err(zx::Status::BAD_STATE),
            "Unexpected result: {result:?}"
        );
    }
}
813
/// Macro that ensures the releasable is released with the given context if the body returns an
/// error.
#[macro_export]
macro_rules! release_on_error {
    ($releasable_name:ident, $context:expr, $body:block ) => {{
        #[allow(clippy::redundant_closure_call)]
        let result = { (|| $body)() };
        match result {
            Err(e) => {
                // On error, release before propagating so the caller never sees an armed guard.
                $releasable_name.release($context);
                return Err(e);
            }
            Ok(x) => x,
        }
    }};
}

/// Macro that ensures the releasable is released with the given context after the body returns.
#[macro_export]
macro_rules! release_after {
    // Async form: the body is awaited before the release happens.
    // NOTE(review): this matcher takes `async || Type { .. }` (no `->`), unlike the
    // `async || -> Type { .. }` form accepted by `release_iter_after!` — confirm whether the
    // asymmetry is intentional before changing either.
    ($releasable_name:ident, $context:expr, async || $($output_type:ty)? $body:block ) => {{
        #[allow(clippy::redundant_closure_call)]
        let result = { (async || $(-> $output_type)? { $body })().await };
        $releasable_name.release($context);
        result
    }};
    // Synchronous form.
    ($releasable_name:ident, $context:expr, $(|| -> $output_type:ty)? $body:block ) => {{
        #[allow(clippy::redundant_closure_call)]
        let result = { (|| $(-> $output_type)? { $body })() };
        $releasable_name.release($context);
        result
    }};
}

/// Macro that ensures the iterator of releasables are released with the given context after
/// the body returns.
#[macro_export]
macro_rules! release_iter_after {
    // Async form: awaits the body, then releases every element of the iterator.
    ($releasable_iter:ident, $context:expr, async || $(-> $output_type:ty)? $body:block ) => {{
        #[allow(clippy::redundant_closure_call)]
        let result = { (async || $(-> $output_type)? { $body })().await };
        for item in $releasable_iter.into_iter() {
            item.release($context);
        }
        result
    }};
    // Synchronous form.
    ($releasable_iter:ident, $context:expr, $(|| -> $output_type:ty)? $body:block ) => {{
        #[allow(clippy::redundant_closure_call)]
        let result = { (|| $(-> $output_type)? { $body })() };
        for item in $releasable_iter.into_iter() {
            item.release($context);
        }
        result
    }};
}
869
870pub use {release_after, release_iter_after, release_on_error};
871
872#[cfg(test)]
873mod test {
874    use super::*;
875
876    #[derive(Default)]
877    struct Data;
878
879    impl Releasable for Data {
880        type Context<'a> = ();
881        fn release<'a>(self, _: ()) {}
882    }
883
884    #[derive(Default)]
885    struct DataWithMutableReleaseContext;
886
887    impl Releasable for DataWithMutableReleaseContext {
888        type Context<'a> = &'a mut ();
889        fn release<'a>(self, _: &'a mut ()) {}
890    }
891
892    #[::fuchsia::test]
893    #[should_panic]
894    fn drop_without_release() {
895        let _ = OwnedRef::new(Data {});
896    }
897
898    #[::fuchsia::test]
899    fn test_creation_and_reference() {
900        let value = OwnedRef::new(Data {});
901        let reference = WeakRef::from(&value);
902        reference.upgrade().expect("upgrade");
903        value.release(());
904        assert!(reference.upgrade().is_none());
905    }
906
907    #[::fuchsia::test]
908    fn test_clone() {
909        let value = OwnedRef::new(Data {});
910        {
911            let value2 = OwnedRef::share(&value);
912            value2.release(());
913        }
914        #[allow(clippy::redundant_clone)]
915        {
916            let reference = WeakRef::from(&value);
917            let _reference2 = reference.clone();
918        }
919        value.release(());
920    }
921
922    #[::fuchsia::test]
923    fn test_default() {
924        let reference = WeakRef::<Data>::default();
925        assert!(reference.upgrade().is_none());
926    }
927
928    #[::fuchsia::test]
929    fn test_release_on_error() {
930        fn release_on_error() -> Result<(), ()> {
931            let value = OwnedRef::new(Data {});
932            release_on_error!(value, (), {
933                if true {
934                    return Err(());
935                }
936                Ok(())
937            });
938            Ok(())
939        }
940        assert_eq!(release_on_error(), Err(()));
941    }
942
943    #[::fuchsia::test]
944    fn test_into_static() {
945        let value = OwnedRef::new(Data {});
946        let weak = WeakRef::from(&value);
947        // SAFETY: This is safe, as static_ref remains on the stack.
948        let static_ref = TempRef::into_static(weak.upgrade().unwrap());
949        // Check that weak can now be dropped.
950        std::mem::drop(weak);
951        // Drop static_ref
952        std::mem::drop(static_ref);
953        value.release(());
954    }
955
    #[::fuchsia::test]
    fn test_debug_assert_no_local_temp_ref() {
        // No TempRef exists yet on this thread.
        debug_assert_no_local_temp_ref();
        let value = OwnedRef::new(Data {});
        // An OwnedRef alone does not count as a local TempRef.
        debug_assert_no_local_temp_ref();
        let _temp_ref = OwnedRef::temp(&value);
        // The check is per-thread: another thread passes even while this
        // thread holds a TempRef.
        std::thread::spawn(|| {
            debug_assert_no_local_temp_ref();
        })
        .join()
        .expect("join");
        std::mem::drop(_temp_ref);
        // Once the TempRef is dropped, this thread is clean again.
        debug_assert_no_local_temp_ref();
        value.release(());
        debug_assert_no_local_temp_ref();
    }
972
    #[::fuchsia::test]
    #[should_panic]
    fn test_debug_assert_no_local_temp_ref_aborts() {
        let value = OwnedRef::new(Data {});
        {
            // Calling the assertion while a TempRef is live must panic.
            let _temp_ref = OwnedRef::temp(&value);
            debug_assert_no_local_temp_ref();
        }
        // This code should not be reached, but ensures the test will fail if
        // `debug_assert_no_local_temp_ref` fails to panic.
        value.release(());
    }
985
986    #[::fuchsia::test]
987    #[should_panic]
988    fn test_unrelease_release_guard() {
989        let _value = ReleaseGuard::<Data>::default();
990    }
991
992    #[::fuchsia::test]
993    fn test_released_release_guard() {
994        let _value = ReleaseGuard::<Data>::default_released();
995    }
996
997    #[::fuchsia::test]
998    fn release_with_mutable_context() {
999        let value = OwnedRef::new(DataWithMutableReleaseContext {});
1000        let mut context = ();
1001        value.release(&mut context);
1002    }
1003
1004    // If this test fails, it will almost always be with a very low probability. Any failure is a
1005    // real, high priority bug.
1006    #[::fuchsia::test]
1007    fn upgrade_while_release() {
1008        let value = OwnedRef::new(Data {});
1009        // Run 10 threads trying to upgrade a weak pointer in a loop.
1010        for _ in 0..10 {
1011            std::thread::spawn({
1012                let weak = OwnedRef::downgrade(&value);
1013                move || loop {
1014                    if weak.upgrade().is_none() {
1015                        return;
1016                    }
1017                }
1018            });
1019        }
1020        // Release the value after letting the threads make some progress.
1021        std::thread::sleep(std::time::Duration::from_millis(10));
1022        value.release(());
1023        // The test must finish, and no assertion should trigger.
1024    }
1025
1026    #[::fuchsia::test]
1027    fn new_cyclic() {
1028        let mut weak_value = None;
1029        let value = OwnedRef::new_cyclic(|weak| {
1030            weak_value = Some(weak);
1031            Data {}
1032        });
1033        let weak_value = weak_value.expect("weak_value");
1034        assert!(weak_value.upgrade().is_some());
1035        value.release(());
1036        assert!(weak_value.upgrade().is_none());
1037    }
1038
1039    #[::fuchsia::test]
1040    fn as_ptr() {
1041        let value = OwnedRef::new(Data {});
1042        let weak = OwnedRef::downgrade(&value);
1043        let temp = weak.upgrade().expect("upgrade");
1044        assert_eq!(OwnedRef::as_ptr(&value), weak.as_ptr());
1045        assert_eq!(OwnedRef::as_ptr(&value), TempRef::as_ptr(&temp));
1046        std::mem::drop(temp);
1047        value.release(());
1048    }
1049
1050    #[::fuchsia::test]
1051    fn test_re_own() {
1052        let data = Data::default();
1053        let owned = OwnedRef::new(data);
1054        let weak = WeakRef::from(&owned);
1055
1056        let re_owned = weak.re_own();
1057        assert!(re_owned.is_some());
1058
1059        // Release the original owned ref.
1060        owned.release(());
1061
1062        // The re_owned ref is still alive.
1063        let re_owned_again = weak.re_own();
1064        assert!(re_owned_again.is_some());
1065        re_owned_again.release(());
1066
1067        // Now release the first re-owned ref.
1068        re_owned.release(());
1069
1070        // Now that all owned refs are released, re_own should fail.
1071        let re_owned_finally = weak.re_own();
1072        assert!(re_owned_finally.is_none());
1073    }
1074
1075    #[::fuchsia::test]
1076    fn test_re_own_concurrent() {
1077        let owned = OwnedRef::new(Data::default());
1078        let weak = WeakRef::from(&owned);
1079        let num_threads = 10;
1080
1081        let mut handles = vec![];
1082        for _ in 0..num_threads {
1083            let weak = weak.clone();
1084            let handle = std::thread::spawn(move || {
1085                loop {
1086                    if let Some(re_owned) = weak.re_own() {
1087                        re_owned.release(());
1088                    } else {
1089                        return;
1090                    }
1091                }
1092            });
1093            handles.push(handle);
1094        }
1095
1096        owned.release(());
1097
1098        for handle in handles {
1099            handle.join().unwrap();
1100        }
1101
1102        assert!(weak.re_own().is_none());
1103    }
1104
1105    #[::fuchsia::test]
1106    fn test_release_after() {
1107        let owned = OwnedRef::new(Data::default());
1108        let value = release_after!(owned, (), { 0 });
1109        assert_eq!(value, 0);
1110    }
1111
1112    #[::fuchsia::test]
1113    async fn test_release_after_async() {
1114        let owned = OwnedRef::new(Data::default());
1115        let value = release_after!(owned, (), async || { 0 });
1116        assert_eq!(value, 0);
1117    }
1118}