crossbeam_epoch/
lib.rs

//! Epoch-based memory reclamation.
//!
//! An interesting problem concurrent collections deal with comes from the remove operation.
//! Suppose that a thread removes an element from a lock-free map, while another thread is reading
//! that same element at the same time. The first thread must wait until the second thread stops
//! reading the element. Only then is it safe to destruct it.
//!
//! Programming languages that come with garbage collectors solve this problem trivially. The
//! garbage collector will destruct the removed element when no thread can hold a reference to it
//! anymore.
//!
//! This crate implements a basic memory reclamation mechanism, which is based on epochs. When an
//! element gets removed from a concurrent collection, it is inserted into a pile of garbage and
//! marked with the current epoch. Every time a thread accesses a collection, it checks the current
//! epoch, attempts to increment it, and destructs some garbage that became so old that no thread
//! can be referencing it anymore.
//!
//! That is the general mechanism behind epoch-based memory reclamation, but the details are a bit
//! more complicated. Anyhow, memory reclamation is designed to be fully automatic and something
//! users of concurrent collections don't have to worry much about.
//!
//! # Pointers
//!
//! Concurrent collections are built using atomic pointers. This module provides [`Atomic`], which
//! is just a shared atomic pointer to a heap-allocated object. Loading an [`Atomic`] yields a
//! [`Shared`], which is an epoch-protected pointer through which the loaded object can be safely
//! read.
//!
//! # Pinning
//!
//! Before an [`Atomic`] can be loaded, a participant must be [`pin`]ned. By pinning a participant
//! we declare that any object that gets removed from now on must not be destructed just
//! yet. Garbage collection of newly removed objects is suspended until the participant gets
//! unpinned.
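//!
//! A minimal sketch of pinning and loading (assuming the default `std` feature, so that [`pin`]
//! is available; the stored value is illustrative):
//!
//! ```
//! use crossbeam_epoch::{self as epoch, Atomic};
//! use std::sync::atomic::Ordering::SeqCst;
//!
//! // A shared atomic pointer to a heap-allocated integer.
//! let a = Atomic::new(1234);
//!
//! {
//!     // Pin the current participant.
//!     let guard = epoch::pin();
//!     // Loading yields a `Shared` pointer that is safe to read while the guard is alive.
//!     let p = a.load(SeqCst, &guard);
//!     unsafe {
//!         assert_eq!(*p.as_ref().unwrap(), 1234);
//!     }
//! } // The guard is dropped here and the participant gets unpinned.
//!
//! // No other thread can access `a` here, so its allocation can be reclaimed directly.
//! unsafe { drop(a.into_owned()); }
//! ```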
//!
//! # Garbage
//!
//! Objects that get removed from concurrent collections must be stashed away until all currently
//! pinned participants get unpinned. Such objects can be stored in thread-local or global
//! storage, where they are kept until the right time for their destruction comes.
//!
//! There is a global shared instance of the garbage queue. You can [`defer`](Guard::defer) the
//! execution of an arbitrary function until the global epoch is advanced enough. Most notably,
//! concurrent data structures may defer the deallocation of an object.
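//!
//! For example, after a pointer has been swapped out of a collection, its destruction can be
//! deferred with [`defer_destroy`](Guard::defer_destroy). A minimal standalone sketch (not taken
//! from a real collection; the values are illustrative):
//!
//! ```
//! use crossbeam_epoch::{self as epoch, Atomic, Shared};
//! use std::sync::atomic::Ordering::SeqCst;
//!
//! let a = Atomic::new("hello");
//!
//! let guard = epoch::pin();
//! // Swap the pointer out, as a removal from a collection would.
//! let removed = a.swap(Shared::null(), SeqCst, &guard);
//! unsafe {
//!     // The allocation is freed only once no pinned participant can still reach it.
//!     guard.defer_destroy(removed);
//! }
//! // Arbitrary functions can be deferred as well.
//! guard.defer(|| println!("the epoch has advanced enough"));
//! ```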
//!
//! # APIs
//!
//! For the majority of use cases, just use the default garbage collector by invoking [`pin`]. If
//! you want to create your own garbage collector, use the [`Collector`] API.
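//!
//! A sketch of the [`Collector`] API (the binding names are illustrative):
//!
//! ```
//! use crossbeam_epoch::Collector;
//!
//! // A garbage collector independent of the default global one.
//! let collector = Collector::new();
//!
//! // Each participating thread registers itself and pins through its own handle.
//! let handle = collector.register();
//! let guard = handle.pin();
//! drop(guard);
//! ```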

#![doc(test(
    no_crate_inject,
    attr(
        deny(warnings, rust_2018_idioms),
        allow(dead_code, unused_assignments, unused_variables)
    )
))]
#![warn(
    missing_docs,
    missing_debug_implementations,
    rust_2018_idioms,
    unreachable_pub
)]
#![cfg_attr(not(feature = "std"), no_std)]
#![cfg_attr(feature = "nightly", feature(cfg_target_has_atomic))]
#![cfg_attr(feature = "nightly", feature(const_fn))]

#[cfg(crossbeam_loom)]
extern crate loom_crate as loom;

use cfg_if::cfg_if;

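// When compiling under loom (the `crossbeam_loom` cfg), route the concurrency primitives used by
// this crate through loom's instrumented versions so the whole crate can be model-checked.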
#[cfg(crossbeam_loom)]
#[allow(unused_imports, dead_code)]
mod primitive {
    pub(crate) mod cell {
        pub(crate) use loom::cell::UnsafeCell;
    }
    pub(crate) mod sync {
        pub(crate) mod atomic {
            use core::sync::atomic::Ordering;
            pub(crate) use loom::sync::atomic::AtomicUsize;
            pub(crate) fn fence(ord: Ordering) {
                if let Ordering::Acquire = ord {
                } else {
                    // FIXME: loom only supports acquire fences at the moment.
                    // https://github.com/tokio-rs/loom/issues/117
                    // let's at least not panic...
                    // this may generate some false positives (`SeqCst` is stronger than `Acquire`
                    // for example), and some false negatives (`Relaxed` is weaker than `Acquire`),
                    // but it's the best we can do for the time being.
                }
                loom::sync::atomic::fence(Ordering::Acquire)
            }

            // FIXME: loom does not support compiler_fence at the moment.
            // https://github.com/tokio-rs/loom/issues/117
            // we use fence as a stand-in for compiler_fence for the time being.
            // this may miss some races since fence is stronger than compiler_fence,
            // but it's the best we can do for the time being.
            pub(crate) use self::fence as compiler_fence;
        }
        pub(crate) use loom::sync::Arc;
    }
    pub(crate) use loom::lazy_static;
    pub(crate) use loom::thread_local;
}
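// Outside of loom, use the plain core/std primitives, wrapped where needed to expose the same
// API as the loom versions above.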
#[cfg(not(crossbeam_loom))]
#[allow(unused_imports, dead_code)]
mod primitive {
    #[cfg(any(feature = "alloc", feature = "std"))]
    pub(crate) mod cell {
        #[derive(Debug)]
        #[repr(transparent)]
        pub(crate) struct UnsafeCell<T>(::core::cell::UnsafeCell<T>);

        // loom's UnsafeCell has a slightly different API than the standard library UnsafeCell.
        // Since we want the rest of the code to be agnostic to whether it's running under loom or
        // not, we write this small wrapper that provides the loom-supported API for the standard
        // library UnsafeCell. This is also what the loom documentation recommends:
        // https://github.com/tokio-rs/loom#handling-loom-api-differences
        impl<T> UnsafeCell<T> {
            #[inline]
            pub(crate) fn new(data: T) -> UnsafeCell<T> {
                UnsafeCell(::core::cell::UnsafeCell::new(data))
            }

            #[inline]
            pub(crate) fn with<R>(&self, f: impl FnOnce(*const T) -> R) -> R {
                f(self.0.get())
            }

            #[inline]
            pub(crate) fn with_mut<R>(&self, f: impl FnOnce(*mut T) -> R) -> R {
                f(self.0.get())
            }
        }
    }
    #[cfg(any(feature = "alloc", feature = "std"))]
    pub(crate) mod sync {
        pub(crate) mod atomic {
            pub(crate) use core::sync::atomic::compiler_fence;
            pub(crate) use core::sync::atomic::fence;
            pub(crate) use core::sync::atomic::AtomicUsize;
        }
        #[cfg_attr(feature = "nightly", cfg(target_has_atomic = "ptr"))]
        pub(crate) use alloc::sync::Arc;
    }

    #[cfg(feature = "std")]
    pub(crate) use std::thread_local;

    #[cfg(feature = "std")]
    pub(crate) use lazy_static::lazy_static;
}

#[cfg_attr(feature = "nightly", cfg(target_has_atomic = "ptr"))]
cfg_if! {
    if #[cfg(feature = "alloc")] {
        extern crate alloc;

        mod atomic;
        mod collector;
        mod deferred;
        mod epoch;
        mod guard;
        mod internal;
        mod sync;

        pub use self::atomic::{
            Pointable, Atomic, CompareExchangeError,
            Owned, Pointer, Shared,
        };
        pub use self::collector::{Collector, LocalHandle};
        pub use self::guard::{unprotected, Guard};

        #[allow(deprecated)]
        pub use self::atomic::{CompareAndSetError, CompareAndSetOrdering};
    }
}

cfg_if! {
    if #[cfg(feature = "std")] {
        mod default;
        pub use self::default::{default_collector, is_pinned, pin};
    }
}