crossbeam_utils/cache_padded.rs

use core::fmt;
use core::ops::{Deref, DerefMut};

/// Pads and aligns a value to the length of a cache line.
///
/// In concurrent programming, sometimes it is desirable to make sure commonly accessed pieces of
/// data are not placed into the same cache line. Updating an atomic value invalidates the whole
/// cache line it belongs to, which makes the next access to the same cache line slower for other
/// CPU cores. Use `CachePadded` to ensure updating one piece of data doesn't invalidate other
/// cached data.
///
/// # Size and alignment
///
/// Cache lines are assumed to be N bytes long, depending on the architecture:
///
/// * On x86-64, aarch64, and powerpc64, N = 128.
/// * On arm, mips, mips64, and riscv64, N = 32.
/// * On s390x, N = 256.
/// * On all others, N = 64.
///
/// Note that N is just a reasonable guess and is not guaranteed to match the actual cache line
/// length of the machine the program is running on. On modern Intel architectures, the spatial
/// prefetcher pulls pairs of 64-byte cache lines at a time, so we pessimistically assume that
/// cache lines are 128 bytes long.
///
/// The size of `CachePadded<T>` is the smallest multiple of N bytes large enough to accommodate
/// a value of type `T`.
///
/// The alignment of `CachePadded<T>` is the maximum of N bytes and the alignment of `T`.
///
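/// For instance, every value of N listed above is at least 32, so the following assertions hold
/// regardless of the target architecture (a minimal illustration of the size and alignment
/// guarantees above):
///
/// ```
/// use crossbeam_utils::CachePadded;
/// use std::mem::{align_of, size_of};
///
/// // The padded value is at least 32 bytes large and at least 32-byte aligned,
/// // and its size is always a whole number of alignment units.
/// assert!(align_of::<CachePadded<u8>>() >= 32);
/// assert!(size_of::<CachePadded<u8>>() >= 32);
/// assert_eq!(size_of::<CachePadded<u8>>() % align_of::<CachePadded<u8>>(), 0);
/// ```
///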
/// # Examples
///
/// Alignment and padding:
///
/// ```
/// use crossbeam_utils::CachePadded;
///
/// let array = [CachePadded::new(1i8), CachePadded::new(2i8)];
/// let addr1 = &*array[0] as *const i8 as usize;
/// let addr2 = &*array[1] as *const i8 as usize;
///
/// assert!(addr2 - addr1 >= 32);
/// assert_eq!(addr1 % 32, 0);
/// assert_eq!(addr2 % 32, 0);
/// ```
///
/// When building a concurrent queue with a head and a tail index, it is wise to place them in
/// different cache lines so that concurrent threads pushing and popping elements don't invalidate
/// each other's cache lines:
///
/// ```
/// use crossbeam_utils::CachePadded;
/// use std::sync::atomic::AtomicUsize;
///
/// struct Queue<T> {
///     head: CachePadded<AtomicUsize>,
///     tail: CachePadded<AtomicUsize>,
///     buffer: *mut T,
/// }
/// ```
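///
/// A padded atomic can also be shared between threads and used exactly like the unpadded atomic,
/// since `CachePadded` dereferences to the inner value (a small usage sketch; the thread count
/// and memory orderings here are arbitrary):
///
/// ```
/// use crossbeam_utils::CachePadded;
/// use std::sync::atomic::{AtomicUsize, Ordering};
/// use std::sync::Arc;
/// use std::thread;
///
/// let counter = Arc::new(CachePadded::new(AtomicUsize::new(0)));
/// let handles: Vec<_> = (0..4)
///     .map(|_| {
///         let counter = Arc::clone(&counter);
///         // `CachePadded<AtomicUsize>` derefs to `AtomicUsize`, so `fetch_add` works directly.
///         thread::spawn(move || { counter.fetch_add(1, Ordering::Relaxed); })
///     })
///     .collect();
/// for handle in handles {
///     handle.join().unwrap();
/// }
/// assert_eq!(counter.load(Ordering::Relaxed), 4);
/// ```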
#[derive(Clone, Copy, Default, Hash, PartialEq, Eq)]
// Starting from Intel's Sandy Bridge, the spatial prefetcher pulls pairs of 64-byte cache lines
// at a time, so we have to align to 128 bytes rather than 64.
//
// Sources:
// - https://www.intel.com/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-optimization-manual.pdf
// - https://github.com/facebook/folly/blob/1b5288e6eea6df074758f877c849b6e73bbb9fbb/folly/lang/Align.h#L107
//
// ARM's big.LITTLE architecture has asymmetric cores, and its "big" cores have 128-byte cache line size.
//
// Sources:
// - https://www.mono-project.com/news/2016/09/12/arm64-icache/
//
// powerpc64 has 128-byte cache line size.
//
// Sources:
// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_ppc64x.go#L9
#[cfg_attr(
    any(
        target_arch = "x86_64",
        target_arch = "aarch64",
        target_arch = "powerpc64",
    ),
    repr(align(128))
)]
// arm, mips, mips64, and riscv64 have 32-byte cache line size.
//
// Sources:
// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_arm.go#L7
// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_mips.go#L7
// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_mipsle.go#L7
// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_mips64x.go#L9
// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_riscv64.go#L7
#[cfg_attr(
    any(
        target_arch = "arm",
        target_arch = "mips",
        target_arch = "mips64",
        target_arch = "riscv64",
    ),
    repr(align(32))
)]
// s390x has 256-byte cache line size.
//
// Sources:
// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_s390x.go#L7
#[cfg_attr(target_arch = "s390x", repr(align(256)))]
// x86 and wasm have 64-byte cache line size.
//
// Sources:
// - https://github.com/golang/go/blob/dda2991c2ea0c5914714469c4defc2562a907230/src/internal/cpu/cpu_x86.go#L9
// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_wasm.go#L7
//
// All others are assumed to have 64-byte cache line size.
#[cfg_attr(
    not(any(
        target_arch = "x86_64",
        target_arch = "aarch64",
        target_arch = "powerpc64",
        target_arch = "arm",
        target_arch = "mips",
        target_arch = "mips64",
        target_arch = "riscv64",
        target_arch = "s390x",
    )),
    repr(align(64))
)]
pub struct CachePadded<T> {
    value: T,
}

// `CachePadded<T>` only adds alignment and trailing padding around the inner `T`, so it is safe
// to forward `T`'s `Send` and `Sync` markers.
unsafe impl<T: Send> Send for CachePadded<T> {}
unsafe impl<T: Sync> Sync for CachePadded<T> {}

impl<T> CachePadded<T> {
    /// Pads and aligns a value to the length of a cache line.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::CachePadded;
    ///
    /// let padded_value = CachePadded::new(1);
    /// ```
    pub const fn new(t: T) -> CachePadded<T> {
        CachePadded::<T> { value: t }
    }

    /// Returns the inner value.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_utils::CachePadded;
    ///
    /// let padded_value = CachePadded::new(7);
    /// let value = padded_value.into_inner();
    /// assert_eq!(value, 7);
    /// ```
    pub fn into_inner(self) -> T {
        self.value
    }
}

impl<T> Deref for CachePadded<T> {
    type Target = T;

    fn deref(&self) -> &T {
        &self.value
    }
}

impl<T> DerefMut for CachePadded<T> {
    fn deref_mut(&mut self) -> &mut T {
        &mut self.value
    }
}

impl<T: fmt::Debug> fmt::Debug for CachePadded<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("CachePadded")
            .field("value", &self.value)
            .finish()
    }
}

impl<T> From<T> for CachePadded<T> {
    fn from(t: T) -> Self {
        CachePadded::new(t)
    }
}
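
// A minimal sanity-check sketch of how the wrapper behaves through its `Deref`, `DerefMut`, and
// `From` impls; the test names and values here are illustrative only.
#[cfg(test)]
mod tests {
    use super::CachePadded;

    #[test]
    fn deref_and_deref_mut_reach_the_inner_value() {
        let mut padded = CachePadded::new(7);
        // `Deref` gives shared access and `DerefMut` gives mutable access to the inner value.
        assert_eq!(*padded, 7);
        *padded += 1;
        assert_eq!(padded.into_inner(), 8);
    }

    #[test]
    fn from_wraps_the_value() {
        // `From<T>` is equivalent to `CachePadded::new`.
        let padded = CachePadded::from(42u64);
        assert_eq!(*padded, 42);
    }
}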