// Copyright 2026 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

5pub use {bitflags as __bitflags, paste};
6
/// Defines a [`bitflags`] struct plus a companion `Atomic<Name>` wrapper that
/// stores the flags in the matching `std::sync::atomic` integer type.
///
/// For `struct Foo: u32 { ... }` this expands to the usual `bitflags!` type
/// `Foo` and an additional `AtomicFoo` backed by `AtomicU32`, exposing atomic
/// load/store/RMW operations that accept and return `Foo` values.
#[macro_export]
macro_rules! atomic_bitflags {
    (
        $(#[$outer:meta])*
        $vis:vis struct $BitFlags:ident: $T:ty {
            $($t:tt)*
        }
    ) => {
        $crate::paste::paste! {
            $crate::__bitflags::bitflags! {
                $(#[$outer])*
                $vis struct $BitFlags: $T {
                    $($t)*
                }
            }

            #[allow(dead_code)]
            #[derive(Debug, Default)]
            $vis struct [<Atomic $BitFlags>] {
                // `[<Atomic $T:camel>]` maps e.g. `u32` -> `AtomicU32`.
                inner: std::sync::atomic::[<Atomic $T:camel>],
            }

            #[allow(dead_code)]
            impl [<Atomic $BitFlags>] {
                /// Creates a new atomic flag set holding `initial`.
                pub fn new(initial: $BitFlags) -> Self {
                    Self {
                        inner: std::sync::atomic::[<Atomic $T:camel>]::new(initial.bits()),
                    }
                }

                /// Atomically loads the current flags. Bits not declared in the
                /// flags type are silently dropped (`from_bits_truncate`).
                pub fn load(&self, order: std::sync::atomic::Ordering) -> $BitFlags {
                    $BitFlags::from_bits_truncate(self.inner.load(order))
                }

                /// Atomically replaces the current flags with `val`.
                pub fn store(&self, val: $BitFlags, order: std::sync::atomic::Ordering) {
                    self.inner.store(val.bits(), order);
                }

                /// Atomically sets the bits in `val`, returning the previous flags.
                pub fn fetch_or(&self, val: $BitFlags, order: std::sync::atomic::Ordering) -> $BitFlags {
                    $BitFlags::from_bits_truncate(self.inner.fetch_or(val.bits(), order))
                }

                /// Atomically intersects the flags with `val`, returning the
                /// previous flags.
                pub fn fetch_and(&self, val: $BitFlags, order: std::sync::atomic::Ordering) -> $BitFlags {
                    $BitFlags::from_bits_truncate(self.inner.fetch_and(val.bits(), order))
                }

                /// Atomically toggles the bits in `val`, returning the previous
                /// flags. Completes the `fetch_or`/`fetch_and` family.
                pub fn fetch_xor(&self, val: $BitFlags, order: std::sync::atomic::Ordering) -> $BitFlags {
                    $BitFlags::from_bits_truncate(self.inner.fetch_xor(val.bits(), order))
                }

                /// Atomically swaps in `val`, returning the previous flags.
                pub fn swap(&self, val: $BitFlags, order: std::sync::atomic::Ordering) -> $BitFlags {
                    $BitFlags::from_bits_truncate(self.inner.swap(val.bits(), order))
                }

                /// Atomically stores `new` if the current flags equal `current`.
                /// Returns `Ok(previous)` on success and `Err(actual)` on
                /// failure, mirroring the std `compare_exchange` contract.
                pub fn compare_exchange(
                    &self,
                    current: $BitFlags,
                    new: $BitFlags,
                    success: std::sync::atomic::Ordering,
                    failure: std::sync::atomic::Ordering,
                ) -> Result<$BitFlags, $BitFlags> {
                    self.inner.compare_exchange(current.bits(), new.bits(), success, failure)
                        .map($BitFlags::from_bits_truncate)
                        .map_err($BitFlags::from_bits_truncate)
                }

                /// Atomically sets the bits selected by `mask` to the matching
                /// bits of `value`, leaving all other bits untouched. Returns
                /// the flags observed immediately before the update.
                pub fn update(
                    &self,
                    value: $BitFlags,
                    mask: $BitFlags,
                    set_order: std::sync::atomic::Ordering,
                    fetch_order: std::sync::atomic::Ordering,
                ) -> $BitFlags {
                    self.inner.fetch_update(set_order, fetch_order, |old| {
                        // Clear the masked bits, then OR in the masked `value` bits.
                        Some((old & !mask.bits()) | (value.bits() & mask.bits()))
                    })
                    .map($BitFlags::from_bits_truncate)
                    // The closure always returns `Some`, so `fetch_update` cannot fail.
                    .expect("update closure always returns Some")
                }
            }
        }
    };
}
#[cfg(test)]
mod tests {
    use std::sync::atomic::Ordering;

    atomic_bitflags! {
        #[derive(PartialEq, Eq, Debug, Clone, Copy)]
        pub struct TestFlags: u32 {
            const A = 0x1;
            const B = 0x2;
            const C = 0x4;
        }
    }

    #[test]
    fn test_atomic_bitflags() {
        let atomic = AtomicTestFlags::new(TestFlags::A);
        assert_eq!(atomic.load(Ordering::Relaxed), TestFlags::A);

        atomic.store(TestFlags::B, Ordering::Relaxed);
        assert_eq!(atomic.load(Ordering::Relaxed), TestFlags::B);

        // RMW ops return the *previous* flag set.
        let prev = atomic.fetch_or(TestFlags::C, Ordering::Relaxed);
        assert_eq!(prev, TestFlags::B);
        assert_eq!(atomic.load(Ordering::Relaxed), TestFlags::B | TestFlags::C);

        let prev = atomic.fetch_and(TestFlags::C, Ordering::Relaxed);
        assert_eq!(prev, TestFlags::B | TestFlags::C);
        assert_eq!(atomic.load(Ordering::Relaxed), TestFlags::C);
    }

    #[test]
    fn test_swap_and_compare_exchange() {
        let atomic = AtomicTestFlags::new(TestFlags::A);

        // `swap` returns the previous flags and installs the new ones.
        let prev = atomic.swap(TestFlags::B, Ordering::Relaxed);
        assert_eq!(prev, TestFlags::A);
        assert_eq!(atomic.load(Ordering::Relaxed), TestFlags::B);

        // A mismatched `current` fails and reports the actual value.
        let res = atomic.compare_exchange(
            TestFlags::A,
            TestFlags::C,
            Ordering::Relaxed,
            Ordering::Relaxed,
        );
        assert_eq!(res, Err(TestFlags::B));
        assert_eq!(atomic.load(Ordering::Relaxed), TestFlags::B);

        // A matching `current` succeeds and reports the previous value.
        let res = atomic.compare_exchange(
            TestFlags::B,
            TestFlags::C,
            Ordering::Relaxed,
            Ordering::Relaxed,
        );
        assert_eq!(res, Ok(TestFlags::B));
        assert_eq!(atomic.load(Ordering::Relaxed), TestFlags::C);
    }

    #[test]
    fn test_update() {
        let atomic = AtomicTestFlags::new(TestFlags::A | TestFlags::B);

        // Update A to 0, leaving B as is. Mask is A. Value is 0.
        let prev =
            atomic.update(TestFlags::empty(), TestFlags::A, Ordering::Relaxed, Ordering::Relaxed);
        assert_eq!(prev, TestFlags::A | TestFlags::B);
        assert_eq!(atomic.load(Ordering::Relaxed), TestFlags::B);

        // Update A to 1, leaving B as is. Mask is A. Value is A.
        let prev = atomic.update(TestFlags::A, TestFlags::A, Ordering::Relaxed, Ordering::Relaxed);
        assert_eq!(prev, TestFlags::B);
        assert_eq!(atomic.load(Ordering::Relaxed), TestFlags::A | TestFlags::B);

        // Update B to 0, A to 0. Mask is A | B. Value is 0.
        let prev = atomic.update(
            TestFlags::empty(),
            TestFlags::A | TestFlags::B,
            Ordering::Relaxed,
            Ordering::Relaxed,
        );
        assert_eq!(prev, TestFlags::A | TestFlags::B);
        assert_eq!(atomic.load(Ordering::Relaxed), TestFlags::empty());
    }
}
140}