// seq_lock/lib.rs

1// Copyright 2023 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5use starnix_logging::with_zx_name;
6use std::arch::asm;
7use std::marker::PhantomData;
8use std::mem::{align_of, size_of};
9use std::sync::Arc;
10use std::sync::atomic::AtomicU32;
11use zerocopy::{Immutable, IntoBytes};
12use zx::HandleBased as _;
13
/// Declare an instance of [`SeqLock`] by supplying header([`H`]) and value([`T`]) types,
/// which should be configured with C-style layout & alignment.
/// [`SeqLock`] will place a 32-bit atomic sequence number in-between the
/// header and value, in a VMO.
pub struct SeqLock<H: IntoBytes + Immutable, T: IntoBytes + Immutable> {
    // Base address of the writable mapping of the VMO in this process;
    // unmapped again in `Drop`.
    map_addr: usize,
    // Duplicate handle to the backing VMO with the WRITE right removed;
    // handed out to readers via `get_readonly_vmo()`.
    readonly_vmo: Arc<zx::Vmo>,
    // Ties the generic header/value types to this instance without storing
    // them inline (the actual bytes live in the mapped VMO).
    _phantom_data: PhantomData<(H, T)>,
}
23
24impl<H: IntoBytes + Default + Immutable, T: IntoBytes + Default + Immutable> SeqLock<H, T> {
25    pub fn new_default() -> Result<Self, zx::Status> {
26        Self::new(H::default(), T::default())
27    }
28}
29
/// Byte offset of the atomic sequence word, which sits immediately after the
/// header. Fails (at const-eval time when used in a const context) if the
/// header's size would leave the sequence word misaligned.
const fn sequence_offset<H>() -> usize {
    let header_bytes = size_of::<H>();
    assert!(
        header_bytes % align_of::<AtomicU32>() == 0,
        "Sequence must be correctly aligned"
    );
    header_bytes
}
35
36const fn value_offset<H, T>() -> usize {
37    let offset = sequence_offset::<H>() + size_of::<AtomicU32>();
38    assert!(offset % align_of::<T>() == 0, "Value alignment must allow packing without padding");
39    offset
40}
41
42const fn vmo_size<H, T>() -> usize {
43    value_offset::<H, T>() + size_of::<T>()
44}
45
46impl<H: IntoBytes + Immutable, T: IntoBytes + Immutable> SeqLock<H, T> {
47    /// Returns an instance with initial values and a read-only VMO handle.
48    /// May fail if the VMO backing the structure cannot be created, duplicated
49    /// read-only, or mapped.
50    pub fn new(header: H, value: T) -> Result<Self, zx::Status> {
51        // Create a VMO sized to hold the header, value, and sequence number.
52        let writable_vmo =
53            with_zx_name(zx::Vmo::create(vmo_size::<H, T>() as u64)?, b"starnix:selinux");
54
55        // SAFETY: This is ok because there are no other references to this memory.
56        return unsafe { Self::new_from_vmo(header, value, writable_vmo) };
57    }
58
59    /// Same as new() except that we can pass in an existing Vmo. This means that the
60    /// first part of the Vmo is a SeqLock.
61    ///
62    /// # Safety
63    ///
64    /// Callers must guarantee that any other references to this memory will
65    /// only make aligned atomic accesses to the sequence offset within the memory
66    /// or to fields of H or T.
67    pub unsafe fn new_from_vmo(
68        header: H,
69        value: T,
70        writable_vmo: zx::Vmo,
71    ) -> Result<Self, zx::Status> {
72        // Populate the initial default values.
73        writable_vmo.write(header.as_bytes(), 0)?;
74        writable_vmo.write(value.as_bytes(), value_offset::<H, T>() as u64)?;
75
76        // Create a readonly handle to the VMO.
77        let writable_rights = writable_vmo.basic_info()?.rights;
78        let readonly_rights = writable_rights.difference(zx::Rights::WRITE);
79        let readonly_vmo = Arc::new(writable_vmo.duplicate_handle(readonly_rights)?);
80
81        // Map the VMO writable by this object, and populate it.
82        let flags = zx::VmarFlags::PERM_READ
83            | zx::VmarFlags::ALLOW_FAULTS
84            | zx::VmarFlags::REQUIRE_NON_RESIZABLE
85            | zx::VmarFlags::PERM_WRITE;
86
87        let status = Self {
88            map_addr: fuchsia_runtime::vmar_root_self().map(
89                0,
90                &writable_vmo,
91                0,
92                vmo_size::<H, T>(),
93                flags,
94            )?,
95            readonly_vmo: readonly_vmo,
96            _phantom_data: PhantomData,
97        };
98
99        Ok(status)
100    }
101
102    /// Returns a read-only handle to the VMO containing the header, atomic
103    /// sequence number, and value.
104    pub fn get_readonly_vmo(&self) -> Arc<zx::Vmo> {
105        self.readonly_vmo.clone()
106    }
107
108    /// Updates the value directly. Uses Seqlock pattern.
109    pub fn set_value(&self, value: T) {
110        // All data in <T> must be stored with some form of atomic write.
111        // Given two consecutive writes W1 and W2, it is technically possible for a
112        // client to observe the data written by W2 before observing the
113        // start-increment for W2. The reader observes the same post-W1/pre-W2
114        // sequence number at both start and end of the read, so thinks everything
115        // is consistent, but gets some mix of W1 and W2's data.
116        // In order to synchronize correctly we must either:
117        //
118        // 1) Store all the data with any atomic ordering (i.e. relaxed)
119        // 2) Store all the data with atomic-release
120        // We've chosen to do the second.
121        let starting_addr = self.map_addr + value_offset::<H, T>();
122
123        // Convert T to u8s so that we can process in u32 chunks.
124        const { assert!(align_of::<T>() == 4) };
125        const { assert!(size_of::<T>() % 4 == 0) };
126        let value_as_u8_bytes = value.as_bytes();
127        let value_ptr_in_u32 = value.as_bytes().as_ptr().cast::<u32>();
128
129        // Lock prior to writing.
130        let sequence_addr = (self.map_addr + sequence_offset::<H>()) as *mut u32;
131        // Don't use AtomicU32 fetch_add because it is undefined behavior to
132        // access across mutually distrusting address spaces, which happens for the seq lock.
133        // SAFETY: sequence_addr is a valid pointer because `map_addr` is sized to fit
134        // `H` and `T` and unmapped when `self` is dropped.
135        let old_sequence = unsafe { atomic_fetch_add_u32_acq_rel(sequence_addr, 1) };
136        // Old `sequence` value must always be even (i.e. unlocked) before writing.
137        assert!((old_sequence % 2) == 0, "expected sequence to be unlocked");
138
139        // Process and write to memory in u32 chunks.
140        for i in 0..(value_as_u8_bytes.len() / size_of::<u32>()) {
141            let current_value_addr = starting_addr + (i * size_of::<u32>());
142            // SAFETY: We checked alignment and size above so we know that this points to
143            // the valid current u32 value.
144            let current_value = unsafe { *value_ptr_in_u32.add(i) };
145
146            // Use asm to write u32 chunk so that the values are being written
147            // atomically between processes. Don't use std::sync::atomic because that
148            // only syncs writes within the Rust abstract machine.
149            // SAFETY: Caller has verified that no one else is writing to this exact memory, and
150            // that both currrent_value_addr and value_as_u64 are valid.
151            unsafe { atomic_store_u32_release(current_value_addr, current_value) };
152        }
153
154        // Unlock after all writing is done.
155        // SAFETY: sequence_addr is a valid pointer as per above SAFETY comment.
156        let _ = unsafe { atomic_fetch_add_u32_acq_rel(sequence_addr, 1) };
157    }
158
159    /// Retrieves the memory address of the beginning of the handle part of the VMO.
160    /// You can use this to point to a param you want to edit (e.g. with an offset).
161    pub fn get_map_address(&mut self) -> *const T {
162        let address = self.map_addr;
163        return std::ptr::with_exposed_provenance::<T>(address);
164    }
165}
166
/// This performs an atomic store-release of a 32-bit value to `addr`.
/// Use this if you have a u32 or your struct is align(4).
///
/// Rust's memory model defines how atomics work across threads, but
/// doesn't account for the way Starnix handles access across mutually distrusting
/// address spaces.
/// This Seqlock is intended to be mapped and read by different address spaces. Rust's
/// guarantees do not apply and reading across these address spaces is undefined behavior.
/// Theoretically the Rust compiler could determine that the atomic is never read
/// from within the process and optimize out the store. We work around this by directly
/// including the assembly an atomic would generate to prevent the compiler from
/// "helpfully" optimizing it away.
///
/// # Safety
///
/// 1. The caller must ensure `addr` is valid and 4-byte aligned. The `addr` must be
///    writable by the current process. You can check: if std::ptr::write_volatile()
///    is able to write successfully to this `addr`, then this should work too.
/// 2. The caller must ensure that no other non-atomic operations are
///    occurring on this memory address simultaneously.
pub unsafe fn atomic_store_u32_release(addr: usize, value: u32) {
    // Reject builds for any architecture we don't have a hand-written store for.
    #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "riscv64")))]
    compile_error!("This architecture is not supported");

    // SAFETY: Caller must provide a valid `addr` and `value`. The asm directly
    // stores the value to that addr. The original value may not have been a u32
    // (e.g. it's a SeLinuxStatusValue struct); caller is responsible to break struct
    // into valid u32 chunks.
    unsafe {
        #[cfg(target_arch = "x86_64")]
        {
            // A plain 32-bit `mov` store: x86-64's memory model already gives
            // ordinary aligned stores release ordering, so no fence is needed.
            asm!(
                "mov [{ptr}], {val:e}",
                ptr = in(reg) addr,
                val = in(reg) value,
                options(nostack, preserves_flags)
            );
        }
        #[cfg(target_arch = "aarch64")]
        {
            // `stlr` is the dedicated store-release instruction
            // ({val:w} selects the 32-bit W register).
            asm!(
                "stlr {val:w}, [{ptr}]",
                ptr = in(reg) addr,
                val = in(reg) value,
                options(nostack, preserves_flags)
            );
        }
        #[cfg(target_arch = "riscv64")]
        {
            // RISC-V has no store-release instruction; `fence rw, w` orders all
            // prior reads/writes before the following `sw`, giving release semantics.
            asm!(
                "fence rw, w",
                "sw {val}, 0({ptr})",
                ptr = in(reg) addr,
                val = in(reg) value,
                options(nostack, preserves_flags)
            );
        }
    }
}
226
/// This performs an atomic fetch-add with Acquire and Release ordering of `val`
/// to a 32-bit value at `ptr`. Use this to update the u32 lock.
///
/// Rust's memory model defines how atomics work across threads, but
/// doesn't account for the way Starnix handles access across mutually distrusting
/// address spaces.
/// This Seqlock is intended to be mapped and read by different address spaces. Rust's
/// guarantees do not apply and reading across these address spaces is undefined behavior.
/// Theoretically the Rust compiler could determine that the atomic is never read
/// from within the process and optimize out the store. We work around this by directly
/// including the assembly an atomic would generate to prevent the compiler from
/// "helpfully" optimizing it away.
///
/// # Safety
/// The caller must ensure `ptr` is valid. The `ptr` must be writable by the current process.
///
/// Returns the value stored at `ptr` *before* the addition.
pub unsafe fn atomic_fetch_add_u32_acq_rel(ptr: *mut u32, value: u32) -> u32 {
    // Reject builds for any architecture we don't have a hand-written RMW for.
    #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "riscv64")))]
    compile_error!("This architecture is not supported");

    let old_value: u32;
    // SAFETY: Caller must provide a valid `ptr` and `value`. The asm directly
    // updates the value at that ptr.
    unsafe {
        #[cfg(target_arch = "x86_64")]
        {
            // `lock xadd` atomically adds the register into memory and leaves the
            // previous memory value in the register (captured via `inout`).
            // `xadd` writes flags, so `preserves_flags` is deliberately omitted.
            asm!(
                "lock xadd [{ptr}], {val:e}",
                ptr = in(reg) ptr,
                val = inout(reg) value => old_value,
                options(nostack),
            );
        }
        #[cfg(target_arch = "aarch64")]
        {
            // Load-exclusive/store-exclusive retry loop: `ldaxr` (acquire) reads
            // the current value, `stlxr` (release) attempts the store and sets
            // {status} non-zero if the exclusive monitor was lost, in which case
            // `cbnz` retries from label 1.
            asm!(
                "1:",
                "ldaxr {old:w}, [{ptr}]",
                "add {tmp:w}, {old:w}, {val:w}",
                "stlxr {status:w}, {tmp:w}, [{ptr}]",
                "cbnz {status:w}, 1b",
                ptr = in(reg) ptr,
                val = in(reg) value,
                old = out(reg) old_value,
                tmp = out(reg) _,
                status = out(reg) _,
                options(nostack),
            );
        }
        #[cfg(target_arch = "riscv64")]
        {
            // Single atomic memory operation; the `.aqrl` suffix requests both
            // acquire and release ordering. `{old}` receives the prior value.
            asm!(
                "amoadd.w.aqrl {old}, {val}, ({ptr})",
                ptr = in(reg) ptr,
                val = in(reg) value,
                old = out(reg) old_value,
                options(nostack),
            );
        }
    }
    old_value
}
288
289impl<H: IntoBytes + Immutable, T: IntoBytes + Immutable> Drop for SeqLock<H, T> {
290    fn drop(&mut self) {
291        // SAFETY: `self` owns the mapping, and does not dispense any references
292        // to it.
293        unsafe {
294            fuchsia_runtime::vmar_root_self()
295                .unmap(self.map_addr, vmo_size::<H, T>())
296                .expect("failed to unmap SeqLock");
297        }
298    }
299}