Skip to main content

seq_lock/
lib.rs

1// Copyright 2023 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5use starnix_logging::with_zx_name;
6use std::arch::asm;
7use std::marker::PhantomData;
8use std::mem::{align_of, size_of};
9use std::sync::Arc;
10use std::sync::atomic::AtomicU32;
11use zerocopy::{Immutable, IntoBytes};
12use zx::HandleBased as _;
13
/// Byte size of the u32 sequence (lock) word stored in the VMO between header and value.
const SEQUENCE_SIZE: usize = size_of::<AtomicU32>();
15
/// Byte size to use when incrementally writing out T in [`set_value()`]. Determined
/// by the params in T.
/// Four -> write in u32 chunks.
/// Eight -> write in u64 chunks, although the first 8 bytes may be two u32s (one
/// of which is the `sequence`).
// Fieldless enum used in both const and runtime comparisons; derive the full set of
// standard traits (`Eq`/`Copy`/`Clone`/`Debug`) rather than `PartialEq` alone so the
// type is printable in asserts and freely copyable.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum WriteSize {
    /// Write the value in 4-byte (u32) chunks.
    Four,
    /// Write the value in 8-byte (u64) chunks.
    Eight,
}
26
/// Types that are safe to be synchronized across address spaces using a Seqlock.
///
/// A type implementing this trait can optionally include the sequence as
/// its first field, indicated by `HAS_INLINE_SEQUENCE`. If it does not, [`SeqLock`]
/// will place a u32 atomic sequence number in between the header and value.
///
/// # Safety
///
/// Types implementing this trait guarantee that they can be safely written
/// to shared memory in chunks of `WRITE_SIZE` without introducing undefined
/// behavior for concurrent readers in other address spaces.
pub unsafe trait SeqLockable: IntoBytes + Immutable {
    /// The chunk size to use when writing to memory, either 4 or 8 bytes.
    /// [`SeqLock::new_from_vmo`] checks at compile time that the type's alignment
    /// and size are compatible with the chosen chunk size.
    const WRITE_SIZE: WriteSize;

    /// Indicates whether the type includes the u32 sequence as its first field.
    /// When true, [`SeqLock`] writes the value at the sequence offset itself and
    /// `set_value()` skips the first 4 bytes so the live lock word is never
    /// overwritten by the caller's copy.
    const HAS_INLINE_SEQUENCE: bool;

    /// Name used to identify the VMO for debugging.
    const VMO_NAME: &'static [u8];
}
48
/// Declare an instance of [`SeqLock`] by supplying header([`H`]) and value([`T`]) types,
/// which should be configured with C-style layout & alignment.
/// The value T can optionally include the sequence param as its first field (HAS_INLINE_SEQUENCE).
/// If you choose not to do that, [`SeqLock`] will place a u32 atomic sequence number
/// in between the header and value, in a VMO, shifting the value payload by `SEQUENCE_SIZE`.
pub struct SeqLock<H: IntoBytes + Immutable, T: SeqLockable> {
    // Base address of this process's writable mapping of the VMO; unmapped in Drop.
    map_addr: usize,
    // Duplicated handle with WRITE rights removed, handed out via `get_readonly_vmo()`.
    readonly_vmo: Arc<zx::Vmo>,
    // Ties H and T to the type without storing either directly.
    _phantom_data: PhantomData<(H, T)>,
}
59
60impl<H: IntoBytes + Default + Immutable, T: SeqLockable + Default> SeqLock<H, T> {
61    pub fn new_default() -> Result<Self, zx::Status> {
62        Self::new(H::default(), T::default())
63    }
64}
65
/// Returns the byte offset of the sequence (lock) word.
/// The sequence always sits immediately after the header struct `H`.
const fn sequence_offset<H>() -> usize {
    let header_size = size_of::<H>();
    // The sequence is an AtomicU32, so the header must end on a 4-byte boundary.
    assert!(header_size % align_of::<AtomicU32>() == 0, "Sequence must be correctly aligned");
    header_size
}
73
impl<H: IntoBytes + Immutable, T: SeqLockable> SeqLock<H, T> {
    /// Points to the value address, adding any required padding if `sequence` is not inline.
    ///
    /// Example with inline sequence (HAS_INLINE_SEQUENCE = true):
    ///   H: 0
    ///   H: 4
    ///   T: 8 <-- points here, because `sequence` is the first param of T.
    ///   T: 12
    ///
    /// Example without inline sequence (HAS_INLINE_SEQUENCE = false):
    ///   H: 0
    ///   H: 4
    ///   [sequence]: 8
    ///   T: 12 <-- points here, after the added sequence.
    ///
    /// Some implementations (SeLinuxStatusValue) rely on SeqLock to track `sequence`, while
    /// some others (PerfMetadataValue) track `sequence` in T so that they can refer to it.
    const fn value_offset() -> usize {
        let offset = sequence_offset::<H>();
        // T must start exactly where H (plus the optional external sequence) ends;
        // any implicit padding here would desynchronize reader-side offsets.
        assert!(
            offset % align_of::<T>() == 0,
            "Value alignment must allow packing without padding"
        );
        offset + if T::HAS_INLINE_SEQUENCE { 0 } else { SEQUENCE_SIZE }
    }

    /// Returns the total size of the VMO required to store the header, value, and sequence.
    /// (When the sequence is inline, it is part of T and needs no extra space.)
    const fn vmo_size() -> usize {
        Self::value_offset() + size_of::<T>()
    }

    /// Returns an instance with initial values and a read-only VMO handle.
    /// May fail if the VMO backing the structure cannot be created, duplicated
    /// read-only, or mapped.
    pub fn new(header: H, value: T) -> Result<Self, zx::Status> {
        // Create a VMO sized to hold the header H, value T, and sequence number.
        let vmo_size = Self::vmo_size();
        let writable_vmo = with_zx_name(zx::Vmo::create(vmo_size as u64)?, T::VMO_NAME);

        // SAFETY: This is ok because there are no other references to this memory.
        return unsafe { Self::new_from_vmo(header, value, writable_vmo) };
    }

    /// Same as new() except that we can pass in an existing Vmo. This means that the
    /// first part of the Vmo is a SeqLock.
    ///
    /// # Safety
    ///
    /// Callers must guarantee that any other references to this memory will
    /// only make aligned atomic accesses to the sequence offset within the memory
    /// or to fields of H or T.
    pub unsafe fn new_from_vmo(
        header: H,
        value: T,
        writable_vmo: zx::Vmo,
    ) -> Result<Self, zx::Status> {
        // Compile-time layout checks: reject any H/T combination whose layout would
        // force get()/set_value() to perform misaligned or partial chunked accesses.
        const {
            let write_size = match T::WRITE_SIZE {
                WriteSize::Four => size_of::<u32>(),
                WriteSize::Eight => size_of::<u64>(),
            };
            assert!(align_of::<T>() >= write_size, "T must be aligned to the write size");
            assert!(size_of::<T>() % write_size == 0, "size of T must be a multiple of write size");
            assert!(
                Self::value_offset() % write_size == 0,
                "value_offset must be aligned to the write size"
            );
        }
        let value_offset = Self::value_offset();
        let vmo_size = Self::vmo_size();
        // Populate the initial default values.
        writable_vmo.write(header.as_bytes(), 0)?;
        writable_vmo.write(value.as_bytes(), value_offset as u64)?;

        // Create a readonly handle to the VMO by duplicating with WRITE removed.
        let writable_rights = writable_vmo.basic_info()?.rights;
        let readonly_rights = writable_rights.difference(zx::Rights::WRITE);
        let readonly_vmo = Arc::new(writable_vmo.duplicate_handle(readonly_rights)?);

        // Map the VMO writable by this object, and populate it.
        let flags = zx::VmarFlags::PERM_READ
            | zx::VmarFlags::ALLOW_FAULTS
            | zx::VmarFlags::REQUIRE_NON_RESIZABLE
            | zx::VmarFlags::PERM_WRITE;

        let status = Self {
            map_addr: fuchsia_runtime::vmar_root_self().map(
                0,
                &writable_vmo,
                0,
                vmo_size,
                flags,
            )?,
            readonly_vmo: readonly_vmo,
            _phantom_data: PhantomData,
        };
        Ok(status)
    }

    /// Returns a read-only handle to the VMO containing the header, atomic
    /// sequence number, and value.
    pub fn get_readonly_vmo(&self) -> Arc<zx::Vmo> {
        self.readonly_vmo.clone()
    }

    /// Returns a read-only copy of the value as a T struct object.
    /// This read occurs with a sequence check to ensure that:
    ///   1. Someone else is not already in the middle of writing the data
    ///   2. The data had not been modified during the read
    pub fn get(&self) -> T {
        // Assemble the snapshot in a local MaybeUninit; it is only assume_init'd
        // after the sequence re-check below proves the copy was consistent.
        let mut value = std::mem::MaybeUninit::<T>::uninit();
        let value_ptr = value.as_mut_ptr();
        let starting_addr = self.map_addr + Self::value_offset();
        let sequence_addr = self.map_addr + sequence_offset::<H>();

        loop {
            // Read sequence (lock) value.
            // SAFETY: We know sequence is u32 hardcoded to sequence_addr.
            let sequence = unsafe { atomic_load_u32_acquire(sequence_addr as *mut u32) };
            // An odd sequence means a writer currently holds the lock; spin and retry.
            if sequence % 2 != 0 {
                std::hint::spin_loop();
                continue;
            }

            // Read data in chunks of u32 or u64 depending on the WriteSize for T.
            if T::WRITE_SIZE == WriteSize::Four {
                for i in 0..(size_of::<T>() / size_of::<u32>()) {
                    let addr = starting_addr + i * size_of::<u32>();
                    // SAFETY: User stated via WriteSize that T is made of u32s.
                    let val = unsafe { atomic_load_u32_acquire(addr as *mut u32) };
                    // SAFETY: We know value_ptr points to a T struct param.
                    unsafe { (value_ptr as *mut u32).add(i).write(val) };
                }
            } else if T::WRITE_SIZE == WriteSize::Eight {
                for i in 0..(size_of::<T>() / size_of::<u64>()) {
                    let addr = starting_addr + i * size_of::<u64>();
                    // SAFETY: User stated via WriteSize that T is made of u64s.
                    let val = unsafe { atomic_load_u64_acquire(addr as *mut u64) };
                    // SAFETY: We know value_ptr points to a T struct param.
                    unsafe { (value_ptr as *mut u64).add(i).write(val) };
                }
            }

            // Read sequence again to compare with earlier sequence value.
            // A change means a writer ran during our copy, so the snapshot may be torn.
            // SAFETY: We know sequence is u32 hardcoded to sequence_addr.
            let current_sequence = unsafe { atomic_load_u32_acquire(sequence_addr as *mut u32) };
            if sequence != current_sequence {
                continue;
            }
            break;
        }
        // Only return after sequence checks are valid, otherwise loops to check again.
        // SAFETY: By this point the value should be synced and valid. Also we know the
        // data starting at the offset is a T struct.
        unsafe { value.assume_init() }
    }

    /// Updates the value directly. Uses Seqlock pattern.
    pub fn set_value(&self, value: T) {
        // All data in <T> must be stored with some form of atomic write.
        // Given two consecutive writes W1 and W2, it is technically possible for a
        // client to observe the data written by W2 before observing the
        // start-increment for W2. The reader observes the same post-W1/pre-W2
        // sequence number at both start and end of the read, so thinks everything
        // is consistent, but gets some mix of W1 and W2's data.
        // In order to synchronize correctly we must either:
        //
        // 1) Store all the data with any atomic ordering (i.e. relaxed)
        // 2) Store all the data with atomic-release
        // We've chosen to do the second.
        let starting_addr = self.map_addr + Self::value_offset();

        // Convert T to u8s so that we can process in u32 or u64 chunks.
        const { assert!(size_of::<T>() % 4 == 0) };
        let value_as_u8_bytes = value.as_bytes();
        let value_ptr_in_u32 = value.as_bytes().as_ptr().cast::<u32>();

        // Lock prior to writing by incrementing the sequence to an odd value.
        let sequence_addr = (self.map_addr + sequence_offset::<H>()) as *mut u32;
        // Don't use AtomicU32 fetch_add because it is undefined behavior to
        // access across mutually distrusting address spaces, which happens for the seq lock.
        // SAFETY: sequence_addr is a valid pointer because `map_addr` is sized to fit
        // `H` and `T` and unmapped when `self` is dropped.
        let old_sequence = unsafe { atomic_fetch_add_u32_acq_rel(sequence_addr, 1) };
        // Old `sequence` value must always be even (i.e. unlocked) before writing.
        // NOTE(review): this assumes a single writer; a concurrent second writer
        // would trip this assertion rather than corrupt data.
        assert!((old_sequence % 2) == 0, "expected sequence to be unlocked");

        // Process and write to memory in u32 or u64 chunks.
        const { assert!(align_of::<T>() == 4 || align_of::<T>() == 8) };
        // If T included the sequence number, we shouldn't write to it
        // (overwrite it) here. We should just skip it.
        let mut start_index = 0;
        if T::HAS_INLINE_SEQUENCE {
            start_index = 1;
        }

        if T::WRITE_SIZE == WriteSize::Four {
            assert!(align_of::<T>() == 4);
            for i in start_index..(value_as_u8_bytes.len() / size_of::<u32>()) {
                let current_value_addr = starting_addr + (i * size_of::<u32>());
                // SAFETY: We checked alignment and size above so we know that this points to
                // the valid current u32 value.
                let current_value = unsafe { *value_ptr_in_u32.add(i) };

                // Use asm to write u32 chunk so that the values are being written
                // atomically between address spaces. Don't use std::sync::atomic because that
                // only syncs writes within the Rust abstract machine.
                // SAFETY: Caller has verified that no one else is writing to this exact memory, and
                // that both current_value_addr and current_value are valid.
                unsafe { atomic_store_u32_release(current_value_addr as *mut u32, current_value) };
            }
        } else if T::WRITE_SIZE == WriteSize::Eight {
            assert!(align_of::<T>() == 8 && size_of::<T>() % 8 == 0);

            // When `WRITE_SIZE` is `Eight`, the memory is 8-byte aligned.
            // If `HAS_INLINE_SEQUENCE` is true, the 4-byte sequence lock occupies the
            // first half of an 8-byte block. We must skip that 4-byte sequence, perform a
            // 4-byte store for the remainder of that block, and then proceed with 8-byte stores.
            let mut offset_index = 0;

            if start_index == 1 {
                // Skip first u32 (sequence). Write next u32.
                let addr = starting_addr + (start_index * size_of::<u32>());
                // SAFETY: As a `SeqLockable`, the caller guarantees via `HAS_INLINE_SEQUENCE` that
                // the u32 sequence spans the first half of the 8-byte aligned block. This means that
                // getting the next u32 value (to sum up to a complete u64) is safe.
                let value = unsafe { *value_ptr_in_u32.add(start_index) };
                // SAFETY: Caller has verified that no one else is writing to this exact memory, and
                // that both addr and value are valid.
                unsafe { atomic_store_u32_release(addr as *mut u32, value) };

                offset_index += 1;
            }

            // Write the rest of the data using 8-byte stores.
            let value_ptr_in_u64 = value.as_bytes().as_ptr().cast::<u64>();
            for i in offset_index..(value_as_u8_bytes.len() / size_of::<u64>()) {
                let addr = starting_addr + (i * size_of::<u64>());
                // SAFETY: We checked alignment and size above so we know that this points to
                // the valid current u64 value.
                let value = unsafe { *value_ptr_in_u64.add(i) };

                // Use asm to write u64 chunk so that the values are being written
                // atomically between address spaces. Don't use std::sync::atomic because that
                // only syncs writes within the Rust abstract machine.
                // SAFETY: Caller has verified that no one else is writing to this exact memory, and
                // that both addr and value are valid.
                unsafe { atomic_store_u64_release(addr as *mut u64, value) };
            }
        }

        // Unlock after all writing is done: a second increment makes the sequence even again.
        // SAFETY: sequence_addr is a valid pointer as per above SAFETY comment.
        let _ = unsafe { atomic_fetch_add_u32_acq_rel(sequence_addr, 1) };
    }

    /// Retrieves the memory address of the beginning of the mapped VMO.
    /// You can use this to point to a param you want to edit (e.g. with an offset).
    /// NOTE(review): this is the mapping base (i.e. the header `H`), not
    /// `value_offset()` — callers appear to apply their own offsets; confirm.
    pub fn get_map_address(&mut self) -> *const T {
        let address = self.map_addr;
        return std::ptr::with_exposed_provenance::<T>(address);
    }
}
337
/// This performs an atomic store-release of a 32-bit value to `addr`.
/// Use this if you have a u32 or your struct is align(4).
///
/// Rust's memory model defines how atomics work across threads, but
/// doesn't account for the way Starnix handles access across mutually distrusting
/// address spaces.
/// This Seqlock is intended to be mapped and read by different address spaces. Rust's
/// guarantees do not apply and reading across these address spaces is undefined behavior.
/// Theoretically the Rust compiler could determine that the atomic is never read
/// from within the process and optimize out the store. We work around this by directly
/// including the assembly an atomic would generate to prevent the compiler from
/// "helpfully" optimizing it away.
///
/// # Safety
///
/// 1. The caller must ensure `addr` points to an address ptr that is valid and 4-byte
///    aligned. The `addr` must be writable by the current process.
/// 2. The caller must ensure that no other non-atomic operations are
///    occurring on this memory address simultaneously.
pub unsafe fn atomic_store_u32_release(addr: *mut u32, value: u32) {
    #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "riscv64")))]
    compile_error!("This architecture is not supported");

    // SAFETY: Per the # Safety contract, `addr` is valid, aligned, and writable, and
    // nothing else is accessing it non-atomically. Each arm emits the same instruction
    // sequence a release store would compile to on that architecture. The pointee may
    // originally have been part of a larger struct (e.g. SeLinuxStatusValue); the
    // caller is responsible for slicing it into valid u32 chunks.
    unsafe {
        // x86-64 is TSO: a plain 32-bit MOV store already has release semantics.
        #[cfg(target_arch = "x86_64")]
        asm!(
            "mov [{dst}], {src:e}",
            dst = in(reg) addr,
            src = in(reg) value,
            options(nostack, preserves_flags)
        );
        // AArch64: STLR is the dedicated store-release instruction.
        #[cfg(target_arch = "aarch64")]
        asm!(
            "stlr {src:w}, [{dst}]",
            dst = in(reg) addr,
            src = in(reg) value,
            options(nostack, preserves_flags)
        );
        // RISC-V: fence all prior accesses before the plain 32-bit store.
        #[cfg(target_arch = "riscv64")]
        asm!(
            "fence rw, w",
            "sw {src}, 0({dst})",
            dst = in(reg) addr,
            src = in(reg) value,
            options(nostack, preserves_flags)
        );
    }
}
396
/// This performs an atomic fetch-add with Acquire and Release ordering of `val`
/// to a 32-bit value at `addr`. Use this to update the u32 lock.
///
/// Rust's memory model defines how atomics work across threads, but
/// doesn't account for the way Starnix handles access across mutually distrusting
/// address spaces.
/// This Seqlock is intended to be mapped and read by different address spaces. Rust's
/// guarantees do not apply and reading across these address spaces is undefined behavior.
/// Theoretically the Rust compiler could determine that the atomic is never read
/// from within the process and optimize out the store. We work around this by directly
/// including the assembly an atomic would generate to prevent the compiler from
/// "helpfully" optimizing it away.
///
/// Returns the value that was stored at `addr` before the addition (which wraps on
/// overflow on all supported architectures).
///
/// # Safety
/// The caller must ensure `addr` is valid. The `addr` must be writable by the current process.
pub unsafe fn atomic_fetch_add_u32_acq_rel(addr: *mut u32, value: u32) -> u32 {
    #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "riscv64")))]
    compile_error!("This architecture is not supported");

    let previous: u32;
    // SAFETY: Per the # Safety contract, `addr` is a valid, writable pointer. Each arm
    // performs a read-modify-write of the pointee and returns the prior value.
    unsafe {
        // x86-64: LOCK XADD exchanges and adds atomically; the input register
        // receives the old value. Flags are clobbered, so no `preserves_flags`.
        #[cfg(target_arch = "x86_64")]
        asm!(
            "lock xadd [{dst}], {delta:e}",
            dst = in(reg) addr,
            delta = inout(reg) value => previous,
            options(nostack),
        );
        // AArch64: classic load-exclusive/store-exclusive retry loop with
        // acquire (LDAXR) and release (STLXR) semantics.
        #[cfg(target_arch = "aarch64")]
        asm!(
            "1:",
            "ldaxr {prev:w}, [{dst}]",
            "add {sum:w}, {prev:w}, {delta:w}",
            "stlxr {flag:w}, {sum:w}, [{dst}]",
            "cbnz {flag:w}, 1b",
            dst = in(reg) addr,
            delta = in(reg) value,
            prev = out(reg) previous,
            sum = out(reg) _,
            flag = out(reg) _,
            options(nostack),
        );
        // RISC-V: single AMO instruction with acquire+release ordering.
        #[cfg(target_arch = "riscv64")]
        asm!(
            "amoadd.w.aqrl {prev}, {delta}, ({dst})",
            dst = in(reg) addr,
            delta = in(reg) value,
            prev = out(reg) previous,
            options(nostack),
        );
    }
    previous
}
458
/// This performs an atomic store-release of a 64-bit value to `addr`.
/// Use this if you have a u64 or your struct is align(8).
///
/// Rust's memory model defines how atomics work across threads, but
/// doesn't account for the way Starnix handles access across mutually distrusting
/// address spaces.
/// This Seqlock is intended to be mapped and read by different address spaces. Rust's
/// guarantees do not apply and reading across these address spaces is undefined behavior.
/// Theoretically the Rust compiler could determine that the atomic is never read
/// from within the process and optimize out the store. We work around this by directly
/// including the assembly an atomic would generate to prevent the compiler from
/// "helpfully" optimizing it away.
///
/// # Safety
///
/// 1. The caller must ensure `addr` points to an address ptr that is valid and 8-byte
///    aligned. The `addr` must be writable by the current process.
/// 2. The caller must ensure that no other non-atomic operations are
///    occurring on this memory address simultaneously.
pub unsafe fn atomic_store_u64_release(addr: *mut u64, value: u64) {
    #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "riscv64")))]
    compile_error!("This architecture is not supported");

    // SAFETY: Per the # Safety contract, `addr` is valid, aligned, and writable, and
    // nothing else is accessing it non-atomically. The pointee may originally have been
    // part of a larger struct (e.g. PerfMetadataValue); the caller is responsible for
    // slicing it into valid u64 chunks.
    unsafe {
        // x86-64 is TSO: a plain 64-bit MOV store already has release semantics.
        #[cfg(target_arch = "x86_64")]
        asm!(
            "mov [{dst}], {src}",
            dst = in(reg) addr,
            src = in(reg) value,
            options(nostack, preserves_flags)
        );
        #[cfg(target_arch = "aarch64")]
        asm!(
            // Store-side barrier first, then a plain store.
            "dmb ishst",
            // Use str instead of stlr to explicitly write only.
            // Otherwise stlr attempts to read first and we don't have permissions.
            "str {src}, [{dst}]",
            dst = in(reg) addr,
            src = in(reg) value,
            options(nostack, preserves_flags)
        );
        // RISC-V: fence all prior accesses before the plain 64-bit store.
        #[cfg(target_arch = "riscv64")]
        asm!(
            "fence rw, w",
            "sd {src}, 0({dst})",
            dst = in(reg) addr,
            src = in(reg) value,
            options(nostack, preserves_flags)
        );
    }
}
521
/// Performs an atomic acquire (load, or read) of a u32 from `addr`.
/// You can use this to read the `sequence` or `lock` value.
///
/// # Safety
/// `addr` must point to a valid address and be 4-byte aligned.
pub unsafe fn atomic_load_u32_acquire(addr: *mut u32) -> u32 {
    #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "riscv64")))]
    compile_error!("This architecture is not supported");

    let loaded: u32;
    // SAFETY: Per the # Safety contract, `addr` is a valid, 4-byte aligned pointer.
    unsafe {
        // x86-64 is TSO: a plain 32-bit MOV load already has acquire semantics.
        #[cfg(target_arch = "x86_64")]
        asm!(
            "mov {dst:e}, [{src}]",
            src = in(reg) addr,
            dst = out(reg) loaded,
            options(nostack, preserves_flags)
        );
        // AArch64: LDAR is the dedicated load-acquire instruction.
        #[cfg(target_arch = "aarch64")]
        asm!(
            "ldar {dst:w}, [{src}]",
            src = in(reg) addr,
            dst = out(reg) loaded,
            options(nostack, preserves_flags)
        );
        // RISC-V: plain load followed by a fence ordering it before later accesses.
        #[cfg(target_arch = "riscv64")]
        asm!(
            "lw {dst}, 0({src})",
            "fence r, rw",
            src = in(reg) addr,
            dst = out(reg) loaded,
            options(nostack, preserves_flags)
        );
    }
    loaded
}
565
/// Performs an atomic acquire (load, or read) of a u64 from `addr`.
///
/// # Safety
/// `addr` must point to a valid address and be 8-byte aligned.
pub unsafe fn atomic_load_u64_acquire(addr: *mut u64) -> u64 {
    #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "riscv64")))]
    compile_error!("This architecture is not supported");

    let loaded: u64;
    // SAFETY: Per the # Safety contract, `addr` is a valid, 8-byte aligned pointer.
    unsafe {
        // x86-64 is TSO: a plain 64-bit MOV load already has acquire semantics.
        #[cfg(target_arch = "x86_64")]
        asm!(
            "mov {dst}, [{src}]",
            src = in(reg) addr,
            dst = out(reg) loaded,
            options(nostack, preserves_flags)
        );
        // AArch64: LDAR is the dedicated load-acquire instruction.
        #[cfg(target_arch = "aarch64")]
        asm!(
            "ldar {dst}, [{src}]",
            src = in(reg) addr,
            dst = out(reg) loaded,
            options(nostack, preserves_flags)
        );
        // RISC-V: plain load followed by a fence ordering it before later accesses.
        #[cfg(target_arch = "riscv64")]
        asm!(
            "ld {dst}, 0({src})",
            "fence r, rw",
            src = in(reg) addr,
            dst = out(reg) loaded,
            options(nostack, preserves_flags)
        );
    }
    loaded
}
608
// Unmaps the writable mapping created in `new_from_vmo()`. The read-only VMO
// handle in `readonly_vmo` is dropped separately by `Arc`.
impl<H: IntoBytes + Immutable, T: SeqLockable> Drop for SeqLock<H, T> {
    fn drop(&mut self) {
        // SAFETY: `self` owns the mapping, and does not dispense any references
        // to it. The size passed here is the same `vmo_size()` the mapping was
        // created with, so the whole region is unmapped.
        unsafe {
            fuchsia_runtime::vmar_root_self()
                .unmap(self.map_addr, Self::vmo_size())
                .expect("failed to unmap SeqLock");
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use zerocopy::KnownLayout;

    // Example struct that mirrors PerfMetadataValue.
    // 8-byte aligned (via the u64 fields); the inline `lock` field occupies the
    // first half of the first 8-byte block, exercising the mixed u32/u64 write path.
    #[repr(C)]
    #[derive(IntoBytes, Immutable, KnownLayout, Copy, Clone, Debug, PartialEq, Default)]
    struct WriteSizeEightStruct {
        lock: u32,
        val1: u32,
        val2: u64,
        val3: u64,
    }

    // SAFETY: This struct is composed of fields that are safe to write
    // in 8-byte chunks (two u32s and u64s). It is only used for testing.
    // It emulates a perf_event_value struct.
    unsafe impl SeqLockable for WriteSizeEightStruct {
        const WRITE_SIZE: WriteSize = WriteSize::Eight;
        const HAS_INLINE_SEQUENCE: bool = true;
        const VMO_NAME: &'static [u8] = b"test:write_size_eight";
    }

    #[test]
    fn test_seqlock_gets_align_eight_with_sequence() {
        // u64 header keeps the inline sequence (and thus T) 8-byte aligned.
        let seqlock = SeqLock::<u64, WriteSizeEightStruct>::new(0, WriteSizeEightStruct::default())
            .expect("failed to create seqlock");

        let val = WriteSizeEightStruct {
            lock: 0,
            val1: 42,
            val2: 123_456_789_012_345_678,
            val3: 987_654_321_098_765_432,
        };
        seqlock.set_value(val);

        let data = seqlock.get();
        // The 'lock' field was incremented twice by set_value(),
        // and not incremented for get(). Note the `lock: 0` passed above is
        // never written back: set_value() skips the inline sequence.
        assert_eq!(data.lock, 2);
        assert_eq!(data.val1, val.val1);
        assert_eq!(data.val2, val.val2);
        assert_eq!(data.val3, val.val3);
    }

    // Example struct that mirrors SeLinuxStatusValue.
    // All-u32 fields; the sequence lives outside T, managed by SeqLock itself.
    #[repr(C)]
    #[derive(IntoBytes, Immutable, KnownLayout, Copy, Clone, Debug, PartialEq, Default)]
    struct WriteSizeFourStruct {
        val1: u32,
        val2: u32,
        val3: u32,
    }

    // SAFETY: This struct is composed of u32 fields, making it safe
    // to write in 4-byte chunks. It is only used for testing.
    // It emulates a SeLinuxStatusValue struct.
    unsafe impl SeqLockable for WriteSizeFourStruct {
        const WRITE_SIZE: WriteSize = WriteSize::Four;
        const HAS_INLINE_SEQUENCE: bool = false;
        const VMO_NAME: &'static [u8] = b"test:write_size_four";
    }

    #[test]
    fn test_seqlock_gets_align_four() {
        let seqlock = SeqLock::<u32, WriteSizeFourStruct>::new(0, WriteSizeFourStruct::default())
            .expect("failed to create seqlock");

        let val = WriteSizeFourStruct { val1: 42, val2: 123_456_789, val3: 987_654_321 };
        seqlock.set_value(val);

        let data = seqlock.get();
        assert_eq!(data.val1, val.val1);
        assert_eq!(data.val2, val.val2);
        assert_eq!(data.val3, val.val3);
    }

    // Stress test for get() and set_value().
    // For two threads, get() and set_value() should work on the same piece of memory.
    // One thread tries to read a lot, and another writes a lot. This test verifies that,
    // thanks to the seqlock, the data read is correct (didn't get overwritten mid-read).
    // TODO(https://fxbug.dev/460246292): Handle cases for more than 1 writer thread.
    #[test]
    fn test_seqlock_handles_concurrent_gets_and_sets() {
        let seqlock = std::sync::Arc::new(
            SeqLock::<u64, WriteSizeEightStruct>::new(0, WriteSizeEightStruct::default())
                .expect("failed to create seqlock"),
        );

        let seqlock_clone = std::sync::Arc::clone(&seqlock);
        let seqlock_clone_2 = std::sync::Arc::clone(&seqlock);

        // Barrier ensures reader and writer start at (roughly) the same moment.
        let barrier = std::sync::Arc::new(std::sync::Barrier::new(2));
        let barrier_clone = std::sync::Arc::clone(&barrier);

        // Spawn 2 threads that run concurrently.
        let writer_thread = std::thread::spawn(move || {
            barrier.wait();
            let start = std::time::Instant::now();
            let mut i = 0u32;
            while start.elapsed() < std::time::Duration::from_millis(200) {
                // All three payload fields carry the same counter so the reader
                // can detect a torn (mixed-generation) snapshot.
                let val = WriteSizeEightStruct { lock: 0, val1: i, val2: i as u64, val3: i as u64 };
                seqlock_clone.set_value(val);
                i += 1;
            }
        });
        let reader_thread = std::thread::spawn(move || {
            let mut reads = 0;
            let mut last_valid_read = 0;
            barrier_clone.wait();
            let start = std::time::Instant::now();
            while start.elapsed() < std::time::Duration::from_millis(200) {
                let data = seqlock_clone_2.get();
                // All fields are the same (no mid-read writes).
                assert_eq!(data.val1 as u64, data.val2);
                assert_eq!(data.val2, data.val3);

                // The sequence (lock) should be even (completed writes).
                assert_eq!(data.lock % 2, 0);

                // get() returns the latest value. The latest value might not increment exactly
                // by 1 each time because the writer thread might have written zero or multiple
                // times since we last read. So, we just verify that the latest value is higher
                // than the previous value.
                assert!(data.val1 >= last_valid_read);
                last_valid_read = data.val1;
                reads += 1;
            }
            reads
        });

        // Wait for both threads to finish.
        writer_thread.join().unwrap();
        let total_reads = reader_thread.join().unwrap();

        // Check that reading actually happened.
        assert!(total_reads > 1, "Expected threads to run concurrently");

        // Check that writes actually happened.
        let final_data = seqlock.get();
        assert!(final_data.val1 > 0, "Expected some writes to happen");
        assert_eq!(final_data.val1 as u64, final_data.val2);
        assert_eq!(final_data.val2, final_data.val3);
        assert_eq!(final_data.lock % 2, 0, "Sequence lock should be unlocked");
    }
}