// Copyright 2025 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! Support for implementing splittable MMIO regions.
//!
//! This module defines the [MmioRegion] type which provides safe [Mmio] and [MmioSplit]
//! implementations on top of the more relaxed [UnsafeMmio] trait.
//!
//! The [UnsafeMmio] trait allows mutations through a shared reference, provided the caller
//! ensures that a store is never performed concurrently with any other operation that may
//! overlap it.
//!
//! For many use cases, implementing [UnsafeMmio] correctly is likely to be simpler than
//! implementing [Mmio] and [MmioSplit] directly.
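//!
//! As a rough sketch of the intended flow (`my_unsafe_mmio` stands in for any [UnsafeMmio]
//! implementation; error handling is elided):
//!
//! ```ignore
//! // Wrap the UnsafeMmio implementation, then make the region splittable.
//! let mut region = MmioRegion::new(my_unsafe_mmio).into_split();
//!
//! // Carve off the first 0x100 bytes as an independently owned region.
//! let mut first = region.try_split_off(0x100)?;
//!
//! // Loads go through a shared reference, stores through an exclusive one.
//! let status = first.try_load32(0x0)?;
//! first.try_store32(0x4, 0x1)?;
//! ```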

use crate::{Mmio, MmioError, MmioExt, MmioSplit};
use core::borrow::Borrow;
use core::marker::PhantomData;
use core::ops::Range;
use std::rc::Rc;
use std::sync::Arc;

/// An MMIO region that can be stored to through a shared reference.
///
/// This trait requires the caller to uphold some safety constraints, but enables a generic
/// implementation of [MmioSplit]. See [MmioRegion], which provides a safe wrapper on top of
/// this trait.
///
/// This is primarily intended to simplify implementing the [MmioSplit] trait, not for users of the
/// library. However, it is possible to use [UnsafeMmio] directly, provided the safety requirements
/// are met.
///
/// # Safety
/// - Callers must ensure that stores are never performed concurrently with any other operation on
///   an overlapping range.
/// - Concurrent loads are allowed on overlapping ranges.
/// - Callers must ensure that offsets are suitably aligned for the type being loaded or stored.
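///
/// # Example
///
/// A minimal sketch of an implementation over a raw, memory-mapped base pointer (`RawMmio` is
/// hypothetical; a real implementation must also guarantee the validity and lifetime of the
/// mapping, and would implement the remaining widths the same way):
///
/// ```ignore
/// struct RawMmio {
///     base: *mut u8,
///     len: usize,
/// }
///
/// impl UnsafeMmio for RawMmio {
///     fn len(&self) -> usize {
///         self.len
///     }
///
///     fn align_offset(&self, align: usize) -> usize {
///         // Delegate to the pointer's own alignment computation.
///         self.base.align_offset(align)
///     }
///
///     unsafe fn load32_unchecked(&self, offset: usize) -> u32 {
///         // Volatile access so the compiler never elides or reorders the MMIO read.
///         unsafe { self.base.add(offset).cast::<u32>().read_volatile() }
///     }
///
///     unsafe fn store32_unchecked(&self, offset: usize, v: u32) {
///         unsafe { self.base.add(offset).cast::<u32>().write_volatile(v) }
///     }
///
///     // ...load8/16/64 and store8/16/64 follow the same pattern.
/// }
/// ```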
pub trait UnsafeMmio {
    /// Returns the size, in bytes, of the underlying MMIO region that can be accessed through this
    /// object.
    fn len(&self) -> usize;

    /// Returns true if the MMIO region has a length of 0.
    fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Returns the first offset into this MMIO region that is suitably aligned for `align`.
    ///
    /// An offset is suitably aligned if `offset = align_offset(align) + i * align` for some `i`.
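    ///
    /// For example, if the region starts two bytes past a 4-byte boundary, then
    /// `align_offset(4)` returns 2, and offsets 2, 6, 10, ... are suitably aligned for
    /// 4-byte accesses.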
    fn align_offset(&self, align: usize) -> usize;

    /// Loads a u8 from this MMIO region at the given offset.
    ///
    /// # Safety
    /// See the trait-level documentation.
    unsafe fn load8_unchecked(&self, offset: usize) -> u8;

    /// Loads a u16 from this MMIO region at the given offset.
    ///
    /// # Safety
    /// See the trait-level documentation.
    unsafe fn load16_unchecked(&self, offset: usize) -> u16;

    /// Loads a u32 from this MMIO region at the given offset.
    ///
    /// # Safety
    /// See the trait-level documentation.
    unsafe fn load32_unchecked(&self, offset: usize) -> u32;

    /// Loads a u64 from this MMIO region at the given offset.
    ///
    /// # Safety
    /// See the trait-level documentation.
    unsafe fn load64_unchecked(&self, offset: usize) -> u64;

    /// Stores a u8 to this MMIO region at the given offset.
    ///
    /// # Safety
    /// See the trait-level documentation.
    unsafe fn store8_unchecked(&self, offset: usize, v: u8);

    /// Stores a u16 to this MMIO region at the given offset.
    ///
    /// # Safety
    /// See the trait-level documentation.
    unsafe fn store16_unchecked(&self, offset: usize, v: u16);

    /// Stores a u32 to this MMIO region at the given offset.
    ///
    /// # Safety
    /// See the trait-level documentation.
    unsafe fn store32_unchecked(&self, offset: usize, v: u32);

    /// Stores a u64 to this MMIO region at the given offset.
    ///
    /// # Safety
    /// See the trait-level documentation.
    unsafe fn store64_unchecked(&self, offset: usize, v: u64);
}

/// An `MmioRegion` provides a safe implementation of [Mmio] and [MmioSplit] on top of an
/// [UnsafeMmio] implementation.
///
/// The safety constraints of [UnsafeMmio] require callers to ensure that stores are not performed
/// concurrently with any other operation on an overlapping range.
///
/// This type meets these requirements, while also supporting being split into independently owned
/// regions, due to the following:
///
/// 1. An MmioRegion has exclusive ownership of a sub-region from the wrapped [UnsafeMmio]
///    implementation (required by [MmioRegion::new]).
/// 2. An MmioRegion only performs operations that are fully contained within the region it owns.
/// 3. All stores are performed through a mutable reference (ensuring stores are exclusive with all
///    other operations to the owned region).
/// 4. When splitting an MmioRegion, the split-off region owns a range that was owned by the region
///    it was split from prior to the split, and which it owns exclusively after the split.
///
/// # Type Parameters
/// An MmioRegion is parameterized by two types:
/// - `Impl`: the [UnsafeMmio] implementation wrapped by this region.
/// - `Owner`: an object with shared ownership of the `Impl` instance.
///
/// An MmioRegion is splittable if `Owner` can be cloned.
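///
/// As a sketch of how the `Owner` parameter is typically chosen (`MyMmio` and the `mmio_*` values
/// are placeholders for any [UnsafeMmio] implementation):
///
/// ```ignore
/// // Owner = Impl: exclusive ownership of the whole range; not splittable.
/// let region: MmioRegion<MyMmio> = MmioRegion::new(mmio_a);
///
/// // Owner = Rc<Impl>: splittable within a single thread.
/// let region: MmioRegion<MyMmio, Rc<MyMmio>> = MmioRegion::new(mmio_b).into_split();
///
/// // Owner = Arc<Impl>: splittable, and the pieces can move across threads
/// // (requires the implementation to be Send + Sync).
/// let region: MmioRegion<MyMmio, Arc<MyMmio>> = MmioRegion::new(mmio_c).into_split_send();
/// ```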
pub struct MmioRegion<Impl, Owner = Impl> {
    owner: Owner,
    bounds: Range<usize>,
    phantom: PhantomData<Impl>,
}

impl<U: UnsafeMmio> MmioRegion<U> {
    /// Creates a new `MmioRegion` that has exclusive ownership of the entire range.
    ///
    /// The returned object is guaranteed to be the only one capable of referencing any value in
    /// the range. It can be converted into a splittable region with [MmioRegion::into_split] or
    /// [MmioRegion::into_split_send].
    pub fn new(inner: U) -> Self {
        let bounds = 0..inner.len();
        let owner = inner;
        Self { owner, bounds, phantom: PhantomData }
    }

    /// Converts this region into one which can be split.
    pub fn into_split(self) -> MmioRegion<U, Rc<U>> {
        let owner = Rc::new(self.owner);
        let bounds = self.bounds;
        // Safety:
        // - this region exclusively owns its bounds.
        // - ownership of the UnsafeMmio is transferred into the Rc.
        // - the returned region has the same bounds as self did at the start of the call.
        unsafe { MmioRegion::<U, _>::new_unchecked(owner, bounds) }
    }
}

impl<U: UnsafeMmio + Send + Sync> MmioRegion<U> {
    /// Converts this region into one which can be split and sent.
    pub fn into_split_send(self) -> MmioRegion<U, Arc<U>> {
        let owner = Arc::new(self.owner);
        let bounds = self.bounds;
        // Safety:
        // - this region exclusively owns its bounds.
        // - ownership of the UnsafeMmio is transferred into the Arc.
        // - the returned region has the same bounds as self did at the start of the call.
        unsafe { MmioRegion::<U, _>::new_unchecked(owner, bounds) }
    }
}

impl<Impl: UnsafeMmio, Owner: Borrow<Impl>> MmioRegion<Impl, Owner> {
    /// Creates an MmioRegion that constrains all operations on the underlying wrapped UnsafeMmio
    /// to be within the given bounds.
    ///
    /// # Safety
    /// - For the lifetime of this MmioRegion, or any region split off from it, the given range
    ///   must only be accessed through this MmioRegion or a region split off from it.
    unsafe fn new_unchecked(owner: Owner, bounds: Range<usize>) -> Self {
        Self { owner, bounds, phantom: PhantomData }
    }

    /// Resolves the offset relative to the start of this MmioRegion's bounds, provided that the
    /// offset is suitably aligned for type T and there is sufficient capacity within this
    /// MmioRegion's bounds at the given offset.
    fn resolve_offset<T>(&self, offset: usize) -> Result<usize, MmioError> {
        self.check_suitable_for::<T>(offset)?;
        Ok(self.bounds.start + offset)
    }
}

impl<Impl: UnsafeMmio, Owner: Borrow<Impl>> Mmio for MmioRegion<Impl, Owner> {
    fn len(&self) -> usize {
        self.bounds.len()
    }

    fn align_offset(&self, align: usize) -> usize {
        // Determine the first offset into the wrapped region that is correctly aligned.
        let first_aligned_offset = self.owner.borrow().align_offset(align);

        // An aligned offset is any offset where offset = first_aligned_offset + i * align,
        // or where (offset - first_aligned_offset) % align = 0.
        //
        // An offset relative to the start of this region is aligned if:
        // (rel_offset + region_start - first_aligned_offset) % align = 0,
        // or rel_offset % align = (first_aligned_offset - region_start) % align.
        //
        // Therefore, the first aligned offset, relative to the start of this region, is:
        // (first_aligned_offset - region_start) % align.
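        //
        // For example, if region_start = 3, first_aligned_offset = 0, and align = 4, then the
        // wrapped offsets 0, 4, 8, ... are aligned; the first of these at or past the region
        // start is 4, i.e. relative offset (0 - 3) mod 4 = 1.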
        first_aligned_offset.wrapping_sub(self.bounds.start) % align
    }

    fn try_load8(&self, offset: usize) -> Result<u8, MmioError> {
        let offset = self.resolve_offset::<u8>(offset)?;
        // Safety:
        // - this region exclusively owns its covered range (required by safety constraints)
        // - the immutable receiver excludes stores for this entire range
        Ok(unsafe { self.owner.borrow().load8_unchecked(offset) })
    }

    fn try_load16(&self, offset: usize) -> Result<u16, MmioError> {
        let offset = self.resolve_offset::<u16>(offset)?;
        // Safety:
        // - this region exclusively owns its covered range (required by safety constraints)
        // - the immutable receiver excludes stores for this entire range
        Ok(unsafe { self.owner.borrow().load16_unchecked(offset) })
    }

    fn try_load32(&self, offset: usize) -> Result<u32, MmioError> {
        let offset = self.resolve_offset::<u32>(offset)?;
        // Safety:
        // - this region exclusively owns its covered range (required by safety constraints)
        // - the immutable receiver excludes stores for this entire range
        Ok(unsafe { self.owner.borrow().load32_unchecked(offset) })
    }

    fn try_load64(&self, offset: usize) -> Result<u64, MmioError> {
        let offset = self.resolve_offset::<u64>(offset)?;
        // Safety:
        // - this region exclusively owns its covered range (required by safety constraints)
        // - the immutable receiver excludes stores for this entire range
        Ok(unsafe { self.owner.borrow().load64_unchecked(offset) })
    }

    fn try_store8(&mut self, offset: usize, v: u8) -> Result<(), MmioError> {
        let offset = self.resolve_offset::<u8>(offset)?;
        // Safety:
        // - this region exclusively owns its covered range (required by safety constraints)
        // - the mutable receiver excludes all other operations for this entire range
        unsafe {
            self.owner.borrow().store8_unchecked(offset, v);
        }
        Ok(())
    }

    fn try_store16(&mut self, offset: usize, v: u16) -> Result<(), MmioError> {
        let offset = self.resolve_offset::<u16>(offset)?;
        // Safety:
        // - this region exclusively owns its covered range (required by safety constraints)
        // - the mutable receiver excludes all other operations for this entire range
        unsafe {
            self.owner.borrow().store16_unchecked(offset, v);
        }
        Ok(())
    }

    fn try_store32(&mut self, offset: usize, v: u32) -> Result<(), MmioError> {
        let offset = self.resolve_offset::<u32>(offset)?;
        // Safety:
        // - this region exclusively owns its covered range (required by safety constraints)
        // - the mutable receiver excludes all other operations for this entire range
        unsafe {
            self.owner.borrow().store32_unchecked(offset, v);
        }
        Ok(())
    }

    fn try_store64(&mut self, offset: usize, v: u64) -> Result<(), MmioError> {
        let offset = self.resolve_offset::<u64>(offset)?;
        // Safety:
        // - this region exclusively owns its covered range (required by safety constraints)
        // - the mutable receiver excludes all other operations for this entire range
        unsafe {
            self.owner.borrow().store64_unchecked(offset, v);
        }
        Ok(())
    }
}

impl<Impl: UnsafeMmio, Owner: Borrow<Impl> + Clone> MmioSplit for MmioRegion<Impl, Owner> {
    fn try_split_off(&mut self, mid: usize) -> Result<Self, MmioError> {
        if mid > self.len() {
            return Err(MmioError::OutOfRange);
        }

        // Resolve the midpoint to an absolute offset.
        let mid = self.bounds.start + mid;

        // Split the bounds into two disjoint ranges.
        let lhs = self.bounds.start..mid;
        let rhs = mid..self.bounds.end;

        // Relinquish ownership of the lhs.
        self.bounds = rhs;

        // Safety:
        // - this region exclusively owns its covered range (required by safety constraints)
        // - the mutable receiver excludes all other operations for this entire range
        // - this mmio region splits off a portion of its owned range and relinquishes ownership of
        //   it before returning
        // - the returned MmioRegion owns a range that was owned by this MmioRegion at the start of
        //   this call and no longer is
        Ok(unsafe { Self::new_unchecked(self.owner.clone(), lhs) })
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::MmioOperand;
    use fuchsia_sync::RwLock;
    use rand::Rng;
    use std::sync::Barrier;
    use std::thread::sleep;
    use std::time::Duration;

    /// An UnsafeMmio implementation that dynamically checks for violations of the safety
    /// requirements:
    /// - a store concurrent with another operation on an overlapping memory range
    /// - an unaligned access
    ///
    /// This implementation will panic if an unaligned operation is issued.
    ///
    /// This implementation *might* panic on unsafe concurrent usage. A panic means there was
    /// unsafe concurrent usage, but the lack of a panic doesn't guarantee all usage was safe. The
    /// `mean_op_duration` parameter to [CheckedRegisters::new] controls how long the average
    /// borrow lasts; increasing it makes it more likely that unsafe usage will be detected.
    struct CheckedRegisters {
        cells: Vec<RwLock<u8>>,
        mean_op_duration: f32,
    }

    impl CheckedRegisters {
        fn new(len: usize, mean_op_duration: Duration) -> Self {
            let mut cells = Vec::new();
            cells.resize_with(len, || RwLock::new(0));

            let mean_op_duration = mean_op_duration.as_secs_f32();

            Self { cells, mean_op_duration }
        }

        fn sleep(&self) {
            // Model op durations as exponentially distributed (as in a Poisson process) to get
            // some jitter.
            let uniform_sample: f32 = rand::random::<f32>().max(0.000001);
            let duration_secs = -self.mean_op_duration * uniform_sample.ln();
            sleep(Duration::from_secs_f32(duration_secs));
        }

        fn load<const N: usize>(&self, start: usize) -> [u8; N] {
            let borrows: [_; N] = core::array::from_fn(|i| {
                self.cells[start + i]
                    .try_read()
                    .expect("attempt to load from an address that is being stored to")
            });

            // Sleep while borrowing these cells to increase the chance that unsafe usage will be
            // detected.
            self.sleep();

            borrows.map(|r| *r)
        }

        fn store<const N: usize>(&self, start: usize, bytes: [u8; N]) {
            let borrows: [_; N] = core::array::from_fn(|i| {
                self.cells[start + i]
                    .try_write()
                    .expect("attempt to store to an address concurrently with another operation")
            });

            // Sleep while borrowing these cells to increase the chance that unsafe usage will be
            // detected.
            self.sleep();

            borrows.into_iter().zip(bytes).for_each(|(mut r, b)| *r = b);
        }
    }

    impl UnsafeMmio for CheckedRegisters {
        fn len(&self) -> usize {
            self.cells.len()
        }

        fn align_offset(&self, _align: usize) -> usize {
            0
        }

        unsafe fn load8_unchecked(&self, offset: usize) -> u8 {
            self.load::<1>(offset)[0]
        }

        unsafe fn load16_unchecked(&self, offset: usize) -> u16 {
            assert_eq!(offset % 2, 0);
            u16::from_le_bytes(self.load::<2>(offset))
        }

        unsafe fn load32_unchecked(&self, offset: usize) -> u32 {
            assert_eq!(offset % 4, 0);
            u32::from_le_bytes(self.load::<4>(offset))
        }

        unsafe fn load64_unchecked(&self, offset: usize) -> u64 {
            assert_eq!(offset % 8, 0);
            u64::from_le_bytes(self.load::<8>(offset))
        }

        unsafe fn store8_unchecked(&self, offset: usize, v: u8) {
            self.store::<1>(offset, [v]);
        }

        unsafe fn store16_unchecked(&self, offset: usize, v: u16) {
            assert_eq!(offset % 2, 0);
            self.store::<2>(offset, v.to_le_bytes())
        }

        unsafe fn store32_unchecked(&self, offset: usize, v: u32) {
            assert_eq!(offset % 4, 0);
            self.store::<4>(offset, v.to_le_bytes())
        }

        unsafe fn store64_unchecked(&self, offset: usize, v: u64) {
            assert_eq!(offset % 8, 0);
            self.store::<8>(offset, v.to_le_bytes())
        }
    }

    #[test]
    fn test_memory_region_thread_safety() {
        // The number of concurrent threads.
        const CONCURRENCY: usize = 64;

        // The number of bytes each thread owns. Must be a non-zero multiple of 8.
        const BYTES_PER_THREAD: usize = 8;

        // The average time for an operation to hold a borrow.
        const MEAN_OP_TIME: Duration = Duration::from_micros(100);

        // The number of ops to perform per thread. At 100us per op the minimum sleep time per
        // thread should be around 0.5s.
        const THREAD_OP_COUNT: usize = 5000;

        // The total size of the Mmio region.
        const LEN: usize = CONCURRENCY * BYTES_PER_THREAD;

        // These are required for test correctness.
        assert_ne!(BYTES_PER_THREAD, 0);
        assert_eq!(BYTES_PER_THREAD % 8, 0);

        let registers = CheckedRegisters::new(LEN, MEAN_OP_TIME);
        // Safety:
        // - CheckedRegisters only references memory it owns
        // - MmioRegion takes ownership of the CheckedRegisters object
        let mut region = MmioRegion::new(registers).into_split_send();

        let barrier = Barrier::new(CONCURRENCY);

        std::thread::scope(|s| {
            let barrier = &barrier;
            for _ in 0..CONCURRENCY {
                let mut split = region.split_off(BYTES_PER_THREAD);
                s.spawn(move || {
                    let mut rng = rand::rng();

                    // Wait until threads are ready to start to increase the chance of a race.
                    barrier.wait();

                    for _i in 0..THREAD_OP_COUNT {
                        let offset = rng.random_range(0..BYTES_PER_THREAD);
                        let op = rng.random_range(0usize..8);

                        let size = 1 << (op % 4);
                        // Choose a random offset from 0 to 2x the size of this region. MmioRegion
                        // should prevent reading out of bounds.
                        let offset = offset.next_multiple_of(size) % (BYTES_PER_THREAD * 2);

                        // We don't care whether these operations fail.
                        let _ = match op {
                            0 => split.try_load8(offset).err(),
                            1 => split.try_load16(offset).err(),
                            2 => split.try_load32(offset).err(),
                            3 => split.try_load64(offset).err(),
                            4 => split.try_store8(offset, rng.random()).err(),
                            5 => split.try_store16(offset, rng.random()).err(),
                            6 => split.try_store32(offset, rng.random()).err(),
                            7 => split.try_store64(offset, rng.random()).err(),
                            _ => unreachable!(),
                        };
                    }
                });
            }
        });
    }

    #[test]
    fn test_alignment() {
        const LEN: usize = 64;
        let registers = CheckedRegisters::new(LEN, Duration::ZERO);
        let mut region = MmioRegion::new(registers).into_split();
        let mut rng = rand::rng();

        fn assert_alignment<M: Mmio, T: MmioOperand>(
            mmio: &mut M,
            offset: usize,
            region_offset: usize,
            v: T,
        ) {
            let absolute_offset = offset + region_offset;
            let is_aligned = absolute_offset.is_multiple_of(align_of::<T>());
            let expected_res = if is_aligned { Ok(()) } else { Err(MmioError::Unaligned) };
            assert_eq!(mmio.check_suitable_for::<T>(offset), expected_res);
            assert_eq!(mmio.try_store(offset, v), expected_res);
            assert_eq!(mmio.try_load(offset), expected_res.map(|_| v));
        }

        for region_offset in 0..8 {
            // Do at least two cycles of the largest operand alignment to test modular arithmetic.
            for relative_offset in 0..16 {
                let v: u64 = rng.random();
                assert_alignment(&mut region, relative_offset, region_offset, v as u8);
                assert_alignment(&mut region, relative_offset, region_offset, v as u16);
                assert_alignment(&mut region, relative_offset, region_offset, v as u32);
                assert_alignment(&mut region, relative_offset, region_offset, v);
            }
            // Throw away the first byte to advance the region's bounds.
            let _ = region.split_off(1);
        }
    }

    #[test]
    fn test_wrapped_alignment() {
        // Test all combinations of how the wrapped MMIO region's alignment, the region start, and
        // region-relative offsets may stack with respect to alignment.
        struct OffsetMmio(usize);
        impl UnsafeMmio for OffsetMmio {
            fn len(&self) -> usize {
                isize::MAX as usize
            }

            fn align_offset(&self, align: usize) -> usize {
                align.wrapping_sub(self.0) % align
            }

            unsafe fn load8_unchecked(&self, _offset: usize) -> u8 {
                unreachable!()
            }

            unsafe fn load16_unchecked(&self, _offset: usize) -> u16 {
                unreachable!()
            }

            unsafe fn load32_unchecked(&self, _offset: usize) -> u32 {
                unreachable!()
            }

            unsafe fn load64_unchecked(&self, _offset: usize) -> u64 {
                unreachable!()
            }

            unsafe fn store8_unchecked(&self, _offset: usize, _value: u8) {
                unreachable!()
            }

            unsafe fn store16_unchecked(&self, _offset: usize, _value: u16) {
                unreachable!()
            }

            unsafe fn store32_unchecked(&self, _offset: usize, _value: u32) {
                unreachable!()
            }

            unsafe fn store64_unchecked(&self, _offset: usize, _value: u64) {
                unreachable!()
            }
        }

        // Loop through the wrapped offset, region_start, and relative_offset over 2x the size of
        // the largest operand, in order to exercise all combinations of these in the face of the
        // modular arithmetic.
        for wrapped_offset in 0..16 {
            let offset_mmio = OffsetMmio(wrapped_offset);
            let mut region = MmioRegion::new(offset_mmio).into_split();

            for region_start in 0..16 {
                let absolute_region_start = wrapped_offset + region_start;
                assert_eq!(region.align_offset(1), 0);
                assert_eq!(region.align_offset(2), 2_usize.wrapping_sub(absolute_region_start) % 2);
                assert_eq!(region.align_offset(4), 4_usize.wrapping_sub(absolute_region_start) % 4);
                assert_eq!(region.align_offset(8), 8_usize.wrapping_sub(absolute_region_start) % 8);

                for relative_offset in 0..16 {
                    let absolute_offset = wrapped_offset + region_start + relative_offset;

                    // Every offset is suitably aligned for u8.
                    assert_eq!(region.check_aligned_for::<u8>(relative_offset), Ok(()));
                    assert_eq!(
                        region.check_aligned_for::<u16>(relative_offset),
                        if absolute_offset % 2 == 0 { Ok(()) } else { Err(MmioError::Unaligned) }
                    );
                    assert_eq!(
                        region.check_aligned_for::<u32>(relative_offset),
                        if absolute_offset % 4 == 0 { Ok(()) } else { Err(MmioError::Unaligned) }
                    );
                    assert_eq!(
                        region.check_aligned_for::<u64>(relative_offset),
                        if absolute_offset % 8 == 0 { Ok(()) } else { Err(MmioError::Unaligned) }
                    );
                }
                // Drop the first byte to advance the region's offset.
                let _ = region.split_off(1);
            }
        }
    }
}