// Copyright 2025 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! Support for implementing splittable MMIO regions.
//!
//! This module defines the [MmioRegion] type, which provides safe [Mmio] and [MmioSplit]
//! implementations on top of the more relaxed [UnsafeMmio] trait.
//!
//! The [UnsafeMmio] trait allows mutations through a shared reference, provided the caller
//! ensures that a store is never performed concurrently with any other operation on an
//! overlapping range.
//!
//! For many use cases, implementing [UnsafeMmio] correctly is likely to be simpler than
//! implementing [Mmio] and [MmioSplit] directly.
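//!
//! A minimal usage sketch, assuming a hypothetical `DeviceRegs` type that implements
//! [UnsafeMmio] (it is not part of this module):
//!
//! ```ignore
//! let mut region = MmioRegion::new(DeviceRegs::new()).into_split();
//! // Hand the low 0x10 bytes to another owner; `region` keeps the rest.
//! let mut low = region.try_split_off(0x10).expect("split in range");
//! low.try_store32(0x0, 0xdead_beef).expect("store in bounds and aligned");
//! let _status = region.try_load32(0x4).expect("load in bounds and aligned");
//! ```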

use crate::{Mmio, MmioError, MmioExt, MmioSplit};
use core::borrow::Borrow;
use core::marker::PhantomData;
use core::ops::Range;
use std::rc::Rc;
use std::sync::Arc;

/// An MMIO region that can be stored to through a shared reference.
///
/// This trait requires the caller to uphold some safety constraints, but enables a generic
/// implementation of [MmioSplit]. See [MmioRegion], which provides a safe wrapper on top of
/// this trait.
///
/// This is primarily intended to simplify implementing the [MmioSplit] trait, not for users of the
/// library. However, it is possible to use [UnsafeMmio] directly, provided the safety requirements
/// are met.
///
/// # Safety
/// - Callers must ensure that stores are never performed concurrently with any other operation on
///   an overlapping range.
/// - Concurrent loads are allowed on overlapping ranges.
/// - Callers must ensure that offsets are suitably aligned for the type being loaded or stored.
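///
/// # Examples
///
/// A sketch of direct use, assuming `regs` is some [UnsafeMmio] implementation and the caller
/// upholds the safety requirements above:
///
/// ```ignore
/// // Safety: no store to this range is in flight on any other thread, and offset 0 is
/// // suitably aligned for a u32.
/// let value = unsafe { regs.load32_unchecked(0) };
/// ```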
pub trait UnsafeMmio {
    /// Returns the size, in bytes, of the underlying MMIO region that can be accessed through this
    /// object.
    fn len(&self) -> usize;

    /// Returns the first offset into this MMIO region that is suitably aligned for `align`.
    ///
    /// An offset is suitably aligned if `offset = align_offset(align) + i * align` for some `i`.
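    ///
    /// For example, if `align_offset(4)` returns 2, then offsets 2, 6, 10, ... are suitably
    /// aligned for 4-byte accesses.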
    fn align_offset(&self, align: usize) -> usize;

    /// Loads a u8 from this MMIO region at the given offset.
    ///
    /// # Safety
    /// See the trait-level documentation.
    unsafe fn load8_unchecked(&self, offset: usize) -> u8;

    /// Loads a u16 from this MMIO region at the given offset.
    ///
    /// # Safety
    /// See the trait-level documentation.
    unsafe fn load16_unchecked(&self, offset: usize) -> u16;

    /// Loads a u32 from this MMIO region at the given offset.
    ///
    /// # Safety
    /// See the trait-level documentation.
    unsafe fn load32_unchecked(&self, offset: usize) -> u32;

    /// Loads a u64 from this MMIO region at the given offset.
    ///
    /// # Safety
    /// See the trait-level documentation.
    unsafe fn load64_unchecked(&self, offset: usize) -> u64;

    /// Stores a u8 to this MMIO region at the given offset.
    ///
    /// # Safety
    /// See the trait-level documentation.
    unsafe fn store8_unchecked(&self, offset: usize, v: u8);

    /// Stores a u16 to this MMIO region at the given offset.
    ///
    /// # Safety
    /// See the trait-level documentation.
    unsafe fn store16_unchecked(&self, offset: usize, v: u16);

    /// Stores a u32 to this MMIO region at the given offset.
    ///
    /// # Safety
    /// See the trait-level documentation.
    unsafe fn store32_unchecked(&self, offset: usize, v: u32);

    /// Stores a u64 to this MMIO region at the given offset.
    ///
    /// # Safety
    /// See the trait-level documentation.
    unsafe fn store64_unchecked(&self, offset: usize, v: u64);
}

/// An `MmioRegion` provides a safe implementation of [Mmio] and [MmioSplit] on top of an
/// [UnsafeMmio] implementation.
///
/// The safety constraints of [UnsafeMmio] require callers to ensure that stores are not performed
/// concurrently with loads for any overlapping range.
///
/// This type meets these requirements, while also supporting being split into independently owned
/// sub-regions, due to the following:
///
/// 1. An MmioRegion has exclusive ownership of a sub-region from the wrapped [UnsafeMmio]
///    implementation (required by [MmioRegion::new]).
/// 2. An MmioRegion only performs operations that are fully contained within the region it owns.
/// 3. All stores are performed through a mutable reference (ensuring stores are exclusive with all
///    other operations to the owned region).
/// 4. When splitting off `MmioRegions`, the split-off region owns a range that was owned by the
///    region it was split off from prior to the split, and of which it has exclusive ownership
///    after the split.
///
/// # Type Parameters
/// An MmioRegion is parameterized by two types:
/// - `Impl`: the [UnsafeMmio] implementation wrapped by this region.
/// - `Owner`: an object with shared ownership of the `Impl` instance.
///
/// An MmioRegion is splittable if `Owner` can be cloned.
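///
/// # Examples
///
/// A minimal sketch of how the two type parameters evolve, assuming a hypothetical `DeviceRegs`
/// type implementing [UnsafeMmio]:
///
/// ```ignore
/// // `new` takes exclusive ownership of the whole range; `Owner` defaults to `Impl`.
/// let region: MmioRegion<DeviceRegs> = MmioRegion::new(DeviceRegs::new());
/// // `into_split` moves the implementation into an `Rc`, making the region splittable.
/// let mut region: MmioRegion<DeviceRegs, Rc<DeviceRegs>> = region.into_split();
/// let low = region.try_split_off(0x100).expect("split in range");
/// ```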
pub struct MmioRegion<Impl, Owner = Impl> {
    owner: Owner,
    bounds: Range<usize>,
    phantom: PhantomData<Impl>,
}

impl<U: UnsafeMmio> MmioRegion<U> {
    /// Create a new `MmioRegion` that has exclusive ownership of the entire range.
    ///
    /// The returned object is guaranteed to be the only one capable of referencing any value in
    /// the range. It can be converted into a splittable region with [MmioRegion::into_split].
    pub fn new(inner: U) -> Self {
        let bounds = 0..inner.len();
        let owner = inner;
        Self { owner, bounds, phantom: PhantomData }
    }

    /// Converts this region into one which can be split.
    pub fn into_split(self) -> MmioRegion<U, Rc<U>> {
        let owner = Rc::new(self.owner);
        let bounds = self.bounds;
        // Safety:
        // - this region exclusively owns its bounds.
        // - ownership of the UnsafeMmio is transferred into the Rc.
        // - the returned region has the same bounds as self did at the start of the call.
        unsafe { MmioRegion::<U, _>::new_unchecked(owner, bounds) }
    }
}

impl<U: UnsafeMmio + Send + Sync> MmioRegion<U> {
    /// Converts this region into one which can be split and sent.
    pub fn into_split_send(self) -> MmioRegion<U, Arc<U>> {
        let owner = Arc::new(self.owner);
        let bounds = self.bounds;
        // Safety:
        // - this region exclusively owns its bounds.
        // - ownership of the UnsafeMmio is transferred into the Arc.
        // - the returned region has the same bounds as self did at the start of the call.
        unsafe { MmioRegion::<U, _>::new_unchecked(owner, bounds) }
    }
}

impl<Impl: UnsafeMmio, Owner: Borrow<Impl>> MmioRegion<Impl, Owner> {
    /// Create an MmioRegion that constrains all operations on the underlying wrapped UnsafeMmio
    /// to be within the given bounds.
    ///
    /// # Safety
    /// - For the lifetime of this MmioRegion, and of any region split off from it, the given range
    ///   must only be accessed through this MmioRegion or regions split off from it.
    unsafe fn new_unchecked(owner: Owner, bounds: Range<usize>) -> Self {
        Self { owner, bounds, phantom: PhantomData }
    }

    /// Resolves an offset relative to the start of this MmioRegion's bounds to an absolute offset
    /// into the wrapped UnsafeMmio, provided that the offset is suitably aligned for type T and
    /// the access fits within this MmioRegion's bounds.
    fn resolve_offset<T>(&self, offset: usize) -> Result<usize, MmioError> {
        self.check_suitable_for::<T>(offset)?;
        Ok(self.bounds.start + offset)
    }
}

impl<Impl: UnsafeMmio, Owner: Borrow<Impl>> Mmio for MmioRegion<Impl, Owner> {
    fn len(&self) -> usize {
        self.bounds.len()
    }

    fn align_offset(&self, align: usize) -> usize {
        // Determine the first offset into the wrapped region that is correctly aligned.
        let first_aligned_offset = self.owner.borrow().align_offset(align);

        // An aligned offset is any offset where offset = first_aligned_offset + i * align,
        // or where (offset - first_aligned_offset) % align = 0.
        //
        // For offsets relative to the start of this region, they are aligned if:
        // (rel_offset + region_start - first_aligned_offset) % align = 0,
        // or rel_offset % align = (first_aligned_offset - region_start) % align.
        //
        // Therefore, the first aligned offset, relative to the start of this region, is:
        // (first_aligned_offset - region_start) % align.
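        //
        // For example, if the wrapped region's first 4-aligned offset is 2 and this region
        // starts at absolute offset 5, then the first 4-aligned offset relative to this
        // region is (2 - 5) mod 4 = 1 (absolute offset 6).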
        first_aligned_offset.wrapping_sub(self.bounds.start) % align
    }

    fn try_load8(&self, offset: usize) -> Result<u8, MmioError> {
        let offset = self.resolve_offset::<u8>(offset)?;
        // Safety:
        // - this region exclusively owns its covered range (required by safety constraints)
        // - the immutable receiver excludes stores for this entire range
        Ok(unsafe { self.owner.borrow().load8_unchecked(offset) })
    }

    fn try_load16(&self, offset: usize) -> Result<u16, MmioError> {
        let offset = self.resolve_offset::<u16>(offset)?;
        // Safety:
        // - this region exclusively owns its covered range (required by safety constraints)
        // - the immutable receiver excludes stores for this entire range
        Ok(unsafe { self.owner.borrow().load16_unchecked(offset) })
    }

    fn try_load32(&self, offset: usize) -> Result<u32, MmioError> {
        let offset = self.resolve_offset::<u32>(offset)?;
        // Safety:
        // - this region exclusively owns its covered range (required by safety constraints)
        // - the immutable receiver excludes stores for this entire range
        Ok(unsafe { self.owner.borrow().load32_unchecked(offset) })
    }

    fn try_load64(&self, offset: usize) -> Result<u64, MmioError> {
        let offset = self.resolve_offset::<u64>(offset)?;
        // Safety:
        // - this region exclusively owns its covered range (required by safety constraints)
        // - the immutable receiver excludes stores for this entire range
        Ok(unsafe { self.owner.borrow().load64_unchecked(offset) })
    }

    fn try_store8(&mut self, offset: usize, v: u8) -> Result<(), MmioError> {
        let offset = self.resolve_offset::<u8>(offset)?;
        // Safety:
        // - this region exclusively owns its covered range (required by safety constraints)
        // - the mutable receiver excludes all other operations for this entire range
        unsafe {
            self.owner.borrow().store8_unchecked(offset, v);
        }
        Ok(())
    }

    fn try_store16(&mut self, offset: usize, v: u16) -> Result<(), MmioError> {
        let offset = self.resolve_offset::<u16>(offset)?;
        // Safety:
        // - this region exclusively owns its covered range (required by safety constraints)
        // - the mutable receiver excludes all other operations for this entire range
        unsafe {
            self.owner.borrow().store16_unchecked(offset, v);
        }
        Ok(())
    }

    fn try_store32(&mut self, offset: usize, v: u32) -> Result<(), MmioError> {
        let offset = self.resolve_offset::<u32>(offset)?;
        // Safety:
        // - this region exclusively owns its covered range (required by safety constraints)
        // - the mutable receiver excludes all other operations for this entire range
        unsafe {
            self.owner.borrow().store32_unchecked(offset, v);
        }
        Ok(())
    }

    fn try_store64(&mut self, offset: usize, v: u64) -> Result<(), MmioError> {
        let offset = self.resolve_offset::<u64>(offset)?;
        // Safety:
        // - this region exclusively owns its covered range (required by safety constraints)
        // - the mutable receiver excludes all other operations for this entire range
        unsafe {
            self.owner.borrow().store64_unchecked(offset, v);
        }
        Ok(())
    }
}

impl<Impl: UnsafeMmio, Owner: Borrow<Impl> + Clone> MmioSplit for MmioRegion<Impl, Owner> {
    fn try_split_off(&mut self, mid: usize) -> Result<Self, MmioError> {
        if mid > self.len() {
            return Err(MmioError::OutOfRange);
        }

        // Resolve the midpoint to an absolute offset.
        let mid = self.bounds.start + mid;

        // Split the bounds into two disjoint ranges.
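        // For example, with bounds 8..24 and a resolved mid of 12, lhs is 8..12 (returned
        // to the caller) and rhs is 12..24 (retained by self).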
        let lhs = self.bounds.start..mid;
        let rhs = mid..self.bounds.end;

        // Relinquish ownership of the lhs.
        self.bounds = rhs;

        // Safety:
        // - this region exclusively owns its covered range (required by safety constraints)
        // - the mutable receiver excludes all other operations for this entire range
        // - this mmio region splits off a portion of its owned range and relinquishes ownership of
        //   it before returning
        // - the returned MmioRegion owns a range that was owned by this MmioRegion at the start of
        //   this call and no longer is
        Ok(unsafe { Self::new_unchecked(self.owner.clone(), lhs) })
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::MmioOperand;
    use fuchsia_sync::RwLock;
    use rand::Rng;
    use std::sync::Barrier;
    use std::thread::sleep;
    use std::time::Duration;

    /// An UnsafeMmio implementation that dynamically checks for violations of the safety
    /// requirements:
    /// - a store concurrent with another operation on an overlapping memory range
    /// - an unaligned access
    ///
    /// This implementation will panic if an unaligned operation is issued.
    ///
    /// This implementation *might* panic on unsafe concurrent usage. If it panics, there was
    /// unsafe concurrent usage; however, the absence of a panic does not guarantee that all usage
    /// was safe. The `mean_op_duration` parameter to `new` controls how long the average borrow
    /// lasts; increasing it makes it more likely that unsafe usage will be detected.
    struct CheckedRegisters {
        cells: Vec<RwLock<u8>>,
        mean_op_duration: f32,
    }

    impl CheckedRegisters {
        fn new(len: usize, mean_op_duration: Duration) -> Self {
            let mut cells = Vec::new();
            cells.resize_with(len, || RwLock::new(0));

            let mean_op_duration = mean_op_duration.as_secs_f32();

            Self { cells, mean_op_duration }
        }

        fn sleep(&self) {
            // Model the op duration as an exponentially distributed random variable (sampled via
            // the inverse transform) to get some jitter.
            let uniform_sample: f32 = rand::random::<f32>().max(0.000001);
            let duration_secs = -self.mean_op_duration * uniform_sample.ln();
            sleep(Duration::from_secs_f32(duration_secs));
        }

        fn load<const N: usize>(&self, start: usize) -> [u8; N] {
            let borrows: [_; N] = core::array::from_fn(|i| {
                self.cells[start + i]
                    .try_read()
                    .expect("attempt to load from an address that is being stored to")
            });

            // Sleep while borrowing these cells to increase the chance that unsafe usage will be
            // detected.
            self.sleep();

            borrows.map(|r| *r)
        }

        fn store<const N: usize>(&self, start: usize, bytes: [u8; N]) {
            let borrows: [_; N] = core::array::from_fn(|i| {
                self.cells[start + i]
                    .try_write()
                    .expect("attempt to store to an address concurrently with another operation")
            });

            // Sleep while borrowing these cells to increase the chance that unsafe usage will be
            // detected.
            self.sleep();

            borrows.into_iter().zip(bytes.into_iter()).for_each(|(mut r, b)| *r = b);
        }
    }

    impl UnsafeMmio for CheckedRegisters {
        fn len(&self) -> usize {
            self.cells.len()
        }

        fn align_offset(&self, _align: usize) -> usize {
            0
        }

        unsafe fn load8_unchecked(&self, offset: usize) -> u8 {
            self.load::<1>(offset)[0]
        }

        unsafe fn load16_unchecked(&self, offset: usize) -> u16 {
            assert_eq!(offset % 2, 0);
            u16::from_le_bytes(self.load::<2>(offset))
        }

        unsafe fn load32_unchecked(&self, offset: usize) -> u32 {
            assert_eq!(offset % 4, 0);
            u32::from_le_bytes(self.load::<4>(offset))
        }

        unsafe fn load64_unchecked(&self, offset: usize) -> u64 {
            assert_eq!(offset % 8, 0);
            u64::from_le_bytes(self.load::<8>(offset))
        }

        unsafe fn store8_unchecked(&self, offset: usize, v: u8) {
            self.store::<1>(offset, [v]);
        }

        unsafe fn store16_unchecked(&self, offset: usize, v: u16) {
            assert_eq!(offset % 2, 0);
            self.store::<2>(offset, v.to_le_bytes())
        }

        unsafe fn store32_unchecked(&self, offset: usize, v: u32) {
            assert_eq!(offset % 4, 0);
            self.store::<4>(offset, v.to_le_bytes())
        }

        unsafe fn store64_unchecked(&self, offset: usize, v: u64) {
            assert_eq!(offset % 8, 0);
            self.store::<8>(offset, v.to_le_bytes())
        }
    }

    #[test]
    fn test_memory_region_thread_safety() {
        // The number of concurrent threads.
        const CONCURRENCY: usize = 64;

        // The number of bytes each thread owns. Must be a non-zero multiple of 8.
        const BYTES_PER_THREAD: usize = 8;

        // The average time for an operation to hold a borrow.
        const MEAN_OP_TIME: Duration = Duration::from_micros(100);

        // The number of ops to perform per thread. At 100us per op the minimum sleep time per
        // thread should be around 0.5s.
        const THREAD_OP_COUNT: usize = 5000;

        // The total size of the Mmio region.
        const LEN: usize = CONCURRENCY * BYTES_PER_THREAD;

        // These are required for test correctness.
        assert_ne!(BYTES_PER_THREAD, 0);
        assert_eq!(BYTES_PER_THREAD % 8, 0);

        let registers = CheckedRegisters::new(LEN, MEAN_OP_TIME);
        // Safety:
        // - CheckedRegisters only references memory it owns
        // - MmioRegion takes ownership of the CheckedRegisters object
        let mut region = MmioRegion::new(registers).into_split_send();

        let barrier = Barrier::new(CONCURRENCY);

        std::thread::scope(|s| {
            let barrier = &barrier;
            for _ in 0..CONCURRENCY {
                let mut split = region.split_off(BYTES_PER_THREAD);
                s.spawn(move || {
                    let mut rng = rand::rng();

                    // Wait until threads are ready to start to increase the chance of a race.
                    barrier.wait();

                    for _i in 0..THREAD_OP_COUNT {
                        // Choose a random offset from 0 to 2x the size of this region; MmioRegion
                        // should reject out-of-bounds accesses rather than perform them.
                        let offset = rng.random_range(0..BYTES_PER_THREAD * 2);
                        let op = rng.random_range(0usize..8);

                        let size = 1 << (op % 4);
                        // Round the offset up to a multiple of the operand size, wrapping to 0 at
                        // the top of the doubled range.
                        let offset = offset.next_multiple_of(size) % (BYTES_PER_THREAD * 2);

                        // We don't care whether these operations fail.
                        let _ = match op {
                            0 => split.try_load8(offset).err(),
                            1 => split.try_load16(offset).err(),
                            2 => split.try_load32(offset).err(),
                            3 => split.try_load64(offset).err(),
                            4 => split.try_store8(offset, rng.random()).err(),
                            5 => split.try_store16(offset, rng.random()).err(),
                            6 => split.try_store32(offset, rng.random()).err(),
                            7 => split.try_store64(offset, rng.random()).err(),
                            _ => unreachable!(),
                        };
                    }
                });
            }
        });
    }

    #[test]
    fn test_alignment() {
        const LEN: usize = 64;
        let registers = CheckedRegisters::new(LEN, Duration::ZERO);
        let mut region = MmioRegion::new(registers).into_split();
        let mut rng = rand::rng();

        fn assert_alignment<M: Mmio, T: MmioOperand>(
            mmio: &mut M,
            offset: usize,
            region_offset: usize,
            v: T,
        ) {
            let absolute_offset = offset + region_offset;
            let is_aligned = (absolute_offset % align_of::<T>()) == 0;
            let expected_res = if is_aligned { Ok(()) } else { Err(MmioError::Unaligned) };
            assert_eq!(mmio.check_suitable_for::<T>(offset), expected_res);
            assert_eq!(mmio.try_store(offset, v), expected_res);
            assert_eq!(mmio.try_load(offset), expected_res.map(|_| v));
        }

        for region_offset in 0..8 {
            // Do at least two cycles of the largest operand alignment to test modular arithmetic.
            for relative_offset in 0..16 {
                let v: u64 = rng.random();
                assert_alignment(&mut region, relative_offset, region_offset, v as u8);
                assert_alignment(&mut region, relative_offset, region_offset, v as u16);
                assert_alignment(&mut region, relative_offset, region_offset, v as u32);
                assert_alignment(&mut region, relative_offset, region_offset, v as u64);
            }
            // Throw away the first byte to advance the region's bounds.
            let _ = region.split_off(1);
        }
    }

    #[test]
    fn test_wrapped_alignment() {
        // Test all combinations of how the wrapped MMIO region's alignment, the region start, and
        // region-relative offsets may stack with respect to alignment.
        struct OffsetMmio(usize);
        impl UnsafeMmio for OffsetMmio {
            fn len(&self) -> usize {
                isize::MAX as usize
            }

            fn align_offset(&self, align: usize) -> usize {
                align.wrapping_sub(self.0) % align
            }

            unsafe fn load8_unchecked(&self, _offset: usize) -> u8 {
                unreachable!()
            }

            unsafe fn load16_unchecked(&self, _offset: usize) -> u16 {
                unreachable!()
            }

            unsafe fn load32_unchecked(&self, _offset: usize) -> u32 {
                unreachable!()
            }

            unsafe fn load64_unchecked(&self, _offset: usize) -> u64 {
                unreachable!()
            }

            unsafe fn store8_unchecked(&self, _offset: usize, _value: u8) {
                unreachable!()
            }

            unsafe fn store16_unchecked(&self, _offset: usize, _value: u16) {
                unreachable!()
            }

            unsafe fn store32_unchecked(&self, _offset: usize, _value: u32) {
                unreachable!()
            }

            unsafe fn store64_unchecked(&self, _offset: usize, _value: u64) {
                unreachable!()
            }
        }

        // Loop through all combinations of the wrapped offset, region_start and relative_offset
        // over 2x the size of the largest operand in order to exercise the modular arithmetic.
        for wrapped_offset in 0..16 {
            let offset_mmio = OffsetMmio(wrapped_offset);
            let mut region = MmioRegion::new(offset_mmio).into_split();

            for region_start in 0..16 {
                let absolute_region_start = wrapped_offset + region_start;
                assert_eq!(region.align_offset(1), 0);
                assert_eq!(region.align_offset(2), 2_usize.wrapping_sub(absolute_region_start) % 2);
                assert_eq!(region.align_offset(4), 4_usize.wrapping_sub(absolute_region_start) % 4);
                assert_eq!(region.align_offset(8), 8_usize.wrapping_sub(absolute_region_start) % 8);

                for relative_offset in 0..16 {
                    let absolute_offset = wrapped_offset + region_start + relative_offset;

                    // Every offset is suitably aligned for u8.
                    assert_eq!(region.check_aligned_for::<u8>(relative_offset), Ok(()));
                    assert_eq!(
                        region.check_aligned_for::<u16>(relative_offset),
                        if absolute_offset % 2 == 0 { Ok(()) } else { Err(MmioError::Unaligned) }
                    );
                    assert_eq!(
                        region.check_aligned_for::<u32>(relative_offset),
                        if absolute_offset % 4 == 0 { Ok(()) } else { Err(MmioError::Unaligned) }
                    );
                    assert_eq!(
                        region.check_aligned_for::<u64>(relative_offset),
                        if absolute_offset % 8 == 0 { Ok(()) } else { Err(MmioError::Unaligned) }
                    );
                }
                // Drop the first byte to advance the region's offset.
                let _ = region.split_off(1);
            }
        }
    }
}