// mmio/region.rs
// Copyright 2025 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! Support for implementing splittable MMIO regions.
//!
//! This module defines the [MmioRegion] type which provides safe [Mmio] and [MmioSplit]
//! implementations on top of the more relaxed [UnsafeMmio] trait.
//!
//! The [UnsafeMmio] trait allows mutations through a shared reference, provided the caller
//! ensures that store operations are not performed concurrently with any other operation that
//! may overlap it.
//!
//! Implementing [UnsafeMmio] correctly is likely to be simpler than implementing [Mmio] and
//! [MmioSplit] for many use cases.
use core::borrow::Borrow;
use core::marker::PhantomData;
use core::ops::Range;
use crate::{Mmio, MmioError, MmioExt, MmioSplit};
use std::rc::Rc;
use std::sync::Arc;
23
/// An MMIO region that can be stored to through a shared reference.
///
/// This trait places safety obligations on its callers, but in exchange enables a generic
/// implementation of [MmioSplit]. See [MmioRegion], which wraps this trait behind a safe API.
///
/// It exists primarily to simplify implementing the [MmioSplit] trait rather than for direct
/// use. Calling [UnsafeMmio] directly is nevertheless possible, provided every safety
/// requirement below is honored.
///
/// # Safety
/// - Callers must ensure that stores are never performed concurrently with any other operation on
///   an overlapping range.
/// - Concurrent loads are allowed on overlapping ranges.
/// - Callers must ensure that offsets are suitably aligned for the type being loaded or stored.
pub trait UnsafeMmio {
    /// Returns the size, in bytes, of the underlying MMIO region that can be accessed through
    /// this object.
    fn len(&self) -> usize;

    /// Returns true if the MMIO region has a length of 0.
    fn is_empty(&self) -> bool {
        matches!(self.len(), 0)
    }

    /// Returns the first offset into this MMIO region that is suitably aligned for `align`.
    ///
    /// An offset is suitably aligned if `offset = align_offset(align) + i * align` for some `i`.
    fn align_offset(&self, align: usize) -> usize;

    /// Loads a u8 from this MMIO region at the given offset.
    ///
    /// # Safety
    /// See the trait-level documentation.
    unsafe fn load8_unchecked(&self, offset: usize) -> u8;

    /// Loads a u16 from this MMIO region at the given offset.
    ///
    /// # Safety
    /// See the trait-level documentation.
    unsafe fn load16_unchecked(&self, offset: usize) -> u16;

    /// Loads a u32 from this MMIO region at the given offset.
    ///
    /// # Safety
    /// See the trait-level documentation.
    unsafe fn load32_unchecked(&self, offset: usize) -> u32;

    /// Loads a u64 from this MMIO region at the given offset.
    ///
    /// # Safety
    /// See the trait-level documentation.
    unsafe fn load64_unchecked(&self, offset: usize) -> u64;

    /// Stores a u8 to this MMIO region at the given offset.
    ///
    /// # Safety
    /// See the trait-level documentation.
    unsafe fn store8_unchecked(&self, offset: usize, v: u8);

    /// Stores a u16 to this MMIO region at the given offset.
    ///
    /// # Safety
    /// See the trait-level documentation.
    unsafe fn store16_unchecked(&self, offset: usize, v: u16);

    /// Stores a u32 to this MMIO region at the given offset.
    ///
    /// # Safety
    /// See the trait-level documentation.
    unsafe fn store32_unchecked(&self, offset: usize, v: u32);

    /// Stores a u64 to this MMIO region at the given offset.
    ///
    /// # Safety
    /// See the trait-level documentation.
    unsafe fn store64_unchecked(&self, offset: usize, v: u64);

    /// Issues a memory write barrier: every store preceding the barrier is guaranteed to
    /// appear to have happened before any store following it.
    fn write_barrier(&self);
}
106
/// An `MmioRegion` provides a safe implementation of [Mmio] and [MmioSplit] on top of an
/// [UnsafeMmio] implementation.
///
/// [UnsafeMmio] requires that stores are never performed concurrently with any other operation
/// on an overlapping range. `MmioRegion` upholds that contract — even when split into
/// independently owned pieces — because:
///
/// 1. An MmioRegion has exclusive ownership of a sub-region of the wrapped [UnsafeMmio]
///    implementation (required by [MmioRegion::new]).
/// 2. Every operation it issues is fully contained within the region it owns.
/// 3. All stores go through a mutable reference, making a store exclusive with every other
///    operation on the owned region.
/// 4. Splitting transfers a sub-range: the split-off region receives a range the parent owned
///    before the split and owns it exclusively afterwards.
///
/// # Type Parameters
/// An MmioRegion is parameterized by two types:
/// - `Impl`: the [UnsafeMmio] implementation wrapped by this region.
/// - `Owner`: an object with shared ownership of the `Impl` instance.
///
/// An MmioRegion is splittable if `Owner` can be cloned.
pub struct MmioRegion<Impl, Owner = Impl> {
    // Shared handle that keeps the wrapped `Impl` alive.
    owner: Owner,
    // Absolute byte range within the wrapped `Impl` that this region exclusively owns.
    bounds: Range<usize>,
    // Ties the region to `Impl` without storing one directly.
    phantom: PhantomData<Impl>,
}
136
137impl<U: UnsafeMmio> MmioRegion<U> {
138    /// Create a new `MmioRegion` that has exclusive ownership of the entire range.
139    ///
140    /// The returned object is guaranteed to be the only one capable of referencing any value in
141    /// the range. It can be converted into one that can be split
142    pub fn new(inner: U) -> Self {
143        let bounds = 0..inner.len();
144        let owner = inner;
145        Self { owner, bounds, phantom: PhantomData }
146    }
147
148    /// Converts this region into one which can be split.
149    pub fn into_split(self) -> MmioRegion<U, Rc<U>> {
150        let owner = Rc::new(self.owner);
151        let bounds = self.bounds;
152        // Safety:
153        // - this region exclusively owns its bounds.
154        // - ownership of the UnsafeMmio is transferred into the Rc.
155        // - the returned region has the same bounds as self did at the start of the call.
156        unsafe { MmioRegion::<U, _>::new_unchecked(owner, bounds) }
157    }
158}
159
160impl<U: UnsafeMmio + Send + Sync> MmioRegion<U> {
161    /// Converts this region into one which can be split and sent.
162    pub fn into_split_send(self) -> MmioRegion<U, Arc<U>> {
163        let owner = Arc::new(self.owner);
164        let bounds = self.bounds;
165        // Safety:
166        // - this region exclusively owns its bounds.
167        // - ownership of the UnsafeMmio is transferred into the Arc.
168        // - the returned region has the same bounds as self did at the start of the call.
169        unsafe { MmioRegion::<U, _>::new_unchecked(owner, bounds) }
170    }
171}
172
173impl<Impl: UnsafeMmio, Owner: Borrow<Impl>> MmioRegion<Impl, Owner> {
174    /// Create an MmioRegion that constrains all operations to the underlying wrapped UnsafeMmio
175    /// to be within the given bounds.
176    ///
177    /// # Safety
178    /// - For the lifetime of this MmioRegion or any split off from it the given range must only be
179    ///   accessed through this MmioRegion or a region split off from it.
180    unsafe fn new_unchecked(owner: Owner, bounds: Range<usize>) -> Self {
181        Self { owner, bounds, phantom: PhantomData }
182    }
183
184    /// Resolves the offset relative to the start of this MmioRegion's bounds, provided that offset
185    /// is suitably aligned for type T and there is sufficient capacity within this MmioRegion's
186    /// bounds at the given offset.
187    fn resolve_offset<T>(&self, offset: usize) -> Result<usize, MmioError> {
188        self.check_suitable_for::<T>(offset)?;
189        Ok(self.bounds.start + offset)
190    }
191}
192
193impl<Impl: UnsafeMmio, Owner: Borrow<Impl>> Mmio for MmioRegion<Impl, Owner> {
194    fn len(&self) -> usize {
195        self.bounds.len()
196    }
197
198    fn align_offset(&self, align: usize) -> usize {
199        // Determine the first offset into the wrapped region that is correctly aligned.
200        let first_aligned_offset = self.owner.borrow().align_offset(align);
201
202        // An aligned offset is any where offset = first_aligned_offset + i * align.
203        // Or where (offset - first_aligned_offset) % align = 0.
204        //
205        // For offsets relative to the start of this region, they are aligned if:
206        // (rel_offset + region_start - first_aligned_offset) % align = 0.
207        // or rel_offset % align = (first_aligned_offset - region_start) % align
208        //
209        // Therefore, the first aligned offset, relative to the start of this region, is:
210        // (first_aligned_offset - region_start) % align.
211        first_aligned_offset.wrapping_sub(self.bounds.start) % align
212    }
213
214    fn try_load8(&self, offset: usize) -> Result<u8, MmioError> {
215        let offset = self.resolve_offset::<u8>(offset)?;
216        // Safety:
217        // - this region exclusively owns its covered range (required by safety constraints)
218        // - the immutable receiver excludes stores for this entire range
219        Ok(unsafe { self.owner.borrow().load8_unchecked(offset) })
220    }
221
222    fn try_load16(&self, offset: usize) -> Result<u16, MmioError> {
223        let offset = self.resolve_offset::<u16>(offset)?;
224        // Safety:
225        // - this region exclusively owns its covered range (required by safety constraints)
226        // - the immutable receiver excludes stores for this entire range
227        Ok(unsafe { self.owner.borrow().load16_unchecked(offset) })
228    }
229
230    fn try_load32(&self, offset: usize) -> Result<u32, MmioError> {
231        let offset = self.resolve_offset::<u32>(offset)?;
232        // Safety:
233        // - this region exclusively owns its covered range (required by safety constraints)
234        // - the immutable receiver excludes stores for this entire range
235        Ok(unsafe { self.owner.borrow().load32_unchecked(offset) })
236    }
237
238    fn try_load64(&self, offset: usize) -> Result<u64, MmioError> {
239        let offset = self.resolve_offset::<u64>(offset)?;
240        // Safety:
241        // - this region exclusively owns its covered range (required by safety constraints)
242        // - the immutable receiver excludes stores for this entire range
243        Ok(unsafe { self.owner.borrow().load64_unchecked(offset) })
244    }
245
246    fn try_store8(&mut self, offset: usize, v: u8) -> Result<(), MmioError> {
247        let offset = self.resolve_offset::<u8>(offset)?;
248        // Safety:
249        // - this region exclusively owns its covered range (required by safety constraints)
250        // - the mutable receiver excludes all other operations for this entire range
251        unsafe {
252            self.owner.borrow().store8_unchecked(offset, v);
253        }
254        Ok(())
255    }
256
257    fn try_store16(&mut self, offset: usize, v: u16) -> Result<(), MmioError> {
258        let offset = self.resolve_offset::<u16>(offset)?;
259        // Safety:
260        // - this region exclusively owns its covered range (required by safety constraints)
261        // - the mutable receiver excludes all other operations for this entire range
262        unsafe {
263            self.owner.borrow().store16_unchecked(offset, v);
264        }
265        Ok(())
266    }
267
268    fn try_store32(&mut self, offset: usize, v: u32) -> Result<(), MmioError> {
269        let offset = self.resolve_offset::<u32>(offset)?;
270        // Safety:
271        // - this region exclusively owns its covered range (required by safety constraints)
272        // - the mutable receiver excludes all other operations for this entire range
273        unsafe {
274            self.owner.borrow().store32_unchecked(offset, v);
275        }
276        Ok(())
277    }
278
279    fn try_store64(&mut self, offset: usize, v: u64) -> Result<(), MmioError> {
280        let offset = self.resolve_offset::<u64>(offset)?;
281        // Safety:
282        // - this region exclusively owns its covered range (required by safety constraints)
283        // - the mutable receiver excludes all other operations for this entire range
284        unsafe {
285            self.owner.borrow().store64_unchecked(offset, v);
286        }
287        Ok(())
288    }
289
290    fn write_barrier(&self) {
291        self.owner.borrow().write_barrier();
292    }
293}
294
295impl<Impl: UnsafeMmio, Owner: Borrow<Impl> + Clone> MmioSplit for MmioRegion<Impl, Owner> {
296    fn try_split_off(&mut self, mid: usize) -> Result<Self, MmioError> {
297        if mid > self.len() {
298            return Err(MmioError::OutOfRange);
299        }
300
301        // Resolve the midpoint to an absolute offset.
302        let mid = self.bounds.start + mid;
303
304        // Split the bounds into two disjoint ranges.
305        let lhs = self.bounds.start..mid;
306        let rhs = mid..self.bounds.end;
307
308        // Relinquish ownership of the lhs.
309        self.bounds = rhs;
310
311        // Safety:
312        // - this region exclusively owns its covered range (required by safety constraints)
313        // - the mutable receiver excludes all other operations for this entire range
314        // - this mmio region splits off a portion of its owned range and relinquishes ownership of
315        // it before returning
316        // - the returned MmioRegion owns a range that was owned by this MmioRegion at the start of
317        // this call and no longer is
318        Ok(unsafe { Self::new_unchecked(self.owner.clone(), lhs) })
319    }
320}
321
#[cfg(test)]
mod tests {
    use super::*;
    use crate::MmioOperand;
    use fuchsia_sync::RwLock;
    use rand::Rng;
    use std::sync::Barrier;
    use std::thread::sleep;
    use std::time::Duration;

    /// An UnsafeMmio implementation that dynamically checks violations of the safety requirements:
    /// - a store concurrent with another operation on a memory range
    /// - an unaligned access
    ///
    /// This implementation will panic if an unaligned operation is issued.
    ///
    /// This implementation *might* panic on unsafe concurrent usage. If it does panic in this case
    /// there was unsafe concurrent usage, however the lack of a panic doesn't guarantee all usage
    /// was safe. The mean_op_duration parameter to new controls how long the average borrow will
    /// last - increasing this can make it more likely that unsafe usage will be detected.
    struct CheckedRegisters {
        // One lock per byte: a failed try_read/try_write means two operations raced.
        cells: Vec<RwLock<u8>>,
        // Mean time, in seconds, that each operation holds its per-byte locks.
        mean_op_duration: f32,
    }

    impl CheckedRegisters {
        /// Creates a zero-filled register file of `len` bytes whose operations hold their
        /// locks for `mean_op_duration` on average.
        fn new(len: usize, mean_op_duration: Duration) -> Self {
            let mut cells = Vec::new();
            cells.resize_with(len, || RwLock::new(0));

            let mean_op_duration = mean_op_duration.as_secs_f32();

            Self { cells, mean_op_duration }
        }

        /// Sleeps for a randomized duration averaging `mean_op_duration` seconds.
        fn sleep(&self) {
            // model op duration as a poisson process to get some jitter. The uniform sample is
            // clamped away from zero so that ln() stays finite.
            let uniform_sample: f32 = rand::random::<f32>().max(0.000001);
            let duration_secs = -self.mean_op_duration * uniform_sample.ln();
            sleep(Duration::from_secs_f32(duration_secs));
        }

        /// Reads `N` bytes starting at `start`, panicking if any byte is concurrently being
        /// stored to.
        fn load<const N: usize>(&self, start: usize) -> [u8; N] {
            let borrows: [_; N] = core::array::from_fn(|i| {
                self.cells[start + i]
                    .try_read()
                    .expect("attempt to load from an address that is being stored to")
            });

            // Sleep while borrowing these cells to increase the chance that unsafe usage will be
            // detected.
            self.sleep();

            borrows.map(|r| *r)
        }

        /// Writes `N` bytes starting at `start`, panicking if any byte is concurrently being
        /// accessed by any other operation.
        fn store<const N: usize>(&self, start: usize, bytes: [u8; N]) {
            let borrows: [_; N] = core::array::from_fn(|i| {
                self.cells[start + i]
                    .try_write()
                    .expect("attempt to store to an address concurrently with another operation")
            });

            // Sleep while borrowing these cells to increase the chance that unsafe usage will be
            // detected.
            self.sleep();

            borrows.into_iter().zip(bytes).for_each(|(mut r, b)| *r = b);
        }
    }

    impl UnsafeMmio for CheckedRegisters {
        fn len(&self) -> usize {
            self.cells.len()
        }

        // The simulated register file behaves as if it starts perfectly aligned.
        fn align_offset(&self, _align: usize) -> usize {
            0
        }

        unsafe fn load8_unchecked(&self, offset: usize) -> u8 {
            self.load::<1>(offset)[0]
        }

        unsafe fn load16_unchecked(&self, offset: usize) -> u16 {
            assert_eq!(offset % 2, 0);
            u16::from_le_bytes(self.load::<2>(offset))
        }

        unsafe fn load32_unchecked(&self, offset: usize) -> u32 {
            assert_eq!(offset % 4, 0);
            u32::from_le_bytes(self.load::<4>(offset))
        }

        unsafe fn load64_unchecked(&self, offset: usize) -> u64 {
            assert_eq!(offset % 8, 0);
            u64::from_le_bytes(self.load::<8>(offset))
        }

        unsafe fn store8_unchecked(&self, offset: usize, v: u8) {
            self.store::<1>(offset, [v]);
        }

        unsafe fn store16_unchecked(&self, offset: usize, v: u16) {
            assert_eq!(offset % 2, 0);
            self.store::<2>(offset, v.to_le_bytes())
        }

        unsafe fn store32_unchecked(&self, offset: usize, v: u32) {
            assert_eq!(offset % 4, 0);
            self.store::<4>(offset, v.to_le_bytes())
        }

        unsafe fn store64_unchecked(&self, offset: usize, v: u64) {
            assert_eq!(offset % 8, 0);
            self.store::<8>(offset, v.to_le_bytes())
        }

        fn write_barrier(&self) {
            // NOP
        }
    }

    // Splits a region across many threads and hammers it with random, sometimes out-of-range
    // loads and stores. CheckedRegisters panics if two threads ever touch the same byte
    // unsafely, so a clean run is evidence of MmioRegion's ownership discipline.
    #[test]
    fn test_memory_region_thread_safety() {
        // The number of concurrent threads.
        const CONCURRENCY: usize = 64;

        // The number of bytes each thread owns. Must be a non-zero multiple of 8.
        const BYTES_PER_THREAD: usize = 8;

        // The average time for an operation to hold a borrow.
        const MEAN_OP_TIME: Duration = Duration::from_micros(100);

        // The number of ops to perform per thread. At 100us per op the minimum sleep time per
        // thread should be around 0.5s.
        const THREAD_OP_COUNT: usize = 5000;

        // The total size of the Mmio region.
        const LEN: usize = CONCURRENCY * BYTES_PER_THREAD;

        // These are required for test correctness.
        assert_ne!(BYTES_PER_THREAD, 0);
        assert_eq!(BYTES_PER_THREAD % 8, 0);

        let registers = CheckedRegisters::new(LEN, MEAN_OP_TIME);
        // Safety:
        // - CheckedRegisters only references memory it owns
        // - MmioRegion takes ownership of the CheckedRegisters object
        let mut region = MmioRegion::new(registers).into_split_send();

        let barrier = Barrier::new(CONCURRENCY);

        std::thread::scope(|s| {
            let barrier = &barrier;
            for _ in 0..CONCURRENCY {
                // Each thread gets exclusive ownership of the next BYTES_PER_THREAD bytes.
                let mut split = region.split_off(BYTES_PER_THREAD);
                s.spawn(move || {
                    let mut rng = rand::rng();

                    // Wait until threads are ready to start to increase the chance of a race.
                    barrier.wait();

                    for _i in 0..THREAD_OP_COUNT {
                        let offset = rng.random_range(0..BYTES_PER_THREAD);
                        let op = rng.random_range(0usize..8);

                        // Ops 0-3 are loads of 1/2/4/8 bytes; ops 4-7 are matching stores.
                        let size = 1 << (op % 4);
                        // Choose a random offset from 0 to 2x the size of this region. MmioRegion
                        // should prevent reading out of bounds.
                        let offset = offset.next_multiple_of(size) % (BYTES_PER_THREAD * 2);

                        // We don't care whether these operations fail.
                        let _ = match op {
                            0 => split.try_load8(offset).err(),
                            1 => split.try_load16(offset).err(),
                            2 => split.try_load32(offset).err(),
                            3 => split.try_load64(offset).err(),
                            4 => split.try_store8(offset, rng.random()).err(),
                            5 => split.try_store16(offset, rng.random()).err(),
                            6 => split.try_store32(offset, rng.random()).err(),
                            7 => split.try_store64(offset, rng.random()).err(),
                            _ => unreachable!(),
                        };
                    }
                });
            }
        });
    }

    // Verifies that check_suitable_for, try_store and try_load agree on which offsets are
    // aligned as the region's start offset advances one byte at a time.
    #[test]
    fn test_alignment() {
        const LEN: usize = 64;
        let registers = CheckedRegisters::new(LEN, Duration::ZERO);
        let mut region = MmioRegion::new(registers).into_split();
        let mut rng = rand::rng();

        // Asserts that a store+load of `v` at `offset` succeeds exactly when the absolute
        // offset (region start + relative offset) is aligned for T.
        fn assert_alignment<M: Mmio, T: MmioOperand>(
            mmio: &mut M,
            offset: usize,
            region_offset: usize,
            v: T,
        ) {
            let absolute_offset = offset + region_offset;
            let is_aligned = absolute_offset.is_multiple_of(align_of::<T>());
            let expected_res = if is_aligned { Ok(()) } else { Err(MmioError::Unaligned) };
            assert_eq!(mmio.check_suitable_for::<T>(offset), expected_res);
            assert_eq!(mmio.try_store(offset, v), expected_res);
            assert_eq!(mmio.try_load(offset), expected_res.map(|_| v));
        }

        for region_offset in 0..8 {
            // Do at least two cycles of the largest operand alignment to test modular arithmetic.
            for relative_offset in 0..16 {
                let v: u64 = rng.random();
                assert_alignment(&mut region, relative_offset, region_offset, v as u8);
                assert_alignment(&mut region, relative_offset, region_offset, v as u16);
                assert_alignment(&mut region, relative_offset, region_offset, v as u32);
                assert_alignment(&mut region, relative_offset, region_offset, v);
            }
            // Throw away the first byte to advance the region's bounds.
            let _ = region.split_off(1);
        }
    }

    // Verifies align_offset / check_aligned_for when the wrapped UnsafeMmio itself reports a
    // non-zero starting misalignment.
    #[test]
    fn test_wrapped_alignment() {
        // Test all combinations for how the wrapped MMIO regions alignment, region start and region
        // relative offsets may stack with respect to alignment.
        struct OffsetMmio(usize);
        impl UnsafeMmio for OffsetMmio {
            fn len(&self) -> usize {
                isize::MAX as usize
            }

            // Reports a region that starts `self.0` bytes past an aligned boundary.
            fn align_offset(&self, align: usize) -> usize {
                align.wrapping_sub(self.0) % align
            }

            unsafe fn load8_unchecked(&self, _offset: usize) -> u8 {
                unreachable!()
            }

            unsafe fn load16_unchecked(&self, _offset: usize) -> u16 {
                unreachable!()
            }

            unsafe fn load32_unchecked(&self, _offset: usize) -> u32 {
                unreachable!()
            }

            unsafe fn load64_unchecked(&self, _offset: usize) -> u64 {
                unreachable!()
            }

            unsafe fn store8_unchecked(&self, _offset: usize, _value: u8) {
                unreachable!()
            }

            unsafe fn store16_unchecked(&self, _offset: usize, _value: u16) {
                unreachable!()
            }

            unsafe fn store32_unchecked(&self, _offset: usize, _value: u32) {
                unreachable!()
            }

            unsafe fn store64_unchecked(&self, _offset: usize, _value: u64) {
                unreachable!()
            }

            fn write_barrier(&self) {
                unreachable!()
            }
        }

        // Loop through all combinations of the wrapped offset, region_start and relative_offset
        // for 2x the size of the largest operand in order to test all combinations of these in the
        // face of the modular arithmetic.
        for wrapped_offset in 0..16 {
            let offset_mmio = OffsetMmio(wrapped_offset);
            let mut region = MmioRegion::new(offset_mmio).into_split();

            for region_start in 0..16 {
                let absolute_region_start = wrapped_offset + region_start;
                assert_eq!(region.align_offset(1), 0);
                assert_eq!(region.align_offset(2), 2_usize.wrapping_sub(absolute_region_start) % 2);
                assert_eq!(region.align_offset(4), 4_usize.wrapping_sub(absolute_region_start) % 4);
                assert_eq!(region.align_offset(8), 8_usize.wrapping_sub(absolute_region_start) % 8);

                for relative_offset in 0..16 {
                    let absolute_offset = wrapped_offset + region_start + relative_offset;

                    // Every offset is suitably aligned for u8.
                    assert_eq!(region.check_aligned_for::<u8>(relative_offset), Ok(()));
                    assert_eq!(
                        region.check_aligned_for::<u16>(relative_offset),
                        if absolute_offset % 2 == 0 { Ok(()) } else { Err(MmioError::Unaligned) }
                    );
                    assert_eq!(
                        region.check_aligned_for::<u32>(relative_offset),
                        if absolute_offset % 4 == 0 { Ok(()) } else { Err(MmioError::Unaligned) }
                    );
                    assert_eq!(
                        region.check_aligned_for::<u64>(relative_offset),
                        if absolute_offset % 8 == 0 { Ok(()) } else { Err(MmioError::Unaligned) }
                    );
                }
                // Drop the first byte to advance the region's offset.
                let _ = region.split_off(1);
            }
        }
    }
}