mmio/memory.rs

// Copyright 2025 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! MMIO regions backed by memory.
//!
//! This module defines the primary [Mmio] implementation, backed by memory.
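//!
//! A minimal usage sketch, assuming the [Mmio] trait is in scope (the exact imports depend on how
//! this crate is consumed):
//!
//! ```ignore
//! let mut mem = core::mem::MaybeUninit::<[u64; 4]>::zeroed();
//! let mut mmio = Memory::borrow_uninit(&mut mem);
//! mmio.store64(0, 0x1234);
//! assert_eq!(mmio.load64(0), 0x1234);
//! ```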

use crate::arch;
use crate::region::{MmioRegion, UnsafeMmio};
use core::marker::PhantomData;
use core::mem::MaybeUninit;
use core::ptr::NonNull;

/// An exclusively owned region of memory.
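///
/// The `Claim` type parameter records what grants exclusive ownership of the region, for example a
/// [MutableBorrow] when the memory is borrowed through a mutable reference.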
pub struct Memory<Claim> {
    base_ptr: NonNull<u8>,
    len: usize,
    _claim: Claim,
}

// Safety: it is safe to send the pointer to another thread as accessing the pointer already
// requires the caller to handle thread safety.
unsafe impl<Claim: Send> Send for Memory<Claim> {}

// Safety: it is safe to access this pointer from multiple threads as accessing it already
// requires the caller to handle thread safety.
unsafe impl<Claim> Sync for Memory<Claim> {}

impl<Claim> Memory<Claim> {
    /// Creates an instance of `Memory` representing the region starting at `base_ptr` and
    /// extending for the subsequent `len` bytes.
    ///
    /// # Safety
    /// - The given memory must be exclusively owned by the returned object for the lifetime of the
    ///   claim.
    pub unsafe fn new_unchecked(claim: Claim, base_ptr: NonNull<u8>, len: usize) -> Self {
        Self { base_ptr, len, _claim: claim }
    }

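    /// Returns a typed pointer `offset` bytes into the region, panicking if the access would fall
    /// outside the region or would not be suitably aligned for `T`.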
    fn ptr<T>(&self, offset: usize) -> NonNull<T> {
        // If this fails the caller has not met the safety requirements. It's safer to panic than
        // it is to continue.
        assert!((offset + size_of::<T>()) <= self.len && offset <= isize::MAX as usize);
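        // Safety: the assert above guarantees the offset fits in an `isize` and that the pointer
        // stays within the exclusively owned region.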
        let ptr = unsafe { self.base_ptr.add(offset) };
        let ptr = ptr.cast::<T>();
        // If this fails the caller has not met the safety requirements. It's safer to panic than
        // it is to continue.
        assert!(ptr.is_aligned());
        ptr
    }
}

impl<Claim> UnsafeMmio for Memory<Claim> {
    fn len(&self) -> usize {
        self.len
    }

    fn align_offset(&self, align: usize) -> usize {
        self.base_ptr.align_offset(align)
    }

    unsafe fn load8_unchecked(&self, offset: usize) -> u8 {
        let ptr = self.ptr::<u8>(offset);
        // Safety: provided by this function's safety requirements.
        unsafe { arch::load8(ptr) }
    }

    unsafe fn load16_unchecked(&self, offset: usize) -> u16 {
        let ptr = self.ptr::<u16>(offset);
        // Safety: provided by this function's safety requirements.
        unsafe { arch::load16(ptr) }
    }

    unsafe fn load32_unchecked(&self, offset: usize) -> u32 {
        let ptr = self.ptr::<u32>(offset);
        // Safety: provided by this function's safety requirements.
        unsafe { arch::load32(ptr) }
    }

    unsafe fn load64_unchecked(&self, offset: usize) -> u64 {
        let ptr = self.ptr::<u64>(offset);
        // Safety: provided by this function's safety requirements.
        unsafe { arch::load64(ptr) }
    }

    unsafe fn store8_unchecked(&self, offset: usize, v: u8) {
        let ptr = self.ptr::<u8>(offset);
        // Safety: provided by this function's safety requirements.
        unsafe {
            arch::store8(ptr, v);
        }
    }

    unsafe fn store16_unchecked(&self, offset: usize, v: u16) {
        let ptr = self.ptr::<u16>(offset);
        // Safety: provided by this function's safety requirements.
        unsafe {
            arch::store16(ptr, v);
        }
    }

    unsafe fn store32_unchecked(&self, offset: usize, v: u32) {
        let ptr = self.ptr::<u32>(offset);
        // Safety: provided by this function's safety requirements.
        unsafe {
            arch::store32(ptr, v);
        }
    }

    unsafe fn store64_unchecked(&self, offset: usize, v: u64) {
        let ptr = self.ptr::<u64>(offset);
        // Safety: provided by this function's safety requirements.
        unsafe {
            arch::store64(ptr, v);
        }
    }
}

/// Represents a mutable borrow for the lifetime `'a`.
///
/// This is the claim type used when borrowing memory through mutable references; it is valid for
/// the lifetime `'a`.
pub struct MutableBorrow<'a>(PhantomData<&'a mut u8>);

impl<'a> Memory<MutableBorrow<'a>> {
    /// Borrows `mem` and returns an [MmioRegion] backed by the (possibly uninitialized) memory.
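    ///
    /// The returned region covers all of `mem` and is usable for as long as the mutable borrow of
    /// `mem` is held.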
    pub fn borrow_uninit<T>(mem: &'a mut MaybeUninit<T>) -> MmioRegion<Self> {
        let len = size_of_val(mem);
        let base_ptr = NonNull::from(mem).cast::<u8>();
        // Safety:
        // - the returned memory takes a mutable borrow of the passed mem.
        // - the returned memory range is completely within the passed mem.
        MmioRegion::new(unsafe { Self::new_unchecked(MutableBorrow(PhantomData), base_ptr, len) })
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::{Mmio, MmioError};

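    // Stores a value into every u64 register through the MMIO region, reads it back, and then
    // checks that the writes landed in the underlying memory.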
    #[test]
    fn test_aligned_access() {
        const NUM_REGISTERS: usize = 32;
        let mut mem = MaybeUninit::<[u64; NUM_REGISTERS]>::zeroed();
        let size = size_of_val(&mem);

        {
            let mut mmio = Memory::borrow_uninit(&mut mem);
            assert_eq!(size, mmio.len());
            for i in 0..NUM_REGISTERS {
                // check this returns the initial register memory state.
                assert_eq!(mmio.load64(i * size_of::<u64>()), 0);
            }

            for i in 0..NUM_REGISTERS {
                // write into the register memory.
                mmio.store64(i * size_of::<u64>(), i as u64);
            }

            for i in 0..NUM_REGISTERS {
                // ensure the stores occurred.
                assert_eq!(mmio.load64(i * size_of::<u64>()), i as u64);
            }
        }

        // Safety: any value of [u64; N] is valid.
        let registers = unsafe { mem.assume_init() };

        for (i, v) in registers.iter().copied().enumerate() {
            // ensure the stores modified the underlying memory range.
            assert_eq!(i as u64, v);
        }
    }

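    // Misaligned 16/32/64-bit accesses must fail with `MmioError::Unaligned`, while accesses at
    // the natural alignment succeed.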
    #[test]
    fn test_alignment_checking() {
        const NUM_REGISTERS: usize = 32;
        let mut mem = MaybeUninit::<[u64; NUM_REGISTERS]>::zeroed();
        let mut mmio = Memory::borrow_uninit(&mut mem);

        for i in 0..32 {
            assert_eq!(mmio.try_load8(i), Ok(0));
            assert_eq!(mmio.try_store8(i, 0), Ok(()));

            if i % 2 == 0 {
                assert_eq!(mmio.try_load16(i), Ok(0));
                assert_eq!(mmio.try_store16(i, 0), Ok(()));
            } else {
                assert_eq!(mmio.try_load16(i), Err(MmioError::Unaligned));
                assert_eq!(mmio.try_store16(i, 0), Err(MmioError::Unaligned));
            }

            if i % 4 == 0 {
                assert_eq!(mmio.try_load32(i), Ok(0));
                assert_eq!(mmio.try_store32(i, 0), Ok(()));
            } else {
                assert_eq!(mmio.try_load32(i), Err(MmioError::Unaligned));
                assert_eq!(mmio.try_store32(i, 0), Err(MmioError::Unaligned));
            }

            if i % 8 == 0 {
                assert_eq!(mmio.try_load64(i), Ok(0));
                assert_eq!(mmio.try_store64(i, 0), Ok(()));
            } else {
                assert_eq!(mmio.try_load64(i), Err(MmioError::Unaligned));
                assert_eq!(mmio.try_store64(i, 0), Err(MmioError::Unaligned));
            }
        }
    }

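    // Accesses that start at or beyond the end of the region must fail with
    // `MmioError::OutOfRange`.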
    #[test]
    fn test_bounds_checking() {
        const NUM_REGISTERS: usize = 32;
        let mut mem = MaybeUninit::<[u64; NUM_REGISTERS]>::zeroed();
        let size = size_of_val(&mem);
        let mut mmio = Memory::borrow_uninit(&mut mem);

        assert_eq!(mmio.try_load8(size), Err(MmioError::OutOfRange));
        assert_eq!(mmio.try_store8(size, 0), Err(MmioError::OutOfRange));

        assert_eq!(mmio.try_load16(size), Err(MmioError::OutOfRange));
        assert_eq!(mmio.try_store16(size, 0), Err(MmioError::OutOfRange));

        assert_eq!(mmio.try_load32(size), Err(MmioError::OutOfRange));
        assert_eq!(mmio.try_store32(size, 0), Err(MmioError::OutOfRange));

        assert_eq!(mmio.try_load64(size), Err(MmioError::OutOfRange));
        assert_eq!(mmio.try_store64(size, 0), Err(MmioError::OutOfRange));
    }

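    // Writes each register at one access width and reads every register back at every width,
    // checking that the different widths alias the same underlying bytes.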
    #[test]
    fn test_aliased_access() {
        const NUM_REGISTERS: usize = 32;
        let mut mem = MaybeUninit::<[u64; NUM_REGISTERS]>::zeroed();
        let mut mmio = Memory::borrow_uninit(&mut mem);

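        // Generates a distinct, recognizable byte pattern for register `i`.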
        fn test_bytes(i: usize) -> [u8; 8] {
            let i = i as u8;
            [
                i,
                i.wrapping_mul(3),
                i.wrapping_mul(5),
                i.wrapping_mul(7),
                i.wrapping_mul(11),
                i.wrapping_mul(13),
                i.wrapping_mul(17),
                i.wrapping_mul(19),
            ]
        }

        for i in 0..NUM_REGISTERS {
            let v = u64::from_le_bytes(test_bytes(i));
            mmio.store64(i * size_of::<u64>(), v.to_le());
        }

        for i in 0..NUM_REGISTERS / 4 {
            let bytes = test_bytes(i);
            let v64 = u64::from_le_bytes(bytes).to_le();
            let v32s = [
                u32::from_le_bytes(bytes[..4].try_into().unwrap()).to_le(),
                u32::from_le_bytes(bytes[4..].try_into().unwrap()).to_le(),
            ];
            let v16s = [
                u16::from_le_bytes(bytes[..2].try_into().unwrap()).to_le(),
                u16::from_le_bytes(bytes[2..4].try_into().unwrap()).to_le(),
                u16::from_le_bytes(bytes[4..6].try_into().unwrap()).to_le(),
                u16::from_le_bytes(bytes[6..8].try_into().unwrap()).to_le(),
            ];
            let v8s = bytes;

            let base_offset = 4 * i * size_of::<u64>();

            let r0 = base_offset;
            let r1 = base_offset + size_of::<u64>();
            let r2 = base_offset + 2 * size_of::<u64>();
            let r3 = base_offset + 3 * size_of::<u64>();

            // Store each register with a different granularity.
            mmio.store64(r0, v64);
            for (j, v) in v32s.iter().enumerate() {
                mmio.store32(r1 + j * size_of::<u32>(), *v);
            }
            for (j, v) in v16s.iter().enumerate() {
                mmio.store16(r2 + j * size_of::<u16>(), *v);
            }
            for (j, v) in v8s.iter().enumerate() {
                mmio.store8(r3 + j * size_of::<u8>(), *v);
            }

            // Now test loading each register back at the different granularities.
            for j in 0..4 {
                let r = base_offset + j * size_of::<u64>();
                assert_eq!(mmio.load64(r), v64);

                for (j, v) in v32s.iter().enumerate() {
                    assert_eq!(mmio.load32(r + j * size_of::<u32>()), *v);
                }

                for (j, v) in v16s.iter().enumerate() {
                    assert_eq!(mmio.load16(r + j * size_of::<u16>()), *v);
                }

                for (j, v) in v8s.iter().enumerate() {
                    assert_eq!(mmio.load8(r + j), *v);
                }
            }
        }
    }

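    // Exercises the `MmioExt` masked load/modify helpers against a single u64 register.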
    #[test]
    fn test_masked_access() {
        use crate::mmio::MmioExt;

        let mut mem = MaybeUninit::<u64>::zeroed();
        let mut mmio = Memory::borrow_uninit(&mut mem);

        mmio.store(0, 0xfedcba98_76543210_u64);
        assert_eq!(0xfedcba98_76543210, mmio.masked_load(0, u64::MAX));
        assert_eq!(0x00000000_76543210, mmio.masked_load(0, u32::MAX as u64));
        assert_eq!(0xfedcba98_00000000, mmio.masked_load(0, (u32::MAX as u64) << 32));
        assert_eq!(
            0xfedcba98_76543210 & 0xaaaaaaaa_aaaaaaaa,
            mmio.masked_load(0, 0xaaaaaaaa_aaaaaaaa_u64)
        );

        mmio.masked_modify(0, 0xff000000_00000000u64, 0);
        assert_eq!(0x00dcba98_76543210, mmio.masked_load(0, u64::MAX));
    }
331}