// Copyright 2025 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! MMIO regions backed by Fuchsia Virtual Memory Objects.

use crate::memory::Memory;
use crate::region::MmioRegion;
use core::ptr::{with_exposed_provenance_mut, NonNull};
use zx::{CachePolicy, VmarFlags, Vmo};
use zx_status::Status;

/// An active mapping of a Vmo in the root Vmar.
///
/// The memory remains mapped for as long as the `VmoMapping` object is alive.
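///
/// # Example
///
/// An illustrative sketch of the RAII behavior (`vmo` is a placeholder handle):
///
/// ```ignore
/// {
///     let region = VmoMapping::map(0, 4096, vmo)?;
///     // The memory stays mapped while `region` (and the mapping inside it) is alive...
/// } // ...and is unmapped here when `region` is dropped.
/// ```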
pub struct VmoMapping {
    map_addr: usize,
    map_size: usize,
}

/// A Memory region backed by Vmo mapped memory.
pub type VmoMemory = Memory<VmoMapping>;

impl VmoMapping {
    /// Map the specified memory range of the given Vmo into the root Vmar for this process and
    /// return an object that maintains the mapping for its lifetime.
    ///
    /// Errors:
    /// - [Status::OUT_OF_RANGE]: if `size > isize::MAX`.
    /// - [Status::OUT_OF_RANGE]: if the requested region falls outside the Vmo's memory.
    /// - An error returned by [zx::Vmar::map]: if the mapping fails.
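    ///
    /// # Example
    ///
    /// A minimal usage sketch (illustrative only; assumes `vmo` is a handle to a
    /// device register block at least 4096 bytes long):
    ///
    /// ```ignore
    /// let region = VmoMapping::map(0, 4096, vmo)?;
    /// // Reads and writes then go through the crate's Mmio interface:
    /// let value = region.try_load8(0);
    /// ```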
    pub fn map(offset: usize, size: usize, vmo: Vmo) -> Result<MmioRegion<VmoMemory>, Status> {
        Self::map_with_cache_policy(offset, size, vmo, CachePolicy::UnCachedDevice)
    }

    fn map_with_cache_policy(
        offset: usize,
        size: usize,
        vmo: Vmo,
        cache_policy: CachePolicy,
    ) -> Result<MmioRegion<VmoMemory>, Status> {
        // Rust allocations and pointer offsets cannot exceed isize::MAX bytes, so larger
        // regions are unrepresentable.
        if size > isize::MAX as usize {
            return Err(Status::OUT_OF_RANGE);
        }

        let page_size = zx::system_get_page_size() as usize;
        // Determine how far the offset is into its containing page.
        let page_offset = offset % page_size;

        // Round the offset down to a page boundary.
        let offset = (offset - page_offset) as u64;

        // Round the mapped size up so it covers complete pages.
        let map_size = (size + page_offset).next_multiple_of(page_size);
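        // Worked example (assuming 4 KiB pages): offset = 0x1080 and size = 0x100 give
        // page_offset = 0x80, a page-aligned offset of 0x1000, and
        // map_size = (0x100 + 0x80).next_multiple_of(0x1000) = 0x1000, so the single
        // mapped page covers the whole requested range.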

        let info = vmo.info()?;
        // Switch the Vmo to the requested cache policy if it is not already using it.
        if info.cache_policy() != cache_policy {
            vmo.set_cache_policy(cache_policy)?;
        }
        // Reject requests that extend past the end of the Vmo.
        if offset.saturating_add(map_size as u64) > info.size_bytes {
            return Err(Status::OUT_OF_RANGE);
        }

        let root_self = fuchsia_runtime::vmar_root_self();
        let map_addr = root_self.map(
            0,
            &vmo,
            offset,
            map_size,
            VmarFlags::PERM_READ | VmarFlags::PERM_WRITE | VmarFlags::MAP_RANGE,
        )?;
        // Point back into the mapping at the caller's requested (unaligned) offset.
        let base_ptr =
            NonNull::<u8>::new(with_exposed_provenance_mut(map_addr + page_offset)).unwrap();
        let len = size;

        let mapping = Self { map_addr, map_size };

        // Safety:
        // - The range from `base_ptr` to `base_ptr + len` lies within the range exclusively
        //   owned by the mapping.
        // - `mapping` is used as the claim, which keeps the memory mapped (and therefore
        //   valid) for the lifetime of the claim.
        let memory = unsafe { Memory::new_unchecked(mapping, base_ptr, len) };
        Ok(MmioRegion::new(memory))
    }
}

/// Unmaps the memory when the `VmoMapping` is dropped.
impl Drop for VmoMapping {
    fn drop(&mut self) {
        let root_self = fuchsia_runtime::vmar_root_self();
        // Safety:
        // - This object only exposes the mapped memory range through the `memory_range`
        //   function, whose safety requirements oblige the caller to use this memory only
        //   while the VmoMapping is alive.
        let _ = unsafe { root_self.unmap(self.map_addr, self.map_size) };
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::Mmio;
    use zx::{HandleBased, Rights};

    #[test]
    fn test_mapping() {
        const TEST_LEN: usize = 256;
        let vmo = Vmo::create(TEST_LEN as u64).unwrap();

        let mut mmio = VmoMapping::map(0, TEST_LEN, vmo).unwrap().into_split_send();

        for i in 0..TEST_LEN {
            assert_eq!(mmio.try_store8(i, i as u8), Ok(()));
        }

        for i in 0..TEST_LEN {
            assert_eq!(mmio.try_load8(i), Ok(i as u8));
        }
    }

    #[test]
    fn test_page_offset() {
        const VMO_SIZE: u64 = 1024;
        let vmo = Vmo::create(VMO_SIZE).unwrap();

        // Write the offset of every 16-bit location into that location.
        for i in (0..VMO_SIZE).step_by(2) {
            let addr = i as u16;
            vmo.write(&addr.to_le_bytes(), i).unwrap();
        }

        const TEST_OFFSET: usize = 128;
        const TEST_LEN: usize = 256;
        let mmio = VmoMapping::map(TEST_OFFSET, TEST_LEN, vmo).unwrap();

        for i in (0..TEST_LEN).step_by(2) {
            assert_eq!(mmio.try_load16(i), Ok((i + TEST_OFFSET) as u16));
        }
    }

    #[test]
    fn test_mapping_unmaps() {
        const TEST_LEN: usize = 256;
        let vmo = Vmo::create(TEST_LEN as u64).unwrap();
        let vmo_read_handle: Vmo = vmo.duplicate_handle(Rights::READ).unwrap();

        {
            let _mapping = VmoMapping::map(0, TEST_LEN, vmo).unwrap();
            // The vmo should be mapped exactly once.
            assert_eq!(vmo_read_handle.info().unwrap().num_mappings, 1);
        }

        // The mapping should have been unmapped by now.
        assert_eq!(vmo_read_handle.info().unwrap().num_mappings, 0);
    }
}
157}