1use crate::memory::Memory;
8use crate::region::MmioRegion;
9use core::ptr::{with_exposed_provenance_mut, NonNull};
10use zx::{CachePolicy, VmarFlags, Vmo};
11use zx_status::Status;
12
/// RAII owner of a VMO mapping in the process's root VMAR; the mapped range
/// is removed when this value is dropped (see the `Drop` impl below).
pub struct VmoMapping {
    // Page-aligned base address of the mapping in the root VMAR.
    map_addr: usize,
    // Length of the mapping in bytes, rounded up to whole pages.
    map_size: usize,
}
20
/// [`Memory`] backed by a mapped [`Vmo`], kept alive by its [`VmoMapping`].
pub type VmoMemory = Memory<VmoMapping>;
23
24impl VmoMapping {
25 pub fn map(offset: usize, size: usize, vmo: Vmo) -> Result<MmioRegion<VmoMemory>, Status> {
33 Self::map_with_cache_policy(offset, size, vmo, CachePolicy::UnCachedDevice)
34 }
35
36 fn map_with_cache_policy(
37 offset: usize,
38 size: usize,
39 vmo: Vmo,
40 cache_policy: CachePolicy,
41 ) -> Result<MmioRegion<VmoMemory>, Status> {
42 if size > isize::MAX as usize {
43 return Err(Status::OUT_OF_RANGE);
44 }
45
46 let page_size = zx::system_get_page_size() as usize;
47 let page_offset = offset % page_size;
49
50 let offset = (offset - page_offset) as u64;
52
53 let map_size = (size + page_offset).next_multiple_of(page_size);
55
56 let info = vmo.info()?;
57 if info.cache_policy() != cache_policy {
58 vmo.set_cache_policy(cache_policy)?;
59 }
60 if offset.saturating_add(map_size as u64) > info.size_bytes {
61 return Err(Status::OUT_OF_RANGE);
62 }
63
64 let root_self = fuchsia_runtime::vmar_root_self();
65 let map_addr = root_self.map(
66 0,
67 &vmo,
68 offset,
69 map_size,
70 VmarFlags::PERM_READ | VmarFlags::PERM_WRITE | VmarFlags::MAP_RANGE,
71 )?;
72 let base_ptr =
73 NonNull::<u8>::new(with_exposed_provenance_mut(map_addr + page_offset)).unwrap();
74 let len = size;
75
76 let mapping = Self { map_addr, map_size };
77
78 let memory = unsafe { Memory::new_unchecked(mapping, base_ptr, len) };
84 Ok(MmioRegion::new(memory))
85 }
86}
87
88impl Drop for VmoMapping {
90 fn drop(&mut self) {
91 let root_self = fuchsia_runtime::vmar_root_self();
92 let _ = unsafe { root_self.unmap(self.map_addr, self.map_size) };
97 }
98}
99
#[cfg(test)]
mod tests {
    use super::*;
    use crate::Mmio;
    use zx::{HandleBased, Rights};

    // Round-trips byte stores and loads through a mapping that covers the
    // whole VMO.
    #[test]
    fn test_mapping() {
        const TEST_LEN: usize = 256;
        let vmo = Vmo::create(TEST_LEN as u64).unwrap();

        let mut mmio = VmoMapping::map(0, TEST_LEN, vmo).unwrap().into_split_send();

        for offset in 0..TEST_LEN {
            assert_eq!(mmio.try_store8(offset, offset as u8), Ok(()));
        }

        for offset in 0..TEST_LEN {
            assert_eq!(mmio.try_load8(offset), Ok(offset as u8));
        }
    }

    // A non-page-aligned map offset must be windowed so that region offset 0
    // corresponds to the requested VMO offset.
    #[test]
    fn test_page_offset() {
        const VMO_SIZE: u64 = 1024;
        let vmo = Vmo::create(VMO_SIZE).unwrap();

        // Fill the VMO with little-endian u16 values equal to their offset.
        for offset in (0..VMO_SIZE).step_by(2) {
            vmo.write(&(offset as u16).to_le_bytes(), offset).unwrap();
        }

        const TEST_OFFSET: usize = 128;
        const TEST_LEN: usize = 256;
        let mmio = VmoMapping::map(TEST_OFFSET, TEST_LEN, vmo).unwrap();

        for i in (0..TEST_LEN).step_by(2) {
            assert_eq!(mmio.try_load16(i), Ok((TEST_OFFSET + i) as u16));
        }
    }

    // Dropping the mapping must remove it from the address space, observable
    // through the VMO's mapping count.
    #[test]
    fn test_mapping_unmaps() {
        const TEST_LEN: usize = 256;
        let vmo = Vmo::create(TEST_LEN as u64).unwrap();
        let observer: Vmo = vmo.duplicate_handle(Rights::READ).unwrap();

        {
            let _mapping = VmoMapping::map(0, TEST_LEN, vmo).unwrap();
            assert_eq!(observer.info().unwrap().num_mappings, 1);
        }

        assert_eq!(observer.info().unwrap().num_mappings, 0);
    }
}