heapdump_vmo/memory_mapped_vmo.rs

// Copyright 2023 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use std::mem::{align_of, size_of};

/// Memory-maps a VMO and mediates access to its memory.
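///
/// # Example
///
/// A minimal usage sketch: map a freshly-created VMO read-write, store a value, and read it
/// back.
///
/// ```ignore
/// let vmo = zx::Vmo::create(std::mem::size_of::<u64>() as u64).unwrap();
/// let mut mapping = MemoryMappedVmo::new_readwrite(&vmo).unwrap();
/// *mapping.get_object_mut::<u64>(0).unwrap() = 42;
/// assert_eq!(*mapping.get_object::<u64>(0).unwrap(), 42);
/// ```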
pub struct MemoryMappedVmo {
    map_addr: usize,
    vmo_size: usize,
    writable: bool,
}

impl MemoryMappedVmo {
    /// Maps a VMO in read-only mode.
    ///
    /// Attempting to call methods on the returned object that return mutable references will panic.
    pub fn new_readonly(vmo: &zx::Vmo) -> Result<MemoryMappedVmo, zx::Status> {
        Self::new_impl(vmo, false)
    }

    /// Maps a VMO in read-write mode.
    pub fn new_readwrite(vmo: &zx::Vmo) -> Result<MemoryMappedVmo, zx::Status> {
        Self::new_impl(vmo, true)
    }

    fn new_impl(vmo: &zx::Vmo, writable: bool) -> Result<MemoryMappedVmo, zx::Status> {
        let vmo_size = vmo.get_content_size()? as usize;

        let mut flags = zx::VmarFlags::PERM_READ
            | zx::VmarFlags::ALLOW_FAULTS
            | zx::VmarFlags::REQUIRE_NON_RESIZABLE;
        if writable {
            flags |= zx::VmarFlags::PERM_WRITE;
        }

        let map_addr = fuchsia_runtime::vmar_root_self().map(0, &vmo, 0, vmo_size, flags)?;
        Ok(MemoryMappedVmo { map_addr, vmo_size, writable })
    }

    /// Returns the number of usable bytes in the VMO (i.e. its ZX_PROP_VMO_CONTENT_SIZE property,
    /// which is not rounded to the page size).
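    ///
    /// For example, a VMO created with `zx::Vmo::create(24)` reports 24 here, even though the
    /// underlying mapping covers at least one whole page.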
    pub fn vmo_size(&self) -> usize {
        self.vmo_size
    }

    /// Given an element type, a base offset within the VMO and a number of elements, verifies that
    /// the offset is suitably aligned and that the range of the elements fits in the VMO bounds. If
    /// both conditions are satisfied, returns a const pointer to the first element.
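    ///
    /// As a worked example, for `T = u64` in a 32-byte VMO: `byte_offset = 8, num_elements = 2`
    /// is accepted (the offset is 8-byte aligned and the range ends at byte 24), whereas
    /// `byte_offset = 4` fails the alignment check and `byte_offset = 24, num_elements = 2`
    /// fails the bounds check (the range would end at byte 40).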
    fn validate_and_get_ptr<T>(
        &self,
        byte_offset: usize,
        num_elements: usize,
    ) -> Result<*const T, crate::Error> {
        if byte_offset % align_of::<T>() == 0 {
            if let Some(num_bytes) = size_of::<T>().checked_mul(num_elements) {
                if let Some(end) = byte_offset.checked_add(num_bytes) {
                    if end <= self.vmo_size {
                        return Ok((self.map_addr + byte_offset) as *const T);
                    }
                }
            }
        }

        Err(crate::Error::InvalidInput)
    }

    /// Like validate_and_get_ptr, but returns a mut pointer and panics if the VMO is not writable.
    fn validate_and_get_mut_ptr<T>(
        &mut self,
        byte_offset: usize,
        num_elements: usize,
    ) -> Result<*mut T, crate::Error> {
        if !self.writable {
            panic!("MemoryMappedVmo is not writable");
        }

        Ok(self.validate_and_get_ptr::<T>(byte_offset, num_elements)? as *mut T)
    }

    /// Returns a reference to a slice of elements in the VMO.
    ///
    /// This method validates the alignment and the bounds against the VMO size.
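    ///
    /// For example, `get_slice::<u64>(8, 2)` returns the second and third `u64` elements
    /// stored in the VMO (offset 8 is the start of the second element).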
    pub fn get_slice<'a, T: MemoryMappable>(
        &'a self,
        byte_offset: usize,
        num_elements: usize,
    ) -> Result<&'a [T], crate::Error> {
        let ptr = self.validate_and_get_ptr(byte_offset, num_elements)?;
        unsafe { Ok(std::slice::from_raw_parts(ptr, num_elements)) }
    }

    /// Returns a reference to an element in the VMO.
    ///
    /// This method validates the alignment and the bounds against the VMO size.
    pub fn get_object<'a, T: MemoryMappable>(
        &'a self,
        byte_offset: usize,
    ) -> Result<&'a T, crate::Error> {
        let ptr = self.validate_and_get_ptr(byte_offset, 1)?;
        unsafe { Ok(&*ptr) }
    }

    /// Returns a mutable reference to a slice of elements in the VMO.
    ///
    /// This method validates the alignment and the bounds against the VMO size.
    pub fn get_slice_mut<'a, T: MemoryMappable>(
        &'a mut self,
        byte_offset: usize,
        num_elements: usize,
    ) -> Result<&'a mut [T], crate::Error> {
        let ptr = self.validate_and_get_mut_ptr(byte_offset, num_elements)?;
        unsafe { Ok(std::slice::from_raw_parts_mut(ptr, num_elements)) }
    }

    /// Returns a mutable reference to an element in the VMO.
    ///
    /// This method validates the alignment and the bounds against the VMO size.
    pub fn get_object_mut<'a, T: MemoryMappable>(
        &'a mut self,
        byte_offset: usize,
    ) -> Result<&'a mut T, crate::Error> {
        let ptr = self.validate_and_get_mut_ptr(byte_offset, 1)?;
        unsafe { Ok(&mut *ptr) }
    }
}

impl Drop for MemoryMappedVmo {
    fn drop(&mut self) {
        // SAFETY: We own the mapping.
        unsafe {
            fuchsia_runtime::vmar_root_self()
                .unmap(self.map_addr, self.vmo_size)
                .expect("failed to unmap MemoryMappedVmo");
        }
    }
}

/// Trait for types that can be stored into a MemoryMappedVmo.
///
/// # Safety
/// - In general, since VMOs can be received from potentially hostile processes, types that
///   implement this trait must be prepared to handle any possible sequence of bytes safely.
/// - They must not contain references/pointers, as they are useless across process boundaries.
///
/// These requirements are similar to zerocopy::FromBytes, but we define our own trait because
/// zerocopy's FromBytes derive macro does not accept some types that we know can be stored
/// safely in the way we use them. Having our own trait makes it possible to mark such types
/// as MemoryMappable.
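///
/// # Example
///
/// A sketch of opting in a plain-old-data type (`Record` is a hypothetical type, shown only
/// for illustration):
///
/// ```ignore
/// #[repr(C)]
/// struct Record {
///     timestamp: u64,
///     byte_count: u64,
/// }
///
/// // SAFETY: every bit pattern is a valid Record and it contains no references or pointers.
/// unsafe impl MemoryMappable for Record {}
/// ```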
pub unsafe trait MemoryMappable {}

unsafe impl MemoryMappable for u8 {}
unsafe impl MemoryMappable for u16 {}
unsafe impl MemoryMappable for u32 {}
unsafe impl MemoryMappable for u64 {}
unsafe impl<T: MemoryMappable> MemoryMappable for [T] {}
unsafe impl<T: MemoryMappable, const N: usize> MemoryMappable for [T; N] {}

#[cfg(test)]
mod tests {
    use super::*;
    use assert_matches::assert_matches;

    // Test data used by some of the following tests.
    const TEST_DATA: [u64; 4] = [11, 22, 33, 44];
    const TEST_DATA_SIZE: usize = size_of::<u64>() * TEST_DATA.len();

    #[test]
    fn test_vmo_size() {
        let vmo = zx::Vmo::create(TEST_DATA_SIZE as u64).unwrap();
        let m = MemoryMappedVmo::new_readwrite(&vmo).unwrap();

        assert_eq!(m.vmo_size(), TEST_DATA_SIZE);
    }

    #[test]
    fn test_write_objects_read_slice() {
        let vmo = zx::Vmo::create(TEST_DATA_SIZE as u64).unwrap();

        // Fill VMO with test data as individual objects.
        {
            let mut m = MemoryMappedVmo::new_readwrite(&vmo).unwrap();
            for (i, val) in TEST_DATA.iter().enumerate() {
                *m.get_object_mut(size_of::<u64>() * i).unwrap() = *val;
            }
        }

        // Verify that we can read them back correctly as a slice.
        {
            let m = MemoryMappedVmo::new_readonly(&vmo).unwrap();
            assert_eq!(*m.get_slice::<u64>(0, 4).unwrap(), TEST_DATA);
        }
    }

    #[test]
    fn test_write_slice_read_objects() {
        let vmo = zx::Vmo::create(TEST_DATA_SIZE as u64).unwrap();

        // Fill VMO with test data as a slice.
        {
            let mut m = MemoryMappedVmo::new_readwrite(&vmo).unwrap();
            m.get_slice_mut(0, 4).unwrap().copy_from_slice(&TEST_DATA);
        }

        // Verify that we can read it back correctly as individual objects.
        {
            let m = MemoryMappedVmo::new_readonly(&vmo).unwrap();
            for (i, expected_val) in TEST_DATA.iter().enumerate() {
                let actual_val: &u64 = m.get_object(size_of::<u64>() * i).unwrap();
                assert_eq!(*actual_val, *expected_val, "value mismatch at i={}", i);
            }
        }
    }

    #[test]
    fn test_write_slice_read_subslices() {
        const COUNT: usize = 4;
        let vmo = zx::Vmo::create((size_of::<u64>() * COUNT) as u64).unwrap();

        // Fill VMO with test data.
        let mut m = MemoryMappedVmo::new_readwrite(&vmo).unwrap();
        m.get_slice_mut::<u64>(0, COUNT).unwrap().copy_from_slice(&[11, 22, 33, 44]);

        // Verify that we can read subslices correctly.
        const SECOND_ELEM_BYTE_OFFSET: usize = size_of::<u64>();
        assert_eq!(*m.get_slice::<u64>(SECOND_ELEM_BYTE_OFFSET, 0).unwrap(), []);
        assert_eq!(*m.get_slice::<u64>(SECOND_ELEM_BYTE_OFFSET, 1).unwrap(), [22]);
        assert_eq!(*m.get_slice::<u64>(SECOND_ELEM_BYTE_OFFSET, 2).unwrap(), [22, 33]);
        assert_eq!(*m.get_slice::<u64>(SECOND_ELEM_BYTE_OFFSET, 3).unwrap(), [22, 33, 44]);
    }

    #[test]
    fn test_uninitialized_is_zero() {
        const COUNT: usize = 4;
        let vmo = zx::Vmo::create((size_of::<u64>() * COUNT) as u64).unwrap();
        let m = MemoryMappedVmo::new_readonly(&vmo).unwrap();

        // Verify that the value of uninitialized data is zero.
        assert_eq!(*m.get_slice::<u64>(0, COUNT).unwrap(), [0; COUNT]);
    }

    #[test]
    fn test_range_errors() {
        const COUNT: usize = 4;
        let vmo = zx::Vmo::create((size_of::<u64>() * COUNT) as u64).unwrap();
        let m = MemoryMappedVmo::new_readonly(&vmo).unwrap();

        // Reading at a misaligned offset should fail.
        const MISALIGNED_OFFSET: usize = size_of::<u64>() - 1;
        assert_matches!(m.get_object::<u64>(MISALIGNED_OFFSET), Err(crate::Error::InvalidInput));

        // Reading an out-of-bounds range should fail.
        const SECOND_ELEM_BYTE_OFFSET: usize = size_of::<u64>();
        assert_matches!(
            m.get_slice::<u64>(SECOND_ELEM_BYTE_OFFSET, COUNT),
            Err(crate::Error::InvalidInput)
        );
    }
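
    // Example test: a custom plain-old-data struct can be stored and read back once it is
    // marked MemoryMappable. `Record` is a hypothetical type defined here for illustration.
    #[test]
    fn test_custom_memory_mappable_type() {
        #[repr(C)]
        struct Record {
            timestamp: u64,
            byte_count: u64,
        }
        // SAFETY: every bit pattern is a valid Record and it contains no references or pointers.
        unsafe impl MemoryMappable for Record {}

        let vmo = zx::Vmo::create(size_of::<Record>() as u64).unwrap();
        let mut m = MemoryMappedVmo::new_readwrite(&vmo).unwrap();
        *m.get_object_mut::<Record>(0).unwrap() = Record { timestamp: 1234, byte_count: 5678 };

        let record: &Record = m.get_object(0).unwrap();
        assert_eq!(record.timestamp, 1234);
        assert_eq!(record.byte_count, 5678);
    }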

    #[test]
    #[should_panic(expected = "MemoryMappedVmo is not writable")]
    fn test_cannot_get_mutable_slice_from_readonly_vmo() {
        let vmo = zx::Vmo::create(TEST_DATA_SIZE as u64).unwrap();
        let mut m = MemoryMappedVmo::new_readonly(&vmo).unwrap();

        // This should panic:
        let _ = m.get_slice_mut::<u64>(0, 1);
    }

    #[test]
    #[should_panic(expected = "MemoryMappedVmo is not writable")]
    fn test_cannot_get_mutable_object_from_readonly_vmo() {
        let vmo = zx::Vmo::create(TEST_DATA_SIZE as u64).unwrap();
        let mut m = MemoryMappedVmo::new_readonly(&vmo).unwrap();

        // This should panic:
        let _ = m.get_object_mut::<u64>(0);
    }
}
279}