memory_mapped_vmo/
lib.rs

// Copyright 2023 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use std::mem::{align_of, size_of};

#[derive(Debug)]
pub enum Error {
    InvalidInput,
}

/// Memory-maps a VMO and mediates access to its memory.
pub struct MemoryMappedVmo {
    map_addr: usize,
    vmo_size: usize,
    writable: bool,
}

impl MemoryMappedVmo {
    /// Maps a VMO in read-only mode.
    ///
    /// Attempting to call methods on the returned object that return mutable references will panic.
    ///
    /// # Safety
    /// The caller must either guarantee that the `vmo` is not modified by others while the returned
    /// instance is alive or that accesses are synchronized in application-specific ways.
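    ///
    /// # Example
    ///
    /// A minimal usage sketch (illustrative only; it assumes this library is importable as the
    /// `memory_mapped_vmo` crate and that the VMO was just created, so nobody else can map it):
    ///
    /// ```no_run
    /// # use memory_mapped_vmo::MemoryMappedVmo;
    /// let vmo = zx::Vmo::create(4096).unwrap();
    /// // SAFETY: the VMO was just created and is not shared, so nobody else can modify it.
    /// let m = unsafe { MemoryMappedVmo::new_readonly(&vmo) }.unwrap();
    /// assert_eq!(m.vmo_size(), 4096);
    /// ```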
    pub unsafe fn new_readonly(vmo: &zx::Vmo) -> Result<MemoryMappedVmo, zx::Status> {
        unsafe { Self::new_impl(vmo, false) }
    }

    /// Maps a VMO in read-write mode.
    ///
    /// # Safety
    /// The caller must either guarantee that the `vmo` is not modified by others while the returned
    /// instance is alive or that accesses are synchronized in application-specific ways.
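    ///
    /// # Example
    ///
    /// A minimal usage sketch (illustrative only; same crate-name and freshly-created-VMO
    /// assumptions as the `new_readonly` example):
    ///
    /// ```no_run
    /// # use memory_mapped_vmo::MemoryMappedVmo;
    /// let vmo = zx::Vmo::create(4096).unwrap();
    /// // SAFETY: the VMO was just created and is not shared, so accesses cannot race.
    /// let mut m = unsafe { MemoryMappedVmo::new_readwrite(&vmo) }.unwrap();
    /// *m.get_object_mut::<u64>(0).unwrap() = 42;
    /// ```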
    pub unsafe fn new_readwrite(vmo: &zx::Vmo) -> Result<MemoryMappedVmo, zx::Status> {
        unsafe { Self::new_impl(vmo, true) }
    }

    /// # Safety
    /// The caller must either guarantee that the `vmo` is not modified by others while the returned
    /// instance is alive or that accesses are synchronized in application-specific ways.
    unsafe fn new_impl(vmo: &zx::Vmo, writable: bool) -> Result<MemoryMappedVmo, zx::Status> {
        let vmo_size = vmo.get_content_size()? as usize;

        // Map read-only by default; add write permission only if requested.
        let mut flags = zx::VmarFlags::PERM_READ
            | zx::VmarFlags::ALLOW_FAULTS
            | zx::VmarFlags::REQUIRE_NON_RESIZABLE;
        if writable {
            flags |= zx::VmarFlags::PERM_WRITE;
        }

        let map_addr = fuchsia_runtime::vmar_root_self().map(0, vmo, 0, vmo_size, flags)?;
        Ok(MemoryMappedVmo { map_addr, vmo_size, writable })
    }

    /// Returns the number of usable bytes in the VMO (i.e. its ZX_PROP_VMO_CONTENT_SIZE property,
    /// which is not rounded to the page size).
    pub fn vmo_size(&self) -> usize {
        self.vmo_size
    }

    /// Given an element type, a base offset within the VMO and a number of elements, verifies that
    /// the offset is suitably aligned and that the range of the elements fits in the VMO bounds. If
    /// both conditions are satisfied, returns a const pointer to the first element.
    fn validate_and_get_ptr<T>(
        &self,
        byte_offset: usize,
        num_elements: usize,
    ) -> Result<*const T, Error> {
        // The offset must be aligned for T.
        if byte_offset % align_of::<T>() == 0 {
            // Compute the total byte count, rejecting sizes that overflow.
            if let Some(num_bytes) = size_of::<T>().checked_mul(num_elements) {
                if let Some(end) = byte_offset.checked_add(num_bytes) {
                    // The whole range must fit within the VMO.
                    if end <= self.vmo_size {
                        return Ok((self.map_addr + byte_offset) as *const T);
                    }
                }
            }
        }

        Err(Error::InvalidInput)
    }

    /// Like `validate_and_get_ptr`, but returns a mut pointer and panics if the VMO is not writable.
    fn validate_and_get_mut_ptr<T>(
        &mut self,
        byte_offset: usize,
        num_elements: usize,
    ) -> Result<*mut T, Error> {
        if !self.writable {
            panic!("MemoryMappedVmo is not writable");
        }

        Ok(self.validate_and_get_ptr::<T>(byte_offset, num_elements)? as *mut T)
    }

    /// Returns a reference to a slice of elements in the VMO.
    ///
    /// This method validates the alignment and the bounds against the VMO size.
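    ///
    /// # Example
    ///
    /// An illustrative sketch (assuming the `memory_mapped_vmo` crate name): read four `u64`
    /// values starting at byte offset 8, which is a multiple of `align_of::<u64>()`:
    ///
    /// ```no_run
    /// # use memory_mapped_vmo::MemoryMappedVmo;
    /// # let vmo = zx::Vmo::create(4096).unwrap();
    /// # let m = unsafe { MemoryMappedVmo::new_readonly(&vmo) }.unwrap();
    /// let values: &[u64] = m.get_slice(8, 4).unwrap();
    /// assert_eq!(values.len(), 4);
    /// ```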
    pub fn get_slice<'a, T: MemoryMappable>(
        &'a self,
        byte_offset: usize,
        num_elements: usize,
    ) -> Result<&'a [T], Error> {
        let ptr = self.validate_and_get_ptr(byte_offset, num_elements)?;
        // SAFETY: the pointer has been validated to be aligned and in bounds, and
        // `T: MemoryMappable` guarantees that any byte pattern is a valid `T`.
        unsafe { Ok(std::slice::from_raw_parts(ptr, num_elements)) }
    }

    /// Returns a reference to an element in the VMO.
    ///
    /// This method validates the alignment and the bounds against the VMO size.
    pub fn get_object<'a, T: MemoryMappable>(&'a self, byte_offset: usize) -> Result<&'a T, Error> {
        let ptr = self.validate_and_get_ptr(byte_offset, 1)?;
        // SAFETY: the pointer has been validated to be aligned and in bounds, and
        // `T: MemoryMappable` guarantees that any byte pattern is a valid `T`.
        unsafe { Ok(&*ptr) }
    }

    /// Returns a mutable reference to a slice of elements in the VMO.
    ///
    /// This method validates the alignment and the bounds against the VMO size.
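    ///
    /// # Example
    ///
    /// An illustrative sketch (assuming the `memory_mapped_vmo` crate name): overwrite the first
    /// four `u64` values in a writable mapping:
    ///
    /// ```no_run
    /// # use memory_mapped_vmo::MemoryMappedVmo;
    /// # let vmo = zx::Vmo::create(4096).unwrap();
    /// # let mut m = unsafe { MemoryMappedVmo::new_readwrite(&vmo) }.unwrap();
    /// m.get_slice_mut::<u64>(0, 4).unwrap().copy_from_slice(&[1, 2, 3, 4]);
    /// ```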
    pub fn get_slice_mut<'a, T: MemoryMappable>(
        &'a mut self,
        byte_offset: usize,
        num_elements: usize,
    ) -> Result<&'a mut [T], Error> {
        let ptr = self.validate_and_get_mut_ptr(byte_offset, num_elements)?;
        // SAFETY: the pointer has been validated to be aligned and in bounds, and the exclusive
        // borrow of `self` prevents aliasing through this object.
        unsafe { Ok(std::slice::from_raw_parts_mut(ptr, num_elements)) }
    }

    /// Returns a mutable reference to an element in the VMO.
    ///
    /// This method validates the alignment and the bounds against the VMO size.
    pub fn get_object_mut<'a, T: MemoryMappable>(
        &'a mut self,
        byte_offset: usize,
    ) -> Result<&'a mut T, Error> {
        let ptr = self.validate_and_get_mut_ptr(byte_offset, 1)?;
        // SAFETY: the pointer has been validated to be aligned and in bounds, and the exclusive
        // borrow of `self` prevents aliasing through this object.
        unsafe { Ok(&mut *ptr) }
    }
}

impl Drop for MemoryMappedVmo {
    fn drop(&mut self) {
        // SAFETY: We own the mapping, and the borrow checker guarantees that no references into
        // it are still alive.
        unsafe {
            fuchsia_runtime::vmar_root_self()
                .unmap(self.map_addr, self.vmo_size)
                .expect("failed to unmap MemoryMappedVmo");
        }
    }
}

/// Trait for types that can be stored into a MemoryMappedVmo.
///
/// # Safety
/// - In general, since VMOs can be received from potentially hostile processes, types that
///   implement this trait must be prepared to handle any possible sequence of bytes safely.
/// - They must not contain references/pointers, as they are useless across process boundaries.
///
/// These requirements are similar to zerocopy::FromBytes, but we define our own trait because
/// zerocopy's FromBytes derive macro does not accept some types that, in the way we use them, we
/// know can be stored safely. Having our own trait makes it possible to mark such types as
/// MemoryMappable.
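///
/// # Example
///
/// An illustrative sketch: a hypothetical plain-old-data type (`Point` is not part of this crate)
/// can opt in by asserting that it satisfies the requirements above:
///
/// ```no_run
/// # use memory_mapped_vmo::MemoryMappable;
/// #[repr(C)]
/// struct Point {
///     x: u32,
///     y: u32,
/// }
///
/// // SAFETY: `Point` contains no pointers and every byte pattern is a valid value.
/// unsafe impl MemoryMappable for Point {}
/// ```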
pub unsafe trait MemoryMappable {}

unsafe impl MemoryMappable for u8 {}
unsafe impl MemoryMappable for u16 {}
unsafe impl MemoryMappable for u32 {}
unsafe impl MemoryMappable for u64 {}
unsafe impl<T: MemoryMappable> MemoryMappable for [T] {}
unsafe impl<T: MemoryMappable, const N: usize> MemoryMappable for [T; N] {}

#[cfg(test)]
mod tests {
    use super::*;
    use assert_matches::assert_matches;

    // Test data used by some of the following tests.
    const TEST_DATA: [u64; 4] = [11, 22, 33, 44];
    const TEST_DATA_SIZE: usize = size_of::<u64>() * TEST_DATA.len();

    #[test]
    fn test_vmo_size() {
        let vmo = zx::Vmo::create(TEST_DATA_SIZE as u64).unwrap();
        let m = unsafe { MemoryMappedVmo::new_readwrite(&vmo) }.unwrap();

        assert_eq!(m.vmo_size(), TEST_DATA_SIZE);
    }

    #[test]
    fn test_write_objects_read_slice() {
        let vmo = zx::Vmo::create(TEST_DATA_SIZE as u64).unwrap();

        // Fill VMO with test data as individual objects.
        {
            let mut m = unsafe { MemoryMappedVmo::new_readwrite(&vmo) }.unwrap();
            for (i, val) in TEST_DATA.iter().enumerate() {
                *m.get_object_mut(size_of::<u64>() * i).unwrap() = *val;
            }
        }

        // Verify that we can read them back correctly as a slice.
        {
            let m = unsafe { MemoryMappedVmo::new_readonly(&vmo) }.unwrap();
            assert_eq!(*m.get_slice::<u64>(0, 4).unwrap(), TEST_DATA);
        }
    }

    #[test]
    fn test_write_slice_read_objects() {
        let vmo = zx::Vmo::create(TEST_DATA_SIZE as u64).unwrap();

        // Fill VMO with test data as a slice.
        {
            let mut m = unsafe { MemoryMappedVmo::new_readwrite(&vmo) }.unwrap();
            m.get_slice_mut(0, 4).unwrap().copy_from_slice(&TEST_DATA);
        }

        // Verify that we can read it back correctly as individual objects.
        {
            let m = unsafe { MemoryMappedVmo::new_readonly(&vmo) }.unwrap();
            for (i, expected_val) in TEST_DATA.iter().enumerate() {
                let actual_val: &u64 = m.get_object(size_of::<u64>() * i).unwrap();
                assert_eq!(*actual_val, *expected_val, "value mismatch at i={}", i);
            }
        }
    }

    #[test]
    fn test_write_slice_read_subslices() {
        const COUNT: usize = 4;
        let vmo = zx::Vmo::create((size_of::<u64>() * COUNT) as u64).unwrap();

        // Fill VMO with test data.
        let mut m = unsafe { MemoryMappedVmo::new_readwrite(&vmo) }.unwrap();
        m.get_slice_mut::<u64>(0, COUNT).unwrap().copy_from_slice(&[11, 22, 33, 44]);

        // Verify that we can read subslices correctly.
        const SECOND_ELEM_BYTE_OFFSET: usize = size_of::<u64>();
        assert_eq!(*m.get_slice::<u64>(SECOND_ELEM_BYTE_OFFSET, 0).unwrap(), []);
        assert_eq!(*m.get_slice::<u64>(SECOND_ELEM_BYTE_OFFSET, 1).unwrap(), [22]);
        assert_eq!(*m.get_slice::<u64>(SECOND_ELEM_BYTE_OFFSET, 2).unwrap(), [22, 33]);
        assert_eq!(*m.get_slice::<u64>(SECOND_ELEM_BYTE_OFFSET, 3).unwrap(), [22, 33, 44]);
    }

    #[test]
    fn test_uninitialized_is_zero() {
        const COUNT: usize = 4;
        let vmo = zx::Vmo::create((size_of::<u64>() * COUNT) as u64).unwrap();
        let m = unsafe { MemoryMappedVmo::new_readonly(&vmo) }.unwrap();

        // Verify that the value of uninitialized data is zero.
        assert_eq!(*m.get_slice::<u64>(0, COUNT).unwrap(), [0; COUNT]);
    }

    #[test]
    fn test_range_errors() {
        const COUNT: usize = 4;
        let vmo = zx::Vmo::create((size_of::<u64>() * COUNT) as u64).unwrap();
        let m = unsafe { MemoryMappedVmo::new_readonly(&vmo) }.unwrap();

        // Reading at a misaligned offset should fail.
        const MISALIGNED_OFFSET: usize = size_of::<u64>() - 1;
        assert_matches!(m.get_object::<u64>(MISALIGNED_OFFSET), Err(Error::InvalidInput));

        // Reading an out-of-bounds range should fail.
        const SECOND_ELEM_BYTE_OFFSET: usize = size_of::<u64>();
        assert_matches!(
            m.get_slice::<u64>(SECOND_ELEM_BYTE_OFFSET, COUNT),
            Err(Error::InvalidInput)
        );
    }
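
    #[test]
    fn test_num_elements_overflow_errors() {
        // Added example: element counts whose total byte size overflows usize must be rejected
        // (exercises the checked_mul in validate_and_get_ptr).
        let vmo = zx::Vmo::create(TEST_DATA_SIZE as u64).unwrap();
        let m = unsafe { MemoryMappedVmo::new_readonly(&vmo) }.unwrap();

        assert_matches!(m.get_slice::<u64>(0, usize::MAX), Err(Error::InvalidInput));
    }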

    #[test]
    #[should_panic(expected = "MemoryMappedVmo is not writable")]
    fn test_cannot_get_mutable_slice_from_readonly_vmo() {
        let vmo = zx::Vmo::create(TEST_DATA_SIZE as u64).unwrap();
        let mut m = unsafe { MemoryMappedVmo::new_readonly(&vmo) }.unwrap();

        // This should panic:
        let _ = m.get_slice_mut::<u64>(0, 1);
    }

    #[test]
    #[should_panic(expected = "MemoryMappedVmo is not writable")]
    fn test_cannot_get_mutable_object_from_readonly_vmo() {
        let vmo = zx::Vmo::create(TEST_DATA_SIZE as u64).unwrap();
        let mut m = unsafe { MemoryMappedVmo::new_readonly(&vmo) }.unwrap();

        // This should panic:
        let _ = m.get_object_mut::<u64>(0);
    }
}