// Copyright 2023 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use std::mem::{align_of, size_of};

/// Memory-maps a VMO and mediates access to its memory.
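///
/// # Example
///
/// A minimal usage sketch (illustrative only; unwraps for brevity):
///
/// ```ignore
/// let vmo = zx::Vmo::create(64).unwrap();
/// let mut mapped = MemoryMappedVmo::new_readwrite(&vmo).unwrap();
/// *mapped.get_object_mut::<u64>(0).unwrap() = 42;
/// assert_eq!(*mapped.get_object::<u64>(0).unwrap(), 42);
/// ```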
pub struct MemoryMappedVmo {
    map_addr: usize,
    vmo_size: usize,
    writable: bool,
}

impl MemoryMappedVmo {
    /// Maps a VMO in read-only mode.
    ///
    /// Attempting to call methods on the returned object that return mutable references will
    /// panic.
    pub fn new_readonly(vmo: &zx::Vmo) -> Result<MemoryMappedVmo, zx::Status> {
        Self::new_impl(vmo, false)
    }

    /// Maps a VMO in read-write mode.
    pub fn new_readwrite(vmo: &zx::Vmo) -> Result<MemoryMappedVmo, zx::Status> {
        Self::new_impl(vmo, true)
    }

    fn new_impl(vmo: &zx::Vmo, writable: bool) -> Result<MemoryMappedVmo, zx::Status> {
        let vmo_size = vmo.get_content_size()? as usize;
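
        // PERM_READ always grants read access; ALLOW_FAULTS lets the mapping fault pages in
        // on demand (e.g. for pager-backed VMOs); REQUIRE_NON_RESIZABLE refuses to map a
        // resizable VMO, so the size read above cannot change under us.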
        let mut flags = zx::VmarFlags::PERM_READ
            | zx::VmarFlags::ALLOW_FAULTS
            | zx::VmarFlags::REQUIRE_NON_RESIZABLE;
        if writable {
            flags |= zx::VmarFlags::PERM_WRITE;
        }

        let map_addr = fuchsia_runtime::vmar_root_self().map(0, vmo, 0, vmo_size, flags)?;
        Ok(MemoryMappedVmo { map_addr, vmo_size, writable })
    }

    /// Returns the number of usable bytes in the VMO (i.e. its ZX_PROP_VMO_CONTENT_SIZE
    /// property, which is not rounded to the page size).
    pub fn vmo_size(&self) -> usize {
        self.vmo_size
    }

    /// Given an element type, a base offset within the VMO and a number of elements, verifies
    /// that the offset is suitably aligned and that the range of elements fits within the VMO
    /// bounds. If both conditions are satisfied, returns a const pointer to the first element.
    fn validate_and_get_ptr<T>(
        &self,
        byte_offset: usize,
        num_elements: usize,
    ) -> Result<*const T, crate::Error> {
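        // All of the checks below must pass: the offset must respect T's alignment, the total
        // byte count must not overflow, and the whole range must fit within the VMO.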
        if byte_offset % align_of::<T>() == 0 {
            if let Some(num_bytes) = size_of::<T>().checked_mul(num_elements) {
                if let Some(end) = byte_offset.checked_add(num_bytes) {
                    if end <= self.vmo_size {
                        return Ok((self.map_addr + byte_offset) as *const T);
                    }
                }
            }
        }

        Err(crate::Error::InvalidInput)
    }

    /// Like validate_and_get_ptr, but returns a mut pointer and panics if the VMO is not
    /// writable.
    fn validate_and_get_mut_ptr<T>(
        &mut self,
        byte_offset: usize,
        num_elements: usize,
    ) -> Result<*mut T, crate::Error> {
        if !self.writable {
            panic!("MemoryMappedVmo is not writable");
        }

        Ok(self.validate_and_get_ptr::<T>(byte_offset, num_elements)? as *mut T)
    }

    /// Returns a reference to a slice of elements in the VMO.
    ///
    /// This method validates the alignment and the bounds against the VMO size.
    pub fn get_slice<'a, T: MemoryMappable>(
        &'a self,
        byte_offset: usize,
        num_elements: usize,
    ) -> Result<&'a [T], crate::Error> {
        let ptr = self.validate_and_get_ptr(byte_offset, num_elements)?;
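        // SAFETY: validate_and_get_ptr verified alignment and bounds, the mapping lives at
        // least as long as `&'a self`, and `T: MemoryMappable` accepts any byte content.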
        unsafe { Ok(std::slice::from_raw_parts(ptr, num_elements)) }
    }

    /// Returns a reference to an element in the VMO.
    ///
    /// This method validates the alignment and the bounds against the VMO size.
    pub fn get_object<'a, T: MemoryMappable>(
        &'a self,
        byte_offset: usize,
    ) -> Result<&'a T, crate::Error> {
        let ptr = self.validate_and_get_ptr(byte_offset, 1)?;
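        // SAFETY: same argument as in get_slice, for a single element.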
        unsafe { Ok(&*ptr) }
    }

    /// Returns a mutable reference to a slice of elements in the VMO.
    ///
    /// This method validates the alignment and the bounds against the VMO size.
    pub fn get_slice_mut<'a, T: MemoryMappable>(
        &'a mut self,
        byte_offset: usize,
        num_elements: usize,
    ) -> Result<&'a mut [T], crate::Error> {
        let ptr = self.validate_and_get_mut_ptr(byte_offset, num_elements)?;
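        // SAFETY: validate_and_get_mut_ptr verified alignment, bounds and writability, and
        // `&'a mut self` guarantees exclusive access for the returned slice's lifetime.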
        unsafe { Ok(std::slice::from_raw_parts_mut(ptr, num_elements)) }
    }

    /// Returns a mutable reference to an element in the VMO.
    ///
    /// This method validates the alignment and the bounds against the VMO size.
    pub fn get_object_mut<'a, T: MemoryMappable>(
        &'a mut self,
        byte_offset: usize,
    ) -> Result<&'a mut T, crate::Error> {
        let ptr = self.validate_and_get_mut_ptr(byte_offset, 1)?;
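        // SAFETY: same argument as in get_slice_mut, for a single element.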
        unsafe { Ok(&mut *ptr) }
    }
}

impl Drop for MemoryMappedVmo {
    fn drop(&mut self) {
        // SAFETY: We own the mapping.
        unsafe {
            fuchsia_runtime::vmar_root_self()
                .unmap(self.map_addr, self.vmo_size)
                .expect("failed to unmap MemoryMappedVmo");
        }
    }
}

/// Trait for types that can be stored into a MemoryMappedVmo.
///
/// # Safety
/// - In general, since VMOs can be received from potentially hostile processes, types that
///   implement this trait must be prepared to handle any possible sequence of bytes safely.
/// - They must not contain references or pointers, as these are meaningless across process
///   boundaries.
///
/// These requirements are similar to zerocopy::FromBytes, but we define our own trait because
/// zerocopy's FromBytes derive macro does not accept some types that we know can be stored
/// safely in the way we use them. Having our own trait makes it possible to mark such types
/// as MemoryMappable.
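///
/// # Example
///
/// A sketch of how a plain-old-data type could opt in. `SampleHeader` is hypothetical and not
/// part of this module:
///
/// ```ignore
/// #[repr(C)]
/// struct SampleHeader {
///     magic: u32,
///     len: u32,
/// }
///
/// // SAFETY: any byte pattern is a valid SampleHeader and it contains no pointers.
/// unsafe impl MemoryMappable for SampleHeader {}
/// ```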
pub unsafe trait MemoryMappable {}

unsafe impl MemoryMappable for u8 {}
unsafe impl MemoryMappable for u16 {}
unsafe impl MemoryMappable for u32 {}
unsafe impl MemoryMappable for u64 {}
unsafe impl<T: MemoryMappable> MemoryMappable for [T] {}
unsafe impl<T: MemoryMappable, const N: usize> MemoryMappable for [T; N] {}

#[cfg(test)]
mod tests {
    use super::*;
    use assert_matches::assert_matches;

    // Test data used by some of the following tests.
    const TEST_DATA: [u64; 4] = [11, 22, 33, 44];
    const TEST_DATA_SIZE: usize = size_of::<u64>() * TEST_DATA.len();

    #[test]
    fn test_vmo_size() {
        let vmo = zx::Vmo::create(TEST_DATA_SIZE as u64).unwrap();
        let m = MemoryMappedVmo::new_readwrite(&vmo).unwrap();

        assert_eq!(m.vmo_size(), TEST_DATA_SIZE);
    }

    #[test]
    fn test_write_objects_read_slice() {
        let vmo = zx::Vmo::create(TEST_DATA_SIZE as u64).unwrap();

        // Fill VMO with test data as individual objects.
        {
            let mut m = MemoryMappedVmo::new_readwrite(&vmo).unwrap();
            for (i, val) in TEST_DATA.iter().enumerate() {
                *m.get_object_mut(size_of::<u64>() * i).unwrap() = *val;
            }
        }

        // Verify that we can read them back correctly as a slice.
        {
            let m = MemoryMappedVmo::new_readonly(&vmo).unwrap();
            assert_eq!(*m.get_slice::<u64>(0, 4).unwrap(), TEST_DATA);
        }
    }

    #[test]
    fn test_write_slice_read_objects() {
        let vmo = zx::Vmo::create(TEST_DATA_SIZE as u64).unwrap();

        // Fill VMO with test data as a slice.
        {
            let mut m = MemoryMappedVmo::new_readwrite(&vmo).unwrap();
            m.get_slice_mut(0, 4).unwrap().copy_from_slice(&TEST_DATA);
        }

        // Verify that we can read it back correctly as individual objects.
        {
            let m = MemoryMappedVmo::new_readonly(&vmo).unwrap();
            for (i, expected_val) in TEST_DATA.iter().enumerate() {
                let actual_val: &u64 = m.get_object(size_of::<u64>() * i).unwrap();
                assert_eq!(*actual_val, *expected_val, "value mismatch at i={}", i);
            }
        }
    }

    #[test]
    fn test_write_slice_read_subslices() {
        const COUNT: usize = 4;
        let vmo = zx::Vmo::create((size_of::<u64>() * COUNT) as u64).unwrap();

        // Fill VMO with test data.
        let mut m = MemoryMappedVmo::new_readwrite(&vmo).unwrap();
        m.get_slice_mut::<u64>(0, COUNT).unwrap().copy_from_slice(&[11, 22, 33, 44]);

        // Verify that we can read subslices correctly.
        const SECOND_ELEM_BYTE_OFFSET: usize = size_of::<u64>();
        assert_eq!(*m.get_slice::<u64>(SECOND_ELEM_BYTE_OFFSET, 0).unwrap(), []);
        assert_eq!(*m.get_slice::<u64>(SECOND_ELEM_BYTE_OFFSET, 1).unwrap(), [22]);
        assert_eq!(*m.get_slice::<u64>(SECOND_ELEM_BYTE_OFFSET, 2).unwrap(), [22, 33]);
        assert_eq!(*m.get_slice::<u64>(SECOND_ELEM_BYTE_OFFSET, 3).unwrap(), [22, 33, 44]);
    }

    #[test]
    fn test_uninitialized_is_zero() {
        const COUNT: usize = 4;
        let vmo = zx::Vmo::create((size_of::<u64>() * COUNT) as u64).unwrap();
        let m = MemoryMappedVmo::new_readonly(&vmo).unwrap();

        // Verify that the value of uninitialized data is zero.
        assert_eq!(*m.get_slice::<u64>(0, COUNT).unwrap(), [0; COUNT]);
    }

    #[test]
    fn test_range_errors() {
        const COUNT: usize = 4;
        let vmo = zx::Vmo::create((size_of::<u64>() * COUNT) as u64).unwrap();
        let m = MemoryMappedVmo::new_readonly(&vmo).unwrap();

        // Reading at a misaligned offset should fail.
        const MISALIGNED_OFFSET: usize = size_of::<u64>() - 1;
        assert_matches!(m.get_object::<u64>(MISALIGNED_OFFSET), Err(crate::Error::InvalidInput));

        // Reading an out-of-bounds range should fail.
        const SECOND_ELEM_BYTE_OFFSET: usize = size_of::<u64>();
        assert_matches!(
            m.get_slice::<u64>(SECOND_ELEM_BYTE_OFFSET, COUNT),
            Err(crate::Error::InvalidInput)
        );
    }

    #[test]
    #[should_panic(expected = "MemoryMappedVmo is not writable")]
    fn test_cannot_get_mutable_slice_from_readonly_vmo() {
        let vmo = zx::Vmo::create(TEST_DATA_SIZE as u64).unwrap();
        let mut m = MemoryMappedVmo::new_readonly(&vmo).unwrap();

        // This should panic:
        let _ = m.get_slice_mut::<u64>(0, 1);
    }

    #[test]
    #[should_panic(expected = "MemoryMappedVmo is not writable")]
    fn test_cannot_get_mutable_object_from_readonly_vmo() {
        let vmo = zx::Vmo::create(TEST_DATA_SIZE as u64).unwrap();
        let mut m = MemoryMappedVmo::new_readonly(&vmo).unwrap();

        // This should panic:
        let _ = m.get_object_mut::<u64>(0);
    }
}