heapdump_vmo/memory_mapped_vmo.rs

// Copyright 2023 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use std::mem::{align_of, size_of};

/// Memory-maps a VMO and mediates access to its memory.
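///
/// # Example
///
/// A minimal usage sketch (ignored doctest; it requires a Fuchsia environment):
///
/// ```ignore
/// let vmo = zx::Vmo::create(8).unwrap();
///
/// // Write through a read-write mapping...
/// let mut mapped = MemoryMappedVmo::new_readwrite(&vmo).unwrap();
/// *mapped.get_object_mut::<u64>(0).unwrap() = 42;
/// drop(mapped);
///
/// // ...and read the value back through a read-only mapping.
/// let mapped = MemoryMappedVmo::new_readonly(&vmo).unwrap();
/// assert_eq!(*mapped.get_object::<u64>(0).unwrap(), 42);
/// ```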
pub struct MemoryMappedVmo {
    map_addr: usize,
    vmo_size: usize,
    writable: bool,
}

impl MemoryMappedVmo {
    /// Maps a VMO in read-only mode.
    ///
    /// Calling methods that return mutable references on the resulting object will panic.
    pub fn new_readonly(vmo: &zx::Vmo) -> Result<MemoryMappedVmo, zx::Status> {
        Self::new_impl(vmo, false)
    }

    /// Maps a VMO in read-write mode.
    pub fn new_readwrite(vmo: &zx::Vmo) -> Result<MemoryMappedVmo, zx::Status> {
        Self::new_impl(vmo, true)
    }

    fn new_impl(vmo: &zx::Vmo, writable: bool) -> Result<MemoryMappedVmo, zx::Status> {
        let vmo_size = vmo.get_content_size()? as usize;

        let mut flags = zx::VmarFlags::PERM_READ
            | zx::VmarFlags::ALLOW_FAULTS
            | zx::VmarFlags::REQUIRE_NON_RESIZABLE;
        if writable {
            flags |= zx::VmarFlags::PERM_WRITE;
        }

        let map_addr = fuchsia_runtime::vmar_root_self().map(0, vmo, 0, vmo_size, flags)?;
        Ok(MemoryMappedVmo { map_addr, vmo_size, writable })
    }

    /// Returns the number of usable bytes in the VMO (i.e. its ZX_PROP_VMO_CONTENT_SIZE property,
    /// which is not rounded to the page size).
    pub fn vmo_size(&self) -> usize {
        self.vmo_size
    }

    /// Given an element type, a base offset within the VMO and a number of elements, verifies
    /// that the offset is suitably aligned and that the range of elements fits within the VMO
    /// bounds. If both conditions are satisfied, returns a const pointer to the first element.
    fn validate_and_get_ptr<T>(
        &self,
        byte_offset: usize,
        num_elements: usize,
    ) -> Result<*const T, crate::Error> {
        if byte_offset % align_of::<T>() == 0 {
            if let Some(num_bytes) = size_of::<T>().checked_mul(num_elements) {
                if let Some(end) = byte_offset.checked_add(num_bytes) {
                    if end <= self.vmo_size {
                        return Ok((self.map_addr + byte_offset) as *const T);
                    }
                }
            }
        }

        Err(crate::Error::InvalidInput)
    }

    /// Like validate_and_get_ptr, but returns a mut pointer and panics if the VMO is not writable.
    fn validate_and_get_mut_ptr<T>(
        &mut self,
        byte_offset: usize,
        num_elements: usize,
    ) -> Result<*mut T, crate::Error> {
        if !self.writable {
            panic!("MemoryMappedVmo is not writable");
        }

        Ok(self.validate_and_get_ptr::<T>(byte_offset, num_elements)? as *mut T)
    }

    /// Returns a reference to a slice of elements in the VMO.
    ///
    /// This method validates the alignment and the bounds against the VMO size.
    pub fn get_slice<'a, T: MemoryMappable>(
        &'a self,
        byte_offset: usize,
        num_elements: usize,
    ) -> Result<&'a [T], crate::Error> {
        let ptr = self.validate_and_get_ptr(byte_offset, num_elements)?;
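        // SAFETY: validate_and_get_ptr checked alignment and that the requested range lies
        // within the mapping, which stays valid for the lifetime of the returned borrow of self.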
        unsafe { Ok(std::slice::from_raw_parts(ptr, num_elements)) }
    }

    /// Returns a reference to an element in the VMO.
    ///
    /// This method validates the alignment and the bounds against the VMO size.
    pub fn get_object<'a, T: MemoryMappable>(
        &'a self,
        byte_offset: usize,
    ) -> Result<&'a T, crate::Error> {
        let ptr = self.validate_and_get_ptr(byte_offset, 1)?;
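        // SAFETY: validate_and_get_ptr checked alignment and bounds; the mapping outlives the
        // returned reference, which borrows self.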
        unsafe { Ok(&*ptr) }
    }

    /// Returns a mutable reference to a slice of elements in the VMO.
    ///
    /// This method validates the alignment and the bounds against the VMO size.
    pub fn get_slice_mut<'a, T: MemoryMappable>(
        &'a mut self,
        byte_offset: usize,
        num_elements: usize,
    ) -> Result<&'a mut [T], crate::Error> {
        let ptr = self.validate_and_get_mut_ptr(byte_offset, num_elements)?;
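        // SAFETY: validate_and_get_mut_ptr checked writability, alignment and bounds, and the
        // exclusive borrow of self prevents other references through this mapping from coexisting.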
        unsafe { Ok(std::slice::from_raw_parts_mut(ptr, num_elements)) }
    }

    /// Returns a mutable reference to an element in the VMO.
    ///
    /// This method validates the alignment and the bounds against the VMO size.
    pub fn get_object_mut<'a, T: MemoryMappable>(
        &'a mut self,
        byte_offset: usize,
    ) -> Result<&'a mut T, crate::Error> {
        let ptr = self.validate_and_get_mut_ptr(byte_offset, 1)?;
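        // SAFETY: validate_and_get_mut_ptr checked writability, alignment and bounds, and the
        // exclusive borrow of self prevents other references through this mapping from coexisting.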
        unsafe { Ok(&mut *ptr) }
    }
}

impl Drop for MemoryMappedVmo {
    fn drop(&mut self) {
        // SAFETY: We own this mapping, and every reference into it borrows `self`, so none can
        // outlive this point.
        unsafe {
            fuchsia_runtime::vmar_root_self()
                .unmap(self.map_addr, self.vmo_size)
                .expect("failed to unmap MemoryMappedVmo");
        }
    }
}

/// Trait for types that can be stored into a MemoryMappedVmo.
///
/// # Safety
/// - In general, since VMOs can be received from potentially hostile processes, types that
///   implement this trait must be prepared to handle any possible sequence of bytes safely.
/// - They must not contain references/pointers, as they are useless across process boundaries.
///
/// These requirements are similar to zerocopy::FromBytes, but we define our own trait because
/// zerocopy's FromBytes derive macro rejects some types that, in the way we use them, we know
/// can be stored safely. Having our own trait makes it possible to mark such types as
/// MemoryMappable.
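///
/// # Example
///
/// A sketch of opting in a plain-old-data type (`AllocationHeader` is hypothetical and not part
/// of this crate); `#[repr(C)]` gives it a stable layout:
///
/// ```ignore
/// #[repr(C)]
/// struct AllocationHeader {
///     size: u64,
///     timestamp: u64,
/// }
///
/// // SAFETY: AllocationHeader contains no pointers/references and every bit pattern is valid.
/// unsafe impl MemoryMappable for AllocationHeader {}
/// ```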
pub unsafe trait MemoryMappable {}

unsafe impl MemoryMappable for u8 {}
unsafe impl MemoryMappable for u16 {}
unsafe impl MemoryMappable for u32 {}
unsafe impl MemoryMappable for u64 {}
unsafe impl<T: MemoryMappable> MemoryMappable for [T] {}
unsafe impl<T: MemoryMappable, const N: usize> MemoryMappable for [T; N] {}

#[cfg(test)]
mod tests {
    use super::*;
    use assert_matches::assert_matches;

    // Test data used by some of the following tests.
    const TEST_DATA: [u64; 4] = [11, 22, 33, 44];
    const TEST_DATA_SIZE: usize = size_of::<u64>() * TEST_DATA.len();

    #[test]
    fn test_vmo_size() {
        let vmo = zx::Vmo::create(TEST_DATA_SIZE as u64).unwrap();
        let m = MemoryMappedVmo::new_readwrite(&vmo).unwrap();

        assert_eq!(m.vmo_size(), TEST_DATA_SIZE);
    }

    #[test]
    fn test_write_objects_read_slice() {
        let vmo = zx::Vmo::create(TEST_DATA_SIZE as u64).unwrap();

        // Fill VMO with test data as individual objects.
        {
            let mut m = MemoryMappedVmo::new_readwrite(&vmo).unwrap();
            for (i, val) in TEST_DATA.iter().enumerate() {
                *m.get_object_mut(size_of::<u64>() * i).unwrap() = *val;
            }
        }

        // Verify that we can read them back correctly as a slice.
        {
            let m = MemoryMappedVmo::new_readonly(&vmo).unwrap();
            assert_eq!(*m.get_slice::<u64>(0, 4).unwrap(), TEST_DATA);
        }
    }

    #[test]
    fn test_write_slice_read_objects() {
        let vmo = zx::Vmo::create(TEST_DATA_SIZE as u64).unwrap();

        // Fill VMO with test data as a slice.
        {
            let mut m = MemoryMappedVmo::new_readwrite(&vmo).unwrap();
            m.get_slice_mut(0, 4).unwrap().copy_from_slice(&TEST_DATA);
        }

        // Verify that we can read it back correctly as individual objects.
        {
            let m = MemoryMappedVmo::new_readonly(&vmo).unwrap();
            for (i, expected_val) in TEST_DATA.iter().enumerate() {
                let actual_val: &u64 = m.get_object(size_of::<u64>() * i).unwrap();
                assert_eq!(*actual_val, *expected_val, "value mismatch at i={}", i);
            }
        }
    }

    #[test]
    fn test_write_slice_read_subslices() {
        const COUNT: usize = 4;
        let vmo = zx::Vmo::create((size_of::<u64>() * COUNT) as u64).unwrap();

        // Fill VMO with test data.
        let mut m = MemoryMappedVmo::new_readwrite(&vmo).unwrap();
        m.get_slice_mut::<u64>(0, COUNT).unwrap().copy_from_slice(&[11, 22, 33, 44]);

        // Verify that we can read subslices correctly.
        const SECOND_ELEM_BYTE_OFFSET: usize = size_of::<u64>();
        assert_eq!(*m.get_slice::<u64>(SECOND_ELEM_BYTE_OFFSET, 0).unwrap(), []);
        assert_eq!(*m.get_slice::<u64>(SECOND_ELEM_BYTE_OFFSET, 1).unwrap(), [22]);
        assert_eq!(*m.get_slice::<u64>(SECOND_ELEM_BYTE_OFFSET, 2).unwrap(), [22, 33]);
        assert_eq!(*m.get_slice::<u64>(SECOND_ELEM_BYTE_OFFSET, 3).unwrap(), [22, 33, 44]);
    }

    #[test]
    fn test_uninitialized_is_zero() {
        const COUNT: usize = 4;
        let vmo = zx::Vmo::create((size_of::<u64>() * COUNT) as u64).unwrap();
        let m = MemoryMappedVmo::new_readonly(&vmo).unwrap();

        // Verify that the value of uninitialized data is zero.
        assert_eq!(*m.get_slice::<u64>(0, COUNT).unwrap(), [0; COUNT]);
    }

    #[test]
    fn test_range_errors() {
        const COUNT: usize = 4;
        let vmo = zx::Vmo::create((size_of::<u64>() * COUNT) as u64).unwrap();
        let m = MemoryMappedVmo::new_readonly(&vmo).unwrap();

        // Reading at a misaligned offset should fail.
        const MISALIGNED_OFFSET: usize = size_of::<u64>() - 1;
        assert_matches!(m.get_object::<u64>(MISALIGNED_OFFSET), Err(crate::Error::InvalidInput));

        // Reading an out-of-bounds range should fail.
        const SECOND_ELEM_BYTE_OFFSET: usize = size_of::<u64>();
        assert_matches!(
            m.get_slice::<u64>(SECOND_ELEM_BYTE_OFFSET, COUNT),
            Err(crate::Error::InvalidInput)
        );
    }

    #[test]
    #[should_panic(expected = "MemoryMappedVmo is not writable")]
    fn test_cannot_get_mutable_slice_from_readonly_vmo() {
        let vmo = zx::Vmo::create(TEST_DATA_SIZE as u64).unwrap();
        let mut m = MemoryMappedVmo::new_readonly(&vmo).unwrap();

        // This should panic:
        let _ = m.get_slice_mut::<u64>(0, 1);
    }

    #[test]
    #[should_panic(expected = "MemoryMappedVmo is not writable")]
    fn test_cannot_get_mutable_object_from_readonly_vmo() {
        let vmo = zx::Vmo::create(TEST_DATA_SIZE as u64).unwrap();
        let mut m = MemoryMappedVmo::new_readonly(&vmo).unwrap();

        // This should panic:
        let _ = m.get_object_mut::<u64>(0);
    }
}