// api_impl/mem.rs

// Copyright 2024 The Fuchsia Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use std::ops::Range;

use tee_internal::binding::{
    TEE_Result, TEE_ERROR_ACCESS_DENIED, TEE_MEMORY_ACCESS_ANY_OWNER, TEE_MEMORY_ACCESS_READ,
    TEE_MEMORY_ACCESS_WRITE, TEE_SUCCESS,
};

12pub fn malloc(size: usize, _hint: u32) -> *mut ::std::os::raw::c_void {
13    // The 'hint' parameter allows requesting memory that is zeroed and that's
14    // not shared with other TAs. We always zero allocations and don't share
15    // memory with other TAs so we ignore the hint.
16    unsafe { libc::malloc(size) }
17}
18
19/// # Safety
20///
21/// This wraps libc::realloc and is only safe to call with a pointer value that is NULL or allocated
22/// through malloc / realloc.
23pub unsafe fn realloc(
24    buffer: *mut ::std::os::raw::c_void,
25    new_size: usize,
26) -> *mut ::std::os::raw::c_void {
27    unsafe { libc::realloc(buffer, new_size) }
28}
29
30/// # Safety
31///
32/// This wraps libc::free and is only safe to call with a pointer value that is NULL or allocated
33/// through malloc / realloc.
34pub unsafe fn free(buffer: *mut ::std::os::raw::c_void) {
35    unsafe { libc::free(buffer) }
36}
37
/// Copies `size` bytes from `src` to `dest`, handling overlapping regions
/// like libc memmove.
pub fn mem_move(dest: *mut ::std::os::raw::c_void, src: *mut ::std::os::raw::c_void, size: usize) {
    // Neither the spec nor libc's memmove defines a zero-length move, but
    // xtest cases exercise it explicitly (even with null pointers), so this
    // implementation treats it as a no-op.
    if size == 0 {
        return;
    }

    // The spec leaves null inputs undefined, and non-null pointers are a
    // precondition of std::ptr::copy; assert up front for a clearer panic.
    assert!(!src.is_null());
    assert!(!dest.is_null());

    // std::ptr::copy is the memmove equivalent (operands reversed relative to
    // libc). Using the Rust intrinsic lets the toolchain optimize it inline
    // rather than forcing an external libc call.
    // SAFETY: both pointers are non-null and the caller guarantees `size`
    // readable bytes at `src` and writable bytes at `dest`.
    unsafe { std::ptr::copy(src.cast::<u8>(), dest.cast::<u8>(), size) }
}

/// Lexicographically compares the first `size` bytes behind each pointer,
/// returning -1, 0, or 1 with the same sign convention as libc memcmp.
pub fn mem_compare(
    buffer1: *mut ::std::os::raw::c_void,
    buffer2: *mut ::std::os::raw::c_void,
    size: usize,
) -> i32 {
    // The spec leaves null inputs undefined, and non-null pointers are a
    // precondition of std::slice::from_raw_parts; assert for a clearer panic.
    assert!(!buffer1.is_null());
    assert!(!buffer2.is_null());

    // SAFETY: both pointers are non-null and the caller guarantees `size`
    // readable bytes behind each.
    let (lhs, rhs) = unsafe {
        (
            std::slice::from_raw_parts(buffer1 as *const u8, size),
            std::slice::from_raw_parts(buffer2 as *const u8, size),
        )
    };
    match lhs.cmp(rhs) {
        std::cmp::Ordering::Less => -1,
        std::cmp::Ordering::Greater => 1,
        std::cmp::Ordering::Equal => 0,
    }
}

/// Fills `size` bytes starting at `buffer` with the byte value `x`.
pub fn mem_fill(buffer: *mut ::std::os::raw::c_void, x: u8, size: usize) {
    // The spec leaves a null input undefined, and a non-null pointer is a
    // precondition of std::ptr::write_bytes; assert for a clearer panic.
    assert!(!buffer.is_null());

    // Semantically equivalent to libc::memset() since the element type is one
    // byte. Using the Rust intrinsic lets the toolchain optimize it inline
    // rather than forcing an external libc call.
    // SAFETY: buffer is non-null and the caller guarantees `size` writable
    // bytes behind it.
    unsafe { std::ptr::write_bytes(buffer.cast::<u8>(), x, size) }
}

93fn vmar_flags_from_access_flags(access_flags: u32) -> zx::VmarFlagsExtended {
94    let mut flags = zx::VmarFlagsExtended::empty();
95    if access_flags & TEE_MEMORY_ACCESS_READ != 0 {
96        flags |= zx::VmarFlagsExtended::PERM_READ;
97    }
98    if access_flags & TEE_MEMORY_ACCESS_WRITE != 0 {
99        flags |= zx::VmarFlagsExtended::PERM_WRITE;
100    }
101    flags
102}
103
/// Returns true when `check` lies entirely within `contain`. An empty `check`
/// range is considered contained regardless of where it sits.
fn range_contains(contain: &Range<usize>, check: &Range<usize>) -> bool {
    if check.is_empty() {
        return true;
    }
    // Non-empty: both the first and the last index of `check` must fall
    // inside `contain` (end is exclusive, hence `end - 1`).
    contain.contains(&check.start) && contain.contains(&(check.end - 1))
}

/// Backs TEE_CheckMemoryAccessRights: verifies that the `size`-byte region at
/// `start` is accessible with the rights requested in `access_flags`.
///
/// Returns TEE_SUCCESS when a single mapping covers the entire range with at
/// least the requested read/write permissions (and, when exclusive ownership
/// is requested, the range does not lie within any parameter mapping shared
/// with the client). Otherwise returns TEE_ERROR_ACCESS_DENIED, including
/// when `start + size` overflows the address space.
pub fn check_memory_access_rights(
    access_flags: u32,
    start: usize,
    size: usize,
    mapped_param_ranges: &Vec<Range<usize>>,
) -> TEE_Result {
    // Reject ranges that wrap around the end of the address space.
    let end = match start.checked_add(size) {
        Some(end) => end,
        None => return TEE_ERROR_ACCESS_DENIED,
    };
    let check_range = start..end;
    let required_mmu_flags = vmar_flags_from_access_flags(access_flags);

    // Absence of TEE_MEMORY_ACCESS_ANY_OWNER means the caller requires memory
    // owned exclusively by this TA, so ranges mapped in from parameters
    // (shared with the client) must be refused below.
    let check_for_exclusive_access = access_flags & TEE_MEMORY_ACCESS_ANY_OWNER == 0;
    // NOTE(review): the unwrap assumes querying the root VMAR's mappings
    // cannot fail here — confirm that a panic is preferable to returning an
    // error in that case.
    let maps = fuchsia_runtime::vmar_root_self().info_maps_vec().unwrap();
    for map in maps {
        if let Some(details) = map.details().as_mapping() {
            let map_range = map.base..map.base + map.size;
            // The checked range must sit entirely inside ONE mapping granting
            // (at least) the required permissions; a range spanning two
            // adjacent mappings is denied.
            if range_contains(&map_range, &check_range)
                && details.mmu_flags.contains(required_mmu_flags)
            {
                if check_for_exclusive_access {
                    for range in mapped_param_ranges {
                        if range_contains(range, &check_range) {
                            return TEE_ERROR_ACCESS_DENIED;
                        }
                    }
                }
                return TEE_SUCCESS;
            }
        }
    }

    // No mapping found covering the input range.
    TEE_ERROR_ACCESS_DENIED
}

/// Option hook read by the Scudo allocator that backs the C heap.
/// `zero_contents=true` makes every allocation zero-filled, which `malloc`
/// above relies on to satisfy the TEE zero-allocation hint unconditionally.
#[unsafe(no_mangle)]
pub extern "C" fn __scudo_default_options() -> *const std::ffi::c_char {
    // Static NUL-terminated option string, handed out as a C string pointer.
    b"zero_contents=true\0".as_ptr() as *const std::ffi::c_char
}

#[cfg(test)]
mod test {
    //! Tests for the raw-pointer memory helpers and the allocator's
    //! zero-on-allocate guarantee (configured via `__scudo_default_options`).
    use super::*;
    use std::ffi::c_void;

    // Asserts that `len` bytes starting at `addr` all equal `fill`. Volatile
    // reads prevent the compiler from optimizing the loads away.
    fn check_memory_contains(addr: *const c_void, fill: u8, len: usize) {
        for i in 0..len as isize {
            let val = unsafe { std::ptr::read_volatile(addr.byte_offset(i) as *const u8) };
            assert_eq!(val, fill, "offset {i}");
        }
    }

    // Writes `fill` to `len` bytes starting at `addr`. Volatile writes ensure
    // the stores survive even when the buffer is freed immediately after.
    fn fill_memory(addr: *mut c_void, fill: u8, len: usize) {
        for i in 0..len as isize {
            unsafe { std::ptr::write_volatile(addr.byte_offset(i) as *mut u8, fill) }
        }
    }

    // Smoke test: a small allocation succeeds and can be freed.
    #[fuchsia::test]
    fn malloc_free() {
        const ALLOC_SIZE: usize = 25;
        let buf = malloc(ALLOC_SIZE, 0);
        assert_ne!(buf, std::ptr::null_mut());

        unsafe { free(buf) };
    }

    // Small allocations must come back zeroed, including when the allocator
    // reuses a chunk that a previous allocation dirtied.
    #[fuchsia::test]
    fn small_malloc_zeroed() {
        const ALLOC_SIZE: usize = 25;
        let buf = malloc(ALLOC_SIZE, 0);
        assert_ne!(buf, std::ptr::null_mut());

        check_memory_contains(buf, 0, ALLOC_SIZE);

        // Fill memory and then free/malloc to check that if the memory is reused for the
        // new allocation that it's still zeroed out.
        fill_memory(buf, 7u8, ALLOC_SIZE);
        unsafe { free(buf) };
        let buf = malloc(ALLOC_SIZE, 0);
        assert_ne!(buf, std::ptr::null_mut());
        check_memory_contains(buf, 0, ALLOC_SIZE);
        unsafe { free(buf) };
    }

    // Same as small_malloc_zeroed but sized past 1 MiB, which allocators
    // commonly service via a different (large-allocation) path.
    #[fuchsia::test]
    fn large_malloc_zeroed() {
        const ALLOC_SIZE: usize = 1024 * 1024 + 1;
        let buf = malloc(ALLOC_SIZE, 0);
        assert_ne!(buf, std::ptr::null_mut());
        check_memory_contains(buf, 0, ALLOC_SIZE);
        fill_memory(buf, 7u8, ALLOC_SIZE);
        unsafe { free(buf) };
        // NOTE(review): this second allocation is not null-checked before use,
        // unlike the one above — consider adding an assert_ne here.
        let buf = malloc(ALLOC_SIZE, 0);
        check_memory_contains(buf, 0, ALLOC_SIZE);
        unsafe { free(buf) };
    }

    // Growing an allocation must preserve the old contents and (per the
    // zero_contents configuration) hand back a zeroed tail.
    #[fuchsia::test]
    fn realloc_grow() {
        let buf = malloc(5, 0);
        assert_ne!(buf, std::ptr::null_mut());
        fill_memory(buf, 7u8, 5);
        let realloced_buf = unsafe { realloc(buf, 5 * 1024) };
        assert_ne!(realloced_buf as usize, 0);
        check_memory_contains(realloced_buf, 7u8, 5);
        check_memory_contains(unsafe { realloced_buf.byte_offset(5) }, 0, 5 * 1024 - 5);

        unsafe { free(realloced_buf) };
    }

    // Shrinking an allocation must preserve the surviving prefix.
    #[fuchsia::test]
    fn realloc_shrink() {
        let buf = malloc(5 * 1024, 0);
        assert_ne!(buf, std::ptr::null_mut());
        fill_memory(buf, 7u8, 5 * 1024);
        let realloced_buf = unsafe { realloc(buf, 5) };
        assert_ne!(realloced_buf, std::ptr::null_mut());
        check_memory_contains(realloced_buf, 7, 5);
        unsafe {
            free(realloced_buf);
        }
    }

    // See note at top of implementation.
    #[fuchsia::test]
    fn mem_move_size_zero() {
        // A zero-length move must be a no-op even when one or both pointers
        // are null.
        mem_move(std::ptr::null_mut(), std::ptr::null_mut(), 0);

        let nonnull = malloc(10, 0);
        mem_move(nonnull, std::ptr::null_mut(), 0);
        mem_move(std::ptr::null_mut(), nonnull, 0);

        unsafe { free(nonnull) };
    }

    // Overlapping source and destination must behave like memmove, not memcpy.
    #[fuchsia::test]
    fn mem_move_overlap() {
        // Allocate a buffer 12 bytes long and initialize the first 8 elements, then move 8 bytes from
        // the start of the buffer to an offset of 4 from the start.
        let buf = malloc(12, 0);
        assert_ne!(buf, std::ptr::null_mut());
        fill_memory(buf, 1, 4);
        fill_memory(unsafe { buf.byte_offset(4) }, 2, 4);
        let dest = unsafe { buf.byte_offset(4) };
        mem_move(dest, buf, 8);
        check_memory_contains(buf, 1, 4);
        check_memory_contains(unsafe { buf.byte_offset(4) }, 1, 4);
        check_memory_contains(unsafe { buf.byte_offset(8) }, 2, 4);
        unsafe { free(buf) };
    }

    #[fuchsia::test]
    fn mem_fill_nonzero() {
        let buf = malloc(5, 0);
        assert_ne!(buf, std::ptr::null_mut());
        mem_fill(buf, 0x42, 5);
        check_memory_contains(buf, 0x42, 5);
        unsafe { free(buf) };
    }

    // Exercises the -1/0/1 memcmp-style sign convention, including a pair
    // that is equal over a prefix but differs at a later byte.
    #[fuchsia::test]
    fn mem_compare_tests() {
        let a = &mut [1u8, 2, 3];
        let b = &mut [2u8, 2, 3];
        let c = &mut [2u8, 2, 4];
        let a_ptr = a.as_mut_ptr() as *mut c_void;
        let b_ptr = b.as_mut_ptr() as *mut c_void;
        let c_ptr = c.as_mut_ptr() as *mut c_void;

        assert_eq!(mem_compare(a_ptr, a_ptr, 1), 0);
        assert_eq!(mem_compare(a_ptr, a_ptr, 3), 0);
        assert_eq!(mem_compare(a_ptr, b_ptr, 1), -1);
        assert_eq!(mem_compare(b_ptr, a_ptr, 1), 1);
        assert_eq!(mem_compare(b_ptr, c_ptr, 2), 0);
        assert_eq!(mem_compare(b_ptr, c_ptr, 3), -1);
    }

    // Pins the containment semantics, in particular that an empty check range
    // is contained no matter where it sits.
    #[fuchsia::test]
    fn test_range_contains() {
        let empty_range = 0..0;
        assert_eq!(range_contains(&empty_range, &(0..0)), true);
        assert_eq!(range_contains(&empty_range, &(1..1)), true);

        let range = 1..5;
        // Fits range exactly.
        assert_eq!(range_contains(&range, &(1..5)), true);
        // Too long.
        assert_eq!(range_contains(&range, &(1..6)), false);
        // Starts too early.
        assert_eq!(range_contains(&range, &(0..5)), false);
        // Empty length outside the range.
        assert_eq!(range_contains(&range, &(0..0)), true);
        // Empty length inside the range.
        assert_eq!(range_contains(&range, &(2..2)), true);
    }
}