usercopy/lib.rs

1// Copyright 2023 The Fuchsia Authors
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5use std::mem::MaybeUninit;
6use std::ops::Range;
7
8use zerocopy::FromBytes;
9use zx::{AsHandleRef, HandleBased, Task};
10
11extern "C" {
12    // This function performs a data copy like `memcpy`.
13    //
14    // Returns the last accessed destination address when `ret_dest` is `true`,
15    // or the last accessed source address when `ret_dest` is `false`.
16    fn hermetic_copy(dest: *mut u8, source: *const u8, len: usize, ret_dest: bool) -> usize;
17    fn hermetic_copy_end();
18
19    // Performs a data copy like `strncpy`.
20    //
21    // Returns the last accessed destination address when `ret_dest` is `true`,
22    // or the last accessed source address when `ret_dest` is `false`.
23    fn hermetic_copy_until_null_byte(
24        dest: *mut u8,
25        source: *const u8,
26        len: usize,
27        ret_dest: bool,
28    ) -> usize;
29    fn hermetic_copy_until_null_byte_end();
30
31    // This function performs a `memset` to 0.
32    //
33    // Returns the last accessed destination address.
34    fn hermetic_zero(dest: *mut u8, len: usize) -> usize;
35    fn hermetic_zero_end();
36
37    // This function generates a "return" from the usercopy routine with an error.
38    fn hermetic_copy_error();
39
40    // This function generates a "return" from an atomic routine with an error.
41    fn atomic_error();
42
43    // This performs a relaxed atomic load of a 32 bit value at `addr`.
44    // On success the loaded value will be in the lower 32 bits of the returned value and the high
45    // bits will be zero. If a fault occurred, the high bits will be one.
46    fn atomic_load_u32_relaxed(addr: usize) -> u64;
47
48    // Symbol representing the end of the atomic_load_u32_relaxed() function.
49    fn atomic_load_u32_relaxed_end();
50
51    // This performs an atomic load-acquire of a 32 bit value at `addr`.
52    // On success the loaded value will be in the lower 32 bits of the returned value and the high
53    // bits will be zero. If a fault occurred, the high bits will be one.
54    fn atomic_load_u32_acquire(addr: usize) -> u64;
55
56    // Symbol representing the end of the atomic_load_u32_acquire() function.
57    fn atomic_load_u32_acquire_end();
58
59    // This performs a relaxed atomic store of a 32 bit value to `addr`.
60    // On success zero is returned. On fault a nonzero value is returned.
61    fn atomic_store_u32_relaxed(addr: usize, value: u32) -> u64;
62
63    // Symbol representing the end of the atomic_store_u32_relaxed() function.
64    fn atomic_store_u32_relaxed_end();
65
66    // This performs an atomic store-release of a 32 bit value to `addr`.
67    // On success zero is returned. On fault a nonzero value is returned.
68    fn atomic_store_u32_release(addr: usize, value: u32) -> u64;
69
70    // Symbol representing the end of the atomic_store_u32_release() function.
71    fn atomic_store_u32_release_end();
72
73    // This performs an atomic compare-and-exchange operation of the 32 bit value at `addr`.
74    // If the operation succeeded, stores `desired` to `addr` and returns 1.
75    //
76    // If the operation failed because `addr` did not contain the value `*expected`, stores the
77    // observed value to `*expected` and returns 0.
78    //
79    // Memory ordering:
80    // On success, the read-modify-write has both acquire and release semantics.
81    // On failure, the load from 'addr' has acquire semantics.
82    //
83    // If the operation encountered a fault, the high bits of the returned value will be one.
84    fn atomic_compare_exchange_u32_acq_rel(addr: usize, expected: *mut u32, desired: u32) -> u64;
85
86    // Symbol representing the end of the atomic_compare_exchange_u32_acq_rel() function.
87    fn atomic_compare_exchange_u32_acq_rel_end();
88
89    // This performs an atomic compare-and-exchange operation of the 32 bit value at `addr`.
90    // If the operation succeeded, stores `desired` to `addr` and returns 1.
91    // If the operation failed (perhaps because `addr` did not contain the value `*expected`),
92    // stores the observed value to `*expected` and returns 0.
93    //
94    // This operation can fail spuriously.
95    //
96    // Memory ordering:
97    // On success, the read-modify-write has both acquire and release semantics.
98    // On failure, the load from 'addr' has acquire semantics.
99    //
100    // If the operation encountered a fault, the high bits of the returned value will be one.
101    fn atomic_compare_exchange_weak_u32_acq_rel(
102        addr: usize,
103        expected: *mut u32,
104        desired: u32,
105    ) -> u64;
106
107    // Symbol representing the end of the atomic_compare_exchange_weak_u32_acq_rel() function.
108    fn atomic_compare_exchange_weak_u32_acq_rel_end();
109}
110
111/// Converts a slice to an equivalent MaybeUninit slice.
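/// A minimal usage sketch (illustrative; the buffer below is made up):
///
/// ```ignore
/// let mut buf = [0u8; 16];
/// // Reinterpret the initialized bytes as `MaybeUninit<u8>` without copying.
/// let uninit = slice_to_maybe_uninit_mut(&mut buf);
/// assert_eq!(uninit.len(), 16);
/// ```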
112pub fn slice_to_maybe_uninit_mut<T>(slice: &mut [T]) -> &mut [MaybeUninit<T>] {
113    let ptr = slice.as_mut_ptr();
114    let ptr = ptr as *mut MaybeUninit<T>;
115    // SAFETY: This is effectively reinterpreting the `slice` reference as a
116    // slice of uninitialized T's. `MaybeUninit<T>` has the same layout[1] as
117    // `T` and we know the original slice is initialized; it's okay to go from
118    // initialized to maybe-uninitialized.
119    //
120    // [1]: https://doc.rust-lang.org/std/mem/union.MaybeUninit.html#layout-1
121    unsafe { std::slice::from_raw_parts_mut(ptr, slice.len()) }
122}
123
124type HermeticCopyFn =
125    unsafe extern "C" fn(dest: *mut u8, source: *const u8, len: usize, ret_dest: bool) -> usize;
126
127#[derive(Debug)]
128pub struct Usercopy {
129    // This is an event used to signal the exception handling thread to shut down.
130    shutdown_event: zx::Event,
131
132    // Handle to the exception handling thread.
133    join_handle: Option<std::thread::JoinHandle<()>>,
134
135    // The range of the restricted address space.
136    restricted_address_range: Range<usize>,
137}
138
139/// Parses a fault exception.
140///
141/// Returns `(pc, fault_address)`, where `pc` is the address of the instruction
142/// that triggered the fault and `fault_address` is the address that faulted.
143fn parse_fault_exception(
144    regs: &mut zx::sys::zx_thread_state_general_regs_t,
145    report: zx::ExceptionReport,
146) -> (usize, usize) {
147    #[cfg(target_arch = "x86_64")]
148    {
149        let pc = regs.rip as usize;
150        let fault_address = report.arch.cr2;
151
152        (pc, fault_address as usize)
153    }
154
155    #[cfg(target_arch = "aarch64")]
156    {
157        let pc = regs.pc as usize;
158        let fault_address = report.arch.far;
159
160        (pc, fault_address as usize)
161    }
162
163    #[cfg(target_arch = "riscv64")]
164    {
165        let pc = regs.pc as usize;
166        let fault_address = report.arch.tval;
167
168        (pc, fault_address as usize)
169    }
170}
171
172fn set_registers_for_hermetic_error(
173    regs: &mut zx::sys::zx_thread_state_general_regs_t,
174    fault_address: usize,
175) {
176    #[cfg(target_arch = "x86_64")]
177    {
178        regs.rip = hermetic_copy_error as u64;
179        regs.rax = fault_address as u64;
180    }
181
182    #[cfg(target_arch = "aarch64")]
183    {
184        regs.pc = hermetic_copy_error as u64;
185        regs.r[0] = fault_address as u64;
186    }
187
188    #[cfg(target_arch = "riscv64")]
189    {
190        regs.pc = hermetic_copy_error as u64;
191        regs.a0 = fault_address as u64;
192    }
193}
194
195const ATOMIC_ERROR_MASK: u64 = 0xFFFFFFFF00000000;
196
197fn set_registers_for_atomic_error(regs: &mut zx::sys::zx_thread_state_general_regs_t) {
198    #[cfg(target_arch = "x86_64")]
199    {
200        regs.rax = ATOMIC_ERROR_MASK;
201        regs.rip = atomic_error as u64;
202    }
203
204    #[cfg(target_arch = "aarch64")]
205    {
206        regs.r[0] = ATOMIC_ERROR_MASK;
207        regs.pc = atomic_error as u64;
208    }
209
210    #[cfg(target_arch = "riscv64")]
211    {
212        regs.a0 = ATOMIC_ERROR_MASK;
213        regs.pc = atomic_error as u64;
214    }
215}
216
217/// Assumes the buffer's first `initialized_until` bytes are initialized and
218/// returns the initialized and uninitialized portions.
219///
220/// # Safety
221///
222/// The caller must guarantee that `buf`'s first `initialized_until` bytes are
223/// initialized.
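/// A sketch of the intended call pattern (illustrative; `buf` and `read_count`
/// are assumed, with `read_count` coming from a copy routine that initialized
/// that prefix):
///
/// ```ignore
/// // SAFETY: the first `read_count` bytes of `buf` were just written.
/// let (read, unread) = unsafe { assume_initialized_until(buf, read_count) };
/// assert_eq!(read.len(), read_count);
/// ```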
224unsafe fn assume_initialized_until(
225    buf: &mut [MaybeUninit<u8>],
226    initialized_until: usize,
227) -> (&mut [u8], &mut [MaybeUninit<u8>]) {
228    let (init_bytes, uninit_bytes) = buf.split_at_mut(initialized_until);
229    debug_assert_eq!(init_bytes.len(), initialized_until);
230
231    let init_bytes =
232        std::slice::from_raw_parts_mut(init_bytes.as_mut_ptr() as *mut u8, init_bytes.len());
233
234    (init_bytes, uninit_bytes)
235}
236
237/// Copies bytes from the source address to the destination address using the
238/// provided copy function.
239///
240/// # Safety
241///
242/// Only one of `source`/`dest` may be an address to a buffer owned by user/restricted-mode.
243/// The other must be a valid Starnix/normal-mode buffer that will never cause a fault
244/// when the first `count` bytes are read/written.
245unsafe fn do_hermetic_copy(
246    f: HermeticCopyFn,
247    dest: usize,
248    source: usize,
249    count: usize,
250    ret_dest: bool,
251) -> usize {
252    let unread_address = unsafe { f(dest as *mut u8, source as *const u8, count, ret_dest) };
253
254    let ret_base = if ret_dest { dest } else { source };
255
256    debug_assert!(
257        unread_address >= ret_base,
258        "unread_address={:#x}, ret_base={:#x}",
259        unread_address,
260        ret_base,
261    );
262    let copied = unread_address - ret_base;
263    debug_assert!(
264        copied <= count,
265        "copied={}, count={}; unread_address={:#x}, ret_base={:#x}",
266        copied,
267        count,
268        unread_address,
269        ret_base,
270    );
271    copied
272}
273
274impl Usercopy {
275    /// Returns a new instance of `Usercopy` if a unified address space is
276    /// supported on the target architecture.
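    /// A construction sketch (illustrative; the address range below is hypothetical
    /// and would normally describe the restricted-mode portion of the address space):
    ///
    /// ```ignore
    /// let user_range = 0x0000_0000_0020_0000..0x0000_4000_0000_0000; // assumed layout
    /// let usercopy = Usercopy::new(user_range)?;
    /// ```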
277    pub fn new(restricted_address_range: Range<usize>) -> Result<Self, zx::Status> {
278        let hermetic_copy_addr_range =
279            hermetic_copy as *const () as usize..hermetic_copy_end as *const () as usize;
280
281        let hermetic_copy_until_null_byte_addr_range = hermetic_copy_until_null_byte as *const ()
282            as usize
283            ..hermetic_copy_until_null_byte_end as *const () as usize;
284
285        let hermetic_zero_addr_range =
286            hermetic_zero as *const () as usize..hermetic_zero_end as *const () as usize;
287
288        let atomic_load_relaxed_range = atomic_load_u32_relaxed as *const () as usize
289            ..atomic_load_u32_relaxed_end as *const () as usize;
290
291        let atomic_load_acquire_range = atomic_load_u32_acquire as *const () as usize
292            ..atomic_load_u32_acquire_end as *const () as usize;
293
294        let atomic_store_relaxed_range = atomic_store_u32_relaxed as *const () as usize
295            ..atomic_store_u32_relaxed_end as *const () as usize;
296
297        let atomic_store_release_range = atomic_store_u32_release as *const () as usize
298            ..atomic_store_u32_release_end as *const () as usize;
299
300        let atomic_compare_exchange_range = atomic_compare_exchange_u32_acq_rel as *const ()
301            as usize
302            ..atomic_compare_exchange_u32_acq_rel_end as *const () as usize;
303
304        let atomic_compare_exchange_weak_range = atomic_compare_exchange_weak_u32_acq_rel
305            as *const () as usize
306            ..atomic_compare_exchange_weak_u32_acq_rel_end as *const () as usize;
307
308        let (tx, rx) = std::sync::mpsc::channel::<zx::Status>();
309
310        let shutdown_event = zx::Event::create();
311        let shutdown_event_clone =
312            shutdown_event.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap();
313
314        let faultable_addresses = restricted_address_range.clone();
315        let join_handle = std::thread::spawn(move || {
316            let exception_channel_result =
317                fuchsia_runtime::job_default().create_exception_channel();
318
319            let exception_channel = match exception_channel_result {
320                Ok(c) => c,
321                Err(e) => {
322                    let _ = tx.send(e);
323                    return;
324                }
325            };
326
327            // Signal to the creating thread that the exception handler is registered.
328            let _ = tx.send(zx::Status::OK);
329
330            // loop on exceptions
331            loop {
332                let mut wait_items = [
333                    zx::WaitItem {
334                        handle: exception_channel.as_handle_ref(),
335                        waitfor: zx::Signals::CHANNEL_READABLE,
336                        pending: zx::Signals::empty(),
337                    },
338                    zx::WaitItem {
339                        handle: shutdown_event_clone.as_handle_ref(),
340                        waitfor: zx::Signals::USER_0,
341                        pending: zx::Signals::empty(),
342                    },
343                ];
344                let _ = zx::object_wait_many(&mut wait_items, zx::MonotonicInstant::INFINITE);
345                if wait_items[1].pending == zx::Signals::USER_0 {
346                    break;
347                }
348                let mut buf = zx::MessageBuf::new();
349                exception_channel.read(&mut buf).unwrap();
350
351                let excp_info = zx::sys::zx_exception_info_t::read_from_bytes(buf.bytes()).unwrap();
352
353                if excp_info.type_ != zx::sys::ZX_EXCP_FATAL_PAGE_FAULT {
354                    // Only process page faults.
355                    continue;
356                }
357
358                let excp = zx::Exception::from_handle(buf.take_handle(0).unwrap());
359                let thread = excp.get_thread().unwrap();
360                let mut regs = thread.read_state_general_regs().unwrap();
361                let report = thread.get_exception_report().unwrap();
362
363                // Get the address of the instruction that triggered the fault and
364                // the address that faulted. The registers are then set up so that
365                // execution resumes in the matching error routine
366                // (`hermetic_copy_error` or `atomic_error`) with the error value in
367                // the platform-specific return-value register.
368                //
369                // Note that even though the register values are modified below, they
370                // are not written back to the thread until the checks that follow
371                // have passed.
372                let (pc, fault_address) = parse_fault_exception(&mut regs, report);
373
374                // Only handle faults if the faulting address is within the range
375                // of faultable addresses.
376                if !faultable_addresses.contains(&fault_address) {
377                    continue;
378                }
379
380                // Only handle faults that occur within one of our usercopy routines.
381                if hermetic_copy_addr_range.contains(&pc)
382                    || hermetic_copy_until_null_byte_addr_range.contains(&pc)
383                    || hermetic_zero_addr_range.contains(&pc)
384                {
385                    set_registers_for_hermetic_error(&mut regs, fault_address);
386                } else if atomic_load_relaxed_range.contains(&pc)
387                    || atomic_load_acquire_range.contains(&pc)
388                    || atomic_store_relaxed_range.contains(&pc)
389                    || atomic_store_release_range.contains(&pc)
390                    || atomic_compare_exchange_range.contains(&pc)
391                    || atomic_compare_exchange_weak_range.contains(&pc)
392                {
393                    set_registers_for_atomic_error(&mut regs);
394                } else {
395                    continue;
396                }
397
398                thread.write_state_general_regs(regs).unwrap();
399                excp.set_exception_state(&zx::sys::ZX_EXCEPTION_STATE_HANDLED).unwrap();
400            }
401        });
402
403        match rx.recv().unwrap() {
404            zx::Status::OK => {}
405            s => {
406                return Err(s);
407            }
408        };
409
410        Ok(Self { shutdown_event, join_handle: Some(join_handle), restricted_address_range })
411    }
412
413    /// Copies bytes from the source address to the destination address.
414    ///
415    /// # Safety
416    ///
417    /// Only one of `source`/`dest` may be an address to a buffer owned by user/restricted-mode
418    /// (`ret_dest` indicates whether the user-owned buffer is `dest` when `true`).
419    /// The other must be a valid Starnix/normal-mode buffer that will never cause a fault
420    /// when the first `count` bytes are read/written.
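    /// A usage sketch (illustrative; `usercopy` and `user_addr` are assumed):
    ///
    /// ```ignore
    /// let mut local = [0u8; 64];
    /// // SAFETY: `local` is a fault-free Starnix-owned buffer; `user_addr` is the
    /// // user-mode source, so `ret_dest` is `false`.
    /// let copied = unsafe {
    ///     usercopy.raw_hermetic_copy(local.as_mut_ptr(), user_addr as *const u8, local.len(), false)
    /// };
    /// assert!(copied <= local.len());
    /// ```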
421    pub unsafe fn raw_hermetic_copy(
422        &self,
423        dest: *mut u8,
424        source: *const u8,
425        count: usize,
426        ret_dest: bool,
427    ) -> usize {
428        do_hermetic_copy(hermetic_copy, dest as usize, source as usize, count, ret_dest)
429    }
430
431    /// Zeros `count` bytes starting at `dest_addr`.
432    ///
433    /// Returns the number of bytes zeroed.
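    /// A short sketch (illustrative; `usercopy` and `user_addr` are assumed):
    ///
    /// ```ignore
    /// let zeroed = usercopy.zero(user_addr, 4096);
    /// if zeroed < 4096 {
    ///     // A fault occurred after `zeroed` bytes were written.
    /// }
    /// ```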
434    pub fn zero(&self, dest_addr: usize, count: usize) -> usize {
435        // Assumption: The address 0 is invalid and cannot be mapped.  The error encoding scheme has
436        // a collision on the value 0 - it could mean that there was a fault at the address 0 or
437        // that there was no fault. We always want to treat an attempt to zero memory at 0 as a fault.
438        if dest_addr == 0 || !self.restricted_address_range.contains(&dest_addr) {
439            return 0;
440        }
441
442        let unset_address = unsafe { hermetic_zero(dest_addr as *mut u8, count) };
443        debug_assert!(
444            unset_address >= dest_addr,
445            "unset_address={:#x}, dest_addr={:#x}",
446            unset_address,
447            dest_addr,
448        );
449        let bytes_set = unset_address - dest_addr;
450        debug_assert!(
451            bytes_set <= count,
452            "bytes_set={}, count={}; unset_address={:#x}, dest_addr={:#x}",
453            bytes_set,
454            count,
455            unset_address,
456            dest_addr,
457        );
458        bytes_set
459    }
460
461    /// Copies data from `source` to the restricted address `dest_addr`.
462    ///
463    /// Returns the number of bytes copied.
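    /// A short sketch (illustrative; `usercopy` and `user_addr` are assumed):
    ///
    /// ```ignore
    /// let data = b"hello";
    /// let copied = usercopy.copyout(data, user_addr);
    /// if copied != data.len() {
    ///     // Faulted partway; only the first `copied` bytes reached user memory.
    /// }
    /// ```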
464    pub fn copyout(&self, source: &[u8], dest_addr: usize) -> usize {
465        // Assumption: The address 0 is invalid and cannot be mapped.  The error encoding scheme has
466        // a collision on the value 0 - it could mean that there was a fault at the address 0 or
467        // that there was no fault. We always want to treat an attempt to copy to 0 as a fault.
468        if dest_addr == 0 || !self.restricted_address_range.contains(&dest_addr) {
469            return 0;
470        }
471
472        // SAFETY: `source` is a valid Starnix-owned buffer and `dest_addr` is the user-mode
473        // buffer.
474        unsafe {
475            do_hermetic_copy(hermetic_copy, dest_addr, source.as_ptr() as usize, source.len(), true)
476        }
477    }
478
479    /// Copies data from the restricted address `source_addr` to `dest`.
480    ///
481    /// Returns the read and unread bytes.
482    ///
483    /// The returned slices will always reference `dest`. Because of this, it is
484    /// guaranteed that `dest` and the returned initialized slice will have
485    /// the same address.
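    /// A short sketch (illustrative; `usercopy` and `user_addr` are assumed):
    ///
    /// ```ignore
    /// let mut buf: Vec<u8> = Vec::with_capacity(64);
    /// let (read, _unread) = usercopy.copyin(user_addr, buf.spare_capacity_mut());
    /// let read_len = read.len();
    /// // SAFETY: `copyin` initialized the first `read_len` bytes of the spare capacity.
    /// unsafe { buf.set_len(read_len) };
    /// ```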
486    pub fn copyin<'a>(
487        &self,
488        source_addr: usize,
489        dest: &'a mut [MaybeUninit<u8>],
490    ) -> (&'a mut [u8], &'a mut [MaybeUninit<u8>]) {
491        // Assumption: The address 0 is invalid and cannot be mapped.  The error encoding scheme has
492        // a collision on the value 0 - it could mean that there was a fault at the address 0 or
493        // that there was no fault. We always want to treat an attempt to copy from 0 as a fault.
494        let read_count =
495            if source_addr == 0 || !self.restricted_address_range.contains(&source_addr) {
496                0
497            } else {
498                // SAFETY: `dest` is a valid Starnix-owned buffer and `source_addr` is the user-mode
499                // buffer.
500                unsafe {
501                    do_hermetic_copy(
502                        hermetic_copy,
503                        dest.as_ptr() as usize,
504                        source_addr,
505                        dest.len(),
506                        false,
507                    )
508                }
509            };
510
511        // SAFETY: `dest`'s first `read_count` bytes are initialized.
512        unsafe { assume_initialized_until(dest, read_count) }
513    }
514
515    /// Copies data from the restricted address `source_addr` to `dest` until the
516    /// first null byte.
517    ///
518    /// Returns the read and unread bytes. The read bytes include the null byte
519    /// if present.
520    ///
521    /// The returned slices will always reference `dest`. Because of this, it is
522    /// guaranteed that `dest` and the returned initialized slice will have
523    /// the same address.
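    /// A sketch of reading a null-terminated user string (illustrative; `usercopy`
    /// and `user_addr` are assumed):
    ///
    /// ```ignore
    /// let mut buf = [0u8; 256];
    /// let (read, _unread) =
    ///     usercopy.copyin_until_null_byte(user_addr, slice_to_maybe_uninit_mut(&mut buf));
    /// // `read` includes the terminating null byte if one was found within 256 bytes.
    /// ```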
524    pub fn copyin_until_null_byte<'a>(
525        &self,
526        source_addr: usize,
527        dest: &'a mut [MaybeUninit<u8>],
528    ) -> (&'a mut [u8], &'a mut [MaybeUninit<u8>]) {
529        // Assumption: The address 0 is invalid and cannot be mapped.  The error encoding scheme has
530        // a collision on the value 0 - it could mean that there was a fault at the address 0 or
531        // that there was no fault. We always want to treat an attempt to copy from 0 as a fault.
532        let read_count =
533            if source_addr == 0 || !self.restricted_address_range.contains(&source_addr) {
534                0
535            } else {
536                // SAFETY: `dest` is a valid Starnix-owned buffer and `source_addr` is the user-mode
537                // buffer.
538                unsafe {
539                    do_hermetic_copy(
540                        hermetic_copy_until_null_byte,
541                        dest.as_ptr() as usize,
542                        source_addr,
543                        dest.len(),
544                        false,
545                    )
546                }
547            };
548
549        // SAFETY: `dest`'s first `read_count` bytes are initialized.
550        unsafe { assume_initialized_until(dest, read_count) }
551    }
552
553    #[inline]
554    fn atomic_load_u32(
555        &self,
556        load_fn: unsafe extern "C" fn(usize) -> u64,
557        addr: usize,
558    ) -> Result<u32, ()> {
559        let value_or_error = unsafe { load_fn(addr) };
560        if value_or_error & ATOMIC_ERROR_MASK == 0 {
561            Ok(value_or_error as u32)
562        } else {
563            Err(())
564        }
565    }
566
567    /// Performs a relaxed atomic load of a 32 bit value at `addr`.
568    /// `addr` must be aligned to 4 bytes.
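    /// A short sketch (illustrative; `usercopy` and the 4-byte-aligned `user_addr`
    /// are assumed):
    ///
    /// ```ignore
    /// match usercopy.atomic_load_u32_relaxed(user_addr) {
    ///     Ok(value) => { /* use the loaded value */ }
    ///     Err(()) => { /* the load faulted */ }
    /// }
    /// ```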
569    pub fn atomic_load_u32_relaxed(&self, addr: usize) -> Result<u32, ()> {
570        self.atomic_load_u32(atomic_load_u32_relaxed, addr)
571    }
572
573    /// Performs an atomic load-acquire of a 32 bit value at `addr`.
574    /// `addr` must be aligned to 4 bytes.
575    pub fn atomic_load_u32_acquire(&self, addr: usize) -> Result<u32, ()> {
576        self.atomic_load_u32(atomic_load_u32_acquire, addr)
577    }
578
579    fn atomic_store_u32(
580        &self,
581        store_fn: unsafe extern "C" fn(usize, u32) -> u64,
582        addr: usize,
583        value: u32,
584    ) -> Result<(), ()> {
585        match unsafe { store_fn(addr, value) } {
586            0 => Ok(()),
587            _ => Err(()),
588        }
589    }
590
591    /// Performs a relaxed atomic store of a 32 bit value to `addr`.
592    /// `addr` must be aligned to 4 bytes.
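    /// A short sketch (illustrative; `usercopy` and the 4-byte-aligned `user_addr`
    /// are assumed):
    ///
    /// ```ignore
    /// if usercopy.atomic_store_u32_relaxed(user_addr, 0).is_err() {
    ///     // The store faulted; `user_addr` was not writable.
    /// }
    /// ```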
593    pub fn atomic_store_u32_relaxed(&self, addr: usize, value: u32) -> Result<(), ()> {
594        self.atomic_store_u32(atomic_store_u32_relaxed, addr, value)
595    }
596
597    /// Performs an atomic store-release of a 32 bit value to `addr`.
598    /// `addr` must be aligned to 4 bytes.
599    pub fn atomic_store_u32_release(&self, addr: usize, value: u32) -> Result<(), ()> {
600        self.atomic_store_u32(atomic_store_u32_release, addr, value)
601    }
602
603    /// Performs an atomic compare-and-exchange of the 32 bit value at `addr`.
604    /// `addr` must be aligned to 4 bytes.
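    /// The nested `Result` separates faults from comparison failures. A short
    /// sketch (illustrative; `usercopy` and `user_addr` are assumed):
    ///
    /// ```ignore
    /// match usercopy.atomic_compare_exchange_u32_acq_rel(user_addr, 0, 1) {
    ///     Ok(Ok(previous)) => { /* swapped; `previous` equals the expected value */ }
    ///     Ok(Err(observed)) => { /* not swapped; `observed` is the current value */ }
    ///     Err(()) => { /* the access faulted */ }
    /// }
    /// ```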
605    pub fn atomic_compare_exchange_u32_acq_rel(
606        &self,
607        addr: usize,
608        expected: u32,
609        desired: u32,
610    ) -> Result<Result<u32, u32>, ()> {
611        let mut expected = expected;
612        let value_or_error = unsafe {
613            atomic_compare_exchange_u32_acq_rel(addr, &mut expected as *mut u32, desired)
614        };
615        Self::parse_compare_exchange_result(expected, value_or_error)
616    }
617
618    /// Performs a weak atomic compare-and-exchange of the 32 bit value at `addr`.
619    /// `addr` must be aligned to 4 bytes.
620    pub fn atomic_compare_exchange_weak_u32_acq_rel(
621        &self,
622        addr: usize,
623        expected: u32,
624        desired: u32,
625    ) -> Result<Result<u32, u32>, ()> {
626        let mut expected = expected;
627        let value_or_error = unsafe {
628            atomic_compare_exchange_weak_u32_acq_rel(addr, &mut expected as *mut u32, desired)
629        };
630        Self::parse_compare_exchange_result(expected, value_or_error)
631    }
632
633    fn parse_compare_exchange_result(
634        expected: u32,
635        value_or_error: u64,
636    ) -> Result<Result<u32, u32>, ()> {
637        match value_or_error {
638            0 => Ok(Err(expected)),
639            1 => Ok(Ok(expected)),
640            _ => Err(()),
641        }
642    }
643}
644
645impl Drop for Usercopy {
646    fn drop(&mut self) {
647        self.shutdown_event.signal_handle(zx::Signals::empty(), zx::Signals::USER_0).unwrap();
648        self.join_handle.take().unwrap().join().unwrap();
649    }
650}
651
652#[cfg(test)]
653mod test {
654    use super::*;
655
656    use test_case::test_case;
657
658    impl Usercopy {
659        fn new_for_test(restricted_address_range: Range<usize>) -> Self {
660            Self::new(restricted_address_range).unwrap()
661        }
662    }
663
664    #[test_case(0, 0)]
665    #[test_case(1, 1)]
666    #[test_case(7, 2)]
667    #[test_case(8, 3)]
668    #[test_case(9, 4)]
669    #[test_case(128, 5)]
670    #[test_case(zx::system_get_page_size() as usize - 1, 6)]
671    #[test_case(zx::system_get_page_size() as usize, 7)]
672    #[::fuchsia::test]
673    fn zero_no_fault(zero_len: usize, ch: u8) {
674        let page_size = zx::system_get_page_size() as usize;
675
676        let dest_vmo = zx::Vmo::create(page_size as u64).unwrap();
677
678        let root_vmar = fuchsia_runtime::vmar_root_self();
679
680        let mapped_addr = root_vmar
681            .map(0, &dest_vmo, 0, page_size, zx::VmarFlags::PERM_READ | zx::VmarFlags::PERM_WRITE)
682            .unwrap();
683        let mapped_bytes =
684            unsafe { std::slice::from_raw_parts_mut(mapped_addr as *mut u8, page_size) };
685        mapped_bytes.fill(ch);
686
687        let usercopy = Usercopy::new_for_test(mapped_addr..mapped_addr + page_size);
688
689        let result = usercopy.zero(mapped_addr, zero_len);
690        assert_eq!(result, zero_len);
691
692        assert_eq!(&mapped_bytes[..zero_len], &vec![0; zero_len]);
693        assert_eq!(&mapped_bytes[zero_len..], &vec![ch; page_size - zero_len]);
694    }
695
696    #[test_case(1, 2, 0)]
697    #[test_case(1, 4, 1)]
698    #[test_case(1, 8, 2)]
699    #[test_case(1, 16, 3)]
700    #[test_case(1, 32, 4)]
701    #[test_case(1, 64, 5)]
702    #[test_case(1, 128, 6)]
703    #[test_case(1, 256, 7)]
704    #[test_case(1, 512, 8)]
705    #[test_case(1, 1024, 9)]
706    #[test_case(32, 64, 10)]
707    #[test_case(32, 128, 11)]
708    #[test_case(32, 256, 12)]
709    #[test_case(32, 512, 13)]
710    #[test_case(32, 1024, 14)]
711    #[::fuchsia::test]
712    fn zero_fault(offset: usize, zero_len: usize, ch: u8) {
713        let page_size = zx::system_get_page_size() as usize;
714
715        let dest_vmo = zx::Vmo::create(page_size as u64).unwrap();
716
717        let root_vmar = fuchsia_runtime::vmar_root_self();
718
719        let mapped_addr = root_vmar
720            .map(
721                0,
722                &dest_vmo,
723                0,
724                page_size * 2,
725                zx::VmarFlags::PERM_READ | zx::VmarFlags::PERM_WRITE,
726            )
727            .unwrap();
728        let mapped_bytes =
729            unsafe { std::slice::from_raw_parts_mut(mapped_addr as *mut u8, page_size) };
730        mapped_bytes.fill(ch);
731
732        let usercopy = Usercopy::new_for_test(mapped_addr..mapped_addr + page_size * 2);
733
734        let dest_addr = mapped_addr + page_size - offset;
735
736        let result = usercopy.zero(dest_addr, zero_len);
737        assert_eq!(result, offset);
738
739        assert_eq!(&mapped_bytes[page_size - offset..], &vec![0; offset][..]);
740        assert_eq!(&mapped_bytes[..page_size - offset], &vec![ch; page_size - offset][..]);
741    }
742
743    #[test_case(0)]
744    #[test_case(1)]
745    #[test_case(7)]
746    #[test_case(8)]
747    #[test_case(9)]
748    #[test_case(128)]
749    #[test_case(zx::system_get_page_size() as usize - 1)]
750    #[test_case(zx::system_get_page_size() as usize)]
751    #[::fuchsia::test]
752    fn copyout_no_fault(buf_len: usize) {
753        let page_size = zx::system_get_page_size() as usize;
754
755        let source = vec!['a' as u8; buf_len];
756
757        let dest_vmo = zx::Vmo::create(page_size as u64).unwrap();
758
759        let root_vmar = fuchsia_runtime::vmar_root_self();
760
761        let mapped_addr = root_vmar
762            .map(0, &dest_vmo, 0, page_size, zx::VmarFlags::PERM_READ | zx::VmarFlags::PERM_WRITE)
763            .unwrap();
764
765        let usercopy = Usercopy::new_for_test(mapped_addr..mapped_addr + page_size);
766
767        let result = usercopy.copyout(&source, mapped_addr);
768        assert_eq!(result, buf_len);
769
770        assert_eq!(
771            unsafe { std::slice::from_raw_parts(mapped_addr as *const u8, buf_len) },
772            &vec!['a' as u8; buf_len]
773        );
774    }
775
776    #[test_case(1, 2)]
777    #[test_case(1, 4)]
778    #[test_case(1, 8)]
779    #[test_case(1, 16)]
780    #[test_case(1, 32)]
781    #[test_case(1, 64)]
782    #[test_case(1, 128)]
783    #[test_case(1, 256)]
784    #[test_case(1, 512)]
785    #[test_case(1, 1024)]
786    #[test_case(32, 64)]
787    #[test_case(32, 128)]
788    #[test_case(32, 256)]
789    #[test_case(32, 512)]
790    #[test_case(32, 1024)]
791    #[::fuchsia::test]
792    fn copyout_fault(offset: usize, buf_len: usize) {
793        let page_size = zx::system_get_page_size() as usize;
794
795        let source = vec!['a' as u8; buf_len];
796
797        let dest_vmo = zx::Vmo::create(page_size as u64).unwrap();
798
799        let root_vmar = fuchsia_runtime::vmar_root_self();
800
801        let mapped_addr = root_vmar
802            .map(
803                0,
804                &dest_vmo,
805                0,
806                page_size * 2,
807                zx::VmarFlags::PERM_READ | zx::VmarFlags::PERM_WRITE,
808            )
809            .unwrap();
810
811        let usercopy = Usercopy::new_for_test(mapped_addr..mapped_addr + page_size * 2);
812
813        let dest_addr = mapped_addr + page_size - offset;
814
815        let result = usercopy.copyout(&source, dest_addr);
816
817        assert_eq!(result, offset);
818
819        assert_eq!(
820            unsafe { std::slice::from_raw_parts(dest_addr as *const u8, offset) },
821            &vec!['a' as u8; offset][..],
822        );
823    }
824
825    #[test_case(0)]
826    #[test_case(1)]
827    #[test_case(7)]
828    #[test_case(8)]
829    #[test_case(9)]
830    #[test_case(128)]
831    #[test_case(zx::system_get_page_size() as usize - 1)]
832    #[test_case(zx::system_get_page_size() as usize)]
833    #[::fuchsia::test]
834    fn copyin_no_fault(buf_len: usize) {
835        let page_size = zx::system_get_page_size() as usize;
836
837        let mut dest = Vec::with_capacity(buf_len);
838
839        let source_vmo = zx::Vmo::create(page_size as u64).unwrap();
840
841        let root_vmar = fuchsia_runtime::vmar_root_self();
842
843        let mapped_addr = root_vmar
844            .map(0, &source_vmo, 0, page_size, zx::VmarFlags::PERM_READ | zx::VmarFlags::PERM_WRITE)
845            .unwrap();
846
847        unsafe { std::slice::from_raw_parts_mut(mapped_addr as *mut u8, buf_len) }.fill('a' as u8);
848
849        let usercopy = Usercopy::new_for_test(mapped_addr..mapped_addr + page_size);
850        let dest_as_mut_ptr = dest.as_mut_ptr();
851        let (read_bytes, unread_bytes) = usercopy.copyin(mapped_addr, dest.spare_capacity_mut());
852        let expected = vec!['a' as u8; buf_len];
853        assert_eq!(read_bytes, &expected);
854        assert_eq!(unread_bytes.len(), 0);
855        assert_eq!(read_bytes.as_mut_ptr(), dest_as_mut_ptr);
856
857        // SAFETY: OK because the copyin was successful.
858        unsafe { dest.set_len(buf_len) }
859        assert_eq!(dest, expected);
860    }
861
862    #[test_case(1, 2)]
863    #[test_case(1, 4)]
864    #[test_case(1, 8)]
865    #[test_case(1, 16)]
866    #[test_case(1, 32)]
867    #[test_case(1, 64)]
868    #[test_case(1, 128)]
869    #[test_case(1, 256)]
870    #[test_case(1, 512)]
871    #[test_case(1, 1024)]
872    #[test_case(32, 64)]
873    #[test_case(32, 128)]
874    #[test_case(32, 256)]
875    #[test_case(32, 512)]
876    #[test_case(32, 1024)]
877    #[::fuchsia::test]
878    fn copyin_fault(offset: usize, buf_len: usize) {
879        let page_size = zx::system_get_page_size() as usize;
880
881        let mut dest = vec![0u8; buf_len];
882
883        let source_vmo = zx::Vmo::create(page_size as u64).unwrap();
884
885        let root_vmar = fuchsia_runtime::vmar_root_self();
886
887        let mapped_addr = root_vmar
888            .map(
889                0,
890                &source_vmo,
891                0,
892                page_size * 2,
893                zx::VmarFlags::PERM_READ | zx::VmarFlags::PERM_WRITE,
894            )
895            .unwrap();
896
897        let source_addr = mapped_addr + page_size - offset;
898
899        unsafe { std::slice::from_raw_parts_mut(source_addr as *mut u8, offset) }.fill('a' as u8);
900
901        let usercopy = Usercopy::new_for_test(mapped_addr..mapped_addr + page_size * 2);
902
903        let (read_bytes, unread_bytes) =
904            usercopy.copyin(source_addr, slice_to_maybe_uninit_mut(&mut dest));
905        let expected_copied = vec!['a' as u8; offset];
906        let expected_uncopied = vec![0 as u8; buf_len - offset];
907        assert_eq!(read_bytes, &expected_copied);
908        assert_eq!(unread_bytes.len(), expected_uncopied.len());
909
910        assert_eq!(&dest[0..offset], &expected_copied);
911        assert_eq!(&dest[offset..], &expected_uncopied);
912    }
913
914    #[test_case(0)]
915    #[test_case(1)]
916    #[test_case(7)]
917    #[test_case(8)]
918    #[test_case(9)]
919    #[test_case(128)]
920    #[test_case(zx::system_get_page_size() as usize - 1)]
921    #[test_case(zx::system_get_page_size() as usize)]
922    #[::fuchsia::test]
923    fn copyin_until_null_byte_no_fault(buf_len: usize) {
924        let page_size = zx::system_get_page_size() as usize;
925
926        let mut dest = Vec::with_capacity(buf_len);
927
928        let source_vmo = zx::Vmo::create(page_size as u64).unwrap();
929
930        let root_vmar = fuchsia_runtime::vmar_root_self();
931
932        let mapped_addr = root_vmar
933            .map(0, &source_vmo, 0, page_size, zx::VmarFlags::PERM_READ | zx::VmarFlags::PERM_WRITE)
934            .unwrap();
935
936        unsafe { std::slice::from_raw_parts_mut(mapped_addr as *mut u8, buf_len) }.fill('a' as u8);
937
938        let usercopy = Usercopy::new_for_test(mapped_addr..mapped_addr + page_size);
939
940        let dest_as_mut_ptr = dest.as_mut_ptr();
941        let (read_bytes, unread_bytes) =
942            usercopy.copyin_until_null_byte(mapped_addr, dest.spare_capacity_mut());
943        let expected = vec!['a' as u8; buf_len];
944        assert_eq!(read_bytes, &expected);
945        assert_eq!(unread_bytes.len(), 0);
946        assert_eq!(read_bytes.as_mut_ptr(), dest_as_mut_ptr);
947
948        // SAFETY: OK because the copyin_until_null_byte was successful.
949        unsafe { dest.set_len(dest.capacity()) }
950        assert_eq!(dest, expected);
951    }
952
953    #[test_case(1, 2)]
954    #[test_case(1, 4)]
955    #[test_case(1, 8)]
956    #[test_case(1, 16)]
957    #[test_case(1, 32)]
958    #[test_case(1, 64)]
959    #[test_case(1, 128)]
960    #[test_case(1, 256)]
961    #[test_case(1, 512)]
962    #[test_case(1, 1024)]
963    #[test_case(32, 64)]
964    #[test_case(32, 128)]
965    #[test_case(32, 256)]
966    #[test_case(32, 512)]
967    #[test_case(32, 1024)]
968    #[::fuchsia::test]
969    fn copyin_until_null_byte_fault(offset: usize, buf_len: usize) {
970        let page_size = zx::system_get_page_size() as usize;
971
972        let mut dest = vec![0u8; buf_len];
973
974        let source_vmo = zx::Vmo::create(page_size as u64).unwrap();
975
976        let root_vmar = fuchsia_runtime::vmar_root_self();
977
978        let mapped_addr = root_vmar
979            .map(
980                0,
981                &source_vmo,
982                0,
983                page_size * 2,
984                zx::VmarFlags::PERM_READ | zx::VmarFlags::PERM_WRITE,
985            )
986            .unwrap();
987
988        let source_addr = mapped_addr + page_size - offset;
989
990        unsafe { std::slice::from_raw_parts_mut(source_addr as *mut u8, offset) }.fill('a' as u8);
991
992        let usercopy = Usercopy::new_for_test(mapped_addr..mapped_addr + page_size * 2);
993
994        let (read_bytes, unread_bytes) =
995            usercopy.copyin_until_null_byte(source_addr, slice_to_maybe_uninit_mut(&mut dest));
996        let expected_copied = vec!['a' as u8; offset];
997        let expected_uncopied = vec![0 as u8; buf_len - offset];
998        assert_eq!(read_bytes, &expected_copied);
999        assert_eq!(unread_bytes.len(), expected_uncopied.len());
1000
1001        assert_eq!(&dest[0..offset], &expected_copied);
1002        assert_eq!(&dest[offset..], &expected_uncopied);
1003    }
1004
1005    #[test_case(0)]
1006    #[test_case(1)]
1007    #[test_case(2)]
1008    #[test_case(126)]
1009    #[test_case(127)]
1010    #[::fuchsia::test]
1011    fn copyin_until_null_byte_no_fault_with_zero(zero_idx: usize) {
1012        const DEST_LEN: usize = 128;
1013
1014        let page_size = zx::system_get_page_size() as usize;
1015
1016        let mut dest = vec!['b' as u8; DEST_LEN];
1017
1018        let source_vmo = zx::Vmo::create(page_size as u64).unwrap();
1019
1020        let root_vmar = fuchsia_runtime::vmar_root_self();
1021
1022        let mapped_addr = root_vmar
1023            .map(0, &source_vmo, 0, page_size, zx::VmarFlags::PERM_READ | zx::VmarFlags::PERM_WRITE)
1024            .unwrap();
1025
1026        {
1027            let slice =
1028                unsafe { std::slice::from_raw_parts_mut(mapped_addr as *mut u8, dest.len()) };
1029            slice.fill('a' as u8);
1030            slice[zero_idx] = 0;
1031        };
1032
1033        let usercopy = Usercopy::new_for_test(mapped_addr..mapped_addr + page_size);
1034
1035        let (read_bytes, unread_bytes) =
1036            usercopy.copyin_until_null_byte(mapped_addr, slice_to_maybe_uninit_mut(&mut dest));
1037        let expected_copied_non_zero_bytes = vec!['a' as u8; zero_idx];
1038        let expected_uncopied = vec!['b' as u8; DEST_LEN - zero_idx - 1];
1039        assert_eq!(&read_bytes[..zero_idx], &expected_copied_non_zero_bytes);
1040        assert_eq!(&read_bytes[zero_idx..], &[0]);
1041        assert_eq!(unread_bytes.len(), expected_uncopied.len());
1042
1043        assert_eq!(&dest[..zero_idx], &expected_copied_non_zero_bytes);
1044        assert_eq!(dest[zero_idx], 0);
1045        assert_eq!(&dest[zero_idx + 1..], &expected_uncopied);
1046    }
1047
1048    #[test_case(0..1, 0)]
1049    #[test_case(0..1, 1)]
1050    #[test_case(0..1, 2)]
1051    #[test_case(5..10, 0)]
1052    #[test_case(5..10, 1)]
1053    #[test_case(5..10, 2)]
1054    #[test_case(5..10, 5)]
1055    #[test_case(5..10, 7)]
1056    #[test_case(5..10, 10)]
1057    #[::fuchsia::test]
1058    fn starting_fault_address_copyin_until_null_byte(range: Range<usize>, addr: usize) {
1059        let usercopy = Usercopy::new_for_test(range);
1060
1061        let mut dest = vec![0u8];
1062
1063        let (read_bytes, unread_bytes) =
1064            usercopy.copyin_until_null_byte(addr, slice_to_maybe_uninit_mut(&mut dest));
1065        assert_eq!(read_bytes, &[]);
1066        assert_eq!(unread_bytes.len(), dest.len());
1067        assert_eq!(dest, [0]);
1068    }
1069
1070    #[test_case(0..1, 0)]
1071    #[test_case(0..1, 1)]
1072    #[test_case(0..1, 2)]
1073    #[test_case(5..10, 0)]
1074    #[test_case(5..10, 1)]
1075    #[test_case(5..10, 2)]
1076    #[test_case(5..10, 5)]
1077    #[test_case(5..10, 7)]
1078    #[test_case(5..10, 10)]
1079    #[::fuchsia::test]
1080    fn starting_fault_address_copyin(range: Range<usize>, addr: usize) {
1081        let usercopy = Usercopy::new_for_test(range);
1082
1083        let mut dest = vec![0u8];
1084
1085        let (read_bytes, unread_bytes) =
1086            usercopy.copyin(addr, slice_to_maybe_uninit_mut(&mut dest));
1087        assert_eq!(read_bytes, &[]);
1088        assert_eq!(unread_bytes.len(), dest.len());
1089        assert_eq!(dest, [0]);
1090    }
1091
1092    #[test_case(0..1, 0)]
1093    #[test_case(0..1, 1)]
1094    #[test_case(0..1, 2)]
1095    #[test_case(5..10, 0)]
1096    #[test_case(5..10, 1)]
1097    #[test_case(5..10, 2)]
1098    #[test_case(5..10, 5)]
1099    #[test_case(5..10, 7)]
1100    #[test_case(5..10, 10)]
1101    #[::fuchsia::test]
1102    fn starting_fault_address_copyout(range: Range<usize>, addr: usize) {
1103        let usercopy = Usercopy::new_for_test(range);
1104
1105        let source = vec![0u8];
1106
1107        let result = usercopy.copyout(&source, addr);
1108        assert_eq!(result, 0);
1109        assert_eq!(source, [0]);
1110    }
1111    struct MappedPageUsercopy {
1112        usercopy: Usercopy,
1113        addr: usize,
1114    }
1115
1116    impl MappedPageUsercopy {
1117        fn new(flags: zx::VmarFlags) -> Self {
1118            let page_size = zx::system_get_page_size() as usize;
1119
1120            let vmo = zx::Vmo::create(page_size as u64).unwrap();
1121
1122            let root_vmar = fuchsia_runtime::vmar_root_self();
1123
1124            let addr = root_vmar.map(0, &vmo, 0, page_size, flags).unwrap();
1125
1126            let usercopy = Usercopy::new_for_test(addr..addr + page_size);
1127            Self { usercopy, addr }
1128        }
1129    }
1130
1131    impl std::ops::Drop for MappedPageUsercopy {
1132        fn drop(&mut self) {
1133            let page_size = zx::system_get_page_size() as usize;
1134
1135            unsafe { fuchsia_runtime::vmar_root_self().unmap(self.addr, page_size) }.unwrap();
1136        }
1137    }
1138
1139    #[test_case(|usercopy, mapped_addr| usercopy.atomic_load_u32_relaxed(mapped_addr); "relaxed")]
1140    #[test_case(|usercopy, mapped_addr| usercopy.atomic_load_u32_acquire(mapped_addr); "acquire")]
1141    #[::fuchsia::test]
1142    fn atomic_load_u32_no_fault(load_fn: fn(&Usercopy, usize) -> Result<u32, ()>) {
1143        let m = MappedPageUsercopy::new(zx::VmarFlags::PERM_READ | zx::VmarFlags::PERM_WRITE);
1144
1145        unsafe { *(m.addr as *mut u32) = 0x12345678 };
1146
1147        let result = load_fn(&m.usercopy, m.addr);
1148
1149        assert_eq!(Ok(0x12345678), result);
1150    }
1151
1152    #[test_case(|usercopy, mapped_addr| usercopy.atomic_load_u32_relaxed(mapped_addr); "relaxed")]
1153    #[test_case(|usercopy, mapped_addr| usercopy.atomic_load_u32_acquire(mapped_addr); "acquire")]
1154    #[::fuchsia::test]
1155    fn atomic_load_u32_fault(load_fn: fn(&Usercopy, usize) -> Result<u32, ()>) {
1156        let m = MappedPageUsercopy::new(zx::VmarFlags::empty());
1157
1158        let result = load_fn(&m.usercopy, m.addr);
1159        assert_eq!(Err(()), result);
1160    }
1161
1162    #[test_case(|usercopy, mapped_addr, val| usercopy.atomic_store_u32_relaxed(mapped_addr, val); "relaxed")]
1163    #[test_case(|usercopy, mapped_addr, val| usercopy.atomic_store_u32_release(mapped_addr, val); "release")]
1164    #[::fuchsia::test]
1165    fn atomic_store_u32_no_fault(store_fn: fn(&Usercopy, usize, u32) -> Result<(), ()>) {
1166        let m = MappedPageUsercopy::new(zx::VmarFlags::PERM_READ | zx::VmarFlags::PERM_WRITE);
1167
1168        assert_eq!(store_fn(&m.usercopy, m.addr, 0x12345678), Ok(()));
1169
1170        assert_eq!(unsafe { *(m.addr as *mut u32) }, 0x12345678);
1171    }
1172
1173    #[test_case(|usercopy, mapped_addr, val| usercopy.atomic_store_u32_relaxed(mapped_addr, val); "relaxed")]
1174    #[test_case(|usercopy, mapped_addr, val| usercopy.atomic_store_u32_release(mapped_addr, val); "release")]
1175    #[::fuchsia::test]
1176    fn atomic_store_u32_fault(store_fn: fn(&Usercopy, usize, u32) -> Result<(), ()>) {
1177        let m = MappedPageUsercopy::new(zx::VmarFlags::empty());
1178
1179        let result = store_fn(&m.usercopy, m.addr, 0x12345678);
1180        assert_eq!(Err(()), result);
1181
1182        let page_size = zx::system_get_page_size() as usize;
1183        unsafe {
1184            fuchsia_runtime::vmar_root_self().protect(m.addr, page_size, zx::VmarFlags::PERM_READ)
1185        }
1186        .unwrap();
1187
1188        assert_ne!(unsafe { *(m.addr as *mut u32) }, 0x12345678);
1189    }
1190
1191    #[::fuchsia::test]
1192    fn atomic_compare_exchange_u32_acq_rel_no_fault() {
1193        let m = MappedPageUsercopy::new(zx::VmarFlags::PERM_READ | zx::VmarFlags::PERM_WRITE);
1194
1195        unsafe { *(m.addr as *mut u32) = 0x12345678 };
1196
1197        assert_eq!(
1198            m.usercopy.atomic_compare_exchange_u32_acq_rel(m.addr, 0x12345678, 0xffffffff),
1199            Ok(Ok(0x12345678))
1200        );
1201
1202        assert_eq!(unsafe { *(m.addr as *mut u32) }, 0xffffffff);
1203
1204        assert_eq!(
1205            m.usercopy.atomic_compare_exchange_u32_acq_rel(m.addr, 0x22222222, 0x11111111),
1206            Ok(Err(0xffffffff))
1207        );
1208
1209        assert_eq!(unsafe { *(m.addr as *mut u32) }, 0xffffffff);
1210    }
1211
1212    #[::fuchsia::test]
1213    fn atomic_compare_exchange_u32_acq_rel_fault() {
1214        let m = MappedPageUsercopy::new(zx::VmarFlags::empty());
1215
1216        let result = m.usercopy.atomic_compare_exchange_u32_acq_rel(m.addr, 0x00000000, 0x11111111);
1217        assert_eq!(Err(()), result);
1218
1219        let page_size = zx::system_get_page_size() as usize;
1220        unsafe {
1221            fuchsia_runtime::vmar_root_self().protect(m.addr, page_size, zx::VmarFlags::PERM_READ)
1222        }
1223        .unwrap();
1224
1225        assert_eq!(unsafe { *(m.addr as *mut u32) }, 0x00000000);
1226    }
1227}