use std::mem::MaybeUninit;
use std::ops::Range;
use zerocopy::FromBytes;
use zx::{AsHandleRef, HandleBased, Task};
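// Fault-recoverable routines implemented outside of Rust (in assembly). Each
// routine is paired with an `_end` marker symbol so the exception-handler
// thread below can tell whether a faulting pc lies inside it:
//
// * The `hermetic_*` routines copy or zero byte ranges and return the first
//   address that was *not* processed; on a fault they are resumed at
//   `hermetic_copy_error` with the faulting address as the return value.
// * The `atomic_*` routines operate on a `u32` and return a `u64` whose upper
//   32 bits are zero on success; on a fault they are resumed at `atomic_error`
//   with `ATOMIC_ERROR_MASK` as the return value.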
extern "C" {
fn hermetic_copy(dest: *mut u8, source: *const u8, len: usize, ret_dest: bool) -> usize;
fn hermetic_copy_end();
fn hermetic_copy_until_null_byte(
dest: *mut u8,
source: *const u8,
len: usize,
ret_dest: bool,
) -> usize;
fn hermetic_copy_until_null_byte_end();
fn hermetic_zero(dest: *mut u8, len: usize) -> usize;
fn hermetic_zero_end();
fn hermetic_copy_error();
fn atomic_error();
fn atomic_load_u32_relaxed(addr: usize) -> u64;
fn atomic_load_u32_relaxed_end();
fn atomic_load_u32_acquire(addr: usize) -> u64;
fn atomic_load_u32_acquire_end();
fn atomic_store_u32_relaxed(addr: usize, value: u32) -> u64;
fn atomic_store_u32_relaxed_end();
fn atomic_store_u32_release(addr: usize, value: u32) -> u64;
fn atomic_store_u32_release_end();
fn atomic_compare_exchange_u32_acq_rel(addr: usize, expected: *mut u32, desired: u32) -> u64;
fn atomic_compare_exchange_u32_acq_rel_end();
fn atomic_compare_exchange_weak_u32_acq_rel(
addr: usize,
expected: *mut u32,
desired: u32,
) -> u64;
fn atomic_compare_exchange_weak_u32_acq_rel_end();
}
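/// Reinterprets an initialized `&mut [T]` as `&mut [MaybeUninit<T>]`.
///
/// The two slice types have identical layout, but callers must not write
/// uninitialized values through the returned slice, since the original
/// `&mut [T]` remains usable afterwards.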
pub fn slice_to_maybe_uninit_mut<T>(slice: &mut [T]) -> &mut [MaybeUninit<T>] {
let ptr = slice.as_mut_ptr();
let ptr = ptr as *mut MaybeUninit<T>;
unsafe { std::slice::from_raw_parts_mut(ptr, slice.len()) }
}
type HermeticCopyFn =
unsafe extern "C" fn(dest: *mut u8, source: *const u8, len: usize, ret_dest: bool) -> usize;
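/// Provides fault-tolerant copies and atomic accesses on a restricted ("user")
/// address range.
///
/// `Usercopy` spawns a thread that listens for fatal page-fault exceptions on
/// the default job. When a fault lands inside one of the hermetic copy/atomic
/// routines declared above and the faulting address is within
/// `restricted_address_range`, the faulting thread is redirected to an error
/// landing pad instead of crashing, and the partial progress is reported back
/// to the caller.
///
/// A minimal usage sketch, adapted from the tests in this file (the VMO
/// mapping here is illustrative, not required by the API):
///
/// ```ignore
/// let page_size = zx::system_get_page_size() as usize;
/// let vmo = zx::Vmo::create(page_size as u64).unwrap();
/// let addr = fuchsia_runtime::vmar_root_self()
///     .map(0, &vmo, 0, page_size, zx::VmarFlags::PERM_READ | zx::VmarFlags::PERM_WRITE)
///     .unwrap();
/// let usercopy = Usercopy::new(addr..addr + page_size).unwrap();
/// // Copies stop early (instead of faulting) if they run off the covered range.
/// assert_eq!(usercopy.copyout(b"hello", addr), 5);
/// ```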
#[derive(Debug)]
pub struct Usercopy {
shutdown_event: zx::Event,
join_handle: Option<std::thread::JoinHandle<()>>,
restricted_address_range: Range<usize>,
}
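/// Extracts the program counter and the faulting address from the faulting
/// thread's registers and exception report for the current architecture.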
fn parse_fault_exception(
regs: &mut zx::sys::zx_thread_state_general_regs_t,
report: zx::ExceptionReport,
) -> (usize, usize) {
#[cfg(target_arch = "x86_64")]
{
let pc = regs.rip as usize;
let fault_address = report.arch.cr2;
(pc, fault_address as usize)
}
#[cfg(target_arch = "aarch64")]
{
let pc = regs.pc as usize;
let fault_address = report.arch.far;
(pc, fault_address as usize)
}
#[cfg(target_arch = "riscv64")]
{
let pc = regs.pc as usize;
let fault_address = report.arch.tval;
(pc, fault_address as usize)
}
}
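/// Redirects the faulting thread to `hermetic_copy_error`, placing the
/// faulting address in the return-value register so the interrupted routine
/// reports how far it got.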
fn set_registers_for_hermetic_error(
regs: &mut zx::sys::zx_thread_state_general_regs_t,
fault_address: usize,
) {
#[cfg(target_arch = "x86_64")]
{
regs.rip = hermetic_copy_error as u64;
regs.rax = fault_address as u64;
}
#[cfg(target_arch = "aarch64")]
{
regs.pc = hermetic_copy_error as u64;
regs.r[0] = fault_address as u64;
}
#[cfg(target_arch = "riscv64")]
{
regs.pc = hermetic_copy_error as u64;
regs.a0 = fault_address as u64;
}
}
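/// Set in the upper half of an atomic routine's `u64` return value to signal a
/// fault; a successful `u32` result always leaves these bits clear.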
const ATOMIC_ERROR_MASK: u64 = 0xFFFFFFFF00000000;
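/// Redirects the faulting thread to `atomic_error` with an error value in the
/// return-value register.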
fn set_registers_for_atomic_error(regs: &mut zx::sys::zx_thread_state_general_regs_t) {
#[cfg(target_arch = "x86_64")]
{
regs.rax = ATOMIC_ERROR_MASK;
regs.rip = atomic_error as u64;
}
#[cfg(target_arch = "aarch64")]
{
regs.r[0] = ATOMIC_ERROR_MASK;
regs.pc = atomic_error as u64;
}
#[cfg(target_arch = "riscv64")]
{
regs.a0 = ATOMIC_ERROR_MASK;
regs.pc = atomic_error as u64;
}
}
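/// Splits `buf` into an initialized prefix of `initialized_until` bytes and
/// the remaining possibly-uninitialized tail.
///
/// # Safety
///
/// The first `initialized_until` bytes of `buf` must have been initialized.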
unsafe fn assume_initialized_until(
buf: &mut [MaybeUninit<u8>],
initialized_until: usize,
) -> (&mut [u8], &mut [MaybeUninit<u8>]) {
let (init_bytes, uninit_bytes) = buf.split_at_mut(initialized_until);
debug_assert_eq!(init_bytes.len(), initialized_until);
let init_bytes =
std::slice::from_raw_parts_mut(init_bytes.as_mut_ptr() as *mut u8, init_bytes.len());
(init_bytes, uninit_bytes)
}
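/// Runs a hermetic copy routine and converts its "first unprocessed address"
/// return value into a byte count, measured against `dest` or `source`
/// depending on `ret_dest`.
///
/// # Safety
///
/// `f` must be one of the fault-recoverable copy routines declared above, and
/// any address that may fault must be covered by the running fault handler.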
unsafe fn do_hermetic_copy(
f: HermeticCopyFn,
dest: usize,
source: usize,
count: usize,
ret_dest: bool,
) -> usize {
let unread_address = unsafe { f(dest as *mut u8, source as *const u8, count, ret_dest) };
let ret_base = if ret_dest { dest } else { source };
debug_assert!(
unread_address >= ret_base,
"unread_address={:#x}, ret_base={:#x}",
unread_address,
ret_base,
);
let copied = unread_address - ret_base;
debug_assert!(
copied <= count,
"copied={}, count={}; unread_address={:#x}, ret_base={:#x}",
copied,
count,
unread_address,
ret_base,
);
copied
}
impl Usercopy {
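    /// Creates a `Usercopy` that recovers from page faults on addresses within
    /// `restricted_address_range`.
    ///
    /// Spawns a thread that listens on the default job's exception channel and
    /// fixes up fatal page faults raised by the hermetic copy and atomic
    /// routines. Returns an error if the exception channel cannot be created.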
pub fn new(restricted_address_range: Range<usize>) -> Result<Self, zx::Status> {
let hermetic_copy_addr_range =
hermetic_copy as *const () as usize..hermetic_copy_end as *const () as usize;
let hermetic_copy_until_null_byte_addr_range = hermetic_copy_until_null_byte as *const ()
as usize
..hermetic_copy_until_null_byte_end as *const () as usize;
let hermetic_zero_addr_range =
hermetic_zero as *const () as usize..hermetic_zero_end as *const () as usize;
let atomic_load_relaxed_range = atomic_load_u32_relaxed as *const () as usize
..atomic_load_u32_relaxed_end as *const () as usize;
let atomic_load_acquire_range = atomic_load_u32_acquire as *const () as usize
..atomic_load_u32_acquire_end as *const () as usize;
let atomic_store_relaxed_range = atomic_store_u32_relaxed as *const () as usize
..atomic_store_u32_relaxed_end as *const () as usize;
let atomic_store_release_range = atomic_store_u32_release as *const () as usize
..atomic_store_u32_release_end as *const () as usize;
let atomic_compare_exchange_range = atomic_compare_exchange_u32_acq_rel as *const ()
as usize
..atomic_compare_exchange_u32_acq_rel_end as *const () as usize;
let atomic_compare_exchange_weak_range = atomic_compare_exchange_weak_u32_acq_rel
as *const () as usize
..atomic_compare_exchange_weak_u32_acq_rel_end as *const () as usize;
let (tx, rx) = std::sync::mpsc::channel::<zx::Status>();
let shutdown_event = zx::Event::create();
let shutdown_event_clone =
shutdown_event.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap();
let faultable_addresses = restricted_address_range.clone();
let join_handle = std::thread::spawn(move || {
let exception_channel_result =
fuchsia_runtime::job_default().create_exception_channel();
let exception_channel = match exception_channel_result {
Ok(c) => c,
Err(e) => {
let _ = tx.send(e);
return;
}
};
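            // Report initialization status to the constructor before entering
            // the handling loop.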
let _ = tx.send(zx::Status::OK);
loop {
let mut wait_items = [
zx::WaitItem {
handle: exception_channel.as_handle_ref(),
waitfor: zx::Signals::CHANNEL_READABLE,
pending: zx::Signals::empty(),
},
zx::WaitItem {
handle: shutdown_event_clone.as_handle_ref(),
waitfor: zx::Signals::USER_0,
pending: zx::Signals::empty(),
},
];
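                // Block until either an exception arrives or shutdown is requested.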
let _ = zx::object_wait_many(&mut wait_items, zx::MonotonicInstant::INFINITE);
if wait_items[1].pending == zx::Signals::USER_0 {
break;
}
let mut buf = zx::MessageBuf::new();
exception_channel.read(&mut buf).unwrap();
let excp_info = zx::sys::zx_exception_info_t::read_from_bytes(buf.bytes()).unwrap();
if excp_info.type_ != zx::sys::ZX_EXCP_FATAL_PAGE_FAULT {
continue;
}
let excp = zx::Exception::from_handle(buf.take_handle(0).unwrap());
let thread = excp.get_thread().unwrap();
let mut regs = thread.read_state_general_regs().unwrap();
let report = thread.get_exception_report().unwrap();
let (pc, fault_address) = parse_fault_exception(&mut regs, report);
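                // Only recover faults on addresses this `Usercopy` was configured to handle.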
if !faultable_addresses.contains(&fault_address) {
continue;
}
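                // Redirect the faulting thread to the error landing pad matching the
                // routine its pc is in; faults anywhere else are left to the next handler.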
if hermetic_copy_addr_range.contains(&pc)
|| hermetic_copy_until_null_byte_addr_range.contains(&pc)
|| hermetic_zero_addr_range.contains(&pc)
{
set_registers_for_hermetic_error(&mut regs, fault_address);
} else if atomic_load_relaxed_range.contains(&pc)
|| atomic_load_acquire_range.contains(&pc)
|| atomic_store_relaxed_range.contains(&pc)
|| atomic_store_release_range.contains(&pc)
|| atomic_compare_exchange_range.contains(&pc)
|| atomic_compare_exchange_weak_range.contains(&pc)
{
set_registers_for_atomic_error(&mut regs);
} else {
continue;
}
thread.write_state_general_regs(regs).unwrap();
excp.set_exception_state(&zx::sys::ZX_EXCEPTION_STATE_HANDLED).unwrap();
}
});
match rx.recv().unwrap() {
zx::Status::OK => {}
s => {
return Err(s);
}
};
Ok(Self { shutdown_event, join_handle: Some(join_handle), restricted_address_range })
}
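    /// Copies `count` bytes between `source` and `dest` using the
    /// fault-recoverable `hermetic_copy` routine, returning the number of
    /// bytes copied. `ret_dest` selects whether progress is measured against
    /// `dest` or `source`.
    ///
    /// # Safety
    ///
    /// The side of the copy that is not covered by this `Usercopy`'s
    /// restricted range must be valid for `count` bytes; only faults on
    /// addresses inside the restricted range are recovered.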
pub unsafe fn raw_hermetic_copy(
&self,
dest: *mut u8,
source: *const u8,
count: usize,
ret_dest: bool,
) -> usize {
do_hermetic_copy(hermetic_copy, dest as usize, source as usize, count, ret_dest)
}
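    /// Zeroes `count` bytes of user memory at `dest_addr`, returning the
    /// number of bytes successfully zeroed before a fault (if any).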
pub fn zero(&self, dest_addr: usize, count: usize) -> usize {
if dest_addr == 0 || !self.restricted_address_range.contains(&dest_addr) {
return 0;
}
let unset_address = unsafe { hermetic_zero(dest_addr as *mut u8, count) };
debug_assert!(
unset_address >= dest_addr,
"unset_address={:#x}, dest_addr={:#x}",
unset_address,
dest_addr,
);
let bytes_set = unset_address - dest_addr;
debug_assert!(
bytes_set <= count,
"bytes_set={}, count={}; unset_address={:#x}, dest_addr={:#x}",
bytes_set,
count,
unset_address,
dest_addr,
);
bytes_set
}
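    /// Copies `source` into user memory at `dest_addr`, returning the number
    /// of bytes written before a fault (if any).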
pub fn copyout(&self, source: &[u8], dest_addr: usize) -> usize {
if dest_addr == 0 || !self.restricted_address_range.contains(&dest_addr) {
return 0;
}
unsafe {
do_hermetic_copy(hermetic_copy, dest_addr, source.as_ptr() as usize, source.len(), true)
}
}
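    /// Copies up to `dest.len()` bytes from `source_addr` in user memory into
    /// `dest`, returning the initialized prefix that was read and the
    /// remaining uninitialized tail. A fault, or an address outside the
    /// restricted range, results in a short (possibly empty) read.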
pub fn copyin<'a>(
&self,
source_addr: usize,
dest: &'a mut [MaybeUninit<u8>],
) -> (&'a mut [u8], &'a mut [MaybeUninit<u8>]) {
let read_count =
if source_addr == 0 || !self.restricted_address_range.contains(&source_addr) {
0
} else {
unsafe {
do_hermetic_copy(
hermetic_copy,
dest.as_ptr() as usize,
source_addr,
dest.len(),
false,
)
}
};
unsafe { assume_initialized_until(dest, read_count) }
}
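    /// Like [`Usercopy::copyin`], but copying stops after a null byte has been
    /// copied; the null byte, if reached, is included in the returned
    /// initialized slice.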
pub fn copyin_until_null_byte<'a>(
&self,
source_addr: usize,
dest: &'a mut [MaybeUninit<u8>],
) -> (&'a mut [u8], &'a mut [MaybeUninit<u8>]) {
let read_count =
if source_addr == 0 || !self.restricted_address_range.contains(&source_addr) {
0
} else {
unsafe {
do_hermetic_copy(
hermetic_copy_until_null_byte,
dest.as_ptr() as usize,
source_addr,
dest.len(),
false,
)
}
};
unsafe { assume_initialized_until(dest, read_count) }
}
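    /// Loads a `u32` from user memory via `load_fn`; the upper half of the
    /// returned `u64` is non-zero if the access faulted.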
#[inline]
fn atomic_load_u32(
&self,
load_fn: unsafe extern "C" fn(usize) -> u64,
addr: usize,
) -> Result<u32, ()> {
let value_or_error = unsafe { load_fn(addr) };
if value_or_error & ATOMIC_ERROR_MASK == 0 {
Ok(value_or_error as u32)
} else {
Err(())
}
}
pub fn atomic_load_u32_relaxed(&self, addr: usize) -> Result<u32, ()> {
self.atomic_load_u32(atomic_load_u32_relaxed, addr)
}
pub fn atomic_load_u32_acquire(&self, addr: usize) -> Result<u32, ()> {
self.atomic_load_u32(atomic_load_u32_acquire, addr)
}
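    /// Stores `value` to user memory via `store_fn`; a non-zero return value
    /// indicates a fault.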
fn atomic_store_u32(
&self,
store_fn: unsafe extern "C" fn(usize, u32) -> u64,
addr: usize,
value: u32,
) -> Result<(), ()> {
match unsafe { store_fn(addr, value) } {
0 => Ok(()),
_ => Err(()),
}
}
pub fn atomic_store_u32_relaxed(&self, addr: usize, value: u32) -> Result<(), ()> {
self.atomic_store_u32(atomic_store_u32_relaxed, addr, value)
}
pub fn atomic_store_u32_release(&self, addr: usize, value: u32) -> Result<(), ()> {
self.atomic_store_u32(atomic_store_u32_release, addr, value)
}
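    /// Atomically compares and exchanges the `u32` at `addr` in user memory
    /// with acquire/release ordering.
    ///
    /// Returns `Ok(Ok(old))` when the exchange succeeded, `Ok(Err(actual))`
    /// when the current value did not match `expected`, and `Err(())` if the
    /// access faulted. A short sketch of the expected behaviour, adapted from
    /// the tests below (`usercopy` covers `addr`, which holds `0x12345678`):
    ///
    /// ```ignore
    /// assert_eq!(
    ///     usercopy.atomic_compare_exchange_u32_acq_rel(addr, 0x12345678, 0xffffffff),
    ///     Ok(Ok(0x12345678)),
    /// );
    /// assert_eq!(
    ///     usercopy.atomic_compare_exchange_u32_acq_rel(addr, 0x22222222, 0x11111111),
    ///     Ok(Err(0xffffffff)),
    /// );
    /// ```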
pub fn atomic_compare_exchange_u32_acq_rel(
&self,
addr: usize,
expected: u32,
desired: u32,
) -> Result<Result<u32, u32>, ()> {
let mut expected = expected;
let value_or_error = unsafe {
atomic_compare_exchange_u32_acq_rel(addr, &mut expected as *mut u32, desired)
};
Self::parse_compare_exchange_result(expected, value_or_error)
}
pub fn atomic_compare_exchange_weak_u32_acq_rel(
&self,
addr: usize,
expected: u32,
desired: u32,
) -> Result<Result<u32, u32>, ()> {
let mut expected = expected;
let value_or_error = unsafe {
atomic_compare_exchange_weak_u32_acq_rel(addr, &mut expected as *mut u32, desired)
};
Self::parse_compare_exchange_result(expected, value_or_error)
}
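    /// Decodes an atomic compare-exchange return value: 1 means the exchange
    /// happened, 0 means the comparison failed (with the observed value left
    /// in `expected`), and anything else indicates a fault.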
fn parse_compare_exchange_result(
expected: u32,
value_or_error: u64,
) -> Result<Result<u32, u32>, ()> {
match value_or_error {
0 => Ok(Err(expected)),
1 => Ok(Ok(expected)),
_ => Err(()),
}
}
}
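// Signals the exception-handler thread to shut down and waits for it to exit.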
impl Drop for Usercopy {
fn drop(&mut self) {
self.shutdown_event.signal_handle(zx::Signals::empty(), zx::Signals::USER_0).unwrap();
self.join_handle.take().unwrap().join().unwrap();
}
}
#[cfg(test)]
mod test {
use super::*;
use test_case::test_case;
impl Usercopy {
fn new_for_test(restricted_address_range: Range<usize>) -> Self {
Self::new(restricted_address_range).unwrap()
}
}
#[test_case(0, 0)]
#[test_case(1, 1)]
#[test_case(7, 2)]
#[test_case(8, 3)]
#[test_case(9, 4)]
#[test_case(128, 5)]
#[test_case(zx::system_get_page_size() as usize - 1, 6)]
#[test_case(zx::system_get_page_size() as usize, 7)]
#[::fuchsia::test]
fn zero_no_fault(zero_len: usize, ch: u8) {
let page_size = zx::system_get_page_size() as usize;
let dest_vmo = zx::Vmo::create(page_size as u64).unwrap();
let root_vmar = fuchsia_runtime::vmar_root_self();
let mapped_addr = root_vmar
.map(0, &dest_vmo, 0, page_size, zx::VmarFlags::PERM_READ | zx::VmarFlags::PERM_WRITE)
.unwrap();
let mapped_bytes =
unsafe { std::slice::from_raw_parts_mut(mapped_addr as *mut u8, page_size) };
mapped_bytes.fill(ch);
let usercopy = Usercopy::new_for_test(mapped_addr..mapped_addr + page_size);
let result = usercopy.zero(mapped_addr, zero_len);
assert_eq!(result, zero_len);
assert_eq!(&mapped_bytes[..zero_len], &vec![0; zero_len]);
assert_eq!(&mapped_bytes[zero_len..], &vec![ch; page_size - zero_len]);
}
#[test_case(1, 2, 0)]
#[test_case(1, 4, 1)]
#[test_case(1, 8, 2)]
#[test_case(1, 16, 3)]
#[test_case(1, 32, 4)]
#[test_case(1, 64, 5)]
#[test_case(1, 128, 6)]
#[test_case(1, 256, 7)]
#[test_case(1, 512, 8)]
#[test_case(1, 1024, 9)]
#[test_case(32, 64, 10)]
#[test_case(32, 128, 11)]
#[test_case(32, 256, 12)]
#[test_case(32, 512, 13)]
#[test_case(32, 1024, 14)]
#[::fuchsia::test]
fn zero_fault(offset: usize, zero_len: usize, ch: u8) {
let page_size = zx::system_get_page_size() as usize;
let dest_vmo = zx::Vmo::create(page_size as u64).unwrap();
let root_vmar = fuchsia_runtime::vmar_root_self();
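        // Map two pages backed by a single-page VMO so that touching the
        // second page faults.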
let mapped_addr = root_vmar
.map(
0,
&dest_vmo,
0,
page_size * 2,
zx::VmarFlags::PERM_READ | zx::VmarFlags::PERM_WRITE,
)
.unwrap();
let mapped_bytes =
unsafe { std::slice::from_raw_parts_mut(mapped_addr as *mut u8, page_size) };
mapped_bytes.fill(ch);
let usercopy = Usercopy::new_for_test(mapped_addr..mapped_addr + page_size * 2);
let dest_addr = mapped_addr + page_size - offset;
let result = usercopy.zero(dest_addr, zero_len);
assert_eq!(result, offset);
assert_eq!(&mapped_bytes[page_size - offset..], &vec![0; offset][..]);
assert_eq!(&mapped_bytes[..page_size - offset], &vec![ch; page_size - offset][..]);
}
#[test_case(0)]
#[test_case(1)]
#[test_case(7)]
#[test_case(8)]
#[test_case(9)]
#[test_case(128)]
#[test_case(zx::system_get_page_size() as usize - 1)]
#[test_case(zx::system_get_page_size() as usize)]
#[::fuchsia::test]
fn copyout_no_fault(buf_len: usize) {
let page_size = zx::system_get_page_size() as usize;
let source = vec!['a' as u8; buf_len];
let dest_vmo = zx::Vmo::create(page_size as u64).unwrap();
let root_vmar = fuchsia_runtime::vmar_root_self();
let mapped_addr = root_vmar
.map(0, &dest_vmo, 0, page_size, zx::VmarFlags::PERM_READ | zx::VmarFlags::PERM_WRITE)
.unwrap();
let usercopy = Usercopy::new_for_test(mapped_addr..mapped_addr + page_size);
let result = usercopy.copyout(&source, mapped_addr);
assert_eq!(result, buf_len);
assert_eq!(
unsafe { std::slice::from_raw_parts(mapped_addr as *const u8, buf_len) },
&vec!['a' as u8; buf_len]
);
}
#[test_case(1, 2)]
#[test_case(1, 4)]
#[test_case(1, 8)]
#[test_case(1, 16)]
#[test_case(1, 32)]
#[test_case(1, 64)]
#[test_case(1, 128)]
#[test_case(1, 256)]
#[test_case(1, 512)]
#[test_case(1, 1024)]
#[test_case(32, 64)]
#[test_case(32, 128)]
#[test_case(32, 256)]
#[test_case(32, 512)]
#[test_case(32, 1024)]
#[::fuchsia::test]
fn copyout_fault(offset: usize, buf_len: usize) {
let page_size = zx::system_get_page_size() as usize;
let source = vec!['a' as u8; buf_len];
let dest_vmo = zx::Vmo::create(page_size as u64).unwrap();
let root_vmar = fuchsia_runtime::vmar_root_self();
let mapped_addr = root_vmar
.map(
0,
&dest_vmo,
0,
page_size * 2,
zx::VmarFlags::PERM_READ | zx::VmarFlags::PERM_WRITE,
)
.unwrap();
let usercopy = Usercopy::new_for_test(mapped_addr..mapped_addr + page_size * 2);
let dest_addr = mapped_addr + page_size - offset;
let result = usercopy.copyout(&source, dest_addr);
assert_eq!(result, offset);
assert_eq!(
unsafe { std::slice::from_raw_parts(dest_addr as *const u8, offset) },
&vec!['a' as u8; offset][..],
);
}
#[test_case(0)]
#[test_case(1)]
#[test_case(7)]
#[test_case(8)]
#[test_case(9)]
#[test_case(128)]
#[test_case(zx::system_get_page_size() as usize - 1)]
#[test_case(zx::system_get_page_size() as usize)]
#[::fuchsia::test]
fn copyin_no_fault(buf_len: usize) {
let page_size = zx::system_get_page_size() as usize;
let mut dest = Vec::with_capacity(buf_len);
let source_vmo = zx::Vmo::create(page_size as u64).unwrap();
let root_vmar = fuchsia_runtime::vmar_root_self();
let mapped_addr = root_vmar
.map(0, &source_vmo, 0, page_size, zx::VmarFlags::PERM_READ | zx::VmarFlags::PERM_WRITE)
.unwrap();
unsafe { std::slice::from_raw_parts_mut(mapped_addr as *mut u8, buf_len) }.fill('a' as u8);
let usercopy = Usercopy::new_for_test(mapped_addr..mapped_addr + page_size);
let dest_as_mut_ptr = dest.as_mut_ptr();
let (read_bytes, unread_bytes) = usercopy.copyin(mapped_addr, dest.spare_capacity_mut());
let expected = vec!['a' as u8; buf_len];
assert_eq!(read_bytes, &expected);
assert_eq!(unread_bytes.len(), 0);
assert_eq!(read_bytes.as_mut_ptr(), dest_as_mut_ptr);
unsafe { dest.set_len(buf_len) }
assert_eq!(dest, expected);
}
#[test_case(1, 2)]
#[test_case(1, 4)]
#[test_case(1, 8)]
#[test_case(1, 16)]
#[test_case(1, 32)]
#[test_case(1, 64)]
#[test_case(1, 128)]
#[test_case(1, 256)]
#[test_case(1, 512)]
#[test_case(1, 1024)]
#[test_case(32, 64)]
#[test_case(32, 128)]
#[test_case(32, 256)]
#[test_case(32, 512)]
#[test_case(32, 1024)]
#[::fuchsia::test]
fn copyin_fault(offset: usize, buf_len: usize) {
let page_size = zx::system_get_page_size() as usize;
let mut dest = vec![0u8; buf_len];
let source_vmo = zx::Vmo::create(page_size as u64).unwrap();
let root_vmar = fuchsia_runtime::vmar_root_self();
let mapped_addr = root_vmar
.map(
0,
&source_vmo,
0,
page_size * 2,
zx::VmarFlags::PERM_READ | zx::VmarFlags::PERM_WRITE,
)
.unwrap();
let source_addr = mapped_addr + page_size - offset;
unsafe { std::slice::from_raw_parts_mut(source_addr as *mut u8, offset) }.fill('a' as u8);
let usercopy = Usercopy::new_for_test(mapped_addr..mapped_addr + page_size * 2);
let (read_bytes, unread_bytes) =
usercopy.copyin(source_addr, slice_to_maybe_uninit_mut(&mut dest));
let expected_copied = vec!['a' as u8; offset];
let expected_uncopied = vec![0 as u8; buf_len - offset];
assert_eq!(read_bytes, &expected_copied);
assert_eq!(unread_bytes.len(), expected_uncopied.len());
assert_eq!(&dest[0..offset], &expected_copied);
assert_eq!(&dest[offset..], &expected_uncopied);
}
#[test_case(0)]
#[test_case(1)]
#[test_case(7)]
#[test_case(8)]
#[test_case(9)]
#[test_case(128)]
#[test_case(zx::system_get_page_size() as usize - 1)]
#[test_case(zx::system_get_page_size() as usize)]
#[::fuchsia::test]
fn copyin_until_null_byte_no_fault(buf_len: usize) {
let page_size = zx::system_get_page_size() as usize;
let mut dest = Vec::with_capacity(buf_len);
let source_vmo = zx::Vmo::create(page_size as u64).unwrap();
let root_vmar = fuchsia_runtime::vmar_root_self();
let mapped_addr = root_vmar
.map(0, &source_vmo, 0, page_size, zx::VmarFlags::PERM_READ | zx::VmarFlags::PERM_WRITE)
.unwrap();
unsafe { std::slice::from_raw_parts_mut(mapped_addr as *mut u8, buf_len) }.fill('a' as u8);
let usercopy = Usercopy::new_for_test(mapped_addr..mapped_addr + page_size);
let dest_as_mut_ptr = dest.as_mut_ptr();
let (read_bytes, unread_bytes) =
usercopy.copyin_until_null_byte(mapped_addr, dest.spare_capacity_mut());
let expected = vec!['a' as u8; buf_len];
assert_eq!(read_bytes, &expected);
assert_eq!(unread_bytes.len(), 0);
assert_eq!(read_bytes.as_mut_ptr(), dest_as_mut_ptr);
        unsafe { dest.set_len(buf_len) }
assert_eq!(dest, expected);
}
#[test_case(1, 2)]
#[test_case(1, 4)]
#[test_case(1, 8)]
#[test_case(1, 16)]
#[test_case(1, 32)]
#[test_case(1, 64)]
#[test_case(1, 128)]
#[test_case(1, 256)]
#[test_case(1, 512)]
#[test_case(1, 1024)]
#[test_case(32, 64)]
#[test_case(32, 128)]
#[test_case(32, 256)]
#[test_case(32, 512)]
#[test_case(32, 1024)]
#[::fuchsia::test]
fn copyin_until_null_byte_fault(offset: usize, buf_len: usize) {
let page_size = zx::system_get_page_size() as usize;
let mut dest = vec![0u8; buf_len];
let source_vmo = zx::Vmo::create(page_size as u64).unwrap();
let root_vmar = fuchsia_runtime::vmar_root_self();
let mapped_addr = root_vmar
.map(
0,
&source_vmo,
0,
page_size * 2,
zx::VmarFlags::PERM_READ | zx::VmarFlags::PERM_WRITE,
)
.unwrap();
let source_addr = mapped_addr + page_size - offset;
unsafe { std::slice::from_raw_parts_mut(source_addr as *mut u8, offset) }.fill('a' as u8);
let usercopy = Usercopy::new_for_test(mapped_addr..mapped_addr + page_size * 2);
let (read_bytes, unread_bytes) =
usercopy.copyin_until_null_byte(source_addr, slice_to_maybe_uninit_mut(&mut dest));
let expected_copied = vec!['a' as u8; offset];
let expected_uncopied = vec![0 as u8; buf_len - offset];
assert_eq!(read_bytes, &expected_copied);
assert_eq!(unread_bytes.len(), expected_uncopied.len());
assert_eq!(&dest[0..offset], &expected_copied);
assert_eq!(&dest[offset..], &expected_uncopied);
}
#[test_case(0)]
#[test_case(1)]
#[test_case(2)]
#[test_case(126)]
#[test_case(127)]
#[::fuchsia::test]
fn copyin_until_null_byte_no_fault_with_zero(zero_idx: usize) {
const DEST_LEN: usize = 128;
let page_size = zx::system_get_page_size() as usize;
let mut dest = vec!['b' as u8; DEST_LEN];
let source_vmo = zx::Vmo::create(page_size as u64).unwrap();
let root_vmar = fuchsia_runtime::vmar_root_self();
let mapped_addr = root_vmar
.map(0, &source_vmo, 0, page_size, zx::VmarFlags::PERM_READ | zx::VmarFlags::PERM_WRITE)
.unwrap();
{
let slice =
unsafe { std::slice::from_raw_parts_mut(mapped_addr as *mut u8, dest.len()) };
slice.fill('a' as u8);
slice[zero_idx] = 0;
};
let usercopy = Usercopy::new_for_test(mapped_addr..mapped_addr + page_size);
let (read_bytes, unread_bytes) =
usercopy.copyin_until_null_byte(mapped_addr, slice_to_maybe_uninit_mut(&mut dest));
let expected_copied_non_zero_bytes = vec!['a' as u8; zero_idx];
let expected_uncopied = vec!['b' as u8; DEST_LEN - zero_idx - 1];
assert_eq!(&read_bytes[..zero_idx], &expected_copied_non_zero_bytes);
assert_eq!(&read_bytes[zero_idx..], &[0]);
assert_eq!(unread_bytes.len(), expected_uncopied.len());
assert_eq!(&dest[..zero_idx], &expected_copied_non_zero_bytes);
assert_eq!(dest[zero_idx], 0);
assert_eq!(&dest[zero_idx + 1..], &expected_uncopied);
}
#[test_case(0..1, 0)]
#[test_case(0..1, 1)]
#[test_case(0..1, 2)]
#[test_case(5..10, 0)]
#[test_case(5..10, 1)]
#[test_case(5..10, 2)]
#[test_case(5..10, 5)]
#[test_case(5..10, 7)]
#[test_case(5..10, 10)]
#[::fuchsia::test]
fn starting_fault_address_copyin_until_null_byte(range: Range<usize>, addr: usize) {
let usercopy = Usercopy::new_for_test(range);
let mut dest = vec![0u8];
let (read_bytes, unread_bytes) =
usercopy.copyin_until_null_byte(addr, slice_to_maybe_uninit_mut(&mut dest));
assert_eq!(read_bytes, &[]);
assert_eq!(unread_bytes.len(), dest.len());
assert_eq!(dest, [0]);
}
#[test_case(0..1, 0)]
#[test_case(0..1, 1)]
#[test_case(0..1, 2)]
#[test_case(5..10, 0)]
#[test_case(5..10, 1)]
#[test_case(5..10, 2)]
#[test_case(5..10, 5)]
#[test_case(5..10, 7)]
#[test_case(5..10, 10)]
#[::fuchsia::test]
fn starting_fault_address_copyin(range: Range<usize>, addr: usize) {
let usercopy = Usercopy::new_for_test(range);
let mut dest = vec![0u8];
let (read_bytes, unread_bytes) =
usercopy.copyin(addr, slice_to_maybe_uninit_mut(&mut dest));
assert_eq!(read_bytes, &[]);
assert_eq!(unread_bytes.len(), dest.len());
assert_eq!(dest, [0]);
}
#[test_case(0..1, 0)]
#[test_case(0..1, 1)]
#[test_case(0..1, 2)]
#[test_case(5..10, 0)]
#[test_case(5..10, 1)]
#[test_case(5..10, 2)]
#[test_case(5..10, 5)]
#[test_case(5..10, 7)]
#[test_case(5..10, 10)]
#[::fuchsia::test]
fn starting_fault_address_copyout(range: Range<usize>, addr: usize) {
let usercopy = Usercopy::new_for_test(range);
let source = vec![0u8];
let result = usercopy.copyout(&source, addr);
assert_eq!(result, 0);
assert_eq!(source, [0]);
}
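    /// Test helper: maps a single page with the given flags and builds a
    /// `Usercopy` covering it; the mapping is removed on drop.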
struct MappedPageUsercopy {
usercopy: Usercopy,
addr: usize,
}
impl MappedPageUsercopy {
fn new(flags: zx::VmarFlags) -> Self {
let page_size = zx::system_get_page_size() as usize;
let vmo = zx::Vmo::create(page_size as u64).unwrap();
let root_vmar = fuchsia_runtime::vmar_root_self();
let addr = root_vmar.map(0, &vmo, 0, page_size, flags).unwrap();
let usercopy = Usercopy::new_for_test(addr..addr + page_size);
Self { usercopy, addr }
}
}
impl std::ops::Drop for MappedPageUsercopy {
fn drop(&mut self) {
let page_size = zx::system_get_page_size() as usize;
unsafe { fuchsia_runtime::vmar_root_self().unmap(self.addr, page_size) }.unwrap();
}
}
#[test_case(|usercopy, mapped_addr| usercopy.atomic_load_u32_relaxed(mapped_addr); "relaxed")]
#[test_case(|usercopy, mapped_addr| usercopy.atomic_load_u32_acquire(mapped_addr); "acquire")]
#[::fuchsia::test]
fn atomic_load_u32_no_fault(load_fn: fn(&Usercopy, usize) -> Result<u32, ()>) {
let m = MappedPageUsercopy::new(zx::VmarFlags::PERM_READ | zx::VmarFlags::PERM_WRITE);
unsafe { *(m.addr as *mut u32) = 0x12345678 };
let result = load_fn(&m.usercopy, m.addr);
assert_eq!(Ok(0x12345678), result);
}
#[test_case(|usercopy, mapped_addr| usercopy.atomic_load_u32_relaxed(mapped_addr); "relaxed")]
#[test_case(|usercopy, mapped_addr| usercopy.atomic_load_u32_acquire(mapped_addr); "acquire")]
#[::fuchsia::test]
fn atomic_load_u32_fault(load_fn: fn(&Usercopy, usize) -> Result<u32, ()>) {
let m = MappedPageUsercopy::new(zx::VmarFlags::empty());
let result = load_fn(&m.usercopy, m.addr);
assert_eq!(Err(()), result);
}
#[test_case(|usercopy, mapped_addr, val| usercopy.atomic_store_u32_relaxed(mapped_addr, val); "relaxed")]
#[test_case(|usercopy, mapped_addr, val| usercopy.atomic_store_u32_release(mapped_addr, val); "release")]
#[::fuchsia::test]
fn atomic_store_u32_no_fault(store_fn: fn(&Usercopy, usize, u32) -> Result<(), ()>) {
let m = MappedPageUsercopy::new(zx::VmarFlags::PERM_READ | zx::VmarFlags::PERM_WRITE);
assert_eq!(store_fn(&m.usercopy, m.addr, 0x12345678), Ok(()));
assert_eq!(unsafe { *(m.addr as *mut u32) }, 0x12345678);
}
#[test_case(|usercopy, mapped_addr, val| usercopy.atomic_store_u32_relaxed(mapped_addr, val); "relaxed")]
#[test_case(|usercopy, mapped_addr, val| usercopy.atomic_store_u32_release(mapped_addr, val); "release")]
#[::fuchsia::test]
fn atomic_store_u32_fault(store_fn: fn(&Usercopy, usize, u32) -> Result<(), ()>) {
let m = MappedPageUsercopy::new(zx::VmarFlags::empty());
let result = store_fn(&m.usercopy, m.addr, 0x12345678);
assert_eq!(Err(()), result);
let page_size = zx::system_get_page_size() as usize;
unsafe {
fuchsia_runtime::vmar_root_self().protect(m.addr, page_size, zx::VmarFlags::PERM_READ)
}
.unwrap();
assert_ne!(unsafe { *(m.addr as *mut u32) }, 0x12345678);
}
#[::fuchsia::test]
fn atomic_compare_exchange_u32_acq_rel_no_fault() {
let m = MappedPageUsercopy::new(zx::VmarFlags::PERM_READ | zx::VmarFlags::PERM_WRITE);
unsafe { *(m.addr as *mut u32) = 0x12345678 };
assert_eq!(
m.usercopy.atomic_compare_exchange_u32_acq_rel(m.addr, 0x12345678, 0xffffffff),
Ok(Ok(0x12345678))
);
assert_eq!(unsafe { *(m.addr as *mut u32) }, 0xffffffff);
assert_eq!(
m.usercopy.atomic_compare_exchange_u32_acq_rel(m.addr, 0x22222222, 0x11111111),
Ok(Err(0xffffffff))
);
assert_eq!(unsafe { *(m.addr as *mut u32) }, 0xffffffff);
}
#[::fuchsia::test]
fn atomic_compare_exchange_u32_acq_rel_fault() {
let m = MappedPageUsercopy::new(zx::VmarFlags::empty());
let result = m.usercopy.atomic_compare_exchange_u32_acq_rel(m.addr, 0x00000000, 0x11111111);
assert_eq!(Err(()), result);
let page_size = zx::system_get_page_size() as usize;
unsafe {
fuchsia_runtime::vmar_root_self().protect(m.addr, page_size, zx::VmarFlags::PERM_READ)
}
.unwrap();
assert_eq!(unsafe { *(m.addr as *mut u32) }, 0x00000000);
}
}