use std::mem::MaybeUninit;
use std::ops::Range;

use zerocopy::FromBytes;
use zx::{AsHandleRef, HandleBased, Task};

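// Hermetic copy and atomic access routines implemented in assembly. Each routine has a
// matching `*_end` symbol so the page-fault handler can tell whether a faulting program
// counter falls inside one of these functions.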
extern "C" {
    fn hermetic_copy(dest: *mut u8, source: *const u8, len: usize, ret_dest: bool) -> usize;
    fn hermetic_copy_end();

    fn hermetic_copy_until_null_byte(
        dest: *mut u8,
        source: *const u8,
        len: usize,
        ret_dest: bool,
    ) -> usize;
    fn hermetic_copy_until_null_byte_end();

    fn hermetic_zero(dest: *mut u8, len: usize) -> usize;
    fn hermetic_zero_end();

    fn hermetic_copy_error();

    fn atomic_error();

    fn atomic_load_u32_relaxed(addr: usize) -> u64;

    fn atomic_load_u32_relaxed_end();

    fn atomic_load_u32_acquire(addr: usize) -> u64;

    fn atomic_load_u32_acquire_end();

    fn atomic_store_u32_relaxed(addr: usize, value: u32) -> u64;

    fn atomic_store_u32_relaxed_end();

    fn atomic_store_u32_release(addr: usize, value: u32) -> u64;

    fn atomic_store_u32_release_end();

    fn atomic_compare_exchange_u32_acq_rel(addr: usize, expected: *mut u32, desired: u32) -> u64;

    fn atomic_compare_exchange_u32_acq_rel_end();

    fn atomic_compare_exchange_weak_u32_acq_rel(
        addr: usize,
        expected: *mut u32,
        desired: u32,
    ) -> u64;

    fn atomic_compare_exchange_weak_u32_acq_rel_end();
}

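/// Reinterprets a `&mut [T]` as a `&mut [MaybeUninit<T>]` so an initialized buffer can be
/// passed to the `copyin` routines, which write into possibly-uninitialized memory.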
pub fn slice_to_maybe_uninit_mut<T>(slice: &mut [T]) -> &mut [MaybeUninit<T>] {
    let ptr = slice.as_mut_ptr();
    let ptr = ptr as *mut MaybeUninit<T>;
    // SAFETY: `MaybeUninit<T>` has the same layout as `T`, and the pointer and length come
    // from a valid, exclusively borrowed slice.
    unsafe { std::slice::from_raw_parts_mut(ptr, slice.len()) }
}

type HermeticCopyFn =
    unsafe extern "C" fn(dest: *mut u8, source: *const u8, len: usize, ret_dest: bool) -> usize;

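/// Provides routines for copying data to and from restricted-mode (user) memory while
/// recovering from page faults instead of crashing the process.
///
/// Faults taken inside the hermetic copy/atomic routines are caught by a dedicated
/// exception-handling thread that redirects execution to an error trampoline.
///
/// Usage sketch (hypothetical addresses; error handling elided):
///
/// ```ignore
/// let usercopy = Usercopy::new(restricted_range)?;
/// let written = usercopy.copyout(&data, user_addr);
/// ```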
#[derive(Debug)]
pub struct Usercopy {
    /// Event used to tell the exception-handling thread to shut down.
    shutdown_event: zx::Event,

    /// Join handle for the exception-handling thread.
    join_handle: Option<std::thread::JoinHandle<()>>,

    /// The range of addresses for which page faults may be recovered.
    restricted_address_range: Range<usize>,
}

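/// Extracts the faulting program counter and fault address from the thread's general
/// registers and exception report for the target architecture.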
fn parse_fault_exception(
    regs: &mut zx::sys::zx_thread_state_general_regs_t,
    report: zx::ExceptionReport,
) -> (usize, usize) {
    #[cfg(target_arch = "x86_64")]
    {
        let pc = regs.rip as usize;
        let fault_address = report.arch.cr2;

        (pc, fault_address as usize)
    }

    #[cfg(target_arch = "aarch64")]
    {
        let pc = regs.pc as usize;
        let fault_address = report.arch.far;

        (pc, fault_address as usize)
    }

    #[cfg(target_arch = "riscv64")]
    {
        let pc = regs.pc as usize;
        let fault_address = report.arch.tval;

        (pc, fault_address as usize)
    }
}

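/// Redirects the faulting thread to `hermetic_copy_error`, passing the fault address in the
/// architecture's return/first-argument register.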
fn set_registers_for_hermetic_error(
    regs: &mut zx::sys::zx_thread_state_general_regs_t,
    fault_address: usize,
) {
    #[cfg(target_arch = "x86_64")]
    {
        regs.rip = hermetic_copy_error as u64;
        regs.rax = fault_address as u64;
    }

    #[cfg(target_arch = "aarch64")]
    {
        regs.pc = hermetic_copy_error as u64;
        regs.r[0] = fault_address as u64;
    }

    #[cfg(target_arch = "riscv64")]
    {
        regs.pc = hermetic_copy_error as u64;
        regs.a0 = fault_address as u64;
    }
}

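/// Mask with the upper 32 bits set; the atomic routines use it to distinguish an error
/// return from a successfully loaded 32-bit value.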
const ATOMIC_ERROR_MASK: u64 = 0xFFFFFFFF00000000;

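/// Redirects the faulting thread to `atomic_error` with the error mask loaded into the
/// architecture's return register.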
fn set_registers_for_atomic_error(regs: &mut zx::sys::zx_thread_state_general_regs_t) {
    #[cfg(target_arch = "x86_64")]
    {
        regs.rax = ATOMIC_ERROR_MASK;
        regs.rip = atomic_error as u64;
    }

    #[cfg(target_arch = "aarch64")]
    {
        regs.r[0] = ATOMIC_ERROR_MASK;
        regs.pc = atomic_error as u64;
    }

    #[cfg(target_arch = "riscv64")]
    {
        regs.a0 = ATOMIC_ERROR_MASK;
        regs.pc = atomic_error as u64;
    }
}

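/// Splits `buf` into an initialized prefix and an uninitialized suffix.
///
/// # Safety
///
/// The caller must guarantee that the first `initialized_until` bytes of `buf` have been
/// initialized.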
unsafe fn assume_initialized_until(
    buf: &mut [MaybeUninit<u8>],
    initialized_until: usize,
) -> (&mut [u8], &mut [MaybeUninit<u8>]) {
    let (init_bytes, uninit_bytes) = buf.split_at_mut(initialized_until);
    debug_assert_eq!(init_bytes.len(), initialized_until);

    // SAFETY: the caller guarantees that the first `initialized_until` bytes are
    // initialized, so it is sound to expose them as `u8`.
    let init_bytes =
        std::slice::from_raw_parts_mut(init_bytes.as_mut_ptr() as *mut u8, init_bytes.len());

    (init_bytes, uninit_bytes)
}

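/// Invokes the hermetic copy routine `f` and returns the number of bytes copied before a
/// fault (or `count` if no fault occurred).
///
/// # Safety
///
/// `dest` and `source` must be valid for writes and reads of `count` bytes respectively,
/// except for faults that are recovered by the `Usercopy` exception handler.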
unsafe fn do_hermetic_copy(
    f: HermeticCopyFn,
    dest: usize,
    source: usize,
    count: usize,
    ret_dest: bool,
) -> usize {
    #[allow(
        clippy::undocumented_unsafe_blocks,
        reason = "Force documented unsafe blocks in Starnix"
    )]
    let unread_address = unsafe { f(dest as *mut u8, source as *const u8, count, ret_dest) };

    let ret_base = if ret_dest { dest } else { source };

    debug_assert!(
        unread_address >= ret_base,
        "unread_address={:#x}, ret_base={:#x}",
        unread_address,
        ret_base,
    );
    let copied = unread_address - ret_base;
    debug_assert!(
        copied <= count,
        "copied={}, count={}; unread_address={:#x}, ret_base={:#x}",
        copied,
        count,
        unread_address,
        ret_base,
    );
    copied
}

impl Usercopy {
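    /// Creates a `Usercopy` that can recover from page faults on addresses within
    /// `restricted_address_range`.
    ///
    /// Spawns a thread that listens on the job's exception channel and, for page faults
    /// taken inside the hermetic copy/atomic routines, redirects the faulting thread to the
    /// appropriate error trampoline.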
    pub fn new(restricted_address_range: Range<usize>) -> Result<Self, zx::Status> {
        let hermetic_copy_addr_range =
            hermetic_copy as *const () as usize..hermetic_copy_end as *const () as usize;

        let hermetic_copy_until_null_byte_addr_range = hermetic_copy_until_null_byte as *const ()
            as usize
            ..hermetic_copy_until_null_byte_end as *const () as usize;

        let hermetic_zero_addr_range =
            hermetic_zero as *const () as usize..hermetic_zero_end as *const () as usize;

        let atomic_load_relaxed_range = atomic_load_u32_relaxed as *const () as usize
            ..atomic_load_u32_relaxed_end as *const () as usize;

        let atomic_load_acquire_range = atomic_load_u32_acquire as *const () as usize
            ..atomic_load_u32_acquire_end as *const () as usize;

        let atomic_store_relaxed_range = atomic_store_u32_relaxed as *const () as usize
            ..atomic_store_u32_relaxed_end as *const () as usize;

        let atomic_store_release_range = atomic_store_u32_release as *const () as usize
            ..atomic_store_u32_release_end as *const () as usize;

        let atomic_compare_exchange_range = atomic_compare_exchange_u32_acq_rel as *const ()
            as usize
            ..atomic_compare_exchange_u32_acq_rel_end as *const () as usize;

        let atomic_compare_exchange_weak_range = atomic_compare_exchange_weak_u32_acq_rel
            as *const () as usize
            ..atomic_compare_exchange_weak_u32_acq_rel_end as *const () as usize;

        let (tx, rx) = std::sync::mpsc::channel::<zx::Status>();

        let shutdown_event = zx::Event::create();
        let shutdown_event_clone =
            shutdown_event.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap();

        let faultable_addresses = restricted_address_range.clone();
        let join_handle = std::thread::spawn(move || {
            let exception_channel_result =
                fuchsia_runtime::job_default().create_exception_channel();

            let exception_channel = match exception_channel_result {
                Ok(c) => c,
                Err(e) => {
                    let _ = tx.send(e);
                    return;
                }
            };

            // Let the constructor know that the exception channel is ready.
            let _ = tx.send(zx::Status::OK);

            loop {
                let mut wait_items = [
                    zx::WaitItem {
                        handle: exception_channel.as_handle_ref(),
                        waitfor: zx::Signals::CHANNEL_READABLE,
                        pending: zx::Signals::empty(),
                    },
                    zx::WaitItem {
                        handle: shutdown_event_clone.as_handle_ref(),
                        waitfor: zx::Signals::USER_0,
                        pending: zx::Signals::empty(),
                    },
                ];
                let _ = zx::object_wait_many(&mut wait_items, zx::MonotonicInstant::INFINITE);
                if wait_items[1].pending == zx::Signals::USER_0 {
                    break;
                }
                let mut buf = zx::MessageBuf::new();
                exception_channel.read(&mut buf).unwrap();

                let excp_info = zx::sys::zx_exception_info_t::read_from_bytes(buf.bytes()).unwrap();

                if excp_info.type_ != zx::sys::ZX_EXCP_FATAL_PAGE_FAULT {
                    continue;
                }

                let excp = zx::Exception::from_handle(buf.take_handle(0).unwrap());
                let thread = excp.get_thread().unwrap();
                let mut regs = thread.read_state_general_regs().unwrap();
                let report = thread.get_exception_report().unwrap();

                let (pc, fault_address) = parse_fault_exception(&mut regs, report);

                // Only recover faults on addresses this `Usercopy` instance is responsible
                // for.
                if !faultable_addresses.contains(&fault_address) {
                    continue;
                }

                if hermetic_copy_addr_range.contains(&pc)
                    || hermetic_copy_until_null_byte_addr_range.contains(&pc)
                    || hermetic_zero_addr_range.contains(&pc)
                {
                    set_registers_for_hermetic_error(&mut regs, fault_address);
                } else if atomic_load_relaxed_range.contains(&pc)
                    || atomic_load_acquire_range.contains(&pc)
                    || atomic_store_relaxed_range.contains(&pc)
                    || atomic_store_release_range.contains(&pc)
                    || atomic_compare_exchange_range.contains(&pc)
                    || atomic_compare_exchange_weak_range.contains(&pc)
                {
                    set_registers_for_atomic_error(&mut regs);
                } else {
                    continue;
                }

                thread.write_state_general_regs(regs).unwrap();
                excp.set_exception_state(&zx::sys::ZX_EXCEPTION_STATE_HANDLED).unwrap();
            }
        });

        match rx.recv().unwrap() {
            zx::Status::OK => {}
            s => {
                return Err(s);
            }
        };

        Ok(Self { shutdown_event, join_handle: Some(join_handle), restricted_address_range })
    }

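    /// Copies `count` bytes between `dest` and `source`, stopping early on a recovered
    /// fault. Returns the number of bytes copied.
    ///
    /// # Safety
    ///
    /// `dest` and `source` must be valid for writes and reads of `count` bytes respectively;
    /// faults are only recovered for addresses inside this instance's restricted address
    /// range.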
    pub unsafe fn raw_hermetic_copy(
        &self,
        dest: *mut u8,
        source: *const u8,
        count: usize,
        ret_dest: bool,
    ) -> usize {
        do_hermetic_copy(hermetic_copy, dest as usize, source as usize, count, ret_dest)
    }

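    /// Zeroes `count` bytes starting at `dest_addr` in the restricted address range.
    /// Returns the number of bytes zeroed before any fault.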
    pub fn zero(&self, dest_addr: usize, count: usize) -> usize {
        if dest_addr == 0 || !self.restricted_address_range.contains(&dest_addr) {
            return 0;
        }

        #[allow(
            clippy::undocumented_unsafe_blocks,
            reason = "Force documented unsafe blocks in Starnix"
        )]
        let unset_address = unsafe { hermetic_zero(dest_addr as *mut u8, count) };
        debug_assert!(
            unset_address >= dest_addr,
            "unset_address={:#x}, dest_addr={:#x}",
            unset_address,
            dest_addr,
        );
        let bytes_set = unset_address - dest_addr;
        debug_assert!(
            bytes_set <= count,
            "bytes_set={}, count={}; unset_address={:#x}, dest_addr={:#x}",
            bytes_set,
            count,
            unset_address,
            dest_addr,
        );
        bytes_set
    }

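    /// Copies the bytes in `source` to `dest_addr` in the restricted address range.
    /// Returns the number of bytes copied before any fault.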
    pub fn copyout(&self, source: &[u8], dest_addr: usize) -> usize {
        if dest_addr == 0 || !self.restricted_address_range.contains(&dest_addr) {
            return 0;
        }

        // SAFETY: `source` is a valid buffer of `source.len()` bytes and faults on
        // `dest_addr` are recovered by the exception handler.
        unsafe {
            do_hermetic_copy(hermetic_copy, dest_addr, source.as_ptr() as usize, source.len(), true)
        }
    }

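    /// Copies up to `dest.len()` bytes from `source_addr` into `dest`.
    ///
    /// Returns the read (initialized) portion of `dest` and the remaining uninitialized
    /// portion.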
    pub fn copyin<'a>(
        &self,
        source_addr: usize,
        dest: &'a mut [MaybeUninit<u8>],
    ) -> (&'a mut [u8], &'a mut [MaybeUninit<u8>]) {
        let read_count =
            if source_addr == 0 || !self.restricted_address_range.contains(&source_addr) {
                0
            } else {
                // SAFETY: `dest` is a valid buffer of `dest.len()` bytes and faults on
                // `source_addr` are recovered by the exception handler.
                unsafe {
                    do_hermetic_copy(
                        hermetic_copy,
                        dest.as_ptr() as usize,
                        source_addr,
                        dest.len(),
                        false,
                    )
                }
            };

        // SAFETY: the hermetic copy routine initialized the first `read_count` bytes.
        unsafe { assume_initialized_until(dest, read_count) }
    }

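    /// Copies bytes from `source_addr` into `dest` until a null byte is copied or `dest`
    /// is full.
    ///
    /// Returns the read (initialized) portion of `dest`, including the null byte if one was
    /// found, and the remaining uninitialized portion.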
    pub fn copyin_until_null_byte<'a>(
        &self,
        source_addr: usize,
        dest: &'a mut [MaybeUninit<u8>],
    ) -> (&'a mut [u8], &'a mut [MaybeUninit<u8>]) {
        let read_count =
            if source_addr == 0 || !self.restricted_address_range.contains(&source_addr) {
                0
            } else {
                // SAFETY: `dest` is a valid buffer of `dest.len()` bytes and faults on
                // `source_addr` are recovered by the exception handler.
                unsafe {
                    do_hermetic_copy(
                        hermetic_copy_until_null_byte,
                        dest.as_ptr() as usize,
                        source_addr,
                        dest.len(),
                        false,
                    )
                }
            };

        // SAFETY: the hermetic copy routine initialized the first `read_count` bytes.
        unsafe { assume_initialized_until(dest, read_count) }
    }

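    /// Invokes `load_fn` on `addr`, mapping the masked error return to `Err(())`.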
    #[inline]
    fn atomic_load_u32(
        &self,
        load_fn: unsafe extern "C" fn(usize) -> u64,
        addr: usize,
    ) -> Result<u32, ()> {
        #[allow(
            clippy::undocumented_unsafe_blocks,
            reason = "Force documented unsafe blocks in Starnix"
        )]
        let value_or_error = unsafe { load_fn(addr) };
        if value_or_error & ATOMIC_ERROR_MASK == 0 { Ok(value_or_error as u32) } else { Err(()) }
    }

    /// Performs a relaxed atomic load of the `u32` at `addr`.
    pub fn atomic_load_u32_relaxed(&self, addr: usize) -> Result<u32, ()> {
        self.atomic_load_u32(atomic_load_u32_relaxed, addr)
    }

    /// Performs an acquire atomic load of the `u32` at `addr`.
    pub fn atomic_load_u32_acquire(&self, addr: usize) -> Result<u32, ()> {
        self.atomic_load_u32(atomic_load_u32_acquire, addr)
    }

    fn atomic_store_u32(
        &self,
        store_fn: unsafe extern "C" fn(usize, u32) -> u64,
        addr: usize,
        value: u32,
    ) -> Result<(), ()> {
        #[allow(
            clippy::undocumented_unsafe_blocks,
            reason = "Force documented unsafe blocks in Starnix"
        )]
        match unsafe { store_fn(addr, value) } {
            0 => Ok(()),
            _ => Err(()),
        }
    }

    /// Performs a relaxed atomic store of `value` to the `u32` at `addr`.
    pub fn atomic_store_u32_relaxed(&self, addr: usize, value: u32) -> Result<(), ()> {
        self.atomic_store_u32(atomic_store_u32_relaxed, addr, value)
    }

    /// Performs a release atomic store of `value` to the `u32` at `addr`.
    pub fn atomic_store_u32_release(&self, addr: usize, value: u32) -> Result<(), ()> {
        self.atomic_store_u32(atomic_store_u32_release, addr, value)
    }

    /// Performs an acquire/release atomic compare-and-exchange on the `u32` at `addr`.
    ///
    /// Returns `Ok(Ok(old))` on success, `Ok(Err(actual))` on a value mismatch, and
    /// `Err(())` on a fault.
    pub fn atomic_compare_exchange_u32_acq_rel(
        &self,
        addr: usize,
        expected: u32,
        desired: u32,
    ) -> Result<Result<u32, u32>, ()> {
        let mut expected = expected;
        #[allow(
            clippy::undocumented_unsafe_blocks,
            reason = "Force documented unsafe blocks in Starnix"
        )]
        let value_or_error = unsafe {
            atomic_compare_exchange_u32_acq_rel(addr, &mut expected as *mut u32, desired)
        };
        Self::parse_compare_exchange_result(expected, value_or_error)
    }

    /// Weak variant of [`Self::atomic_compare_exchange_u32_acq_rel`] that may fail even
    /// when the value matches `expected`.
    pub fn atomic_compare_exchange_weak_u32_acq_rel(
        &self,
        addr: usize,
        expected: u32,
        desired: u32,
    ) -> Result<Result<u32, u32>, ()> {
        let mut expected = expected;
        #[allow(
            clippy::undocumented_unsafe_blocks,
            reason = "Force documented unsafe blocks in Starnix"
        )]
        let value_or_error = unsafe {
            atomic_compare_exchange_weak_u32_acq_rel(addr, &mut expected as *mut u32, desired)
        };
        Self::parse_compare_exchange_result(expected, value_or_error)
    }

    fn parse_compare_exchange_result(
        expected: u32,
        value_or_error: u64,
    ) -> Result<Result<u32, u32>, ()> {
        match value_or_error {
            0 => Ok(Err(expected)),
            1 => Ok(Ok(expected)),
            _ => Err(()),
        }
    }
}

impl Drop for Usercopy {
    fn drop(&mut self) {
        self.shutdown_event.signal_handle(zx::Signals::empty(), zx::Signals::USER_0).unwrap();
        self.join_handle.take().unwrap().join().unwrap();
    }
}

#[cfg(test)]
mod test {
    #![allow(
        clippy::undocumented_unsafe_blocks,
        reason = "Force documented unsafe blocks in Starnix"
    )]
    use super::*;

    use test_case::test_case;

    impl Usercopy {
        fn new_for_test(restricted_address_range: Range<usize>) -> Self {
            Self::new(restricted_address_range).unwrap()
        }
    }

    #[test_case(0, 0)]
    #[test_case(1, 1)]
    #[test_case(7, 2)]
    #[test_case(8, 3)]
    #[test_case(9, 4)]
    #[test_case(128, 5)]
    #[test_case(zx::system_get_page_size() as usize - 1, 6)]
    #[test_case(zx::system_get_page_size() as usize, 7)]
    #[::fuchsia::test]
    fn zero_no_fault(zero_len: usize, ch: u8) {
        let page_size = zx::system_get_page_size() as usize;

        let dest_vmo = zx::Vmo::create(page_size as u64).unwrap();

        let root_vmar = fuchsia_runtime::vmar_root_self();

        let mapped_addr = root_vmar
            .map(0, &dest_vmo, 0, page_size, zx::VmarFlags::PERM_READ | zx::VmarFlags::PERM_WRITE)
            .unwrap();
        let mapped_bytes =
            unsafe { std::slice::from_raw_parts_mut(mapped_addr as *mut u8, page_size) };
        mapped_bytes.fill(ch);

        let usercopy = Usercopy::new_for_test(mapped_addr..mapped_addr + page_size);

        let result = usercopy.zero(mapped_addr, zero_len);
        assert_eq!(result, zero_len);

        assert_eq!(&mapped_bytes[..zero_len], &vec![0; zero_len]);
        assert_eq!(&mapped_bytes[zero_len..], &vec![ch; page_size - zero_len]);
    }

    #[test_case(1, 2, 0)]
    #[test_case(1, 4, 1)]
    #[test_case(1, 8, 2)]
    #[test_case(1, 16, 3)]
    #[test_case(1, 32, 4)]
    #[test_case(1, 64, 5)]
    #[test_case(1, 128, 6)]
    #[test_case(1, 256, 7)]
    #[test_case(1, 512, 8)]
    #[test_case(1, 1024, 9)]
    #[test_case(32, 64, 10)]
    #[test_case(32, 128, 11)]
    #[test_case(32, 256, 12)]
    #[test_case(32, 512, 13)]
    #[test_case(32, 1024, 14)]
    #[::fuchsia::test]
    fn zero_fault(offset: usize, zero_len: usize, ch: u8) {
        let page_size = zx::system_get_page_size() as usize;

        let dest_vmo = zx::Vmo::create(page_size as u64).unwrap();

        let root_vmar = fuchsia_runtime::vmar_root_self();

        let mapped_addr = root_vmar
            .map(
                0,
                &dest_vmo,
                0,
                page_size * 2,
                zx::VmarFlags::PERM_READ | zx::VmarFlags::PERM_WRITE,
            )
            .unwrap();
        let mapped_bytes =
            unsafe { std::slice::from_raw_parts_mut(mapped_addr as *mut u8, page_size) };
        mapped_bytes.fill(ch);

        let usercopy = Usercopy::new_for_test(mapped_addr..mapped_addr + page_size * 2);

        let dest_addr = mapped_addr + page_size - offset;

        let result = usercopy.zero(dest_addr, zero_len);
        assert_eq!(result, offset);

        assert_eq!(&mapped_bytes[page_size - offset..], &vec![0; offset][..]);
        assert_eq!(&mapped_bytes[..page_size - offset], &vec![ch; page_size - offset][..]);
    }

    #[test_case(0)]
    #[test_case(1)]
    #[test_case(7)]
    #[test_case(8)]
    #[test_case(9)]
    #[test_case(128)]
    #[test_case(zx::system_get_page_size() as usize - 1)]
    #[test_case(zx::system_get_page_size() as usize)]
    #[::fuchsia::test]
    fn copyout_no_fault(buf_len: usize) {
        let page_size = zx::system_get_page_size() as usize;

        let source = vec!['a' as u8; buf_len];

        let dest_vmo = zx::Vmo::create(page_size as u64).unwrap();

        let root_vmar = fuchsia_runtime::vmar_root_self();

        let mapped_addr = root_vmar
            .map(0, &dest_vmo, 0, page_size, zx::VmarFlags::PERM_READ | zx::VmarFlags::PERM_WRITE)
            .unwrap();

        let usercopy = Usercopy::new_for_test(mapped_addr..mapped_addr + page_size);

        let result = usercopy.copyout(&source, mapped_addr);
        assert_eq!(result, buf_len);

        assert_eq!(
            unsafe { std::slice::from_raw_parts(mapped_addr as *const u8, buf_len) },
            &vec!['a' as u8; buf_len]
        );
    }

    #[test_case(1, 2)]
    #[test_case(1, 4)]
    #[test_case(1, 8)]
    #[test_case(1, 16)]
    #[test_case(1, 32)]
    #[test_case(1, 64)]
    #[test_case(1, 128)]
    #[test_case(1, 256)]
    #[test_case(1, 512)]
    #[test_case(1, 1024)]
    #[test_case(32, 64)]
    #[test_case(32, 128)]
    #[test_case(32, 256)]
    #[test_case(32, 512)]
    #[test_case(32, 1024)]
    #[::fuchsia::test]
    fn copyout_fault(offset: usize, buf_len: usize) {
        let page_size = zx::system_get_page_size() as usize;

        let source = vec!['a' as u8; buf_len];

        let dest_vmo = zx::Vmo::create(page_size as u64).unwrap();

        let root_vmar = fuchsia_runtime::vmar_root_self();

        let mapped_addr = root_vmar
            .map(
                0,
                &dest_vmo,
                0,
                page_size * 2,
                zx::VmarFlags::PERM_READ | zx::VmarFlags::PERM_WRITE,
            )
            .unwrap();

        let usercopy = Usercopy::new_for_test(mapped_addr..mapped_addr + page_size * 2);

        let dest_addr = mapped_addr + page_size - offset;

        let result = usercopy.copyout(&source, dest_addr);

        assert_eq!(result, offset);

        assert_eq!(
            unsafe { std::slice::from_raw_parts(dest_addr as *const u8, offset) },
            &vec!['a' as u8; offset][..],
        );
    }

    #[test_case(0)]
    #[test_case(1)]
    #[test_case(7)]
    #[test_case(8)]
    #[test_case(9)]
    #[test_case(128)]
    #[test_case(zx::system_get_page_size() as usize - 1)]
    #[test_case(zx::system_get_page_size() as usize)]
    #[::fuchsia::test]
    fn copyin_no_fault(buf_len: usize) {
        let page_size = zx::system_get_page_size() as usize;

        let mut dest = Vec::with_capacity(buf_len);

        let source_vmo = zx::Vmo::create(page_size as u64).unwrap();

        let root_vmar = fuchsia_runtime::vmar_root_self();

        let mapped_addr = root_vmar
            .map(0, &source_vmo, 0, page_size, zx::VmarFlags::PERM_READ | zx::VmarFlags::PERM_WRITE)
            .unwrap();

        unsafe { std::slice::from_raw_parts_mut(mapped_addr as *mut u8, buf_len) }.fill('a' as u8);

        let usercopy = Usercopy::new_for_test(mapped_addr..mapped_addr + page_size);
        let dest_as_mut_ptr = dest.as_mut_ptr();
        let (read_bytes, unread_bytes) = usercopy.copyin(mapped_addr, dest.spare_capacity_mut());
        let expected = vec!['a' as u8; buf_len];
        assert_eq!(read_bytes, &expected);
        assert_eq!(unread_bytes.len(), 0);
        assert_eq!(read_bytes.as_mut_ptr(), dest_as_mut_ptr);

        unsafe { dest.set_len(buf_len) }
        assert_eq!(dest, expected);
    }

    #[test_case(1, 2)]
    #[test_case(1, 4)]
    #[test_case(1, 8)]
    #[test_case(1, 16)]
    #[test_case(1, 32)]
    #[test_case(1, 64)]
    #[test_case(1, 128)]
    #[test_case(1, 256)]
    #[test_case(1, 512)]
    #[test_case(1, 1024)]
    #[test_case(32, 64)]
    #[test_case(32, 128)]
    #[test_case(32, 256)]
    #[test_case(32, 512)]
    #[test_case(32, 1024)]
    #[::fuchsia::test]
    fn copyin_fault(offset: usize, buf_len: usize) {
        let page_size = zx::system_get_page_size() as usize;

        let mut dest = vec![0u8; buf_len];

        let source_vmo = zx::Vmo::create(page_size as u64).unwrap();

        let root_vmar = fuchsia_runtime::vmar_root_self();

        let mapped_addr = root_vmar
            .map(
                0,
                &source_vmo,
                0,
                page_size * 2,
                zx::VmarFlags::PERM_READ | zx::VmarFlags::PERM_WRITE,
            )
            .unwrap();

        let source_addr = mapped_addr + page_size - offset;

        unsafe { std::slice::from_raw_parts_mut(source_addr as *mut u8, offset) }.fill('a' as u8);

        let usercopy = Usercopy::new_for_test(mapped_addr..mapped_addr + page_size * 2);

        let (read_bytes, unread_bytes) =
            usercopy.copyin(source_addr, slice_to_maybe_uninit_mut(&mut dest));
        let expected_copied = vec!['a' as u8; offset];
        let expected_uncopied = vec![0 as u8; buf_len - offset];
        assert_eq!(read_bytes, &expected_copied);
        assert_eq!(unread_bytes.len(), expected_uncopied.len());

        assert_eq!(&dest[0..offset], &expected_copied);
        assert_eq!(&dest[offset..], &expected_uncopied);
    }

    #[test_case(0)]
    #[test_case(1)]
    #[test_case(7)]
    #[test_case(8)]
    #[test_case(9)]
    #[test_case(128)]
    #[test_case(zx::system_get_page_size() as usize - 1)]
    #[test_case(zx::system_get_page_size() as usize)]
    #[::fuchsia::test]
    fn copyin_until_null_byte_no_fault(buf_len: usize) {
        let page_size = zx::system_get_page_size() as usize;

        let mut dest = Vec::with_capacity(buf_len);

        let source_vmo = zx::Vmo::create(page_size as u64).unwrap();

        let root_vmar = fuchsia_runtime::vmar_root_self();

        let mapped_addr = root_vmar
            .map(0, &source_vmo, 0, page_size, zx::VmarFlags::PERM_READ | zx::VmarFlags::PERM_WRITE)
            .unwrap();

        unsafe { std::slice::from_raw_parts_mut(mapped_addr as *mut u8, buf_len) }.fill('a' as u8);

        let usercopy = Usercopy::new_for_test(mapped_addr..mapped_addr + page_size);

        let dest_as_mut_ptr = dest.as_mut_ptr();
        let (read_bytes, unread_bytes) =
            usercopy.copyin_until_null_byte(mapped_addr, dest.spare_capacity_mut());
        let expected = vec!['a' as u8; buf_len];
        assert_eq!(read_bytes, &expected);
        assert_eq!(unread_bytes.len(), 0);
        assert_eq!(read_bytes.as_mut_ptr(), dest_as_mut_ptr);

        unsafe { dest.set_len(dest.capacity()) }
        assert_eq!(dest, expected);
    }

    #[test_case(1, 2)]
    #[test_case(1, 4)]
    #[test_case(1, 8)]
    #[test_case(1, 16)]
    #[test_case(1, 32)]
    #[test_case(1, 64)]
    #[test_case(1, 128)]
    #[test_case(1, 256)]
    #[test_case(1, 512)]
    #[test_case(1, 1024)]
    #[test_case(32, 64)]
    #[test_case(32, 128)]
    #[test_case(32, 256)]
    #[test_case(32, 512)]
    #[test_case(32, 1024)]
    #[::fuchsia::test]
    fn copyin_until_null_byte_fault(offset: usize, buf_len: usize) {
        let page_size = zx::system_get_page_size() as usize;

        let mut dest = vec![0u8; buf_len];

        let source_vmo = zx::Vmo::create(page_size as u64).unwrap();

        let root_vmar = fuchsia_runtime::vmar_root_self();

        let mapped_addr = root_vmar
            .map(
                0,
                &source_vmo,
                0,
                page_size * 2,
                zx::VmarFlags::PERM_READ | zx::VmarFlags::PERM_WRITE,
            )
            .unwrap();

        let source_addr = mapped_addr + page_size - offset;

        unsafe { std::slice::from_raw_parts_mut(source_addr as *mut u8, offset) }.fill('a' as u8);

        let usercopy = Usercopy::new_for_test(mapped_addr..mapped_addr + page_size * 2);

        let (read_bytes, unread_bytes) =
            usercopy.copyin_until_null_byte(source_addr, slice_to_maybe_uninit_mut(&mut dest));
        let expected_copied = vec!['a' as u8; offset];
        let expected_uncopied = vec![0 as u8; buf_len - offset];
        assert_eq!(read_bytes, &expected_copied);
        assert_eq!(unread_bytes.len(), expected_uncopied.len());

        assert_eq!(&dest[0..offset], &expected_copied);
        assert_eq!(&dest[offset..], &expected_uncopied);
    }

    #[test_case(0)]
    #[test_case(1)]
    #[test_case(2)]
    #[test_case(126)]
    #[test_case(127)]
    #[::fuchsia::test]
    fn copyin_until_null_byte_no_fault_with_zero(zero_idx: usize) {
        const DEST_LEN: usize = 128;

        let page_size = zx::system_get_page_size() as usize;

        let mut dest = vec!['b' as u8; DEST_LEN];

        let source_vmo = zx::Vmo::create(page_size as u64).unwrap();

        let root_vmar = fuchsia_runtime::vmar_root_self();

        let mapped_addr = root_vmar
            .map(0, &source_vmo, 0, page_size, zx::VmarFlags::PERM_READ | zx::VmarFlags::PERM_WRITE)
            .unwrap();

        {
            let slice =
                unsafe { std::slice::from_raw_parts_mut(mapped_addr as *mut u8, dest.len()) };
            slice.fill('a' as u8);
            slice[zero_idx] = 0;
        };

        let usercopy = Usercopy::new_for_test(mapped_addr..mapped_addr + page_size);

        let (read_bytes, unread_bytes) =
            usercopy.copyin_until_null_byte(mapped_addr, slice_to_maybe_uninit_mut(&mut dest));
        let expected_copied_non_zero_bytes = vec!['a' as u8; zero_idx];
        let expected_uncopied = vec!['b' as u8; DEST_LEN - zero_idx - 1];
        assert_eq!(&read_bytes[..zero_idx], &expected_copied_non_zero_bytes);
        assert_eq!(&read_bytes[zero_idx..], &[0]);
        assert_eq!(unread_bytes.len(), expected_uncopied.len());

        assert_eq!(&dest[..zero_idx], &expected_copied_non_zero_bytes);
        assert_eq!(dest[zero_idx], 0);
        assert_eq!(&dest[zero_idx + 1..], &expected_uncopied);
    }

    #[test_case(0..1, 0)]
    #[test_case(0..1, 1)]
    #[test_case(0..1, 2)]
    #[test_case(5..10, 0)]
    #[test_case(5..10, 1)]
    #[test_case(5..10, 2)]
    #[test_case(5..10, 5)]
    #[test_case(5..10, 7)]
    #[test_case(5..10, 10)]
    #[::fuchsia::test]
    fn starting_fault_address_copyin_until_null_byte(range: Range<usize>, addr: usize) {
        let usercopy = Usercopy::new_for_test(range);

        let mut dest = vec![0u8];

        let (read_bytes, unread_bytes) =
            usercopy.copyin_until_null_byte(addr, slice_to_maybe_uninit_mut(&mut dest));
        assert_eq!(read_bytes, &[]);
        assert_eq!(unread_bytes.len(), dest.len());
        assert_eq!(dest, [0]);
    }

    #[test_case(0..1, 0)]
    #[test_case(0..1, 1)]
    #[test_case(0..1, 2)]
    #[test_case(5..10, 0)]
    #[test_case(5..10, 1)]
    #[test_case(5..10, 2)]
    #[test_case(5..10, 5)]
    #[test_case(5..10, 7)]
    #[test_case(5..10, 10)]
    #[::fuchsia::test]
    fn starting_fault_address_copyin(range: Range<usize>, addr: usize) {
        let usercopy = Usercopy::new_for_test(range);

        let mut dest = vec![0u8];

        let (read_bytes, unread_bytes) =
            usercopy.copyin(addr, slice_to_maybe_uninit_mut(&mut dest));
        assert_eq!(read_bytes, &[]);
        assert_eq!(unread_bytes.len(), dest.len());
        assert_eq!(dest, [0]);
    }

    #[test_case(0..1, 0)]
    #[test_case(0..1, 1)]
    #[test_case(0..1, 2)]
    #[test_case(5..10, 0)]
    #[test_case(5..10, 1)]
    #[test_case(5..10, 2)]
    #[test_case(5..10, 5)]
    #[test_case(5..10, 7)]
    #[test_case(5..10, 10)]
    #[::fuchsia::test]
    fn starting_fault_address_copyout(range: Range<usize>, addr: usize) {
        let usercopy = Usercopy::new_for_test(range);

        let source = vec![0u8];

        let result = usercopy.copyout(&source, addr);
        assert_eq!(result, 0);
        assert_eq!(source, [0]);
    }

    struct MappedPageUsercopy {
        usercopy: Usercopy,
        addr: usize,
    }

    impl MappedPageUsercopy {
        fn new(flags: zx::VmarFlags) -> Self {
            let page_size = zx::system_get_page_size() as usize;

            let vmo = zx::Vmo::create(page_size as u64).unwrap();

            let root_vmar = fuchsia_runtime::vmar_root_self();

            let addr = root_vmar.map(0, &vmo, 0, page_size, flags).unwrap();

            let usercopy = Usercopy::new_for_test(addr..addr + page_size);
            Self { usercopy, addr }
        }
    }

    impl std::ops::Drop for MappedPageUsercopy {
        fn drop(&mut self) {
            let page_size = zx::system_get_page_size() as usize;

            unsafe { fuchsia_runtime::vmar_root_self().unmap(self.addr, page_size) }.unwrap();
        }
    }

    #[test_case(|usercopy, mapped_addr| usercopy.atomic_load_u32_relaxed(mapped_addr); "relaxed")]
    #[test_case(|usercopy, mapped_addr| usercopy.atomic_load_u32_acquire(mapped_addr); "acquire")]
    #[::fuchsia::test]
    fn atomic_load_u32_no_fault(load_fn: fn(&Usercopy, usize) -> Result<u32, ()>) {
        let m = MappedPageUsercopy::new(zx::VmarFlags::PERM_READ | zx::VmarFlags::PERM_WRITE);

        unsafe { *(m.addr as *mut u32) = 0x12345678 };

        let result = load_fn(&m.usercopy, m.addr);

        assert_eq!(Ok(0x12345678), result);
    }

    #[test_case(|usercopy, mapped_addr| usercopy.atomic_load_u32_relaxed(mapped_addr); "relaxed")]
    #[test_case(|usercopy, mapped_addr| usercopy.atomic_load_u32_acquire(mapped_addr); "acquire")]
    #[::fuchsia::test]
    fn atomic_load_u32_fault(load_fn: fn(&Usercopy, usize) -> Result<u32, ()>) {
        let m = MappedPageUsercopy::new(zx::VmarFlags::empty());

        let result = load_fn(&m.usercopy, m.addr);
        assert_eq!(Err(()), result);
    }

    #[test_case(|usercopy, mapped_addr, val| usercopy.atomic_store_u32_relaxed(mapped_addr, val); "relaxed")]
    #[test_case(|usercopy, mapped_addr, val| usercopy.atomic_store_u32_release(mapped_addr, val); "release")]
    #[::fuchsia::test]
    fn atomic_store_u32_no_fault(store_fn: fn(&Usercopy, usize, u32) -> Result<(), ()>) {
        let m = MappedPageUsercopy::new(zx::VmarFlags::PERM_READ | zx::VmarFlags::PERM_WRITE);

        assert_eq!(store_fn(&m.usercopy, m.addr, 0x12345678), Ok(()));

        assert_eq!(unsafe { *(m.addr as *mut u32) }, 0x12345678);
    }

    #[test_case(|usercopy, mapped_addr, val| usercopy.atomic_store_u32_relaxed(mapped_addr, val); "relaxed")]
    #[test_case(|usercopy, mapped_addr, val| usercopy.atomic_store_u32_release(mapped_addr, val); "release")]
    #[::fuchsia::test]
    fn atomic_store_u32_fault(store_fn: fn(&Usercopy, usize, u32) -> Result<(), ()>) {
        let m = MappedPageUsercopy::new(zx::VmarFlags::empty());

        let result = store_fn(&m.usercopy, m.addr, 0x12345678);
        assert_eq!(Err(()), result);

        let page_size = zx::system_get_page_size() as usize;
        unsafe {
            fuchsia_runtime::vmar_root_self().protect(m.addr, page_size, zx::VmarFlags::PERM_READ)
        }
        .unwrap();

        assert_ne!(unsafe { *(m.addr as *mut u32) }, 0x12345678);
    }

    #[::fuchsia::test]
    fn atomic_compare_exchange_u32_acq_rel_no_fault() {
        let m = MappedPageUsercopy::new(zx::VmarFlags::PERM_READ | zx::VmarFlags::PERM_WRITE);

        unsafe { *(m.addr as *mut u32) = 0x12345678 };

        assert_eq!(
            m.usercopy.atomic_compare_exchange_u32_acq_rel(m.addr, 0x12345678, 0xffffffff),
            Ok(Ok(0x12345678))
        );

        assert_eq!(unsafe { *(m.addr as *mut u32) }, 0xffffffff);

        assert_eq!(
            m.usercopy.atomic_compare_exchange_u32_acq_rel(m.addr, 0x22222222, 0x11111111),
            Ok(Err(0xffffffff))
        );

        assert_eq!(unsafe { *(m.addr as *mut u32) }, 0xffffffff);
    }

    #[::fuchsia::test]
    fn atomic_compare_exchange_u32_acq_rel_fault() {
        let m = MappedPageUsercopy::new(zx::VmarFlags::empty());

        let result = m.usercopy.atomic_compare_exchange_u32_acq_rel(m.addr, 0x00000000, 0x11111111);
        assert_eq!(Err(()), result);

        let page_size = zx::system_get_page_size() as usize;
        unsafe {
            fuchsia_runtime::vmar_root_self().protect(m.addr, page_size, zx::VmarFlags::PERM_READ)
        }
        .unwrap();

        assert_eq!(unsafe { *(m.addr as *mut u32) }, 0x00000000);
    }
}