use std::mem::MaybeUninit;
use std::ops::Range;

use zerocopy::FromBytes;
use zx::{AsHandleRef, HandleBased, Task};

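// The routines below are defined outside this file (hermetic implementations linked into
// the final binary); they are the only code expected to touch restricted memory directly.
// Each routine has a matching `*_end` marker symbol so the exception handler spawned by
// `Usercopy::new` can tell whether a faulting program counter lies inside it. On such a
// fault the handler redirects the thread to `hermetic_copy_error` or `atomic_error`,
// which report the failure back to the Rust caller.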
unsafe extern "C" {
    fn hermetic_copy(dest: *mut u8, source: *const u8, len: usize, ret_dest: bool) -> usize;
    fn hermetic_copy_end();

    fn hermetic_copy_until_null_byte(
        dest: *mut u8,
        source: *const u8,
        len: usize,
        ret_dest: bool,
    ) -> usize;
    fn hermetic_copy_until_null_byte_end();

    fn hermetic_zero(dest: *mut u8, len: usize) -> usize;
    fn hermetic_zero_end();

    fn hermetic_copy_error();

    fn atomic_error();

    fn atomic_load_u32_relaxed(addr: usize) -> u64;

    fn atomic_load_u32_relaxed_end();

    fn atomic_load_u32_acquire(addr: usize) -> u64;

    fn atomic_load_u32_acquire_end();

    fn atomic_store_u32_relaxed(addr: usize, value: u32) -> u64;

    fn atomic_store_u32_relaxed_end();

    fn atomic_store_u32_release(addr: usize, value: u32) -> u64;

    fn atomic_store_u32_release_end();

    fn atomic_compare_exchange_u32_acq_rel(addr: usize, expected: *mut u32, desired: u32) -> u64;

    fn atomic_compare_exchange_u32_acq_rel_end();

    fn atomic_compare_exchange_weak_u32_acq_rel(
        addr: usize,
        expected: *mut u32,
        desired: u32,
    ) -> u64;

    fn atomic_compare_exchange_weak_u32_acq_rel_end();
}

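/// Reinterprets a `&mut [T]` as a `&mut [MaybeUninit<T>]` so already-initialized memory
/// can be passed to APIs that accept possibly-uninitialized buffers.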
pub fn slice_to_maybe_uninit_mut<T>(slice: &mut [T]) -> &mut [MaybeUninit<T>] {
    let ptr = slice.as_mut_ptr();
    let ptr = ptr as *mut MaybeUninit<T>;
    // SAFETY: `MaybeUninit<T>` has the same layout as `T`, and the pointer and length come
    // from a valid, exclusively borrowed slice.
    unsafe { std::slice::from_raw_parts_mut(ptr, slice.len()) }
}

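/// Signature shared by `hermetic_copy` and `hermetic_copy_until_null_byte`.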
type HermeticCopyFn =
    unsafe extern "C" fn(dest: *mut u8, source: *const u8, len: usize, ret_dest: bool) -> usize;

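/// Copies data to and from a restricted address range whose pages may fault.
///
/// A dedicated thread listens on the job's exception channel; when a page fault lands
/// inside one of the hermetic routines, it rewrites the faulting thread's registers so the
/// routine reports how far it got instead of crashing the process.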
#[derive(Debug)]
pub struct Usercopy {
    // Signaled to tell the fault-handling thread to shut down.
    shutdown_event: zx::Event,

    // Join handle for the fault-handling thread; taken in `Drop`.
    join_handle: Option<std::thread::JoinHandle<()>>,

    // Addresses outside this range are rejected before any copy is attempted.
    restricted_address_range: Range<usize>,
}

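/// Extracts the faulting program counter and fault address from the thread's registers
/// and exception report.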
fn parse_fault_exception(
    regs: &mut zx::sys::zx_thread_state_general_regs_t,
    report: zx::ExceptionReport,
) -> (usize, usize) {
    #[cfg(target_arch = "x86_64")]
    {
        let pc = regs.rip as usize;
        let fault_address = report.arch.cr2;

        (pc, fault_address as usize)
    }

    #[cfg(target_arch = "aarch64")]
    {
        let pc = regs.pc as usize;
        let fault_address = report.arch.far;

        (pc, fault_address as usize)
    }

    #[cfg(target_arch = "riscv64")]
    {
        let pc = regs.pc as usize;
        let fault_address = report.arch.tval;

        (pc, fault_address as usize)
    }
}

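/// Redirects the faulted thread to `hermetic_copy_error`, passing the fault address in the
/// return-value register.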
fn set_registers_for_hermetic_error(
    regs: &mut zx::sys::zx_thread_state_general_regs_t,
    fault_address: usize,
) {
    #[cfg(target_arch = "x86_64")]
    {
        regs.rip = hermetic_copy_error as *const () as u64;
        regs.rax = fault_address as u64;
    }

    #[cfg(target_arch = "aarch64")]
    {
        regs.pc = hermetic_copy_error as *const () as u64;
        regs.r[0] = fault_address as u64;
    }

    #[cfg(target_arch = "riscv64")]
    {
        regs.pc = hermetic_copy_error as *const () as u64;
        regs.a0 = fault_address as u64;
    }
}

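/// When any of the high 32 bits of an atomic routine's return value are set, the operation
/// faulted; otherwise the low 32 bits hold the result.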
const ATOMIC_ERROR_MASK: u64 = 0xFFFFFFFF00000000;

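/// Redirects the faulted thread to `atomic_error`, placing the error sentinel in the
/// return-value register.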
fn set_registers_for_atomic_error(regs: &mut zx::sys::zx_thread_state_general_regs_t) {
    #[cfg(target_arch = "x86_64")]
    {
        regs.rax = ATOMIC_ERROR_MASK;
        regs.rip = atomic_error as *const () as u64;
    }

    #[cfg(target_arch = "aarch64")]
    {
        regs.r[0] = ATOMIC_ERROR_MASK;
        regs.pc = atomic_error as *const () as u64;
    }

    #[cfg(target_arch = "riscv64")]
    {
        regs.a0 = ATOMIC_ERROR_MASK;
        regs.pc = atomic_error as *const () as u64;
    }
}

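/// Splits `buf` into an initialized prefix and an uninitialized suffix.
///
/// # Safety
///
/// The first `initialized_until` bytes of `buf` must have been initialized.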
unsafe fn assume_initialized_until(
    buf: &mut [MaybeUninit<u8>],
    initialized_until: usize,
) -> (&mut [u8], &mut [MaybeUninit<u8>]) {
    let (init_bytes, uninit_bytes) = buf.split_at_mut(initialized_until);
    debug_assert_eq!(init_bytes.len(), initialized_until);

    #[allow(clippy::undocumented_unsafe_blocks, reason = "2024 edition migration")]
    let init_bytes = unsafe {
        std::slice::from_raw_parts_mut(init_bytes.as_mut_ptr() as *mut u8, init_bytes.len())
    };

    (init_bytes, uninit_bytes)
}

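/// Runs a hermetic copy routine and returns the number of bytes copied before any fault.
///
/// # Safety
///
/// The non-restricted side of the copy must be valid for `count` bytes; faults on the
/// restricted side are expected to be recovered by the fault-handling thread.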
unsafe fn do_hermetic_copy(
    f: HermeticCopyFn,
    dest: usize,
    source: usize,
    count: usize,
    ret_dest: bool,
) -> usize {
    #[allow(
        clippy::undocumented_unsafe_blocks,
        reason = "Force documented unsafe blocks in Starnix"
    )]
    let unread_address = unsafe { f(dest as *mut u8, source as *const u8, count, ret_dest) };

    // The routine returns the address of the first byte that was not copied, in either the
    // destination or the source buffer depending on `ret_dest`.
    let ret_base = if ret_dest { dest } else { source };

    debug_assert!(
        unread_address >= ret_base,
        "unread_address={:#x}, ret_base={:#x}",
        unread_address,
        ret_base,
    );
    let copied = unread_address - ret_base;
    debug_assert!(
        copied <= count,
        "copied={}, count={}; unread_address={:#x}, ret_base={:#x}",
        copied,
        count,
        unread_address,
        ret_base,
    );
    copied
}

impl Usercopy {
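    /// Creates a `Usercopy` that can copy to and from addresses within
    /// `restricted_address_range`, spawning the thread that recovers from page faults
    /// raised by the hermetic routines.
    ///
    /// A minimal usage sketch (illustrative only; the mapping mirrors what the tests below
    /// set up):
    ///
    /// ```ignore
    /// let page_size = zx::system_get_page_size() as usize;
    /// let vmo = zx::Vmo::create(page_size as u64).unwrap();
    /// let addr = fuchsia_runtime::vmar_root_self()
    ///     .map(0, &vmo, 0, page_size, zx::VmarFlags::PERM_READ | zx::VmarFlags::PERM_WRITE)
    ///     .unwrap();
    /// let usercopy = Usercopy::new(addr..addr + page_size).unwrap();
    /// assert_eq!(usercopy.copyout(b"hello", addr), 5);
    /// ```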
    pub fn new(restricted_address_range: Range<usize>) -> Result<Self, zx::Status> {
        // Address ranges of the hermetic routines, used by the fault handler to decide
        // whether a faulting program counter belongs to one of them.
        let hermetic_copy_addr_range =
            hermetic_copy as *const () as usize..hermetic_copy_end as *const () as usize;

        let hermetic_copy_until_null_byte_addr_range = hermetic_copy_until_null_byte as *const ()
            as usize
            ..hermetic_copy_until_null_byte_end as *const () as usize;

        let hermetic_zero_addr_range =
            hermetic_zero as *const () as usize..hermetic_zero_end as *const () as usize;

        let atomic_load_relaxed_range = atomic_load_u32_relaxed as *const () as usize
            ..atomic_load_u32_relaxed_end as *const () as usize;

        let atomic_load_acquire_range = atomic_load_u32_acquire as *const () as usize
            ..atomic_load_u32_acquire_end as *const () as usize;

        let atomic_store_relaxed_range = atomic_store_u32_relaxed as *const () as usize
            ..atomic_store_u32_relaxed_end as *const () as usize;

        let atomic_store_release_range = atomic_store_u32_release as *const () as usize
            ..atomic_store_u32_release_end as *const () as usize;

        let atomic_compare_exchange_range = atomic_compare_exchange_u32_acq_rel as *const ()
            as usize
            ..atomic_compare_exchange_u32_acq_rel_end as *const () as usize;

        let atomic_compare_exchange_weak_range = atomic_compare_exchange_weak_u32_acq_rel
            as *const () as usize
            ..atomic_compare_exchange_weak_u32_acq_rel_end as *const () as usize;

        let (tx, rx) = std::sync::mpsc::channel::<zx::Status>();

        let shutdown_event = zx::Event::create();
        let shutdown_event_clone =
            shutdown_event.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap();

        // Spawn the thread that handles page-fault exceptions for this job.
        let faultable_addresses = restricted_address_range.clone();
        let join_handle = std::thread::spawn(move || {
            let exception_channel_result =
                fuchsia_runtime::job_default().create_exception_channel();

            let exception_channel = match exception_channel_result {
                Ok(c) => c,
                Err(e) => {
                    let _ = tx.send(e);
                    return;
                }
            };

            // Tell `new` that the exception channel was created successfully.
            let _ = tx.send(zx::Status::OK);

            loop {
                // Wait for either an exception message or the shutdown signal.
                let mut wait_items = [
                    zx::WaitItem {
                        handle: exception_channel.as_handle_ref(),
                        waitfor: zx::Signals::CHANNEL_READABLE,
                        pending: zx::Signals::empty(),
                    },
                    zx::WaitItem {
                        handle: shutdown_event_clone.as_handle_ref(),
                        waitfor: zx::Signals::USER_0,
                        pending: zx::Signals::empty(),
                    },
                ];
                let _ = zx::object_wait_many(&mut wait_items, zx::MonotonicInstant::INFINITE);
                if wait_items[1].pending == zx::Signals::USER_0 {
                    break;
                }
                let mut buf = zx::MessageBuf::new();
                exception_channel.read(&mut buf).unwrap();

                let excp_info = zx::sys::zx_exception_info_t::read_from_bytes(buf.bytes()).unwrap();

                // Only page faults are candidates for recovery.
                if excp_info.type_ != zx::sys::ZX_EXCP_FATAL_PAGE_FAULT {
                    continue;
                }

                let excp = zx::Exception::from_handle(buf.take_handle(0).unwrap());
                let thread = excp.get_thread().unwrap();
                let mut regs = thread.read_state_general_regs().unwrap();
                let report = thread.exception_report().unwrap();

                let (pc, fault_address) = parse_fault_exception(&mut regs, report);

                // Ignore faults on addresses outside the restricted range.
                if !faultable_addresses.contains(&fault_address) {
                    continue;
                }

                // Recover only if the fault happened inside one of the hermetic routines.
                if hermetic_copy_addr_range.contains(&pc)
                    || hermetic_copy_until_null_byte_addr_range.contains(&pc)
                    || hermetic_zero_addr_range.contains(&pc)
                {
                    set_registers_for_hermetic_error(&mut regs, fault_address);
                } else if atomic_load_relaxed_range.contains(&pc)
                    || atomic_load_acquire_range.contains(&pc)
                    || atomic_store_relaxed_range.contains(&pc)
                    || atomic_store_release_range.contains(&pc)
                    || atomic_compare_exchange_range.contains(&pc)
                    || atomic_compare_exchange_weak_range.contains(&pc)
                {
                    set_registers_for_atomic_error(&mut regs);
                } else {
                    continue;
                }

                thread.write_state_general_regs(regs).unwrap();
                excp.set_exception_state(&zx::sys::ZX_EXCEPTION_STATE_HANDLED).unwrap();
            }
        });

        // Fail construction if the exception channel could not be created.
        match rx.recv().unwrap() {
            zx::Status::OK => {}
            s => {
                return Err(s);
            }
        };

        Ok(Self { shutdown_event, join_handle: Some(join_handle), restricted_address_range })
    }

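    /// Copies `count` bytes between `source` and `dest` using the fault-recovering
    /// hermetic copy routine and returns the number of bytes copied.
    ///
    /// # Safety
    ///
    /// Same requirements as [`do_hermetic_copy`].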
    pub unsafe fn raw_hermetic_copy(
        &self,
        dest: *mut u8,
        source: *const u8,
        count: usize,
        ret_dest: bool,
    ) -> usize {
        #[allow(clippy::undocumented_unsafe_blocks, reason = "2024 edition migration")]
        unsafe {
            do_hermetic_copy(hermetic_copy, dest as usize, source as usize, count, ret_dest)
        }
    }

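    /// Zeroes `count` bytes starting at the restricted address `dest_addr`, returning the
    /// number of bytes actually zeroed (which may be short if a fault occurred).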
    pub fn zero(&self, dest_addr: usize, count: usize) -> usize {
        if dest_addr == 0 || !self.restricted_address_range.contains(&dest_addr) {
            return 0;
        }

        #[allow(
            clippy::undocumented_unsafe_blocks,
            reason = "Force documented unsafe blocks in Starnix"
        )]
        let unset_address = unsafe { hermetic_zero(dest_addr as *mut u8, count) };
        debug_assert!(
            unset_address >= dest_addr,
            "unset_address={:#x}, dest_addr={:#x}",
            unset_address,
            dest_addr,
        );
        let bytes_set = unset_address - dest_addr;
        debug_assert!(
            bytes_set <= count,
            "bytes_set={}, count={}; unset_address={:#x}, dest_addr={:#x}",
            bytes_set,
            count,
            unset_address,
            dest_addr,
        );
        bytes_set
    }

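    /// Copies the bytes in `source` to the restricted address `dest_addr`, returning the
    /// number of bytes copied before any fault.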
    pub fn copyout(&self, source: &[u8], dest_addr: usize) -> usize {
        if dest_addr == 0 || !self.restricted_address_range.contains(&dest_addr) {
            return 0;
        }

        // SAFETY: `source` is a valid slice; faults on `dest_addr` are recovered by the
        // fault-handling thread.
        unsafe {
            do_hermetic_copy(hermetic_copy, dest_addr, source.as_ptr() as usize, source.len(), true)
        }
    }

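    /// Copies up to `dest.len()` bytes from the restricted address `source_addr` into
    /// `dest`, returning the initialized prefix that was read and the remaining
    /// uninitialized tail.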
    pub fn copyin<'a>(
        &self,
        source_addr: usize,
        dest: &'a mut [MaybeUninit<u8>],
    ) -> (&'a mut [u8], &'a mut [MaybeUninit<u8>]) {
        let read_count =
            if source_addr == 0 || !self.restricted_address_range.contains(&source_addr) {
                0
            } else {
                // SAFETY: `dest` is a valid buffer of `dest.len()` bytes; faults on
                // `source_addr` are recovered by the fault-handling thread.
                unsafe {
                    do_hermetic_copy(
                        hermetic_copy,
                        dest.as_ptr() as usize,
                        source_addr,
                        dest.len(),
                        false,
                    )
                }
            };

        // SAFETY: `do_hermetic_copy` returned the number of bytes it initialized in `dest`.
        unsafe { assume_initialized_until(dest, read_count) }
    }

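    /// Like [`Self::copyin`], but stops after copying a null byte (which is included in
    /// the returned initialized prefix).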
    pub fn copyin_until_null_byte<'a>(
        &self,
        source_addr: usize,
        dest: &'a mut [MaybeUninit<u8>],
    ) -> (&'a mut [u8], &'a mut [MaybeUninit<u8>]) {
        let read_count =
            if source_addr == 0 || !self.restricted_address_range.contains(&source_addr) {
                0
            } else {
                // SAFETY: `dest` is a valid buffer of `dest.len()` bytes; faults on
                // `source_addr` are recovered by the fault-handling thread.
                unsafe {
                    do_hermetic_copy(
                        hermetic_copy_until_null_byte,
                        dest.as_ptr() as usize,
                        source_addr,
                        dest.len(),
                        false,
                    )
                }
            };

        // SAFETY: `do_hermetic_copy` returned the number of bytes it initialized in `dest`.
        unsafe { assume_initialized_until(dest, read_count) }
    }

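    /// Performs a u32 load via `load_fn`; the high 32 bits of the return value signal a
    /// fault, otherwise the low 32 bits are the loaded value.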
    #[inline]
    fn atomic_load_u32(
        &self,
        load_fn: unsafe extern "C" fn(usize) -> u64,
        addr: usize,
    ) -> Result<u32, ()> {
        #[allow(
            clippy::undocumented_unsafe_blocks,
            reason = "Force documented unsafe blocks in Starnix"
        )]
        let value_or_error = unsafe { load_fn(addr) };
        if value_or_error & ATOMIC_ERROR_MASK == 0 { Ok(value_or_error as u32) } else { Err(()) }
    }

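    /// Atomically loads the u32 at `addr` with relaxed ordering, or `Err(())` on fault.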
    pub fn atomic_load_u32_relaxed(&self, addr: usize) -> Result<u32, ()> {
        self.atomic_load_u32(atomic_load_u32_relaxed, addr)
    }

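    /// Atomically loads the u32 at `addr` with acquire ordering, or `Err(())` on fault.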
    pub fn atomic_load_u32_acquire(&self, addr: usize) -> Result<u32, ()> {
        self.atomic_load_u32(atomic_load_u32_acquire, addr)
    }

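    /// Performs a u32 store via `store_fn`; a non-zero return value signals a fault.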
    fn atomic_store_u32(
        &self,
        store_fn: unsafe extern "C" fn(usize, u32) -> u64,
        addr: usize,
        value: u32,
    ) -> Result<(), ()> {
        #[allow(
            clippy::undocumented_unsafe_blocks,
            reason = "Force documented unsafe blocks in Starnix"
        )]
        match unsafe { store_fn(addr, value) } {
            0 => Ok(()),
            _ => Err(()),
        }
    }

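    /// Atomically stores `value` to `addr` with relaxed ordering, or `Err(())` on fault.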
    pub fn atomic_store_u32_relaxed(&self, addr: usize, value: u32) -> Result<(), ()> {
        self.atomic_store_u32(atomic_store_u32_relaxed, addr, value)
    }

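    /// Atomically stores `value` to `addr` with release ordering, or `Err(())` on fault.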
    pub fn atomic_store_u32_release(&self, addr: usize, value: u32) -> Result<(), ()> {
        self.atomic_store_u32(atomic_store_u32_release, addr, value)
    }

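    /// Atomically compares-and-exchanges the u32 at `addr` with acquire/release ordering.
    ///
    /// Returns `Ok(Ok(previous))` on success, `Ok(Err(actual))` if the current value did
    /// not match `expected`, and `Err(())` if the access faulted.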
    pub fn atomic_compare_exchange_u32_acq_rel(
        &self,
        addr: usize,
        expected: u32,
        desired: u32,
    ) -> Result<Result<u32, u32>, ()> {
        let mut expected = expected;
        #[allow(
            clippy::undocumented_unsafe_blocks,
            reason = "Force documented unsafe blocks in Starnix"
        )]
        let value_or_error = unsafe {
            atomic_compare_exchange_u32_acq_rel(addr, &mut expected as *mut u32, desired)
        };
        Self::parse_compare_exchange_result(expected, value_or_error)
    }

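    /// Weak variant of [`Self::atomic_compare_exchange_u32_acq_rel`]; it may fail
    /// spuriously, returning `Ok(Err(_))` even when the values matched.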
    pub fn atomic_compare_exchange_weak_u32_acq_rel(
        &self,
        addr: usize,
        expected: u32,
        desired: u32,
    ) -> Result<Result<u32, u32>, ()> {
        let mut expected = expected;
        #[allow(
            clippy::undocumented_unsafe_blocks,
            reason = "Force documented unsafe blocks in Starnix"
        )]
        let value_or_error = unsafe {
            atomic_compare_exchange_weak_u32_acq_rel(addr, &mut expected as *mut u32, desired)
        };
        Self::parse_compare_exchange_result(expected, value_or_error)
    }

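    /// Decodes the raw compare-exchange return value: 1 means success, 0 means the value
    /// did not match (`expected` holds the observed value), anything else means a fault.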
    fn parse_compare_exchange_result(
        expected: u32,
        value_or_error: u64,
    ) -> Result<Result<u32, u32>, ()> {
        match value_or_error {
            0 => Ok(Err(expected)),
            1 => Ok(Ok(expected)),
            _ => Err(()),
        }
    }
}

impl Drop for Usercopy {
    fn drop(&mut self) {
        // Stop the fault-handling thread and wait for it to exit.
        self.shutdown_event.signal_handle(zx::Signals::empty(), zx::Signals::USER_0).unwrap();
        self.join_handle.take().unwrap().join().unwrap();
    }
}

#[cfg(test)]
mod test {
    #![allow(
        clippy::undocumented_unsafe_blocks,
        reason = "Force documented unsafe blocks in Starnix"
    )]
    use super::*;

    use test_case::test_case;

    impl Usercopy {
        fn new_for_test(restricted_address_range: Range<usize>) -> Self {
            Self::new(restricted_address_range).unwrap()
        }
    }

    #[test_case(0, 0)]
    #[test_case(1, 1)]
    #[test_case(7, 2)]
    #[test_case(8, 3)]
    #[test_case(9, 4)]
    #[test_case(128, 5)]
    #[test_case(zx::system_get_page_size() as usize - 1, 6)]
    #[test_case(zx::system_get_page_size() as usize, 7)]
    #[::fuchsia::test]
    fn zero_no_fault(zero_len: usize, ch: u8) {
        let page_size = zx::system_get_page_size() as usize;

        let dest_vmo = zx::Vmo::create(page_size as u64).unwrap();

        let root_vmar = fuchsia_runtime::vmar_root_self();

        let mapped_addr = root_vmar
            .map(0, &dest_vmo, 0, page_size, zx::VmarFlags::PERM_READ | zx::VmarFlags::PERM_WRITE)
            .unwrap();
        let mapped_bytes =
            unsafe { std::slice::from_raw_parts_mut(mapped_addr as *mut u8, page_size) };
        mapped_bytes.fill(ch);

        let usercopy = Usercopy::new_for_test(mapped_addr..mapped_addr + page_size);

        let result = usercopy.zero(mapped_addr, zero_len);
        assert_eq!(result, zero_len);

        assert_eq!(&mapped_bytes[..zero_len], &vec![0; zero_len]);
        assert_eq!(&mapped_bytes[zero_len..], &vec![ch; page_size - zero_len]);
    }

    #[test_case(1, 2, 0)]
    #[test_case(1, 4, 1)]
    #[test_case(1, 8, 2)]
    #[test_case(1, 16, 3)]
    #[test_case(1, 32, 4)]
    #[test_case(1, 64, 5)]
    #[test_case(1, 128, 6)]
    #[test_case(1, 256, 7)]
    #[test_case(1, 512, 8)]
    #[test_case(1, 1024, 9)]
    #[test_case(32, 64, 10)]
    #[test_case(32, 128, 11)]
    #[test_case(32, 256, 12)]
    #[test_case(32, 512, 13)]
    #[test_case(32, 1024, 14)]
    #[::fuchsia::test]
    fn zero_fault(offset: usize, zero_len: usize, ch: u8) {
        let page_size = zx::system_get_page_size() as usize;

        let dest_vmo = zx::Vmo::create(page_size as u64).unwrap();

        let root_vmar = fuchsia_runtime::vmar_root_self();

        let mapped_addr = root_vmar
            .map(
                0,
                &dest_vmo,
                0,
                page_size * 2,
                zx::VmarFlags::PERM_READ | zx::VmarFlags::PERM_WRITE,
            )
            .unwrap();
        let mapped_bytes =
            unsafe { std::slice::from_raw_parts_mut(mapped_addr as *mut u8, page_size) };
        mapped_bytes.fill(ch);

        let usercopy = Usercopy::new_for_test(mapped_addr..mapped_addr + page_size * 2);

        let dest_addr = mapped_addr + page_size - offset;

        let result = usercopy.zero(dest_addr, zero_len);
        assert_eq!(result, offset);

        assert_eq!(&mapped_bytes[page_size - offset..], &vec![0; offset][..]);
        assert_eq!(&mapped_bytes[..page_size - offset], &vec![ch; page_size - offset][..]);
    }

    #[test_case(0)]
    #[test_case(1)]
    #[test_case(7)]
    #[test_case(8)]
    #[test_case(9)]
    #[test_case(128)]
    #[test_case(zx::system_get_page_size() as usize - 1)]
    #[test_case(zx::system_get_page_size() as usize)]
    #[::fuchsia::test]
    fn copyout_no_fault(buf_len: usize) {
        let page_size = zx::system_get_page_size() as usize;

        let source = vec!['a' as u8; buf_len];

        let dest_vmo = zx::Vmo::create(page_size as u64).unwrap();

        let root_vmar = fuchsia_runtime::vmar_root_self();

        let mapped_addr = root_vmar
            .map(0, &dest_vmo, 0, page_size, zx::VmarFlags::PERM_READ | zx::VmarFlags::PERM_WRITE)
            .unwrap();

        let usercopy = Usercopy::new_for_test(mapped_addr..mapped_addr + page_size);

        let result = usercopy.copyout(&source, mapped_addr);
        assert_eq!(result, buf_len);

        assert_eq!(
            unsafe { std::slice::from_raw_parts(mapped_addr as *const u8, buf_len) },
            &vec!['a' as u8; buf_len]
        );
    }

    #[test_case(1, 2)]
    #[test_case(1, 4)]
    #[test_case(1, 8)]
    #[test_case(1, 16)]
    #[test_case(1, 32)]
    #[test_case(1, 64)]
    #[test_case(1, 128)]
    #[test_case(1, 256)]
    #[test_case(1, 512)]
    #[test_case(1, 1024)]
    #[test_case(32, 64)]
    #[test_case(32, 128)]
    #[test_case(32, 256)]
    #[test_case(32, 512)]
    #[test_case(32, 1024)]
    #[::fuchsia::test]
    fn copyout_fault(offset: usize, buf_len: usize) {
        let page_size = zx::system_get_page_size() as usize;

        let source = vec!['a' as u8; buf_len];

        let dest_vmo = zx::Vmo::create(page_size as u64).unwrap();

        let root_vmar = fuchsia_runtime::vmar_root_self();

        let mapped_addr = root_vmar
            .map(
                0,
                &dest_vmo,
                0,
                page_size * 2,
                zx::VmarFlags::PERM_READ | zx::VmarFlags::PERM_WRITE,
            )
            .unwrap();

        let usercopy = Usercopy::new_for_test(mapped_addr..mapped_addr + page_size * 2);

        let dest_addr = mapped_addr + page_size - offset;

        let result = usercopy.copyout(&source, dest_addr);

        assert_eq!(result, offset);

        assert_eq!(
            unsafe { std::slice::from_raw_parts(dest_addr as *const u8, offset) },
            &vec!['a' as u8; offset][..],
        );
    }

    #[test_case(0)]
    #[test_case(1)]
    #[test_case(7)]
    #[test_case(8)]
    #[test_case(9)]
    #[test_case(128)]
    #[test_case(zx::system_get_page_size() as usize - 1)]
    #[test_case(zx::system_get_page_size() as usize)]
    #[::fuchsia::test]
    fn copyin_no_fault(buf_len: usize) {
        let page_size = zx::system_get_page_size() as usize;

        let mut dest = Vec::with_capacity(buf_len);

        let source_vmo = zx::Vmo::create(page_size as u64).unwrap();

        let root_vmar = fuchsia_runtime::vmar_root_self();

        let mapped_addr = root_vmar
            .map(0, &source_vmo, 0, page_size, zx::VmarFlags::PERM_READ | zx::VmarFlags::PERM_WRITE)
            .unwrap();

        unsafe { std::slice::from_raw_parts_mut(mapped_addr as *mut u8, buf_len) }.fill('a' as u8);

        let usercopy = Usercopy::new_for_test(mapped_addr..mapped_addr + page_size);
        let dest_as_mut_ptr = dest.as_mut_ptr();
        let (read_bytes, unread_bytes) = usercopy.copyin(mapped_addr, dest.spare_capacity_mut());
        let expected = vec!['a' as u8; buf_len];
        assert_eq!(read_bytes, &expected);
        assert_eq!(unread_bytes.len(), 0);
        assert_eq!(read_bytes.as_mut_ptr(), dest_as_mut_ptr);

        unsafe { dest.set_len(buf_len) }
        assert_eq!(dest, expected);
    }

    #[test_case(1, 2)]
    #[test_case(1, 4)]
    #[test_case(1, 8)]
    #[test_case(1, 16)]
    #[test_case(1, 32)]
    #[test_case(1, 64)]
    #[test_case(1, 128)]
    #[test_case(1, 256)]
    #[test_case(1, 512)]
    #[test_case(1, 1024)]
    #[test_case(32, 64)]
    #[test_case(32, 128)]
    #[test_case(32, 256)]
    #[test_case(32, 512)]
    #[test_case(32, 1024)]
    #[::fuchsia::test]
    fn copyin_fault(offset: usize, buf_len: usize) {
        let page_size = zx::system_get_page_size() as usize;

        let mut dest = vec![0u8; buf_len];

        let source_vmo = zx::Vmo::create(page_size as u64).unwrap();

        let root_vmar = fuchsia_runtime::vmar_root_self();

        let mapped_addr = root_vmar
            .map(
                0,
                &source_vmo,
                0,
                page_size * 2,
                zx::VmarFlags::PERM_READ | zx::VmarFlags::PERM_WRITE,
            )
            .unwrap();

        let source_addr = mapped_addr + page_size - offset;

        unsafe { std::slice::from_raw_parts_mut(source_addr as *mut u8, offset) }.fill('a' as u8);

        let usercopy = Usercopy::new_for_test(mapped_addr..mapped_addr + page_size * 2);

        let (read_bytes, unread_bytes) =
            usercopy.copyin(source_addr, slice_to_maybe_uninit_mut(&mut dest));
        let expected_copied = vec!['a' as u8; offset];
        let expected_uncopied = vec![0 as u8; buf_len - offset];
        assert_eq!(read_bytes, &expected_copied);
        assert_eq!(unread_bytes.len(), expected_uncopied.len());

        assert_eq!(&dest[0..offset], &expected_copied);
        assert_eq!(&dest[offset..], &expected_uncopied);
    }

    #[test_case(0)]
    #[test_case(1)]
    #[test_case(7)]
    #[test_case(8)]
    #[test_case(9)]
    #[test_case(128)]
    #[test_case(zx::system_get_page_size() as usize - 1)]
    #[test_case(zx::system_get_page_size() as usize)]
    #[::fuchsia::test]
    fn copyin_until_null_byte_no_fault(buf_len: usize) {
        let page_size = zx::system_get_page_size() as usize;

        let mut dest = Vec::with_capacity(buf_len);

        let source_vmo = zx::Vmo::create(page_size as u64).unwrap();

        let root_vmar = fuchsia_runtime::vmar_root_self();

        let mapped_addr = root_vmar
            .map(0, &source_vmo, 0, page_size, zx::VmarFlags::PERM_READ | zx::VmarFlags::PERM_WRITE)
            .unwrap();

        unsafe { std::slice::from_raw_parts_mut(mapped_addr as *mut u8, buf_len) }.fill('a' as u8);

        let usercopy = Usercopy::new_for_test(mapped_addr..mapped_addr + page_size);

        let dest_as_mut_ptr = dest.as_mut_ptr();
        let (read_bytes, unread_bytes) =
            usercopy.copyin_until_null_byte(mapped_addr, dest.spare_capacity_mut());
        let expected = vec!['a' as u8; buf_len];
        assert_eq!(read_bytes, &expected);
        assert_eq!(unread_bytes.len(), 0);
        assert_eq!(read_bytes.as_mut_ptr(), dest_as_mut_ptr);

        unsafe { dest.set_len(dest.capacity()) }
        assert_eq!(dest, expected);
    }

    #[test_case(1, 2)]
    #[test_case(1, 4)]
    #[test_case(1, 8)]
    #[test_case(1, 16)]
    #[test_case(1, 32)]
    #[test_case(1, 64)]
    #[test_case(1, 128)]
    #[test_case(1, 256)]
    #[test_case(1, 512)]
    #[test_case(1, 1024)]
    #[test_case(32, 64)]
    #[test_case(32, 128)]
    #[test_case(32, 256)]
    #[test_case(32, 512)]
    #[test_case(32, 1024)]
    #[::fuchsia::test]
    fn copyin_until_null_byte_fault(offset: usize, buf_len: usize) {
        let page_size = zx::system_get_page_size() as usize;

        let mut dest = vec![0u8; buf_len];

        let source_vmo = zx::Vmo::create(page_size as u64).unwrap();

        let root_vmar = fuchsia_runtime::vmar_root_self();

        let mapped_addr = root_vmar
            .map(
                0,
                &source_vmo,
                0,
                page_size * 2,
                zx::VmarFlags::PERM_READ | zx::VmarFlags::PERM_WRITE,
            )
            .unwrap();

        let source_addr = mapped_addr + page_size - offset;

        unsafe { std::slice::from_raw_parts_mut(source_addr as *mut u8, offset) }.fill('a' as u8);

        let usercopy = Usercopy::new_for_test(mapped_addr..mapped_addr + page_size * 2);

        let (read_bytes, unread_bytes) =
            usercopy.copyin_until_null_byte(source_addr, slice_to_maybe_uninit_mut(&mut dest));
        let expected_copied = vec!['a' as u8; offset];
        let expected_uncopied = vec![0 as u8; buf_len - offset];
        assert_eq!(read_bytes, &expected_copied);
        assert_eq!(unread_bytes.len(), expected_uncopied.len());

        assert_eq!(&dest[0..offset], &expected_copied);
        assert_eq!(&dest[offset..], &expected_uncopied);
    }

    #[test_case(0)]
    #[test_case(1)]
    #[test_case(2)]
    #[test_case(126)]
    #[test_case(127)]
    #[::fuchsia::test]
    fn copyin_until_null_byte_no_fault_with_zero(zero_idx: usize) {
        const DEST_LEN: usize = 128;

        let page_size = zx::system_get_page_size() as usize;

        let mut dest = vec!['b' as u8; DEST_LEN];

        let source_vmo = zx::Vmo::create(page_size as u64).unwrap();

        let root_vmar = fuchsia_runtime::vmar_root_self();

        let mapped_addr = root_vmar
            .map(0, &source_vmo, 0, page_size, zx::VmarFlags::PERM_READ | zx::VmarFlags::PERM_WRITE)
            .unwrap();

        {
            let slice =
                unsafe { std::slice::from_raw_parts_mut(mapped_addr as *mut u8, dest.len()) };
            slice.fill('a' as u8);
            slice[zero_idx] = 0;
        };

        let usercopy = Usercopy::new_for_test(mapped_addr..mapped_addr + page_size);

        let (read_bytes, unread_bytes) =
            usercopy.copyin_until_null_byte(mapped_addr, slice_to_maybe_uninit_mut(&mut dest));
        let expected_copied_non_zero_bytes = vec!['a' as u8; zero_idx];
        let expected_uncopied = vec!['b' as u8; DEST_LEN - zero_idx - 1];
        assert_eq!(&read_bytes[..zero_idx], &expected_copied_non_zero_bytes);
        assert_eq!(&read_bytes[zero_idx..], &[0]);
        assert_eq!(unread_bytes.len(), expected_uncopied.len());

        assert_eq!(&dest[..zero_idx], &expected_copied_non_zero_bytes);
        assert_eq!(dest[zero_idx], 0);
        assert_eq!(&dest[zero_idx + 1..], &expected_uncopied);
    }

    #[test_case(0..1, 0)]
    #[test_case(0..1, 1)]
    #[test_case(0..1, 2)]
    #[test_case(5..10, 0)]
    #[test_case(5..10, 1)]
    #[test_case(5..10, 2)]
    #[test_case(5..10, 5)]
    #[test_case(5..10, 7)]
    #[test_case(5..10, 10)]
    #[::fuchsia::test]
    fn starting_fault_address_copyin_until_null_byte(range: Range<usize>, addr: usize) {
        let usercopy = Usercopy::new_for_test(range);

        let mut dest = vec![0u8];

        let (read_bytes, unread_bytes) =
            usercopy.copyin_until_null_byte(addr, slice_to_maybe_uninit_mut(&mut dest));
        assert_eq!(read_bytes, &[] as &[u8]);
        assert_eq!(unread_bytes.len(), dest.len());
        assert_eq!(dest, [0]);
    }

    #[test_case(0..1, 0)]
    #[test_case(0..1, 1)]
    #[test_case(0..1, 2)]
    #[test_case(5..10, 0)]
    #[test_case(5..10, 1)]
    #[test_case(5..10, 2)]
    #[test_case(5..10, 5)]
    #[test_case(5..10, 7)]
    #[test_case(5..10, 10)]
    #[::fuchsia::test]
    fn starting_fault_address_copyin(range: Range<usize>, addr: usize) {
        let usercopy = Usercopy::new_for_test(range);

        let mut dest = vec![0u8];

        let (read_bytes, unread_bytes) =
            usercopy.copyin(addr, slice_to_maybe_uninit_mut(&mut dest));
        assert_eq!(read_bytes, &[] as &[u8]);
        assert_eq!(unread_bytes.len(), dest.len());
        assert_eq!(dest, [0]);
    }

    #[test_case(0..1, 0)]
    #[test_case(0..1, 1)]
    #[test_case(0..1, 2)]
    #[test_case(5..10, 0)]
    #[test_case(5..10, 1)]
    #[test_case(5..10, 2)]
    #[test_case(5..10, 5)]
    #[test_case(5..10, 7)]
    #[test_case(5..10, 10)]
    #[::fuchsia::test]
    fn starting_fault_address_copyout(range: Range<usize>, addr: usize) {
        let usercopy = Usercopy::new_for_test(range);

        let source = vec![0u8];

        let result = usercopy.copyout(&source, addr);
        assert_eq!(result, 0);
        assert_eq!(source, [0]);
    }

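    /// Test helper that maps a single page with the given flags and builds a [`Usercopy`]
    /// covering it; the mapping is removed on drop.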
    struct MappedPageUsercopy {
        usercopy: Usercopy,
        addr: usize,
    }

    impl MappedPageUsercopy {
        fn new(flags: zx::VmarFlags) -> Self {
            let page_size = zx::system_get_page_size() as usize;

            let vmo = zx::Vmo::create(page_size as u64).unwrap();

            let root_vmar = fuchsia_runtime::vmar_root_self();

            let addr = root_vmar.map(0, &vmo, 0, page_size, flags).unwrap();

            let usercopy = Usercopy::new_for_test(addr..addr + page_size);
            Self { usercopy, addr }
        }
    }

    impl std::ops::Drop for MappedPageUsercopy {
        fn drop(&mut self) {
            let page_size = zx::system_get_page_size() as usize;

            unsafe { fuchsia_runtime::vmar_root_self().unmap(self.addr, page_size) }.unwrap();
        }
    }

    #[test_case(|usercopy, mapped_addr| usercopy.atomic_load_u32_relaxed(mapped_addr); "relaxed")]
    #[test_case(|usercopy, mapped_addr| usercopy.atomic_load_u32_acquire(mapped_addr); "acquire")]
    #[::fuchsia::test]
    fn atomic_load_u32_no_fault(load_fn: fn(&Usercopy, usize) -> Result<u32, ()>) {
        let m = MappedPageUsercopy::new(zx::VmarFlags::PERM_READ | zx::VmarFlags::PERM_WRITE);

        unsafe { *(m.addr as *mut u32) = 0x12345678 };

        let result = load_fn(&m.usercopy, m.addr);

        assert_eq!(Ok(0x12345678), result);
    }

    #[test_case(|usercopy, mapped_addr| usercopy.atomic_load_u32_relaxed(mapped_addr); "relaxed")]
    #[test_case(|usercopy, mapped_addr| usercopy.atomic_load_u32_acquire(mapped_addr); "acquire")]
    #[::fuchsia::test]
    fn atomic_load_u32_fault(load_fn: fn(&Usercopy, usize) -> Result<u32, ()>) {
        let m = MappedPageUsercopy::new(zx::VmarFlags::empty());

        let result = load_fn(&m.usercopy, m.addr);
        assert_eq!(Err(()), result);
    }

    #[test_case(|usercopy, mapped_addr, val| usercopy.atomic_store_u32_relaxed(mapped_addr, val); "relaxed")]
    #[test_case(|usercopy, mapped_addr, val| usercopy.atomic_store_u32_release(mapped_addr, val); "release")]
    #[::fuchsia::test]
    fn atomic_store_u32_no_fault(store_fn: fn(&Usercopy, usize, u32) -> Result<(), ()>) {
        let m = MappedPageUsercopy::new(zx::VmarFlags::PERM_READ | zx::VmarFlags::PERM_WRITE);

        assert_eq!(store_fn(&m.usercopy, m.addr, 0x12345678), Ok(()));

        assert_eq!(unsafe { *(m.addr as *mut u32) }, 0x12345678);
    }

    #[test_case(|usercopy, mapped_addr, val| usercopy.atomic_store_u32_relaxed(mapped_addr, val); "relaxed")]
    #[test_case(|usercopy, mapped_addr, val| usercopy.atomic_store_u32_release(mapped_addr, val); "release")]
    #[::fuchsia::test]
    fn atomic_store_u32_fault(store_fn: fn(&Usercopy, usize, u32) -> Result<(), ()>) {
        let m = MappedPageUsercopy::new(zx::VmarFlags::empty());

        let result = store_fn(&m.usercopy, m.addr, 0x12345678);
        assert_eq!(Err(()), result);

        let page_size = zx::system_get_page_size() as usize;
        unsafe {
            fuchsia_runtime::vmar_root_self().protect(m.addr, page_size, zx::VmarFlags::PERM_READ)
        }
        .unwrap();

        assert_ne!(unsafe { *(m.addr as *mut u32) }, 0x12345678);
    }

    #[::fuchsia::test]
    fn atomic_compare_exchange_u32_acq_rel_no_fault() {
        let m = MappedPageUsercopy::new(zx::VmarFlags::PERM_READ | zx::VmarFlags::PERM_WRITE);

        unsafe { *(m.addr as *mut u32) = 0x12345678 };

        assert_eq!(
            m.usercopy.atomic_compare_exchange_u32_acq_rel(m.addr, 0x12345678, 0xffffffff),
            Ok(Ok(0x12345678))
        );

        assert_eq!(unsafe { *(m.addr as *mut u32) }, 0xffffffff);

        assert_eq!(
            m.usercopy.atomic_compare_exchange_u32_acq_rel(m.addr, 0x22222222, 0x11111111),
            Ok(Err(0xffffffff))
        );

        assert_eq!(unsafe { *(m.addr as *mut u32) }, 0xffffffff);
    }

    #[::fuchsia::test]
    fn atomic_compare_exchange_u32_acq_rel_fault() {
        let m = MappedPageUsercopy::new(zx::VmarFlags::empty());

        let result = m.usercopy.atomic_compare_exchange_u32_acq_rel(m.addr, 0x00000000, 0x11111111);
        assert_eq!(Err(()), result);

        let page_size = zx::system_get_page_size() as usize;
        unsafe {
            fuchsia_runtime::vmar_root_self().protect(m.addr, page_size, zx::VmarFlags::PERM_READ)
        }
        .unwrap();

        assert_eq!(unsafe { *(m.addr as *mut u32) }, 0x00000000);
    }
}