1pub use super::signal_handling::sys_restart_syscall;
6use super::signalfd::SignalFd;
7use crate::mm::MemoryAccessorExt;
8use crate::security;
9use crate::signals::{
10 IntoSignalInfoOptions, SI_MAX_SIZE_AS_USIZE, SignalDetail, SignalInfo, UncheckedSignalInfo,
11 restore_from_signal_handler, send_signal,
12};
13use crate::task::{
14 CurrentTask, PidTable, ProcessEntryRef, ProcessSelector, Task, TaskMutableState, ThreadGroup,
15 ThreadGroupLifecycleWaitValue, WaitResult, WaitableChildResult, Waiter,
16};
17use crate::vfs::{FdFlags, FdNumber};
18use starnix_sync::{LockBefore, RwLockReadGuard, ThreadGroupLimits};
19use starnix_uapi::user_address::{ArchSpecific, MultiArchUserRef};
20use starnix_uapi::{tid_t, uapi};
21
22use starnix_logging::track_stub;
23use starnix_sync::{Locked, Unlocked};
24use starnix_syscalls::SyscallResult;
25use starnix_types::time::{duration_from_timespec, timeval_from_duration};
26use starnix_uapi::errors::{EINTR, ETIMEDOUT, Errno, ErrnoResultExt};
27use starnix_uapi::open_flags::OpenFlags;
28use starnix_uapi::signals::{SigSet, Signal, UNBLOCKABLE_SIGNALS, UncheckedSignal};
29use starnix_uapi::user_address::{UserAddress, UserRef};
30use starnix_uapi::{
31 __WALL, __WCLONE, P_ALL, P_PGID, P_PID, P_PIDFD, SFD_CLOEXEC, SFD_NONBLOCK, SI_TKILL,
32 SIG_BLOCK, SIG_SETMASK, SIG_UNBLOCK, SS_AUTODISARM, SS_DISABLE, SS_ONSTACK, WCONTINUED,
33 WEXITED, WNOHANG, WNOWAIT, WSTOPPED, WUNTRACED, errno, error, pid_t, rusage, sigaltstack,
34};
35use static_assertions::const_assert_eq;
36use zerocopy::{FromBytes, Immutable, IntoBytes};
37
/// Multi-arch user pointer to a `rusage` structure.
pub type RUsagePtr = MultiArchUserRef<uapi::rusage, uapi::arch32::rusage>;
/// Multi-arch user pointer to a sigaction whose arch32 layout uses the 64-bit sigset.
type SigAction64Ptr = MultiArchUserRef<uapi::sigaction_t, uapi::arch32::sigaction64_t>;
/// Multi-arch user pointer to a sigaction whose arch32 layout uses the legacy sigset.
type SigActionPtr = MultiArchUserRef<uapi::sigaction_t, uapi::arch32::sigaction_t>;
41
/// Implements `rt_sigaction`: installs and/or retrieves the disposition for `signum`.
///
/// `sigset_size` selects the layout: an arch32 caller passing the legacy
/// 32-bit sigset size gets the legacy `sigaction_t` layout; everyone else
/// must pass the full `sigset_t` size or get `EINVAL`.
pub fn sys_rt_sigaction(
    _locked: &mut Locked<Unlocked>,
    current_task: &CurrentTask,
    signum: UncheckedSignal,
    user_action: SigAction64Ptr,
    user_old_action: SigAction64Ptr,
    sigset_size: usize,
) -> Result<(), Errno> {
    // arch32 tasks may use the legacy sigaction layout, identified by the
    // smaller sigset size; re-wrap both pointers with the legacy type.
    if user_action.is_arch32() && sigset_size == std::mem::size_of::<uapi::arch32::sigset_t>() {
        let user_action = SigActionPtr::from_32(user_action.addr().into());
        let user_old_action = SigActionPtr::from_32(user_old_action.addr().into());
        return rt_sigaction(current_task, signum, user_action, user_old_action);
    }

    // Otherwise the caller must pass the exact kernel sigset size.
    if sigset_size != std::mem::size_of::<uapi::sigset_t>() {
        return error!(EINVAL);
    }
    rt_sigaction(current_task, signum, user_action, user_old_action)
}
75
/// Shared implementation of `rt_sigaction`, generic over the arch32 sigaction
/// layout so both the legacy and 64-bit-sigset variants are handled by the
/// same code path.
fn rt_sigaction<Arch32SigAction>(
    current_task: &CurrentTask,
    signum: UncheckedSignal,
    user_action: MultiArchUserRef<uapi::sigaction_t, Arch32SigAction>,
    user_old_action: MultiArchUserRef<uapi::sigaction_t, Arch32SigAction>,
) -> Result<(), Errno>
where
    Arch32SigAction:
        IntoBytes + FromBytes + Immutable + TryFrom<uapi::sigaction_t> + TryInto<uapi::sigaction_t>,
{
    let signal = Signal::try_from(signum)?;

    // A non-null `user_action` means the caller wants to install a new
    // disposition; dispositions of unblockable signals cannot be changed.
    let new_signal_action = if !user_action.is_null() {
        if signal.is_unblockable() {
            return error!(EINVAL);
        }

        let signal_action = current_task.read_multi_arch_object(user_action)?;
        Some(signal_action)
    } else {
        None
    };

    let signal_actions = &current_task.thread_group().signal_actions;
    // Either swap in the new action (which yields the previous one) or just
    // read the current action when the caller only queries it.
    let old_action = if let Some(new_signal_action) = new_signal_action {
        signal_actions.set(signal, new_signal_action)
    } else {
        signal_actions.get(signal)
    };

    if !user_old_action.is_null() {
        current_task.write_multi_arch_object(user_old_action, old_action)?;
    }

    Ok(())
}
115
116pub fn sys_rt_sigpending(
126 _locked: &mut Locked<Unlocked>,
127 current_task: &CurrentTask,
128 set: UserRef<SigSet>,
129 sigset_size: usize,
130) -> Result<(), Errno> {
131 if sigset_size != std::mem::size_of::<SigSet>() {
132 return error!(EINVAL);
133 }
134
135 let signals = current_task.read().pending_signals();
136 current_task.write_object(set, &signals)?;
137 Ok(())
138}
139
140pub fn sys_rt_sigprocmask(
153 _locked: &mut Locked<Unlocked>,
154 current_task: &CurrentTask,
155 how: u32,
156 user_set: UserRef<SigSet>,
157 user_old_set: UserRef<SigSet>,
158 sigset_size: usize,
159) -> Result<(), Errno> {
160 if sigset_size != std::mem::size_of::<SigSet>() {
161 return error!(EINVAL);
162 }
163 match how {
164 SIG_BLOCK | SIG_UNBLOCK | SIG_SETMASK => (),
165 _ => return error!(EINVAL),
166 };
167
168 let mut new_mask = SigSet::default();
171 if !user_set.is_null() {
172 new_mask = current_task.read_object(user_set)?;
173 }
174
175 let mut state = current_task.write();
176 let signal_mask = state.signal_mask();
177 if !user_old_set.is_null() {
179 current_task.write_object(user_old_set, &signal_mask)?;
180 }
181
182 if user_set.is_null() {
184 return Ok(());
185 }
186
187 let signal_mask = match how {
188 SIG_BLOCK => signal_mask | new_mask,
189 SIG_UNBLOCK => signal_mask & !new_mask,
190 SIG_SETMASK => new_mask,
191 _ => return error!(EINVAL),
193 };
194 state.set_signal_mask(signal_mask);
195
196 Ok(())
197}
198
199type SigAltStackPtr = MultiArchUserRef<uapi::sigaltstack, uapi::arch32::sigaltstack>;
200
/// Implements `sigaltstack`: configures and/or queries the alternate signal
/// stack for the calling task.
pub fn sys_sigaltstack(
    _locked: &mut Locked<Unlocked>,
    current_task: &CurrentTask,
    user_ss: SigAltStackPtr,
    user_old_ss: SigAltStackPtr,
) -> Result<(), Errno> {
    let stack_pointer_register = current_task.thread_state.registers.stack_pointer_register();
    let mut state = current_task.write();
    // Whether the task is currently executing on the alternate stack; the
    // stack cannot be replaced while it is in use.
    let on_signal_stack = state.on_signal_stack(stack_pointer_register);

    let mut ss = sigaltstack::default();
    if !user_ss.is_null() {
        if on_signal_stack {
            return error!(EPERM);
        }
        ss = current_task.read_multi_arch_object(user_ss)?;
        // Only SS_AUTODISARM and SS_DISABLE may be set by the caller.
        if (ss.ss_flags & !((SS_AUTODISARM | SS_DISABLE) as i32)) != 0 {
            return error!(EINVAL);
        }
        // When enabling a stack, it must be at least MINSIGSTKSZ bytes
        // (using the arch32 minimum for arch32 tasks).
        let min_stack_size =
            if current_task.is_arch32() { uapi::arch32::MINSIGSTKSZ } else { uapi::MINSIGSTKSZ };
        if ss.ss_flags & (SS_DISABLE as i32) == 0 && ss.ss_size < min_stack_size as u64 {
            return error!(ENOMEM);
        }
    }

    if !user_old_ss.is_null() {
        // Report the previous stack; an unconfigured stack reads back as
        // disabled, and an in-use one is flagged SS_ONSTACK.
        let mut old_ss = match state.sigaltstack() {
            Some(old_ss) => old_ss,
            None => sigaltstack { ss_flags: SS_DISABLE as i32, ..sigaltstack::default() },
        };
        if on_signal_stack {
            old_ss.ss_flags = SS_ONSTACK as i32;
        }
        current_task.write_multi_arch_object(user_old_ss, old_ss)?;
    }

    // Apply the new setting only after the old one has been reported.
    if !user_ss.is_null() {
        if ss.ss_flags & (SS_DISABLE as i32) != 0 {
            state.set_sigaltstack(None);
        } else {
            state.set_sigaltstack(Some(ss));
        }
    }

    Ok(())
}
256
/// Implements `rt_sigsuspend`: temporarily replaces the signal mask with the
/// mask read from `user_mask` and blocks until a signal wakes the task.
///
/// An interrupted wait is reported as `ERESTARTNOHAND`.
pub fn sys_rt_sigsuspend(
    locked: &mut Locked<Unlocked>,
    current_task: &mut CurrentTask,
    user_mask: UserRef<SigSet>,
    sigset_size: usize,
) -> Result<(), Errno> {
    if sigset_size != std::mem::size_of::<SigSet>() {
        return error!(EINVAL);
    }
    let mask = current_task.read_object(user_mask)?;

    let waiter = Waiter::new();
    current_task
        .wait_with_temporary_mask(locked, mask, |locked, current_task| {
            waiter.wait(locked, current_task)
        })
        .map_eintr(|| errno!(ERESTARTNOHAND))
}
289
/// Implements `rt_sigtimedwait`: waits until one of the signals in the set at
/// `set_addr` is pending, optionally bounded by the timeout at `timeout_addr`.
///
/// On success the dequeued signal is returned (and its siginfo written to
/// `siginfo_addr` if non-null). A timeout is reported as `EAGAIN`.
pub fn sys_rt_sigtimedwait(
    locked: &mut Locked<Unlocked>,
    current_task: &mut CurrentTask,
    set_addr: UserRef<SigSet>,
    siginfo_addr: MultiArchUserRef<uapi::siginfo_t, uapi::arch32::siginfo_t>,
    timeout_addr: MultiArchUserRef<uapi::timespec, uapi::arch32::timespec>,
    sigset_size: usize,
) -> Result<Signal, Errno> {
    if sigset_size != std::mem::size_of::<SigSet>() {
        return error!(EINVAL);
    }

    let set = current_task.read_object(set_addr)?;
    // Unblockable signals cannot be waited for via this syscall.
    let unblock = set & !UNBLOCKABLE_SIGNALS;
    // A null timeout means wait forever.
    let deadline = if timeout_addr.is_null() {
        zx::MonotonicInstant::INFINITE
    } else {
        let timeout = current_task.read_multi_arch_object(timeout_addr)?;
        zx::MonotonicInstant::after(duration_from_timespec(timeout)?)
    };

    let signal_info = loop {
        let waiter;

        {
            let mut task_state = current_task.write();
            // Fast path: a matching signal is already pending.
            if let Some(signal) = task_state.take_signal_with_mask(!unblock) {
                break signal;
            }

            // Register the waiter while still holding the state lock so a
            // signal arriving between the check and the wait is not lost.
            waiter = Waiter::new();
            task_state.wait_on_signal(&waiter);
        }

        // Wait with the requested signals temporarily unblocked.
        let tmp_mask = current_task.read().signal_mask() & !unblock;

        let waiter_result =
            current_task.wait_with_temporary_mask(locked, tmp_mask, |locked, current_task| {
                waiter.wait_until(locked, current_task, deadline)
            });

        current_task.write().restore_signal_mask();

        if let Err(e) = waiter_result {
            if e == EINTR {
                // The interruption may have been exactly the signal we were
                // waiting for; if so, consume and return it.
                if let Some(signal) = current_task.write().take_signal_with_mask(!unblock) {
                    break signal;
                }
            } else if e == ETIMEDOUT {
                // rt_sigtimedwait reports an elapsed timeout as EAGAIN.
                return error!(EAGAIN);
            }

            return Err(e);
        }
    };

    if !siginfo_addr.is_null() {
        signal_info.write(current_task, siginfo_addr)?;
    }

    Ok(signal_info.signal)
}
373
374pub fn sys_signalfd4(
388 locked: &mut Locked<Unlocked>,
389 current_task: &CurrentTask,
390 fd: FdNumber,
391 mask_addr: UserRef<SigSet>,
392 mask_size: usize,
393 flags: u32,
394) -> Result<FdNumber, Errno> {
395 if flags & !(SFD_CLOEXEC | SFD_NONBLOCK) != 0 {
396 return error!(EINVAL);
397 }
398 if mask_size != std::mem::size_of::<SigSet>() {
399 return error!(EINVAL);
400 }
401 let mask = current_task.read_object(mask_addr)?;
402
403 if fd.raw() != -1 {
404 let file = current_task.files.get(fd)?;
405 let file = file.downcast_file::<SignalFd>().ok_or_else(|| errno!(EINVAL))?;
406 file.set_mask(mask);
407 Ok(fd)
408 } else {
409 let signalfd = SignalFd::new_file(locked, current_task, mask, flags);
410 let flags = if flags & SFD_CLOEXEC != 0 { FdFlags::CLOEXEC } else { FdFlags::empty() };
411 let fd = current_task.add_file(locked, signalfd, flags)?;
412 Ok(fd)
413 }
414}
415
/// Validates and sends `unchecked_signal` from `current_task` to `target`,
/// recording `si_code` in the generated siginfo.
///
/// Signal 0 performs only the permission check (the kill(2) existence probe)
/// and delivers nothing.
#[track_caller]
fn send_unchecked_signal<L>(
    locked: &mut Locked<L>,
    current_task: &CurrentTask,
    target: &Task,
    unchecked_signal: UncheckedSignal,
    si_code: i32,
) -> Result<(), Errno>
where
    L: LockBefore<ThreadGroupLimits>,
{
    current_task.can_signal(&target, unchecked_signal)?;

    // Signal 0: permission/existence check only.
    if unchecked_signal.is_zero() {
        return Ok(());
    }

    let signal = Signal::try_from(unchecked_signal)?;
    security::check_signal_access(current_task, &target, signal)?;

    send_signal(
        locked,
        target,
        SignalInfo::with_sender(
            signal,
            si_code,
            // Record the sender's pid and uid as kill(2) does.
            SignalDetail::Kill {
                pid: current_task.thread_group().leader,
                uid: current_task.current_creds().uid,
            },
            Some(current_task.weak_self.clone()),
        ),
    )
}
451
/// Like [`send_unchecked_signal`], but reads the full siginfo from user
/// memory at `siginfo_ref` instead of synthesizing one.
#[track_caller]
fn send_unchecked_signal_info<L>(
    locked: &mut Locked<L>,
    current_task: &CurrentTask,
    target: &Task,
    unchecked_signal: UncheckedSignal,
    siginfo_ref: UserAddress,
) -> Result<(), Errno>
where
    L: LockBefore<ThreadGroupLimits>,
{
    current_task.can_signal(&target, unchecked_signal)?;

    if unchecked_signal.is_zero() {
        // Even for signal 0, the siginfo buffer must be readable.
        current_task.read_memory_to_array::<SI_MAX_SIZE_AS_USIZE>(siginfo_ref)?;
        return Ok(());
    }

    let signal = Signal::try_from(unchecked_signal)?;
    security::check_signal_access(current_task, &target, signal)?;

    let siginfo = UncheckedSignalInfo::read_from_siginfo(current_task, siginfo_ref)?;
    // Cross-process senders may not forge kernel-generated codes (>= 0) nor
    // SI_TKILL; only a process signalling itself may use them.
    if target.get_pid() != current_task.get_pid()
        && (siginfo.code() >= 0 || siginfo.code() == SI_TKILL)
    {
        return error!(EINVAL);
    }

    send_signal(locked, &target, siginfo.into_signal_info(signal, IntoSignalInfoOptions::None)?)
}
484
/// Implements the `kill` syscall.
///
/// The `pid` argument selects the target(s):
///   * `pid > 0`  — the process with that pid (or the thread group of the
///     task with that tid),
///   * `pid == -1` — every thread group except the caller's own and the one
///     whose leader is pid 1,
///   * `pid == 0`  — every process in the caller's process group,
///   * `pid < -1`  — every process in the process group `-pid`.
pub fn sys_kill(
    locked: &mut Locked<Unlocked>,
    current_task: &CurrentTask,
    pid: pid_t,
    unchecked_signal: UncheckedSignal,
) -> Result<(), Errno> {
    let pids = current_task.kernel().pids.read();
    match pid {
        pid if pid > 0 => {
            let target_thread_group = {
                match pids.get_process(pid) {
                    Some(ProcessEntryRef::Process(process)) => process,

                    // Signalling a zombie succeeds without delivering anything.
                    Some(ProcessEntryRef::Zombie(_zombie)) => return Ok(()),

                    None => {
                        // The pid may name a non-leader task; signal the
                        // thread group that task belongs to.
                        let weak_task = pids.get_task(pid);
                        let task = Task::from_weak(&weak_task)?;
                        task.thread_group().clone()
                    }
                }
            };

            target_thread_group.send_signal_unchecked(current_task, unchecked_signal)?;
        }
        pid if pid == -1 => {
            let thread_groups = pids.get_thread_groups();
            signal_thread_groups(
                current_task,
                unchecked_signal,
                // Skip the caller's own group and the group led by pid 1.
                thread_groups.into_iter().filter(|thread_group| {
                    if *current_task.thread_group() == *thread_group {
                        return false;
                    }
                    if thread_group.leader == 1 {
                        return false;
                    }
                    true
                }),
            )?;
        }
        _ => {
            // pid == 0 targets the caller's process group; pid < -1 targets
            // the process group with id -pid.
            let process_group_id = match pid {
                0 => current_task.thread_group().read().process_group.leader,
                _ => negate_pid(pid)?,
            };

            let process_group = pids.get_process_group(process_group_id);
            let thread_groups = process_group
                .iter()
                .flat_map(|pg| pg.read(locked).thread_groups().collect::<Vec<_>>());
            signal_thread_groups(current_task, unchecked_signal, thread_groups)?;
        }
    };

    Ok(())
}
568
569fn verify_tgid_for_task(
570 task: &Task,
571 tgid: pid_t,
572 pids: &RwLockReadGuard<'_, PidTable>,
573) -> Result<(), Errno> {
574 let thread_group = match pids.get_process(tgid) {
575 Some(ProcessEntryRef::Process(proc)) => proc,
576 Some(ProcessEntryRef::Zombie(_)) => return error!(EINVAL),
577 None => return error!(ESRCH),
578 };
579 if *task.thread_group() != thread_group {
580 return error!(EINVAL);
581 } else {
582 Ok(())
583 }
584}
585
586pub fn sys_tkill(
598 locked: &mut Locked<Unlocked>,
599 current_task: &CurrentTask,
600 tid: tid_t,
601 unchecked_signal: UncheckedSignal,
602) -> Result<(), Errno> {
603 if tid <= 0 {
605 return error!(EINVAL);
606 }
607 let thread_weak = current_task.get_task(tid);
608 let thread = Task::from_weak(&thread_weak)?;
609 send_unchecked_signal(locked, current_task, &thread, unchecked_signal, SI_TKILL)
610}
611
612pub fn sys_tgkill(
623 locked: &mut Locked<Unlocked>,
624 current_task: &CurrentTask,
625 tgid: pid_t,
626 tid: tid_t,
627 unchecked_signal: UncheckedSignal,
628) -> Result<(), Errno> {
629 if tgid <= 0 || tid <= 0 {
631 return error!(EINVAL);
632 }
633 let pids = current_task.kernel().pids.read();
634
635 let weak_target = pids.get_task(tid);
636 let thread = Task::from_weak(&weak_target)?;
637 verify_tgid_for_task(&thread, tgid, &pids)?;
638
639 send_unchecked_signal(locked, current_task, &thread, unchecked_signal, SI_TKILL)
640}
641
/// Implements `rt_sigreturn`: restores the register state saved before the
/// signal handler ran and resumes the interrupted computation.
pub fn sys_rt_sigreturn(
    _locked: &mut Locked<Unlocked>,
    current_task: &mut CurrentTask,
) -> Result<SyscallResult, Errno> {
    restore_from_signal_handler(current_task)?;
    // Surface the restored context's return register so the interrupted
    // syscall's result is preserved across the handler.
    Ok(current_task.thread_state.registers.return_register().into())
}
657
658pub fn sys_rt_sigqueueinfo(
668 _locked: &mut Locked<Unlocked>,
669 current_task: &CurrentTask,
670 tgid: pid_t,
671 unchecked_signal: UncheckedSignal,
672 siginfo_ref: UserAddress,
673) -> Result<(), Errno> {
674 let weak_task = current_task.kernel().pids.read().get_task(tgid);
675 let task = &Task::from_weak(&weak_task)?;
676 task.thread_group().send_signal_unchecked_with_info(
677 current_task,
678 unchecked_signal,
679 siginfo_ref,
680 IntoSignalInfoOptions::None,
681 )
682}
683
684pub fn sys_rt_tgsigqueueinfo(
695 locked: &mut Locked<Unlocked>,
696 current_task: &CurrentTask,
697 tgid: pid_t,
698 tid: tid_t,
699 unchecked_signal: UncheckedSignal,
700 siginfo_ref: UserAddress,
701) -> Result<(), Errno> {
702 let pids = current_task.kernel().pids.read();
703
704 let thread_weak = pids.get_task(tid);
705 let task = Task::from_weak(&thread_weak)?;
706
707 verify_tgid_for_task(&task, tgid, &pids)?;
708 send_unchecked_signal_info(locked, current_task, &task, unchecked_signal, siginfo_ref)
709}
710
711pub fn sys_pidfd_send_signal(
724 _locked: &mut Locked<Unlocked>,
725 current_task: &CurrentTask,
726 pidfd: FdNumber,
727 unchecked_signal: UncheckedSignal,
728 siginfo_ref: UserAddress,
729 flags: u32,
730) -> Result<(), Errno> {
731 if flags != 0 {
732 return error!(EINVAL);
733 }
734
735 let file = current_task.files.get(pidfd)?;
736 let target = file.as_thread_group_key()?;
737 let target = target.upgrade().ok_or_else(|| errno!(ESRCH))?;
738
739 if siginfo_ref.is_null() {
740 target.send_signal_unchecked(current_task, unchecked_signal)
741 } else {
742 target.send_signal_unchecked_with_info(
743 current_task,
744 unchecked_signal,
745 siginfo_ref,
746 IntoSignalInfoOptions::CheckSigno,
747 )
748 }
749}
750
751#[track_caller]
762fn signal_thread_groups<F>(
763 current_task: &CurrentTask,
764 unchecked_signal: UncheckedSignal,
765 thread_groups: F,
766) -> Result<(), Errno>
767where
768 F: IntoIterator<Item: AsRef<ThreadGroup>>,
769{
770 let mut last_error = None;
771 let mut sent_signal = false;
772
773 for thread_group in thread_groups.into_iter() {
776 match thread_group.as_ref().send_signal_unchecked(current_task, unchecked_signal) {
777 Ok(_) => sent_signal = true,
778 Err(errno) => last_error = Some(errno),
779 }
780 }
781
782 if sent_signal { Ok(()) } else { Err(last_error.unwrap_or_else(|| errno!(ESRCH))) }
783}
784
/// Parsed form of the `wait4`/`waitid` options bitmask.
#[derive(Debug)]
pub struct WaitingOptions {
    /// Wait for children that have exited (WEXITED).
    pub wait_for_exited: bool,
    /// Wait for children stopped by a signal (WSTOPPED / WUNTRACED).
    pub wait_for_stopped: bool,
    /// Wait for children that have been continued (WCONTINUED).
    pub wait_for_continued: bool,
    /// Block until a matching child changes state (WNOHANG was absent).
    pub block: bool,
    /// Leave the child in its waitable state after reporting it (WNOWAIT).
    pub keep_waitable_state: bool,
    /// Wait for all children regardless of type (__WALL).
    pub wait_for_all: bool,
    /// Wait for "clone" children (__WCLONE).
    pub wait_for_clone: bool,
}
803
804impl WaitingOptions {
805 fn new(options: u32) -> Self {
806 const_assert_eq!(WUNTRACED, WSTOPPED);
807 Self {
808 wait_for_exited: options & WEXITED > 0,
809 wait_for_stopped: options & WSTOPPED > 0,
810 wait_for_continued: options & WCONTINUED > 0,
811 block: options & WNOHANG == 0,
812 keep_waitable_state: options & WNOWAIT > 0,
813 wait_for_all: options & __WALL > 0,
814 wait_for_clone: options & __WCLONE > 0,
815 }
816 }
817
818 pub fn new_for_waitid(options: u32) -> Result<Self, Errno> {
820 if options & !(__WCLONE | __WALL | WNOHANG | WNOWAIT | WSTOPPED | WEXITED | WCONTINUED) != 0
821 {
822 track_stub!(TODO("https://fxbug.dev/322874788"), "waitid options", options);
823 return error!(EINVAL);
824 }
825 if options & (WEXITED | WSTOPPED | WCONTINUED) == 0 {
826 return error!(EINVAL);
827 }
828 Ok(Self::new(options))
829 }
830
831 pub fn new_for_wait4(options: u32) -> Result<Self, Errno> {
833 if options & !(__WCLONE | __WALL | WNOHANG | WUNTRACED | WCONTINUED) != 0 {
834 track_stub!(TODO("https://fxbug.dev/322874017"), "wait4 options", options);
835 return error!(EINVAL);
836 }
837 Ok(Self::new(options | WEXITED))
838 }
839}
840
/// Waits for a child (or ptrace tracee) of `current_task` that matches
/// `selector` to become waitable.
///
/// Returns `Ok(None)` when `options.block` is false and nothing is waitable;
/// returns `ECHILD` when no matching child or tracee exists at all. An
/// interrupted blocking wait is reported as `ERESTARTSYS`.
fn wait_on_pid(
    locked: &mut Locked<Unlocked>,
    current_task: &CurrentTask,
    selector: &ProcessSelector,
    options: &WaitingOptions,
) -> Result<Option<WaitResult>, Errno> {
    let waiter = Waiter::new();
    loop {
        {
            let mut pids = current_task.kernel().pids.write();
            // Waitable ptracees are reported ahead of regular children.
            if let Some(tracee) =
                current_task.thread_group().get_waitable_ptracee(selector, options, &mut pids)
            {
                return Ok(Some(tracee));
            }
            {
                let mut thread_group = current_task.thread_group().write();

                // Register with every tracee's waiter list while noting
                // whether one is already waitable, so a state change between
                // the check above and sleeping below cannot be missed.
                let mut has_waitable_tracee = false;
                let mut has_any_tracee = false;
                current_task.thread_group().get_ptracees_and(
                    selector,
                    &pids,
                    &mut |task: &Task, task_state: &TaskMutableState| {
                        if let Some(ptrace) = &task_state.ptrace {
                            has_any_tracee = true;
                            ptrace.tracer_waiters().wait_async(&waiter);
                            if ptrace.is_waitable(task.load_stopped(), options) {
                                has_waitable_tracee = true;
                            }
                        }
                    },
                );
                // A tracee became waitable (or a ptraced zombie appeared)
                // while setting up: retry immediately.
                if has_waitable_tracee
                    || thread_group.zombie_ptracees.has_zombie_matching(&selector)
                {
                    continue;
                }
                match thread_group.get_waitable_child(selector, options, &mut pids) {
                    WaitableChildResult::ReadyNow(child) => {
                        return Ok(Some(child));
                    }
                    WaitableChildResult::ShouldWait => (),
                    WaitableChildResult::NoneFound => {
                        // ECHILD only when there is neither a matching child
                        // nor a tracee that could ever become waitable.
                        if !has_any_tracee {
                            return error!(ECHILD);
                        }
                    }
                }
                // Arrange to be woken when a child's status changes.
                thread_group
                    .lifecycle_waiters
                    .wait_async_value(&waiter, ThreadGroupLifecycleWaitValue::ChildStatus);
            }
        }

        if !options.block {
            return Ok(None);
        }
        waiter.wait(locked, current_task).map_eintr(|| errno!(ERESTARTSYS))?;
    }
}
917
/// Implements `waitid`.
///
/// `id_type`/`id` select the target (P_PID, P_ALL, P_PGID, or P_PIDFD); on
/// success the child's siginfo and (stubbed) rusage are written to the
/// caller's buffers when those pointers are non-null.
pub fn sys_waitid(
    locked: &mut Locked<Unlocked>,
    current_task: &CurrentTask,
    id_type: u32,
    id: i32,
    user_info: MultiArchUserRef<uapi::siginfo_t, uapi::arch32::siginfo_t>,
    options: u32,
    user_rusage: RUsagePtr,
) -> Result<(), Errno> {
    let mut waiting_options = WaitingOptions::new_for_waitid(options)?;

    let task_selector = match id_type {
        P_PID => ProcessSelector::Pid(id),
        P_ALL => ProcessSelector::Any,
        // id == 0 means the caller's own process group.
        P_PGID => ProcessSelector::Pgid(if id == 0 {
            current_task.thread_group().read().process_group.leader
        } else {
            id
        }),
        P_PIDFD => {
            let fd = FdNumber::from_raw(id);
            let file = current_task.files.get(fd)?;
            // A non-blocking pidfd turns the wait into a poll.
            if file.flags().contains(OpenFlags::NONBLOCK) {
                waiting_options.block = false;
            }
            ProcessSelector::Process(file.as_thread_group_key()?)
        }
        _ => return error!(EINVAL),
    };

    if let Some(waitable_process) =
        wait_on_pid(locked, current_task, &task_selector, &waiting_options)?
    {
        if !user_rusage.is_null() {
            // Only user/system times are filled in for now.
            let usage = rusage {
                ru_utime: timeval_from_duration(waitable_process.time_stats.user_time),
                ru_stime: timeval_from_duration(waitable_process.time_stats.system_time),
                ..Default::default()
            };

            track_stub!(TODO("https://fxbug.dev/322874712"), "real rusage from waitid");
            current_task.write_multi_arch_object(user_rusage, usage)?;
        }

        if !user_info.is_null() {
            let siginfo = waitable_process.as_signal_info();
            siginfo.write(current_task, user_info)?;
        }
    } else if id_type == P_PIDFD {
        // A non-blocking pidfd wait with nothing ready reports EAGAIN.
        return error!(EAGAIN);
    }

    Ok(())
}
995
/// Implements `wait4`.
///
/// Returns the pid of the reported child, or 0 when WNOHANG was set and no
/// child was ready.
pub fn sys_wait4(
    locked: &mut Locked<Unlocked>,
    current_task: &CurrentTask,
    raw_selector: pid_t,
    user_wstatus: UserRef<i32>,
    options: u32,
    user_rusage: RUsagePtr,
) -> Result<pid_t, Errno> {
    let waiting_options = WaitingOptions::new_for_wait4(options)?;

    // Decode the pid-style selector: 0 = own process group, -1 = any child,
    // >0 = that pid, <-1 = process group -pid.
    let selector = if raw_selector == 0 {
        ProcessSelector::Pgid(current_task.thread_group().read().process_group.leader)
    } else if raw_selector == -1 {
        ProcessSelector::Any
    } else if raw_selector > 0 {
        ProcessSelector::Pid(raw_selector)
    } else if raw_selector < -1 {
        ProcessSelector::Pgid(negate_pid(raw_selector)?)
    } else {
        // NOTE(review): the arms above cover every i32 value, so this branch
        // looks unreachable — confirm before relying on the ENOSYS path.
        track_stub!(
            TODO("https://fxbug.dev/322874213"),
            "wait4 with selector",
            raw_selector as u64
        );
        return error!(ENOSYS);
    };

    if let Some(waitable_process) = wait_on_pid(locked, current_task, &selector, &waiting_options)?
    {
        let status = waitable_process.exit_info.status.wait_status();

        if !user_rusage.is_null() {
            // Only user/system times are filled in for now.
            track_stub!(TODO("https://fxbug.dev/322874768"), "real rusage from wait4");
            let usage = rusage {
                ru_utime: timeval_from_duration(waitable_process.time_stats.user_time),
                ru_stime: timeval_from_duration(waitable_process.time_stats.system_time),
                ..Default::default()
            };
            current_task.write_multi_arch_object(user_rusage, usage)?;
        }

        if !user_wstatus.is_null() {
            current_task.write_object(user_wstatus, &status)?;
        }

        Ok(waitable_process.pid)
    } else {
        Ok(0)
    }
}
1059
1060fn negate_pid(pid: pid_t) -> Result<pid_t, Errno> {
1062 pid.checked_neg().ok_or_else(|| errno!(ESRCH))
1063}
1064
#[cfg(target_arch = "aarch64")]
mod arch32 {
    //! arch32 (compat) syscall entry points, mostly re-exports of the 64-bit
    //! implementations under their arch32 names.
    use crate::task::CurrentTask;
    use crate::vfs::FdNumber;
    use starnix_sync::{Locked, Unlocked};
    use starnix_uapi::errors::Errno;
    use starnix_uapi::signals::SigSet;
    use starnix_uapi::user_address::UserRef;

    /// `signalfd` for arch32 tasks: equivalent to `signalfd4` with no flags.
    pub fn sys_arch32_signalfd(
        locked: &mut Locked<Unlocked>,
        current_task: &CurrentTask,
        fd: FdNumber,
        mask_addr: UserRef<SigSet>,
        mask_size: usize,
    ) -> Result<FdNumber, Errno> {
        super::sys_signalfd4(locked, current_task, fd, mask_addr, mask_size, 0)
    }

    pub use super::{
        sys_pidfd_send_signal as sys_arch32_pidfd_send_signal,
        sys_rt_sigaction as sys_arch32_rt_sigaction,
        sys_rt_sigqueueinfo as sys_arch32_rt_sigqueueinfo,
        sys_rt_sigtimedwait as sys_arch32_rt_sigtimedwait,
        sys_rt_tgsigqueueinfo as sys_arch32_rt_tgsigqueueinfo,
        sys_sigaltstack as sys_arch32_sigaltstack, sys_signalfd4 as sys_arch32_signalfd4,
        sys_waitid as sys_arch32_waitid,
    };
}
1109
1110#[cfg(target_arch = "aarch64")]
1111pub use arch32::*;
1112
1113#[cfg(test)]
1114mod tests {
1115 use super::*;
1116 use crate::mm::{MemoryAccessor, PAGE_SIZE};
1117 use crate::signals::testing::dequeue_signal_for_test;
1118 use crate::signals::{SI_HEADER_SIZE, SignalInfoHeader, send_standard_signal};
1119 use crate::task::dynamic_thread_spawner::SpawnRequestBuilder;
1120 use crate::task::{EventHandler, ExitStatus, ProcessExitInfo};
1121 use crate::testing::*;
1122 use starnix_sync::Mutex;
1123 use starnix_types::math::round_up_to_system_page_size;
1124 use starnix_uapi::auth::Credentials;
1125 use starnix_uapi::errors::ERESTARTSYS;
1126 use starnix_uapi::signals::{
1127 SIGCHLD, SIGHUP, SIGINT, SIGIO, SIGKILL, SIGRTMIN, SIGSEGV, SIGSTOP, SIGTERM, SIGTRAP,
1128 SIGUSR1,
1129 };
1130 use starnix_uapi::vfs::FdEvents;
1131 use starnix_uapi::{SI_QUEUE, sigaction_t, uaddr, uid_t};
1132 use std::collections::VecDeque;
1133 use std::sync::Arc;
1134 use zerocopy::IntoBytes;
1135
    /// Exercises the basic sigaltstack round trip: query the (disabled)
    /// default, install a stack, read it back, then disable it again.
    #[cfg(target_arch = "x86_64")]
    #[::fuchsia::test]
    async fn test_sigaltstack() {
        spawn_kernel_and_run(async |locked, current_task| {
            let addr = map_memory(locked, &current_task, UserAddress::default(), *PAGE_SIZE);

            let user_ss = UserRef::<sigaltstack>::new(addr);
            let nullptr = UserRef::<sigaltstack>::default();

            // The initial state must read back as disabled.
            sys_sigaltstack(locked, &current_task, nullptr.into(), user_ss.into())
                .expect("failed to call sigaltstack");
            let mut ss = current_task.read_object(user_ss).expect("failed to read struct");
            assert!(ss.ss_flags & (SS_DISABLE as i32) != 0);

            // Install a stack and verify it reads back byte-for-byte.
            ss.ss_sp = uaddr { addr: 0x7FFFF };
            ss.ss_size = 0x1000;
            ss.ss_flags = SS_AUTODISARM as i32;
            current_task.write_object(user_ss, &ss).expect("failed to write struct");
            sys_sigaltstack(locked, &current_task, user_ss.into(), nullptr.into())
                .expect("failed to call sigaltstack");
            current_task
                .write_memory(addr, &[0u8; std::mem::size_of::<sigaltstack>()])
                .expect("failed to clear struct");
            sys_sigaltstack(locked, &current_task, nullptr.into(), user_ss.into())
                .expect("failed to call sigaltstack");
            let another_ss = current_task.read_object(user_ss).expect("failed to read struct");
            assert_eq!(ss.as_bytes(), another_ss.as_bytes());

            // Disable the stack and verify the disabled state reads back.
            let ss = sigaltstack { ss_flags: SS_DISABLE as i32, ..sigaltstack::default() };
            current_task.write_object(user_ss, &ss).expect("failed to write struct");
            sys_sigaltstack(locked, &current_task, user_ss.into(), nullptr.into())
                .expect("failed to call sigaltstack");
            current_task
                .write_memory(addr, &[0u8; std::mem::size_of::<sigaltstack>()])
                .expect("failed to clear struct");
            sys_sigaltstack(locked, &current_task, nullptr.into(), user_ss.into())
                .expect("failed to call sigaltstack");
            let ss = current_task.read_object(user_ss).expect("failed to read struct");
            assert!(ss.ss_flags & (SS_DISABLE as i32) != 0);
        })
        .await;
    }
1181
    /// Installing a stack smaller than MINSIGSTKSZ must fail with ENOMEM.
    #[::fuchsia::test]
    async fn test_sigaltstack_invalid_size() {
        spawn_kernel_and_run(async |locked, current_task| {
            let addr = map_memory(locked, &current_task, UserAddress::default(), *PAGE_SIZE);

            let user_ss = UserRef::<sigaltstack>::new(addr);
            let nullptr = UserRef::<sigaltstack>::default();

            // The initial state must read back as disabled.
            sys_sigaltstack(locked, &current_task, nullptr.into(), user_ss.into())
                .expect("failed to call sigaltstack");
            let mut ss = current_task.read_object(user_ss).expect("failed to read struct");
            assert!(ss.ss_flags & (SS_DISABLE as i32) != 0);

            // Attempt to install a stack one byte below the minimum size.
            let sigaltstack_addr_size = round_up_to_system_page_size(uapi::MINSIGSTKSZ as usize)
                .expect("failed to round up");
            let sigaltstack_addr = map_memory(
                locked,
                &current_task,
                UserAddress::default(),
                sigaltstack_addr_size as u64,
            );
            ss.ss_sp = sigaltstack_addr.into();
            ss.ss_flags = 0;
            ss.ss_size = uapi::MINSIGSTKSZ as u64 - 1;
            current_task.write_object(user_ss, &ss).expect("failed to write struct");
            assert_eq!(
                sys_sigaltstack(locked, &current_task, user_ss.into(), nullptr.into()),
                error!(ENOMEM)
            );
        })
        .await;
    }
1216
    /// The alternate stack cannot be changed while the stack pointer is on
    /// it, but can be disabled once the stack pointer has moved off it.
    #[cfg(target_arch = "x86_64")]
    #[::fuchsia::test]
    async fn test_sigaltstack_active_stack() {
        spawn_kernel_and_run(async |locked, current_task| {
            let addr = map_memory(locked, &current_task, UserAddress::default(), *PAGE_SIZE);

            let user_ss = UserRef::<sigaltstack>::new(addr);
            let nullptr = UserRef::<sigaltstack>::default();

            // The initial state must read back as disabled.
            sys_sigaltstack(locked, &current_task, nullptr.into(), user_ss.into())
                .expect("failed to call sigaltstack");
            let mut ss = current_task.read_object(user_ss).expect("failed to read struct");
            assert!(ss.ss_flags & (SS_DISABLE as i32) != 0);

            // Install a valid alternate stack.
            let sigaltstack_addr_size = round_up_to_system_page_size(uapi::MINSIGSTKSZ as usize)
                .expect("failed to round up");
            let sigaltstack_addr = map_memory(
                locked,
                &current_task,
                UserAddress::default(),
                sigaltstack_addr_size as u64,
            );
            ss.ss_sp = sigaltstack_addr.into();
            ss.ss_flags = 0;
            ss.ss_size = sigaltstack_addr_size as u64;
            current_task.write_object(user_ss, &ss).expect("failed to write struct");
            sys_sigaltstack(locked, &current_task, user_ss.into(), nullptr.into())
                .expect("failed to call sigaltstack");

            // With rsp inside the configured range, changing the stack fails.
            let next_addr = (sigaltstack_addr + sigaltstack_addr_size).unwrap();
            current_task.thread_state.registers.rsp = next_addr.ptr() as u64;
            ss.ss_flags = SS_DISABLE as i32;
            current_task.write_object(user_ss, &ss).expect("failed to write struct");
            assert_eq!(
                sys_sigaltstack(locked, &current_task, user_ss.into(), nullptr.into()),
                error!(EPERM)
            );

            // After moving rsp past the stack, disabling succeeds.
            let next_ss_addr = sigaltstack_addr
                .checked_add(sigaltstack_addr_size)
                .unwrap()
                .checked_add(0x1000usize)
                .unwrap();
            current_task.thread_state.registers.rsp = next_ss_addr.ptr() as u64;
            let ss = sigaltstack { ss_flags: SS_DISABLE as i32, ..sigaltstack::default() };
            current_task.write_object(user_ss, &ss).expect("failed to write struct");
            sys_sigaltstack(locked, &current_task, user_ss.into(), nullptr.into())
                .expect("failed to call sigaltstack");
        })
        .await;
    }
1273
    /// A stack with ss_size == u64::MAX must not overflow the "currently on
    /// the alternate stack" range check.
    #[cfg(target_arch = "x86_64")]
    #[::fuchsia::test]
    async fn test_sigaltstack_active_stack_saturates() {
        spawn_kernel_and_run(async |locked, current_task| {
            let addr = map_memory(locked, &current_task, UserAddress::default(), *PAGE_SIZE);

            let user_ss = UserRef::<sigaltstack>::new(addr);
            let nullptr = UserRef::<sigaltstack>::default();

            // The initial state must read back as disabled.
            sys_sigaltstack(locked, &current_task, nullptr.into(), user_ss.into())
                .expect("failed to call sigaltstack");
            let mut ss = current_task.read_object(user_ss).expect("failed to read struct");
            assert!(ss.ss_flags & (SS_DISABLE as i32) != 0);

            // Install a stack whose size saturates the address range.
            let sigaltstack_addr_size = round_up_to_system_page_size(uapi::MINSIGSTKSZ as usize)
                .expect("failed to round up");
            let sigaltstack_addr = map_memory(
                locked,
                &current_task,
                UserAddress::default(),
                sigaltstack_addr_size as u64,
            );
            ss.ss_sp = sigaltstack_addr.into();
            ss.ss_flags = 0;
            ss.ss_size = u64::MAX;
            current_task.write_object(user_ss, &ss).expect("failed to write struct");
            sys_sigaltstack(locked, &current_task, user_ss.into(), nullptr.into())
                .expect("failed to call sigaltstack");

            // rsp falls inside the (saturated) range, so changes must fail.
            current_task.thread_state.registers.rsp =
                (sigaltstack_addr + sigaltstack_addr_size).unwrap().ptr() as u64;
            ss.ss_flags = SS_DISABLE as i32;
            current_task.write_object(user_ss, &ss).expect("failed to write struct");
            assert_eq!(
                sys_sigaltstack(locked, &current_task, user_ss.into(), nullptr.into()),
                error!(EPERM)
            );

            // rsp == 0 is below the stack base, so disabling succeeds.
            current_task.thread_state.registers.rsp = 0u64;
            let ss = sigaltstack { ss_flags: SS_DISABLE as i32, ..sigaltstack::default() };
            current_task.write_object(user_ss, &ss).expect("failed to write struct");
            sys_sigaltstack(locked, &current_task, user_ss.into(), nullptr.into())
                .expect("failed to call sigaltstack");
        })
        .await;
    }
1324
1325 #[::fuchsia::test]
1328 async fn test_sigprocmask_invalid_size() {
1329 spawn_kernel_and_run(async |locked, current_task| {
1330 let set = UserRef::<SigSet>::default();
1331 let old_set = UserRef::<SigSet>::default();
1332 let how = 0;
1333
1334 assert_eq!(
1335 sys_rt_sigprocmask(
1336 locked,
1337 ¤t_task,
1338 how,
1339 set,
1340 old_set,
1341 std::mem::size_of::<SigSet>() * 2
1342 ),
1343 error!(EINVAL)
1344 );
1345 assert_eq!(
1346 sys_rt_sigprocmask(
1347 locked,
1348 ¤t_task,
1349 how,
1350 set,
1351 old_set,
1352 std::mem::size_of::<SigSet>() / 2
1353 ),
1354 error!(EINVAL)
1355 );
1356 })
1357 .await;
1358 }
1359
1360 #[::fuchsia::test]
1362 async fn test_sigprocmask_invalid_how() {
1363 spawn_kernel_and_run(async |locked, current_task| {
1364 let addr = map_memory(locked, ¤t_task, UserAddress::default(), *PAGE_SIZE);
1365
1366 let set = UserRef::<SigSet>::new(addr);
1367 let old_set = UserRef::<SigSet>::default();
1368 let how = SIG_SETMASK | SIG_UNBLOCK | SIG_BLOCK;
1369
1370 assert_eq!(
1371 sys_rt_sigprocmask(
1372 locked,
1373 ¤t_task,
1374 how,
1375 set,
1376 old_set,
1377 std::mem::size_of::<SigSet>()
1378 ),
1379 error!(EINVAL)
1380 );
1381 })
1382 .await;
1383 }
1384
1385 #[::fuchsia::test]
1388 async fn test_sigprocmask_null_set() {
1389 spawn_kernel_and_run(async |locked, current_task| {
1390 let addr = map_memory(locked, ¤t_task, UserAddress::default(), *PAGE_SIZE);
1391 let original_mask = SigSet::from(SIGTRAP);
1392 {
1393 current_task.write().set_signal_mask(original_mask);
1394 }
1395
1396 let set = UserRef::<SigSet>::default();
1397 let old_set = UserRef::<SigSet>::new(addr);
1398 let how = SIG_SETMASK;
1399
1400 current_task
1401 .write_memory(addr, &[0u8; std::mem::size_of::<SigSet>()])
1402 .expect("failed to clear struct");
1403
1404 assert_eq!(
1405 sys_rt_sigprocmask(
1406 locked,
1407 ¤t_task,
1408 how,
1409 set,
1410 old_set,
1411 std::mem::size_of::<SigSet>()
1412 ),
1413 Ok(())
1414 );
1415
1416 let old_mask = current_task.read_object(old_set).expect("failed to read mask");
1417 assert_eq!(old_mask, original_mask);
1418 })
1419 .await;
1420 }
1421
1422 #[::fuchsia::test]
1425 async fn test_sigprocmask_null_set_and_old_set() {
1426 spawn_kernel_and_run(async |locked, current_task| {
1427 let original_mask = SigSet::from(SIGTRAP);
1428 {
1429 current_task.write().set_signal_mask(original_mask);
1430 }
1431
1432 let set = UserRef::<SigSet>::default();
1433 let old_set = UserRef::<SigSet>::default();
1434 let how = SIG_SETMASK;
1435
1436 assert_eq!(
1437 sys_rt_sigprocmask(
1438 locked,
1439 ¤t_task,
1440 how,
1441 set,
1442 old_set,
1443 std::mem::size_of::<SigSet>()
1444 ),
1445 Ok(())
1446 );
1447 assert_eq!(current_task.read().signal_mask(), original_mask);
1448 })
1449 .await;
1450 }
1451
1452 #[::fuchsia::test]
1454 async fn test_sigprocmask_setmask() {
1455 spawn_kernel_and_run(async |locked, current_task| {
1456 let addr = map_memory(locked, ¤t_task, UserAddress::default(), *PAGE_SIZE);
1457 current_task
1458 .write_memory(addr, &[0u8; std::mem::size_of::<SigSet>() * 2])
1459 .expect("failed to clear struct");
1460
1461 let original_mask = SigSet::from(SIGTRAP);
1462 {
1463 current_task.write().set_signal_mask(original_mask);
1464 }
1465
1466 let new_mask = SigSet::from(SIGIO);
1467 let set = UserRef::<SigSet>::new(addr);
1468 current_task.write_object(set, &new_mask).expect("failed to set mask");
1469
1470 let old_addr_range = (addr + std::mem::size_of::<SigSet>()).unwrap();
1471 let old_set = UserRef::<SigSet>::new(old_addr_range);
1472 let how = SIG_SETMASK;
1473
1474 assert_eq!(
1475 sys_rt_sigprocmask(
1476 locked,
1477 ¤t_task,
1478 how,
1479 set,
1480 old_set,
1481 std::mem::size_of::<SigSet>()
1482 ),
1483 Ok(())
1484 );
1485
1486 let old_mask = current_task.read_object(old_set).expect("failed to read mask");
1487 assert_eq!(old_mask, original_mask);
1488 assert_eq!(current_task.read().signal_mask(), new_mask);
1489 })
1490 .await;
1491 }
1492
1493 #[::fuchsia::test]
1495 async fn test_sigprocmask_block() {
1496 spawn_kernel_and_run(async |locked, current_task| {
1497 let addr = map_memory(locked, ¤t_task, UserAddress::default(), *PAGE_SIZE);
1498 current_task
1499 .write_memory(addr, &[0u8; std::mem::size_of::<SigSet>() * 2])
1500 .expect("failed to clear struct");
1501
1502 let original_mask = SigSet::from(SIGTRAP);
1503 {
1504 current_task.write().set_signal_mask(original_mask);
1505 }
1506
1507 let new_mask = SigSet::from(SIGIO);
1508 let set = UserRef::<SigSet>::new(addr);
1509 current_task.write_object(set, &new_mask).expect("failed to set mask");
1510
1511 let old_addr_range = (addr + std::mem::size_of::<SigSet>()).unwrap();
1512 let old_set = UserRef::<SigSet>::new(old_addr_range);
1513 let how = SIG_BLOCK;
1514
1515 assert_eq!(
1516 sys_rt_sigprocmask(
1517 locked,
1518 ¤t_task,
1519 how,
1520 set,
1521 old_set,
1522 std::mem::size_of::<SigSet>()
1523 ),
1524 Ok(())
1525 );
1526
1527 let old_mask = current_task.read_object(old_set).expect("failed to read mask");
1528 assert_eq!(old_mask, original_mask);
1529 assert_eq!(current_task.read().signal_mask(), new_mask | original_mask);
1530 })
1531 .await;
1532 }
1533
1534 #[::fuchsia::test]
1536 async fn test_sigprocmask_unblock() {
1537 spawn_kernel_and_run(async |locked, current_task| {
1538 let addr = map_memory(locked, ¤t_task, UserAddress::default(), *PAGE_SIZE);
1539 current_task
1540 .write_memory(addr, &[0u8; std::mem::size_of::<SigSet>() * 2])
1541 .expect("failed to clear struct");
1542
1543 let original_mask = SigSet::from(SIGTRAP) | SigSet::from(SIGIO);
1544 {
1545 current_task.write().set_signal_mask(original_mask);
1546 }
1547
1548 let new_mask = SigSet::from(SIGTRAP);
1549 let set = UserRef::<SigSet>::new(addr);
1550 current_task.write_object(set, &new_mask).expect("failed to set mask");
1551
1552 let old_addr_range = (addr + std::mem::size_of::<SigSet>()).unwrap();
1553 let old_set = UserRef::<SigSet>::new(old_addr_range);
1554 let how = SIG_UNBLOCK;
1555
1556 assert_eq!(
1557 sys_rt_sigprocmask(
1558 locked,
1559 ¤t_task,
1560 how,
1561 set,
1562 old_set,
1563 std::mem::size_of::<SigSet>()
1564 ),
1565 Ok(())
1566 );
1567
1568 let old_mask = current_task.read_object(old_set).expect("failed to read mask");
1569 assert_eq!(old_mask, original_mask);
1570 assert_eq!(current_task.read().signal_mask(), SIGIO.into());
1571 })
1572 .await;
1573 }
1574
1575 #[::fuchsia::test]
1577 async fn test_sigprocmask_unblock_not_set() {
1578 spawn_kernel_and_run(async |locked, current_task| {
1579 let addr = map_memory(locked, ¤t_task, UserAddress::default(), *PAGE_SIZE);
1580 current_task
1581 .write_memory(addr, &[0u8; std::mem::size_of::<SigSet>() * 2])
1582 .expect("failed to clear struct");
1583
1584 let original_mask = SigSet::from(SIGIO);
1585 {
1586 current_task.write().set_signal_mask(original_mask);
1587 }
1588
1589 let new_mask = SigSet::from(SIGTRAP);
1590 let set = UserRef::<SigSet>::new(addr);
1591 current_task.write_object(set, &new_mask).expect("failed to set mask");
1592
1593 let old_addr_range = (addr + std::mem::size_of::<SigSet>()).unwrap();
1594 let old_set = UserRef::<SigSet>::new(old_addr_range);
1595 let how = SIG_UNBLOCK;
1596
1597 assert_eq!(
1598 sys_rt_sigprocmask(
1599 locked,
1600 ¤t_task,
1601 how,
1602 set,
1603 old_set,
1604 std::mem::size_of::<SigSet>()
1605 ),
1606 Ok(())
1607 );
1608
1609 let old_mask = current_task.read_object(old_set).expect("failed to read mask");
1610 assert_eq!(old_mask, original_mask);
1611 assert_eq!(current_task.read().signal_mask(), original_mask);
1612 })
1613 .await;
1614 }
1615
1616 #[::fuchsia::test]
1618 async fn test_sigprocmask_kill_stop() {
1619 spawn_kernel_and_run(async |locked, current_task| {
1620 let addr = map_memory(locked, ¤t_task, UserAddress::default(), *PAGE_SIZE);
1621 current_task
1622 .write_memory(addr, &[0u8; std::mem::size_of::<SigSet>() * 2])
1623 .expect("failed to clear struct");
1624
1625 let original_mask = SigSet::from(SIGIO);
1626 {
1627 current_task.write().set_signal_mask(original_mask);
1628 }
1629
1630 let new_mask = UNBLOCKABLE_SIGNALS;
1631 let set = UserRef::<SigSet>::new(addr);
1632 current_task.write_object(set, &new_mask).expect("failed to set mask");
1633
1634 let old_addr_range = (addr + std::mem::size_of::<SigSet>()).unwrap();
1635 let old_set = UserRef::<SigSet>::new(old_addr_range);
1636 let how = SIG_BLOCK;
1637
1638 assert_eq!(
1639 sys_rt_sigprocmask(
1640 locked,
1641 ¤t_task,
1642 how,
1643 set,
1644 old_set,
1645 std::mem::size_of::<SigSet>()
1646 ),
1647 Ok(())
1648 );
1649
1650 let old_mask = current_task.read_object(old_set).expect("failed to read mask");
1651 assert_eq!(old_mask, original_mask);
1652 assert_eq!(current_task.read().signal_mask(), original_mask);
1653 })
1654 .await;
1655 }
1656
1657 #[::fuchsia::test]
1658 async fn test_sigaction_invalid_signal() {
1659 spawn_kernel_and_run(async |locked, current_task| {
1660 assert_eq!(
1661 sys_rt_sigaction(
1662 locked,
1663 ¤t_task,
1664 UncheckedSignal::from(SIGKILL),
1665 UserRef::<sigaction_t>::new(UserAddress::from(10)).into(),
1667 UserRef::<sigaction_t>::default().into(),
1668 std::mem::size_of::<SigSet>(),
1669 ),
1670 error!(EINVAL)
1671 );
1672 assert_eq!(
1673 sys_rt_sigaction(
1674 locked,
1675 ¤t_task,
1676 UncheckedSignal::from(SIGSTOP),
1677 UserRef::<sigaction_t>::new(UserAddress::from(10)).into(),
1679 UserRef::<sigaction_t>::default().into(),
1680 std::mem::size_of::<SigSet>(),
1681 ),
1682 error!(EINVAL)
1683 );
1684 assert_eq!(
1685 sys_rt_sigaction(
1686 locked,
1687 ¤t_task,
1688 UncheckedSignal::from(Signal::NUM_SIGNALS + 1),
1689 UserRef::<sigaction_t>::new(UserAddress::from(10)).into(),
1691 UserRef::<sigaction_t>::default().into(),
1692 std::mem::size_of::<SigSet>(),
1693 ),
1694 error!(EINVAL)
1695 );
1696 })
1697 .await;
1698 }
1699
1700 #[::fuchsia::test]
1701 async fn test_sigaction_old_value_set() {
1702 spawn_kernel_and_run(async |locked, current_task| {
1703 let addr = map_memory(locked, ¤t_task, UserAddress::default(), *PAGE_SIZE);
1704 current_task
1705 .write_memory(addr, &[0u8; std::mem::size_of::<sigaction_t>()])
1706 .expect("failed to clear struct");
1707
1708 let org_mask = SigSet::from(SIGHUP) | SigSet::from(SIGINT);
1709 let original_action =
1710 sigaction_t { sa_mask: org_mask.into(), ..sigaction_t::default() };
1711
1712 {
1713 current_task.thread_group().signal_actions.set(SIGHUP, original_action);
1714 }
1715
1716 let old_action_ref = UserRef::<sigaction_t>::new(addr);
1717 assert_eq!(
1718 sys_rt_sigaction(
1719 locked,
1720 ¤t_task,
1721 UncheckedSignal::from(SIGHUP),
1722 UserRef::<sigaction_t>::default().into(),
1723 old_action_ref.into(),
1724 std::mem::size_of::<SigSet>()
1725 ),
1726 Ok(())
1727 );
1728
1729 let old_action =
1730 current_task.read_object(old_action_ref).expect("failed to read action");
1731 assert_eq!(old_action.as_bytes(), original_action.as_bytes());
1732 })
1733 .await;
1734 }
1735
1736 #[::fuchsia::test]
1737 async fn test_sigaction_new_value_set() {
1738 spawn_kernel_and_run(async |locked, current_task| {
1739 let addr = map_memory(locked, ¤t_task, UserAddress::default(), *PAGE_SIZE);
1740 current_task
1741 .write_memory(addr, &[0u8; std::mem::size_of::<sigaction_t>()])
1742 .expect("failed to clear struct");
1743
1744 let org_mask = SigSet::from(SIGHUP) | SigSet::from(SIGINT);
1745 let original_action =
1746 sigaction_t { sa_mask: org_mask.into(), ..sigaction_t::default() };
1747 let set_action_ref = UserRef::<sigaction_t>::new(addr);
1748 current_task
1749 .write_object(set_action_ref, &original_action)
1750 .expect("failed to set action");
1751
1752 assert_eq!(
1753 sys_rt_sigaction(
1754 locked,
1755 ¤t_task,
1756 UncheckedSignal::from(SIGINT),
1757 set_action_ref.into(),
1758 UserRef::<sigaction_t>::default().into(),
1759 std::mem::size_of::<SigSet>(),
1760 ),
1761 Ok(())
1762 );
1763
1764 assert_eq!(
1765 current_task.thread_group().signal_actions.get(SIGINT).as_bytes(),
1766 original_action.as_bytes()
1767 );
1768 })
1769 .await;
1770 }
1771
1772 #[::fuchsia::test]
1774 async fn test_kill_same_task() {
1775 spawn_kernel_and_run(async |locked, current_task| {
1776 assert_eq!(sys_kill(locked, ¤t_task, current_task.tid, SIGINT.into()), Ok(()));
1777 })
1778 .await;
1779 }
1780
1781 #[::fuchsia::test]
1783 async fn test_kill_own_thread_group() {
1784 spawn_kernel_and_run(async |locked, init_task| {
1785 let task1 = init_task.clone_task_for_test(locked, 0, Some(SIGCHLD));
1786 task1.thread_group().setsid(locked).expect("setsid");
1787 let task2 = task1.clone_task_for_test(locked, 0, Some(SIGCHLD));
1788
1789 assert_eq!(sys_kill(locked, &task1, 0, SIGINT.into()), Ok(()));
1790 assert_eq!(task1.read().queued_signal_count(SIGINT), 1);
1791 assert_eq!(task2.read().queued_signal_count(SIGINT), 1);
1792 assert_eq!(init_task.read().queued_signal_count(SIGINT), 0);
1793 })
1794 .await;
1795 }
1796
1797 #[::fuchsia::test]
1799 async fn test_kill_thread_group() {
1800 spawn_kernel_and_run(async |locked, init_task| {
1801 let task1 = init_task.clone_task_for_test(locked, 0, Some(SIGCHLD));
1802 task1.thread_group().setsid(locked).expect("setsid");
1803 let task2 = task1.clone_task_for_test(locked, 0, Some(SIGCHLD));
1804
1805 assert_eq!(sys_kill(locked, &task1, -task1.tid, SIGINT.into()), Ok(()));
1806 assert_eq!(task1.read().queued_signal_count(SIGINT), 1);
1807 assert_eq!(task2.read().queued_signal_count(SIGINT), 1);
1808 assert_eq!(init_task.read().queued_signal_count(SIGINT), 0);
1809 })
1810 .await;
1811 }
1812
1813 #[::fuchsia::test]
1815 async fn test_kill_all() {
1816 spawn_kernel_and_run(async |locked, init_task| {
1817 let task1 = init_task.clone_task_for_test(locked, 0, Some(SIGCHLD));
1818 task1.thread_group().setsid(locked).expect("setsid");
1819 let task2 = task1.clone_task_for_test(locked, 0, Some(SIGCHLD));
1820
1821 assert_eq!(sys_kill(locked, &task1, -1, SIGINT.into()), Ok(()));
1822 assert_eq!(task1.read().queued_signal_count(SIGINT), 0);
1823 assert_eq!(task2.read().queued_signal_count(SIGINT), 1);
1824 assert_eq!(init_task.read().queued_signal_count(SIGINT), 0);
1825 })
1826 .await;
1827 }
1828
1829 #[::fuchsia::test]
1831 async fn test_kill_inexistant_task() {
1832 spawn_kernel_and_run(async |locked, current_task| {
1833 assert_eq!(sys_kill(locked, ¤t_task, 9, SIGINT.into()), error!(ESRCH));
1834 })
1835 .await;
1836 }
1837
1838 #[::fuchsia::test]
1840 async fn test_kill_invalid_task() {
1841 spawn_kernel_and_run(async |locked, task1| {
1842 task1.set_creds(Credentials::with_ids(1, 1));
1844 let task2 = task1.clone_task_for_test(locked, 0, Some(SIGCHLD));
1845 task2.set_creds(Credentials::with_ids(2, 2));
1846
1847 assert!(task1.can_signal(&task2, SIGINT.into()).is_err());
1848 assert_eq!(sys_kill(locked, &task2, task1.tid, SIGINT.into()), error!(EPERM));
1849 assert_eq!(task1.read().queued_signal_count(SIGINT), 0);
1850 })
1851 .await;
1852 }
1853
1854 #[::fuchsia::test]
1856 async fn test_kill_invalid_task_in_thread_group() {
1857 spawn_kernel_and_run(async |locked, init_task| {
1858 let task1 = init_task.clone_task_for_test(locked, 0, Some(SIGCHLD));
1859 task1.thread_group().setsid(locked).expect("setsid");
1860 let task2 = task1.clone_task_for_test(locked, 0, Some(SIGCHLD));
1861 task2.thread_group().setsid(locked).expect("setsid");
1862 task2.set_creds(Credentials::with_ids(2, 2));
1863
1864 assert!(task2.can_signal(&task1, SIGINT.into()).is_err());
1865 assert_eq!(sys_kill(locked, &task2, -task1.tid, SIGINT.into()), error!(EPERM));
1866 assert_eq!(task1.read().queued_signal_count(SIGINT), 0);
1867 })
1868 .await;
1869 }
1870
1871 #[::fuchsia::test]
1873 async fn test_kill_invalid_signal() {
1874 spawn_kernel_and_run(async |locked, current_task| {
1875 assert_eq!(
1876 sys_kill(locked, ¤t_task, current_task.tid, UncheckedSignal::from(75)),
1877 error!(EINVAL)
1878 );
1879 })
1880 .await;
1881 }
1882
1883 #[::fuchsia::test]
1885 async fn test_blocked_signal_pending() {
1886 spawn_kernel_and_run(async |locked, current_task| {
1887 let addr = map_memory(locked, ¤t_task, UserAddress::default(), *PAGE_SIZE);
1888 current_task
1889 .write_memory(addr, &[0u8; std::mem::size_of::<SigSet>() * 2])
1890 .expect("failed to clear struct");
1891
1892 let new_mask = SigSet::from(SIGIO);
1893 let set = UserRef::<SigSet>::new(addr);
1894 current_task.write_object(set, &new_mask).expect("failed to set mask");
1895
1896 assert_eq!(
1897 sys_rt_sigprocmask(
1898 locked,
1899 ¤t_task,
1900 SIG_BLOCK,
1901 set,
1902 UserRef::default(),
1903 std::mem::size_of::<SigSet>()
1904 ),
1905 Ok(())
1906 );
1907 assert_eq!(sys_kill(locked, ¤t_task, current_task.tid, SIGIO.into()), Ok(()));
1908 assert_eq!(current_task.read().queued_signal_count(SIGIO), 1);
1909
1910 assert_eq!(sys_kill(locked, ¤t_task, current_task.tid, SIGIO.into()), Ok(()));
1912 assert_eq!(current_task.read().queued_signal_count(SIGIO), 1);
1913 })
1914 .await;
1915 }
1916
1917 #[::fuchsia::test]
1919 async fn test_blocked_real_time_signal_pending() {
1920 spawn_kernel_and_run(async |locked, current_task| {
1921 let addr = map_memory(locked, ¤t_task, UserAddress::default(), *PAGE_SIZE);
1922 current_task
1923 .write_memory(addr, &[0u8; std::mem::size_of::<SigSet>() * 2])
1924 .expect("failed to clear struct");
1925
1926 let new_mask = SigSet::from(starnix_uapi::signals::SIGRTMIN);
1927 let set = UserRef::<SigSet>::new(addr);
1928 current_task.write_object(set, &new_mask).expect("failed to set mask");
1929
1930 assert_eq!(
1931 sys_rt_sigprocmask(
1932 locked,
1933 ¤t_task,
1934 SIG_BLOCK,
1935 set,
1936 UserRef::default(),
1937 std::mem::size_of::<SigSet>()
1938 ),
1939 Ok(())
1940 );
1941 assert_eq!(sys_kill(locked, ¤t_task, current_task.tid, SIGRTMIN.into()), Ok(()));
1942 assert_eq!(current_task.read().queued_signal_count(starnix_uapi::signals::SIGRTMIN), 1);
1943
1944 assert_eq!(sys_kill(locked, ¤t_task, current_task.tid, SIGRTMIN.into()), Ok(()));
1946 assert_eq!(current_task.read().queued_signal_count(starnix_uapi::signals::SIGRTMIN), 2);
1947 })
1948 .await;
1949 }
1950
1951 #[::fuchsia::test]
1952 async fn test_suspend() {
1953 spawn_kernel_and_run(async |locked, current_task| {
1954 let init_task_weak = current_task.weak_task();
1955 let (tx, rx) = std::sync::mpsc::sync_channel::<()>(0);
1956
1957 let closure = move |locked: &mut Locked<Unlocked>, current_task: &CurrentTask| {
1958 let init_task_temp = init_task_weak.upgrade().expect("Task must be alive");
1959
1960 let mut suspended = false;
1962 while !suspended {
1963 suspended = init_task_temp.read().is_blocked();
1964 std::thread::sleep(std::time::Duration::from_millis(10));
1965 }
1966
1967 let _ = sys_kill(
1969 locked,
1970 current_task,
1971 init_task_temp.tid,
1972 UncheckedSignal::from(SIGHUP),
1973 );
1974
1975 rx.recv().expect("receive");
1977 assert!(!init_task_temp.read().is_blocked());
1978 };
1979 let (thread, req) =
1980 SpawnRequestBuilder::new().with_sync_closure(closure).build_with_async_result();
1981 current_task.kernel().kthreads.spawner().spawn_from_request(req);
1982
1983 let addr = map_memory(locked, ¤t_task, UserAddress::default(), *PAGE_SIZE);
1984 let user_ref = UserRef::<SigSet>::new(addr);
1985
1986 let sigset = !SigSet::from(SIGHUP);
1987 current_task.write_object(user_ref, &sigset).expect("failed to set action");
1988
1989 assert_eq!(
1990 sys_rt_sigsuspend(locked, current_task, user_ref, std::mem::size_of::<SigSet>()),
1991 error!(ERESTARTNOHAND)
1992 );
1993 tx.send(()).expect("send");
1994 futures::executor::block_on(thread).expect("join");
1995 })
1996 .await;
1997 }
1998
1999 #[::fuchsia::test]
2001 async fn test_waitid_options() {
2002 spawn_kernel_and_run(async |locked, current_task| {
2003 let id = 1;
2004 assert_eq!(
2005 sys_waitid(
2006 locked,
2007 ¤t_task,
2008 P_PID,
2009 id,
2010 MultiArchUserRef::null(current_task),
2011 0,
2012 UserRef::default().into()
2013 ),
2014 error!(EINVAL)
2015 );
2016 assert_eq!(
2017 sys_waitid(
2018 locked,
2019 ¤t_task,
2020 P_PID,
2021 id,
2022 MultiArchUserRef::null(current_task),
2023 0xffff,
2024 UserRef::default().into()
2025 ),
2026 error!(EINVAL)
2027 );
2028 })
2029 .await;
2030 }
2031
2032 #[::fuchsia::test]
2034 async fn test_wait4_options() {
2035 spawn_kernel_and_run(async |locked, current_task| {
2036 let id = 1;
2037 assert_eq!(
2038 sys_wait4(
2039 locked,
2040 ¤t_task,
2041 id,
2042 UserRef::default(),
2043 WEXITED,
2044 RUsagePtr::null(current_task)
2045 ),
2046 error!(EINVAL)
2047 );
2048 assert_eq!(
2049 sys_wait4(
2050 locked,
2051 ¤t_task,
2052 id,
2053 UserRef::default(),
2054 WNOWAIT,
2055 RUsagePtr::null(current_task)
2056 ),
2057 error!(EINVAL)
2058 );
2059 assert_eq!(
2060 sys_wait4(
2061 locked,
2062 ¤t_task,
2063 id,
2064 UserRef::default(),
2065 0xffff,
2066 RUsagePtr::null(current_task)
2067 ),
2068 error!(EINVAL)
2069 );
2070 })
2071 .await;
2072 }
2073
2074 #[::fuchsia::test]
2075 async fn test_echild_when_no_zombie() {
2076 spawn_kernel_and_run(async |locked, current_task| {
2077 assert!(
2079 sys_kill(
2080 locked,
2081 ¤t_task,
2082 current_task.get_pid(),
2083 UncheckedSignal::from(SIGCHLD)
2084 )
2085 .is_ok()
2086 );
2087 assert_eq!(
2090 wait_on_pid(
2091 locked,
2092 ¤t_task,
2093 &ProcessSelector::Any,
2094 &WaitingOptions::new_for_wait4(0).expect("WaitingOptions")
2095 ),
2096 error!(ECHILD)
2097 );
2098 })
2099 .await;
2100 }
2101
2102 #[::fuchsia::test]
2103 async fn test_no_error_when_zombie() {
2104 spawn_kernel_and_run(async |locked, current_task| {
2105 let child = current_task.clone_task_for_test(locked, 0, Some(SIGCHLD));
2106 let expected_result = WaitResult {
2107 pid: child.tid,
2108 uid: 0,
2109 exit_info: ProcessExitInfo {
2110 status: ExitStatus::Exit(1),
2111 exit_signal: Some(SIGCHLD),
2112 },
2113 time_stats: Default::default(),
2114 };
2115 child.thread_group().exit(locked, ExitStatus::Exit(1), None);
2116 std::mem::drop(child);
2117
2118 assert_eq!(
2119 wait_on_pid(
2120 locked,
2121 ¤t_task,
2122 &ProcessSelector::Any,
2123 &WaitingOptions::new_for_wait4(0).expect("WaitingOptions")
2124 ),
2125 Ok(Some(expected_result))
2126 );
2127 })
2128 .await;
2129 }
2130
    // Verifies that a blocking wait completes once a child exits: a WNOHANG
    // check first reports nothing, then a helper thread terminates the child
    // while the parent blocks in wait_on_pid, and the wait returns that child.
    #[::fuchsia::test]
    async fn test_waiting_for_child() {
        spawn_kernel_and_run(async |locked, task| {
            let child = task
                .clone_task(
                    locked,
                    0,
                    Some(SIGCHLD),
                    UserRef::default(),
                    UserRef::default(),
                    UserRef::default(),
                )
                .expect("clone_task");

            // The child is still running, so a WNOHANG wait reports Ok(None).
            assert_eq!(
                wait_on_pid(
                    locked,
                    &task,
                    &ProcessSelector::Any,
                    &WaitingOptions::new_for_wait4(WNOHANG).expect("WaitingOptions")
                ),
                Ok(None)
            );

            // Exit the child from a separate OS thread once the parent blocks.
            let thread = std::thread::spawn({
                let task = task.weak_task();
                move || {
                    #[allow(
                        clippy::undocumented_unsafe_blocks,
                        reason = "Force documented unsafe blocks in Starnix"
                    )]
                    let locked = unsafe { Unlocked::new() };
                    let task = task.upgrade().expect("task must be alive");
                    let child: AutoReleasableTask = child.into();
                    // Poll until the parent is blocked in the wait below.
                    while !task.read().is_blocked() {
                        std::thread::sleep(std::time::Duration::from_millis(10));
                    }
                    child.thread_group().exit(locked, ExitStatus::Exit(0), None);
                    child.tid
                }
            });

            // Blocking wait: resumes when the helper thread exits the child.
            let waited_child = wait_on_pid(
                locked,
                &task,
                &ProcessSelector::Any,
                &WaitingOptions::new_for_wait4(0).expect("WaitingOptions"),
            )
            .expect("wait_on_pid")
            .unwrap();

            let child_id = thread.join().expect("join");
            assert_eq!(waited_child.pid, child_id);
        })
        .await;
    }
2192
2193 #[::fuchsia::test]
2194 async fn test_waiting_for_child_with_signal_pending() {
2195 spawn_kernel_and_run(async |locked, task| {
2196 task.thread_group().signal_actions.set(
2198 SIGUSR1,
2199 sigaction_t { sa_handler: uaddr { addr: 0xDEADBEEF }, ..sigaction_t::default() },
2200 );
2201
2202 let _child = task.clone_task_for_test(locked, 0, Some(SIGCHLD));
2204
2205 send_standard_signal(locked, &task, SignalInfo::kernel(SIGUSR1));
2208
2209 let errno = wait_on_pid(
2210 locked,
2211 &task,
2212 &ProcessSelector::Any,
2213 &WaitingOptions::new_for_wait4(0).expect("WaitingOptions"),
2214 )
2215 .expect_err("wait_on_pid");
2216 assert_eq!(errno, ERESTARTSYS);
2217 })
2218 .await;
2219 }
2220
2221 #[::fuchsia::test]
2222 async fn test_sigkill() {
2223 spawn_kernel_and_run(async |locked, current_task| {
2224 let mut child = current_task.clone_task_for_test(locked, 0, Some(SIGCHLD));
2225
2226 send_standard_signal(locked, &child, SignalInfo::kernel(SIGKILL));
2228 dequeue_signal_for_test(locked, &mut child);
2229 std::mem::drop(child);
2230
2231 let address = map_memory(
2233 locked,
2234 ¤t_task,
2235 UserAddress::default(),
2236 std::mem::size_of::<i32>() as u64,
2237 );
2238 let address_ref = UserRef::<i32>::new(address);
2239 sys_wait4(locked, ¤t_task, -1, address_ref, 0, RUsagePtr::null(current_task))
2240 .expect("wait4");
2241 let wstatus = current_task.read_object(address_ref).expect("read memory");
2242 assert_eq!(wstatus, SIGKILL.number() as i32);
2243 })
2244 .await;
2245 }
2246
2247 async fn test_exit_status_for_signal(
2248 sig: Signal,
2249 wait_status: i32,
2250 exit_signal: Option<Signal>,
2251 ) {
2252 spawn_kernel_and_run(async move |locked, current_task| {
2253 let mut child = current_task.clone_task_for_test(locked, 0, exit_signal);
2254
2255 send_standard_signal(locked, &child, SignalInfo::kernel(sig));
2257 dequeue_signal_for_test(locked, &mut child);
2258 std::mem::drop(child);
2259
2260 let address = map_memory(
2262 locked,
2263 ¤t_task,
2264 UserAddress::default(),
2265 std::mem::size_of::<i32>() as u64,
2266 );
2267 let address_ref = UserRef::<i32>::new(address);
2268 sys_wait4(locked, ¤t_task, -1, address_ref, 0, RUsagePtr::null(current_task))
2269 .expect("wait4");
2270 let wstatus = current_task.read_object(address_ref).expect("read memory");
2271 assert_eq!(wstatus, wait_status);
2272 })
2273 .await;
2274 }
2275
2276 #[::fuchsia::test]
2277 async fn test_exit_status() {
2278 test_exit_status_for_signal(SIGTERM, SIGTERM.number() as i32, Some(SIGCHLD)).await;
2280 test_exit_status_for_signal(SIGSEGV, (SIGSEGV.number() as i32) | 0x80, Some(SIGCHLD)).await;
2282 }
2283
2284 #[::fuchsia::test]
2285 async fn test_wait4_by_pgid() {
2286 spawn_kernel_and_run(async |locked, current_task| {
2287 let child1 = current_task.clone_task_for_test(locked, 0, Some(SIGCHLD));
2288 let child1_pid = child1.tid;
2289 child1.thread_group().exit(locked, ExitStatus::Exit(42), None);
2290 std::mem::drop(child1);
2291 let child2 = current_task.clone_task_for_test(locked, 0, Some(SIGCHLD));
2292 child2.thread_group().setsid(locked).expect("setsid");
2293 let child2_pid = child2.tid;
2294 child2.thread_group().exit(locked, ExitStatus::Exit(42), None);
2295 std::mem::drop(child2);
2296
2297 assert_eq!(
2298 sys_wait4(
2299 locked,
2300 ¤t_task,
2301 -child2_pid,
2302 UserRef::default(),
2303 0,
2304 RUsagePtr::null(current_task)
2305 ),
2306 Ok(child2_pid)
2307 );
2308 assert_eq!(
2309 sys_wait4(
2310 locked,
2311 ¤t_task,
2312 0,
2313 UserRef::default(),
2314 0,
2315 RUsagePtr::null(current_task)
2316 ),
2317 Ok(child1_pid)
2318 );
2319 })
2320 .await;
2321 }
2322
2323 #[::fuchsia::test]
2324 async fn test_waitid_by_pgid() {
2325 spawn_kernel_and_run(async |locked, current_task| {
2326 let child1 = current_task.clone_task_for_test(locked, 0, Some(SIGCHLD));
2327 let child1_pid = child1.tid;
2328 child1.thread_group().exit(locked, ExitStatus::Exit(42), None);
2329 std::mem::drop(child1);
2330 let child2 = current_task.clone_task_for_test(locked, 0, Some(SIGCHLD));
2331 child2.thread_group().setsid(locked).expect("setsid");
2332 let child2_pid = child2.tid;
2333 child2.thread_group().exit(locked, ExitStatus::Exit(42), None);
2334 std::mem::drop(child2);
2335
2336 let address: UserRef<uapi::siginfo_t> =
2337 map_memory(locked, ¤t_task, UserAddress::default(), *PAGE_SIZE).into();
2338 assert_eq!(
2339 sys_waitid(
2340 locked,
2341 ¤t_task,
2342 P_PGID,
2343 child2_pid,
2344 address.into(),
2345 WEXITED,
2346 UserRef::default().into()
2347 ),
2348 Ok(())
2349 );
2350 assert_eq!(current_task.thread_group().read().zombie_children[0].pid(), child1_pid);
2352
2353 assert_eq!(
2354 sys_waitid(
2355 locked,
2356 ¤t_task,
2357 P_PGID,
2358 0,
2359 address.into(),
2360 WEXITED,
2361 UserRef::default().into()
2362 ),
2363 Ok(())
2364 );
2365 })
2366 .await;
2367 }
2368
2369 #[::fuchsia::test]
2370 async fn test_sigqueue() {
2371 spawn_kernel_and_run(async |locked, current_task| {
2372 let current_uid = current_task.current_creds().uid;
2373 let current_pid = current_task.get_pid();
2374
2375 const TEST_VALUE: u64 = 101;
2376
2377 const ARCH64_SI_HEADER_SIZE: usize = SI_HEADER_SIZE + 4;
2379 const PID_DATA_OFFSET: usize = ARCH64_SI_HEADER_SIZE;
2381 const UID_DATA_OFFSET: usize = ARCH64_SI_HEADER_SIZE + 4;
2382 const VALUE_DATA_OFFSET: usize = ARCH64_SI_HEADER_SIZE + 8;
2383
2384 let mut data = vec![0u8; SI_MAX_SIZE_AS_USIZE];
2385 let header = SignalInfoHeader {
2386 signo: SIGIO.number(),
2387 code: SI_QUEUE,
2388 ..SignalInfoHeader::default()
2389 };
2390 let _ = header.write_to(&mut data[..SI_HEADER_SIZE]);
2391 data[PID_DATA_OFFSET..PID_DATA_OFFSET + 4].copy_from_slice(¤t_pid.to_ne_bytes());
2392 data[UID_DATA_OFFSET..UID_DATA_OFFSET + 4].copy_from_slice(¤t_uid.to_ne_bytes());
2393 data[VALUE_DATA_OFFSET..VALUE_DATA_OFFSET + 8]
2394 .copy_from_slice(&TEST_VALUE.to_ne_bytes());
2395
2396 let addr = map_memory(locked, ¤t_task, UserAddress::default(), *PAGE_SIZE);
2397 current_task.write_memory(addr, &data).unwrap();
2398 let second_current = create_task(locked, current_task.kernel(), "second task");
2399 let second_pid = second_current.get_pid();
2400 let second_tid = second_current.get_tid();
2401 assert_eq!(second_current.read().queued_signal_count(SIGIO), 0);
2402
2403 assert_eq!(
2404 sys_rt_tgsigqueueinfo(
2405 locked,
2406 ¤t_task,
2407 second_pid,
2408 second_tid,
2409 UncheckedSignal::from(SIGIO),
2410 addr
2411 ),
2412 Ok(())
2413 );
2414 assert_eq!(second_current.read().queued_signal_count(SIGIO), 1);
2415
2416 let signal = SignalInfo::with_detail(
2417 SIGIO,
2418 SI_QUEUE,
2419 SignalDetail::Kill {
2420 pid: current_task.thread_group().leader,
2421 uid: current_task.current_creds().uid,
2422 },
2423 );
2424 let queued_signal = second_current.write().take_specific_signal(signal);
2425 if let Some(sig) = queued_signal {
2426 assert_eq!(sig.signal, SIGIO);
2427 assert_eq!(sig.errno, 0);
2428 assert_eq!(sig.code, SI_QUEUE);
2429 if let SignalDetail::Raw { data } = sig.detail {
2430 let offset_pid = PID_DATA_OFFSET - SI_HEADER_SIZE;
2432 let offset_uid = UID_DATA_OFFSET - SI_HEADER_SIZE;
2433 let offset_value = VALUE_DATA_OFFSET - SI_HEADER_SIZE;
2434 let pid =
2435 pid_t::from_ne_bytes(data[offset_pid..offset_pid + 4].try_into().unwrap());
2436 let uid =
2437 uid_t::from_ne_bytes(data[offset_uid..offset_uid + 4].try_into().unwrap());
2438 let value = u64::from_ne_bytes(
2439 data[offset_value..offset_value + 8].try_into().unwrap(),
2440 );
2441 assert_eq!(pid, current_pid);
2442 assert_eq!(uid, current_uid);
2443 assert_eq!(value, TEST_VALUE);
2444 } else {
2445 panic!("incorrect signal detail");
2446 }
2447 } else {
2448 panic!("expected a queued signal");
2449 }
2450 })
2451 .await;
2452 }
2453
2454 #[::fuchsia::test]
2455 async fn test_signalfd_filters_signals() {
2456 spawn_kernel_and_run(async |locked, current_task| {
2457 let memory_for_masks =
2458 map_memory(locked, ¤t_task, UserAddress::default(), *PAGE_SIZE);
2459
2460 let term_int_mask = SigSet::from(SIGTERM) | SigSet::from(SIGINT);
2462 let term_int_mask_addr = UserRef::<SigSet>::new(memory_for_masks);
2463 current_task
2464 .write_object(term_int_mask_addr, &term_int_mask)
2465 .expect("failed to write mask");
2466 let sfd_term_int = sys_signalfd4(
2467 locked,
2468 ¤t_task,
2469 FdNumber::from_raw(-1),
2470 term_int_mask_addr,
2471 std::mem::size_of::<SigSet>(),
2472 0,
2473 )
2474 .expect("failed to create SIGTERM/SIGINT signalfd");
2475
2476 let sigchld_mask = SigSet::from(SIGCHLD);
2478 let sigchld_mask_addr =
2479 UserRef::<SigSet>::new((memory_for_masks + std::mem::size_of::<SigSet>()).unwrap());
2480 current_task
2481 .write_object(sigchld_mask_addr, &sigchld_mask)
2482 .expect("failed to write mask");
2483 let sfd_chld = sys_signalfd4(
2484 locked,
2485 ¤t_task,
2486 FdNumber::from_raw(-1),
2487 sigchld_mask_addr,
2488 std::mem::size_of::<SigSet>(),
2489 0,
2490 )
2491 .expect("failed to create SIGCHLD signalfd");
2492
2493 let child = current_task.clone_task_for_test(locked, 0, Some(SIGCHLD));
2495 child.thread_group().exit(locked, ExitStatus::Exit(1), None);
2496 std::mem::drop(child);
2497
2498 let sfd_term_int_file =
2500 current_task.files.get(sfd_term_int).expect("failed to get sfd_term_int file");
2501 let sfd_chld_file =
2502 current_task.files.get(sfd_chld).expect("failed to get sfd_chld file");
2503
2504 let term_int_events = sfd_term_int_file
2505 .query_events(locked, ¤t_task)
2506 .expect("failed to query sfd_term_int events");
2507 let chld_events = sfd_chld_file
2508 .query_events(locked, ¤t_task)
2509 .expect("failed to query sfd_chld events");
2510
2511 assert!(!term_int_events.contains(FdEvents::POLLIN));
2512 assert!(chld_events.contains(FdEvents::POLLIN));
2513 })
2514 .await;
2515 }
2516
2517 #[::fuchsia::test]
2518 async fn test_signalfd_filters_signals_async() {
2519 spawn_kernel_and_run(async |locked, current_task| {
2520 let memory_for_masks =
2521 map_memory(locked, ¤t_task, UserAddress::default(), *PAGE_SIZE);
2522
2523 let term_int_mask = SigSet::from(SIGTERM) | SigSet::from(SIGINT);
2525 let term_int_mask_addr = UserRef::<SigSet>::new(memory_for_masks);
2526 current_task
2527 .write_object(term_int_mask_addr, &term_int_mask)
2528 .expect("failed to write mask");
2529 let sfd_term_int = sys_signalfd4(
2530 locked,
2531 ¤t_task,
2532 FdNumber::from_raw(-1),
2533 term_int_mask_addr,
2534 std::mem::size_of::<SigSet>(),
2535 0,
2536 )
2537 .expect("failed to create SIGTERM/SIGINT signalfd");
2538
2539 let sigchld_mask = SigSet::from(SIGCHLD);
2541 let sigchld_mask_addr =
2542 UserRef::<SigSet>::new((memory_for_masks + std::mem::size_of::<SigSet>()).unwrap());
2543 current_task
2544 .write_object(sigchld_mask_addr, &sigchld_mask)
2545 .expect("failed to write mask");
2546 let sfd_chld = sys_signalfd4(
2547 locked,
2548 ¤t_task,
2549 FdNumber::from_raw(-1),
2550 sigchld_mask_addr,
2551 std::mem::size_of::<SigSet>(),
2552 0,
2553 )
2554 .expect("failed to create SIGCHLD signalfd");
2555
2556 let waiter = Waiter::new();
2558 let ready_items = Arc::new(Mutex::new(VecDeque::new()));
2559
2560 let sfd_term_int_file =
2561 current_task.files.get(sfd_term_int).expect("failed to get sfd_term_int file");
2562 let sfd_chld_file =
2563 current_task.files.get(sfd_chld).expect("failed to get sfd_chld file");
2564
2565 sfd_term_int_file
2566 .wait_async(
2567 locked,
2568 ¤t_task,
2569 &waiter,
2570 FdEvents::POLLIN,
2571 EventHandler::Enqueue {
2572 key: sfd_term_int.into(),
2573 queue: ready_items.clone(),
2574 sought_events: FdEvents::POLLIN,
2575 },
2576 )
2577 .expect("failed to wait on sfd_term_int");
2578
2579 sfd_chld_file
2580 .wait_async(
2581 locked,
2582 ¤t_task,
2583 &waiter,
2584 FdEvents::POLLIN,
2585 EventHandler::Enqueue {
2586 key: sfd_chld.into(),
2587 queue: ready_items.clone(),
2588 sought_events: FdEvents::POLLIN,
2589 },
2590 )
2591 .expect("failed to wait on sfd_chld");
2592
2593 let sigchld_mask_ref = UserRef::<SigSet>::new(memory_for_masks);
2595 current_task
2596 .write_object(sigchld_mask_ref, &sigchld_mask)
2597 .expect("failed to write mask");
2598 sys_rt_sigprocmask(
2599 locked,
2600 ¤t_task,
2601 SIG_BLOCK,
2602 sigchld_mask_ref,
2603 UserRef::default(),
2604 std::mem::size_of::<SigSet>(),
2605 )
2606 .expect("failed to block SIGCHLD");
2607
2608 let child = current_task.clone_task_for_test(locked, 0, Some(SIGCHLD));
2610 child.thread_group().exit(locked, ExitStatus::Exit(1), None);
2611 std::mem::drop(child);
2612
2613 waiter.wait(locked, ¤t_task).expect("failed to wait");
2615
2616 let ready_items = ready_items.lock();
2618 assert_eq!(ready_items.len(), 1);
2619 assert_eq!(ready_items[0].key, sfd_chld.into());
2620 })
2621 .await;
2622 }
2623}