1pub use super::signal_handling::sys_restart_syscall;
6use super::signalfd::SignalFd;
7use crate::mm::MemoryAccessorExt;
8use crate::security;
9use crate::signals::{
10 IntoSignalInfoOptions, RunState, SI_MAX_SIZE_AS_USIZE, SignalDetail, SignalInfo,
11 UncheckedSignalInfo, restore_from_signal_handler, send_signal,
12};
13use crate::task::{
14 CurrentTask, PidTable, ProcessEntryRef, ProcessSelector, Task, TaskMutableState, ThreadGroup,
15 ThreadGroupLifecycleWaitValue, WaitResult, WaitableChildResult, Waiter,
16};
17use crate::vfs::{FdFlags, FdNumber};
18use starnix_sync::{LockBefore, RwLockReadGuard, ThreadGroupLimits};
19use starnix_uapi::user_address::{ArchSpecific, MultiArchUserRef};
20use starnix_uapi::{tid_t, uapi};
21
22use starnix_logging::track_stub;
23use starnix_sync::{InterruptibleEvent, Locked, Unlocked, WakeReason};
24use starnix_syscalls::SyscallResult;
25use starnix_types::time::{duration_from_timespec, timeval_from_duration};
26use starnix_uapi::errors::{EINTR, ETIMEDOUT, Errno, ErrnoResultExt};
27use starnix_uapi::open_flags::OpenFlags;
28use starnix_uapi::signals::{SigSet, Signal, UNBLOCKABLE_SIGNALS, UncheckedSignal};
29use starnix_uapi::user_address::{UserAddress, UserRef};
30use starnix_uapi::{
31 __WALL, __WCLONE, P_ALL, P_PGID, P_PID, P_PIDFD, SFD_CLOEXEC, SFD_NONBLOCK, SI_TKILL,
32 SIG_BLOCK, SIG_SETMASK, SIG_UNBLOCK, SS_AUTODISARM, SS_DISABLE, SS_ONSTACK, WCONTINUED,
33 WEXITED, WNOHANG, WNOWAIT, WSTOPPED, WUNTRACED, errno, error, pid_t, rusage, sigaltstack,
34};
35use static_assertions::const_assert_eq;
36use zerocopy::{FromBytes, Immutable, IntoBytes};
37
/// Multi-arch user pointer to a `rusage` struct (64-bit or arch32 layout).
pub type RUsagePtr = MultiArchUserRef<uapi::rusage, uapi::arch32::rusage>;
/// Multi-arch user pointer to a sigaction whose arch32 side uses the 64-bit sigset layout.
type SigAction64Ptr = MultiArchUserRef<uapi::sigaction_t, uapi::arch32::sigaction64_t>;
/// Multi-arch user pointer to a sigaction using each architecture's native layout.
type SigActionPtr = MultiArchUserRef<uapi::sigaction_t, uapi::arch32::sigaction_t>;
41
/// Implements rt_sigaction(2): install and/or fetch the disposition for `signum`.
///
/// `sigset_size` must match the kernel's sigset size. For arch32 tasks passing
/// the legacy 32-bit sigset size, the pointers are reinterpreted with the
/// arch32 sigaction layout before delegating to `rt_sigaction`.
pub fn sys_rt_sigaction(
    _locked: &mut Locked<Unlocked>,
    current_task: &CurrentTask,
    signum: UncheckedSignal,
    user_action: SigAction64Ptr,
    user_old_action: SigAction64Ptr,
    sigset_size: usize,
) -> Result<(), Errno> {
    // arch32 callers may use the smaller arch32 sigset; switch to the matching
    // arch32 sigaction layout in that case.
    if user_action.is_arch32() && sigset_size == std::mem::size_of::<uapi::arch32::sigset_t>() {
        let user_action = SigActionPtr::from_32(user_action.addr().into());
        let user_old_action = SigActionPtr::from_32(user_old_action.addr().into());
        return rt_sigaction(current_task, signum, user_action, user_old_action);
    }

    // Otherwise the caller must pass exactly the 64-bit sigset size.
    if sigset_size != std::mem::size_of::<uapi::sigset_t>() {
        return error!(EINVAL);
    }
    rt_sigaction(current_task, signum, user_action, user_old_action)
}
75
76fn rt_sigaction<Arch32SigAction>(
77 current_task: &CurrentTask,
78 signum: UncheckedSignal,
79 user_action: MultiArchUserRef<uapi::sigaction_t, Arch32SigAction>,
80 user_old_action: MultiArchUserRef<uapi::sigaction_t, Arch32SigAction>,
81) -> Result<(), Errno>
82where
83 Arch32SigAction:
84 IntoBytes + FromBytes + Immutable + TryFrom<uapi::sigaction_t> + TryInto<uapi::sigaction_t>,
85{
86 let signal = Signal::try_from(signum)?;
87
88 let new_signal_action = if !user_action.is_null() {
89 if signal.is_unblockable() {
93 return error!(EINVAL);
94 }
95
96 let signal_action = current_task.read_multi_arch_object(user_action)?;
97 Some(signal_action)
98 } else {
99 None
100 };
101
102 let signal_actions = ¤t_task.thread_group().signal_actions;
103 let old_action = if let Some(new_signal_action) = new_signal_action {
104 signal_actions.set(signal, new_signal_action)
105 } else {
106 signal_actions.get(signal)
107 };
108
109 if !user_old_action.is_null() {
110 current_task.write_multi_arch_object(user_old_action, old_action)?;
111 }
112
113 Ok(())
114}
115
116pub fn sys_rt_sigpending(
126 _locked: &mut Locked<Unlocked>,
127 current_task: &CurrentTask,
128 set: UserRef<SigSet>,
129 sigset_size: usize,
130) -> Result<(), Errno> {
131 if sigset_size != std::mem::size_of::<SigSet>() {
132 return error!(EINVAL);
133 }
134
135 let signals = current_task.read().pending_signals();
136 current_task.write_object(set, &signals)?;
137 Ok(())
138}
139
/// Implements rt_sigprocmask(2): query and/or update the calling thread's
/// signal mask.
///
/// When `user_old_set` is non-null, the previous mask is reported even if no
/// new mask is installed. `how` selects how `user_set` combines with the
/// current mask (SIG_BLOCK / SIG_UNBLOCK / SIG_SETMASK).
pub fn sys_rt_sigprocmask(
    _locked: &mut Locked<Unlocked>,
    current_task: &CurrentTask,
    how: u32,
    user_set: UserRef<SigSet>,
    user_old_set: UserRef<SigSet>,
    sigset_size: usize,
) -> Result<(), Errno> {
    if sigset_size != std::mem::size_of::<SigSet>() {
        return error!(EINVAL);
    }
    // Validate `how` before touching user memory.
    match how {
        SIG_BLOCK | SIG_UNBLOCK | SIG_SETMASK => (),
        _ => return error!(EINVAL),
    };

    // Read the new mask (if any) before taking the task state lock.
    let mut new_mask = SigSet::default();
    if !user_set.is_null() {
        new_mask = current_task.read_object(user_set)?;
    }

    let mut state = current_task.write();
    let signal_mask = state.signal_mask();
    // Report the previous mask before applying any change.
    if !user_old_set.is_null() {
        current_task.write_object(user_old_set, &signal_mask)?;
    }

    // A null `user_set` makes this syscall a pure query.
    if user_set.is_null() {
        return Ok(());
    }

    let signal_mask = match how {
        SIG_BLOCK => signal_mask | new_mask,
        SIG_UNBLOCK => signal_mask & !new_mask,
        SIG_SETMASK => new_mask,
        // Unreachable: `how` was validated above; kept defensively.
        _ => return error!(EINVAL),
    };
    state.set_signal_mask(signal_mask);

    Ok(())
}
198
/// Multi-arch user pointer to a `sigaltstack` struct (64-bit or arch32 layout).
type SigAltStackPtr = MultiArchUserRef<uapi::sigaltstack, uapi::arch32::sigaltstack>;
200
/// Implements sigaltstack(2): install and/or report the alternate signal
/// stack for the calling task.
///
/// Writes the previous stack description to `user_old_ss` (if non-null) and
/// installs the stack described by `user_ss` (if non-null). Replacing the
/// stack while currently executing on it fails with EPERM.
pub fn sys_sigaltstack(
    _locked: &mut Locked<Unlocked>,
    current_task: &CurrentTask,
    user_ss: SigAltStackPtr,
    user_old_ss: SigAltStackPtr,
) -> Result<(), Errno> {
    let stack_pointer_register = current_task.thread_state.registers.stack_pointer_register();
    let mut state = current_task.write();
    let on_signal_stack = state.on_signal_stack(stack_pointer_register);

    let mut ss = sigaltstack::default();
    if !user_ss.is_null() {
        // The stack cannot be changed while the task is running on it.
        if on_signal_stack {
            return error!(EPERM);
        }
        ss = current_task.read_multi_arch_object(user_ss)?;
        // Only SS_AUTODISARM and SS_DISABLE are accepted as input flags.
        if (ss.ss_flags & !((SS_AUTODISARM | SS_DISABLE) as i32)) != 0 {
            return error!(EINVAL);
        }
        // An enabled stack must be at least MINSIGSTKSZ bytes; the minimum
        // differs between native and arch32 tasks.
        let min_stack_size =
            if current_task.is_arch32() { uapi::arch32::MINSIGSTKSZ } else { uapi::MINSIGSTKSZ };
        if ss.ss_flags & (SS_DISABLE as i32) == 0 && ss.ss_size < min_stack_size as u64 {
            return error!(ENOMEM);
        }
    }

    if !user_old_ss.is_null() {
        // Report SS_DISABLE when no stack is installed, and SS_ONSTACK when
        // the task is currently executing on the installed stack.
        let mut old_ss = match state.sigaltstack() {
            Some(old_ss) => old_ss,
            None => sigaltstack { ss_flags: SS_DISABLE as i32, ..sigaltstack::default() },
        };
        if on_signal_stack {
            old_ss.ss_flags = SS_ONSTACK as i32;
        }
        current_task.write_multi_arch_object(user_old_ss, old_ss)?;
    }

    if !user_ss.is_null() {
        if ss.ss_flags & (SS_DISABLE as i32) != 0 {
            state.set_sigaltstack(None);
        } else {
            state.set_sigaltstack(Some(ss));
        }
    }

    Ok(())
}
256
/// Implements rt_sigsuspend(2): temporarily replace the signal mask with
/// `user_mask` and wait until a signal arrives.
///
/// An interrupted wait is reported as ERESTARTNOHAND so that the syscall is
/// not automatically restarted after a handler runs.
pub fn sys_rt_sigsuspend(
    locked: &mut Locked<Unlocked>,
    current_task: &mut CurrentTask,
    user_mask: UserRef<SigSet>,
    sigset_size: usize,
) -> Result<(), Errno> {
    if sigset_size != std::mem::size_of::<SigSet>() {
        return error!(EINVAL);
    }
    let mask = current_task.read_object(user_mask)?;

    // Sleep with `mask` temporarily installed as the signal mask.
    let waiter = Waiter::new();
    current_task
        .wait_with_temporary_mask(locked, mask, |locked, current_task| {
            waiter.wait(locked, current_task)
        })
        .map_eintr(|| errno!(ERESTARTNOHAND))
}
289
/// Implements rt_sigtimedwait(2): wait for one of the signals in `set_addr`
/// to become pending, with an optional timeout.
///
/// On success the dequeued signal is returned (and its siginfo written to
/// `siginfo_addr` if non-null). A timeout yields EAGAIN.
pub fn sys_rt_sigtimedwait(
    locked: &mut Locked<Unlocked>,
    current_task: &mut CurrentTask,
    set_addr: UserRef<SigSet>,
    siginfo_addr: MultiArchUserRef<uapi::siginfo_t, uapi::arch32::siginfo_t>,
    timeout_addr: MultiArchUserRef<uapi::timespec, uapi::arch32::timespec>,
    sigset_size: usize,
) -> Result<Signal, Errno> {
    if sigset_size != std::mem::size_of::<SigSet>() {
        return error!(EINVAL);
    }

    let set = current_task.read_object(set_addr)?;
    // Unblockable signals are excluded from the waited-for set.
    let unblock = set & !UNBLOCKABLE_SIGNALS;
    // A null timeout means wait forever.
    let deadline = if timeout_addr.is_null() {
        zx::MonotonicInstant::INFINITE
    } else {
        let timeout = current_task.read_multi_arch_object(timeout_addr)?;
        zx::MonotonicInstant::after(duration_from_timespec(timeout)?)
    };

    let signal_info = loop {
        let waiter;

        {
            // Fast path: a matching signal may already be queued.
            let mut task_state = current_task.write();
            if let Some(signal) = task_state.take_signal_with_mask(!unblock) {
                break signal;
            }

            // Register for wakeup before releasing the task lock, so a signal
            // sent in between registration and sleeping is not missed.
            waiter = Waiter::new();
            task_state.wait_on_signal(&waiter);
        }

        // Temporarily unblock the waited-for signals while sleeping.
        let tmp_mask = current_task.read().signal_mask() & !unblock;

        let waiter_result =
            current_task.wait_with_temporary_mask(locked, tmp_mask, |locked, current_task| {
                waiter.wait_until(locked, current_task, deadline)
            });

        current_task.write().restore_signal_mask();

        if let Err(e) = waiter_result {
            if e == EINTR {
                // The wait was interrupted; the interrupting signal may be one
                // we are waiting for, so check the queue again before failing.
                if let Some(signal) = current_task.write().take_signal_with_mask(!unblock) {
                    break signal;
                }
            } else if e == ETIMEDOUT {
                // Deadline passed without a matching signal.
                return error!(EAGAIN);
            }

            return Err(e);
        }
    };

    if !siginfo_addr.is_null() {
        signal_info.write(current_task, siginfo_addr)?;
    }

    Ok(signal_info.signal)
}
373
374pub fn sys_signalfd4(
388 locked: &mut Locked<Unlocked>,
389 current_task: &CurrentTask,
390 fd: FdNumber,
391 mask_addr: UserRef<SigSet>,
392 mask_size: usize,
393 flags: u32,
394) -> Result<FdNumber, Errno> {
395 if flags & !(SFD_CLOEXEC | SFD_NONBLOCK) != 0 {
396 return error!(EINVAL);
397 }
398 if mask_size != std::mem::size_of::<SigSet>() {
399 return error!(EINVAL);
400 }
401 let mask = current_task.read_object(mask_addr)?;
402
403 if fd.raw() != -1 {
404 let file = current_task.get_file(fd)?;
405 let file = file.downcast_file::<SignalFd>().ok_or_else(|| errno!(EINVAL))?;
406 file.set_mask(mask);
407 Ok(fd)
408 } else {
409 let signalfd = SignalFd::new_file(locked, current_task, mask, flags);
410 let flags = if flags & SFD_CLOEXEC != 0 { FdFlags::CLOEXEC } else { FdFlags::empty() };
411 let fd = current_task.add_file(locked, signalfd, flags)?;
412 Ok(fd)
413 }
414}
415
/// Validates permissions and sends `unchecked_signal` to `target` with
/// kill-style (sender pid/uid) detail and the given `si_code`.
///
/// As with kill(2), a zero signal performs only the permission checks.
#[track_caller]
fn send_unchecked_signal<L>(
    locked: &mut Locked<L>,
    current_task: &CurrentTask,
    target: &Task,
    unchecked_signal: UncheckedSignal,
    si_code: i32,
) -> Result<(), Errno>
where
    L: LockBefore<ThreadGroupLimits>,
{
    current_task.can_signal(&target, unchecked_signal)?;

    // Signal 0 is an existence/permission probe: nothing is delivered.
    if unchecked_signal.is_zero() {
        return Ok(());
    }

    let signal = Signal::try_from(unchecked_signal)?;
    security::check_signal_access(current_task, &target, signal)?;

    send_signal(
        locked,
        target,
        SignalInfo::with_sender(
            signal,
            si_code,
            SignalDetail::Kill {
                pid: current_task.thread_group().leader,
                uid: current_task.current_creds().uid,
            },
            Some(current_task.weak_self.clone()),
        ),
    )
}
451
/// Like `send_unchecked_signal`, but reads the full `siginfo_t` payload from
/// user memory at `siginfo_ref`.
///
/// Mirrors rt_sigqueueinfo(2): a process may not spoof kernel-generated
/// si_codes (>= 0, or SI_TKILL) when signalling a different process.
#[track_caller]
fn send_unchecked_signal_info<L>(
    locked: &mut Locked<L>,
    current_task: &CurrentTask,
    target: &Task,
    unchecked_signal: UncheckedSignal,
    siginfo_ref: UserAddress,
) -> Result<(), Errno>
where
    L: LockBefore<ThreadGroupLimits>,
{
    current_task.can_signal(&target, unchecked_signal)?;

    if unchecked_signal.is_zero() {
        // Even for the signal-0 probe, the siginfo buffer must be readable.
        current_task.read_memory_to_array::<SI_MAX_SIZE_AS_USIZE>(siginfo_ref)?;
        return Ok(());
    }

    let signal = Signal::try_from(unchecked_signal)?;
    security::check_signal_access(current_task, &target, signal)?;

    let siginfo = UncheckedSignalInfo::read_from_siginfo(current_task, siginfo_ref)?;
    // Reject si_codes reserved for the kernel when targeting another process.
    if target.get_pid() != current_task.get_pid()
        && (siginfo.code() >= 0 || siginfo.code() == SI_TKILL)
    {
        return error!(EINVAL);
    }

    send_signal(locked, &target, siginfo.into_signal_info(signal, IntoSignalInfoOptions::None)?)
}
484
/// Implements kill(2): send a signal to a process (`pid > 0`), every process
/// the caller may signal except itself and init (`pid == -1`), or a process
/// group (`pid == 0` for the caller's own group, `pid < -1` for group |pid|).
pub fn sys_kill(
    locked: &mut Locked<Unlocked>,
    current_task: &CurrentTask,
    pid: pid_t,
    unchecked_signal: UncheckedSignal,
) -> Result<(), Errno> {
    let pids = current_task.kernel().pids.read();
    match pid {
        pid if pid > 0 => {
            // pid > 0: signal the process with that pid.
            let target_thread_group = {
                match pids.get_process(pid) {
                    Some(ProcessEntryRef::Process(process)) => process,

                    // Signalling a zombie is a no-op that reports success.
                    Some(ProcessEntryRef::Zombie(_zombie)) => return Ok(()),

                    None => {
                        // The pid may name a non-leader thread; in that case
                        // signal that thread's whole thread group.
                        let weak_task = pids.get_task(pid);
                        let task = Task::from_weak(&weak_task)?;
                        task.thread_group().clone()
                    }
                }
            };

            target_thread_group.send_signal_unchecked(current_task, unchecked_signal)?;
        }
        pid if pid == -1 => {
            // pid == -1: signal every process except the caller and init
            // (leader pid 1).
            let thread_groups = pids.get_thread_groups();
            signal_thread_groups(
                current_task,
                unchecked_signal,
                thread_groups.into_iter().filter(|thread_group| {
                    if *current_task.thread_group() == *thread_group {
                        return false;
                    }
                    if thread_group.leader == 1 {
                        return false;
                    }
                    true
                }),
            )?;
        }
        _ => {
            // pid == 0: the caller's own process group; pid < -1: the process
            // group with id |pid|.
            let process_group_id = match pid {
                0 => current_task.thread_group().read().process_group.leader,
                _ => negate_pid(pid)?,
            };

            let process_group = pids.get_process_group(process_group_id);
            let thread_groups = process_group
                .iter()
                .flat_map(|pg| pg.read(locked).thread_groups().collect::<Vec<_>>());
            signal_thread_groups(current_task, unchecked_signal, thread_groups)?;
        }
    };

    Ok(())
}
568
569fn verify_tgid_for_task(
570 task: &Task,
571 tgid: pid_t,
572 pids: &RwLockReadGuard<'_, PidTable>,
573) -> Result<(), Errno> {
574 let thread_group = match pids.get_process(tgid) {
575 Some(ProcessEntryRef::Process(proc)) => proc,
576 Some(ProcessEntryRef::Zombie(_)) => return error!(EINVAL),
577 None => return error!(ESRCH),
578 };
579 if *task.thread_group() != thread_group {
580 return error!(EINVAL);
581 } else {
582 Ok(())
583 }
584}
585
586pub fn sys_tkill(
598 locked: &mut Locked<Unlocked>,
599 current_task: &CurrentTask,
600 tid: tid_t,
601 unchecked_signal: UncheckedSignal,
602) -> Result<(), Errno> {
603 if tid <= 0 {
605 return error!(EINVAL);
606 }
607 let thread_weak = current_task.get_task(tid);
608 let thread = Task::from_weak(&thread_weak)?;
609 send_unchecked_signal(locked, current_task, &thread, unchecked_signal, SI_TKILL)
610}
611
612pub fn sys_tgkill(
623 locked: &mut Locked<Unlocked>,
624 current_task: &CurrentTask,
625 tgid: pid_t,
626 tid: tid_t,
627 unchecked_signal: UncheckedSignal,
628) -> Result<(), Errno> {
629 if tgid <= 0 || tid <= 0 {
631 return error!(EINVAL);
632 }
633 let pids = current_task.kernel().pids.read();
634
635 let weak_target = pids.get_task(tid);
636 let thread = Task::from_weak(&weak_target)?;
637 verify_tgid_for_task(&thread, tgid, &pids)?;
638
639 send_unchecked_signal(locked, current_task, &thread, unchecked_signal, SI_TKILL)
640}
641
642pub fn sys_rt_sigreturn(
651 _locked: &mut Locked<Unlocked>,
652 current_task: &mut CurrentTask,
653) -> Result<SyscallResult, Errno> {
654 restore_from_signal_handler(current_task)?;
655 Ok(current_task.thread_state.registers.return_register().into())
656}
657
658pub fn sys_rt_sigqueueinfo(
668 _locked: &mut Locked<Unlocked>,
669 current_task: &CurrentTask,
670 tgid: pid_t,
671 unchecked_signal: UncheckedSignal,
672 siginfo_ref: UserAddress,
673) -> Result<(), Errno> {
674 let weak_task = current_task.kernel().pids.read().get_task(tgid);
675 let task = &Task::from_weak(&weak_task)?;
676 task.thread_group().send_signal_unchecked_with_info(
677 current_task,
678 unchecked_signal,
679 siginfo_ref,
680 IntoSignalInfoOptions::None,
681 )
682}
683
684pub fn sys_rt_tgsigqueueinfo(
695 locked: &mut Locked<Unlocked>,
696 current_task: &CurrentTask,
697 tgid: pid_t,
698 tid: tid_t,
699 unchecked_signal: UncheckedSignal,
700 siginfo_ref: UserAddress,
701) -> Result<(), Errno> {
702 let pids = current_task.kernel().pids.read();
703
704 let thread_weak = pids.get_task(tid);
705 let task = Task::from_weak(&thread_weak)?;
706
707 verify_tgid_for_task(&task, tgid, &pids)?;
708 send_unchecked_signal_info(locked, current_task, &task, unchecked_signal, siginfo_ref)
709}
710
/// Implements pause(2): sleep until a signal is delivered.
///
/// Always reports ERESTARTNOHAND when woken, so that after the signal handler
/// runs the syscall returns EINTR rather than restarting.
pub fn sys_pause(_locked: &mut Locked<Unlocked>, current_task: &CurrentTask) -> Result<(), Errno> {
    let event = InterruptibleEvent::new();
    let guard = event.begin_wait();
    // Run in RunState::Event so an incoming signal can interrupt the block.
    let result = current_task.run_in_state(RunState::Event(event.clone()), || {
        match guard.block_until(None, zx::MonotonicInstant::INFINITE) {
            Err(WakeReason::Interrupted) => error!(ERESTARTNOHAND),
            // With an infinite deadline, a timeout is impossible.
            Err(WakeReason::DeadlineExpired) => panic!("blocking forever cannot time out"),
            Ok(()) => Ok(()),
        }
    });
    // Normalize any EINTR from run_in_state itself to ERESTARTNOHAND too.
    result.map_eintr(|| errno!(ERESTARTNOHAND))
}
728
729pub fn sys_pidfd_send_signal(
742 _locked: &mut Locked<Unlocked>,
743 current_task: &CurrentTask,
744 pidfd: FdNumber,
745 unchecked_signal: UncheckedSignal,
746 siginfo_ref: UserAddress,
747 flags: u32,
748) -> Result<(), Errno> {
749 if flags != 0 {
750 return error!(EINVAL);
751 }
752
753 let file = current_task.get_file(pidfd)?;
754 let target = file.as_thread_group_key()?;
755 let target = target.upgrade().ok_or_else(|| errno!(ESRCH))?;
756
757 if siginfo_ref.is_null() {
758 target.send_signal_unchecked(current_task, unchecked_signal)
759 } else {
760 target.send_signal_unchecked_with_info(
761 current_task,
762 unchecked_signal,
763 siginfo_ref,
764 IntoSignalInfoOptions::CheckSigno,
765 )
766 }
767}
768
769#[track_caller]
780fn signal_thread_groups<F>(
781 current_task: &CurrentTask,
782 unchecked_signal: UncheckedSignal,
783 thread_groups: F,
784) -> Result<(), Errno>
785where
786 F: IntoIterator<Item: AsRef<ThreadGroup>>,
787{
788 let mut last_error = None;
789 let mut sent_signal = false;
790
791 for thread_group in thread_groups.into_iter() {
794 match thread_group.as_ref().send_signal_unchecked(current_task, unchecked_signal) {
795 Ok(_) => sent_signal = true,
796 Err(errno) => last_error = Some(errno),
797 }
798 }
799
800 if sent_signal { Ok(()) } else { Err(last_error.unwrap_or_else(|| errno!(ESRCH))) }
801}
802
/// Parsed options for the wait family of syscalls (wait4(2), waitid(2)).
#[derive(Debug)]
pub struct WaitingOptions {
    // Set when WEXITED is present: match children that have exited.
    pub wait_for_exited: bool,
    // Set when WSTOPPED (alias WUNTRACED) is present: match stopped children.
    pub wait_for_stopped: bool,
    // Set when WCONTINUED is present: match children resumed by SIGCONT.
    pub wait_for_continued: bool,
    // Set when WNOHANG is absent: block until a matching child is available.
    pub block: bool,
    // Set when WNOWAIT is present: leave the child in its waitable state.
    pub keep_waitable_state: bool,
    // Set when __WALL is present.
    pub wait_for_all: bool,
    // Set when __WCLONE is present.
    pub wait_for_clone: bool,
}
821
822impl WaitingOptions {
823 fn new(options: u32) -> Self {
824 const_assert_eq!(WUNTRACED, WSTOPPED);
825 Self {
826 wait_for_exited: options & WEXITED > 0,
827 wait_for_stopped: options & WSTOPPED > 0,
828 wait_for_continued: options & WCONTINUED > 0,
829 block: options & WNOHANG == 0,
830 keep_waitable_state: options & WNOWAIT > 0,
831 wait_for_all: options & __WALL > 0,
832 wait_for_clone: options & __WCLONE > 0,
833 }
834 }
835
836 pub fn new_for_waitid(options: u32) -> Result<Self, Errno> {
838 if options & !(__WCLONE | __WALL | WNOHANG | WNOWAIT | WSTOPPED | WEXITED | WCONTINUED) != 0
839 {
840 track_stub!(TODO("https://fxbug.dev/322874788"), "waitid options", options);
841 return error!(EINVAL);
842 }
843 if options & (WEXITED | WSTOPPED | WCONTINUED) == 0 {
844 return error!(EINVAL);
845 }
846 Ok(Self::new(options))
847 }
848
849 pub fn new_for_wait4(options: u32) -> Result<Self, Errno> {
851 if options & !(__WCLONE | __WALL | WNOHANG | WUNTRACED | WCONTINUED) != 0 {
852 track_stub!(TODO("https://fxbug.dev/322874017"), "wait4 options", options);
853 return error!(EINVAL);
854 }
855 Ok(Self::new(options | WEXITED))
856 }
857}
858
/// Core of the wait family: waits for a child (or ptrace tracee) of the
/// current task matching `selector` to enter a waitable state.
///
/// Returns `Ok(None)` when `options.block` is false and nothing is waitable
/// yet, ECHILD when there are no matching children and no tracees at all, and
/// the wait result otherwise. Interrupted waits report ERESTARTSYS so the
/// syscall can be restarted.
fn wait_on_pid(
    locked: &mut Locked<Unlocked>,
    current_task: &CurrentTask,
    selector: &ProcessSelector,
    options: &WaitingOptions,
) -> Result<Option<WaitResult>, Errno> {
    let waiter = Waiter::new();
    loop {
        {
            let mut pids = current_task.kernel().pids.write();
            // Waitable ptracees take priority over regular children.
            if let Some(tracee) =
                current_task.thread_group().get_waitable_ptracee(selector, options, &mut pids)
            {
                return Ok(Some(tracee));
            }
            {
                let mut thread_group = current_task.thread_group().write();

                // Scan live tracees: register on their waiters (so we are
                // woken on state changes) and note whether any is already
                // waitable.
                let mut has_waitable_tracee = false;
                let mut has_any_tracee = false;
                current_task.thread_group().get_ptracees_and(
                    selector,
                    &pids,
                    &mut |task: &Task, task_state: &TaskMutableState| {
                        if let Some(ptrace) = &task_state.ptrace {
                            has_any_tracee = true;
                            ptrace.tracer_waiters().wait_async(&waiter);
                            if ptrace.is_waitable(task.load_stopped(), options) {
                                has_waitable_tracee = true;
                            }
                        }
                    },
                );
                // A tracee became waitable (or became a zombie) after the
                // first check above: restart the loop to pick it up.
                if has_waitable_tracee
                    || thread_group.zombie_ptracees.has_zombie_matching(&selector)
                {
                    continue;
                }
                match thread_group.get_waitable_child(selector, options, &mut pids) {
                    WaitableChildResult::ReadyNow(child) => {
                        return Ok(Some(*child));
                    }
                    WaitableChildResult::ShouldWait => (),
                    WaitableChildResult::NoneFound => {
                        // ECHILD only when there is neither a matching child
                        // nor any tracee to wait for.
                        if !has_any_tracee {
                            return error!(ECHILD);
                        }
                    }
                }
                // Arrange to be woken when any child's status changes.
                thread_group
                    .lifecycle_waiters
                    .wait_async_value(&waiter, ThreadGroupLifecycleWaitValue::ChildStatus);
            }
        }

        if !options.block {
            return Ok(None);
        }
        waiter.wait(locked, current_task).map_eintr(|| errno!(ERESTARTSYS))?;
    }
}
935
/// Implements waitid(2): wait for a state change in children selected by
/// `id_type`/`id`, reporting the result as a siginfo_t rather than a status
/// integer.
pub fn sys_waitid(
    locked: &mut Locked<Unlocked>,
    current_task: &CurrentTask,
    id_type: u32,
    id: i32,
    user_info: MultiArchUserRef<uapi::siginfo_t, uapi::arch32::siginfo_t>,
    options: u32,
    user_rusage: RUsagePtr,
) -> Result<(), Errno> {
    let mut waiting_options = WaitingOptions::new_for_waitid(options)?;

    let task_selector = match id_type {
        P_PID => ProcessSelector::Pid(id),
        P_ALL => ProcessSelector::Any,
        // id == 0 selects the caller's own process group.
        P_PGID => ProcessSelector::Pgid(if id == 0 {
            current_task.thread_group().read().process_group.leader
        } else {
            id
        }),
        P_PIDFD => {
            // A pidfd opened with O_NONBLOCK turns the wait into a poll.
            let fd = FdNumber::from_raw(id);
            let file = current_task.get_file(fd)?;
            if file.flags().contains(OpenFlags::NONBLOCK) {
                waiting_options.block = false;
            }
            ProcessSelector::Process(file.as_thread_group_key()?)
        }
        _ => return error!(EINVAL),
    };

    if let Some(waitable_process) =
        wait_on_pid(locked, current_task, &task_selector, &waiting_options)?
    {
        if !user_rusage.is_null() {
            // Only the time stats are populated for now.
            let usage = rusage {
                ru_utime: timeval_from_duration(waitable_process.time_stats.user_time),
                ru_stime: timeval_from_duration(waitable_process.time_stats.system_time),
                ..Default::default()
            };

            track_stub!(TODO("https://fxbug.dev/322874712"), "real rusage from waitid");
            current_task.write_multi_arch_object(user_rusage, usage)?;
        }

        if !user_info.is_null() {
            let siginfo = waitable_process.as_signal_info();
            siginfo.write(current_task, user_info)?;
        }
    } else if id_type == P_PIDFD {
        // A non-blocking pidfd wait with nothing ready reports EAGAIN
        // instead of a successful no-op.
        return error!(EAGAIN);
    }

    Ok(())
}
1013
/// Implements wait4(2): wait for a child selected by `raw_selector` and report
/// its wait status (and optionally rusage).
///
/// Returns the child's pid, or 0 when WNOHANG was set and no child was ready.
pub fn sys_wait4(
    locked: &mut Locked<Unlocked>,
    current_task: &CurrentTask,
    raw_selector: pid_t,
    user_wstatus: UserRef<i32>,
    options: u32,
    user_rusage: RUsagePtr,
) -> Result<pid_t, Errno> {
    let waiting_options = WaitingOptions::new_for_wait4(options)?;

    // Decode the pid argument per wait4(2): 0 = caller's own process group,
    // -1 = any child, > 0 = that pid, < -1 = process group |pid|.
    let selector = if raw_selector == 0 {
        ProcessSelector::Pgid(current_task.thread_group().read().process_group.leader)
    } else if raw_selector == -1 {
        ProcessSelector::Any
    } else if raw_selector > 0 {
        ProcessSelector::Pid(raw_selector)
    } else if raw_selector < -1 {
        ProcessSelector::Pgid(negate_pid(raw_selector)?)
    } else {
        // Unreachable for i32 inputs (the four arms above are exhaustive);
        // kept as a tracked stub defensively.
        track_stub!(
            TODO("https://fxbug.dev/322874213"),
            "wait4 with selector",
            raw_selector as u64
        );
        return error!(ENOSYS);
    };

    if let Some(waitable_process) = wait_on_pid(locked, current_task, &selector, &waiting_options)?
    {
        let status = waitable_process.exit_info.status.wait_status();

        if !user_rusage.is_null() {
            // Only the time stats are populated for now.
            track_stub!(TODO("https://fxbug.dev/322874768"), "real rusage from wait4");
            let usage = rusage {
                ru_utime: timeval_from_duration(waitable_process.time_stats.user_time),
                ru_stime: timeval_from_duration(waitable_process.time_stats.system_time),
                ..Default::default()
            };
            current_task.write_multi_arch_object(user_rusage, usage)?;
        }

        if !user_wstatus.is_null() {
            current_task.write_object(user_wstatus, &status)?;
        }

        Ok(waitable_process.pid)
    } else {
        Ok(0)
    }
}
1077
1078fn negate_pid(pid: pid_t) -> Result<pid_t, Errno> {
1080 pid.checked_neg().ok_or_else(|| errno!(ESRCH))
1081}
1082
#[cfg(target_arch = "aarch64")]
mod arch32 {
    //! 32-bit (arch32) compatibility entry points for the signal syscalls.
    use crate::task::CurrentTask;
    use crate::vfs::FdNumber;
    use starnix_sync::{Locked, Unlocked};
    use starnix_uapi::errors::Errno;
    use starnix_uapi::signals::SigSet;
    use starnix_uapi::user_address::UserRef;

    /// signalfd(2) for arch32: identical to signalfd4(2) with no flags.
    pub fn sys_arch32_signalfd(
        locked: &mut Locked<Unlocked>,
        current_task: &CurrentTask,
        fd: FdNumber,
        mask_addr: UserRef<SigSet>,
        mask_size: usize,
    ) -> Result<FdNumber, Errno> {
        super::sys_signalfd4(locked, current_task, fd, mask_addr, mask_size, 0)
    }

    // These syscalls need no arch32-specific marshalling beyond what the
    // 64-bit implementations already handle; re-export them under their
    // arch32 names.
    pub use super::{
        sys_pidfd_send_signal as sys_arch32_pidfd_send_signal,
        sys_rt_sigaction as sys_arch32_rt_sigaction,
        sys_rt_sigqueueinfo as sys_arch32_rt_sigqueueinfo,
        sys_rt_sigtimedwait as sys_arch32_rt_sigtimedwait,
        sys_rt_tgsigqueueinfo as sys_arch32_rt_tgsigqueueinfo,
        sys_sigaltstack as sys_arch32_sigaltstack, sys_signalfd4 as sys_arch32_signalfd4,
        sys_waitid as sys_arch32_waitid,
    };
}
1127
1128#[cfg(target_arch = "aarch64")]
1129pub use arch32::*;
1130
1131#[cfg(test)]
1132mod tests {
1133 use super::*;
1134 use crate::mm::{MemoryAccessor, PAGE_SIZE};
1135 use crate::signals::testing::dequeue_signal_for_test;
1136 use crate::signals::{SI_HEADER_SIZE, SignalInfoHeader, send_standard_signal};
1137 use crate::task::dynamic_thread_spawner::SpawnRequestBuilder;
1138 use crate::task::{EventHandler, ExitStatus, ProcessExitInfo};
1139 use crate::testing::*;
1140 use starnix_sync::Mutex;
1141 use starnix_types::math::round_up_to_system_page_size;
1142 use starnix_uapi::auth::Credentials;
1143 use starnix_uapi::errors::ERESTARTSYS;
1144 use starnix_uapi::signals::{
1145 SIGCHLD, SIGHUP, SIGINT, SIGIO, SIGKILL, SIGRTMIN, SIGSEGV, SIGSTOP, SIGTERM, SIGTRAP,
1146 SIGUSR1,
1147 };
1148 use starnix_uapi::vfs::FdEvents;
1149 use starnix_uapi::{SI_QUEUE, sigaction_t, uaddr, uid_t};
1150 use std::collections::VecDeque;
1151 use std::sync::Arc;
1152 use zerocopy::IntoBytes;
1153
1154 #[cfg(target_arch = "x86_64")]
1155 #[::fuchsia::test]
1156 async fn test_sigaltstack() {
1157 spawn_kernel_and_run(async |locked, current_task| {
1158 let addr = map_memory(locked, ¤t_task, UserAddress::default(), *PAGE_SIZE);
1159
1160 let user_ss = UserRef::<sigaltstack>::new(addr);
1161 let nullptr = UserRef::<sigaltstack>::default();
1162
1163 sys_sigaltstack(locked, ¤t_task, nullptr.into(), user_ss.into())
1165 .expect("failed to call sigaltstack");
1166 let mut ss = current_task.read_object(user_ss).expect("failed to read struct");
1167 assert!(ss.ss_flags & (SS_DISABLE as i32) != 0);
1168
1169 ss.ss_sp = uaddr { addr: 0x7FFFF };
1171 ss.ss_size = 0x1000;
1172 ss.ss_flags = SS_AUTODISARM as i32;
1173 current_task.write_object(user_ss, &ss).expect("failed to write struct");
1174 sys_sigaltstack(locked, ¤t_task, user_ss.into(), nullptr.into())
1175 .expect("failed to call sigaltstack");
1176 current_task
1177 .write_memory(addr, &[0u8; std::mem::size_of::<sigaltstack>()])
1178 .expect("failed to clear struct");
1179 sys_sigaltstack(locked, ¤t_task, nullptr.into(), user_ss.into())
1180 .expect("failed to call sigaltstack");
1181 let another_ss = current_task.read_object(user_ss).expect("failed to read struct");
1182 assert_eq!(ss.as_bytes(), another_ss.as_bytes());
1183
1184 let ss = sigaltstack { ss_flags: SS_DISABLE as i32, ..sigaltstack::default() };
1186 current_task.write_object(user_ss, &ss).expect("failed to write struct");
1187 sys_sigaltstack(locked, ¤t_task, user_ss.into(), nullptr.into())
1188 .expect("failed to call sigaltstack");
1189 current_task
1190 .write_memory(addr, &[0u8; std::mem::size_of::<sigaltstack>()])
1191 .expect("failed to clear struct");
1192 sys_sigaltstack(locked, ¤t_task, nullptr.into(), user_ss.into())
1193 .expect("failed to call sigaltstack");
1194 let ss = current_task.read_object(user_ss).expect("failed to read struct");
1195 assert!(ss.ss_flags & (SS_DISABLE as i32) != 0);
1196 })
1197 .await;
1198 }
1199
1200 #[::fuchsia::test]
1201 async fn test_sigaltstack_invalid_size() {
1202 spawn_kernel_and_run(async |locked, current_task| {
1203 let addr = map_memory(locked, ¤t_task, UserAddress::default(), *PAGE_SIZE);
1204
1205 let user_ss = UserRef::<sigaltstack>::new(addr);
1206 let nullptr = UserRef::<sigaltstack>::default();
1207
1208 sys_sigaltstack(locked, ¤t_task, nullptr.into(), user_ss.into())
1210 .expect("failed to call sigaltstack");
1211 let mut ss = current_task.read_object(user_ss).expect("failed to read struct");
1212 assert!(ss.ss_flags & (SS_DISABLE as i32) != 0);
1213
1214 let sigaltstack_addr_size = round_up_to_system_page_size(uapi::MINSIGSTKSZ as usize)
1216 .expect("failed to round up");
1217 let sigaltstack_addr = map_memory(
1218 locked,
1219 ¤t_task,
1220 UserAddress::default(),
1221 sigaltstack_addr_size as u64,
1222 );
1223 ss.ss_sp = sigaltstack_addr.into();
1224 ss.ss_flags = 0;
1225 ss.ss_size = uapi::MINSIGSTKSZ as u64 - 1;
1226 current_task.write_object(user_ss, &ss).expect("failed to write struct");
1227 assert_eq!(
1228 sys_sigaltstack(locked, ¤t_task, user_ss.into(), nullptr.into()),
1229 error!(ENOMEM)
1230 );
1231 })
1232 .await;
1233 }
1234
1235 #[cfg(target_arch = "x86_64")]
1236 #[::fuchsia::test]
1237 async fn test_sigaltstack_active_stack() {
1238 spawn_kernel_and_run(async |locked, current_task| {
1239 let addr = map_memory(locked, ¤t_task, UserAddress::default(), *PAGE_SIZE);
1240
1241 let user_ss = UserRef::<sigaltstack>::new(addr);
1242 let nullptr = UserRef::<sigaltstack>::default();
1243
1244 sys_sigaltstack(locked, ¤t_task, nullptr.into(), user_ss.into())
1246 .expect("failed to call sigaltstack");
1247 let mut ss = current_task.read_object(user_ss).expect("failed to read struct");
1248 assert!(ss.ss_flags & (SS_DISABLE as i32) != 0);
1249
1250 let sigaltstack_addr_size = round_up_to_system_page_size(uapi::MINSIGSTKSZ as usize)
1252 .expect("failed to round up");
1253 let sigaltstack_addr = map_memory(
1254 locked,
1255 ¤t_task,
1256 UserAddress::default(),
1257 sigaltstack_addr_size as u64,
1258 );
1259 ss.ss_sp = sigaltstack_addr.into();
1260 ss.ss_flags = 0;
1261 ss.ss_size = sigaltstack_addr_size as u64;
1262 current_task.write_object(user_ss, &ss).expect("failed to write struct");
1263 sys_sigaltstack(locked, ¤t_task, user_ss.into(), nullptr.into())
1264 .expect("failed to call sigaltstack");
1265
1266 let next_addr = (sigaltstack_addr + sigaltstack_addr_size).unwrap();
1268 current_task.thread_state.registers.rsp = next_addr.ptr() as u64;
1269 ss.ss_flags = SS_DISABLE as i32;
1270 current_task.write_object(user_ss, &ss).expect("failed to write struct");
1271 assert_eq!(
1272 sys_sigaltstack(locked, ¤t_task, user_ss.into(), nullptr.into()),
1273 error!(EPERM)
1274 );
1275
1276 let next_ss_addr = sigaltstack_addr
1279 .checked_add(sigaltstack_addr_size)
1280 .unwrap()
1281 .checked_add(0x1000usize)
1282 .unwrap();
1283 current_task.thread_state.registers.rsp = next_ss_addr.ptr() as u64;
1284 let ss = sigaltstack { ss_flags: SS_DISABLE as i32, ..sigaltstack::default() };
1285 current_task.write_object(user_ss, &ss).expect("failed to write struct");
1286 sys_sigaltstack(locked, ¤t_task, user_ss.into(), nullptr.into())
1287 .expect("failed to call sigaltstack");
1288 })
1289 .await;
1290 }
1291
1292 #[cfg(target_arch = "x86_64")]
1293 #[::fuchsia::test]
1294 async fn test_sigaltstack_active_stack_saturates() {
1295 spawn_kernel_and_run(async |locked, current_task| {
1296 let addr = map_memory(locked, ¤t_task, UserAddress::default(), *PAGE_SIZE);
1297
1298 let user_ss = UserRef::<sigaltstack>::new(addr);
1299 let nullptr = UserRef::<sigaltstack>::default();
1300
1301 sys_sigaltstack(locked, ¤t_task, nullptr.into(), user_ss.into())
1303 .expect("failed to call sigaltstack");
1304 let mut ss = current_task.read_object(user_ss).expect("failed to read struct");
1305 assert!(ss.ss_flags & (SS_DISABLE as i32) != 0);
1306
1307 let sigaltstack_addr_size = round_up_to_system_page_size(uapi::MINSIGSTKSZ as usize)
1309 .expect("failed to round up");
1310 let sigaltstack_addr = map_memory(
1311 locked,
1312 ¤t_task,
1313 UserAddress::default(),
1314 sigaltstack_addr_size as u64,
1315 );
1316 ss.ss_sp = sigaltstack_addr.into();
1317 ss.ss_flags = 0;
1318 ss.ss_size = u64::MAX;
1319 current_task.write_object(user_ss, &ss).expect("failed to write struct");
1320 sys_sigaltstack(locked, ¤t_task, user_ss.into(), nullptr.into())
1321 .expect("failed to call sigaltstack");
1322
1323 current_task.thread_state.registers.rsp =
1325 (sigaltstack_addr + sigaltstack_addr_size).unwrap().ptr() as u64;
1326 ss.ss_flags = SS_DISABLE as i32;
1327 current_task.write_object(user_ss, &ss).expect("failed to write struct");
1328 assert_eq!(
1329 sys_sigaltstack(locked, ¤t_task, user_ss.into(), nullptr.into()),
1330 error!(EPERM)
1331 );
1332
1333 current_task.thread_state.registers.rsp = 0u64;
1335 let ss = sigaltstack { ss_flags: SS_DISABLE as i32, ..sigaltstack::default() };
1336 current_task.write_object(user_ss, &ss).expect("failed to write struct");
1337 sys_sigaltstack(locked, ¤t_task, user_ss.into(), nullptr.into())
1338 .expect("failed to call sigaltstack");
1339 })
1340 .await;
1341 }
1342
1343 #[::fuchsia::test]
1346 async fn test_sigprocmask_invalid_size() {
1347 spawn_kernel_and_run(async |locked, current_task| {
1348 let set = UserRef::<SigSet>::default();
1349 let old_set = UserRef::<SigSet>::default();
1350 let how = 0;
1351
1352 assert_eq!(
1353 sys_rt_sigprocmask(
1354 locked,
1355 ¤t_task,
1356 how,
1357 set,
1358 old_set,
1359 std::mem::size_of::<SigSet>() * 2
1360 ),
1361 error!(EINVAL)
1362 );
1363 assert_eq!(
1364 sys_rt_sigprocmask(
1365 locked,
1366 ¤t_task,
1367 how,
1368 set,
1369 old_set,
1370 std::mem::size_of::<SigSet>() / 2
1371 ),
1372 error!(EINVAL)
1373 );
1374 })
1375 .await;
1376 }
1377
1378 #[::fuchsia::test]
1380 async fn test_sigprocmask_invalid_how() {
1381 spawn_kernel_and_run(async |locked, current_task| {
1382 let addr = map_memory(locked, ¤t_task, UserAddress::default(), *PAGE_SIZE);
1383
1384 let set = UserRef::<SigSet>::new(addr);
1385 let old_set = UserRef::<SigSet>::default();
1386 let how = SIG_SETMASK | SIG_UNBLOCK | SIG_BLOCK;
1387
1388 assert_eq!(
1389 sys_rt_sigprocmask(
1390 locked,
1391 ¤t_task,
1392 how,
1393 set,
1394 old_set,
1395 std::mem::size_of::<SigSet>()
1396 ),
1397 error!(EINVAL)
1398 );
1399 })
1400 .await;
1401 }
1402
1403 #[::fuchsia::test]
1406 async fn test_sigprocmask_null_set() {
1407 spawn_kernel_and_run(async |locked, current_task| {
1408 let addr = map_memory(locked, ¤t_task, UserAddress::default(), *PAGE_SIZE);
1409 let original_mask = SigSet::from(SIGTRAP);
1410 {
1411 current_task.write().set_signal_mask(original_mask);
1412 }
1413
1414 let set = UserRef::<SigSet>::default();
1415 let old_set = UserRef::<SigSet>::new(addr);
1416 let how = SIG_SETMASK;
1417
1418 current_task
1419 .write_memory(addr, &[0u8; std::mem::size_of::<SigSet>()])
1420 .expect("failed to clear struct");
1421
1422 assert_eq!(
1423 sys_rt_sigprocmask(
1424 locked,
1425 ¤t_task,
1426 how,
1427 set,
1428 old_set,
1429 std::mem::size_of::<SigSet>()
1430 ),
1431 Ok(())
1432 );
1433
1434 let old_mask = current_task.read_object(old_set).expect("failed to read mask");
1435 assert_eq!(old_mask, original_mask);
1436 })
1437 .await;
1438 }
1439
1440 #[::fuchsia::test]
1443 async fn test_sigprocmask_null_set_and_old_set() {
1444 spawn_kernel_and_run(async |locked, current_task| {
1445 let original_mask = SigSet::from(SIGTRAP);
1446 {
1447 current_task.write().set_signal_mask(original_mask);
1448 }
1449
1450 let set = UserRef::<SigSet>::default();
1451 let old_set = UserRef::<SigSet>::default();
1452 let how = SIG_SETMASK;
1453
1454 assert_eq!(
1455 sys_rt_sigprocmask(
1456 locked,
1457 ¤t_task,
1458 how,
1459 set,
1460 old_set,
1461 std::mem::size_of::<SigSet>()
1462 ),
1463 Ok(())
1464 );
1465 assert_eq!(current_task.read().signal_mask(), original_mask);
1466 })
1467 .await;
1468 }
1469
1470 #[::fuchsia::test]
1472 async fn test_sigprocmask_setmask() {
1473 spawn_kernel_and_run(async |locked, current_task| {
1474 let addr = map_memory(locked, ¤t_task, UserAddress::default(), *PAGE_SIZE);
1475 current_task
1476 .write_memory(addr, &[0u8; std::mem::size_of::<SigSet>() * 2])
1477 .expect("failed to clear struct");
1478
1479 let original_mask = SigSet::from(SIGTRAP);
1480 {
1481 current_task.write().set_signal_mask(original_mask);
1482 }
1483
1484 let new_mask = SigSet::from(SIGIO);
1485 let set = UserRef::<SigSet>::new(addr);
1486 current_task.write_object(set, &new_mask).expect("failed to set mask");
1487
1488 let old_addr_range = (addr + std::mem::size_of::<SigSet>()).unwrap();
1489 let old_set = UserRef::<SigSet>::new(old_addr_range);
1490 let how = SIG_SETMASK;
1491
1492 assert_eq!(
1493 sys_rt_sigprocmask(
1494 locked,
1495 ¤t_task,
1496 how,
1497 set,
1498 old_set,
1499 std::mem::size_of::<SigSet>()
1500 ),
1501 Ok(())
1502 );
1503
1504 let old_mask = current_task.read_object(old_set).expect("failed to read mask");
1505 assert_eq!(old_mask, original_mask);
1506 assert_eq!(current_task.read().signal_mask(), new_mask);
1507 })
1508 .await;
1509 }
1510
1511 #[::fuchsia::test]
1513 async fn test_sigprocmask_block() {
1514 spawn_kernel_and_run(async |locked, current_task| {
1515 let addr = map_memory(locked, ¤t_task, UserAddress::default(), *PAGE_SIZE);
1516 current_task
1517 .write_memory(addr, &[0u8; std::mem::size_of::<SigSet>() * 2])
1518 .expect("failed to clear struct");
1519
1520 let original_mask = SigSet::from(SIGTRAP);
1521 {
1522 current_task.write().set_signal_mask(original_mask);
1523 }
1524
1525 let new_mask = SigSet::from(SIGIO);
1526 let set = UserRef::<SigSet>::new(addr);
1527 current_task.write_object(set, &new_mask).expect("failed to set mask");
1528
1529 let old_addr_range = (addr + std::mem::size_of::<SigSet>()).unwrap();
1530 let old_set = UserRef::<SigSet>::new(old_addr_range);
1531 let how = SIG_BLOCK;
1532
1533 assert_eq!(
1534 sys_rt_sigprocmask(
1535 locked,
1536 ¤t_task,
1537 how,
1538 set,
1539 old_set,
1540 std::mem::size_of::<SigSet>()
1541 ),
1542 Ok(())
1543 );
1544
1545 let old_mask = current_task.read_object(old_set).expect("failed to read mask");
1546 assert_eq!(old_mask, original_mask);
1547 assert_eq!(current_task.read().signal_mask(), new_mask | original_mask);
1548 })
1549 .await;
1550 }
1551
1552 #[::fuchsia::test]
1554 async fn test_sigprocmask_unblock() {
1555 spawn_kernel_and_run(async |locked, current_task| {
1556 let addr = map_memory(locked, ¤t_task, UserAddress::default(), *PAGE_SIZE);
1557 current_task
1558 .write_memory(addr, &[0u8; std::mem::size_of::<SigSet>() * 2])
1559 .expect("failed to clear struct");
1560
1561 let original_mask = SigSet::from(SIGTRAP) | SigSet::from(SIGIO);
1562 {
1563 current_task.write().set_signal_mask(original_mask);
1564 }
1565
1566 let new_mask = SigSet::from(SIGTRAP);
1567 let set = UserRef::<SigSet>::new(addr);
1568 current_task.write_object(set, &new_mask).expect("failed to set mask");
1569
1570 let old_addr_range = (addr + std::mem::size_of::<SigSet>()).unwrap();
1571 let old_set = UserRef::<SigSet>::new(old_addr_range);
1572 let how = SIG_UNBLOCK;
1573
1574 assert_eq!(
1575 sys_rt_sigprocmask(
1576 locked,
1577 ¤t_task,
1578 how,
1579 set,
1580 old_set,
1581 std::mem::size_of::<SigSet>()
1582 ),
1583 Ok(())
1584 );
1585
1586 let old_mask = current_task.read_object(old_set).expect("failed to read mask");
1587 assert_eq!(old_mask, original_mask);
1588 assert_eq!(current_task.read().signal_mask(), SIGIO.into());
1589 })
1590 .await;
1591 }
1592
1593 #[::fuchsia::test]
1595 async fn test_sigprocmask_unblock_not_set() {
1596 spawn_kernel_and_run(async |locked, current_task| {
1597 let addr = map_memory(locked, ¤t_task, UserAddress::default(), *PAGE_SIZE);
1598 current_task
1599 .write_memory(addr, &[0u8; std::mem::size_of::<SigSet>() * 2])
1600 .expect("failed to clear struct");
1601
1602 let original_mask = SigSet::from(SIGIO);
1603 {
1604 current_task.write().set_signal_mask(original_mask);
1605 }
1606
1607 let new_mask = SigSet::from(SIGTRAP);
1608 let set = UserRef::<SigSet>::new(addr);
1609 current_task.write_object(set, &new_mask).expect("failed to set mask");
1610
1611 let old_addr_range = (addr + std::mem::size_of::<SigSet>()).unwrap();
1612 let old_set = UserRef::<SigSet>::new(old_addr_range);
1613 let how = SIG_UNBLOCK;
1614
1615 assert_eq!(
1616 sys_rt_sigprocmask(
1617 locked,
1618 ¤t_task,
1619 how,
1620 set,
1621 old_set,
1622 std::mem::size_of::<SigSet>()
1623 ),
1624 Ok(())
1625 );
1626
1627 let old_mask = current_task.read_object(old_set).expect("failed to read mask");
1628 assert_eq!(old_mask, original_mask);
1629 assert_eq!(current_task.read().signal_mask(), original_mask);
1630 })
1631 .await;
1632 }
1633
1634 #[::fuchsia::test]
1636 async fn test_sigprocmask_kill_stop() {
1637 spawn_kernel_and_run(async |locked, current_task| {
1638 let addr = map_memory(locked, ¤t_task, UserAddress::default(), *PAGE_SIZE);
1639 current_task
1640 .write_memory(addr, &[0u8; std::mem::size_of::<SigSet>() * 2])
1641 .expect("failed to clear struct");
1642
1643 let original_mask = SigSet::from(SIGIO);
1644 {
1645 current_task.write().set_signal_mask(original_mask);
1646 }
1647
1648 let new_mask = UNBLOCKABLE_SIGNALS;
1649 let set = UserRef::<SigSet>::new(addr);
1650 current_task.write_object(set, &new_mask).expect("failed to set mask");
1651
1652 let old_addr_range = (addr + std::mem::size_of::<SigSet>()).unwrap();
1653 let old_set = UserRef::<SigSet>::new(old_addr_range);
1654 let how = SIG_BLOCK;
1655
1656 assert_eq!(
1657 sys_rt_sigprocmask(
1658 locked,
1659 ¤t_task,
1660 how,
1661 set,
1662 old_set,
1663 std::mem::size_of::<SigSet>()
1664 ),
1665 Ok(())
1666 );
1667
1668 let old_mask = current_task.read_object(old_set).expect("failed to read mask");
1669 assert_eq!(old_mask, original_mask);
1670 assert_eq!(current_task.read().signal_mask(), original_mask);
1671 })
1672 .await;
1673 }
1674
1675 #[::fuchsia::test]
1676 async fn test_sigaction_invalid_signal() {
1677 spawn_kernel_and_run(async |locked, current_task| {
1678 assert_eq!(
1679 sys_rt_sigaction(
1680 locked,
1681 ¤t_task,
1682 UncheckedSignal::from(SIGKILL),
1683 UserRef::<sigaction_t>::new(UserAddress::from(10)).into(),
1685 UserRef::<sigaction_t>::default().into(),
1686 std::mem::size_of::<SigSet>(),
1687 ),
1688 error!(EINVAL)
1689 );
1690 assert_eq!(
1691 sys_rt_sigaction(
1692 locked,
1693 ¤t_task,
1694 UncheckedSignal::from(SIGSTOP),
1695 UserRef::<sigaction_t>::new(UserAddress::from(10)).into(),
1697 UserRef::<sigaction_t>::default().into(),
1698 std::mem::size_of::<SigSet>(),
1699 ),
1700 error!(EINVAL)
1701 );
1702 assert_eq!(
1703 sys_rt_sigaction(
1704 locked,
1705 ¤t_task,
1706 UncheckedSignal::from(Signal::NUM_SIGNALS + 1),
1707 UserRef::<sigaction_t>::new(UserAddress::from(10)).into(),
1709 UserRef::<sigaction_t>::default().into(),
1710 std::mem::size_of::<SigSet>(),
1711 ),
1712 error!(EINVAL)
1713 );
1714 })
1715 .await;
1716 }
1717
1718 #[::fuchsia::test]
1719 async fn test_sigaction_old_value_set() {
1720 spawn_kernel_and_run(async |locked, current_task| {
1721 let addr = map_memory(locked, ¤t_task, UserAddress::default(), *PAGE_SIZE);
1722 current_task
1723 .write_memory(addr, &[0u8; std::mem::size_of::<sigaction_t>()])
1724 .expect("failed to clear struct");
1725
1726 let org_mask = SigSet::from(SIGHUP) | SigSet::from(SIGINT);
1727 let original_action =
1728 sigaction_t { sa_mask: org_mask.into(), ..sigaction_t::default() };
1729
1730 {
1731 current_task.thread_group().signal_actions.set(SIGHUP, original_action);
1732 }
1733
1734 let old_action_ref = UserRef::<sigaction_t>::new(addr);
1735 assert_eq!(
1736 sys_rt_sigaction(
1737 locked,
1738 ¤t_task,
1739 UncheckedSignal::from(SIGHUP),
1740 UserRef::<sigaction_t>::default().into(),
1741 old_action_ref.into(),
1742 std::mem::size_of::<SigSet>()
1743 ),
1744 Ok(())
1745 );
1746
1747 let old_action =
1748 current_task.read_object(old_action_ref).expect("failed to read action");
1749 assert_eq!(old_action.as_bytes(), original_action.as_bytes());
1750 })
1751 .await;
1752 }
1753
1754 #[::fuchsia::test]
1755 async fn test_sigaction_new_value_set() {
1756 spawn_kernel_and_run(async |locked, current_task| {
1757 let addr = map_memory(locked, ¤t_task, UserAddress::default(), *PAGE_SIZE);
1758 current_task
1759 .write_memory(addr, &[0u8; std::mem::size_of::<sigaction_t>()])
1760 .expect("failed to clear struct");
1761
1762 let org_mask = SigSet::from(SIGHUP) | SigSet::from(SIGINT);
1763 let original_action =
1764 sigaction_t { sa_mask: org_mask.into(), ..sigaction_t::default() };
1765 let set_action_ref = UserRef::<sigaction_t>::new(addr);
1766 current_task
1767 .write_object(set_action_ref, &original_action)
1768 .expect("failed to set action");
1769
1770 assert_eq!(
1771 sys_rt_sigaction(
1772 locked,
1773 ¤t_task,
1774 UncheckedSignal::from(SIGINT),
1775 set_action_ref.into(),
1776 UserRef::<sigaction_t>::default().into(),
1777 std::mem::size_of::<SigSet>(),
1778 ),
1779 Ok(())
1780 );
1781
1782 assert_eq!(
1783 current_task.thread_group().signal_actions.get(SIGINT).as_bytes(),
1784 original_action.as_bytes()
1785 );
1786 })
1787 .await;
1788 }
1789
1790 #[::fuchsia::test]
1792 async fn test_kill_same_task() {
1793 spawn_kernel_and_run(async |locked, current_task| {
1794 assert_eq!(sys_kill(locked, ¤t_task, current_task.tid, SIGINT.into()), Ok(()));
1795 })
1796 .await;
1797 }
1798
1799 #[::fuchsia::test]
1801 async fn test_kill_own_thread_group() {
1802 spawn_kernel_and_run(async |locked, init_task| {
1803 let task1 = init_task.clone_task_for_test(locked, 0, Some(SIGCHLD));
1804 task1.thread_group().setsid(locked).expect("setsid");
1805 let task2 = task1.clone_task_for_test(locked, 0, Some(SIGCHLD));
1806
1807 assert_eq!(sys_kill(locked, &task1, 0, SIGINT.into()), Ok(()));
1808 assert_eq!(task1.read().queued_signal_count(SIGINT), 1);
1809 assert_eq!(task2.read().queued_signal_count(SIGINT), 1);
1810 assert_eq!(init_task.read().queued_signal_count(SIGINT), 0);
1811 })
1812 .await;
1813 }
1814
1815 #[::fuchsia::test]
1817 async fn test_kill_thread_group() {
1818 spawn_kernel_and_run(async |locked, init_task| {
1819 let task1 = init_task.clone_task_for_test(locked, 0, Some(SIGCHLD));
1820 task1.thread_group().setsid(locked).expect("setsid");
1821 let task2 = task1.clone_task_for_test(locked, 0, Some(SIGCHLD));
1822
1823 assert_eq!(sys_kill(locked, &task1, -task1.tid, SIGINT.into()), Ok(()));
1824 assert_eq!(task1.read().queued_signal_count(SIGINT), 1);
1825 assert_eq!(task2.read().queued_signal_count(SIGINT), 1);
1826 assert_eq!(init_task.read().queued_signal_count(SIGINT), 0);
1827 })
1828 .await;
1829 }
1830
1831 #[::fuchsia::test]
1833 async fn test_kill_all() {
1834 spawn_kernel_and_run(async |locked, init_task| {
1835 let task1 = init_task.clone_task_for_test(locked, 0, Some(SIGCHLD));
1836 task1.thread_group().setsid(locked).expect("setsid");
1837 let task2 = task1.clone_task_for_test(locked, 0, Some(SIGCHLD));
1838
1839 assert_eq!(sys_kill(locked, &task1, -1, SIGINT.into()), Ok(()));
1840 assert_eq!(task1.read().queued_signal_count(SIGINT), 0);
1841 assert_eq!(task2.read().queued_signal_count(SIGINT), 1);
1842 assert_eq!(init_task.read().queued_signal_count(SIGINT), 0);
1843 })
1844 .await;
1845 }
1846
1847 #[::fuchsia::test]
1849 async fn test_kill_inexistant_task() {
1850 spawn_kernel_and_run(async |locked, current_task| {
1851 assert_eq!(sys_kill(locked, ¤t_task, 9, SIGINT.into()), error!(ESRCH));
1852 })
1853 .await;
1854 }
1855
1856 #[::fuchsia::test]
1858 async fn test_kill_invalid_task() {
1859 spawn_kernel_and_run(async |locked, task1| {
1860 task1.set_creds(Credentials::with_ids(1, 1));
1862 let task2 = task1.clone_task_for_test(locked, 0, Some(SIGCHLD));
1863 task2.set_creds(Credentials::with_ids(2, 2));
1864
1865 assert!(task1.can_signal(&task2, SIGINT.into()).is_err());
1866 assert_eq!(sys_kill(locked, &task2, task1.tid, SIGINT.into()), error!(EPERM));
1867 assert_eq!(task1.read().queued_signal_count(SIGINT), 0);
1868 })
1869 .await;
1870 }
1871
1872 #[::fuchsia::test]
1874 async fn test_kill_invalid_task_in_thread_group() {
1875 spawn_kernel_and_run(async |locked, init_task| {
1876 let task1 = init_task.clone_task_for_test(locked, 0, Some(SIGCHLD));
1877 task1.thread_group().setsid(locked).expect("setsid");
1878 let task2 = task1.clone_task_for_test(locked, 0, Some(SIGCHLD));
1879 task2.thread_group().setsid(locked).expect("setsid");
1880 task2.set_creds(Credentials::with_ids(2, 2));
1881
1882 assert!(task2.can_signal(&task1, SIGINT.into()).is_err());
1883 assert_eq!(sys_kill(locked, &task2, -task1.tid, SIGINT.into()), error!(EPERM));
1884 assert_eq!(task1.read().queued_signal_count(SIGINT), 0);
1885 })
1886 .await;
1887 }
1888
1889 #[::fuchsia::test]
1891 async fn test_kill_invalid_signal() {
1892 spawn_kernel_and_run(async |locked, current_task| {
1893 assert_eq!(
1894 sys_kill(locked, ¤t_task, current_task.tid, UncheckedSignal::from(75)),
1895 error!(EINVAL)
1896 );
1897 })
1898 .await;
1899 }
1900
1901 #[::fuchsia::test]
1903 async fn test_blocked_signal_pending() {
1904 spawn_kernel_and_run(async |locked, current_task| {
1905 let addr = map_memory(locked, ¤t_task, UserAddress::default(), *PAGE_SIZE);
1906 current_task
1907 .write_memory(addr, &[0u8; std::mem::size_of::<SigSet>() * 2])
1908 .expect("failed to clear struct");
1909
1910 let new_mask = SigSet::from(SIGIO);
1911 let set = UserRef::<SigSet>::new(addr);
1912 current_task.write_object(set, &new_mask).expect("failed to set mask");
1913
1914 assert_eq!(
1915 sys_rt_sigprocmask(
1916 locked,
1917 ¤t_task,
1918 SIG_BLOCK,
1919 set,
1920 UserRef::default(),
1921 std::mem::size_of::<SigSet>()
1922 ),
1923 Ok(())
1924 );
1925 assert_eq!(sys_kill(locked, ¤t_task, current_task.tid, SIGIO.into()), Ok(()));
1926 assert_eq!(current_task.read().queued_signal_count(SIGIO), 1);
1927
1928 assert_eq!(sys_kill(locked, ¤t_task, current_task.tid, SIGIO.into()), Ok(()));
1930 assert_eq!(current_task.read().queued_signal_count(SIGIO), 1);
1931 })
1932 .await;
1933 }
1934
1935 #[::fuchsia::test]
1937 async fn test_blocked_real_time_signal_pending() {
1938 spawn_kernel_and_run(async |locked, current_task| {
1939 let addr = map_memory(locked, ¤t_task, UserAddress::default(), *PAGE_SIZE);
1940 current_task
1941 .write_memory(addr, &[0u8; std::mem::size_of::<SigSet>() * 2])
1942 .expect("failed to clear struct");
1943
1944 let new_mask = SigSet::from(starnix_uapi::signals::SIGRTMIN);
1945 let set = UserRef::<SigSet>::new(addr);
1946 current_task.write_object(set, &new_mask).expect("failed to set mask");
1947
1948 assert_eq!(
1949 sys_rt_sigprocmask(
1950 locked,
1951 ¤t_task,
1952 SIG_BLOCK,
1953 set,
1954 UserRef::default(),
1955 std::mem::size_of::<SigSet>()
1956 ),
1957 Ok(())
1958 );
1959 assert_eq!(sys_kill(locked, ¤t_task, current_task.tid, SIGRTMIN.into()), Ok(()));
1960 assert_eq!(current_task.read().queued_signal_count(starnix_uapi::signals::SIGRTMIN), 1);
1961
1962 assert_eq!(sys_kill(locked, ¤t_task, current_task.tid, SIGRTMIN.into()), Ok(()));
1964 assert_eq!(current_task.read().queued_signal_count(starnix_uapi::signals::SIGRTMIN), 2);
1965 })
1966 .await;
1967 }
1968
1969 #[::fuchsia::test]
1970 async fn test_suspend() {
1971 spawn_kernel_and_run(async |locked, current_task| {
1972 let init_task_weak = current_task.weak_task();
1973 let (tx, rx) = std::sync::mpsc::sync_channel::<()>(0);
1974
1975 let closure = move |locked: &mut Locked<Unlocked>, current_task: &CurrentTask| {
1976 let init_task_temp = init_task_weak.upgrade().expect("Task must be alive");
1977
1978 let mut suspended = false;
1980 while !suspended {
1981 suspended = init_task_temp.read().is_blocked();
1982 std::thread::sleep(std::time::Duration::from_millis(10));
1983 }
1984
1985 let _ = sys_kill(
1987 locked,
1988 current_task,
1989 init_task_temp.tid,
1990 UncheckedSignal::from(SIGHUP),
1991 );
1992
1993 rx.recv().expect("receive");
1995 assert!(!init_task_temp.read().is_blocked());
1996 };
1997 let (thread, req) =
1998 SpawnRequestBuilder::new().with_sync_closure(closure).build_with_async_result();
1999 current_task.kernel().kthreads.spawner().spawn_from_request(req);
2000
2001 let addr = map_memory(locked, ¤t_task, UserAddress::default(), *PAGE_SIZE);
2002 let user_ref = UserRef::<SigSet>::new(addr);
2003
2004 let sigset = !SigSet::from(SIGHUP);
2005 current_task.write_object(user_ref, &sigset).expect("failed to set action");
2006
2007 assert_eq!(
2008 sys_rt_sigsuspend(locked, current_task, user_ref, std::mem::size_of::<SigSet>()),
2009 error!(ERESTARTNOHAND)
2010 );
2011 tx.send(()).expect("send");
2012 futures::executor::block_on(thread).expect("join");
2013 })
2014 .await;
2015 }
2016
2017 #[::fuchsia::test]
2019 async fn test_waitid_options() {
2020 spawn_kernel_and_run(async |locked, current_task| {
2021 let id = 1;
2022 assert_eq!(
2023 sys_waitid(
2024 locked,
2025 ¤t_task,
2026 P_PID,
2027 id,
2028 MultiArchUserRef::null(current_task),
2029 0,
2030 UserRef::default().into()
2031 ),
2032 error!(EINVAL)
2033 );
2034 assert_eq!(
2035 sys_waitid(
2036 locked,
2037 ¤t_task,
2038 P_PID,
2039 id,
2040 MultiArchUserRef::null(current_task),
2041 0xffff,
2042 UserRef::default().into()
2043 ),
2044 error!(EINVAL)
2045 );
2046 })
2047 .await;
2048 }
2049
2050 #[::fuchsia::test]
2052 async fn test_wait4_options() {
2053 spawn_kernel_and_run(async |locked, current_task| {
2054 let id = 1;
2055 assert_eq!(
2056 sys_wait4(
2057 locked,
2058 ¤t_task,
2059 id,
2060 UserRef::default(),
2061 WEXITED,
2062 RUsagePtr::null(current_task)
2063 ),
2064 error!(EINVAL)
2065 );
2066 assert_eq!(
2067 sys_wait4(
2068 locked,
2069 ¤t_task,
2070 id,
2071 UserRef::default(),
2072 WNOWAIT,
2073 RUsagePtr::null(current_task)
2074 ),
2075 error!(EINVAL)
2076 );
2077 assert_eq!(
2078 sys_wait4(
2079 locked,
2080 ¤t_task,
2081 id,
2082 UserRef::default(),
2083 0xffff,
2084 RUsagePtr::null(current_task)
2085 ),
2086 error!(EINVAL)
2087 );
2088 })
2089 .await;
2090 }
2091
2092 #[::fuchsia::test]
2093 async fn test_echild_when_no_zombie() {
2094 spawn_kernel_and_run(async |locked, current_task| {
2095 assert!(
2097 sys_kill(
2098 locked,
2099 ¤t_task,
2100 current_task.get_pid(),
2101 UncheckedSignal::from(SIGCHLD)
2102 )
2103 .is_ok()
2104 );
2105 assert_eq!(
2108 wait_on_pid(
2109 locked,
2110 ¤t_task,
2111 &ProcessSelector::Any,
2112 &WaitingOptions::new_for_wait4(0).expect("WaitingOptions")
2113 ),
2114 error!(ECHILD)
2115 );
2116 })
2117 .await;
2118 }
2119
2120 #[::fuchsia::test]
2121 async fn test_no_error_when_zombie() {
2122 spawn_kernel_and_run(async |locked, current_task| {
2123 let child = current_task.clone_task_for_test(locked, 0, Some(SIGCHLD));
2124 let expected_result = WaitResult {
2125 pid: child.tid,
2126 uid: 0,
2127 exit_info: ProcessExitInfo {
2128 status: ExitStatus::Exit(1),
2129 exit_signal: Some(SIGCHLD),
2130 },
2131 time_stats: Default::default(),
2132 };
2133 child.thread_group().exit(locked, ExitStatus::Exit(1), None);
2134 std::mem::drop(child);
2135
2136 assert_eq!(
2137 wait_on_pid(
2138 locked,
2139 ¤t_task,
2140 &ProcessSelector::Any,
2141 &WaitingOptions::new_for_wait4(0).expect("WaitingOptions")
2142 ),
2143 Ok(Some(expected_result))
2144 );
2145 })
2146 .await;
2147 }
2148
    /// Waits on a child that exits only after the parent has blocked in
    /// wait_on_pid, exercising the waiter's wake-up path.
    #[::fuchsia::test]
    async fn test_waiting_for_child() {
        spawn_kernel_and_run(async |locked, task| {
            let child = task
                .clone_task(
                    locked,
                    0,
                    Some(SIGCHLD),
                    UserRef::default(),
                    UserRef::default(),
                    UserRef::default(),
                )
                .expect("clone_task");

            // Nothing has exited yet, so a non-blocking wait returns no result.
            assert_eq!(
                wait_on_pid(
                    locked,
                    &task,
                    &ProcessSelector::Any,
                    &WaitingOptions::new_for_wait4(WNOHANG).expect("WaitingOptions")
                ),
                Ok(None)
            );

            // Exit the child from another thread once the parent has blocked.
            let thread = std::thread::spawn({
                let task = task.weak_task();
                move || {
                    #[allow(
                        clippy::undocumented_unsafe_blocks,
                        reason = "Force documented unsafe blocks in Starnix"
                    )]
                    let locked = unsafe { Unlocked::new() };
                    let task = task.upgrade().expect("task must be alive");
                    let child: AutoReleasableTask = child.into();
                    // Spin until the parent is parked in the blocking wait below.
                    while !task.read().is_blocked() {
                        std::thread::sleep(std::time::Duration::from_millis(10));
                    }
                    child.thread_group().exit(locked, ExitStatus::Exit(0), None);
                    child.tid
                }
            });

            // Blocking wait: must resume once the helper thread exits the child.
            let waited_child = wait_on_pid(
                locked,
                &task,
                &ProcessSelector::Any,
                &WaitingOptions::new_for_wait4(0).expect("WaitingOptions"),
            )
            .expect("wait_on_pid")
            .unwrap();

            let child_id = thread.join().expect("join");
            assert_eq!(waited_child.pid, child_id);
        })
        .await;
    }
2210
    /// A pending signal with a user handler interrupts a blocking wait with
    /// ERESTARTSYS (so the syscall can be restarted after the handler runs).
    #[::fuchsia::test]
    async fn test_waiting_for_child_with_signal_pending() {
        spawn_kernel_and_run(async |locked, task| {
            // Install a (fake) user handler so SIGUSR1 interrupts the wait
            // instead of using the default disposition.
            task.thread_group().signal_actions.set(
                SIGUSR1,
                sigaction_t { sa_handler: uaddr { addr: 0xDEADBEEF }, ..sigaction_t::default() },
            );

            // A live child guarantees the wait would otherwise block.
            let _child = task.clone_task_for_test(locked, 0, Some(SIGCHLD));

            send_standard_signal(locked, &task, SignalInfo::kernel(SIGUSR1));

            let errno = wait_on_pid(
                locked,
                &task,
                &ProcessSelector::Any,
                &WaitingOptions::new_for_wait4(0).expect("WaitingOptions"),
            )
            .expect_err("wait_on_pid");
            assert_eq!(errno, ERESTARTSYS);
        })
        .await;
    }
2238
2239 #[::fuchsia::test]
2240 async fn test_sigkill() {
2241 spawn_kernel_and_run(async |locked, current_task| {
2242 let mut child = current_task.clone_task_for_test(locked, 0, Some(SIGCHLD));
2243
2244 send_standard_signal(locked, &child, SignalInfo::kernel(SIGKILL));
2246 dequeue_signal_for_test(locked, &mut child);
2247 std::mem::drop(child);
2248
2249 let address = map_memory(
2251 locked,
2252 ¤t_task,
2253 UserAddress::default(),
2254 std::mem::size_of::<i32>() as u64,
2255 );
2256 let address_ref = UserRef::<i32>::new(address);
2257 sys_wait4(locked, ¤t_task, -1, address_ref, 0, RUsagePtr::null(current_task))
2258 .expect("wait4");
2259 let wstatus = current_task.read_object(address_ref).expect("read memory");
2260 assert_eq!(wstatus, SIGKILL.number() as i32);
2261 })
2262 .await;
2263 }
2264
2265 async fn test_exit_status_for_signal(
2266 sig: Signal,
2267 wait_status: i32,
2268 exit_signal: Option<Signal>,
2269 ) {
2270 spawn_kernel_and_run(async move |locked, current_task| {
2271 let mut child = current_task.clone_task_for_test(locked, 0, exit_signal);
2272
2273 send_standard_signal(locked, &child, SignalInfo::kernel(sig));
2275 dequeue_signal_for_test(locked, &mut child);
2276 std::mem::drop(child);
2277
2278 let address = map_memory(
2280 locked,
2281 ¤t_task,
2282 UserAddress::default(),
2283 std::mem::size_of::<i32>() as u64,
2284 );
2285 let address_ref = UserRef::<i32>::new(address);
2286 sys_wait4(locked, ¤t_task, -1, address_ref, 0, RUsagePtr::null(current_task))
2287 .expect("wait4");
2288 let wstatus = current_task.read_object(address_ref).expect("read memory");
2289 assert_eq!(wstatus, wait_status);
2290 })
2291 .await;
2292 }
2293
2294 #[::fuchsia::test]
2295 async fn test_exit_status() {
2296 test_exit_status_for_signal(SIGTERM, SIGTERM.number() as i32, Some(SIGCHLD)).await;
2298 test_exit_status_for_signal(SIGSEGV, (SIGSEGV.number() as i32) | 0x80, Some(SIGCHLD)).await;
2300 }
2301
2302 #[::fuchsia::test]
2303 async fn test_wait4_by_pgid() {
2304 spawn_kernel_and_run(async |locked, current_task| {
2305 let child1 = current_task.clone_task_for_test(locked, 0, Some(SIGCHLD));
2306 let child1_pid = child1.tid;
2307 child1.thread_group().exit(locked, ExitStatus::Exit(42), None);
2308 std::mem::drop(child1);
2309 let child2 = current_task.clone_task_for_test(locked, 0, Some(SIGCHLD));
2310 child2.thread_group().setsid(locked).expect("setsid");
2311 let child2_pid = child2.tid;
2312 child2.thread_group().exit(locked, ExitStatus::Exit(42), None);
2313 std::mem::drop(child2);
2314
2315 assert_eq!(
2316 sys_wait4(
2317 locked,
2318 ¤t_task,
2319 -child2_pid,
2320 UserRef::default(),
2321 0,
2322 RUsagePtr::null(current_task)
2323 ),
2324 Ok(child2_pid)
2325 );
2326 assert_eq!(
2327 sys_wait4(
2328 locked,
2329 ¤t_task,
2330 0,
2331 UserRef::default(),
2332 0,
2333 RUsagePtr::null(current_task)
2334 ),
2335 Ok(child1_pid)
2336 );
2337 })
2338 .await;
2339 }
2340
2341 #[::fuchsia::test]
2342 async fn test_waitid_by_pgid() {
2343 spawn_kernel_and_run(async |locked, current_task| {
2344 let child1 = current_task.clone_task_for_test(locked, 0, Some(SIGCHLD));
2345 let child1_pid = child1.tid;
2346 child1.thread_group().exit(locked, ExitStatus::Exit(42), None);
2347 std::mem::drop(child1);
2348 let child2 = current_task.clone_task_for_test(locked, 0, Some(SIGCHLD));
2349 child2.thread_group().setsid(locked).expect("setsid");
2350 let child2_pid = child2.tid;
2351 child2.thread_group().exit(locked, ExitStatus::Exit(42), None);
2352 std::mem::drop(child2);
2353
2354 let address: UserRef<uapi::siginfo_t> =
2355 map_memory(locked, ¤t_task, UserAddress::default(), *PAGE_SIZE).into();
2356 assert_eq!(
2357 sys_waitid(
2358 locked,
2359 ¤t_task,
2360 P_PGID,
2361 child2_pid,
2362 address.into(),
2363 WEXITED,
2364 UserRef::default().into()
2365 ),
2366 Ok(())
2367 );
2368 assert_eq!(current_task.thread_group().read().zombie_children[0].pid(), child1_pid);
2370
2371 assert_eq!(
2372 sys_waitid(
2373 locked,
2374 ¤t_task,
2375 P_PGID,
2376 0,
2377 address.into(),
2378 WEXITED,
2379 UserRef::default().into()
2380 ),
2381 Ok(())
2382 );
2383 })
2384 .await;
2385 }
2386
2387 #[::fuchsia::test]
2388 async fn test_sigqueue() {
2389 spawn_kernel_and_run(async |locked, current_task| {
2390 let current_uid = current_task.current_creds().uid;
2391 let current_pid = current_task.get_pid();
2392
2393 const TEST_VALUE: u64 = 101;
2394
2395 const ARCH64_SI_HEADER_SIZE: usize = SI_HEADER_SIZE + 4;
2397 const PID_DATA_OFFSET: usize = ARCH64_SI_HEADER_SIZE;
2399 const UID_DATA_OFFSET: usize = ARCH64_SI_HEADER_SIZE + 4;
2400 const VALUE_DATA_OFFSET: usize = ARCH64_SI_HEADER_SIZE + 8;
2401
2402 let mut data = vec![0u8; SI_MAX_SIZE_AS_USIZE];
2403 let header = SignalInfoHeader {
2404 signo: SIGIO.number(),
2405 code: SI_QUEUE,
2406 ..SignalInfoHeader::default()
2407 };
2408 let _ = header.write_to(&mut data[..SI_HEADER_SIZE]);
2409 data[PID_DATA_OFFSET..PID_DATA_OFFSET + 4].copy_from_slice(¤t_pid.to_ne_bytes());
2410 data[UID_DATA_OFFSET..UID_DATA_OFFSET + 4].copy_from_slice(¤t_uid.to_ne_bytes());
2411 data[VALUE_DATA_OFFSET..VALUE_DATA_OFFSET + 8]
2412 .copy_from_slice(&TEST_VALUE.to_ne_bytes());
2413
2414 let addr = map_memory(locked, ¤t_task, UserAddress::default(), *PAGE_SIZE);
2415 current_task.write_memory(addr, &data).unwrap();
2416 let second_current = create_task(locked, current_task.kernel(), "second task");
2417 let second_pid = second_current.get_pid();
2418 let second_tid = second_current.get_tid();
2419 assert_eq!(second_current.read().queued_signal_count(SIGIO), 0);
2420
2421 assert_eq!(
2422 sys_rt_tgsigqueueinfo(
2423 locked,
2424 ¤t_task,
2425 second_pid,
2426 second_tid,
2427 UncheckedSignal::from(SIGIO),
2428 addr
2429 ),
2430 Ok(())
2431 );
2432 assert_eq!(second_current.read().queued_signal_count(SIGIO), 1);
2433
2434 let signal = SignalInfo::with_detail(
2435 SIGIO,
2436 SI_QUEUE,
2437 SignalDetail::Kill {
2438 pid: current_task.thread_group().leader,
2439 uid: current_task.current_creds().uid,
2440 },
2441 );
2442 let queued_signal = second_current.write().take_specific_signal(signal);
2443 if let Some(sig) = queued_signal {
2444 assert_eq!(sig.signal, SIGIO);
2445 assert_eq!(sig.errno, 0);
2446 assert_eq!(sig.code, SI_QUEUE);
2447 if let SignalDetail::Raw { data } = sig.detail {
2448 let offset_pid = PID_DATA_OFFSET - SI_HEADER_SIZE;
2450 let offset_uid = UID_DATA_OFFSET - SI_HEADER_SIZE;
2451 let offset_value = VALUE_DATA_OFFSET - SI_HEADER_SIZE;
2452 let pid =
2453 pid_t::from_ne_bytes(data[offset_pid..offset_pid + 4].try_into().unwrap());
2454 let uid =
2455 uid_t::from_ne_bytes(data[offset_uid..offset_uid + 4].try_into().unwrap());
2456 let value = u64::from_ne_bytes(
2457 data[offset_value..offset_value + 8].try_into().unwrap(),
2458 );
2459 assert_eq!(pid, current_pid);
2460 assert_eq!(uid, current_uid);
2461 assert_eq!(value, TEST_VALUE);
2462 } else {
2463 panic!("incorrect signal detail");
2464 }
2465 } else {
2466 panic!("expected a queued signal");
2467 }
2468 })
2469 .await;
2470 }
2471
2472 #[::fuchsia::test]
2473 async fn test_signalfd_filters_signals() {
2474 spawn_kernel_and_run(async |locked, current_task| {
2475 let memory_for_masks =
2476 map_memory(locked, ¤t_task, UserAddress::default(), *PAGE_SIZE);
2477
2478 let term_int_mask = SigSet::from(SIGTERM) | SigSet::from(SIGINT);
2480 let term_int_mask_addr = UserRef::<SigSet>::new(memory_for_masks);
2481 current_task
2482 .write_object(term_int_mask_addr, &term_int_mask)
2483 .expect("failed to write mask");
2484 let sfd_term_int = sys_signalfd4(
2485 locked,
2486 ¤t_task,
2487 FdNumber::from_raw(-1),
2488 term_int_mask_addr,
2489 std::mem::size_of::<SigSet>(),
2490 0,
2491 )
2492 .expect("failed to create SIGTERM/SIGINT signalfd");
2493
2494 let sigchld_mask = SigSet::from(SIGCHLD);
2496 let sigchld_mask_addr =
2497 UserRef::<SigSet>::new((memory_for_masks + std::mem::size_of::<SigSet>()).unwrap());
2498 current_task
2499 .write_object(sigchld_mask_addr, &sigchld_mask)
2500 .expect("failed to write mask");
2501 let sfd_chld = sys_signalfd4(
2502 locked,
2503 ¤t_task,
2504 FdNumber::from_raw(-1),
2505 sigchld_mask_addr,
2506 std::mem::size_of::<SigSet>(),
2507 0,
2508 )
2509 .expect("failed to create SIGCHLD signalfd");
2510
2511 let child = current_task.clone_task_for_test(locked, 0, Some(SIGCHLD));
2513 child.thread_group().exit(locked, ExitStatus::Exit(1), None);
2514 std::mem::drop(child);
2515
2516 let sfd_term_int_file = current_task
2518 .live()
2519 .files
2520 .get(sfd_term_int)
2521 .expect("failed to get sfd_term_int file");
2522 let sfd_chld_file =
2523 current_task.get_file(sfd_chld).expect("failed to get sfd_chld file");
2524
2525 let term_int_events = sfd_term_int_file
2526 .query_events(locked, ¤t_task)
2527 .expect("failed to query sfd_term_int events");
2528 let chld_events = sfd_chld_file
2529 .query_events(locked, ¤t_task)
2530 .expect("failed to query sfd_chld events");
2531
2532 assert!(!term_int_events.contains(FdEvents::POLLIN));
2533 assert!(chld_events.contains(FdEvents::POLLIN));
2534 })
2535 .await;
2536 }
2537
2538 #[::fuchsia::test]
2539 async fn test_signalfd_filters_signals_async() {
2540 spawn_kernel_and_run(async |locked, current_task| {
2541 let memory_for_masks =
2542 map_memory(locked, ¤t_task, UserAddress::default(), *PAGE_SIZE);
2543
2544 let term_int_mask = SigSet::from(SIGTERM) | SigSet::from(SIGINT);
2546 let term_int_mask_addr = UserRef::<SigSet>::new(memory_for_masks);
2547 current_task
2548 .write_object(term_int_mask_addr, &term_int_mask)
2549 .expect("failed to write mask");
2550 let sfd_term_int = sys_signalfd4(
2551 locked,
2552 ¤t_task,
2553 FdNumber::from_raw(-1),
2554 term_int_mask_addr,
2555 std::mem::size_of::<SigSet>(),
2556 0,
2557 )
2558 .expect("failed to create SIGTERM/SIGINT signalfd");
2559
2560 let sigchld_mask = SigSet::from(SIGCHLD);
2562 let sigchld_mask_addr =
2563 UserRef::<SigSet>::new((memory_for_masks + std::mem::size_of::<SigSet>()).unwrap());
2564 current_task
2565 .write_object(sigchld_mask_addr, &sigchld_mask)
2566 .expect("failed to write mask");
2567 let sfd_chld = sys_signalfd4(
2568 locked,
2569 ¤t_task,
2570 FdNumber::from_raw(-1),
2571 sigchld_mask_addr,
2572 std::mem::size_of::<SigSet>(),
2573 0,
2574 )
2575 .expect("failed to create SIGCHLD signalfd");
2576
2577 let waiter = Waiter::new();
2579 let ready_items = Arc::new(Mutex::new(VecDeque::new()));
2580
2581 let sfd_term_int_file = current_task
2582 .live()
2583 .files
2584 .get(sfd_term_int)
2585 .expect("failed to get sfd_term_int file");
2586 let sfd_chld_file =
2587 current_task.get_file(sfd_chld).expect("failed to get sfd_chld file");
2588
2589 sfd_term_int_file
2590 .wait_async(
2591 locked,
2592 ¤t_task,
2593 &waiter,
2594 FdEvents::POLLIN,
2595 EventHandler::Enqueue {
2596 key: sfd_term_int.into(),
2597 queue: ready_items.clone(),
2598 sought_events: FdEvents::POLLIN,
2599 },
2600 )
2601 .expect("failed to wait on sfd_term_int");
2602
2603 sfd_chld_file
2604 .wait_async(
2605 locked,
2606 ¤t_task,
2607 &waiter,
2608 FdEvents::POLLIN,
2609 EventHandler::Enqueue {
2610 key: sfd_chld.into(),
2611 queue: ready_items.clone(),
2612 sought_events: FdEvents::POLLIN,
2613 },
2614 )
2615 .expect("failed to wait on sfd_chld");
2616
2617 let sigchld_mask_ref = UserRef::<SigSet>::new(memory_for_masks);
2619 current_task
2620 .write_object(sigchld_mask_ref, &sigchld_mask)
2621 .expect("failed to write mask");
2622 sys_rt_sigprocmask(
2623 locked,
2624 ¤t_task,
2625 SIG_BLOCK,
2626 sigchld_mask_ref,
2627 UserRef::default(),
2628 std::mem::size_of::<SigSet>(),
2629 )
2630 .expect("failed to block SIGCHLD");
2631
2632 let child = current_task.clone_task_for_test(locked, 0, Some(SIGCHLD));
2634 child.thread_group().exit(locked, ExitStatus::Exit(1), None);
2635 std::mem::drop(child);
2636
2637 waiter.wait(locked, ¤t_task).expect("failed to wait");
2639
2640 let ready_items = ready_items.lock();
2642 assert_eq!(ready_items.len(), 1);
2643 assert_eq!(ready_items[0].key, sfd_chld.into());
2644 })
2645 .await;
2646 }
2647}