pub use super::signal_handling::sys_restart_syscall;
use super::signalfd::SignalFd;
use crate::mm::MemoryAccessorExt;
use crate::security;
use crate::signals::{
    IntoSignalInfoOptions, SI_MAX_SIZE_AS_USIZE, SignalDetail, SignalInfo, UncheckedSignalInfo,
    restore_from_signal_handler, send_signal,
};
use crate::task::{
    CurrentTask, PidTable, ProcessEntryRef, ProcessSelector, Task, TaskMutableState, ThreadGroup,
    ThreadGroupLifecycleWaitValue, WaitResult, WaitableChildResult, Waiter,
};
use crate::vfs::{FdFlags, FdNumber};
use starnix_sync::{LockBefore, RwLockReadGuard, ThreadGroupLimits};
use starnix_uapi::user_address::{ArchSpecific, MultiArchUserRef};
use starnix_uapi::{tid_t, uapi};

use starnix_logging::track_stub;
use starnix_sync::{Locked, Unlocked};
use starnix_syscalls::SyscallResult;
use starnix_types::time::{duration_from_timespec, timeval_from_duration};
use starnix_uapi::errors::{EINTR, ETIMEDOUT, Errno, ErrnoResultExt};
use starnix_uapi::open_flags::OpenFlags;
use starnix_uapi::signals::{SigSet, Signal, UNBLOCKABLE_SIGNALS, UncheckedSignal};
use starnix_uapi::user_address::{UserAddress, UserRef};
use starnix_uapi::{
    __WALL, __WCLONE, P_ALL, P_PGID, P_PID, P_PIDFD, SFD_CLOEXEC, SFD_NONBLOCK, SI_TKILL,
    SIG_BLOCK, SIG_SETMASK, SIG_UNBLOCK, SS_AUTODISARM, SS_DISABLE, SS_ONSTACK, WCONTINUED,
    WEXITED, WNOHANG, WNOWAIT, WSTOPPED, WUNTRACED, errno, error, pid_t, rusage, sigaltstack,
};
use static_assertions::const_assert_eq;
use zerocopy::{FromBytes, Immutable, IntoBytes};

pub type RUsagePtr = MultiArchUserRef<uapi::rusage, uapi::arch32::rusage>;
type SigAction64Ptr = MultiArchUserRef<uapi::sigaction_t, uapi::arch32::sigaction64_t>;
type SigActionPtr = MultiArchUserRef<uapi::sigaction_t, uapi::arch32::sigaction_t>;

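/// `rt_sigaction()`: examines and/or changes the action taken for `signum`.
///
/// When `user_action` is non-null the new action is installed; when `user_old_action` is
/// non-null the previous action is written back to userspace. A `sigset_size` that does not
/// match the kernel's sigset layout fails with EINVAL.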
pub fn sys_rt_sigaction(
    _locked: &mut Locked<Unlocked>,
    current_task: &CurrentTask,
    signum: UncheckedSignal,
    user_action: SigAction64Ptr,
    user_old_action: SigAction64Ptr,
    sigset_size: usize,
) -> Result<(), Errno> {
    if user_action.is_arch32() && sigset_size == std::mem::size_of::<uapi::arch32::sigset_t>() {
        let user_action = SigActionPtr::from_32(user_action.addr().into());
        let user_old_action = SigActionPtr::from_32(user_old_action.addr().into());
        return rt_sigaction(current_task, signum, user_action, user_old_action);
    }

    if sigset_size != std::mem::size_of::<uapi::sigset_t>() {
        return error!(EINVAL);
    }
    rt_sigaction(current_task, signum, user_action, user_old_action)
}

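/// Common implementation of `rt_sigaction` shared by the native entry point and the arch32
/// compatibility path. Installing a new action for an unblockable signal fails with EINVAL.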
fn rt_sigaction<Arch32SigAction>(
    current_task: &CurrentTask,
    signum: UncheckedSignal,
    user_action: MultiArchUserRef<uapi::sigaction_t, Arch32SigAction>,
    user_old_action: MultiArchUserRef<uapi::sigaction_t, Arch32SigAction>,
) -> Result<(), Errno>
where
    Arch32SigAction:
        IntoBytes + FromBytes + Immutable + TryFrom<uapi::sigaction_t> + TryInto<uapi::sigaction_t>,
{
    let signal = Signal::try_from(signum)?;

    let new_signal_action = if !user_action.is_null() {
        if signal.is_unblockable() {
            return error!(EINVAL);
        }

        let signal_action = current_task.read_multi_arch_object(user_action)?;
        Some(signal_action)
    } else {
        None
    };

    let signal_actions = &current_task.thread_group().signal_actions;
    let old_action = if let Some(new_signal_action) = new_signal_action {
        signal_actions.set(signal, new_signal_action)
    } else {
        signal_actions.get(signal)
    };

    if !user_old_action.is_null() {
        current_task.write_multi_arch_object(user_old_action, old_action)?;
    }

    Ok(())
}

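/// `rt_sigpending()`: writes the set of signals currently pending for the calling thread to
/// `set`.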
pub fn sys_rt_sigpending(
    _locked: &mut Locked<Unlocked>,
    current_task: &CurrentTask,
    set: UserRef<SigSet>,
    sigset_size: usize,
) -> Result<(), Errno> {
    if sigset_size != std::mem::size_of::<SigSet>() {
        return error!(EINVAL);
    }

    let signals = current_task.read().pending_signals();
    current_task.write_object(set, &signals)?;
    Ok(())
}

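/// `rt_sigprocmask()`: fetches and/or changes the signal mask of the calling thread according
/// to `how` (`SIG_BLOCK`, `SIG_UNBLOCK`, or `SIG_SETMASK`).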
pub fn sys_rt_sigprocmask(
    _locked: &mut Locked<Unlocked>,
    current_task: &CurrentTask,
    how: u32,
    user_set: UserRef<SigSet>,
    user_old_set: UserRef<SigSet>,
    sigset_size: usize,
) -> Result<(), Errno> {
    if sigset_size != std::mem::size_of::<SigSet>() {
        return error!(EINVAL);
    }
    match how {
        SIG_BLOCK | SIG_UNBLOCK | SIG_SETMASK => (),
        _ => return error!(EINVAL),
    };

    let mut new_mask = SigSet::default();
    if !user_set.is_null() {
        new_mask = current_task.read_object(user_set)?;
    }

    let mut state = current_task.write();
    let signal_mask = state.signal_mask();
    if !user_old_set.is_null() {
        current_task.write_object(user_old_set, &signal_mask)?;
    }

    if user_set.is_null() {
        return Ok(());
    }

    let signal_mask = match how {
        SIG_BLOCK => signal_mask | new_mask,
        SIG_UNBLOCK => signal_mask & !new_mask,
        SIG_SETMASK => new_mask,
        _ => return error!(EINVAL),
    };
    state.set_signal_mask(signal_mask);

    Ok(())
}

type SigAltStackPtr = MultiArchUserRef<uapi::sigaltstack, uapi::arch32::sigaltstack>;

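/// `sigaltstack()`: queries and/or configures the alternate signal stack. Changing the stack
/// while currently executing on it fails with EPERM.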
pub fn sys_sigaltstack(
    _locked: &mut Locked<Unlocked>,
    current_task: &CurrentTask,
    user_ss: SigAltStackPtr,
    user_old_ss: SigAltStackPtr,
) -> Result<(), Errno> {
    let stack_pointer_register = current_task.thread_state.registers.stack_pointer_register();
    let mut state = current_task.write();
    let on_signal_stack = state.on_signal_stack(stack_pointer_register);

    let mut ss = sigaltstack::default();
    if !user_ss.is_null() {
        if on_signal_stack {
            return error!(EPERM);
        }
        ss = current_task.read_multi_arch_object(user_ss)?;
        if (ss.ss_flags & !((SS_AUTODISARM | SS_DISABLE) as i32)) != 0 {
            return error!(EINVAL);
        }
        let min_stack_size =
            if current_task.is_arch32() { uapi::arch32::MINSIGSTKSZ } else { uapi::MINSIGSTKSZ };
        if ss.ss_flags & (SS_DISABLE as i32) == 0 && ss.ss_size < min_stack_size as u64 {
            return error!(ENOMEM);
        }
    }

    if !user_old_ss.is_null() {
        let mut old_ss = match state.sigaltstack() {
            Some(old_ss) => old_ss,
            None => sigaltstack { ss_flags: SS_DISABLE as i32, ..sigaltstack::default() },
        };
        if on_signal_stack {
            old_ss.ss_flags = SS_ONSTACK as i32;
        }
        current_task.write_multi_arch_object(user_old_ss, old_ss)?;
    }

    if !user_ss.is_null() {
        if ss.ss_flags & (SS_DISABLE as i32) != 0 {
            state.set_sigaltstack(None);
        } else {
            state.set_sigaltstack(Some(ss));
        }
    }

    Ok(())
}

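/// `rt_sigsuspend()`: temporarily replaces the signal mask with `user_mask` and suspends the
/// task until a signal is delivered.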
pub fn sys_rt_sigsuspend(
    locked: &mut Locked<Unlocked>,
    current_task: &mut CurrentTask,
    user_mask: UserRef<SigSet>,
    sigset_size: usize,
) -> Result<(), Errno> {
    if sigset_size != std::mem::size_of::<SigSet>() {
        return error!(EINVAL);
    }
    let mask = current_task.read_object(user_mask)?;

    let waiter = Waiter::new();
    current_task
        .wait_with_temporary_mask(locked, mask, |locked, current_task| {
            waiter.wait(locked, current_task)
        })
        .map_eintr(|| errno!(ERESTARTNOHAND))
}

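/// `rt_sigtimedwait()`: suspends execution until one of the signals in `set_addr` becomes
/// pending, or until the timeout expires, in which case EAGAIN is returned. A null
/// `timeout_addr` waits indefinitely.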
pub fn sys_rt_sigtimedwait(
    locked: &mut Locked<Unlocked>,
    current_task: &mut CurrentTask,
    set_addr: UserRef<SigSet>,
    siginfo_addr: MultiArchUserRef<uapi::siginfo_t, uapi::arch32::siginfo_t>,
    timeout_addr: MultiArchUserRef<uapi::timespec, uapi::arch32::timespec>,
    sigset_size: usize,
) -> Result<Signal, Errno> {
    if sigset_size != std::mem::size_of::<SigSet>() {
        return error!(EINVAL);
    }

    let set = current_task.read_object(set_addr)?;
    let unblock = set & !UNBLOCKABLE_SIGNALS;
    let deadline = if timeout_addr.is_null() {
        zx::MonotonicInstant::INFINITE
    } else {
        let timeout = current_task.read_multi_arch_object(timeout_addr)?;
        zx::MonotonicInstant::after(duration_from_timespec(timeout)?)
    };

    let signal_info = loop {
        let waiter;

        {
            let mut task_state = current_task.write();
            if let Some(signal) = task_state.take_signal_with_mask(!unblock) {
                break signal;
            }

            waiter = Waiter::new();
            task_state.wait_on_signal(&waiter);
        }

        let tmp_mask = current_task.read().signal_mask() & !unblock;

        let waiter_result =
            current_task.wait_with_temporary_mask(locked, tmp_mask, |locked, current_task| {
                waiter.wait_until(locked, current_task, deadline)
            });

        current_task.write().restore_signal_mask();

        if let Err(e) = waiter_result {
            if e == EINTR {
                if let Some(signal) = current_task.write().take_signal_with_mask(!unblock) {
                    break signal;
                }
            } else if e == ETIMEDOUT {
                return error!(EAGAIN);
            }

            return Err(e);
        }
    };

    if !siginfo_addr.is_null() {
        signal_info.write(current_task, siginfo_addr)?;
    }

    Ok(signal_info.signal)
}

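/// `signalfd4()`: creates a new signalfd for `mask` when `fd` is -1, or updates the mask of
/// the existing signalfd referenced by `fd`.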
pub fn sys_signalfd4(
    locked: &mut Locked<Unlocked>,
    current_task: &CurrentTask,
    fd: FdNumber,
    mask_addr: UserRef<SigSet>,
    mask_size: usize,
    flags: u32,
) -> Result<FdNumber, Errno> {
    if flags & !(SFD_CLOEXEC | SFD_NONBLOCK) != 0 {
        return error!(EINVAL);
    }
    if mask_size != std::mem::size_of::<SigSet>() {
        return error!(EINVAL);
    }
    let mask = current_task.read_object(mask_addr)?;

    if fd.raw() != -1 {
        let file = current_task.files.get(fd)?;
        let file = file.downcast_file::<SignalFd>().ok_or_else(|| errno!(EINVAL))?;
        file.set_mask(mask);
        Ok(fd)
    } else {
        let signalfd = SignalFd::new_file(locked, current_task, mask, flags);
        let flags = if flags & SFD_CLOEXEC != 0 { FdFlags::CLOEXEC } else { FdFlags::empty() };
        let fd = current_task.add_file(locked, signalfd, flags)?;
        Ok(fd)
    }
}

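/// Checks that `current_task` may signal `target` and then sends `unchecked_signal` with
/// `si_code`. The null signal (0) performs only the permission check.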
#[track_caller]
fn send_unchecked_signal<L>(
    locked: &mut Locked<L>,
    current_task: &CurrentTask,
    target: &Task,
    unchecked_signal: UncheckedSignal,
    si_code: i32,
) -> Result<(), Errno>
where
    L: LockBefore<ThreadGroupLimits>,
{
    current_task.can_signal(&target, unchecked_signal)?;

    if unchecked_signal.is_zero() {
        return Ok(());
    }

    let signal = Signal::try_from(unchecked_signal)?;
    security::check_signal_access(current_task, &target, signal)?;

    send_signal(
        locked,
        target,
        SignalInfo {
            code: si_code,
            detail: SignalDetail::Kill {
                pid: current_task.thread_group().leader,
                uid: current_task.current_creds().uid,
            },
            ..SignalInfo::default(signal)
        },
    )
}

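/// Checks that `current_task` may signal `target` and then sends the signal described by the
/// `siginfo_t` at `siginfo_ref`. Forging a kernel-generated `si_code` for a different process
/// is rejected with EINVAL.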
#[track_caller]
fn send_unchecked_signal_info<L>(
    locked: &mut Locked<L>,
    current_task: &CurrentTask,
    target: &Task,
    unchecked_signal: UncheckedSignal,
    siginfo_ref: UserAddress,
) -> Result<(), Errno>
where
    L: LockBefore<ThreadGroupLimits>,
{
    current_task.can_signal(&target, unchecked_signal)?;

    if unchecked_signal.is_zero() {
        current_task.read_memory_to_array::<SI_MAX_SIZE_AS_USIZE>(siginfo_ref)?;
        return Ok(());
    }

    let signal = Signal::try_from(unchecked_signal)?;
    security::check_signal_access(current_task, &target, signal)?;

    let siginfo = UncheckedSignalInfo::read_from_siginfo(current_task, siginfo_ref)?;
    if target.get_pid() != current_task.get_pid()
        && (siginfo.code() >= 0 || siginfo.code() == SI_TKILL)
    {
        return error!(EINVAL);
    }

    send_signal(locked, &target, siginfo.into_signal_info(signal, IntoSignalInfoOptions::None)?)
}

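/// `kill()`: sends a signal to a single process (`pid > 0`), to every process in a process
/// group (`pid == 0` or `pid < -1`), or to all signalable processes except the caller and init
/// (`pid == -1`).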
pub fn sys_kill(
    locked: &mut Locked<Unlocked>,
    current_task: &CurrentTask,
    pid: pid_t,
    unchecked_signal: UncheckedSignal,
) -> Result<(), Errno> {
    let pids = current_task.kernel().pids.read();
    match pid {
        pid if pid > 0 => {
            let target_thread_group = {
                match pids.get_process(pid) {
                    Some(ProcessEntryRef::Process(process)) => process,
                    Some(ProcessEntryRef::Zombie(_zombie)) => return Ok(()),
                    None => {
                        let weak_task = pids.get_task(pid);
                        let task = Task::from_weak(&weak_task)?;
                        task.thread_group().clone()
                    }
                }
            };

            target_thread_group.send_signal_unchecked(current_task, unchecked_signal)?;
        }
        pid if pid == -1 => {
            let thread_groups = pids.get_thread_groups();
            signal_thread_groups(
                current_task,
                unchecked_signal,
                thread_groups.into_iter().filter(|thread_group| {
                    if *current_task.thread_group() == *thread_group {
                        return false;
                    }
                    if thread_group.leader == 1 {
                        return false;
                    }
                    true
                }),
            )?;
        }
        _ => {
            let process_group_id = match pid {
                0 => current_task.thread_group().read().process_group.leader,
                _ => negate_pid(pid)?,
            };

            let process_group = pids.get_process_group(process_group_id);
            let thread_groups = process_group
                .iter()
                .flat_map(|pg| pg.read(locked).thread_groups().collect::<Vec<_>>());
            signal_thread_groups(current_task, unchecked_signal, thread_groups)?;
        }
    };

    Ok(())
}

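/// Verifies that `task` is a member of the thread group identified by `tgid`.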
fn verify_tgid_for_task(
    task: &Task,
    tgid: pid_t,
    pids: &RwLockReadGuard<'_, PidTable>,
) -> Result<(), Errno> {
    let thread_group = match pids.get_process(tgid) {
        Some(ProcessEntryRef::Process(proc)) => proc,
        Some(ProcessEntryRef::Zombie(_)) => return error!(EINVAL),
        None => return error!(ESRCH),
    };
    if *task.thread_group() != thread_group {
        return error!(EINVAL);
    }
    Ok(())
}

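/// `tkill()`: sends a signal to the thread with id `tid`.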
pub fn sys_tkill(
    locked: &mut Locked<Unlocked>,
    current_task: &CurrentTask,
    tid: tid_t,
    unchecked_signal: UncheckedSignal,
) -> Result<(), Errno> {
    if tid <= 0 {
        return error!(EINVAL);
    }
    let thread_weak = current_task.get_task(tid);
    let thread = Task::from_weak(&thread_weak)?;
    send_unchecked_signal(locked, current_task, &thread, unchecked_signal, SI_TKILL)
}

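/// `tgkill()`: sends a signal to the thread `tid` within the thread group `tgid`.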
pub fn sys_tgkill(
    locked: &mut Locked<Unlocked>,
    current_task: &CurrentTask,
    tgid: pid_t,
    tid: tid_t,
    unchecked_signal: UncheckedSignal,
) -> Result<(), Errno> {
    if tgid <= 0 || tid <= 0 {
        return error!(EINVAL);
    }
    let pids = current_task.kernel().pids.read();

    let weak_target = pids.get_task(tid);
    let thread = Task::from_weak(&weak_target)?;
    verify_tgid_for_task(&thread, tgid, &pids)?;

    send_unchecked_signal(locked, current_task, &thread, unchecked_signal, SI_TKILL)
}

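/// `rt_sigreturn()`: restores the register state that was saved before the signal handler ran.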
pub fn sys_rt_sigreturn(
    _locked: &mut Locked<Unlocked>,
    current_task: &mut CurrentTask,
) -> Result<SyscallResult, Errno> {
    restore_from_signal_handler(current_task)?;
    Ok(current_task.thread_state.registers.return_register().into())
}

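/// `rt_sigqueueinfo()`: queues a signal, along with caller-supplied `siginfo_t` data, to the
/// thread group `tgid`.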
pub fn sys_rt_sigqueueinfo(
    _locked: &mut Locked<Unlocked>,
    current_task: &CurrentTask,
    tgid: pid_t,
    unchecked_signal: UncheckedSignal,
    siginfo_ref: UserAddress,
) -> Result<(), Errno> {
    let weak_task = current_task.kernel().pids.read().get_task(tgid);
    let task = &Task::from_weak(&weak_task)?;
    task.thread_group().send_signal_unchecked_with_info(
        current_task,
        unchecked_signal,
        siginfo_ref,
        IntoSignalInfoOptions::None,
    )
}

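/// `rt_tgsigqueueinfo()`: queues a signal with caller-supplied `siginfo_t` data to the thread
/// `tid` within the thread group `tgid`.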
pub fn sys_rt_tgsigqueueinfo(
    locked: &mut Locked<Unlocked>,
    current_task: &CurrentTask,
    tgid: pid_t,
    tid: tid_t,
    unchecked_signal: UncheckedSignal,
    siginfo_ref: UserAddress,
) -> Result<(), Errno> {
    let pids = current_task.kernel().pids.read();

    let thread_weak = pids.get_task(tid);
    let task = Task::from_weak(&thread_weak)?;

    verify_tgid_for_task(&task, tgid, &pids)?;
    send_unchecked_signal_info(locked, current_task, &task, unchecked_signal, siginfo_ref)
}

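/// `pidfd_send_signal()`: sends a signal to the process referred to by `pidfd`. No flags are
/// currently supported.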
pub fn sys_pidfd_send_signal(
    _locked: &mut Locked<Unlocked>,
    current_task: &CurrentTask,
    pidfd: FdNumber,
    unchecked_signal: UncheckedSignal,
    siginfo_ref: UserAddress,
    flags: u32,
) -> Result<(), Errno> {
    if flags != 0 {
        return error!(EINVAL);
    }

    let file = current_task.files.get(pidfd)?;
    let target = file.as_thread_group_key()?;
    let target = target.upgrade().ok_or_else(|| errno!(ESRCH))?;

    if siginfo_ref.is_null() {
        target.send_signal_unchecked(current_task, unchecked_signal)
    } else {
        target.send_signal_unchecked_with_info(
            current_task,
            unchecked_signal,
            siginfo_ref,
            IntoSignalInfoOptions::CheckSigno,
        )
    }
}

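/// Sends a signal to each thread group in `thread_groups`. Succeeds if at least one signal was
/// sent, otherwise returns the last error, or ESRCH when the iterator was empty.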
#[track_caller]
fn signal_thread_groups<F>(
    current_task: &CurrentTask,
    unchecked_signal: UncheckedSignal,
    thread_groups: F,
) -> Result<(), Errno>
where
    F: IntoIterator<Item: AsRef<ThreadGroup>>,
{
    let mut last_error = None;
    let mut sent_signal = false;

    for thread_group in thread_groups.into_iter() {
        match thread_group.as_ref().send_signal_unchecked(current_task, unchecked_signal) {
            Ok(_) => sent_signal = true,
            Err(errno) => last_error = Some(errno),
        }
    }

    if sent_signal { Ok(()) } else { Err(last_error.unwrap_or_else(|| errno!(ESRCH))) }
}

/// The options that control the behavior of a `wait4()`/`waitid()` call.
#[derive(Debug)]
pub struct WaitingOptions {
    /// Wait for children that have exited (`WEXITED`).
    pub wait_for_exited: bool,
    /// Wait for children that have been stopped (`WSTOPPED`/`WUNTRACED`).
    pub wait_for_stopped: bool,
    /// Wait for children that have been continued (`WCONTINUED`).
    pub wait_for_continued: bool,
    /// Block until a child is waitable, i.e. `WNOHANG` was not set.
    pub block: bool,
    /// Leave the child in a waitable state (`WNOWAIT`).
    pub keep_waitable_state: bool,
    /// Wait for all children regardless of type (`__WALL`).
    pub wait_for_all: bool,
    /// Wait for "clone" children (`__WCLONE`).
    pub wait_for_clone: bool,
}

impl WaitingOptions {
    fn new(options: u32) -> Self {
        const_assert_eq!(WUNTRACED, WSTOPPED);
        Self {
            wait_for_exited: options & WEXITED > 0,
            wait_for_stopped: options & WSTOPPED > 0,
            wait_for_continued: options & WCONTINUED > 0,
            block: options & WNOHANG == 0,
            keep_waitable_state: options & WNOWAIT > 0,
            wait_for_all: options & __WALL > 0,
            wait_for_clone: options & __WCLONE > 0,
        }
    }

    /// Builds `WaitingOptions` from the flags of `waitid()`.
    pub fn new_for_waitid(options: u32) -> Result<Self, Errno> {
        if options & !(__WCLONE | __WALL | WNOHANG | WNOWAIT | WSTOPPED | WEXITED | WCONTINUED) != 0
        {
            track_stub!(TODO("https://fxbug.dev/322874788"), "waitid options", options);
            return error!(EINVAL);
        }
        if options & (WEXITED | WSTOPPED | WCONTINUED) == 0 {
            return error!(EINVAL);
        }
        Ok(Self::new(options))
    }

    /// Builds `WaitingOptions` from the flags of `wait4()`; `WEXITED` is implied.
    pub fn new_for_wait4(options: u32) -> Result<Self, Errno> {
        if options & !(__WCLONE | __WALL | WNOHANG | WUNTRACED | WCONTINUED) != 0 {
            track_stub!(TODO("https://fxbug.dev/322874017"), "wait4 options", options);
            return error!(EINVAL);
        }
        Ok(Self::new(options | WEXITED))
    }
}

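/// Waits on the process(es) matched by `selector`, returning the result of the wait, or
/// `Ok(None)` immediately when `options` do not request blocking. Returns ECHILD when there is
/// no child or tracee that could ever satisfy the wait.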
fn wait_on_pid(
    locked: &mut Locked<Unlocked>,
    current_task: &CurrentTask,
    selector: &ProcessSelector,
    options: &WaitingOptions,
) -> Result<Option<WaitResult>, Errno> {
    let waiter = Waiter::new();
    loop {
        {
            let mut pids = current_task.kernel().pids.write();
            if let Some(tracee) =
                current_task.thread_group().get_waitable_ptracee(selector, options, &mut pids)
            {
                return Ok(Some(tracee));
            }
            {
                let mut thread_group = current_task.thread_group().write();

                let mut has_waitable_tracee = false;
                let mut has_any_tracee = false;
                current_task.thread_group().get_ptracees_and(
                    selector,
                    &pids,
                    &mut |task: &Task, task_state: &TaskMutableState| {
                        if let Some(ptrace) = &task_state.ptrace {
                            has_any_tracee = true;
                            ptrace.tracer_waiters().wait_async(&waiter);
                            if ptrace.is_waitable(task.load_stopped(), options) {
                                has_waitable_tracee = true;
                            }
                        }
                    },
                );
                if has_waitable_tracee
                    || thread_group.zombie_ptracees.has_zombie_matching(&selector)
                {
                    continue;
                }
                match thread_group.get_waitable_child(selector, options, &mut pids) {
                    WaitableChildResult::ReadyNow(child) => {
                        return Ok(Some(child));
                    }
                    WaitableChildResult::ShouldWait => (),
                    WaitableChildResult::NoneFound => {
                        if !has_any_tracee {
                            return error!(ECHILD);
                        }
                    }
                }
                thread_group
                    .lifecycle_waiters
                    .wait_async_value(&waiter, ThreadGroupLifecycleWaitValue::ChildStatus);
            }
        }

        if !options.block {
            return Ok(None);
        }
        waiter.wait(locked, current_task).map_eintr(|| errno!(ERESTARTSYS))?;
    }
}

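/// `waitid()`: waits for a state change in a child (or, with `P_PIDFD`, the process behind a
/// pidfd) and reports it through `user_info`.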
pub fn sys_waitid(
    locked: &mut Locked<Unlocked>,
    current_task: &CurrentTask,
    id_type: u32,
    id: i32,
    user_info: MultiArchUserRef<uapi::siginfo_t, uapi::arch32::siginfo_t>,
    options: u32,
    user_rusage: RUsagePtr,
) -> Result<(), Errno> {
    let mut waiting_options = WaitingOptions::new_for_waitid(options)?;

    let task_selector = match id_type {
        P_PID => ProcessSelector::Pid(id),
        P_ALL => ProcessSelector::Any,
        P_PGID => ProcessSelector::Pgid(if id == 0 {
            current_task.thread_group().read().process_group.leader
        } else {
            id
        }),
        P_PIDFD => {
            let fd = FdNumber::from_raw(id);
            let file = current_task.files.get(fd)?;
            if file.flags().contains(OpenFlags::NONBLOCK) {
                waiting_options.block = false;
            }
            ProcessSelector::Process(file.as_thread_group_key()?)
        }
        _ => return error!(EINVAL),
    };

    if let Some(waitable_process) =
        wait_on_pid(locked, current_task, &task_selector, &waiting_options)?
    {
        if !user_rusage.is_null() {
            let usage = rusage {
                ru_utime: timeval_from_duration(waitable_process.time_stats.user_time),
                ru_stime: timeval_from_duration(waitable_process.time_stats.system_time),
                ..Default::default()
            };

            track_stub!(TODO("https://fxbug.dev/322874712"), "real rusage from waitid");
            current_task.write_multi_arch_object(user_rusage, usage)?;
        }

        if !user_info.is_null() {
            let siginfo = waitable_process.as_signal_info();
            siginfo.write(current_task, user_info)?;
        }
    } else if id_type == P_PIDFD {
        return error!(EAGAIN);
    }

    Ok(())
}

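/// `wait4()`: waits for a state change in a child selected by `raw_selector`, returning the
/// pid of the reaped child, or 0 when `WNOHANG` was set and no child was ready.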
pub fn sys_wait4(
    locked: &mut Locked<Unlocked>,
    current_task: &CurrentTask,
    raw_selector: pid_t,
    user_wstatus: UserRef<i32>,
    options: u32,
    user_rusage: RUsagePtr,
) -> Result<pid_t, Errno> {
    let waiting_options = WaitingOptions::new_for_wait4(options)?;

    let selector = if raw_selector == 0 {
        ProcessSelector::Pgid(current_task.thread_group().read().process_group.leader)
    } else if raw_selector == -1 {
        ProcessSelector::Any
    } else if raw_selector > 0 {
        ProcessSelector::Pid(raw_selector)
    } else if raw_selector < -1 {
        ProcessSelector::Pgid(negate_pid(raw_selector)?)
    } else {
        track_stub!(
            TODO("https://fxbug.dev/322874213"),
            "wait4 with selector",
            raw_selector as u64
        );
        return error!(ENOSYS);
    };

    if let Some(waitable_process) = wait_on_pid(locked, current_task, &selector, &waiting_options)?
    {
        let status = waitable_process.exit_info.status.wait_status();

        if !user_rusage.is_null() {
            track_stub!(TODO("https://fxbug.dev/322874768"), "real rusage from wait4");
            let usage = rusage {
                ru_utime: timeval_from_duration(waitable_process.time_stats.user_time),
                ru_stime: timeval_from_duration(waitable_process.time_stats.system_time),
                ..Default::default()
            };
            current_task.write_multi_arch_object(user_rusage, usage)?;
        }

        if !user_wstatus.is_null() {
            current_task.write_object(user_wstatus, &status)?;
        }

        Ok(waitable_process.pid)
    } else {
        Ok(0)
    }
}

/// Negates `pid`, returning ESRCH when the negation would overflow (i.e. `pid_t::MIN`).
fn negate_pid(pid: pid_t) -> Result<pid_t, Errno> {
    pid.checked_neg().ok_or_else(|| errno!(ESRCH))
}

#[cfg(target_arch = "aarch64")]
mod arch32 {
    use crate::task::CurrentTask;
    use crate::vfs::FdNumber;
    use starnix_sync::{Locked, Unlocked};
    use starnix_uapi::errors::Errno;
    use starnix_uapi::signals::SigSet;
    use starnix_uapi::user_address::UserRef;

    pub fn sys_arch32_signalfd(
        locked: &mut Locked<Unlocked>,
        current_task: &CurrentTask,
        fd: FdNumber,
        mask_addr: UserRef<SigSet>,
        mask_size: usize,
    ) -> Result<FdNumber, Errno> {
        super::sys_signalfd4(locked, current_task, fd, mask_addr, mask_size, 0)
    }

    pub use super::{
        sys_pidfd_send_signal as sys_arch32_pidfd_send_signal,
        sys_rt_sigaction as sys_arch32_rt_sigaction,
        sys_rt_sigqueueinfo as sys_arch32_rt_sigqueueinfo,
        sys_rt_sigtimedwait as sys_arch32_rt_sigtimedwait,
        sys_rt_tgsigqueueinfo as sys_arch32_rt_tgsigqueueinfo,
        sys_sigaltstack as sys_arch32_sigaltstack, sys_signalfd4 as sys_arch32_signalfd4,
        sys_waitid as sys_arch32_waitid,
    };
}

#[cfg(target_arch = "aarch64")]
pub use arch32::*;

#[cfg(test)]
mod tests {
    use super::*;
    use crate::mm::{MemoryAccessor, PAGE_SIZE};
    use crate::signals::testing::dequeue_signal_for_test;
    use crate::signals::{SI_HEADER_SIZE, SignalInfoHeader, send_standard_signal};
    use crate::task::dynamic_thread_spawner::SpawnRequestBuilder;
    use crate::task::{EventHandler, ExitStatus, ProcessExitInfo};
    use crate::testing::*;
    use starnix_sync::Mutex;
    use starnix_types::math::round_up_to_system_page_size;
    use starnix_uapi::auth::Credentials;
    use starnix_uapi::errors::ERESTARTSYS;
    use starnix_uapi::signals::{
        SIGCHLD, SIGHUP, SIGINT, SIGIO, SIGKILL, SIGRTMIN, SIGSEGV, SIGSTOP, SIGTERM, SIGTRAP,
        SIGUSR1,
    };
    use starnix_uapi::vfs::FdEvents;
    use starnix_uapi::{SI_QUEUE, SI_USER, sigaction_t, uaddr, uid_t};
    use std::collections::VecDeque;
    use std::sync::Arc;
    use zerocopy::IntoBytes;

    #[cfg(target_arch = "x86_64")]
    #[::fuchsia::test]
    async fn test_sigaltstack() {
        spawn_kernel_and_run(async |locked, current_task| {
            let addr = map_memory(locked, &current_task, UserAddress::default(), *PAGE_SIZE);

            let user_ss = UserRef::<sigaltstack>::new(addr);
            let nullptr = UserRef::<sigaltstack>::default();

            sys_sigaltstack(locked, &current_task, nullptr.into(), user_ss.into())
                .expect("failed to call sigaltstack");
            let mut ss = current_task.read_object(user_ss).expect("failed to read struct");
            assert!(ss.ss_flags & (SS_DISABLE as i32) != 0);

            ss.ss_sp = uaddr { addr: 0x7FFFF };
            ss.ss_size = 0x1000;
            ss.ss_flags = SS_AUTODISARM as i32;
            current_task.write_object(user_ss, &ss).expect("failed to write struct");
            sys_sigaltstack(locked, &current_task, user_ss.into(), nullptr.into())
                .expect("failed to call sigaltstack");
            current_task
                .write_memory(addr, &[0u8; std::mem::size_of::<sigaltstack>()])
                .expect("failed to clear struct");
            sys_sigaltstack(locked, &current_task, nullptr.into(), user_ss.into())
                .expect("failed to call sigaltstack");
            let another_ss = current_task.read_object(user_ss).expect("failed to read struct");
            assert_eq!(ss.as_bytes(), another_ss.as_bytes());

            let ss = sigaltstack { ss_flags: SS_DISABLE as i32, ..sigaltstack::default() };
            current_task.write_object(user_ss, &ss).expect("failed to write struct");
            sys_sigaltstack(locked, &current_task, user_ss.into(), nullptr.into())
                .expect("failed to call sigaltstack");
            current_task
                .write_memory(addr, &[0u8; std::mem::size_of::<sigaltstack>()])
                .expect("failed to clear struct");
            sys_sigaltstack(locked, &current_task, nullptr.into(), user_ss.into())
                .expect("failed to call sigaltstack");
            let ss = current_task.read_object(user_ss).expect("failed to read struct");
            assert!(ss.ss_flags & (SS_DISABLE as i32) != 0);
        })
        .await;
    }

    #[::fuchsia::test]
    async fn test_sigaltstack_invalid_size() {
        spawn_kernel_and_run(async |locked, current_task| {
            let addr = map_memory(locked, &current_task, UserAddress::default(), *PAGE_SIZE);

            let user_ss = UserRef::<sigaltstack>::new(addr);
            let nullptr = UserRef::<sigaltstack>::default();

            sys_sigaltstack(locked, &current_task, nullptr.into(), user_ss.into())
                .expect("failed to call sigaltstack");
            let mut ss = current_task.read_object(user_ss).expect("failed to read struct");
            assert!(ss.ss_flags & (SS_DISABLE as i32) != 0);

            let sigaltstack_addr_size = round_up_to_system_page_size(uapi::MINSIGSTKSZ as usize)
                .expect("failed to round up");
            let sigaltstack_addr = map_memory(
                locked,
                &current_task,
                UserAddress::default(),
                sigaltstack_addr_size as u64,
            );
            ss.ss_sp = sigaltstack_addr.into();
            ss.ss_flags = 0;
            ss.ss_size = uapi::MINSIGSTKSZ as u64 - 1;
            current_task.write_object(user_ss, &ss).expect("failed to write struct");
            assert_eq!(
                sys_sigaltstack(locked, &current_task, user_ss.into(), nullptr.into()),
                error!(ENOMEM)
            );
        })
        .await;
    }

    #[cfg(target_arch = "x86_64")]
    #[::fuchsia::test]
    async fn test_sigaltstack_active_stack() {
        spawn_kernel_and_run(async |locked, current_task| {
            let addr = map_memory(locked, &current_task, UserAddress::default(), *PAGE_SIZE);

            let user_ss = UserRef::<sigaltstack>::new(addr);
            let nullptr = UserRef::<sigaltstack>::default();

            sys_sigaltstack(locked, &current_task, nullptr.into(), user_ss.into())
                .expect("failed to call sigaltstack");
            let mut ss = current_task.read_object(user_ss).expect("failed to read struct");
            assert!(ss.ss_flags & (SS_DISABLE as i32) != 0);

            let sigaltstack_addr_size = round_up_to_system_page_size(uapi::MINSIGSTKSZ as usize)
                .expect("failed to round up");
            let sigaltstack_addr = map_memory(
                locked,
                &current_task,
                UserAddress::default(),
                sigaltstack_addr_size as u64,
            );
            ss.ss_sp = sigaltstack_addr.into();
            ss.ss_flags = 0;
            ss.ss_size = sigaltstack_addr_size as u64;
            current_task.write_object(user_ss, &ss).expect("failed to write struct");
            sys_sigaltstack(locked, &current_task, user_ss.into(), nullptr.into())
                .expect("failed to call sigaltstack");

            let next_addr = (sigaltstack_addr + sigaltstack_addr_size).unwrap();
            current_task.thread_state.registers.rsp = next_addr.ptr() as u64;
            ss.ss_flags = SS_DISABLE as i32;
            current_task.write_object(user_ss, &ss).expect("failed to write struct");
            assert_eq!(
                sys_sigaltstack(locked, &current_task, user_ss.into(), nullptr.into()),
                error!(EPERM)
            );

            let next_ss_addr = sigaltstack_addr
                .checked_add(sigaltstack_addr_size)
                .unwrap()
                .checked_add(0x1000usize)
                .unwrap();
            current_task.thread_state.registers.rsp = next_ss_addr.ptr() as u64;
            let ss = sigaltstack { ss_flags: SS_DISABLE as i32, ..sigaltstack::default() };
            current_task.write_object(user_ss, &ss).expect("failed to write struct");
            sys_sigaltstack(locked, &current_task, user_ss.into(), nullptr.into())
                .expect("failed to call sigaltstack");
        })
        .await;
    }

    #[cfg(target_arch = "x86_64")]
    #[::fuchsia::test]
    async fn test_sigaltstack_active_stack_saturates() {
        spawn_kernel_and_run(async |locked, current_task| {
            let addr = map_memory(locked, &current_task, UserAddress::default(), *PAGE_SIZE);

            let user_ss = UserRef::<sigaltstack>::new(addr);
            let nullptr = UserRef::<sigaltstack>::default();

            sys_sigaltstack(locked, &current_task, nullptr.into(), user_ss.into())
                .expect("failed to call sigaltstack");
            let mut ss = current_task.read_object(user_ss).expect("failed to read struct");
            assert!(ss.ss_flags & (SS_DISABLE as i32) != 0);

            let sigaltstack_addr_size = round_up_to_system_page_size(uapi::MINSIGSTKSZ as usize)
                .expect("failed to round up");
            let sigaltstack_addr = map_memory(
                locked,
                &current_task,
                UserAddress::default(),
                sigaltstack_addr_size as u64,
            );
            ss.ss_sp = sigaltstack_addr.into();
            ss.ss_flags = 0;
            ss.ss_size = u64::MAX;
            current_task.write_object(user_ss, &ss).expect("failed to write struct");
            sys_sigaltstack(locked, &current_task, user_ss.into(), nullptr.into())
                .expect("failed to call sigaltstack");

            current_task.thread_state.registers.rsp =
                (sigaltstack_addr + sigaltstack_addr_size).unwrap().ptr() as u64;
            ss.ss_flags = SS_DISABLE as i32;
            current_task.write_object(user_ss, &ss).expect("failed to write struct");
            assert_eq!(
                sys_sigaltstack(locked, &current_task, user_ss.into(), nullptr.into()),
                error!(EPERM)
            );

            current_task.thread_state.registers.rsp = 0u64;
            let ss = sigaltstack { ss_flags: SS_DISABLE as i32, ..sigaltstack::default() };
            current_task.write_object(user_ss, &ss).expect("failed to write struct");
            sys_sigaltstack(locked, &current_task, user_ss.into(), nullptr.into())
                .expect("failed to call sigaltstack");
        })
        .await;
    }

    #[::fuchsia::test]
    async fn test_sigprocmask_invalid_size() {
        spawn_kernel_and_run(async |locked, current_task| {
            let set = UserRef::<SigSet>::default();
            let old_set = UserRef::<SigSet>::default();
            let how = 0;

            assert_eq!(
                sys_rt_sigprocmask(
                    locked,
                    &current_task,
                    how,
                    set,
                    old_set,
                    std::mem::size_of::<SigSet>() * 2
                ),
                error!(EINVAL)
            );
            assert_eq!(
                sys_rt_sigprocmask(
                    locked,
                    &current_task,
                    how,
                    set,
                    old_set,
                    std::mem::size_of::<SigSet>() / 2
                ),
                error!(EINVAL)
            );
        })
        .await;
    }

    #[::fuchsia::test]
    async fn test_sigprocmask_invalid_how() {
        spawn_kernel_and_run(async |locked, current_task| {
            let addr = map_memory(locked, &current_task, UserAddress::default(), *PAGE_SIZE);

            let set = UserRef::<SigSet>::new(addr);
            let old_set = UserRef::<SigSet>::default();
            let how = SIG_SETMASK | SIG_UNBLOCK | SIG_BLOCK;

            assert_eq!(
                sys_rt_sigprocmask(
                    locked,
                    &current_task,
                    how,
                    set,
                    old_set,
                    std::mem::size_of::<SigSet>()
                ),
                error!(EINVAL)
            );
        })
        .await;
    }

    #[::fuchsia::test]
    async fn test_sigprocmask_null_set() {
        spawn_kernel_and_run(async |locked, current_task| {
            let addr = map_memory(locked, &current_task, UserAddress::default(), *PAGE_SIZE);
            let original_mask = SigSet::from(SIGTRAP);
            {
                current_task.write().set_signal_mask(original_mask);
            }

            let set = UserRef::<SigSet>::default();
            let old_set = UserRef::<SigSet>::new(addr);
            let how = SIG_SETMASK;

            current_task
                .write_memory(addr, &[0u8; std::mem::size_of::<SigSet>()])
                .expect("failed to clear struct");

            assert_eq!(
                sys_rt_sigprocmask(
                    locked,
                    &current_task,
                    how,
                    set,
                    old_set,
                    std::mem::size_of::<SigSet>()
                ),
                Ok(())
            );

            let old_mask = current_task.read_object(old_set).expect("failed to read mask");
            assert_eq!(old_mask, original_mask);
        })
        .await;
    }

    #[::fuchsia::test]
    async fn test_sigprocmask_null_set_and_old_set() {
        spawn_kernel_and_run(async |locked, current_task| {
            let original_mask = SigSet::from(SIGTRAP);
            {
                current_task.write().set_signal_mask(original_mask);
            }

            let set = UserRef::<SigSet>::default();
            let old_set = UserRef::<SigSet>::default();
            let how = SIG_SETMASK;

            assert_eq!(
                sys_rt_sigprocmask(
                    locked,
                    &current_task,
                    how,
                    set,
                    old_set,
                    std::mem::size_of::<SigSet>()
                ),
                Ok(())
            );
            assert_eq!(current_task.read().signal_mask(), original_mask);
        })
        .await;
    }

    #[::fuchsia::test]
    async fn test_sigprocmask_setmask() {
        spawn_kernel_and_run(async |locked, current_task| {
            let addr = map_memory(locked, &current_task, UserAddress::default(), *PAGE_SIZE);
            current_task
                .write_memory(addr, &[0u8; std::mem::size_of::<SigSet>() * 2])
                .expect("failed to clear struct");

            let original_mask = SigSet::from(SIGTRAP);
            {
                current_task.write().set_signal_mask(original_mask);
            }

            let new_mask = SigSet::from(SIGIO);
            let set = UserRef::<SigSet>::new(addr);
            current_task.write_object(set, &new_mask).expect("failed to set mask");

            let old_addr_range = (addr + std::mem::size_of::<SigSet>()).unwrap();
            let old_set = UserRef::<SigSet>::new(old_addr_range);
            let how = SIG_SETMASK;

            assert_eq!(
                sys_rt_sigprocmask(
                    locked,
                    &current_task,
                    how,
                    set,
                    old_set,
                    std::mem::size_of::<SigSet>()
                ),
                Ok(())
            );

            let old_mask = current_task.read_object(old_set).expect("failed to read mask");
            assert_eq!(old_mask, original_mask);
            assert_eq!(current_task.read().signal_mask(), new_mask);
        })
        .await;
    }

    #[::fuchsia::test]
    async fn test_sigprocmask_block() {
        spawn_kernel_and_run(async |locked, current_task| {
            let addr = map_memory(locked, &current_task, UserAddress::default(), *PAGE_SIZE);
            current_task
                .write_memory(addr, &[0u8; std::mem::size_of::<SigSet>() * 2])
                .expect("failed to clear struct");

            let original_mask = SigSet::from(SIGTRAP);
            {
                current_task.write().set_signal_mask(original_mask);
            }

            let new_mask = SigSet::from(SIGIO);
            let set = UserRef::<SigSet>::new(addr);
            current_task.write_object(set, &new_mask).expect("failed to set mask");

            let old_addr_range = (addr + std::mem::size_of::<SigSet>()).unwrap();
            let old_set = UserRef::<SigSet>::new(old_addr_range);
            let how = SIG_BLOCK;

            assert_eq!(
                sys_rt_sigprocmask(
                    locked,
                    &current_task,
                    how,
                    set,
                    old_set,
                    std::mem::size_of::<SigSet>()
                ),
                Ok(())
            );

            let old_mask = current_task.read_object(old_set).expect("failed to read mask");
            assert_eq!(old_mask, original_mask);
            assert_eq!(current_task.read().signal_mask(), new_mask | original_mask);
        })
        .await;
    }

    #[::fuchsia::test]
    async fn test_sigprocmask_unblock() {
        spawn_kernel_and_run(async |locked, current_task| {
            let addr = map_memory(locked, &current_task, UserAddress::default(), *PAGE_SIZE);
            current_task
                .write_memory(addr, &[0u8; std::mem::size_of::<SigSet>() * 2])
                .expect("failed to clear struct");

            let original_mask = SigSet::from(SIGTRAP) | SigSet::from(SIGIO);
            {
                current_task.write().set_signal_mask(original_mask);
            }

            let new_mask = SigSet::from(SIGTRAP);
            let set = UserRef::<SigSet>::new(addr);
            current_task.write_object(set, &new_mask).expect("failed to set mask");

            let old_addr_range = (addr + std::mem::size_of::<SigSet>()).unwrap();
            let old_set = UserRef::<SigSet>::new(old_addr_range);
            let how = SIG_UNBLOCK;

            assert_eq!(
                sys_rt_sigprocmask(
                    locked,
                    &current_task,
                    how,
                    set,
                    old_set,
                    std::mem::size_of::<SigSet>()
                ),
                Ok(())
            );

            let old_mask = current_task.read_object(old_set).expect("failed to read mask");
            assert_eq!(old_mask, original_mask);
            assert_eq!(current_task.read().signal_mask(), SIGIO.into());
        })
        .await;
    }

    #[::fuchsia::test]
    async fn test_sigprocmask_unblock_not_set() {
        spawn_kernel_and_run(async |locked, current_task| {
            let addr = map_memory(locked, &current_task, UserAddress::default(), *PAGE_SIZE);
            current_task
                .write_memory(addr, &[0u8; std::mem::size_of::<SigSet>() * 2])
                .expect("failed to clear struct");

            let original_mask = SigSet::from(SIGIO);
            {
                current_task.write().set_signal_mask(original_mask);
            }

            let new_mask = SigSet::from(SIGTRAP);
            let set = UserRef::<SigSet>::new(addr);
            current_task.write_object(set, &new_mask).expect("failed to set mask");

            let old_addr_range = (addr + std::mem::size_of::<SigSet>()).unwrap();
            let old_set = UserRef::<SigSet>::new(old_addr_range);
            let how = SIG_UNBLOCK;

            assert_eq!(
                sys_rt_sigprocmask(
                    locked,
                    &current_task,
                    how,
                    set,
                    old_set,
                    std::mem::size_of::<SigSet>()
                ),
                Ok(())
            );

            let old_mask = current_task.read_object(old_set).expect("failed to read mask");
            assert_eq!(old_mask, original_mask);
            assert_eq!(current_task.read().signal_mask(), original_mask);
        })
        .await;
    }

    #[::fuchsia::test]
    async fn test_sigprocmask_kill_stop() {
        spawn_kernel_and_run(async |locked, current_task| {
            let addr = map_memory(locked, &current_task, UserAddress::default(), *PAGE_SIZE);
            current_task
                .write_memory(addr, &[0u8; std::mem::size_of::<SigSet>() * 2])
                .expect("failed to clear struct");

            let original_mask = SigSet::from(SIGIO);
            {
                current_task.write().set_signal_mask(original_mask);
            }

            let new_mask = UNBLOCKABLE_SIGNALS;
            let set = UserRef::<SigSet>::new(addr);
            current_task.write_object(set, &new_mask).expect("failed to set mask");

            let old_addr_range = (addr + std::mem::size_of::<SigSet>()).unwrap();
            let old_set = UserRef::<SigSet>::new(old_addr_range);
            let how = SIG_BLOCK;

            assert_eq!(
                sys_rt_sigprocmask(
                    locked,
                    &current_task,
                    how,
                    set,
                    old_set,
                    std::mem::size_of::<SigSet>()
                ),
                Ok(())
            );

            let old_mask = current_task.read_object(old_set).expect("failed to read mask");
            assert_eq!(old_mask, original_mask);
            assert_eq!(current_task.read().signal_mask(), original_mask);
        })
        .await;
    }

    #[::fuchsia::test]
    async fn test_sigaction_invalid_signal() {
        spawn_kernel_and_run(async |locked, current_task| {
            assert_eq!(
                sys_rt_sigaction(
                    locked,
                    &current_task,
                    UncheckedSignal::from(SIGKILL),
                    UserRef::<sigaction_t>::new(UserAddress::from(10)).into(),
                    UserRef::<sigaction_t>::default().into(),
                    std::mem::size_of::<SigSet>(),
                ),
                error!(EINVAL)
            );
            assert_eq!(
                sys_rt_sigaction(
                    locked,
                    &current_task,
                    UncheckedSignal::from(SIGSTOP),
                    UserRef::<sigaction_t>::new(UserAddress::from(10)).into(),
                    UserRef::<sigaction_t>::default().into(),
                    std::mem::size_of::<SigSet>(),
                ),
                error!(EINVAL)
            );
            assert_eq!(
                sys_rt_sigaction(
                    locked,
                    &current_task,
                    UncheckedSignal::from(Signal::NUM_SIGNALS + 1),
                    UserRef::<sigaction_t>::new(UserAddress::from(10)).into(),
                    UserRef::<sigaction_t>::default().into(),
                    std::mem::size_of::<SigSet>(),
                ),
                error!(EINVAL)
            );
        })
        .await;
    }

    #[::fuchsia::test]
    async fn test_sigaction_old_value_set() {
        spawn_kernel_and_run(async |locked, current_task| {
            let addr = map_memory(locked, &current_task, UserAddress::default(), *PAGE_SIZE);
            current_task
                .write_memory(addr, &[0u8; std::mem::size_of::<sigaction_t>()])
                .expect("failed to clear struct");

            let org_mask = SigSet::from(SIGHUP) | SigSet::from(SIGINT);
            let original_action =
                sigaction_t { sa_mask: org_mask.into(), ..sigaction_t::default() };

            {
                current_task.thread_group().signal_actions.set(SIGHUP, original_action);
            }

            let old_action_ref = UserRef::<sigaction_t>::new(addr);
            assert_eq!(
                sys_rt_sigaction(
                    locked,
                    &current_task,
                    UncheckedSignal::from(SIGHUP),
                    UserRef::<sigaction_t>::default().into(),
                    old_action_ref.into(),
                    std::mem::size_of::<SigSet>()
                ),
                Ok(())
            );

            let old_action =
                current_task.read_object(old_action_ref).expect("failed to read action");
            assert_eq!(old_action.as_bytes(), original_action.as_bytes());
        })
        .await;
    }

    #[::fuchsia::test]
    async fn test_sigaction_new_value_set() {
        spawn_kernel_and_run(async |locked, current_task| {
            let addr = map_memory(locked, &current_task, UserAddress::default(), *PAGE_SIZE);
            current_task
                .write_memory(addr, &[0u8; std::mem::size_of::<sigaction_t>()])
                .expect("failed to clear struct");

            let org_mask = SigSet::from(SIGHUP) | SigSet::from(SIGINT);
            let original_action =
                sigaction_t { sa_mask: org_mask.into(), ..sigaction_t::default() };
            let set_action_ref = UserRef::<sigaction_t>::new(addr);
            current_task
                .write_object(set_action_ref, &original_action)
                .expect("failed to set action");

            assert_eq!(
                sys_rt_sigaction(
                    locked,
                    &current_task,
                    UncheckedSignal::from(SIGINT),
                    set_action_ref.into(),
                    UserRef::<sigaction_t>::default().into(),
                    std::mem::size_of::<SigSet>(),
                ),
                Ok(())
            );

            assert_eq!(
                current_task.thread_group().signal_actions.get(SIGINT).as_bytes(),
                original_action.as_bytes()
            );
        })
        .await;
    }

    #[::fuchsia::test]
    async fn test_kill_same_task() {
        spawn_kernel_and_run(async |locked, current_task| {
            assert_eq!(sys_kill(locked, &current_task, current_task.tid, SIGINT.into()), Ok(()));
        })
        .await;
    }

    #[::fuchsia::test]
    async fn test_kill_own_thread_group() {
        spawn_kernel_and_run(async |locked, init_task| {
            let task1 = init_task.clone_task_for_test(locked, 0, Some(SIGCHLD));
            task1.thread_group().setsid(locked).expect("setsid");
            let task2 = task1.clone_task_for_test(locked, 0, Some(SIGCHLD));

            assert_eq!(sys_kill(locked, &task1, 0, SIGINT.into()), Ok(()));
            assert_eq!(task1.read().queued_signal_count(SIGINT), 1);
            assert_eq!(task2.read().queued_signal_count(SIGINT), 1);
            assert_eq!(init_task.read().queued_signal_count(SIGINT), 0);
        })
        .await;
    }

    #[::fuchsia::test]
    async fn test_kill_thread_group() {
        spawn_kernel_and_run(async |locked, init_task| {
            let task1 = init_task.clone_task_for_test(locked, 0, Some(SIGCHLD));
            task1.thread_group().setsid(locked).expect("setsid");
            let task2 = task1.clone_task_for_test(locked, 0, Some(SIGCHLD));

            assert_eq!(sys_kill(locked, &task1, -task1.tid, SIGINT.into()), Ok(()));
            assert_eq!(task1.read().queued_signal_count(SIGINT), 1);
            assert_eq!(task2.read().queued_signal_count(SIGINT), 1);
            assert_eq!(init_task.read().queued_signal_count(SIGINT), 0);
        })
        .await;
    }

    #[::fuchsia::test]
    async fn test_kill_all() {
        spawn_kernel_and_run(async |locked, init_task| {
            let task1 = init_task.clone_task_for_test(locked, 0, Some(SIGCHLD));
            task1.thread_group().setsid(locked).expect("setsid");
            let task2 = task1.clone_task_for_test(locked, 0, Some(SIGCHLD));

            assert_eq!(sys_kill(locked, &task1, -1, SIGINT.into()), Ok(()));
            assert_eq!(task1.read().queued_signal_count(SIGINT), 0);
            assert_eq!(task2.read().queued_signal_count(SIGINT), 1);
            assert_eq!(init_task.read().queued_signal_count(SIGINT), 0);
        })
        .await;
    }

    #[::fuchsia::test]
    async fn test_kill_nonexistent_task() {
        spawn_kernel_and_run(async |locked, current_task| {
            assert_eq!(sys_kill(locked, &current_task, 9, SIGINT.into()), error!(ESRCH));
        })
        .await;
    }

    #[::fuchsia::test]
    async fn test_kill_invalid_task() {
        spawn_kernel_and_run(async |locked, task1| {
            task1.set_creds(Credentials::with_ids(1, 1));
            let task2 = task1.clone_task_for_test(locked, 0, Some(SIGCHLD));
            task2.set_creds(Credentials::with_ids(2, 2));

            assert!(task1.can_signal(&task2, SIGINT.into()).is_err());
            assert_eq!(sys_kill(locked, &task2, task1.tid, SIGINT.into()), error!(EPERM));
            assert_eq!(task1.read().queued_signal_count(SIGINT), 0);
        })
        .await;
    }

    #[::fuchsia::test]
    async fn test_kill_invalid_task_in_thread_group() {
        spawn_kernel_and_run(async |locked, init_task| {
            let task1 = init_task.clone_task_for_test(locked, 0, Some(SIGCHLD));
            task1.thread_group().setsid(locked).expect("setsid");
            let task2 = task1.clone_task_for_test(locked, 0, Some(SIGCHLD));
            task2.thread_group().setsid(locked).expect("setsid");
            task2.set_creds(Credentials::with_ids(2, 2));

            assert!(task2.can_signal(&task1, SIGINT.into()).is_err());
            assert_eq!(sys_kill(locked, &task2, -task1.tid, SIGINT.into()), error!(EPERM));
            assert_eq!(task1.read().queued_signal_count(SIGINT), 0);
        })
        .await;
    }

    #[::fuchsia::test]
    async fn test_kill_invalid_signal() {
        spawn_kernel_and_run(async |locked, current_task| {
            assert_eq!(
                sys_kill(locked, &current_task, current_task.tid, UncheckedSignal::from(75)),
                error!(EINVAL)
            );
        })
        .await;
    }

    #[::fuchsia::test]
    async fn test_blocked_signal_pending() {
        spawn_kernel_and_run(async |locked, current_task| {
            let addr = map_memory(locked, &current_task, UserAddress::default(), *PAGE_SIZE);
            current_task
                .write_memory(addr, &[0u8; std::mem::size_of::<SigSet>() * 2])
                .expect("failed to clear struct");

            let new_mask = SigSet::from(SIGIO);
            let set = UserRef::<SigSet>::new(addr);
            current_task.write_object(set, &new_mask).expect("failed to set mask");

            assert_eq!(
                sys_rt_sigprocmask(
                    locked,
                    &current_task,
                    SIG_BLOCK,
                    set,
                    UserRef::default(),
                    std::mem::size_of::<SigSet>()
                ),
                Ok(())
            );
            assert_eq!(sys_kill(locked, &current_task, current_task.tid, SIGIO.into()), Ok(()));
            assert_eq!(current_task.read().queued_signal_count(SIGIO), 1);

            // A second standard (non-real-time) signal is not queued while one is already
            // pending.
            assert_eq!(sys_kill(locked, &current_task, current_task.tid, SIGIO.into()), Ok(()));
            assert_eq!(current_task.read().queued_signal_count(SIGIO), 1);
        })
        .await;
    }

    #[::fuchsia::test]
    async fn test_blocked_real_time_signal_pending() {
        spawn_kernel_and_run(async |locked, current_task| {
            let addr = map_memory(locked, &current_task, UserAddress::default(), *PAGE_SIZE);
            current_task
                .write_memory(addr, &[0u8; std::mem::size_of::<SigSet>() * 2])
                .expect("failed to clear struct");

            let new_mask = SigSet::from(starnix_uapi::signals::SIGRTMIN);
            let set = UserRef::<SigSet>::new(addr);
            current_task.write_object(set, &new_mask).expect("failed to set mask");

            assert_eq!(
                sys_rt_sigprocmask(
                    locked,
                    &current_task,
                    SIG_BLOCK,
                    set,
                    UserRef::default(),
                    std::mem::size_of::<SigSet>()
                ),
                Ok(())
            );
            assert_eq!(sys_kill(locked, &current_task, current_task.tid, SIGRTMIN.into()), Ok(()));
            assert_eq!(current_task.read().queued_signal_count(starnix_uapi::signals::SIGRTMIN), 1);

            // Unlike standard signals, real-time signals accumulate in the queue.
            assert_eq!(sys_kill(locked, &current_task, current_task.tid, SIGRTMIN.into()), Ok(()));
            assert_eq!(current_task.read().queued_signal_count(starnix_uapi::signals::SIGRTMIN), 2);
        })
        .await;
    }

    #[::fuchsia::test]
    async fn test_suspend() {
        spawn_kernel_and_run(async |locked, current_task| {
            let init_task_weak = current_task.weak_task();
            let (tx, rx) = std::sync::mpsc::sync_channel::<()>(0);

            let closure = move |locked: &mut Locked<Unlocked>, current_task: &CurrentTask| {
                let init_task_temp = init_task_weak.upgrade().expect("Task must be alive");

                let mut suspended = false;
                while !suspended {
                    suspended = init_task_temp.read().is_blocked();
                    std::thread::sleep(std::time::Duration::from_millis(10));
                }

                let _ = sys_kill(
                    locked,
                    current_task,
                    init_task_temp.tid,
                    UncheckedSignal::from(SIGHUP),
                );

                rx.recv().expect("receive");
                assert!(!init_task_temp.read().is_blocked());
            };
            let (thread, req) =
                SpawnRequestBuilder::new().with_sync_closure(closure).build_with_async_result();
            current_task.kernel().kthreads.spawner().spawn_from_request(req);

            let addr = map_memory(locked, &current_task, UserAddress::default(), *PAGE_SIZE);
            let user_ref = UserRef::<SigSet>::new(addr);

            let sigset = !SigSet::from(SIGHUP);
            current_task.write_object(user_ref, &sigset).expect("failed to set action");

            assert_eq!(
                sys_rt_sigsuspend(locked, current_task, user_ref, std::mem::size_of::<SigSet>()),
                error!(ERESTARTNOHAND)
            );
            tx.send(()).expect("send");
            futures::executor::block_on(thread).expect("join");
        })
        .await;
    }

    #[::fuchsia::test]
    async fn test_waitid_options() {
        spawn_kernel_and_run(async |locked, current_task| {
            let id = 1;
            assert_eq!(
                sys_waitid(
                    locked,
                    &current_task,
                    P_PID,
                    id,
                    MultiArchUserRef::null(current_task),
                    0,
                    UserRef::default().into()
                ),
                error!(EINVAL)
            );
            assert_eq!(
                sys_waitid(
                    locked,
                    &current_task,
                    P_PID,
                    id,
                    MultiArchUserRef::null(current_task),
                    0xffff,
                    UserRef::default().into()
                ),
                error!(EINVAL)
            );
        })
        .await;
    }

    #[::fuchsia::test]
    async fn test_wait4_options() {
        spawn_kernel_and_run(async |locked, current_task| {
            let id = 1;
            assert_eq!(
                sys_wait4(
                    locked,
                    &current_task,
                    id,
                    UserRef::default(),
                    WEXITED,
                    RUsagePtr::null(current_task)
                ),
                error!(EINVAL)
            );
            assert_eq!(
                sys_wait4(
                    locked,
                    &current_task,
                    id,
                    UserRef::default(),
                    WNOWAIT,
                    RUsagePtr::null(current_task)
                ),
                error!(EINVAL)
            );
            assert_eq!(
                sys_wait4(
                    locked,
                    &current_task,
                    id,
                    UserRef::default(),
                    0xffff,
                    RUsagePtr::null(current_task)
                ),
                error!(EINVAL)
            );
        })
        .await;
    }

    #[::fuchsia::test]
    async fn test_echild_when_no_zombie() {
        spawn_kernel_and_run(async |locked, current_task| {
            assert!(
                sys_kill(
                    locked,
                    &current_task,
                    current_task.get_pid(),
                    UncheckedSignal::from(SIGCHLD)
                )
                .is_ok()
            );
            assert_eq!(
                wait_on_pid(
                    locked,
                    &current_task,
                    &ProcessSelector::Any,
                    &WaitingOptions::new_for_wait4(0).expect("WaitingOptions")
                ),
                error!(ECHILD)
            );
        })
        .await;
    }

    #[::fuchsia::test]
    async fn test_no_error_when_zombie() {
        spawn_kernel_and_run(async |locked, current_task| {
            let child = current_task.clone_task_for_test(locked, 0, Some(SIGCHLD));
            let expected_result = WaitResult {
                pid: child.tid,
                uid: 0,
                exit_info: ProcessExitInfo {
                    status: ExitStatus::Exit(1),
                    exit_signal: Some(SIGCHLD),
                },
                time_stats: Default::default(),
            };
            child.thread_group().exit(locked, ExitStatus::Exit(1), None);
            std::mem::drop(child);

            assert_eq!(
                wait_on_pid(
                    locked,
                    &current_task,
                    &ProcessSelector::Any,
                    &WaitingOptions::new_for_wait4(0).expect("WaitingOptions")
                ),
                Ok(Some(expected_result))
            );
        })
        .await;
    }

    #[::fuchsia::test]
    async fn test_waiting_for_child() {
        spawn_kernel_and_run(async |locked, task| {
            let child = task
                .clone_task(
                    locked,
                    0,
                    Some(SIGCHLD),
                    UserRef::default(),
                    UserRef::default(),
                    UserRef::default(),
                )
                .expect("clone_task");

            assert_eq!(
                wait_on_pid(
                    locked,
                    &task,
                    &ProcessSelector::Any,
                    &WaitingOptions::new_for_wait4(WNOHANG).expect("WaitingOptions")
                ),
                Ok(None)
            );

            let thread = std::thread::spawn({
                let task = task.weak_task();
                move || {
                    #[allow(
                        clippy::undocumented_unsafe_blocks,
                        reason = "Force documented unsafe blocks in Starnix"
                    )]
                    let locked = unsafe { Unlocked::new() };
                    let task = task.upgrade().expect("task must be alive");
                    let child: AutoReleasableTask = child.into();
                    while !task.read().is_blocked() {
                        std::thread::sleep(std::time::Duration::from_millis(10));
                    }
                    child.thread_group().exit(locked, ExitStatus::Exit(0), None);
                    child.tid
                }
            });

            let waited_child = wait_on_pid(
                locked,
                &task,
                &ProcessSelector::Any,
                &WaitingOptions::new_for_wait4(0).expect("WaitingOptions"),
            )
            .expect("wait_on_pid")
            .unwrap();

            let child_id = thread.join().expect("join");
            assert_eq!(waited_child.pid, child_id);
        })
        .await;
    }

    #[::fuchsia::test]
    async fn test_waiting_for_child_with_signal_pending() {
        spawn_kernel_and_run(async |locked, task| {
            // Register a signal action for SIGUSR1 so the signal interrupts the wait.
            task.thread_group().signal_actions.set(
                SIGUSR1,
                sigaction_t { sa_handler: uaddr { addr: 0xDEADBEEF }, ..sigaction_t::default() },
            );

            // Start a child task so the wait does not fail with ECHILD.
            let _child = task.clone_task_for_test(locked, 0, Some(SIGCHLD));

            // Queue SIGUSR1 for the waiting task.
            send_standard_signal(locked, &task, SignalInfo::default(SIGUSR1));

            let errno = wait_on_pid(
                locked,
                &task,
                &ProcessSelector::Any,
                &WaitingOptions::new_for_wait4(0).expect("WaitingOptions"),
            )
            .expect_err("wait_on_pid");
            assert_eq!(errno, ERESTARTSYS);
        })
        .await;
    }

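    // A child killed by SIGKILL reports the signal number in its wait status.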
    #[::fuchsia::test]
    async fn test_sigkill() {
        spawn_kernel_and_run(async |locked, current_task| {
            let mut child = current_task.clone_task_for_test(locked, 0, Some(SIGCHLD));

            // Deliver SIGKILL to the child and process it, terminating the child.
            send_standard_signal(locked, &child, SignalInfo::default(SIGKILL));
            dequeue_signal_for_test(locked, &mut child);
            std::mem::drop(child);

            // Retrieve the exit status via wait4.
            let address = map_memory(
                locked,
                &current_task,
                UserAddress::default(),
                std::mem::size_of::<i32>() as u64,
            );
            let address_ref = UserRef::<i32>::new(address);
            sys_wait4(locked, &current_task, -1, address_ref, 0, RUsagePtr::null(current_task))
                .expect("wait4");
            let wstatus = current_task.read_object(address_ref).expect("read memory");
            assert_eq!(wstatus, SIGKILL.number() as i32);
        })
        .await;
    }

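    // Helper that terminates a child with `sig` and checks the resulting wait status.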
    async fn test_exit_status_for_signal(
        sig: Signal,
        wait_status: i32,
        exit_signal: Option<Signal>,
    ) {
        spawn_kernel_and_run(async move |locked, current_task| {
            let mut child = current_task.clone_task_for_test(locked, 0, exit_signal);

            // Deliver the signal to the child and process it, terminating the child.
            send_standard_signal(locked, &child, SignalInfo::default(sig));
            dequeue_signal_for_test(locked, &mut child);
            std::mem::drop(child);

            // Retrieve the exit status via wait4.
            let address = map_memory(
                locked,
                &current_task,
                UserAddress::default(),
                std::mem::size_of::<i32>() as u64,
            );
            let address_ref = UserRef::<i32>::new(address);
            sys_wait4(locked, &current_task, -1, address_ref, 0, RUsagePtr::null(current_task))
                .expect("wait4");
            let wstatus = current_task.read_object(address_ref).expect("read memory");
            assert_eq!(wstatus, wait_status);
        })
        .await;
    }

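    // SIGTERM yields the bare signal number in the wait status; SIGSEGV dumps
    // core, which additionally sets the WCOREDUMP bit (0x80).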
    #[::fuchsia::test]
    async fn test_exit_status() {
        test_exit_status_for_signal(SIGTERM, SIGTERM.number() as i32, Some(SIGCHLD)).await;
        test_exit_status_for_signal(SIGSEGV, (SIGSEGV.number() as i32) | 0x80, Some(SIGCHLD)).await;
    }

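    // wait4 with a negative pid waits on that process group; pid 0 waits on the
    // caller's own process group.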
    #[::fuchsia::test]
    async fn test_wait4_by_pgid() {
        spawn_kernel_and_run(async |locked, current_task| {
            let child1 = current_task.clone_task_for_test(locked, 0, Some(SIGCHLD));
            let child1_pid = child1.tid;
            child1.thread_group().exit(locked, ExitStatus::Exit(42), None);
            std::mem::drop(child1);
            let child2 = current_task.clone_task_for_test(locked, 0, Some(SIGCHLD));
            child2.thread_group().setsid(locked).expect("setsid");
            let child2_pid = child2.tid;
            child2.thread_group().exit(locked, ExitStatus::Exit(42), None);
            std::mem::drop(child2);

            assert_eq!(
                sys_wait4(
                    locked,
                    &current_task,
                    -child2_pid,
                    UserRef::default(),
                    0,
                    RUsagePtr::null(current_task)
                ),
                Ok(child2_pid)
            );
            assert_eq!(
                sys_wait4(
                    locked,
                    &current_task,
                    0,
                    UserRef::default(),
                    0,
                    RUsagePtr::null(current_task)
                ),
                Ok(child1_pid)
            );
        })
        .await;
    }

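    // waitid with P_PGID selects children by process group: an explicit pgid
    // matches child2, while pgid 0 means the caller's own process group.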
    #[::fuchsia::test]
    async fn test_waitid_by_pgid() {
        spawn_kernel_and_run(async |locked, current_task| {
            let child1 = current_task.clone_task_for_test(locked, 0, Some(SIGCHLD));
            let child1_pid = child1.tid;
            child1.thread_group().exit(locked, ExitStatus::Exit(42), None);
            std::mem::drop(child1);
            let child2 = current_task.clone_task_for_test(locked, 0, Some(SIGCHLD));
            child2.thread_group().setsid(locked).expect("setsid");
            let child2_pid = child2.tid;
            child2.thread_group().exit(locked, ExitStatus::Exit(42), None);
            std::mem::drop(child2);

            let address: UserRef<uapi::siginfo_t> =
                map_memory(locked, &current_task, UserAddress::default(), *PAGE_SIZE).into();
            assert_eq!(
                sys_waitid(
                    locked,
                    &current_task,
                    P_PGID,
                    child2_pid,
                    address.into(),
                    WEXITED,
                    UserRef::default().into()
                ),
                Ok(())
            );
            assert_eq!(current_task.thread_group().read().zombie_children[0].pid(), child1_pid);

            assert_eq!(
                sys_waitid(
                    locked,
                    &current_task,
                    P_PGID,
                    0,
                    address.into(),
                    WEXITED,
                    UserRef::default().into()
                ),
                Ok(())
            );
        })
        .await;
    }

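    // rt_tgsigqueueinfo queues a SI_QUEUE siginfo whose pid, uid, and value
    // payload round-trip to the receiving task.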
    #[::fuchsia::test]
    async fn test_sigqueue() {
        spawn_kernel_and_run(async |locked, current_task| {
            let current_uid = current_task.current_creds().uid;
            let current_pid = current_task.get_pid();

            const TEST_VALUE: u64 = 101;

            // On 64-bit, the siginfo payload union starts 4 padding bytes after
            // the header, followed by pid (4 bytes), uid (4 bytes), and value.
            const ARCH64_SI_HEADER_SIZE: usize = SI_HEADER_SIZE + 4;
            const PID_DATA_OFFSET: usize = ARCH64_SI_HEADER_SIZE;
            const UID_DATA_OFFSET: usize = ARCH64_SI_HEADER_SIZE + 4;
            const VALUE_DATA_OFFSET: usize = ARCH64_SI_HEADER_SIZE + 8;

            let mut data = vec![0u8; SI_MAX_SIZE_AS_USIZE];
            let header = SignalInfoHeader {
                signo: SIGIO.number(),
                code: SI_QUEUE,
                ..SignalInfoHeader::default()
            };
            let _ = header.write_to(&mut data[..SI_HEADER_SIZE]);
            data[PID_DATA_OFFSET..PID_DATA_OFFSET + 4].copy_from_slice(&current_pid.to_ne_bytes());
            data[UID_DATA_OFFSET..UID_DATA_OFFSET + 4].copy_from_slice(&current_uid.to_ne_bytes());
            data[VALUE_DATA_OFFSET..VALUE_DATA_OFFSET + 8]
                .copy_from_slice(&TEST_VALUE.to_ne_bytes());

            let addr = map_memory(locked, &current_task, UserAddress::default(), *PAGE_SIZE);
            current_task.write_memory(addr, &data).unwrap();
            let second_current = create_task(locked, current_task.kernel(), "second task");
            let second_pid = second_current.get_pid();
            let second_tid = second_current.get_tid();
            assert_eq!(second_current.read().queued_signal_count(SIGIO), 0);

            assert_eq!(
                sys_rt_tgsigqueueinfo(
                    locked,
                    &current_task,
                    second_pid,
                    second_tid,
                    UncheckedSignal::from(SIGIO),
                    addr
                ),
                Ok(())
            );
            assert_eq!(second_current.read().queued_signal_count(SIGIO), 1);

            let signal = SignalInfo {
                code: SI_USER as i32,
                detail: SignalDetail::Kill {
                    pid: current_task.thread_group().leader,
                    uid: current_task.current_creds().uid,
                },
                ..SignalInfo::default(SIGIO)
            };
            let queued_signal = second_current.write().take_specific_signal(signal);
            if let Some(sig) = queued_signal {
                assert_eq!(sig.signal, SIGIO);
                assert_eq!(sig.errno, 0);
                assert_eq!(sig.code, SI_QUEUE);
                if let SignalDetail::Raw { data } = sig.detail {
                    // The raw payload excludes the header, so the offsets are
                    // relative to SI_HEADER_SIZE.
                    let offset_pid = PID_DATA_OFFSET - SI_HEADER_SIZE;
                    let offset_uid = UID_DATA_OFFSET - SI_HEADER_SIZE;
                    let offset_value = VALUE_DATA_OFFSET - SI_HEADER_SIZE;
                    let pid =
                        pid_t::from_ne_bytes(data[offset_pid..offset_pid + 4].try_into().unwrap());
                    let uid =
                        uid_t::from_ne_bytes(data[offset_uid..offset_uid + 4].try_into().unwrap());
                    let value = u64::from_ne_bytes(
                        data[offset_value..offset_value + 8].try_into().unwrap(),
                    );
                    assert_eq!(pid, current_pid);
                    assert_eq!(uid, current_uid);
                    assert_eq!(value, TEST_VALUE);
                } else {
                    panic!("incorrect signal detail");
                }
            } else {
                panic!("expected a queued signal");
            }
        })
        .await;
    }

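    // A signalfd only reports signals in its mask: a pending SIGCHLD makes the
    // SIGCHLD signalfd readable but not the SIGTERM/SIGINT one.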
    #[::fuchsia::test]
    async fn test_signalfd_filters_signals() {
        spawn_kernel_and_run(async |locked, current_task| {
            let memory_for_masks =
                map_memory(locked, &current_task, UserAddress::default(), *PAGE_SIZE);

            // Create a signalfd for SIGTERM and SIGINT.
            let term_int_mask = SigSet::from(SIGTERM) | SigSet::from(SIGINT);
            let term_int_mask_addr = UserRef::<SigSet>::new(memory_for_masks);
            current_task
                .write_object(term_int_mask_addr, &term_int_mask)
                .expect("failed to write mask");
            let sfd_term_int = sys_signalfd4(
                locked,
                &current_task,
                FdNumber::from_raw(-1),
                term_int_mask_addr,
                std::mem::size_of::<SigSet>(),
                0,
            )
            .expect("failed to create SIGTERM/SIGINT signalfd");

            // Create a signalfd for SIGCHLD.
            let sigchld_mask = SigSet::from(SIGCHLD);
            let sigchld_mask_addr =
                UserRef::<SigSet>::new((memory_for_masks + std::mem::size_of::<SigSet>()).unwrap());
            current_task
                .write_object(sigchld_mask_addr, &sigchld_mask)
                .expect("failed to write mask");
            let sfd_chld = sys_signalfd4(
                locked,
                &current_task,
                FdNumber::from_raw(-1),
                sigchld_mask_addr,
                std::mem::size_of::<SigSet>(),
                0,
            )
            .expect("failed to create SIGCHLD signalfd");

            // Exit a child to make SIGCHLD pending.
            let child = current_task.clone_task_for_test(locked, 0, Some(SIGCHLD));
            child.thread_group().exit(locked, ExitStatus::Exit(1), None);
            std::mem::drop(child);

            let sfd_term_int_file =
                current_task.files.get(sfd_term_int).expect("failed to get sfd_term_int file");
            let sfd_chld_file =
                current_task.files.get(sfd_chld).expect("failed to get sfd_chld file");

            let term_int_events = sfd_term_int_file
                .query_events(locked, &current_task)
                .expect("failed to query sfd_term_int events");
            let chld_events = sfd_chld_file
                .query_events(locked, &current_task)
                .expect("failed to query sfd_chld events");

            assert!(!term_int_events.contains(FdEvents::POLLIN));
            assert!(chld_events.contains(FdEvents::POLLIN));
        })
        .await;
    }

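    // Same filtering as above, but observed through wait_async: only the SIGCHLD
    // signalfd's enqueued event fires when a blocked SIGCHLD becomes pending.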
    #[::fuchsia::test]
    async fn test_signalfd_filters_signals_async() {
        spawn_kernel_and_run(async |locked, current_task| {
            let memory_for_masks =
                map_memory(locked, &current_task, UserAddress::default(), *PAGE_SIZE);

            // Create a signalfd for SIGTERM and SIGINT.
            let term_int_mask = SigSet::from(SIGTERM) | SigSet::from(SIGINT);
            let term_int_mask_addr = UserRef::<SigSet>::new(memory_for_masks);
            current_task
                .write_object(term_int_mask_addr, &term_int_mask)
                .expect("failed to write mask");
            let sfd_term_int = sys_signalfd4(
                locked,
                &current_task,
                FdNumber::from_raw(-1),
                term_int_mask_addr,
                std::mem::size_of::<SigSet>(),
                0,
            )
            .expect("failed to create SIGTERM/SIGINT signalfd");

            // Create a signalfd for SIGCHLD.
            let sigchld_mask = SigSet::from(SIGCHLD);
            let sigchld_mask_addr =
                UserRef::<SigSet>::new((memory_for_masks + std::mem::size_of::<SigSet>()).unwrap());
            current_task
                .write_object(sigchld_mask_addr, &sigchld_mask)
                .expect("failed to write mask");
            let sfd_chld = sys_signalfd4(
                locked,
                &current_task,
                FdNumber::from_raw(-1),
                sigchld_mask_addr,
                std::mem::size_of::<SigSet>(),
                0,
            )
            .expect("failed to create SIGCHLD signalfd");

            // Register waiters on both signalfds.
            let waiter = Waiter::new();
            let ready_items = Arc::new(Mutex::new(VecDeque::new()));

            let sfd_term_int_file =
                current_task.files.get(sfd_term_int).expect("failed to get sfd_term_int file");
            let sfd_chld_file =
                current_task.files.get(sfd_chld).expect("failed to get sfd_chld file");

            sfd_term_int_file
                .wait_async(
                    locked,
                    &current_task,
                    &waiter,
                    FdEvents::POLLIN,
                    EventHandler::Enqueue {
                        key: sfd_term_int.into(),
                        queue: ready_items.clone(),
                        sought_events: FdEvents::POLLIN,
                    },
                )
                .expect("failed to wait on sfd_term_int");

            sfd_chld_file
                .wait_async(
                    locked,
                    &current_task,
                    &waiter,
                    FdEvents::POLLIN,
                    EventHandler::Enqueue {
                        key: sfd_chld.into(),
                        queue: ready_items.clone(),
                        sought_events: FdEvents::POLLIN,
                    },
                )
                .expect("failed to wait on sfd_chld");

            // Block SIGCHLD so it stays pending instead of being delivered.
            let sigchld_mask_ref = UserRef::<SigSet>::new(memory_for_masks);
            current_task
                .write_object(sigchld_mask_ref, &sigchld_mask)
                .expect("failed to write mask");
            sys_rt_sigprocmask(
                locked,
                &current_task,
                SIG_BLOCK,
                sigchld_mask_ref,
                UserRef::default(),
                std::mem::size_of::<SigSet>(),
            )
            .expect("failed to block SIGCHLD");

            // Exit a child to make SIGCHLD pending.
            let child = current_task.clone_task_for_test(locked, 0, Some(SIGCHLD));
            child.thread_group().exit(locked, ExitStatus::Exit(1), None);
            std::mem::drop(child);

            waiter.wait(locked, &current_task).expect("failed to wait");

            // Only the SIGCHLD signalfd should have fired.
            let ready_items = ready_items.lock();
            assert_eq!(ready_items.len(), 1);
            assert_eq!(ready_items[0].key, sfd_chld.into());
        })
        .await;
    }
}