1pub use super::signal_handling::sys_restart_syscall;
6use super::signalfd::SignalFd;
7use crate::mm::MemoryAccessorExt;
8use crate::security;
9use crate::signals::{
10 SI_HEADER_SIZE, SignalDetail, SignalInfo, SignalInfoHeader, SignalSource,
11 restore_from_signal_handler, send_signal,
12};
13use crate::task::{
14 CurrentTask, PidTable, ProcessEntryRef, ProcessSelector, Task, TaskMutableState, ThreadGroup,
15 ThreadGroupLifecycleWaitValue, WaitResult, WaitableChildResult, Waiter,
16};
17use crate::vfs::{FdFlags, FdNumber};
18use starnix_sync::{LockBefore, RwLockReadGuard, ThreadGroupLimits};
19use starnix_uapi::user_address::{ArchSpecific, MultiArchUserRef};
20use starnix_uapi::{tid_t, uapi};
21
22use starnix_logging::track_stub;
23use starnix_sync::{Locked, Unlocked};
24use starnix_syscalls::SyscallResult;
25use starnix_types::time::{duration_from_timespec, timeval_from_duration};
26use starnix_uapi::errors::{EINTR, ETIMEDOUT, Errno, ErrnoResultExt};
27use starnix_uapi::open_flags::OpenFlags;
28use starnix_uapi::signals::{SigSet, Signal, UNBLOCKABLE_SIGNALS, UncheckedSignal};
29use starnix_uapi::user_address::{UserAddress, UserRef};
30use starnix_uapi::{
31 __WALL, __WCLONE, P_ALL, P_PGID, P_PID, P_PIDFD, SFD_CLOEXEC, SFD_NONBLOCK, SI_MAX_SIZE,
32 SI_TKILL, SIG_BLOCK, SIG_SETMASK, SIG_UNBLOCK, SS_AUTODISARM, SS_DISABLE, SS_ONSTACK,
33 WCONTINUED, WEXITED, WNOHANG, WNOWAIT, WSTOPPED, WUNTRACED, errno, error, pid_t, rusage,
34 sigaltstack,
35};
36use static_assertions::const_assert_eq;
37use zerocopy::{FromBytes, Immutable, IntoBytes};
38
/// `SI_MAX_SIZE` widened to `usize` for array sizing and slicing.
const SI_MAX_SIZE_AS_USIZE: usize = SI_MAX_SIZE as usize;

/// User pointer to an `rusage`, supporting both 64-bit and arch32 layouts.
pub type RUsagePtr = MultiArchUserRef<uapi::rusage, uapi::arch32::rusage>;
/// User pointer to a sigaction using the arch32 `sigaction64_t` layout.
type SigAction64Ptr = MultiArchUserRef<uapi::sigaction_t, uapi::arch32::sigaction64_t>;
/// User pointer to a sigaction using the legacy arch32 `sigaction_t` layout.
type SigActionPtr = MultiArchUserRef<uapi::sigaction_t, uapi::arch32::sigaction_t>;
45
46pub fn sys_rt_sigaction(
61 _locked: &mut Locked<Unlocked>,
62 current_task: &CurrentTask,
63 signum: UncheckedSignal,
64 user_action: SigAction64Ptr,
65 user_old_action: SigAction64Ptr,
66 sigset_size: usize,
67) -> Result<(), Errno> {
68 if user_action.is_arch32() && sigset_size == std::mem::size_of::<uapi::arch32::sigset_t>() {
69 let user_action = SigActionPtr::from_32(user_action.addr().into());
70 let user_old_action = SigActionPtr::from_32(user_old_action.addr().into());
71 return rt_sigaction(current_task, signum, user_action, user_old_action);
72 }
73
74 if sigset_size != std::mem::size_of::<uapi::sigset_t>() {
75 return error!(EINVAL);
76 }
77 rt_sigaction(current_task, signum, user_action, user_old_action)
78}
79
80fn rt_sigaction<Arch32SigAction>(
81 current_task: &CurrentTask,
82 signum: UncheckedSignal,
83 user_action: MultiArchUserRef<uapi::sigaction_t, Arch32SigAction>,
84 user_old_action: MultiArchUserRef<uapi::sigaction_t, Arch32SigAction>,
85) -> Result<(), Errno>
86where
87 Arch32SigAction:
88 IntoBytes + FromBytes + Immutable + TryFrom<uapi::sigaction_t> + TryInto<uapi::sigaction_t>,
89{
90 let signal = Signal::try_from(signum)?;
91
92 let new_signal_action = if !user_action.is_null() {
93 if signal.is_unblockable() {
97 return error!(EINVAL);
98 }
99
100 let signal_action = current_task.read_multi_arch_object(user_action)?;
101 Some(signal_action)
102 } else {
103 None
104 };
105
106 let signal_actions = ¤t_task.thread_group().signal_actions;
107 let old_action = if let Some(new_signal_action) = new_signal_action {
108 signal_actions.set(signal, new_signal_action)
109 } else {
110 signal_actions.get(signal)
111 };
112
113 if !user_old_action.is_null() {
114 current_task.write_multi_arch_object(user_old_action, old_action)?;
115 }
116
117 Ok(())
118}
119
120pub fn sys_rt_sigpending(
130 _locked: &mut Locked<Unlocked>,
131 current_task: &CurrentTask,
132 set: UserRef<SigSet>,
133 sigset_size: usize,
134) -> Result<(), Errno> {
135 if sigset_size != std::mem::size_of::<SigSet>() {
136 return error!(EINVAL);
137 }
138
139 let signals = current_task.read().pending_signals();
140 current_task.write_object(set, &signals)?;
141 Ok(())
142}
143
144pub fn sys_rt_sigprocmask(
157 _locked: &mut Locked<Unlocked>,
158 current_task: &CurrentTask,
159 how: u32,
160 user_set: UserRef<SigSet>,
161 user_old_set: UserRef<SigSet>,
162 sigset_size: usize,
163) -> Result<(), Errno> {
164 if sigset_size != std::mem::size_of::<SigSet>() {
165 return error!(EINVAL);
166 }
167 match how {
168 SIG_BLOCK | SIG_UNBLOCK | SIG_SETMASK => (),
169 _ => return error!(EINVAL),
170 };
171
172 let mut new_mask = SigSet::default();
175 if !user_set.is_null() {
176 new_mask = current_task.read_object(user_set)?;
177 }
178
179 let mut state = current_task.write();
180 let signal_mask = state.signal_mask();
181 if !user_old_set.is_null() {
183 current_task.write_object(user_old_set, &signal_mask)?;
184 }
185
186 if user_set.is_null() {
188 return Ok(());
189 }
190
191 let signal_mask = match how {
192 SIG_BLOCK => signal_mask | new_mask,
193 SIG_UNBLOCK => signal_mask & !new_mask,
194 SIG_SETMASK => new_mask,
195 _ => return error!(EINVAL),
197 };
198 state.set_signal_mask(signal_mask);
199
200 Ok(())
201}
202
/// User pointer to a `sigaltstack`, supporting both 64-bit and arch32 layouts.
type SigAltStackPtr = MultiArchUserRef<uapi::sigaltstack, uapi::arch32::sigaltstack>;
204
/// Implements `sigaltstack`: queries and/or replaces the task's alternate
/// signal stack.
pub fn sys_sigaltstack(
    _locked: &mut Locked<Unlocked>,
    current_task: &CurrentTask,
    user_ss: SigAltStackPtr,
    user_old_ss: SigAltStackPtr,
) -> Result<(), Errno> {
    let stack_pointer_register = current_task.thread_state.registers.stack_pointer_register();
    let mut state = current_task.write();
    let on_signal_stack = state.on_signal_stack(stack_pointer_register);

    let mut ss = sigaltstack::default();
    if !user_ss.is_null() {
        // The alternate stack may not be changed while executing on it.
        if on_signal_stack {
            return error!(EPERM);
        }
        ss = current_task.read_multi_arch_object(user_ss)?;
        // Only SS_AUTODISARM and SS_DISABLE may be set by the caller.
        if (ss.ss_flags & !((SS_AUTODISARM | SS_DISABLE) as i32)) != 0 {
            return error!(EINVAL);
        }
        // An enabled stack must be at least MINSIGSTKSZ (arch-dependent).
        let min_stack_size =
            if current_task.is_arch32() { uapi::arch32::MINSIGSTKSZ } else { uapi::MINSIGSTKSZ };
        if ss.ss_flags & (SS_DISABLE as i32) == 0 && ss.ss_size < min_stack_size as u64 {
            return error!(ENOMEM);
        }
    }

    if !user_old_ss.is_null() {
        // Report the previous stack: no stack reads back as SS_DISABLE, and
        // SS_ONSTACK is reported while the task is currently executing on it.
        let mut old_ss = match state.sigaltstack() {
            Some(old_ss) => old_ss,
            None => sigaltstack { ss_flags: SS_DISABLE as i32, ..sigaltstack::default() },
        };
        if on_signal_stack {
            old_ss.ss_flags = SS_ONSTACK as i32;
        }
        current_task.write_multi_arch_object(user_old_ss, old_ss)?;
    }

    // Install (or clear) the new stack only after the old one was reported.
    if !user_ss.is_null() {
        if ss.ss_flags & (SS_DISABLE as i32) != 0 {
            state.set_sigaltstack(None);
        } else {
            state.set_sigaltstack(Some(ss));
        }
    }

    Ok(())
}
260
/// Implements `rt_sigsuspend`: temporarily replaces the task's signal mask
/// with `user_mask` and waits until a signal wakes the task.
pub fn sys_rt_sigsuspend(
    locked: &mut Locked<Unlocked>,
    current_task: &mut CurrentTask,
    user_mask: UserRef<SigSet>,
    sigset_size: usize,
) -> Result<(), Errno> {
    if sigset_size != std::mem::size_of::<SigSet>() {
        return error!(EINVAL);
    }
    let mask = current_task.read_object(user_mask)?;

    let waiter = Waiter::new();
    // Wait with the temporary mask installed; an interrupted wait is
    // reported as ERESTARTNOHAND rather than plain EINTR.
    current_task
        .wait_with_temporary_mask(locked, mask, |locked, current_task| {
            waiter.wait(locked, current_task)
        })
        .map_eintr(|| errno!(ERESTARTNOHAND))
}
293
/// Implements `rt_sigtimedwait`: waits until one of the signals in
/// `set_addr` becomes pending (or the timeout expires), dequeues it, and
/// optionally writes its siginfo to `siginfo_addr`.
pub fn sys_rt_sigtimedwait(
    locked: &mut Locked<Unlocked>,
    current_task: &mut CurrentTask,
    set_addr: UserRef<SigSet>,
    siginfo_addr: MultiArchUserRef<uapi::siginfo_t, uapi::arch32::siginfo_t>,
    timeout_addr: MultiArchUserRef<uapi::timespec, uapi::arch32::timespec>,
    sigset_size: usize,
) -> Result<Signal, Errno> {
    if sigset_size != std::mem::size_of::<SigSet>() {
        return error!(EINVAL);
    }

    let set = current_task.read_object(set_addr)?;
    // Unblockable signals are removed from the waited-for set.
    let unblock = set & !UNBLOCKABLE_SIGNALS;
    // A null timeout means wait forever.
    let deadline = if timeout_addr.is_null() {
        zx::MonotonicInstant::INFINITE
    } else {
        let timeout = current_task.read_multi_arch_object(timeout_addr)?;
        zx::MonotonicInstant::after(duration_from_timespec(timeout)?)
    };

    let signal_info = loop {
        let waiter;

        {
            let mut task_state = current_task.write();
            // Fast path: one of the requested signals is already pending.
            if let Some(signal) = task_state.take_signal_with_mask(!unblock) {
                break signal;
            }

            // Register for a wakeup while still holding the state lock so a
            // signal arriving between the check and the wait is not missed.
            waiter = Waiter::new();
            task_state.wait_on_signal(&waiter);
        }

        // Wait with the requested signals temporarily unblocked.
        let tmp_mask = current_task.read().signal_mask() & !unblock;

        let waiter_result =
            current_task.wait_with_temporary_mask(locked, tmp_mask, |locked, current_task| {
                waiter.wait_until(locked, current_task, deadline)
            });

        current_task.write().restore_signal_mask();

        if let Err(e) = waiter_result {
            if e == EINTR {
                // Interrupted: check whether one of the waited-for signals
                // was the cause; if so, return it instead of EINTR.
                if let Some(signal) = current_task.write().take_signal_with_mask(!unblock) {
                    break signal;
                }
            } else if e == ETIMEDOUT {
                // Timeout expiry is reported as EAGAIN.
                return error!(EAGAIN);
            }

            return Err(e);
        }
    };

    if !siginfo_addr.is_null() {
        signal_info.write(current_task, siginfo_addr)?;
    }

    Ok(signal_info.signal)
}
377
378pub fn sys_signalfd4(
392 locked: &mut Locked<Unlocked>,
393 current_task: &CurrentTask,
394 fd: FdNumber,
395 mask_addr: UserRef<SigSet>,
396 mask_size: usize,
397 flags: u32,
398) -> Result<FdNumber, Errno> {
399 if flags & !(SFD_CLOEXEC | SFD_NONBLOCK) != 0 {
400 return error!(EINVAL);
401 }
402 if mask_size != std::mem::size_of::<SigSet>() {
403 return error!(EINVAL);
404 }
405 let mask = current_task.read_object(mask_addr)?;
406
407 if fd.raw() != -1 {
408 let file = current_task.files.get(fd)?;
409 let file = file.downcast_file::<SignalFd>().ok_or_else(|| errno!(EINVAL))?;
410 file.set_mask(mask);
411 Ok(fd)
412 } else {
413 let signalfd = SignalFd::new_file(locked, current_task, mask, flags);
414 let flags = if flags & SFD_CLOEXEC != 0 { FdFlags::CLOEXEC } else { FdFlags::empty() };
415 let fd = current_task.add_file(locked, signalfd, flags)?;
416 Ok(fd)
417 }
418}
419
420#[track_caller]
421fn send_unchecked_signal<L>(
422 locked: &mut Locked<L>,
423 current_task: &CurrentTask,
424 target: &Task,
425 unchecked_signal: UncheckedSignal,
426 si_code: i32,
427) -> Result<(), Errno>
428where
429 L: LockBefore<ThreadGroupLimits>,
430{
431 current_task.can_signal(&target, unchecked_signal)?;
432
433 if unchecked_signal.is_zero() {
435 return Ok(());
436 }
437
438 let signal = Signal::try_from(unchecked_signal)?;
439 security::check_signal_access(current_task, &target, signal)?;
440
441 send_signal(
442 locked,
443 target,
444 SignalInfo {
445 code: si_code,
446 detail: SignalDetail::Kill {
447 pid: current_task.thread_group().leader,
448 uid: current_task.with_current_creds(|creds| creds.uid),
449 },
450 ..SignalInfo::default(signal)
451 },
452 )
453}
454
/// Sends a signal whose siginfo is supplied by userspace (at `siginfo_ref`)
/// to `target`, after performing permission and validity checks.
#[track_caller]
fn send_unchecked_signal_info<L>(
    locked: &mut Locked<L>,
    current_task: &CurrentTask,
    target: &Task,
    unchecked_signal: UncheckedSignal,
    siginfo_ref: UserAddress,
) -> Result<(), Errno>
where
    L: LockBefore<ThreadGroupLimits>,
{
    current_task.can_signal(&target, unchecked_signal)?;

    if unchecked_signal.is_zero() {
        // Signal 0 delivers nothing, but the siginfo buffer must still be
        // readable.
        current_task.read_memory_to_array::<SI_MAX_SIZE_AS_USIZE>(siginfo_ref)?;
        return Ok(());
    }

    let signal = Signal::try_from(unchecked_signal)?;
    security::check_signal_access(current_task, &target, signal)?;

    let siginfo = read_siginfo(current_task, signal, siginfo_ref)?;
    // When signalling a different process, the caller may not forge
    // kernel-generated codes (>= 0) or SI_TKILL.
    if target.get_pid() != current_task.get_pid() && (siginfo.code >= 0 || siginfo.code == SI_TKILL)
    {
        return error!(EINVAL);
    }

    send_signal(locked, &target, siginfo)
}
486
/// Implements `kill`, dispatching on the `pid` argument: `pid > 0` targets
/// that process, `pid == -1` targets all other processes except init, and
/// `pid <= 0` targets a process group.
pub fn sys_kill(
    locked: &mut Locked<Unlocked>,
    current_task: &CurrentTask,
    pid: pid_t,
    unchecked_signal: UncheckedSignal,
) -> Result<(), Errno> {
    let pids = current_task.kernel().pids.read();
    match pid {
        pid if pid > 0 => {
            // Signal the specific process identified by `pid`.
            let target_thread_group = {
                match pids.get_process(pid) {
                    Some(ProcessEntryRef::Process(process)) => process,

                    // Signalling a zombie is a no-op, not an error.
                    Some(ProcessEntryRef::Zombie(_zombie)) => return Ok(()),

                    None => {
                        // The id may name a thread rather than a process;
                        // signal that thread's whole thread group.
                        let weak_task = pids.get_task(pid);
                        let task = Task::from_weak(&weak_task)?;
                        task.thread_group().clone()
                    }
                }
            };

            target_thread_group.send_signal_unchecked(current_task, unchecked_signal)?;
        }
        pid if pid == -1 => {
            // Signal every thread group except the caller's own and the one
            // led by pid 1 (init).
            let thread_groups = pids.get_thread_groups();
            signal_thread_groups(
                current_task,
                unchecked_signal,
                thread_groups.into_iter().filter(|thread_group| {
                    if *current_task.thread_group() == *thread_group {
                        return false;
                    }
                    if thread_group.leader == 1 {
                        return false;
                    }
                    true
                }),
            )?;
        }
        _ => {
            // pid == 0 targets the caller's own process group; pid < -1
            // targets the process group with id -pid.
            let process_group_id = match pid {
                0 => current_task.thread_group().read().process_group.leader,
                _ => negate_pid(pid)?,
            };

            let process_group = pids.get_process_group(process_group_id);
            let thread_groups = process_group
                .iter()
                .flat_map(|pg| pg.read(locked).thread_groups().collect::<Vec<_>>());
            signal_thread_groups(current_task, unchecked_signal, thread_groups)?;
        }
    };

    Ok(())
}
570
571fn verify_tgid_for_task(
572 task: &Task,
573 tgid: pid_t,
574 pids: &RwLockReadGuard<'_, PidTable>,
575) -> Result<(), Errno> {
576 let thread_group = match pids.get_process(tgid) {
577 Some(ProcessEntryRef::Process(proc)) => proc,
578 Some(ProcessEntryRef::Zombie(_)) => return error!(EINVAL),
579 None => return error!(ESRCH),
580 };
581 if *task.thread_group() != thread_group {
582 return error!(EINVAL);
583 } else {
584 Ok(())
585 }
586}
587
588pub fn sys_tkill(
600 locked: &mut Locked<Unlocked>,
601 current_task: &CurrentTask,
602 tid: tid_t,
603 unchecked_signal: UncheckedSignal,
604) -> Result<(), Errno> {
605 if tid <= 0 {
607 return error!(EINVAL);
608 }
609 let thread_weak = current_task.get_task(tid);
610 let thread = Task::from_weak(&thread_weak)?;
611 send_unchecked_signal(locked, current_task, &thread, unchecked_signal, SI_TKILL)
612}
613
614pub fn sys_tgkill(
625 locked: &mut Locked<Unlocked>,
626 current_task: &CurrentTask,
627 tgid: pid_t,
628 tid: tid_t,
629 unchecked_signal: UncheckedSignal,
630) -> Result<(), Errno> {
631 if tgid <= 0 || tid <= 0 {
633 return error!(EINVAL);
634 }
635 let pids = current_task.kernel().pids.read();
636
637 let weak_target = pids.get_task(tid);
638 let thread = Task::from_weak(&weak_target)?;
639 verify_tgid_for_task(&thread, tgid, &pids)?;
640
641 send_unchecked_signal(locked, current_task, &thread, unchecked_signal, SI_TKILL)
642}
643
/// Implements `rt_sigreturn`: restores the task state saved by the signal
/// machinery via `restore_from_signal_handler`, then reports the (restored)
/// return-register value as the syscall result so the interrupted code
/// resumes with its original value.
pub fn sys_rt_sigreturn(
    _locked: &mut Locked<Unlocked>,
    current_task: &mut CurrentTask,
) -> Result<SyscallResult, Errno> {
    restore_from_signal_handler(current_task)?;
    Ok(current_task.thread_state.registers.return_register().into())
}
659
660#[track_caller]
661
662pub fn read_siginfo(
672 current_task: &CurrentTask,
673 signal: Signal,
674 siginfo_ref: UserAddress,
675) -> Result<SignalInfo, Errno> {
676 let siginfo_mem = current_task.read_memory_to_array::<SI_MAX_SIZE_AS_USIZE>(siginfo_ref)?;
677 let header = SignalInfoHeader::read_from_bytes(&siginfo_mem[..SI_HEADER_SIZE]).unwrap();
678
679 if header.signo != 0 && header.signo != signal.number() {
680 return error!(EINVAL);
681 }
682
683 let mut bytes = [0u8; SI_MAX_SIZE as usize - SI_HEADER_SIZE];
684 bytes.copy_from_slice(&siginfo_mem[SI_HEADER_SIZE..SI_MAX_SIZE as usize]);
685 let details = SignalDetail::Raw { data: bytes };
686
687 Ok(SignalInfo {
688 signal,
689 errno: header.errno,
690 code: header.code,
691 detail: details,
692 force: false,
693 source: SignalSource::capture(),
694 })
695}
696
697pub fn sys_rt_sigqueueinfo(
707 _locked: &mut Locked<Unlocked>,
708 current_task: &CurrentTask,
709 tgid: pid_t,
710 unchecked_signal: UncheckedSignal,
711 siginfo_ref: UserAddress,
712) -> Result<(), Errno> {
713 let weak_task = current_task.kernel().pids.read().get_task(tgid);
714 let task = &Task::from_weak(&weak_task)?;
715 task.thread_group().send_signal_unchecked_with_info(current_task, unchecked_signal, siginfo_ref)
716}
717
718pub fn sys_rt_tgsigqueueinfo(
729 locked: &mut Locked<Unlocked>,
730 current_task: &CurrentTask,
731 tgid: pid_t,
732 tid: tid_t,
733 unchecked_signal: UncheckedSignal,
734 siginfo_ref: UserAddress,
735) -> Result<(), Errno> {
736 let pids = current_task.kernel().pids.read();
737
738 let thread_weak = pids.get_task(tid);
739 let task = Task::from_weak(&thread_weak)?;
740
741 verify_tgid_for_task(&task, tgid, &pids)?;
742 send_unchecked_signal_info(locked, current_task, &task, unchecked_signal, siginfo_ref)
743}
744
745pub fn sys_pidfd_send_signal(
758 _locked: &mut Locked<Unlocked>,
759 current_task: &CurrentTask,
760 pidfd: FdNumber,
761 unchecked_signal: UncheckedSignal,
762 siginfo_ref: UserAddress,
763 flags: u32,
764) -> Result<(), Errno> {
765 if flags != 0 {
766 return error!(EINVAL);
767 }
768
769 let file = current_task.files.get(pidfd)?;
770 let target = file.as_thread_group_key()?;
771 let target = target.upgrade().ok_or_else(|| errno!(ESRCH))?;
772
773 if siginfo_ref.is_null() {
774 target.send_signal_unchecked(current_task, unchecked_signal)
775 } else {
776 target.send_signal_unchecked_with_info(current_task, unchecked_signal, siginfo_ref)
777 }
778}
779
780#[track_caller]
791fn signal_thread_groups<F>(
792 current_task: &CurrentTask,
793 unchecked_signal: UncheckedSignal,
794 thread_groups: F,
795) -> Result<(), Errno>
796where
797 F: IntoIterator<Item: AsRef<ThreadGroup>>,
798{
799 let mut last_error = None;
800 let mut sent_signal = false;
801
802 for thread_group in thread_groups.into_iter() {
805 match thread_group.as_ref().send_signal_unchecked(current_task, unchecked_signal) {
806 Ok(_) => sent_signal = true,
807 Err(errno) => last_error = Some(errno),
808 }
809 }
810
811 if sent_signal { Ok(()) } else { Err(last_error.unwrap_or_else(|| errno!(ESRCH))) }
812}
813
/// Parsed form of the option bit-flags accepted by the wait* family of
/// syscalls.
#[derive(Debug)]
pub struct WaitingOptions {
    /// Wait for children that have exited (WEXITED).
    pub wait_for_exited: bool,
    /// Wait for children stopped by a signal (WSTOPPED / WUNTRACED).
    pub wait_for_stopped: bool,
    /// Wait for children that have been continued (WCONTINUED).
    pub wait_for_continued: bool,
    /// Block until a matching child is available (WNOHANG was absent).
    pub block: bool,
    /// Leave the child in its waitable state (WNOWAIT).
    pub keep_waitable_state: bool,
    /// Wait for all children regardless of type (__WALL).
    pub wait_for_all: bool,
    /// Wait for "clone" children only (__WCLONE).
    pub wait_for_clone: bool,
}
832
833impl WaitingOptions {
834 fn new(options: u32) -> Self {
835 const_assert_eq!(WUNTRACED, WSTOPPED);
836 Self {
837 wait_for_exited: options & WEXITED > 0,
838 wait_for_stopped: options & WSTOPPED > 0,
839 wait_for_continued: options & WCONTINUED > 0,
840 block: options & WNOHANG == 0,
841 keep_waitable_state: options & WNOWAIT > 0,
842 wait_for_all: options & __WALL > 0,
843 wait_for_clone: options & __WCLONE > 0,
844 }
845 }
846
847 pub fn new_for_waitid(options: u32) -> Result<Self, Errno> {
849 if options & !(__WCLONE | __WALL | WNOHANG | WNOWAIT | WSTOPPED | WEXITED | WCONTINUED) != 0
850 {
851 track_stub!(TODO("https://fxbug.dev/322874788"), "waitid options", options);
852 return error!(EINVAL);
853 }
854 if options & (WEXITED | WSTOPPED | WCONTINUED) == 0 {
855 return error!(EINVAL);
856 }
857 Ok(Self::new(options))
858 }
859
860 pub fn new_for_wait4(options: u32) -> Result<Self, Errno> {
862 if options & !(__WCLONE | __WALL | WNOHANG | WUNTRACED | WCONTINUED) != 0 {
863 track_stub!(TODO("https://fxbug.dev/322874017"), "wait4 options", options);
864 return error!(EINVAL);
865 }
866 Ok(Self::new(options | WEXITED))
867 }
868}
869
/// Waits for a child or ptracee matching `selector` to change state.
///
/// Returns `Ok(Some(..))` with the wait result, `Ok(None)` when `options`
/// requests a non-blocking wait and nothing is ready, and `ECHILD` when
/// there is no child or tracee that could ever satisfy the wait.
fn wait_on_pid(
    locked: &mut Locked<Unlocked>,
    current_task: &CurrentTask,
    selector: &ProcessSelector,
    options: &WaitingOptions,
) -> Result<Option<WaitResult>, Errno> {
    let waiter = Waiter::new();
    loop {
        {
            let mut pids = current_task.kernel().pids.write();
            // Already-waitable ptracees are checked first.
            if let Some(tracee) =
                current_task.thread_group().get_waitable_ptracee(selector, options, &mut pids)
            {
                return Ok(Some(tracee));
            }
            {
                let mut thread_group = current_task.thread_group().write();

                // Scan live tracees, registering for their wakeups as we go
                // so state changes after this point still wake us.
                let mut has_waitable_tracee = false;
                let mut has_any_tracee = false;
                current_task.thread_group().get_ptracees_and(
                    selector,
                    &pids,
                    &mut |task: &Task, task_state: &TaskMutableState| {
                        if let Some(ptrace) = &task_state.ptrace {
                            has_any_tracee = true;
                            ptrace.tracer_waiters().wait_async(&waiter);
                            if ptrace.is_waitable(task.load_stopped(), options) {
                                has_waitable_tracee = true;
                            }
                        }
                    },
                );
                // A tracee is (or became) waitable: retry the lookup at the
                // top of the loop instead of sleeping.
                if has_waitable_tracee
                    || thread_group.zombie_ptracees.has_zombie_matching(&selector)
                {
                    continue;
                }
                match thread_group.get_waitable_child(selector, options, &mut pids) {
                    WaitableChildResult::ReadyNow(child) => {
                        return Ok(Some(child));
                    }
                    WaitableChildResult::ShouldWait => (),
                    WaitableChildResult::NoneFound => {
                        // Nothing to ever wait for: no matching child and no
                        // tracee at all.
                        if !has_any_tracee {
                            return error!(ECHILD);
                        }
                    }
                }
                // Register for child-status changes while still holding the
                // thread group lock, so a change between here and the wait
                // below is not missed.
                thread_group
                    .lifecycle_waiters
                    .wait_async_value(&waiter, ThreadGroupLifecycleWaitValue::ChildStatus);
            }
        }

        if !options.block {
            return Ok(None);
        }
        // An interrupted wait surfaces as ERESTARTSYS so the syscall can be
        // restarted.
        waiter.wait(locked, current_task).map_eintr(|| errno!(ERESTARTSYS))?;
    }
}
946
/// Implements the `waitid` syscall.
pub fn sys_waitid(
    locked: &mut Locked<Unlocked>,
    current_task: &CurrentTask,
    id_type: u32,
    id: i32,
    user_info: MultiArchUserRef<uapi::siginfo_t, uapi::arch32::siginfo_t>,
    options: u32,
    user_rusage: RUsagePtr,
) -> Result<(), Errno> {
    let mut waiting_options = WaitingOptions::new_for_waitid(options)?;

    // Translate the (id_type, id) pair into a process selector.
    let task_selector = match id_type {
        P_PID => ProcessSelector::Pid(id),
        P_ALL => ProcessSelector::Any,
        // id == 0 selects the caller's own process group.
        P_PGID => ProcessSelector::Pgid(if id == 0 {
            current_task.thread_group().read().process_group.leader
        } else {
            id
        }),
        P_PIDFD => {
            // A pidfd opened with O_NONBLOCK forces a non-blocking wait.
            let fd = FdNumber::from_raw(id);
            let file = current_task.files.get(fd)?;
            if file.flags().contains(OpenFlags::NONBLOCK) {
                waiting_options.block = false;
            }
            ProcessSelector::Process(file.as_thread_group_key()?)
        }
        _ => return error!(EINVAL),
    };

    if let Some(waitable_process) =
        wait_on_pid(locked, current_task, &task_selector, &waiting_options)?
    {
        if !user_rusage.is_null() {
            // Only user/system CPU time is populated for now (see stub).
            let usage = rusage {
                ru_utime: timeval_from_duration(waitable_process.time_stats.user_time),
                ru_stime: timeval_from_duration(waitable_process.time_stats.system_time),
                ..Default::default()
            };

            track_stub!(TODO("https://fxbug.dev/322874712"), "real rusage from waitid");
            current_task.write_multi_arch_object(user_rusage, usage)?;
        }

        if !user_info.is_null() {
            let siginfo = waitable_process.as_signal_info();
            siginfo.write(current_task, user_info)?;
        }
    } else if id_type == P_PIDFD {
        // A non-blocking pidfd wait with nothing ready reports EAGAIN.
        return error!(EAGAIN);
    }

    Ok(())
}
1024
/// Implements the `wait4` syscall.
///
/// Returns the pid of the reaped process, or 0 for a non-blocking wait that
/// found nothing ready.
pub fn sys_wait4(
    locked: &mut Locked<Unlocked>,
    current_task: &CurrentTask,
    raw_selector: pid_t,
    user_wstatus: UserRef<i32>,
    options: u32,
    user_rusage: RUsagePtr,
) -> Result<pid_t, Errno> {
    let waiting_options = WaitingOptions::new_for_wait4(options)?;

    // Decode the pid-style selector: 0 = caller's process group, -1 = any
    // child, > 0 = a specific pid, < -1 = the process group -raw_selector.
    let selector = if raw_selector == 0 {
        ProcessSelector::Pgid(current_task.thread_group().read().process_group.leader)
    } else if raw_selector == -1 {
        ProcessSelector::Any
    } else if raw_selector > 0 {
        ProcessSelector::Pid(raw_selector)
    } else if raw_selector < -1 {
        ProcessSelector::Pgid(negate_pid(raw_selector)?)
    } else {
        // NOTE(review): the arms above cover every pid_t value, so this
        // branch looks unreachable; kept as-is for the tracking stub.
        track_stub!(
            TODO("https://fxbug.dev/322874213"),
            "wait4 with selector",
            raw_selector as u64
        );
        return error!(ENOSYS);
    };

    if let Some(waitable_process) = wait_on_pid(locked, current_task, &selector, &waiting_options)?
    {
        let status = waitable_process.exit_info.status.wait_status();

        if !user_rusage.is_null() {
            track_stub!(TODO("https://fxbug.dev/322874768"), "real rusage from wait4");
            // Only user/system CPU time is populated for now.
            let usage = rusage {
                ru_utime: timeval_from_duration(waitable_process.time_stats.user_time),
                ru_stime: timeval_from_duration(waitable_process.time_stats.system_time),
                ..Default::default()
            };
            current_task.write_multi_arch_object(user_rusage, usage)?;
        }

        if !user_wstatus.is_null() {
            current_task.write_object(user_wstatus, &status)?;
        }

        Ok(waitable_process.pid)
    } else {
        Ok(0)
    }
}
1088
1089fn negate_pid(pid: pid_t) -> Result<pid_t, Errno> {
1091 pid.checked_neg().ok_or_else(|| errno!(ESRCH))
1092}
1093
#[cfg(target_arch = "aarch64")]
mod arch32 {
    //! arch32 (32-bit compatibility) entry points for the signal syscalls.
    use crate::task::CurrentTask;
    use crate::vfs::FdNumber;
    use starnix_sync::{Locked, Unlocked};
    use starnix_uapi::errors::Errno;
    use starnix_uapi::signals::SigSet;
    use starnix_uapi::user_address::UserRef;

    /// `signalfd` is `signalfd4` with no flags.
    pub fn sys_arch32_signalfd(
        locked: &mut Locked<Unlocked>,
        current_task: &CurrentTask,
        fd: FdNumber,
        mask_addr: UserRef<SigSet>,
        mask_size: usize,
    ) -> Result<FdNumber, Errno> {
        super::sys_signalfd4(locked, current_task, fd, mask_addr, mask_size, 0)
    }

    // These arch32 syscalls share their implementation with the 64-bit
    // variants and are simply re-exported under the arch32 names.
    pub use super::{
        sys_pidfd_send_signal as sys_arch32_pidfd_send_signal,
        sys_rt_sigaction as sys_arch32_rt_sigaction,
        sys_rt_sigqueueinfo as sys_arch32_rt_sigqueueinfo,
        sys_rt_sigtimedwait as sys_arch32_rt_sigtimedwait,
        sys_rt_tgsigqueueinfo as sys_arch32_rt_tgsigqueueinfo,
        sys_sigaltstack as sys_arch32_sigaltstack, sys_signalfd4 as sys_arch32_signalfd4,
        sys_waitid as sys_arch32_waitid,
    };
}
1138
// Surface the arch32 wrappers at this module's top level on aarch64.
#[cfg(target_arch = "aarch64")]
pub use arch32::*;
1141
1142#[cfg(test)]
1143mod tests {
1144 use super::*;
1145 use crate::mm::{MemoryAccessor, PAGE_SIZE};
1146 use crate::signals::send_standard_signal;
1147 use crate::signals::testing::dequeue_signal_for_test;
1148 use crate::task::dynamic_thread_spawner::SpawnRequestBuilder;
1149 use crate::task::{EventHandler, ExitStatus, ProcessExitInfo};
1150 use crate::testing::*;
1151 use starnix_sync::Mutex;
1152 use starnix_types::math::round_up_to_system_page_size;
1153 use starnix_uapi::auth::Credentials;
1154 use starnix_uapi::errors::ERESTARTSYS;
1155 use starnix_uapi::signals::{
1156 SIGCHLD, SIGHUP, SIGINT, SIGIO, SIGKILL, SIGRTMIN, SIGSEGV, SIGSTOP, SIGTERM, SIGTRAP,
1157 SIGUSR1,
1158 };
1159 use starnix_uapi::vfs::FdEvents;
1160 use starnix_uapi::{SI_QUEUE, SI_USER, sigaction_t, uaddr, uid_t};
1161 use std::collections::VecDeque;
1162 use std::sync::Arc;
1163 use zerocopy::IntoBytes;
1164
1165 #[cfg(target_arch = "x86_64")]
1166 #[::fuchsia::test]
1167 async fn test_sigaltstack() {
1168 spawn_kernel_and_run(async |locked, current_task| {
1169 let addr = map_memory(locked, ¤t_task, UserAddress::default(), *PAGE_SIZE);
1170
1171 let user_ss = UserRef::<sigaltstack>::new(addr);
1172 let nullptr = UserRef::<sigaltstack>::default();
1173
1174 sys_sigaltstack(locked, ¤t_task, nullptr.into(), user_ss.into())
1176 .expect("failed to call sigaltstack");
1177 let mut ss = current_task.read_object(user_ss).expect("failed to read struct");
1178 assert!(ss.ss_flags & (SS_DISABLE as i32) != 0);
1179
1180 ss.ss_sp = uaddr { addr: 0x7FFFF };
1182 ss.ss_size = 0x1000;
1183 ss.ss_flags = SS_AUTODISARM as i32;
1184 current_task.write_object(user_ss, &ss).expect("failed to write struct");
1185 sys_sigaltstack(locked, ¤t_task, user_ss.into(), nullptr.into())
1186 .expect("failed to call sigaltstack");
1187 current_task
1188 .write_memory(addr, &[0u8; std::mem::size_of::<sigaltstack>()])
1189 .expect("failed to clear struct");
1190 sys_sigaltstack(locked, ¤t_task, nullptr.into(), user_ss.into())
1191 .expect("failed to call sigaltstack");
1192 let another_ss = current_task.read_object(user_ss).expect("failed to read struct");
1193 assert_eq!(ss.as_bytes(), another_ss.as_bytes());
1194
1195 let ss = sigaltstack { ss_flags: SS_DISABLE as i32, ..sigaltstack::default() };
1197 current_task.write_object(user_ss, &ss).expect("failed to write struct");
1198 sys_sigaltstack(locked, ¤t_task, user_ss.into(), nullptr.into())
1199 .expect("failed to call sigaltstack");
1200 current_task
1201 .write_memory(addr, &[0u8; std::mem::size_of::<sigaltstack>()])
1202 .expect("failed to clear struct");
1203 sys_sigaltstack(locked, ¤t_task, nullptr.into(), user_ss.into())
1204 .expect("failed to call sigaltstack");
1205 let ss = current_task.read_object(user_ss).expect("failed to read struct");
1206 assert!(ss.ss_flags & (SS_DISABLE as i32) != 0);
1207 })
1208 .await;
1209 }
1210
1211 #[::fuchsia::test]
1212 async fn test_sigaltstack_invalid_size() {
1213 spawn_kernel_and_run(async |locked, current_task| {
1214 let addr = map_memory(locked, ¤t_task, UserAddress::default(), *PAGE_SIZE);
1215
1216 let user_ss = UserRef::<sigaltstack>::new(addr);
1217 let nullptr = UserRef::<sigaltstack>::default();
1218
1219 sys_sigaltstack(locked, ¤t_task, nullptr.into(), user_ss.into())
1221 .expect("failed to call sigaltstack");
1222 let mut ss = current_task.read_object(user_ss).expect("failed to read struct");
1223 assert!(ss.ss_flags & (SS_DISABLE as i32) != 0);
1224
1225 let sigaltstack_addr_size = round_up_to_system_page_size(uapi::MINSIGSTKSZ as usize)
1227 .expect("failed to round up");
1228 let sigaltstack_addr = map_memory(
1229 locked,
1230 ¤t_task,
1231 UserAddress::default(),
1232 sigaltstack_addr_size as u64,
1233 );
1234 ss.ss_sp = sigaltstack_addr.into();
1235 ss.ss_flags = 0;
1236 ss.ss_size = uapi::MINSIGSTKSZ as u64 - 1;
1237 current_task.write_object(user_ss, &ss).expect("failed to write struct");
1238 assert_eq!(
1239 sys_sigaltstack(locked, ¤t_task, user_ss.into(), nullptr.into()),
1240 error!(ENOMEM)
1241 );
1242 })
1243 .await;
1244 }
1245
1246 #[cfg(target_arch = "x86_64")]
1247 #[::fuchsia::test]
1248 async fn test_sigaltstack_active_stack() {
1249 spawn_kernel_and_run(async |locked, current_task| {
1250 let addr = map_memory(locked, ¤t_task, UserAddress::default(), *PAGE_SIZE);
1251
1252 let user_ss = UserRef::<sigaltstack>::new(addr);
1253 let nullptr = UserRef::<sigaltstack>::default();
1254
1255 sys_sigaltstack(locked, ¤t_task, nullptr.into(), user_ss.into())
1257 .expect("failed to call sigaltstack");
1258 let mut ss = current_task.read_object(user_ss).expect("failed to read struct");
1259 assert!(ss.ss_flags & (SS_DISABLE as i32) != 0);
1260
1261 let sigaltstack_addr_size = round_up_to_system_page_size(uapi::MINSIGSTKSZ as usize)
1263 .expect("failed to round up");
1264 let sigaltstack_addr = map_memory(
1265 locked,
1266 ¤t_task,
1267 UserAddress::default(),
1268 sigaltstack_addr_size as u64,
1269 );
1270 ss.ss_sp = sigaltstack_addr.into();
1271 ss.ss_flags = 0;
1272 ss.ss_size = sigaltstack_addr_size as u64;
1273 current_task.write_object(user_ss, &ss).expect("failed to write struct");
1274 sys_sigaltstack(locked, ¤t_task, user_ss.into(), nullptr.into())
1275 .expect("failed to call sigaltstack");
1276
1277 let next_addr = (sigaltstack_addr + sigaltstack_addr_size).unwrap();
1279 current_task.thread_state.registers.rsp = next_addr.ptr() as u64;
1280 ss.ss_flags = SS_DISABLE as i32;
1281 current_task.write_object(user_ss, &ss).expect("failed to write struct");
1282 assert_eq!(
1283 sys_sigaltstack(locked, ¤t_task, user_ss.into(), nullptr.into()),
1284 error!(EPERM)
1285 );
1286
1287 let next_ss_addr = sigaltstack_addr
1290 .checked_add(sigaltstack_addr_size)
1291 .unwrap()
1292 .checked_add(0x1000usize)
1293 .unwrap();
1294 current_task.thread_state.registers.rsp = next_ss_addr.ptr() as u64;
1295 let ss = sigaltstack { ss_flags: SS_DISABLE as i32, ..sigaltstack::default() };
1296 current_task.write_object(user_ss, &ss).expect("failed to write struct");
1297 sys_sigaltstack(locked, ¤t_task, user_ss.into(), nullptr.into())
1298 .expect("failed to call sigaltstack");
1299 })
1300 .await;
1301 }
1302
    /// Installing an alternate signal stack whose `ss_size` is `u64::MAX`
    /// (so base + size saturates the address space) must still work, and the
    /// "currently executing on the alt stack" check must still trigger.
    #[cfg(target_arch = "x86_64")]
    #[::fuchsia::test]
    async fn test_sigaltstack_active_stack_saturates() {
        spawn_kernel_and_run(async |locked, current_task| {
            let addr = map_memory(locked, &current_task, UserAddress::default(), *PAGE_SIZE);

            let user_ss = UserRef::<sigaltstack>::new(addr);
            let nullptr = UserRef::<sigaltstack>::default();

            // No alt stack installed yet: querying reports SS_DISABLE.
            sys_sigaltstack(locked, &current_task, nullptr.into(), user_ss.into())
                .expect("failed to call sigaltstack");
            let mut ss = current_task.read_object(user_ss).expect("failed to read struct");
            assert!(ss.ss_flags & (SS_DISABLE as i32) != 0);

            // Install an alt stack with a saturating size (u64::MAX).
            let sigaltstack_addr_size = round_up_to_system_page_size(uapi::MINSIGSTKSZ as usize)
                .expect("failed to round up");
            let sigaltstack_addr = map_memory(
                locked,
                &current_task,
                UserAddress::default(),
                sigaltstack_addr_size as u64,
            );
            ss.ss_sp = sigaltstack_addr.into();
            ss.ss_flags = 0;
            ss.ss_size = u64::MAX;
            current_task.write_object(user_ss, &ss).expect("failed to write struct");
            sys_sigaltstack(locked, &current_task, user_ss.into(), nullptr.into())
                .expect("failed to call sigaltstack");

            // Point rsp inside the alt stack range; disabling the stack while
            // it is considered in use must fail with EPERM.
            current_task.thread_state.registers.rsp =
                (sigaltstack_addr + sigaltstack_addr_size).unwrap().ptr() as u64;
            ss.ss_flags = SS_DISABLE as i32;
            current_task.write_object(user_ss, &ss).expect("failed to write struct");
            assert_eq!(
                sys_sigaltstack(locked, &current_task, user_ss.into(), nullptr.into()),
                error!(EPERM)
            );

            // With rsp moved to 0, disabling the alt stack succeeds.
            current_task.thread_state.registers.rsp = 0u64;
            let ss = sigaltstack { ss_flags: SS_DISABLE as i32, ..sigaltstack::default() };
            current_task.write_object(user_ss, &ss).expect("failed to write struct");
            sys_sigaltstack(locked, &current_task, user_ss.into(), nullptr.into())
                .expect("failed to call sigaltstack");
        })
        .await;
    }
1353
1354 #[::fuchsia::test]
1357 async fn test_sigprocmask_invalid_size() {
1358 spawn_kernel_and_run(async |locked, current_task| {
1359 let set = UserRef::<SigSet>::default();
1360 let old_set = UserRef::<SigSet>::default();
1361 let how = 0;
1362
1363 assert_eq!(
1364 sys_rt_sigprocmask(
1365 locked,
1366 ¤t_task,
1367 how,
1368 set,
1369 old_set,
1370 std::mem::size_of::<SigSet>() * 2
1371 ),
1372 error!(EINVAL)
1373 );
1374 assert_eq!(
1375 sys_rt_sigprocmask(
1376 locked,
1377 ¤t_task,
1378 how,
1379 set,
1380 old_set,
1381 std::mem::size_of::<SigSet>() / 2
1382 ),
1383 error!(EINVAL)
1384 );
1385 })
1386 .await;
1387 }
1388
1389 #[::fuchsia::test]
1391 async fn test_sigprocmask_invalid_how() {
1392 spawn_kernel_and_run(async |locked, current_task| {
1393 let addr = map_memory(locked, ¤t_task, UserAddress::default(), *PAGE_SIZE);
1394
1395 let set = UserRef::<SigSet>::new(addr);
1396 let old_set = UserRef::<SigSet>::default();
1397 let how = SIG_SETMASK | SIG_UNBLOCK | SIG_BLOCK;
1398
1399 assert_eq!(
1400 sys_rt_sigprocmask(
1401 locked,
1402 ¤t_task,
1403 how,
1404 set,
1405 old_set,
1406 std::mem::size_of::<SigSet>()
1407 ),
1408 error!(EINVAL)
1409 );
1410 })
1411 .await;
1412 }
1413
1414 #[::fuchsia::test]
1417 async fn test_sigprocmask_null_set() {
1418 spawn_kernel_and_run(async |locked, current_task| {
1419 let addr = map_memory(locked, ¤t_task, UserAddress::default(), *PAGE_SIZE);
1420 let original_mask = SigSet::from(SIGTRAP);
1421 {
1422 current_task.write().set_signal_mask(original_mask);
1423 }
1424
1425 let set = UserRef::<SigSet>::default();
1426 let old_set = UserRef::<SigSet>::new(addr);
1427 let how = SIG_SETMASK;
1428
1429 current_task
1430 .write_memory(addr, &[0u8; std::mem::size_of::<SigSet>()])
1431 .expect("failed to clear struct");
1432
1433 assert_eq!(
1434 sys_rt_sigprocmask(
1435 locked,
1436 ¤t_task,
1437 how,
1438 set,
1439 old_set,
1440 std::mem::size_of::<SigSet>()
1441 ),
1442 Ok(())
1443 );
1444
1445 let old_mask = current_task.read_object(old_set).expect("failed to read mask");
1446 assert_eq!(old_mask, original_mask);
1447 })
1448 .await;
1449 }
1450
1451 #[::fuchsia::test]
1454 async fn test_sigprocmask_null_set_and_old_set() {
1455 spawn_kernel_and_run(async |locked, current_task| {
1456 let original_mask = SigSet::from(SIGTRAP);
1457 {
1458 current_task.write().set_signal_mask(original_mask);
1459 }
1460
1461 let set = UserRef::<SigSet>::default();
1462 let old_set = UserRef::<SigSet>::default();
1463 let how = SIG_SETMASK;
1464
1465 assert_eq!(
1466 sys_rt_sigprocmask(
1467 locked,
1468 ¤t_task,
1469 how,
1470 set,
1471 old_set,
1472 std::mem::size_of::<SigSet>()
1473 ),
1474 Ok(())
1475 );
1476 assert_eq!(current_task.read().signal_mask(), original_mask);
1477 })
1478 .await;
1479 }
1480
1481 #[::fuchsia::test]
1483 async fn test_sigprocmask_setmask() {
1484 spawn_kernel_and_run(async |locked, current_task| {
1485 let addr = map_memory(locked, ¤t_task, UserAddress::default(), *PAGE_SIZE);
1486 current_task
1487 .write_memory(addr, &[0u8; std::mem::size_of::<SigSet>() * 2])
1488 .expect("failed to clear struct");
1489
1490 let original_mask = SigSet::from(SIGTRAP);
1491 {
1492 current_task.write().set_signal_mask(original_mask);
1493 }
1494
1495 let new_mask = SigSet::from(SIGIO);
1496 let set = UserRef::<SigSet>::new(addr);
1497 current_task.write_object(set, &new_mask).expect("failed to set mask");
1498
1499 let old_addr_range = (addr + std::mem::size_of::<SigSet>()).unwrap();
1500 let old_set = UserRef::<SigSet>::new(old_addr_range);
1501 let how = SIG_SETMASK;
1502
1503 assert_eq!(
1504 sys_rt_sigprocmask(
1505 locked,
1506 ¤t_task,
1507 how,
1508 set,
1509 old_set,
1510 std::mem::size_of::<SigSet>()
1511 ),
1512 Ok(())
1513 );
1514
1515 let old_mask = current_task.read_object(old_set).expect("failed to read mask");
1516 assert_eq!(old_mask, original_mask);
1517 assert_eq!(current_task.read().signal_mask(), new_mask);
1518 })
1519 .await;
1520 }
1521
1522 #[::fuchsia::test]
1524 async fn test_sigprocmask_block() {
1525 spawn_kernel_and_run(async |locked, current_task| {
1526 let addr = map_memory(locked, ¤t_task, UserAddress::default(), *PAGE_SIZE);
1527 current_task
1528 .write_memory(addr, &[0u8; std::mem::size_of::<SigSet>() * 2])
1529 .expect("failed to clear struct");
1530
1531 let original_mask = SigSet::from(SIGTRAP);
1532 {
1533 current_task.write().set_signal_mask(original_mask);
1534 }
1535
1536 let new_mask = SigSet::from(SIGIO);
1537 let set = UserRef::<SigSet>::new(addr);
1538 current_task.write_object(set, &new_mask).expect("failed to set mask");
1539
1540 let old_addr_range = (addr + std::mem::size_of::<SigSet>()).unwrap();
1541 let old_set = UserRef::<SigSet>::new(old_addr_range);
1542 let how = SIG_BLOCK;
1543
1544 assert_eq!(
1545 sys_rt_sigprocmask(
1546 locked,
1547 ¤t_task,
1548 how,
1549 set,
1550 old_set,
1551 std::mem::size_of::<SigSet>()
1552 ),
1553 Ok(())
1554 );
1555
1556 let old_mask = current_task.read_object(old_set).expect("failed to read mask");
1557 assert_eq!(old_mask, original_mask);
1558 assert_eq!(current_task.read().signal_mask(), new_mask | original_mask);
1559 })
1560 .await;
1561 }
1562
1563 #[::fuchsia::test]
1565 async fn test_sigprocmask_unblock() {
1566 spawn_kernel_and_run(async |locked, current_task| {
1567 let addr = map_memory(locked, ¤t_task, UserAddress::default(), *PAGE_SIZE);
1568 current_task
1569 .write_memory(addr, &[0u8; std::mem::size_of::<SigSet>() * 2])
1570 .expect("failed to clear struct");
1571
1572 let original_mask = SigSet::from(SIGTRAP) | SigSet::from(SIGIO);
1573 {
1574 current_task.write().set_signal_mask(original_mask);
1575 }
1576
1577 let new_mask = SigSet::from(SIGTRAP);
1578 let set = UserRef::<SigSet>::new(addr);
1579 current_task.write_object(set, &new_mask).expect("failed to set mask");
1580
1581 let old_addr_range = (addr + std::mem::size_of::<SigSet>()).unwrap();
1582 let old_set = UserRef::<SigSet>::new(old_addr_range);
1583 let how = SIG_UNBLOCK;
1584
1585 assert_eq!(
1586 sys_rt_sigprocmask(
1587 locked,
1588 ¤t_task,
1589 how,
1590 set,
1591 old_set,
1592 std::mem::size_of::<SigSet>()
1593 ),
1594 Ok(())
1595 );
1596
1597 let old_mask = current_task.read_object(old_set).expect("failed to read mask");
1598 assert_eq!(old_mask, original_mask);
1599 assert_eq!(current_task.read().signal_mask(), SIGIO.into());
1600 })
1601 .await;
1602 }
1603
1604 #[::fuchsia::test]
1606 async fn test_sigprocmask_unblock_not_set() {
1607 spawn_kernel_and_run(async |locked, current_task| {
1608 let addr = map_memory(locked, ¤t_task, UserAddress::default(), *PAGE_SIZE);
1609 current_task
1610 .write_memory(addr, &[0u8; std::mem::size_of::<SigSet>() * 2])
1611 .expect("failed to clear struct");
1612
1613 let original_mask = SigSet::from(SIGIO);
1614 {
1615 current_task.write().set_signal_mask(original_mask);
1616 }
1617
1618 let new_mask = SigSet::from(SIGTRAP);
1619 let set = UserRef::<SigSet>::new(addr);
1620 current_task.write_object(set, &new_mask).expect("failed to set mask");
1621
1622 let old_addr_range = (addr + std::mem::size_of::<SigSet>()).unwrap();
1623 let old_set = UserRef::<SigSet>::new(old_addr_range);
1624 let how = SIG_UNBLOCK;
1625
1626 assert_eq!(
1627 sys_rt_sigprocmask(
1628 locked,
1629 ¤t_task,
1630 how,
1631 set,
1632 old_set,
1633 std::mem::size_of::<SigSet>()
1634 ),
1635 Ok(())
1636 );
1637
1638 let old_mask = current_task.read_object(old_set).expect("failed to read mask");
1639 assert_eq!(old_mask, original_mask);
1640 assert_eq!(current_task.read().signal_mask(), original_mask);
1641 })
1642 .await;
1643 }
1644
1645 #[::fuchsia::test]
1647 async fn test_sigprocmask_kill_stop() {
1648 spawn_kernel_and_run(async |locked, current_task| {
1649 let addr = map_memory(locked, ¤t_task, UserAddress::default(), *PAGE_SIZE);
1650 current_task
1651 .write_memory(addr, &[0u8; std::mem::size_of::<SigSet>() * 2])
1652 .expect("failed to clear struct");
1653
1654 let original_mask = SigSet::from(SIGIO);
1655 {
1656 current_task.write().set_signal_mask(original_mask);
1657 }
1658
1659 let new_mask = UNBLOCKABLE_SIGNALS;
1660 let set = UserRef::<SigSet>::new(addr);
1661 current_task.write_object(set, &new_mask).expect("failed to set mask");
1662
1663 let old_addr_range = (addr + std::mem::size_of::<SigSet>()).unwrap();
1664 let old_set = UserRef::<SigSet>::new(old_addr_range);
1665 let how = SIG_BLOCK;
1666
1667 assert_eq!(
1668 sys_rt_sigprocmask(
1669 locked,
1670 ¤t_task,
1671 how,
1672 set,
1673 old_set,
1674 std::mem::size_of::<SigSet>()
1675 ),
1676 Ok(())
1677 );
1678
1679 let old_mask = current_task.read_object(old_set).expect("failed to read mask");
1680 assert_eq!(old_mask, original_mask);
1681 assert_eq!(current_task.read().signal_mask(), original_mask);
1682 })
1683 .await;
1684 }
1685
1686 #[::fuchsia::test]
1687 async fn test_sigaction_invalid_signal() {
1688 spawn_kernel_and_run(async |locked, current_task| {
1689 assert_eq!(
1690 sys_rt_sigaction(
1691 locked,
1692 ¤t_task,
1693 UncheckedSignal::from(SIGKILL),
1694 UserRef::<sigaction_t>::new(UserAddress::from(10)).into(),
1696 UserRef::<sigaction_t>::default().into(),
1697 std::mem::size_of::<SigSet>(),
1698 ),
1699 error!(EINVAL)
1700 );
1701 assert_eq!(
1702 sys_rt_sigaction(
1703 locked,
1704 ¤t_task,
1705 UncheckedSignal::from(SIGSTOP),
1706 UserRef::<sigaction_t>::new(UserAddress::from(10)).into(),
1708 UserRef::<sigaction_t>::default().into(),
1709 std::mem::size_of::<SigSet>(),
1710 ),
1711 error!(EINVAL)
1712 );
1713 assert_eq!(
1714 sys_rt_sigaction(
1715 locked,
1716 ¤t_task,
1717 UncheckedSignal::from(Signal::NUM_SIGNALS + 1),
1718 UserRef::<sigaction_t>::new(UserAddress::from(10)).into(),
1720 UserRef::<sigaction_t>::default().into(),
1721 std::mem::size_of::<SigSet>(),
1722 ),
1723 error!(EINVAL)
1724 );
1725 })
1726 .await;
1727 }
1728
1729 #[::fuchsia::test]
1730 async fn test_sigaction_old_value_set() {
1731 spawn_kernel_and_run(async |locked, current_task| {
1732 let addr = map_memory(locked, ¤t_task, UserAddress::default(), *PAGE_SIZE);
1733 current_task
1734 .write_memory(addr, &[0u8; std::mem::size_of::<sigaction_t>()])
1735 .expect("failed to clear struct");
1736
1737 let org_mask = SigSet::from(SIGHUP) | SigSet::from(SIGINT);
1738 let original_action =
1739 sigaction_t { sa_mask: org_mask.into(), ..sigaction_t::default() };
1740
1741 {
1742 current_task.thread_group().signal_actions.set(SIGHUP, original_action);
1743 }
1744
1745 let old_action_ref = UserRef::<sigaction_t>::new(addr);
1746 assert_eq!(
1747 sys_rt_sigaction(
1748 locked,
1749 ¤t_task,
1750 UncheckedSignal::from(SIGHUP),
1751 UserRef::<sigaction_t>::default().into(),
1752 old_action_ref.into(),
1753 std::mem::size_of::<SigSet>()
1754 ),
1755 Ok(())
1756 );
1757
1758 let old_action =
1759 current_task.read_object(old_action_ref).expect("failed to read action");
1760 assert_eq!(old_action.as_bytes(), original_action.as_bytes());
1761 })
1762 .await;
1763 }
1764
1765 #[::fuchsia::test]
1766 async fn test_sigaction_new_value_set() {
1767 spawn_kernel_and_run(async |locked, current_task| {
1768 let addr = map_memory(locked, ¤t_task, UserAddress::default(), *PAGE_SIZE);
1769 current_task
1770 .write_memory(addr, &[0u8; std::mem::size_of::<sigaction_t>()])
1771 .expect("failed to clear struct");
1772
1773 let org_mask = SigSet::from(SIGHUP) | SigSet::from(SIGINT);
1774 let original_action =
1775 sigaction_t { sa_mask: org_mask.into(), ..sigaction_t::default() };
1776 let set_action_ref = UserRef::<sigaction_t>::new(addr);
1777 current_task
1778 .write_object(set_action_ref, &original_action)
1779 .expect("failed to set action");
1780
1781 assert_eq!(
1782 sys_rt_sigaction(
1783 locked,
1784 ¤t_task,
1785 UncheckedSignal::from(SIGINT),
1786 set_action_ref.into(),
1787 UserRef::<sigaction_t>::default().into(),
1788 std::mem::size_of::<SigSet>(),
1789 ),
1790 Ok(())
1791 );
1792
1793 assert_eq!(
1794 current_task.thread_group().signal_actions.get(SIGINT).as_bytes(),
1795 original_action.as_bytes()
1796 );
1797 })
1798 .await;
1799 }
1800
1801 #[::fuchsia::test]
1803 async fn test_kill_same_task() {
1804 spawn_kernel_and_run(async |locked, current_task| {
1805 assert_eq!(sys_kill(locked, ¤t_task, current_task.tid, SIGINT.into()), Ok(()));
1806 })
1807 .await;
1808 }
1809
1810 #[::fuchsia::test]
1812 async fn test_kill_own_thread_group() {
1813 spawn_kernel_and_run(async |locked, init_task| {
1814 let task1 = init_task.clone_task_for_test(locked, 0, Some(SIGCHLD));
1815 task1.thread_group().setsid(locked).expect("setsid");
1816 let task2 = task1.clone_task_for_test(locked, 0, Some(SIGCHLD));
1817
1818 assert_eq!(sys_kill(locked, &task1, 0, SIGINT.into()), Ok(()));
1819 assert_eq!(task1.read().queued_signal_count(SIGINT), 1);
1820 assert_eq!(task2.read().queued_signal_count(SIGINT), 1);
1821 assert_eq!(init_task.read().queued_signal_count(SIGINT), 0);
1822 })
1823 .await;
1824 }
1825
1826 #[::fuchsia::test]
1828 async fn test_kill_thread_group() {
1829 spawn_kernel_and_run(async |locked, init_task| {
1830 let task1 = init_task.clone_task_for_test(locked, 0, Some(SIGCHLD));
1831 task1.thread_group().setsid(locked).expect("setsid");
1832 let task2 = task1.clone_task_for_test(locked, 0, Some(SIGCHLD));
1833
1834 assert_eq!(sys_kill(locked, &task1, -task1.tid, SIGINT.into()), Ok(()));
1835 assert_eq!(task1.read().queued_signal_count(SIGINT), 1);
1836 assert_eq!(task2.read().queued_signal_count(SIGINT), 1);
1837 assert_eq!(init_task.read().queued_signal_count(SIGINT), 0);
1838 })
1839 .await;
1840 }
1841
1842 #[::fuchsia::test]
1844 async fn test_kill_all() {
1845 spawn_kernel_and_run(async |locked, init_task| {
1846 let task1 = init_task.clone_task_for_test(locked, 0, Some(SIGCHLD));
1847 task1.thread_group().setsid(locked).expect("setsid");
1848 let task2 = task1.clone_task_for_test(locked, 0, Some(SIGCHLD));
1849
1850 assert_eq!(sys_kill(locked, &task1, -1, SIGINT.into()), Ok(()));
1851 assert_eq!(task1.read().queued_signal_count(SIGINT), 0);
1852 assert_eq!(task2.read().queued_signal_count(SIGINT), 1);
1853 assert_eq!(init_task.read().queued_signal_count(SIGINT), 0);
1854 })
1855 .await;
1856 }
1857
1858 #[::fuchsia::test]
1860 async fn test_kill_inexistant_task() {
1861 spawn_kernel_and_run(async |locked, current_task| {
1862 assert_eq!(sys_kill(locked, ¤t_task, 9, SIGINT.into()), error!(ESRCH));
1863 })
1864 .await;
1865 }
1866
1867 #[::fuchsia::test]
1869 async fn test_kill_invalid_task() {
1870 spawn_kernel_and_run(async |locked, task1| {
1871 task1.set_creds(Credentials::with_ids(1, 1));
1873 let task2 = task1.clone_task_for_test(locked, 0, Some(SIGCHLD));
1874 task2.set_creds(Credentials::with_ids(2, 2));
1875
1876 assert!(task1.can_signal(&task2, SIGINT.into()).is_err());
1877 assert_eq!(sys_kill(locked, &task2, task1.tid, SIGINT.into()), error!(EPERM));
1878 assert_eq!(task1.read().queued_signal_count(SIGINT), 0);
1879 })
1880 .await;
1881 }
1882
1883 #[::fuchsia::test]
1885 async fn test_kill_invalid_task_in_thread_group() {
1886 spawn_kernel_and_run(async |locked, init_task| {
1887 let task1 = init_task.clone_task_for_test(locked, 0, Some(SIGCHLD));
1888 task1.thread_group().setsid(locked).expect("setsid");
1889 let task2 = task1.clone_task_for_test(locked, 0, Some(SIGCHLD));
1890 task2.thread_group().setsid(locked).expect("setsid");
1891 task2.set_creds(Credentials::with_ids(2, 2));
1892
1893 assert!(task2.can_signal(&task1, SIGINT.into()).is_err());
1894 assert_eq!(sys_kill(locked, &task2, -task1.tid, SIGINT.into()), error!(EPERM));
1895 assert_eq!(task1.read().queued_signal_count(SIGINT), 0);
1896 })
1897 .await;
1898 }
1899
1900 #[::fuchsia::test]
1902 async fn test_kill_invalid_signal() {
1903 spawn_kernel_and_run(async |locked, current_task| {
1904 assert_eq!(
1905 sys_kill(locked, ¤t_task, current_task.tid, UncheckedSignal::from(75)),
1906 error!(EINVAL)
1907 );
1908 })
1909 .await;
1910 }
1911
1912 #[::fuchsia::test]
1914 async fn test_blocked_signal_pending() {
1915 spawn_kernel_and_run(async |locked, current_task| {
1916 let addr = map_memory(locked, ¤t_task, UserAddress::default(), *PAGE_SIZE);
1917 current_task
1918 .write_memory(addr, &[0u8; std::mem::size_of::<SigSet>() * 2])
1919 .expect("failed to clear struct");
1920
1921 let new_mask = SigSet::from(SIGIO);
1922 let set = UserRef::<SigSet>::new(addr);
1923 current_task.write_object(set, &new_mask).expect("failed to set mask");
1924
1925 assert_eq!(
1926 sys_rt_sigprocmask(
1927 locked,
1928 ¤t_task,
1929 SIG_BLOCK,
1930 set,
1931 UserRef::default(),
1932 std::mem::size_of::<SigSet>()
1933 ),
1934 Ok(())
1935 );
1936 assert_eq!(sys_kill(locked, ¤t_task, current_task.tid, SIGIO.into()), Ok(()));
1937 assert_eq!(current_task.read().queued_signal_count(SIGIO), 1);
1938
1939 assert_eq!(sys_kill(locked, ¤t_task, current_task.tid, SIGIO.into()), Ok(()));
1941 assert_eq!(current_task.read().queued_signal_count(SIGIO), 1);
1942 })
1943 .await;
1944 }
1945
1946 #[::fuchsia::test]
1948 async fn test_blocked_real_time_signal_pending() {
1949 spawn_kernel_and_run(async |locked, current_task| {
1950 let addr = map_memory(locked, ¤t_task, UserAddress::default(), *PAGE_SIZE);
1951 current_task
1952 .write_memory(addr, &[0u8; std::mem::size_of::<SigSet>() * 2])
1953 .expect("failed to clear struct");
1954
1955 let new_mask = SigSet::from(starnix_uapi::signals::SIGRTMIN);
1956 let set = UserRef::<SigSet>::new(addr);
1957 current_task.write_object(set, &new_mask).expect("failed to set mask");
1958
1959 assert_eq!(
1960 sys_rt_sigprocmask(
1961 locked,
1962 ¤t_task,
1963 SIG_BLOCK,
1964 set,
1965 UserRef::default(),
1966 std::mem::size_of::<SigSet>()
1967 ),
1968 Ok(())
1969 );
1970 assert_eq!(sys_kill(locked, ¤t_task, current_task.tid, SIGRTMIN.into()), Ok(()));
1971 assert_eq!(current_task.read().queued_signal_count(starnix_uapi::signals::SIGRTMIN), 1);
1972
1973 assert_eq!(sys_kill(locked, ¤t_task, current_task.tid, SIGRTMIN.into()), Ok(()));
1975 assert_eq!(current_task.read().queued_signal_count(starnix_uapi::signals::SIGRTMIN), 2);
1976 })
1977 .await;
1978 }
1979
    /// A task blocked in `rt_sigsuspend` is woken by a signal outside the
    /// temporary mask, and the syscall returns ERESTARTNOHAND.
    #[::fuchsia::test]
    async fn test_suspend() {
        spawn_kernel_and_run(async |locked, current_task| {
            let init_task_weak = current_task.weak_task();
            // Rendezvous channel: keeps the kthread alive until the main task
            // has returned from rt_sigsuspend.
            let (tx, rx) = std::sync::mpsc::sync_channel::<()>(0);

            let closure = move |locked: &mut Locked<Unlocked>, current_task: &CurrentTask| {
                let init_task_temp = init_task_weak.upgrade().expect("Task must be alive");

                // Poll until the suspended task has actually parked itself.
                let mut suspended = false;
                while !suspended {
                    suspended = init_task_temp.read().is_blocked();
                    std::thread::sleep(std::time::Duration::from_millis(10));
                }

                // SIGHUP is the one signal left out of the suspend mask, so it
                // should wake the suspended task.
                let _ = sys_kill(
                    locked,
                    current_task,
                    init_task_temp.tid,
                    UncheckedSignal::from(SIGHUP),
                );

                // Wait for the main task to acknowledge, then verify it is no
                // longer blocked.
                rx.recv().expect("receive");
                assert!(!init_task_temp.read().is_blocked());
            };
            let (thread, req) =
                SpawnRequestBuilder::new().with_sync_closure(closure).build_with_async_result();
            current_task.kernel().kthreads.spawner().spawn_from_request(req);

            let addr = map_memory(locked, &current_task, UserAddress::default(), *PAGE_SIZE);
            let user_ref = UserRef::<SigSet>::new(addr);

            // Suspend with every signal blocked except SIGHUP.
            let sigset = !SigSet::from(SIGHUP);
            current_task.write_object(user_ref, &sigset).expect("failed to set action");

            assert_eq!(
                sys_rt_sigsuspend(locked, current_task, user_ref, std::mem::size_of::<SigSet>()),
                error!(ERESTARTNOHAND)
            );
            tx.send(()).expect("send");
            futures::executor::block_on(thread).expect("join");
        })
        .await;
    }
2027
2028 #[::fuchsia::test]
2030 async fn test_waitid_options() {
2031 spawn_kernel_and_run(async |locked, current_task| {
2032 let id = 1;
2033 assert_eq!(
2034 sys_waitid(
2035 locked,
2036 ¤t_task,
2037 P_PID,
2038 id,
2039 MultiArchUserRef::null(current_task),
2040 0,
2041 UserRef::default().into()
2042 ),
2043 error!(EINVAL)
2044 );
2045 assert_eq!(
2046 sys_waitid(
2047 locked,
2048 ¤t_task,
2049 P_PID,
2050 id,
2051 MultiArchUserRef::null(current_task),
2052 0xffff,
2053 UserRef::default().into()
2054 ),
2055 error!(EINVAL)
2056 );
2057 })
2058 .await;
2059 }
2060
2061 #[::fuchsia::test]
2063 async fn test_wait4_options() {
2064 spawn_kernel_and_run(async |locked, current_task| {
2065 let id = 1;
2066 assert_eq!(
2067 sys_wait4(
2068 locked,
2069 ¤t_task,
2070 id,
2071 UserRef::default(),
2072 WEXITED,
2073 RUsagePtr::null(current_task)
2074 ),
2075 error!(EINVAL)
2076 );
2077 assert_eq!(
2078 sys_wait4(
2079 locked,
2080 ¤t_task,
2081 id,
2082 UserRef::default(),
2083 WNOWAIT,
2084 RUsagePtr::null(current_task)
2085 ),
2086 error!(EINVAL)
2087 );
2088 assert_eq!(
2089 sys_wait4(
2090 locked,
2091 ¤t_task,
2092 id,
2093 UserRef::default(),
2094 0xffff,
2095 RUsagePtr::null(current_task)
2096 ),
2097 error!(EINVAL)
2098 );
2099 })
2100 .await;
2101 }
2102
2103 #[::fuchsia::test]
2104 async fn test_echild_when_no_zombie() {
2105 spawn_kernel_and_run(async |locked, current_task| {
2106 assert!(
2108 sys_kill(
2109 locked,
2110 ¤t_task,
2111 current_task.get_pid(),
2112 UncheckedSignal::from(SIGCHLD)
2113 )
2114 .is_ok()
2115 );
2116 assert_eq!(
2119 wait_on_pid(
2120 locked,
2121 ¤t_task,
2122 &ProcessSelector::Any,
2123 &WaitingOptions::new_for_wait4(0).expect("WaitingOptions")
2124 ),
2125 error!(ECHILD)
2126 );
2127 })
2128 .await;
2129 }
2130
2131 #[::fuchsia::test]
2132 async fn test_no_error_when_zombie() {
2133 spawn_kernel_and_run(async |locked, current_task| {
2134 let child = current_task.clone_task_for_test(locked, 0, Some(SIGCHLD));
2135 let expected_result = WaitResult {
2136 pid: child.tid,
2137 uid: 0,
2138 exit_info: ProcessExitInfo {
2139 status: ExitStatus::Exit(1),
2140 exit_signal: Some(SIGCHLD),
2141 },
2142 time_stats: Default::default(),
2143 };
2144 child.thread_group().exit(locked, ExitStatus::Exit(1), None);
2145 std::mem::drop(child);
2146
2147 assert_eq!(
2148 wait_on_pid(
2149 locked,
2150 ¤t_task,
2151 &ProcessSelector::Any,
2152 &WaitingOptions::new_for_wait4(0).expect("WaitingOptions")
2153 ),
2154 Ok(Some(expected_result))
2155 );
2156 })
2157 .await;
2158 }
2159
    /// With WNOHANG, waiting returns immediately when no child has exited;
    /// without it, the call blocks until a child exits and then reports it.
    #[::fuchsia::test]
    async fn test_waiting_for_child() {
        spawn_kernel_and_run(async |locked, task| {
            let child = task
                .clone_task(
                    locked,
                    0,
                    Some(SIGCHLD),
                    UserRef::default(),
                    UserRef::default(),
                    UserRef::default(),
                )
                .expect("clone_task");

            // The child is still alive, so WNOHANG reports "nothing yet".
            assert_eq!(
                wait_on_pid(
                    locked,
                    &task,
                    &ProcessSelector::Any,
                    &WaitingOptions::new_for_wait4(WNOHANG).expect("WaitingOptions")
                ),
                Ok(None)
            );

            // Exit the child from a helper thread once the parent is blocked
            // in the wait below.
            let thread = std::thread::spawn({
                let task = task.weak_task();
                move || {
                    #[allow(
                        clippy::undocumented_unsafe_blocks,
                        reason = "Force documented unsafe blocks in Starnix"
                    )]
                    let locked = unsafe { Unlocked::new() };
                    let task = task.upgrade().expect("task must be alive");
                    let child: AutoReleasableTask = child.into();
                    // Poll until the parent has parked itself in wait_on_pid.
                    while !task.read().is_blocked() {
                        std::thread::sleep(std::time::Duration::from_millis(10));
                    }
                    child.thread_group().exit(locked, ExitStatus::Exit(0), None);
                    child.tid
                }
            });

            // This blocks until the helper thread exits the child.
            let waited_child = wait_on_pid(
                locked,
                &task,
                &ProcessSelector::Any,
                &WaitingOptions::new_for_wait4(0).expect("WaitingOptions"),
            )
            .expect("wait_on_pid")
            .unwrap();

            // The reaped pid must match the child the helper thread exited.
            let child_id = thread.join().expect("join");
            assert_eq!(waited_child.pid, child_id);
        })
        .await;
    }
2221
    /// A pending caught signal interrupts a blocking wait: wait_on_pid fails
    /// with ERESTARTSYS instead of blocking.
    #[::fuchsia::test]
    async fn test_waiting_for_child_with_signal_pending() {
        spawn_kernel_and_run(async |locked, task| {
            // Register a handler so that the signal interrupts the wait
            // rather than being ignored.
            task.thread_group().signal_actions.set(
                SIGUSR1,
                sigaction_t { sa_handler: uaddr { addr: 0xDEADBEEF }, ..sigaction_t::default() },
            );

            // A live child guarantees the wait would otherwise block.
            let _child = task.clone_task_for_test(locked, 0, Some(SIGCHLD));

            send_standard_signal(locked, &task, SignalInfo::default(SIGUSR1));

            let errno = wait_on_pid(
                locked,
                &task,
                &ProcessSelector::Any,
                &WaitingOptions::new_for_wait4(0).expect("WaitingOptions"),
            )
            .expect_err("wait_on_pid");
            assert_eq!(errno, ERESTARTSYS);
        })
        .await;
    }
2249
    /// A child killed by SIGKILL yields a wait status equal to the raw signal
    /// number.
    #[::fuchsia::test]
    async fn test_sigkill() {
        spawn_kernel_and_run(async |locked, current_task| {
            let mut child = current_task.clone_task_for_test(locked, 0, Some(SIGCHLD));

            // Queue SIGKILL and force the child to process it.
            send_standard_signal(locked, &child, SignalInfo::default(SIGKILL));
            dequeue_signal_for_test(locked, &mut child);
            std::mem::drop(child);

            // Reap the child and check the reported wait status word.
            let address = map_memory(
                locked,
                &current_task,
                UserAddress::default(),
                std::mem::size_of::<i32>() as u64,
            );
            let address_ref = UserRef::<i32>::new(address);
            sys_wait4(locked, &current_task, -1, address_ref, 0, RUsagePtr::null(current_task))
                .expect("wait4");
            let wstatus = current_task.read_object(address_ref).expect("read memory");
            assert_eq!(wstatus, SIGKILL.number() as i32);
        })
        .await;
    }
2275
    /// Helper: kill a freshly-cloned child with `sig` and assert that wait4()
    /// reports `wait_status`. `exit_signal` configures the signal the child
    /// is cloned with.
    async fn test_exit_status_for_signal(
        sig: Signal,
        wait_status: i32,
        exit_signal: Option<Signal>,
    ) {
        spawn_kernel_and_run(async move |locked, current_task| {
            let mut child = current_task.clone_task_for_test(locked, 0, exit_signal);

            // Deliver the fatal signal and let the child act on it.
            send_standard_signal(locked, &child, SignalInfo::default(sig));
            dequeue_signal_for_test(locked, &mut child);
            std::mem::drop(child);

            // Reap the child and compare the raw wait status word.
            let address = map_memory(
                locked,
                &current_task,
                UserAddress::default(),
                std::mem::size_of::<i32>() as u64,
            );
            let address_ref = UserRef::<i32>::new(address);
            sys_wait4(locked, &current_task, -1, address_ref, 0, RUsagePtr::null(current_task))
                .expect("wait4");
            let wstatus = current_task.read_object(address_ref).expect("read memory");
            assert_eq!(wstatus, wait_status);
        })
        .await;
    }
2304
    /// Death-by-signal wait statuses: SIGTERM reports the bare signal number,
    /// while SIGSEGV additionally carries the 0x80 core-dump bit.
    #[::fuchsia::test]
    async fn test_exit_status() {
        test_exit_status_for_signal(SIGTERM, SIGTERM.number() as i32, Some(SIGCHLD)).await;
        // 0x80 is the "core dumped" flag in the wait status word.
        test_exit_status_for_signal(SIGSEGV, (SIGSEGV.number() as i32) | 0x80, Some(SIGCHLD)).await;
    }
2312
    /// wait4() pid selectors: a negative pid waits on that process group;
    /// pid 0 waits on the caller's own process group.
    #[::fuchsia::test]
    async fn test_wait4_by_pgid() {
        spawn_kernel_and_run(async |locked, current_task| {
            // child1 exits while still in the caller's process group.
            let child1 = current_task.clone_task_for_test(locked, 0, Some(SIGCHLD));
            let child1_pid = child1.tid;
            child1.thread_group().exit(locked, ExitStatus::Exit(42), None);
            std::mem::drop(child1);
            // child2 becomes its own session/group leader before exiting.
            let child2 = current_task.clone_task_for_test(locked, 0, Some(SIGCHLD));
            child2.thread_group().setsid(locked).expect("setsid");
            let child2_pid = child2.tid;
            child2.thread_group().exit(locked, ExitStatus::Exit(42), None);
            std::mem::drop(child2);

            // -child2_pid selects child2's process group.
            assert_eq!(
                sys_wait4(
                    locked,
                    &current_task,
                    -child2_pid,
                    UserRef::default(),
                    0,
                    RUsagePtr::null(current_task)
                ),
                Ok(child2_pid)
            );
            // 0 selects the caller's own process group, reaping child1.
            assert_eq!(
                sys_wait4(
                    locked,
                    &current_task,
                    0,
                    UserRef::default(),
                    0,
                    RUsagePtr::null(current_task)
                ),
                Ok(child1_pid)
            );
        })
        .await;
    }
2351
    /// waitid() with P_PGID: a nonzero id selects that process group, while
    /// id 0 selects the caller's own process group.
    #[::fuchsia::test]
    async fn test_waitid_by_pgid() {
        spawn_kernel_and_run(async |locked, current_task| {
            // child1 exits inside the caller's process group.
            let child1 = current_task.clone_task_for_test(locked, 0, Some(SIGCHLD));
            let child1_pid = child1.tid;
            child1.thread_group().exit(locked, ExitStatus::Exit(42), None);
            std::mem::drop(child1);
            // child2 exits as leader of its own session/process group.
            let child2 = current_task.clone_task_for_test(locked, 0, Some(SIGCHLD));
            child2.thread_group().setsid(locked).expect("setsid");
            let child2_pid = child2.tid;
            child2.thread_group().exit(locked, ExitStatus::Exit(42), None);
            std::mem::drop(child2);

            let address: UserRef<uapi::siginfo_t> =
                map_memory(locked, &current_task, UserAddress::default(), *PAGE_SIZE).into();
            // P_PGID with child2's group id reaps child2...
            assert_eq!(
                sys_waitid(
                    locked,
                    &current_task,
                    P_PGID,
                    child2_pid,
                    address.into(),
                    WEXITED,
                    UserRef::default().into()
                ),
                Ok(())
            );
            // ...leaving child1 as the sole remaining zombie.
            assert_eq!(current_task.thread_group().read().zombie_children[0].pid(), child1_pid);

            // P_PGID with id 0 targets the caller's own group and reaps child1.
            assert_eq!(
                sys_waitid(
                    locked,
                    &current_task,
                    P_PGID,
                    0,
                    address.into(),
                    WEXITED,
                    UserRef::default().into()
                ),
                Ok(())
            );
        })
        .await;
    }
2397
    /// rt_tgsigqueueinfo delivers a caller-assembled siginfo payload (pid,
    /// uid, value) to a specific thread, and the payload survives queueing
    /// intact.
    #[::fuchsia::test]
    async fn test_sigqueue() {
        spawn_kernel_and_run(async |locked, current_task| {
            let current_uid = current_task.with_current_creds(|creds| creds.uid);
            let current_pid = current_task.get_pid();

            const TEST_VALUE: u64 = 101;

            // Payload offsets within the raw siginfo bytes: on 64-bit a
            // 4-byte pad follows the header, then pid (4), uid (4), value (8).
            const ARCH64_SI_HEADER_SIZE: usize = SI_HEADER_SIZE + 4;
            const PID_DATA_OFFSET: usize = ARCH64_SI_HEADER_SIZE;
            const UID_DATA_OFFSET: usize = ARCH64_SI_HEADER_SIZE + 4;
            const VALUE_DATA_OFFSET: usize = ARCH64_SI_HEADER_SIZE + 8;

            // Hand-assemble the raw siginfo bytes for a SI_QUEUE SIGIO.
            let mut data = vec![0u8; SI_MAX_SIZE as usize];
            let header = SignalInfoHeader {
                signo: SIGIO.number(),
                code: SI_QUEUE,
                ..SignalInfoHeader::default()
            };
            let _ = header.write_to(&mut data[..SI_HEADER_SIZE]);
            data[PID_DATA_OFFSET..PID_DATA_OFFSET + 4].copy_from_slice(&current_pid.to_ne_bytes());
            data[UID_DATA_OFFSET..UID_DATA_OFFSET + 4].copy_from_slice(&current_uid.to_ne_bytes());
            data[VALUE_DATA_OFFSET..VALUE_DATA_OFFSET + 8]
                .copy_from_slice(&TEST_VALUE.to_ne_bytes());

            // Place the siginfo in user memory and target a second task.
            let addr = map_memory(locked, &current_task, UserAddress::default(), *PAGE_SIZE);
            current_task.write_memory(addr, &data).unwrap();
            let second_current = create_task(locked, current_task.kernel(), "second task");
            let second_pid = second_current.get_pid();
            let second_tid = second_current.get_tid();
            assert_eq!(second_current.read().queued_signal_count(SIGIO), 0);

            assert_eq!(
                sys_rt_tgsigqueueinfo(
                    locked,
                    &current_task,
                    second_pid,
                    second_tid,
                    UncheckedSignal::from(SIGIO),
                    addr
                ),
                Ok(())
            );
            assert_eq!(second_current.read().queued_signal_count(SIGIO), 1);

            // Build the SignalInfo used to select the queued entry for
            // extraction.
            let signal = SignalInfo {
                code: SI_USER as i32,
                detail: SignalDetail::Kill {
                    pid: current_task.thread_group().leader,
                    uid: current_task.with_current_creds(|creds| creds.uid),
                },
                ..SignalInfo::default(SIGIO)
            };
            let queued_signal = second_current.write().take_specific_signal(signal);
            if let Some(sig) = queued_signal {
                assert_eq!(sig.signal, SIGIO);
                assert_eq!(sig.errno, 0);
                assert_eq!(sig.code, SI_QUEUE);
                if let SignalDetail::Raw { data } = sig.detail {
                    // The raw detail holds only the bytes after the header,
                    // so rebase the offsets accordingly.
                    let offset_pid = PID_DATA_OFFSET - SI_HEADER_SIZE;
                    let offset_uid = UID_DATA_OFFSET - SI_HEADER_SIZE;
                    let offset_value = VALUE_DATA_OFFSET - SI_HEADER_SIZE;
                    let pid =
                        pid_t::from_ne_bytes(data[offset_pid..offset_pid + 4].try_into().unwrap());
                    let uid =
                        uid_t::from_ne_bytes(data[offset_uid..offset_uid + 4].try_into().unwrap());
                    let value = u64::from_ne_bytes(
                        data[offset_value..offset_value + 8].try_into().unwrap(),
                    );
                    // The payload round-tripped unchanged.
                    assert_eq!(pid, current_pid);
                    assert_eq!(uid, current_uid);
                    assert_eq!(value, TEST_VALUE);
                } else {
                    panic!("incorrect signal detail");
                }
            } else {
                panic!("expected a queued signal");
            }
        })
        .await;
    }
2482
2483 #[::fuchsia::test]
2484 async fn test_signalfd_filters_signals() {
2485 spawn_kernel_and_run(async |locked, current_task| {
2486 let memory_for_masks =
2487 map_memory(locked, ¤t_task, UserAddress::default(), *PAGE_SIZE);
2488
2489 let term_int_mask = SigSet::from(SIGTERM) | SigSet::from(SIGINT);
2491 let term_int_mask_addr = UserRef::<SigSet>::new(memory_for_masks);
2492 current_task
2493 .write_object(term_int_mask_addr, &term_int_mask)
2494 .expect("failed to write mask");
2495 let sfd_term_int = sys_signalfd4(
2496 locked,
2497 ¤t_task,
2498 FdNumber::from_raw(-1),
2499 term_int_mask_addr,
2500 std::mem::size_of::<SigSet>(),
2501 0,
2502 )
2503 .expect("failed to create SIGTERM/SIGINT signalfd");
2504
2505 let sigchld_mask = SigSet::from(SIGCHLD);
2507 let sigchld_mask_addr =
2508 UserRef::<SigSet>::new((memory_for_masks + std::mem::size_of::<SigSet>()).unwrap());
2509 current_task
2510 .write_object(sigchld_mask_addr, &sigchld_mask)
2511 .expect("failed to write mask");
2512 let sfd_chld = sys_signalfd4(
2513 locked,
2514 ¤t_task,
2515 FdNumber::from_raw(-1),
2516 sigchld_mask_addr,
2517 std::mem::size_of::<SigSet>(),
2518 0,
2519 )
2520 .expect("failed to create SIGCHLD signalfd");
2521
2522 let child = current_task.clone_task_for_test(locked, 0, Some(SIGCHLD));
2524 child.thread_group().exit(locked, ExitStatus::Exit(1), None);
2525 std::mem::drop(child);
2526
2527 let sfd_term_int_file =
2529 current_task.files.get(sfd_term_int).expect("failed to get sfd_term_int file");
2530 let sfd_chld_file =
2531 current_task.files.get(sfd_chld).expect("failed to get sfd_chld file");
2532
2533 let term_int_events = sfd_term_int_file
2534 .query_events(locked, ¤t_task)
2535 .expect("failed to query sfd_term_int events");
2536 let chld_events = sfd_chld_file
2537 .query_events(locked, ¤t_task)
2538 .expect("failed to query sfd_chld events");
2539
2540 assert!(!term_int_events.contains(FdEvents::POLLIN));
2541 assert!(chld_events.contains(FdEvents::POLLIN));
2542 })
2543 .await;
2544 }
2545
2546 #[::fuchsia::test]
2547 async fn test_signalfd_filters_signals_async() {
2548 spawn_kernel_and_run(async |locked, current_task| {
2549 let memory_for_masks =
2550 map_memory(locked, ¤t_task, UserAddress::default(), *PAGE_SIZE);
2551
2552 let term_int_mask = SigSet::from(SIGTERM) | SigSet::from(SIGINT);
2554 let term_int_mask_addr = UserRef::<SigSet>::new(memory_for_masks);
2555 current_task
2556 .write_object(term_int_mask_addr, &term_int_mask)
2557 .expect("failed to write mask");
2558 let sfd_term_int = sys_signalfd4(
2559 locked,
2560 ¤t_task,
2561 FdNumber::from_raw(-1),
2562 term_int_mask_addr,
2563 std::mem::size_of::<SigSet>(),
2564 0,
2565 )
2566 .expect("failed to create SIGTERM/SIGINT signalfd");
2567
2568 let sigchld_mask = SigSet::from(SIGCHLD);
2570 let sigchld_mask_addr =
2571 UserRef::<SigSet>::new((memory_for_masks + std::mem::size_of::<SigSet>()).unwrap());
2572 current_task
2573 .write_object(sigchld_mask_addr, &sigchld_mask)
2574 .expect("failed to write mask");
2575 let sfd_chld = sys_signalfd4(
2576 locked,
2577 ¤t_task,
2578 FdNumber::from_raw(-1),
2579 sigchld_mask_addr,
2580 std::mem::size_of::<SigSet>(),
2581 0,
2582 )
2583 .expect("failed to create SIGCHLD signalfd");
2584
2585 let waiter = Waiter::new();
2587 let ready_items = Arc::new(Mutex::new(VecDeque::new()));
2588
2589 let sfd_term_int_file =
2590 current_task.files.get(sfd_term_int).expect("failed to get sfd_term_int file");
2591 let sfd_chld_file =
2592 current_task.files.get(sfd_chld).expect("failed to get sfd_chld file");
2593
2594 sfd_term_int_file
2595 .wait_async(
2596 locked,
2597 ¤t_task,
2598 &waiter,
2599 FdEvents::POLLIN,
2600 EventHandler::Enqueue {
2601 key: sfd_term_int.into(),
2602 queue: ready_items.clone(),
2603 sought_events: FdEvents::POLLIN,
2604 },
2605 )
2606 .expect("failed to wait on sfd_term_int");
2607
2608 sfd_chld_file
2609 .wait_async(
2610 locked,
2611 ¤t_task,
2612 &waiter,
2613 FdEvents::POLLIN,
2614 EventHandler::Enqueue {
2615 key: sfd_chld.into(),
2616 queue: ready_items.clone(),
2617 sought_events: FdEvents::POLLIN,
2618 },
2619 )
2620 .expect("failed to wait on sfd_chld");
2621
2622 let sigchld_mask_ref = UserRef::<SigSet>::new(memory_for_masks);
2624 current_task
2625 .write_object(sigchld_mask_ref, &sigchld_mask)
2626 .expect("failed to write mask");
2627 sys_rt_sigprocmask(
2628 locked,
2629 ¤t_task,
2630 SIG_BLOCK,
2631 sigchld_mask_ref,
2632 UserRef::default(),
2633 std::mem::size_of::<SigSet>(),
2634 )
2635 .expect("failed to block SIGCHLD");
2636
2637 let child = current_task.clone_task_for_test(locked, 0, Some(SIGCHLD));
2639 child.thread_group().exit(locked, ExitStatus::Exit(1), None);
2640 std::mem::drop(child);
2641
2642 waiter.wait(locked, ¤t_task).expect("failed to wait");
2644
2645 let ready_items = ready_items.lock();
2647 assert_eq!(ready_items.len(), 1);
2648 assert_eq!(ready_items[0].key, sfd_chld.into());
2649 })
2650 .await;
2651 }
2652}