1use crate::arch::execution::new_syscall_from_state;
6use crate::mm::{IOVecPtr, MemoryAccessor, MemoryAccessorExt};
7use crate::ptrace::StopState;
8use crate::security;
9use crate::signals::syscalls::WaitingOptions;
10use crate::signals::{
11 SignalDetail, SignalInfo, UncheckedSignalInfo, send_signal_first, send_standard_signal,
12};
13use crate::task::{
14 CurrentTask, PidTable, ProcessSelector, Task, TaskMutableState, ThreadGroup, ThreadState,
15 WaitQueue, ZombieProcess,
16};
17use bitflags::bitflags;
18use starnix_logging::track_stub;
19use starnix_registers::HeapRegs;
20use starnix_sync::{LockBefore, Locked, MmDumpable, ThreadGroupLimits, Unlocked};
21use starnix_syscalls::SyscallResult;
22use starnix_syscalls::decls::SyscallDecl;
23use starnix_types::ownership::{OwnedRef, Releasable, ReleaseGuard, WeakRef};
24use starnix_uapi::auth::PTRACE_MODE_ATTACH_REALCREDS;
25use starnix_uapi::elf::ElfNoteType;
26use starnix_uapi::errors::Errno;
27use starnix_uapi::signals::{SIGKILL, SIGSTOP, SIGTRAP, SigSet, Signal, UncheckedSignal};
28#[allow(unused_imports)]
29use starnix_uapi::user_address::ArchSpecific;
30use starnix_uapi::user_address::{LongPtr, MultiArchUserRef, UserAddress, UserRef};
31use starnix_uapi::{
32 PTRACE_CONT, PTRACE_DETACH, PTRACE_EVENT_CLONE, PTRACE_EVENT_EXEC, PTRACE_EVENT_EXIT,
33 PTRACE_EVENT_FORK, PTRACE_EVENT_SECCOMP, PTRACE_EVENT_STOP, PTRACE_EVENT_VFORK,
34 PTRACE_EVENT_VFORK_DONE, PTRACE_GET_SYSCALL_INFO, PTRACE_GETEVENTMSG, PTRACE_GETREGSET,
35 PTRACE_GETSIGINFO, PTRACE_GETSIGMASK, PTRACE_INTERRUPT, PTRACE_KILL, PTRACE_LISTEN,
36 PTRACE_O_EXITKILL, PTRACE_O_TRACECLONE, PTRACE_O_TRACEEXEC, PTRACE_O_TRACEEXIT,
37 PTRACE_O_TRACEFORK, PTRACE_O_TRACESYSGOOD, PTRACE_O_TRACEVFORK, PTRACE_O_TRACEVFORKDONE,
38 PTRACE_PEEKDATA, PTRACE_PEEKTEXT, PTRACE_PEEKUSR, PTRACE_POKEDATA, PTRACE_POKETEXT,
39 PTRACE_POKEUSR, PTRACE_SETOPTIONS, PTRACE_SETREGSET, PTRACE_SETSIGINFO, PTRACE_SETSIGMASK,
40 PTRACE_SYSCALL, PTRACE_SYSCALL_INFO_ENTRY, PTRACE_SYSCALL_INFO_EXIT, PTRACE_SYSCALL_INFO_NONE,
41 clone_args, errno, error, pid_t, ptrace_syscall_info, tid_t, uapi,
42};
43
44use std::collections::BTreeMap;
45use std::sync::atomic::Ordering;
46use std::sync::{Arc, Weak};
47
48#[cfg(target_arch = "x86_64")]
49use starnix_uapi::{PTRACE_GETREGS, user};
50
51#[cfg(all(target_arch = "aarch64"))]
52use starnix_uapi::arch32::PTRACE_GETREGS;
53
/// Multi-arch pointer to a `user_regs_struct`, resolving to the 64-bit or
/// arch32 layout depending on the tracee's architecture width.
type UserRegsStructPtr =
    MultiArchUserRef<starnix_uapi::user_regs_struct, starnix_uapi::arch32::user_regs_struct>;
56
// Compile-time check that `ptrace_syscall_info` (and its nested anonymous
// unions/structs) has the same layout on every supported architecture, since
// PTRACE_GET_SYSCALL_INFO copies it to userspace byte-for-byte.
uapi::check_arch_independent_layout! {
    ptrace_syscall_info {
        op,
        arch,
        instruction_pointer,
        stack_pointer,
        __bindgen_anon_1,
    }

    ptrace_syscall_info__bindgen_ty_1 {
        entry,
        exit,
        seccomp,
    }

    ptrace_syscall_info__bindgen_ty_1__bindgen_ty_1 {
        nr,
        args,
    }

    ptrace_syscall_info__bindgen_ty_1__bindgen_ty_2 {
        rval,
        is_error,
    }

    ptrace_syscall_info__bindgen_ty_1__bindgen_ty_3 {
        nr,
        args,
        ret_data,
    }
}
88
/// The resume state of a ptraced task, as commanded by its tracer.
#[derive(Clone, Default, PartialEq)]
pub enum PtraceStatus {
    /// No resume command outstanding.
    #[default]
    Default,
    /// Tracer issued a continue-style command (e.g. PTRACE_CONT).
    Continuing,
    /// Tracer issued PTRACE_LISTEN: tracee stays stopped but the tracer can
    /// observe further stop events.
    Listening,
}
107
108impl PtraceStatus {
109 pub fn is_continuing(&self) -> bool {
110 *self == PtraceStatus::Continuing
111 }
112}
113
/// How the tracer attached to the tracee; SEIZE attachments enable extra
/// commands (e.g. PTRACE_INTERRUPT, PTRACE_LISTEN) and different stop
/// semantics.
#[derive(Copy, Clone, PartialEq)]
pub enum PtraceAttachType {
    /// Attached via PTRACE_ATTACH or PTRACE_TRACEME.
    Attach,
    /// Attached via PTRACE_SEIZE.
    Seize,
}
122
bitflags! {
    /// Typed wrapper for the PTRACE_O_* option bits set via PTRACE_SETOPTIONS
    /// (or the data argument of PTRACE_SEIZE).
    #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
    #[repr(transparent)]
    pub struct PtraceOptions: u32 {
        const EXITKILL = starnix_uapi::PTRACE_O_EXITKILL;
        const TRACECLONE = starnix_uapi::PTRACE_O_TRACECLONE;
        const TRACEEXEC = starnix_uapi::PTRACE_O_TRACEEXEC;
        const TRACEEXIT = starnix_uapi::PTRACE_O_TRACEEXIT;
        const TRACEFORK = starnix_uapi::PTRACE_O_TRACEFORK;
        const TRACESYSGOOD = starnix_uapi::PTRACE_O_TRACESYSGOOD;
        const TRACEVFORK = starnix_uapi::PTRACE_O_TRACEVFORK;
        const TRACEVFORKDONE = starnix_uapi::PTRACE_O_TRACEVFORKDONE;
        const TRACESECCOMP = starnix_uapi::PTRACE_O_TRACESECCOMP;
        const SUSPEND_SECCOMP = starnix_uapi::PTRACE_O_SUSPEND_SECCOMP;
    }
}
139
/// A ptrace stop event, with discriminants matching the kernel's
/// PTRACE_EVENT_* values so the event can be reported in the wait status.
#[repr(u32)]
#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)]
pub enum PtraceEvent {
    #[default]
    None = 0,
    Stop = PTRACE_EVENT_STOP,
    Clone = PTRACE_EVENT_CLONE,
    Fork = PTRACE_EVENT_FORK,
    Vfork = PTRACE_EVENT_VFORK,
    VforkDone = PTRACE_EVENT_VFORK_DONE,
    Exec = PTRACE_EVENT_EXEC,
    Exit = PTRACE_EVENT_EXIT,
    Seccomp = PTRACE_EVENT_SECCOMP,
}
154
155impl PtraceEvent {
156 pub fn from_option(option: &PtraceOptions) -> Self {
157 match *option {
158 PtraceOptions::TRACECLONE => PtraceEvent::Clone,
159 PtraceOptions::TRACEFORK => PtraceEvent::Fork,
160 PtraceOptions::TRACEVFORK => PtraceEvent::Vfork,
161 PtraceOptions::TRACEVFORKDONE => PtraceEvent::VforkDone,
162 PtraceOptions::TRACEEXEC => PtraceEvent::Exec,
163 PtraceOptions::TRACEEXIT => PtraceEvent::Exit,
164 PtraceOptions::TRACESECCOMP => PtraceEvent::Seccomp,
165 _ => unreachable!("Bad ptrace event specified"),
166 }
167 }
168}
169
/// A pending ptrace stop event plus the message retrievable by the tracer
/// via PTRACE_GETEVENTMSG.
pub struct PtraceEventData {
    // The event that caused (or will cause) the stop.
    pub event: PtraceEvent,

    // Event-specific payload (e.g. the new child's pid for fork/clone
    // events), returned by PTRACE_GETEVENTMSG.
    pub msg: u64,
}
178
179impl PtraceEventData {
180 pub fn new(option: PtraceOptions, msg: u64) -> Self {
181 Self { event: PtraceEvent::from_option(&option), msg }
182 }
183 pub fn new_from_event(event: PtraceEvent, msg: u64) -> Self {
184 Self { event, msg }
185 }
186}
187
/// The portion of ptrace state that is shared/cloned when a traced task
/// clones (so the new task can be auto-attached to the same tracer).
#[derive(Clone)]
pub struct PtraceCoreState {
    // Pid of the tracer's thread-group leader.
    pub pid: pid_t,

    // The tracer's thread group.
    pub thread_group: Weak<ThreadGroup>,

    // Whether the tracer used ATTACH/TRACEME or SEIZE semantics.
    pub attach_type: PtraceAttachType,

    // PTRACE_O_* options currently in effect.
    pub options: PtraceOptions,

    // Waiters blocked in the tracer waiting for this tracee to change state;
    // shared (Arc) so clones notify the same queue.
    pub tracer_waiters: Arc<WaitQueue>,
}
209
210impl PtraceCoreState {
211 pub fn has_option(&self, option: PtraceOptions) -> bool {
212 self.options.contains(option)
213 }
214}
215
/// Per-tracee ptrace state, stored on the traced task.
pub struct PtraceState {
    // State shared with clones of this tracee; see `PtraceCoreState`.
    pub core_state: PtraceCoreState,

    // Waiters blocked in the tracee waiting for tracer commands.
    pub tracee_waiters: WaitQueue,

    // The signal (if any) that caused the current/last ptrace stop.
    pub last_signal: Option<SignalInfo>,

    // Whether `last_signal` has not yet been consumed by a waitpid()-style
    // call from the tracer.
    pub last_signal_waitable: bool,

    // Event data for the current/last ptrace-event stop, if any.
    pub event_data: Option<PtraceEventData>,

    // Resume state last commanded by the tracer.
    pub stop_status: PtraceStatus,

    // Whether the most recent syscall returned an error; reported via
    // PTRACE_GET_SYSCALL_INFO on syscall-exit stops.
    pub last_syscall_was_error: bool,
}
243
impl PtraceState {
    /// Creates boxed ptrace state for a tracee attached by the tracer whose
    /// leader pid is `pid`, with the given attach semantics and options.
    pub fn new(
        pid: pid_t,
        thread_group: Weak<ThreadGroup>,
        attach_type: PtraceAttachType,
        options: PtraceOptions,
    ) -> Box<Self> {
        Box::new(PtraceState {
            core_state: PtraceCoreState {
                pid,
                thread_group,
                attach_type,
                options,
                tracer_waiters: Arc::new(WaitQueue::default()),
            },
            tracee_waiters: WaitQueue::default(),
            last_signal: None,
            last_signal_waitable: false,
            event_data: None,
            stop_status: PtraceStatus::default(),
            last_syscall_was_error: false,
        })
    }

    /// The pid of the tracer's thread-group leader.
    pub fn get_pid(&self) -> pid_t {
        self.core_state.pid
    }

    /// Retargets this trace at a different tracer pid.
    pub fn set_pid(&mut self, pid: pid_t) {
        self.core_state.pid = pid;
    }

    /// True when the tracer attached with PTRACE_SEIZE.
    pub fn is_seized(&self) -> bool {
        self.core_state.attach_type == PtraceAttachType::Seize
    }

    pub fn get_attach_type(&self) -> PtraceAttachType {
        self.core_state.attach_type
    }

    /// Whether the tracer's waitpid()-style call with `options` should be
    /// able to observe this tracee given its current `stop` state.
    pub fn is_waitable(&self, stop: StopState, options: &WaitingOptions) -> bool {
        if self.stop_status == PtraceStatus::Listening {
            // In LISTEN mode the tracee stays stopped; only an unconsumed
            // signal makes it reportable.
            return self.last_signal_waitable;
        }
        if !options.wait_for_continued && !stop.is_stopping_or_stopped() {
            return false;
        }
        // A stop still in progress is not yet reportable.
        self.last_signal_waitable && !stop.is_in_progress()
    }

    /// Records the signal that caused a stop and marks it waitable.
    /// SIGKILL is deliberately never recorded: it must terminate the task
    /// and cannot be intercepted by the tracer.
    pub fn set_last_signal(&mut self, mut signal: Option<SignalInfo>) {
        if let Some(ref mut siginfo) = signal {
            if siginfo.signal == SIGKILL {
                return;
            }
            self.last_signal_waitable = true;
            self.last_signal = signal;
        }
    }

    /// Records event data for a ptrace-event stop; `None` leaves any
    /// existing event data untouched.
    pub fn set_last_event(&mut self, event: Option<PtraceEventData>) {
        if event.is_some() {
            self.event_data = event;
        }
    }

    pub fn get_last_signal_ref(&self) -> Option<&SignalInfo> {
        self.last_signal.as_ref()
    }

    /// Returns the stop signal, optionally keeping it waitable (used for
    /// WNOWAIT-style queries).
    pub fn get_last_signal(&mut self, keep_signal_waitable: bool) -> Option<SignalInfo> {
        self.last_signal_waitable = keep_signal_waitable;
        self.last_signal.clone()
    }

    pub fn has_option(&self, option: PtraceOptions) -> bool {
        self.core_state.has_option(option)
    }

    /// Replaces the option set from raw PTRACE_O_* bits; EINVAL if any
    /// unknown bit is set.
    pub fn set_options_from_bits(&mut self, option: u32) -> Result<(), Errno> {
        if let Some(options) = PtraceOptions::from_bits(option) {
            self.core_state.options = options;
            Ok(())
        } else {
            error!(EINVAL)
        }
    }

    pub fn get_options(&self) -> PtraceOptions {
        self.core_state.options
    }

    /// Clones the shareable core state (e.g. to auto-attach a new clone).
    pub fn get_core_state(&self) -> PtraceCoreState {
        self.core_state.clone()
    }

    pub fn tracer_waiters(&self) -> &Arc<WaitQueue> {
        &self.core_state.tracer_waiters
    }

    /// Builds the PTRACE_GET_SYSCALL_INFO payload for `target`.
    ///
    /// Returns the number of meaningful bytes (which depends on whether the
    /// tracee is at syscall entry, syscall exit, or neither) together with
    /// the populated `ptrace_syscall_info`.
    pub fn get_target_syscall(
        &self,
        target: &Task,
        state: &TaskMutableState,
    ) -> Result<(i32, ptrace_syscall_info), Errno> {
        // Audit architecture tag reported to the tracer.
        #[cfg(target_arch = "x86_64")]
        let arch = starnix_uapi::AUDIT_ARCH_X86_64;
        #[cfg(target_arch = "aarch64")]
        let arch = starnix_uapi::AUDIT_ARCH_AARCH64;
        #[cfg(target_arch = "riscv64")]
        let arch = starnix_uapi::AUDIT_ARCH_RISCV64;

        let mut info = ptrace_syscall_info { arch, ..Default::default() };
        // Start with the fixed header: everything up to the per-op union.
        let mut info_len = memoffset::offset_of!(ptrace_syscall_info, __bindgen_anon_1);

        match &state.captured_thread_state {
            Some(captured) => {
                let registers = captured.thread_state.registers.clone();
                info.instruction_pointer = registers.instruction_pointer_register();
                info.stack_pointer = registers.stack_pointer_register();
                // A 32-bit tracee on arm64 reports the ARM audit arch.
                #[cfg(target_arch = "aarch64")]
                if captured.thread_state.is_arch32() {
                    info.arch = starnix_uapi::AUDIT_ARCH_ARM;
                }
                match target.load_stopped() {
                    StopState::SyscallEnterStopped => {
                        // Entry stop: report syscall number and arguments.
                        let syscall_decl = SyscallDecl::from_number(
                            registers.syscall_register(),
                            captured.thread_state.arch_width(),
                        );
                        let syscall = new_syscall_from_state(syscall_decl, &captured.thread_state);
                        info.op = PTRACE_SYSCALL_INFO_ENTRY as u8;
                        let entry = linux_uapi::ptrace_syscall_info__bindgen_ty_1__bindgen_ty_1 {
                            nr: syscall.decl.number,
                            args: [
                                syscall.arg0.raw(),
                                syscall.arg1.raw(),
                                syscall.arg2.raw(),
                                syscall.arg3.raw(),
                                syscall.arg4.raw(),
                                syscall.arg5.raw(),
                            ],
                        };
                        // Extend the meaningful length through the end of
                        // the `args` array of the entry variant.
                        info_len += memoffset::offset_of!(
                            linux_uapi::ptrace_syscall_info__bindgen_ty_1__bindgen_ty_1,
                            args
                        ) + std::mem::size_of_val(&entry.args);
                        info.__bindgen_anon_1.entry = entry;
                    }
                    StopState::SyscallExitStopped => {
                        // Exit stop: report return value and error flag.
                        info.op = PTRACE_SYSCALL_INFO_EXIT as u8;
                        let exit = linux_uapi::ptrace_syscall_info__bindgen_ty_1__bindgen_ty_2 {
                            rval: registers.return_register() as i64,
                            is_error: state
                                .ptrace
                                .as_ref()
                                .map_or(0, |ptrace| ptrace.last_syscall_was_error as u8),
                            ..Default::default()
                        };
                        info_len += memoffset::offset_of!(
                            linux_uapi::ptrace_syscall_info__bindgen_ty_1__bindgen_ty_2,
                            is_error
                        ) + std::mem::size_of_val(&exit.is_error);
                        info.__bindgen_anon_1.exit = exit;
                    }
                    _ => {
                        // Not at a syscall boundary: header only.
                        info.op = PTRACE_SYSCALL_INFO_NONE as u8;
                    }
                };
            }
            _ => (),
        }
        Ok((info_len as i32, info))
    }

    /// Decides whether a clone described by `clone_args` should be traced
    /// by this tracee's tracer.
    ///
    /// Returns the trace type (which PTRACE_EVENT_* family applies) and, if
    /// the clone should be traced, a copy of the core state to attach with.
    pub fn get_core_state_for_clone(
        &self,
        clone_args: &clone_args,
    ) -> (PtraceOptions, Option<PtraceCoreState>) {
        // Classify the clone the same way Linux does: CLONE_UNTRACED
        // suppresses tracing; CLONE_VFORK is a vfork; a non-SIGCHLD exit
        // signal means clone; otherwise it is a fork.
        let trace_type = if clone_args.flags & (starnix_uapi::CLONE_UNTRACED as u64) != 0 {
            PtraceOptions::empty()
        } else {
            if clone_args.flags & (starnix_uapi::CLONE_VFORK as u64) != 0 {
                PtraceOptions::TRACEVFORK
            } else if clone_args.exit_signal != (starnix_uapi::SIGCHLD as u64) {
                PtraceOptions::TRACECLONE
            } else {
                PtraceOptions::TRACEFORK
            }
        };

        // Trace the child only if the matching option is set, or the clone
        // explicitly requested tracing via CLONE_PTRACE.
        if !self.has_option(trace_type)
            && (clone_args.flags & (starnix_uapi::CLONE_PTRACE as u64) == 0)
        {
            return (PtraceOptions::empty(), None);
        }

        (trace_type, Some(self.get_core_state()))
    }
}
467
/// A zombie tracee as seen by its tracer: a stand-in zombie reported to the
/// tracer, plus (optionally) the real zombie still owed to the real parent.
struct TracedZombie {
    // Stand-in zombie the tracer waits on.
    artificial_zombie: ZombieProcess,

    // The real parent's thread group and the real zombie that must still be
    // delivered to it once the tracer is done.
    delegate: Option<(Weak<ThreadGroup>, OwnedRef<ZombieProcess>)>,
}
477
478impl Releasable for TracedZombie {
479 type Context<'a> = &'a mut PidTable;
480
481 fn release<'a>(self, pids: &'a mut PidTable) {
482 self.artificial_zombie.release(pids);
483 if let Some((_, z)) = self.delegate {
484 z.release(pids);
485 }
486 }
487}
488
489impl TracedZombie {
490 fn new(artificial_zombie: ZombieProcess) -> ReleaseGuard<Self> {
491 ReleaseGuard::from(Self { artificial_zombie, delegate: None })
492 }
493
494 fn new_with_delegate(
495 artificial_zombie: ZombieProcess,
496 delegate: (Weak<ThreadGroup>, OwnedRef<ZombieProcess>),
497 ) -> ReleaseGuard<Self> {
498 ReleaseGuard::from(Self { artificial_zombie, delegate: Some(delegate) })
499 }
500
501 fn set_parent(
502 &mut self,
503 new_zombie: Option<OwnedRef<ZombieProcess>>,
504 new_parent: &ThreadGroup,
505 ) {
506 if let Some(new_zombie) = new_zombie {
507 self.delegate = Some((new_parent.weak_self.clone(), new_zombie));
508 } else {
509 self.delegate = self.delegate.take().map(|(_, z)| (new_parent.weak_self.clone(), z));
510 }
511 }
512}
513
/// The set of zombie tracees a tracer still has to reap, keyed by tracee
/// tid.
#[derive(Default)]
pub struct ZombiePtracees {
    zombies: BTreeMap<tid_t, ReleaseGuard<TracedZombie>>,
}
523
impl ZombiePtracees {
    pub fn new() -> Self {
        Self::default()
    }

    /// Records `zombie` for tracee `tid`. If an entry already exists the
    /// new zombie is released immediately rather than replacing it.
    pub fn add(&mut self, pids: &mut PidTable, tid: tid_t, zombie: ZombieProcess) {
        if let std::collections::btree_map::Entry::Vacant(entry) = self.zombies.entry(tid) {
            entry.insert(TracedZombie::new(zombie));
        } else {
            zombie.release(pids);
        }
    }

    /// Drops the entry for `tid`, releasing it into the pid table.
    // NOTE(review): relies on `Releasable` being implemented for the
    // `Option` returned by `remove`, so a missing tid is presumably a no-op
    // — confirm against the ownership crate.
    pub fn remove(&mut self, pids: &mut PidTable, tid: tid_t) {
        self.zombies.remove(&tid).release(pids);
    }

    pub fn is_empty(&self) -> bool {
        self.zombies.is_empty()
    }

    /// Updates (or creates) the delegate for `tracee` so the real zombie is
    /// delivered to `new_parent` instead of the previous parent.
    pub fn set_parent_of(
        &mut self,
        tracee: tid_t,
        new_zombie: Option<OwnedRef<ZombieProcess>>,
        new_parent: &ThreadGroup,
    ) {
        match self.zombies.entry(tracee) {
            std::collections::btree_map::Entry::Vacant(entry) => {
                // No existing entry: only create one if we were handed a
                // real zombie to delegate.
                if let Some(new_zombie) = new_zombie {
                    entry.insert(TracedZombie::new_with_delegate(
                        new_zombie.as_artificial(),
                        (new_parent.weak_self.clone(), new_zombie),
                    ));
                }
            }
            std::collections::btree_map::Entry::Occupied(mut entry) => {
                entry.get_mut().set_parent(new_zombie, new_parent);
            }
        }
    }

    /// Moves `old_parent`'s deferred zombie-ptracer bookkeeping to
    /// `new_parent` (e.g. on reparenting), retargeting each tracer-side
    /// delegate at the new parent.
    pub fn reparent(old_parent: &ThreadGroup, new_parent: &ThreadGroup) {
        // Clone the list first so we do not hold old_parent's lock while
        // taking each tracer's lock below.
        let mut lockless_list = old_parent.read().deferred_zombie_ptracers.clone();

        for deferred_zombie_ptracer in &lockless_list {
            if let Some(tg) = deferred_zombie_ptracer.tracer_thread_group_key.upgrade() {
                tg.write().zombie_ptracees.set_parent_of(
                    deferred_zombie_ptracer.tracee_tid,
                    None,
                    new_parent,
                );
            }
        }
        let mut new_state = new_parent.write();
        new_state.deferred_zombie_ptracers.append(&mut lockless_list);
    }

    /// Drains all entries: delivers each delegated real zombie to its real
    /// parent (if still alive) and releases everything else.
    pub fn release(&mut self, pids: &mut PidTable) {
        let mut entry = self.zombies.pop_first();
        while let Some((_, mut zombie)) = entry {
            if let Some((tg, z)) = zombie.delegate.take() {
                if let Some(tg) = tg.upgrade() {
                    tg.do_zombie_notifications(z);
                }
            }
            zombie.release(pids);

            entry = self.zombies.pop_first();
        }
    }

    /// True if any stored zombie matches `selector`.
    pub fn has_zombie_matching(&self, selector: &ProcessSelector) -> bool {
        self.zombies.values().any(|z| z.artificial_zombie.matches_selector(selector))
    }

    pub fn has_tracee(&self, tid: tid_t) -> bool {
        self.zombies.contains_key(&tid)
    }

    /// Finds an entry matching `selector`/`options` for a waitpid()-style
    /// call. Unless WNOWAIT-style `keep_waitable_state` is set, the entry
    /// is removed and its delegate handed to the caller for delivery.
    pub fn get_waitable_entry(
        &mut self,
        selector: &ProcessSelector,
        options: &WaitingOptions,
    ) -> Option<(ZombieProcess, Option<(Weak<ThreadGroup>, OwnedRef<ZombieProcess>)>)> {
        let Some((t, found_zombie)) = self
            .zombies
            .iter()
            .map(|(t, z)| (*t, &z.artificial_zombie))
            .rfind(|(_, zombie)| zombie.matches_selector_and_waiting_option(selector, options))
        else {
            return None;
        };

        let result;
        if !options.keep_waitable_state {
            // Consume the entry: the caller takes ownership of both the
            // artificial zombie and the delegate.
            result = self.zombies.remove(&t).map(|traced_zombie| {
                let traced_zombie = ReleaseGuard::take(traced_zombie);
                (traced_zombie.artificial_zombie, traced_zombie.delegate)
            });
        } else {
            // WNOWAIT: report a copy and leave the entry in place.
            result = Some((found_zombie.as_artificial(), None));
        }

        result
    }
}
650
/// Argument to prctl(PR_SET_PTRACER, ...) meaning "allow any process to
/// ptrace the caller".
pub const PR_SET_PTRACER_ANY: i32 = -1;
654
/// Which processes a task allows to ptrace it, as configured via
/// prctl(PR_SET_PTRACER).
#[derive(Copy, Clone, Default, PartialEq)]
pub enum PtraceAllowedPtracers {
    /// No explicit ptracer granted.
    #[default]
    None,
    /// Only the given pid may attach.
    Some(pid_t),
    /// Any process may attach (PR_SET_PTRACER_ANY).
    Any,
}
664
/// Resumes a stopped tracee, implementing the shared logic behind
/// PTRACE_CONT, PTRACE_SYSCALL, and (with `detach` set) PTRACE_DETACH.
///
/// `data` optionally encodes a signal number to inject on resume; 0 means
/// no signal. Returns EIO if the tracee is not actually stopped, and ESRCH
/// if it cannot accept ptrace commands (unless detaching).
fn ptrace_cont<L>(
    locked: &mut Locked<L>,
    tracee: &Task,
    data: &UserAddress,
    detach: bool,
) -> Result<(), Errno>
where
    L: LockBefore<ThreadGroupLimits>,
{
    let data = data.ptr() as u64;
    let new_state;
    // Decode the optional signal to deliver on resume.
    let mut siginfo = if data != 0 {
        let signal = Signal::try_from(UncheckedSignal::new(data))?;
        Some(SignalInfo::kernel(signal))
    } else {
        None
    };

    let mut state = tracee.write();
    let is_listen = state.is_ptrace_listening();

    // A tracee that is already (becoming) awake cannot be continued; a
    // detach still tears down the ptrace relationship first.
    if tracee.load_stopped().is_waking_or_awake() && !is_listen {
        if detach {
            state.set_ptrace(None)?;
        }
        return error!(EIO);
    }

    if !state.can_accept_ptrace_commands() && !detach {
        return error!(ESRCH);
    }

    if let Some(ptrace) = &mut state.ptrace {
        if data != 0 {
            new_state = PtraceStatus::Continuing;
            // Injecting a signal: reuse the stop's siginfo but swap in the
            // requested signal number.
            if let Some(last_signal) = &mut ptrace.last_signal {
                if let Some(si) = siginfo {
                    let new_signal = si.signal;
                    last_signal.signal = new_signal;
                }
                siginfo = Some(last_signal.clone());
            }
        } else {
            // No signal: clear the recorded stop state entirely.
            new_state = PtraceStatus::Default;
            ptrace.last_signal = None;
            ptrace.event_data = None;
        }
        ptrace.stop_status = new_state;

        if is_listen {
            state.notify_ptracees();
        }
    }

    if let Some(siginfo) = siginfo {
        // The injected signal wakes the tracee as part of delivery.
        send_signal_first(locked, &tracee, state, siginfo);
    } else {
        // Otherwise wake the task and its thread group explicitly; the task
        // lock must be dropped before taking the thread-group lock.
        state.set_stopped(StopState::Waking, None, None, None);
        drop(state);
        tracee.thread_group().set_stopped(StopState::Waking, None, false);
    }
    if detach {
        tracee.write().set_ptrace(None)?;
    }
    Ok(())
}
736
/// Implements PTRACE_INTERRUPT: forces a SEIZE-attached tracee into a
/// ptrace-event stop. Returns EIO for non-SEIZE attachments.
fn ptrace_interrupt(tracee: &Task) -> Result<(), Errno> {
    let mut state = tracee.write();
    if let Some(ptrace) = &mut state.ptrace {
        if !ptrace.is_seized() {
            return error!(EIO);
        }
        let status = ptrace.stop_status.clone();
        ptrace.stop_status = PtraceStatus::Default;
        let event_data = Some(PtraceEventData::new_from_event(PtraceEvent::Stop, 0));
        if status == PtraceStatus::Listening {
            // Already stopped in LISTEN mode: just convert the stop into an
            // event stop, re-reporting the original stop signal.
            let signal = ptrace.last_signal.clone();
            state.set_stopped(StopState::PtraceEventStopped, signal, None, event_data);
        } else {
            // Running tracee: request an event stop with SIGTRAP and kick
            // the task so it notices (lock dropped before interrupting).
            state.set_stopped(
                StopState::PtraceEventStopping,
                Some(SignalInfo::kernel(SIGTRAP)),
                None,
                event_data,
            );
            drop(state);
            tracee.interrupt();
        }
    }
    Ok(())
}
765
766fn ptrace_listen(tracee: &Task) -> Result<(), Errno> {
767 let mut state = tracee.write();
768 if let Some(ptrace) = &mut state.ptrace {
769 if !ptrace.is_seized()
770 || (ptrace.last_signal_waitable
771 && ptrace
772 .event_data
773 .as_ref()
774 .is_some_and(|event_data| event_data.event != PtraceEvent::Stop))
775 {
776 return error!(EIO);
777 }
778 ptrace.stop_status = PtraceStatus::Listening;
779 }
780 Ok(())
781}
782
783pub fn ptrace_detach<L>(
784 locked: &mut Locked<L>,
785 pids: &mut PidTable,
786 thread_group: &ThreadGroup,
787 tracee: &Task,
788 data: &UserAddress,
789) -> Result<(), Errno>
790where
791 L: LockBefore<ThreadGroupLimits>,
792{
793 if let Err(x) = ptrace_cont(locked, &tracee, &data, true) {
794 return Err(x);
795 }
796 let tid = tracee.get_tid();
797 thread_group.ptracees.lock().remove(&tid);
798 thread_group.write().zombie_ptracees.remove(pids, tid);
799 Ok(())
800}
801
/// Entry point for the ptrace() syscall: dispatches `request` against the
/// tracee identified by `pid`, with `addr`/`data` interpreted per request.
///
/// Requests that may run against a non-stopped tracee (KILL, INTERRUPT,
/// LISTEN, CONT, SYSCALL, DETACH) are handled first; everything else
/// requires the tracee to be stopped and accepting ptrace commands.
pub fn ptrace_dispatch<L>(
    locked: &mut Locked<L>,
    current_task: &mut CurrentTask,
    request: u32,
    pid: pid_t,
    addr: UserAddress,
    data: UserAddress,
) -> Result<SyscallResult, Errno>
where
    L: LockBefore<ThreadGroupLimits>,
{
    let mut pids = current_task.kernel().pids.write();
    let weak_task = pids.get_task(pid);
    let tracee = weak_task.upgrade().ok_or_else(|| errno!(ESRCH))?;

    // Only the attached tracer may issue commands against this tracee.
    if let Some(ptrace) = &tracee.read().ptrace {
        if ptrace.get_pid() != current_task.get_pid() {
            return error!(ESRCH);
        }
    }

    // Requests that do not require the tracee to be in a ptrace stop.
    match request {
        PTRACE_KILL => {
            let siginfo = SignalInfo::with_detail(
                SIGKILL,
                (SIGTRAP.number() | PTRACE_KILL << 8) as i32,
                SignalDetail::None,
            );
            send_standard_signal(locked, &tracee, siginfo);
            return Ok(starnix_syscalls::SUCCESS);
        }
        PTRACE_INTERRUPT => {
            ptrace_interrupt(tracee.as_ref())?;
            return Ok(starnix_syscalls::SUCCESS);
        }
        PTRACE_LISTEN => {
            ptrace_listen(&tracee)?;
            return Ok(starnix_syscalls::SUCCESS);
        }
        PTRACE_CONT => {
            ptrace_cont(locked, &tracee, &data, false)?;
            return Ok(starnix_syscalls::SUCCESS);
        }
        PTRACE_SYSCALL => {
            // Arm syscall-entry/exit stops before resuming.
            tracee.trace_syscalls.store(true, std::sync::atomic::Ordering::Relaxed);
            ptrace_cont(locked, &tracee, &data, false)?;
            return Ok(starnix_syscalls::SUCCESS);
        }
        PTRACE_DETACH => {
            ptrace_detach(locked, &mut pids, current_task.thread_group(), tracee.as_ref(), &data)?;
            return Ok(starnix_syscalls::SUCCESS);
        }
        _ => {}
    }

    // Everything below requires a stopped, command-accepting tracee.
    let mut state = tracee.write();
    if !state.can_accept_ptrace_commands() {
        return error!(ESRCH);
    }

    match request {
        PTRACE_PEEKDATA | PTRACE_PEEKTEXT => {
            let Some(captured) = &mut state.captured_thread_state else {
                return error!(ESRCH);
            };

            // Read one word from the tracee at `addr`, write it to the
            // tracer at `data` (Linux would return it; here it goes via
            // memory).
            let src = LongPtr::new(captured.as_ref(), addr);
            let val = tracee.read_multi_arch_object(src)?;

            let dst = LongPtr::new(&src, data);
            current_task.write_multi_arch_object(dst, val)?;
            Ok(starnix_syscalls::SUCCESS)
        }
        PTRACE_POKEDATA | PTRACE_POKETEXT => {
            let Some(captured) = &mut state.captured_thread_state else {
                return error!(ESRCH);
            };

            // Word size follows the tracee's architecture width.
            let bytes = if captured.is_arch32() {
                u32::try_from(data.ptr()).map_err(|_| errno!(EINVAL))?.to_ne_bytes().to_vec()
            } else {
                data.ptr().to_ne_bytes().to_vec()
            };

            // force_write_memory bypasses page protections, as ptrace must.
            tracee.mm()?.force_write_memory(addr, &bytes)?;

            Ok(starnix_syscalls::SUCCESS)
        }
        PTRACE_PEEKUSR => {
            let Some(captured) = &mut state.captured_thread_state else {
                return error!(ESRCH);
            };

            // Read the register at offset `addr` in the user area; result
            // is written to the tracer at `data`.
            let dst = LongPtr::new(captured.as_ref(), data);
            let val = ptrace_peekuser(&mut captured.thread_state, addr.ptr() as usize)?;
            current_task.write_multi_arch_object(dst, val as u64)?;
            return Ok(starnix_syscalls::SUCCESS);
        }
        PTRACE_POKEUSR => {
            ptrace_pokeuser(&mut *state, data.ptr() as usize, addr.ptr() as usize)?;
            return Ok(starnix_syscalls::SUCCESS);
        }
        PTRACE_GETREGSET => {
            if let Some(ref mut captured) = state.captured_thread_state {
                // `data` points at an iovec describing the destination
                // buffer; the length is updated to the bytes written.
                let uiv = IOVecPtr::new(current_task, data);
                let mut iv = current_task.read_multi_arch_object(uiv)?;
                let base = iv.iov_base.addr;
                let mut len = iv.iov_len as usize;
                ptrace_getregset(
                    current_task,
                    &mut captured.thread_state,
                    ElfNoteType::try_from(addr.ptr() as usize)?,
                    base,
                    &mut len,
                )?;
                iv.iov_len = len as u64;
                current_task.write_multi_arch_object(uiv, iv)?;
                return Ok(starnix_syscalls::SUCCESS);
            }
            error!(ESRCH)
        }
        PTRACE_SETREGSET => {
            if let Some(ref mut captured) = state.captured_thread_state {
                // Mark the captured registers dirty so they are written
                // back when the tracee resumes.
                captured.dirty = true;
                let uiv = IOVecPtr::new(current_task, data);
                let iv = current_task.read_multi_arch_object(uiv)?;
                let base = iv.iov_base.addr;
                let len = iv.iov_len as usize;
                ptrace_setregset(
                    current_task,
                    &mut captured.thread_state,
                    ElfNoteType::try_from(addr.ptr() as usize)?,
                    base,
                    len,
                )?;
                return Ok(starnix_syscalls::SUCCESS);
            }
            error!(ESRCH)
        }
        #[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
        PTRACE_GETREGS => {
            if let Some(captured) = &mut state.captured_thread_state {
                // GETREGS is GETREGSET for PrStatus with an unbounded
                // destination length.
                let mut len = usize::MAX;
                ptrace_getregset(
                    current_task,
                    &mut captured.thread_state,
                    ElfNoteType::PrStatus,
                    data.ptr() as u64,
                    &mut len,
                )?;
                return Ok(starnix_syscalls::SUCCESS);
            }
            error!(ESRCH)
        }
        PTRACE_SETSIGMASK => {
            // `addr` carries the sigset size and must match exactly.
            if addr.ptr() != std::mem::size_of::<SigSet>() {
                return error!(EINVAL);
            }
            let src: UserRef<SigSet> = UserRef::from(data);
            let val = current_task.read_object(src)?;
            state.set_signal_mask(val);

            Ok(starnix_syscalls::SUCCESS)
        }
        PTRACE_GETSIGMASK => {
            if addr.ptr() != std::mem::size_of::<SigSet>() {
                return error!(EINVAL);
            }
            let dst: UserRef<SigSet> = UserRef::from(data);
            let val = state.signal_mask();
            current_task.write_object(dst, &val)?;
            Ok(starnix_syscalls::SUCCESS)
        }
        PTRACE_GETSIGINFO => {
            // Copy the siginfo of the signal that caused the current stop
            // out to the tracer; EINVAL if there is none.
            if let Some(ptrace) = &state.ptrace {
                if let Some(signal) = ptrace.last_signal.as_ref() {
                    let dst = MultiArchUserRef::<uapi::siginfo_t, uapi::arch32::siginfo_t>::new(
                        current_task,
                        data,
                    );
                    signal.write(current_task, dst)?;
                } else {
                    return error!(EINVAL);
                }
            }
            Ok(starnix_syscalls::SUCCESS)
        }
        PTRACE_SETSIGINFO => {
            let siginfo = UncheckedSignalInfo::read_from_siginfo(current_task, data)?.try_into()?;
            if let Some(ptrace) = &mut state.ptrace {
                ptrace.last_signal = Some(siginfo);
            }
            Ok(starnix_syscalls::SUCCESS)
        }
        PTRACE_GET_SYSCALL_INFO => {
            if let Some(ptrace) = &state.ptrace {
                let (size, info) = ptrace.get_target_syscall(&tracee, &state)?;
                let dst: UserRef<ptrace_syscall_info> = UserRef::from(data);
                // Copy at most the caller-provided buffer size (in `addr`).
                let len = std::cmp::min(std::mem::size_of::<ptrace_syscall_info>(), addr.ptr());
                // SAFETY: `info` is a plain-old-data struct that lives for
                // the duration of the borrow, and `len` is clamped to
                // size_of::<ptrace_syscall_info>(), so reading `len` bytes
                // from its address is in bounds.
                let src = unsafe {
                    std::slice::from_raw_parts(
                        &info as *const ptrace_syscall_info as *const u8,
                        len as usize,
                    )
                };
                current_task.write_memory(dst.addr(), src)?;
                // Returns the number of meaningful bytes, not bytes copied.
                Ok(size.into())
            } else {
                error!(ESRCH)
            }
        }
        PTRACE_SETOPTIONS => {
            let mask = data.ptr() as u32;
            // Reject (as unimplemented) any option bit outside the
            // currently supported set.
            if mask != 0
                && (mask
                    & !(PTRACE_O_TRACESYSGOOD
                        | PTRACE_O_TRACECLONE
                        | PTRACE_O_TRACEFORK
                        | PTRACE_O_TRACEVFORK
                        | PTRACE_O_TRACEVFORKDONE
                        | PTRACE_O_TRACEEXEC
                        | PTRACE_O_TRACEEXIT
                        | PTRACE_O_EXITKILL)
                    != 0)
            {
                track_stub!(TODO("https://fxbug.dev/322874463"), "ptrace(PTRACE_SETOPTIONS)", mask);
                return error!(ENOSYS);
            }
            if let Some(ptrace) = &mut state.ptrace {
                ptrace.set_options_from_bits(mask)?;
            }
            Ok(starnix_syscalls::SUCCESS)
        }
        PTRACE_GETEVENTMSG => {
            if let Some(ptrace) = &state.ptrace {
                if let Some(event_data) = &ptrace.event_data {
                    let dst = LongPtr::new(current_task, data);
                    current_task.write_multi_arch_object(dst, event_data.msg)?;
                    return Ok(starnix_syscalls::SUCCESS);
                }
            }
            error!(EIO)
        }
        _ => {
            track_stub!(TODO("https://fxbug.dev/322874463"), "ptrace", request);
            error!(ENOSYS)
        }
    }
}
1067
/// Attaches `task` as a tracee of `thread_group` (the tracer): records it
/// in the tracer's ptracee table and installs fresh `PtraceState` on the
/// task.
fn do_attach(
    thread_group: &ThreadGroup,
    task: WeakRef<Task>,
    attach_type: PtraceAttachType,
    options: PtraceOptions,
) -> Result<(), Errno> {
    if let Some(task_ref) = task.upgrade() {
        thread_group.ptracees.lock().insert(task_ref.get_tid(), (&task_ref).into());
        {
            let process_state = &mut task_ref.thread_group().write();
            let mut state = task_ref.write();
            state.set_ptrace(Some(PtraceState::new(
                thread_group.leader,
                thread_group.weak_self.clone(),
                attach_type,
                options,
            )))?;
            // If the tracee is already group-stopped, make that stop
            // immediately observable by the new tracer.
            if process_state.is_waitable()
                && process_state.base.load_stopped() == StopState::GroupStopped
                && task_ref.load_stopped() == StopState::GroupStopped
            {
                if let Some(ptrace) = &mut state.ptrace {
                    ptrace.last_signal_waitable = true;
                }
            }
        }
        return Ok(());
    }
    // Callers pass a task they (or the pid table) just resolved, so the
    // upgrade is expected to succeed; reaching here indicates a kernel bug.
    unreachable!("Tracee thread not found");
}
1103
/// Auto-attaches a freshly cloned task to the tracer described by
/// `ptrace_state` (e.g. for PTRACE_O_TRACEFORK-style auto-attach), sharing
/// the original tracee's tracer wait queue, then delivers the initial stop
/// signal (SIGTRAP for seized traces, SIGSTOP otherwise).
pub fn ptrace_attach_from_state<L>(
    locked: &mut Locked<L>,
    tracee_task: &OwnedRef<Task>,
    ptrace_state: PtraceCoreState,
) -> Result<(), Errno>
where
    L: LockBefore<ThreadGroupLimits>,
{
    {
        // Resolve the tracer's thread group from its leader pid.
        let weak_tg =
            tracee_task.thread_group().kernel.pids.read().get_thread_group(ptrace_state.pid);
        let tracer_tg = weak_tg.ok_or_else(|| errno!(ESRCH))?;
        do_attach(
            &tracer_tg,
            WeakRef::from(tracee_task),
            ptrace_state.attach_type,
            ptrace_state.options,
        )?;
    }
    let mut state = tracee_task.write();
    // Share the tracer's wait queue so waiters see this tracee's events.
    if let Some(ptrace) = &mut state.ptrace {
        ptrace.core_state.tracer_waiters = Arc::clone(&ptrace_state.tracer_waiters);
    }

    let signal = if ptrace_state.attach_type == PtraceAttachType::Seize {
        // Seized traces report a PTRACE_EVENT_STOP with SIGTRAP.
        if let Some(ptrace) = &mut state.ptrace {
            ptrace.set_last_event(Some(PtraceEventData::new_from_event(PtraceEvent::Stop, 0)));
        }
        SignalInfo::kernel(SIGTRAP)
    } else {
        SignalInfo::kernel(SIGSTOP)
    };
    send_signal_first(locked, tracee_task, state, signal);

    Ok(())
}
1144
/// Implements PTRACE_TRACEME: attaches the calling task as a tracee of its
/// parent, after a security check. Fails with EPERM when there is no
/// parent (e.g. init).
pub fn ptrace_traceme(current_task: &mut CurrentTask) -> Result<SyscallResult, Errno> {
    let parent = current_task.thread_group().read().parent.clone();
    if let Some(parent) = parent {
        let parent = parent.upgrade();
        {
            // Security check against the parent's leader task; the pid
            // table lock is scoped so it is released before attaching.
            let pids = current_task.kernel().pids.read();
            let parent_task = pids.get_task(parent.leader);
            security::ptrace_traceme(
                current_task,
                parent_task.upgrade().ok_or_else(|| errno!(EINVAL))?.as_ref(),
            )?;
        }

        let task_ref = OwnedRef::temp(&current_task.task);
        do_attach(&parent, (&task_ref).into(), PtraceAttachType::Attach, PtraceOptions::empty())?;
        Ok(starnix_syscalls::SUCCESS)
    } else {
        error!(EPERM)
    }
}
1166
/// Implements PTRACE_ATTACH / PTRACE_SEIZE against the task identified by
/// `pid`, after an access-mode check. For SEIZE, `data` carries the initial
/// PTRACE_O_* option bits; for ATTACH, a SIGSTOP is delivered to stop the
/// new tracee.
pub fn ptrace_attach<L>(
    locked: &mut Locked<L>,
    current_task: &mut CurrentTask,
    pid: pid_t,
    attach_type: PtraceAttachType,
    data: UserAddress,
) -> Result<SyscallResult, Errno>
where
    L: LockBefore<MmDumpable>,
{
    let weak_task = current_task.kernel().pids.read().get_task(pid);
    let tracee = weak_task.upgrade().ok_or_else(|| errno!(ESRCH))?;

    // A thread group cannot trace itself.
    if tracee.thread_group == current_task.thread_group {
        return error!(EPERM);
    }

    current_task.check_ptrace_access_mode(locked, PTRACE_MODE_ATTACH_REALCREDS, &tracee)?;
    do_attach(current_task.thread_group(), weak_task.clone(), attach_type, PtraceOptions::empty())?;
    if attach_type == PtraceAttachType::Attach {
        // Classic attach stops the tracee with SIGSTOP.
        send_standard_signal(
            locked.cast_locked::<MmDumpable>(),
            &tracee,
            SignalInfo::kernel(SIGSTOP),
        );
    } else if attach_type == PtraceAttachType::Seize {
        // Seize does not stop the tracee but applies the initial options.
        if let Some(task_ref) = weak_task.upgrade() {
            let mut state = task_ref.write();
            if let Some(ptrace) = &mut state.ptrace {
                ptrace.set_options_from_bits(data.ptr() as u32)?;
            }
        }
    }
    Ok(starnix_syscalls::SUCCESS)
}
1203
1204pub fn ptrace_peekuser(
1208 thread_state: &mut ThreadState<HeapRegs>,
1209 offset: usize,
1210) -> Result<usize, Errno> {
1211 #[cfg(any(target_arch = "x86_64"))]
1212 if offset >= std::mem::size_of::<user>() {
1213 return error!(EIO);
1214 }
1215 if offset < UserRegsStructPtr::size_of_object_for(thread_state) {
1216 let result = thread_state.get_user_register(offset)?;
1217 return Ok(result);
1218 }
1219 error!(EIO)
1220}
1221
1222pub fn ptrace_pokeuser(
1223 state: &mut TaskMutableState,
1224 value: usize,
1225 offset: usize,
1226) -> Result<(), Errno> {
1227 if let Some(ref mut thread_state) = state.captured_thread_state {
1228 thread_state.dirty = true;
1229
1230 #[cfg(any(target_arch = "x86_64"))]
1231 if offset >= std::mem::size_of::<user>() {
1232 return error!(EIO);
1233 }
1234 if offset < UserRegsStructPtr::size_of_object_for(thread_state.as_ref()) {
1235 return thread_state.thread_state.set_user_register(offset, value);
1236 }
1237 }
1238 error!(EIO)
1239}
1240
/// Copies the tracee's register set of the given `regset_type` to tracer
/// memory at `base`, implementing PTRACE_GETREGSET.
///
/// On entry `len` is the destination buffer size; on success it is updated
/// to the number of bytes written. Only the general-purpose register set
/// (`PrStatus`) is currently supported.
pub fn ptrace_getregset(
    current_task: &CurrentTask,
    thread_state: &mut ThreadState<HeapRegs>,
    regset_type: ElfNoteType,
    base: u64,
    len: &mut usize,
) -> Result<(), Errno> {
    match regset_type {
        ElfNoteType::PrStatus => {
            let user_regs_struct_len = UserRegsStructPtr::size_of_object_for(thread_state);
            if *len < user_regs_struct_len {
                return error!(EINVAL);
            }
            *len = user_regs_struct_len;
            // Walk the user_regs_struct word by word, copying each
            // register value out to the tracer.
            let mut i: usize = 0;
            let mut reg_ptr = LongPtr::new(thread_state, base);
            while i < *len {
                let mut val = None;
                thread_state
                    .registers
                    .apply_user_register(i, &mut |register| val = Some(*register as usize))?;
                // `val` stays None for offsets apply_user_register skips;
                // those words are left unwritten.
                if let Some(val) = val {
                    current_task.write_multi_arch_object(reg_ptr, val as u64)?;
                }
                i += reg_ptr.size_of_object();
                reg_ptr = reg_ptr.next()?;
            }
            Ok(())
        }
        _ => {
            error!(EINVAL)
        }
    }
}
1275
1276pub fn ptrace_setregset(
1277 current_task: &CurrentTask,
1278 thread_state: &mut ThreadState<HeapRegs>,
1279 regset_type: ElfNoteType,
1280 base: u64,
1281 mut len: usize,
1282) -> Result<(), Errno> {
1283 match regset_type {
1284 ElfNoteType::PrStatus => {
1285 let user_regs_struct_len = UserRegsStructPtr::size_of_object_for(thread_state);
1286 if len < user_regs_struct_len {
1287 return error!(EINVAL);
1288 }
1289 len = user_regs_struct_len;
1290 let mut i: usize = 0;
1291 let mut reg_ptr = LongPtr::new(thread_state, base);
1292 while i < len {
1293 let val = current_task.read_multi_arch_object(reg_ptr)?;
1294 thread_state.registers.apply_user_register(i, &mut |register| *register = val)?;
1295 i += reg_ptr.size_of_object();
1296 reg_ptr = reg_ptr.next()?;
1297 }
1298 Ok(())
1299 }
1300 _ => {
1301 error!(EINVAL)
1302 }
1303 }
1304}
1305
1306#[inline(never)]
1307pub fn ptrace_syscall_enter(locked: &mut Locked<Unlocked>, current_task: &mut CurrentTask) {
1308 let block = {
1309 let mut state = current_task.write();
1310 if state.ptrace.is_some() {
1311 current_task.trace_syscalls.store(false, Ordering::Relaxed);
1312 let mut sig = SignalInfo::with_detail(
1313 SIGTRAP,
1314 (linux_uapi::SIGTRAP | 0x80) as i32,
1315 SignalDetail::None,
1316 );
1317 if state
1318 .ptrace
1319 .as_ref()
1320 .is_some_and(|ptrace| ptrace.has_option(PtraceOptions::TRACESYSGOOD))
1321 {
1322 sig.signal.set_ptrace_syscall_bit();
1323 }
1324 state.set_stopped(StopState::SyscallEnterStopping, Some(sig), None, None);
1325 true
1326 } else {
1327 false
1328 }
1329 };
1330 if block {
1331 current_task.block_while_stopped(locked);
1332 }
1333}
1334
1335#[inline(never)]
1336pub fn ptrace_syscall_exit(
1337 locked: &mut Locked<Unlocked>,
1338 current_task: &mut CurrentTask,
1339 is_error: bool,
1340) {
1341 let block = {
1342 let mut state = current_task.write();
1343 current_task.trace_syscalls.store(false, Ordering::Relaxed);
1344 if state.ptrace.is_some() {
1345 let mut sig = SignalInfo::with_detail(
1346 SIGTRAP,
1347 (linux_uapi::SIGTRAP | 0x80) as i32,
1348 SignalDetail::None,
1349 );
1350 if state
1351 .ptrace
1352 .as_ref()
1353 .is_some_and(|ptrace| ptrace.has_option(PtraceOptions::TRACESYSGOOD))
1354 {
1355 sig.signal.set_ptrace_syscall_bit();
1356 }
1357
1358 state.set_stopped(StopState::SyscallExitStopping, Some(sig), None, None);
1359 if let Some(ptrace) = &mut state.ptrace {
1360 ptrace.last_syscall_was_error = is_error;
1361 }
1362 true
1363 } else {
1364 false
1365 }
1366 };
1367 if block {
1368 current_task.block_while_stopped(locked);
1369 }
1370}
1371
#[cfg(test)]
mod tests {
    use super::*;
    use crate::task::syscalls::sys_prctl;
    use crate::testing::{create_task, spawn_kernel_and_run};
    use starnix_uapi::PR_SET_PTRACER;
    use starnix_uapi::auth::CAP_SYS_PTRACE;

    // Verifies that, under yama's restricted scope, `PR_SET_PTRACER` grants
    // attach rights to exactly the named tracer and nobody else.
    #[::fuchsia::test]
    async fn test_set_ptracer() {
        spawn_kernel_and_run(async |locked, current_task| {
            let kernel = current_task.kernel().clone();
            let mut tracee = create_task(locked, &kernel, "tracee");
            let mut tracer = create_task(locked, &kernel, "tracer");

            // Drop CAP_SYS_PTRACE from the tracer so attaching is governed
            // by the ptrace scope / PR_SET_PTRACER grant, not by capability.
            let mut creds = tracer.real_creds().clone();
            creds.cap_effective &= !CAP_SYS_PTRACE;
            tracer.set_creds(creds);

            kernel.ptrace_scope.store(security::yama::SCOPE_RESTRICTED, Ordering::Relaxed);
            // Naming a nonexistent pid as the allowed tracer is rejected.
            assert_eq!(
                sys_prctl(locked, &mut tracee, PR_SET_PTRACER, 0xFFF, 0, 0, 0),
                error!(EINVAL)
            );

            // Without a grant, the capability-stripped tracer may not attach.
            assert_eq!(
                ptrace_attach(
                    locked,
                    &mut tracer,
                    tracee.as_ref().task.tid,
                    PtraceAttachType::Attach,
                    UserAddress::NULL,
                ),
                error!(EPERM)
            );

            // Grant attach rights to the tracer's thread-group leader.
            assert!(
                sys_prctl(
                    locked,
                    &mut tracee,
                    PR_SET_PTRACER,
                    tracer.thread_group().leader as u64,
                    0,
                    0,
                    0
                )
                .is_ok()
            );

            // A different task with identical creds is still refused: the
            // grant is tied to the specific tracer task, not its creds.
            let mut not_tracer = create_task(locked, &kernel, "not-tracer");
            not_tracer.set_creds(tracer.real_creds().clone());
            assert_eq!(
                ptrace_attach(
                    locked,
                    &mut not_tracer,
                    tracee.as_ref().task.tid,
                    PtraceAttachType::Attach,
                    UserAddress::NULL,
                ),
                error!(EPERM)
            );

            // The designated tracer can now attach successfully.
            assert!(
                ptrace_attach(
                    locked,
                    &mut tracer,
                    tracee.as_ref().task.tid,
                    PtraceAttachType::Attach,
                    UserAddress::NULL,
                )
                .is_ok()
            );
        })
        .await;
    }

    // Verifies that `PR_SET_PTRACER` with `PR_SET_PTRACER_ANY` opens the
    // tracee to attachment by any task under the restricted scope.
    #[::fuchsia::test]
    async fn test_set_ptracer_any() {
        spawn_kernel_and_run(async |locked, current_task| {
            let kernel = current_task.kernel().clone();
            let mut tracee = create_task(locked, &kernel, "tracee");
            let mut tracer = create_task(locked, &kernel, "tracer");

            // Strip CAP_SYS_PTRACE so the grant, not a capability, decides.
            let mut creds = tracer.real_creds().clone();
            creds.cap_effective &= !CAP_SYS_PTRACE;
            tracer.set_creds(creds);

            kernel.ptrace_scope.store(security::yama::SCOPE_RESTRICTED, Ordering::Relaxed);
            // Naming a nonexistent pid as the allowed tracer is rejected.
            assert_eq!(
                sys_prctl(locked, &mut tracee, PR_SET_PTRACER, 0xFFF, 0, 0, 0),
                error!(EINVAL)
            );

            // Without any grant, attaching is forbidden under restricted scope.
            assert_eq!(
                ptrace_attach(
                    locked,
                    &mut tracer,
                    tracee.as_ref().task.tid,
                    PtraceAttachType::Attach,
                    UserAddress::NULL,
                ),
                error!(EPERM)
            );

            // Grant attach rights to everyone.
            assert!(
                sys_prctl(locked, &mut tracee, PR_SET_PTRACER, PR_SET_PTRACER_ANY as u64, 0, 0, 0)
                    .is_ok()
            );

            // The previously refused tracer can now attach.
            assert!(
                ptrace_attach(
                    locked,
                    &mut tracer,
                    tracee.as_ref().task.tid,
                    PtraceAttachType::Attach,
                    UserAddress::NULL,
                )
                .is_ok()
            );
        })
        .await;
    }
}