use crate::arch::execution::new_syscall_from_state;
use crate::mm::{IOVecPtr, MemoryAccessor, MemoryAccessorExt};
use crate::ptrace::StopState;
use crate::security;
use crate::signals::syscalls::WaitingOptions;
use crate::signals::{SignalInfo, UncheckedSignalInfo, send_signal_first, send_standard_signal};
use crate::task::{
    CurrentTask, PidTable, ProcessSelector, Task, TaskMutableState, ThreadGroup, ThreadState,
    WaitQueue, ZombieProcess,
};
use bitflags::bitflags;
use starnix_logging::track_stub;
use starnix_registers::HeapRegs;
use starnix_sync::{LockBefore, Locked, MmDumpable, ThreadGroupLimits, Unlocked};
use starnix_syscalls::SyscallResult;
use starnix_syscalls::decls::SyscallDecl;
use starnix_types::ownership::{OwnedRef, Releasable, ReleaseGuard, WeakRef};
use starnix_uapi::auth::PTRACE_MODE_ATTACH_REALCREDS;
use starnix_uapi::elf::ElfNoteType;
use starnix_uapi::errors::Errno;
use starnix_uapi::signals::{SIGKILL, SIGSTOP, SIGTRAP, SigSet, Signal, UncheckedSignal};
#[allow(unused_imports)]
use starnix_uapi::user_address::ArchSpecific;
use starnix_uapi::user_address::{LongPtr, MultiArchUserRef, UserAddress, UserRef};
use starnix_uapi::{
    PTRACE_CONT, PTRACE_DETACH, PTRACE_EVENT_CLONE, PTRACE_EVENT_EXEC, PTRACE_EVENT_EXIT,
    PTRACE_EVENT_FORK, PTRACE_EVENT_SECCOMP, PTRACE_EVENT_STOP, PTRACE_EVENT_VFORK,
    PTRACE_EVENT_VFORK_DONE, PTRACE_GET_SYSCALL_INFO, PTRACE_GETEVENTMSG, PTRACE_GETREGSET,
    PTRACE_GETSIGINFO, PTRACE_GETSIGMASK, PTRACE_INTERRUPT, PTRACE_KILL, PTRACE_LISTEN,
    PTRACE_O_EXITKILL, PTRACE_O_TRACECLONE, PTRACE_O_TRACEEXEC, PTRACE_O_TRACEEXIT,
    PTRACE_O_TRACEFORK, PTRACE_O_TRACESYSGOOD, PTRACE_O_TRACEVFORK, PTRACE_O_TRACEVFORKDONE,
    PTRACE_PEEKDATA, PTRACE_PEEKTEXT, PTRACE_PEEKUSR, PTRACE_POKEDATA, PTRACE_POKETEXT,
    PTRACE_POKEUSR, PTRACE_SETOPTIONS, PTRACE_SETREGSET, PTRACE_SETSIGINFO, PTRACE_SETSIGMASK,
    PTRACE_SYSCALL, PTRACE_SYSCALL_INFO_ENTRY, PTRACE_SYSCALL_INFO_EXIT, PTRACE_SYSCALL_INFO_NONE,
    clone_args, errno, error, pid_t, ptrace_syscall_info, tid_t, uapi,
};

use std::collections::BTreeMap;
use std::sync::atomic::Ordering;
use std::sync::{Arc, Weak};

#[cfg(target_arch = "x86_64")]
use starnix_uapi::{PTRACE_GETREGS, user};

#[cfg(all(target_arch = "aarch64"))]
use starnix_uapi::arch32::PTRACE_GETREGS;

type UserRegsStructPtr =
    MultiArchUserRef<starnix_uapi::user_regs_struct, starnix_uapi::arch32::user_regs_struct>;

uapi::check_arch_independent_layout! {
    ptrace_syscall_info {
        op,
        arch,
        instruction_pointer,
        stack_pointer,
        __bindgen_anon_1,
    }

    ptrace_syscall_info__bindgen_ty_1 {
        entry,
        exit,
        seccomp,
    }

    ptrace_syscall_info__bindgen_ty_1__bindgen_ty_1 {
        nr,
        args,
    }

    ptrace_syscall_info__bindgen_ty_1__bindgen_ty_2 {
        rval,
        is_error,
    }

    ptrace_syscall_info__bindgen_ty_1__bindgen_ty_3 {
        nr,
        args,
        ret_data,
    }
}

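/// How a tracer has asked a stopped tracee to proceed: remain stopped
/// (the default), a pending PTRACE_CONT-style resume, or listening for
/// further events after PTRACE_LISTEN.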
#[derive(Clone, Default, PartialEq)]
pub enum PtraceStatus {
    #[default]
    Default,
    Continuing,
    Listening,
}

impl PtraceStatus {
    pub fn is_continuing(&self) -> bool {
        *self == PtraceStatus::Continuing
    }
}

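/// Whether the tracer attached with PTRACE_ATTACH / PTRACE_TRACEME or with
/// PTRACE_SEIZE.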
#[derive(Copy, Clone, PartialEq)]
pub enum PtraceAttachType {
    Attach,
    Seize,
}

bitflags! {
    #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
    #[repr(transparent)]
    pub struct PtraceOptions: u32 {
        const EXITKILL = starnix_uapi::PTRACE_O_EXITKILL;
        const TRACECLONE = starnix_uapi::PTRACE_O_TRACECLONE;
        const TRACEEXEC = starnix_uapi::PTRACE_O_TRACEEXEC;
        const TRACEEXIT = starnix_uapi::PTRACE_O_TRACEEXIT;
        const TRACEFORK = starnix_uapi::PTRACE_O_TRACEFORK;
        const TRACESYSGOOD = starnix_uapi::PTRACE_O_TRACESYSGOOD;
        const TRACEVFORK = starnix_uapi::PTRACE_O_TRACEVFORK;
        const TRACEVFORKDONE = starnix_uapi::PTRACE_O_TRACEVFORKDONE;
        const TRACESECCOMP = starnix_uapi::PTRACE_O_TRACESECCOMP;
        const SUSPEND_SECCOMP = starnix_uapi::PTRACE_O_SUSPEND_SECCOMP;
    }
}

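/// The ptrace event (PTRACE_EVENT_*) reported to the tracer when a tracee
/// stops for an event rather than for a signal.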
#[repr(u32)]
#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)]
pub enum PtraceEvent {
    #[default]
    None = 0,
    Stop = PTRACE_EVENT_STOP,
    Clone = PTRACE_EVENT_CLONE,
    Fork = PTRACE_EVENT_FORK,
    Vfork = PTRACE_EVENT_VFORK,
    VforkDone = PTRACE_EVENT_VFORK_DONE,
    Exec = PTRACE_EVENT_EXEC,
    Exit = PTRACE_EVENT_EXIT,
    Seccomp = PTRACE_EVENT_SECCOMP,
}

impl PtraceEvent {
    pub fn from_option(option: &PtraceOptions) -> Self {
        match *option {
            PtraceOptions::TRACECLONE => PtraceEvent::Clone,
            PtraceOptions::TRACEFORK => PtraceEvent::Fork,
            PtraceOptions::TRACEVFORK => PtraceEvent::Vfork,
            PtraceOptions::TRACEVFORKDONE => PtraceEvent::VforkDone,
            PtraceOptions::TRACEEXEC => PtraceEvent::Exec,
            PtraceOptions::TRACEEXIT => PtraceEvent::Exit,
            PtraceOptions::TRACESECCOMP => PtraceEvent::Seccomp,
            _ => unreachable!("Bad ptrace event specified"),
        }
    }
}

pub struct PtraceEventData {
    /// The event that caused the tracee to stop.
    pub event: PtraceEvent,

    /// The message associated with the event, readable via PTRACE_GETEVENTMSG.
    pub msg: u64,
}

impl PtraceEventData {
    pub fn new(option: PtraceOptions, msg: u64) -> Self {
        Self { event: PtraceEvent::from_option(&option), msg }
    }
    pub fn new_from_event(event: PtraceEvent, msg: u64) -> Self {
        Self { event, msg }
    }
}

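/// The portion of ptrace state that can be copied and handed to a newly
/// attached tracee, for example a child that is auto-attached because the
/// tracer set PTRACE_O_TRACEFORK / TRACEVFORK / TRACECLONE.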
#[derive(Clone)]
pub struct PtraceCoreState {
    /// The pid of the tracer.
    pub pid: pid_t,

    /// The tracer's thread group.
    pub thread_group: Weak<ThreadGroup>,

    /// Whether the tracee was attached with PTRACE_ATTACH or PTRACE_SEIZE.
    pub attach_type: PtraceAttachType,

    /// The PTRACE_O_* options set on this tracee.
    pub options: PtraceOptions,

    /// Wait queue notified when this tracee changes state, so waiters
    /// (typically the tracer) can be woken.
    pub tracer_waiters: Arc<WaitQueue>,
}

impl PtraceCoreState {
    pub fn has_option(&self, option: PtraceOptions) -> bool {
        self.options.contains(option)
    }
}

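/// Per-tracee ptrace state, stored on the tracee's task while it is being
/// traced: the shared core state plus the most recent signal and event that
/// the tracer can observe, and the current stop/resume status.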
pub struct PtraceState {
    /// State shared with the tracer.
    pub core_state: PtraceCoreState,

    /// Wait queue the tracee blocks on while waiting for the tracer to act.
    pub tracee_waiters: WaitQueue,

    /// The most recent signal delivered to the tracee, available to the tracer
    /// via PTRACE_GETSIGINFO until it is consumed or replaced.
    pub last_signal: Option<SignalInfo>,

    /// Whether `last_signal` makes the tracee waitable by the tracer.
    pub last_signal_waitable: bool,

    /// Data for the most recent ptrace event stop, readable via
    /// PTRACE_GETEVENTMSG.
    pub event_data: Option<PtraceEventData>,

    /// The resume status the tracer has requested (continue / listen).
    pub stop_status: PtraceStatus,

    /// Whether the most recent syscall returned an error, as reported by
    /// PTRACE_GET_SYSCALL_INFO on a syscall-exit stop.
    pub last_syscall_was_error: bool,
}

impl PtraceState {
    pub fn new(
        pid: pid_t,
        thread_group: Weak<ThreadGroup>,
        attach_type: PtraceAttachType,
        options: PtraceOptions,
    ) -> Box<Self> {
        Box::new(PtraceState {
            core_state: PtraceCoreState {
                pid,
                thread_group,
                attach_type,
                options,
                tracer_waiters: Arc::new(WaitQueue::default()),
            },
            tracee_waiters: WaitQueue::default(),
            last_signal: None,
            last_signal_waitable: false,
            event_data: None,
            stop_status: PtraceStatus::default(),
            last_syscall_was_error: false,
        })
    }

    pub fn get_pid(&self) -> pid_t {
        self.core_state.pid
    }

    pub fn set_pid(&mut self, pid: pid_t) {
        self.core_state.pid = pid;
    }

    pub fn is_seized(&self) -> bool {
        self.core_state.attach_type == PtraceAttachType::Seize
    }

    pub fn get_attach_type(&self) -> PtraceAttachType {
        self.core_state.attach_type
    }

    pub fn is_waitable(&self, stop: StopState, options: &WaitingOptions) -> bool {
        if self.stop_status == PtraceStatus::Listening {
            return self.last_signal_waitable;
        }
        if !options.wait_for_continued && !stop.is_stopping_or_stopped() {
            return false;
        }
        self.last_signal_waitable && !stop.is_in_progress()
    }

    pub fn set_last_signal(&mut self, mut signal: Option<SignalInfo>) {
        if let Some(ref mut siginfo) = signal {
            // SIGKILL is never recorded for the tracer to inspect.
            if siginfo.signal == SIGKILL {
                return;
            }
            self.last_signal_waitable = true;
            self.last_signal = signal;
        }
    }

    pub fn set_last_event(&mut self, event: Option<PtraceEventData>) {
        if event.is_some() {
            self.event_data = event;
        }
    }

    pub fn get_last_signal_ref(&self) -> Option<&SignalInfo> {
        self.last_signal.as_ref()
    }

    /// Returns the last signal, optionally leaving the tracee waitable.
    pub fn get_last_signal(&mut self, keep_signal_waitable: bool) -> Option<SignalInfo> {
        self.last_signal_waitable = keep_signal_waitable;
        self.last_signal.clone()
    }

    pub fn has_option(&self, option: PtraceOptions) -> bool {
        self.core_state.has_option(option)
    }

    pub fn set_options_from_bits(&mut self, option: u32) -> Result<(), Errno> {
        if let Some(options) = PtraceOptions::from_bits(option) {
            self.core_state.options = options;
            Ok(())
        } else {
            error!(EINVAL)
        }
    }

    pub fn get_options(&self) -> PtraceOptions {
        self.core_state.options
    }

    pub fn get_core_state(&self) -> PtraceCoreState {
        self.core_state.clone()
    }

    pub fn tracer_waiters(&self) -> &Arc<WaitQueue> {
        &self.core_state.tracer_waiters
    }

    /// Builds a `ptrace_syscall_info` describing the tracee's current
    /// syscall-entry or syscall-exit stop, returning the number of meaningful
    /// bytes along with the populated struct.
    pub fn get_target_syscall(
        &self,
        target: &Task,
        state: &TaskMutableState,
    ) -> Result<(i32, ptrace_syscall_info), Errno> {
        #[cfg(target_arch = "x86_64")]
        let arch = starnix_uapi::AUDIT_ARCH_X86_64;
        #[cfg(target_arch = "aarch64")]
        let arch = starnix_uapi::AUDIT_ARCH_AARCH64;
        #[cfg(target_arch = "riscv64")]
        let arch = starnix_uapi::AUDIT_ARCH_RISCV64;

        let mut info = ptrace_syscall_info { arch, ..Default::default() };
        let mut info_len = memoffset::offset_of!(ptrace_syscall_info, __bindgen_anon_1);

        match &state.captured_thread_state {
            Some(captured) => {
                let registers = captured.thread_state.registers.clone();
                info.instruction_pointer = registers.instruction_pointer_register();
                info.stack_pointer = registers.stack_pointer_register();
                #[cfg(target_arch = "aarch64")]
                if captured.thread_state.arch_width.is_arch32() {
                    info.arch = starnix_uapi::AUDIT_ARCH_ARM;
                }
                match target.load_stopped() {
                    StopState::SyscallEnterStopped => {
                        let syscall_decl = SyscallDecl::from_number(
                            registers.syscall_register(),
                            captured.thread_state.arch_width,
                        );
                        let syscall = new_syscall_from_state(syscall_decl, &captured.thread_state);
                        info.op = PTRACE_SYSCALL_INFO_ENTRY as u8;
                        let entry = linux_uapi::ptrace_syscall_info__bindgen_ty_1__bindgen_ty_1 {
                            nr: syscall.decl.number,
                            args: [
                                syscall.arg0.raw(),
                                syscall.arg1.raw(),
                                syscall.arg2.raw(),
                                syscall.arg3.raw(),
                                syscall.arg4.raw(),
                                syscall.arg5.raw(),
                            ],
                        };
                        info_len += memoffset::offset_of!(
                            linux_uapi::ptrace_syscall_info__bindgen_ty_1__bindgen_ty_1,
                            args
                        ) + std::mem::size_of_val(&entry.args);
                        info.__bindgen_anon_1.entry = entry;
                    }
                    StopState::SyscallExitStopped => {
                        info.op = PTRACE_SYSCALL_INFO_EXIT as u8;
                        let exit = linux_uapi::ptrace_syscall_info__bindgen_ty_1__bindgen_ty_2 {
                            rval: registers.return_register() as i64,
                            is_error: state
                                .ptrace
                                .as_ref()
                                .map_or(0, |ptrace| ptrace.last_syscall_was_error as u8),
                            ..Default::default()
                        };
                        info_len += memoffset::offset_of!(
                            linux_uapi::ptrace_syscall_info__bindgen_ty_1__bindgen_ty_2,
                            is_error
                        ) + std::mem::size_of_val(&exit.is_error);
                        info.__bindgen_anon_1.exit = exit;
                    }
                    _ => {
                        info.op = PTRACE_SYSCALL_INFO_NONE as u8;
                    }
                };
            }
            _ => (),
        }
        Ok((info_len as i32, info))
    }

    /// Decides, based on the clone flags and the tracer's options, whether a
    /// newly cloned child should also be traced; returns the matching trace
    /// option and, if tracing should propagate, a copy of the core state.
    pub fn get_core_state_for_clone(
        &self,
        clone_args: &clone_args,
    ) -> (PtraceOptions, Option<PtraceCoreState>) {
        let trace_type = if clone_args.flags & (starnix_uapi::CLONE_UNTRACED as u64) != 0 {
            PtraceOptions::empty()
        } else if clone_args.flags & (starnix_uapi::CLONE_VFORK as u64) != 0 {
            PtraceOptions::TRACEVFORK
        } else if clone_args.exit_signal != (starnix_uapi::SIGCHLD as u64) {
            PtraceOptions::TRACECLONE
        } else {
            PtraceOptions::TRACEFORK
        };

        if !self.has_option(trace_type)
            && (clone_args.flags & (starnix_uapi::CLONE_PTRACE as u64) == 0)
        {
            return (PtraceOptions::empty(), None);
        }

        (trace_type, Some(self.get_core_state()))
    }
}

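/// A tracee that exited while being traced. `artificial_zombie` is the zombie
/// the tracer reaps via waitpid(); `delegate`, when present, is the real
/// zombie (together with its parent's thread group) that still has to be
/// delivered to the tracee's parent.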
struct TracedZombie {
    artificial_zombie: ZombieProcess,

    delegate: Option<(Weak<ThreadGroup>, OwnedRef<ZombieProcess>)>,
}

impl Releasable for TracedZombie {
    type Context<'a> = &'a mut PidTable;

    fn release<'a>(self, pids: &'a mut PidTable) {
        self.artificial_zombie.release(pids);
        if let Some((_, z)) = self.delegate {
            z.release(pids);
        }
    }
}

impl TracedZombie {
    fn new(artificial_zombie: ZombieProcess) -> ReleaseGuard<Self> {
        ReleaseGuard::from(Self { artificial_zombie, delegate: None })
    }

    fn new_with_delegate(
        artificial_zombie: ZombieProcess,
        delegate: (Weak<ThreadGroup>, OwnedRef<ZombieProcess>),
    ) -> ReleaseGuard<Self> {
        ReleaseGuard::from(Self { artificial_zombie, delegate: Some(delegate) })
    }

    fn set_parent(
        &mut self,
        new_zombie: Option<OwnedRef<ZombieProcess>>,
        new_parent: &ThreadGroup,
    ) {
        if let Some(new_zombie) = new_zombie {
            self.delegate = Some((new_parent.weak_self.clone(), new_zombie));
        } else {
            self.delegate = self.delegate.take().map(|(_, z)| (new_parent.weak_self.clone(), z));
        }
    }
}

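/// Tracees of this thread group that have exited but have not yet been reaped
/// by the tracer via waitpid().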
#[derive(Default)]
pub struct ZombiePtracees {
    zombies: BTreeMap<tid_t, ReleaseGuard<TracedZombie>>,
}

impl ZombiePtracees {
    pub fn new() -> Self {
        Self::default()
    }

    pub fn add(&mut self, pids: &mut PidTable, tid: tid_t, zombie: ZombieProcess) {
        if let std::collections::btree_map::Entry::Vacant(entry) = self.zombies.entry(tid) {
            entry.insert(TracedZombie::new(zombie));
        } else {
            zombie.release(pids);
        }
    }

    pub fn remove(&mut self, pids: &mut PidTable, tid: tid_t) {
        self.zombies.remove(&tid).release(pids);
    }

    pub fn is_empty(&self) -> bool {
        self.zombies.is_empty()
    }

    /// Updates the parent to be notified when the given tracee is reaped.
    pub fn set_parent_of(
        &mut self,
        tracee: tid_t,
        new_zombie: Option<OwnedRef<ZombieProcess>>,
        new_parent: &ThreadGroup,
    ) {
        match self.zombies.entry(tracee) {
            std::collections::btree_map::Entry::Vacant(entry) => {
                if let Some(new_zombie) = new_zombie {
                    entry.insert(TracedZombie::new_with_delegate(
                        new_zombie.as_artificial(),
                        (new_parent.weak_self.clone(), new_zombie),
                    ));
                }
            }
            std::collections::btree_map::Entry::Occupied(mut entry) => {
                entry.get_mut().set_parent(new_zombie, new_parent);
            }
        }
    }

    /// Moves responsibility for deferred zombie tracees from `old_parent` to
    /// `new_parent`.
    pub fn reparent(old_parent: &ThreadGroup, new_parent: &ThreadGroup) {
        let mut lockless_list = old_parent.read().deferred_zombie_ptracers.clone();

        for deferred_zombie_ptracer in &lockless_list {
            if let Some(tg) = deferred_zombie_ptracer.tracer_thread_group_key.upgrade() {
                tg.write().zombie_ptracees.set_parent_of(
                    deferred_zombie_ptracer.tracee_tid,
                    None,
                    new_parent,
                );
            }
        }
        let mut new_state = new_parent.write();
        new_state.deferred_zombie_ptracers.append(&mut lockless_list);
    }

    /// Releases all remaining zombies, forwarding any delegate zombies to the
    /// thread groups that are still owed a notification.
    pub fn release(&mut self, pids: &mut PidTable) {
        let mut entry = self.zombies.pop_first();
        while let Some((_, mut zombie)) = entry {
            if let Some((tg, z)) = zombie.delegate.take() {
                if let Some(tg) = tg.upgrade() {
                    tg.do_zombie_notifications(z);
                }
            }
            zombie.release(pids);

            entry = self.zombies.pop_first();
        }
    }

    pub fn has_zombie_matching(&self, selector: &ProcessSelector) -> bool {
        self.zombies.values().any(|z| z.artificial_zombie.matches_selector(selector))
    }

    pub fn has_tracee(&self, tid: tid_t) -> bool {
        self.zombies.contains_key(&tid)
    }

    /// Finds a zombie tracee matching the selector and waiting options.
    /// Unless `keep_waitable_state` is set, the entry is removed and its
    /// delegate (if any) is returned so the caller can notify the real parent.
    pub fn get_waitable_entry(
        &mut self,
        selector: &ProcessSelector,
        options: &WaitingOptions,
    ) -> Option<(ZombieProcess, Option<(Weak<ThreadGroup>, OwnedRef<ZombieProcess>)>)> {
        let Some((t, found_zombie)) = self
            .zombies
            .iter()
            .map(|(t, z)| (*t, &z.artificial_zombie))
            .rfind(|(_, zombie)| zombie.matches_selector_and_waiting_option(selector, options))
        else {
            return None;
        };

        let result;
        if !options.keep_waitable_state {
            result = self.zombies.remove(&t).map(|traced_zombie| {
                let traced_zombie = ReleaseGuard::take(traced_zombie);
                (traced_zombie.artificial_zombie, traced_zombie.delegate)
            });
        } else {
            result = Some((found_zombie.as_artificial(), None));
        }

        result
    }
}

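/// Value passed to prctl(PR_SET_PTRACER) to allow any process to ptrace the
/// caller.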
pub const PR_SET_PTRACER_ANY: i32 = -1;

/// Which tracers this process has allowed via prctl(PR_SET_PTRACER).
#[derive(Copy, Clone, Default, PartialEq)]
pub enum PtraceAllowedPtracers {
    #[default]
    None,
    Some(pid_t),
    Any,
}

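/// Resumes a stopped tracee, optionally injecting the signal encoded in
/// `data`. Used for PTRACE_CONT and PTRACE_SYSCALL, and (with `detach` set)
/// for PTRACE_DETACH.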
fn ptrace_cont<L>(
    locked: &mut Locked<L>,
    tracee: &Task,
    data: &UserAddress,
    detach: bool,
) -> Result<(), Errno>
where
    L: LockBefore<ThreadGroupLimits>,
{
    let data = data.ptr() as u64;
    let new_state;
    let mut siginfo = if data != 0 {
        let signal = Signal::try_from(UncheckedSignal::new(data))?;
        Some(SignalInfo::default(signal))
    } else {
        None
    };

    let mut state = tracee.write();
    let is_listen = state.is_ptrace_listening();

    if tracee.load_stopped().is_waking_or_awake() && !is_listen {
        if detach {
            state.set_ptrace(None)?;
        }
        return error!(EIO);
    }

    if !state.can_accept_ptrace_commands() && !detach {
        return error!(ESRCH);
    }

    if let Some(ptrace) = &mut state.ptrace {
        if data != 0 {
            new_state = PtraceStatus::Continuing;
            if let Some(last_signal) = &mut ptrace.last_signal {
                if let Some(si) = siginfo {
                    let new_signal = si.signal;
                    last_signal.signal = new_signal;
                }
                siginfo = Some(last_signal.clone());
            }
        } else {
            new_state = PtraceStatus::Default;
            ptrace.last_signal = None;
            ptrace.event_data = None;
        }
        ptrace.stop_status = new_state;

        if is_listen {
            state.notify_ptracees();
        }
    }

    if let Some(siginfo) = siginfo {
        send_signal_first(locked, &tracee, state, siginfo);
    } else {
        state.set_stopped(StopState::Waking, None, None, None);
        drop(state);
        tracee.thread_group().set_stopped(StopState::Waking, None, false);
    }
    if detach {
        tracee.write().set_ptrace(None)?;
    }
    Ok(())
}

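/// Implements PTRACE_INTERRUPT: stops a seized tracee with PTRACE_EVENT_STOP.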
fn ptrace_interrupt(tracee: &Task) -> Result<(), Errno> {
    let mut state = tracee.write();
    if let Some(ptrace) = &mut state.ptrace {
        if !ptrace.is_seized() {
            return error!(EIO);
        }
        let status = ptrace.stop_status.clone();
        ptrace.stop_status = PtraceStatus::Default;
        let event_data = Some(PtraceEventData::new_from_event(PtraceEvent::Stop, 0));
        if status == PtraceStatus::Listening {
            let signal = ptrace.last_signal.clone();
            state.set_stopped(StopState::PtraceEventStopped, signal, None, event_data);
        } else {
            state.set_stopped(
                StopState::PtraceEventStopping,
                Some(SignalInfo::default(SIGTRAP)),
                None,
                event_data,
            );
            drop(state);
            tracee.interrupt();
        }
    }
    Ok(())
}

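/// Implements PTRACE_LISTEN: puts a seized, event-stopped tracee into the
/// Listening state so the tracer can wait for further events.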
fn ptrace_listen(tracee: &Task) -> Result<(), Errno> {
    let mut state = tracee.write();
    if let Some(ptrace) = &mut state.ptrace {
        if !ptrace.is_seized()
            || (ptrace.last_signal_waitable
                && ptrace
                    .event_data
                    .as_ref()
                    .is_some_and(|event_data| event_data.event != PtraceEvent::Stop))
        {
            return error!(EIO);
        }
        ptrace.stop_status = PtraceStatus::Listening;
    }
    Ok(())
}

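/// Implements PTRACE_DETACH: resumes the tracee (optionally delivering a
/// signal) and removes it from the tracer's tracee and zombie-tracee lists.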
pub fn ptrace_detach<L>(
    locked: &mut Locked<L>,
    pids: &mut PidTable,
    thread_group: &ThreadGroup,
    tracee: &Task,
    data: &UserAddress,
) -> Result<(), Errno>
where
    L: LockBefore<ThreadGroupLimits>,
{
    ptrace_cont(locked, tracee, data, true)?;
    let tid = tracee.get_tid();
    thread_group.ptracees.lock().remove(&tid);
    thread_group.write().zombie_ptracees.remove(pids, tid);
    Ok(())
}

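/// Entry point for ptrace() requests that target an existing tracee
/// (everything other than attach, seize, and traceme).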
pub fn ptrace_dispatch<L>(
    locked: &mut Locked<L>,
    current_task: &mut CurrentTask,
    request: u32,
    pid: pid_t,
    addr: UserAddress,
    data: UserAddress,
) -> Result<SyscallResult, Errno>
where
    L: LockBefore<ThreadGroupLimits>,
{
    let mut pids = current_task.kernel().pids.write();
    let weak_task = pids.get_task(pid);
    let tracee = weak_task.upgrade().ok_or_else(|| errno!(ESRCH))?;

    if let Some(ptrace) = &tracee.read().ptrace {
        if ptrace.get_pid() != current_task.get_pid() {
            return error!(ESRCH);
        }
    }

    // Requests that manage the tracee's run state; each performs its own checks.
    match request {
        PTRACE_KILL => {
            let mut siginfo = SignalInfo::default(SIGKILL);
            siginfo.code = (linux_uapi::SIGTRAP | PTRACE_KILL << 8) as i32;
            send_standard_signal(locked, &tracee, siginfo);
            return Ok(starnix_syscalls::SUCCESS);
        }
        PTRACE_INTERRUPT => {
            ptrace_interrupt(tracee.as_ref())?;
            return Ok(starnix_syscalls::SUCCESS);
        }
        PTRACE_LISTEN => {
            ptrace_listen(&tracee)?;
            return Ok(starnix_syscalls::SUCCESS);
        }
        PTRACE_CONT => {
            ptrace_cont(locked, &tracee, &data, false)?;
            return Ok(starnix_syscalls::SUCCESS);
        }
        PTRACE_SYSCALL => {
            tracee.trace_syscalls.store(true, std::sync::atomic::Ordering::Relaxed);
            ptrace_cont(locked, &tracee, &data, false)?;
            return Ok(starnix_syscalls::SUCCESS);
        }
        PTRACE_DETACH => {
            ptrace_detach(locked, &mut pids, current_task.thread_group(), tracee.as_ref(), &data)?;
            return Ok(starnix_syscalls::SUCCESS);
        }
        _ => {}
    }

    // The remaining requests require the tracee to be stopped and accepting commands.
    let mut state = tracee.write();
    if !state.can_accept_ptrace_commands() {
        return error!(ESRCH);
    }

    match request {
        PTRACE_PEEKDATA | PTRACE_PEEKTEXT => {
            let Some(captured) = &mut state.captured_thread_state else {
                return error!(ESRCH);
            };

            let src = LongPtr::new(captured.as_ref(), addr);
            let val = tracee.read_multi_arch_object(src)?;

            let dst = LongPtr::new(&src, data);
            current_task.write_multi_arch_object(dst, val)?;
            Ok(starnix_syscalls::SUCCESS)
        }
        PTRACE_POKEDATA | PTRACE_POKETEXT => {
            let Some(captured) = &mut state.captured_thread_state else {
                return error!(ESRCH);
            };

            let bytes = if captured.is_arch32() {
                u32::try_from(data.ptr()).map_err(|_| errno!(EINVAL))?.to_ne_bytes().to_vec()
            } else {
                data.ptr().to_ne_bytes().to_vec()
            };

            tracee.mm()?.force_write_memory(addr, &bytes)?;

            Ok(starnix_syscalls::SUCCESS)
        }
        PTRACE_PEEKUSR => {
            let Some(captured) = &mut state.captured_thread_state else {
                return error!(ESRCH);
            };

            let dst = LongPtr::new(captured.as_ref(), data);
            let val = ptrace_peekuser(&mut captured.thread_state, addr.ptr() as usize)?;
            current_task.write_multi_arch_object(dst, val as u64)?;
            return Ok(starnix_syscalls::SUCCESS);
        }
        PTRACE_POKEUSR => {
            ptrace_pokeuser(&mut *state, data.ptr() as usize, addr.ptr() as usize)?;
            return Ok(starnix_syscalls::SUCCESS);
        }
        PTRACE_GETREGSET => {
            if let Some(ref mut captured) = state.captured_thread_state {
                let uiv = IOVecPtr::new(current_task, data);
                let mut iv = current_task.read_multi_arch_object(uiv)?;
                let base = iv.iov_base.addr;
                let mut len = iv.iov_len as usize;
                ptrace_getregset(
                    current_task,
                    &mut captured.thread_state,
                    ElfNoteType::try_from(addr.ptr() as usize)?,
                    base,
                    &mut len,
                )?;
                iv.iov_len = len as u64;
                current_task.write_multi_arch_object(uiv, iv)?;
                return Ok(starnix_syscalls::SUCCESS);
            }
            error!(ESRCH)
        }
        PTRACE_SETREGSET => {
            if let Some(ref mut captured) = state.captured_thread_state {
                captured.dirty = true;
                let uiv = IOVecPtr::new(current_task, data);
                let iv = current_task.read_multi_arch_object(uiv)?;
                let base = iv.iov_base.addr;
                let len = iv.iov_len as usize;
                ptrace_setregset(
                    current_task,
                    &mut captured.thread_state,
                    ElfNoteType::try_from(addr.ptr() as usize)?,
                    base,
                    len,
                )?;
                return Ok(starnix_syscalls::SUCCESS);
            }
            error!(ESRCH)
        }
        #[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
        PTRACE_GETREGS => {
            if let Some(captured) = &mut state.captured_thread_state {
                let mut len = usize::MAX;
                ptrace_getregset(
                    current_task,
                    &mut captured.thread_state,
                    ElfNoteType::PrStatus,
                    data.ptr() as u64,
                    &mut len,
                )?;
                return Ok(starnix_syscalls::SUCCESS);
            }
            error!(ESRCH)
        }
        PTRACE_SETSIGMASK => {
            if addr.ptr() != std::mem::size_of::<SigSet>() {
                return error!(EINVAL);
            }
            let src: UserRef<SigSet> = UserRef::from(data);
            let val = current_task.read_object(src)?;
            state.set_signal_mask(val);

            Ok(starnix_syscalls::SUCCESS)
        }
        PTRACE_GETSIGMASK => {
            if addr.ptr() != std::mem::size_of::<SigSet>() {
                return error!(EINVAL);
            }
            let dst: UserRef<SigSet> = UserRef::from(data);
            let val = state.signal_mask();
            current_task.write_object(dst, &val)?;
            Ok(starnix_syscalls::SUCCESS)
        }
        PTRACE_GETSIGINFO => {
            if let Some(ptrace) = &state.ptrace {
                if let Some(signal) = ptrace.last_signal.as_ref() {
                    let dst = MultiArchUserRef::<uapi::siginfo_t, uapi::arch32::siginfo_t>::new(
                        current_task,
                        data,
                    );
                    signal.write(current_task, dst)?;
                } else {
                    return error!(EINVAL);
                }
            }
            Ok(starnix_syscalls::SUCCESS)
        }
        PTRACE_SETSIGINFO => {
            let siginfo = UncheckedSignalInfo::read_from_siginfo(current_task, data)?.try_into()?;
            if let Some(ptrace) = &mut state.ptrace {
                ptrace.last_signal = Some(siginfo);
            }
            Ok(starnix_syscalls::SUCCESS)
        }
        PTRACE_GET_SYSCALL_INFO => {
            if let Some(ptrace) = &state.ptrace {
                let (size, info) = ptrace.get_target_syscall(&tracee, &state)?;
                let dst: UserRef<ptrace_syscall_info> = UserRef::from(data);
                let len = std::cmp::min(std::mem::size_of::<ptrace_syscall_info>(), addr.ptr());
                // SAFETY: `info` is plain old data and `len` is bounded by its size.
                let src = unsafe {
                    std::slice::from_raw_parts(
                        &info as *const ptrace_syscall_info as *const u8,
                        len as usize,
                    )
                };
                current_task.write_memory(dst.addr(), src)?;
                Ok(size.into())
            } else {
                error!(ESRCH)
            }
        }
        PTRACE_SETOPTIONS => {
            let mask = data.ptr() as u32;
            if mask != 0
                && (mask
                    & !(PTRACE_O_TRACESYSGOOD
                        | PTRACE_O_TRACECLONE
                        | PTRACE_O_TRACEFORK
                        | PTRACE_O_TRACEVFORK
                        | PTRACE_O_TRACEVFORKDONE
                        | PTRACE_O_TRACEEXEC
                        | PTRACE_O_TRACEEXIT
                        | PTRACE_O_EXITKILL)
                    != 0)
            {
                track_stub!(TODO("https://fxbug.dev/322874463"), "ptrace(PTRACE_SETOPTIONS)", mask);
                return error!(ENOSYS);
            }
            if let Some(ptrace) = &mut state.ptrace {
                ptrace.set_options_from_bits(mask)?;
            }
            Ok(starnix_syscalls::SUCCESS)
        }
        PTRACE_GETEVENTMSG => {
            if let Some(ptrace) = &state.ptrace {
                if let Some(event_data) = &ptrace.event_data {
                    let dst = LongPtr::new(current_task, data);
                    current_task.write_multi_arch_object(dst, event_data.msg)?;
                    return Ok(starnix_syscalls::SUCCESS);
                }
            }
            error!(EIO)
        }
        _ => {
            track_stub!(TODO("https://fxbug.dev/322874463"), "ptrace", request);
            error!(ENOSYS)
        }
    }
}

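/// Records `task` as a tracee of `thread_group` and installs the initial
/// ptrace state on the tracee.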
fn do_attach(
    thread_group: &ThreadGroup,
    task: WeakRef<Task>,
    attach_type: PtraceAttachType,
    options: PtraceOptions,
) -> Result<(), Errno> {
    if let Some(task_ref) = task.upgrade() {
        thread_group.ptracees.lock().insert(task_ref.get_tid(), (&task_ref).into());
        {
            let process_state = &mut task_ref.thread_group().write();
            let mut state = task_ref.write();
            state.set_ptrace(Some(PtraceState::new(
                thread_group.leader,
                thread_group.weak_self.clone(),
                attach_type,
                options,
            )))?;
            // A tracee that is already group-stopped is immediately waitable by the tracer.
            if process_state.is_waitable()
                && process_state.base.load_stopped() == StopState::GroupStopped
                && task_ref.load_stopped() == StopState::GroupStopped
            {
                if let Some(ptrace) = &mut state.ptrace {
                    ptrace.last_signal_waitable = true;
                }
            }
        }
        return Ok(());
    }
    unreachable!("Tracee thread not found");
}

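/// Completes a deferred attach for a newly created task whose parent was
/// traced (for example, auto-attach via PTRACE_O_TRACEFORK), then queues the
/// initial stop signal for the new tracee.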
pub fn ptrace_attach_from_state<L>(
    locked: &mut Locked<L>,
    tracee_task: &OwnedRef<Task>,
    ptrace_state: PtraceCoreState,
) -> Result<(), Errno>
where
    L: LockBefore<ThreadGroupLimits>,
{
    {
        let weak_tg =
            tracee_task.thread_group().kernel.pids.read().get_thread_group(ptrace_state.pid);
        let tracer_tg = weak_tg.ok_or_else(|| errno!(ESRCH))?;
        do_attach(
            &tracer_tg,
            WeakRef::from(tracee_task),
            ptrace_state.attach_type,
            ptrace_state.options,
        )?;
    }
    let mut state = tracee_task.write();
    if let Some(ptrace) = &mut state.ptrace {
        ptrace.core_state.tracer_waiters = Arc::clone(&ptrace_state.tracer_waiters);
    }

    let signal = if ptrace_state.attach_type == PtraceAttachType::Seize {
        if let Some(ptrace) = &mut state.ptrace {
            ptrace.set_last_event(Some(PtraceEventData::new_from_event(PtraceEvent::Stop, 0)));
        }
        SignalInfo::default(SIGTRAP)
    } else {
        SignalInfo::default(SIGSTOP)
    };
    send_signal_first(locked, tracee_task, state, signal);

    Ok(())
}

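/// Implements PTRACE_TRACEME: makes the caller's parent its tracer.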
pub fn ptrace_traceme(current_task: &mut CurrentTask) -> Result<SyscallResult, Errno> {
    let parent = current_task.thread_group().read().parent.clone();
    if let Some(parent) = parent {
        let parent = parent.upgrade();
        {
            let pids = current_task.kernel().pids.read();
            let parent_task = pids.get_task(parent.leader);
            security::ptrace_traceme(
                current_task,
                parent_task.upgrade().ok_or_else(|| errno!(EINVAL))?.as_ref(),
            )?;
        }

        let task_ref = OwnedRef::temp(&current_task.task);
        do_attach(&parent, (&task_ref).into(), PtraceAttachType::Attach, PtraceOptions::empty())?;
        Ok(starnix_syscalls::SUCCESS)
    } else {
        error!(EPERM)
    }
}

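/// Implements PTRACE_ATTACH and PTRACE_SEIZE on an existing task.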
pub fn ptrace_attach<L>(
    locked: &mut Locked<L>,
    current_task: &mut CurrentTask,
    pid: pid_t,
    attach_type: PtraceAttachType,
    data: UserAddress,
) -> Result<SyscallResult, Errno>
where
    L: LockBefore<MmDumpable>,
{
    let weak_task = current_task.kernel().pids.read().get_task(pid);
    let tracee = weak_task.upgrade().ok_or_else(|| errno!(ESRCH))?;

    if tracee.thread_group == current_task.thread_group {
        return error!(EPERM);
    }

    current_task.check_ptrace_access_mode(locked, PTRACE_MODE_ATTACH_REALCREDS, &tracee)?;
    do_attach(current_task.thread_group(), weak_task.clone(), attach_type, PtraceOptions::empty())?;
    if attach_type == PtraceAttachType::Attach {
        send_standard_signal(
            locked.cast_locked::<MmDumpable>(),
            &tracee,
            SignalInfo::default(SIGSTOP),
        );
    } else if attach_type == PtraceAttachType::Seize {
        if let Some(task_ref) = weak_task.upgrade() {
            let mut state = task_ref.write();
            if let Some(ptrace) = &mut state.ptrace {
                ptrace.set_options_from_bits(data.ptr() as u32)?;
            }
        }
    }
    Ok(starnix_syscalls::SUCCESS)
}

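/// Implements PTRACE_PEEKUSR: reads one word from the tracee's saved user
/// register area at the given byte `offset`.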
pub fn ptrace_peekuser(
    thread_state: &mut ThreadState<HeapRegs>,
    offset: usize,
) -> Result<usize, Errno> {
    #[cfg(any(target_arch = "x86_64"))]
    if offset >= std::mem::size_of::<user>() {
        return error!(EIO);
    }
    if offset < UserRegsStructPtr::size_of_object_for(thread_state) {
        let result = thread_state.get_user_register(offset)?;
        return Ok(result);
    }
    error!(EIO)
}

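/// Implements PTRACE_POKEUSR: writes one word into the tracee's saved user
/// register area at the given byte `offset`.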
pub fn ptrace_pokeuser(
    state: &mut TaskMutableState,
    value: usize,
    offset: usize,
) -> Result<(), Errno> {
    if let Some(ref mut thread_state) = state.captured_thread_state {
        thread_state.dirty = true;

        #[cfg(any(target_arch = "x86_64"))]
        if offset >= std::mem::size_of::<user>() {
            return error!(EIO);
        }
        if offset < UserRegsStructPtr::size_of_object_for(thread_state.as_ref()) {
            return thread_state.thread_state.set_user_register(offset, value);
        }
    }
    error!(EIO)
}

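/// Implements PTRACE_GETREGSET (and PTRACE_GETREGS) for the NT_PRSTATUS
/// register set: copies the tracee's general purpose registers to `base` in
/// the tracer's address space and reports the number of bytes written via
/// `len`.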
pub fn ptrace_getregset(
    current_task: &CurrentTask,
    thread_state: &mut ThreadState<HeapRegs>,
    regset_type: ElfNoteType,
    base: u64,
    len: &mut usize,
) -> Result<(), Errno> {
    match regset_type {
        ElfNoteType::PrStatus => {
            let user_regs_struct_len = UserRegsStructPtr::size_of_object_for(thread_state);
            if *len < user_regs_struct_len {
                return error!(EINVAL);
            }
            *len = user_regs_struct_len;
            let mut i: usize = 0;
            let mut reg_ptr = LongPtr::new(thread_state, base);
            while i < *len {
                let mut val = None;
                thread_state
                    .registers
                    .apply_user_register(i, &mut |register| val = Some(*register as usize))?;
                if let Some(val) = val {
                    current_task.write_multi_arch_object(reg_ptr, val as u64)?;
                }
                i += reg_ptr.size_of_object();
                reg_ptr = reg_ptr.next()?;
            }
            Ok(())
        }
        _ => {
            error!(EINVAL)
        }
    }
}

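/// Implements PTRACE_SETREGSET for the NT_PRSTATUS register set: reads the
/// general purpose registers from `base` in the tracer's address space and
/// applies them to the tracee's saved thread state.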
pub fn ptrace_setregset(
    current_task: &CurrentTask,
    thread_state: &mut ThreadState<HeapRegs>,
    regset_type: ElfNoteType,
    base: u64,
    mut len: usize,
) -> Result<(), Errno> {
    match regset_type {
        ElfNoteType::PrStatus => {
            let user_regs_struct_len = UserRegsStructPtr::size_of_object_for(thread_state);
            if len < user_regs_struct_len {
                return error!(EINVAL);
            }
            len = user_regs_struct_len;
            let mut i: usize = 0;
            let mut reg_ptr = LongPtr::new(thread_state, base);
            while i < len {
                let val = current_task.read_multi_arch_object(reg_ptr)?;
                thread_state.registers.apply_user_register(i, &mut |register| *register = val)?;
                i += reg_ptr.size_of_object();
                reg_ptr = reg_ptr.next()?;
            }
            Ok(())
        }
        _ => {
            error!(EINVAL)
        }
    }
}

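/// Called on syscall entry for a task whose syscalls are traced: records a
/// syscall-enter stop and blocks until the tracer resumes the task.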
#[inline(never)]
pub fn ptrace_syscall_enter(locked: &mut Locked<Unlocked>, current_task: &mut CurrentTask) {
    let block = {
        let mut state = current_task.write();
        if state.ptrace.is_some() {
            current_task.trace_syscalls.store(false, Ordering::Relaxed);
            let mut sig = SignalInfo::default(SIGTRAP);
            sig.code = (linux_uapi::SIGTRAP | 0x80) as i32;
            if state
                .ptrace
                .as_ref()
                .is_some_and(|ptrace| ptrace.has_option(PtraceOptions::TRACESYSGOOD))
            {
                sig.signal.set_ptrace_syscall_bit();
            }
            state.set_stopped(StopState::SyscallEnterStopping, Some(sig), None, None);
            true
        } else {
            false
        }
    };
    if block {
        current_task.block_while_stopped(locked);
    }
}

/// Called on syscall exit for a task whose syscalls are traced: records a
/// syscall-exit stop (noting whether the syscall failed) and blocks until the
/// tracer resumes the task.
#[inline(never)]
pub fn ptrace_syscall_exit(
    locked: &mut Locked<Unlocked>,
    current_task: &mut CurrentTask,
    is_error: bool,
) {
    let block = {
        let mut state = current_task.write();
        current_task.trace_syscalls.store(false, Ordering::Relaxed);
        if state.ptrace.is_some() {
            let mut sig = SignalInfo::default(SIGTRAP);
            sig.code = (linux_uapi::SIGTRAP | 0x80) as i32;
            if state
                .ptrace
                .as_ref()
                .is_some_and(|ptrace| ptrace.has_option(PtraceOptions::TRACESYSGOOD))
            {
                sig.signal.set_ptrace_syscall_bit();
            }

            state.set_stopped(StopState::SyscallExitStopping, Some(sig), None, None);
            if let Some(ptrace) = &mut state.ptrace {
                ptrace.last_syscall_was_error = is_error;
            }
            true
        } else {
            false
        }
    };
    if block {
        current_task.block_while_stopped(locked);
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::task::syscalls::sys_prctl;
    use crate::testing::{create_task, spawn_kernel_and_run};
    use starnix_uapi::PR_SET_PTRACER;
    use starnix_uapi::auth::CAP_SYS_PTRACE;

    #[::fuchsia::test]
    async fn test_set_ptracer() {
        spawn_kernel_and_run(async |locked, current_task| {
            let kernel = current_task.kernel().clone();
            let mut tracee = create_task(locked, &kernel, "tracee");
            let mut tracer = create_task(locked, &kernel, "tracer");

            // Drop CAP_SYS_PTRACE so the restricted ptrace scope applies to the tracer.
            let mut creds = tracer.real_creds().clone();
            creds.cap_effective &= !CAP_SYS_PTRACE;
            tracer.set_creds(creds);

            kernel.ptrace_scope.store(security::yama::SCOPE_RESTRICTED, Ordering::Relaxed);
            assert_eq!(
                sys_prctl(locked, &mut tracee, PR_SET_PTRACER, 0xFFF, 0, 0, 0),
                error!(EINVAL)
            );

            assert_eq!(
                ptrace_attach(
                    locked,
                    &mut tracer,
                    tracee.as_ref().task.tid,
                    PtraceAttachType::Attach,
                    UserAddress::NULL,
                ),
                error!(EPERM)
            );

            assert!(
                sys_prctl(
                    locked,
                    &mut tracee,
                    PR_SET_PTRACER,
                    tracer.thread_group().leader as u64,
                    0,
                    0,
                    0
                )
                .is_ok()
            );

            let mut not_tracer = create_task(locked, &kernel, "not-tracer");
            not_tracer.set_creds(tracer.real_creds().clone());
            assert_eq!(
                ptrace_attach(
                    locked,
                    &mut not_tracer,
                    tracee.as_ref().task.tid,
                    PtraceAttachType::Attach,
                    UserAddress::NULL,
                ),
                error!(EPERM)
            );

            assert!(
                ptrace_attach(
                    locked,
                    &mut tracer,
                    tracee.as_ref().task.tid,
                    PtraceAttachType::Attach,
                    UserAddress::NULL,
                )
                .is_ok()
            );
        })
        .await;
    }

    #[::fuchsia::test]
    async fn test_set_ptracer_any() {
        spawn_kernel_and_run(async |locked, current_task| {
            let kernel = current_task.kernel().clone();
            let mut tracee = create_task(locked, &kernel, "tracee");
            let mut tracer = create_task(locked, &kernel, "tracer");

            let mut creds = tracer.real_creds().clone();
            creds.cap_effective &= !CAP_SYS_PTRACE;
            tracer.set_creds(creds);

            kernel.ptrace_scope.store(security::yama::SCOPE_RESTRICTED, Ordering::Relaxed);
            assert_eq!(
                sys_prctl(locked, &mut tracee, PR_SET_PTRACER, 0xFFF, 0, 0, 0),
                error!(EINVAL)
            );

            assert_eq!(
                ptrace_attach(
                    locked,
                    &mut tracer,
                    tracee.as_ref().task.tid,
                    PtraceAttachType::Attach,
                    UserAddress::NULL,
                ),
                error!(EPERM)
            );

            assert!(
                sys_prctl(locked, &mut tracee, PR_SET_PTRACER, PR_SET_PTRACER_ANY as u64, 0, 0, 0)
                    .is_ok()
            );

            assert!(
                ptrace_attach(
                    locked,
                    &mut tracer,
                    tracee.as_ref().task.tid,
                    PtraceAttachType::Attach,
                    UserAddress::NULL,
                )
                .is_ok()
            );
        })
        .await;
    }
}