use crate::arch::execution::new_syscall_from_state;
use crate::mm::{IOVecPtr, MemoryAccessor, MemoryAccessorExt};
use crate::security;
use crate::signals::syscalls::WaitingOptions;
use crate::signals::{
    SI_HEADER_SIZE, SignalDetail, SignalInfo, SignalInfoHeader, SignalSource, send_signal_first,
    send_standard_signal,
};
use crate::task::waiter::WaitQueue;
use crate::task::{
    CurrentTask, PidTable, ProcessSelector, StopState, Task, TaskMutableState, ThreadGroup,
    ThreadState, ZombieProcess,
};
use bitflags::bitflags;
use starnix_logging::track_stub;
use starnix_sync::{LockBefore, Locked, MmDumpable, ThreadGroupLimits, Unlocked};
use starnix_syscalls::SyscallResult;
use starnix_syscalls::decls::SyscallDecl;
use starnix_types::ownership::{OwnedRef, Releasable, ReleaseGuard, WeakRef};
use starnix_uapi::auth::PTRACE_MODE_ATTACH_REALCREDS;
use starnix_uapi::elf::ElfNoteType;
use starnix_uapi::errors::Errno;
use starnix_uapi::signals::{SIGKILL, SIGSTOP, SIGTRAP, SigSet, Signal, UncheckedSignal};
#[allow(unused_imports)]
use starnix_uapi::user_address::ArchSpecific;
use starnix_uapi::user_address::{LongPtr, MultiArchUserRef, UserAddress, UserRef};
use starnix_uapi::{
    PTRACE_CONT, PTRACE_DETACH, PTRACE_EVENT_CLONE, PTRACE_EVENT_EXEC, PTRACE_EVENT_EXIT,
    PTRACE_EVENT_FORK, PTRACE_EVENT_SECCOMP, PTRACE_EVENT_STOP, PTRACE_EVENT_VFORK,
    PTRACE_EVENT_VFORK_DONE, PTRACE_GET_SYSCALL_INFO, PTRACE_GETEVENTMSG, PTRACE_GETREGSET,
    PTRACE_GETSIGINFO, PTRACE_GETSIGMASK, PTRACE_INTERRUPT, PTRACE_KILL, PTRACE_LISTEN,
    PTRACE_O_EXITKILL, PTRACE_O_TRACECLONE, PTRACE_O_TRACEEXEC, PTRACE_O_TRACEEXIT,
    PTRACE_O_TRACEFORK, PTRACE_O_TRACESYSGOOD, PTRACE_O_TRACEVFORK, PTRACE_O_TRACEVFORKDONE,
    PTRACE_PEEKDATA, PTRACE_PEEKTEXT, PTRACE_PEEKUSR, PTRACE_POKEDATA, PTRACE_POKETEXT,
    PTRACE_POKEUSR, PTRACE_SETOPTIONS, PTRACE_SETREGSET, PTRACE_SETSIGINFO, PTRACE_SETSIGMASK,
    PTRACE_SYSCALL, PTRACE_SYSCALL_INFO_ENTRY, PTRACE_SYSCALL_INFO_EXIT, PTRACE_SYSCALL_INFO_NONE,
    SI_MAX_SIZE, clone_args, errno, error, pid_t, ptrace_syscall_info, tid_t, uapi,
};

use std::collections::BTreeMap;
use std::sync::atomic::Ordering;
use std::sync::{Arc, Weak};
use zerocopy::FromBytes;

#[cfg(target_arch = "x86_64")]
use starnix_uapi::{PTRACE_GETREGS, user};

#[cfg(all(target_arch = "aarch64"))]
use starnix_uapi::arch32::PTRACE_GETREGS;

type UserRegsStructPtr =
    MultiArchUserRef<starnix_uapi::user_regs_struct, starnix_uapi::arch32::user_regs_struct>;

uapi::check_arch_independent_layout! {
    ptrace_syscall_info {
        op,
        arch,
        instruction_pointer,
        stack_pointer,
        __bindgen_anon_1,
    }

    ptrace_syscall_info__bindgen_ty_1 {
        entry,
        exit,
        seccomp,
    }

    ptrace_syscall_info__bindgen_ty_1__bindgen_ty_1 {
        nr,
        args,
    }

    ptrace_syscall_info__bindgen_ty_1__bindgen_ty_2 {
        rval,
        is_error,
    }

    ptrace_syscall_info__bindgen_ty_1__bindgen_ty_3 {
        nr,
        args,
        ret_data,
    }
}

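/// How the tracer has asked the tracee to proceed: no outstanding request (the default),
/// continuing after a resume request, or parked in the PTRACE_LISTEN state.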
#[derive(Clone, Default, PartialEq)]
pub enum PtraceStatus {
    #[default]
    Default,
    Continuing,
    Listening,
}

impl PtraceStatus {
    pub fn is_continuing(&self) -> bool {
        *self == PtraceStatus::Continuing
    }
}

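/// How the tracer attached to the tracee: PTRACE_ATTACH / PTRACE_TRACEME versus PTRACE_SEIZE.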
#[derive(Copy, Clone, PartialEq)]
pub enum PtraceAttachType {
    Attach,
    Seize,
}

bitflags! {
    #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
    #[repr(transparent)]
    pub struct PtraceOptions: u32 {
        const EXITKILL = starnix_uapi::PTRACE_O_EXITKILL;
        const TRACECLONE = starnix_uapi::PTRACE_O_TRACECLONE;
        const TRACEEXEC = starnix_uapi::PTRACE_O_TRACEEXEC;
        const TRACEEXIT = starnix_uapi::PTRACE_O_TRACEEXIT;
        const TRACEFORK = starnix_uapi::PTRACE_O_TRACEFORK;
        const TRACESYSGOOD = starnix_uapi::PTRACE_O_TRACESYSGOOD;
        const TRACEVFORK = starnix_uapi::PTRACE_O_TRACEVFORK;
        const TRACEVFORKDONE = starnix_uapi::PTRACE_O_TRACEVFORKDONE;
        const TRACESECCOMP = starnix_uapi::PTRACE_O_TRACESECCOMP;
        const SUSPEND_SECCOMP = starnix_uapi::PTRACE_O_SUSPEND_SECCOMP;
    }
}

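/// The ptrace events that can be reported to a tracer, mirroring the PTRACE_EVENT_* constants.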
#[repr(u32)]
#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)]
pub enum PtraceEvent {
    #[default]
    None = 0,
    Stop = PTRACE_EVENT_STOP,
    Clone = PTRACE_EVENT_CLONE,
    Fork = PTRACE_EVENT_FORK,
    Vfork = PTRACE_EVENT_VFORK,
    VforkDone = PTRACE_EVENT_VFORK_DONE,
    Exec = PTRACE_EVENT_EXEC,
    Exit = PTRACE_EVENT_EXIT,
    Seccomp = PTRACE_EVENT_SECCOMP,
}

impl PtraceEvent {
    pub fn from_option(option: &PtraceOptions) -> Self {
        match *option {
            PtraceOptions::TRACECLONE => PtraceEvent::Clone,
            PtraceOptions::TRACEFORK => PtraceEvent::Fork,
            PtraceOptions::TRACEVFORK => PtraceEvent::Vfork,
            PtraceOptions::TRACEVFORKDONE => PtraceEvent::VforkDone,
            PtraceOptions::TRACEEXEC => PtraceEvent::Exec,
            PtraceOptions::TRACEEXIT => PtraceEvent::Exit,
            PtraceOptions::TRACESECCOMP => PtraceEvent::Seccomp,
            _ => unreachable!("Bad ptrace event specified"),
        }
    }
}

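/// The data associated with a ptrace event stop: the event itself and the message that is
/// reported by PTRACE_GETEVENTMSG.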
pub struct PtraceEventData {
    pub event: PtraceEvent,

    pub msg: u64,
}

impl PtraceEventData {
    pub fn new(option: PtraceOptions, msg: u64) -> Self {
        Self { event: PtraceEvent::from_option(&option), msg }
    }
    pub fn new_from_event(event: PtraceEvent, msg: u64) -> Self {
        Self { event, msg }
    }
}

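/// The subset of ptrace state that is shared with the tracer and can be copied when a traced
/// task clones (see `get_core_state_for_clone`).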
#[derive(Clone)]
pub struct PtraceCoreState {
    pub pid: pid_t,

    pub attach_type: PtraceAttachType,

    pub options: PtraceOptions,

    pub tracer_waiters: Arc<WaitQueue>,
}

impl PtraceCoreState {
    pub fn has_option(&self, option: PtraceOptions) -> bool {
        self.options.contains(option)
    }
}

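/// Per-tracee ptrace state: the shared core state plus the last signal, pending event data, and
/// stop bookkeeping used to coordinate with the tracer.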
pub struct PtraceState {
    pub core_state: PtraceCoreState,

    pub tracee_waiters: WaitQueue,

    pub last_signal: Option<SignalInfo>,

    pub last_signal_waitable: bool,

    pub event_data: Option<PtraceEventData>,

    pub stop_status: PtraceStatus,

    pub last_syscall_was_error: bool,
}

impl PtraceState {
    pub fn new(pid: pid_t, attach_type: PtraceAttachType, options: PtraceOptions) -> Box<Self> {
        Box::new(PtraceState {
            core_state: PtraceCoreState {
                pid,
                attach_type,
                options,
                tracer_waiters: Arc::new(WaitQueue::default()),
            },
            tracee_waiters: WaitQueue::default(),
            last_signal: None,
            last_signal_waitable: false,
            event_data: None,
            stop_status: PtraceStatus::default(),
            last_syscall_was_error: false,
        })
    }

    pub fn get_pid(&self) -> pid_t {
        self.core_state.pid
    }

    pub fn set_pid(&mut self, pid: pid_t) {
        self.core_state.pid = pid;
    }

    pub fn is_seized(&self) -> bool {
        self.core_state.attach_type == PtraceAttachType::Seize
    }

    pub fn get_attach_type(&self) -> PtraceAttachType {
        self.core_state.attach_type
    }

    pub fn is_waitable(&self, stop: StopState, options: &WaitingOptions) -> bool {
        if self.stop_status == PtraceStatus::Listening {
            return self.last_signal_waitable;
        }
        if !options.wait_for_continued && !stop.is_stopping_or_stopped() {
            return false;
        }
        self.last_signal_waitable && !stop.is_in_progress()
    }

    pub fn set_last_signal(&mut self, mut signal: Option<SignalInfo>) {
        if let Some(ref mut siginfo) = signal {
            if siginfo.signal == SIGKILL {
                return;
            }
            self.last_signal_waitable = true;
            self.last_signal = signal;
        }
    }

    pub fn set_last_event(&mut self, event: Option<PtraceEventData>) {
        if event.is_some() {
            self.event_data = event;
        }
    }

    pub fn get_last_signal(&mut self, keep_signal_waitable: bool) -> Option<SignalInfo> {
        self.last_signal_waitable = keep_signal_waitable;
        self.last_signal.clone()
    }

    pub fn has_option(&self, option: PtraceOptions) -> bool {
        self.core_state.has_option(option)
    }

    pub fn set_options_from_bits(&mut self, option: u32) -> Result<(), Errno> {
        if let Some(options) = PtraceOptions::from_bits(option) {
            self.core_state.options = options;
            Ok(())
        } else {
            error!(EINVAL)
        }
    }

    pub fn get_options(&self) -> PtraceOptions {
        self.core_state.options
    }

    pub fn get_core_state(&self) -> PtraceCoreState {
        self.core_state.clone()
    }

    pub fn tracer_waiters(&self) -> &Arc<WaitQueue> {
        &self.core_state.tracer_waiters
    }

    pub fn get_target_syscall(
        &self,
        target: &Task,
        state: &TaskMutableState,
    ) -> Result<(i32, ptrace_syscall_info), Errno> {
        #[cfg(target_arch = "x86_64")]
        let arch = starnix_uapi::AUDIT_ARCH_X86_64;
        #[cfg(target_arch = "aarch64")]
        let arch = starnix_uapi::AUDIT_ARCH_AARCH64;
        #[cfg(target_arch = "riscv64")]
        let arch = starnix_uapi::AUDIT_ARCH_RISCV64;

        let mut info = ptrace_syscall_info { arch, ..Default::default() };
        let mut info_len = memoffset::offset_of!(ptrace_syscall_info, __bindgen_anon_1);

        match &state.captured_thread_state {
            Some(captured) => {
                let registers = captured.thread_state.registers;
                info.instruction_pointer = registers.instruction_pointer_register();
                info.stack_pointer = registers.stack_pointer_register();
                #[cfg(target_arch = "aarch64")]
                if captured.thread_state.arch_width.is_arch32() {
                    info.arch = starnix_uapi::AUDIT_ARCH_ARM;
                }
                match target.load_stopped() {
                    StopState::SyscallEnterStopped => {
                        let syscall_decl = SyscallDecl::from_number(
                            registers.syscall_register(),
                            captured.thread_state.arch_width,
                        );
                        let syscall = new_syscall_from_state(syscall_decl, &captured.thread_state);
                        info.op = PTRACE_SYSCALL_INFO_ENTRY as u8;
                        let entry = linux_uapi::ptrace_syscall_info__bindgen_ty_1__bindgen_ty_1 {
                            nr: syscall.decl.number,
                            args: [
                                syscall.arg0.raw(),
                                syscall.arg1.raw(),
                                syscall.arg2.raw(),
                                syscall.arg3.raw(),
                                syscall.arg4.raw(),
                                syscall.arg5.raw(),
                            ],
                        };
                        info_len += memoffset::offset_of!(
                            linux_uapi::ptrace_syscall_info__bindgen_ty_1__bindgen_ty_1,
                            args
                        ) + std::mem::size_of_val(&entry.args);
                        info.__bindgen_anon_1.entry = entry;
                    }
                    StopState::SyscallExitStopped => {
                        info.op = PTRACE_SYSCALL_INFO_EXIT as u8;
                        let exit = linux_uapi::ptrace_syscall_info__bindgen_ty_1__bindgen_ty_2 {
                            rval: registers.return_register() as i64,
                            is_error: state
                                .ptrace
                                .as_ref()
                                .map_or(0, |ptrace| ptrace.last_syscall_was_error as u8),
                            ..Default::default()
                        };
                        info_len += memoffset::offset_of!(
                            linux_uapi::ptrace_syscall_info__bindgen_ty_1__bindgen_ty_2,
                            is_error
                        ) + std::mem::size_of_val(&exit.is_error);
                        info.__bindgen_anon_1.exit = exit;
                    }
                    _ => {
                        info.op = PTRACE_SYSCALL_INFO_NONE as u8;
                    }
                };
            }
            _ => (),
        }
        Ok((info_len as i32, info))
    }

    pub fn get_core_state_for_clone(
        &self,
        clone_args: &clone_args,
    ) -> (PtraceOptions, Option<PtraceCoreState>) {
        let trace_type = if clone_args.flags & (starnix_uapi::CLONE_UNTRACED as u64) != 0 {
            PtraceOptions::empty()
        } else {
            if clone_args.flags & (starnix_uapi::CLONE_VFORK as u64) != 0 {
                PtraceOptions::TRACEVFORK
            } else if clone_args.exit_signal != (starnix_uapi::SIGCHLD as u64) {
                PtraceOptions::TRACECLONE
            } else {
                PtraceOptions::TRACEFORK
            }
        };

        if !self.has_option(trace_type)
            && (clone_args.flags & (starnix_uapi::CLONE_PTRACE as u64) == 0)
        {
            return (PtraceOptions::empty(), None);
        }

        (trace_type, Some(self.get_core_state()))
    }
}

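/// A zombie tracee held by its tracer: an artificial zombie that the tracer can wait on, plus an
/// optional delegate zombie to hand back to the real parent when the tracer is done with it.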
struct TracedZombie {
    artificial_zombie: ZombieProcess,

    delegate: Option<(Weak<ThreadGroup>, OwnedRef<ZombieProcess>)>,
}

impl Releasable for TracedZombie {
    type Context<'a> = &'a mut PidTable;

    fn release<'a>(self, pids: &'a mut PidTable) {
        self.artificial_zombie.release(pids);
        if let Some((_, z)) = self.delegate {
            z.release(pids);
        }
    }
}

impl TracedZombie {
    fn new(artificial_zombie: ZombieProcess) -> ReleaseGuard<Self> {
        ReleaseGuard::from(Self { artificial_zombie, delegate: None })
    }

    fn new_with_delegate(
        artificial_zombie: ZombieProcess,
        delegate: (Weak<ThreadGroup>, OwnedRef<ZombieProcess>),
    ) -> ReleaseGuard<Self> {
        ReleaseGuard::from(Self { artificial_zombie, delegate: Some(delegate) })
    }

    fn set_parent(
        &mut self,
        new_zombie: Option<OwnedRef<ZombieProcess>>,
        new_parent: &ThreadGroup,
    ) {
        if let Some(new_zombie) = new_zombie {
            self.delegate = Some((new_parent.weak_self.clone(), new_zombie));
        } else {
            self.delegate = self.delegate.take().map(|(_, z)| (new_parent.weak_self.clone(), z));
        }
    }
}

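/// The set of traced tasks that have exited but have not yet been waited on by the tracer, keyed
/// by tracee tid.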
#[derive(Default)]
pub struct ZombiePtracees {
    zombies: BTreeMap<tid_t, ReleaseGuard<TracedZombie>>,
}

impl ZombiePtracees {
    pub fn new() -> Self {
        Self::default()
    }

    pub fn add(&mut self, pids: &mut PidTable, tid: tid_t, zombie: ZombieProcess) {
        if let std::collections::btree_map::Entry::Vacant(entry) = self.zombies.entry(tid) {
            entry.insert(TracedZombie::new(zombie));
        } else {
            zombie.release(pids);
        }
    }

    pub fn remove(&mut self, pids: &mut PidTable, tid: tid_t) {
        self.zombies.remove(&tid).release(pids);
    }

    pub fn is_empty(&self) -> bool {
        self.zombies.is_empty()
    }

    pub fn set_parent_of(
        &mut self,
        tracee: tid_t,
        new_zombie: Option<OwnedRef<ZombieProcess>>,
        new_parent: &ThreadGroup,
    ) {
        match self.zombies.entry(tracee) {
            std::collections::btree_map::Entry::Vacant(entry) => {
                if let Some(new_zombie) = new_zombie {
                    entry.insert(TracedZombie::new_with_delegate(
                        new_zombie.as_artificial(),
                        (new_parent.weak_self.clone(), new_zombie),
                    ));
                }
            }
            std::collections::btree_map::Entry::Occupied(mut entry) => {
                entry.get_mut().set_parent(new_zombie, new_parent);
            }
        }
    }

    pub fn reparent(old_parent: &ThreadGroup, new_parent: &ThreadGroup) {
        let mut lockless_list = old_parent.read().deferred_zombie_ptracers.clone();

        for deferred_zombie_ptracer in &lockless_list {
            if let Some(tg) = deferred_zombie_ptracer.tracer_thread_group_key.upgrade() {
                tg.write().zombie_ptracees.set_parent_of(
                    deferred_zombie_ptracer.tracee_tid,
                    None,
                    new_parent,
                );
            }
        }
        let mut new_state = new_parent.write();
        new_state.deferred_zombie_ptracers.append(&mut lockless_list);
    }

    pub fn release(&mut self, pids: &mut PidTable) {
        let mut entry = self.zombies.pop_first();
        while let Some((_, mut zombie)) = entry {
            if let Some((tg, z)) = zombie.delegate.take() {
                if let Some(tg) = tg.upgrade() {
                    tg.do_zombie_notifications(z);
                }
            }
            zombie.release(pids);

            entry = self.zombies.pop_first();
        }
    }

    pub fn has_zombie_matching(&self, selector: &ProcessSelector) -> bool {
        self.zombies.values().any(|z| z.artificial_zombie.matches_selector(selector))
    }

    pub fn has_tracee(&self, tid: tid_t) -> bool {
        self.zombies.contains_key(&tid)
    }

    pub fn get_waitable_entry(
        &mut self,
        selector: &ProcessSelector,
        options: &WaitingOptions,
    ) -> Option<(ZombieProcess, Option<(Weak<ThreadGroup>, OwnedRef<ZombieProcess>)>)> {
        let Some((t, found_zombie)) = self
            .zombies
            .iter()
            .map(|(t, z)| (*t, &z.artificial_zombie))
            .rfind(|(_, zombie)| zombie.matches_selector_and_waiting_option(selector, options))
        else {
            return None;
        };

        let result;
        if !options.keep_waitable_state {
            result = self.zombies.remove(&t).map(|traced_zombie| {
                let traced_zombie = ReleaseGuard::take(traced_zombie);
                (traced_zombie.artificial_zombie, traced_zombie.delegate)
            });
        } else {
            result = Some((found_zombie.as_artificial(), None));
        }

        result
    }
}

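/// The prctl(PR_SET_PTRACER) argument that allows any process to ptrace the caller.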
pub const PR_SET_PTRACER_ANY: i32 = -1;
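
/// The tracers a process allows via prctl(PR_SET_PTRACER) when restricted ptrace scope is in
/// effect.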
642
643#[derive(Copy, Clone, Default, PartialEq)]
646pub enum PtraceAllowedPtracers {
647 #[default]
648 None,
649 Some(pid_t),
650 Any,
651}
652
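/// Resumes a stopped tracee, optionally delivering the signal encoded in `data`, and optionally
/// detaching from it afterwards.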
fn ptrace_cont<L>(
    locked: &mut Locked<L>,
    tracee: &Task,
    data: &UserAddress,
    detach: bool,
) -> Result<(), Errno>
where
    L: LockBefore<ThreadGroupLimits>,
{
    let data = data.ptr() as u64;
    let new_state;
    let mut siginfo = if data != 0 {
        let signal = Signal::try_from(UncheckedSignal::new(data))?;
        Some(SignalInfo::default(signal))
    } else {
        None
    };

    let mut state = tracee.write();
    let is_listen = state.is_ptrace_listening();

    if tracee.load_stopped().is_waking_or_awake() && !is_listen {
        if detach {
            state.set_ptrace(None)?;
        }
        return error!(EIO);
    }

    if !state.can_accept_ptrace_commands() && !detach {
        return error!(ESRCH);
    }

    if let Some(ptrace) = &mut state.ptrace {
        if data != 0 {
            new_state = PtraceStatus::Continuing;
            if let Some(last_signal) = &mut ptrace.last_signal {
                if let Some(si) = siginfo {
                    let new_signal = si.signal;
                    last_signal.signal = new_signal;
                }
                siginfo = Some(last_signal.clone());
            }
        } else {
            new_state = PtraceStatus::Default;
            ptrace.last_signal = None;
            ptrace.event_data = None;
        }
        ptrace.stop_status = new_state;

        if is_listen {
            state.notify_ptracees();
        }
    }

    if let Some(siginfo) = siginfo {
        send_signal_first(locked, &tracee, state, siginfo);
    } else {
        state.set_stopped(StopState::Waking, None, None, None);
        drop(state);
        tracee.thread_group().set_stopped(StopState::Waking, None, false);
    }
    if detach {
        tracee.write().set_ptrace(None)?;
    }
    Ok(())
}

fn ptrace_interrupt(tracee: &Task) -> Result<(), Errno> {
    let mut state = tracee.write();
    if let Some(ptrace) = &mut state.ptrace {
        if !ptrace.is_seized() {
            return error!(EIO);
        }
        let status = ptrace.stop_status.clone();
        ptrace.stop_status = PtraceStatus::Default;
        let event_data = Some(PtraceEventData::new_from_event(PtraceEvent::Stop, 0));
        if status == PtraceStatus::Listening {
            let signal = ptrace.last_signal.clone();
            state.set_stopped(StopState::PtraceEventStopped, signal, None, event_data);
        } else {
            state.set_stopped(
                StopState::PtraceEventStopping,
                Some(SignalInfo::default(SIGTRAP)),
                None,
                event_data,
            );
            drop(state);
            tracee.interrupt();
        }
    }
    Ok(())
}

fn ptrace_listen(tracee: &Task) -> Result<(), Errno> {
    let mut state = tracee.write();
    if let Some(ptrace) = &mut state.ptrace {
        if !ptrace.is_seized()
            || (ptrace.last_signal_waitable
                && ptrace
                    .event_data
                    .as_ref()
                    .is_some_and(|event_data| event_data.event != PtraceEvent::Stop))
        {
            return error!(EIO);
        }
        ptrace.stop_status = PtraceStatus::Listening;
    }
    Ok(())
}

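/// Implements PTRACE_DETACH: resumes the tracee (with `data` as an optional signal) and drops it
/// from the tracer's ptracee bookkeeping.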
pub fn ptrace_detach<L>(
    locked: &mut Locked<L>,
    pids: &mut PidTable,
    thread_group: &ThreadGroup,
    tracee: &Task,
    data: &UserAddress,
) -> Result<(), Errno>
where
    L: LockBefore<ThreadGroupLimits>,
{
    if let Err(x) = ptrace_cont(locked, &tracee, &data, true) {
        return Err(x);
    }
    let tid = tracee.get_tid();
    thread_group.ptracees.lock().remove(&tid);
    thread_group.write().zombie_ptracees.remove(pids, tid);
    Ok(())
}

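/// Handles a ptrace() request against an existing tracee: verifies that the caller is its tracer
/// and dispatches `request`.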
pub fn ptrace_dispatch<L>(
    locked: &mut Locked<L>,
    current_task: &mut CurrentTask,
    request: u32,
    pid: pid_t,
    addr: UserAddress,
    data: UserAddress,
) -> Result<SyscallResult, Errno>
where
    L: LockBefore<ThreadGroupLimits>,
{
    let mut pids = current_task.kernel().pids.write();
    let weak_task = pids.get_task(pid);
    let tracee = weak_task.upgrade().ok_or_else(|| errno!(ESRCH))?;

    if let Some(ptrace) = &tracee.read().ptrace {
        if ptrace.get_pid() != current_task.get_pid() {
            return error!(ESRCH);
        }
    }

    match request {
        PTRACE_KILL => {
            let mut siginfo = SignalInfo::default(SIGKILL);
            siginfo.code = (linux_uapi::SIGTRAP | PTRACE_KILL << 8) as i32;
            send_standard_signal(locked, &tracee, siginfo);
            return Ok(starnix_syscalls::SUCCESS);
        }
        PTRACE_INTERRUPT => {
            ptrace_interrupt(tracee.as_ref())?;
            return Ok(starnix_syscalls::SUCCESS);
        }
        PTRACE_LISTEN => {
            ptrace_listen(&tracee)?;
            return Ok(starnix_syscalls::SUCCESS);
        }
        PTRACE_CONT => {
            ptrace_cont(locked, &tracee, &data, false)?;
            return Ok(starnix_syscalls::SUCCESS);
        }
        PTRACE_SYSCALL => {
            tracee.trace_syscalls.store(true, std::sync::atomic::Ordering::Relaxed);
            ptrace_cont(locked, &tracee, &data, false)?;
            return Ok(starnix_syscalls::SUCCESS);
        }
        PTRACE_DETACH => {
            ptrace_detach(locked, &mut pids, current_task.thread_group(), tracee.as_ref(), &data)?;
            return Ok(starnix_syscalls::SUCCESS);
        }
        _ => {}
    }

    let mut state = tracee.write();
    if !state.can_accept_ptrace_commands() {
        return error!(ESRCH);
    }

    match request {
        PTRACE_PEEKDATA | PTRACE_PEEKTEXT => {
            let Some(captured) = &mut state.captured_thread_state else {
                return error!(ESRCH);
            };

            let src = LongPtr::new(captured.as_ref(), addr);
            let val = tracee.read_multi_arch_object(src)?;

            let dst = LongPtr::new(&src, data);
            current_task.write_multi_arch_object(dst, val)?;
            Ok(starnix_syscalls::SUCCESS)
        }
        PTRACE_POKEDATA | PTRACE_POKETEXT => {
            let Some(captured) = &mut state.captured_thread_state else {
                return error!(ESRCH);
            };

            let bytes = if captured.is_arch32() {
                u32::try_from(data.ptr()).map_err(|_| errno!(EINVAL))?.to_ne_bytes().to_vec()
            } else {
                data.ptr().to_ne_bytes().to_vec()
            };

            tracee.mm()?.force_write_memory(addr, &bytes)?;

            Ok(starnix_syscalls::SUCCESS)
        }
        PTRACE_PEEKUSR => {
            let Some(captured) = &mut state.captured_thread_state else {
                return error!(ESRCH);
            };

            let dst = LongPtr::new(captured.as_ref(), data);
            let val = ptrace_peekuser(&mut captured.thread_state, addr.ptr() as usize)?;
            current_task.write_multi_arch_object(dst, val as u64)?;
            return Ok(starnix_syscalls::SUCCESS);
        }
        PTRACE_POKEUSR => {
            ptrace_pokeuser(&mut *state, data.ptr() as usize, addr.ptr() as usize)?;
            return Ok(starnix_syscalls::SUCCESS);
        }
        PTRACE_GETREGSET => {
            if let Some(ref mut captured) = state.captured_thread_state {
                let uiv = IOVecPtr::new(current_task, data);
                let mut iv = current_task.read_multi_arch_object(uiv)?;
                let base = iv.iov_base.addr;
                let mut len = iv.iov_len as usize;
                ptrace_getregset(
                    current_task,
                    &mut captured.thread_state,
                    ElfNoteType::try_from(addr.ptr() as usize)?,
                    base,
                    &mut len,
                )?;
                iv.iov_len = len as u64;
                current_task.write_multi_arch_object(uiv, iv)?;
                return Ok(starnix_syscalls::SUCCESS);
            }
            error!(ESRCH)
        }
        PTRACE_SETREGSET => {
            if let Some(ref mut captured) = state.captured_thread_state {
                captured.dirty = true;
                let uiv = IOVecPtr::new(current_task, data);
                let iv = current_task.read_multi_arch_object(uiv)?;
                let base = iv.iov_base.addr;
                let len = iv.iov_len as usize;
                ptrace_setregset(
                    current_task,
                    &mut captured.thread_state,
                    ElfNoteType::try_from(addr.ptr() as usize)?,
                    base,
                    len,
                )?;
                return Ok(starnix_syscalls::SUCCESS);
            }
            error!(ESRCH)
        }
        #[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
        PTRACE_GETREGS => {
            if let Some(captured) = &mut state.captured_thread_state {
                let mut len = usize::MAX;
                ptrace_getregset(
                    current_task,
                    &mut captured.thread_state,
                    ElfNoteType::PrStatus,
                    data.ptr() as u64,
                    &mut len,
                )?;
                return Ok(starnix_syscalls::SUCCESS);
            }
            error!(ESRCH)
        }
        PTRACE_SETSIGMASK => {
            if addr.ptr() != std::mem::size_of::<SigSet>() {
                return error!(EINVAL);
            }
            let src: UserRef<SigSet> = UserRef::from(data);
            let val = current_task.read_object(src)?;
            state.set_signal_mask(val);

            Ok(starnix_syscalls::SUCCESS)
        }
        PTRACE_GETSIGMASK => {
            if addr.ptr() != std::mem::size_of::<SigSet>() {
                return error!(EINVAL);
            }
            let dst: UserRef<SigSet> = UserRef::from(data);
            let val = state.signal_mask();
            current_task.write_object(dst, &val)?;
            Ok(starnix_syscalls::SUCCESS)
        }
        PTRACE_GETSIGINFO => {
            if let Some(ptrace) = &state.ptrace {
                if let Some(signal) = ptrace.last_signal.as_ref() {
                    let dst = MultiArchUserRef::<uapi::siginfo_t, uapi::arch32::siginfo_t>::new(
                        current_task,
                        data,
                    );
                    signal.write(current_task, dst)?;
                } else {
                    return error!(EINVAL);
                }
            }
            Ok(starnix_syscalls::SUCCESS)
        }
        PTRACE_SETSIGINFO => {
            const SI_MAX_SIZE_AS_USIZE: usize = SI_MAX_SIZE as usize;

            let siginfo_mem = current_task.read_memory_to_array::<SI_MAX_SIZE_AS_USIZE>(data)?;
            let header = SignalInfoHeader::read_from_bytes(&siginfo_mem[..SI_HEADER_SIZE]).unwrap();

            let mut bytes = [0u8; SI_MAX_SIZE as usize - SI_HEADER_SIZE];
            bytes.copy_from_slice(&siginfo_mem[SI_HEADER_SIZE..SI_MAX_SIZE as usize]);
            let details = SignalDetail::Raw { data: bytes };
            let unchecked_signal = UncheckedSignal::new(header.signo as u64);
            let signal = Signal::try_from(unchecked_signal)?;

            let siginfo = SignalInfo {
                signal,
                errno: header.errno,
                code: header.code,
                detail: details,
                force: false,
                source: SignalSource::capture(),
            };
            if let Some(ptrace) = &mut state.ptrace {
                ptrace.last_signal = Some(siginfo);
            }
            Ok(starnix_syscalls::SUCCESS)
        }
        PTRACE_GET_SYSCALL_INFO => {
            if let Some(ptrace) = &state.ptrace {
                let (size, info) = ptrace.get_target_syscall(&tracee, &state)?;
                let dst: UserRef<ptrace_syscall_info> = UserRef::from(data);
                let len = std::cmp::min(std::mem::size_of::<ptrace_syscall_info>(), addr.ptr());
                let src = unsafe {
                    std::slice::from_raw_parts(
                        &info as *const ptrace_syscall_info as *const u8,
                        len as usize,
                    )
                };
                current_task.write_memory(dst.addr(), src)?;
                Ok(size.into())
            } else {
                error!(ESRCH)
            }
        }
        PTRACE_SETOPTIONS => {
            let mask = data.ptr() as u32;
            if mask != 0
                && (mask
                    & !(PTRACE_O_TRACESYSGOOD
                        | PTRACE_O_TRACECLONE
                        | PTRACE_O_TRACEFORK
                        | PTRACE_O_TRACEVFORK
                        | PTRACE_O_TRACEVFORKDONE
                        | PTRACE_O_TRACEEXEC
                        | PTRACE_O_TRACEEXIT
                        | PTRACE_O_EXITKILL)
                    != 0)
            {
                track_stub!(TODO("https://fxbug.dev/322874463"), "ptrace(PTRACE_SETOPTIONS)", mask);
                return error!(ENOSYS);
            }
            if let Some(ptrace) = &mut state.ptrace {
                ptrace.set_options_from_bits(mask)?;
            }
            Ok(starnix_syscalls::SUCCESS)
        }
        PTRACE_GETEVENTMSG => {
            if let Some(ptrace) = &state.ptrace {
                if let Some(event_data) = &ptrace.event_data {
                    let dst = LongPtr::new(current_task, data);
                    current_task.write_multi_arch_object(dst, event_data.msg)?;
                    return Ok(starnix_syscalls::SUCCESS);
                }
            }
            error!(EIO)
        }
        _ => {
            track_stub!(TODO("https://fxbug.dev/322874463"), "ptrace", request);
            error!(ENOSYS)
        }
    }
}

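/// Records the task as a ptracee of `thread_group` and installs its `PtraceState`.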
fn do_attach(
    thread_group: &ThreadGroup,
    task: WeakRef<Task>,
    attach_type: PtraceAttachType,
    options: PtraceOptions,
) -> Result<(), Errno> {
    if let Some(task_ref) = task.upgrade() {
        thread_group.ptracees.lock().insert(task_ref.get_tid(), (&task_ref).into());
        {
            let process_state = &mut task_ref.thread_group().write();
            let mut state = task_ref.write();
            state.set_ptrace(Some(PtraceState::new(thread_group.leader, attach_type, options)))?;
            if process_state.is_waitable()
                && process_state.base.load_stopped() == StopState::GroupStopped
                && task_ref.load_stopped() == StopState::GroupStopped
            {
                if let Some(ptrace) = &mut state.ptrace {
                    ptrace.last_signal_waitable = true;
                }
            }
        }
        return Ok(());
    }
    unreachable!("Tracee thread not found");
}

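/// Attaches a (typically newly created) task to the tracer described by `ptrace_state` and sends
/// the initial SIGTRAP (seize) or SIGSTOP (attach).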
pub fn ptrace_attach_from_state<L>(
    locked: &mut Locked<L>,
    tracee_task: &OwnedRef<Task>,
    ptrace_state: PtraceCoreState,
) -> Result<(), Errno>
where
    L: LockBefore<ThreadGroupLimits>,
{
    {
        let weak_tg =
            tracee_task.thread_group().kernel.pids.read().get_thread_group(ptrace_state.pid);
        let tracer_tg = weak_tg.ok_or_else(|| errno!(ESRCH))?;
        do_attach(
            &tracer_tg,
            WeakRef::from(tracee_task),
            ptrace_state.attach_type,
            ptrace_state.options,
        )?;
    }
    let mut state = tracee_task.write();
    if let Some(ptrace) = &mut state.ptrace {
        ptrace.core_state.tracer_waiters = Arc::clone(&ptrace_state.tracer_waiters);
    }

    let signal = if ptrace_state.attach_type == PtraceAttachType::Seize {
        if let Some(ptrace) = &mut state.ptrace {
            ptrace.set_last_event(Some(PtraceEventData::new_from_event(PtraceEvent::Stop, 0)));
        }
        SignalInfo::default(SIGTRAP)
    } else {
        SignalInfo::default(SIGSTOP)
    };
    send_signal_first(locked, tracee_task, state, signal);

    Ok(())
}

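/// Implements PTRACE_TRACEME: makes the calling task a tracee of its parent, subject to the
/// security module's ptrace_traceme check.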
pub fn ptrace_traceme(current_task: &mut CurrentTask) -> Result<SyscallResult, Errno> {
    let parent = current_task.thread_group().read().parent.clone();
    if let Some(parent) = parent {
        let parent = parent.upgrade();
        {
            let pids = current_task.kernel().pids.read();
            let parent_task = pids.get_task(parent.leader);
            security::ptrace_traceme(
                current_task,
                parent_task.upgrade().ok_or_else(|| errno!(EINVAL))?.as_ref(),
            )?;
        }

        let task_ref = OwnedRef::temp(&current_task.task);
        do_attach(&parent, (&task_ref).into(), PtraceAttachType::Attach, PtraceOptions::empty())?;
        Ok(starnix_syscalls::SUCCESS)
    } else {
        error!(EPERM)
    }
}

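/// Implements PTRACE_ATTACH and PTRACE_SEIZE for `pid`, after checking the ptrace access mode.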
pub fn ptrace_attach<L>(
    locked: &mut Locked<L>,
    current_task: &mut CurrentTask,
    pid: pid_t,
    attach_type: PtraceAttachType,
    data: UserAddress,
) -> Result<SyscallResult, Errno>
where
    L: LockBefore<MmDumpable>,
{
    let weak_task = current_task.kernel().pids.read().get_task(pid);
    let tracee = weak_task.upgrade().ok_or_else(|| errno!(ESRCH))?;

    if tracee.thread_group == current_task.thread_group {
        return error!(EPERM);
    }

    current_task.check_ptrace_access_mode(locked, PTRACE_MODE_ATTACH_REALCREDS, &tracee)?;
    do_attach(current_task.thread_group(), weak_task.clone(), attach_type, PtraceOptions::empty())?;
    if attach_type == PtraceAttachType::Attach {
        send_standard_signal(
            locked.cast_locked::<MmDumpable>(),
            &tracee,
            SignalInfo::default(SIGSTOP),
        );
    } else if attach_type == PtraceAttachType::Seize {
        if let Some(task_ref) = weak_task.upgrade() {
            let mut state = task_ref.write();
            if let Some(ptrace) = &mut state.ptrace {
                ptrace.set_options_from_bits(data.ptr() as u32)?;
            }
        }
    }
    Ok(starnix_syscalls::SUCCESS)
}

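/// Implements PTRACE_PEEKUSR: reads a word from the tracee's USER area; only register offsets
/// are supported.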
pub fn ptrace_peekuser(thread_state: &mut ThreadState, offset: usize) -> Result<usize, Errno> {
    #[cfg(any(target_arch = "x86_64"))]
    if offset >= std::mem::size_of::<user>() {
        return error!(EIO);
    }
    if offset < UserRegsStructPtr::size_of_object_for(thread_state) {
        let result = thread_state.get_user_register(offset)?;
        return Ok(result);
    }
    error!(EIO)
}

pub fn ptrace_pokeuser(
    state: &mut TaskMutableState,
    value: usize,
    offset: usize,
) -> Result<(), Errno> {
    if let Some(ref mut thread_state) = state.captured_thread_state {
        thread_state.dirty = true;

        #[cfg(any(target_arch = "x86_64"))]
        if offset >= std::mem::size_of::<user>() {
            return error!(EIO);
        }
        if offset < UserRegsStructPtr::size_of_object_for(thread_state.as_ref()) {
            return thread_state.thread_state.set_user_register(offset, value);
        }
    }
    error!(EIO)
}

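/// Copies the requested register set from `thread_state` to user memory at `base`; only
/// NT_PRSTATUS is currently supported.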
pub fn ptrace_getregset(
    current_task: &CurrentTask,
    thread_state: &mut ThreadState,
    regset_type: ElfNoteType,
    base: u64,
    len: &mut usize,
) -> Result<(), Errno> {
    match regset_type {
        ElfNoteType::PrStatus => {
            let user_regs_struct_len = UserRegsStructPtr::size_of_object_for(thread_state);
            if *len < user_regs_struct_len {
                return error!(EINVAL);
            }
            *len = user_regs_struct_len;
            let mut i: usize = 0;
            let mut reg_ptr = LongPtr::new(thread_state, base);
            while i < *len {
                let mut val = None;
                thread_state
                    .registers
                    .apply_user_register(i, &mut |register| val = Some(*register as usize))?;
                if let Some(val) = val {
                    current_task.write_multi_arch_object(reg_ptr, val as u64)?;
                }
                i += reg_ptr.size_of_object();
                reg_ptr = reg_ptr.next()?;
            }
            Ok(())
        }
        _ => {
            error!(EINVAL)
        }
    }
}

pub fn ptrace_setregset(
    current_task: &CurrentTask,
    thread_state: &mut ThreadState,
    regset_type: ElfNoteType,
    base: u64,
    mut len: usize,
) -> Result<(), Errno> {
    match regset_type {
        ElfNoteType::PrStatus => {
            let user_regs_struct_len = UserRegsStructPtr::size_of_object_for(thread_state);
            if len < user_regs_struct_len {
                return error!(EINVAL);
            }
            len = user_regs_struct_len;
            let mut i: usize = 0;
            let mut reg_ptr = LongPtr::new(thread_state, base);
            while i < len {
                let val = current_task.read_multi_arch_object(reg_ptr)?;
                thread_state.registers.apply_user_register(i, &mut |register| *register = val)?;
                i += reg_ptr.size_of_object();
                reg_ptr = reg_ptr.next()?;
            }
            Ok(())
        }
        _ => {
            error!(EINVAL)
        }
    }
}

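/// Reports a syscall-enter ptrace stop (if the current task is traced) and blocks until the
/// tracer resumes the task.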
#[inline(never)]
pub fn ptrace_syscall_enter(locked: &mut Locked<Unlocked>, current_task: &mut CurrentTask) {
    let block = {
        let mut state = current_task.write();
        if state.ptrace.is_some() {
            current_task.trace_syscalls.store(false, Ordering::Relaxed);
            let mut sig = SignalInfo::default(SIGTRAP);
            sig.code = (linux_uapi::SIGTRAP | 0x80) as i32;
            if state
                .ptrace
                .as_ref()
                .is_some_and(|ptrace| ptrace.has_option(PtraceOptions::TRACESYSGOOD))
            {
                sig.signal.set_ptrace_syscall_bit();
            }
            state.set_stopped(StopState::SyscallEnterStopping, Some(sig), None, None);
            true
        } else {
            false
        }
    };
    if block {
        current_task.block_while_stopped(locked);
    }
}

#[inline(never)]
pub fn ptrace_syscall_exit(
    locked: &mut Locked<Unlocked>,
    current_task: &mut CurrentTask,
    is_error: bool,
) {
    let block = {
        let mut state = current_task.write();
        current_task.trace_syscalls.store(false, Ordering::Relaxed);
        if state.ptrace.is_some() {
            let mut sig = SignalInfo::default(SIGTRAP);
            sig.code = (linux_uapi::SIGTRAP | 0x80) as i32;
            if state
                .ptrace
                .as_ref()
                .is_some_and(|ptrace| ptrace.has_option(PtraceOptions::TRACESYSGOOD))
            {
                sig.signal.set_ptrace_syscall_bit();
            }

            state.set_stopped(StopState::SyscallExitStopping, Some(sig), None, None);
            if let Some(ptrace) = &mut state.ptrace {
                ptrace.last_syscall_was_error = is_error;
            }
            true
        } else {
            false
        }
    };
    if block {
        current_task.block_while_stopped(locked);
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::task::syscalls::sys_prctl;
    use crate::testing::{create_task, spawn_kernel_and_run};
    use starnix_uapi::PR_SET_PTRACER;
    use starnix_uapi::auth::CAP_SYS_PTRACE;

    #[::fuchsia::test]
    async fn test_set_ptracer() {
        spawn_kernel_and_run(async |locked, current_task| {
            let kernel = current_task.kernel().clone();
            let mut tracee = create_task(locked, &kernel, "tracee");
            let mut tracer = create_task(locked, &kernel, "tracer");

            let mut creds = tracer.real_creds().clone();
            creds.cap_effective &= !CAP_SYS_PTRACE;
            tracer.set_creds(creds);

            kernel.ptrace_scope.store(security::yama::SCOPE_RESTRICTED, Ordering::Relaxed);
            assert_eq!(
                sys_prctl(locked, &mut tracee, PR_SET_PTRACER, 0xFFF, 0, 0, 0),
                error!(EINVAL)
            );

            assert_eq!(
                ptrace_attach(
                    locked,
                    &mut tracer,
                    tracee.as_ref().task.tid,
                    PtraceAttachType::Attach,
                    UserAddress::NULL,
                ),
                error!(EPERM)
            );

            assert!(
                sys_prctl(
                    locked,
                    &mut tracee,
                    PR_SET_PTRACER,
                    tracer.thread_group().leader as u64,
                    0,
                    0,
                    0
                )
                .is_ok()
            );

            let mut not_tracer = create_task(locked, &kernel, "not-tracer");
            not_tracer.set_creds(tracer.real_creds());
            assert_eq!(
                ptrace_attach(
                    locked,
                    &mut not_tracer,
                    tracee.as_ref().task.tid,
                    PtraceAttachType::Attach,
                    UserAddress::NULL,
                ),
                error!(EPERM)
            );

            assert!(
                ptrace_attach(
                    locked,
                    &mut tracer,
                    tracee.as_ref().task.tid,
                    PtraceAttachType::Attach,
                    UserAddress::NULL,
                )
                .is_ok()
            );
        })
        .await;
    }

    #[::fuchsia::test]
    async fn test_set_ptracer_any() {
        spawn_kernel_and_run(async |locked, current_task| {
            let kernel = current_task.kernel().clone();
            let mut tracee = create_task(locked, &kernel, "tracee");
            let mut tracer = create_task(locked, &kernel, "tracer");

            let mut creds = tracer.real_creds().clone();
            creds.cap_effective &= !CAP_SYS_PTRACE;
            tracer.set_creds(creds);

            kernel.ptrace_scope.store(security::yama::SCOPE_RESTRICTED, Ordering::Relaxed);
            assert_eq!(
                sys_prctl(locked, &mut tracee, PR_SET_PTRACER, 0xFFF, 0, 0, 0),
                error!(EINVAL)
            );

            assert_eq!(
                ptrace_attach(
                    locked,
                    &mut tracer,
                    tracee.as_ref().task.tid,
                    PtraceAttachType::Attach,
                    UserAddress::NULL,
                ),
                error!(EPERM)
            );

            assert!(
                sys_prctl(locked, &mut tracee, PR_SET_PTRACER, PR_SET_PTRACER_ANY as u64, 0, 0, 0)
                    .is_ok()
            );

            assert!(
                ptrace_attach(
                    locked,
                    &mut tracer,
                    tracee.as_ref().task.tid,
                    PtraceAttachType::Attach,
                    UserAddress::NULL,
                )
                .is_ok()
            );
        })
        .await;
    }
}