1use crate::arch::execution::new_syscall_from_state;
6use crate::mm::{IOVecPtr, MemoryAccessor, MemoryAccessorExt};
7use crate::security;
8use crate::signals::syscalls::WaitingOptions;
9use crate::signals::{
10 SI_HEADER_SIZE, SignalDetail, SignalInfo, SignalInfoHeader, SignalSource, send_signal_first,
11 send_standard_signal,
12};
13use crate::task::waiter::WaitQueue;
14use crate::task::{
15 CurrentTask, PidTable, ProcessSelector, StopState, Task, TaskMutableState, ThreadGroup,
16 ThreadState, ZombieProcess,
17};
18use bitflags::bitflags;
19use starnix_logging::track_stub;
20use starnix_sync::{LockBefore, Locked, MmDumpable, ThreadGroupLimits, Unlocked};
21use starnix_syscalls::SyscallResult;
22use starnix_syscalls::decls::SyscallDecl;
23use starnix_types::ownership::{OwnedRef, Releasable, ReleaseGuard, WeakRef};
24use starnix_uapi::auth::PTRACE_MODE_ATTACH_REALCREDS;
25use starnix_uapi::elf::ElfNoteType;
26use starnix_uapi::errors::Errno;
27use starnix_uapi::signals::{SIGKILL, SIGSTOP, SIGTRAP, SigSet, Signal, UncheckedSignal};
28#[allow(unused_imports)]
29use starnix_uapi::user_address::ArchSpecific;
30use starnix_uapi::user_address::{LongPtr, MultiArchUserRef, UserAddress, UserRef};
31use starnix_uapi::{
32 PTRACE_CONT, PTRACE_DETACH, PTRACE_EVENT_CLONE, PTRACE_EVENT_EXEC, PTRACE_EVENT_EXIT,
33 PTRACE_EVENT_FORK, PTRACE_EVENT_SECCOMP, PTRACE_EVENT_STOP, PTRACE_EVENT_VFORK,
34 PTRACE_EVENT_VFORK_DONE, PTRACE_GET_SYSCALL_INFO, PTRACE_GETEVENTMSG, PTRACE_GETREGSET,
35 PTRACE_GETSIGINFO, PTRACE_GETSIGMASK, PTRACE_INTERRUPT, PTRACE_KILL, PTRACE_LISTEN,
36 PTRACE_O_EXITKILL, PTRACE_O_TRACECLONE, PTRACE_O_TRACEEXEC, PTRACE_O_TRACEEXIT,
37 PTRACE_O_TRACEFORK, PTRACE_O_TRACESYSGOOD, PTRACE_O_TRACEVFORK, PTRACE_O_TRACEVFORKDONE,
38 PTRACE_PEEKDATA, PTRACE_PEEKTEXT, PTRACE_PEEKUSR, PTRACE_POKEDATA, PTRACE_POKETEXT,
39 PTRACE_POKEUSR, PTRACE_SETOPTIONS, PTRACE_SETREGSET, PTRACE_SETSIGINFO, PTRACE_SETSIGMASK,
40 PTRACE_SYSCALL, PTRACE_SYSCALL_INFO_ENTRY, PTRACE_SYSCALL_INFO_EXIT, PTRACE_SYSCALL_INFO_NONE,
41 SI_MAX_SIZE, clone_args, errno, error, pid_t, ptrace_syscall_info, tid_t, uapi,
42};
43
44use std::collections::BTreeMap;
45use std::sync::atomic::Ordering;
46use std::sync::{Arc, Weak};
47use zerocopy::FromBytes;
48
49#[cfg(target_arch = "x86_64")]
50use starnix_uapi::{PTRACE_GETREGS, user};
51
52#[cfg(target_arch = "aarch64")]
53use starnix_uapi::arch32::PTRACE_GETREGS;
54
55type UserRegsStructPtr =
56 MultiArchUserRef<starnix_uapi::user_regs_struct, starnix_uapi::arch32::user_regs_struct>;
57
58uapi::check_arch_independent_layout! {
59 ptrace_syscall_info {
60 op,
61 arch,
62 instruction_pointer,
63 stack_pointer,
64 __bindgen_anon_1,
65 }
66
67 ptrace_syscall_info__bindgen_ty_1 {
68 entry,
69 exit,
70 seccomp,
71 }
72
73 ptrace_syscall_info__bindgen_ty_1__bindgen_ty_1 {
74 nr,
75 args,
76 }
77
78 ptrace_syscall_info__bindgen_ty_1__bindgen_ty_2 {
79 rval,
80 is_error,
81 }
82
83 ptrace_syscall_info__bindgen_ty_1__bindgen_ty_3 {
84 nr,
85 args,
86 ret_data,
87 }
88}
89
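/// The tracer's outstanding request for a stopped tracee: nothing requested
/// (`Default`), a request to resume (`Continuing`), or a PTRACE_LISTEN request
/// to keep the tracee stopped while still reporting new stop events (`Listening`).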
90#[derive(Clone, Default, PartialEq)]
94pub enum PtraceStatus {
95 #[default]
97 Default,
98 Continuing,
100 Listening,
107}
108
109impl PtraceStatus {
110 pub fn is_continuing(&self) -> bool {
111 *self == PtraceStatus::Continuing
112 }
113}
114
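/// How the tracer became attached to the tracee: via PTRACE_ATTACH /
/// PTRACE_TRACEME (`Attach`) or via PTRACE_SEIZE (`Seize`).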
115#[derive(Copy, Clone, PartialEq)]
117pub enum PtraceAttachType {
118 Attach,
120 Seize,
122}
123
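// Tracer-configurable options, mirroring the PTRACE_O_* bits passed to
// PTRACE_SETOPTIONS (or as the data argument of PTRACE_SEIZE).
//
// A minimal sketch of how a raw option mask maps onto this type (illustrative
// only; `mask` is a made-up local, not a value from this module):
//
//     let mask = PTRACE_O_TRACEEXEC | PTRACE_O_EXITKILL;
//     let opts = PtraceOptions::from_bits(mask).expect("unknown PTRACE_O_* bits");
//     assert!(opts.contains(PtraceOptions::TRACEEXEC | PtraceOptions::EXITKILL));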
124bitflags! {
125 #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
126 #[repr(transparent)]
127 pub struct PtraceOptions: u32 {
128 const EXITKILL = starnix_uapi::PTRACE_O_EXITKILL;
129 const TRACECLONE = starnix_uapi::PTRACE_O_TRACECLONE;
130 const TRACEEXEC = starnix_uapi::PTRACE_O_TRACEEXEC;
131 const TRACEEXIT = starnix_uapi::PTRACE_O_TRACEEXIT;
132 const TRACEFORK = starnix_uapi::PTRACE_O_TRACEFORK;
133 const TRACESYSGOOD = starnix_uapi::PTRACE_O_TRACESYSGOOD;
134 const TRACEVFORK = starnix_uapi::PTRACE_O_TRACEVFORK;
135 const TRACEVFORKDONE = starnix_uapi::PTRACE_O_TRACEVFORKDONE;
136 const TRACESECCOMP = starnix_uapi::PTRACE_O_TRACESECCOMP;
137 const SUSPEND_SECCOMP = starnix_uapi::PTRACE_O_SUSPEND_SECCOMP;
138 }
139}
140
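/// The PTRACE_EVENT_* codes delivered to the tracer alongside SIGTRAP stops.
/// From a (hypothetical) userspace tracer's point of view, the event is encoded
/// in the waitpid status so that `status >> 8 == SIGTRAP | (event << 8)`.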
141#[repr(u32)]
142#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)]
143pub enum PtraceEvent {
144 #[default]
145 None = 0,
146 Stop = PTRACE_EVENT_STOP,
147 Clone = PTRACE_EVENT_CLONE,
148 Fork = PTRACE_EVENT_FORK,
149 Vfork = PTRACE_EVENT_VFORK,
150 VforkDone = PTRACE_EVENT_VFORK_DONE,
151 Exec = PTRACE_EVENT_EXEC,
152 Exit = PTRACE_EVENT_EXIT,
153 Seccomp = PTRACE_EVENT_SECCOMP,
154}
155
156impl PtraceEvent {
157 pub fn from_option(option: &PtraceOptions) -> Self {
158 match *option {
159 PtraceOptions::TRACECLONE => PtraceEvent::Clone,
160 PtraceOptions::TRACEFORK => PtraceEvent::Fork,
161 PtraceOptions::TRACEVFORK => PtraceEvent::Vfork,
162 PtraceOptions::TRACEVFORKDONE => PtraceEvent::VforkDone,
163 PtraceOptions::TRACEEXEC => PtraceEvent::Exec,
164 PtraceOptions::TRACEEXIT => PtraceEvent::Exit,
165 PtraceOptions::TRACESECCOMP => PtraceEvent::Seccomp,
166 _ => unreachable!("Bad ptrace event specified"),
167 }
168 }
169}
170
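/// A pending ptrace event: the event that fired and the message that
/// PTRACE_GETEVENTMSG reports for it (for fork/clone events Linux reports the
/// new child's pid here).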
171pub struct PtraceEventData {
173 pub event: PtraceEvent,
175
176 pub msg: u64,
178}
179
180impl PtraceEventData {
181 pub fn new(option: PtraceOptions, msg: u64) -> Self {
182 Self { event: PtraceEvent::from_option(&option), msg }
183 }
184 pub fn new_from_event(event: PtraceEvent, msg: u64) -> Self {
185 Self { event, msg }
186 }
187}
188
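/// The portion of a tracee's ptrace state that can be copied to another task:
/// the tracer's pid and thread group, how the attach happened, the active
/// options, and the wait queue the tracer sleeps on. A copy is handed to
/// children that are auto-attached on fork/clone (see `get_core_state_for_clone`).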
189#[derive(Clone)]
192pub struct PtraceCoreState {
193 pub pid: pid_t,
195
196 pub thread_group: Weak<ThreadGroup>,
198
199 pub attach_type: PtraceAttachType,
202
203 pub options: PtraceOptions,
205
206 pub tracer_waiters: Arc<WaitQueue>,
209}
210
211impl PtraceCoreState {
212 pub fn has_option(&self, option: PtraceOptions) -> bool {
213 self.options.contains(option)
214 }
215}
216
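/// Per-tracee ptrace bookkeeping, present while a tracer is attached: the shared
/// core state plus the last signal and event reported to the tracer, whether
/// that signal is still waitable, the current stop-management status, and
/// whether the last syscall returned an error (for PTRACE_GET_SYSCALL_INFO).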
217pub struct PtraceState {
219 pub core_state: PtraceCoreState,
221
222 pub tracee_waiters: WaitQueue,
225
226 pub last_signal: Option<SignalInfo>,
229
230 pub last_signal_waitable: bool,
233
234 pub event_data: Option<PtraceEventData>,
236
237 pub stop_status: PtraceStatus,
240
241 pub last_syscall_was_error: bool,
243}
244
245impl PtraceState {
246 pub fn new(
247 pid: pid_t,
248 thread_group: Weak<ThreadGroup>,
249 attach_type: PtraceAttachType,
250 options: PtraceOptions,
251 ) -> Box<Self> {
252 Box::new(PtraceState {
253 core_state: PtraceCoreState {
254 pid,
255 thread_group,
256 attach_type,
257 options,
258 tracer_waiters: Arc::new(WaitQueue::default()),
259 },
260 tracee_waiters: WaitQueue::default(),
261 last_signal: None,
262 last_signal_waitable: false,
263 event_data: None,
264 stop_status: PtraceStatus::default(),
265 last_syscall_was_error: false,
266 })
267 }
268
269 pub fn get_pid(&self) -> pid_t {
270 self.core_state.pid
271 }
272
273 pub fn set_pid(&mut self, pid: pid_t) {
274 self.core_state.pid = pid;
275 }
276
277 pub fn is_seized(&self) -> bool {
278 self.core_state.attach_type == PtraceAttachType::Seize
279 }
280
281 pub fn get_attach_type(&self) -> PtraceAttachType {
282 self.core_state.attach_type
283 }
284
285 pub fn is_waitable(&self, stop: StopState, options: &WaitingOptions) -> bool {
286 if self.stop_status == PtraceStatus::Listening {
287 return self.last_signal_waitable;
289 }
290 if !options.wait_for_continued && !stop.is_stopping_or_stopped() {
291 return false;
293 }
294 self.last_signal_waitable && !stop.is_in_progress()
295 }
296
297 pub fn set_last_signal(&mut self, mut signal: Option<SignalInfo>) {
298 if let Some(ref mut siginfo) = signal {
299 if siginfo.signal == SIGKILL {
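// SIGKILL can never be intercepted by the tracer, so it is not recorded as the
// last ptrace signal.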
302 return;
303 }
304 self.last_signal_waitable = true;
305 self.last_signal = signal;
306 }
307 }
308
309 pub fn set_last_event(&mut self, event: Option<PtraceEventData>) {
310 if event.is_some() {
311 self.event_data = event;
312 }
313 }
314
315 pub fn get_last_signal_ref(&self) -> Option<&SignalInfo> {
316 self.last_signal.as_ref()
317 }
318
319 pub fn get_last_signal(&mut self, keep_signal_waitable: bool) -> Option<SignalInfo> {
321 self.last_signal_waitable = keep_signal_waitable;
322 self.last_signal.clone()
323 }
324
325 pub fn has_option(&self, option: PtraceOptions) -> bool {
326 self.core_state.has_option(option)
327 }
328
329 pub fn set_options_from_bits(&mut self, option: u32) -> Result<(), Errno> {
330 if let Some(options) = PtraceOptions::from_bits(option) {
331 self.core_state.options = options;
332 Ok(())
333 } else {
334 error!(EINVAL)
335 }
336 }
337
338 pub fn get_options(&self) -> PtraceOptions {
339 self.core_state.options
340 }
341
342 pub fn get_core_state(&self) -> PtraceCoreState {
344 self.core_state.clone()
345 }
346
347 pub fn tracer_waiters(&self) -> &Arc<WaitQueue> {
348 &self.core_state.tracer_waiters
349 }
350
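/// Builds the `ptrace_syscall_info` returned by PTRACE_GET_SYSCALL_INFO for a
/// tracee stopped at syscall entry or exit, along with the number of bytes of
/// the structure that are actually populated.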
351 pub fn get_target_syscall(
358 &self,
359 target: &Task,
360 state: &TaskMutableState,
361 ) -> Result<(i32, ptrace_syscall_info), Errno> {
362 #[cfg(target_arch = "x86_64")]
363 let arch = starnix_uapi::AUDIT_ARCH_X86_64;
364 #[cfg(target_arch = "aarch64")]
365 let arch = starnix_uapi::AUDIT_ARCH_AARCH64;
366 #[cfg(target_arch = "riscv64")]
367 let arch = starnix_uapi::AUDIT_ARCH_RISCV64;
368
369 let mut info = ptrace_syscall_info { arch, ..Default::default() };
370 let mut info_len = memoffset::offset_of!(ptrace_syscall_info, __bindgen_anon_1);
371
372 match &state.captured_thread_state {
373 Some(captured) => {
374 let registers = captured.thread_state.registers;
375 info.instruction_pointer = registers.instruction_pointer_register();
376 info.stack_pointer = registers.stack_pointer_register();
377 #[cfg(target_arch = "aarch64")]
378 if captured.thread_state.arch_width.is_arch32() {
379 info.arch = starnix_uapi::AUDIT_ARCH_ARM;
382 }
383 match target.load_stopped() {
384 StopState::SyscallEnterStopped => {
385 let syscall_decl = SyscallDecl::from_number(
386 registers.syscall_register(),
387 captured.thread_state.arch_width,
388 );
389 let syscall = new_syscall_from_state(syscall_decl, &captured.thread_state);
390 info.op = PTRACE_SYSCALL_INFO_ENTRY as u8;
391 let entry = linux_uapi::ptrace_syscall_info__bindgen_ty_1__bindgen_ty_1 {
392 nr: syscall.decl.number,
393 args: [
394 syscall.arg0.raw(),
395 syscall.arg1.raw(),
396 syscall.arg2.raw(),
397 syscall.arg3.raw(),
398 syscall.arg4.raw(),
399 syscall.arg5.raw(),
400 ],
401 };
402 info_len += memoffset::offset_of!(
403 linux_uapi::ptrace_syscall_info__bindgen_ty_1__bindgen_ty_1,
404 args
405 ) + std::mem::size_of_val(&entry.args);
406 info.__bindgen_anon_1.entry = entry;
407 }
408 StopState::SyscallExitStopped => {
409 info.op = PTRACE_SYSCALL_INFO_EXIT as u8;
410 let exit = linux_uapi::ptrace_syscall_info__bindgen_ty_1__bindgen_ty_2 {
411 rval: registers.return_register() as i64,
412 is_error: state
413 .ptrace
414 .as_ref()
415 .map_or(0, |ptrace| ptrace.last_syscall_was_error as u8),
416 ..Default::default()
417 };
418 info_len += memoffset::offset_of!(
419 linux_uapi::ptrace_syscall_info__bindgen_ty_1__bindgen_ty_2,
420 is_error
421 ) + std::mem::size_of_val(&exit.is_error);
422 info.__bindgen_anon_1.exit = exit;
423 }
424 _ => {
425 info.op = PTRACE_SYSCALL_INFO_NONE as u8;
426 }
427 };
428 }
429 _ => (),
430 }
431 Ok((info_len as i32, info))
432 }
433
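/// Decides, from the tracee's ptrace options and the `clone_args` of a pending
/// clone/fork/vfork, whether the new task should be traced automatically.
/// Returns the option describing the event to report and, if the child should
/// be attached, a copy of the core state to install on it.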
434 pub fn get_core_state_for_clone(
439 &self,
440 clone_args: &clone_args,
441 ) -> (PtraceOptions, Option<PtraceCoreState>) {
442 let trace_type = if clone_args.flags & (starnix_uapi::CLONE_UNTRACED as u64) != 0 {
448 PtraceOptions::empty()
449 } else {
450 if clone_args.flags & (starnix_uapi::CLONE_VFORK as u64) != 0 {
451 PtraceOptions::TRACEVFORK
452 } else if clone_args.exit_signal != (starnix_uapi::SIGCHLD as u64) {
453 PtraceOptions::TRACECLONE
454 } else {
455 PtraceOptions::TRACEFORK
456 }
457 };
458
459 if !self.has_option(trace_type)
460 && (clone_args.flags & (starnix_uapi::CLONE_PTRACE as u64) == 0)
461 {
462 return (PtraceOptions::empty(), None);
463 }
464
465 (trace_type, Some(self.get_core_state()))
466 }
467}
468
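/// A traced task that has exited: an artificial zombie used to report the exit
/// to the tracer plus, if the real parent still has to reap it, the real zombie
/// and the parent thread group it must eventually be delivered to.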
469struct TracedZombie {
471 artificial_zombie: ZombieProcess,
473
474 delegate: Option<(Weak<ThreadGroup>, OwnedRef<ZombieProcess>)>,
477}
478
479impl Releasable for TracedZombie {
480 type Context<'a> = &'a mut PidTable;
481
482 fn release<'a>(self, pids: &'a mut PidTable) {
483 self.artificial_zombie.release(pids);
484 if let Some((_, z)) = self.delegate {
485 z.release(pids);
486 }
487 }
488}
489
490impl TracedZombie {
491 fn new(artificial_zombie: ZombieProcess) -> ReleaseGuard<Self> {
492 ReleaseGuard::from(Self { artificial_zombie, delegate: None })
493 }
494
495 fn new_with_delegate(
496 artificial_zombie: ZombieProcess,
497 delegate: (Weak<ThreadGroup>, OwnedRef<ZombieProcess>),
498 ) -> ReleaseGuard<Self> {
499 ReleaseGuard::from(Self { artificial_zombie, delegate: Some(delegate) })
500 }
501
502 fn set_parent(
503 &mut self,
504 new_zombie: Option<OwnedRef<ZombieProcess>>,
505 new_parent: &ThreadGroup,
506 ) {
507 if let Some(new_zombie) = new_zombie {
508 self.delegate = Some((new_parent.weak_self.clone(), new_zombie));
509 } else {
510 self.delegate = self.delegate.take().map(|(_, z)| (new_parent.weak_self.clone(), z));
511 }
512 }
513}
514
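/// The traced tasks of this tracer that have exited but have not yet been
/// reported to it via waitpid, keyed by tracee tid.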
515#[derive(Default)]
519pub struct ZombiePtracees {
520 zombies: BTreeMap<tid_t, ReleaseGuard<TracedZombie>>,
523}
524
525impl ZombiePtracees {
526 pub fn new() -> Self {
527 Self::default()
528 }
529
530 pub fn add(&mut self, pids: &mut PidTable, tid: tid_t, zombie: ZombieProcess) {
533 if let std::collections::btree_map::Entry::Vacant(entry) = self.zombies.entry(tid) {
534 entry.insert(TracedZombie::new(zombie));
535 } else {
536 zombie.release(pids);
537 }
538 }
539
540 pub fn remove(&mut self, pids: &mut PidTable, tid: tid_t) {
542 self.zombies.remove(&tid).release(pids);
543 }
544
545 pub fn is_empty(&self) -> bool {
546 self.zombies.is_empty()
547 }
548
549 pub fn set_parent_of(
552 &mut self,
553 tracee: tid_t,
554 new_zombie: Option<OwnedRef<ZombieProcess>>,
555 new_parent: &ThreadGroup,
556 ) {
557 match self.zombies.entry(tracee) {
558 std::collections::btree_map::Entry::Vacant(entry) => {
559 if let Some(new_zombie) = new_zombie {
560 entry.insert(TracedZombie::new_with_delegate(
561 new_zombie.as_artificial(),
562 (new_parent.weak_self.clone(), new_zombie),
563 ));
564 }
565 }
566 std::collections::btree_map::Entry::Occupied(mut entry) => {
567 entry.get_mut().set_parent(new_zombie, new_parent);
568 }
569 }
570 }
571
572 pub fn reparent(old_parent: &ThreadGroup, new_parent: &ThreadGroup) {
575 let mut lockless_list = old_parent.read().deferred_zombie_ptracers.clone();
576
577 for deferred_zombie_ptracer in &lockless_list {
578 if let Some(tg) = deferred_zombie_ptracer.tracer_thread_group_key.upgrade() {
579 tg.write().zombie_ptracees.set_parent_of(
580 deferred_zombie_ptracer.tracee_tid,
581 None,
582 new_parent,
583 );
584 }
585 }
586 let mut new_state = new_parent.write();
587 new_state.deferred_zombie_ptracers.append(&mut lockless_list);
588 }
589
590 pub fn release(&mut self, pids: &mut PidTable) {
593 let mut entry = self.zombies.pop_first();
594 while let Some((_, mut zombie)) = entry {
595 if let Some((tg, z)) = zombie.delegate.take() {
596 if let Some(tg) = tg.upgrade() {
597 tg.do_zombie_notifications(z);
598 }
599 }
600 zombie.release(pids);
601
602 entry = self.zombies.pop_first();
603 }
604 }
605
606 pub fn has_zombie_matching(&self, selector: &ProcessSelector) -> bool {
609 self.zombies.values().any(|z| z.artificial_zombie.matches_selector(selector))
610 }
611
612 pub fn has_tracee(&self, tid: tid_t) -> bool {
615 self.zombies.contains_key(&tid)
616 }
617
618 pub fn get_waitable_entry(
622 &mut self,
623 selector: &ProcessSelector,
624 options: &WaitingOptions,
625 ) -> Option<(ZombieProcess, Option<(Weak<ThreadGroup>, OwnedRef<ZombieProcess>)>)> {
626 let Some((t, found_zombie)) = self
629 .zombies
630 .iter()
631 .map(|(t, z)| (*t, &z.artificial_zombie))
632 .rfind(|(_, zombie)| zombie.matches_selector_and_waiting_option(selector, options))
633 else {
634 return None;
635 };
636
637 let result;
638 if !options.keep_waitable_state {
639 result = self.zombies.remove(&t).map(|traced_zombie| {
641 let traced_zombie = ReleaseGuard::take(traced_zombie);
642 (traced_zombie.artificial_zombie, traced_zombie.delegate)
643 });
644 } else {
645 result = Some((found_zombie.as_artificial(), None));
646 }
647
648 result
649 }
650}
651
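/// The value passed to prctl(PR_SET_PTRACER) to allow any process to trace the
/// caller.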
652pub const PR_SET_PTRACER_ANY: i32 = -1;
655
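/// Who may attach to this process under a restricted (Yama-style) ptrace scope,
/// as configured with prctl(PR_SET_PTRACER): nobody, a specific pid, or anyone.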
656#[derive(Copy, Clone, Default, PartialEq)]
659pub enum PtraceAllowedPtracers {
660 #[default]
661 None,
662 Some(pid_t),
663 Any,
664}
665
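/// Implements PTRACE_CONT-style resumption (also used by PTRACE_SYSCALL and
/// PTRACE_DETACH): optionally injects the signal encoded in `data`, updates the
/// tracee's stop status, and wakes it. When `detach` is set, the ptrace
/// attachment is also cleared.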
666fn ptrace_cont<L>(
671 locked: &mut Locked<L>,
672 tracee: &Task,
673 data: &UserAddress,
674 detach: bool,
675) -> Result<(), Errno>
676where
677 L: LockBefore<ThreadGroupLimits>,
678{
679 let data = data.ptr() as u64;
680 let new_state;
681 let mut siginfo = if data != 0 {
682 let signal = Signal::try_from(UncheckedSignal::new(data))?;
683 Some(SignalInfo::default(signal))
684 } else {
685 None
686 };
687
688 let mut state = tracee.write();
689 let is_listen = state.is_ptrace_listening();
690
691 if tracee.load_stopped().is_waking_or_awake() && !is_listen {
692 if detach {
693 state.set_ptrace(None)?;
694 }
695 return error!(EIO);
696 }
697
698 if !state.can_accept_ptrace_commands() && !detach {
699 return error!(ESRCH);
700 }
701
702 if let Some(ptrace) = &mut state.ptrace {
703 if data != 0 {
704 new_state = PtraceStatus::Continuing;
705 if let Some(last_signal) = &mut ptrace.last_signal {
706 if let Some(si) = siginfo {
707 let new_signal = si.signal;
708 last_signal.signal = new_signal;
709 }
710 siginfo = Some(last_signal.clone());
711 }
712 } else {
713 new_state = PtraceStatus::Default;
714 ptrace.last_signal = None;
715 ptrace.event_data = None;
716 }
717 ptrace.stop_status = new_state;
718
719 if is_listen {
720 state.notify_ptracees();
721 }
722 }
723
724 if let Some(siginfo) = siginfo {
725 send_signal_first(locked, &tracee, state, siginfo);
727 } else {
728 state.set_stopped(StopState::Waking, None, None, None);
729 drop(state);
730 tracee.thread_group().set_stopped(StopState::Waking, None, false);
731 }
732 if detach {
733 tracee.write().set_ptrace(None)?;
734 }
735 Ok(())
736}
737
738fn ptrace_interrupt(tracee: &Task) -> Result<(), Errno> {
739 let mut state = tracee.write();
740 if let Some(ptrace) = &mut state.ptrace {
741 if !ptrace.is_seized() {
742 return error!(EIO);
743 }
744 let status = ptrace.stop_status.clone();
745 ptrace.stop_status = PtraceStatus::Default;
746 let event_data = Some(PtraceEventData::new_from_event(PtraceEvent::Stop, 0));
747 if status == PtraceStatus::Listening {
748 let signal = ptrace.last_signal.clone();
749 state.set_stopped(StopState::PtraceEventStopped, signal, None, event_data);
753 } else {
754 state.set_stopped(
755 StopState::PtraceEventStopping,
756 Some(SignalInfo::default(SIGTRAP)),
757 None,
758 event_data,
759 );
760 drop(state);
761 tracee.interrupt();
762 }
763 }
764 Ok(())
765}
766
767fn ptrace_listen(tracee: &Task) -> Result<(), Errno> {
768 let mut state = tracee.write();
769 if let Some(ptrace) = &mut state.ptrace {
770 if !ptrace.is_seized()
771 || (ptrace.last_signal_waitable
772 && ptrace
773 .event_data
774 .as_ref()
775 .is_some_and(|event_data| event_data.event != PtraceEvent::Stop))
776 {
777 return error!(EIO);
778 }
779 ptrace.stop_status = PtraceStatus::Listening;
780 }
781 Ok(())
782}
783
784pub fn ptrace_detach<L>(
785 locked: &mut Locked<L>,
786 pids: &mut PidTable,
787 thread_group: &ThreadGroup,
788 tracee: &Task,
789 data: &UserAddress,
790) -> Result<(), Errno>
791where
792 L: LockBefore<ThreadGroupLimits>,
793{
794 ptrace_cont(locked, &tracee, &data, true)?;
797 let tid = tracee.get_tid();
798 thread_group.ptracees.lock().remove(&tid);
799 thread_group.write().zombie_ptracees.remove(pids, tid);
800 Ok(())
801}
802
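/// Dispatches a ptrace() request that targets an existing tracee. Requests that
/// may be issued while the tracee is running are handled first; the remaining
/// requests require the tracee to be in a ptrace stop.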
803pub fn ptrace_dispatch<L>(
806 locked: &mut Locked<L>,
807 current_task: &mut CurrentTask,
808 request: u32,
809 pid: pid_t,
810 addr: UserAddress,
811 data: UserAddress,
812) -> Result<SyscallResult, Errno>
813where
814 L: LockBefore<ThreadGroupLimits>,
815{
816 let mut pids = current_task.kernel().pids.write();
817 let weak_task = pids.get_task(pid);
818 let tracee = weak_task.upgrade().ok_or_else(|| errno!(ESRCH))?;
819
820 if let Some(ptrace) = &tracee.read().ptrace {
821 if ptrace.get_pid() != current_task.get_pid() {
822 return error!(ESRCH);
823 }
824 }
825
826 match request {
829 PTRACE_KILL => {
830 let mut siginfo = SignalInfo::default(SIGKILL);
831 siginfo.code = (linux_uapi::SIGTRAP | PTRACE_KILL << 8) as i32;
832 send_standard_signal(locked, &tracee, siginfo);
833 return Ok(starnix_syscalls::SUCCESS);
834 }
835 PTRACE_INTERRUPT => {
836 ptrace_interrupt(tracee.as_ref())?;
837 return Ok(starnix_syscalls::SUCCESS);
838 }
839 PTRACE_LISTEN => {
840 ptrace_listen(&tracee)?;
841 return Ok(starnix_syscalls::SUCCESS);
842 }
843 PTRACE_CONT => {
844 ptrace_cont(locked, &tracee, &data, false)?;
845 return Ok(starnix_syscalls::SUCCESS);
846 }
847 PTRACE_SYSCALL => {
848 tracee.trace_syscalls.store(true, std::sync::atomic::Ordering::Relaxed);
849 ptrace_cont(locked, &tracee, &data, false)?;
850 return Ok(starnix_syscalls::SUCCESS);
851 }
852 PTRACE_DETACH => {
853 ptrace_detach(locked, &mut pids, current_task.thread_group(), tracee.as_ref(), &data)?;
854 return Ok(starnix_syscalls::SUCCESS);
855 }
856 _ => {}
857 }
858
859 let mut state = tracee.write();
861 if !state.can_accept_ptrace_commands() {
862 return error!(ESRCH);
863 }
864
865 match request {
866 PTRACE_PEEKDATA | PTRACE_PEEKTEXT => {
867 let Some(captured) = &mut state.captured_thread_state else {
868 return error!(ESRCH);
869 };
870
871 let src = LongPtr::new(captured.as_ref(), addr);
874 let val = tracee.read_multi_arch_object(src)?;
875
876 let dst = LongPtr::new(&src, data);
877 current_task.write_multi_arch_object(dst, val)?;
878 Ok(starnix_syscalls::SUCCESS)
879 }
880 PTRACE_POKEDATA | PTRACE_POKETEXT => {
881 let Some(captured) = &mut state.captured_thread_state else {
882 return error!(ESRCH);
883 };
884
885 let bytes = if captured.is_arch32() {
886 u32::try_from(data.ptr()).map_err(|_| errno!(EINVAL))?.to_ne_bytes().to_vec()
887 } else {
888 data.ptr().to_ne_bytes().to_vec()
889 };
890
891 tracee.mm()?.force_write_memory(addr, &bytes)?;
892
893 Ok(starnix_syscalls::SUCCESS)
894 }
895 PTRACE_PEEKUSR => {
896 let Some(captured) = &mut state.captured_thread_state else {
897 return error!(ESRCH);
898 };
899
900 let dst = LongPtr::new(captured.as_ref(), data);
901 let val = ptrace_peekuser(&mut captured.thread_state, addr.ptr() as usize)?;
902 current_task.write_multi_arch_object(dst, val as u64)?;
903 return Ok(starnix_syscalls::SUCCESS);
904 }
905 PTRACE_POKEUSR => {
906 ptrace_pokeuser(&mut *state, data.ptr() as usize, addr.ptr() as usize)?;
907 return Ok(starnix_syscalls::SUCCESS);
908 }
909 PTRACE_GETREGSET => {
910 if let Some(ref mut captured) = state.captured_thread_state {
911 let uiv = IOVecPtr::new(current_task, data);
912 let mut iv = current_task.read_multi_arch_object(uiv)?;
913 let base = iv.iov_base.addr;
914 let mut len = iv.iov_len as usize;
915 ptrace_getregset(
916 current_task,
917 &mut captured.thread_state,
918 ElfNoteType::try_from(addr.ptr() as usize)?,
919 base,
920 &mut len,
921 )?;
922 iv.iov_len = len as u64;
923 current_task.write_multi_arch_object(uiv, iv)?;
924 return Ok(starnix_syscalls::SUCCESS);
925 }
926 error!(ESRCH)
927 }
928 PTRACE_SETREGSET => {
929 if let Some(ref mut captured) = state.captured_thread_state {
930 captured.dirty = true;
931 let uiv = IOVecPtr::new(current_task, data);
932 let iv = current_task.read_multi_arch_object(uiv)?;
933 let base = iv.iov_base.addr;
934 let len = iv.iov_len as usize;
935 ptrace_setregset(
936 current_task,
937 &mut captured.thread_state,
938 ElfNoteType::try_from(addr.ptr() as usize)?,
939 base,
940 len,
941 )?;
942 return Ok(starnix_syscalls::SUCCESS);
943 }
944 error!(ESRCH)
945 }
946 #[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
947 PTRACE_GETREGS => {
948 if let Some(captured) = &mut state.captured_thread_state {
949 let mut len = usize::MAX;
950 ptrace_getregset(
951 current_task,
952 &mut captured.thread_state,
953 ElfNoteType::PrStatus,
954 data.ptr() as u64,
955 &mut len,
956 )?;
957 return Ok(starnix_syscalls::SUCCESS);
958 }
959 error!(ESRCH)
960 }
961 PTRACE_SETSIGMASK => {
962 if addr.ptr() != std::mem::size_of::<SigSet>() {
965 return error!(EINVAL);
966 }
967 let src: UserRef<SigSet> = UserRef::from(data);
969 let val = current_task.read_object(src)?;
970 state.set_signal_mask(val);
971
972 Ok(starnix_syscalls::SUCCESS)
973 }
974 PTRACE_GETSIGMASK => {
975 if addr.ptr() != std::mem::size_of::<SigSet>() {
978 return error!(EINVAL);
979 }
980 let dst: UserRef<SigSet> = UserRef::from(data);
982 let val = state.signal_mask();
983 current_task.write_object(dst, &val)?;
984 Ok(starnix_syscalls::SUCCESS)
985 }
986 PTRACE_GETSIGINFO => {
987 if let Some(ptrace) = &state.ptrace {
988 if let Some(signal) = ptrace.last_signal.as_ref() {
989 let dst = MultiArchUserRef::<uapi::siginfo_t, uapi::arch32::siginfo_t>::new(
990 current_task,
991 data,
992 );
993 signal.write(current_task, dst)?;
994 } else {
995 return error!(EINVAL);
996 }
997 }
998 Ok(starnix_syscalls::SUCCESS)
999 }
1000 PTRACE_SETSIGINFO => {
1001 const SI_MAX_SIZE_AS_USIZE: usize = SI_MAX_SIZE as usize;
1004
1005 let siginfo_mem = current_task.read_memory_to_array::<SI_MAX_SIZE_AS_USIZE>(data)?;
1006 let header = SignalInfoHeader::read_from_bytes(&siginfo_mem[..SI_HEADER_SIZE]).unwrap();
1007
1008 let mut bytes = [0u8; SI_MAX_SIZE as usize - SI_HEADER_SIZE];
1009 bytes.copy_from_slice(&siginfo_mem[SI_HEADER_SIZE..SI_MAX_SIZE as usize]);
1010 let details = SignalDetail::Raw { data: bytes };
1011 let unchecked_signal = UncheckedSignal::new(header.signo as u64);
1012 let signal = Signal::try_from(unchecked_signal)?;
1013
1014 let siginfo = SignalInfo {
1015 signal,
1016 errno: header.errno,
1017 code: header.code,
1018 detail: details,
1019 force: false,
1020 source: SignalSource::capture(),
1021 };
1022 if let Some(ptrace) = &mut state.ptrace {
1023 ptrace.last_signal = Some(siginfo);
1024 }
1025 Ok(starnix_syscalls::SUCCESS)
1026 }
1027 PTRACE_GET_SYSCALL_INFO => {
1028 if let Some(ptrace) = &state.ptrace {
1029 let (size, info) = ptrace.get_target_syscall(&tracee, &state)?;
1030 let dst: UserRef<ptrace_syscall_info> = UserRef::from(data);
1031 let len = std::cmp::min(std::mem::size_of::<ptrace_syscall_info>(), addr.ptr());
1032 let src = unsafe {
1035 std::slice::from_raw_parts(
1036 &info as *const ptrace_syscall_info as *const u8,
1037 len as usize,
1038 )
1039 };
1040 current_task.write_memory(dst.addr(), src)?;
1041 Ok(size.into())
1042 } else {
1043 error!(ESRCH)
1044 }
1045 }
1046 PTRACE_SETOPTIONS => {
1047 let mask = data.ptr() as u32;
1048 if mask != 0
1050 && (mask
1051 & !(PTRACE_O_TRACESYSGOOD
1052 | PTRACE_O_TRACECLONE
1053 | PTRACE_O_TRACEFORK
1054 | PTRACE_O_TRACEVFORK
1055 | PTRACE_O_TRACEVFORKDONE
1056 | PTRACE_O_TRACEEXEC
1057 | PTRACE_O_TRACEEXIT
1058 | PTRACE_O_EXITKILL)
1059 != 0)
1060 {
1061 track_stub!(TODO("https://fxbug.dev/322874463"), "ptrace(PTRACE_SETOPTIONS)", mask);
1062 return error!(ENOSYS);
1063 }
1064 if let Some(ptrace) = &mut state.ptrace {
1065 ptrace.set_options_from_bits(mask)?;
1066 }
1067 Ok(starnix_syscalls::SUCCESS)
1068 }
1069 PTRACE_GETEVENTMSG => {
1070 if let Some(ptrace) = &state.ptrace {
1071 if let Some(event_data) = &ptrace.event_data {
1072 let dst = LongPtr::new(current_task, data);
1073 current_task.write_multi_arch_object(dst, event_data.msg)?;
1074 return Ok(starnix_syscalls::SUCCESS);
1075 }
1076 }
1077 error!(EIO)
1078 }
1079 _ => {
1080 track_stub!(TODO("https://fxbug.dev/322874463"), "ptrace", request);
1081 error!(ENOSYS)
1082 }
1083 }
1084}
1085
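/// Records `task` as a tracee of `thread_group` and installs the corresponding
/// PtraceState on it, marking the attachment waitable if the tracee is already
/// group-stopped.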
1086fn do_attach(
1088 thread_group: &ThreadGroup,
1089 task: WeakRef<Task>,
1090 attach_type: PtraceAttachType,
1091 options: PtraceOptions,
1092) -> Result<(), Errno> {
1093 if let Some(task_ref) = task.upgrade() {
1094 thread_group.ptracees.lock().insert(task_ref.get_tid(), (&task_ref).into());
1095 {
1096 let process_state = &mut task_ref.thread_group().write();
1097 let mut state = task_ref.write();
1098 state.set_ptrace(Some(PtraceState::new(
1099 thread_group.leader,
1100 thread_group.weak_self.clone(),
1101 attach_type,
1102 options,
1103 )))?;
1104 if process_state.is_waitable()
1107 && process_state.base.load_stopped() == StopState::GroupStopped
1108 && task_ref.load_stopped() == StopState::GroupStopped
1109 {
1110 if let Some(ptrace) = &mut state.ptrace {
1111 ptrace.last_signal_waitable = true;
1112 }
1113 }
1114 }
1115 return Ok(());
1116 }
1117 unreachable!("Tracee thread not found");
1120}
1121
1122pub fn ptrace_attach_from_state<L>(
1126 locked: &mut Locked<L>,
1127 tracee_task: &OwnedRef<Task>,
1128 ptrace_state: PtraceCoreState,
1129) -> Result<(), Errno>
1130where
1131 L: LockBefore<ThreadGroupLimits>,
1132{
1133 {
1134 let weak_tg =
1135 tracee_task.thread_group().kernel.pids.read().get_thread_group(ptrace_state.pid);
1136 let tracer_tg = weak_tg.ok_or_else(|| errno!(ESRCH))?;
1137 do_attach(
1138 &tracer_tg,
1139 WeakRef::from(tracee_task),
1140 ptrace_state.attach_type,
1141 ptrace_state.options,
1142 )?;
1143 }
1144 let mut state = tracee_task.write();
1145 if let Some(ptrace) = &mut state.ptrace {
1146 ptrace.core_state.tracer_waiters = Arc::clone(&ptrace_state.tracer_waiters);
1147 }
1148
1149 let signal = if ptrace_state.attach_type == PtraceAttachType::Seize {
1151 if let Some(ptrace) = &mut state.ptrace {
1152 ptrace.set_last_event(Some(PtraceEventData::new_from_event(PtraceEvent::Stop, 0)));
1153 }
1154 SignalInfo::default(SIGTRAP)
1155 } else {
1156 SignalInfo::default(SIGSTOP)
1157 };
1158 send_signal_first(locked, tracee_task, state, signal);
1159
1160 Ok(())
1161}
1162
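/// Implements PTRACE_TRACEME: the calling task asks its parent to become its
/// tracer.
//
// For orientation, the classic (hypothetical) userspace pattern this supports,
// with purely illustrative names:
//
//     child:  ptrace(PTRACE_TRACEME), raise(SIGSTOP), then execve(...)
//     parent: waitpid(child), ptrace(PTRACE_SETOPTIONS, ..., PTRACE_O_TRACEEXEC),
//             then ptrace(PTRACE_CONT) in a loop around waitpid()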
1163pub fn ptrace_traceme(current_task: &mut CurrentTask) -> Result<SyscallResult, Errno> {
1164 let parent = current_task.thread_group().read().parent.clone();
1165 if let Some(parent) = parent {
1166 let parent = parent.upgrade();
1167 {
1169 let pids = current_task.kernel().pids.read();
1170 let parent_task = pids.get_task(parent.leader);
1171 security::ptrace_traceme(
1172 current_task,
1173 parent_task.upgrade().ok_or_else(|| errno!(EINVAL))?.as_ref(),
1174 )?;
1175 }
1176
1177 let task_ref = OwnedRef::temp(&current_task.task);
1178 do_attach(&parent, (&task_ref).into(), PtraceAttachType::Attach, PtraceOptions::empty())?;
1179 Ok(starnix_syscalls::SUCCESS)
1180 } else {
1181 error!(EPERM)
1182 }
1183}
1184
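/// Implements PTRACE_ATTACH and PTRACE_SEIZE from the tracer's side: checks
/// ptrace access, attaches, and then either sends the initial SIGSTOP (attach)
/// or applies the options passed in `data` (seize).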
1185pub fn ptrace_attach<L>(
1186 locked: &mut Locked<L>,
1187 current_task: &mut CurrentTask,
1188 pid: pid_t,
1189 attach_type: PtraceAttachType,
1190 data: UserAddress,
1191) -> Result<SyscallResult, Errno>
1192where
1193 L: LockBefore<MmDumpable>,
1194{
1195 let weak_task = current_task.kernel().pids.read().get_task(pid);
1196 let tracee = weak_task.upgrade().ok_or_else(|| errno!(ESRCH))?;
1197
1198 if tracee.thread_group == current_task.thread_group {
1199 return error!(EPERM);
1200 }
1201
1202 current_task.check_ptrace_access_mode(locked, PTRACE_MODE_ATTACH_REALCREDS, &tracee)?;
1203 do_attach(current_task.thread_group(), weak_task.clone(), attach_type, PtraceOptions::empty())?;
1204 if attach_type == PtraceAttachType::Attach {
1205 send_standard_signal(
1206 locked.cast_locked::<MmDumpable>(),
1207 &tracee,
1208 SignalInfo::default(SIGSTOP),
1209 );
1210 } else if attach_type == PtraceAttachType::Seize {
1211 if let Some(task_ref) = weak_task.upgrade() {
1213 let mut state = task_ref.write();
1214 if let Some(ptrace) = &mut state.ptrace {
1215 ptrace.set_options_from_bits(data.ptr() as u32)?;
1216 }
1217 }
1218 }
1219 Ok(starnix_syscalls::SUCCESS)
1220}
1221
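/// Implements PTRACE_PEEKUSER: reads the word at `offset` into the tracee's
/// saved user area; only offsets within the saved register block are supported.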
1222pub fn ptrace_peekuser(thread_state: &mut ThreadState, offset: usize) -> Result<usize, Errno> {
1226 #[cfg(target_arch = "x86_64")]
1227 if offset >= std::mem::size_of::<user>() {
1228 return error!(EIO);
1229 }
1230 if offset < UserRegsStructPtr::size_of_object_for(thread_state) {
1231 let result = thread_state.get_user_register(offset)?;
1232 return Ok(result);
1233 }
1234 error!(EIO)
1235}
1236
1237pub fn ptrace_pokeuser(
1238 state: &mut TaskMutableState,
1239 value: usize,
1240 offset: usize,
1241) -> Result<(), Errno> {
1242 if let Some(ref mut thread_state) = state.captured_thread_state {
1243 thread_state.dirty = true;
1244
1245 #[cfg(target_arch = "x86_64")]
1246 if offset >= std::mem::size_of::<user>() {
1247 return error!(EIO);
1248 }
1249 if offset < UserRegsStructPtr::size_of_object_for(thread_state.as_ref()) {
1250 return thread_state.thread_state.set_user_register(offset, value);
1251 }
1252 }
1253 error!(EIO)
1254}
1255
1256pub fn ptrace_getregset(
1257 current_task: &CurrentTask,
1258 thread_state: &mut ThreadState,
1259 regset_type: ElfNoteType,
1260 base: u64,
1261 len: &mut usize,
1262) -> Result<(), Errno> {
1263 match regset_type {
1264 ElfNoteType::PrStatus => {
1265 let user_regs_struct_len = UserRegsStructPtr::size_of_object_for(thread_state);
1266 if *len < user_regs_struct_len {
1267 return error!(EINVAL);
1268 }
1269 *len = user_regs_struct_len;
1270 let mut i: usize = 0;
1271 let mut reg_ptr = LongPtr::new(thread_state, base);
1272 while i < *len {
1273 let mut val = None;
1274 thread_state
1275 .registers
1276 .apply_user_register(i, &mut |register| val = Some(*register as usize))?;
1277 if let Some(val) = val {
1278 current_task.write_multi_arch_object(reg_ptr, val as u64)?;
1279 }
1280 i += reg_ptr.size_of_object();
1281 reg_ptr = reg_ptr.next()?;
1282 }
1283 Ok(())
1284 }
1285 _ => {
1286 error!(EINVAL)
1287 }
1288 }
1289}
1290
1291pub fn ptrace_setregset(
1292 current_task: &CurrentTask,
1293 thread_state: &mut ThreadState,
1294 regset_type: ElfNoteType,
1295 base: u64,
1296 mut len: usize,
1297) -> Result<(), Errno> {
1298 match regset_type {
1299 ElfNoteType::PrStatus => {
1300 let user_regs_struct_len = UserRegsStructPtr::size_of_object_for(thread_state);
1301 if len < user_regs_struct_len {
1302 return error!(EINVAL);
1303 }
1304 len = user_regs_struct_len;
1305 let mut i: usize = 0;
1306 let mut reg_ptr = LongPtr::new(thread_state, base);
1307 while i < len {
1308 let val = current_task.read_multi_arch_object(reg_ptr)?;
1309 thread_state.registers.apply_user_register(i, &mut |register| *register = val)?;
1310 i += reg_ptr.size_of_object();
1311 reg_ptr = reg_ptr.next()?;
1312 }
1313 Ok(())
1314 }
1315 _ => {
1316 error!(EINVAL)
1317 }
1318 }
1319}
1320
1321#[inline(never)]
1322pub fn ptrace_syscall_enter(locked: &mut Locked<Unlocked>, current_task: &mut CurrentTask) {
1323 let block = {
1324 let mut state = current_task.write();
1325 if state.ptrace.is_some() {
1326 current_task.trace_syscalls.store(false, Ordering::Relaxed);
1327 let mut sig = SignalInfo::default(SIGTRAP);
1328 sig.code = (linux_uapi::SIGTRAP | 0x80) as i32;
1329 if state
1330 .ptrace
1331 .as_ref()
1332 .is_some_and(|ptrace| ptrace.has_option(PtraceOptions::TRACESYSGOOD))
1333 {
1334 sig.signal.set_ptrace_syscall_bit();
1335 }
1336 state.set_stopped(StopState::SyscallEnterStopping, Some(sig), None, None);
1337 true
1338 } else {
1339 false
1340 }
1341 };
1342 if block {
1343 current_task.block_while_stopped(locked);
1344 }
1345}
1346
1347#[inline(never)]
1348pub fn ptrace_syscall_exit(
1349 locked: &mut Locked<Unlocked>,
1350 current_task: &mut CurrentTask,
1351 is_error: bool,
1352) {
1353 let block = {
1354 let mut state = current_task.write();
1355 current_task.trace_syscalls.store(false, Ordering::Relaxed);
1356 if state.ptrace.is_some() {
1357 let mut sig = SignalInfo::default(SIGTRAP);
1358 sig.code = (linux_uapi::SIGTRAP | 0x80) as i32;
1359 if state
1360 .ptrace
1361 .as_ref()
1362 .is_some_and(|ptrace| ptrace.has_option(PtraceOptions::TRACESYSGOOD))
1363 {
1364 sig.signal.set_ptrace_syscall_bit();
1365 }
1366
1367 state.set_stopped(StopState::SyscallExitStopping, Some(sig), None, None);
1368 if let Some(ptrace) = &mut state.ptrace {
1369 ptrace.last_syscall_was_error = is_error;
1370 }
1371 true
1372 } else {
1373 false
1374 }
1375 };
1376 if block {
1377 current_task.block_while_stopped(locked);
1378 }
1379}
1380
1381#[cfg(test)]
1382mod tests {
1383 use super::*;
1384 use crate::task::syscalls::sys_prctl;
1385 use crate::testing::{create_task, spawn_kernel_and_run};
1386 use starnix_uapi::PR_SET_PTRACER;
1387 use starnix_uapi::auth::CAP_SYS_PTRACE;
1388
1389 #[::fuchsia::test]
1390 async fn test_set_ptracer() {
1391 spawn_kernel_and_run(async |locked, current_task| {
1392 let kernel = current_task.kernel().clone();
1393 let mut tracee = create_task(locked, &kernel, "tracee");
1394 let mut tracer = create_task(locked, &kernel, "tracer");
1395
1396 let mut creds = tracer.real_creds().clone();
1397 creds.cap_effective &= !CAP_SYS_PTRACE;
1398 tracer.set_creds(creds);
1399
1400 kernel.ptrace_scope.store(security::yama::SCOPE_RESTRICTED, Ordering::Relaxed);
1401 assert_eq!(
1402 sys_prctl(locked, &mut tracee, PR_SET_PTRACER, 0xFFF, 0, 0, 0),
1403 error!(EINVAL)
1404 );
1405
1406 assert_eq!(
1407 ptrace_attach(
1408 locked,
1409 &mut tracer,
1410 tracee.as_ref().task.tid,
1411 PtraceAttachType::Attach,
1412 UserAddress::NULL,
1413 ),
1414 error!(EPERM)
1415 );
1416
1417 assert!(
1418 sys_prctl(
1419 locked,
1420 &mut tracee,
1421 PR_SET_PTRACER,
1422 tracer.thread_group().leader as u64,
1423 0,
1424 0,
1425 0
1426 )
1427 .is_ok()
1428 );
1429
1430 let mut not_tracer = create_task(locked, &kernel, "not-tracer");
1431 not_tracer.set_creds(tracer.real_creds());
1432 assert_eq!(
1433 ptrace_attach(
1434 locked,
1435 &mut not_tracer,
1436 tracee.as_ref().task.tid,
1437 PtraceAttachType::Attach,
1438 UserAddress::NULL,
1439 ),
1440 error!(EPERM)
1441 );
1442
1443 assert!(
1444 ptrace_attach(
1445 locked,
1446 &mut tracer,
1447 tracee.as_ref().task.tid,
1448 PtraceAttachType::Attach,
1449 UserAddress::NULL,
1450 )
1451 .is_ok()
1452 );
1453 })
1454 .await;
1455 }
1456
1457 #[::fuchsia::test]
1458 async fn test_set_ptracer_any() {
1459 spawn_kernel_and_run(async |locked, current_task| {
1460 let kernel = current_task.kernel().clone();
1461 let mut tracee = create_task(locked, &kernel, "tracee");
1462 let mut tracer = create_task(locked, &kernel, "tracer");
1463
1464 let mut creds = tracer.real_creds().clone();
1465 creds.cap_effective &= !CAP_SYS_PTRACE;
1466 tracer.set_creds(creds);
1467
1468 kernel.ptrace_scope.store(security::yama::SCOPE_RESTRICTED, Ordering::Relaxed);
1469 assert_eq!(
1470 sys_prctl(locked, &mut tracee, PR_SET_PTRACER, 0xFFF, 0, 0, 0),
1471 error!(EINVAL)
1472 );
1473
1474 assert_eq!(
1475 ptrace_attach(
1476 locked,
1477 &mut tracer,
1478 tracee.as_ref().task.tid,
1479 PtraceAttachType::Attach,
1480 UserAddress::NULL,
1481 ),
1482 error!(EPERM)
1483 );
1484
1485 assert!(
1486 sys_prctl(locked, &mut tracee, PR_SET_PTRACER, PR_SET_PTRACER_ANY as u64, 0, 0, 0)
1487 .is_ok()
1488 );
1489
1490 assert!(
1491 ptrace_attach(
1492 locked,
1493 &mut tracer,
1494 tracee.as_ref().task.tid,
1495 PtraceAttachType::Attach,
1496 UserAddress::NULL,
1497 )
1498 .is_ok()
1499 );
1500 })
1501 .await;
1502 }
1503}