starnix_core/task/ptrace.rs

1// Copyright 2023 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5use crate::arch::execution::new_syscall_from_state;
6use crate::mm::{IOVecPtr, MemoryAccessor, MemoryAccessorExt};
7use crate::security;
8use crate::signals::syscalls::WaitingOptions;
9use crate::signals::{
10    SI_HEADER_SIZE, SignalDetail, SignalInfo, SignalInfoHeader, SignalSource, send_signal_first,
11    send_standard_signal,
12};
13use crate::task::waiter::WaitQueue;
14use crate::task::{
15    CurrentTask, PidTable, ProcessSelector, StopState, Task, TaskMutableState, ThreadGroup,
16    ThreadState, ZombieProcess,
17};
18use bitflags::bitflags;
19use starnix_logging::track_stub;
20use starnix_sync::{LockBefore, Locked, MmDumpable, ThreadGroupLimits, Unlocked};
21use starnix_syscalls::SyscallResult;
22use starnix_syscalls::decls::SyscallDecl;
23use starnix_types::ownership::{OwnedRef, Releasable, ReleaseGuard, WeakRef};
24use starnix_uapi::auth::PTRACE_MODE_ATTACH_REALCREDS;
25use starnix_uapi::elf::ElfNoteType;
26use starnix_uapi::errors::Errno;
27use starnix_uapi::signals::{SIGKILL, SIGSTOP, SIGTRAP, SigSet, Signal, UncheckedSignal};
28#[allow(unused_imports)]
29use starnix_uapi::user_address::ArchSpecific;
30use starnix_uapi::user_address::{LongPtr, MultiArchUserRef, UserAddress, UserRef};
31use starnix_uapi::{
32    PTRACE_CONT, PTRACE_DETACH, PTRACE_EVENT_CLONE, PTRACE_EVENT_EXEC, PTRACE_EVENT_EXIT,
33    PTRACE_EVENT_FORK, PTRACE_EVENT_SECCOMP, PTRACE_EVENT_STOP, PTRACE_EVENT_VFORK,
34    PTRACE_EVENT_VFORK_DONE, PTRACE_GET_SYSCALL_INFO, PTRACE_GETEVENTMSG, PTRACE_GETREGSET,
35    PTRACE_GETSIGINFO, PTRACE_GETSIGMASK, PTRACE_INTERRUPT, PTRACE_KILL, PTRACE_LISTEN,
36    PTRACE_O_EXITKILL, PTRACE_O_TRACECLONE, PTRACE_O_TRACEEXEC, PTRACE_O_TRACEEXIT,
37    PTRACE_O_TRACEFORK, PTRACE_O_TRACESYSGOOD, PTRACE_O_TRACEVFORK, PTRACE_O_TRACEVFORKDONE,
38    PTRACE_PEEKDATA, PTRACE_PEEKTEXT, PTRACE_PEEKUSR, PTRACE_POKEDATA, PTRACE_POKETEXT,
39    PTRACE_POKEUSR, PTRACE_SETOPTIONS, PTRACE_SETREGSET, PTRACE_SETSIGINFO, PTRACE_SETSIGMASK,
40    PTRACE_SYSCALL, PTRACE_SYSCALL_INFO_ENTRY, PTRACE_SYSCALL_INFO_EXIT, PTRACE_SYSCALL_INFO_NONE,
41    SI_MAX_SIZE, clone_args, errno, error, pid_t, ptrace_syscall_info, tid_t, uapi,
42};
43
44use std::collections::BTreeMap;
45use std::sync::atomic::Ordering;
46use std::sync::{Arc, Weak};
47use zerocopy::FromBytes;
48
49#[cfg(target_arch = "x86_64")]
50use starnix_uapi::{PTRACE_GETREGS, user};
51
52#[cfg(target_arch = "aarch64")]
53use starnix_uapi::arch32::PTRACE_GETREGS;
54
55type UserRegsStructPtr =
56    MultiArchUserRef<starnix_uapi::user_regs_struct, starnix_uapi::arch32::user_regs_struct>;
57
58uapi::check_arch_independent_layout! {
59    ptrace_syscall_info {
60        op,
61        arch,
62        instruction_pointer,
63        stack_pointer,
64        __bindgen_anon_1,
65    }
66
67    ptrace_syscall_info__bindgen_ty_1 {
68        entry,
69        exit,
70        seccomp,
71    }
72
73    ptrace_syscall_info__bindgen_ty_1__bindgen_ty_1 {
74        nr,
75        args,
76    }
77
78    ptrace_syscall_info__bindgen_ty_1__bindgen_ty_2 {
79        rval,
80        is_error,
81    }
82
83    ptrace_syscall_info__bindgen_ty_1__bindgen_ty_3 {
84        nr,
85        args,
86        ret_data,
87    }
88}
89
90/// Most of the time, for the purposes of ptrace, a tracee is either "going"
91/// or "stopped".  However, after certain ptrace calls, there are special rules
92/// to be followed.
93#[derive(Clone, Default, PartialEq)]
94pub enum PtraceStatus {
95    /// Proceed as otherwise indicated by the task's stop status.
96    #[default]
97    Default,
98    /// Resuming after a ptrace_cont with a signal, so do not stop for signal-delivery-stop
99    Continuing,
100    /// "The state of the tracee after PTRACE_LISTEN is somewhat of a
101    /// gray area: it is not in any ptrace-stop (ptrace commands won't work on it,
102    /// and it will deliver waitpid(2) notifications), but it also may be considered
103    /// "stopped" because it is not executing instructions (is not scheduled), and
104    /// if it was in group-stop before PTRACE_LISTEN, it will not respond to signals
105    /// until SIGCONT is received."
106    Listening,
107}
108
109impl PtraceStatus {
110    pub fn is_continuing(&self) -> bool {
111        *self == PtraceStatus::Continuing
112    }
113}
114
115/// Indicates the way that ptrace attached to the task.
116#[derive(Copy, Clone, PartialEq)]
117pub enum PtraceAttachType {
118    /// Attached with PTRACE_ATTACH
119    Attach,
120    /// Attached with PTRACE_SEIZE
121    Seize,
122}
123
124bitflags! {
125    #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
126    #[repr(transparent)]
127    pub struct PtraceOptions: u32 {
128        const EXITKILL = starnix_uapi::PTRACE_O_EXITKILL;
129        const TRACECLONE = starnix_uapi::PTRACE_O_TRACECLONE;
130        const TRACEEXEC = starnix_uapi::PTRACE_O_TRACEEXEC;
131        const TRACEEXIT = starnix_uapi::PTRACE_O_TRACEEXIT;
132        const TRACEFORK = starnix_uapi::PTRACE_O_TRACEFORK;
133        const TRACESYSGOOD = starnix_uapi::PTRACE_O_TRACESYSGOOD;
134        const TRACEVFORK = starnix_uapi::PTRACE_O_TRACEVFORK;
135        const TRACEVFORKDONE = starnix_uapi::PTRACE_O_TRACEVFORKDONE;
136        const TRACESECCOMP = starnix_uapi::PTRACE_O_TRACESECCOMP;
137        const SUSPEND_SECCOMP = starnix_uapi::PTRACE_O_SUSPEND_SECCOMP;
138    }
139}
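
// Illustrative sketch (not exercised by this module's tests): a raw
// PTRACE_SETOPTIONS mask from userspace round-trips through the
// bitflags-generated `from_bits`, which is how `PtraceState::set_options_from_bits`
// below validates it.  Unknown bits make `from_bits` return `None`.
//
//     let mask = starnix_uapi::PTRACE_O_TRACEFORK | starnix_uapi::PTRACE_O_TRACEEXIT;
//     let opts = PtraceOptions::from_bits(mask).expect("only known option bits");
//     assert!(opts.contains(PtraceOptions::TRACEFORK | PtraceOptions::TRACEEXIT));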
140
141#[repr(u32)]
142#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)]
143pub enum PtraceEvent {
144    #[default]
145    None = 0,
146    Stop = PTRACE_EVENT_STOP,
147    Clone = PTRACE_EVENT_CLONE,
148    Fork = PTRACE_EVENT_FORK,
149    Vfork = PTRACE_EVENT_VFORK,
150    VforkDone = PTRACE_EVENT_VFORK_DONE,
151    Exec = PTRACE_EVENT_EXEC,
152    Exit = PTRACE_EVENT_EXIT,
153    Seccomp = PTRACE_EVENT_SECCOMP,
154}
155
156impl PtraceEvent {
157    pub fn from_option(option: &PtraceOptions) -> Self {
158        match *option {
159            PtraceOptions::TRACECLONE => PtraceEvent::Clone,
160            PtraceOptions::TRACEFORK => PtraceEvent::Fork,
161            PtraceOptions::TRACEVFORK => PtraceEvent::Vfork,
162            PtraceOptions::TRACEVFORKDONE => PtraceEvent::VforkDone,
163            PtraceOptions::TRACEEXEC => PtraceEvent::Exec,
164            PtraceOptions::TRACEEXIT => PtraceEvent::Exit,
165            PtraceOptions::TRACESECCOMP => PtraceEvent::Seccomp,
166            _ => unreachable!("Bad ptrace event specified"),
167        }
168    }
169}
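
// A minimal sketch of the option-to-event mapping above (illustrative only):
// each PTRACE_O_TRACE* option corresponds to exactly one PTRACE_EVENT_* value,
// and passing anything other than one of those single options panics via the
// `unreachable!` arm.
//
//     assert_eq!(PtraceEvent::from_option(&PtraceOptions::TRACEEXEC), PtraceEvent::Exec);
//     assert_eq!(PtraceEvent::from_option(&PtraceOptions::TRACEVFORKDONE), PtraceEvent::VforkDone);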
170
171/// Information about what caused a ptrace-event-stop.
172pub struct PtraceEventData {
173    /// The event that caused the task to stop (e.g., PTRACE_EVENT_FORK or PTRACE_EVENT_EXIT).
174    pub event: PtraceEvent,
175
176    /// The message associated with the event (e.g., tid, exit status).
177    pub msg: u64,
178}
179
180impl PtraceEventData {
181    pub fn new(option: PtraceOptions, msg: u64) -> Self {
182        Self { event: PtraceEvent::from_option(&option), msg }
183    }
184    pub fn new_from_event(event: PtraceEvent, msg: u64) -> Self {
185        Self { event, msg }
186    }
187}
188
189/// The ptrace state that a newly created task needs in order to connect to the
190/// same tracer as the task that cloned it.
191#[derive(Clone)]
192pub struct PtraceCoreState {
193    /// The pid of the tracer
194    pub pid: pid_t,
195
196    /// Whether the attach was a seize or an attach.  There are a few subtle
197    /// differences in behavior of the different attach types - see ptrace(2).
198    pub attach_type: PtraceAttachType,
199
200    /// The options set by PTRACE_SETOPTIONS
201    pub options: PtraceOptions,
202
203    /// The tracer waits on this WaitQueue to find out if the tracee has done
204    /// something worth being notified about.
205    pub tracer_waiters: Arc<WaitQueue>,
206}
207
208impl PtraceCoreState {
209    pub fn has_option(&self, option: PtraceOptions) -> bool {
210        self.options.contains(option)
211    }
212}
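
// Note that `has_option` forwards to the bitflags `contains`, so a combined
// value asks whether *all* of those options are set.  A minimal illustrative
// sketch (field values chosen arbitrarily):
//
//     let core = PtraceCoreState {
//         pid: 1,
//         attach_type: PtraceAttachType::Seize,
//         options: PtraceOptions::TRACEFORK,
//         tracer_waiters: Arc::new(WaitQueue::default()),
//     };
//     assert!(core.has_option(PtraceOptions::TRACEFORK));
//     assert!(!core.has_option(PtraceOptions::TRACEFORK | PtraceOptions::TRACEVFORK));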
213
214/// Per-task ptrace-related state
215pub struct PtraceState {
216    /// The core state of the tracer, which can be shared between processes
217    pub core_state: PtraceCoreState,
218
219    /// The tracee waits on this WaitQueue to find out when it should stop or wake
220    /// for ptrace-related shenanigans.
221    pub tracee_waiters: WaitQueue,
222
223    /// The signal that caused the task to enter the given state (for
224    /// signal-delivery-stop)
225    pub last_signal: Option<SignalInfo>,
226
227    /// Whether waitpid() will return the last signal.  The presence of last_signal
228    /// can't be used for that, because last_signal has to be kept around for PTRACE_GETSIGINFO.
229    pub last_signal_waitable: bool,
230
231    /// Data about the PTRACE_EVENT that caused the most recent stop (if any).
232    pub event_data: Option<PtraceEventData>,
233
234    /// Indicates whether the last ptrace call put this thread into a state with
235    /// special semantics for stopping behavior.
236    pub stop_status: PtraceStatus,
237
238    /// For SYSCALL_INFO_EXIT
239    pub last_syscall_was_error: bool,
240}
241
242impl PtraceState {
243    pub fn new(pid: pid_t, attach_type: PtraceAttachType, options: PtraceOptions) -> Box<Self> {
244        Box::new(PtraceState {
245            core_state: PtraceCoreState {
246                pid,
247                attach_type,
248                options,
249                tracer_waiters: Arc::new(WaitQueue::default()),
250            },
251            tracee_waiters: WaitQueue::default(),
252            last_signal: None,
253            last_signal_waitable: false,
254            event_data: None,
255            stop_status: PtraceStatus::default(),
256            last_syscall_was_error: false,
257        })
258    }
259
260    pub fn get_pid(&self) -> pid_t {
261        self.core_state.pid
262    }
263
264    pub fn set_pid(&mut self, pid: pid_t) {
265        self.core_state.pid = pid;
266    }
267
268    pub fn is_seized(&self) -> bool {
269        self.core_state.attach_type == PtraceAttachType::Seize
270    }
271
272    pub fn get_attach_type(&self) -> PtraceAttachType {
273        self.core_state.attach_type
274    }
275
276    pub fn is_waitable(&self, stop: StopState, options: &WaitingOptions) -> bool {
277        if self.stop_status == PtraceStatus::Listening {
278            // Waiting for any change of state
279            return self.last_signal_waitable;
280        }
281        if !options.wait_for_continued && !stop.is_stopping_or_stopped() {
282            // Only waiting for stops, but is not stopped.
283            return false;
284        }
285        self.last_signal_waitable && !stop.is_in_progress()
286    }
287
288    pub fn set_last_signal(&mut self, mut signal: Option<SignalInfo>) {
289        if let Some(ref mut siginfo) = signal {
290            // We don't want waiters to think the process was unstopped because
291            // of a sigkill. They will get woken when the process dies.
292            if siginfo.signal == SIGKILL {
293                return;
294            }
295            self.last_signal_waitable = true;
296            self.last_signal = signal;
297        }
298    }
299
300    pub fn set_last_event(&mut self, event: Option<PtraceEventData>) {
301        if event.is_some() {
302            self.event_data = event;
303        }
304    }
305
306    // Gets the last signal, and optionally clears the wait state of the ptrace.
307    pub fn get_last_signal(&mut self, keep_signal_waitable: bool) -> Option<SignalInfo> {
308        self.last_signal_waitable = keep_signal_waitable;
309        self.last_signal.clone()
310    }
311
312    pub fn has_option(&self, option: PtraceOptions) -> bool {
313        self.core_state.has_option(option)
314    }
315
316    pub fn set_options_from_bits(&mut self, option: u32) -> Result<(), Errno> {
317        if let Some(options) = PtraceOptions::from_bits(option) {
318            self.core_state.options = options;
319            Ok(())
320        } else {
321            error!(EINVAL)
322        }
323    }
324
325    pub fn get_options(&self) -> PtraceOptions {
326        self.core_state.options
327    }
328
329    /// Returns enough of the ptrace state to propagate it to a fork / clone / vforked task.
330    pub fn get_core_state(&self) -> PtraceCoreState {
331        self.core_state.clone()
332    }
333
334    pub fn tracer_waiters(&self) -> &Arc<WaitQueue> {
335        &self.core_state.tracer_waiters
336    }
337
338    /// Returns an (i32, ptrace_syscall_info) pair.  The ptrace_syscall_info is
339    /// the info associated with the syscall that the target task is currently
340    /// blocked on.  The i32 is (per ptrace(2)) "the number of bytes available to
341    /// be written by the kernel.  If the size of the data to be written by the
342    /// kernel exceeds the size specified by the addr argument, the output data
343    /// is truncated."  ptrace(PTRACE_GET_SYSCALL_INFO) returns that value.
344    pub fn get_target_syscall(
345        &self,
346        target: &Task,
347        state: &TaskMutableState,
348    ) -> Result<(i32, ptrace_syscall_info), Errno> {
349        #[cfg(target_arch = "x86_64")]
350        let arch = starnix_uapi::AUDIT_ARCH_X86_64;
351        #[cfg(target_arch = "aarch64")]
352        let arch = starnix_uapi::AUDIT_ARCH_AARCH64;
353        #[cfg(target_arch = "riscv64")]
354        let arch = starnix_uapi::AUDIT_ARCH_RISCV64;
355
356        let mut info = ptrace_syscall_info { arch, ..Default::default() };
357        let mut info_len = memoffset::offset_of!(ptrace_syscall_info, __bindgen_anon_1);
358
359        match &state.captured_thread_state {
360            Some(captured) => {
361                let registers = captured.thread_state.registers;
362                info.instruction_pointer = registers.instruction_pointer_register();
363                info.stack_pointer = registers.stack_pointer_register();
364                #[cfg(target_arch = "aarch64")]
365                if captured.thread_state.arch_width.is_arch32() {
366                    // If any additional arch32 archs are added, just use a cfg
367                    // macro here.
368                    info.arch = starnix_uapi::AUDIT_ARCH_ARM;
369                }
370                match target.load_stopped() {
371                    StopState::SyscallEnterStopped => {
372                        let syscall_decl = SyscallDecl::from_number(
373                            registers.syscall_register(),
374                            captured.thread_state.arch_width,
375                        );
376                        let syscall = new_syscall_from_state(syscall_decl, &captured.thread_state);
377                        info.op = PTRACE_SYSCALL_INFO_ENTRY as u8;
378                        let entry = linux_uapi::ptrace_syscall_info__bindgen_ty_1__bindgen_ty_1 {
379                            nr: syscall.decl.number,
380                            args: [
381                                syscall.arg0.raw(),
382                                syscall.arg1.raw(),
383                                syscall.arg2.raw(),
384                                syscall.arg3.raw(),
385                                syscall.arg4.raw(),
386                                syscall.arg5.raw(),
387                            ],
388                        };
389                        info_len += memoffset::offset_of!(
390                            linux_uapi::ptrace_syscall_info__bindgen_ty_1__bindgen_ty_1,
391                            args
392                        ) + std::mem::size_of_val(&entry.args);
393                        info.__bindgen_anon_1.entry = entry;
394                    }
395                    StopState::SyscallExitStopped => {
396                        info.op = PTRACE_SYSCALL_INFO_EXIT as u8;
397                        let exit = linux_uapi::ptrace_syscall_info__bindgen_ty_1__bindgen_ty_2 {
398                            rval: registers.return_register() as i64,
399                            is_error: state
400                                .ptrace
401                                .as_ref()
402                                .map_or(0, |ptrace| ptrace.last_syscall_was_error as u8),
403                            ..Default::default()
404                        };
405                        info_len += memoffset::offset_of!(
406                            linux_uapi::ptrace_syscall_info__bindgen_ty_1__bindgen_ty_2,
407                            is_error
408                        ) + std::mem::size_of_val(&exit.is_error);
409                        info.__bindgen_anon_1.exit = exit;
410                    }
411                    _ => {
412                        info.op = PTRACE_SYSCALL_INFO_NONE as u8;
413                    }
414                };
415            }
416            _ => (),
417        }
418        Ok((info_len as i32, info))
419    }
420
421    /// Gets the core state for this ptrace if its options match the kind of
422    /// clone described by |clone_args|.  Returns a pair: the trace option you
423    /// *should* use (sometimes this is different from the one that the caller
424    /// thinks it should use), and the core state, if any.
425    pub fn get_core_state_for_clone(
426        &self,
427        clone_args: &clone_args,
428    ) -> (PtraceOptions, Option<PtraceCoreState>) {
429        // ptrace(2): If the tracee calls clone(2) with the CLONE_VFORK flag,
430        // PTRACE_EVENT_VFORK will be delivered instead if PTRACE_O_TRACEVFORK
431        // is set, otherwise if the tracee calls clone(2) with the exit signal
432        // set to SIGCHLD, PTRACE_EVENT_FORK will be delivered if
433        // PTRACE_O_TRACEFORK is set.
434        let trace_type = if clone_args.flags & (starnix_uapi::CLONE_UNTRACED as u64) != 0 {
435            PtraceOptions::empty()
436        } else {
437            if clone_args.flags & (starnix_uapi::CLONE_VFORK as u64) != 0 {
438                PtraceOptions::TRACEVFORK
439            } else if clone_args.exit_signal != (starnix_uapi::SIGCHLD as u64) {
440                PtraceOptions::TRACECLONE
441            } else {
442                PtraceOptions::TRACEFORK
443            }
444        };
445
446        if !self.has_option(trace_type)
447            && (clone_args.flags & (starnix_uapi::CLONE_PTRACE as u64) == 0)
448        {
449            return (PtraceOptions::empty(), None);
450        }
451
452        (trace_type, Some(self.get_core_state()))
453    }
454}
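
// Illustrative sketch of the clone-type selection in `get_core_state_for_clone`.
// `ptrace_state` is an assumed `PtraceState` binding, and the uapi `clone_args`
// struct is assumed to derive `Default`, as other uapi structs used in this file do.
//
//     let mut args = clone_args::default();
//     args.flags = starnix_uapi::CLONE_VFORK as u64;
//     // With PTRACE_O_TRACEVFORK set on `ptrace_state`, this returns
//     // (PtraceOptions::TRACEVFORK, Some(core)); without it (and without
//     // CLONE_PTRACE in the flags), it returns (PtraceOptions::empty(), None).
//     let (kind, core) = ptrace_state.get_core_state_for_clone(&args);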
455
456/// A zombie that must be delivered to a tracer process for a traced process.
457struct TracedZombie {
458    /// An artificial zombie that must be delivered to the tracer program.
459    artificial_zombie: ZombieProcess,
460
461    /// An optional real zombie to be sent to the given ThreadGroup after the zombie has been
462    /// delivered to the tracer.
463    delegate: Option<(Weak<ThreadGroup>, OwnedRef<ZombieProcess>)>,
464}
465
466impl Releasable for TracedZombie {
467    type Context<'a> = &'a mut PidTable;
468
469    fn release<'a>(self, pids: &'a mut PidTable) {
470        self.artificial_zombie.release(pids);
471        if let Some((_, z)) = self.delegate {
472            z.release(pids);
473        }
474    }
475}
476
477impl TracedZombie {
478    fn new(artificial_zombie: ZombieProcess) -> ReleaseGuard<Self> {
479        ReleaseGuard::from(Self { artificial_zombie, delegate: None })
480    }
481
482    fn new_with_delegate(
483        artificial_zombie: ZombieProcess,
484        delegate: (Weak<ThreadGroup>, OwnedRef<ZombieProcess>),
485    ) -> ReleaseGuard<Self> {
486        ReleaseGuard::from(Self { artificial_zombie, delegate: Some(delegate) })
487    }
488
489    fn set_parent(
490        &mut self,
491        new_zombie: Option<OwnedRef<ZombieProcess>>,
492        new_parent: &ThreadGroup,
493    ) {
494        if let Some(new_zombie) = new_zombie {
495            self.delegate = Some((new_parent.weak_self.clone(), new_zombie));
496        } else {
497            self.delegate = self.delegate.take().map(|(_, z)| (new_parent.weak_self.clone(), z));
498        }
499    }
500}
501
502/// A list of zombie processes that were traced by a given tracer, but which
503/// have not yet notified that tracer of their exit.  Once the tracer is
504/// notified, the original parent will be notified.
505#[derive(Default)]
506pub struct ZombiePtracees {
507    /// A list of zombies that have to be delivered to the ptracer.  The key is
508    /// the tid of the traced process.
509    zombies: BTreeMap<tid_t, ReleaseGuard<TracedZombie>>,
510}
511
512impl ZombiePtracees {
513    pub fn new() -> Self {
514        Self::default()
515    }
516
517    /// Adds a zombie tracee to the list, but does not provide a parent task to
518    /// notify when the tracer is done.
519    pub fn add(&mut self, pids: &mut PidTable, tid: tid_t, zombie: ZombieProcess) {
520        if let std::collections::btree_map::Entry::Vacant(entry) = self.zombies.entry(tid) {
521            entry.insert(TracedZombie::new(zombie));
522        } else {
523            zombie.release(pids);
524        }
525    }
526
527    /// Delete any zombie ptracees for the given tid.
528    pub fn remove(&mut self, pids: &mut PidTable, tid: tid_t) {
529        self.zombies.remove(&tid).release(pids);
530    }
531
532    pub fn is_empty(&self) -> bool {
533        self.zombies.is_empty()
534    }
535
536    /// Provide a parent task and a zombie to notify when the tracer has been
537    /// notified.
538    pub fn set_parent_of(
539        &mut self,
540        tracee: tid_t,
541        new_zombie: Option<OwnedRef<ZombieProcess>>,
542        new_parent: &ThreadGroup,
543    ) {
544        match self.zombies.entry(tracee) {
545            std::collections::btree_map::Entry::Vacant(entry) => {
546                if let Some(new_zombie) = new_zombie {
547                    entry.insert(TracedZombie::new_with_delegate(
548                        new_zombie.as_artificial(),
549                        (new_parent.weak_self.clone(), new_zombie),
550                    ));
551                }
552            }
553            std::collections::btree_map::Entry::Occupied(mut entry) => {
554                entry.get_mut().set_parent(new_zombie, new_parent);
555            }
556        }
557    }
558
559    /// When a parent dies without having been notified, replace it with a given
560    /// new parent.
561    pub fn reparent(old_parent: &ThreadGroup, new_parent: &ThreadGroup) {
562        let mut lockless_list = old_parent.read().deferred_zombie_ptracers.clone();
563
564        for deferred_zombie_ptracer in &lockless_list {
565            if let Some(tg) = deferred_zombie_ptracer.tracer_thread_group_key.upgrade() {
566                tg.write().zombie_ptracees.set_parent_of(
567                    deferred_zombie_ptracer.tracee_tid,
568                    None,
569                    new_parent,
570                );
571            }
572        }
573        let mut new_state = new_parent.write();
574        new_state.deferred_zombie_ptracers.append(&mut lockless_list);
575    }
576
577    /// Empty the table and notify all of the remaining parents.  Used if the
578    /// tracer terminates or detaches without acknowledging all pending tracees.
579    pub fn release(&mut self, pids: &mut PidTable) {
580        let mut entry = self.zombies.pop_first();
581        while let Some((_, mut zombie)) = entry {
582            if let Some((tg, z)) = zombie.delegate.take() {
583                if let Some(tg) = tg.upgrade() {
584                    tg.do_zombie_notifications(z);
585                }
586            }
587            zombie.release(pids);
588
589            entry = self.zombies.pop_first();
590        }
591    }
592
593    /// Returns true iff there is a zombie waiting to be delivered to the tracers matching the
594    /// given selector.
595    pub fn has_zombie_matching(&self, selector: &ProcessSelector) -> bool {
596        self.zombies.values().any(|z| z.artificial_zombie.matches_selector(selector))
597    }
598
599    /// Returns true iff the given `tid` is a traced thread that needs to deliver a zombie to the
600    /// tracer.
601    pub fn has_tracee(&self, tid: tid_t) -> bool {
602        self.zombies.contains_key(&tid)
603    }
604
605    /// Returns a zombie matching the given selector and options, and
606    /// (optionally) a thread group to notify after the caller has consumed that
607    /// zombie.
608    pub fn get_waitable_entry(
609        &mut self,
610        selector: &ProcessSelector,
611        options: &WaitingOptions,
612    ) -> Option<(ZombieProcess, Option<(Weak<ThreadGroup>, OwnedRef<ZombieProcess>)>)> {
613        // We look for the last zombie in the map that matches the pid
614        // selector and the waiting options.
615        let Some((t, found_zombie)) = self
616            .zombies
617            .iter()
618            .map(|(t, z)| (*t, &z.artificial_zombie))
619            .rfind(|(_, zombie)| zombie.matches_selector_and_waiting_option(selector, options))
620        else {
621            return None;
622        };
623
624        let result;
625        if !options.keep_waitable_state {
626            // Maybe notify child waiters.
627            result = self.zombies.remove(&t).map(|traced_zombie| {
628                let traced_zombie = ReleaseGuard::take(traced_zombie);
629                (traced_zombie.artificial_zombie, traced_zombie.delegate)
630            });
631        } else {
632            result = Some((found_zombie.as_artificial(), None));
633        }
634
635        result
636    }
637}
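
// Sketch of how a waiter typically consumes `get_waitable_entry` (illustrative;
// `zombie_ptracees`, `selector`, and `options` are assumed bindings), mirroring
// the delegate handling in `release` above:
//
//     if let Some((zombie, delegate)) = zombie_ptracees.get_waitable_entry(&selector, &options) {
//         // `zombie` is what gets reported to the tracer.  If the waitable
//         // state was consumed, `delegate` names the thread group that still
//         // needs its real zombie notification.
//         if let Some((tg, real_zombie)) = delegate {
//             if let Some(tg) = tg.upgrade() {
//                 tg.do_zombie_notifications(real_zombie);
//             }
//         }
//     }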
638
639// PR_SET_PTRACER_ANY is defined as ((unsigned long) -1),
640// which is not understood by bindgen.
641pub const PR_SET_PTRACER_ANY: i32 = -1;
642
643/// Indicates processes specifically allowed to trace a given process if using
644/// SCOPE_RESTRICTED.  Used by prctl(PR_SET_PTRACER).
645#[derive(Copy, Clone, Default, PartialEq)]
646pub enum PtraceAllowedPtracers {
647    #[default]
648    None,
649    Some(pid_t),
650    Any,
651}
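
// Sketch of how a prctl(PR_SET_PTRACER) argument might map onto this enum
// (`arg` is an assumed binding; the real handling lives in the prctl
// implementation and also validates the pid, as the tests at the bottom of this
// file show with the bogus 0xFFF value):
//
//     let allowed = match arg as i32 {
//         0 => PtraceAllowedPtracers::None,
//         PR_SET_PTRACER_ANY => PtraceAllowedPtracers::Any,
//         pid => PtraceAllowedPtracers::Some(pid),
//     };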
652
653/// Continues the target thread, optionally detaching from it.
654/// |data| is treated as it is in PTRACE_CONT: if non-zero, it names the signal
655/// to deliver to the tracee as it resumes.
656/// |detach| will cause the tracer to detach from the tracee.
657fn ptrace_cont<L>(
658    locked: &mut Locked<L>,
659    tracee: &Task,
660    data: &UserAddress,
661    detach: bool,
662) -> Result<(), Errno>
663where
664    L: LockBefore<ThreadGroupLimits>,
665{
666    let data = data.ptr() as u64;
667    let new_state;
668    let mut siginfo = if data != 0 {
669        let signal = Signal::try_from(UncheckedSignal::new(data))?;
670        Some(SignalInfo::default(signal))
671    } else {
672        None
673    };
674
675    let mut state = tracee.write();
676    let is_listen = state.is_ptrace_listening();
677
678    if tracee.load_stopped().is_waking_or_awake() && !is_listen {
679        if detach {
680            state.set_ptrace(None)?;
681        }
682        return error!(EIO);
683    }
684
685    if !state.can_accept_ptrace_commands() && !detach {
686        return error!(ESRCH);
687    }
688
689    if let Some(ptrace) = &mut state.ptrace {
690        if data != 0 {
691            new_state = PtraceStatus::Continuing;
692            if let Some(last_signal) = &mut ptrace.last_signal {
693                if let Some(si) = siginfo {
694                    let new_signal = si.signal;
695                    last_signal.signal = new_signal;
696                }
697                siginfo = Some(last_signal.clone());
698            }
699        } else {
700            new_state = PtraceStatus::Default;
701            ptrace.last_signal = None;
702            ptrace.event_data = None;
703        }
704        ptrace.stop_status = new_state;
705
706        if is_listen {
707            state.notify_ptracees();
708        }
709    }
710
711    if let Some(siginfo) = siginfo {
712        // This will wake up the task for us, and also release state
713        send_signal_first(locked, &tracee, state, siginfo);
714    } else {
715        state.set_stopped(StopState::Waking, None, None, None);
716        drop(state);
717        tracee.thread_group().set_stopped(StopState::Waking, None, false);
718    }
719    if detach {
720        tracee.write().set_ptrace(None)?;
721    }
722    Ok(())
723}
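
// Sketch of how the `data` argument reaches `ptrace_cont` (illustrative only;
// `locked` and `tracee` are assumed bindings, and `UserAddress::from(u64)` is
// assumed to be available as it is elsewhere in Starnix).  A tracer's
// ptrace(PTRACE_CONT, pid, 0, SIGUSR1) arrives here with `data` holding the raw
// signal number, and zero means "resume without delivering a signal".
//
//     ptrace_cont(locked, &tracee, &UserAddress::from(starnix_uapi::SIGUSR1 as u64), false)?;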
724
725fn ptrace_interrupt(tracee: &Task) -> Result<(), Errno> {
726    let mut state = tracee.write();
727    if let Some(ptrace) = &mut state.ptrace {
728        if !ptrace.is_seized() {
729            return error!(EIO);
730        }
731        let status = ptrace.stop_status.clone();
732        ptrace.stop_status = PtraceStatus::Default;
733        let event_data = Some(PtraceEventData::new_from_event(PtraceEvent::Stop, 0));
734        if status == PtraceStatus::Listening {
735            let signal = ptrace.last_signal.clone();
736            // "If the tracee was already stopped by a signal and PTRACE_LISTEN
737            // was sent to it, the tracee stops with PTRACE_EVENT_STOP and
738            // WSTOPSIG(status) returns the stop signal"
739            state.set_stopped(StopState::PtraceEventStopped, signal, None, event_data);
740        } else {
741            state.set_stopped(
742                StopState::PtraceEventStopping,
743                Some(SignalInfo::default(SIGTRAP)),
744                None,
745                event_data,
746            );
747            drop(state);
748            tracee.interrupt();
749        }
750    }
751    Ok(())
752}
753
754fn ptrace_listen(tracee: &Task) -> Result<(), Errno> {
755    let mut state = tracee.write();
756    if let Some(ptrace) = &mut state.ptrace {
757        if !ptrace.is_seized()
758            || (ptrace.last_signal_waitable
759                && ptrace
760                    .event_data
761                    .as_ref()
762                    .is_some_and(|event_data| event_data.event != PtraceEvent::Stop))
763        {
764            return error!(EIO);
765        }
766        ptrace.stop_status = PtraceStatus::Listening;
767    }
768    Ok(())
769}
770
771pub fn ptrace_detach<L>(
772    locked: &mut Locked<L>,
773    pids: &mut PidTable,
774    thread_group: &ThreadGroup,
775    tracee: &Task,
776    data: &UserAddress,
777) -> Result<(), Errno>
778where
779    L: LockBefore<ThreadGroupLimits>,
780{
781    if let Err(x) = ptrace_cont(locked, &tracee, &data, true) {
782        return Err(x);
783    }
784    let tid = tracee.get_tid();
785    thread_group.ptracees.lock().remove(&tid);
786    thread_group.write().zombie_ptracees.remove(pids, tid);
787    Ok(())
788}
789
790/// Dispatches ptrace requests that require an attached tracee.
791
792pub fn ptrace_dispatch<L>(
793    locked: &mut Locked<L>,
794    current_task: &mut CurrentTask,
795    request: u32,
796    pid: pid_t,
797    addr: UserAddress,
798    data: UserAddress,
799) -> Result<SyscallResult, Errno>
800where
801    L: LockBefore<ThreadGroupLimits>,
802{
803    let mut pids = current_task.kernel().pids.write();
804    let weak_task = pids.get_task(pid);
805    let tracee = weak_task.upgrade().ok_or_else(|| errno!(ESRCH))?;
806
807    if let Some(ptrace) = &tracee.read().ptrace {
808        if ptrace.get_pid() != current_task.get_pid() {
809            return error!(ESRCH);
810        }
811    }
812
813    // These requests may be run without the thread in a stop state, or
814    // check the stop state themselves.
815    match request {
816        PTRACE_KILL => {
817            let mut siginfo = SignalInfo::default(SIGKILL);
818            siginfo.code = (linux_uapi::SIGTRAP | PTRACE_KILL << 8) as i32;
819            send_standard_signal(locked, &tracee, siginfo);
820            return Ok(starnix_syscalls::SUCCESS);
821        }
822        PTRACE_INTERRUPT => {
823            ptrace_interrupt(tracee.as_ref())?;
824            return Ok(starnix_syscalls::SUCCESS);
825        }
826        PTRACE_LISTEN => {
827            ptrace_listen(&tracee)?;
828            return Ok(starnix_syscalls::SUCCESS);
829        }
830        PTRACE_CONT => {
831            ptrace_cont(locked, &tracee, &data, false)?;
832            return Ok(starnix_syscalls::SUCCESS);
833        }
834        PTRACE_SYSCALL => {
835            tracee.trace_syscalls.store(true, std::sync::atomic::Ordering::Relaxed);
836            ptrace_cont(locked, &tracee, &data, false)?;
837            return Ok(starnix_syscalls::SUCCESS);
838        }
839        PTRACE_DETACH => {
840            ptrace_detach(locked, &mut pids, current_task.thread_group(), tracee.as_ref(), &data)?;
841            return Ok(starnix_syscalls::SUCCESS);
842        }
843        _ => {}
844    }
845
846    // The remaining requests require the thread to be stopped.
847    let mut state = tracee.write();
848    if !state.can_accept_ptrace_commands() {
849        return error!(ESRCH);
850    }
851
852    match request {
853        PTRACE_PEEKDATA | PTRACE_PEEKTEXT => {
854            let Some(captured) = &mut state.captured_thread_state else {
855                return error!(ESRCH);
856            };
857
858            // NB: The raw syscall differs from the behavior documented in ptrace(2), which
859            // describes the libc wrapper: the peeked word is written to *data, not returned.
860            let src = LongPtr::new(captured.as_ref(), addr);
861            let val = tracee.read_multi_arch_object(src)?;
862
863            let dst = LongPtr::new(&src, data);
864            current_task.write_multi_arch_object(dst, val)?;
865            Ok(starnix_syscalls::SUCCESS)
866        }
867        PTRACE_POKEDATA | PTRACE_POKETEXT => {
868            let Some(captured) = &mut state.captured_thread_state else {
869                return error!(ESRCH);
870            };
871
872            let bytes = if captured.is_arch32() {
873                u32::try_from(data.ptr()).map_err(|_| errno!(EINVAL))?.to_ne_bytes().to_vec()
874            } else {
875                data.ptr().to_ne_bytes().to_vec()
876            };
877
878            tracee.mm()?.force_write_memory(addr, &bytes)?;
879
880            Ok(starnix_syscalls::SUCCESS)
881        }
882        PTRACE_PEEKUSR => {
883            let Some(captured) = &mut state.captured_thread_state else {
884                return error!(ESRCH);
885            };
886
887            let dst = LongPtr::new(captured.as_ref(), data);
888            let val = ptrace_peekuser(&mut captured.thread_state, addr.ptr() as usize)?;
889            current_task.write_multi_arch_object(dst, val as u64)?;
890            return Ok(starnix_syscalls::SUCCESS);
891        }
892        PTRACE_POKEUSR => {
893            ptrace_pokeuser(&mut *state, data.ptr() as usize, addr.ptr() as usize)?;
894            return Ok(starnix_syscalls::SUCCESS);
895        }
896        PTRACE_GETREGSET => {
897            if let Some(ref mut captured) = state.captured_thread_state {
898                let uiv = IOVecPtr::new(current_task, data);
899                let mut iv = current_task.read_multi_arch_object(uiv)?;
900                let base = iv.iov_base.addr;
901                let mut len = iv.iov_len as usize;
902                ptrace_getregset(
903                    current_task,
904                    &mut captured.thread_state,
905                    ElfNoteType::try_from(addr.ptr() as usize)?,
906                    base,
907                    &mut len,
908                )?;
909                iv.iov_len = len as u64;
910                current_task.write_multi_arch_object(uiv, iv)?;
911                return Ok(starnix_syscalls::SUCCESS);
912            }
913            error!(ESRCH)
914        }
915        PTRACE_SETREGSET => {
916            if let Some(ref mut captured) = state.captured_thread_state {
917                captured.dirty = true;
918                let uiv = IOVecPtr::new(current_task, data);
919                let iv = current_task.read_multi_arch_object(uiv)?;
920                let base = iv.iov_base.addr;
921                let len = iv.iov_len as usize;
922                ptrace_setregset(
923                    current_task,
924                    &mut captured.thread_state,
925                    ElfNoteType::try_from(addr.ptr() as usize)?,
926                    base,
927                    len,
928                )?;
929                return Ok(starnix_syscalls::SUCCESS);
930            }
931            error!(ESRCH)
932        }
933        #[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
934        PTRACE_GETREGS => {
935            if let Some(captured) = &mut state.captured_thread_state {
936                let mut len = usize::MAX;
937                ptrace_getregset(
938                    current_task,
939                    &mut captured.thread_state,
940                    ElfNoteType::PrStatus,
941                    data.ptr() as u64,
942                    &mut len,
943                )?;
944                return Ok(starnix_syscalls::SUCCESS);
945            }
946            error!(ESRCH)
947        }
948        PTRACE_SETSIGMASK => {
949            // addr is the size of the buffer pointed to
950            // by data, but has to be sizeof(sigset_t).
951            if addr.ptr() != std::mem::size_of::<SigSet>() {
952                return error!(EINVAL);
953            }
954            // sigset comes from *data.
955            let src: UserRef<SigSet> = UserRef::from(data);
956            let val = current_task.read_object(src)?;
957            state.set_signal_mask(val);
958
959            Ok(starnix_syscalls::SUCCESS)
960        }
961        PTRACE_GETSIGMASK => {
962            // addr is the size of the buffer pointed to
963            // by data, but has to be sizeof(sigset_t).
964            if addr.ptr() != std::mem::size_of::<SigSet>() {
965                return error!(EINVAL);
966            }
967            // sigset goes in *data.
968            let dst: UserRef<SigSet> = UserRef::from(data);
969            let val = state.signal_mask();
970            current_task.write_object(dst, &val)?;
971            Ok(starnix_syscalls::SUCCESS)
972        }
973        PTRACE_GETSIGINFO => {
974            if let Some(ptrace) = &state.ptrace {
975                if let Some(signal) = ptrace.last_signal.as_ref() {
976                    let dst = MultiArchUserRef::<uapi::siginfo_t, uapi::arch32::siginfo_t>::new(
977                        current_task,
978                        data,
979                    );
980                    signal.write(current_task, dst)?;
981                } else {
982                    return error!(EINVAL);
983                }
984            }
985            Ok(starnix_syscalls::SUCCESS)
986        }
987        PTRACE_SETSIGINFO => {
988            // Rust will let us do this cast in a const assignment but not in a
989            // const generic constraint.
990            const SI_MAX_SIZE_AS_USIZE: usize = SI_MAX_SIZE as usize;
991
992            let siginfo_mem = current_task.read_memory_to_array::<SI_MAX_SIZE_AS_USIZE>(data)?;
993            let header = SignalInfoHeader::read_from_bytes(&siginfo_mem[..SI_HEADER_SIZE]).unwrap();
994
995            let mut bytes = [0u8; SI_MAX_SIZE as usize - SI_HEADER_SIZE];
996            bytes.copy_from_slice(&siginfo_mem[SI_HEADER_SIZE..SI_MAX_SIZE as usize]);
997            let details = SignalDetail::Raw { data: bytes };
998            let unchecked_signal = UncheckedSignal::new(header.signo as u64);
999            let signal = Signal::try_from(unchecked_signal)?;
1000
1001            let siginfo = SignalInfo {
1002                signal,
1003                errno: header.errno,
1004                code: header.code,
1005                detail: details,
1006                force: false,
1007                source: SignalSource::capture(),
1008            };
1009            if let Some(ptrace) = &mut state.ptrace {
1010                ptrace.last_signal = Some(siginfo);
1011            }
1012            Ok(starnix_syscalls::SUCCESS)
1013        }
1014        PTRACE_GET_SYSCALL_INFO => {
1015            if let Some(ptrace) = &state.ptrace {
1016                let (size, info) = ptrace.get_target_syscall(&tracee, &state)?;
1017                let dst: UserRef<ptrace_syscall_info> = UserRef::from(data);
1018                let len = std::cmp::min(std::mem::size_of::<ptrace_syscall_info>(), addr.ptr());
1019                // SAFETY: ptrace_syscall_info does not implement FromBytes/IntoBytes,
1020                // so this has to happen manually.
1021                let src = unsafe {
1022                    std::slice::from_raw_parts(
1023                        &info as *const ptrace_syscall_info as *const u8,
1024                        len as usize,
1025                    )
1026                };
1027                current_task.write_memory(dst.addr(), src)?;
1028                Ok(size.into())
1029            } else {
1030                error!(ESRCH)
1031            }
1032        }
1033        PTRACE_SETOPTIONS => {
1034            let mask = data.ptr() as u32;
1035            // This is what we currently support.
1036            if mask != 0
1037                && (mask
1038                    & !(PTRACE_O_TRACESYSGOOD
1039                        | PTRACE_O_TRACECLONE
1040                        | PTRACE_O_TRACEFORK
1041                        | PTRACE_O_TRACEVFORK
1042                        | PTRACE_O_TRACEVFORKDONE
1043                        | PTRACE_O_TRACEEXEC
1044                        | PTRACE_O_TRACEEXIT
1045                        | PTRACE_O_EXITKILL)
1046                    != 0)
1047            {
1048                track_stub!(TODO("https://fxbug.dev/322874463"), "ptrace(PTRACE_SETOPTIONS)", mask);
1049                return error!(ENOSYS);
1050            }
1051            if let Some(ptrace) = &mut state.ptrace {
1052                ptrace.set_options_from_bits(mask)?;
1053            }
1054            Ok(starnix_syscalls::SUCCESS)
1055        }
1056        PTRACE_GETEVENTMSG => {
1057            if let Some(ptrace) = &state.ptrace {
1058                if let Some(event_data) = &ptrace.event_data {
1059                    let dst = LongPtr::new(current_task, data);
1060                    current_task.write_multi_arch_object(dst, event_data.msg)?;
1061                    return Ok(starnix_syscalls::SUCCESS);
1062                }
1063            }
1064            error!(EIO)
1065        }
1066        _ => {
1067            track_stub!(TODO("https://fxbug.dev/322874463"), "ptrace", request);
1068            error!(ENOSYS)
1069        }
1070    }
1071}
1072
1073/// Makes the given thread group trace the given task.
1074fn do_attach(
1075    thread_group: &ThreadGroup,
1076    task: WeakRef<Task>,
1077    attach_type: PtraceAttachType,
1078    options: PtraceOptions,
1079) -> Result<(), Errno> {
1080    if let Some(task_ref) = task.upgrade() {
1081        thread_group.ptracees.lock().insert(task_ref.get_tid(), (&task_ref).into());
1082        {
1083            let process_state = &mut task_ref.thread_group().write();
1084            let mut state = task_ref.write();
1085            state.set_ptrace(Some(PtraceState::new(thread_group.leader, attach_type, options)))?;
1086            // If the tracee is already stopped, make sure that the tracer can
1087            // identify that right away.
1088            if process_state.is_waitable()
1089                && process_state.base.load_stopped() == StopState::GroupStopped
1090                && task_ref.load_stopped() == StopState::GroupStopped
1091            {
1092                if let Some(ptrace) = &mut state.ptrace {
1093                    ptrace.last_signal_waitable = true;
1094                }
1095            }
1096        }
1097        return Ok(());
1098    }
1099    // The tracee is either the current thread, or there is a live ref to it outside
1100    // this function.
1101    unreachable!("Tracee thread not found");
1102}
1103
1104/// Uses the given core ptrace state (including tracer, attach type, etc.) to
1105/// attach to another task, given by `tracee_task`.  Also sends a signal to stop
1106/// `tracee_task`.  Typically used when a task inherits ptrace state from the task that created it.
1107pub fn ptrace_attach_from_state<L>(
1108    locked: &mut Locked<L>,
1109    tracee_task: &OwnedRef<Task>,
1110    ptrace_state: PtraceCoreState,
1111) -> Result<(), Errno>
1112where
1113    L: LockBefore<ThreadGroupLimits>,
1114{
1115    {
1116        let weak_tg =
1117            tracee_task.thread_group().kernel.pids.read().get_thread_group(ptrace_state.pid);
1118        let tracer_tg = weak_tg.ok_or_else(|| errno!(ESRCH))?;
1119        do_attach(
1120            &tracer_tg,
1121            WeakRef::from(tracee_task),
1122            ptrace_state.attach_type,
1123            ptrace_state.options,
1124        )?;
1125    }
1126    let mut state = tracee_task.write();
1127    if let Some(ptrace) = &mut state.ptrace {
1128        ptrace.core_state.tracer_waiters = Arc::clone(&ptrace_state.tracer_waiters);
1129    }
1130
1131    // The newly started tracee starts with a signal that depends on the attach type.
1132    let signal = if ptrace_state.attach_type == PtraceAttachType::Seize {
1133        if let Some(ptrace) = &mut state.ptrace {
1134            ptrace.set_last_event(Some(PtraceEventData::new_from_event(PtraceEvent::Stop, 0)));
1135        }
1136        SignalInfo::default(SIGTRAP)
1137    } else {
1138        SignalInfo::default(SIGSTOP)
1139    };
1140    send_signal_first(locked, tracee_task, state, signal);
1141
1142    Ok(())
1143}
1144
1145pub fn ptrace_traceme(current_task: &mut CurrentTask) -> Result<SyscallResult, Errno> {
1146    let parent = current_task.thread_group().read().parent.clone();
1147    if let Some(parent) = parent {
1148        let parent = parent.upgrade();
1149        // TODO: Move this check into `do_attach()` so that there is a single `ptrace_access_check(tracer, tracee)`?
1150        {
1151            let pids = current_task.kernel().pids.read();
1152            let parent_task = pids.get_task(parent.leader);
1153            security::ptrace_traceme(
1154                current_task,
1155                parent_task.upgrade().ok_or_else(|| errno!(EINVAL))?.as_ref(),
1156            )?;
1157        }
1158
1159        let task_ref = OwnedRef::temp(&current_task.task);
1160        do_attach(&parent, (&task_ref).into(), PtraceAttachType::Attach, PtraceOptions::empty())?;
1161        Ok(starnix_syscalls::SUCCESS)
1162    } else {
1163        error!(EPERM)
1164    }
1165}
1166
1167pub fn ptrace_attach<L>(
1168    locked: &mut Locked<L>,
1169    current_task: &mut CurrentTask,
1170    pid: pid_t,
1171    attach_type: PtraceAttachType,
1172    data: UserAddress,
1173) -> Result<SyscallResult, Errno>
1174where
1175    L: LockBefore<MmDumpable>,
1176{
1177    let weak_task = current_task.kernel().pids.read().get_task(pid);
1178    let tracee = weak_task.upgrade().ok_or_else(|| errno!(ESRCH))?;
1179
1180    if tracee.thread_group == current_task.thread_group {
1181        return error!(EPERM);
1182    }
1183
1184    current_task.check_ptrace_access_mode(locked, PTRACE_MODE_ATTACH_REALCREDS, &tracee)?;
1185    do_attach(current_task.thread_group(), weak_task.clone(), attach_type, PtraceOptions::empty())?;
1186    if attach_type == PtraceAttachType::Attach {
1187        send_standard_signal(
1188            locked.cast_locked::<MmDumpable>(),
1189            &tracee,
1190            SignalInfo::default(SIGSTOP),
1191        );
1192    } else if attach_type == PtraceAttachType::Seize {
1193        // When seizing, |data| should be used as the options bitmask.
1194        if let Some(task_ref) = weak_task.upgrade() {
1195            let mut state = task_ref.write();
1196            if let Some(ptrace) = &mut state.ptrace {
1197                ptrace.set_options_from_bits(data.ptr() as u32)?;
1198            }
1199        }
1200    }
1201    Ok(starnix_syscalls::SUCCESS)
1202}
1203
1204/// Implementation of ptrace(PTRACE_PEEKUSER).  The user struct holds the
1205/// registers and other information about the process.  See ptrace(2) and
1206/// sys/user.h for full details.
1207pub fn ptrace_peekuser(thread_state: &mut ThreadState, offset: usize) -> Result<usize, Errno> {
1208    #[cfg(any(target_arch = "x86_64"))]
1209    if offset >= std::mem::size_of::<user>() {
1210        return error!(EIO);
1211    }
1212    if offset < UserRegsStructPtr::size_of_object_for(thread_state) {
1213        let result = thread_state.get_user_register(offset)?;
1214        return Ok(result);
1215    }
1216    error!(EIO)
1217}
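
// Illustrative x86_64-only sketch: PTRACE_PEEKUSER offsets are byte offsets into
// the `user` struct, whose register area sits at its start, so reading the
// instruction pointer looks roughly like this (`captured` stands for the
// `captured_thread_state` value used by the PTRACE_PEEKUSR arm above):
//
//     #[cfg(target_arch = "x86_64")]
//     let rip = ptrace_peekuser(
//         &mut captured.thread_state,
//         memoffset::offset_of!(starnix_uapi::user_regs_struct, rip),
//     )?;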
1218
1219pub fn ptrace_pokeuser(
1220    state: &mut TaskMutableState,
1221    value: usize,
1222    offset: usize,
1223) -> Result<(), Errno> {
1224    if let Some(ref mut thread_state) = state.captured_thread_state {
1225        thread_state.dirty = true;
1226
1227        #[cfg(any(target_arch = "x86_64"))]
1228        if offset >= std::mem::size_of::<user>() {
1229            return error!(EIO);
1230        }
1231        if offset < UserRegsStructPtr::size_of_object_for(thread_state.as_ref()) {
1232            return thread_state.thread_state.set_user_register(offset, value);
1233        }
1234    }
1235    error!(EIO)
1236}
1237
1238pub fn ptrace_getregset(
1239    current_task: &CurrentTask,
1240    thread_state: &mut ThreadState,
1241    regset_type: ElfNoteType,
1242    base: u64,
1243    len: &mut usize,
1244) -> Result<(), Errno> {
1245    match regset_type {
1246        ElfNoteType::PrStatus => {
1247            let user_regs_struct_len = UserRegsStructPtr::size_of_object_for(thread_state);
1248            if *len < user_regs_struct_len {
1249                return error!(EINVAL);
1250            }
1251            *len = user_regs_struct_len;
1252            let mut i: usize = 0;
1253            let mut reg_ptr = LongPtr::new(thread_state, base);
1254            while i < *len {
1255                let mut val = None;
1256                thread_state
1257                    .registers
1258                    .apply_user_register(i, &mut |register| val = Some(*register as usize))?;
1259                if let Some(val) = val {
1260                    current_task.write_multi_arch_object(reg_ptr, val as u64)?;
1261                }
1262                i += reg_ptr.size_of_object();
1263                reg_ptr = reg_ptr.next()?;
1264            }
1265            Ok(())
1266        }
1267        _ => {
1268            error!(EINVAL)
1269        }
1270    }
1271}
1272
1273pub fn ptrace_setregset(
1274    current_task: &CurrentTask,
1275    thread_state: &mut ThreadState,
1276    regset_type: ElfNoteType,
1277    base: u64,
1278    mut len: usize,
1279) -> Result<(), Errno> {
1280    match regset_type {
1281        ElfNoteType::PrStatus => {
1282            let user_regs_struct_len = UserRegsStructPtr::size_of_object_for(thread_state);
1283            if len < user_regs_struct_len {
1284                return error!(EINVAL);
1285            }
1286            len = user_regs_struct_len;
1287            let mut i: usize = 0;
1288            let mut reg_ptr = LongPtr::new(thread_state, base);
1289            while i < len {
1290                let val = current_task.read_multi_arch_object(reg_ptr)?;
1291                thread_state.registers.apply_user_register(i, &mut |register| *register = val)?;
1292                i += reg_ptr.size_of_object();
1293                reg_ptr = reg_ptr.next()?;
1294            }
1295            Ok(())
1296        }
1297        _ => {
1298            error!(EINVAL)
1299        }
1300    }
1301}
1302
1303#[inline(never)]
1304pub fn ptrace_syscall_enter(locked: &mut Locked<Unlocked>, current_task: &mut CurrentTask) {
1305    let block = {
1306        let mut state = current_task.write();
1307        if state.ptrace.is_some() {
1308            current_task.trace_syscalls.store(false, Ordering::Relaxed);
1309            let mut sig = SignalInfo::default(SIGTRAP);
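            // Mark this as a syscall-stop: si_code carries SIGTRAP with the 0x80
            // bit set, and when PTRACE_O_TRACESYSGOOD is in effect the stop signal
            // reported to waitpid() also gets the 0x80 bit (see ptrace(2)).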
1310            sig.code = (linux_uapi::SIGTRAP | 0x80) as i32;
1311            if state
1312                .ptrace
1313                .as_ref()
1314                .is_some_and(|ptrace| ptrace.has_option(PtraceOptions::TRACESYSGOOD))
1315            {
1316                sig.signal.set_ptrace_syscall_bit();
1317            }
1318            state.set_stopped(StopState::SyscallEnterStopping, Some(sig), None, None);
1319            true
1320        } else {
1321            false
1322        }
1323    };
1324    if block {
1325        current_task.block_while_stopped(locked);
1326    }
1327}
1328
1329#[inline(never)]
1330pub fn ptrace_syscall_exit(
1331    locked: &mut Locked<Unlocked>,
1332    current_task: &mut CurrentTask,
1333    is_error: bool,
1334) {
1335    let block = {
1336        let mut state = current_task.write();
1337        current_task.trace_syscalls.store(false, Ordering::Relaxed);
1338        if state.ptrace.is_some() {
1339            let mut sig = SignalInfo::default(SIGTRAP);
1340            sig.code = (linux_uapi::SIGTRAP | 0x80) as i32;
1341            if state
1342                .ptrace
1343                .as_ref()
1344                .is_some_and(|ptrace| ptrace.has_option(PtraceOptions::TRACESYSGOOD))
1345            {
1346                sig.signal.set_ptrace_syscall_bit();
1347            }
1348
1349            state.set_stopped(StopState::SyscallExitStopping, Some(sig), None, None);
1350            if let Some(ptrace) = &mut state.ptrace {
1351                ptrace.last_syscall_was_error = is_error;
1352            }
1353            true
1354        } else {
1355            false
1356        }
1357    };
1358    if block {
1359        current_task.block_while_stopped(locked);
1360    }
1361}
1362
1363#[cfg(test)]
1364mod tests {
1365    use super::*;
1366    use crate::task::syscalls::sys_prctl;
1367    use crate::testing::{create_task, spawn_kernel_and_run};
1368    use starnix_uapi::PR_SET_PTRACER;
1369    use starnix_uapi::auth::CAP_SYS_PTRACE;
1370
1371    #[::fuchsia::test]
1372    async fn test_set_ptracer() {
1373        spawn_kernel_and_run(async |locked, current_task| {
1374            let kernel = current_task.kernel().clone();
1375            let mut tracee = create_task(locked, &kernel, "tracee");
1376            let mut tracer = create_task(locked, &kernel, "tracer");
1377
1378            let mut creds = tracer.real_creds().clone();
1379            creds.cap_effective &= !CAP_SYS_PTRACE;
1380            tracer.set_creds(creds);
1381
1382            kernel.ptrace_scope.store(security::yama::SCOPE_RESTRICTED, Ordering::Relaxed);
1383            assert_eq!(
1384                sys_prctl(locked, &mut tracee, PR_SET_PTRACER, 0xFFF, 0, 0, 0),
1385                error!(EINVAL)
1386            );
1387
1388            assert_eq!(
1389                ptrace_attach(
1390                    locked,
1391                    &mut tracer,
1392                    tracee.as_ref().task.tid,
1393                    PtraceAttachType::Attach,
1394                    UserAddress::NULL,
1395                ),
1396                error!(EPERM)
1397            );
1398
1399            assert!(
1400                sys_prctl(
1401                    locked,
1402                    &mut tracee,
1403                    PR_SET_PTRACER,
1404                    tracer.thread_group().leader as u64,
1405                    0,
1406                    0,
1407                    0
1408                )
1409                .is_ok()
1410            );
1411
1412            let mut not_tracer = create_task(locked, &kernel, "not-tracer");
1413            not_tracer.set_creds(tracer.real_creds());
1414            assert_eq!(
1415                ptrace_attach(
1416                    locked,
1417                    &mut not_tracer,
1418                    tracee.as_ref().task.tid,
1419                    PtraceAttachType::Attach,
1420                    UserAddress::NULL,
1421                ),
1422                error!(EPERM)
1423            );
1424
1425            assert!(
1426                ptrace_attach(
1427                    locked,
1428                    &mut tracer,
1429                    tracee.as_ref().task.tid,
1430                    PtraceAttachType::Attach,
1431                    UserAddress::NULL,
1432                )
1433                .is_ok()
1434            );
1435        })
1436        .await;
1437    }
1438
1439    #[::fuchsia::test]
1440    async fn test_set_ptracer_any() {
1441        spawn_kernel_and_run(async |locked, current_task| {
1442            let kernel = current_task.kernel().clone();
1443            let mut tracee = create_task(locked, &kernel, "tracee");
1444            let mut tracer = create_task(locked, &kernel, "tracer");
1445
1446            let mut creds = tracer.real_creds().clone();
1447            creds.cap_effective &= !CAP_SYS_PTRACE;
1448            tracer.set_creds(creds);
1449
1450            kernel.ptrace_scope.store(security::yama::SCOPE_RESTRICTED, Ordering::Relaxed);
1451            assert_eq!(
1452                sys_prctl(locked, &mut tracee, PR_SET_PTRACER, 0xFFF, 0, 0, 0),
1453                error!(EINVAL)
1454            );
1455
1456            assert_eq!(
1457                ptrace_attach(
1458                    locked,
1459                    &mut tracer,
1460                    tracee.as_ref().task.tid,
1461                    PtraceAttachType::Attach,
1462                    UserAddress::NULL,
1463                ),
1464                error!(EPERM)
1465            );
1466
1467            assert!(
1468                sys_prctl(locked, &mut tracee, PR_SET_PTRACER, PR_SET_PTRACER_ANY as u64, 0, 0, 0)
1469                    .is_ok()
1470            );
1471
1472            assert!(
1473                ptrace_attach(
1474                    locked,
1475                    &mut tracer,
1476                    tracee.as_ref().task.tid,
1477                    PtraceAttachType::Attach,
1478                    UserAddress::NULL,
1479                )
1480                .is_ok()
1481            );
1482        })
1483        .await;
1484    }
1485}