// starnix_core/task/task.rs

1// Copyright 2021 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5use crate::mm::{MemoryAccessor, MemoryAccessorExt, MemoryManager, TaskMemoryAccessor};
6use crate::mutable_state::{state_accessor, state_implementation};
7use crate::ptrace::{
8    AtomicStopState, PtraceEvent, PtraceEventData, PtraceState, PtraceStatus, StopState,
9};
10use crate::signals::{KernelSignal, RunState, SignalDetail, SignalInfo, SignalState};
11use crate::task::memory_attribution::MemoryAttributionLifecycleEvent;
12use crate::task::tracing::KoidPair;
13use crate::task::{
14    AbstractUnixSocketNamespace, AbstractVsockSocketNamespace, CurrentTask, EventHandler, Kernel,
15    NormalPriority, PidTable, ProcessEntryRef, ProcessExitInfo, RealtimePriority, SchedulerState,
16    SchedulingPolicy, SeccompFilterContainer, SeccompState, SeccompStateValue, ThreadGroup,
17    ThreadGroupKey, ThreadState, UtsNamespaceHandle, WaitCanceler, Waiter, ZombieProcess,
18};
19use crate::vfs::{FdTable, FsContext, FsNodeHandle, FsString};
20use atomic_bitflags::atomic_bitflags;
21use fuchsia_rcu::{RcuArc, RcuOptionArc, RcuReadGuard};
22use macro_rules_attribute::apply;
23use starnix_logging::{log_warn, set_zx_name};
24use starnix_registers::{HeapRegs, RegisterStorageEnum};
25use starnix_sync::{
26    LockBefore, Locked, Mutex, MutexGuard, RwLock, RwLockReadGuard, RwLockWriteGuard, TaskRelease,
27    TerminalLock,
28};
29use starnix_task_command::TaskCommand;
30use starnix_types::arch::ArchWidth;
31use starnix_types::ownership::{OwnedRef, Releasable, ReleaseGuard, TempRef, WeakRef};
32use starnix_types::stats::TaskTimeStats;
33use starnix_uapi::auth::{Credentials, FsCred};
34use starnix_uapi::errors::Errno;
35use starnix_uapi::signals::{SIGCHLD, SigSet, Signal, sigaltstack_contains_pointer};
36use starnix_uapi::user_address::{
37    ArchSpecific, MappingMultiArchUserRef, UserAddress, UserCString, UserRef,
38};
39use starnix_uapi::{
40    CLD_CONTINUED, CLD_DUMPED, CLD_EXITED, CLD_KILLED, CLD_STOPPED, CLD_TRAPPED,
41    FUTEX_BITSET_MATCH_ANY, errno, error, from_status_like_fdio, pid_t, sigaction_t, sigaltstack,
42    tid_t, uapi,
43};
44use std::collections::VecDeque;
45use std::mem::MaybeUninit;
46use std::ops::Deref;
47use std::sync::atomic::{AtomicBool, Ordering};
48use std::sync::{Arc, Weak};
49use std::{cmp, fmt};
50use zx::{Signals, Task as _};
51
/// The way in which a task exited, as reported to wait syscalls and SIGCHLD.
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum ExitStatus {
    /// Normal exit with the given exit code (the value passed to exit(2), truncated to 8 bits).
    Exit(u8),
    /// Terminated by the signal described in the `SignalInfo`.
    Kill(SignalInfo),
    /// Terminated by a signal and dumped core.
    CoreDump(SignalInfo),
    // The second field for Stop and Continue contains the type of ptrace stop
    // event that made it stop / continue, if applicable (PTRACE_EVENT_STOP,
    // PTRACE_EVENT_FORK, etc)
    Stop(SignalInfo, PtraceEvent),
    Continue(SignalInfo, PtraceEvent),
}
63impl ExitStatus {
64    /// Converts the given exit status to a status code suitable for returning from wait syscalls.
65    pub fn wait_status(&self) -> i32 {
66        match self {
67            ExitStatus::Exit(status) => (*status as i32) << 8,
68            ExitStatus::Kill(siginfo) => siginfo.signal.number() as i32,
69            ExitStatus::CoreDump(siginfo) => (siginfo.signal.number() as i32) | 0x80,
70            ExitStatus::Continue(siginfo, trace_event) => {
71                let trace_event_val = *trace_event as u32;
72                if trace_event_val != 0 {
73                    (siginfo.signal.number() as i32) | (trace_event_val << 16) as i32
74                } else {
75                    0xffff
76                }
77            }
78            ExitStatus::Stop(siginfo, trace_event) => {
79                let trace_event_val = *trace_event as u32;
80                (0x7f + ((siginfo.signal.number() as i32) << 8)) | (trace_event_val << 16) as i32
81            }
82        }
83    }
84
85    pub fn signal_info_code(&self) -> i32 {
86        match self {
87            ExitStatus::Exit(_) => CLD_EXITED as i32,
88            ExitStatus::Kill(_) => CLD_KILLED as i32,
89            ExitStatus::CoreDump(_) => CLD_DUMPED as i32,
90            ExitStatus::Stop(_, _) => CLD_STOPPED as i32,
91            ExitStatus::Continue(_, _) => CLD_CONTINUED as i32,
92        }
93    }
94
95    pub fn signal_info_status(&self) -> i32 {
96        match self {
97            ExitStatus::Exit(status) => *status as i32,
98            ExitStatus::Kill(siginfo)
99            | ExitStatus::CoreDump(siginfo)
100            | ExitStatus::Continue(siginfo, _)
101            | ExitStatus::Stop(siginfo, _) => siginfo.signal.number() as i32,
102        }
103    }
104}
105
atomic_bitflags! {
    /// Per-task flags stored in a single atomic so they can be read without taking the
    /// task's mutable state lock.
    #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
    pub struct TaskFlags: u8 {
        /// The task has exited and its exit status has been recorded.
        const EXITED = 0x1;
        /// At least one user signal is queued for this task.
        const SIGNALS_AVAILABLE = 0x2;
        /// A temporary signal mask is in effect (to be removed by `restore_signal_mask`).
        const TEMPORARY_SIGNAL_MASK = 0x4;
        /// Whether the executor should dump the stack of this task when it exits.
        /// Currently used to implement ExitStatus::CoreDump.
        const DUMP_ON_EXIT = 0x8;
        /// At least one internal (kernel) signal is queued for this task.
        const KERNEL_SIGNALS_AVAILABLE = 0x10;
    }
}
118
/// This contains thread state that tracers can inspect and modify.  It is
/// captured when a thread stops, and optionally copied back (if dirty) when a
/// thread starts again.  An alternative implementation would involve the
/// tracers acting on thread state directly; however, this would involve sharing
/// CurrentTask structures across multiple threads, which goes against the
/// intent of the design of CurrentTask.
pub struct CapturedThreadState {
    /// The thread state of the traced task.  This is copied out when the thread
    /// stops.
    pub thread_state: ThreadState<HeapRegs>,

    /// Indicates that the last ptrace operation changed the thread state, so it
    /// should be written back to the original thread.
    pub dirty: bool,
}
134
impl ArchSpecific for CapturedThreadState {
    /// The captured state is 32-bit iff the snapshotted thread state is.
    fn is_arch32(&self) -> bool {
        self.thread_state.is_arch32()
    }
}
140
/// A single entry in a userspace robust futex list. See get_robust_list(2).
#[derive(Debug)]
pub struct RobustList {
    /// Userspace pointer to the next entry in the list.
    pub next: RobustListPtr,
}

/// Userspace pointer to a `RobustList`, mapping either the native 64-bit or the
/// arch32 uapi layout.
pub type RobustListPtr =
    MappingMultiArchUserRef<RobustList, uapi::robust_list, uapi::arch32::robust_list>;
148
149impl From<uapi::robust_list> for RobustList {
150    fn from(robust_list: uapi::robust_list) -> Self {
151        Self { next: RobustListPtr::from(robust_list.next) }
152    }
153}
154
#[cfg(target_arch = "aarch64")]
impl From<uapi::arch32::robust_list> for RobustList {
    /// Converts the arch32 uapi representation into the kernel-internal form.
    fn from(value: uapi::arch32::robust_list) -> Self {
        let next = RobustListPtr::from(value.next);
        RobustList { next }
    }
}
161
/// The head of a userspace robust futex list. See get_robust_list(2).
#[derive(Debug)]
pub struct RobustListHead {
    /// The first entry of the list.
    pub list: RobustList,
    /// Offset from each list entry to the futex word it guards.
    pub futex_offset: isize,
}

/// Userspace pointer to a `RobustListHead`, mapping either the native 64-bit or the
/// arch32 uapi layout.
pub type RobustListHeadPtr =
    MappingMultiArchUserRef<RobustListHead, uapi::robust_list_head, uapi::arch32::robust_list_head>;
170
171impl From<uapi::robust_list_head> for RobustListHead {
172    fn from(robust_list_head: uapi::robust_list_head) -> Self {
173        Self {
174            list: robust_list_head.list.into(),
175            futex_offset: robust_list_head.futex_offset as isize,
176        }
177    }
178}
179
#[cfg(target_arch = "aarch64")]
impl From<uapi::arch32::robust_list_head> for RobustListHead {
    /// Converts the arch32 uapi representation into the kernel-internal form.
    fn from(head: uapi::arch32::robust_list_head) -> Self {
        let list = RobustList::from(head.list);
        let futex_offset = head.futex_offset as isize;
        RobustListHead { list, futex_offset }
    }
}
189
/// The lock-protected mutable state of a `Task`.
///
/// Accessed through the task's state accessors (see the `state_implementation!` impl in this
/// file), which tie mutation to holding the appropriate state lock.
pub struct TaskMutableState {
    // See https://man7.org/linux/man-pages/man2/set_tid_address.2.html
    pub clear_child_tid: UserRef<tid_t>,

    /// Signal handler related state. This is grouped together for when atomicity is needed during
    /// signal sending and delivery.
    signals: SignalState,

    /// Internal signals that have a higher priority than a regular signal.
    ///
    /// Storing in a separate queue outside of `SignalState` ensures the internal signals will
    /// never be ignored or masked when dequeuing. Higher priority ensures that no user signals
    /// will jump the queue, e.g. ptrace, which delays the delivery.
    ///
    /// This design is not about observable consequence, but about convenient implementation.
    kernel_signals: VecDeque<KernelSignal>,

    /// The exit status that this task exited with.
    exit_status: Option<ExitStatus>,

    /// Desired scheduler state for the task.
    pub scheduler_state: SchedulerState,

    /// The UTS namespace assigned to this thread.
    ///
    /// This field is kept in the mutable state because the UTS namespace of a thread
    /// can be forked using `clone()` or `unshare()` syscalls.
    ///
    /// We use UtsNamespaceHandle because the UTS properties can be modified
    /// by any other thread that shares this namespace.
    pub uts_ns: UtsNamespaceHandle,

    /// Bit that determines whether a newly started program can have privileges its parent does
    /// not have.  See Documentation/prctl/no_new_privs.txt in the Linux kernel for details.
    /// Note that Starnix does not currently implement the relevant privileges (e.g.,
    /// setuid/setgid binaries).  So, you can set this, but it does nothing other than get
    /// propagated to children.
    ///
    /// The documentation indicates that this can only ever be set to
    /// true, and it cannot be reverted to false.  Accessor methods
    /// for this field ensure this property.
    no_new_privs: bool,

    /// Userspace hint about how to adjust the OOM score for this process.
    pub oom_score_adj: i32,

    /// List of currently installed seccomp_filters
    pub seccomp_filters: SeccompFilterContainer,

    /// A pointer to the head of the robust futex list of this thread in
    /// userspace. See get_robust_list(2)
    pub robust_list_head: RobustListHeadPtr,

    /// The timer slack used to group timer expirations for the calling thread.
    ///
    /// Timers may expire up to `timerslack_ns` late, but never early.
    ///
    /// If this value is 0, the task's default timerslack is used.
    pub timerslack_ns: u64,

    /// The default value for `timerslack_ns`. This value cannot change during the lifetime of a
    /// task.
    ///
    /// This value is set to the `timerslack_ns` of the creating thread, and thus is not constant
    /// across tasks.
    pub default_timerslack_ns: u64,

    /// Information that a tracer needs to communicate with this process, if it
    /// is being traced.
    pub ptrace: Option<Box<PtraceState>>,

    /// Information that a tracer needs to inspect this process.
    pub captured_thread_state: Option<Box<CapturedThreadState>>,
}
264
265impl TaskMutableState {
266    pub fn no_new_privs(&self) -> bool {
267        self.no_new_privs
268    }
269
270    /// Sets the value of no_new_privs to true.  It is an error to set
271    /// it to anything else.
272    pub fn enable_no_new_privs(&mut self) {
273        self.no_new_privs = true;
274    }
275
276    pub fn get_timerslack<T: zx::Timeline>(&self) -> zx::Duration<T> {
277        zx::Duration::from_nanos(self.timerslack_ns as i64)
278    }
279
280    /// Sets the current timerslack of the task to `ns`.
281    ///
282    /// If `ns` is zero, the current timerslack gets reset to the task's default timerslack.
283    pub fn set_timerslack_ns(&mut self, ns: u64) {
284        if ns == 0 {
285            self.timerslack_ns = self.default_timerslack_ns;
286        } else {
287            self.timerslack_ns = ns;
288        }
289    }
290
291    pub fn is_ptraced(&self) -> bool {
292        self.ptrace.is_some()
293    }
294
295    pub fn is_ptrace_listening(&self) -> bool {
296        self.ptrace.as_ref().is_some_and(|ptrace| ptrace.stop_status == PtraceStatus::Listening)
297    }
298
299    pub fn ptrace_on_signal_consume(&mut self) -> bool {
300        self.ptrace.as_mut().is_some_and(|ptrace: &mut Box<PtraceState>| {
301            if ptrace.stop_status.is_continuing() {
302                ptrace.stop_status = PtraceStatus::Default;
303                false
304            } else {
305                true
306            }
307        })
308    }
309
310    pub fn notify_ptracers(&mut self) {
311        if let Some(ptrace) = &self.ptrace {
312            ptrace.tracer_waiters().notify_all();
313        }
314    }
315
316    pub fn wait_on_ptracer(&self, waiter: &Waiter) {
317        if let Some(ptrace) = &self.ptrace {
318            ptrace.tracee_waiters.wait_async(&waiter);
319        }
320    }
321
322    pub fn notify_ptracees(&mut self) {
323        if let Some(ptrace) = &self.ptrace {
324            ptrace.tracee_waiters.notify_all();
325        }
326    }
327
328    pub fn take_captured_state(&mut self) -> Option<Box<CapturedThreadState>> {
329        if self.captured_thread_state.is_some() {
330            let mut state = None;
331            std::mem::swap(&mut state, &mut self.captured_thread_state);
332            return state;
333        }
334        None
335    }
336
337    pub fn copy_state_from(&mut self, current_task: &CurrentTask) {
338        self.captured_thread_state = Some(Box::new(CapturedThreadState {
339            thread_state: current_task.thread_state.extended_snapshot::<HeapRegs>(),
340            dirty: false,
341        }));
342    }
343
344    /// Returns the task's currently active signal mask.
345    pub fn signal_mask(&self) -> SigSet {
346        self.signals.mask()
347    }
348
349    /// Returns true if `signal` is currently blocked by this task's signal mask.
350    pub fn is_signal_masked(&self, signal: Signal) -> bool {
351        self.signals.mask().has_signal(signal)
352    }
353
354    /// Returns true if `signal` is blocked by the saved signal mask.
355    ///
356    /// Note that the current signal mask may still not be blocking the signal.
357    pub fn is_signal_masked_by_saved_mask(&self, signal: Signal) -> bool {
358        self.signals.saved_mask().is_some_and(|mask| mask.has_signal(signal))
359    }
360
361    /// Removes the currently active, temporary, signal mask and restores the
362    /// previously active signal mask.
363    pub fn restore_signal_mask(&mut self) {
364        self.signals.restore_mask();
365    }
366
367    /// Returns true if the task's current `RunState` is blocked.
368    pub fn is_blocked(&self) -> bool {
369        self.signals.run_state.is_blocked()
370    }
371
372    /// Sets the task's `RunState` to `run_state`.
373    pub fn set_run_state(&mut self, run_state: RunState) {
374        self.signals.run_state = run_state;
375    }
376
377    pub fn run_state(&self) -> RunState {
378        self.signals.run_state.clone()
379    }
380
381    pub fn on_signal_stack(&self, stack_pointer_register: u64) -> bool {
382        self.signals
383            .alt_stack
384            .map(|signal_stack| sigaltstack_contains_pointer(&signal_stack, stack_pointer_register))
385            .unwrap_or(false)
386    }
387
388    pub fn set_sigaltstack(&mut self, stack: Option<sigaltstack>) {
389        self.signals.alt_stack = stack;
390    }
391
392    pub fn sigaltstack(&self) -> Option<sigaltstack> {
393        self.signals.alt_stack
394    }
395
396    pub fn wait_on_signal(&mut self, waiter: &Waiter) {
397        self.signals.signal_wait.wait_async(waiter);
398    }
399
400    pub fn signals_mut(&mut self) -> &mut SignalState {
401        &mut self.signals
402    }
403
404    pub fn wait_on_signal_fd_events(
405        &self,
406        waiter: &Waiter,
407        mask: SigSet,
408        handler: EventHandler,
409    ) -> WaitCanceler {
410        self.signals.signal_wait.wait_async_signal_mask(waiter, mask, handler)
411    }
412
413    pub fn notify_signal_waiters(&self, signal: &Signal) {
414        self.signals.signal_wait.notify_signal(signal);
415    }
416
417    /// Thaw the task if has been frozen
418    pub fn thaw(&mut self) {
419        if let RunState::Frozen(waiter) = self.run_state() {
420            waiter.notify();
421        }
422    }
423
424    pub fn is_frozen(&self) -> bool {
425        matches!(self.run_state(), RunState::Frozen(_))
426    }
427
428    #[cfg(test)]
429    pub fn kernel_signals_for_test(&self) -> &VecDeque<KernelSignal> {
430        &self.kernel_signals
431    }
432}
433
// This impl is rewritten by the `state_implementation!` macro: methods here additionally
// have access to `self.base` (the owning `Task`), and mutable access implies the caller
// holds the task's mutable state lock.
#[apply(state_implementation!)]
impl TaskMutableState<Base = Task> {
    /// Transitions this task's stop state to `stopped`, capturing thread state and
    /// notifying tracers/tracees as appropriate.
    ///
    /// `siginfo`/`event` describe the signal and ptrace event that caused the transition
    /// and are recorded on the task's `PtraceState`, if any. Illegal transitions and
    /// ptrace-only states on untraced tasks are silently ignored.
    pub fn set_stopped(
        &mut self,
        stopped: StopState,
        siginfo: Option<SignalInfo>,
        current_task: Option<&CurrentTask>,
        event: Option<PtraceEventData>,
    ) {
        // Ptrace-only stop states are meaningless without an attached tracer.
        if stopped.ptrace_only() && self.ptrace.is_none() {
            return;
        }

        if self.base.load_stopped().is_illegal_transition(stopped) {
            return;
        }

        // TODO(https://g-issues.fuchsia.dev/issues/306438676): When task can be
        // stopped inside user code, task will need to be either restarted or
        // stopped here.
        self.store_stopped(stopped);
        if stopped.is_stopped() {
            // Snapshot registers so a tracer can inspect them while we are stopped.
            if let Some(ref current_task) = current_task {
                self.copy_state_from(current_task);
            }
        }
        if let Some(ptrace) = &mut self.ptrace {
            ptrace.set_last_signal(siginfo);
            ptrace.set_last_event(event);
        }
        if stopped == StopState::Waking || stopped == StopState::ForceWaking {
            self.notify_ptracees();
        }
        if !stopped.is_in_progress() {
            self.notify_ptracers();
        }
    }

    /// Enqueues a signal at the back of the task's signal queue.
    pub fn enqueue_signal(&mut self, signal: SignalInfo) {
        self.signals.enqueue(signal);
        self.set_flags(TaskFlags::SIGNALS_AVAILABLE, self.signals.is_any_pending());
    }

    /// Enqueues the signal, allowing the signal to skip straight to the front of the task's queue.
    ///
    /// `enqueue_signal` is the more common API to use.
    ///
    /// Note that this will not guarantee that the signal is dequeued before any process-directed
    /// signals.
    pub fn enqueue_signal_front(&mut self, signal: SignalInfo) {
        // NOTE(review): this calls the same `SignalState::enqueue` as `enqueue_signal`,
        // so the "front of the queue" behavior described above does not appear to be
        // implemented here — confirm whether a front-enqueue API exists on `SignalState`.
        self.signals.enqueue(signal);
        self.set_flags(TaskFlags::SIGNALS_AVAILABLE, self.signals.is_any_pending());
    }

    /// Sets the current signal mask of the task.
    pub fn set_signal_mask(&mut self, mask: SigSet) {
        self.signals.set_mask(mask);
        self.set_flags(TaskFlags::SIGNALS_AVAILABLE, self.signals.is_any_pending());
    }

    /// Sets a temporary signal mask for the task.
    ///
    /// This mask should be removed by a matching call to `restore_signal_mask`.
    pub fn set_temporary_signal_mask(&mut self, mask: SigSet) {
        self.signals.set_temporary_mask(mask);
        self.set_flags(TaskFlags::SIGNALS_AVAILABLE, self.signals.is_any_pending());
    }

    /// Returns the number of pending signals for this task, without considering the signal mask.
    pub fn pending_signal_count(&self) -> usize {
        self.signals.num_queued() + self.base.thread_group().num_signals_queued()
    }

    /// Returns `true` if `signal` is pending for this task, without considering the signal mask.
    pub fn has_signal_pending(&self, signal: Signal) -> bool {
        self.signals.has_queued(signal) || self.base.thread_group().has_signal_queued(signal)
    }

    // Prepare a SignalInfo to be sent to the tracer, if any.
    pub fn prepare_signal_info(
        &mut self,
        stopped: StopState,
    ) -> Option<(Weak<ThreadGroup>, SignalInfo)> {
        if !stopped.is_stopped() {
            return None;
        }

        if let Some(ptrace) = &self.ptrace {
            if let Some(last_signal) = ptrace.get_last_signal_ref() {
                // Build the SIGCHLD (CLD_TRAPPED) the tracer should receive for this stop.
                let signal_info = SignalInfo::with_detail(
                    SIGCHLD,
                    CLD_TRAPPED as i32,
                    SignalDetail::SIGCHLD {
                        pid: self.base.tid,
                        uid: self.base.real_creds().uid,
                        status: last_signal.signal.number() as i32,
                    },
                );

                return Some((ptrace.core_state.thread_group.clone(), signal_info));
            }
        }

        None
    }

    /// Attaches (`Some`) or detaches (`None`) a tracer to this task.
    ///
    /// Returns `EPERM` if a tracer is already attached. On detach, the task is moved to
    /// whatever group-stop state its thread group is currently transitioning to.
    pub fn set_ptrace(&mut self, tracer: Option<Box<PtraceState>>) -> Result<(), Errno> {
        if tracer.is_some() && self.ptrace.is_some() {
            return error!(EPERM);
        }

        if tracer.is_none() {
            // Handle the case where this is called while the thread group is being released.
            if let Ok(tg_stop_state) = self.base.thread_group().load_stopped().as_in_progress() {
                self.set_stopped(tg_stop_state, None, None, None);
            }
        }
        self.ptrace = tracer;
        Ok(())
    }

    /// Returns true if this task is stopped (or stopping), traced, and not in the
    /// ptrace "listening" state — i.e. in a state where ptrace commands can be serviced.
    pub fn can_accept_ptrace_commands(&mut self) -> bool {
        !self.base.load_stopped().is_waking_or_awake()
            && self.is_ptraced()
            && !self.is_ptrace_listening()
    }

    /// Stores the task's stop state atomically.
    fn store_stopped(&mut self, state: StopState) {
        // We don't actually use the guard but we require it to enforce that the
        // caller holds the thread group's mutable state lock (identified by
        // mutable access to the thread group's mutable state).

        self.base.stop_state.store(state, Ordering::Relaxed)
    }

    /// Atomically clears the `clear` flags and sets the `set` flags. The two sets
    /// must be disjoint.
    pub fn update_flags(&mut self, clear: TaskFlags, set: TaskFlags) {
        // We don't actually use the guard but we require it to enforce that the
        // caller holds the task's mutable state lock (identified by mutable
        // access to the task's mutable state).

        // Disjointness check: XOR equals OR only when no bit is in both sets.
        debug_assert_eq!(clear ^ set, clear | set);
        let observed = self.base.flags();
        let swapped = self.base.flags.swap((observed | set) & !clear, Ordering::Relaxed);
        debug_assert_eq!(swapped, observed);
    }

    /// Sets or clears a single flag according to `v`.
    pub fn set_flags(&mut self, flag: TaskFlags, v: bool) {
        let (clear, set) = if v { (TaskFlags::empty(), flag) } else { (flag, TaskFlags::empty()) };

        self.update_flags(clear, set);
    }

    /// Records the task's exit status and marks it as exited, overwriting any
    /// previously recorded status.
    pub fn set_exit_status(&mut self, status: ExitStatus) {
        self.set_flags(TaskFlags::EXITED, true);
        self.exit_status = Some(status);
    }

    /// Marks the task as exited, recording `status` only if no exit status was
    /// recorded before.
    pub fn set_exit_status_if_not_already(&mut self, status: ExitStatus) {
        self.set_flags(TaskFlags::EXITED, true);
        self.exit_status.get_or_insert(status);
    }

    /// The set of pending signals for the task, including the signals pending for the thread
    /// group.
    pub fn pending_signals(&self) -> SigSet {
        self.signals.pending() | self.base.thread_group().get_pending_signals()
    }

    /// The set of pending signals for the task specifically, not including the signals pending
    /// for the thread group.
    pub fn task_specific_pending_signals(&self) -> SigSet {
        self.signals.pending()
    }

    /// Returns true if any currently pending signal is allowed by `mask`.
    pub fn is_any_signal_allowed_by_mask(&self, mask: SigSet) -> bool {
        self.signals.is_any_allowed_by_mask(mask)
            || self.base.thread_group().is_any_signal_allowed_by_mask(mask)
    }

    /// Returns whether or not a signal is pending for this task, taking the current
    /// signal mask into account.
    pub fn is_any_signal_pending(&self) -> bool {
        let mask = self.signal_mask();
        self.signals.is_any_pending()
            || self.base.thread_group().is_any_signal_allowed_by_mask(mask)
    }

    /// Returns the next pending signal that passes `predicate`.
    fn take_next_signal_where<F>(&mut self, predicate: F) -> Option<SignalInfo>
    where
        F: Fn(&SignalInfo) -> bool,
    {
        // Thread-group-directed signals are drained before task-specific ones.
        if let Some(signal) = self.base.thread_group().take_next_signal_where(&predicate) {
            Some(signal)
        } else {
            let s = self.signals.take_next_where(&predicate);
            self.set_flags(TaskFlags::SIGNALS_AVAILABLE, self.signals.is_any_pending());
            s
        }
    }

    /// Removes and returns the next pending `signal` for this task.
    ///
    /// Returns `None` if `siginfo` is a blocked signal, or no such signal is pending.
    pub fn take_specific_signal(&mut self, siginfo: SignalInfo) -> Option<SignalInfo> {
        let signal_mask = self.signal_mask();
        if signal_mask.has_signal(siginfo.signal) {
            return None;
        }

        let predicate = |s: &SignalInfo| s.signal == siginfo.signal;
        self.take_next_signal_where(predicate)
    }

    /// Removes and returns a pending signal that is unblocked by the current signal mask.
    ///
    /// Returns `None` if there are no unblocked signals pending.
    pub fn take_any_signal(&mut self) -> Option<SignalInfo> {
        self.take_signal_with_mask(self.signal_mask())
    }

    /// Removes and returns a pending signal that is unblocked by `signal_mask`.
    ///
    /// Returns `None` if there are no signals pending that are unblocked by `signal_mask`.
    pub fn take_signal_with_mask(&mut self, signal_mask: SigSet) -> Option<SignalInfo> {
        // Forced signals bypass the mask.
        let predicate = |s: &SignalInfo| !signal_mask.has_signal(s.signal) || s.force;
        self.take_next_signal_where(predicate)
    }

    /// Enqueues an internal signal at the back of the task's kernel signal queue.
    pub fn enqueue_kernel_signal(&mut self, signal: KernelSignal) {
        self.kernel_signals.push_back(signal);
        self.set_flags(TaskFlags::KERNEL_SIGNALS_AVAILABLE, true);
    }

    /// Removes and returns a pending internal signal.
    ///
    /// Returns `None` if there are no signals pending.
    pub fn take_kernel_signal(&mut self) -> Option<KernelSignal> {
        let signal = self.kernel_signals.pop_front();
        if self.kernel_signals.is_empty() {
            self.set_flags(TaskFlags::KERNEL_SIGNALS_AVAILABLE, false);
        }
        signal
    }

    #[cfg(test)]
    pub fn queued_signal_count(&self, signal: Signal) -> usize {
        self.signals.queued_count(signal)
            + self.base.thread_group().pending_signals.lock().queued_count(signal)
    }
}
688
/// The live state of a task.
///
/// This structure contains the state of a task that is only relevant while the task is alive. It
/// is dropped when the task enters the zombie state.
pub struct TaskLiveState {
    /// A handle to the underlying Zircon thread object.
    ///
    /// Some tasks lack an underlying Zircon thread. These tasks are used internally by the
    /// Starnix kernel to track background work, typically on a `kthread`.
    pub thread: RwLock<Option<Arc<zx::Thread>>>,

    /// The file descriptor table for this task.
    ///
    /// This table can be shared by many tasks.
    pub files: FdTable,

    /// The memory manager for this task.  This is `None` only for system tasks.
    pub mm: RcuOptionArc<MemoryManager>,

    /// The file system for this task.
    pub fs: RcuArc<FsContext>,

    /// The namespace for abstract AF_UNIX sockets for this task.
    pub abstract_socket_namespace: Arc<AbstractUnixSocketNamespace>,

    /// The namespace for AF_VSOCK for this task.
    pub abstract_vsock_namespace: Arc<AbstractVsockSocketNamespace>,
}
717
718impl TaskLiveState {
719    pub fn mm(&self) -> Result<Arc<MemoryManager>, Errno> {
720        self.mm.to_option_arc().ok_or_else(|| errno!(EINVAL))
721    }
722
723    pub fn fs(&self) -> Arc<FsContext> {
724        self.fs.to_arc()
725    }
726}
727
/// Coarse task state, as reported in /proc-style status codes.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum TaskStateCode {
    /// Task is being executed.
    Running,

    /// Task is waiting for an event.
    Sleeping,

    /// Tracing stop
    TracingStop,

    /// Task has exited.
    Zombie,
}
742
743impl TaskStateCode {
744    pub fn code_char(&self) -> char {
745        match self {
746            TaskStateCode::Running => 'R',
747            TaskStateCode::Sleeping => 'S',
748            TaskStateCode::TracingStop => 't',
749            TaskStateCode::Zombie => 'Z',
750        }
751    }
752
753    pub fn name(&self) -> &'static str {
754        match self {
755            TaskStateCode::Running => "running",
756            TaskStateCode::Sleeping => "sleeping",
757            TaskStateCode::TracingStop => "tracing stop",
758            TaskStateCode::Zombie => "zombie",
759        }
760    }
761}
762
/// The information of the task that needs to be available to the `ThreadGroup` while computing
/// which process a wait can target. It is necessary to share this data with the `ThreadGroup` so
/// that it is available while the task is being dropped and so is not accessible from a weak
/// pointer.
#[derive(Debug)]
pub struct TaskPersistentInfoState {
    /// Immutable information about the task
    tid: tid_t,
    thread_group_key: ThreadGroupKey,

    /// The command of this task.
    command: Mutex<TaskCommand>,

    /// The security credentials for this task. These are only set when the task is the CurrentTask,
    /// or on task creation.
    creds: RcuArc<Credentials>,

    // A lock for the security credentials. Writers must take the lock, readers that need to ensure
    // that the task state does not change may take the lock.
    creds_lock: RwLock<()>,
}
784
/// Guard for reading locked credentials.
///
/// Holds the credentials read lock for its lifetime, so the credentials cannot be
/// changed while this guard exists.
pub struct CredentialsReadGuard<'a> {
    // Held only to keep the read lock alive for the guard's lifetime.
    _lock: RwLockReadGuard<'a, ()>,
    creds: RcuReadGuard<Credentials>,
}
790
791impl<'a> Deref for CredentialsReadGuard<'a> {
792    type Target = Credentials;
793
794    fn deref(&self) -> &Self::Target {
795        self.creds.deref()
796    }
797}
798
/// Guard for writing credentials. No `CredentialsReadGuard` to the same task can concurrently
///  exist.
pub struct CredentialsWriteGuard<'a> {
    // Held only to exclude readers/writers for the guard's lifetime.
    _lock: RwLockWriteGuard<'a, ()>,
    creds: &'a RcuArc<Credentials>,
}
805
impl<'a> CredentialsWriteGuard<'a> {
    /// Replaces the task's credentials with `creds` while the write lock is held.
    pub fn update(&mut self, creds: Arc<Credentials>) {
        self.creds.update(creds);
    }
}
811
812impl TaskPersistentInfoState {
813    fn new(
814        tid: tid_t,
815        thread_group_key: ThreadGroupKey,
816        command: TaskCommand,
817        creds: Arc<Credentials>,
818    ) -> TaskPersistentInfo {
819        Arc::new(Self {
820            tid,
821            thread_group_key,
822            command: Mutex::new(command),
823            creds: RcuArc::new(creds),
824            creds_lock: RwLock::new(()),
825        })
826    }
827
828    pub fn tid(&self) -> tid_t {
829        self.tid
830    }
831
832    pub fn pid(&self) -> pid_t {
833        self.thread_group_key.pid()
834    }
835
836    pub fn command_guard(&self) -> MutexGuard<'_, TaskCommand> {
837        self.command.lock()
838    }
839
840    /// Snapshots the credentials, returning a short-lived RCU-guarded reference.
841    pub fn real_creds(&self) -> RcuReadGuard<Credentials> {
842        self.creds.read()
843    }
844
845    /// Snapshots the credentials, returning a new reference. Use this if you need to stash the
846    /// credentials somewhere.
847    pub fn clone_creds(&self) -> Arc<Credentials> {
848        self.creds.to_arc()
849    }
850
851    /// Returns a read lock on the credentials. This is appropriate if you need to guarantee that
852    ///  the Task's credentials will not change during a security-sensitive operation.
853    pub fn lock_creds(&self) -> CredentialsReadGuard<'_> {
854        let lock = self.creds_lock.read();
855        CredentialsReadGuard { _lock: lock, creds: self.creds.read() }
856    }
857
858    /// Locks the credentials for writing.
859    /// SAFETY: Only use from CurrentTask, and keep the subjective credentials stored in CurrentTask
860    /// in sync.
861    pub(in crate::task) unsafe fn write_creds(&self) -> CredentialsWriteGuard<'_> {
862        let lock = self.creds_lock.write();
863        CredentialsWriteGuard { _lock: lock, creds: &self.creds }
864    }
865}
866
/// Shared, reference-counted handle to a task's persistent info; shared so the `ThreadGroup`
/// can still access it while computing which process a wait can target (see
/// `Task::persistent_info`).
pub type TaskPersistentInfo = Arc<TaskPersistentInfoState>;
868
/// A unit of execution.
///
/// A task is the primary unit of execution in the Starnix kernel. Most tasks are *user* tasks,
/// which have an associated Zircon thread. The Zircon thread switches between restricted mode,
/// in which the thread runs userspace code, and normal mode, in which the thread runs Starnix
/// code.
///
/// Tasks track the resources used by userspace by referencing various objects, such as an
/// `FdTable`, a `MemoryManager`, and an `FsContext`. Many tasks can share references to these
/// objects. In principle, which objects are shared between which tasks can be largely arbitrary,
/// but there are common patterns of sharing. For example, tasks created with `pthread_create`
/// will share the `FdTable`, `MemoryManager`, and `FsContext` and are often called "threads" by
/// userspace programmers. Tasks created by `posix_spawn` do not share these objects and are often
/// called "processes" by userspace programmers. However, inside the kernel, there is no clear
/// definition of a "thread" or a "process".
///
/// During boot, the kernel creates the first task, often called `init`. The vast majority of other
/// tasks are created as transitive clones (e.g., using `clone(2)`) of that task. Sometimes, the
/// kernel will create new tasks from whole cloth, either with a corresponding userspace component
/// or to represent some background work inside the kernel.
///
/// See also `CurrentTask`, which represents the task corresponding to the thread that is currently
/// executing.
pub struct Task {
    /// Weak reference to the `OwnedRef` of this `Task`. This allows to retrieve the
    /// `TempRef` from a raw `Task`.
    pub weak_self: WeakRef<Self>,

    /// A unique identifier for this task.
    ///
    /// This value can be read in userspace using `gettid(2)`. In general, this value
    /// is different from the value returned by `getpid(2)`, which returns the `id` of the leader
    /// of the `thread_group`.
    pub tid: tid_t,

    /// The process key of this task.
    pub thread_group_key: ThreadGroupKey,

    /// The kernel to which this thread group belongs.
    pub kernel: Arc<Kernel>,

    /// The thread group to which this task belongs.
    ///
    /// The group of tasks in a thread group roughly corresponds to the userspace notion of a
    /// process.
    pub thread_group: Arc<ThreadGroup>,

    /// The live state of the task.
    ///
    /// This is `None` for zombie tasks.
    pub live_state: RcuOptionArc<TaskLiveState>,

    /// The stop state of the task, distinct from the stop state of the thread group.
    ///
    /// Must only be set when the `mutable_state` write lock is held.
    stop_state: AtomicStopState,

    /// The flags for the task.
    ///
    /// Must only be set when the `mutable_state` write lock is held.
    flags: AtomicTaskFlags,

    /// The mutable state of the Task.
    mutable_state: RwLock<TaskMutableState>,

    /// The information of the task that needs to be available to the `ThreadGroup` while computing
    /// which process a wait can target.
    /// Contains the command line, the task credentials and the exit signal.
    /// See `TaskPersistentInfo` for more information.
    pub persistent_info: TaskPersistentInfo,

    /// For vfork and clone() with CLONE_VFORK, this is set when the task exits or calls execve().
    /// It allows the calling task to block until the fork has been completed. Only populated
    /// when created with the CLONE_VFORK flag.
    vfork_event: Option<Arc<zx::Event>>,

    /// Variable that can tell you whether there are currently seccomp
    /// filters without holding a lock
    pub seccomp_filter_state: SeccompState,

    /// Tell you whether you are tracing syscall entry / exit without a lock.
    pub trace_syscalls: AtomicBool,

    // The pid directory, so it doesn't have to be generated and thrown away on every access.
    // See https://fxbug.dev/291962828 for details.
    pub proc_pid_directory_cache: Mutex<Option<FsNodeHandle>>,
}
956
/// The decoded cross-platform parts we care about for page fault exception reports.
#[derive(Debug)]
pub struct PageFaultExceptionReport {
    /// The address whose access triggered the fault.
    pub faulting_address: u64,
    pub not_present: bool, // Set when the page fault was due to a not-present page.
    pub is_write: bool,    // Set when the triggering memory operation was a write.
    pub is_execute: bool,  // Set when the triggering memory operation was an execute.
}
965
966impl Task {
    /// The kernel this task is running in.
    pub fn kernel(&self) -> &Arc<Kernel> {
        &self.kernel
    }

    /// The thread group (roughly, the userspace "process") this task belongs to.
    pub fn thread_group(&self) -> &Arc<ThreadGroup> {
        &self.thread_group
    }
974
975    pub fn has_same_address_space(&self, other: Option<&Arc<MemoryManager>>) -> bool {
976        match (self.mm(), other) {
977            (Ok(this), Some(other)) => Arc::ptr_eq(&this, other),
978            (Err(_), None) => true,
979            _ => false,
980        }
981    }
982
    /// The current task flags, loaded with relaxed ordering.
    pub fn flags(&self) -> TaskFlags {
        self.flags.load(Ordering::Relaxed)
    }
986
987    /// When the task exits, if there is a notification that needs to propagate
988    /// to a ptracer, make sure it will propagate.
989    pub fn set_ptrace_zombie(&self, pids: &mut crate::task::PidTable) {
990        let pgid = self.thread_group().read().process_group.leader;
991        let exit_signal = self.thread_group().read().exit_signal.clone();
992        let mut state = self.write();
993        state.set_stopped(StopState::ForceAwake, None, None, None);
994        if let Some(ptrace) = &mut state.ptrace {
995            // Add a zombie that the ptracer will notice.
996            ptrace.last_signal_waitable = true;
997            let tracer_pid = ptrace.get_pid();
998            let tracer_tg = pids.get_thread_group(tracer_pid);
999            if let Some(tracer_tg) = tracer_tg {
1000                drop(state);
1001                let mut tracer_state = tracer_tg.write();
1002
1003                let exit_status = self.exit_status().unwrap_or_else(|| {
1004                    starnix_logging::log_error!("Exiting without an exit code.");
1005                    ExitStatus::Exit(u8::MAX)
1006                });
1007                let uid = self.real_creds().uid;
1008                let exit_info = ProcessExitInfo { status: exit_status, exit_signal };
1009                let zombie = ZombieProcess {
1010                    thread_group_key: self.thread_group_key.clone(),
1011                    pgid,
1012                    uid,
1013                    exit_info: exit_info,
1014                    // ptrace doesn't need this.
1015                    time_stats: TaskTimeStats::default(),
1016                    is_canonical: false,
1017                };
1018
1019                tracer_state.zombie_ptracees.add(pids, self.tid, zombie);
1020            };
1021        }
1022    }
1023
    /// Disconnects this task from the tracer, if the tracer is still running.
    pub fn ptrace_disconnect(&mut self, pids: &PidTable) {
        let mut state = self.write();
        let ptracer_pid = state.ptrace.as_ref().map(|ptrace| ptrace.get_pid());
        if let Some(ptracer_pid) = ptracer_pid {
            // Clear our own ptrace state first, while our state lock is held.
            let _ = state.set_ptrace(None);
            if let Some(ProcessEntryRef::Process(tg)) = pids.get_process(ptracer_pid) {
                let tid = self.get_tid();
                // Drop our state lock before locking the tracer's ptracees list so the two
                // locks are never held at once.
                drop(state);
                tg.ptracees.lock().remove(&tid);
            }
        }
    }
1037
1038    pub fn exit_status(&self) -> Option<ExitStatus> {
1039        self.is_exitted().then(|| self.read().exit_status.clone()).flatten()
1040    }
1041
    /// Whether the task has exited (the `EXITED` flag is set).
    pub fn is_exitted(&self) -> bool {
        self.flags().contains(TaskFlags::EXITED)
    }

    /// The task's stop state, loaded with relaxed ordering.
    pub fn load_stopped(&self) -> StopState {
        self.stop_state.load(Ordering::Relaxed)
    }
1049
1050    /// Upgrade a Reference to a Task, returning a ESRCH errno if the reference cannot be borrowed.
1051    pub fn from_weak(weak: &WeakRef<Task>) -> Result<TempRef<'_, Task>, Errno> {
1052        weak.upgrade().ok_or_else(|| errno!(ESRCH))
1053    }
1054
    /// Internal function for creating a Task object. Useful when you need to specify the value of
    /// every field. create_process and create_thread are more likely to be what you want.
    ///
    /// Any fields that should be initialized fresh for every task, even if the task was created
    /// with fork, are initialized to their defaults inside this function. All other fields are
    /// passed as parameters.
    #[allow(clippy::let_and_return)]
    pub fn new(
        tid: tid_t,
        command: TaskCommand,
        thread_group: Arc<ThreadGroup>,
        thread: Option<zx::Thread>,
        files: FdTable,
        mm: Option<Arc<MemoryManager>>,
        // NOTE(review): this comment appears stale — `fs` is not `Option` in the current
        // signature. Historically it could be `None` only when building the initial task that
        // is used to build the initial FsContext.
        fs: Arc<FsContext>,
        creds: Arc<Credentials>,
        abstract_socket_namespace: Arc<AbstractUnixSocketNamespace>,
        abstract_vsock_namespace: Arc<AbstractVsockSocketNamespace>,
        signal_mask: SigSet,
        kernel_signals: VecDeque<KernelSignal>,
        vfork_event: Option<Arc<zx::Event>>,
        scheduler_state: SchedulerState,
        uts_ns: UtsNamespaceHandle,
        no_new_privs: bool,
        seccomp_filter_state: SeccompState,
        seccomp_filters: SeccompFilterContainer,
        robust_list_head: RobustListHeadPtr,
        timerslack_ns: u64,
    ) -> OwnedRef<Self> {
        let thread_group_key = ThreadGroupKey::from(&thread_group);
        OwnedRef::new_cyclic(|weak_self| {
            // Resources that exist only while the task is alive; dropped when the task becomes
            // a zombie (see `live_state`).
            let task_live = Arc::new(TaskLiveState {
                thread: RwLock::new(thread.map(Arc::new)),
                files,
                mm: RcuOptionArc::new(mm),
                fs: RcuArc::new(fs),
                abstract_socket_namespace,
                abstract_vsock_namespace,
            });
            let task = Task {
                weak_self,
                tid,
                thread_group_key: thread_group_key.clone(),
                kernel: Arc::clone(&thread_group.kernel),
                thread_group,
                live_state: RcuOptionArc::new(Some(task_live)),
                vfork_event,
                stop_state: AtomicStopState::new(StopState::Awake),
                flags: AtomicTaskFlags::new(TaskFlags::empty()),
                mutable_state: RwLock::new(TaskMutableState {
                    clear_child_tid: UserRef::default(),
                    signals: SignalState::with_mask(signal_mask),
                    kernel_signals,
                    exit_status: None,
                    scheduler_state,
                    uts_ns,
                    no_new_privs,
                    oom_score_adj: Default::default(),
                    seccomp_filters,
                    robust_list_head,
                    timerslack_ns,
                    // The default timerslack is set to the current timerslack of the creating thread.
                    default_timerslack_ns: timerslack_ns,
                    ptrace: None,
                    captured_thread_state: None,
                }),
                persistent_info: TaskPersistentInfoState::new(
                    tid,
                    thread_group_key,
                    command,
                    creds,
                ),
                seccomp_filter_state,
                trace_syscalls: AtomicBool::new(false),
                proc_pid_directory_cache: Mutex::new(None),
            };

            #[cfg(any(test, debug_assertions))]
            {
                // Acquire each of the new task's locks once in debug/test builds — presumably a
                // sanity check that they are all acquirable on a fresh task; confirm intent.
                // Note that `Kernel::pids` is already locked by the caller of `Task::new()`.
                let _l1 = task.read();
                let _l2 = task.persistent_info.lock_creds();
                let _l3 = task.persistent_info.command_guard();
            }
            task
        })
    }
1144
1145    state_accessor!(Task, mutable_state);
1146
    /// Returns the real credentials of the task as a short-lived RCU-guarded reference. These
    /// credentials are used to check permissions for actions performed on the task. If the task
    /// itself is performing an action, use `CurrentTask::current_creds` instead. This does not
    /// lock the credentials; use `TaskPersistentInfo::lock_creds` if the credentials must not
    /// change concurrently.
    pub fn real_creds(&self) -> RcuReadGuard<Credentials> {
        self.persistent_info.real_creds()
    }

    /// Returns a new long-lived reference to the real credentials of the task.  These credentials
    /// are used to check permissions for actions performed on the task. If the task itself is
    /// performing an action, use `CurrentTask::current_creds` instead. This does not lock the
    /// credentials.
    pub fn clone_creds(&self) -> Arc<Credentials> {
        self.persistent_info.clone_creds()
    }
1162
1163    pub fn ptracer_task(&self) -> WeakRef<Task> {
1164        let ptracer = {
1165            let state = self.read();
1166            state.ptrace.as_ref().map(|p| p.core_state.pid)
1167        };
1168
1169        let Some(ptracer) = ptracer else {
1170            return WeakRef::default();
1171        };
1172
1173        self.get_task(ptracer)
1174    }
1175
    /// Returns the live state of the task, if it exists.
    ///
    /// # Errors
    ///
    /// Returns [`Err(ESRCH)`] if the task has already transitioned to a zombie state and its live
    /// resources have been dropped.
    #[track_caller]
    pub fn live(&self) -> Result<Arc<TaskLiveState>, Errno> {
        self.live_state.to_option_arc().ok_or_else(|| errno!(ESRCH))
    }

    /// Returns the memory manager of the task, if it exists.
    ///
    /// # Errors
    ///
    /// Returns [`Err(errno)`] where `errno` is:
    ///
    ///   - `ESRCH`: the task is dead and its live resources have been dropped.
    ///   - `EINVAL`: the task does not have a memory manager (e.g. kernel threads).
    #[track_caller]
    pub fn mm(&self) -> Result<Arc<MemoryManager>, Errno> {
        self.live()?.mm.to_option_arc().ok_or_else(|| errno!(EINVAL))
    }
1199
    /// Modify the given elements of the scheduler state with new values and update the
    /// task's thread's role.
    ///
    /// Returns any error from propagating the new state to the thread's role.
    pub(crate) fn set_scheduler_policy_priority_and_reset_on_fork(
        &self,
        policy: SchedulingPolicy,
        priority: RealtimePriority,
        reset_on_fork: bool,
    ) -> Result<(), Errno> {
        self.update_scheduler_state_then_role(|scheduler_state| {
            scheduler_state.policy = policy;
            scheduler_state.realtime_priority = priority;
            scheduler_state.reset_on_fork = reset_on_fork;
        })
    }

    /// Modify the scheduler state's priority and update the task's thread's role.
    pub(crate) fn set_scheduler_priority(&self, priority: RealtimePriority) -> Result<(), Errno> {
        self.update_scheduler_state_then_role(|scheduler_state| {
            scheduler_state.realtime_priority = priority
        })
    }

    /// Modify the scheduler state's nice and update the task's thread's role.
    pub(crate) fn set_scheduler_nice(&self, nice: NormalPriority) -> Result<(), Errno> {
        self.update_scheduler_state_then_role(|scheduler_state| {
            scheduler_state.normal_priority = nice
        })
    }

    /// Overwrite the existing scheduler state with a new one and update the task's thread's role.
    pub fn set_scheduler_state(&self, scheduler_state: SchedulerState) -> Result<(), Errno> {
        self.update_scheduler_state_then_role(|task_scheduler_state| {
            *task_scheduler_state = scheduler_state
        })
    }

    /// Update the task's thread's role based on its current scheduler state without making any
    /// changes to the state.
    ///
    /// This should be called on tasks that have newly created threads, e.g. after cloning.
    pub fn sync_scheduler_state_to_role(&self) -> Result<(), Errno> {
        self.update_scheduler_state_then_role(|_| {})
    }
1243
1244    fn update_scheduler_state_then_role(
1245        &self,
1246        updater: impl FnOnce(&mut SchedulerState),
1247    ) -> Result<(), Errno> {
1248        let new_scheduler_state = {
1249            // Hold the task state lock as briefly as possible, it's not needed to update the role.
1250            let mut state = self.write();
1251            updater(&mut state.scheduler_state);
1252            state.scheduler_state
1253        };
1254        self.thread_group().kernel.scheduler.set_thread_role(self, new_scheduler_state)?;
1255        Ok(())
1256    }
1257
1258    /// Signals the vfork event, if any, to unblock waiters.
1259    pub fn signal_vfork(&self) {
1260        if let Some(event) = &self.vfork_event {
1261            if let Err(status) = event.signal(Signals::NONE, Signals::USER_0) {
1262                log_warn!("Failed to set vfork signal {status}");
1263            }
1264        };
1265    }
1266
1267    /// Blocks the caller until the task has exited or executed execve(). This is used to implement
1268    /// vfork() and clone(... CLONE_VFORK, ...). The task must have created with CLONE_EXECVE.
1269    pub fn wait_for_execve(&self, task_to_wait: WeakRef<Task>) -> Result<(), Errno> {
1270        let event = task_to_wait.upgrade().and_then(|t| t.vfork_event.clone());
1271        if let Some(event) = event {
1272            event
1273                .wait_one(zx::Signals::USER_0, zx::MonotonicInstant::INFINITE)
1274                .map_err(|status| from_status_like_fdio!(status))?;
1275        }
1276        Ok(())
1277    }
1278
    /// If needed, clear the child tid for this task.
    ///
    /// Userspace can ask us to clear the child tid and issue a futex wake at
    /// the child tid address when we tear down a task. For example, bionic
    /// uses this mechanism to implement pthread_join. The thread that calls
    /// pthread_join sleeps using FUTEX_WAIT on the child tid address. We wake
    /// them up here to let them know the thread is done.
    pub fn clear_child_tid_if_needed<L>(&self, locked: &mut Locked<L>) -> Result<(), Errno>
    where
        L: LockBefore<TerminalLock>,
    {
        let mut state = self.write();
        let user_tid = state.clear_child_tid;
        if !user_tid.is_null() {
            // Write 0 to the user-provided tid address, then wake all futex waiters on it.
            let zero: tid_t = 0;
            self.write_object(user_tid, &zero)?;
            self.kernel().shared_futexes.wake(
                locked,
                self,
                user_tid.addr(),
                usize::MAX,
                FUTEX_BITSET_MATCH_ANY,
            )?;
            // Reset the pointer so this runs at most once per registration.
            state.clear_child_tid = UserRef::default();
        }
        Ok(())
    }
1306
    /// Looks up a task by tid in the kernel's pid table.
    pub fn get_task(&self, tid: tid_t) -> WeakRef<Task> {
        self.kernel().pids.read().get_task(tid)
    }

    /// The process id: the tid of this task's thread group leader.
    pub fn get_pid(&self) -> pid_t {
        self.thread_group_key.pid()
    }

    /// This task's own thread id.
    pub fn get_tid(&self) -> tid_t {
        self.tid
    }

    /// Whether this task is the thread group leader (its tid equals the pid).
    pub fn is_leader(&self) -> bool {
        self.get_pid() == self.get_tid()
    }
1322
    /// Reads the task's argv strings from its memory, reading at most `max_len` bytes.
    pub fn read_argv(&self, max_len: usize) -> Result<Vec<FsString>, Errno> {
        // argv is empty for kthreads
        let Ok(mm) = self.mm() else {
            return Ok(vec![]);
        };
        let (argv_start, argv_end) = {
            let mm_state = mm.state.read();
            (mm_state.argv_start, mm_state.argv_end)
        };

        let len_to_read = std::cmp::min(argv_end - argv_start, max_len);
        self.read_nul_delimited_c_string_list(argv_start, len_to_read)
    }

    /// Reads just argv[0] from the task's memory, as a path string.
    pub fn read_argv0(&self) -> Result<FsString, Errno> {
        // argv is empty for kthreads
        let Ok(mm) = self.mm() else {
            return Ok(FsString::default());
        };
        let argv_start = {
            let mm_state = mm.state.read();
            mm_state.argv_start
        };
        // Assuming a 64-bit arch width is fine for a type that's just u8's on all arches.
        let argv_start = UserCString::new(&ArchWidth::Arch64, argv_start);
        self.read_path(argv_start)
    }

    /// Reads the task's environment strings from its memory, reading at most `max_len` bytes.
    pub fn read_env(&self, max_len: usize) -> Result<Vec<FsString>, Errno> {
        // environment is empty for kthreads
        let Ok(mm) = self.mm() else { return Ok(vec![]) };
        let (env_start, env_end) = {
            let mm_state = mm.state.read();
            (mm_state.environ_start, mm_state.environ_end)
        };

        let len_to_read = std::cmp::min(env_end - env_start, max_len);
        self.read_nul_delimited_c_string_list(env_start, len_to_read)
    }
1362
    /// Runtime info of the task's Zircon thread; `EINVAL` if the task has no thread, `ESRCH` if
    /// it is dead.
    pub fn thread_runtime_info(&self) -> Result<zx::TaskRuntimeInfo, Errno> {
        self.live()?
            .thread
            .read()
            .as_ref()
            .ok_or_else(|| errno!(EINVAL))?
            .get_runtime_info()
            .map_err(|status| from_status_like_fdio!(status))
    }

    /// The task's real credentials, converted to filesystem credentials.
    pub fn real_fscred(&self) -> FsCred {
        self.real_creds().as_fscred()
    }
1376
    /// Interrupts the current task.
    ///
    /// This will interrupt any blocking syscalls if the task is blocked on one.
    /// The signal_state of the task must not be locked.
    pub fn interrupt(&self) {
        let Ok(live) = self.live() else {
            log_warn!("Cannot interrupt dead task {}", self.get_tid());
            return;
        };

        // Wake the run state first so a blocked waiter observes the interrupt.
        self.read().signals.run_state.wake();
        if let Some(thread) = live.thread.read().as_ref() {
            #[allow(
                clippy::undocumented_unsafe_blocks,
                reason = "Force documented unsafe blocks in Starnix"
            )]
            // NOTE(review): raw syscall; the thread handle stays valid here because the
            // `live.thread` read guard is held for the duration of the call.
            let status = unsafe { zx::sys::zx_restricted_kick(thread.raw_handle(), 0) };
            if status != zx::sys::ZX_OK {
                // zx_restricted_kick() could return ZX_ERR_BAD_STATE if the target thread is already in the
                // DYING or DEAD states. That's fine since it means that the task is in the process of
                // tearing down, so allow it.
                assert_eq!(status, zx::sys::ZX_ERR_BAD_STATE);
            }
        }
    }
1402
1403    pub fn command(&self) -> TaskCommand {
1404        self.persistent_info.command.lock().clone()
1405    }
1406
    /// Updates the task's command, mirroring the new name onto the Zircon thread and — for the
    /// thread group leader — onto the Zircon process as well.
    pub fn set_command_name(&self, mut new_name: TaskCommand) {
        let Ok(live) = self.live() else {
            log_warn!("Cannot set command name for dead task {}", self.get_tid());
            return;
        };

        // If we're going to update the process name, see if we can get a longer one than normally
        // provided in the Linux uapi. Only choose the argv0-based name if it's a superset of the
        // uapi-provided name to avoid clobbering the name provided by the user.
        if let Ok(argv0) = self.read_argv0() {
            let argv0 = TaskCommand::from_path_bytes(&argv0);
            if let Some(embedded_name) = argv0.try_embed(&new_name) {
                new_name = embedded_name;
            }
        }

        // Acquire this before modifying Zircon state to ensure consistency under concurrent access.
        // Ideally this would also guard the logic above to read argv[0] but we can't due to lock
        // cycles with SELinux checks.
        let mut command_guard = self.persistent_info.command_guard();

        // Set the name on the Linux thread.
        if let Some(thread) = live.thread.read().as_ref() {
            set_zx_name(&**thread, new_name.as_bytes());
        }

        // If this is the thread group leader, use this name for the process too.
        if self.is_leader() {
            set_zx_name(&self.thread_group().process, new_name.as_bytes());
            // Notify the job debugger so it can observe the new process name.
            let _ = zx::Thread::raise_user_exception(
                zx::RaiseExceptionOptions::TARGET_JOB_DEBUGGER,
                zx::sys::ZX_EXCP_USER_CODE_PROCESS_NAME_CHANGED,
                0,
            );
        }

        // Avoid a lock cycle by dropping the guard before notifying memory attribution of the
        // change.
        *command_guard = new_name;
        drop(command_guard);

        if self.is_leader() {
            if let Some(notifier) = &self.thread_group().read().notifier {
                let _ = notifier.send(MemoryAttributionLifecycleEvent::name_change(self.tid));
            }
        }
    }

    /// Transitions the task's seccomp state to `state`; errors are propagated from the state
    /// machine itself.
    pub fn set_seccomp_state(&self, state: SeccompStateValue) -> Result<(), Errno> {
        self.seccomp_filter_state.set(&state)
    }
1458
1459    pub fn state_code(&self) -> TaskStateCode {
1460        let status = self.read();
1461        if status.exit_status.is_some() {
1462            TaskStateCode::Zombie
1463        } else if status.signals.run_state.is_blocked() {
1464            let stop_state = self.load_stopped();
1465            if stop_state.ptrace_only() && stop_state.is_stopped() {
1466                TaskStateCode::TracingStop
1467            } else {
1468                TaskStateCode::Sleeping
1469            }
1470        } else {
1471            TaskStateCode::Running
1472        }
1473    }
1474
    /// CPU time statistics for the task, read from its Zircon thread. Returns zeroed stats if
    /// the task is dead or has no thread.
    pub fn time_stats(&self) -> TaskTimeStats {
        use zx::Task;
        // TODO(https://fxbug.dev/297440106): Return time stats for zombie tasks.
        let live = match self.live() {
            Ok(live) => live,
            Err(_) => return TaskTimeStats::default(),
        };
        let info = match &*live.thread.read() {
            Some(thread) => thread.get_runtime_info().expect("Failed to get thread stats"),
            None => return TaskTimeStats::default(),
        };

        TaskTimeStats {
            user_time: zx::MonotonicDuration::from_nanos(info.cpu_time),
            // TODO(https://fxbug.dev/42078242): How can we calculate system time?
            system_time: zx::MonotonicDuration::default(),
        }
    }

    /// The signal action registered on this task's thread group for `signal`.
    pub fn get_signal_action(&self, signal: Signal) -> sigaction_t {
        self.thread_group().signal_actions.get(signal)
    }

    /// Lock-free check for whether a pending-signal scan is warranted, combining the task's own
    /// flags with the thread group's pending-signal indicator.
    pub fn should_check_for_pending_signals(&self) -> bool {
        self.flags().intersects(
            TaskFlags::KERNEL_SIGNALS_AVAILABLE
                | TaskFlags::SIGNALS_AVAILABLE
                | TaskFlags::TEMPORARY_SIGNAL_MASK,
        ) || self.thread_group.has_pending_signals.load(Ordering::Relaxed)
    }

    /// Records this task's (process koid, thread koid) pair in the kernel's tracing table, if
    /// that table is enabled.
    pub fn record_pid_koid_mapping(&self) {
        let Ok(live) = self.live() else {
            log_warn!("Cannot record pid/koid mapping for dead task {}", self.get_tid());
            return;
        };

        let Some(ref mapping_table) = *self.kernel().pid_to_koid_mapping.read() else { return };

        let pkoid = self.thread_group().get_process_koid().ok();
        let tkoid = live.thread.read().as_ref().and_then(|t| t.koid().ok());
        mapping_table.write().insert(self.tid, KoidPair { process: pkoid, thread: tkoid });
    }
1518}
1519
/// Teardown of a `Task`: runs with the exiting thread's final register state, the release lock
/// level, and the (write-locked) pid table.
impl Releasable for Task {
    type Context<'a> = (
        ThreadState<RegisterStorageEnum>,
        &'a mut Locked<TaskRelease>,
        RwLockWriteGuard<'a, PidTable>,
    );

    fn release<'a>(mut self, context: Self::Context<'a>) {
        let (thread_state, locked, pids) = context;

        *self.proc_pid_directory_cache.get_mut() = None;
        self.ptrace_disconnect(&pids);

        // The pid table guard is no longer needed; drop it before the remaining teardown.
        std::mem::drop(pids);

        self.signal_vfork();

        // Drop fields that can end up owning a FsNode to ensure no FsNode are owned by this task.
        if let Ok(live) = self.live() {
            live.files.release();
            live.mm.update(None);
        }
        self.live_state.update(None);

        // Rebuild a temporary CurrentTask to run the release actions that requires a CurrentState.
        let current_task = CurrentTask::new(OwnedRef::new(self), thread_state.into());

        // Apply any delayed releasers left.
        current_task.trigger_delayed_releaser(locked);

        // Drop the task now that is has been released. This requires to take it from the OwnedRef
        // and from the resulting ReleaseGuard.
        let CurrentTask { mut task, .. } = current_task;
        let task = OwnedRef::take(&mut task).expect("task should not have been re-owned");
        let _task: Self = ReleaseGuard::take(task);
    }
}
1557
1558impl MemoryAccessor for Task {
    /// Reads into `bytes` via the memory manager's syscall (out-of-process) path.
    fn read_memory<'a>(
        &self,
        addr: UserAddress,
        bytes: &'a mut [MaybeUninit<u8>],
    ) -> Result<&'a mut [u8], Errno> {
        // Using a `Task` to read memory generally indicates that the memory
        // is being read from a task different than the `CurrentTask`. When
        // this `Task` is not current, its address space is not mapped
        // so we need to go through the VMO.
        self.mm()?.syscall_read_memory(addr, bytes)
    }

    /// Like `read_memory`, but stops early at the first NUL byte.
    fn read_memory_partial_until_null_byte<'a>(
        &self,
        addr: UserAddress,
        bytes: &'a mut [MaybeUninit<u8>],
    ) -> Result<&'a mut [u8], Errno> {
        // Using a `Task` to read memory generally indicates that the memory
        // is being read from a task different than the `CurrentTask`. When
        // this `Task` is not current, its address space is not mapped
        // so we need to go through the VMO.
        self.mm()?.syscall_read_memory_partial_until_null_byte(addr, bytes)
    }

    /// Like `read_memory`, but may return fewer bytes than requested.
    fn read_memory_partial<'a>(
        &self,
        addr: UserAddress,
        bytes: &'a mut [MaybeUninit<u8>],
    ) -> Result<&'a mut [u8], Errno> {
        // Using a `Task` to read memory generally indicates that the memory
        // is being read from a task different than the `CurrentTask`. When
        // this `Task` is not current, its address space is not mapped
        // so we need to go through the VMO.
        self.mm()?.syscall_read_memory_partial(addr, bytes)
    }

    /// Writes `bytes` via the memory manager's syscall (out-of-process) path.
    fn write_memory(&self, addr: UserAddress, bytes: &[u8]) -> Result<usize, Errno> {
        // Using a `Task` to write memory generally indicates that the memory
        // is being written to a task different than the `CurrentTask`. When
        // this `Task` is not current, its address space is not mapped
        // so we need to go through the VMO.
        self.mm()?.syscall_write_memory(addr, bytes)
    }

    /// Like `write_memory`, but may write fewer bytes than requested.
    fn write_memory_partial(&self, addr: UserAddress, bytes: &[u8]) -> Result<usize, Errno> {
        // Using a `Task` to write memory generally indicates that the memory
        // is being written to a task different than the `CurrentTask`. When
        // this `Task` is not current, its address space is not mapped
        // so we need to go through the VMO.
        self.mm()?.syscall_write_memory_partial(addr, bytes)
    }
1610
1611    fn zero(&self, addr: UserAddress, length: usize) -> Result<usize, Errno> {
1612        // Using a `Task` to zero memory generally indicates that the memory
1613        // is being zeroed from a task different than the `CurrentTask`. When
1614        // this `Task` is not current, its address space is not mapped
1615        // so we need to go through the VMO.
1616        self.mm()?.syscall_zero(addr, length)
1617    }
1618}
1619
1620impl TaskMemoryAccessor for Task {
1621    fn maximum_valid_address(&self) -> Option<UserAddress> {
1622        self.mm().map(|mm| mm.maximum_valid_user_address).ok()
1623    }
1624}
1625
1626impl fmt::Debug for Task {
1627    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1628        write!(
1629            f,
1630            "{}:{}[{}]",
1631            self.thread_group().leader,
1632            self.tid,
1633            self.persistent_info.command.lock()
1634        )
1635    }
1636}
1637
1638impl cmp::PartialEq for Task {
1639    fn eq(&self, other: &Self) -> bool {
1640        let ptr: *const Task = self;
1641        let other_ptr: *const Task = other;
1642        ptr == other_ptr
1643    }
1644}
1645
// Equality is pointer identity (see `PartialEq` above), which is reflexive,
// symmetric, and transitive, so `Eq`'s contract holds.
impl cmp::Eq for Task {}
1647
#[cfg(test)]
mod test {
    use super::*;
    use crate::security;
    use crate::testing::*;
    use starnix_uapi::auth::{CAP_SYS_ADMIN, Capabilities};
    use starnix_uapi::resource_limits::Resource;
    use starnix_uapi::signals::SIGCHLD;
    use starnix_uapi::{CLONE_SIGHAND, CLONE_THREAD, CLONE_VM, rlimit};

    /// The initial task gets tid 1 and later tasks get strictly larger tids;
    /// both must be resolvable through the kernel's pid table.
    #[::fuchsia::test]
    async fn test_tid_allocation() {
        spawn_kernel_and_run(async |locked, current_task| {
            let kernel = current_task.kernel();
            assert_eq!(current_task.get_tid(), 1);

            let second_task = create_task(locked, &kernel, "another-task");
            let second_tid = second_task.get_tid();
            assert!(second_tid >= 2);

            let pids = kernel.pids.read();
            assert_eq!(pids.get_task(1).upgrade().unwrap().get_tid(), 1);
            assert_eq!(pids.get_task(second_tid).upgrade().unwrap().get_tid(), second_tid);
        })
        .await;
    }

    /// A CLONE_THREAD clone shares the pid (and thread-group leader) while a
    /// plain fork-style clone gets a fresh pid whose parent is the cloner.
    #[::fuchsia::test]
    async fn test_clone_pid_and_parent_pid() {
        spawn_kernel_and_run(async |locked, current_task| {
            let sibling = current_task.clone_task_for_test(
                locked,
                (CLONE_THREAD | CLONE_VM | CLONE_SIGHAND) as u64,
                Some(SIGCHLD),
            );
            // Same process, distinct thread.
            assert_eq!(current_task.get_pid(), sibling.get_pid());
            assert_ne!(current_task.get_tid(), sibling.get_tid());
            assert_eq!(current_task.thread_group().leader, sibling.thread_group().leader);

            // A fork-style clone is a new process parented to us.
            let child = current_task.clone_task_for_test(locked, 0, Some(SIGCHLD));
            assert_ne!(current_task.get_pid(), child.get_pid());
            assert_ne!(current_task.get_tid(), child.get_tid());
            assert_eq!(current_task.get_pid(), child.thread_group().read().get_ppid());
        })
        .await;
    }

    /// The initial task runs as root with CAP_SYS_ADMIN; dropping to an
    /// unprivileged uid/gid must drop that capability.
    #[::fuchsia::test]
    async fn test_root_capabilities() {
        spawn_kernel_and_run(async |_, current_task| {
            assert!(security::is_task_capable_noaudit(current_task, CAP_SYS_ADMIN));
            assert_eq!(current_task.real_creds().cap_inheritable, Capabilities::empty());

            current_task.set_creds(Credentials::with_ids(1, 1));
            assert!(!security::is_task_capable_noaudit(current_task, CAP_SYS_ADMIN));
        })
        .await;
    }

    /// rlimits set on a thread group must be inherited by cloned children.
    #[::fuchsia::test]
    async fn test_clone_rlimit() {
        spawn_kernel_and_run(async |locked, current_task| {
            let initial_fsize = current_task.thread_group().get_rlimit(locked, Resource::FSIZE);
            assert_ne!(initial_fsize, 10);

            current_task
                .thread_group()
                .limits
                .lock(locked)
                .set(Resource::FSIZE, rlimit { rlim_cur: 10, rlim_max: 100 });
            let updated_fsize = current_task.thread_group().get_rlimit(locked, Resource::FSIZE);
            assert_eq!(updated_fsize, 10);

            let child = current_task.clone_task_for_test(locked, 0, Some(SIGCHLD));
            let child_fsize = child.thread_group().get_rlimit(locked, Resource::FSIZE);
            assert_eq!(child_fsize, 10);
        })
        .await;
    }
}