1use crate::mm::{MemoryAccessor, MemoryAccessorExt, MemoryManager, TaskMemoryAccessor};
6use crate::mutable_state::{state_accessor, state_implementation};
7use crate::ptrace::{
8 AtomicStopState, PtraceEvent, PtraceEventData, PtraceState, PtraceStatus, StopState,
9};
10use crate::signals::{KernelSignal, RunState, SignalDetail, SignalInfo, SignalState};
11use crate::task::memory_attribution::MemoryAttributionLifecycleEvent;
12use crate::task::tracing::KoidPair;
13use crate::task::{
14 AbstractUnixSocketNamespace, AbstractVsockSocketNamespace, CurrentTask, EventHandler, Kernel,
15 NormalPriority, PidTable, ProcessEntryRef, ProcessExitInfo, RealtimePriority, SchedulerState,
16 SchedulingPolicy, SeccompFilterContainer, SeccompState, SeccompStateValue, ThreadGroup,
17 ThreadGroupKey, ThreadState, UtsNamespaceHandle, WaitCanceler, Waiter, ZombieProcess,
18};
19use crate::vfs::{FdTable, FsContext, FsNodeHandle, FsString};
20use atomic_bitflags::atomic_bitflags;
21use fuchsia_rcu::{RcuArc, RcuOptionArc, RcuReadGuard};
22use macro_rules_attribute::apply;
23use starnix_logging::{log_warn, set_zx_name};
24use starnix_registers::{HeapRegs, RegisterStorageEnum};
25use starnix_sync::{
26 LockBefore, Locked, Mutex, MutexGuard, RwLock, RwLockReadGuard, RwLockWriteGuard, TaskRelease,
27 TerminalLock,
28};
29use starnix_task_command::TaskCommand;
30use starnix_types::arch::ArchWidth;
31use starnix_types::ownership::{OwnedRef, Releasable, ReleaseGuard, TempRef, WeakRef};
32use starnix_types::stats::TaskTimeStats;
33use starnix_uapi::auth::{Credentials, FsCred};
34use starnix_uapi::errors::Errno;
35use starnix_uapi::signals::{SIGCHLD, SigSet, Signal, sigaltstack_contains_pointer};
36use starnix_uapi::user_address::{
37 ArchSpecific, MappingMultiArchUserRef, UserAddress, UserCString, UserRef,
38};
39use starnix_uapi::{
40 CLD_CONTINUED, CLD_DUMPED, CLD_EXITED, CLD_KILLED, CLD_STOPPED, CLD_TRAPPED,
41 FUTEX_BITSET_MATCH_ANY, errno, error, from_status_like_fdio, pid_t, sigaction_t, sigaltstack,
42 tid_t, uapi,
43};
44use std::collections::VecDeque;
45use std::mem::MaybeUninit;
46use std::ops::Deref;
47use std::sync::atomic::{AtomicBool, Ordering};
48use std::sync::{Arc, Weak};
49use std::{cmp, fmt};
50use zx::{Signals, Task as _};
51
/// The exit disposition of a task, covering every state reportable through
/// the wait(2) family of syscalls.
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum ExitStatus {
    /// Normal exit with the given exit code.
    Exit(u8),
    /// Terminated by the signal described in the `SignalInfo`.
    Kill(SignalInfo),
    /// Terminated by a signal, and a core dump was produced.
    CoreDump(SignalInfo),
    /// Stopped by a signal; the `PtraceEvent` qualifies ptrace-reported stops.
    Stop(SignalInfo, PtraceEvent),
    /// Resumed after a stop; the `PtraceEvent` qualifies ptrace reporting.
    Continue(SignalInfo, PtraceEvent),
}
impl ExitStatus {
    /// Encodes this exit status as a wait(2)-style status word.
    ///
    /// Layout (matching the Linux wait status encoding):
    /// - exited:    exit code in bits 8..16
    /// - killed:    terminating signal number in the low bits
    /// - core dump: as killed, with the core flag (0x80) ORed in
    /// - stopped:   0x7f in the low byte, signal in bits 8..16, and the
    ///              ptrace event (if any) in bits 16..24
    /// - continued: 0xffff, unless a ptrace event needs to be reported
    pub fn wait_status(&self) -> i32 {
        match self {
            ExitStatus::Exit(status) => (*status as i32) << 8,
            ExitStatus::Kill(siginfo) => siginfo.signal.number() as i32,
            ExitStatus::CoreDump(siginfo) => (siginfo.signal.number() as i32) | 0x80,
            ExitStatus::Continue(siginfo, trace_event) => {
                let trace_event_val = *trace_event as u32;
                if trace_event_val != 0 {
                    // A ptrace event is pending: encode it in bits 16..24 so the
                    // tracer can decode which event fired.
                    (siginfo.signal.number() as i32) | (trace_event_val << 16) as i32
                } else {
                    0xffff
                }
            }
            ExitStatus::Stop(siginfo, trace_event) => {
                let trace_event_val = *trace_event as u32;
                (0x7f + ((siginfo.signal.number() as i32) << 8)) | (trace_event_val << 16) as i32
            }
        }
    }

    /// The `si_code` value for a SIGCHLD siginfo describing this exit.
    pub fn signal_info_code(&self) -> i32 {
        match self {
            ExitStatus::Exit(_) => CLD_EXITED as i32,
            ExitStatus::Kill(_) => CLD_KILLED as i32,
            ExitStatus::CoreDump(_) => CLD_DUMPED as i32,
            ExitStatus::Stop(_, _) => CLD_STOPPED as i32,
            ExitStatus::Continue(_, _) => CLD_CONTINUED as i32,
        }
    }

    /// The `si_status` value for a SIGCHLD siginfo describing this exit:
    /// the exit code for normal exit, the signal number otherwise.
    pub fn signal_info_status(&self) -> i32 {
        match self {
            ExitStatus::Exit(status) => *status as i32,
            ExitStatus::Kill(siginfo)
            | ExitStatus::CoreDump(siginfo)
            | ExitStatus::Continue(siginfo, _)
            | ExitStatus::Stop(siginfo, _) => siginfo.signal.number() as i32,
        }
    }
}
105
atomic_bitflags! {
    // Per-task flags that can be read and updated atomically, without holding
    // the task's mutable-state lock.
    #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
    pub struct TaskFlags: u8 {
        // The task has exited; its `exit_status` is (being) recorded.
        const EXITED = 0x1;
        // At least one regular signal is queued on this task (kept in sync by
        // the enqueue/take helpers on `TaskMutableState`).
        const SIGNALS_AVAILABLE = 0x2;
        // The current signal mask is temporary and will be restored later
        // (see `set_temporary_signal_mask` / `restore_signal_mask`).
        const TEMPORARY_SIGNAL_MASK = 0x4;
        // Presumably: the task should produce a core dump on exit —
        // TODO(review): confirm against the consumers of this flag.
        const DUMP_ON_EXIT = 0x8;
        // At least one kernel-internal signal is queued on this task.
        const KERNEL_SIGNALS_AVAILABLE = 0x10;
    }
}
118
/// A snapshot of a task's thread state, captured when the task stops (see
/// `TaskMutableState::copy_state_from`).
pub struct CapturedThreadState {
    /// The captured thread state, snapshotted from the running task.
    pub thread_state: ThreadState<HeapRegs>,

    /// Whether the snapshot has been modified since capture (initialized to
    /// `false` on capture) — presumably set when a tracer edits registers;
    /// TODO(review): confirm against the consumers.
    pub dirty: bool,
}
134
impl ArchSpecific for CapturedThreadState {
    // Delegates to the captured thread state.
    fn is_arch32(&self) -> bool {
        self.thread_state.is_arch32()
    }
}
140
/// An entry in a userspace robust-futex list (see set_robust_list(2)).
#[derive(Debug)]
pub struct RobustList {
    /// User pointer to the next entry in the list.
    pub next: RobustListPtr,
}

/// User pointer to a `RobustList`, resolving to the 64-bit or 32-bit uapi
/// layout depending on the task's architecture.
pub type RobustListPtr =
    MappingMultiArchUserRef<RobustList, uapi::robust_list, uapi::arch32::robust_list>;
148
149impl From<uapi::robust_list> for RobustList {
150 fn from(robust_list: uapi::robust_list) -> Self {
151 Self { next: RobustListPtr::from(robust_list.next) }
152 }
153}
154
#[cfg(target_arch = "aarch64")]
impl From<uapi::arch32::robust_list> for RobustList {
    /// Lifts the raw 32-bit (arch32) uapi representation into the kernel-side type.
    fn from(value: uapi::arch32::robust_list) -> Self {
        RobustList { next: value.next.into() }
    }
}
161
/// The head of a userspace robust-futex list (see set_robust_list(2)).
#[derive(Debug)]
pub struct RobustListHead {
    /// First entry of the list.
    pub list: RobustList,
    /// Offset from each list entry to the futex word it guards.
    pub futex_offset: isize,
}

/// User pointer to a `RobustListHead`, resolving to the 64-bit or 32-bit uapi
/// layout depending on the task's architecture.
pub type RobustListHeadPtr =
    MappingMultiArchUserRef<RobustListHead, uapi::robust_list_head, uapi::arch32::robust_list_head>;
170
171impl From<uapi::robust_list_head> for RobustListHead {
172 fn from(robust_list_head: uapi::robust_list_head) -> Self {
173 Self {
174 list: robust_list_head.list.into(),
175 futex_offset: robust_list_head.futex_offset as isize,
176 }
177 }
178}
179
#[cfg(target_arch = "aarch64")]
impl From<uapi::arch32::robust_list_head> for RobustListHead {
    /// Converts the raw 32-bit (arch32) uapi head into the kernel-side representation.
    fn from(head: uapi::arch32::robust_list_head) -> Self {
        let futex_offset = head.futex_offset as isize;
        RobustListHead { list: head.list.into(), futex_offset }
    }
}
189
/// The portion of a task's state protected by the task's state lock
/// (`Task::mutable_state`, accessed via `Task::read()` / `Task::write()`).
pub struct TaskMutableState {
    /// Address cleared and futex-woken when the task exits; see
    /// set_tid_address(2) and `Task::clear_child_tid_if_needed`.
    pub clear_child_tid: UserRef<tid_t>,

    /// Per-task signal state: mask, queued signals, run state, altstack.
    signals: SignalState,

    /// Kernel-internal signals queued on this task.
    kernel_signals: VecDeque<KernelSignal>,

    /// The exit status, recorded when the task exits.
    exit_status: Option<ExitStatus>,

    /// Scheduling policy/priority state for this task.
    pub scheduler_state: SchedulerState,

    /// The UTS namespace (hostname/domainname) the task belongs to.
    pub uts_ns: UtsNamespaceHandle,

    /// PR_SET_NO_NEW_PRIVS: once set, can never be cleared (see
    /// `enable_no_new_privs`).
    no_new_privs: bool,

    /// The task's OOM score adjustment.
    pub oom_score_adj: i32,

    /// Seccomp filters installed on this task.
    pub seccomp_filters: SeccompFilterContainer,

    /// User pointer to the robust-futex list head (set_robust_list(2)).
    pub robust_list_head: RobustListHeadPtr,

    /// Current timer slack, in nanoseconds (PR_SET_TIMERSLACK).
    pub timerslack_ns: u64,

    /// The slack restored when a value of 0 is requested (see
    /// `set_timerslack_ns`).
    pub default_timerslack_ns: u64,

    /// Ptrace bookkeeping; present only while the task is traced.
    pub ptrace: Option<Box<PtraceState>>,

    /// Thread state captured while the task is stopped, if any.
    pub captured_thread_state: Option<Box<CapturedThreadState>>,
}
264
265impl TaskMutableState {
266 pub fn no_new_privs(&self) -> bool {
267 self.no_new_privs
268 }
269
270 pub fn enable_no_new_privs(&mut self) {
273 self.no_new_privs = true;
274 }
275
276 pub fn get_timerslack<T: zx::Timeline>(&self) -> zx::Duration<T> {
277 zx::Duration::from_nanos(self.timerslack_ns as i64)
278 }
279
280 pub fn set_timerslack_ns(&mut self, ns: u64) {
284 if ns == 0 {
285 self.timerslack_ns = self.default_timerslack_ns;
286 } else {
287 self.timerslack_ns = ns;
288 }
289 }
290
291 pub fn is_ptraced(&self) -> bool {
292 self.ptrace.is_some()
293 }
294
295 pub fn is_ptrace_listening(&self) -> bool {
296 self.ptrace.as_ref().is_some_and(|ptrace| ptrace.stop_status == PtraceStatus::Listening)
297 }
298
299 pub fn ptrace_on_signal_consume(&mut self) -> bool {
300 self.ptrace.as_mut().is_some_and(|ptrace: &mut Box<PtraceState>| {
301 if ptrace.stop_status.is_continuing() {
302 ptrace.stop_status = PtraceStatus::Default;
303 false
304 } else {
305 true
306 }
307 })
308 }
309
310 pub fn notify_ptracers(&mut self) {
311 if let Some(ptrace) = &self.ptrace {
312 ptrace.tracer_waiters().notify_all();
313 }
314 }
315
316 pub fn wait_on_ptracer(&self, waiter: &Waiter) {
317 if let Some(ptrace) = &self.ptrace {
318 ptrace.tracee_waiters.wait_async(&waiter);
319 }
320 }
321
322 pub fn notify_ptracees(&mut self) {
323 if let Some(ptrace) = &self.ptrace {
324 ptrace.tracee_waiters.notify_all();
325 }
326 }
327
328 pub fn take_captured_state(&mut self) -> Option<Box<CapturedThreadState>> {
329 if self.captured_thread_state.is_some() {
330 let mut state = None;
331 std::mem::swap(&mut state, &mut self.captured_thread_state);
332 return state;
333 }
334 None
335 }
336
337 pub fn copy_state_from(&mut self, current_task: &CurrentTask) {
338 self.captured_thread_state = Some(Box::new(CapturedThreadState {
339 thread_state: current_task.thread_state.extended_snapshot::<HeapRegs>(),
340 dirty: false,
341 }));
342 }
343
344 pub fn signal_mask(&self) -> SigSet {
346 self.signals.mask()
347 }
348
349 pub fn is_signal_masked(&self, signal: Signal) -> bool {
351 self.signals.mask().has_signal(signal)
352 }
353
354 pub fn is_signal_masked_by_saved_mask(&self, signal: Signal) -> bool {
358 self.signals.saved_mask().is_some_and(|mask| mask.has_signal(signal))
359 }
360
361 pub fn restore_signal_mask(&mut self) {
364 self.signals.restore_mask();
365 }
366
367 pub fn is_blocked(&self) -> bool {
369 self.signals.run_state.is_blocked()
370 }
371
372 pub fn set_run_state(&mut self, run_state: RunState) {
374 self.signals.run_state = run_state;
375 }
376
377 pub fn run_state(&self) -> RunState {
378 self.signals.run_state.clone()
379 }
380
381 pub fn on_signal_stack(&self, stack_pointer_register: u64) -> bool {
382 self.signals
383 .alt_stack
384 .map(|signal_stack| sigaltstack_contains_pointer(&signal_stack, stack_pointer_register))
385 .unwrap_or(false)
386 }
387
388 pub fn set_sigaltstack(&mut self, stack: Option<sigaltstack>) {
389 self.signals.alt_stack = stack;
390 }
391
392 pub fn sigaltstack(&self) -> Option<sigaltstack> {
393 self.signals.alt_stack
394 }
395
396 pub fn wait_on_signal(&mut self, waiter: &Waiter) {
397 self.signals.signal_wait.wait_async(waiter);
398 }
399
400 pub fn signals_mut(&mut self) -> &mut SignalState {
401 &mut self.signals
402 }
403
404 pub fn wait_on_signal_fd_events(
405 &self,
406 waiter: &Waiter,
407 mask: SigSet,
408 handler: EventHandler,
409 ) -> WaitCanceler {
410 self.signals.signal_wait.wait_async_signal_mask(waiter, mask, handler)
411 }
412
413 pub fn notify_signal_waiters(&self, signal: &Signal) {
414 self.signals.signal_wait.notify_signal(signal);
415 }
416
417 pub fn thaw(&mut self) {
419 if let RunState::Frozen(waiter) = self.run_state() {
420 waiter.notify();
421 }
422 }
423
424 pub fn is_frozen(&self) -> bool {
425 matches!(self.run_state(), RunState::Frozen(_))
426 }
427
428 #[cfg(test)]
429 pub fn kernel_signals_for_test(&self) -> &VecDeque<KernelSignal> {
430 &self.kernel_signals
431 }
432}
433
#[apply(state_implementation!)]
impl TaskMutableState<Base = Task> {
    /// Transitions the task's stop state, capturing thread state and
    /// notifying ptrace waiters as needed.
    ///
    /// The transition is ignored when `stopped` is a ptrace-only state and
    /// the task is untraced, or when the current stop state disallows it.
    pub fn set_stopped(
        &mut self,
        stopped: StopState,
        siginfo: Option<SignalInfo>,
        current_task: Option<&CurrentTask>,
        event: Option<PtraceEventData>,
    ) {
        if stopped.ptrace_only() && self.ptrace.is_none() {
            return;
        }

        if self.base.load_stopped().is_illegal_transition(stopped) {
            return;
        }

        self.store_stopped(stopped);
        // Capture the thread state so tracers can inspect it while stopped.
        if stopped.is_stopped() {
            if let Some(ref current_task) = current_task {
                self.copy_state_from(current_task);
            }
        }
        if let Some(ptrace) = &mut self.ptrace {
            ptrace.set_last_signal(siginfo);
            ptrace.set_last_event(event);
        }
        if stopped == StopState::Waking || stopped == StopState::ForceWaking {
            self.notify_ptracees();
        }
        if !stopped.is_in_progress() {
            self.notify_ptracers();
        }
    }

    /// Queues `signal` on this task and refreshes SIGNALS_AVAILABLE.
    pub fn enqueue_signal(&mut self, signal: SignalInfo) {
        self.signals.enqueue(signal);
        self.set_flags(TaskFlags::SIGNALS_AVAILABLE, self.signals.is_any_pending());
    }

    /// Queues `signal` for priority delivery.
    ///
    /// NOTE(review): this body is currently identical to `enqueue_signal`;
    /// confirm whether front-of-queue ordering is handled inside
    /// `SignalState::enqueue` or is yet to be implemented.
    pub fn enqueue_signal_front(&mut self, signal: SignalInfo) {
        self.signals.enqueue(signal);
        self.set_flags(TaskFlags::SIGNALS_AVAILABLE, self.signals.is_any_pending());
    }

    /// Replaces the signal mask and refreshes SIGNALS_AVAILABLE.
    pub fn set_signal_mask(&mut self, mask: SigSet) {
        self.signals.set_mask(mask);
        self.set_flags(TaskFlags::SIGNALS_AVAILABLE, self.signals.is_any_pending());
    }

    /// Installs a temporary mask (later undone by `restore_signal_mask`) and
    /// refreshes SIGNALS_AVAILABLE.
    pub fn set_temporary_signal_mask(&mut self, mask: SigSet) {
        self.signals.set_temporary_mask(mask);
        self.set_flags(TaskFlags::SIGNALS_AVAILABLE, self.signals.is_any_pending());
    }

    /// Count of signals queued against this task plus its thread group.
    pub fn pending_signal_count(&self) -> usize {
        self.signals.num_queued() + self.base.thread_group().num_signals_queued()
    }

    /// Whether `signal` is queued against this task or its thread group.
    pub fn has_signal_pending(&self, signal: Signal) -> bool {
        self.signals.has_queued(signal) || self.base.thread_group().has_signal_queued(signal)
    }

    /// When entering a stop, builds the CLD_TRAPPED SIGCHLD notification for
    /// the tracer. Returns the tracer's thread group and the siginfo, or
    /// `None` when no notification is needed.
    pub fn prepare_signal_info(
        &mut self,
        stopped: StopState,
    ) -> Option<(Weak<ThreadGroup>, SignalInfo)> {
        if !stopped.is_stopped() {
            return None;
        }

        if let Some(ptrace) = &self.ptrace {
            if let Some(last_signal) = ptrace.get_last_signal_ref() {
                let signal_info = SignalInfo::with_detail(
                    SIGCHLD,
                    CLD_TRAPPED as i32,
                    SignalDetail::SIGCHLD {
                        pid: self.base.tid,
                        uid: self.base.real_creds().uid,
                        status: last_signal.signal.number() as i32,
                    },
                );

                return Some((ptrace.core_state.thread_group.clone(), signal_info));
            }
        }

        None
    }

    /// Attaches (`Some`) or detaches (`None`) a tracer.
    ///
    /// Fails with EPERM if the task is already traced. On detach the task
    /// rejoins any group stop currently in progress in its thread group.
    pub fn set_ptrace(&mut self, tracer: Option<Box<PtraceState>>) -> Result<(), Errno> {
        if tracer.is_some() && self.ptrace.is_some() {
            return error!(EPERM);
        }

        if tracer.is_none() {
            if let Ok(tg_stop_state) = self.base.thread_group().load_stopped().as_in_progress() {
                self.set_stopped(tg_stop_state, None, None, None);
            }
        }
        self.ptrace = tracer;
        Ok(())
    }

    /// Whether a tracer may issue ptrace commands right now: the task is
    /// stopped (not waking/awake), traced, and not in PTRACE_LISTEN.
    pub fn can_accept_ptrace_commands(&mut self) -> bool {
        !self.base.load_stopped().is_waking_or_awake()
            && self.is_ptraced()
            && !self.is_ptrace_listening()
    }

    // Raw store of the stop state; `set_stopped` validates transitions first.
    fn store_stopped(&mut self, state: StopState) {
        self.base.stop_state.store(state, Ordering::Relaxed)
    }

    /// Clears `clear` and sets `set` in the task flags. The two sets must be
    /// disjoint.
    pub fn update_flags(&mut self, clear: TaskFlags, set: TaskFlags) {
        debug_assert_eq!(clear ^ set, clear | set);
        // Read-modify-write; the second assert verifies that no concurrent
        // writer raced with us (flag updates are expected to happen while
        // holding the task's state lock, which `&mut self` implies here).
        let observed = self.base.flags();
        let swapped = self.base.flags.swap((observed | set) & !clear, Ordering::Relaxed);
        debug_assert_eq!(swapped, observed);
    }

    /// Sets (`v == true`) or clears a single flag.
    pub fn set_flags(&mut self, flag: TaskFlags, v: bool) {
        let (clear, set) = if v { (TaskFlags::empty(), flag) } else { (flag, TaskFlags::empty()) };

        self.update_flags(clear, set);
    }

    /// Records the exit status and marks the task EXITED, replacing any
    /// previously recorded status.
    pub fn set_exit_status(&mut self, status: ExitStatus) {
        self.set_flags(TaskFlags::EXITED, true);
        self.exit_status = Some(status);
    }

    /// Records the exit status and marks the task EXITED, keeping any
    /// previously recorded status.
    pub fn set_exit_status_if_not_already(&mut self, status: ExitStatus) {
        self.set_flags(TaskFlags::EXITED, true);
        self.exit_status.get_or_insert(status);
    }

    /// All signals pending against this task or its thread group.
    pub fn pending_signals(&self) -> SigSet {
        self.signals.pending() | self.base.thread_group().get_pending_signals()
    }

    /// Signals pending against this task only.
    pub fn task_specific_pending_signals(&self) -> SigSet {
        self.signals.pending()
    }

    /// Whether any pending signal (task or thread group) is deliverable
    /// under `mask`.
    pub fn is_any_signal_allowed_by_mask(&self, mask: SigSet) -> bool {
        self.signals.is_any_allowed_by_mask(mask)
            || self.base.thread_group().is_any_signal_allowed_by_mask(mask)
    }

    /// Whether this task has any pending signal, or its thread group has one
    /// deliverable under the task's current mask.
    pub fn is_any_signal_pending(&self) -> bool {
        let mask = self.signal_mask();
        self.signals.is_any_pending()
            || self.base.thread_group().is_any_signal_allowed_by_mask(mask)
    }

    // Dequeues the first signal matching `predicate`, consulting the thread
    // group's shared queue before the task's own, and refreshing
    // SIGNALS_AVAILABLE when the task queue was touched.
    fn take_next_signal_where<F>(&mut self, predicate: F) -> Option<SignalInfo>
    where
        F: Fn(&SignalInfo) -> bool,
    {
        if let Some(signal) = self.base.thread_group().take_next_signal_where(&predicate) {
            Some(signal)
        } else {
            let s = self.signals.take_next_where(&predicate);
            self.set_flags(TaskFlags::SIGNALS_AVAILABLE, self.signals.is_any_pending());
            s
        }
    }

    /// Dequeues the first pending instance of `siginfo.signal`, unless that
    /// signal is currently masked.
    pub fn take_specific_signal(&mut self, siginfo: SignalInfo) -> Option<SignalInfo> {
        let signal_mask = self.signal_mask();
        if signal_mask.has_signal(siginfo.signal) {
            return None;
        }

        let predicate = |s: &SignalInfo| s.signal == siginfo.signal;
        self.take_next_signal_where(predicate)
    }

    /// Dequeues the first signal deliverable under the current mask.
    pub fn take_any_signal(&mut self) -> Option<SignalInfo> {
        self.take_signal_with_mask(self.signal_mask())
    }

    /// Dequeues the first signal not blocked by `signal_mask`; forced
    /// signals bypass the mask.
    pub fn take_signal_with_mask(&mut self, signal_mask: SigSet) -> Option<SignalInfo> {
        let predicate = |s: &SignalInfo| !signal_mask.has_signal(s.signal) || s.force;
        self.take_next_signal_where(predicate)
    }

    /// Queues a kernel-internal signal and sets KERNEL_SIGNALS_AVAILABLE.
    pub fn enqueue_kernel_signal(&mut self, signal: KernelSignal) {
        self.kernel_signals.push_back(signal);
        self.set_flags(TaskFlags::KERNEL_SIGNALS_AVAILABLE, true);
    }

    /// Dequeues the next kernel-internal signal, clearing
    /// KERNEL_SIGNALS_AVAILABLE once the queue drains.
    pub fn take_kernel_signal(&mut self) -> Option<KernelSignal> {
        let signal = self.kernel_signals.pop_front();
        if self.kernel_signals.is_empty() {
            self.set_flags(TaskFlags::KERNEL_SIGNALS_AVAILABLE, false);
        }
        signal
    }

    #[cfg(test)]
    pub fn queued_signal_count(&self, signal: Signal) -> usize {
        self.signals.queued_count(signal)
            + self.base.thread_group().pending_signals.lock().queued_count(signal)
    }
}
688
/// State that exists only while the task is alive and is torn down when the
/// task is released.
pub struct TaskLiveState {
    /// The Zircon thread backing this task, if one has been created.
    pub thread: RwLock<Option<Arc<zx::Thread>>>,

    /// The file descriptor table.
    pub files: FdTable,

    /// The memory manager; absent for tasks without an address space —
    /// TODO(review): confirm which tasks run without one.
    pub mm: RcuOptionArc<MemoryManager>,

    /// The filesystem context.
    pub fs: RcuArc<FsContext>,

    /// Namespace for abstract AF_UNIX socket addresses.
    pub abstract_socket_namespace: Arc<AbstractUnixSocketNamespace>,

    /// Namespace for abstract AF_VSOCK socket addresses.
    pub abstract_vsock_namespace: Arc<AbstractVsockSocketNamespace>,
}
717
718impl TaskLiveState {
719 pub fn mm(&self) -> Result<Arc<MemoryManager>, Errno> {
720 self.mm.to_option_arc().ok_or_else(|| errno!(EINVAL))
721 }
722
723 pub fn fs(&self) -> Arc<FsContext> {
724 self.fs.to_arc()
725 }
726}
727
/// The lifecycle state reported for a task, as surfaced in `/proc`-style
/// status output (see `code_char` / `name`).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum TaskStateCode {
    // 'R'
    Running,

    // 'S'
    Sleeping,

    // 't'
    TracingStop,

    // 'Z'
    Zombie,
}
742
743impl TaskStateCode {
744 pub fn code_char(&self) -> char {
745 match self {
746 TaskStateCode::Running => 'R',
747 TaskStateCode::Sleeping => 'S',
748 TaskStateCode::TracingStop => 't',
749 TaskStateCode::Zombie => 'Z',
750 }
751 }
752
753 pub fn name(&self) -> &'static str {
754 match self {
755 TaskStateCode::Running => "running",
756 TaskStateCode::Sleeping => "sleeping",
757 TaskStateCode::TracingStop => "tracing stop",
758 TaskStateCode::Zombie => "zombie",
759 }
760 }
761}
762
/// Task information that remains available even after the task has been
/// released (e.g. for zombie bookkeeping and `/proc`).
#[derive(Debug)]
pub struct TaskPersistentInfoState {
    /// The thread id.
    tid: tid_t,
    /// Key identifying the thread group the task belongs to.
    thread_group_key: ThreadGroupKey,

    /// The task's command (comm) name.
    command: Mutex<TaskCommand>,

    /// The task's credentials, readable lock-free through RCU.
    creds: RcuArc<Credentials>,

    /// Serializes credential writers (and `lock_creds` readers) without
    /// slowing down plain RCU reads; see `lock_creds` / `write_creds`.
    creds_lock: RwLock<()>,
}
784
/// Read guard for a task's credentials that also holds the credentials lock,
/// guaranteeing no writer publishes an update while the guard is held.
pub struct CredentialsReadGuard<'a> {
    // Held to exclude writers for the guard's lifetime.
    _lock: RwLockReadGuard<'a, ()>,
    creds: RcuReadGuard<Credentials>,
}
790
791impl<'a> Deref for CredentialsReadGuard<'a> {
792 type Target = Credentials;
793
794 fn deref(&self) -> &Self::Target {
795 self.creds.deref()
796 }
797}
798
/// Write guard for a task's credentials. While held, it excludes all other
/// writers and all `lock_creds` readers.
pub struct CredentialsWriteGuard<'a> {
    // Held to exclude other writers and `lock_creds` readers.
    _lock: RwLockWriteGuard<'a, ()>,
    creds: &'a RcuArc<Credentials>,
}

impl<'a> CredentialsWriteGuard<'a> {
    /// Publishes a new set of credentials for the task.
    pub fn update(&mut self, creds: Arc<Credentials>) {
        self.creds.update(creds);
    }
}
811
impl TaskPersistentInfoState {
    // Builds the shared persistent-info handle for a new task.
    fn new(
        tid: tid_t,
        thread_group_key: ThreadGroupKey,
        command: TaskCommand,
        creds: Arc<Credentials>,
    ) -> TaskPersistentInfo {
        Arc::new(Self {
            tid,
            thread_group_key,
            command: Mutex::new(command),
            creds: RcuArc::new(creds),
            creds_lock: RwLock::new(()),
        })
    }

    /// The thread id.
    pub fn tid(&self) -> tid_t {
        self.tid
    }

    /// The process id of the owning thread group.
    pub fn pid(&self) -> pid_t {
        self.thread_group_key.pid()
    }

    /// Locks and returns the command (comm) name.
    pub fn command_guard(&self) -> MutexGuard<'_, TaskCommand> {
        self.command.lock()
    }

    /// Lock-free RCU read of the credentials; may observe a concurrent
    /// update. Use `lock_creds` to exclude writers.
    pub fn real_creds(&self) -> RcuReadGuard<Credentials> {
        self.creds.read()
    }

    /// A cloned `Arc` of the current credentials.
    pub fn clone_creds(&self) -> Arc<Credentials> {
        self.creds.to_arc()
    }

    /// Reads the credentials while holding the credentials lock, excluding
    /// concurrent writers for the guard's lifetime.
    pub fn lock_creds(&self) -> CredentialsReadGuard<'_> {
        let lock = self.creds_lock.read();
        CredentialsReadGuard { _lock: lock, creds: self.creds.read() }
    }

    /// Acquires write access to the credentials.
    ///
    /// # Safety
    ///
    /// Marked `unsafe` upstream; callers must uphold the crate's
    /// credential-update invariants (presumably that only appropriate code
    /// paths within `crate::task` rewrite a task's credentials) —
    /// TODO(review): confirm the exact contract.
    pub(in crate::task) unsafe fn write_creds(&self) -> CredentialsWriteGuard<'_> {
        let lock = self.creds_lock.write();
        CredentialsWriteGuard { _lock: lock, creds: &self.creds }
    }
}
866
/// Shared handle to a task's persistent info.
pub type TaskPersistentInfo = Arc<TaskPersistentInfoState>;
868
/// A Linux task (thread). Tasks sharing a thread group form a process.
pub struct Task {
    /// A weak reference to the task itself, for handing out `WeakRef<Task>`.
    pub weak_self: WeakRef<Self>,

    /// The thread id.
    pub tid: tid_t,

    /// Key of the thread group this task belongs to.
    pub thread_group_key: ThreadGroupKey,

    /// The kernel the task runs in.
    pub kernel: Arc<Kernel>,

    /// The thread group (process) the task belongs to.
    pub thread_group: Arc<ThreadGroup>,

    /// State that exists only while the task is alive; cleared on release.
    pub live_state: RcuOptionArc<TaskLiveState>,

    /// The task's stop state, readable without the state lock.
    stop_state: AtomicStopState,

    /// The task's flags, readable without the state lock.
    flags: AtomicTaskFlags,

    /// State guarded by the task's state lock (see `read()` / `write()`).
    mutable_state: RwLock<TaskMutableState>,

    /// Information that outlives the task (tid, command, credentials).
    pub persistent_info: TaskPersistentInfo,

    /// Event signaled via `signal_vfork` to release a vfork()ed parent —
    /// see `wait_for_execve`.
    vfork_event: Option<Arc<zx::Event>>,

    /// Per-task seccomp filter status.
    pub seccomp_filter_state: SeccompState,

    /// Whether this task's syscalls are being traced.
    pub trace_syscalls: AtomicBool,

    /// Cached `/proc/<pid>` directory node for this task.
    pub proc_pid_directory_cache: Mutex<Option<FsNodeHandle>>,
}
956
/// Decoded information about a page-fault exception.
#[derive(Debug)]
pub struct PageFaultExceptionReport {
    /// The address whose access faulted.
    pub faulting_address: u64,
    /// Whether the fault was caused by a non-present page.
    pub not_present: bool,
    /// Whether the faulting access was a write.
    pub is_write: bool,
    /// Whether the faulting access was an instruction fetch.
    pub is_execute: bool,
}
965
966impl Task {
    /// The kernel this task runs in.
    pub fn kernel(&self) -> &Arc<Kernel> {
        &self.kernel
    }

    /// The thread group this task belongs to.
    pub fn thread_group(&self) -> &Arc<ThreadGroup> {
        &self.thread_group
    }

    /// Whether this task shares the given address space. Two absent address
    /// spaces (this task has no memory manager and `other` is `None`)
    /// compare equal.
    pub fn has_same_address_space(&self, other: Option<&Arc<MemoryManager>>) -> bool {
        match (self.mm(), other) {
            (Ok(this), Some(other)) => Arc::ptr_eq(&this, other),
            (Err(_), None) => true,
            _ => false,
        }
    }

    /// Atomically reads the task's flags.
    pub fn flags(&self) -> TaskFlags {
        self.flags.load(Ordering::Relaxed)
    }
986
987 pub fn set_ptrace_zombie(&self, pids: &mut crate::task::PidTable) {
990 let pgid = self.thread_group().read().process_group.leader;
991 let exit_signal = self.thread_group().read().exit_signal.clone();
992 let mut state = self.write();
993 state.set_stopped(StopState::ForceAwake, None, None, None);
994 if let Some(ptrace) = &mut state.ptrace {
995 ptrace.last_signal_waitable = true;
997 let tracer_pid = ptrace.get_pid();
998 let tracer_tg = pids.get_thread_group(tracer_pid);
999 if let Some(tracer_tg) = tracer_tg {
1000 drop(state);
1001 let mut tracer_state = tracer_tg.write();
1002
1003 let exit_status = self.exit_status().unwrap_or_else(|| {
1004 starnix_logging::log_error!("Exiting without an exit code.");
1005 ExitStatus::Exit(u8::MAX)
1006 });
1007 let uid = self.real_creds().uid;
1008 let exit_info = ProcessExitInfo { status: exit_status, exit_signal };
1009 let zombie = ZombieProcess {
1010 thread_group_key: self.thread_group_key.clone(),
1011 pgid,
1012 uid,
1013 exit_info: exit_info,
1014 time_stats: TaskTimeStats::default(),
1016 is_canonical: false,
1017 };
1018
1019 tracer_state.zombie_ptracees.add(pids, self.tid, zombie);
1020 };
1021 }
1022 }
1023
    /// Detaches this task from its tracer, if any, and removes it from the
    /// tracer's ptracee table.
    pub fn ptrace_disconnect(&mut self, pids: &PidTable) {
        let mut state = self.write();
        let ptracer_pid = state.ptrace.as_ref().map(|ptrace| ptrace.get_pid());
        if let Some(ptracer_pid) = ptracer_pid {
            let _ = state.set_ptrace(None);
            if let Some(ProcessEntryRef::Process(tg)) = pids.get_process(ptracer_pid) {
                let tid = self.get_tid();
                // Release our state lock before touching the tracer's data.
                drop(state);
                tg.ptracees.lock().remove(&tid);
            }
        }
    }

    /// The task's exit status, or `None` if it has not exited yet.
    pub fn exit_status(&self) -> Option<ExitStatus> {
        self.is_exitted().then(|| self.read().exit_status.clone()).flatten()
    }

    /// Whether the task has exited. (Historical spelling of "exited".)
    pub fn is_exitted(&self) -> bool {
        self.flags().contains(TaskFlags::EXITED)
    }

    /// Atomically reads the task's stop state.
    pub fn load_stopped(&self) -> StopState {
        self.stop_state.load(Ordering::Relaxed)
    }

    /// Upgrades a weak task reference, failing with ESRCH if the task is
    /// gone.
    pub fn from_weak(weak: &WeakRef<Task>) -> Result<TempRef<'_, Task>, Errno> {
        weak.upgrade().ok_or_else(|| errno!(ESRCH))
    }
1054
    /// Assembles a new `Task` value from its constituent state.
    ///
    /// The task holds a weak reference to itself, hence the cyclic
    /// construction via `OwnedRef::new_cyclic`.
    #[allow(clippy::let_and_return)]
    pub fn new(
        tid: tid_t,
        command: TaskCommand,
        thread_group: Arc<ThreadGroup>,
        thread: Option<zx::Thread>,
        files: FdTable,
        mm: Option<Arc<MemoryManager>>,
        fs: Arc<FsContext>,
        creds: Arc<Credentials>,
        abstract_socket_namespace: Arc<AbstractUnixSocketNamespace>,
        abstract_vsock_namespace: Arc<AbstractVsockSocketNamespace>,
        signal_mask: SigSet,
        kernel_signals: VecDeque<KernelSignal>,
        vfork_event: Option<Arc<zx::Event>>,
        scheduler_state: SchedulerState,
        uts_ns: UtsNamespaceHandle,
        no_new_privs: bool,
        seccomp_filter_state: SeccompState,
        seccomp_filters: SeccompFilterContainer,
        robust_list_head: RobustListHeadPtr,
        timerslack_ns: u64,
    ) -> OwnedRef<Self> {
        let thread_group_key = ThreadGroupKey::from(&thread_group);
        OwnedRef::new_cyclic(|weak_self| {
            let task_live = Arc::new(TaskLiveState {
                thread: RwLock::new(thread.map(Arc::new)),
                files,
                mm: RcuOptionArc::new(mm),
                fs: RcuArc::new(fs),
                abstract_socket_namespace,
                abstract_vsock_namespace,
            });
            let task = Task {
                weak_self,
                tid,
                thread_group_key: thread_group_key.clone(),
                kernel: Arc::clone(&thread_group.kernel),
                thread_group,
                live_state: RcuOptionArc::new(Some(task_live)),
                vfork_event,
                stop_state: AtomicStopState::new(StopState::Awake),
                flags: AtomicTaskFlags::new(TaskFlags::empty()),
                mutable_state: RwLock::new(TaskMutableState {
                    clear_child_tid: UserRef::default(),
                    signals: SignalState::with_mask(signal_mask),
                    kernel_signals,
                    exit_status: None,
                    scheduler_state,
                    uts_ns,
                    no_new_privs,
                    oom_score_adj: Default::default(),
                    seccomp_filters,
                    robust_list_head,
                    timerslack_ns,
                    // The initial slack also becomes the default restored by
                    // `set_timerslack_ns(0)`.
                    default_timerslack_ns: timerslack_ns,
                    ptrace: None,
                    captured_thread_state: None,
                }),
                persistent_info: TaskPersistentInfoState::new(
                    tid,
                    thread_group_key,
                    command,
                    creds,
                ),
                seccomp_filter_state,
                trace_syscalls: AtomicBool::new(false),
                proc_pid_directory_cache: Mutex::new(None),
            };

            // Acquire the task's locks once, in order — presumably so
            // lock-ordering instrumentation learns the hierarchy up front;
            // TODO(review): confirm intent.
            #[cfg(any(test, debug_assertions))]
            {
                let _l1 = task.read();
                let _l2 = task.persistent_info.lock_creds();
                let _l3 = task.persistent_info.command_guard();
            }
            task
        })
    }
1144
    // Generates the `read()` / `write()` accessors for `mutable_state`.
    state_accessor!(Task, mutable_state);

    /// The task's real credentials, via a lock-free RCU read.
    pub fn real_creds(&self) -> RcuReadGuard<Credentials> {
        self.persistent_info.real_creds()
    }

    /// A cloned `Arc` of the task's current credentials.
    pub fn clone_creds(&self) -> Arc<Credentials> {
        self.persistent_info.clone_creds()
    }

    /// A weak reference to the task tracing this one, or a default (dead)
    /// reference when the task is not traced.
    pub fn ptracer_task(&self) -> WeakRef<Task> {
        // Read the tracer pid in a short scope so the state lock is released
        // before the pid-table lookup below.
        let ptracer = {
            let state = self.read();
            state.ptrace.as_ref().map(|p| p.core_state.pid)
        };

        let Some(ptracer) = ptracer else {
            return WeakRef::default();
        };

        self.get_task(ptracer)
    }

    /// The live (not yet released) state, failing with ESRCH once the task
    /// has been torn down.
    #[track_caller]
    pub fn live(&self) -> Result<Arc<TaskLiveState>, Errno> {
        self.live_state.to_option_arc().ok_or_else(|| errno!(ESRCH))
    }

    /// The task's memory manager: ESRCH if the task is dead, EINVAL if it
    /// has no address space.
    #[track_caller]
    pub fn mm(&self) -> Result<Arc<MemoryManager>, Errno> {
        self.live()?.mm.to_option_arc().ok_or_else(|| errno!(EINVAL))
    }
1199
    /// Sets the scheduling policy, realtime priority and reset-on-fork flag,
    /// then syncs the backing thread role.
    pub(crate) fn set_scheduler_policy_priority_and_reset_on_fork(
        &self,
        policy: SchedulingPolicy,
        priority: RealtimePriority,
        reset_on_fork: bool,
    ) -> Result<(), Errno> {
        self.update_scheduler_state_then_role(|scheduler_state| {
            scheduler_state.policy = policy;
            scheduler_state.realtime_priority = priority;
            scheduler_state.reset_on_fork = reset_on_fork;
        })
    }

    /// Sets the realtime priority, then syncs the backing thread role.
    pub(crate) fn set_scheduler_priority(&self, priority: RealtimePriority) -> Result<(), Errno> {
        self.update_scheduler_state_then_role(|scheduler_state| {
            scheduler_state.realtime_priority = priority
        })
    }

    /// Sets the nice value, then syncs the backing thread role.
    pub(crate) fn set_scheduler_nice(&self, nice: NormalPriority) -> Result<(), Errno> {
        self.update_scheduler_state_then_role(|scheduler_state| {
            scheduler_state.normal_priority = nice
        })
    }

    /// Replaces the whole scheduler state, then syncs the backing thread role.
    pub fn set_scheduler_state(&self, scheduler_state: SchedulerState) -> Result<(), Errno> {
        self.update_scheduler_state_then_role(|task_scheduler_state| {
            *task_scheduler_state = scheduler_state
        })
    }

    /// Re-applies the current scheduler state to the backing thread role.
    pub fn sync_scheduler_state_to_role(&self) -> Result<(), Errno> {
        self.update_scheduler_state_then_role(|_| {})
    }

    // Applies `updater` to the scheduler state under the task lock, then
    // pushes the resulting state to the kernel scheduler outside the lock.
    fn update_scheduler_state_then_role(
        &self,
        updater: impl FnOnce(&mut SchedulerState),
    ) -> Result<(), Errno> {
        let new_scheduler_state = {
            let mut state = self.write();
            updater(&mut state.scheduler_state);
            state.scheduler_state
        };
        self.thread_group().kernel.scheduler.set_thread_role(self, new_scheduler_state)?;
        Ok(())
    }

    /// Signals the vfork event (if any) to unblock a parent waiting in
    /// `wait_for_execve`; failures are logged and ignored.
    pub fn signal_vfork(&self) {
        if let Some(event) = &self.vfork_event {
            if let Err(status) = event.signal(Signals::NONE, Signals::USER_0) {
                log_warn!("Failed to set vfork signal {status}");
            }
        };
    }

    /// Blocks until `task_to_wait` signals its vfork event (see
    /// `signal_vfork`). Returns immediately when the task is gone or has no
    /// vfork event.
    pub fn wait_for_execve(&self, task_to_wait: WeakRef<Task>) -> Result<(), Errno> {
        let event = task_to_wait.upgrade().and_then(|t| t.vfork_event.clone());
        if let Some(event) = event {
            event
                .wait_one(zx::Signals::USER_0, zx::MonotonicInstant::INFINITE)
                .map_err(|status| from_status_like_fdio!(status))?;
        }
        Ok(())
    }
1278
    /// Implements the CLONE_CHILD_CLEARTID exit protocol: if a
    /// `clear_child_tid` address is set, writes 0 to it, wakes all futex
    /// waiters on that address, and clears the pointer.
    pub fn clear_child_tid_if_needed<L>(&self, locked: &mut Locked<L>) -> Result<(), Errno>
    where
        L: LockBefore<TerminalLock>,
    {
        let mut state = self.write();
        let user_tid = state.clear_child_tid;
        if !user_tid.is_null() {
            let zero: tid_t = 0;
            self.write_object(user_tid, &zero)?;
            self.kernel().shared_futexes.wake(
                locked,
                self,
                user_tid.addr(),
                usize::MAX,
                FUTEX_BITSET_MATCH_ANY,
            )?;
            state.clear_child_tid = UserRef::default();
        }
        Ok(())
    }

    /// Looks up a task by tid in the kernel's pid table.
    pub fn get_task(&self, tid: tid_t) -> WeakRef<Task> {
        self.kernel().pids.read().get_task(tid)
    }

    /// The process id (the thread group's pid).
    pub fn get_pid(&self) -> pid_t {
        self.thread_group_key.pid()
    }

    /// The thread id.
    pub fn get_tid(&self) -> tid_t {
        self.tid
    }

    /// Whether this task is the thread group leader (tid == pid).
    pub fn is_leader(&self) -> bool {
        self.get_pid() == self.get_tid()
    }
1322
1323 pub fn read_argv(&self, max_len: usize) -> Result<Vec<FsString>, Errno> {
1324 let Ok(mm) = self.mm() else {
1326 return Ok(vec![]);
1327 };
1328 let (argv_start, argv_end) = {
1329 let mm_state = mm.state.read();
1330 (mm_state.argv_start, mm_state.argv_end)
1331 };
1332
1333 let len_to_read = std::cmp::min(argv_end - argv_start, max_len);
1334 self.read_nul_delimited_c_string_list(argv_start, len_to_read)
1335 }
1336
1337 pub fn read_argv0(&self) -> Result<FsString, Errno> {
1338 let Ok(mm) = self.mm() else {
1340 return Ok(FsString::default());
1341 };
1342 let argv_start = {
1343 let mm_state = mm.state.read();
1344 mm_state.argv_start
1345 };
1346 let argv_start = UserCString::new(&ArchWidth::Arch64, argv_start);
1348 self.read_path(argv_start)
1349 }
1350
1351 pub fn read_env(&self, max_len: usize) -> Result<Vec<FsString>, Errno> {
1352 let Ok(mm) = self.mm() else { return Ok(vec![]) };
1354 let (env_start, env_end) = {
1355 let mm_state = mm.state.read();
1356 (mm_state.environ_start, mm_state.environ_end)
1357 };
1358
1359 let len_to_read = std::cmp::min(env_end - env_start, max_len);
1360 self.read_nul_delimited_c_string_list(env_start, len_to_read)
1361 }
1362
    /// Runtime statistics of the backing Zircon thread: ESRCH for a dead
    /// task, EINVAL when no thread is attached.
    pub fn thread_runtime_info(&self) -> Result<zx::TaskRuntimeInfo, Errno> {
        self.live()?
            .thread
            .read()
            .as_ref()
            .ok_or_else(|| errno!(EINVAL))?
            .get_runtime_info()
            .map_err(|status| from_status_like_fdio!(status))
    }

    /// The task's real credentials as filesystem credentials.
    pub fn real_fscred(&self) -> FsCred {
        self.real_creds().as_fscred()
    }

    /// Wakes the task's run state and kicks its restricted-mode thread so it
    /// re-enters the kernel. No-op (with a warning) for a dead task.
    pub fn interrupt(&self) {
        let Ok(live) = self.live() else {
            log_warn!("Cannot interrupt dead task {}", self.get_tid());
            return;
        };

        self.read().signals.run_state.wake();
        if let Some(thread) = live.thread.read().as_ref() {
            #[allow(
                clippy::undocumented_unsafe_blocks,
                reason = "Force documented unsafe blocks in Starnix"
            )]
            // SAFETY: `thread.raw_handle()` comes from the live thread handle
            // kept alive by `live` for the duration of this call —
            // TODO(review): confirm the options value 0 is the intended kick
            // mode.
            let status = unsafe { zx::sys::zx_restricted_kick(thread.raw_handle(), 0) };
            if status != zx::sys::ZX_OK {
                // The only tolerated failure is the thread already being in a
                // bad (terminating) state.
                assert_eq!(status, zx::sys::ZX_ERR_BAD_STATE);
            }
        }
    }

    /// A clone of the task's command (comm) name.
    pub fn command(&self) -> TaskCommand {
        self.persistent_info.command.lock().clone()
    }
1406
    /// Sets the command name of the task, propagating it to the backing Zircon
    /// thread and — for a thread-group leader — to the Zircon process as well.
    /// No-op (with a warning) for dead tasks.
    pub fn set_command_name(&self, mut new_name: TaskCommand) {
        let Ok(live) = self.live() else {
            log_warn!("Cannot set command name for dead task {}", self.get_tid());
            return;
        };

        // If argv[0] is readable, try to embed the new name into it so the
        // Zircon-visible name carries both pieces of information.
        if let Ok(argv0) = self.read_argv0() {
            let argv0 = TaskCommand::from_path_bytes(&argv0);
            if let Some(embedded_name) = argv0.try_embed(&new_name) {
                new_name = embedded_name;
            }
        }

        // The command guard is held across the zx renames below, serializing
        // concurrent renames against each other.
        let mut command_guard = self.persistent_info.command_guard();

        if let Some(thread) = live.thread.read().as_ref() {
            set_zx_name(&**thread, new_name.as_bytes());
        }

        if self.is_leader() {
            set_zx_name(&self.thread_group().process, new_name.as_bytes());
            // Best-effort notification to an attached debugger that the
            // process name changed; the result is deliberately ignored.
            let _ = zx::Thread::raise_user_exception(
                zx::RaiseExceptionOptions::TARGET_JOB_DEBUGGER,
                zx::sys::ZX_EXCP_USER_CODE_PROCESS_NAME_CHANGED,
                0,
            );
        }

        *command_guard = new_name;
        // Release the command guard before taking the thread-group lock below.
        drop(command_guard);

        if self.is_leader() {
            if let Some(notifier) = &self.thread_group().read().notifier {
                let _ = notifier.send(MemoryAttributionLifecycleEvent::name_change(self.tid));
            }
        }
    }
1454
1455 pub fn set_seccomp_state(&self, state: SeccompStateValue) -> Result<(), Errno> {
1456 self.seccomp_filter_state.set(&state)
1457 }
1458
1459 pub fn state_code(&self) -> TaskStateCode {
1460 let status = self.read();
1461 if status.exit_status.is_some() {
1462 TaskStateCode::Zombie
1463 } else if status.signals.run_state.is_blocked() {
1464 let stop_state = self.load_stopped();
1465 if stop_state.ptrace_only() && stop_state.is_stopped() {
1466 TaskStateCode::TracingStop
1467 } else {
1468 TaskStateCode::Sleeping
1469 }
1470 } else {
1471 TaskStateCode::Running
1472 }
1473 }
1474
1475 pub fn time_stats(&self) -> TaskTimeStats {
1476 use zx::Task;
1477 let live = match self.live() {
1479 Ok(live) => live,
1480 Err(_) => return TaskTimeStats::default(),
1481 };
1482 let info = match &*live.thread.read() {
1483 Some(thread) => thread.get_runtime_info().expect("Failed to get thread stats"),
1484 None => return TaskTimeStats::default(),
1485 };
1486
1487 TaskTimeStats {
1488 user_time: zx::MonotonicDuration::from_nanos(info.cpu_time),
1489 system_time: zx::MonotonicDuration::default(),
1491 }
1492 }
1493
1494 pub fn get_signal_action(&self, signal: Signal) -> sigaction_t {
1495 self.thread_group().signal_actions.get(signal)
1496 }
1497
1498 pub fn should_check_for_pending_signals(&self) -> bool {
1499 self.flags().intersects(
1500 TaskFlags::KERNEL_SIGNALS_AVAILABLE
1501 | TaskFlags::SIGNALS_AVAILABLE
1502 | TaskFlags::TEMPORARY_SIGNAL_MASK,
1503 ) || self.thread_group.has_pending_signals.load(Ordering::Relaxed)
1504 }
1505
    /// Records the mapping from this task's tid to the Zircon koids of its
    /// process and thread, if the kernel is currently collecting such a
    /// mapping. No-op for dead tasks or when collection is disabled.
    pub fn record_pid_koid_mapping(&self) {
        let Ok(live) = self.live() else {
            log_warn!("Cannot record pid/koid mapping for dead task {}", self.get_tid());
            return;
        };

        // Collection is off unless the mapping table has been installed.
        let Some(ref mapping_table) = *self.kernel().pid_to_koid_mapping.read() else { return };

        // Koids are recorded best-effort: a failed handle query stores `None`.
        let pkoid = self.thread_group().get_process_koid().ok();
        let tkoid = live.thread.read().as_ref().and_then(|t| t.koid().ok());
        mapping_table.write().insert(self.tid, KoidPair { process: pkoid, thread: tkoid });
    }
1518}
1519
impl Releasable for Task {
    // Releasing a task requires the final register state of its thread, the
    // task-release lock level, and write access to the pid table.
    type Context<'a> = (
        ThreadState<RegisterStorageEnum>,
        &'a mut Locked<TaskRelease>,
        RwLockWriteGuard<'a, PidTable>,
    );

    fn release<'a>(mut self, context: Self::Context<'a>) {
        let (thread_state, locked, pids) = context;

        // Clear the cached /proc/<pid> directory and detach from any ptrace
        // relationship while the pid table is still write-locked.
        *self.proc_pid_directory_cache.get_mut() = None;
        self.ptrace_disconnect(&pids);

        // Explicitly release the pid table lock before proceeding.
        std::mem::drop(pids);

        // Signal vfork completion (presumably waking a parent blocked in
        // vfork — confirm against signal_vfork's definition).
        self.signal_vfork();

        // Tear down live state in order: release the fd table and detach the
        // memory manager, then clear the live state itself.
        if let Ok(live) = self.live() {
            live.files.release();
            live.mm.update(None);
        }
        self.live_state.update(None);

        // Temporarily re-wrap the task as a CurrentTask so the delayed
        // releaser can run with a current-task context.
        let current_task = CurrentTask::new(OwnedRef::new(self), thread_state.into());

        current_task.trigger_delayed_releaser(locked);

        // Unwrap the task again and let it drop for real; by this point no
        // one else may hold an owned reference.
        let CurrentTask { mut task, .. } = current_task;
        let task = OwnedRef::take(&mut task).expect("task should not have been re-owned");
        let _task: Self = ReleaseGuard::take(task);
    }
}
1557
1558impl MemoryAccessor for Task {
1559 fn read_memory<'a>(
1560 &self,
1561 addr: UserAddress,
1562 bytes: &'a mut [MaybeUninit<u8>],
1563 ) -> Result<&'a mut [u8], Errno> {
1564 self.mm()?.syscall_read_memory(addr, bytes)
1569 }
1570
1571 fn read_memory_partial_until_null_byte<'a>(
1572 &self,
1573 addr: UserAddress,
1574 bytes: &'a mut [MaybeUninit<u8>],
1575 ) -> Result<&'a mut [u8], Errno> {
1576 self.mm()?.syscall_read_memory_partial_until_null_byte(addr, bytes)
1581 }
1582
1583 fn read_memory_partial<'a>(
1584 &self,
1585 addr: UserAddress,
1586 bytes: &'a mut [MaybeUninit<u8>],
1587 ) -> Result<&'a mut [u8], Errno> {
1588 self.mm()?.syscall_read_memory_partial(addr, bytes)
1593 }
1594
1595 fn write_memory(&self, addr: UserAddress, bytes: &[u8]) -> Result<usize, Errno> {
1596 self.mm()?.syscall_write_memory(addr, bytes)
1601 }
1602
1603 fn write_memory_partial(&self, addr: UserAddress, bytes: &[u8]) -> Result<usize, Errno> {
1604 self.mm()?.syscall_write_memory_partial(addr, bytes)
1609 }
1610
1611 fn zero(&self, addr: UserAddress, length: usize) -> Result<usize, Errno> {
1612 self.mm()?.syscall_zero(addr, length)
1617 }
1618}
1619
1620impl TaskMemoryAccessor for Task {
1621 fn maximum_valid_address(&self) -> Option<UserAddress> {
1622 self.mm().map(|mm| mm.maximum_valid_user_address).ok()
1623 }
1624}
1625
1626impl fmt::Debug for Task {
1627 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1628 write!(
1629 f,
1630 "{}:{}[{}]",
1631 self.thread_group().leader,
1632 self.tid,
1633 self.persistent_info.command.lock()
1634 )
1635 }
1636}
1637
1638impl cmp::PartialEq for Task {
1639 fn eq(&self, other: &Self) -> bool {
1640 let ptr: *const Task = self;
1641 let other_ptr: *const Task = other;
1642 ptr == other_ptr
1643 }
1644}
1645
// Equality on `Task` is identity-based (pointer comparison), which is
// reflexive, so the full `Eq` contract holds.
impl cmp::Eq for Task {}
1647
#[cfg(test)]
mod test {
    use super::*;
    use crate::security;
    use crate::testing::*;
    use starnix_uapi::auth::{CAP_SYS_ADMIN, Capabilities};
    use starnix_uapi::resource_limits::Resource;
    use starnix_uapi::signals::SIGCHLD;
    use starnix_uapi::{CLONE_SIGHAND, CLONE_THREAD, CLONE_VM, rlimit};

    #[::fuchsia::test]
    async fn test_tid_allocation() {
        spawn_kernel_and_run(async |locked, current_task| {
            let kernel = current_task.kernel();
            // The first task of a fresh kernel always gets tid 1.
            assert_eq!(current_task.get_tid(), 1);
            // Subsequently created tasks get strictly larger tids.
            let second_task = create_task(locked, &kernel, "another-task");
            let second_tid = second_task.get_tid();
            assert!(second_tid >= 2);

            // Both tids resolve back to their tasks through the pid table.
            let pid_table = kernel.pids.read();
            assert_eq!(pid_table.get_task(1).upgrade().unwrap().get_tid(), 1);
            assert_eq!(pid_table.get_task(second_tid).upgrade().unwrap().get_tid(), second_tid);
        })
        .await;
    }

    #[::fuchsia::test]
    async fn test_clone_pid_and_parent_pid() {
        spawn_kernel_and_run(async |locked, current_task| {
            // A CLONE_THREAD clone shares the pid and thread-group leader but
            // receives its own tid.
            let sibling = current_task.clone_task_for_test(
                locked,
                (CLONE_THREAD | CLONE_VM | CLONE_SIGHAND) as u64,
                Some(SIGCHLD),
            );
            assert_eq!(current_task.get_pid(), sibling.get_pid());
            assert_ne!(current_task.get_tid(), sibling.get_tid());
            assert_eq!(current_task.thread_group().leader, sibling.thread_group().leader);

            // A fork-style clone gets a fresh pid/tid and records the caller
            // as its parent.
            let child = current_task.clone_task_for_test(locked, 0, Some(SIGCHLD));
            assert_ne!(current_task.get_pid(), child.get_pid());
            assert_ne!(current_task.get_tid(), child.get_tid());
            assert_eq!(current_task.get_pid(), child.thread_group().read().get_ppid());
        })
        .await;
    }

    #[::fuchsia::test]
    async fn test_root_capabilities() {
        spawn_kernel_and_run(async |_, current_task| {
            // The initial task is privileged (CAP_SYS_ADMIN) but starts with
            // an empty inheritable capability set.
            assert!(security::is_task_capable_noaudit(current_task, CAP_SYS_ADMIN));
            assert_eq!(current_task.real_creds().cap_inheritable, Capabilities::empty());

            // Dropping to an unprivileged uid/gid removes the capability.
            current_task.set_creds(Credentials::with_ids(1, 1));
            assert!(!security::is_task_capable_noaudit(current_task, CAP_SYS_ADMIN));
        })
        .await;
    }

    #[::fuchsia::test]
    async fn test_clone_rlimit() {
        spawn_kernel_and_run(async |locked, current_task| {
            // Choose an FSIZE limit that differs from the current default.
            let original_fsize = current_task.thread_group().get_rlimit(locked, Resource::FSIZE);
            assert_ne!(original_fsize, 10);
            current_task
                .thread_group()
                .limits
                .lock(locked)
                .set(Resource::FSIZE, rlimit { rlim_cur: 10, rlim_max: 100 });
            assert_eq!(current_task.thread_group().get_rlimit(locked, Resource::FSIZE), 10);

            // Clones inherit the parent's resource limits.
            let child = current_task.clone_task_for_test(locked, 0, Some(SIGCHLD));
            assert_eq!(child.thread_group().get_rlimit(locked, Resource::FSIZE), 10)
        })
        .await;
    }
}