starnix_core/task/pid_table.rs

// Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use crate::task::memory_attribution::MemoryAttributionLifecycleEvent;
use crate::task::{ProcessGroup, Task, ThreadGroup, ZombieProcess};
use fuchsia_rcu::rcu_option_cell::RcuOptionCell;
use starnix_logging::track_stub;
use starnix_rcu::{RcuHashMap, RcuReadScope};
use starnix_types::ownership::{TempRef, WeakRef};
use starnix_uapi::{pid_t, tid_t};
use std::collections::HashMap;
use std::sync::{Arc, Weak};

// The maximum pid that can be allocated; pid allocation wraps around after reaching it.
const PID_MAX_LIMIT: pid_t = 1 << 15;

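/// The process registered under a pid: either a live thread group or a zombie
/// process that has not yet been removed from the table.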
#[derive(Default, Debug)]
enum ProcessEntry {
    #[default]
    None,
    ThreadGroup(Weak<ThreadGroup>),
    Zombie(WeakRef<ZombieProcess>),
}

impl ProcessEntry {
    fn is_none(&self) -> bool {
        matches!(self, Self::None)
    }

    fn thread_group(&self) -> Option<&Weak<ThreadGroup>> {
        match self {
            Self::ThreadGroup(group) => Some(group),
            _ => None,
        }
    }
}

/// Entities identified by a pid.
#[derive(Default, Debug)]
struct PidEntry {
    task: Option<WeakRef<Task>>,
    process: ProcessEntry,
}

impl PidEntry {
    fn is_empty(&self) -> bool {
        self.task.is_none() && self.process.is_none()
    }
}

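/// A strong reference to the process stored under a pid, as returned by
/// `PidTable::get_process`.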
pub enum ProcessEntryRef<'a> {
    Process(Arc<ThreadGroup>),
    Zombie(TempRef<'a, ZombieProcess>),
}

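/// The table tracking every pid in use, together with the task, thread group or
/// zombie process and the process group identified by that pid.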
#[derive(Default, Debug)]
pub struct PidTable {
    /// The most-recently allocated pid in this table.
    last_pid: pid_t,

    /// The tasks in this table, organized by pid_t.
    table: HashMap<pid_t, PidEntry>,

    /// The process groups in this table, organized by pid_t.
    process_groups: RcuHashMap<pid_t, Arc<ProcessGroup>>,

    /// Channel used to notify observers of thread group creation and destruction,
    /// for memory attribution.
    thread_group_notifier: RcuOptionCell<std::sync::mpsc::Sender<MemoryAttributionLifecycleEvent>>,
}

impl PidTable {
    fn get_entry(&self, pid: pid_t) -> Option<&PidEntry> {
        self.table.get(&pid)
    }

    fn get_entry_mut(&mut self, pid: pid_t) -> &mut PidEntry {
        self.table.entry(pid).or_insert_with(Default::default)
    }

    fn remove_item<F>(&mut self, pid: pid_t, do_remove: F)
    where
        F: FnOnce(&mut PidEntry),
    {
        let entry = self.get_entry_mut(pid);
        do_remove(entry);
        if entry.is_empty() {
            self.table.remove(&pid);
        }
    }

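    /// Registers the channel on which thread group creation and destruction
    /// events are reported for memory attribution.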
    pub fn set_thread_group_notifier(
        &self,
        notifier: std::sync::mpsc::Sender<MemoryAttributionLifecycleEvent>,
    ) {
        self.thread_group_notifier.update(Some(notifier));
    }

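    /// Returns the next unused pid, searching upward from the most recently
    /// allocated one and wrapping around to 2 once `PID_MAX_LIMIT` is exceeded.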
    pub fn allocate_pid(&mut self) -> pid_t {
        loop {
            self.last_pid = {
                let r = self.last_pid + 1;
                if r > PID_MAX_LIMIT {
                    track_stub!(TODO("https://fxbug.dev/322874557"), "pid wraparound");
                    2
                } else {
                    r
                }
            };
            if self.get_entry(self.last_pid).is_none() {
                break;
            }
        }
        self.last_pid
    }

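    /// Returns a weak reference to the task with the given tid, or an empty
    /// `WeakRef` if no such task is registered.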
    pub fn get_task(&self, tid: tid_t) -> WeakRef<Task> {
        self.get_entry(tid).and_then(|entry| entry.task.clone()).unwrap_or_else(WeakRef::new)
    }

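    /// Registers a newly created task under its tid. If the task is a thread
    /// group leader, its thread group is registered under the same id as well.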
    pub fn add_task(&mut self, task: &TempRef<'_, Task>) {
        let entry = self.get_entry_mut(task.tid);
        assert!(entry.task.is_none());
        entry.task = Some(WeakRef::from(task));

        // If the task is a thread group leader (i.e. we are not cloning a thread),
        // register its thread group as well.
        if task.is_leader() {
            assert!(entry.process.is_none());
            entry.process = ProcessEntry::ThreadGroup(Arc::downgrade(task.thread_group()));

            // Notify listeners of the thread group creation.
            let scope = RcuReadScope::new();
            if let Some(notifier) = self.thread_group_notifier.as_ref(&scope) {
                task.thread_group.write().notifier = Some(notifier.clone());
                let _ = notifier.send(MemoryAttributionLifecycleEvent::creation(task.tid));
            }
        }
    }

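    /// Removes the task with the given tid from the table. Panics if no such
    /// task is registered.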
    pub fn remove_task(&mut self, tid: tid_t) {
        self.remove_item(tid, |entry| {
            let removed = entry.task.take();
            assert!(removed.is_some());
        });
    }

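    /// Returns the process registered under `pid`, which is either a live
    /// thread group or a zombie, or `None` if the pid is unused.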
    pub fn get_process(&self, pid: pid_t) -> Option<ProcessEntryRef<'_>> {
        match self.get_entry(pid) {
            None => None,
            Some(PidEntry { process: ProcessEntry::None, .. }) => None,
            Some(PidEntry { process: ProcessEntry::ThreadGroup(thread_group), .. }) => {
                let thread_group = thread_group
                    .upgrade()
                    .expect("ThreadGroup was released, but not removed from PidTable");
                Some(ProcessEntryRef::Process(thread_group))
            }
            Some(PidEntry { process: ProcessEntry::Zombie(zombie), .. }) => {
                let zombie = zombie
                    .upgrade()
                    .expect("ZombieProcess was released, but not removed from PidTable");
                Some(ProcessEntryRef::Zombie(zombie))
            }
        }
    }

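    /// Returns the thread group registered under `pid`, or `None` if the pid
    /// is unused or refers to a zombie.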
    pub fn get_thread_group(&self, pid: pid_t) -> Option<Arc<ThreadGroup>> {
        match self.get_process(pid) {
            Some(ProcessEntryRef::Process(tg)) => Some(tg),
            _ => None,
        }
    }

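    /// Returns an iterator over all live thread groups in the table.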
    pub fn get_thread_groups(&self) -> impl Iterator<Item = Arc<ThreadGroup>> + '_ {
        self.table
            .iter()
            .flat_map(|(_pid, entry)| entry.process.thread_group())
            .flat_map(|g| g.upgrade())
    }

    /// Replaces the process with the specified `pid` by the given `zombie`.
    pub fn kill_process(&mut self, pid: pid_t, zombie: WeakRef<ZombieProcess>) {
        let entry = self.get_entry_mut(pid);
        assert!(matches!(entry.process, ProcessEntry::ThreadGroup(_)));

        // All tasks from the process are expected to be cleared from the table before the process
        // becomes a zombie. We can't verify this for all tasks here, so check just the leader.
        assert!(entry.task.is_none());

        entry.process = ProcessEntry::Zombie(zombie);
    }

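    /// Removes the zombie process with the given `pid` from the table and, if a
    /// notifier is registered, reports the thread group destruction.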
    pub fn remove_zombie(&mut self, pid: pid_t) {
        self.remove_item(pid, |entry| {
            assert!(matches!(entry.process, ProcessEntry::Zombie(_)));
            entry.process = ProcessEntry::None;
        });

        // Notify listeners of the thread group destruction.
        let scope = RcuReadScope::new();
        if let Some(notifier) = self.thread_group_notifier.as_ref(&scope) {
            let _ = notifier.send(MemoryAttributionLifecycleEvent::destruction(pid));
        }
    }

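    /// Returns the process group with the given id (the pid of its leader), if any.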
    pub fn get_process_group(&self, pid: pid_t) -> Option<Arc<ProcessGroup>> {
        let scope = RcuReadScope::new();
        self.process_groups.get(&scope, &pid).cloned()
    }

    pub fn add_process_group(&self, process_group: Arc<ProcessGroup>) {
        let removed = self.process_groups.insert(process_group.leader, process_group);
        assert!(removed.is_none());
    }

    pub fn remove_process_group(&self, pid: pid_t) {
        let removed = self.process_groups.remove(&pid);
        assert!(removed.is_some());
    }

    /// Returns the process ids for all processes, including zombies.
    pub fn process_ids(&self) -> Vec<pid_t> {
        self.table
            .iter()
            .flat_map(|(pid, entry)| if entry.process.is_none() { None } else { Some(*pid) })
            .collect()
    }

    /// Returns the task ids for all the currently running tasks.
    pub fn task_ids(&self) -> Vec<pid_t> {
        self.table.iter().flat_map(|(pid, entry)| entry.task.as_ref().and(Some(*pid))).collect()
    }

    pub fn last_pid(&self) -> pid_t {
        self.last_pid
    }

    pub fn len(&self) -> usize {
        self.table.len()
    }
}