1use itertools::Itertools;
6use regex_lite::Regex;
7use starnix_core::mm::{
8 MemoryAccessor, MemoryAccessorExt, MemoryManager, PAGE_SIZE, ProcMapsFile, ProcSmapsFile,
9};
10use starnix_core::security;
11use starnix_core::task::{
12 CurrentTask, Task, TaskPersistentInfo, TaskStateCode, ThreadGroup, ThreadGroupKey,
13 path_from_root,
14};
15use starnix_core::vfs::buffers::{InputBuffer, OutputBuffer};
16use starnix_core::vfs::pseudo::dynamic_file::{DynamicFile, DynamicFileBuf, DynamicFileSource};
17use starnix_core::vfs::pseudo::simple_directory::SimpleDirectory;
18use starnix_core::vfs::pseudo::simple_file::{
19 BytesFile, BytesFileOps, SimpleFileNode, parse_i32_file, parse_unsigned_file,
20 serialize_for_file,
21};
22use starnix_core::vfs::pseudo::stub_empty_file::StubEmptyFile;
23use starnix_core::vfs::pseudo::vec_directory::{VecDirectory, VecDirectoryEntry};
24use starnix_core::vfs::{
25 CallbackSymlinkNode, CloseFreeSafe, DirectoryEntryType, DirentSink, FdNumber, FileObject,
26 FileOps, FileSystemHandle, FsNode, FsNodeHandle, FsNodeInfo, FsNodeOps, FsStr, FsString,
27 ProcMountinfoFile, ProcMountsFile, SeekTarget, SymlinkTarget, default_seek, emit_dotdot,
28 fileops_impl_directory, fileops_impl_noop_sync, fileops_impl_seekable,
29 fileops_impl_unbounded_seek, fs_node_impl_dir_readonly,
30};
31use starnix_logging::{bug_ref, track_stub};
32use starnix_sync::{FileOpsCore, Locked};
33use starnix_task_command::TaskCommand;
34use starnix_types::ownership::{TempRef, WeakRef};
35use starnix_types::time::duration_to_scheduler_clock;
36use starnix_uapi::auth::{
37 CAP_SYS_NICE, CAP_SYS_RESOURCE, Capabilities, PTRACE_MODE_ATTACH_REALCREDS,
38 PTRACE_MODE_NOAUDIT, PTRACE_MODE_READ_FSCREDS, PtraceAccessMode,
39};
40use starnix_uapi::device_type::DeviceType;
41use starnix_uapi::errors::Errno;
42use starnix_uapi::file_mode::{Access, FileMode, mode};
43use starnix_uapi::open_flags::OpenFlags;
44use starnix_uapi::resource_limits::Resource;
45use starnix_uapi::user_address::UserAddress;
46use starnix_uapi::{
47 OOM_ADJUST_MIN, OOM_DISABLE, OOM_SCORE_ADJ_MIN, RLIM_INFINITY, errno, error, ino_t, off_t,
48 pid_t, uapi,
49};
50use std::borrow::Cow;
51use std::ops::{Deref, Range};
52use std::sync::{Arc, LazyLock, Weak};
53
54fn task_entries(scope: TaskEntryScope) -> Vec<(FsString, FileMode)> {
56 let mut entries = vec![
58 (b"cgroup".into(), mode!(IFREG, 0o444)),
59 (b"cwd".into(), mode!(IFLNK, 0o777)),
60 (b"exe".into(), mode!(IFLNK, 0o777)),
61 (b"fd".into(), mode!(IFDIR, 0o500)),
62 (b"fdinfo".into(), mode!(IFDIR, 0o777)),
63 (b"io".into(), mode!(IFREG, 0o400)),
64 (b"limits".into(), mode!(IFREG, 0o444)),
65 (b"maps".into(), mode!(IFREG, 0o444)),
66 (b"mem".into(), mode!(IFREG, 0o600)),
67 (b"root".into(), mode!(IFLNK, 0o777)),
68 (b"sched".into(), mode!(IFREG, 0o644)),
69 (b"schedstat".into(), mode!(IFREG, 0o444)),
70 (b"smaps".into(), mode!(IFREG, 0o444)),
71 (b"stat".into(), mode!(IFREG, 0o444)),
72 (b"statm".into(), mode!(IFREG, 0o444)),
73 (b"status".into(), mode!(IFREG, 0o444)),
74 (b"cmdline".into(), mode!(IFREG, 0o444)),
75 (b"environ".into(), mode!(IFREG, 0o400)),
76 (b"auxv".into(), mode!(IFREG, 0o400)),
77 (b"comm".into(), mode!(IFREG, 0o644)),
78 (b"attr".into(), mode!(IFDIR, 0o555)),
79 (b"ns".into(), mode!(IFDIR, 0o511)),
80 (b"mountinfo".into(), mode!(IFREG, 0o444)),
81 (b"mounts".into(), mode!(IFREG, 0o444)),
82 (b"oom_adj".into(), mode!(IFREG, 0o744)),
83 (b"oom_score".into(), mode!(IFREG, 0o444)),
84 (b"oom_score_adj".into(), mode!(IFREG, 0o744)),
85 (b"timerslack_ns".into(), mode!(IFREG, 0o666)),
86 (b"wchan".into(), mode!(IFREG, 0o444)),
87 (b"clear_refs".into(), mode!(IFREG, 0o200)),
88 (b"pagemap".into(), mode!(IFREG, 0o400)),
89 ];
90
91 if scope == TaskEntryScope::ThreadGroup {
92 entries.push((b"task".into(), mode!(IFDIR, 0o555)));
93 }
94
95 entries
96}
97
/// Selects which set of entries a `TaskDirectory` exposes.
#[derive(Copy, Clone, Eq, PartialEq)]
pub enum TaskEntryScope {
    // A single task's directory (used by `tid_directory`); no "task" subdir.
    Task,
    // A whole thread group's directory (used by `pid_directory`); additionally
    // exposes the "task" subdirectory listing the group's threads.
    ThreadGroup,
}
103
/// Backing state for a task's proc directory; serves as both the node ops
/// (via `TaskDirectoryNode`) and the file ops for the opened directory.
pub struct TaskDirectory {
    // Task this directory describes. Held weakly: entries that need the task
    // resolve it lazily and fail (typically ESRCH/ENOENT) once it is gone.
    task_weak: WeakRef<Task>,
    // Which entry set to expose (per-task vs. per-thread-group).
    scope: TaskEntryScope,
    // Pre-allocated inode numbers, one per entry of `task_entries(scope)`,
    // indexed by the entry's position in that list.
    inode_range: Range<ino_t>,
}

/// Cloneable handle over the shared `TaskDirectory`, used as the `FsNodeOps`
/// implementation.
#[derive(Clone)]
struct TaskDirectoryNode(Arc<TaskDirectory>);
119
// Forward to the shared directory state so the node wrapper can use
// `TaskDirectory`'s fields and methods directly.
impl Deref for TaskDirectoryNode {
    type Target = TaskDirectory;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
127
128impl TaskDirectory {
129 fn new(fs: &FileSystemHandle, task: &TempRef<'_, Task>, scope: TaskEntryScope) -> FsNodeHandle {
130 let creds = task.real_creds().euid_as_fscred();
131 let task_weak = WeakRef::from(task);
132 fs.create_node_and_allocate_node_id(
133 TaskDirectoryNode(Arc::new(TaskDirectory {
134 task_weak,
135 scope,
136 inode_range: fs.allocate_ino_range(task_entries(scope).len()),
137 })),
138 FsNodeInfo::new(mode!(IFDIR, 0o555), creds),
139 )
140 }
141}
142
impl FsNodeOps for TaskDirectoryNode {
    fs_node_impl_dir_readonly!();

    /// Opening the directory reuses this same object as the `FileOps`
    /// implementation (see `impl FileOps for TaskDirectory` below).
    fn create_file_ops(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        _node: &FsNode,
        _current_task: &CurrentTask,
        _flags: OpenFlags,
    ) -> Result<Box<dyn FileOps>, Errno> {
        Ok(Box::new(self.clone()))
    }

    /// Resolves an entry name to a freshly created child node.
    ///
    /// The entry's mode and inode come from `task_entries(self.scope)`: the
    /// inode is the entry's index offset into this directory's pre-allocated
    /// range, so repeated lookups yield stable inode numbers.
    fn lookup(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        node: &FsNode,
        _current_task: &CurrentTask,
        name: &FsStr,
    ) -> Result<FsNodeHandle, Errno> {
        let task_weak = self.task_weak.clone();
        let creds = node.info().cred();
        let fs = node.fs();
        // Unknown names are rejected here with ENOENT before any ops are built.
        let (mode, ino) = task_entries(self.scope)
            .into_iter()
            .enumerate()
            .find_map(|(index, (n, mode))| {
                if name == *n {
                    Some((mode, self.inode_range.start + index as ino_t))
                } else {
                    None
                }
            })
            .ok_or_else(|| errno!(ENOENT))?;

        // Build the ops for the named entry. Every name returned by
        // task_entries() must have an arm here; the trailing `unreachable!`
        // documents that invariant.
        let ops: Box<dyn FsNodeOps> = match &**name {
            b"cgroup" => Box::new(CgroupFile::new_node(task_weak)),
            b"cwd" => Box::new(CallbackSymlinkNode::new({
                move || Ok(SymlinkTarget::Node(Task::from_weak(&task_weak)?.live()?.fs().cwd()))
            })),
            b"exe" => Box::new(CallbackSymlinkNode::new({
                move || {
                    let task = Task::from_weak(&task_weak)?;
                    // Tasks without an address space (or without an executable
                    // mapping) have no "exe" target.
                    if let Some(node) = task.mm().ok().and_then(|mm| mm.executable_node()) {
                        Ok(SymlinkTarget::Node(node))
                    } else {
                        error!(ENOENT)
                    }
                }
            })),
            b"fd" => Box::new(FdDirectory::new(task_weak)),
            b"fdinfo" => Box::new(FdInfoDirectory::new(task_weak)),
            b"io" => Box::new(IoFile::new_node()),
            b"limits" => Box::new(LimitsFile::new_node(task_weak)),
            // maps/smaps/pagemap reveal address-space layout, so they require a
            // ptrace read access check at open time.
            b"maps" => Box::new(PtraceCheckedNode::new_node(
                task_weak,
                PTRACE_MODE_READ_FSCREDS,
                |_, _, task| Ok(ProcMapsFile::new(task)),
            )),
            b"mem" => Box::new(MemFile::new_node(task_weak)),
            b"root" => Box::new(CallbackSymlinkNode::new({
                move || Ok(SymlinkTarget::Node(Task::from_weak(&task_weak)?.live()?.fs().root()))
            })),
            b"sched" => Box::new(StubEmptyFile::new_node(bug_ref!("https://fxbug.dev/322893980"))),
            b"schedstat" => {
                Box::new(StubEmptyFile::new_node(bug_ref!("https://fxbug.dev/322894256")))
            }
            b"smaps" => Box::new(PtraceCheckedNode::new_node(
                task_weak,
                PTRACE_MODE_READ_FSCREDS,
                |_, _, task| Ok(ProcSmapsFile::new(task)),
            )),
            b"stat" => Box::new(StatFile::new_node(task_weak, self.scope)),
            b"statm" => Box::new(StatmFile::new_node(task_weak)),
            b"status" => Box::new(StatusFile::new_node(task_weak)),
            b"cmdline" => Box::new(CmdlineFile::new_node(task_weak)),
            b"environ" => Box::new(EnvironFile::new_node(task_weak)),
            b"auxv" => Box::new(AuxvFile::new_node(task_weak)),
            b"comm" => {
                // comm needs the task's persistent info captured now; a task
                // that has already exited yields ESRCH.
                let task = self.task_weak.upgrade().ok_or_else(|| errno!(ESRCH))?;
                Box::new(CommFile::new_node(task_weak, task.persistent_info.clone()))
            }
            b"attr" => {
                // Security attribute directory: one writable node per procattr
                // plus the read-only "prev" entry.
                let dir = SimpleDirectory::new();
                dir.edit(&fs, |dir| {
                    for (attr, name) in [
                        (security::ProcAttr::Current, "current"),
                        (security::ProcAttr::Exec, "exec"),
                        (security::ProcAttr::FsCreate, "fscreate"),
                        (security::ProcAttr::KeyCreate, "keycreate"),
                        (security::ProcAttr::SockCreate, "sockcreate"),
                    ] {
                        dir.entry_etc(
                            name.into(),
                            AttrNode::new(task_weak.clone(), attr),
                            mode!(IFREG, 0o666),
                            DeviceType::NONE,
                            creds,
                        );
                    }
                    dir.entry_etc(
                        "prev".into(),
                        AttrNode::new(task_weak, security::ProcAttr::Previous),
                        mode!(IFREG, 0o444),
                        DeviceType::NONE,
                        creds,
                    );
                });
                Box::new(dir)
            }
            b"ns" => Box::new(NsDirectory { task: task_weak }),
            b"mountinfo" => Box::new(ProcMountinfoFile::new_node(task_weak)),
            b"mounts" => Box::new(ProcMountsFile::new_node(task_weak)),
            b"oom_adj" => Box::new(OomAdjFile::new_node(task_weak)),
            b"oom_score" => Box::new(OomScoreFile::new_node(task_weak)),
            b"oom_score_adj" => Box::new(OomScoreAdjFile::new_node(task_weak)),
            b"timerslack_ns" => Box::new(TimerslackNsFile::new_node(task_weak)),
            b"wchan" => Box::new(BytesFile::new_node(b"0".to_vec())),
            b"clear_refs" => Box::new(ClearRefsFile::new_node(task_weak)),
            b"pagemap" => Box::new(PtraceCheckedNode::new_node(
                task_weak,
                PTRACE_MODE_READ_FSCREDS,
                |_, _, _| Ok(StubEmptyFile::new(bug_ref!("https://fxbug.dev/452096300"))),
            )),
            b"task" => {
                // Only present for ThreadGroup-scoped directories (see
                // task_entries); lists the group's threads.
                let task = self.task_weak.upgrade().ok_or_else(|| errno!(ESRCH))?;
                Box::new(TaskListDirectory { thread_group: Arc::downgrade(&task.thread_group()) })
            }
            name => unreachable!(
                "entry \"{:?}\" should be supported to keep in sync with task_entries()",
                name
            ),
        };

        Ok(fs.create_node(ino, ops, FsNodeInfo::new(mode, creds)))
    }
}
281
// Marker impl; the contract it asserts is defined by `CloseFreeSafe`.
impl CloseFreeSafe for TaskDirectory {}
impl FileOps for TaskDirectory {
    fileops_impl_directory!();
    fileops_impl_noop_sync!();
    fileops_impl_unbounded_seek!();

    /// Enumerates the directory's entries starting from the file's current
    /// offset.
    fn readdir(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        file: &FileObject,
        _current_task: &CurrentTask,
        sink: &mut dyn DirentSink,
    ) -> Result<(), Errno> {
        // Offsets 0 and 1 are "." and ".." (emitted here), so entry `i` of
        // task_entries() lives at directory offset `i + 2`.
        emit_dotdot(file, sink)?;

        for (index, (name, mode)) in
            task_entries(self.scope).into_iter().enumerate().skip(sink.offset() as usize - 2)
        {
            sink.add(
                // Same index-based inode assignment as lookup().
                self.inode_range.start + index as ino_t,
                sink.offset() + 1,
                DirectoryEntryType::from_mode(mode),
                name.as_ref(),
            )?;
        }
        Ok(())
    }

    /// Identifies the thread group this directory belongs to; ESRCH once the
    /// backing task has exited.
    fn as_thread_group_key(&self, _file: &FileObject) -> Result<ThreadGroupKey, Errno> {
        let task = self.task_weak.upgrade().ok_or_else(|| errno!(ESRCH))?;
        Ok(task.thread_group().into())
    }
}
318
319pub fn pid_directory(
321 current_task: &CurrentTask,
322 fs: &FileSystemHandle,
323 task: &TempRef<'_, Task>,
324) -> FsNodeHandle {
325 let fs_node = TaskDirectory::new(fs, task, TaskEntryScope::ThreadGroup);
328
329 security::task_to_fs_node(current_task, task, &fs_node);
330 fs_node
331}
332
/// Creates the proc directory node for a single thread (the per-tid view:
/// same entries as the pid directory minus the "task" subdirectory).
fn tid_directory(fs: &FileSystemHandle, task: &TempRef<'_, Task>) -> FsNodeHandle {
    TaskDirectory::new(fs, task, TaskEntryScope::Task)
}
337
/// The "fd" directory: one symlink per open file descriptor of the task,
/// each pointing at the file the descriptor refers to.
struct FdDirectory {
    task: WeakRef<Task>,
}

impl FdDirectory {
    fn new(task: WeakRef<Task>) -> Self {
        Self { task }
    }
}
351
352impl FsNodeOps for FdDirectory {
353 fs_node_impl_dir_readonly!();
354
355 fn create_file_ops(
356 &self,
357 _locked: &mut Locked<FileOpsCore>,
358 _node: &FsNode,
359 _current_task: &CurrentTask,
360 _flags: OpenFlags,
361 ) -> Result<Box<dyn FileOps>, Errno> {
362 Ok(VecDirectory::new_file(fds_to_directory_entries(
363 Task::from_weak(&self.task)?.live()?.files.get_all_fds(),
364 )))
365 }
366
367 fn lookup(
368 &self,
369 _locked: &mut Locked<FileOpsCore>,
370 node: &FsNode,
371 _current_task: &CurrentTask,
372 name: &FsStr,
373 ) -> Result<FsNodeHandle, Errno> {
374 let fd = FdNumber::from_fs_str(name).map_err(|_| errno!(ENOENT))?;
375 let task = Task::from_weak(&self.task)?;
376 let file = task.live()?.files.get_allowing_opath(fd).map_err(|_| errno!(ENOENT))?;
378 let mode = FileMode::IFLNK | Access::from_open_flags(file.flags()).user_mode();
380 let task_reference = self.task.clone();
381 Ok(node.fs().create_node_and_allocate_node_id(
382 CallbackSymlinkNode::new(move || {
383 let task = Task::from_weak(&task_reference)?;
384 let file = task.live()?.files.get_allowing_opath(fd).map_err(|_| errno!(ENOENT))?;
385 Ok(SymlinkTarget::Node(file.name.to_passive()))
386 }),
387 FsNodeInfo::new(mode, task.real_fscred()),
388 ))
389 }
390}
391
// Namespace entry names exposed under the "ns" directory; NsDirectory::lookup
// rejects anything not listed here.
const NS_ENTRIES: &[&str] = &[
    "cgroup",
    "ipc",
    "mnt",
    "net",
    "pid",
    "pid_for_children",
    "time",
    "time_for_children",
    "user",
    "uts",
];
404
/// A file under the "attr" directory exposing one security attribute of the
/// task (read via `security::get_procattr`, written via
/// `security::set_procattr`).
struct AttrNode {
    // Which procattr this node reads/writes.
    attr: security::ProcAttr,
    task: WeakRef<Task>,
}

impl AttrNode {
    fn new(task: WeakRef<Task>, attr: security::ProcAttr) -> impl FsNodeOps {
        SimpleFileNode::new(move |_, _| Ok(AttrNode { attr, task: task.clone() }))
    }
}
416
417impl FileOps for AttrNode {
418 fileops_impl_seekable!();
419 fileops_impl_noop_sync!();
420
421 fn writes_update_seek_offset(&self) -> bool {
422 false
423 }
424
425 fn read(
426 &self,
427 _locked: &mut Locked<FileOpsCore>,
428 _file: &FileObject,
429 current_task: &CurrentTask,
430 offset: usize,
431 data: &mut dyn OutputBuffer,
432 ) -> Result<usize, Errno> {
433 let task = Task::from_weak(&self.task)?;
434 let response = security::get_procattr(current_task, &task, self.attr)?;
435 data.write(&response[offset..])
436 }
437
438 fn write(
439 &self,
440 _locked: &mut Locked<FileOpsCore>,
441 _file: &FileObject,
442 current_task: &CurrentTask,
443 offset: usize,
444 data: &mut dyn InputBuffer,
445 ) -> Result<usize, Errno> {
446 let task = Task::from_weak(&self.task)?;
447
448 if current_task.temp_task() != task {
450 return error!(EPERM);
451 }
452 if offset != 0 {
453 return error!(EINVAL);
454 }
455
456 let data = data.read_all()?;
457 let data_len = data.len();
458 security::set_procattr(current_task, self.attr, data.as_slice())?;
459 Ok(data_len)
460 }
461}
462
/// The "ns" directory: symlinks of the form "<ns>:[<id>]" for each supported
/// namespace, plus lookup of the "<ns>:[<id>]" target names themselves.
struct NsDirectory {
    task: WeakRef<Task>,
}
467
impl FsNodeOps for NsDirectory {
    fs_node_impl_dir_readonly!();

    /// Lists the fixed set of namespace names as symlink entries.
    fn create_file_ops(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        _node: &FsNode,
        _current_task: &CurrentTask,
        _flags: OpenFlags,
    ) -> Result<Box<dyn FileOps>, Errno> {
        Ok(VecDirectory::new_file(
            NS_ENTRIES
                .iter()
                .map(|&name| VecDirectoryEntry {
                    entry_type: DirectoryEntryType::LNK,
                    name: FsString::from(name),
                    inode: None,
                })
                .collect(),
        ))
    }

    /// Resolves either a bare namespace name ("mnt") to a symlink node, or a
    /// full "<ns>:[<id>]" name to the namespace file node the symlink targets.
    fn lookup(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        node: &FsNode,
        current_task: &CurrentTask,
        name: &FsStr,
    ) -> Result<FsNodeHandle, Errno> {
        let name = String::from_utf8(name.to_vec()).map_err(|_| errno!(ENOENT))?;
        // Split into the namespace name and the optional ":[id]" suffix.
        let mut elements = name.split(':');
        let ns = elements.next().expect("name must not be empty");
        if !NS_ENTRIES.contains(&ns) {
            return error!(ENOENT);
        }

        let task = Task::from_weak(&self.task)?;
        if let Some(id) = elements.next() {
            // "<ns>:[<id>]" form: the id must look like "[123]".
            static NS_IDENTIFIER_RE: LazyLock<Regex> =
                LazyLock::new(|| Regex::new("^\\[[0-9]+\\]$").unwrap());
            if !NS_IDENTIFIER_RE.is_match(id) {
                return error!(ENOENT);
            }
            let node_info = || FsNodeInfo::new(mode!(IFREG, 0o444), task.real_fscred());
            // Placeholder node for namespaces that are not implemented yet.
            let fallback = || {
                node.fs().create_node_and_allocate_node_id(BytesFile::new_node(vec![]), node_info())
            };
            Ok(match ns {
                "cgroup" => {
                    track_stub!(TODO("https://fxbug.dev/297313673"), "cgroup namespaces");
                    fallback()
                }
                "ipc" => {
                    track_stub!(TODO("https://fxbug.dev/297313673"), "ipc namespaces");
                    fallback()
                }
                // Only the mount namespace is backed by a real object today.
                "mnt" => node
                    .fs()
                    .create_node_and_allocate_node_id(current_task.fs().namespace(), node_info()),
                "net" => {
                    track_stub!(TODO("https://fxbug.dev/297313673"), "net namespaces");
                    fallback()
                }
                "pid" => {
                    track_stub!(TODO("https://fxbug.dev/297313673"), "pid namespaces");
                    fallback()
                }
                "pid_for_children" => {
                    track_stub!(TODO("https://fxbug.dev/297313673"), "pid_for_children namespaces");
                    fallback()
                }
                "time" => {
                    track_stub!(TODO("https://fxbug.dev/297313673"), "time namespaces");
                    fallback()
                }
                "time_for_children" => {
                    track_stub!(
                        TODO("https://fxbug.dev/297313673"),
                        "time_for_children namespaces"
                    );
                    fallback()
                }
                "user" => {
                    track_stub!(TODO("https://fxbug.dev/297313673"), "user namespaces");
                    fallback()
                }
                "uts" => {
                    track_stub!(TODO("https://fxbug.dev/297313673"), "uts namespaces");
                    fallback()
                }
                _ => return error!(ENOENT),
            })
        } else {
            // Bare "<ns>" form: return a symlink to "<ns>:[<id>]", where the id
            // is taken from the caller's mount namespace.
            // NOTE(review): the id used here is always the mount-namespace id
            // regardless of `ns` — confirm whether per-namespace ids are
            // intended once other namespaces exist.
            let id = current_task.fs().namespace().id;
            Ok(node.fs().create_node_and_allocate_node_id(
                CallbackSymlinkNode::new(move || {
                    Ok(SymlinkTarget::Path(format!("{name}:[{id}]").into()))
                }),
                // NOTE(review): 0o7777 sets setuid/setgid/sticky bits on a
                // symlink; Linux reports ns links as 0777 — confirm intended.
                FsNodeInfo::new(mode!(IFLNK, 0o7777), task.real_fscred()),
            ))
        }
    }
}
580
/// The "fdinfo" directory: one read-only text file per open fd describing the
/// descriptor's offset and flags.
struct FdInfoDirectory {
    task: WeakRef<Task>,
}

impl FdInfoDirectory {
    fn new(task: WeakRef<Task>) -> Self {
        Self { task }
    }
}
595
impl FsNodeOps for FdInfoDirectory {
    fs_node_impl_dir_readonly!();

    /// Lists one entry per currently open fd of the task.
    fn create_file_ops(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        _node: &FsNode,
        _current_task: &CurrentTask,
        _flags: OpenFlags,
    ) -> Result<Box<dyn FileOps>, Errno> {
        Ok(VecDirectory::new_file(fds_to_directory_entries(
            Task::from_weak(&self.task)?.live()?.files.get_all_fds(),
        )))
    }

    /// Builds the fdinfo text for a single fd: a "pos:"/"flags:" header plus
    /// any file-type-specific lines supplied by the file itself.
    fn lookup(
        &self,
        locked: &mut Locked<FileOpsCore>,
        node: &FsNode,
        current_task: &CurrentTask,
        name: &FsStr,
    ) -> Result<FsNodeHandle, Errno> {
        let task = Task::from_weak(&self.task)?;
        let fd = FdNumber::from_fs_str(name).map_err(|_| errno!(ENOENT))?;
        let file = task.live()?.files.get_allowing_opath(fd).map_err(|_| errno!(ENOENT))?;
        // Snapshot of the descriptor's state at lookup time; the node content
        // does not track later seeks or flag changes.
        let pos = *file.offset.lock();
        let flags = file.flags();
        // Flags are printed in octal with a leading 0, matching the
        // "flags:\t0..." line format.
        let mut data = format!("pos:\t{}\nflags:\t0{:o}\n", pos, flags.bits()).into_bytes();
        if let Some(extra_fdinfo) = file.extra_fdinfo(locked, current_task) {
            data.extend_from_slice(extra_fdinfo.as_slice());
        }
        Ok(node.fs().create_node_and_allocate_node_id(
            BytesFile::new_node(data),
            FsNodeInfo::new(mode!(IFREG, 0o444), task.real_fscred()),
        ))
    }
}
633
634fn fds_to_directory_entries(fds: Vec<FdNumber>) -> Vec<VecDirectoryEntry> {
635 fds.into_iter()
636 .map(|fd| VecDirectoryEntry {
637 entry_type: DirectoryEntryType::DIR,
638 name: fd.raw().to_string().into(),
639 inode: None,
640 })
641 .collect()
642}
643
/// The "task" directory of a pid directory: lists the tids of the thread
/// group's threads, each resolving to a per-tid directory.
struct TaskListDirectory {
    thread_group: Weak<ThreadGroup>,
}

impl TaskListDirectory {
    // Upgrades the weak group reference; ESRCH once the group is gone.
    fn thread_group(&self) -> Result<Arc<ThreadGroup>, Errno> {
        self.thread_group.upgrade().ok_or_else(|| errno!(ESRCH))
    }
}
654
655impl FsNodeOps for TaskListDirectory {
656 fs_node_impl_dir_readonly!();
657
658 fn create_file_ops(
659 &self,
660 _locked: &mut Locked<FileOpsCore>,
661 _node: &FsNode,
662 _current_task: &CurrentTask,
663 _flags: OpenFlags,
664 ) -> Result<Box<dyn FileOps>, Errno> {
665 Ok(VecDirectory::new_file(
666 self.thread_group()?
667 .read()
668 .task_ids()
669 .map(|tid| VecDirectoryEntry {
670 entry_type: DirectoryEntryType::DIR,
671 name: tid.to_string().into(),
672 inode: None,
673 })
674 .collect(),
675 ))
676 }
677
678 fn lookup(
679 &self,
680 _locked: &mut Locked<FileOpsCore>,
681 node: &FsNode,
682 _current_task: &CurrentTask,
683 name: &FsStr,
684 ) -> Result<FsNodeHandle, Errno> {
685 let thread_group = self.thread_group()?;
686 let tid = std::str::from_utf8(name)
687 .map_err(|_| errno!(ENOENT))?
688 .parse::<pid_t>()
689 .map_err(|_| errno!(ENOENT))?;
690 if !thread_group.read().contains_task(tid) {
692 return error!(ENOENT);
693 }
694
695 let pid_state = thread_group.kernel.pids.read();
696 let weak_task = pid_state.get_task(tid);
697 let task = weak_task.upgrade().ok_or_else(|| errno!(ENOENT))?;
698 std::mem::drop(pid_state);
699
700 Ok(tid_directory(&node.fs(), &task))
701 }
702}
703
/// The "cgroup" file: reports the cgroup (v2 hierarchy) of the task's thread
/// group in the "0::<path>" format.
#[derive(Clone)]
struct CgroupFile(WeakRef<Task>);
impl CgroupFile {
    pub fn new_node(task: WeakRef<Task>) -> impl FsNodeOps {
        DynamicFile::new_node(Self(task))
    }
}
impl DynamicFileSource for CgroupFile {
    fn generate(
        &self,
        _current_task: &CurrentTask,
        sink: &mut DynamicFileBuf,
    ) -> Result<(), Errno> {
        let task = Task::from_weak(&self.0)?;
        // Resolve the thread group's cgroup in the unified (cgroup2) hierarchy
        // and render its path relative to the hierarchy root.
        let cgroup = task.kernel().cgroups.cgroup2.get_cgroup(task.thread_group());
        let path = path_from_root(cgroup)?;
        sink.write(format!("0::{}\n", path).as_bytes());
        Ok(())
    }
}
724
725fn fill_buf_from_addr_range(
726 task: &Task,
727 range_start: UserAddress,
728 range_end: UserAddress,
729 sink: &mut DynamicFileBuf,
730) -> Result<(), Errno> {
731 #[allow(clippy::manual_saturating_arithmetic)]
732 let len = range_end.ptr().checked_sub(range_start.ptr()).unwrap_or(0);
733 let buf = task.read_memory_partial_to_vec(range_start, len)?;
737 sink.write(&buf[..]);
738 Ok(())
739}
740
741#[derive(Clone)]
743pub struct CmdlineFile {
744 task: WeakRef<Task>,
745}
746impl CmdlineFile {
747 pub fn new_node(task: WeakRef<Task>) -> impl FsNodeOps {
748 DynamicFile::new_node(Self { task })
749 }
750}
751impl DynamicFileSource for CmdlineFile {
752 fn generate(
753 &self,
754 _current_task: &CurrentTask,
755 sink: &mut DynamicFileBuf,
756 ) -> Result<(), Errno> {
757 let Some(task) = self.task.upgrade() else {
759 return Ok(());
760 };
761 let Ok(mm) = task.mm() else {
763 return Ok(());
764 };
765 let (start, end) = {
766 let mm_state = mm.state.read();
767 (mm_state.argv_start, mm_state.argv_end)
768 };
769 fill_buf_from_addr_range(&task, start, end, sink)
770 }
771}
772
/// Factory for proc nodes whose contents are gated behind a ptrace access
/// check against the target task.
struct PtraceCheckedNode {}

impl PtraceCheckedNode {
    /// Returns node ops that, on each open, check the opener's ptrace access
    /// (`mode`) to `task` before delegating to `create_ops` to build the file.
    pub fn new_node<F, O>(
        task: WeakRef<Task>,
        mode: PtraceAccessMode,
        create_ops: F,
    ) -> impl FsNodeOps
    where
        F: Fn(&mut Locked<FileOpsCore>, &CurrentTask, TempRef<'_, Task>) -> Result<O, Errno>
            + Send
            + Sync
            + 'static,
        O: FileOps,
    {
        SimpleFileNode::new(move |locked, current_task: &CurrentTask| {
            let task = Task::from_weak(&task)?;
            // The access check only runs for tasks that still have an address
            // space; any denial is reported uniformly as EACCES.
            // NOTE(review): skipping the check when mm() fails looks intended
            // for tasks without an address space — confirm.
            if task.mm().is_ok() {
                current_task
                    .check_ptrace_access_mode(locked, mode, &task)
                    .map_err(|_| errno!(EACCES))?;
            }
            create_ops(locked, current_task, task)
        })
    }
}
800
801#[derive(Clone)]
803pub struct EnvironFile {
804 task: WeakRef<Task>,
805}
806impl EnvironFile {
807 pub fn new_node(task: WeakRef<Task>) -> impl FsNodeOps {
808 PtraceCheckedNode::new_node(task, PTRACE_MODE_READ_FSCREDS, |_, _, task| {
809 Ok(DynamicFile::new(Self { task: task.into() }))
810 })
811 }
812}
813impl DynamicFileSource for EnvironFile {
814 fn generate(
815 &self,
816 _current_task: &CurrentTask,
817 sink: &mut DynamicFileBuf,
818 ) -> Result<(), Errno> {
819 let task = Task::from_weak(&self.task)?;
820 let Ok(mm) = task.mm() else {
822 return Ok(());
823 };
824 let (start, end) = {
825 let mm_state = mm.state.read();
826 (mm_state.environ_start, mm_state.environ_end)
827 };
828 fill_buf_from_addr_range(&task, start, end, sink)
829 }
830}
831
832#[derive(Clone)]
834pub struct AuxvFile {
835 task: WeakRef<Task>,
836}
837impl AuxvFile {
838 pub fn new_node(task: WeakRef<Task>) -> impl FsNodeOps {
839 PtraceCheckedNode::new_node(task, PTRACE_MODE_READ_FSCREDS, |_, _, task| {
840 Ok(DynamicFile::new(Self { task: task.into() }))
841 })
842 }
843}
844impl DynamicFileSource for AuxvFile {
845 fn generate(
846 &self,
847 _current_task: &CurrentTask,
848 sink: &mut DynamicFileBuf,
849 ) -> Result<(), Errno> {
850 let task = Task::from_weak(&self.task)?;
851 let Ok(mm) = task.mm() else {
853 return Ok(());
854 };
855 let (start, end) = {
856 let mm_state = mm.state.read();
857 (mm_state.auxv_start, mm_state.auxv_end)
858 };
859 fill_buf_from_addr_range(&task, start, end, sink)
860 }
861}
862
/// The "comm" file: read/write access to the task's command name.
pub struct CommFile {
    // Used on write to verify the writer is in the same thread group.
    task: WeakRef<Task>,
    // Persistent info captured at node creation; reads go through this so the
    // name remains readable via the snapshot it holds.
    info: TaskPersistentInfo,
}
impl CommFile {
    pub fn new_node(task: WeakRef<Task>, info: TaskPersistentInfo) -> impl FsNodeOps {
        SimpleFileNode::new(move |_, _| {
            Ok(DynamicFile::new(CommFile { task: task.clone(), info: info.clone() }))
        })
    }
}
875
impl DynamicFileSource for CommFile {
    /// Emits the command name followed by a newline.
    fn generate(
        &self,
        _current_task: &CurrentTask,
        sink: &mut DynamicFileBuf,
    ) -> Result<(), Errno> {
        sink.write(self.info.command_guard().comm_name());
        sink.write(b"\n");
        Ok(())
    }

    /// Sets the task's command name from the written bytes.
    ///
    /// The offset is ignored: every write replaces the whole name.
    fn write(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        current_task: &CurrentTask,
        _offset: usize,
        data: &mut dyn InputBuffer,
    ) -> Result<usize, Errno> {
        let task = Task::from_weak(&self.task)?;
        // Only tasks in the same thread group may rename this task.
        if !Arc::ptr_eq(&task.thread_group(), &current_task.thread_group()) {
            return error!(EINVAL);
        }
        // The full input is consumed and reported as written; TaskCommand
        // decides how the raw bytes map onto the stored name.
        let bytes = data.read_all()?;
        task.set_command_name(TaskCommand::new(&bytes));
        Ok(bytes.len())
    }
}
905
/// The "io" file: per-task I/O accounting counters. Currently a stub that
/// reports all counters as zero (see the track_stub in `generate`).
#[derive(Clone)]
pub struct IoFile {}
impl IoFile {
    pub fn new_node() -> impl FsNodeOps {
        DynamicFile::new_node(Self {})
    }
}
914impl DynamicFileSource for IoFile {
915 fn generate(
916 &self,
917 _current_task: &CurrentTask,
918 sink: &mut DynamicFileBuf,
919 ) -> Result<(), Errno> {
920 track_stub!(TODO("https://fxbug.dev/322874250"), "/proc/pid/io");
921 sink.write(b"rchar: 0\n");
922 sink.write(b"wchar: 0\n");
923 sink.write(b"syscr: 0\n");
924 sink.write(b"syscw: 0\n");
925 sink.write(b"read_bytes: 0\n");
926 sink.write(b"write_bytes: 0\n");
927 sink.write(b"cancelled_write_bytes: 0\n");
928 Ok(())
929 }
930}
931
/// The "limits" file: the task's resource limits rendered as the four-column
/// table (name, soft limit, hard limit, unit).
#[derive(Clone)]
pub struct LimitsFile(WeakRef<Task>);
impl LimitsFile {
    pub fn new_node(task: WeakRef<Task>) -> impl FsNodeOps {
        DynamicFile::new_node(Self(task))
    }
}
impl DynamicFileSource for LimitsFile {
    fn generate_locked(
        &self,
        locked: &mut Locked<FileOpsCore>,
        _current_task: &CurrentTask,
        sink: &mut DynamicFileBuf,
    ) -> Result<(), Errno> {
        let task = Task::from_weak(&self.0)?;
        // Limits are per thread group; hold the lock for the whole render so
        // the soft/hard pairs are consistent.
        let limits = task.thread_group().limits.lock(locked);

        // RLIM_INFINITY prints as the literal "unlimited"; all columns are
        // left-aligned to fixed widths.
        let write_limit = |sink: &mut DynamicFileBuf, value| {
            if value == RLIM_INFINITY as u64 {
                sink.write(format!("{:<20}", "unlimited").as_bytes());
            } else {
                sink.write(format!("{:<20}", value).as_bytes());
            }
        };
        sink.write(
            format!("{:<25}{:<20}{:<20}{:<10}\n", "Limit", "Soft Limit", "Hard Limit", "Units")
                .as_bytes(),
        );
        for resource in Resource::ALL {
            let desc = resource.desc();
            let limit = limits.get(resource);
            sink.write(format!("{:<25}", desc.name).as_bytes());
            write_limit(sink, limit.rlim_cur);
            write_limit(sink, limit.rlim_max);
            // Dimensionless limits have an empty unit column.
            if !desc.unit.is_empty() {
                sink.write(format!("{:<10}", desc.unit).as_bytes());
            }
            sink.write(b"\n");
        }
        Ok(())
    }
}
975
/// The "mem" file: byte-level read/write access to the target task's memory,
/// addressed by the file offset. Opens require PTRACE_MODE_ATTACH_REALCREDS.
pub struct MemFile {
    // Address space captured at open time; empty (Weak::default) when the task
    // had none. Reads/writes report 0 once it is gone.
    mm: Weak<MemoryManager>,

    // Kept so operations on an exited task degrade to EOF (0) instead of
    // touching a stale address space.
    task: WeakRef<Task>,
}

impl MemFile {
    pub fn new_node(task: WeakRef<Task>) -> impl FsNodeOps {
        PtraceCheckedNode::new_node(task, PTRACE_MODE_ATTACH_REALCREDS, |_, _, task| {
            let mm = task.mm().ok().as_ref().map(Arc::downgrade).unwrap_or_default();
            Ok(Self { mm, task: task.into() })
        })
    }
}
994
impl FileOps for MemFile {
    fileops_impl_noop_sync!();

    fn is_seekable(&self) -> bool {
        true
    }

    // Seeks are plain offset arithmetic: the offset is a memory address, there
    // is no meaningful end-of-file, so out-of-range seek math yields EINVAL.
    fn seek(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        _file: &FileObject,
        _current_task: &CurrentTask,
        current_offset: off_t,
        target: SeekTarget,
    ) -> Result<off_t, Errno> {
        default_seek(current_offset, target, || error!(EINVAL))
    }

    /// Reads target memory starting at the address given by the file offset.
    ///
    /// Returns EOF (0) when the task has exited or its address space is gone;
    /// partial reads stop where memory becomes inaccessible; access failures
    /// map to EIO.
    fn read(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        _file: &FileObject,
        current_task: &CurrentTask,
        offset: usize,
        data: &mut dyn OutputBuffer,
    ) -> Result<usize, Errno> {
        let Some(_task) = self.task.upgrade() else {
            return Ok(0);
        };
        let Some(mm) = self.mm.upgrade() else {
            return Ok(0);
        };
        let mut addr = UserAddress::from(offset as u64);
        data.write_each(&mut |bytes| {
            // Reading our own address space can use the direct path; another
            // task's memory goes through the cross-address-space accessor.
            let read_bytes = if current_task.has_same_address_space(Some(&mm)) {
                current_task.read_memory_partial(addr, bytes)
            } else {
                mm.syscall_read_memory_partial(addr, bytes)
            }
            .map_err(|_| errno!(EIO))?;
            let actual = read_bytes.len();
            addr = (addr + actual)?;
            Ok(actual)
        })
    }

    /// Writes target memory starting at the address given by the file offset.
    ///
    /// Returns 0 when the task or its address space is gone. The input buffer
    /// is peeked first and only advanced by the bytes actually written, so a
    /// partial failure does not over-consume the caller's data.
    fn write(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        _file: &FileObject,
        current_task: &CurrentTask,
        offset: usize,
        data: &mut dyn InputBuffer,
    ) -> Result<usize, Errno> {
        let Some(_task) = self.task.upgrade() else {
            return Ok(0);
        };
        let Some(mm) = self.mm.upgrade() else {
            return Ok(0);
        };
        let addr = UserAddress::from(offset as u64);
        let mut written = 0;
        let result = data.peek_each(&mut |bytes| {
            let actual = if current_task.has_same_address_space(Some(&mm)) {
                current_task.write_memory_partial((addr + written)?, bytes)
            } else {
                mm.syscall_write_memory_partial((addr + written)?, bytes)
            }
            .map_err(|_| errno!(EIO))?;
            written += actual;
            Ok(actual)
        });
        data.advance(written)?;
        result
    }
}
1071
/// The "stat" file: the single-line, space-separated statistics record.
#[derive(Clone)]
pub struct StatFile {
    task: WeakRef<Task>,
    // Whether CPU times are reported for the single task or summed over the
    // whole thread group (see generate_locked).
    scope: TaskEntryScope,
}

impl StatFile {
    pub fn new_node(task: WeakRef<Task>, scope: TaskEntryScope) -> impl FsNodeOps {
        DynamicFile::new_node(Self { task, scope })
    }
}
impl DynamicFileSource for StatFile {
    /// Renders the stat line. Field order matches the trailing `writeln!`;
    /// fields this implementation does not track are emitted as constants
    /// (declared with `= 0` below), while tracked fields are filled in from
    /// the task, thread group, and address space.
    fn generate_locked(
        &self,
        locked: &mut Locked<FileOpsCore>,
        current_task: &CurrentTask,
        sink: &mut DynamicFileBuf,
    ) -> Result<(), Errno> {
        let task = Task::from_weak(&self.task)?;

        // Declarations without initializers are always assigned below;
        // `= 0` marks untracked fields; `mut` marks fields that are only
        // filled in when the relevant source (process info, mm) is available.
        let pid: pid_t;
        let comm: TaskCommand;
        let state: char;
        let ppid: pid_t;
        let pgrp: pid_t;
        let session: pid_t;
        let tty_nr: i32;
        let tpgid: i32 = 0;
        let flags: u32 = 0;
        let minflt: u64 = 0;
        let cminflt: u64 = 0;
        let majflt: u64 = 0;
        let cmajflt: u64 = 0;
        let utime: i64;
        let stime: i64;
        let cutime: i64;
        let cstime: i64;
        let priority: i64 = 0;
        let nice: i64;
        let num_threads: i64;
        let itrealvalue: i64 = 0;
        let mut starttime: u64 = 0;
        let mut vsize: usize = 0;
        let mut rss: usize = 0;
        let mut rsslim: u64 = 0;
        let mut startcode: u64 = 0;
        let mut endcode: u64 = 0;
        let mut startstack: usize = 0;
        let mut kstkesp: u64 = 0;
        let mut kstkeip: u64 = 0;
        let signal: u64 = 0;
        let blocked: u64 = 0;
        let siginore: u64 = 0;
        let sigcatch: u64 = 0;
        let mut wchan: u64 = 0;
        let nswap: u64 = 0;
        let cnswap: u64 = 0;
        let exit_signal: i32 = 0;
        let processor: i32 = 0;
        let rt_priority: u32 = 0;
        let policy: u32 = 0;
        let delayacct_blkio_ticks: u64 = 0;
        let guest_time: u64 = 0;
        let cguest_time: i64 = 0;
        let mut start_data: u64 = 0;
        let mut end_data: u64 = 0;
        let mut start_brk: u64 = 0;
        let mut arg_start: usize = 0;
        let mut arg_end: usize = 0;
        let mut env_start: usize = 0;
        let mut env_end: usize = 0;
        let mut exit_code: i32 = 0;

        // Per-task fields.
        pid = task.get_tid();
        comm = task.command();
        state = task.state_code().code_char();
        nice = task.read().scheduler_state.normal_priority().as_nice() as i64;

        // Fields read under the thread-group lock, kept in one scope so the
        // guard is dropped before taking further locks below.
        {
            let thread_group = task.thread_group().read();
            ppid = thread_group.get_ppid();
            pgrp = thread_group.process_group.leader;
            session = thread_group.process_group.session.leader;

            {
                // tty_nr is the device number of the controlling terminal, or
                // 0 when the session has none.
                let session = thread_group.process_group.session.read();
                tty_nr = session
                    .controlling_terminal
                    .as_ref()
                    .map(|t| t.terminal.device().bits())
                    .unwrap_or(0) as i32;
            }

            cutime = duration_to_scheduler_clock(thread_group.children_time_stats.user_time);
            cstime = duration_to_scheduler_clock(thread_group.children_time_stats.system_time);

            num_threads = thread_group.tasks_count() as i64;
        }

        // CPU time is reported per task or summed over the group, depending on
        // whether this is a tid or a pid stat file.
        let time_stats = match self.scope {
            TaskEntryScope::Task => task.time_stats(),
            TaskEntryScope::ThreadGroup => task.thread_group().time_stats(),
        };
        utime = duration_to_scheduler_clock(time_stats.user_time);
        stime = duration_to_scheduler_clock(time_stats.system_time);

        if let Ok(info) = task.thread_group().process.info() {
            starttime =
                duration_to_scheduler_clock(info.start_time - zx::MonotonicInstant::ZERO) as u64;
        }

        // Memory-derived fields; all stay at their defaults when the task has
        // no address space.
        if let Ok(mm) = task.mm() {
            let mem_stats = mm.get_stats(current_task);
            let page_size = *PAGE_SIZE as usize;
            vsize = mem_stats.vm_size;
            rss = mem_stats.vm_rss / page_size;
            rsslim = task.thread_group().limits.lock(locked).get(Resource::RSS).rlim_max;

            {
                let mm_state = mm.state.read();
                startstack = mm_state.stack_start.ptr();
                arg_start = mm_state.argv_start.ptr();
                arg_end = mm_state.argv_end.ptr();
                env_start = mm_state.environ_start.ptr();
                env_end = mm_state.environ_end.ptr();
            }
        }

        // Readers without ptrace read access get the address-valued fields
        // masked (NOAUDIT keeps the probe from generating audit noise).
        if !current_task
            .check_ptrace_access_mode(locked, PTRACE_MODE_READ_FSCREDS | PTRACE_MODE_NOAUDIT, &task)
            .is_ok()
        {
            startcode = 1;
            endcode = 1;
            startstack = 0;
            kstkesp = 0;
            kstkeip = 0;
            wchan = 0;
            start_data = 0;
            end_data = 0;
            start_brk = 0;
            arg_start = 0;
            arg_end = 0;
            env_start = 0;
            env_end = 0;
            exit_code = 0;
        }

        writeln!(
            sink,
            "{pid} ({comm}) {state} {ppid} {pgrp} {session} {tty_nr} {tpgid} {flags} {minflt} {cminflt} {majflt} {cmajflt} {utime} {stime} {cutime} {cstime} {priority} {nice} {num_threads} {itrealvalue} {starttime} {vsize} {rss} {rsslim} {startcode} {endcode} {startstack} {kstkesp} {kstkeip} {signal} {blocked} {siginore} {sigcatch} {wchan} {nswap} {cnswap} {exit_signal} {processor} {rt_priority} {policy} {delayacct_blkio_ticks} {guest_time} {cguest_time} {start_data} {end_data} {start_brk} {arg_start} {arg_end} {env_start} {env_end} {exit_code}"
        )?;

        Ok(())
    }
}
1234
1235#[derive(Clone)]
1236pub struct StatmFile {
1237 task: WeakRef<Task>,
1238}
1239impl StatmFile {
1240 pub fn new_node(task: WeakRef<Task>) -> impl FsNodeOps {
1241 DynamicFile::new_node(Self { task })
1242 }
1243}
1244impl DynamicFileSource for StatmFile {
1245 fn generate(&self, current_task: &CurrentTask, sink: &mut DynamicFileBuf) -> Result<(), Errno> {
1246 let task = Task::from_weak(&self.task)?;
1248 let mem_stats = match task.mm() {
1249 Ok(mm) => mm.get_stats(current_task),
1250 Err(_) => Default::default(),
1251 };
1252 let page_size = *PAGE_SIZE as usize;
1253
1254 writeln!(
1256 sink,
1257 "{} {} {} {} 0 {} 0",
1258 mem_stats.vm_size / page_size,
1259 mem_stats.vm_rss / page_size,
1260 mem_stats.rss_shared / page_size,
1261 mem_stats.vm_exe / page_size,
1262 (mem_stats.vm_data + mem_stats.vm_stack) / page_size
1263 )?;
1264 Ok(())
1265 }
1266}
1267
1268#[derive(Clone)]
1269pub struct StatusFile(WeakRef<Task>);
1270impl StatusFile {
1271 pub fn new_node(task: WeakRef<Task>) -> impl FsNodeOps {
1272 DynamicFile::new_node(Self(task))
1273 }
1274}
1275impl DynamicFileSource for StatusFile {
1276 fn generate(&self, current_task: &CurrentTask, sink: &mut DynamicFileBuf) -> Result<(), Errno> {
1277 let task = &self.0.upgrade();
1278
1279 if String::from_utf8_lossy(current_task.task.persistent_info.command_guard().comm_name())
1285 .contains("traced_probes")
1286 {
1287 let target_pid = task.as_ref().map(|t| t.persistent_info.pid()).unwrap_or(0);
1288 starnix_logging::log_info!(
1289 "WATCHDOG_DEBUG: traced_probes generating StatusFile for target pid {}",
1290 target_pid
1291 );
1292 }
1293 let (tgid, pid, creds_string) = {
1294 if let Some(task) = task {
1295 track_stub!(TODO("https://fxbug.dev/297440106"), "/proc/pid/status zombies");
1296 write!(sink, "Name:\t")?;
1300 sink.write(task.persistent_info.command_guard().comm_name());
1301 let creds = task.persistent_info.real_creds();
1302 (
1303 Some(task.persistent_info.pid()),
1304 Some(task.persistent_info.tid()),
1305 Some(format!(
1306 "Uid:\t{}\t{}\t{}\t{}\nGid:\t{}\t{}\t{}\t{}\nGroups:\t{}",
1307 creds.uid,
1308 creds.euid,
1309 creds.saved_uid,
1310 creds.fsuid,
1311 creds.gid,
1312 creds.egid,
1313 creds.saved_gid,
1314 creds.fsgid,
1315 creds.groups.iter().map(|n| n.to_string()).join(" ")
1316 )),
1317 )
1318 } else {
1319 (None, None, None)
1320 }
1321 };
1322
1323 writeln!(sink)?;
1324
1325 if let Some(task) = task {
1326 writeln!(sink, "Umask:\t0{:03o}", task.live()?.fs().umask().bits())?;
1327 let task_state = task.read();
1328 writeln!(sink, "SigBlk:\t{:016x}", task_state.signal_mask().0)?;
1329 writeln!(sink, "SigPnd:\t{:016x}", task_state.task_specific_pending_signals().0)?;
1330 writeln!(
1331 sink,
1332 "ShdPnd:\t{:x}",
1333 task.thread_group().pending_signals.lock().pending().0
1334 )?;
1335 writeln!(sink, "NoNewPrivs:\t{}", task_state.no_new_privs() as u8)?;
1336
1337 let creds = task.real_creds();
1339 let cap_mask = Capabilities::all_existent();
1340 writeln!(sink, "CapInh:\t{:016x}", creds.cap_inheritable & cap_mask)?;
1341 writeln!(sink, "CapPrm:\t{:016x}", creds.cap_permitted & cap_mask)?;
1342 writeln!(sink, "CapEff:\t{:016x}", creds.cap_effective & cap_mask)?;
1343 writeln!(sink, "CapBnd:\t{:016x}", creds.cap_bounding & cap_mask)?;
1344 writeln!(sink, "CapAmb:\t{:016x}", creds.cap_ambient & cap_mask)?;
1345 }
1346
1347 let state_code =
1348 if let Some(task) = task { task.state_code() } else { TaskStateCode::Zombie };
1349 writeln!(sink, "State:\t{} ({})", state_code.code_char(), state_code.name())?;
1350
1351 if let Some(tgid) = tgid {
1352 writeln!(sink, "Tgid:\t{}", tgid)?;
1353 }
1354 if let Some(pid) = pid {
1355 writeln!(sink, "Pid:\t{}", pid)?;
1356 }
1357 let (ppid, threads, tracer_pid) = if let Some(task) = task {
1358 let tracer_pid = task.read().ptrace.as_ref().map_or(0, |p| p.get_pid());
1359 let task_group = task.thread_group().read();
1360 (task_group.get_ppid(), task_group.tasks_count(), tracer_pid)
1361 } else {
1362 (1, 1, 0)
1363 };
1364 writeln!(sink, "PPid:\t{}", ppid)?;
1365 writeln!(sink, "TracerPid:\t{}", tracer_pid)?;
1366
1367 if let Some(creds_string) = creds_string {
1368 writeln!(sink, "{}", creds_string)?;
1369 }
1370
1371 if let Some(task) = task {
1372 if let Ok(mm) = task.mm() {
1373 let mem_stats = mm.get_stats(current_task);
1374 writeln!(sink, "VmSize:\t{} kB", mem_stats.vm_size / 1024)?;
1375 writeln!(sink, "VmLck:\t{} kB", mem_stats.vm_lck / 1024)?;
1376 writeln!(sink, "VmRSS:\t{} kB", mem_stats.vm_rss / 1024)?;
1377 writeln!(sink, "RssAnon:\t{} kB", mem_stats.rss_anonymous / 1024)?;
1378 writeln!(sink, "RssFile:\t{} kB", mem_stats.rss_file / 1024)?;
1379 writeln!(sink, "RssShmem:\t{} kB", mem_stats.rss_shared / 1024)?;
1380 writeln!(sink, "VmData:\t{} kB", mem_stats.vm_data / 1024)?;
1381 writeln!(sink, "VmStk:\t{} kB", mem_stats.vm_stack / 1024)?;
1382 writeln!(sink, "VmExe:\t{} kB", mem_stats.vm_exe / 1024)?;
1383 writeln!(sink, "VmSwap:\t{} kB", mem_stats.vm_swap / 1024)?;
1384 writeln!(sink, "VmHWM:\t{} kB", mem_stats.vm_rss_hwm / 1024)?;
1385 }
1386 let seccomp = task.seccomp_filter_state.get() as u8;
1388 writeln!(sink, "Seccomp:\t{}", seccomp)?;
1389 }
1390
1391 writeln!(sink, "Threads:\t{}", std::cmp::max(1, threads))?;
1393
1394 Ok(())
1395 }
1396}
1397
1398struct OomScoreFile(WeakRef<Task>);
1399
1400impl OomScoreFile {
1401 fn new_node(task: WeakRef<Task>) -> impl FsNodeOps {
1402 BytesFile::new_node(Self(task))
1403 }
1404}
1405
1406impl BytesFileOps for OomScoreFile {
1407 fn read(&self, _current_task: &CurrentTask) -> Result<Cow<'_, [u8]>, Errno> {
1408 let _task = Task::from_weak(&self.0)?;
1409 track_stub!(TODO("https://fxbug.dev/322873459"), "/proc/pid/oom_score");
1410 Ok(serialize_for_file(0).into())
1411 }
1412}
1413
// Upper bounds of the legacy `oom_adj` and modern `oom_score_adj` ranges,
// widened from the uapi constants to `i32` for arithmetic below.
const OOM_ADJUST_MAX: i32 = uapi::OOM_ADJUST_MAX as i32;
const OOM_SCORE_ADJ_MAX: i32 = uapi::OOM_SCORE_ADJ_MAX as i32;
1417
1418struct OomAdjFile(WeakRef<Task>);
1419impl OomAdjFile {
1420 fn new_node(task: WeakRef<Task>) -> impl FsNodeOps {
1421 BytesFile::new_node(Self(task))
1422 }
1423}
1424
1425impl BytesFileOps for OomAdjFile {
1426 fn write(&self, current_task: &CurrentTask, data: Vec<u8>) -> Result<(), Errno> {
1427 let value = parse_i32_file(&data)?;
1428 let oom_score_adj = if value == OOM_DISABLE {
1429 OOM_SCORE_ADJ_MIN
1430 } else {
1431 if !(OOM_ADJUST_MIN..=OOM_ADJUST_MAX).contains(&value) {
1432 return error!(EINVAL);
1433 }
1434 let fraction = (value - OOM_ADJUST_MIN) / (OOM_ADJUST_MAX - OOM_ADJUST_MIN);
1435 fraction * (OOM_SCORE_ADJ_MAX - OOM_SCORE_ADJ_MIN) + OOM_SCORE_ADJ_MIN
1436 };
1437 security::check_task_capable(current_task, CAP_SYS_RESOURCE)?;
1438 let task = Task::from_weak(&self.0)?;
1439 task.write().oom_score_adj = oom_score_adj;
1440 Ok(())
1441 }
1442
1443 fn read(&self, _current_task: &CurrentTask) -> Result<Cow<'_, [u8]>, Errno> {
1444 let task = Task::from_weak(&self.0)?;
1445 let oom_score_adj = task.read().oom_score_adj;
1446 let oom_adj = if oom_score_adj == OOM_SCORE_ADJ_MIN {
1447 OOM_DISABLE
1448 } else {
1449 let fraction =
1450 (oom_score_adj - OOM_SCORE_ADJ_MIN) / (OOM_SCORE_ADJ_MAX - OOM_SCORE_ADJ_MIN);
1451 fraction * (OOM_ADJUST_MAX - OOM_ADJUST_MIN) + OOM_ADJUST_MIN
1452 };
1453 Ok(serialize_for_file(oom_adj).into())
1454 }
1455}
1456
1457struct OomScoreAdjFile(WeakRef<Task>);
1458
1459impl OomScoreAdjFile {
1460 fn new_node(task: WeakRef<Task>) -> impl FsNodeOps {
1461 BytesFile::new_node(Self(task))
1462 }
1463}
1464
1465impl BytesFileOps for OomScoreAdjFile {
1466 fn write(&self, current_task: &CurrentTask, data: Vec<u8>) -> Result<(), Errno> {
1467 let value = parse_i32_file(&data)?;
1468 if !(OOM_SCORE_ADJ_MIN..=OOM_SCORE_ADJ_MAX).contains(&value) {
1469 return error!(EINVAL);
1470 }
1471 security::check_task_capable(current_task, CAP_SYS_RESOURCE)?;
1472 let task = Task::from_weak(&self.0)?;
1473 task.write().oom_score_adj = value;
1474 Ok(())
1475 }
1476
1477 fn read(&self, _current_task: &CurrentTask) -> Result<Cow<'_, [u8]>, Errno> {
1478 let task = Task::from_weak(&self.0)?;
1479 let oom_score_adj = task.read().oom_score_adj;
1480 Ok(serialize_for_file(oom_score_adj).into())
1481 }
1482}
1483
1484struct TimerslackNsFile(WeakRef<Task>);
1485
1486impl TimerslackNsFile {
1487 fn new_node(task: WeakRef<Task>) -> impl FsNodeOps {
1488 BytesFile::new_node(Self(task))
1489 }
1490}
1491
1492impl BytesFileOps for TimerslackNsFile {
1493 fn write(&self, current_task: &CurrentTask, data: Vec<u8>) -> Result<(), Errno> {
1494 let target_task = Task::from_weak(&self.0)?;
1495 let same_task =
1496 current_task.task.thread_group().leader == target_task.thread_group().leader;
1497 if !same_task {
1498 security::check_task_capable(current_task, CAP_SYS_NICE)?;
1499 security::check_setsched_access(current_task, &target_task)?;
1500 };
1501
1502 let value = parse_unsigned_file(&data)?;
1503 target_task.write().set_timerslack_ns(value);
1504 Ok(())
1505 }
1506
1507 fn read(&self, current_task: &CurrentTask) -> Result<Cow<'_, [u8]>, Errno> {
1508 let target_task = Task::from_weak(&self.0)?;
1509 let same_task =
1510 current_task.task.thread_group().leader == target_task.thread_group().leader;
1511 if !same_task {
1512 security::check_task_capable(current_task, CAP_SYS_NICE)?;
1513 security::check_getsched_access(current_task, &target_task)?;
1514 };
1515
1516 let timerslack_ns = target_task.read().timerslack_ns;
1517 Ok(serialize_for_file(timerslack_ns).into())
1518 }
1519}
1520
1521struct ClearRefsFile(WeakRef<Task>);
1522
1523impl ClearRefsFile {
1524 fn new_node(task: WeakRef<Task>) -> impl FsNodeOps {
1525 BytesFile::new_node(Self(task))
1526 }
1527}
1528
1529impl BytesFileOps for ClearRefsFile {
1530 fn write(&self, _current_task: &CurrentTask, _data: Vec<u8>) -> Result<(), Errno> {
1531 let _task = Task::from_weak(&self.0)?;
1532 track_stub!(TODO("https://fxbug.dev/396221597"), "/proc/pid/clear_refs");
1533 Ok(())
1534 }
1535}