1use crate::mutable_state::{state_accessor, state_implementation};
6use crate::security;
7use crate::task::{CurrentTask, EventHandler, Kernel, Task, WaitCanceler, Waiter};
8use crate::time::utc;
9use crate::vfs::buffers::InputBuffer;
10use crate::vfs::fs_registry::FsRegistry;
11use crate::vfs::pseudo::dynamic_file::{DynamicFile, DynamicFileBuf, DynamicFileSource};
12use crate::vfs::pseudo::simple_file::SimpleFileNode;
13use crate::vfs::socket::{SocketAddress, SocketHandle, UnixSocket};
14use crate::vfs::{
15 CheckAccessReason, DirEntry, DirEntryHandle, FileHandle, FileObject, FileOps, FileSystemHandle,
16 FileSystemOptions, FileWriteGuardMode, FsNode, FsNodeHandle, FsNodeOps, FsStr, FsString,
17 PathBuilder, RenameFlags, SymlinkTarget, UnlinkKind, fileops_impl_dataless,
18 fileops_impl_delegate_read_and_seek, fileops_impl_nonseekable, fileops_impl_noop_sync,
19 fs_node_impl_not_dir,
20};
21use fuchsia_rcu::RcuReadScope;
22use macro_rules_attribute::apply;
23use ref_cast::RefCast;
24use starnix_logging::log_warn;
25use starnix_rcu::RcuHashMap;
26use starnix_sync::{
27 BeforeFsNodeAppend, FileOpsCore, LockBefore, LockEqualOrBefore, Locked, Mutex, RwLock, Unlocked,
28};
29use starnix_types::ownership::WeakRef;
30use starnix_uapi::arc_key::{ArcKey, PtrKey, WeakKey};
31use starnix_uapi::auth::UserAndOrGroupId;
32use starnix_uapi::device_type::DeviceType;
33use starnix_uapi::errors::Errno;
34use starnix_uapi::file_mode::{AccessCheck, FileMode};
35use starnix_uapi::inotify_mask::InotifyMask;
36use starnix_uapi::mount_flags::MountFlags;
37use starnix_uapi::open_flags::OpenFlags;
38use starnix_uapi::unmount_flags::UnmountFlags;
39use starnix_uapi::vfs::{FdEvents, ResolveFlags};
40use starnix_uapi::{NAME_MAX, errno, error};
41use std::borrow::Borrow;
42use std::collections::HashSet;
43use std::fmt;
44use std::hash::{Hash, Hasher};
45use std::ops::{Deref, DerefMut};
46use std::sync::{Arc, Weak};
47
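/// A mount namespace: a tree of mounts rooted at `root_mount`, giving the tasks that use it
/// their own view of the filesystem.
///
/// Illustrative sketch of building a namespace and mounting a filesystem on one of its
/// directories. It mirrors the tests at the bottom of this file; `locked`, `current_task`,
/// `kernel`, and the `TmpFs` filesystems (`root_fs`, `dev_fs`) are assumed to come from that
/// test environment.
///
/// ```ignore
/// let root_fs = TmpFs::new_fs(locked, &kernel);
/// let ns = Namespace::new(root_fs);
/// let mut context = LookupContext::default();
/// let dev = ns.root().lookup_child(locked, &current_task, &mut context, "dev".into())?;
/// dev.mount(WhatToMount::Fs(dev_fs), MountFlags::empty())?;
/// ```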
48#[derive(Debug)]
52pub struct Namespace {
53 root_mount: MountHandle,
54
55 pub id: u64,
57}
58
59impl Namespace {
60 pub fn new(fs: FileSystemHandle) -> Arc<Namespace> {
61 Self::new_with_flags(fs, MountFlags::empty())
62 }
63
64 pub fn new_with_flags(fs: FileSystemHandle, flags: MountFlags) -> Arc<Namespace> {
65 let kernel = fs.kernel.upgrade().expect("can't create namespace without a kernel");
66 let root_mount = Mount::new(WhatToMount::Fs(fs), flags);
67 Arc::new(Self { root_mount, id: kernel.get_next_namespace_id() })
68 }
69
70 pub fn root(&self) -> NamespaceNode {
71 self.root_mount.root()
72 }
73
74 pub fn clone_namespace(&self) -> Arc<Namespace> {
75 let kernel =
76 self.root_mount.fs.kernel.upgrade().expect("can't clone namespace without a kernel");
77 Arc::new(Self {
78 root_mount: self.root_mount.clone_mount_recursive(),
79 id: kernel.get_next_namespace_id(),
80 })
81 }
82
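    /// Translate `node` into the equivalent node in `new_ns`: record the chain of mountpoints
    /// from `node` up to the root of its own namespace, then replay that chain starting from
    /// `new_ns`'s root mount. Returns `None` if the corresponding mount does not exist in
    /// `new_ns`.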
83 pub fn translate_node(mut node: NamespaceNode, new_ns: &Namespace) -> Option<NamespaceNode> {
86 let mut mountpoints = vec![];
88 let mut mount = node.mount;
89 while let Some(mountpoint) = mount.as_ref().and_then(|m| m.read().mountpoint()) {
90 mountpoints.push(mountpoint.entry);
91 mount = mountpoint.mount;
92 }
93
94 let mut mount = Arc::clone(&new_ns.root_mount);
96 for mountpoint in mountpoints.iter().rev() {
97 let next_mount =
98 mount.read().submounts.get(ArcKey::ref_cast(mountpoint))?.mount.clone();
99 mount = next_mount;
100 }
101 node.mount = Some(mount).into();
102 Some(node)
103 }
104}
105
106impl FsNodeOps for Arc<Namespace> {
107 fs_node_impl_not_dir!();
108
109 fn create_file_ops(
110 &self,
111 _locked: &mut Locked<FileOpsCore>,
112 _node: &FsNode,
113 _current_task: &CurrentTask,
114 _flags: OpenFlags,
115 ) -> Result<Box<dyn FileOps>, Errno> {
116 Ok(Box::new(MountNamespaceFile(self.clone())))
117 }
118}
119
120pub struct MountNamespaceFile(pub Arc<Namespace>);
121
122impl FileOps for MountNamespaceFile {
123 fileops_impl_nonseekable!();
124 fileops_impl_dataless!();
125 fileops_impl_noop_sync!();
126}
127
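/// Marker `Arc` cloned into every `ActiveNamespaceNode` that references a mount.
/// `Mount::active_clients` derives the number of active users from its strong count
/// (excluding the mount's own reference), which is what keeps a busy mount from being
/// unmounted without `UnmountFlags::DETACH`.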
128type MountClientMarker = Arc<()>;
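
/// An instance of a filesystem (or a bind mount of part of one) attached somewhere in a
/// `Namespace`. Holds the `DirEntry` serving as the mount's root, the per-mount flags, and
/// the mutable mount tree and propagation state in `state`.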
133
134pub struct Mount {
143 root: DirEntryHandle,
144 flags: Mutex<MountFlags>,
145 fs: FileSystemHandle,
146
147 id: u64,
149
150 active_client_counter: MountClientMarker,
152
153 state: RwLock<MountState>,
155 }
162type MountHandle = Arc<Mount>;
163
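/// The mount a `NamespaceNode` was resolved through, if any. Nodes created outside of any
/// namespace (see `NamespaceNode::new_anonymous`) have no mount.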
164#[derive(Clone, Debug)]
166pub struct MountInfo {
167 handle: Option<MountHandle>,
168}
169
170impl MountInfo {
171 pub fn detached() -> Self {
174 None.into()
175 }
176
177 pub fn flags(&self) -> MountFlags {
179 if let Some(handle) = &self.handle {
180 handle.flags()
181 } else {
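            // Nodes that are not part of a mount behave as if mounted `noatime`, so
            // `NamespaceNode::update_atime` skips them.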
182 MountFlags::NOATIME
184 }
185 }
186
187 pub fn check_readonly_filesystem(&self) -> Result<(), Errno> {
189 if self.flags().contains(MountFlags::RDONLY) {
190 return error!(EROFS);
191 }
192 Ok(())
193 }
194
195 pub fn check_noexec_filesystem(&self) -> Result<(), Errno> {
197 if self.flags().contains(MountFlags::NOEXEC) {
198 return error!(EACCES);
199 }
200 Ok(())
201 }
202}
203
204impl Deref for MountInfo {
205 type Target = Option<MountHandle>;
206
207 fn deref(&self) -> &Self::Target {
208 &self.handle
209 }
210}
211
212impl DerefMut for MountInfo {
213 fn deref_mut(&mut self) -> &mut Self::Target {
214 &mut self.handle
215 }
216}
217
218impl std::cmp::PartialEq for MountInfo {
219 fn eq(&self, other: &Self) -> bool {
220 self.handle.as_ref().map(Arc::as_ptr) == other.handle.as_ref().map(Arc::as_ptr)
221 }
222}
223
224impl std::cmp::Eq for MountInfo {}
225
impl From<Option<MountHandle>> for MountInfo {
    fn from(handle: Option<MountHandle>) -> MountInfo {
        MountInfo { handle }
    }
}
231
232#[derive(Default)]
233pub struct MountState {
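    /// Where this mount is attached: the parent `Mount` (held weakly) and the `DirEntry` it
    /// covers. `None` for a namespace's root mount and for mounts that are not attached.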
234 mountpoint: Option<(Weak<Mount>, DirEntryHandle)>,
239
240 submounts: HashSet<Submount>,
249
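    /// The peer group this mount belongs to when it is shared (see `make_shared`), together
    /// with the pointer key under which it is registered so it can be removed on drop.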
250 peer_group_: Option<(Arc<PeerGroup>, PtrKey<Mount>)>,
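    /// The peer group this mount receives propagation from when it is a downstream ("slave")
    /// mount (see `make_downstream`), held weakly, together with this mount's pointer key.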
255 upstream_: Option<(Weak<PeerGroup>, PtrKey<Mount>)>,
258}
259
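/// A group of mounts that propagate mount and unmount events to one another. `mounts` holds
/// the members of the group; `downstream` holds mounts that receive propagation from the
/// group without propagating back.
///
/// A minimal sketch of opting a mount into propagation (`mount` is assumed to be a
/// `MountHandle` obtained elsewhere):
///
/// ```ignore
/// // Make the mount shared, so mounts created beneath it are replicated to its peers.
/// mount.change_propagation(MountFlags::SHARED, /* recursive */ false);
/// ```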
260#[derive(Default)]
264struct PeerGroup {
265 id: u64,
266 state: RwLock<PeerGroupState>,
267}
268#[derive(Default)]
269struct PeerGroupState {
270 mounts: HashSet<WeakKey<Mount>>,
271 downstream: HashSet<WeakKey<Mount>>,
272}
273
274pub enum WhatToMount {
275 Fs(FileSystemHandle),
276 Bind(NamespaceNode),
277}
278
279impl Mount {
280 pub fn new(what: WhatToMount, flags: MountFlags) -> MountHandle {
281 match what {
282 WhatToMount::Fs(fs) => Self::new_with_root(fs.root().clone(), flags),
283 WhatToMount::Bind(node) => {
284 let mount = node.mount.as_ref().expect("can't bind mount from an anonymous node");
285 mount.clone_mount(&node.entry, flags)
286 }
287 }
288 }
289
290 fn new_with_root(root: DirEntryHandle, flags: MountFlags) -> MountHandle {
291 let known_flags = MountFlags::STORED_ON_MOUNT;
292 assert!(
293 !flags.intersects(!known_flags),
294 "mount created with extra flags {:?}",
295 flags - known_flags
296 );
297 let fs = root.node.fs();
298 let kernel = fs.kernel.upgrade().expect("can't create mount without kernel");
299 Arc::new(Self {
300 id: kernel.get_next_mount_id(),
301 flags: Mutex::new(flags),
302 root,
303 active_client_counter: Default::default(),
304 fs,
305 state: Default::default(),
306 })
307 }
308
309 pub fn root(self: &MountHandle) -> NamespaceNode {
311 NamespaceNode::new(Arc::clone(self), Arc::clone(&self.root))
312 }
313
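    /// Mount `what` on `dir` within this mount. If this mount is shared, the new mount is
    /// replicated onto every peer (and, transitively, every downstream) of its peer group,
    /// following shared-subtree semantics.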
314 fn create_submount(
316 self: &MountHandle,
317 dir: &DirEntryHandle,
318 what: WhatToMount,
319 flags: MountFlags,
320 ) {
321 let peers = {
334 let state = self.state.read();
335 state.peer_group().map(|g| g.copy_propagation_targets()).unwrap_or_default()
336 };
337
338 let mount = Mount::new(what, flags);
343
344 if self.read().is_shared() {
345 mount.write().make_shared();
346 }
347
348 for peer in peers {
349 if Arc::ptr_eq(self, &peer) {
350 continue;
351 }
352 let clone = mount.clone_mount_recursive();
353 peer.write().add_submount_internal(dir, clone);
354 }
355
356 self.write().add_submount_internal(dir, mount)
357 }
358
359 fn remove_submount(self: &MountHandle, mount_hash_key: &ArcKey<DirEntry>) -> Result<(), Errno> {
360 let peers = {
362 let state = self.state.read();
363 state.peer_group().map(|g| g.copy_propagation_targets()).unwrap_or_default()
364 };
365
366 for peer in peers {
367 if Arc::ptr_eq(self, &peer) {
368 continue;
369 }
370 let mut peer = peer.write();
374 if let Some(submount) = peer.submounts.get(mount_hash_key) {
375 if !submount.mount.read().submounts.is_empty() {
376 continue;
377 }
378 }
379 let _ = peer.remove_submount_internal(mount_hash_key);
380 }
381
382 self.write().remove_submount_internal(mount_hash_key)
383 }
384
385 fn clone_mount(
388 self: &MountHandle,
389 new_root: &DirEntryHandle,
390 flags: MountFlags,
391 ) -> MountHandle {
392 assert!(new_root.is_descendant_of(&self.root));
393 let clone = Self::new_with_root(Arc::clone(new_root), self.flags());
396
397 if flags.contains(MountFlags::REC) {
398 let mut submounts = vec![];
404 for Submount { dir, mount } in &self.state.read().submounts {
405 submounts.push((dir.clone(), mount.clone_mount_recursive()));
406 }
407 let mut clone_state = clone.write();
408 for (dir, submount) in submounts {
409 clone_state.add_submount_internal(&dir, submount);
410 }
411 }
412
413 let peer_group = self.state.read().peer_group().map(Arc::clone);
415 if let Some(peer_group) = peer_group {
416 clone.write().set_peer_group(peer_group);
417 }
418
419 clone
420 }
421
422 fn clone_mount_recursive(self: &MountHandle) -> MountHandle {
425 self.clone_mount(&self.root, MountFlags::REC)
426 }
427
428 pub fn change_propagation(self: &MountHandle, flag: MountFlags, recursive: bool) {
429 let mut state = self.write();
430 match flag {
431 MountFlags::SHARED => state.make_shared(),
432 MountFlags::PRIVATE => state.make_private(),
433 MountFlags::DOWNSTREAM => state.make_downstream(),
434 _ => {
                log_warn!("unsupported mount propagation flag: {:?}", flag);
436 return;
437 }
438 }
439
440 if recursive {
441 for submount in &state.submounts {
442 submount.mount.change_propagation(flag, recursive);
443 }
444 }
445 }
446
447 fn flags(&self) -> MountFlags {
448 *self.flags.lock()
449 }
450
451 pub fn update_flags(self: &MountHandle, mut flags: MountFlags) {
452 flags &= MountFlags::STORED_ON_MOUNT;
453 let atime_flags = MountFlags::NOATIME
454 | MountFlags::NODIRATIME
455 | MountFlags::RELATIME
456 | MountFlags::STRICTATIME;
457 let mut stored_flags = self.flags.lock();
458 if !flags.intersects(atime_flags) {
459 flags |= *stored_flags & atime_flags;
464 }
465 flags &= !MountFlags::STRICTATIME;
467 *stored_flags = flags;
468 }
469
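    /// The number of `ActiveNamespaceNode`s currently holding this mount's client marker,
    /// excluding the mount's own reference. A non-zero count makes a non-detaching unmount
    /// fail with `EBUSY`.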
470 fn active_clients(&self) -> usize {
474 Arc::strong_count(&self.active_client_counter) - 1
476 }
477
478 pub fn unmount(&self, flags: UnmountFlags) -> Result<(), Errno> {
479 if !flags.contains(UnmountFlags::DETACH) {
480 if self.active_clients() > 0 || !self.state.read().submounts.is_empty() {
481 return error!(EBUSY);
482 }
483 }
484 let mountpoint = self.state.read().mountpoint().ok_or_else(|| errno!(EINVAL))?;
485 let parent_mount = mountpoint.mount.as_ref().expect("a mountpoint must be part of a mount");
486 parent_mount.remove_submount(mountpoint.mount_hash_key())
487 }
488
489 pub fn security_state(&self) -> &security::FileSystemState {
491 &self.fs.security_state
492 }
493
494 pub fn fs_name(&self) -> &'static FsStr {
496 self.fs.name()
497 }
498
499 state_accessor!(Mount, state, Arc<Mount>);
500}
501
502impl MountState {
503 pub fn has_submount(&self, dir_entry: &DirEntryHandle) -> bool {
505 self.submounts.contains(ArcKey::ref_cast(dir_entry))
506 }
507
508 fn mountpoint(&self) -> Option<NamespaceNode> {
510 let (mount, entry) = self.mountpoint.as_ref()?;
511 Some(NamespaceNode::new(mount.upgrade()?, entry.clone()))
512 }
513
514 fn peer_group(&self) -> Option<&Arc<PeerGroup>> {
516 let (group, _) = self.peer_group_.as_ref()?;
517 Some(group)
518 }
519
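    /// Remove this mount from its peer group, if any, returning the group. If the mount also
    /// had an upstream, the upstream is handed to another surviving member of the old group so
    /// that propagation into the group is not lost.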
520 fn take_from_peer_group(&mut self) -> Option<Arc<PeerGroup>> {
522 let (old_group, old_mount) = self.peer_group_.take()?;
523 old_group.remove(old_mount);
524 if let Some(upstream) = self.take_from_upstream() {
525 let next_mount =
526 old_group.state.read().mounts.iter().next().map(|w| w.0.upgrade().unwrap());
527 if let Some(next_mount) = next_mount {
528 next_mount.write().set_upstream(upstream);
532 }
533 }
534 Some(old_group)
535 }
536
537 fn upstream(&self) -> Option<Arc<PeerGroup>> {
538 self.upstream_.as_ref().and_then(|g| g.0.upgrade())
539 }
540
541 fn take_from_upstream(&mut self) -> Option<Arc<PeerGroup>> {
542 let (old_upstream, old_mount) = self.upstream_.take()?;
543 let old_upstream = old_upstream.upgrade()?;
546 old_upstream.remove_downstream(old_mount);
547 Some(old_upstream)
548 }
549}
550
551#[apply(state_implementation!)]
552impl MountState<Base = Mount, BaseType = Arc<Mount>> {
553 fn add_submount_internal(&mut self, dir: &DirEntryHandle, mount: MountHandle) {
555 if !dir.is_descendant_of(&self.base.root) {
556 return;
557 }
558
559 let submount = mount.fs.kernel.upgrade().unwrap().mounts.register_mount(dir, mount.clone());
560 let old_mountpoint =
561 mount.state.write().mountpoint.replace((Arc::downgrade(self.base), Arc::clone(dir)));
562 assert!(old_mountpoint.is_none(), "add_submount can only take a newly created mount");
563 let old_mount = self.submounts.replace(submount);
566
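        // If another mount was already attached at `dir`, it is now shadowed: re-parent it so
        // that it hangs off the root of the newly attached mount instead.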
567 if let Some(mut old_mount) = old_mount {
571 old_mount.mount.write().mountpoint = Some((Arc::downgrade(&mount), Arc::clone(dir)));
576 old_mount.dir = ArcKey(mount.root.clone());
577 mount.write().submounts.insert(old_mount);
578 }
579 }
580
581 fn remove_submount_internal(&mut self, mount_hash_key: &ArcKey<DirEntry>) -> Result<(), Errno> {
582 if self.submounts.remove(mount_hash_key) { Ok(()) } else { error!(EINVAL) }
583 }
584
585 fn set_peer_group(&mut self, group: Arc<PeerGroup>) {
587 self.take_from_peer_group();
588 group.add(self.base);
589 self.peer_group_ = Some((group, Arc::as_ptr(self.base).into()));
590 }
591
592 fn set_upstream(&mut self, group: Arc<PeerGroup>) {
593 self.take_from_upstream();
594 group.add_downstream(self.base);
595 self.upstream_ = Some((Arc::downgrade(&group), Arc::as_ptr(self.base).into()));
596 }
597
598 pub fn is_shared(&self) -> bool {
600 self.peer_group().is_some()
601 }
602
603 pub fn make_shared(&mut self) {
605 if self.is_shared() {
606 return;
607 }
608 let kernel =
609 self.base.fs.kernel.upgrade().expect("can't create new peer group without kernel");
610 self.set_peer_group(PeerGroup::new(kernel.get_next_peer_group_id()));
611 }
612
613 pub fn make_private(&mut self) {
615 self.take_from_peer_group();
616 self.take_from_upstream();
617 }
618
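    /// Turn a shared mount into a downstream ("slave") mount: it leaves its peer group but
    /// keeps receiving propagation from it.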
619 pub fn make_downstream(&mut self) {
622 if let Some(peer_group) = self.take_from_peer_group() {
623 self.set_upstream(peer_group);
624 }
625 }
626}
627
628impl PeerGroup {
629 fn new(id: u64) -> Arc<Self> {
630 Arc::new(Self { id, state: Default::default() })
631 }
632
633 fn add(&self, mount: &Arc<Mount>) {
634 self.state.write().mounts.insert(WeakKey::from(mount));
635 }
636
637 fn remove(&self, mount: PtrKey<Mount>) {
638 self.state.write().mounts.remove(&mount);
639 }
640
641 fn add_downstream(&self, mount: &Arc<Mount>) {
642 self.state.write().downstream.insert(WeakKey::from(mount));
643 }
644
645 fn remove_downstream(&self, mount: PtrKey<Mount>) {
646 self.state.write().downstream.remove(&mount);
647 }
648
649 fn copy_propagation_targets(&self) -> Vec<MountHandle> {
650 let mut buf = vec![];
651 self.collect_propagation_targets(&mut buf);
652 buf
653 }
654
655 fn collect_propagation_targets(&self, buf: &mut Vec<MountHandle>) {
656 let downstream_mounts: Vec<_> = {
657 let state = self.state.read();
658 buf.extend(state.mounts.iter().filter_map(|m| m.0.upgrade()));
659 state.downstream.iter().filter_map(|m| m.0.upgrade()).collect()
660 };
661 for mount in downstream_mounts {
662 let peer_group = mount.read().peer_group().map(Arc::clone);
663 match peer_group {
664 Some(group) => group.collect_propagation_targets(buf),
665 None => buf.push(mount),
666 }
667 }
668 }
669}
670
671impl Drop for Mount {
672 fn drop(&mut self) {
673 let state = self.state.get_mut();
674 state.take_from_peer_group();
675 state.take_from_upstream();
676 }
677}
678
679impl fmt::Debug for Mount {
680 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
681 let state = self.state.read();
682 f.debug_struct("Mount")
            .field("id", &self.id)
684 .field("root", &self.root)
685 .field("mountpoint", &state.mountpoint)
686 .field("submounts", &state.submounts)
687 .finish()
688 }
689}
690
691impl Kernel {
692 pub fn get_next_mount_id(&self) -> u64 {
693 self.next_mount_id.next()
694 }
695
696 pub fn get_next_peer_group_id(&self) -> u64 {
697 self.next_peer_group_id.next()
698 }
699
700 pub fn get_next_namespace_id(&self) -> u64 {
701 self.next_namespace_id.next()
702 }
703}
704
705impl CurrentTask {
706 pub fn create_filesystem(
707 &self,
708 locked: &mut Locked<Unlocked>,
709 fs_type: &FsStr,
710 options: FileSystemOptions,
711 ) -> Result<FileSystemHandle, Errno> {
712 self.kernel()
719 .expando
720 .get::<FsRegistry>()
721 .create(locked, self, fs_type, options)
722 .ok_or_else(|| errno!(ENODEV, fs_type))?
723 }
724}
725
726fn write_mount_info(task: &Task, sink: &mut DynamicFileBuf, mount: &Mount) -> Result<(), Errno> {
728 write!(sink, "{}", mount.flags())?;
729 security::sb_show_options(&task.kernel(), sink, &mount)
730}
731
732struct ProcMountsFileSource(WeakRef<Task>);
733
734impl DynamicFileSource for ProcMountsFileSource {
735 fn generate(
736 &self,
737 _current_task: &CurrentTask,
738 sink: &mut DynamicFileBuf,
739 ) -> Result<(), Errno> {
740 let task = Task::from_weak(&self.0)?;
745 let root = task.fs().root();
746 let ns = task.fs().namespace();
747 for_each_mount(&ns.root_mount, &mut |mount| {
748 let mountpoint = mount.read().mountpoint().unwrap_or_else(|| mount.root());
749 if !mountpoint.is_descendant_of(&root) {
750 return Ok(());
751 }
752 write!(
753 sink,
754 "{} {} {} ",
755 mount.fs.options.source_for_display(),
756 mountpoint.path(&task),
757 mount.fs.name(),
758 )?;
759 write_mount_info(&task, sink, mount)?;
760 writeln!(sink, " 0 0")?;
761 Ok(())
762 })?;
763 Ok(())
764 }
765}
766
767pub struct ProcMountsFile {
768 dynamic_file: DynamicFile<ProcMountsFileSource>,
769}
770
771impl ProcMountsFile {
772 pub fn new_node(task: WeakRef<Task>) -> impl FsNodeOps {
773 SimpleFileNode::new(move |_, _| {
774 Ok(Self { dynamic_file: DynamicFile::new(ProcMountsFileSource(task.clone())) })
775 })
776 }
777}
778
779impl FileOps for ProcMountsFile {
780 fileops_impl_delegate_read_and_seek!(self, self.dynamic_file);
781 fileops_impl_noop_sync!();
782
783 fn write(
784 &self,
785 _locked: &mut Locked<FileOpsCore>,
786 _file: &FileObject,
787 _current_task: &CurrentTask,
788 _offset: usize,
789 _data: &mut dyn InputBuffer,
790 ) -> Result<usize, Errno> {
791 error!(ENOSYS)
792 }
793
794 fn wait_async(
795 &self,
796 _locked: &mut Locked<FileOpsCore>,
797 _file: &FileObject,
798 _current_task: &CurrentTask,
799 waiter: &Waiter,
800 _events: FdEvents,
801 _handler: EventHandler,
802 ) -> Option<WaitCanceler> {
803 Some(waiter.fake_wait())
806 }
807
808 fn query_events(
809 &self,
810 _locked: &mut Locked<FileOpsCore>,
811 _file: &FileObject,
812 _current_task: &CurrentTask,
813 ) -> Result<FdEvents, Errno> {
814 Ok(FdEvents::empty())
815 }
816}
817
818#[derive(Clone)]
819pub struct ProcMountinfoFile(WeakRef<Task>);
820impl ProcMountinfoFile {
821 pub fn new_node(task: WeakRef<Task>) -> impl FsNodeOps {
822 DynamicFile::new_node(Self(task))
823 }
824}
825impl DynamicFileSource for ProcMountinfoFile {
826 fn generate(
827 &self,
828 _current_task: &CurrentTask,
829 sink: &mut DynamicFileBuf,
830 ) -> Result<(), Errno> {
831 fn path_from_fs_root(dir: &DirEntryHandle) -> FsString {
833 let mut path = PathBuilder::new();
834 if dir.is_dead() {
835 path.prepend_element("/deleted".into());
837 }
838 let scope = RcuReadScope::new();
839 let mut current = dir.deref();
840 while let Some(parent) = current.parent_ref(&scope) {
841 path.prepend_element(current.local_name(&scope));
842 current = parent;
843 }
844 path.build_absolute()
845 }
846
847 let task = Task::from_weak(&self.0)?;
852 let root = task.fs().root();
853 let ns = task.fs().namespace();
854 for_each_mount(&ns.root_mount, &mut |mount| {
855 let mountpoint = mount.read().mountpoint().unwrap_or_else(|| mount.root());
856 if !mountpoint.is_descendant_of(&root) {
857 return Ok(());
858 }
859 let parent = mountpoint.mount.as_ref().unwrap();
861 write!(
862 sink,
863 "{} {} {} {} {} ",
864 mount.id,
865 parent.id,
866 mount.root.node.fs().dev_id,
867 path_from_fs_root(&mount.root),
868 mountpoint.path(&task),
869 )?;
870 write_mount_info(&task, sink, mount)?;
871 if let Some(peer_group) = mount.read().peer_group() {
872 write!(sink, " shared:{}", peer_group.id)?;
873 }
874 if let Some(upstream) = mount.read().upstream() {
875 write!(sink, " master:{}", upstream.id)?;
876 }
877 writeln!(
878 sink,
879 " - {} {} {}",
880 mount.fs.name(),
881 mount.fs.options.source_for_display(),
882 mount.fs.options.flags,
883 )?;
884 Ok(())
885 })?;
886 Ok(())
887 }
888}
889
890fn for_each_mount<E>(
891 mount: &MountHandle,
892 callback: &mut impl FnMut(&MountHandle) -> Result<(), E>,
893) -> Result<(), E> {
894 callback(mount)?;
895 let submounts: Vec<_> = mount.read().submounts.iter().map(|s| s.mount.clone()).collect();
898 for submount in submounts {
899 for_each_mount(&submount, callback)?;
900 }
901 Ok(())
902}
903
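/// Whether the final component of a path should be followed if it turns out to be a symlink.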
904#[derive(Default, PartialEq, Eq, Copy, Clone, Debug)]
906pub enum SymlinkMode {
907 #[default]
909 Follow,
910
911 NoFollow,
913}
914
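/// The maximum number of symlink hops allowed while resolving a single path before the lookup
/// fails with `ELOOP`. Linux uses the same limit of 40.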
915pub const MAX_SYMLINK_FOLLOWS: u8 = 40;
917
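/// Per-lookup options: how symlinks are handled, how many symlink hops remain before `ELOOP`,
/// whether the final component must be a directory, and `openat2`-style resolve restrictions.
///
/// A minimal sketch of driving a lookup with it (`parent` is a hypothetical `NamespaceNode`;
/// `locked` and `current_task` are assumed from the caller's context):
///
/// ```ignore
/// // Resolve "name" without following it if it is a symlink:
/// let mut context = LookupContext::new(SymlinkMode::NoFollow);
/// let child = parent.lookup_child(locked, &current_task, &mut context, "name".into())?;
/// ```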
918pub struct LookupContext {
923 pub symlink_mode: SymlinkMode,
928
929 pub remaining_follows: u8,
933
934 pub must_be_directory: bool,
940
941 pub resolve_flags: ResolveFlags,
943
944 pub resolve_base: ResolveBase,
947}
948
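/// Restricts how far up the tree a lookup may walk, mirroring `openat2(2)`: `None` imposes no
/// restriction, `Beneath` fails with `EXDEV` if resolution would escape the given node, and
/// `InRoot` treats the given node as the root directory for the lookup.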
949#[derive(Clone, Eq, PartialEq)]
952pub enum ResolveBase {
953 None,
954
955 Beneath(NamespaceNode),
957
958 InRoot(NamespaceNode),
960}
961
962impl LookupContext {
963 pub fn new(symlink_mode: SymlinkMode) -> LookupContext {
964 LookupContext {
965 symlink_mode,
966 remaining_follows: MAX_SYMLINK_FOLLOWS,
967 must_be_directory: false,
968 resolve_flags: ResolveFlags::empty(),
969 resolve_base: ResolveBase::None,
970 }
971 }
972
973 pub fn with(&self, symlink_mode: SymlinkMode) -> LookupContext {
974 LookupContext { symlink_mode, resolve_base: self.resolve_base.clone(), ..*self }
975 }
976
977 pub fn update_for_path(&mut self, path: &FsStr) {
978 if path.last() == Some(&b'/') {
979 self.must_be_directory = true;
982 self.symlink_mode = SymlinkMode::Follow;
985 }
986 }
987}
988
989impl Default for LookupContext {
990 fn default() -> Self {
991 LookupContext::new(SymlinkMode::Follow)
992 }
993}
994
995pub enum PathWithReachability {
997 Reachable(FsString),
999
1000 Unreachable(FsString),
1002}
1003
1004impl PathWithReachability {
1005 pub fn into_path(self) -> FsString {
1006 match self {
1007 PathWithReachability::Reachable(path) => path,
1008 PathWithReachability::Unreachable(path) => path,
1009 }
1010 }
1011}
1012
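/// A `DirEntry` viewed through a particular mount. The pairing determines both the node's
/// path within the namespace and the mount flags (read-only, noexec, noatime, ...) that apply
/// to operations on it.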
1013#[derive(Clone)]
1021pub struct NamespaceNode {
1022 pub mount: MountInfo,
1027
1028 pub entry: DirEntryHandle,
1030}
1031
1032impl NamespaceNode {
1033 pub fn new(mount: MountHandle, entry: DirEntryHandle) -> Self {
1034 Self { mount: Some(mount).into(), entry }
1035 }
1036
1037 pub fn new_anonymous(entry: DirEntryHandle) -> Self {
1039 Self { mount: None.into(), entry }
1040 }
1041
1042 pub fn new_anonymous_unrooted(current_task: &CurrentTask, node: FsNodeHandle) -> Self {
1045 let dir_entry = DirEntry::new_unrooted(node);
1046 let _ = security::fs_node_init_with_dentry_no_xattr(current_task, &dir_entry);
1047 Self::new_anonymous(dir_entry)
1048 }
1049
1050 pub fn open(
1056 &self,
1057 locked: &mut Locked<Unlocked>,
1058 current_task: &CurrentTask,
1059 flags: OpenFlags,
1060 access_check: AccessCheck,
1061 ) -> Result<FileHandle, Errno> {
1062 let ops = self.entry.node.open(locked, current_task, self, flags, access_check)?;
1063 FileObject::new(locked, current_task, ops, self.clone(), flags)
1064 }
1065
1066 pub fn open_create_node<L>(
1072 &self,
1073 locked: &mut Locked<L>,
1074 current_task: &CurrentTask,
1075 name: &FsStr,
1076 mode: FileMode,
1077 dev: DeviceType,
1078 flags: OpenFlags,
1079 ) -> Result<NamespaceNode, Errno>
1080 where
1081 L: LockEqualOrBefore<FileOpsCore>,
1082 {
1083 let owner = current_task.current_fscred();
1084 let mode = current_task.fs().apply_umask(mode);
1085 let create_fn =
1086 |locked: &mut Locked<L>, dir: &FsNodeHandle, mount: &MountInfo, name: &_| {
1087 dir.create_node(locked, current_task, mount, name, mode, dev, owner)
1088 };
1089 let entry = if flags.contains(OpenFlags::EXCL) {
1090 self.entry.create_entry(locked, current_task, &self.mount, name, create_fn)
1091 } else {
1092 self.entry.get_or_create_entry(locked, current_task, &self.mount, name, create_fn)
1093 }?;
1094 Ok(self.with_new_entry(entry))
1095 }
1096
1097 pub fn into_active(self) -> ActiveNamespaceNode {
1098 ActiveNamespaceNode::new(self)
1099 }
1100
1101 pub fn into_mapping(self, mode: Option<FileWriteGuardMode>) -> Result<Arc<FileMapping>, Errno> {
1102 self.into_active().into_mapping(mode)
1103 }
1104
1105 pub fn create_node<L>(
1111 &self,
1112 locked: &mut Locked<L>,
1113 current_task: &CurrentTask,
1114 name: &FsStr,
1115 mode: FileMode,
1116 dev: DeviceType,
1117 ) -> Result<NamespaceNode, Errno>
1118 where
1119 L: LockEqualOrBefore<FileOpsCore>,
1120 {
1121 let owner = current_task.current_fscred();
1122 let mode = current_task.fs().apply_umask(mode);
1123 let entry = self.entry.create_entry(
1124 locked,
1125 current_task,
1126 &self.mount,
1127 name,
1128 |locked, dir, mount, name| {
1129 dir.create_node(locked, current_task, mount, name, mode, dev, owner)
1130 },
1131 )?;
1132 Ok(self.with_new_entry(entry))
1133 }
1134
1135 pub fn create_symlink<L>(
1139 &self,
1140 locked: &mut Locked<L>,
1141 current_task: &CurrentTask,
1142 name: &FsStr,
1143 target: &FsStr,
1144 ) -> Result<NamespaceNode, Errno>
1145 where
1146 L: LockEqualOrBefore<FileOpsCore>,
1147 {
1148 let owner = current_task.current_fscred();
1149 let entry = self.entry.create_entry(
1150 locked,
1151 current_task,
1152 &self.mount,
1153 name,
1154 |locked, dir, mount, name| {
1155 dir.create_symlink(locked, current_task, mount, name, target, owner)
1156 },
1157 )?;
1158 Ok(self.with_new_entry(entry))
1159 }
1160
1161 pub fn create_tmpfile<L>(
1167 &self,
1168 locked: &mut Locked<L>,
1169 current_task: &CurrentTask,
1170 mode: FileMode,
1171 flags: OpenFlags,
1172 ) -> Result<NamespaceNode, Errno>
1173 where
1174 L: LockEqualOrBefore<FileOpsCore>,
1175 {
1176 let owner = current_task.current_fscred();
1177 let mode = current_task.fs().apply_umask(mode);
1178 Ok(self.with_new_entry(self.entry.create_tmpfile(
1179 locked,
1180 current_task,
1181 &self.mount,
1182 mode,
1183 owner,
1184 flags,
1185 )?))
1186 }
1187
1188 pub fn link<L>(
1189 &self,
1190 locked: &mut Locked<L>,
1191 current_task: &CurrentTask,
1192 name: &FsStr,
1193 child: &FsNodeHandle,
1194 ) -> Result<NamespaceNode, Errno>
1195 where
1196 L: LockEqualOrBefore<FileOpsCore>,
1197 {
1198 let dir_entry = self.entry.create_entry(
1199 locked,
1200 current_task,
1201 &self.mount,
1202 name,
1203 |locked, dir, mount, name| dir.link(locked, current_task, mount, name, child),
1204 )?;
1205 Ok(self.with_new_entry(dir_entry))
1206 }
1207
1208 pub fn bind_socket<L>(
1209 &self,
1210 locked: &mut Locked<L>,
1211 current_task: &CurrentTask,
1212 name: &FsStr,
1213 socket: SocketHandle,
1214 socket_address: SocketAddress,
1215 mode: FileMode,
1216 ) -> Result<NamespaceNode, Errno>
1217 where
1218 L: LockEqualOrBefore<FileOpsCore>,
1219 {
1220 let dir_entry = self.entry.create_entry(
1221 locked,
1222 current_task,
1223 &self.mount,
1224 name,
1225 |locked, dir, mount, name| {
1226 let node = dir.create_node(
1227 locked,
1228 current_task,
1229 mount,
1230 name,
1231 mode,
1232 DeviceType::NONE,
1233 current_task.current_fscred(),
1234 )?;
1235 if let Some(unix_socket) = socket.downcast_socket::<UnixSocket>() {
1236 unix_socket.bind_socket_to_node(&socket, socket_address, &node)?;
1237 } else {
1238 return error!(ENOTSUP);
1239 }
1240 Ok(node)
1241 },
1242 )?;
1243 Ok(self.with_new_entry(dir_entry))
1244 }
1245
1246 pub fn unlink<L>(
1247 &self,
1248 locked: &mut Locked<L>,
1249 current_task: &CurrentTask,
1250 name: &FsStr,
1251 kind: UnlinkKind,
1252 must_be_directory: bool,
1253 ) -> Result<(), Errno>
1254 where
1255 L: LockEqualOrBefore<FileOpsCore>,
1256 {
1257 if DirEntry::is_reserved_name(name) {
1258 match kind {
1259 UnlinkKind::Directory => {
1260 if name == ".." {
1261 error!(ENOTEMPTY)
1262 } else if self.parent().is_none() {
1263 error!(EBUSY)
1265 } else {
1266 error!(EINVAL)
1267 }
1268 }
1269 UnlinkKind::NonDirectory => error!(ENOTDIR),
1270 }
1271 } else {
1272 self.entry.unlink(locked, current_task, &self.mount, name, kind, must_be_directory)
1273 }
1274 }
1275
1276 pub fn lookup_child<L>(
1278 &self,
1279 locked: &mut Locked<L>,
1280 current_task: &CurrentTask,
1281 context: &mut LookupContext,
1282 basename: &FsStr,
1283 ) -> Result<NamespaceNode, Errno>
1284 where
1285 L: LockEqualOrBefore<FileOpsCore>,
1286 {
1287 if !self.entry.node.is_dir() {
1288 return error!(ENOTDIR);
1289 }
1290
1291 if basename.len() > NAME_MAX as usize {
1292 return error!(ENAMETOOLONG);
1293 }
1294
1295 let child = if basename.is_empty() || basename == "." {
1296 self.clone()
1297 } else if basename == ".." {
1298 let root = match &context.resolve_base {
1299 ResolveBase::None => current_task.fs().root(),
1300 ResolveBase::Beneath(node) => {
1301 if *self == *node {
1303 return error!(EXDEV);
1304 }
1305 current_task.fs().root()
1306 }
1307 ResolveBase::InRoot(root) => root.clone(),
1308 };
1309
1310 if *self == root { root } else { self.parent().unwrap_or_else(|| self.clone()) }
1312 } else {
1313 let mut child = self.with_new_entry(self.entry.component_lookup(
1314 locked,
1315 current_task,
1316 &self.mount,
1317 basename,
1318 )?);
1319 while child.entry.node.is_lnk() {
1320 match context.symlink_mode {
1321 SymlinkMode::NoFollow => {
1322 break;
1323 }
1324 SymlinkMode::Follow => {
1325 if context.remaining_follows == 0
1326 || context.resolve_flags.contains(ResolveFlags::NO_SYMLINKS)
1327 {
1328 return error!(ELOOP);
1329 }
1330 context.remaining_follows -= 1;
1331 child = match child.readlink(locked, current_task)? {
1332 SymlinkTarget::Path(link_target) => {
1333 let link_directory = if link_target[0] == b'/' {
1334 match &context.resolve_base {
1336 ResolveBase::None => current_task.fs().root(),
1337 ResolveBase::Beneath(_) => return error!(EXDEV),
1338 ResolveBase::InRoot(root) => root.clone(),
1339 }
1340 } else {
1341 child.parent().unwrap_or(child)
1345 };
1346 current_task.lookup_path(
1347 locked,
1348 context,
1349 link_directory,
1350 link_target.as_ref(),
1351 )?
1352 }
1353 SymlinkTarget::Node(node) => {
1354 if context.resolve_flags.contains(ResolveFlags::NO_MAGICLINKS) {
1355 return error!(ELOOP);
1356 }
1357 node
1358 }
1359 }
1360 }
1361 };
1362 }
1363
1364 child.enter_mount()
1365 };
1366
1367 if context.resolve_flags.contains(ResolveFlags::NO_XDEV) && child.mount != self.mount {
1368 return error!(EXDEV);
1369 }
1370
1371 if context.must_be_directory && !child.entry.node.is_dir() {
1372 return error!(ENOTDIR);
1373 }
1374
1375 Ok(child)
1376 }
1377
1378 pub fn parent(&self) -> Option<NamespaceNode> {
1384 let mountpoint_or_self = self.escape_mount();
1385 let parent = mountpoint_or_self.entry.parent()?;
1386 Some(mountpoint_or_self.with_new_entry(parent))
1387 }
1388
    /// Returns the parent `DirEntry` within the same mount, or `None` if this node is the root
    /// of its mount (use `parent()` to cross mount boundaries).
    pub fn parent_within_mount(&self) -> Option<DirEntryHandle> {
        if self.mount_if_root().is_ok() {
            return None;
        }
        self.entry.parent()
    }
1397
1398 pub fn is_descendant_of(&self, ancestor: &NamespaceNode) -> bool {
1403 let ancestor = ancestor.escape_mount();
1404 let mut current = self.escape_mount();
1405 while current != ancestor {
1406 if let Some(parent) = current.parent() {
1407 current = parent.escape_mount();
1408 } else {
1409 return false;
1410 }
1411 }
1412 true
1413 }
1414
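    /// If one or more mounts are stacked on this node, descend into the root of the topmost
    /// mount; otherwise return `self` unchanged.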
1415 fn enter_mount(&self) -> NamespaceNode {
1417 fn enter_one_mount(node: &NamespaceNode) -> Option<NamespaceNode> {
1419 if let Some(mount) = node.mount.deref() {
1420 if let Some(submount) =
1421 mount.state.read().submounts.get(ArcKey::ref_cast(&node.entry))
1422 {
1423 return Some(submount.mount.root());
1424 }
1425 }
1426 None
1427 }
1428 let mut inner = self.clone();
1429 while let Some(inner_root) = enter_one_mount(&inner) {
1430 inner = inner_root;
1431 }
1432 inner
1433 }
1434
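    /// The inverse of `enter_mount`: while this node is the root of a mount, step out to the
    /// node that mount is attached to, yielding the node as seen in the outermost mount.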
1435 fn escape_mount(&self) -> NamespaceNode {
1440 let mut mountpoint_or_self = self.clone();
1441 while let Some(mountpoint) = mountpoint_or_self.mountpoint() {
1442 mountpoint_or_self = mountpoint;
1443 }
1444 mountpoint_or_self
1445 }
1446
1447 pub fn mount_if_root(&self) -> Result<&MountHandle, Errno> {
1449 if let Some(mount) = self.mount.deref() {
1450 if Arc::ptr_eq(&self.entry, &mount.root) {
1451 return Ok(mount);
1452 }
1453 }
1454 error!(EINVAL)
1455 }
1456
1457 fn mountpoint(&self) -> Option<NamespaceNode> {
1462 self.mount_if_root().ok()?.read().mountpoint()
1463 }
1464
1465 pub fn path(&self, task: &Task) -> FsString {
1467 self.path_from_root(Some(&task.fs().root())).into_path()
1468 }
1469
1470 pub fn path_escaping_chroot(&self) -> FsString {
1472 self.path_from_root(None).into_path()
1473 }
1474
1475 pub fn path_from_root(&self, root: Option<&NamespaceNode>) -> PathWithReachability {
1478 if self.mount.is_none() {
1479 return PathWithReachability::Reachable(self.entry.node.internal_name());
1480 }
1481
1482 let mut path = PathBuilder::new();
1483 let mut current = self.escape_mount();
1484 if let Some(root) = root {
1485 let scope = RcuReadScope::new();
1486 let root = root.escape_mount();
1488 while current != root {
1489 if let Some(parent) = current.parent() {
1490 path.prepend_element(current.entry.local_name(&scope));
1491 current = parent.escape_mount();
1492 } else {
1493 let mut absolute_path = path.build_absolute();
1495 if self.entry.is_dead() {
1496 absolute_path.extend_from_slice(b" (deleted)");
1497 }
1498
1499 return PathWithReachability::Unreachable(absolute_path);
1500 }
1501 }
1502 } else {
1503 let scope = RcuReadScope::new();
1505 while let Some(parent) = current.parent() {
1506 path.prepend_element(current.entry.local_name(&scope));
1507 current = parent.escape_mount();
1508 }
1509 }
1510
1511 let mut absolute_path = path.build_absolute();
1512 if self.entry.is_dead() {
1513 absolute_path.extend_from_slice(b" (deleted)");
1514 }
1515
1516 PathWithReachability::Reachable(absolute_path)
1517 }
1518
1519 pub fn mount(&self, what: WhatToMount, flags: MountFlags) -> Result<(), Errno> {
1520 let flags = flags & (MountFlags::STORED_ON_MOUNT | MountFlags::REC);
1521 let mountpoint = self.enter_mount();
1522 let mount = mountpoint.mount.as_ref().expect("a mountpoint must be part of a mount");
1523 mount.create_submount(&mountpoint.entry, what, flags);
1524 Ok(())
1525 }
1526
1527 pub fn unmount(&self, flags: UnmountFlags) -> Result<(), Errno> {
1529 let mount = self.enter_mount().mount_if_root()?.clone();
1530 mount.unmount(flags)
1531 }
1532
1533 pub fn rename<L>(
1534 locked: &mut Locked<L>,
1535 current_task: &CurrentTask,
1536 old_parent: &NamespaceNode,
1537 old_name: &FsStr,
1538 new_parent: &NamespaceNode,
1539 new_name: &FsStr,
1540 flags: RenameFlags,
1541 ) -> Result<(), Errno>
1542 where
1543 L: LockEqualOrBefore<FileOpsCore>,
1544 {
1545 DirEntry::rename(
1546 locked,
1547 current_task,
1548 &old_parent.entry,
1549 &old_parent.mount,
1550 old_name,
1551 &new_parent.entry,
1552 &new_parent.mount,
1553 new_name,
1554 flags,
1555 )
1556 }
1557
1558 fn with_new_entry(&self, entry: DirEntryHandle) -> NamespaceNode {
1559 Self { mount: self.mount.clone(), entry }
1560 }
1561
1562 fn mount_hash_key(&self) -> &ArcKey<DirEntry> {
1563 ArcKey::ref_cast(&self.entry)
1564 }
1565
1566 pub fn suid_and_sgid(&self, current_task: &CurrentTask) -> Result<UserAndOrGroupId, Errno> {
1567 if self.mount.flags().contains(MountFlags::NOSUID) {
1568 Ok(UserAndOrGroupId::default())
1569 } else {
1570 self.entry.node.info().suid_and_sgid(current_task, &self.entry.node)
1571 }
1572 }
1573
1574 pub fn update_atime(&self) {
1575 if !self.mount.flags().contains(MountFlags::NOATIME) {
1577 self.entry.node.update_info(|info| {
1578 let now = utc::utc_now();
1579 info.time_access = now;
1580 info.pending_time_access_update = true;
1581 });
1582 }
1583 }
1584
1585 pub fn readlink<L>(
1586 &self,
1587 locked: &mut Locked<L>,
1588 current_task: &CurrentTask,
1589 ) -> Result<SymlinkTarget, Errno>
1590 where
1591 L: LockEqualOrBefore<FileOpsCore>,
1592 {
1593 self.update_atime();
1594 self.entry.node.readlink(locked, current_task)
1595 }
1596
1597 pub fn notify(&self, event_mask: InotifyMask) {
1598 if self.mount.is_some() {
1599 self.entry.notify(event_mask);
1600 }
1601 }
1602
1603 pub fn check_access<L>(
1607 &self,
1608 locked: &mut Locked<L>,
1609 current_task: &CurrentTask,
1610 permission_flags: impl Into<security::PermissionFlags>,
1611 reason: CheckAccessReason,
1612 ) -> Result<(), Errno>
1613 where
1614 L: LockEqualOrBefore<FileOpsCore>,
1615 {
1616 self.entry.node.check_access(
1617 locked,
1618 current_task,
1619 &self.mount,
1620 permission_flags,
1621 reason,
1622 self,
1623 )
1624 }
1625
1626 pub fn check_o_noatime_allowed(&self, current_task: &CurrentTask) -> Result<(), Errno> {
1628 self.entry.node.check_o_noatime_allowed(current_task)
1629 }
1630
1631 pub fn truncate<L>(
1632 &self,
1633 locked: &mut Locked<L>,
1634 current_task: &CurrentTask,
1635 length: u64,
1636 ) -> Result<(), Errno>
1637 where
1638 L: LockBefore<BeforeFsNodeAppend>,
1639 {
1640 self.entry.node.truncate(locked, current_task, &self.mount, length)?;
1641 self.entry.notify_ignoring_excl_unlink(InotifyMask::MODIFY);
1642 Ok(())
1643 }
1644}
1645
1646impl fmt::Debug for NamespaceNode {
1647 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1648 f.debug_struct("NamespaceNode")
1649 .field("path", &self.path_escaping_chroot())
1650 .field("mount", &self.mount)
1651 .field("entry", &self.entry)
1652 .finish()
1653 }
1654}
1655
1656impl PartialEq for NamespaceNode {
1658 fn eq(&self, other: &Self) -> bool {
1659 self.mount.as_ref().map(Arc::as_ptr).eq(&other.mount.as_ref().map(Arc::as_ptr))
1660 && Arc::ptr_eq(&self.entry, &other.entry)
1661 }
1662}
1663impl Eq for NamespaceNode {}
1664impl Hash for NamespaceNode {
1665 fn hash<H: Hasher>(&self, state: &mut H) {
1666 self.mount.as_ref().map(Arc::as_ptr).hash(state);
1667 Arc::as_ptr(&self.entry).hash(state);
1668 }
1669}
1670
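/// A `NamespaceNode` that also holds its mount's client marker, keeping the mount "busy":
/// while any `ActiveNamespaceNode` for a mount exists, a non-detaching unmount fails with
/// `EBUSY`.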
1671#[derive(Debug, Clone)]
1673pub struct ActiveNamespaceNode {
1674 name: NamespaceNode,
1676
1677 _marker: Option<MountClientMarker>,
1681}
1682
1683impl ActiveNamespaceNode {
1684 pub fn new(name: NamespaceNode) -> Self {
1685 let marker = name.mount.as_ref().map(|mount| mount.active_client_counter.clone());
1686 Self { name, _marker: marker }
1687 }
1688
1689 pub fn to_passive(&self) -> NamespaceNode {
1690 self.deref().clone()
1691 }
1692
1693 pub fn into_mapping(self, mode: Option<FileWriteGuardMode>) -> Result<Arc<FileMapping>, Errno> {
1694 if let Some(mode) = mode {
1695 self.entry.node.write_guard_state.lock().acquire(mode)?;
1696 }
1697 Ok(Arc::new(FileMapping { name: self, mode }))
1698 }
1699}
1700
1701impl Deref for ActiveNamespaceNode {
1702 type Target = NamespaceNode;
1703
1704 fn deref(&self) -> &Self::Target {
1705 &self.name
1706 }
1707}
1708
1709impl PartialEq for ActiveNamespaceNode {
1710 fn eq(&self, other: &Self) -> bool {
1711 self.deref().eq(other.deref())
1712 }
1713}
1714impl Eq for ActiveNamespaceNode {}
1715impl Hash for ActiveNamespaceNode {
1716 fn hash<H: Hasher>(&self, state: &mut H) {
1717 self.deref().hash(state)
1718 }
1719}
1720
1721#[derive(Debug, Clone, PartialEq, Eq)]
1722#[must_use]
1723pub struct FileMapping {
1724 pub name: ActiveNamespaceNode,
1725 mode: Option<FileWriteGuardMode>,
1726}
1727
1728impl Drop for FileMapping {
1729 fn drop(&mut self) {
1730 if let Some(mode) = self.mode {
1731 self.name.entry.node.write_guard_state.lock().release(mode);
1732 }
1733 }
1734}
1735
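/// Kernel-wide registry mapping each `DirEntry` to the mounts attached on top of it. It is
/// used to detach those mounts when the entry goes away (`unmount`) and to force-unmount every
/// remaining filesystem when the registry is cleared (`clear`).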
1736pub struct Mounts {
1738 mounts: RcuHashMap<WeakKey<DirEntry>, Vec<ArcKey<Mount>>>,
1739}
1740
1741impl Mounts {
1742 pub fn new() -> Self {
1743 Mounts { mounts: RcuHashMap::default() }
1744 }
1745
1746 fn register_mount(&self, dir_entry: &Arc<DirEntry>, mount: MountHandle) -> Submount {
1748 let mut mounts = self.mounts.lock();
1749 let key = WeakKey::from(dir_entry);
1750 let mut vec = mounts.get(&key).unwrap_or_else(|| {
1751 dir_entry.set_has_mounts(true);
1752 Vec::new()
1753 });
1754 vec.push(ArcKey(mount.clone()));
1755 mounts.insert(key, vec);
1756 Submount { dir: ArcKey(dir_entry.clone()), mount }
1757 }
1758
1759 fn unregister_mount(&self, dir_entry: &Arc<DirEntry>, mount: &MountHandle) {
1761 let mut mounts = self.mounts.lock();
1762 let key = WeakKey::from(dir_entry);
1763 if let Some(mut vec) = mounts.get(&key) {
1764 let index = vec.iter().position(|e| e == ArcKey::ref_cast(mount)).unwrap();
1765 if vec.len() == 1 {
1766 mounts.remove(&key);
1767 dir_entry.set_has_mounts(false);
1768 } else {
1769 vec.swap_remove(index);
1770 mounts.insert(key, vec);
1771 }
1772 }
1773 }
1774
1775 pub fn unmount(&self, dir_entry: &DirEntry) {
1779 let mounts = self.mounts.lock().remove(&PtrKey::from(dir_entry as *const _));
1780 if let Some(mounts) = mounts {
1781 for mount in mounts {
1782 let _ = mount.unmount(UnmountFlags::DETACH);
1784 }
1785 }
1786 }
1787
1788 pub fn clear(&self) {
1792 for (_dir_entry, mounts) in self.mounts.lock().drain() {
1793 for mount in mounts {
1794 mount.fs.force_unmount_ops();
1795 }
1796 }
1797 }
1798}
1799
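/// A mount attached at a particular directory entry of a parent mount. Equality and hashing
/// consider only the directory, so a `HashSet<Submount>` holds at most one (topmost) mount per
/// mountpoint; dropping a `Submount` removes its registration from the kernel's `Mounts`.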
1800#[derive(Debug)]
1802struct Submount {
1803 dir: ArcKey<DirEntry>,
1804 mount: MountHandle,
1805}
1806
1807impl Drop for Submount {
1808 fn drop(&mut self) {
1809 self.mount.fs.kernel.upgrade().unwrap().mounts.unregister_mount(&self.dir, &self.mount)
1810 }
1811}
1812
1813impl Eq for Submount {}
1815impl PartialEq<Self> for Submount {
1816 fn eq(&self, other: &Self) -> bool {
1817 self.dir == other.dir
1818 }
1819}
1820impl Hash for Submount {
1821 fn hash<H: Hasher>(&self, state: &mut H) {
1822 self.dir.hash(state)
1823 }
1824}
1825
1826impl Borrow<ArcKey<DirEntry>> for Submount {
1827 fn borrow(&self) -> &ArcKey<DirEntry> {
1828 &self.dir
1829 }
1830}
1831
1832#[cfg(test)]
1833mod test {
1834 use crate::fs::tmpfs::TmpFs;
1835 use crate::testing::spawn_kernel_and_run;
1836 use crate::vfs::namespace::DeviceType;
1837 use crate::vfs::{
1838 CallbackSymlinkNode, FsNodeInfo, LookupContext, MountInfo, Namespace, NamespaceNode,
1839 RenameFlags, SymlinkMode, SymlinkTarget, UnlinkKind, WhatToMount,
1840 };
1841 use starnix_uapi::mount_flags::MountFlags;
1842 use starnix_uapi::{errno, mode};
1843 use std::sync::Arc;
1844
1845 #[::fuchsia::test]
1846 async fn test_namespace() {
1847 spawn_kernel_and_run(async |locked, current_task| {
1848 let kernel = current_task.kernel();
1849 let root_fs = TmpFs::new_fs(locked, &kernel);
1850 let root_node = Arc::clone(root_fs.root());
1851 let _dev_node = root_node
                .create_dir(locked, &current_task, "dev".into())
1853 .expect("failed to mkdir dev");
1854 let dev_fs = TmpFs::new_fs(locked, &kernel);
1855 let dev_root_node = Arc::clone(dev_fs.root());
1856 let _dev_pts_node = dev_root_node
                .create_dir(locked, &current_task, "pts".into())
1858 .expect("failed to mkdir pts");
1859
1860 let ns = Namespace::new(root_fs);
1861 let mut context = LookupContext::default();
1862 let dev = ns
1863 .root()
                .lookup_child(locked, &current_task, &mut context, "dev".into())
1865 .expect("failed to lookup dev");
1866 dev.mount(WhatToMount::Fs(dev_fs), MountFlags::empty())
1867 .expect("failed to mount dev root node");
1868
1869 let mut context = LookupContext::default();
1870 let dev = ns
1871 .root()
                .lookup_child(locked, &current_task, &mut context, "dev".into())
1873 .expect("failed to lookup dev");
1874 let mut context = LookupContext::default();
1875 let pts = dev
                .lookup_child(locked, &current_task, &mut context, "pts".into())
1877 .expect("failed to lookup pts");
1878 let pts_parent =
1879 pts.parent().ok_or_else(|| errno!(ENOENT)).expect("failed to get parent of pts");
1880 assert!(Arc::ptr_eq(&pts_parent.entry, &dev.entry));
1881
1882 let dev_parent =
1883 dev.parent().ok_or_else(|| errno!(ENOENT)).expect("failed to get parent of dev");
1884 assert!(Arc::ptr_eq(&dev_parent.entry, &ns.root().entry));
1885 })
1886 .await;
1887 }
1888
1889 #[::fuchsia::test]
1890 async fn test_mount_does_not_upgrade() {
1891 spawn_kernel_and_run(async |locked, current_task| {
1892 let kernel = current_task.kernel();
1893 let root_fs = TmpFs::new_fs(locked, &kernel);
1894 let root_node = Arc::clone(root_fs.root());
1895 let _dev_node = root_node
                .create_dir(locked, &current_task, "dev".into())
1897 .expect("failed to mkdir dev");
1898 let dev_fs = TmpFs::new_fs(locked, &kernel);
1899 let dev_root_node = Arc::clone(dev_fs.root());
1900 let _dev_pts_node = dev_root_node
                .create_dir(locked, &current_task, "pts".into())
1902 .expect("failed to mkdir pts");
1903
1904 let ns = Namespace::new(root_fs);
1905 let mut context = LookupContext::default();
1906 let dev = ns
1907 .root()
                .lookup_child(locked, &current_task, &mut context, "dev".into())
1909 .expect("failed to lookup dev");
1910 dev.mount(WhatToMount::Fs(dev_fs), MountFlags::empty())
1911 .expect("failed to mount dev root node");
1912 let mut context = LookupContext::default();
1913 let new_dev = ns
1914 .root()
                .lookup_child(locked, &current_task, &mut context, "dev".into())
1916 .expect("failed to lookup dev again");
1917 assert!(!Arc::ptr_eq(&dev.entry, &new_dev.entry));
1918 assert_ne!(&dev, &new_dev);
1919
1920 let mut context = LookupContext::default();
1921 let _new_pts = new_dev
                .lookup_child(locked, &current_task, &mut context, "pts".into())
1923 .expect("failed to lookup pts");
1924 let mut context = LookupContext::default();
            assert!(dev.lookup_child(locked, &current_task, &mut context, "pts".into()).is_err());
1926 })
1927 .await;
1928 }
1929
1930 #[::fuchsia::test]
1931 async fn test_path() {
1932 spawn_kernel_and_run(async |locked, current_task| {
1933 let kernel = current_task.kernel();
1934 let root_fs = TmpFs::new_fs(locked, &kernel);
1935 let root_node = Arc::clone(root_fs.root());
1936 let _dev_node = root_node
                .create_dir(locked, &current_task, "dev".into())
1938 .expect("failed to mkdir dev");
1939 let dev_fs = TmpFs::new_fs(locked, &kernel);
1940 let dev_root_node = Arc::clone(dev_fs.root());
1941 let _dev_pts_node = dev_root_node
                .create_dir(locked, &current_task, "pts".into())
1943 .expect("failed to mkdir pts");
1944
1945 let ns = Namespace::new(root_fs);
1946 let mut context = LookupContext::default();
1947 let dev = ns
1948 .root()
                .lookup_child(locked, &current_task, &mut context, "dev".into())
1950 .expect("failed to lookup dev");
1951 dev.mount(WhatToMount::Fs(dev_fs), MountFlags::empty())
1952 .expect("failed to mount dev root node");
1953
1954 let mut context = LookupContext::default();
1955 let dev = ns
1956 .root()
                .lookup_child(locked, &current_task, &mut context, "dev".into())
1958 .expect("failed to lookup dev");
1959 let mut context = LookupContext::default();
1960 let pts = dev
                .lookup_child(locked, &current_task, &mut context, "pts".into())
1962 .expect("failed to lookup pts");
1963
1964 assert_eq!("/", ns.root().path_escaping_chroot());
1965 assert_eq!("/dev", dev.path_escaping_chroot());
1966 assert_eq!("/dev/pts", pts.path_escaping_chroot());
1967 })
1968 .await;
1969 }
1970
1971 #[::fuchsia::test]
1972 async fn test_shadowing() {
1973 spawn_kernel_and_run(async |locked, current_task| {
1974 let kernel = current_task.kernel();
1975 let root_fs = TmpFs::new_fs(locked, &kernel);
1976 let ns = Namespace::new(root_fs.clone());
            let _foo_node = root_fs.root().create_dir(locked, &current_task, "foo".into()).unwrap();
1978 let mut context = LookupContext::default();
1979 let foo_dir =
                ns.root().lookup_child(locked, &current_task, &mut context, "foo".into()).unwrap();
1981
1982 let foofs1 = TmpFs::new_fs(locked, &kernel);
1983 foo_dir.mount(WhatToMount::Fs(foofs1.clone()), MountFlags::empty()).unwrap();
1984 let mut context = LookupContext::default();
1985 assert!(Arc::ptr_eq(
1986 &ns.root()
                    .lookup_child(locked, &current_task, &mut context, "foo".into())
1988 .unwrap()
1989 .entry,
1990 foofs1.root()
1991 ));
1992 let foo_dir =
                ns.root().lookup_child(locked, &current_task, &mut context, "foo".into()).unwrap();
1994
1995 let ns_clone = ns.clone_namespace();
1996
1997 let foofs2 = TmpFs::new_fs(locked, &kernel);
1998 foo_dir.mount(WhatToMount::Fs(foofs2.clone()), MountFlags::empty()).unwrap();
1999 let mut context = LookupContext::default();
2000 assert!(Arc::ptr_eq(
2001 &ns.root()
                    .lookup_child(locked, &current_task, &mut context, "foo".into())
2003 .unwrap()
2004 .entry,
2005 foofs2.root()
2006 ));
2007
2008 assert!(Arc::ptr_eq(
2009 &ns_clone
2010 .root()
2011 .lookup_child(
2012 locked,
                        &current_task,
2014 &mut LookupContext::default(),
2015 "foo".into()
2016 )
2017 .unwrap()
2018 .entry,
2019 foofs1.root()
2020 ));
2021 })
2022 .await;
2023 }
2024
2025 #[::fuchsia::test]
2026 async fn test_unlink_mounted_directory() {
2027 spawn_kernel_and_run(async |locked, current_task| {
2028 let kernel = current_task.kernel();
2029 let root_fs = TmpFs::new_fs(locked, &kernel);
2030 let ns1 = Namespace::new(root_fs.clone());
2031 let ns2 = Namespace::new(root_fs.clone());
            let _foo_node = root_fs.root().create_dir(locked, &current_task, "foo".into()).unwrap();
2033 let mut context = LookupContext::default();
2034 let foo_dir =
                ns1.root().lookup_child(locked, &current_task, &mut context, "foo".into()).unwrap();
2036
2037 let foofs = TmpFs::new_fs(locked, &kernel);
2038 foo_dir.mount(WhatToMount::Fs(foofs), MountFlags::empty()).unwrap();
2039
2040 assert_eq!(
2042 ns1.root()
                    .unlink(locked, &current_task, "foo".into(), UnlinkKind::Directory, false)
2044 .unwrap_err(),
2045 errno!(EBUSY),
2046 );
2047
2048 ns2.root()
                .unlink(locked, &current_task, "foo".into(), UnlinkKind::Directory, false)
2051 .expect("unlink failed");
2052
2053 assert_eq!(
2055 ns1.root()
                    .unlink(locked, &current_task, "foo".into(), UnlinkKind::Directory, false)
2057 .unwrap_err(),
2058 errno!(ENOENT),
2059 );
2060 })
2061 .await;
2062 }
2063
2064 #[::fuchsia::test]
2065 async fn test_rename_mounted_directory() {
2066 spawn_kernel_and_run(async |locked, current_task| {
2067 let kernel = current_task.kernel();
2068 let root_fs = TmpFs::new_fs(locked, &kernel);
2069 let ns1 = Namespace::new(root_fs.clone());
2070 let ns2 = Namespace::new(root_fs.clone());
            let _foo_node = root_fs.root().create_dir(locked, &current_task, "foo".into()).unwrap();
            let _bar_node = root_fs.root().create_dir(locked, &current_task, "bar".into()).unwrap();
            let _baz_node = root_fs.root().create_dir(locked, &current_task, "baz".into()).unwrap();
2074 let mut context = LookupContext::default();
2075 let foo_dir =
                ns1.root().lookup_child(locked, &current_task, &mut context, "foo".into()).unwrap();
2077
2078 let foofs = TmpFs::new_fs(locked, &kernel);
2079 foo_dir.mount(WhatToMount::Fs(foofs), MountFlags::empty()).unwrap();
2080
2081 let root = ns1.root();
2083 assert_eq!(
2084 NamespaceNode::rename(
2085 locked,
                    &current_task,
2087 &root,
2088 "bar".into(),
2089 &root,
2090 "foo".into(),
2091 RenameFlags::empty()
2092 )
2093 .unwrap_err(),
2094 errno!(EBUSY),
2095 );
2096 assert_eq!(
2098 NamespaceNode::rename(
2099 locked,
                    &current_task,
2101 &root,
2102 "foo".into(),
2103 &root,
2104 "bar".into(),
2105 RenameFlags::empty()
2106 )
2107 .unwrap_err(),
2108 errno!(EBUSY),
2109 );
2110
2111 let root = ns2.root();
2113
2114 NamespaceNode::rename(
2116 locked,
                &current_task,
2118 &root,
2119 "foo".into(),
2120 &root,
2121 "bar".into(),
2122 RenameFlags::empty(),
2123 )
2124 .expect("rename failed");
2125
2126 NamespaceNode::rename(
2128 locked,
                &current_task,
2130 &root,
2131 "baz".into(),
2132 &root,
2133 "bar".into(),
2134 RenameFlags::empty(),
2135 )
2136 .expect("rename failed");
2137
2138 assert_eq!(
2140 ns1.root()
                    .lookup_child(locked, &current_task, &mut context, "foo".into())
2142 .unwrap_err(),
2143 errno!(ENOENT)
2144 );
2145 assert_eq!(
2146 ns1.root()
                    .lookup_child(locked, &current_task, &mut context, "baz".into())
2148 .unwrap_err(),
2149 errno!(ENOENT)
2150 );
2151 })
2152 .await;
2153 }
2154
2155 #[::fuchsia::test]
2158 async fn test_lookup_with_symlink_chain() {
2159 spawn_kernel_and_run(async |locked, current_task| {
2160 let kernel = current_task.kernel();
2162 let root_fs = TmpFs::new_fs(locked, &kernel);
2163 let root_node = Arc::clone(root_fs.root());
2164 let _first_subdir_node = root_node
                .create_dir(locked, &current_task, "first_subdir".into())
                .expect("failed to mkdir first_subdir");
            let _second_subdir_node = root_node
                .create_dir(locked, &current_task, "second_subdir".into())
                .expect("failed to mkdir second_subdir");
2170
2171 let first_subdir_fs = TmpFs::new_fs(locked, &kernel);
2173 let second_subdir_fs = TmpFs::new_fs(locked, &kernel);
2174
2175 let ns = Namespace::new(root_fs);
2176 let mut context = LookupContext::default();
2177 let first_subdir = ns
2178 .root()
                .lookup_child(locked, &current_task, &mut context, "first_subdir".into())
2180 .expect("failed to lookup first_subdir");
2181 first_subdir
2182 .mount(WhatToMount::Fs(first_subdir_fs), MountFlags::empty())
2183 .expect("failed to mount first_subdir fs node");
2184 let second_subdir = ns
2185 .root()
                .lookup_child(locked, &current_task, &mut context, "second_subdir".into())
2187 .expect("failed to lookup second_subdir");
2188 second_subdir
2189 .mount(WhatToMount::Fs(second_subdir_fs), MountFlags::empty())
2190 .expect("failed to mount second_subdir fs node");
2191
2192 let real_file_node = first_subdir
2201 .create_node(
2202 locked,
                    &current_task,
2204 "real_file".into(),
2205 mode!(IFREG, 0o777),
2206 DeviceType::NONE,
2207 )
2208 .expect("failed to create real_file");
2209 first_subdir
                .create_symlink(locked, &current_task, "path_symlink".into(), "real_file".into())
2211 .expect("failed to create path_symlink");
2212
2213 let mut no_follow_lookup_context = LookupContext::new(SymlinkMode::NoFollow);
2214 let path_symlink_node = first_subdir
2215 .lookup_child(
2216 locked,
                    &current_task,
2218 &mut no_follow_lookup_context,
2219 "path_symlink".into(),
2220 )
2221 .expect("Failed to lookup path_symlink");
2222
2223 let node_symlink_node = second_subdir.entry.node.fs().create_node_and_allocate_node_id(
2227 CallbackSymlinkNode::new(move || {
2228 let node = path_symlink_node.clone();
2229 Ok(SymlinkTarget::Node(node))
2230 }),
2231 FsNodeInfo::new(mode!(IFLNK, 0o777), current_task.current_fscred()),
2232 );
2233 second_subdir
2234 .entry
2235 .create_entry(
2236 locked,
                    &current_task,
2238 &MountInfo::detached(),
2239 "node_symlink".into(),
2240 move |_locked, _dir, _mount, _name| Ok(node_symlink_node),
2241 )
2242 .expect("failed to create node_symlink entry");
2243
2244 let mut follow_lookup_context = LookupContext::new(SymlinkMode::Follow);
2246 let node_symlink_resolution = second_subdir
2247 .lookup_child(
2248 locked,
                    &current_task,
2250 &mut follow_lookup_context,
2251 "node_symlink".into(),
2252 )
2253 .expect("lookup with symlink chain failed");
2254
            assert_eq!(node_symlink_resolution.entry.node.ino, real_file_node.entry.node.ino);
2257 })
2258 .await;
2259 }
2260}