1use crate::mutable_state::{state_accessor, state_implementation};
6use crate::security;
7use crate::task::{CurrentTask, EventHandler, Kernel, Task, WaitCanceler, Waiter};
8use crate::time::utc;
9use crate::vfs::fs_registry::FsRegistry;
10use crate::vfs::pseudo::dynamic_file::{DynamicFile, DynamicFileBuf, DynamicFileSource};
11use crate::vfs::pseudo::simple_file::SimpleFileNode;
12use crate::vfs::socket::{SocketAddress, SocketHandle, UnixSocket};
13use crate::vfs::{
14 CheckAccessReason, DirEntry, DirEntryHandle, FileHandle, FileObject, FileOps, FileSystemHandle,
15 FileSystemOptions, FileWriteGuardMode, FsNode, FsNodeHandle, FsNodeOps, FsStr, FsString,
16 PathBuilder, RenameFlags, SymlinkTarget, UnlinkKind, fileops_impl_dataless,
17 fileops_impl_delegate_read_write_and_seek, fileops_impl_nonseekable, fileops_impl_noop_sync,
18 fs_node_impl_not_dir,
19};
20use fuchsia_rcu::RcuReadScope;
21use macro_rules_attribute::apply;
22use ref_cast::RefCast;
23use starnix_logging::log_warn;
24use starnix_rcu::RcuHashMap;
25use starnix_sync::{
26 BeforeFsNodeAppend, FileOpsCore, LockBefore, LockEqualOrBefore, Locked, Mutex, RwLock, Unlocked,
27};
28use starnix_types::ownership::WeakRef;
29use starnix_uapi::arc_key::{ArcKey, PtrKey, WeakKey};
30use starnix_uapi::auth::UserAndOrGroupId;
31use starnix_uapi::device_type::DeviceType;
32use starnix_uapi::errors::Errno;
33use starnix_uapi::file_mode::{AccessCheck, FileMode};
34use starnix_uapi::inotify_mask::InotifyMask;
35use starnix_uapi::mount_flags::MountFlags;
36use starnix_uapi::open_flags::OpenFlags;
37use starnix_uapi::unmount_flags::UnmountFlags;
38use starnix_uapi::vfs::{FdEvents, ResolveFlags};
39use starnix_uapi::{NAME_MAX, errno, error};
40use std::borrow::Borrow;
41use std::collections::HashSet;
42use std::fmt;
43use std::hash::{Hash, Hasher};
44use std::ops::{Deref, DerefMut};
45use std::sync::{Arc, Weak};
46
/// A mount namespace: a tree of mounts rooted at `root_mount`.
///
/// Each namespace has a kernel-assigned unique `id` (see
/// `Kernel::get_next_namespace_id`).
#[derive(Debug)]
pub struct Namespace {
    /// The mount at the root of this namespace's mount tree.
    root_mount: MountHandle,

    /// Kernel-unique identifier for this namespace.
    pub id: u64,
}
57
impl Namespace {
    /// Creates a namespace whose root mount is `fs`, mounted with no flags.
    pub fn new(fs: FileSystemHandle) -> Arc<Namespace> {
        Self::new_with_flags(fs, MountFlags::empty())
    }

    /// Creates a namespace whose root mount is `fs`, mounted with `flags`.
    pub fn new_with_flags(fs: FileSystemHandle, flags: MountFlags) -> Arc<Namespace> {
        let kernel = fs.kernel.upgrade().expect("can't create namespace without a kernel");
        let root_mount = Mount::new(WhatToMount::Fs(fs), flags);
        Arc::new(Self { root_mount, id: kernel.get_next_namespace_id() })
    }

    /// Returns the node at the root of this namespace's root mount.
    pub fn root(&self) -> NamespaceNode {
        self.root_mount.root()
    }

    /// Copies this namespace: the whole mount tree is cloned recursively and
    /// the copy is assigned a fresh namespace id.
    pub fn clone_namespace(&self) -> Arc<Namespace> {
        let kernel =
            self.root_mount.fs.kernel.upgrade().expect("can't clone namespace without a kernel");
        Arc::new(Self {
            root_mount: self.root_mount.clone_mount_recursive(),
            id: kernel.get_next_namespace_id(),
        })
    }

    /// Rebinds `node` to the equivalent mount in `new_ns`, keeping the same
    /// dir entry. Returns `None` if the corresponding chain of mounts does
    /// not exist in `new_ns`.
    pub fn translate_node(mut node: NamespaceNode, new_ns: &Namespace) -> Option<NamespaceNode> {
        // Record the mountpoint dir entries from `node`'s mount up to the
        // root of its own namespace.
        let mut mountpoints = vec![];
        let mut mount = node.mount;
        while let Some(mountpoint) = mount.as_ref().and_then(|m| m.read().mountpoint()) {
            mountpoints.push(mountpoint.entry);
            mount = mountpoint.mount;
        }

        // Replay the same chain of mountpoints starting from the root of
        // `new_ns`; bail out (`?`) if any link is missing there.
        let mut mount = Arc::clone(&new_ns.root_mount);
        for mountpoint in mountpoints.iter().rev() {
            let next_mount =
                mount.read().submounts.get(ArcKey::ref_cast(mountpoint))?.mount.clone();
            mount = next_mount;
        }
        node.mount = Some(mount).into();
        Some(node)
    }
}
104
impl FsNodeOps for Arc<Namespace> {
    fs_node_impl_not_dir!();

    /// Opening a namespace node produces a `MountNamespaceFile`, which keeps
    /// this namespace alive for as long as the file is open.
    fn create_file_ops(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        _node: &FsNode,
        _current_task: &CurrentTask,
        _flags: OpenFlags,
    ) -> Result<Box<dyn FileOps>, Errno> {
        Ok(Box::new(MountNamespaceFile(self.clone())))
    }
}
118
119pub struct MountNamespaceFile(pub Arc<Namespace>);
120
impl FileOps for MountNamespaceFile {
    // The file carries no readable/writable data; it exists only to pin the
    // namespace referenced by `self.0`.
    fileops_impl_nonseekable!();
    fileops_impl_dataless!();
    fileops_impl_noop_sync!();
}
126
127type MountClientMarker = Arc<()>;
132
/// An instance of a filesystem mounted in a namespace tree.
pub struct Mount {
    /// The dir entry at the root of this mount.
    root: DirEntryHandle,
    /// Per-mount flags; restricted to `MountFlags::STORED_ON_MOUNT` (see
    /// `new_with_root` and `update_flags`).
    flags: Mutex<MountFlags>,
    /// The filesystem backing this mount.
    fs: FileSystemHandle,

    /// Kernel-unique mount id (see `Kernel::get_next_mount_id`).
    id: u64,

    /// Counter marker: `Arc::strong_count(..) - 1` is the number of active
    /// clients (see `active_clients`).
    active_client_counter: MountClientMarker,

    /// Mutable state: mountpoint, submounts, and peer-group membership.
    state: RwLock<MountState>,
}
161type MountHandle = Arc<Mount>;
162
/// The mount a `NamespaceNode` was reached through, if any. A `None` handle
/// denotes an anonymous/detached node that belongs to no mount.
#[derive(Clone, Debug)]
pub struct MountInfo {
    handle: Option<MountHandle>,
}
168
169impl MountInfo {
170 pub fn detached() -> Self {
173 None.into()
174 }
175
176 pub fn flags(&self) -> MountFlags {
178 if let Some(handle) = &self.handle {
179 handle.flags()
180 } else {
181 MountFlags::NOATIME
183 }
184 }
185
186 pub fn check_readonly_filesystem(&self) -> Result<(), Errno> {
188 if self.flags().contains(MountFlags::RDONLY) {
189 return error!(EROFS);
190 }
191 Ok(())
192 }
193
194 pub fn check_noexec_filesystem(&self) -> Result<(), Errno> {
196 if self.flags().contains(MountFlags::NOEXEC) {
197 return error!(EACCES);
198 }
199 Ok(())
200 }
201}
202
// Dereferences to the underlying optional mount handle so callers can use
// `Option` combinators (`as_ref`, `deref`, ...) directly.
impl Deref for MountInfo {
    type Target = Option<MountHandle>;

    fn deref(&self) -> &Self::Target {
        &self.handle
    }
}
210
// Mutable access to the underlying optional mount handle.
impl DerefMut for MountInfo {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.handle
    }
}
216
// Equality is pointer identity of the mount: two `MountInfo`s are equal iff
// they refer to the same `Mount` allocation (or are both detached).
impl std::cmp::PartialEq for MountInfo {
    fn eq(&self, other: &Self) -> bool {
        self.handle.as_ref().map(Arc::as_ptr) == other.handle.as_ref().map(Arc::as_ptr)
    }
}
222
223impl std::cmp::Eq for MountInfo {}
224
225impl Into<MountInfo> for Option<MountHandle> {
226 fn into(self) -> MountInfo {
227 MountInfo { handle: self }
228 }
229}
230
/// Mutable state of a `Mount`, kept behind `Mount::state` (`RwLock`).
#[derive(Default)]
pub struct MountState {
    /// Where this mount is mounted: the parent mount (weak, to break the
    /// parent<->child cycle) and the dir entry it covers. `None` for a
    /// namespace root or a detached mount.
    mountpoint: Option<(Weak<Mount>, DirEntryHandle)>,

    /// The mounts stacked on dir entries inside this mount.
    submounts: HashSet<Submount>,

    /// Shared-propagation peer group membership, paired with this mount's
    /// pointer key used to unregister on drop. Access via `peer_group()` /
    /// `set_peer_group` / `take_from_peer_group`.
    peer_group_: Option<(Arc<PeerGroup>, PtrKey<Mount>)>,
    /// The peer group this mount receives propagation from (master), weak.
    /// Access via `upstream()` / `set_upstream` / `take_from_upstream`.
    upstream_: Option<(Weak<PeerGroup>, PtrKey<Mount>)>,
}
258
/// A group of mounts that propagate mount/unmount events to each other
/// (shown as `shared:<id>` in mountinfo output).
#[derive(Default)]
struct PeerGroup {
    /// Kernel-unique id (see `Kernel::get_next_peer_group_id`).
    id: u64,
    state: RwLock<PeerGroupState>,
}
/// Membership sets of a `PeerGroup`; weak keys so the group does not keep
/// mounts alive.
#[derive(Default)]
struct PeerGroupState {
    /// Mounts that are members of this peer group.
    mounts: HashSet<WeakKey<Mount>>,
    /// Mounts that receive propagation from this group (their upstream).
    downstream: HashSet<WeakKey<Mount>>,
}
272
/// The source of a new mount: a fresh filesystem, or a bind of an existing
/// namespace node.
pub enum WhatToMount {
    Fs(FileSystemHandle),
    Bind(NamespaceNode),
}
277
impl Mount {
    /// Creates a mount from `what` with the given `flags`.
    ///
    /// Panics if `what` is a bind of an anonymous (mountless) node.
    pub fn new(what: WhatToMount, flags: MountFlags) -> MountHandle {
        match what {
            WhatToMount::Fs(fs) => Self::new_with_root(fs.root().clone(), flags),
            WhatToMount::Bind(node) => {
                let mount = node.mount.as_ref().expect("can't bind mount from an anonymous node");
                mount.clone_mount(&node.entry, flags)
            }
        }
    }

    /// Creates a mount rooted at `root`. Panics if `flags` contains anything
    /// outside `MountFlags::STORED_ON_MOUNT`, or if the filesystem's kernel
    /// is gone.
    fn new_with_root(root: DirEntryHandle, flags: MountFlags) -> MountHandle {
        let known_flags = MountFlags::STORED_ON_MOUNT;
        assert!(
            !flags.intersects(!known_flags),
            "mount created with extra flags {:?}",
            flags - known_flags
        );
        let fs = root.node.fs();
        let kernel = fs.kernel.upgrade().expect("can't create mount without kernel");
        Arc::new(Self {
            id: kernel.get_next_mount_id(),
            flags: Mutex::new(flags),
            root,
            active_client_counter: Default::default(),
            fs,
            state: Default::default(),
        })
    }

    /// Returns the namespace node at the root of this mount.
    pub fn root(self: &MountHandle) -> NamespaceNode {
        NamespaceNode::new(Arc::clone(self), Arc::clone(&self.root))
    }

    /// Mounts `what` on `dir` within this mount, propagating the new mount to
    /// all peers of this mount's peer group.
    fn create_submount(
        self: &MountHandle,
        dir: &DirEntryHandle,
        what: WhatToMount,
        flags: MountFlags,
    ) {
        // Snapshot the propagation targets first so the state lock is not
        // held while creating and attaching the new mounts.
        let peers = {
            let state = self.state.read();
            state.peer_group().map(|g| g.copy_propagation_targets()).unwrap_or_default()
        };

        let mount = Mount::new(what, flags);

        // A mount created inside a shared mount starts out shared as well.
        if self.read().is_shared() {
            mount.write().make_shared();
        }

        // Attach an independent recursive clone to each peer (skipping self).
        for peer in peers {
            if Arc::ptr_eq(self, &peer) {
                continue;
            }
            let clone = mount.clone_mount_recursive();
            peer.write().add_submount_internal(dir, clone);
        }

        self.write().add_submount_internal(dir, mount)
    }

    /// Unmounts the submount at `mount_hash_key` from this mount, propagating
    /// the removal to peers whose corresponding submount is empty.
    fn remove_submount(self: &MountHandle, mount_hash_key: &ArcKey<DirEntry>) -> Result<(), Errno> {
        // Snapshot propagation targets outside the state lock.
        let peers = {
            let state = self.state.read();
            state.peer_group().map(|g| g.copy_propagation_targets()).unwrap_or_default()
        };

        for peer in peers {
            if Arc::ptr_eq(self, &peer) {
                continue;
            }
            let mut peer = peer.write();
            // Leave a peer's submount in place if it still has submounts of
            // its own; removal elsewhere is best-effort (result ignored).
            if let Some(submount) = peer.submounts.get(mount_hash_key) {
                if !submount.mount.read().submounts.is_empty() {
                    continue;
                }
            }
            let _ = peer.remove_submount_internal(mount_hash_key);
        }

        self.write().remove_submount_internal(mount_hash_key)
    }

    /// Clones this mount, re-rooted at `new_root` (which must be a descendant
    /// of the current root). With `MountFlags::REC`, submounts are cloned
    /// recursively. The clone joins this mount's peer group, if any.
    fn clone_mount(
        self: &MountHandle,
        new_root: &DirEntryHandle,
        flags: MountFlags,
    ) -> MountHandle {
        assert!(new_root.is_descendant_of(&self.root));
        // The clone inherits this mount's stored flags; `flags` here only
        // controls recursion via REC.
        let clone = Self::new_with_root(Arc::clone(new_root), self.flags());

        if flags.contains(MountFlags::REC) {
            // Clone the children outside of the clone's own state lock.
            let mut submounts = vec![];
            for Submount { dir, mount } in &self.state.read().submounts {
                submounts.push((dir.clone(), mount.clone_mount_recursive()));
            }
            let mut clone_state = clone.write();
            for (dir, submount) in submounts {
                clone_state.add_submount_internal(&dir, submount);
            }
        }

        // Clones of a shared mount share the same peer group.
        let peer_group = self.state.read().peer_group().map(Arc::clone);
        if let Some(peer_group) = peer_group {
            clone.write().set_peer_group(peer_group);
        }

        clone
    }

    /// Recursively clones this mount and all of its submounts.
    fn clone_mount_recursive(self: &MountHandle) -> MountHandle {
        self.clone_mount(&self.root, MountFlags::REC)
    }

    /// Changes this mount's propagation type (SHARED / PRIVATE / DOWNSTREAM),
    /// optionally applying it to the whole subtree. Unknown flags are logged
    /// and ignored.
    pub fn change_propagation(self: &MountHandle, flag: MountFlags, recursive: bool) {
        let mut state = self.write();
        match flag {
            MountFlags::SHARED => state.make_shared(),
            MountFlags::PRIVATE => state.make_private(),
            MountFlags::DOWNSTREAM => state.make_downstream(),
            _ => {
                log_warn!("mount propagation {:?}", flag);
                return;
            }
        }

        if recursive {
            for submount in &state.submounts {
                submount.mount.change_propagation(flag, recursive);
            }
        }
    }

    /// The flags stored on this mount.
    fn flags(&self) -> MountFlags {
        *self.flags.lock()
    }

    /// Replaces the stored flags with `flags` (restricted to
    /// `STORED_ON_MOUNT`). If the caller sets no atime-related flag, the
    /// existing atime flags are preserved; STRICTATIME is never stored (it
    /// only clears the other atime flags).
    pub fn update_flags(self: &MountHandle, mut flags: MountFlags) {
        flags &= MountFlags::STORED_ON_MOUNT;
        let atime_flags = MountFlags::NOATIME
            | MountFlags::NODIRATIME
            | MountFlags::RELATIME
            | MountFlags::STRICTATIME;
        let mut stored_flags = self.flags.lock();
        if !flags.intersects(atime_flags) {
            flags |= *stored_flags & atime_flags;
        }
        flags &= !MountFlags::STRICTATIME;
        *stored_flags = flags;
    }

    /// Number of active clients of this mount: every client holds a clone of
    /// `active_client_counter`, so subtract the mount's own reference.
    fn active_clients(&self) -> usize {
        Arc::strong_count(&self.active_client_counter) - 1
    }

    /// Unmounts this mount from its parent. Without `DETACH`, fails with
    /// EBUSY while the mount has active clients or submounts; fails with
    /// EINVAL if the mount has no mountpoint (e.g. a namespace root).
    pub fn unmount(&self, flags: UnmountFlags) -> Result<(), Errno> {
        if !flags.contains(UnmountFlags::DETACH) {
            if self.active_clients() > 0 || !self.state.read().submounts.is_empty() {
                return error!(EBUSY);
            }
        }
        let mountpoint = self.state.read().mountpoint().ok_or_else(|| errno!(EINVAL))?;
        let parent_mount = mountpoint.mount.as_ref().expect("a mountpoint must be part of a mount");
        parent_mount.remove_submount(mountpoint.mount_hash_key())
    }

    /// Security (LSM) state of the backing filesystem.
    pub fn security_state(&self) -> &security::FileSystemState {
        &self.fs.security_state
    }

    /// Name of the backing filesystem type.
    pub fn fs_name(&self) -> &'static FsStr {
        self.fs.name()
    }

    // Generates the `read()` / `write()` accessors for `self.state`.
    state_accessor!(Mount, state, Arc<Mount>);
}
500
impl MountState {
    /// Returns true if a submount is mounted on `dir_entry` within this mount.
    pub fn has_submount(&self, dir_entry: &DirEntryHandle) -> bool {
        self.submounts.contains(ArcKey::ref_cast(dir_entry))
    }

    /// The node this mount is attached to, or `None` if it has no mountpoint
    /// or the parent mount has already been dropped.
    fn mountpoint(&self) -> Option<NamespaceNode> {
        let (mount, entry) = self.mountpoint.as_ref()?;
        Some(NamespaceNode::new(mount.upgrade()?, entry.clone()))
    }

    /// The peer group this mount belongs to, if it is shared.
    fn peer_group(&self) -> Option<&Arc<PeerGroup>> {
        let (group, _) = self.peer_group_.as_ref()?;
        Some(group)
    }

    /// Leaves the current peer group (if any) and returns it. If this mount
    /// also had an upstream, the upstream is handed over to some remaining
    /// member of the old group so propagation is not lost.
    fn take_from_peer_group(&mut self) -> Option<Arc<PeerGroup>> {
        let (old_group, old_mount) = self.peer_group_.take()?;
        old_group.remove(old_mount);
        if let Some(upstream) = self.take_from_upstream() {
            let next_mount =
                old_group.state.read().mounts.iter().next().map(|w| w.0.upgrade().unwrap());
            if let Some(next_mount) = next_mount {
                next_mount.write().set_upstream(upstream);
            }
        }
        Some(old_group)
    }

    /// The peer group this mount receives propagation from, if still alive.
    fn upstream(&self) -> Option<Arc<PeerGroup>> {
        self.upstream_.as_ref().and_then(|g| g.0.upgrade())
    }

    /// Detaches from the upstream peer group (if any, and still alive) and
    /// returns it.
    fn take_from_upstream(&mut self) -> Option<Arc<PeerGroup>> {
        let (old_upstream, old_mount) = self.upstream_.take()?;
        let old_upstream = old_upstream.upgrade()?;
        old_upstream.remove_downstream(old_mount);
        Some(old_upstream)
    }
}
549
// Methods that need access to both the `MountState` and the owning `Mount`
// (available as `self.base` via the `state_implementation!` wrapper).
#[apply(state_implementation!)]
impl MountState<Base = Mount, BaseType = Arc<Mount>> {
    /// Attaches `mount` on `dir` inside the base mount. `mount` must be newly
    /// created (it may not already have a mountpoint).
    fn add_submount_internal(&mut self, dir: &DirEntryHandle, mount: MountHandle) {
        // Silently ignore attach points outside this mount's subtree.
        if !dir.is_descendant_of(&self.base.root) {
            return;
        }

        // Register the mount with the kernel-wide mount registry.
        let submount = mount.fs.kernel.upgrade().unwrap().mounts.register_mount(dir, mount.clone());
        let old_mountpoint =
            mount.state.write().mountpoint.replace((Arc::downgrade(self.base), Arc::clone(dir)));
        assert!(old_mountpoint.is_none(), "add_submount can only take a newly created mount");
        let old_mount = self.submounts.replace(submount);

        // If something was already mounted on `dir`, stack it on top of the
        // new mount: re-parent it onto the new mount's root.
        if let Some(mut old_mount) = old_mount {
            old_mount.mount.write().mountpoint = Some((Arc::downgrade(&mount), Arc::clone(dir)));
            old_mount.dir = ArcKey(mount.root.clone());
            mount.write().submounts.insert(old_mount);
        }
    }

    /// Removes the submount keyed by `mount_hash_key`; EINVAL if absent.
    fn remove_submount_internal(&mut self, mount_hash_key: &ArcKey<DirEntry>) -> Result<(), Errno> {
        if self.submounts.remove(mount_hash_key) { Ok(()) } else { error!(EINVAL) }
    }

    /// Joins `group`, leaving any previous peer group first.
    fn set_peer_group(&mut self, group: Arc<PeerGroup>) {
        self.take_from_peer_group();
        group.add(self.base);
        self.peer_group_ = Some((group, Arc::as_ptr(self.base).into()));
    }

    /// Sets `group` as the upstream (master), leaving any previous upstream.
    fn set_upstream(&mut self, group: Arc<PeerGroup>) {
        self.take_from_upstream();
        group.add_downstream(self.base);
        self.upstream_ = Some((Arc::downgrade(&group), Arc::as_ptr(self.base).into()));
    }

    /// True if this mount is shared (belongs to a peer group).
    pub fn is_shared(&self) -> bool {
        self.peer_group().is_some()
    }

    /// Makes the mount shared by creating a fresh peer group; no-op if it is
    /// already shared.
    pub fn make_shared(&mut self) {
        if self.is_shared() {
            return;
        }
        let kernel =
            self.base.fs.kernel.upgrade().expect("can't create new peer group without kernel");
        self.set_peer_group(PeerGroup::new(kernel.get_next_peer_group_id()));
    }

    /// Makes the mount private: leaves its peer group and its upstream.
    pub fn make_private(&mut self) {
        self.take_from_peer_group();
        self.take_from_upstream();
    }

    /// Makes the mount a downstream (slave) of its former peer group: it
    /// stops being a member but keeps receiving propagation from it.
    pub fn make_downstream(&mut self) {
        if let Some(peer_group) = self.take_from_peer_group() {
            self.set_upstream(peer_group);
        }
    }
}
626
impl PeerGroup {
    /// Creates an empty peer group with the given id.
    fn new(id: u64) -> Arc<Self> {
        Arc::new(Self { id, state: Default::default() })
    }

    /// Adds `mount` as a member of the group.
    fn add(&self, mount: &Arc<Mount>) {
        self.state.write().mounts.insert(WeakKey::from(mount));
    }

    /// Removes the member keyed by `mount`.
    fn remove(&self, mount: PtrKey<Mount>) {
        self.state.write().mounts.remove(&mount);
    }

    /// Adds `mount` as a downstream (slave) receiver of this group.
    fn add_downstream(&self, mount: &Arc<Mount>) {
        self.state.write().downstream.insert(WeakKey::from(mount));
    }

    /// Removes the downstream receiver keyed by `mount`.
    fn remove_downstream(&self, mount: PtrKey<Mount>) {
        self.state.write().downstream.remove(&mount);
    }

    /// All mounts that should receive propagation from this group: its
    /// members plus, transitively, every downstream mount/group.
    fn copy_propagation_targets(&self) -> Vec<MountHandle> {
        let mut buf = vec![];
        self.collect_propagation_targets(&mut buf);
        buf
    }

    /// Appends propagation targets into `buf`, recursing through downstream
    /// peer groups. Dead weak references are skipped.
    fn collect_propagation_targets(&self, buf: &mut Vec<MountHandle>) {
        // Collect downstream mounts first so the state lock is released
        // before recursing.
        let downstream_mounts: Vec<_> = {
            let state = self.state.read();
            buf.extend(state.mounts.iter().filter_map(|m| m.0.upgrade()));
            state.downstream.iter().filter_map(|m| m.0.upgrade()).collect()
        };
        for mount in downstream_mounts {
            let peer_group = mount.read().peer_group().map(Arc::clone);
            match peer_group {
                Some(group) => group.collect_propagation_targets(buf),
                None => buf.push(mount),
            }
        }
    }
}
669
impl Drop for Mount {
    fn drop(&mut self) {
        // Unregister from peer-group bookkeeping so the group does not keep
        // stale pointer keys. `get_mut` is lock-free: we have exclusive
        // access during drop.
        let state = self.state.get_mut();
        state.take_from_peer_group();
        state.take_from_upstream();
    }
}
677
678impl fmt::Debug for Mount {
679 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
680 let state = self.state.read();
681 f.debug_struct("Mount")
682 .field("id", &(self as *const Mount))
683 .field("root", &self.root)
684 .field("mountpoint", &state.mountpoint)
685 .field("submounts", &state.submounts)
686 .finish()
687 }
688}
689
impl Kernel {
    /// Allocates the next kernel-unique mount id.
    pub fn get_next_mount_id(&self) -> u64 {
        self.next_mount_id.next()
    }

    /// Allocates the next kernel-unique peer-group id.
    pub fn get_next_peer_group_id(&self) -> u64 {
        self.next_peer_group_id.next()
    }

    /// Allocates the next kernel-unique namespace id.
    pub fn get_next_namespace_id(&self) -> u64 {
        self.next_namespace_id.next()
    }
}
703
impl CurrentTask {
    /// Creates a filesystem of type `fs_type` with `options`, using the
    /// kernel's filesystem registry. Fails with ENODEV when the type is not
    /// registered.
    pub fn create_filesystem(
        &self,
        locked: &mut Locked<Unlocked>,
        fs_type: &FsStr,
        options: FileSystemOptions,
    ) -> Result<FileSystemHandle, Errno> {
        self.kernel()
            .expando
            .get::<FsRegistry>()
            .create(locked, self, fs_type, options)
            .ok_or_else(|| errno!(ENODEV, fs_type))?
    }
}
724
/// Writes the mount's flags followed by its security (LSM) mount options to
/// `sink`; shared by the /proc mounts and mountinfo generators below.
fn write_mount_info(task: &Task, sink: &mut DynamicFileBuf, mount: &Mount) -> Result<(), Errno> {
    write!(sink, "{}", mount.flags())?;
    security::sb_show_options(&task.kernel(), sink, &mount)
}
730
731struct ProcMountsFileSource(WeakRef<Task>);
732
impl DynamicFileSource for ProcMountsFileSource {
    /// Emits one line per mount reachable from the task's root, in the form:
    /// `<source> <mountpoint> <fstype> <flags+security options> 0 0`.
    fn generate(
        &self,
        _current_task: &CurrentTask,
        sink: &mut DynamicFileBuf,
    ) -> Result<(), Errno> {
        let task = Task::from_weak(&self.0)?;
        let root = task.fs().root();
        let ns = task.fs().namespace();
        for_each_mount(&ns.root_mount, &mut |mount| {
            // A namespace root has no mountpoint; report its own root. Skip
            // mounts not visible from the task's root (e.g. under a chroot).
            let mountpoint = mount.read().mountpoint().unwrap_or_else(|| mount.root());
            if !mountpoint.is_descendant_of(&root) {
                return Ok(());
            }
            write!(
                sink,
                "{} {} {} ",
                mount.fs.options.source_for_display(),
                mountpoint.path(&task),
                mount.fs.name(),
            )?;
            write_mount_info(&task, sink, mount)?;
            writeln!(sink, " 0 0")?;
            Ok(())
        })?;
        Ok(())
    }
}
765
/// File exposing the task's mounts listing via a `DynamicFile`.
pub struct ProcMountsFile {
    dynamic_file: DynamicFile<ProcMountsFileSource>,
}
769
impl ProcMountsFile {
    /// Creates the fs node; a fresh `ProcMountsFile` is built on every open.
    pub fn new_node(task: WeakRef<Task>) -> impl FsNodeOps {
        SimpleFileNode::new(move |_, _| {
            Ok(Self { dynamic_file: DynamicFile::new(ProcMountsFileSource(task.clone())) })
        })
    }
}
777
impl FileOps for ProcMountsFile {
    fileops_impl_delegate_read_write_and_seek!(self, self.dynamic_file);
    fileops_impl_noop_sync!();

    /// Supports polling but never signals: returns a fake wait canceler and
    /// (below) an empty event set.
    fn wait_async(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        _file: &FileObject,
        _current_task: &CurrentTask,
        waiter: &Waiter,
        _events: FdEvents,
        _handler: EventHandler,
    ) -> Option<WaitCanceler> {
        Some(waiter.fake_wait())
    }

    fn query_events(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        _file: &FileObject,
        _current_task: &CurrentTask,
    ) -> Result<FdEvents, Errno> {
        Ok(FdEvents::empty())
    }
}
805
/// Data source producing a mountinfo-style listing for a given task.
#[derive(Clone)]
pub struct ProcMountinfoFile(WeakRef<Task>);
impl ProcMountinfoFile {
    /// Creates the fs node backed by this dynamic file source.
    pub fn new_node(task: WeakRef<Task>) -> impl FsNodeOps {
        DynamicFile::new_node(Self(task))
    }
}
impl DynamicFileSource for ProcMountinfoFile {
    /// Emits one mountinfo-style line per mount reachable from the task's
    /// root: mount id, parent id, device id, root path within the fs,
    /// mountpoint, flags/security options, optional `shared:`/`master:`
    /// annotations, then fs type, source, and fs flags.
    fn generate(
        &self,
        _current_task: &CurrentTask,
        sink: &mut DynamicFileBuf,
    ) -> Result<(), Errno> {
        // Path of `dir` relative to its filesystem's root (not the
        // namespace). Dead entries get a "/deleted" suffix: it is prepended
        // first, so subsequent prepends of ancestors land in front of it.
        fn path_from_fs_root(dir: &DirEntryHandle) -> FsString {
            let mut path = PathBuilder::new();
            if dir.is_dead() {
                path.prepend_element("/deleted".into());
            }
            let scope = RcuReadScope::new();
            let mut current = dir.deref();
            while let Some(parent) = current.parent_ref(&scope) {
                path.prepend_element(current.local_name(&scope));
                current = parent;
            }
            path.build_absolute()
        }

        let task = Task::from_weak(&self.0)?;
        let root = task.fs().root();
        let ns = task.fs().namespace();
        for_each_mount(&ns.root_mount, &mut |mount| {
            // A namespace root has no mountpoint; report its own root. Skip
            // mounts not visible from the task's root.
            let mountpoint = mount.read().mountpoint().unwrap_or_else(|| mount.root());
            if !mountpoint.is_descendant_of(&root) {
                return Ok(());
            }
            let parent = mountpoint.mount.as_ref().unwrap();
            write!(
                sink,
                "{} {} {} {} {} ",
                mount.id,
                parent.id,
                mount.root.node.fs().dev_id,
                path_from_fs_root(&mount.root),
                mountpoint.path(&task),
            )?;
            write_mount_info(&task, sink, mount)?;
            if let Some(peer_group) = mount.read().peer_group() {
                write!(sink, " shared:{}", peer_group.id)?;
            }
            if let Some(upstream) = mount.read().upstream() {
                write!(sink, " master:{}", upstream.id)?;
            }
            writeln!(
                sink,
                " - {} {} {}",
                mount.fs.name(),
                mount.fs.options.source_for_display(),
                mount.fs.options.flags,
            )?;
            Ok(())
        })?;
        Ok(())
    }
}
877
878fn for_each_mount<E>(
879 mount: &MountHandle,
880 callback: &mut impl FnMut(&MountHandle) -> Result<(), E>,
881) -> Result<(), E> {
882 callback(mount)?;
883 let submounts: Vec<_> = mount.read().submounts.iter().map(|s| s.mount.clone()).collect();
886 for submount in submounts {
887 for_each_mount(&submount, callback)?;
888 }
889 Ok(())
890}
891
/// Whether a symlink in the final position of a lookup should be followed.
#[derive(Default, PartialEq, Eq, Copy, Clone, Debug)]
pub enum SymlinkMode {
    /// Follow a trailing symlink (the default).
    #[default]
    Follow,

    /// Do not follow a trailing symlink; resolve to the symlink itself.
    NoFollow,
}
902
903pub const MAX_SYMLINK_FOLLOWS: u8 = 40;
905
/// State threaded through a path lookup.
pub struct LookupContext {
    /// Whether a trailing symlink is followed.
    pub symlink_mode: SymlinkMode,

    /// Symlink traversals left before the lookup fails with ELOOP.
    pub remaining_follows: u8,

    /// Whether the final component must be a directory (set when the path
    /// ends in '/'; see `update_for_path`).
    pub must_be_directory: bool,

    /// RESOLVE_* style flags constraining the lookup (NO_XDEV, NO_SYMLINKS,
    /// NO_MAGICLINKS — see `lookup_child`).
    pub resolve_flags: ResolveFlags,

    /// The boundary used for `..` and absolute symlink targets.
    pub resolve_base: ResolveBase,
}
936
/// How `..` and absolute symlink targets are bounded during a lookup.
#[derive(Clone, Eq, PartialEq)]
pub enum ResolveBase {
    /// No extra restriction; resolution uses the task's root.
    None,

    /// Escaping above the given node fails with EXDEV.
    Beneath(NamespaceNode),

    /// The given node acts as the root for `..` and absolute symlinks.
    InRoot(NamespaceNode),
}
949
950impl LookupContext {
951 pub fn new(symlink_mode: SymlinkMode) -> LookupContext {
952 LookupContext {
953 symlink_mode,
954 remaining_follows: MAX_SYMLINK_FOLLOWS,
955 must_be_directory: false,
956 resolve_flags: ResolveFlags::empty(),
957 resolve_base: ResolveBase::None,
958 }
959 }
960
961 pub fn with(&self, symlink_mode: SymlinkMode) -> LookupContext {
962 LookupContext { symlink_mode, resolve_base: self.resolve_base.clone(), ..*self }
963 }
964
965 pub fn update_for_path(&mut self, path: &FsStr) {
966 if path.last() == Some(&b'/') {
967 self.must_be_directory = true;
970 self.symlink_mode = SymlinkMode::Follow;
973 }
974 }
975}
976
impl Default for LookupContext {
    /// Defaults to following trailing symlinks.
    fn default() -> Self {
        LookupContext::new(SymlinkMode::Follow)
    }
}
982
/// A computed path, tagged with whether it is reachable from the root it was
/// computed against (see `NamespaceNode::path_from_root`).
pub enum PathWithReachability {
    /// The node is reachable from the supplied root.
    Reachable(FsString),

    /// The node is not reachable from the supplied root.
    Unreachable(FsString),
}
991
992impl PathWithReachability {
993 pub fn into_path(self) -> FsString {
994 match self {
995 PathWithReachability::Reachable(path) => path,
996 PathWithReachability::Unreachable(path) => path,
997 }
998 }
999}
1000
/// A `DirEntry` plus the mount it was reached through — the unit of path
/// resolution within a mount namespace.
#[derive(Clone)]
pub struct NamespaceNode {
    /// The mount this node was reached through; detached for anonymous nodes.
    pub mount: MountInfo,

    /// The directory entry for this node.
    pub entry: DirEntryHandle,
}
1019
1020impl NamespaceNode {
    /// Creates a node for `entry` reached through `mount`.
    pub fn new(mount: MountHandle, entry: DirEntryHandle) -> Self {
        Self { mount: Some(mount).into(), entry }
    }
1024
    /// Creates a node for `entry` that is not attached to any mount.
    pub fn new_anonymous(entry: DirEntryHandle) -> Self {
        Self { mount: None.into(), entry }
    }
1029
    /// Creates an anonymous node around a fresh, unrooted dir entry for
    /// `node`. Security labeling is best-effort (errors are ignored).
    pub fn new_anonymous_unrooted(current_task: &CurrentTask, node: FsNodeHandle) -> Self {
        let dir_entry = DirEntry::new_unrooted(node);
        let _ = security::fs_node_init_with_dentry_no_xattr(current_task, &dir_entry);
        Self::new_anonymous(dir_entry)
    }
1037
    /// Opens this node with `flags`, performing the access check requested by
    /// `access_check`, and wraps the resulting ops in a `FileObject`.
    pub fn open(
        &self,
        locked: &mut Locked<Unlocked>,
        current_task: &CurrentTask,
        flags: OpenFlags,
        access_check: AccessCheck,
    ) -> Result<FileHandle, Errno> {
        let ops = self.entry.node.open(locked, current_task, self, flags, access_check)?;
        FileObject::new(locked, current_task, ops, self.clone(), flags)
    }
1053
    /// Creates a child node `name` for an open(O_CREAT) call, applying the
    /// task's umask to `mode`. With `O_EXCL` an existing entry is an error;
    /// otherwise an existing entry is returned as-is.
    pub fn open_create_node<L>(
        &self,
        locked: &mut Locked<L>,
        current_task: &CurrentTask,
        name: &FsStr,
        mode: FileMode,
        dev: DeviceType,
        flags: OpenFlags,
    ) -> Result<NamespaceNode, Errno>
    where
        L: LockEqualOrBefore<FileOpsCore>,
    {
        let owner = current_task.current_fscred();
        let mode = current_task.fs().apply_umask(mode);
        let create_fn =
            |locked: &mut Locked<L>, dir: &FsNodeHandle, mount: &MountInfo, name: &_| {
                dir.create_node(locked, current_task, mount, name, mode, dev, owner)
            };
        let entry = if flags.contains(OpenFlags::EXCL) {
            self.entry.create_entry(locked, current_task, &self.mount, name, create_fn)
        } else {
            self.entry.get_or_create_entry(locked, current_task, &self.mount, name, create_fn)
        }?;
        Ok(self.with_new_entry(entry))
    }
1084
    /// Converts this node into an `ActiveNamespaceNode`, registering it as an
    /// active client of the mount.
    pub fn into_active(self) -> ActiveNamespaceNode {
        ActiveNamespaceNode::new(self)
    }
1088
    /// Converts this node into a `FileMapping` (via the active-node form),
    /// optionally acquiring a write guard of the given mode.
    pub fn into_mapping(self, mode: Option<FileWriteGuardMode>) -> Result<Arc<FileMapping>, Errno> {
        self.into_active().into_mapping(mode)
    }
1092
    /// Creates a child node `name` with the given `mode` (after applying the
    /// task's umask) and device number; fails if the entry already exists.
    pub fn create_node<L>(
        &self,
        locked: &mut Locked<L>,
        current_task: &CurrentTask,
        name: &FsStr,
        mode: FileMode,
        dev: DeviceType,
    ) -> Result<NamespaceNode, Errno>
    where
        L: LockEqualOrBefore<FileOpsCore>,
    {
        let owner = current_task.current_fscred();
        let mode = current_task.fs().apply_umask(mode);
        let entry = self.entry.create_entry(
            locked,
            current_task,
            &self.mount,
            name,
            |locked, dir, mount, name| {
                dir.create_node(locked, current_task, mount, name, mode, dev, owner)
            },
        )?;
        Ok(self.with_new_entry(entry))
    }
1122
    /// Creates a symlink `name` pointing at `target`, owned by the calling
    /// task's credentials.
    pub fn create_symlink<L>(
        &self,
        locked: &mut Locked<L>,
        current_task: &CurrentTask,
        name: &FsStr,
        target: &FsStr,
    ) -> Result<NamespaceNode, Errno>
    where
        L: LockEqualOrBefore<FileOpsCore>,
    {
        let owner = current_task.current_fscred();
        let entry = self.entry.create_entry(
            locked,
            current_task,
            &self.mount,
            name,
            |locked, dir, mount, name| {
                dir.create_symlink(locked, current_task, mount, name, target, owner)
            },
        )?;
        Ok(self.with_new_entry(entry))
    }
1148
    /// Creates an unnamed temporary file (O_TMPFILE-style) in this directory,
    /// applying the task's umask to `mode`.
    pub fn create_tmpfile<L>(
        &self,
        locked: &mut Locked<L>,
        current_task: &CurrentTask,
        mode: FileMode,
        flags: OpenFlags,
    ) -> Result<NamespaceNode, Errno>
    where
        L: LockEqualOrBefore<FileOpsCore>,
    {
        let owner = current_task.current_fscred();
        let mode = current_task.fs().apply_umask(mode);
        Ok(self.with_new_entry(self.entry.create_tmpfile(
            locked,
            current_task,
            &self.mount,
            mode,
            owner,
            flags,
        )?))
    }
1175
    /// Creates a hard link `name` to the existing node `child`.
    pub fn link<L>(
        &self,
        locked: &mut Locked<L>,
        current_task: &CurrentTask,
        name: &FsStr,
        child: &FsNodeHandle,
    ) -> Result<NamespaceNode, Errno>
    where
        L: LockEqualOrBefore<FileOpsCore>,
    {
        let dir_entry = self.entry.create_entry(
            locked,
            current_task,
            &self.mount,
            name,
            |locked, dir, mount, name| dir.link(locked, current_task, mount, name, child),
        )?;
        Ok(self.with_new_entry(dir_entry))
    }
1195
    /// Creates a filesystem node `name` and binds the unix socket to it.
    /// Fails with ENOTSUP for non-unix sockets.
    pub fn bind_socket<L>(
        &self,
        locked: &mut Locked<L>,
        current_task: &CurrentTask,
        name: &FsStr,
        socket: SocketHandle,
        socket_address: SocketAddress,
        mode: FileMode,
    ) -> Result<NamespaceNode, Errno>
    where
        L: LockEqualOrBefore<FileOpsCore>,
    {
        let dir_entry = self.entry.create_entry(
            locked,
            current_task,
            &self.mount,
            name,
            |locked, dir, mount, name| {
                let node = dir.create_node(
                    locked,
                    current_task,
                    mount,
                    name,
                    mode,
                    DeviceType::NONE,
                    current_task.current_fscred(),
                )?;
                if let Some(unix_socket) = socket.downcast_socket::<UnixSocket>() {
                    unix_socket.bind_socket_to_node(&socket, socket_address, &node)?;
                } else {
                    return error!(ENOTSUP);
                }
                Ok(node)
            },
        )?;
        Ok(self.with_new_entry(dir_entry))
    }
1233
    /// Unlinks child `name` of the given `kind`.
    ///
    /// Reserved names are rejected with the Linux-compatible errors:
    /// rmdir("..") -> ENOTEMPTY, rmdir(".") at the root -> EBUSY, otherwise
    /// EINVAL; unlinking "." or ".." as a non-directory -> ENOTDIR.
    pub fn unlink<L>(
        &self,
        locked: &mut Locked<L>,
        current_task: &CurrentTask,
        name: &FsStr,
        kind: UnlinkKind,
        must_be_directory: bool,
    ) -> Result<(), Errno>
    where
        L: LockEqualOrBefore<FileOpsCore>,
    {
        if DirEntry::is_reserved_name(name) {
            match kind {
                UnlinkKind::Directory => {
                    if name == ".." {
                        error!(ENOTEMPTY)
                    } else if self.parent().is_none() {
                        error!(EBUSY)
                    } else {
                        error!(EINVAL)
                    }
                }
                UnlinkKind::NonDirectory => error!(ENOTDIR),
            }
        } else {
            self.entry.unlink(locked, current_task, &self.mount, name, kind, must_be_directory)
        }
    }
1263
    /// Resolves one path component `basename` relative to this directory,
    /// following symlinks according to `context` and entering any mounts
    /// stacked on the result.
    ///
    /// Errors: ENOTDIR if `self` is not a directory (or the result must be a
    /// directory but is not), ENAMETOOLONG for over-long names, ELOOP when
    /// the follow budget is exhausted or NO_SYMLINKS/NO_MAGICLINKS forbid the
    /// link, EXDEV for NO_XDEV crossings or escapes from a Beneath base.
    pub fn lookup_child<L>(
        &self,
        locked: &mut Locked<L>,
        current_task: &CurrentTask,
        context: &mut LookupContext,
        basename: &FsStr,
    ) -> Result<NamespaceNode, Errno>
    where
        L: LockEqualOrBefore<FileOpsCore>,
    {
        if !self.entry.node.is_dir() {
            return error!(ENOTDIR);
        }

        if basename.len() > NAME_MAX as usize {
            return error!(ENAMETOOLONG);
        }

        let child = if basename.is_empty() || basename == "." {
            // "" and "." resolve to the directory itself.
            self.clone()
        } else if basename == ".." {
            // Determine the root that ".." may not escape.
            let root = match &context.resolve_base {
                ResolveBase::None => current_task.fs().root(),
                ResolveBase::Beneath(node) => {
                    // ".." at the Beneath base would escape it.
                    if *self == *node {
                        return error!(EXDEV);
                    }
                    current_task.fs().root()
                }
                ResolveBase::InRoot(root) => root.clone(),
            };

            // ".." at the root stays at the root; a node with no parent
            // (e.g. a mount/namespace root) also resolves to itself.
            if *self == root { root } else { self.parent().unwrap_or_else(|| self.clone()) }
        } else {
            let mut child = self.with_new_entry(self.entry.component_lookup(
                locked,
                current_task,
                &self.mount,
                basename,
            )?);
            // Follow chains of symlinks, decrementing the follow budget.
            while child.entry.node.is_lnk() {
                match context.symlink_mode {
                    SymlinkMode::NoFollow => {
                        break;
                    }
                    SymlinkMode::Follow => {
                        if context.remaining_follows == 0
                            || context.resolve_flags.contains(ResolveFlags::NO_SYMLINKS)
                        {
                            return error!(ELOOP);
                        }
                        context.remaining_follows -= 1;
                        child = match child.readlink(locked, current_task)? {
                            SymlinkTarget::Path(link_target) => {
                                // Absolute targets restart from the resolve
                                // base's root; relative targets resolve from
                                // the symlink's parent directory.
                                let link_directory = if link_target[0] == b'/' {
                                    match &context.resolve_base {
                                        ResolveBase::None => current_task.fs().root(),
                                        ResolveBase::Beneath(_) => return error!(EXDEV),
                                        ResolveBase::InRoot(root) => root.clone(),
                                    }
                                } else {
                                    child.parent().unwrap_or(child)
                                };
                                current_task.lookup_path(
                                    locked,
                                    context,
                                    link_directory,
                                    link_target.as_ref(),
                                )?
                            }
                            SymlinkTarget::Node(node) => {
                                // "Magic" links resolve directly to a node.
                                if context.resolve_flags.contains(ResolveFlags::NO_MAGICLINKS) {
                                    return error!(ELOOP);
                                }
                                node
                            }
                        }
                    }
                };
            }

            // Descend into any mounts stacked on the resolved entry.
            child.enter_mount()
        };

        if context.resolve_flags.contains(ResolveFlags::NO_XDEV) && child.mount != self.mount {
            return error!(EXDEV);
        }

        if context.must_be_directory && !child.entry.node.is_dir() {
            return error!(ENOTDIR);
        }

        Ok(child)
    }
1365
    /// Returns the parent node, crossing mount boundaries: a mount root first
    /// escapes to its mountpoint, then takes that entry's parent. `None` at
    /// the top of the tree.
    pub fn parent(&self) -> Option<NamespaceNode> {
        let mountpoint_or_self = self.escape_mount();
        let parent = mountpoint_or_self.entry.parent()?;
        Some(mountpoint_or_self.with_new_entry(parent))
    }
1376
1377 pub fn parent_within_mount(&self) -> Option<DirEntryHandle> {
1380 if let Ok(_) = self.mount_if_root() {
1381 return None;
1382 }
1383 self.entry.parent()
1384 }
1385
    /// Returns true if `self` is `ancestor` or lies beneath it, walking up
    /// through mount boundaries (both sides are compared after escaping to
    /// their mountpoints).
    pub fn is_descendant_of(&self, ancestor: &NamespaceNode) -> bool {
        let ancestor = ancestor.escape_mount();
        let mut current = self.escape_mount();
        while current != ancestor {
            if let Some(parent) = current.parent() {
                current = parent.escape_mount();
            } else {
                return false;
            }
        }
        true
    }
1402
    /// Descends into the mounts stacked on this node: while a submount is
    /// mounted on the current entry, move to that submount's root. Returns
    /// the innermost (most recently stacked) mount's root.
    fn enter_mount(&self) -> NamespaceNode {
        // Enter one level of mount, if any.
        fn enter_one_mount(node: &NamespaceNode) -> Option<NamespaceNode> {
            if let Some(mount) = node.mount.deref() {
                if let Some(submount) =
                    mount.state.read().submounts.get(ArcKey::ref_cast(&node.entry))
                {
                    return Some(submount.mount.root());
                }
            }
            None
        }
        let mut inner = self.clone();
        while let Some(inner_root) = enter_one_mount(&inner) {
            inner = inner_root;
        }
        inner
    }
1422
1423 fn escape_mount(&self) -> NamespaceNode {
1428 let mut mountpoint_or_self = self.clone();
1429 while let Some(mountpoint) = mountpoint_or_self.mountpoint() {
1430 mountpoint_or_self = mountpoint;
1431 }
1432 mountpoint_or_self
1433 }
1434
1435 pub fn mount_if_root(&self) -> Result<&MountHandle, Errno> {
1437 if let Some(mount) = self.mount.deref() {
1438 if Arc::ptr_eq(&self.entry, &mount.root) {
1439 return Ok(mount);
1440 }
1441 }
1442 error!(EINVAL)
1443 }
1444
1445 fn mountpoint(&self) -> Option<NamespaceNode> {
1450 self.mount_if_root().ok()?.read().mountpoint()
1451 }
1452
1453 pub fn path(&self, task: &Task) -> FsString {
1455 self.path_from_root(Some(&task.fs().root())).into_path()
1456 }
1457
1458 pub fn path_escaping_chroot(&self) -> FsString {
1460 self.path_from_root(None).into_path()
1461 }
1462
1463 pub fn path_from_root(&self, root: Option<&NamespaceNode>) -> PathWithReachability {
1466 if self.mount.is_none() {
1467 return PathWithReachability::Reachable(self.entry.node.internal_name());
1468 }
1469
1470 let mut path = PathBuilder::new();
1471 let mut current = self.escape_mount();
1472 if let Some(root) = root {
1473 let scope = RcuReadScope::new();
1474 let root = root.escape_mount();
1476 while current != root {
1477 if let Some(parent) = current.parent() {
1478 path.prepend_element(current.entry.local_name(&scope));
1479 current = parent.escape_mount();
1480 } else {
1481 let mut absolute_path = path.build_absolute();
1483 if self.entry.is_dead() {
1484 absolute_path.extend_from_slice(b" (deleted)");
1485 }
1486
1487 return PathWithReachability::Unreachable(absolute_path);
1488 }
1489 }
1490 } else {
1491 let scope = RcuReadScope::new();
1493 while let Some(parent) = current.parent() {
1494 path.prepend_element(current.entry.local_name(&scope));
1495 current = parent.escape_mount();
1496 }
1497 }
1498
1499 let mut absolute_path = path.build_absolute();
1500 if self.entry.is_dead() {
1501 absolute_path.extend_from_slice(b" (deleted)");
1502 }
1503
1504 PathWithReachability::Reachable(absolute_path)
1505 }
1506
1507 pub fn mount(&self, what: WhatToMount, flags: MountFlags) -> Result<(), Errno> {
1508 let flags = flags & (MountFlags::STORED_ON_MOUNT | MountFlags::REC);
1509 let mountpoint = self.enter_mount();
1510 let mount = mountpoint.mount.as_ref().expect("a mountpoint must be part of a mount");
1511 mount.create_submount(&mountpoint.entry, what, flags);
1512 Ok(())
1513 }
1514
1515 pub fn unmount(&self, flags: UnmountFlags) -> Result<(), Errno> {
1517 let mount = self.enter_mount().mount_if_root()?.clone();
1518 mount.unmount(flags)
1519 }
1520
    /// Renames `old_name` in `old_parent` to `new_name` in `new_parent`.
    ///
    /// Pure delegation to [`DirEntry::rename`], forwarding each parent's
    /// mount so mount-aware checks can be performed there. The argument
    /// order below is significant — entries and mounts are passed pairwise
    /// for the old and then the new parent.
    pub fn rename<L>(
        locked: &mut Locked<L>,
        current_task: &CurrentTask,
        old_parent: &NamespaceNode,
        old_name: &FsStr,
        new_parent: &NamespaceNode,
        new_name: &FsStr,
        flags: RenameFlags,
    ) -> Result<(), Errno>
    where
        L: LockEqualOrBefore<FileOpsCore>,
    {
        DirEntry::rename(
            locked,
            current_task,
            &old_parent.entry,
            &old_parent.mount,
            old_name,
            &new_parent.entry,
            &new_parent.mount,
            new_name,
            flags,
        )
    }
1545
1546 fn with_new_entry(&self, entry: DirEntryHandle) -> NamespaceNode {
1547 Self { mount: self.mount.clone(), entry }
1548 }
1549
    /// Key for looking this node's entry up in a mount's `submounts` set.
    fn mount_hash_key(&self) -> &ArcKey<DirEntry> {
        ArcKey::ref_cast(&self.entry)
    }
1553
1554 pub fn suid_and_sgid(&self, current_task: &CurrentTask) -> Result<UserAndOrGroupId, Errno> {
1555 if self.mount.flags().contains(MountFlags::NOSUID) {
1556 Ok(UserAndOrGroupId::default())
1557 } else {
1558 self.entry.node.info().suid_and_sgid(current_task, &self.entry.node)
1559 }
1560 }
1561
1562 pub fn update_atime(&self) {
1563 if !self.mount.flags().contains(MountFlags::NOATIME) {
1565 self.entry.node.update_info(|info| {
1566 let now = utc::utc_now();
1567 info.time_access = now;
1568 info.pending_time_access_update = true;
1569 });
1570 }
1571 }
1572
    /// Reads the target of this symlink.
    ///
    /// Reading a link counts as an access, so the access time is updated
    /// first (subject to the mount's `noatime` flag) and only then is the
    /// node's `readlink` invoked.
    pub fn readlink<L>(
        &self,
        locked: &mut Locked<L>,
        current_task: &CurrentTask,
    ) -> Result<SymlinkTarget, Errno>
    where
        L: LockEqualOrBefore<FileOpsCore>,
    {
        self.update_atime();
        self.entry.node.readlink(locked, current_task)
    }
1584
1585 pub fn notify(&self, event_mask: InotifyMask) {
1586 if self.mount.is_some() {
1587 self.entry.notify(event_mask);
1588 }
1589 }
1590
    /// Checks whether `current_task` may access this node with
    /// `permission_flags`.
    ///
    /// Delegates to the node's `check_access`, passing the mount along so
    /// mount-level restrictions can be taken into account there.
    pub fn check_access<L>(
        &self,
        locked: &mut Locked<L>,
        current_task: &CurrentTask,
        permission_flags: impl Into<security::PermissionFlags>,
        reason: CheckAccessReason,
    ) -> Result<(), Errno>
    where
        L: LockEqualOrBefore<FileOpsCore>,
    {
        self.entry.node.check_access(
            locked,
            current_task,
            &self.mount,
            permission_flags,
            reason,
            self,
        )
    }
1613
    /// Checks whether `current_task` is allowed to open this node with
    /// `O_NOATIME`; delegates the policy decision to the node.
    pub fn check_o_noatime_allowed(&self, current_task: &CurrentTask) -> Result<(), Errno> {
        self.entry.node.check_o_noatime_allowed(current_task)
    }
1618
1619 pub fn truncate<L>(
1620 &self,
1621 locked: &mut Locked<L>,
1622 current_task: &CurrentTask,
1623 length: u64,
1624 ) -> Result<(), Errno>
1625 where
1626 L: LockBefore<BeforeFsNodeAppend>,
1627 {
1628 self.entry.node.truncate(locked, current_task, &self.mount, length)?;
1629 self.entry.notify_ignoring_excl_unlink(InotifyMask::MODIFY);
1630 Ok(())
1631 }
1632}
1633
1634impl fmt::Debug for NamespaceNode {
1635 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1636 f.debug_struct("NamespaceNode")
1637 .field("path", &self.path_escaping_chroot())
1638 .field("mount", &self.mount)
1639 .field("entry", &self.entry)
1640 .finish()
1641 }
1642}
1643
1644impl PartialEq for NamespaceNode {
1646 fn eq(&self, other: &Self) -> bool {
1647 self.mount.as_ref().map(Arc::as_ptr).eq(&other.mount.as_ref().map(Arc::as_ptr))
1648 && Arc::ptr_eq(&self.entry, &other.entry)
1649 }
1650}
1651impl Eq for NamespaceNode {}
// Hashes the same raw pointers that `PartialEq` compares, preserving the
// Eq/Hash consistency contract. The order of the two writes is part of the
// resulting hash value, so it is kept as-is.
impl Hash for NamespaceNode {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.mount.as_ref().map(Arc::as_ptr).hash(state);
        Arc::as_ptr(&self.entry).hash(state);
    }
}
1658
/// A [`NamespaceNode`] that additionally registers itself as an active
/// client of the node's mount for as long as it is alive.
#[derive(Debug, Clone)]
pub struct ActiveNamespaceNode {
    // The underlying (passive) namespace node.
    name: NamespaceNode,

    // Clone of the mount's active-client counter; `None` when the node has
    // no mount. Held only for its Drop side effect.
    _marker: Option<MountClientMarker>,
}
1670
1671impl ActiveNamespaceNode {
1672 pub fn new(name: NamespaceNode) -> Self {
1673 let marker = name.mount.as_ref().map(|mount| mount.active_client_counter.clone());
1674 Self { name, _marker: marker }
1675 }
1676
1677 pub fn to_passive(&self) -> NamespaceNode {
1678 self.deref().clone()
1679 }
1680
1681 pub fn into_mapping(self, mode: Option<FileWriteGuardMode>) -> Result<Arc<FileMapping>, Errno> {
1682 if let Some(mode) = mode {
1683 self.entry.node.write_guard_state.lock().acquire(mode)?;
1684 }
1685 Ok(Arc::new(FileMapping { name: self, mode }))
1686 }
1687}
1688
// Lets an `ActiveNamespaceNode` be used anywhere a `NamespaceNode` is
// expected by delegating to the wrapped node.
impl Deref for ActiveNamespaceNode {
    type Target = NamespaceNode;

    fn deref(&self) -> &Self::Target {
        &self.name
    }
}
1696
1697impl PartialEq for ActiveNamespaceNode {
1698 fn eq(&self, other: &Self) -> bool {
1699 self.deref().eq(other.deref())
1700 }
1701}
1702impl Eq for ActiveNamespaceNode {}
1703impl Hash for ActiveNamespaceNode {
1704 fn hash<H: Hasher>(&self, state: &mut H) {
1705 self.deref().hash(state)
1706 }
1707}
1708
/// A file mapped into memory, optionally holding a write guard on the node
/// for the lifetime of the mapping.
#[derive(Debug, Clone, PartialEq, Eq)]
#[must_use]
pub struct FileMapping {
    // The mapped file; active, so the mount stays referenced while mapped.
    pub name: ActiveNamespaceNode,
    // Write-guard mode acquired in `into_mapping`; released on drop.
    mode: Option<FileWriteGuardMode>,
}
1715
1716impl Drop for FileMapping {
1717 fn drop(&mut self) {
1718 if let Some(mode) = self.mode {
1719 self.name.entry.node.write_guard_state.lock().release(mode);
1720 }
1721 }
1722}
1723
/// Registry of active mounts, keyed by the directory entry each mount is
/// attached to.
pub struct Mounts {
    // Maps a mountpoint `DirEntry` (held weakly) to the mounts stacked on
    // it. Readers iterate under an RCU read scope; writers take the map's
    // internal lock.
    mounts: RcuHashMap<WeakKey<DirEntry>, Vec<ArcKey<Mount>>>,
}
1728
impl Mounts {
    /// Creates an empty mount registry.
    pub fn new() -> Self {
        Mounts { mounts: RcuHashMap::default() }
    }

    /// Records that `mount` is attached to `dir_entry` and returns the
    /// [`Submount`] token that unregisters it again on drop.
    fn register_mount(&self, dir_entry: &Arc<DirEntry>, mount: MountHandle) -> Submount {
        let mut mounts = self.mounts.lock();
        let key = WeakKey::from(dir_entry);
        // First mount on this entry: flag the entry so lookups know there is
        // a submount to traverse into.
        let mut vec = mounts.get(&key).unwrap_or_else(|| {
            dir_entry.set_has_mounts(true);
            Vec::new()
        });
        vec.push(ArcKey(mount.clone()));
        // `get` hands back an owned Vec (RCU-style copy), so the updated
        // list must be written back with `insert`.
        mounts.insert(key, vec);
        Submount { dir: ArcKey(dir_entry.clone()), mount }
    }

    /// Removes `mount` from the list registered against `dir_entry`,
    /// clearing the entry's has-mounts flag when the last mount goes away.
    fn unregister_mount(&self, dir_entry: &Arc<DirEntry>, mount: &MountHandle) {
        let mut mounts = self.mounts.lock();
        let key = WeakKey::from(dir_entry);
        if let Some(mut vec) = mounts.get(&key) {
            // Registration and unregistration are paired by `Submount`'s
            // lifetime, so the mount must be present here.
            let index = vec.iter().position(|e| e == ArcKey::ref_cast(mount)).unwrap();
            if vec.len() == 1 {
                mounts.remove(&key);
                dir_entry.set_has_mounts(false);
            } else {
                // Order within the list is not preserved; the cheaper
                // swap_remove is used.
                vec.swap_remove(index);
                mounts.insert(key, vec);
            }
        }
    }

    /// Detaches every mount stacked on `dir_entry`.
    pub fn unmount(&self, dir_entry: &DirEntry) {
        // The lock guard is a temporary, so the map lock is released before
        // the unmount loop below runs.
        let mounts = self.mounts.lock().remove(&PtrKey::from(dir_entry as *const _));
        if let Some(mounts) = mounts {
            for mount in mounts {
                // Best effort: unmount failures are deliberately ignored.
                let _ = mount.unmount(UnmountFlags::DETACH);
            }
        }
    }

    /// Drains the registry, forcing every registered filesystem to release
    /// its operations.
    pub fn clear(&self) {
        for (_dir_entry, mounts) in self.mounts.lock().drain() {
            for mount in mounts {
                mount.fs.force_unmount_ops();
            }
        }
    }

    /// Syncs every mounted filesystem exactly once, logging (rather than
    /// propagating) individual failures.
    pub fn sync_all(
        &self,
        locked: &mut Locked<Unlocked>,
        current_task: &CurrentTask,
    ) -> Result<(), Errno> {
        // Collect unique filesystems under the RCU read scope first, so the
        // scope is not held across the sync calls below.
        let mut filesystems = Vec::new();
        {
            let scope = RcuReadScope::new();
            let mut seen = HashSet::new();
            for (_dir_entry, m_list) in self.mounts.iter(&scope) {
                for m in m_list {
                    // Multiple mounts can share one filesystem; dedupe by
                    // filesystem pointer so each is synced only once.
                    if seen.insert(Arc::as_ptr(&m.fs)) {
                        filesystems.push(m.fs.clone());
                    }
                }
            }
        }

        for fs in filesystems {
            if let Err(e) = fs.sync(locked, current_task) {
                log_warn!("sync failed for filesystem {:?}: {:?}", fs.name(), e);
            }
        }
        Ok(())
    }
}
1813
/// A mount paired with the directory entry it is mounted on; created by
/// `Mounts::register_mount` and unregistered automatically on drop.
#[derive(Debug)]
struct Submount {
    // The mountpoint directory entry.
    dir: ArcKey<DirEntry>,
    // The mount attached at `dir`.
    mount: MountHandle,
}
1820
1821impl Drop for Submount {
1822 fn drop(&mut self) {
1823 self.mount.fs.kernel.upgrade().unwrap().mounts.unregister_mount(&self.dir, &self.mount)
1824 }
1825}
1826
1827impl Eq for Submount {}
1829impl PartialEq<Self> for Submount {
1830 fn eq(&self, other: &Self) -> bool {
1831 self.dir == other.dir
1832 }
1833}
1834impl Hash for Submount {
1835 fn hash<H: Hasher>(&self, state: &mut H) {
1836 self.dir.hash(state)
1837 }
1838}
1839
// Allows a set of `Submount`s to be probed with just an `ArcKey<DirEntry>`,
// without constructing a full `Submount`.
impl Borrow<ArcKey<DirEntry>> for Submount {
    fn borrow(&self) -> &ArcKey<DirEntry> {
        &self.dir
    }
}
1845
#[cfg(test)]
mod test {
    use crate::fs::tmpfs::TmpFs;
    use crate::testing::spawn_kernel_and_run;
    use crate::vfs::namespace::DeviceType;
    use crate::vfs::{
        CallbackSymlinkNode, FsNodeInfo, LookupContext, MountInfo, Namespace, NamespaceNode,
        RenameFlags, SymlinkMode, SymlinkTarget, UnlinkKind, WhatToMount,
    };
    use starnix_uapi::mount_flags::MountFlags;
    use starnix_uapi::{errno, mode};
    use std::sync::Arc;

    // NOTE: every `&current_task` in this module had been corrupted by HTML
    // entity decoding (`&curren` -> `¤`, yielding `¤t_task`); restored here.

    /// Mounting a filesystem at /dev: lookups resolve into the mounted fs,
    /// and `parent()` traverses back out through the mountpoint.
    #[::fuchsia::test]
    async fn test_namespace() {
        spawn_kernel_and_run(async |locked, current_task| {
            let kernel = current_task.kernel();
            let root_fs = TmpFs::new_fs(locked, &kernel);
            let root_node = Arc::clone(root_fs.root());
            let _dev_node = root_node
                .create_dir(locked, &current_task, "dev".into())
                .expect("failed to mkdir dev");
            let dev_fs = TmpFs::new_fs(locked, &kernel);
            let dev_root_node = Arc::clone(dev_fs.root());
            let _dev_pts_node = dev_root_node
                .create_dir(locked, &current_task, "pts".into())
                .expect("failed to mkdir pts");

            let ns = Namespace::new(root_fs);
            let mut context = LookupContext::default();
            let dev = ns
                .root()
                .lookup_child(locked, &current_task, &mut context, "dev".into())
                .expect("failed to lookup dev");
            dev.mount(WhatToMount::Fs(dev_fs), MountFlags::empty())
                .expect("failed to mount dev root node");

            let mut context = LookupContext::default();
            let dev = ns
                .root()
                .lookup_child(locked, &current_task, &mut context, "dev".into())
                .expect("failed to lookup dev");
            let mut context = LookupContext::default();
            let pts = dev
                .lookup_child(locked, &current_task, &mut context, "pts".into())
                .expect("failed to lookup pts");
            let pts_parent =
                pts.parent().ok_or_else(|| errno!(ENOENT)).expect("failed to get parent of pts");
            assert!(Arc::ptr_eq(&pts_parent.entry, &dev.entry));

            let dev_parent =
                dev.parent().ok_or_else(|| errno!(ENOENT)).expect("failed to get parent of dev");
            assert!(Arc::ptr_eq(&dev_parent.entry, &ns.root().entry));
        })
        .await;
    }

    /// A NamespaceNode captured before a mount keeps referring to the
    /// original (covered) entry and cannot see into the new mount.
    #[::fuchsia::test]
    async fn test_mount_does_not_upgrade() {
        spawn_kernel_and_run(async |locked, current_task| {
            let kernel = current_task.kernel();
            let root_fs = TmpFs::new_fs(locked, &kernel);
            let root_node = Arc::clone(root_fs.root());
            let _dev_node = root_node
                .create_dir(locked, &current_task, "dev".into())
                .expect("failed to mkdir dev");
            let dev_fs = TmpFs::new_fs(locked, &kernel);
            let dev_root_node = Arc::clone(dev_fs.root());
            let _dev_pts_node = dev_root_node
                .create_dir(locked, &current_task, "pts".into())
                .expect("failed to mkdir pts");

            let ns = Namespace::new(root_fs);
            let mut context = LookupContext::default();
            let dev = ns
                .root()
                .lookup_child(locked, &current_task, &mut context, "dev".into())
                .expect("failed to lookup dev");
            dev.mount(WhatToMount::Fs(dev_fs), MountFlags::empty())
                .expect("failed to mount dev root node");
            let mut context = LookupContext::default();
            let new_dev = ns
                .root()
                .lookup_child(locked, &current_task, &mut context, "dev".into())
                .expect("failed to lookup dev again");
            assert!(!Arc::ptr_eq(&dev.entry, &new_dev.entry));
            assert_ne!(&dev, &new_dev);

            let mut context = LookupContext::default();
            let _new_pts = new_dev
                .lookup_child(locked, &current_task, &mut context, "pts".into())
                .expect("failed to lookup pts");
            // The pre-mount node still points at the covered directory, in
            // which "pts" does not exist.
            let mut context = LookupContext::default();
            assert!(dev.lookup_child(locked, &current_task, &mut context, "pts".into()).is_err());
        })
        .await;
    }

    /// Paths are rendered across mount boundaries.
    #[::fuchsia::test]
    async fn test_path() {
        spawn_kernel_and_run(async |locked, current_task| {
            let kernel = current_task.kernel();
            let root_fs = TmpFs::new_fs(locked, &kernel);
            let root_node = Arc::clone(root_fs.root());
            let _dev_node = root_node
                .create_dir(locked, &current_task, "dev".into())
                .expect("failed to mkdir dev");
            let dev_fs = TmpFs::new_fs(locked, &kernel);
            let dev_root_node = Arc::clone(dev_fs.root());
            let _dev_pts_node = dev_root_node
                .create_dir(locked, &current_task, "pts".into())
                .expect("failed to mkdir pts");

            let ns = Namespace::new(root_fs);
            let mut context = LookupContext::default();
            let dev = ns
                .root()
                .lookup_child(locked, &current_task, &mut context, "dev".into())
                .expect("failed to lookup dev");
            dev.mount(WhatToMount::Fs(dev_fs), MountFlags::empty())
                .expect("failed to mount dev root node");

            let mut context = LookupContext::default();
            let dev = ns
                .root()
                .lookup_child(locked, &current_task, &mut context, "dev".into())
                .expect("failed to lookup dev");
            let mut context = LookupContext::default();
            let pts = dev
                .lookup_child(locked, &current_task, &mut context, "pts".into())
                .expect("failed to lookup pts");

            assert_eq!("/", ns.root().path_escaping_chroot());
            assert_eq!("/dev", dev.path_escaping_chroot());
            assert_eq!("/dev/pts", pts.path_escaping_chroot());
        })
        .await;
    }

    /// A second mount on the same mountpoint shadows the first in the
    /// current namespace, while a cloned namespace keeps seeing the first.
    #[::fuchsia::test]
    async fn test_shadowing() {
        spawn_kernel_and_run(async |locked, current_task| {
            let kernel = current_task.kernel();
            let root_fs = TmpFs::new_fs(locked, &kernel);
            let ns = Namespace::new(root_fs.clone());
            let _foo_node = root_fs.root().create_dir(locked, &current_task, "foo".into()).unwrap();
            let mut context = LookupContext::default();
            let foo_dir =
                ns.root().lookup_child(locked, &current_task, &mut context, "foo".into()).unwrap();

            let foofs1 = TmpFs::new_fs(locked, &kernel);
            foo_dir.mount(WhatToMount::Fs(foofs1.clone()), MountFlags::empty()).unwrap();
            let mut context = LookupContext::default();
            assert!(Arc::ptr_eq(
                &ns.root()
                    .lookup_child(locked, &current_task, &mut context, "foo".into())
                    .unwrap()
                    .entry,
                foofs1.root()
            ));
            let foo_dir =
                ns.root().lookup_child(locked, &current_task, &mut context, "foo".into()).unwrap();

            let ns_clone = ns.clone_namespace();

            let foofs2 = TmpFs::new_fs(locked, &kernel);
            foo_dir.mount(WhatToMount::Fs(foofs2.clone()), MountFlags::empty()).unwrap();
            let mut context = LookupContext::default();
            assert!(Arc::ptr_eq(
                &ns.root()
                    .lookup_child(locked, &current_task, &mut context, "foo".into())
                    .unwrap()
                    .entry,
                foofs2.root()
            ));

            // The clone was taken before the second mount and still resolves
            // to the first filesystem.
            assert!(Arc::ptr_eq(
                &ns_clone
                    .root()
                    .lookup_child(
                        locked,
                        &current_task,
                        &mut LookupContext::default(),
                        "foo".into()
                    )
                    .unwrap()
                    .entry,
                foofs1.root()
            ));
        })
        .await;
    }

    /// Unlinking a directory that is a mountpoint in the *same* namespace
    /// fails with EBUSY; from another namespace it succeeds.
    #[::fuchsia::test]
    async fn test_unlink_mounted_directory() {
        spawn_kernel_and_run(async |locked, current_task| {
            let kernel = current_task.kernel();
            let root_fs = TmpFs::new_fs(locked, &kernel);
            let ns1 = Namespace::new(root_fs.clone());
            let ns2 = Namespace::new(root_fs.clone());
            let _foo_node = root_fs.root().create_dir(locked, &current_task, "foo".into()).unwrap();
            let mut context = LookupContext::default();
            let foo_dir =
                ns1.root().lookup_child(locked, &current_task, &mut context, "foo".into()).unwrap();

            let foofs = TmpFs::new_fs(locked, &kernel);
            foo_dir.mount(WhatToMount::Fs(foofs), MountFlags::empty()).unwrap();

            assert_eq!(
                ns1.root()
                    .unlink(locked, &current_task, "foo".into(), UnlinkKind::Directory, false)
                    .unwrap_err(),
                errno!(EBUSY),
            );

            ns2.root()
                .unlink(locked, &current_task, "foo".into(), UnlinkKind::Directory, false)
                .expect("unlink failed");

            assert_eq!(
                ns1.root()
                    .unlink(locked, &current_task, "foo".into(), UnlinkKind::Directory, false)
                    .unwrap_err(),
                errno!(ENOENT),
            );
        })
        .await;
    }

    /// Renaming to or from a mountpoint in the same namespace fails with
    /// EBUSY; from another namespace the renames succeed.
    #[::fuchsia::test]
    async fn test_rename_mounted_directory() {
        spawn_kernel_and_run(async |locked, current_task| {
            let kernel = current_task.kernel();
            let root_fs = TmpFs::new_fs(locked, &kernel);
            let ns1 = Namespace::new(root_fs.clone());
            let ns2 = Namespace::new(root_fs.clone());
            let _foo_node = root_fs.root().create_dir(locked, &current_task, "foo".into()).unwrap();
            let _bar_node = root_fs.root().create_dir(locked, &current_task, "bar".into()).unwrap();
            let _baz_node = root_fs.root().create_dir(locked, &current_task, "baz".into()).unwrap();
            let mut context = LookupContext::default();
            let foo_dir =
                ns1.root().lookup_child(locked, &current_task, &mut context, "foo".into()).unwrap();

            let foofs = TmpFs::new_fs(locked, &kernel);
            foo_dir.mount(WhatToMount::Fs(foofs), MountFlags::empty()).unwrap();

            let root = ns1.root();
            assert_eq!(
                NamespaceNode::rename(
                    locked,
                    &current_task,
                    &root,
                    "bar".into(),
                    &root,
                    "foo".into(),
                    RenameFlags::empty()
                )
                .unwrap_err(),
                errno!(EBUSY),
            );
            assert_eq!(
                NamespaceNode::rename(
                    locked,
                    &current_task,
                    &root,
                    "foo".into(),
                    &root,
                    "bar".into(),
                    RenameFlags::empty()
                )
                .unwrap_err(),
                errno!(EBUSY),
            );

            let root = ns2.root();

            NamespaceNode::rename(
                locked,
                &current_task,
                &root,
                "foo".into(),
                &root,
                "bar".into(),
                RenameFlags::empty(),
            )
            .expect("rename failed");

            NamespaceNode::rename(
                locked,
                &current_task,
                &root,
                "baz".into(),
                &root,
                "bar".into(),
                RenameFlags::empty(),
            )
            .expect("rename failed");

            assert_eq!(
                ns1.root()
                    .lookup_child(locked, &current_task, &mut context, "foo".into())
                    .unwrap_err(),
                errno!(ENOENT)
            );
            assert_eq!(
                ns1.root()
                    .lookup_child(locked, &current_task, &mut context, "baz".into())
                    .unwrap_err(),
                errno!(ENOENT)
            );
        })
        .await;
    }

    /// A node-target symlink that points at a path-target symlink resolves
    /// through the whole chain to the final regular file.
    #[::fuchsia::test]
    async fn test_lookup_with_symlink_chain() {
        spawn_kernel_and_run(async |locked, current_task| {
            let kernel = current_task.kernel();
            let root_fs = TmpFs::new_fs(locked, &kernel);
            let root_node = Arc::clone(root_fs.root());
            let _first_subdir_node = root_node
                .create_dir(locked, &current_task, "first_subdir".into())
                .expect("failed to mkdir dev");
            let _second_subdir_node = root_node
                .create_dir(locked, &current_task, "second_subdir".into())
                .expect("failed to mkdir dev");

            let first_subdir_fs = TmpFs::new_fs(locked, &kernel);
            let second_subdir_fs = TmpFs::new_fs(locked, &kernel);

            let ns = Namespace::new(root_fs);
            let mut context = LookupContext::default();
            let first_subdir = ns
                .root()
                .lookup_child(locked, &current_task, &mut context, "first_subdir".into())
                .expect("failed to lookup first_subdir");
            first_subdir
                .mount(WhatToMount::Fs(first_subdir_fs), MountFlags::empty())
                .expect("failed to mount first_subdir fs node");
            let second_subdir = ns
                .root()
                .lookup_child(locked, &current_task, &mut context, "second_subdir".into())
                .expect("failed to lookup second_subdir");
            second_subdir
                .mount(WhatToMount::Fs(second_subdir_fs), MountFlags::empty())
                .expect("failed to mount second_subdir fs node");

            let real_file_node = first_subdir
                .create_node(
                    locked,
                    &current_task,
                    "real_file".into(),
                    mode!(IFREG, 0o777),
                    DeviceType::NONE,
                )
                .expect("failed to create real_file");
            first_subdir
                .create_symlink(locked, &current_task, "path_symlink".into(), "real_file".into())
                .expect("failed to create path_symlink");

            let mut no_follow_lookup_context = LookupContext::new(SymlinkMode::NoFollow);
            let path_symlink_node = first_subdir
                .lookup_child(
                    locked,
                    &current_task,
                    &mut no_follow_lookup_context,
                    "path_symlink".into(),
                )
                .expect("Failed to lookup path_symlink");

            // A symlink whose target is a node (magic link) pointing at the
            // path-based symlink created above.
            let node_symlink_node = second_subdir.entry.node.fs().create_node_and_allocate_node_id(
                CallbackSymlinkNode::new(move || {
                    let node = path_symlink_node.clone();
                    Ok(SymlinkTarget::Node(node))
                }),
                FsNodeInfo::new(mode!(IFLNK, 0o777), current_task.current_fscred()),
            );
            second_subdir
                .entry
                .create_entry(
                    locked,
                    &current_task,
                    &MountInfo::detached(),
                    "node_symlink".into(),
                    move |_locked, _dir, _mount, _name| Ok(node_symlink_node),
                )
                .expect("failed to create node_symlink entry");

            let mut follow_lookup_context = LookupContext::new(SymlinkMode::Follow);
            let node_symlink_resolution = second_subdir
                .lookup_child(
                    locked,
                    &current_task,
                    &mut follow_lookup_context,
                    "node_symlink".into(),
                )
                .expect("lookup with symlink chain failed");

            assert!(node_symlink_resolution.entry.node.ino == real_file_node.entry.node.ino);
        })
        .await;
    }
}