1use crate::mutable_state::{state_accessor, state_implementation};
6use crate::security;
7use crate::task::{CurrentTask, EventHandler, Kernel, Task, WaitCanceler, Waiter};
8use crate::time::utc;
9use crate::vfs::buffers::InputBuffer;
10use crate::vfs::fs_registry::FsRegistry;
11use crate::vfs::pseudo::dynamic_file::{DynamicFile, DynamicFileBuf, DynamicFileSource};
12use crate::vfs::pseudo::simple_file::SimpleFileNode;
13use crate::vfs::socket::{SocketAddress, SocketHandle, UnixSocket};
14use crate::vfs::{
15 CheckAccessReason, DirEntry, DirEntryHandle, FileHandle, FileObject, FileOps, FileSystemHandle,
16 FileSystemOptions, FileWriteGuardMode, FsNode, FsNodeHandle, FsNodeOps, FsStr, FsString,
17 PathBuilder, RenameFlags, SymlinkTarget, UnlinkKind, fileops_impl_dataless,
18 fileops_impl_delegate_read_and_seek, fileops_impl_nonseekable, fileops_impl_noop_sync,
19 fs_node_impl_not_dir,
20};
21use macro_rules_attribute::apply;
22use ref_cast::RefCast;
23use starnix_logging::log_warn;
24use starnix_rcu::RcuHashMap;
25use starnix_sync::{
26 BeforeFsNodeAppend, FileOpsCore, LockBefore, LockEqualOrBefore, Locked, Mutex, RwLock, Unlocked,
27};
28use starnix_types::ownership::WeakRef;
29use starnix_uapi::arc_key::{ArcKey, PtrKey, WeakKey};
30use starnix_uapi::auth::UserAndOrGroupId;
31use starnix_uapi::device_type::DeviceType;
32use starnix_uapi::errors::Errno;
33use starnix_uapi::file_mode::{AccessCheck, FileMode};
34use starnix_uapi::inotify_mask::InotifyMask;
35use starnix_uapi::mount_flags::MountFlags;
36use starnix_uapi::open_flags::OpenFlags;
37use starnix_uapi::unmount_flags::UnmountFlags;
38use starnix_uapi::vfs::{FdEvents, ResolveFlags};
39use starnix_uapi::{NAME_MAX, errno, error};
40use std::borrow::Borrow;
41use std::collections::HashSet;
42use std::fmt;
43use std::hash::{Hash, Hasher};
44use std::ops::{Deref, DerefMut};
45use std::sync::{Arc, Weak};
46
/// A mount namespace: a private tree of mounts rooted at `root_mount`.
#[derive(Debug)]
pub struct Namespace {
    /// The mount at the root of this namespace's mount tree.
    root_mount: MountHandle,

    /// Unique id for this namespace, allocated from the kernel's counter.
    pub id: u64,
}
57
impl Namespace {
    /// Creates a namespace whose root is a fresh mount of `fs` with no flags.
    pub fn new(fs: FileSystemHandle) -> Arc<Namespace> {
        Self::new_with_flags(fs, MountFlags::empty())
    }

    /// Creates a namespace whose root is a fresh mount of `fs` with `flags`.
    pub fn new_with_flags(fs: FileSystemHandle, flags: MountFlags) -> Arc<Namespace> {
        let kernel = fs.kernel.upgrade().expect("can't create namespace without a kernel");
        let root_mount = Mount::new(WhatToMount::Fs(fs), flags);
        Arc::new(Self { root_mount, id: kernel.get_next_namespace_id() })
    }

    /// Returns the root node of this namespace.
    pub fn root(&self) -> NamespaceNode {
        self.root_mount.root()
    }

    /// Deep-copies this namespace: the whole mount tree is cloned recursively
    /// and the copy gets a fresh namespace id.
    pub fn clone_namespace(&self) -> Arc<Namespace> {
        let kernel =
            self.root_mount.fs.kernel.upgrade().expect("can't clone namespace without a kernel");
        Arc::new(Self {
            root_mount: self.root_mount.clone_mount_recursive(),
            id: kernel.get_next_namespace_id(),
        })
    }

    /// Maps `node` to the equivalent position inside `new_ns`.
    ///
    /// Walks up from `node`'s mount recording each mountpoint entry, then
    /// replays that chain (outermost first) down `new_ns`'s mount tree.
    /// Returns `None` if `new_ns` lacks a corresponding submount at any step.
    pub fn translate_node(mut node: NamespaceNode, new_ns: &Namespace) -> Option<NamespaceNode> {
        // Collect the chain of mountpoint dir entries from `node` up to the root.
        let mut mountpoints = vec![];
        let mut mount = node.mount;
        while let Some(mountpoint) = mount.as_ref().and_then(|m| m.mountpoint()) {
            mountpoints.push(mountpoint.entry);
            mount = mountpoint.mount;
        }

        // Replay the chain down the new namespace's tree.
        let mut mount = Arc::clone(&new_ns.root_mount);
        for mountpoint in mountpoints.iter().rev() {
            let next_mount =
                mount.read().submounts.get(ArcKey::ref_cast(mountpoint))?.mount.clone();
            mount = next_mount;
        }
        node.mount = Some(mount).into();
        Some(node)
    }
}
104
// Exposes a namespace as an FsNode so it can be opened as a file
// (presumably for /proc/<pid>/ns/mnt — confirm against callers).
impl FsNodeOps for Arc<Namespace> {
    fs_node_impl_not_dir!();

    fn create_file_ops(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        _node: &FsNode,
        _current_task: &CurrentTask,
        _flags: OpenFlags,
    ) -> Result<Box<dyn FileOps>, Errno> {
        // The returned file keeps this namespace alive via the Arc clone.
        Ok(Box::new(MountNamespaceFile(self.clone())))
    }
}
118
/// File wrapper holding a strong reference to a mount namespace.
pub struct MountNamespaceFile(pub Arc<Namespace>);
120
// The file carries no data; it exists only to hold the namespace reference.
impl FileOps for MountNamespaceFile {
    fileops_impl_nonseekable!();
    fileops_impl_dataless!();
    fileops_impl_noop_sync!();
}
126
/// Marker cloned into each active client of a mount; `Mount::active_clients()`
/// derives the client count from this Arc's strong count.
type MountClientMarker = Arc<()>;
132
/// An instance of a filesystem mounted in a mount tree.
///
/// Submounts attach to directory entries below `root`; the mount's own
/// attachment point lives in `state.mountpoint`.
pub struct Mount {
    /// The dir entry this mount presents as its root. For a bind mount this
    /// need not be the root of `fs`.
    root: DirEntryHandle,
    /// Flags stored on the mount (subset: `MountFlags::STORED_ON_MOUNT`).
    flags: Mutex<MountFlags>,
    /// The filesystem backing this mount.
    fs: FileSystemHandle,

    /// Unique mount id from the kernel counter; reported in mountinfo.
    id: u64,

    /// See `MountClientMarker`: strong count tracks outstanding users.
    active_client_counter: MountClientMarker,

    /// Mutable state: mountpoint, submounts, peer-group membership.
    state: RwLock<MountState>,
}
/// Shared-ownership handle to a `Mount`.
type MountHandle = Arc<Mount>;
162
/// The mount (if any) a `NamespaceNode` belongs to; `None` for anonymous
/// (detached) nodes.
#[derive(Clone, Debug)]
pub struct MountInfo {
    handle: Option<MountHandle>,
}
168
169impl MountInfo {
170 pub fn detached() -> Self {
173 None.into()
174 }
175
176 pub fn flags(&self) -> MountFlags {
178 if let Some(handle) = &self.handle {
179 handle.flags()
180 } else {
181 MountFlags::NOATIME
183 }
184 }
185
186 pub fn check_readonly_filesystem(&self) -> Result<(), Errno> {
188 if self.flags().contains(MountFlags::RDONLY) {
189 return error!(EROFS);
190 }
191 Ok(())
192 }
193
194 pub fn check_noexec_filesystem(&self) -> Result<(), Errno> {
196 if self.flags().contains(MountFlags::NOEXEC) {
197 return error!(EACCES);
198 }
199 Ok(())
200 }
201}
202
// Transparent access to the underlying `Option<MountHandle>`.
impl Deref for MountInfo {
    type Target = Option<MountHandle>;

    fn deref(&self) -> &Self::Target {
        &self.handle
    }
}
210
impl DerefMut for MountInfo {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.handle
    }
}
216
217impl std::cmp::PartialEq for MountInfo {
218 fn eq(&self, other: &Self) -> bool {
219 self.handle.as_ref().map(Arc::as_ptr) == other.handle.as_ref().map(Arc::as_ptr)
220 }
221}
222
// Pointer equality is a proper equivalence relation, so `Eq` holds.
impl std::cmp::Eq for MountInfo {}
224
225impl Into<MountInfo> for Option<MountHandle> {
226 fn into(self) -> MountInfo {
227 MountInfo { handle: self }
228 }
229}
230
#[derive(Default)]
pub struct MountState {
    /// Where this mount is attached: the parent mount (weak, avoiding a
    /// parent<->child reference cycle) and the dir entry it covers. `None` for
    /// a namespace root or a detached mount.
    mountpoint: Option<(Weak<Mount>, DirEntryHandle)>,

    /// Mounts attached on top of dir entries within this mount.
    submounts: HashSet<Submount>,

    /// Shared-subtree peer group, paired with this mount's pointer key so
    /// membership can still be removed in `Drop`, when upgrading an `Arc` is
    /// no longer possible. Trailing underscore: use the accessor helpers.
    peer_group_: Option<(Arc<PeerGroup>, PtrKey<Mount>)>,
    /// Upstream ("master") peer group propagating into this mount, held
    /// weakly; same pointer-key scheme as `peer_group_`.
    upstream_: Option<(Weak<PeerGroup>, PtrKey<Mount>)>,
}
258
/// A set of mounts that propagate mount/unmount events to each other
/// (shared-subtree peer group).
#[derive(Default)]
struct PeerGroup {
    /// Group id, reported as `shared:<id>` in mountinfo.
    id: u64,
    state: RwLock<PeerGroupState>,
}
#[derive(Default)]
struct PeerGroupState {
    /// Member mounts of the group (weak: the group never keeps mounts alive).
    mounts: HashSet<WeakKey<Mount>>,
    /// Downstream ("slave") mounts receiving propagation from this group.
    downstream: HashSet<WeakKey<Mount>>,
}
272
/// What to attach when creating a mount.
pub enum WhatToMount {
    /// Mount the root of the given filesystem.
    Fs(FileSystemHandle),
    /// Bind mount: re-expose an existing namespace node.
    Bind(NamespaceNode),
}
277
impl Mount {
    /// Creates a mount from `what`: a fresh mount of a filesystem's root, or a
    /// bind mount rooted at an existing namespace node.
    pub fn new(what: WhatToMount, flags: MountFlags) -> MountHandle {
        match what {
            WhatToMount::Fs(fs) => Self::new_with_root(fs.root().clone(), flags),
            WhatToMount::Bind(node) => {
                let mount = node.mount.as_ref().expect("can't bind mount from an anonymous node");
                mount.clone_mount(&node.entry, flags)
            }
        }
    }

    /// Creates a mount rooted at `root`. Panics if `flags` contains anything
    /// outside `STORED_ON_MOUNT` — callers must strip other flags first.
    fn new_with_root(root: DirEntryHandle, flags: MountFlags) -> MountHandle {
        let known_flags = MountFlags::STORED_ON_MOUNT;
        assert!(
            !flags.intersects(!known_flags),
            "mount created with extra flags {:?}",
            flags - known_flags
        );
        let fs = root.node.fs();
        let kernel = fs.kernel.upgrade().expect("can't create mount without kernel");
        Arc::new(Self {
            id: kernel.get_next_mount_id(),
            flags: Mutex::new(flags),
            root,
            active_client_counter: Default::default(),
            fs,
            state: Default::default(),
        })
    }

    /// A `NamespaceNode` referring to the root of this mount.
    pub fn root(self: &MountHandle) -> NamespaceNode {
        NamespaceNode::new(Arc::clone(self), Arc::clone(&self.root))
    }

    /// Whether a submount is attached on top of `dir_entry`.
    pub fn has_submount(&self, dir_entry: &DirEntryHandle) -> bool {
        self.state.read().submounts.contains(ArcKey::ref_cast(dir_entry))
    }

    /// The node this mount is attached to, if it is still attached (the parent
    /// mount is held weakly and may be gone).
    fn mountpoint(&self) -> Option<NamespaceNode> {
        let state = self.state.read();
        let (mount, entry) = state.mountpoint.as_ref()?;
        Some(NamespaceNode::new(mount.upgrade()?, entry.clone()))
    }

    /// Creates a new mount on top of `dir` and, if this mount is shared,
    /// propagates a recursive clone of it to every peer.
    fn create_submount(
        self: &MountHandle,
        dir: &DirEntryHandle,
        what: WhatToMount,
        flags: MountFlags,
    ) {
        // Snapshot propagation targets under a read lock, before any writes.
        let peers = {
            let state = self.state.read();
            state.peer_group().map(|g| g.copy_propagation_targets()).unwrap_or_default()
        };

        let mount = Mount::new(what, flags);

        // A mount created under a shared mount becomes shared itself.
        if self.read().is_shared() {
            mount.write().make_shared();
        }

        for peer in peers {
            // The propagation targets include this mount; skip it.
            if Arc::ptr_eq(self, &peer) {
                continue;
            }
            let clone = mount.clone_mount_recursive();
            peer.write().add_submount_internal(dir, clone);
        }

        self.write().add_submount_internal(dir, mount)
    }

    /// Removes the submount keyed by `mount_hash_key`, propagating the removal
    /// to peers when `propagate` is set.
    fn remove_submount(
        self: &MountHandle,
        mount_hash_key: &ArcKey<DirEntry>,
        propagate: bool,
    ) -> Result<(), Errno> {
        if propagate {
            let peers = {
                let state = self.state.read();
                state.peer_group().map(|g| g.copy_propagation_targets()).unwrap_or_default()
            };

            for peer in peers {
                if Arc::ptr_eq(self, &peer) {
                    continue;
                }
                // A peer may not have a matching submount; ignore failures.
                let _ = peer.write().remove_submount_internal(mount_hash_key);
            }
        }

        self.write().remove_submount_internal(mount_hash_key)
    }

    /// Clones this mount with a (possibly deeper) root of `new_root`.
    /// With `MountFlags::REC`, submounts are cloned recursively too, and peer
    /// group membership is copied either way.
    fn clone_mount(
        self: &MountHandle,
        new_root: &DirEntryHandle,
        flags: MountFlags,
    ) -> MountHandle {
        assert!(new_root.is_descendant_of(&self.root));
        let clone = Self::new_with_root(Arc::clone(new_root), self.flags());

        if flags.contains(MountFlags::REC) {
            // Clone the children first, then attach them under a single write
            // lock on the new mount.
            let mut submounts = vec![];
            for Submount { dir, mount } in &self.state.read().submounts {
                submounts.push((dir.clone(), mount.clone_mount_recursive()));
            }
            let mut clone_state = clone.write();
            for (dir, submount) in submounts {
                clone_state.add_submount_internal(&dir, submount);
            }
        }

        // The clone joins the original's peer group, if any.
        let peer_group = self.state.read().peer_group().map(Arc::clone);
        if let Some(peer_group) = peer_group {
            clone.write().set_peer_group(peer_group);
        }

        clone
    }

    /// Recursively clones this mount and all of its submounts.
    fn clone_mount_recursive(self: &MountHandle) -> MountHandle {
        self.clone_mount(&self.root, MountFlags::REC)
    }

    /// Applies a propagation change (MS_SHARED / MS_PRIVATE / MS_SLAVE),
    /// optionally recursing into submounts. Unknown flags are logged and
    /// ignored.
    pub fn change_propagation(self: &MountHandle, flag: MountFlags, recursive: bool) {
        let mut state = self.write();
        match flag {
            MountFlags::SHARED => state.make_shared(),
            MountFlags::PRIVATE => state.make_private(),
            MountFlags::DOWNSTREAM => state.make_downstream(),
            _ => {
                log_warn!("mount propagation {:?}", flag);
                return;
            }
        }

        if recursive {
            for submount in &state.submounts {
                submount.mount.change_propagation(flag, recursive);
            }
        }
    }

    fn flags(&self) -> MountFlags {
        *self.flags.lock()
    }

    /// Replaces the stored mount flags (remount). When the caller specifies no
    /// atime flags, the existing atime flags are preserved; STRICTATIME is
    /// never stored — it is represented by the absence of the other atime
    /// flags.
    pub fn update_flags(self: &MountHandle, mut flags: MountFlags) {
        flags &= MountFlags::STORED_ON_MOUNT;
        let atime_flags = MountFlags::NOATIME
            | MountFlags::NODIRATIME
            | MountFlags::RELATIME
            | MountFlags::STRICTATIME;
        let mut stored_flags = self.flags.lock();
        if !flags.intersects(atime_flags) {
            flags |= *stored_flags & atime_flags;
        }
        flags &= !MountFlags::STRICTATIME;
        *stored_flags = flags;
    }

    /// Number of outstanding clients of this mount: the marker Arc's strong
    /// count minus the reference held by the mount itself.
    fn active_clients(&self) -> usize {
        Arc::strong_count(&self.active_client_counter) - 1
    }

    /// Detaches this mount from its parent. Without MNT_DETACH, fails with
    /// EBUSY while the mount has active clients or submounts; EINVAL if the
    /// mount is not attached anywhere.
    pub fn unmount(&self, flags: UnmountFlags, propagate: bool) -> Result<(), Errno> {
        if !flags.contains(UnmountFlags::DETACH) {
            if self.active_clients() > 0 || !self.state.read().submounts.is_empty() {
                return error!(EBUSY);
            }
        }
        let mountpoint = self.mountpoint().ok_or_else(|| errno!(EINVAL))?;
        let parent_mount = mountpoint.mount.as_ref().expect("a mountpoint must be part of a mount");
        parent_mount.remove_submount(mountpoint.mount_hash_key(), propagate)
    }

    /// Security (LSM) state of the backing filesystem.
    pub fn security_state(&self) -> &security::FileSystemState {
        &self.fs.security_state
    }

    /// Name of the backing filesystem type.
    pub fn fs_name(&self) -> &'static FsStr {
        self.fs.name()
    }

    // Generates `read()` / `write()` accessors for `state`.
    state_accessor!(Mount, state, Arc<Mount>);
}
509
impl MountState {
    /// The peer group this mount belongs to, if it is shared.
    fn peer_group(&self) -> Option<&Arc<PeerGroup>> {
        let (group, _) = self.peer_group_.as_ref()?;
        Some(group)
    }

    /// Leaves the current peer group, if any, returning it.
    ///
    /// If this mount also had an upstream group, the upstream link is handed
    /// to an arbitrary remaining member of the old group so propagation from
    /// upstream keeps reaching it.
    fn take_from_peer_group(&mut self) -> Option<Arc<PeerGroup>> {
        let (old_group, old_mount) = self.peer_group_.take()?;
        old_group.remove(old_mount);
        if let Some(upstream) = self.take_from_upstream() {
            let next_mount =
                old_group.state.read().mounts.iter().next().map(|w| w.0.upgrade().unwrap());
            if let Some(next_mount) = next_mount {
                next_mount.write().set_upstream(upstream);
            }
        }
        Some(old_group)
    }

    /// The upstream peer group, if it is still alive.
    fn upstream(&self) -> Option<Arc<PeerGroup>> {
        self.upstream_.as_ref().and_then(|g| g.0.upgrade())
    }

    /// Detaches from the upstream group, returning it if still alive.
    fn take_from_upstream(&mut self) -> Option<Arc<PeerGroup>> {
        let (old_upstream, old_mount) = self.upstream_.take()?;
        // The upstream group may already have been dropped.
        let old_upstream = old_upstream.upgrade()?;
        old_upstream.remove_downstream(old_mount);
        Some(old_upstream)
    }
}
547
// Methods here get a `self.base: &Arc<Mount>` accessor via the
// `state_implementation!` macro, i.e. they run with the state write lock held
// and can reach back to the owning mount.
#[apply(state_implementation!)]
impl MountState<Base = Mount, BaseType = Arc<Mount>> {
    /// Attaches `mount` on top of `dir`.
    ///
    /// If a mount was already attached there, it is re-parented onto the root
    /// of the new mount (stacked mounts).
    fn add_submount_internal(&mut self, dir: &DirEntryHandle, mount: MountHandle) {
        // `dir` must live inside this mount; silently ignore otherwise.
        if !dir.is_descendant_of(&self.base.root) {
            return;
        }

        let submount = mount.fs.kernel.upgrade().unwrap().mounts.register_mount(dir, mount.clone());
        let old_mountpoint =
            mount.state.write().mountpoint.replace((Arc::downgrade(self.base), Arc::clone(dir)));
        assert!(old_mountpoint.is_none(), "add_submount can only take a newly created mount");
        let old_mount = self.submounts.replace(submount);

        // Move any previously attached mount under the new mount's root.
        if let Some(mut old_mount) = old_mount {
            old_mount.mount.write().mountpoint = Some((Arc::downgrade(&mount), Arc::clone(dir)));
            old_mount.dir = ArcKey(mount.root.clone());
            mount.write().submounts.insert(old_mount);
        }
    }

    /// Detaches the submount keyed by `mount_hash_key`; EINVAL if absent.
    fn remove_submount_internal(&mut self, mount_hash_key: &ArcKey<DirEntry>) -> Result<(), Errno> {
        if self.submounts.remove(mount_hash_key) { Ok(()) } else { error!(EINVAL) }
    }

    /// Joins `group`, leaving any previous peer group first.
    fn set_peer_group(&mut self, group: Arc<PeerGroup>) {
        self.take_from_peer_group();
        group.add(self.base);
        self.peer_group_ = Some((group, Arc::as_ptr(self.base).into()));
    }

    /// Makes `group` this mount's upstream, leaving any previous upstream.
    fn set_upstream(&mut self, group: Arc<PeerGroup>) {
        self.take_from_upstream();
        group.add_downstream(self.base);
        self.upstream_ = Some((Arc::downgrade(&group), Arc::as_ptr(self.base).into()));
    }

    /// Whether this mount is shared (member of a peer group).
    pub fn is_shared(&self) -> bool {
        self.peer_group().is_some()
    }

    /// MS_SHARED: put the mount in a fresh peer group (no-op if shared).
    pub fn make_shared(&mut self) {
        if self.is_shared() {
            return;
        }
        let kernel =
            self.base.fs.kernel.upgrade().expect("can't create new peer group without kernel");
        self.set_peer_group(PeerGroup::new(kernel.get_next_peer_group_id()));
    }

    /// MS_PRIVATE: leave both the peer group and the upstream group.
    pub fn make_private(&mut self) {
        self.take_from_peer_group();
        self.take_from_upstream();
    }

    /// MS_SLAVE: stop being a peer and instead receive propagation from the
    /// former peer group.
    pub fn make_downstream(&mut self) {
        if let Some(peer_group) = self.take_from_peer_group() {
            self.set_upstream(peer_group);
        }
    }
}
624
impl PeerGroup {
    fn new(id: u64) -> Arc<Self> {
        Arc::new(Self { id, state: Default::default() })
    }

    /// Adds a member mount (weak: the group never keeps mounts alive).
    fn add(&self, mount: &Arc<Mount>) {
        self.state.write().mounts.insert(WeakKey::from(mount));
    }

    /// Removes a member by pointer key; works even after the mount's Arc has
    /// been dropped (used from `Drop for Mount`).
    fn remove(&self, mount: PtrKey<Mount>) {
        self.state.write().mounts.remove(&mount);
    }

    /// Registers a downstream ("slave") mount receiving propagation.
    fn add_downstream(&self, mount: &Arc<Mount>) {
        self.state.write().downstream.insert(WeakKey::from(mount));
    }

    fn remove_downstream(&self, mount: PtrKey<Mount>) {
        self.state.write().downstream.remove(&mount);
    }

    /// All mounts that should receive an event propagated through this group:
    /// the members plus, transitively, everything downstream.
    fn copy_propagation_targets(&self) -> Vec<MountHandle> {
        let mut buf = vec![];
        self.collect_propagation_targets(&mut buf);
        buf
    }

    fn collect_propagation_targets(&self, buf: &mut Vec<MountHandle>) {
        // Snapshot the downstream list so the read lock is released before
        // recursing (recursion takes other mounts' locks).
        let downstream_mounts: Vec<_> = {
            let state = self.state.read();
            buf.extend(state.mounts.iter().filter_map(|m| m.0.upgrade()));
            state.downstream.iter().filter_map(|m| m.0.upgrade()).collect()
        };
        for mount in downstream_mounts {
            // A downstream mount that is itself shared fans out to its group.
            let peer_group = mount.read().peer_group().map(Arc::clone);
            match peer_group {
                Some(group) => group.collect_propagation_targets(buf),
                None => buf.push(mount),
            }
        }
    }
}
667
impl Drop for Mount {
    fn drop(&mut self) {
        // Exclusive access here, so `get_mut` avoids taking the lock.
        let state = self.state.get_mut();
        // Remove this mount's pointer keys from shared-subtree bookkeeping so
        // peer groups don't retain stale entries.
        state.take_from_peer_group();
        state.take_from_upstream();
    }
}
675
676impl fmt::Debug for Mount {
677 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
678 let state = self.state.read();
679 f.debug_struct("Mount")
680 .field("id", &(self as *const Mount))
681 .field("root", &self.root)
682 .field("mountpoint", &state.mountpoint)
683 .field("submounts", &state.submounts)
684 .finish()
685 }
686}
687
impl Kernel {
    /// Allocates the next unique mount id.
    pub fn get_next_mount_id(&self) -> u64 {
        self.next_mount_id.next()
    }

    /// Allocates the next unique peer-group id.
    pub fn get_next_peer_group_id(&self) -> u64 {
        self.next_peer_group_id.next()
    }

    /// Allocates the next unique mount-namespace id.
    pub fn get_next_namespace_id(&self) -> u64 {
        self.next_namespace_id.next()
    }
}
701
impl CurrentTask {
    /// Creates a filesystem of type `fs_type` via the kernel's filesystem
    /// registry. Returns ENODEV when the type is not registered.
    pub fn create_filesystem(
        &self,
        locked: &mut Locked<Unlocked>,
        fs_type: &FsStr,
        options: FileSystemOptions,
    ) -> Result<FileSystemHandle, Errno> {
        // `create` returns None for an unknown fs type; `?` then unwraps the
        // registry's own Result.
        self.kernel()
            .expando
            .get::<FsRegistry>()
            .create(locked, self, fs_type, options)
            .ok_or_else(|| errno!(ENODEV, fs_type))?
    }
}
722
/// Writes the mount's flag string followed by any security (LSM) mount options
/// to `sink`; shared by the /proc/mounts and mountinfo generators.
fn write_mount_info(task: &Task, sink: &mut DynamicFileBuf, mount: &Mount) -> Result<(), Errno> {
    write!(sink, "{}", mount.flags())?;
    security::sb_show_options(&task.kernel(), sink, &mount)
}
728
/// Content source for /proc/<pid>/mounts, bound to the task whose view it reports.
struct ProcMountsFileSource(WeakRef<Task>);
730
impl DynamicFileSource for ProcMountsFileSource {
    /// Emits one "<source> <mountpoint> <fstype> <flags> 0 0" line per mount
    /// visible from the task's root.
    fn generate(
        &self,
        _current_task: &CurrentTask,
        sink: &mut DynamicFileBuf,
    ) -> Result<(), Errno> {
        let task = Task::from_weak(&self.0)?;
        let root = task.fs().root();
        let ns = task.fs().namespace();
        for_each_mount(&ns.root_mount, &mut |mount| {
            // A mount with no mountpoint (the namespace root) is reported at
            // its own root.
            let mountpoint = mount.mountpoint().unwrap_or_else(|| mount.root());
            // Skip mounts not reachable from the task's (possibly chrooted) root.
            if !mountpoint.is_descendant_of(&root) {
                return Ok(());
            }
            write!(
                sink,
                "{} {} {} ",
                mount.fs.options.source_for_display(),
                mountpoint.path(&task),
                mount.fs.name(),
            )?;
            write_mount_info(&task, sink, mount)?;
            // Trailing dump/pass fields are always "0 0", as on Linux.
            writeln!(sink, " 0 0")?;
            Ok(())
        })?;
        Ok(())
    }
}
763
/// The open-file object backing /proc/<pid>/mounts.
pub struct ProcMountsFile {
    dynamic_file: DynamicFile<ProcMountsFileSource>,
}
767
impl ProcMountsFile {
    /// Node whose every open creates a fresh snapshot file for `task`.
    pub fn new_node(task: WeakRef<Task>) -> impl FsNodeOps {
        SimpleFileNode::new(move |_, _| {
            Ok(Self { dynamic_file: DynamicFile::new(ProcMountsFileSource(task.clone())) })
        })
    }
}
775
impl FileOps for ProcMountsFile {
    fileops_impl_delegate_read_and_seek!(self, self.dynamic_file);
    fileops_impl_noop_sync!();

    // The file is read-only; writes fail like on Linux.
    fn write(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        _file: &FileObject,
        _current_task: &CurrentTask,
        _offset: usize,
        _data: &mut dyn InputBuffer,
    ) -> Result<usize, Errno> {
        error!(ENOSYS)
    }

    // Linux supports polling /proc/mounts for mount-table changes; here a
    // fake wait is returned and no events are ever signalled (change
    // notification is not implemented).
    fn wait_async(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        _file: &FileObject,
        _current_task: &CurrentTask,
        waiter: &Waiter,
        _events: FdEvents,
        _handler: EventHandler,
    ) -> Option<WaitCanceler> {
        Some(waiter.fake_wait())
    }

    fn query_events(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        _file: &FileObject,
        _current_task: &CurrentTask,
    ) -> Result<FdEvents, Errno> {
        Ok(FdEvents::empty())
    }
}
814
/// Content source for /proc/<pid>/mountinfo, bound to the reported task.
#[derive(Clone)]
pub struct ProcMountinfoFile(WeakRef<Task>);
impl ProcMountinfoFile {
    /// Node backing /proc/<pid>/mountinfo for `task`.
    pub fn new_node(task: WeakRef<Task>) -> impl FsNodeOps {
        DynamicFile::new_node(Self(task))
    }
}
impl DynamicFileSource for ProcMountinfoFile {
    /// Emits one mountinfo-style line per mount visible from the task's root:
    /// "<id> <parent-id> <dev> <fs-root> <mountpoint> <flags> [shared:N]
    /// [master:N] - <fstype> <source> <super-options>".
    fn generate(
        &self,
        _current_task: &CurrentTask,
        sink: &mut DynamicFileBuf,
    ) -> Result<(), Errno> {
        // Path of `dir` relative to the root of its *filesystem* (not the
        // namespace), built by walking dir-entry parents.
        fn path_from_fs_root(dir: &DirEntryHandle) -> FsString {
            let mut path = PathBuilder::new();
            if dir.read().is_dead() {
                // NOTE(review): Linux marks a deleted mount root with a
                // "//deleted" *suffix*; here a "/deleted" element is prepended
                // — confirm this is the intended format.
                path.prepend_element("/deleted".into());
            }
            let mut current = dir.clone();
            loop {
                let parent = {
                    let state = current.read();
                    // The fs root itself contributes no path element.
                    if state.parent().is_some() {
                        path.prepend_element(state.local_name());
                    }
                    state.parent().clone()
                };
                if let Some(next) = parent {
                    current = next
                } else {
                    break;
                }
            }
            path.build_absolute()
        }

        let task = Task::from_weak(&self.0)?;
        let root = task.fs().root();
        let ns = task.fs().namespace();
        for_each_mount(&ns.root_mount, &mut |mount| {
            // The namespace root has no mountpoint; report it at its own root.
            let mountpoint = mount.mountpoint().unwrap_or_else(|| mount.root());
            if !mountpoint.is_descendant_of(&root) {
                return Ok(());
            }
            // A mountpoint is always inside some mount, so unwrap is safe here.
            let parent = mountpoint.mount.as_ref().unwrap();
            write!(
                sink,
                "{} {} {} {} {} ",
                mount.id,
                parent.id,
                mount.root.node.fs().dev_id,
                path_from_fs_root(&mount.root),
                mountpoint.path(&task),
            )?;
            write_mount_info(&task, sink, mount)?;
            // Optional shared-subtree fields.
            if let Some(peer_group) = mount.read().peer_group() {
                write!(sink, " shared:{}", peer_group.id)?;
            }
            if let Some(upstream) = mount.read().upstream() {
                write!(sink, " master:{}", upstream.id)?;
            }
            writeln!(
                sink,
                " - {} {} {}",
                mount.fs.name(),
                mount.fs.options.source_for_display(),
                mount.fs.options.flags,
            )?;
            Ok(())
        })?;
        Ok(())
    }
}
895
896fn for_each_mount<E>(
897 mount: &MountHandle,
898 callback: &mut impl FnMut(&MountHandle) -> Result<(), E>,
899) -> Result<(), E> {
900 callback(mount)?;
901 let submounts: Vec<_> = mount.read().submounts.iter().map(|s| s.mount.clone()).collect();
904 for submount in submounts {
905 for_each_mount(&submount, callback)?;
906 }
907 Ok(())
908}
909
/// Whether the final component of a lookup follows symlinks.
#[derive(Default, PartialEq, Eq, Copy, Clone, Debug)]
pub enum SymlinkMode {
    /// Follow a symlink at the end of the path (the default).
    #[default]
    Follow,

    /// Do not follow a symlink at the end of the path (O_NOFOLLOW).
    NoFollow,
}
920
/// Maximum symlink traversals in one resolution; 40 matches Linux's limit.
pub const MAX_SYMLINK_FOLLOWS: u8 = 40;
923
/// Per-lookup state threaded through path resolution.
pub struct LookupContext {
    /// How to treat a symlink in the final path component.
    pub symlink_mode: SymlinkMode,

    /// Remaining symlink-follow budget; ELOOP once exhausted.
    pub remaining_follows: u8,

    /// Set when the path requires a directory (e.g. trailing slash).
    pub must_be_directory: bool,

    /// openat2-style RESOLVE_* restrictions.
    pub resolve_flags: ResolveFlags,

    /// Where resolution is anchored (see `ResolveBase`).
    pub resolve_base: ResolveBase,
}
954
/// Anchoring mode for path resolution, mirroring openat2 semantics.
#[derive(Clone, Eq, PartialEq)]
pub enum ResolveBase {
    /// No restriction.
    None,

    /// RESOLVE_BENEATH: resolution may not escape the given node.
    Beneath(NamespaceNode),

    /// RESOLVE_IN_ROOT: the given node acts as "/" for this lookup.
    InRoot(NamespaceNode),
}
967
968impl LookupContext {
969 pub fn new(symlink_mode: SymlinkMode) -> LookupContext {
970 LookupContext {
971 symlink_mode,
972 remaining_follows: MAX_SYMLINK_FOLLOWS,
973 must_be_directory: false,
974 resolve_flags: ResolveFlags::empty(),
975 resolve_base: ResolveBase::None,
976 }
977 }
978
979 pub fn with(&self, symlink_mode: SymlinkMode) -> LookupContext {
980 LookupContext { symlink_mode, resolve_base: self.resolve_base.clone(), ..*self }
981 }
982
983 pub fn update_for_path(&mut self, path: &FsStr) {
984 if path.last() == Some(&b'/') {
985 self.must_be_directory = true;
988 self.symlink_mode = SymlinkMode::Follow;
991 }
992 }
993}
994
// The default lookup follows symlinks.
impl Default for LookupContext {
    fn default() -> Self {
        LookupContext::new(SymlinkMode::Follow)
    }
}
1000
/// A computed path, tagged with whether it is reachable from the chosen root.
pub enum PathWithReachability {
    /// The path is reachable from the supplied root.
    Reachable(FsString),

    /// The path escapes the supplied root (e.g. outside a chroot).
    Unreachable(FsString),
}
1009
1010impl PathWithReachability {
1011 pub fn into_path(self) -> FsString {
1012 match self {
1013 PathWithReachability::Reachable(path) => path,
1014 PathWithReachability::Unreachable(path) => path,
1015 }
1016 }
1017}
1018
/// A directory entry as seen through a mount namespace: the entry plus the
/// mount it was reached through (detached for anonymous nodes).
#[derive(Clone)]
pub struct NamespaceNode {
    /// The mount this node was resolved through; `None` for anonymous nodes.
    pub mount: MountInfo,

    /// The underlying directory entry.
    pub entry: DirEntryHandle,
}
1037
1038impl NamespaceNode {
    /// A node for `entry` as seen through `mount`.
    pub fn new(mount: MountHandle, entry: DirEntryHandle) -> Self {
        Self { mount: Some(mount).into(), entry }
    }
1042
    /// A node not attached to any mount (e.g. for anonymous pipes/sockets).
    pub fn new_anonymous(entry: DirEntryHandle) -> Self {
        Self { mount: None.into(), entry }
    }
1047
    /// An anonymous node wrapping a brand-new unrooted dir entry for `node`.
    /// Security labeling is best-effort: its failure is deliberately ignored.
    pub fn new_anonymous_unrooted(current_task: &CurrentTask, node: FsNodeHandle) -> Self {
        let dir_entry = DirEntry::new_unrooted(node);
        let _ = security::fs_node_init_with_dentry_no_xattr(current_task, &dir_entry);
        Self::new_anonymous(dir_entry)
    }
1055
    /// Opens this node with `flags`, applying permission checks per
    /// `access_check`, and wraps the resulting ops in a `FileObject`.
    pub fn open(
        &self,
        locked: &mut Locked<Unlocked>,
        current_task: &CurrentTask,
        flags: OpenFlags,
        access_check: AccessCheck,
    ) -> Result<FileHandle, Errno> {
        let ops = self.entry.node.open(locked, current_task, self, flags, access_check)?;
        FileObject::new(locked, current_task, ops, self.clone(), flags)
    }
1071
    /// Creates (or, without O_EXCL, reuses) a child node for an open(O_CREAT)
    /// call. `mode` has the task's umask applied; ownership comes from the
    /// task's filesystem credentials.
    pub fn open_create_node<L>(
        &self,
        locked: &mut Locked<L>,
        current_task: &CurrentTask,
        name: &FsStr,
        mode: FileMode,
        dev: DeviceType,
        flags: OpenFlags,
    ) -> Result<NamespaceNode, Errno>
    where
        L: LockEqualOrBefore<FileOpsCore>,
    {
        let owner = current_task.current_fscred();
        let mode = current_task.fs().apply_umask(mode);
        let create_fn =
            |locked: &mut Locked<L>, dir: &FsNodeHandle, mount: &MountInfo, name: &_| {
                dir.create_node(locked, current_task, mount, name, mode, dev, owner)
            };
        // O_EXCL requires a fresh entry; otherwise an existing one is reused.
        let entry = if flags.contains(OpenFlags::EXCL) {
            self.entry.create_entry(locked, current_task, &self.mount, name, create_fn)
        } else {
            self.entry.get_or_create_entry(locked, current_task, &self.mount, name, create_fn)
        }?;
        Ok(self.with_new_entry(entry))
    }
1102
    /// Converts into an `ActiveNamespaceNode`, which counts as an active
    /// client of the mount.
    pub fn into_active(self) -> ActiveNamespaceNode {
        ActiveNamespaceNode::new(self)
    }
1106
    /// Converts into a `FileMapping` (via the active form), optionally taking
    /// a write guard of the given mode.
    pub fn into_mapping(self, mode: Option<FileWriteGuardMode>) -> Result<Arc<FileMapping>, Errno> {
        self.into_active().into_mapping(mode)
    }
1110
    /// Creates a child node (mknod-style): `mode` has the umask applied, `dev`
    /// matters only for device nodes, ownership from the task's fs creds.
    pub fn create_node<L>(
        &self,
        locked: &mut Locked<L>,
        current_task: &CurrentTask,
        name: &FsStr,
        mode: FileMode,
        dev: DeviceType,
    ) -> Result<NamespaceNode, Errno>
    where
        L: LockEqualOrBefore<FileOpsCore>,
    {
        let owner = current_task.current_fscred();
        let mode = current_task.fs().apply_umask(mode);
        let entry = self.entry.create_entry(
            locked,
            current_task,
            &self.mount,
            name,
            |locked, dir, mount, name| {
                dir.create_node(locked, current_task, mount, name, mode, dev, owner)
            },
        )?;
        Ok(self.with_new_entry(entry))
    }
1140
    /// Creates a symlink named `name` pointing at `target` in this directory.
    pub fn create_symlink<L>(
        &self,
        locked: &mut Locked<L>,
        current_task: &CurrentTask,
        name: &FsStr,
        target: &FsStr,
    ) -> Result<NamespaceNode, Errno>
    where
        L: LockEqualOrBefore<FileOpsCore>,
    {
        let owner = current_task.current_fscred();
        let entry = self.entry.create_entry(
            locked,
            current_task,
            &self.mount,
            name,
            |locked, dir, mount, name| {
                dir.create_symlink(locked, current_task, mount, name, target, owner)
            },
        )?;
        Ok(self.with_new_entry(entry))
    }
1166
    /// Creates an unnamed temporary file in this directory (O_TMPFILE).
    /// `mode` has the umask applied; ownership from the task's fs creds.
    pub fn create_tmpfile<L>(
        &self,
        locked: &mut Locked<L>,
        current_task: &CurrentTask,
        mode: FileMode,
        flags: OpenFlags,
    ) -> Result<NamespaceNode, Errno>
    where
        L: LockEqualOrBefore<FileOpsCore>,
    {
        let owner = current_task.current_fscred();
        let mode = current_task.fs().apply_umask(mode);
        Ok(self.with_new_entry(self.entry.create_tmpfile(
            locked,
            current_task,
            &self.mount,
            mode,
            owner,
            flags,
        )?))
    }
1193
    /// Creates a hard link named `name` to the existing node `child`.
    pub fn link<L>(
        &self,
        locked: &mut Locked<L>,
        current_task: &CurrentTask,
        name: &FsStr,
        child: &FsNodeHandle,
    ) -> Result<NamespaceNode, Errno>
    where
        L: LockEqualOrBefore<FileOpsCore>,
    {
        let dir_entry = self.entry.create_entry(
            locked,
            current_task,
            &self.mount,
            name,
            |locked, dir, mount, name| dir.link(locked, current_task, mount, name, child),
        )?;
        Ok(self.with_new_entry(dir_entry))
    }
1213
    /// Creates a filesystem node for `socket` and binds the socket to it
    /// (AF_UNIX path binding). Only Unix-domain sockets are supported;
    /// anything else fails with ENOTSUP.
    pub fn bind_socket<L>(
        &self,
        locked: &mut Locked<L>,
        current_task: &CurrentTask,
        name: &FsStr,
        socket: SocketHandle,
        socket_address: SocketAddress,
        mode: FileMode,
    ) -> Result<NamespaceNode, Errno>
    where
        L: LockEqualOrBefore<FileOpsCore>,
    {
        let dir_entry = self.entry.create_entry(
            locked,
            current_task,
            &self.mount,
            name,
            |locked, dir, mount, name| {
                let node = dir.create_node(
                    locked,
                    current_task,
                    mount,
                    name,
                    mode,
                    DeviceType::NONE,
                    current_task.current_fscred(),
                )?;
                if let Some(unix_socket) = socket.downcast_socket::<UnixSocket>() {
                    unix_socket.bind_socket_to_node(&socket, socket_address, &node)?;
                } else {
                    return error!(ENOTSUP);
                }
                Ok(node)
            },
        )?;
        Ok(self.with_new_entry(dir_entry))
    }
1251
    /// Removes the child `name` (unlink/rmdir). Reserved names get the
    /// Linux-compatible errors: rmdir("..") -> ENOTEMPTY, rmdir of the root
    /// -> EBUSY, rmdir(".") -> EINVAL, unlink of "."/".." -> ENOTDIR.
    pub fn unlink<L>(
        &self,
        locked: &mut Locked<L>,
        current_task: &CurrentTask,
        name: &FsStr,
        kind: UnlinkKind,
        must_be_directory: bool,
    ) -> Result<(), Errno>
    where
        L: LockEqualOrBefore<FileOpsCore>,
    {
        if DirEntry::is_reserved_name(name) {
            match kind {
                UnlinkKind::Directory => {
                    if name == ".." {
                        error!(ENOTEMPTY)
                    } else if self.parent().is_none() {
                        // "." at the root of the hierarchy.
                        error!(EBUSY)
                    } else {
                        error!(EINVAL)
                    }
                }
                UnlinkKind::NonDirectory => error!(ENOTDIR),
            }
        } else {
            self.entry.unlink(locked, current_task, &self.mount, name, kind, must_be_directory)
        }
    }
1281
    /// Resolves one path component `basename` relative to this directory.
    ///
    /// Handles "." / "" (self), ".." (clamped at the task root or the
    /// RESOLVE_BENEATH / RESOLVE_IN_ROOT anchor), symlink following (budgeted
    /// by `context.remaining_follows`), mount crossing, and the RESOLVE_*
    /// restriction flags.
    pub fn lookup_child<L>(
        &self,
        locked: &mut Locked<L>,
        current_task: &CurrentTask,
        context: &mut LookupContext,
        basename: &FsStr,
    ) -> Result<NamespaceNode, Errno>
    where
        L: LockEqualOrBefore<FileOpsCore>,
    {
        if !self.entry.node.is_dir() {
            return error!(ENOTDIR);
        }

        if basename.len() > NAME_MAX as usize {
            return error!(ENAMETOOLONG);
        }

        let child = if basename.is_empty() || basename == "." {
            self.clone()
        } else if basename == ".." {
            // Pick the root that ".." may not escape.
            let root = match &context.resolve_base {
                ResolveBase::None => current_task.fs().root(),
                ResolveBase::Beneath(node) => {
                    // ".." at the anchor itself escapes RESOLVE_BENEATH.
                    if *self == *node {
                        return error!(EXDEV);
                    }
                    current_task.fs().root()
                }
                ResolveBase::InRoot(root) => root.clone(),
            };

            // ".." at the root stays at the root (chroot semantics).
            if *self == root { root } else { self.parent().unwrap_or_else(|| self.clone()) }
        } else {
            let mut child = self.with_new_entry(self.entry.component_lookup(
                locked,
                current_task,
                &self.mount,
                basename,
            )?);
            // Iteratively resolve symlinks within the follow budget.
            while child.entry.node.is_lnk() {
                match context.symlink_mode {
                    SymlinkMode::NoFollow => {
                        break;
                    }
                    SymlinkMode::Follow => {
                        if context.remaining_follows == 0
                            || context.resolve_flags.contains(ResolveFlags::NO_SYMLINKS)
                        {
                            return error!(ELOOP);
                        }
                        context.remaining_follows -= 1;
                        child = match child.readlink(locked, current_task)? {
                            SymlinkTarget::Path(link_target) => {
                                // NOTE(review): indexing assumes a non-empty
                                // symlink target — confirm readlink guarantees
                                // this.
                                let link_directory = if link_target[0] == b'/' {
                                    // Absolute target: restart at the root
                                    // (forbidden under RESOLVE_BENEATH).
                                    match &context.resolve_base {
                                        ResolveBase::None => current_task.fs().root(),
                                        ResolveBase::Beneath(_) => return error!(EXDEV),
                                        ResolveBase::InRoot(root) => root.clone(),
                                    }
                                } else {
                                    child.parent().unwrap_or(child)
                                };
                                current_task.lookup_path(
                                    locked,
                                    context,
                                    link_directory,
                                    link_target.as_ref(),
                                )?
                            }
                            SymlinkTarget::Node(node) => {
                                // Magic links (e.g. /proc fds) under
                                // RESOLVE_NO_MAGICLINKS fail with ELOOP.
                                if context.resolve_flags.contains(ResolveFlags::NO_MAGICLINKS) {
                                    return error!(ELOOP);
                                }
                                node
                            }
                        }
                    }
                };
            }

            // Descend through any mounts stacked on the resolved entry.
            child.enter_mount()
        };

        if context.resolve_flags.contains(ResolveFlags::NO_XDEV) && child.mount != self.mount {
            return error!(EXDEV);
        }

        if context.must_be_directory && !child.entry.node.is_dir() {
            return error!(ENOTDIR);
        }

        Ok(child)
    }
1383
    /// The parent of this node, crossing mount boundaries: if this node is a
    /// mount root, the parent of its mountpoint is returned. `None` at the
    /// top of the hierarchy.
    pub fn parent(&self) -> Option<NamespaceNode> {
        let mountpoint_or_self = self.escape_mount();
        let parent = mountpoint_or_self.entry.read().parent().clone()?;
        Some(mountpoint_or_self.with_new_entry(parent))
    }
1394
1395 pub fn parent_within_mount(&self) -> Option<DirEntryHandle> {
1398 if let Ok(_) = self.mount_if_root() {
1399 return None;
1400 }
1401 self.entry.read().parent().clone()
1402 }
1403
1404 pub fn is_descendant_of(&self, ancestor: &NamespaceNode) -> bool {
1409 let ancestor = ancestor.escape_mount();
1410 let mut current = self.escape_mount();
1411 while current != ancestor {
1412 if let Some(parent) = current.parent() {
1413 current = parent.escape_mount();
1414 } else {
1415 return false;
1416 }
1417 }
1418 true
1419 }
1420
1421 fn enter_mount(&self) -> NamespaceNode {
1423 fn enter_one_mount(node: &NamespaceNode) -> Option<NamespaceNode> {
1425 if let Some(mount) = node.mount.deref() {
1426 if let Some(submount) =
1427 mount.state.read().submounts.get(ArcKey::ref_cast(&node.entry))
1428 {
1429 return Some(submount.mount.root());
1430 }
1431 }
1432 None
1433 }
1434 let mut inner = self.clone();
1435 while let Some(inner_root) = enter_one_mount(&inner) {
1436 inner = inner_root;
1437 }
1438 inner
1439 }
1440
1441 fn escape_mount(&self) -> NamespaceNode {
1446 let mut mountpoint_or_self = self.clone();
1447 while let Some(mountpoint) = mountpoint_or_self.mountpoint() {
1448 mountpoint_or_self = mountpoint;
1449 }
1450 mountpoint_or_self
1451 }
1452
1453 pub fn mount_if_root(&self) -> Result<&MountHandle, Errno> {
1455 if let Some(mount) = self.mount.deref() {
1456 if Arc::ptr_eq(&self.entry, &mount.root) {
1457 return Ok(mount);
1458 }
1459 }
1460 error!(EINVAL)
1461 }
1462
1463 fn mountpoint(&self) -> Option<NamespaceNode> {
1468 self.mount_if_root().ok()?.mountpoint()
1469 }
1470
    /// Returns the path to this node as seen from `task`'s filesystem root
    /// (i.e. respecting any chroot), discarding reachability information.
    pub fn path(&self, task: &Task) -> FsString {
        self.path_from_root(Some(&task.fs().root())).into_path()
    }
1475
    /// Returns the path to this node from the namespace root, ignoring any
    /// chroot the caller may be under.
    pub fn path_escaping_chroot(&self) -> FsString {
        self.path_from_root(None).into_path()
    }
1480
    /// Builds the path from `root` (or, when `root` is `None`, from the top of
    /// the namespace) down to this node, reporting whether the node is in fact
    /// reachable from that root.
    pub fn path_from_root(&self, root: Option<&NamespaceNode>) -> PathWithReachability {
        if self.mount.is_none() {
            // A node with no mount is detached from any namespace; fall back
            // to the node's internal name.
            return PathWithReachability::Reachable(self.entry.node.internal_name());
        }

        let mut path = PathBuilder::new();
        // Normalize the starting point above any mounts stacked on this node.
        let mut current = self.escape_mount();
        if let Some(root) = root {
            let root = root.escape_mount();
            // Walk up from `self` toward `root`, collecting path components.
            while current != root {
                if let Some(parent) = current.parent() {
                    path.prepend_element(current.entry.read().local_name());
                    current = parent.escape_mount();
                } else {
                    // Hit the namespace top without passing through `root`:
                    // `self` is not reachable from `root`. Still report the
                    // absolute path we accumulated.
                    let mut absolute_path = path.build_absolute();
                    if self.entry.read().is_dead() {
                        // Match Linux's convention for unlinked files.
                        absolute_path.extend_from_slice(b" (deleted)");
                    }

                    return PathWithReachability::Unreachable(absolute_path);
                }
            }
        } else {
            // No root constraint: walk all the way to the namespace top.
            while let Some(parent) = current.parent() {
                path.prepend_element(current.entry.read().local_name());
                current = parent.escape_mount();
            }
        }

        let mut absolute_path = path.build_absolute();
        if self.entry.read().is_dead() {
            absolute_path.extend_from_slice(b" (deleted)");
        }

        PathWithReachability::Reachable(absolute_path)
    }
1522
    /// Mounts `what` on this node.
    ///
    /// Only flags that are stored on a mount (plus `REC`) are honored; others
    /// are masked off. Currently always returns `Ok(())`; the `Result`
    /// signature is kept for callers and future error paths.
    pub fn mount(&self, what: WhatToMount, flags: MountFlags) -> Result<(), Errno> {
        let flags = flags & (MountFlags::STORED_ON_MOUNT | MountFlags::REC);
        // Stack on top of whatever is already mounted here.
        let mountpoint = self.enter_mount();
        let mount = mountpoint.mount.as_ref().expect("a mountpoint must be part of a mount");
        mount.create_submount(&mountpoint.entry, what, flags);
        Ok(())
    }
1530
1531 pub fn unmount(&self, flags: UnmountFlags) -> Result<(), Errno> {
1533 let propagate = self.mount_if_root().map_or(false, |mount| mount.read().is_shared());
1534 let mount = self.enter_mount().mount_if_root()?.clone();
1535 mount.unmount(flags, propagate)
1536 }
1537
    /// Renames `old_name` in `old_parent` to `new_name` in `new_parent`.
    ///
    /// An associated function (not a method) because two `NamespaceNode`s are
    /// involved; all the work is delegated to `DirEntry::rename`, which also
    /// receives each parent's mount for permission/busy checks.
    pub fn rename<L>(
        locked: &mut Locked<L>,
        current_task: &CurrentTask,
        old_parent: &NamespaceNode,
        old_name: &FsStr,
        new_parent: &NamespaceNode,
        new_name: &FsStr,
        flags: RenameFlags,
    ) -> Result<(), Errno>
    where
        L: LockEqualOrBefore<FileOpsCore>,
    {
        DirEntry::rename(
            locked,
            current_task,
            &old_parent.entry,
            &old_parent.mount,
            old_name,
            &new_parent.entry,
            &new_parent.mount,
            new_name,
            flags,
        )
    }
1562
    /// Returns a node for `entry` that lives in the same mount as `self`.
    fn with_new_entry(&self, entry: DirEntryHandle) -> NamespaceNode {
        Self { mount: self.mount.clone(), entry }
    }
1566
    /// Returns the key used to look this node's entry up in a mount's
    /// `submounts` map.
    fn mount_hash_key(&self) -> &ArcKey<DirEntry> {
        ArcKey::ref_cast(&self.entry)
    }
1570
1571 pub fn suid_and_sgid(&self, current_task: &CurrentTask) -> Result<UserAndOrGroupId, Errno> {
1572 if self.mount.flags().contains(MountFlags::NOSUID) {
1573 Ok(UserAndOrGroupId::default())
1574 } else {
1575 self.entry.node.info().suid_and_sgid(current_task, &self.entry.node)
1576 }
1577 }
1578
    /// Records an access-time update for this node, unless the mount was
    /// mounted with `noatime`.
    pub fn update_atime(&self) {
        if !self.mount.flags().contains(MountFlags::NOATIME) {
            self.entry.node.update_info(|info| {
                let now = utc::utc_now();
                info.time_access = now;
                // Presumably flushed to the backing filesystem lazily —
                // TODO(review): confirm who consumes this flag.
                info.pending_time_access_update = true;
            });
        }
    }
1589
    /// Reads the target of this symlink node, updating its access time first
    /// (subject to the mount's `noatime` flag).
    pub fn readlink<L>(
        &self,
        locked: &mut Locked<L>,
        current_task: &CurrentTask,
    ) -> Result<SymlinkTarget, Errno>
    where
        L: LockEqualOrBefore<FileOpsCore>,
    {
        self.update_atime();
        self.entry.node.readlink(locked, current_task)
    }
1601
1602 pub fn notify(&self, event_mask: InotifyMask) {
1603 if self.mount.is_some() {
1604 self.entry.notify(event_mask);
1605 }
1606 }
1607
    /// Checks whether `current_task` may access this node with the given
    /// permissions; delegates to `FsNode::check_access`, passing the mount so
    /// mount-level restrictions can be applied as well.
    pub fn check_access<L>(
        &self,
        locked: &mut Locked<L>,
        current_task: &CurrentTask,
        permission_flags: impl Into<security::PermissionFlags>,
        reason: CheckAccessReason,
    ) -> Result<(), Errno>
    where
        L: LockEqualOrBefore<FileOpsCore>,
    {
        self.entry.node.check_access(
            locked,
            current_task,
            &self.mount,
            permission_flags,
            reason,
            self,
        )
    }
1630
    /// Checks whether `current_task` may open this node with `O_NOATIME`;
    /// delegates to the underlying `FsNode`.
    pub fn check_o_noatime_allowed(&self, current_task: &CurrentTask) -> Result<(), Errno> {
        self.entry.node.check_o_noatime_allowed(current_task)
    }
1635
    /// Truncates this node to `length` bytes, then emits an inotify `MODIFY`
    /// event for it.
    pub fn truncate<L>(
        &self,
        locked: &mut Locked<L>,
        current_task: &CurrentTask,
        length: u64,
    ) -> Result<(), Errno>
    where
        L: LockBefore<BeforeFsNodeAppend>,
    {
        self.entry.node.truncate(locked, current_task, &self.mount, length)?;
        // Notify even if the entry is mid-unlink; truncation still modified
        // the node's data.
        self.entry.notify_ignoring_excl_unlink(InotifyMask::MODIFY);
        Ok(())
    }
1649}
1650
// Manual Debug impl so debug output includes the node's resolved path, not
// just its mount and entry pointers.
impl fmt::Debug for NamespaceNode {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("NamespaceNode")
            .field("path", &self.path_escaping_chroot())
            .field("mount", &self.mount)
            .field("entry", &self.entry)
            .finish()
    }
}
1660
// Equality is identity-based: two nodes are equal only when they refer to the
// same mount object (compared by pointer) and the same DirEntry allocation.
impl PartialEq for NamespaceNode {
    fn eq(&self, other: &Self) -> bool {
        self.mount.as_ref().map(Arc::as_ptr).eq(&other.mount.as_ref().map(Arc::as_ptr))
            && Arc::ptr_eq(&self.entry, &other.entry)
    }
}
impl Eq for NamespaceNode {}
// Hashes the same (mount pointer, entry pointer) pair that PartialEq
// compares, keeping Hash consistent with Eq.
impl Hash for NamespaceNode {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.mount.as_ref().map(Arc::as_ptr).hash(state);
        Arc::as_ptr(&self.entry).hash(state);
    }
}
1675
/// A `NamespaceNode` that additionally keeps its mount marked as actively in
/// use for as long as this value (or any clone of it) is alive.
#[derive(Debug, Clone)]
pub struct ActiveNamespaceNode {
    // The underlying (passive) namespace node.
    name: NamespaceNode,

    // Clone of the mount's active-client counter; keeps the mount's client
    // count non-zero. `None` when the node is not part of a mount.
    _marker: Option<MountClientMarker>,
}
1687
impl ActiveNamespaceNode {
    /// Wraps `name`, cloning the mount's active-client counter (when the node
    /// belongs to a mount) so the mount is tracked as in use.
    pub fn new(name: NamespaceNode) -> Self {
        let marker = name.mount.as_ref().map(|mount| mount.active_client_counter.clone());
        Self { name, _marker: marker }
    }

    /// Returns the plain `NamespaceNode`, without the active-use tracking.
    pub fn to_passive(&self) -> NamespaceNode {
        self.deref().clone()
    }

    /// Converts this node into a `FileMapping`, first acquiring a write guard
    /// of `mode` on the node (released when the `FileMapping` is dropped).
    pub fn into_mapping(self, mode: Option<FileWriteGuardMode>) -> Result<Arc<FileMapping>, Errno> {
        if let Some(mode) = mode {
            self.entry.node.write_guard_state.lock().acquire(mode)?;
        }
        Ok(Arc::new(FileMapping { name: self, mode }))
    }
}
1705
// Lets an ActiveNamespaceNode be used anywhere a &NamespaceNode is expected.
impl Deref for ActiveNamespaceNode {
    type Target = NamespaceNode;

    fn deref(&self) -> &Self::Target {
        &self.name
    }
}
1713
// Equality and hashing delegate to the inner NamespaceNode; the client-count
// marker never participates in identity.
impl PartialEq for ActiveNamespaceNode {
    fn eq(&self, other: &Self) -> bool {
        self.deref().eq(other.deref())
    }
}
impl Eq for ActiveNamespaceNode {}
impl Hash for ActiveNamespaceNode {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.deref().hash(state)
    }
}
1725
/// A mapped file: the namespace node that was mapped together with the write
/// guard mode acquired for the mapping (released on drop).
#[derive(Debug, Clone, PartialEq, Eq)]
#[must_use]
pub struct FileMapping {
    pub name: ActiveNamespaceNode,
    // The mode acquired in `ActiveNamespaceNode::into_mapping`, if any.
    mode: Option<FileWriteGuardMode>,
}
1732
impl Drop for FileMapping {
    fn drop(&mut self) {
        // Release the write guard acquired when the mapping was created.
        if let Some(mode) = self.mode {
            self.name.entry.node.write_guard_state.lock().release(mode);
        }
    }
}
1740
/// Kernel-wide registry of which mounts are attached to which directory
/// entries, keyed by a weak reference to the mounted-on `DirEntry`.
pub struct Mounts {
    mounts: RcuHashMap<WeakKey<DirEntry>, Vec<ArcKey<Mount>>>,
}
1745
impl Mounts {
    /// Creates an empty mount registry.
    pub fn new() -> Self {
        Mounts { mounts: RcuHashMap::default() }
    }

    /// Records that `mount` is mounted on `dir_entry`, returning a `Submount`
    /// token that unregisters the mount when dropped.
    fn register_mount(&self, dir_entry: &Arc<DirEntry>, mount: MountHandle) -> Submount {
        let mut mounts = self.mounts.lock();
        let key = WeakKey::from(dir_entry);
        // NOTE(review): `get` appears to hand back an owned Vec which is
        // re-inserted below (RCU read-copy-update style) — confirm against
        // RcuHashMap's API.
        let mut vec = mounts.get(&key).unwrap_or_else(|| {
            // First mount on this entry: flag the entry so lookups know to
            // check for submounts.
            dir_entry.set_has_mounts(true);
            Vec::new()
        });
        vec.push(ArcKey(mount.clone()));
        mounts.insert(key, vec);
        Submount { dir: ArcKey(dir_entry.clone()), mount }
    }

    /// Removes one registration of `mount` on `dir_entry`, clearing the
    /// entry's has-mounts flag when the last mount goes away.
    fn unregister_mount(&self, dir_entry: &Arc<DirEntry>, mount: &MountHandle) {
        let mut mounts = self.mounts.lock();
        let key = WeakKey::from(dir_entry);
        if let Some(mut vec) = mounts.get(&key) {
            // The mount must be present: registration and unregistration are
            // paired via Submount's Drop.
            let index = vec.iter().position(|e| e == ArcKey::ref_cast(mount)).unwrap();
            if vec.len() == 1 {
                mounts.remove(&key);
                dir_entry.set_has_mounts(false);
            } else {
                // Order within the Vec is not significant, so swap_remove is
                // fine.
                vec.swap_remove(index);
                mounts.insert(key, vec);
            }
        }
    }

    /// Unmounts every mount attached to `dir_entry` (e.g. when the entry is
    /// being removed), without propagation.
    pub fn unmount(&self, dir_entry: &DirEntry) {
        // Lookup by raw pointer key; errors from individual unmounts are
        // deliberately ignored (best effort).
        let mounts = self.mounts.lock().remove(&PtrKey::from(dir_entry as *const _));
        if let Some(mounts) = mounts {
            for mount in mounts {
                let _ = mount.unmount(UnmountFlags::default(), false);
            }
        }
    }

    /// Forcibly tears down every registered mount's filesystem ops —
    /// presumably used at kernel shutdown; TODO(review): confirm callers.
    pub fn clear(&self) {
        for (_dir_entry, mounts) in self.mounts.lock().drain() {
            for mount in mounts {
                mount.fs.force_unmount_ops();
            }
        }
    }
}
1804
/// A (directory entry, mount) registration token; unregisters itself from the
/// kernel's `Mounts` registry when dropped.
#[derive(Debug)]
struct Submount {
    dir: ArcKey<DirEntry>,
    mount: MountHandle,
}
1811
impl Drop for Submount {
    fn drop(&mut self) {
        // NOTE(review): assumes the Kernel outlives all submounts — the
        // upgrade is expected to succeed whenever a Submount is dropped.
        self.mount.fs.kernel.upgrade().unwrap().mounts.unregister_mount(&self.dir, &self.mount)
    }
}
1817
1818impl Eq for Submount {}
1820impl PartialEq<Self> for Submount {
1821 fn eq(&self, other: &Self) -> bool {
1822 self.dir == other.dir
1823 }
1824}
1825impl Hash for Submount {
1826 fn hash<H: Hasher>(&self, state: &mut H) {
1827 self.dir.hash(state)
1828 }
1829}
1830
// Allows looking a Submount up in a set by its directory key alone (the key
// that PartialEq/Hash above are based on).
impl Borrow<ArcKey<DirEntry>> for Submount {
    fn borrow(&self) -> &ArcKey<DirEntry> {
        &self.dir
    }
}
1836
1837#[cfg(test)]
1838mod test {
1839 use crate::fs::tmpfs::TmpFs;
1840 use crate::testing::spawn_kernel_and_run;
1841 use crate::vfs::namespace::DeviceType;
1842 use crate::vfs::{
1843 CallbackSymlinkNode, FsNodeInfo, LookupContext, MountInfo, Namespace, NamespaceNode,
1844 RenameFlags, SymlinkMode, SymlinkTarget, UnlinkKind, WhatToMount,
1845 };
1846 use starnix_uapi::mount_flags::MountFlags;
1847 use starnix_uapi::{errno, mode};
1848 use std::sync::Arc;
1849
1850 #[::fuchsia::test]
1851 async fn test_namespace() {
1852 spawn_kernel_and_run(async |locked, current_task| {
1853 let kernel = current_task.kernel();
1854 let root_fs = TmpFs::new_fs(locked, &kernel);
1855 let root_node = Arc::clone(root_fs.root());
1856 let _dev_node = root_node
1857 .create_dir(locked, ¤t_task, "dev".into())
1858 .expect("failed to mkdir dev");
1859 let dev_fs = TmpFs::new_fs(locked, &kernel);
1860 let dev_root_node = Arc::clone(dev_fs.root());
1861 let _dev_pts_node = dev_root_node
1862 .create_dir(locked, ¤t_task, "pts".into())
1863 .expect("failed to mkdir pts");
1864
1865 let ns = Namespace::new(root_fs);
1866 let mut context = LookupContext::default();
1867 let dev = ns
1868 .root()
1869 .lookup_child(locked, ¤t_task, &mut context, "dev".into())
1870 .expect("failed to lookup dev");
1871 dev.mount(WhatToMount::Fs(dev_fs), MountFlags::empty())
1872 .expect("failed to mount dev root node");
1873
1874 let mut context = LookupContext::default();
1875 let dev = ns
1876 .root()
1877 .lookup_child(locked, ¤t_task, &mut context, "dev".into())
1878 .expect("failed to lookup dev");
1879 let mut context = LookupContext::default();
1880 let pts = dev
1881 .lookup_child(locked, ¤t_task, &mut context, "pts".into())
1882 .expect("failed to lookup pts");
1883 let pts_parent =
1884 pts.parent().ok_or_else(|| errno!(ENOENT)).expect("failed to get parent of pts");
1885 assert!(Arc::ptr_eq(&pts_parent.entry, &dev.entry));
1886
1887 let dev_parent =
1888 dev.parent().ok_or_else(|| errno!(ENOENT)).expect("failed to get parent of dev");
1889 assert!(Arc::ptr_eq(&dev_parent.entry, &ns.root().entry));
1890 })
1891 .await;
1892 }
1893
1894 #[::fuchsia::test]
1895 async fn test_mount_does_not_upgrade() {
1896 spawn_kernel_and_run(async |locked, current_task| {
1897 let kernel = current_task.kernel();
1898 let root_fs = TmpFs::new_fs(locked, &kernel);
1899 let root_node = Arc::clone(root_fs.root());
1900 let _dev_node = root_node
1901 .create_dir(locked, ¤t_task, "dev".into())
1902 .expect("failed to mkdir dev");
1903 let dev_fs = TmpFs::new_fs(locked, &kernel);
1904 let dev_root_node = Arc::clone(dev_fs.root());
1905 let _dev_pts_node = dev_root_node
1906 .create_dir(locked, ¤t_task, "pts".into())
1907 .expect("failed to mkdir pts");
1908
1909 let ns = Namespace::new(root_fs);
1910 let mut context = LookupContext::default();
1911 let dev = ns
1912 .root()
1913 .lookup_child(locked, ¤t_task, &mut context, "dev".into())
1914 .expect("failed to lookup dev");
1915 dev.mount(WhatToMount::Fs(dev_fs), MountFlags::empty())
1916 .expect("failed to mount dev root node");
1917 let mut context = LookupContext::default();
1918 let new_dev = ns
1919 .root()
1920 .lookup_child(locked, ¤t_task, &mut context, "dev".into())
1921 .expect("failed to lookup dev again");
1922 assert!(!Arc::ptr_eq(&dev.entry, &new_dev.entry));
1923 assert_ne!(&dev, &new_dev);
1924
1925 let mut context = LookupContext::default();
1926 let _new_pts = new_dev
1927 .lookup_child(locked, ¤t_task, &mut context, "pts".into())
1928 .expect("failed to lookup pts");
1929 let mut context = LookupContext::default();
1930 assert!(dev.lookup_child(locked, ¤t_task, &mut context, "pts".into()).is_err());
1931 })
1932 .await;
1933 }
1934
1935 #[::fuchsia::test]
1936 async fn test_path() {
1937 spawn_kernel_and_run(async |locked, current_task| {
1938 let kernel = current_task.kernel();
1939 let root_fs = TmpFs::new_fs(locked, &kernel);
1940 let root_node = Arc::clone(root_fs.root());
1941 let _dev_node = root_node
1942 .create_dir(locked, ¤t_task, "dev".into())
1943 .expect("failed to mkdir dev");
1944 let dev_fs = TmpFs::new_fs(locked, &kernel);
1945 let dev_root_node = Arc::clone(dev_fs.root());
1946 let _dev_pts_node = dev_root_node
1947 .create_dir(locked, ¤t_task, "pts".into())
1948 .expect("failed to mkdir pts");
1949
1950 let ns = Namespace::new(root_fs);
1951 let mut context = LookupContext::default();
1952 let dev = ns
1953 .root()
1954 .lookup_child(locked, ¤t_task, &mut context, "dev".into())
1955 .expect("failed to lookup dev");
1956 dev.mount(WhatToMount::Fs(dev_fs), MountFlags::empty())
1957 .expect("failed to mount dev root node");
1958
1959 let mut context = LookupContext::default();
1960 let dev = ns
1961 .root()
1962 .lookup_child(locked, ¤t_task, &mut context, "dev".into())
1963 .expect("failed to lookup dev");
1964 let mut context = LookupContext::default();
1965 let pts = dev
1966 .lookup_child(locked, ¤t_task, &mut context, "pts".into())
1967 .expect("failed to lookup pts");
1968
1969 assert_eq!("/", ns.root().path_escaping_chroot());
1970 assert_eq!("/dev", dev.path_escaping_chroot());
1971 assert_eq!("/dev/pts", pts.path_escaping_chroot());
1972 })
1973 .await;
1974 }
1975
1976 #[::fuchsia::test]
1977 async fn test_shadowing() {
1978 spawn_kernel_and_run(async |locked, current_task| {
1979 let kernel = current_task.kernel();
1980 let root_fs = TmpFs::new_fs(locked, &kernel);
1981 let ns = Namespace::new(root_fs.clone());
1982 let _foo_node = root_fs.root().create_dir(locked, ¤t_task, "foo".into()).unwrap();
1983 let mut context = LookupContext::default();
1984 let foo_dir =
1985 ns.root().lookup_child(locked, ¤t_task, &mut context, "foo".into()).unwrap();
1986
1987 let foofs1 = TmpFs::new_fs(locked, &kernel);
1988 foo_dir.mount(WhatToMount::Fs(foofs1.clone()), MountFlags::empty()).unwrap();
1989 let mut context = LookupContext::default();
1990 assert!(Arc::ptr_eq(
1991 &ns.root()
1992 .lookup_child(locked, ¤t_task, &mut context, "foo".into())
1993 .unwrap()
1994 .entry,
1995 foofs1.root()
1996 ));
1997 let foo_dir =
1998 ns.root().lookup_child(locked, ¤t_task, &mut context, "foo".into()).unwrap();
1999
2000 let ns_clone = ns.clone_namespace();
2001
2002 let foofs2 = TmpFs::new_fs(locked, &kernel);
2003 foo_dir.mount(WhatToMount::Fs(foofs2.clone()), MountFlags::empty()).unwrap();
2004 let mut context = LookupContext::default();
2005 assert!(Arc::ptr_eq(
2006 &ns.root()
2007 .lookup_child(locked, ¤t_task, &mut context, "foo".into())
2008 .unwrap()
2009 .entry,
2010 foofs2.root()
2011 ));
2012
2013 assert!(Arc::ptr_eq(
2014 &ns_clone
2015 .root()
2016 .lookup_child(
2017 locked,
2018 ¤t_task,
2019 &mut LookupContext::default(),
2020 "foo".into()
2021 )
2022 .unwrap()
2023 .entry,
2024 foofs1.root()
2025 ));
2026 })
2027 .await;
2028 }
2029
2030 #[::fuchsia::test]
2031 async fn test_unlink_mounted_directory() {
2032 spawn_kernel_and_run(async |locked, current_task| {
2033 let kernel = current_task.kernel();
2034 let root_fs = TmpFs::new_fs(locked, &kernel);
2035 let ns1 = Namespace::new(root_fs.clone());
2036 let ns2 = Namespace::new(root_fs.clone());
2037 let _foo_node = root_fs.root().create_dir(locked, ¤t_task, "foo".into()).unwrap();
2038 let mut context = LookupContext::default();
2039 let foo_dir =
2040 ns1.root().lookup_child(locked, ¤t_task, &mut context, "foo".into()).unwrap();
2041
2042 let foofs = TmpFs::new_fs(locked, &kernel);
2043 foo_dir.mount(WhatToMount::Fs(foofs), MountFlags::empty()).unwrap();
2044
2045 assert_eq!(
2047 ns1.root()
2048 .unlink(locked, ¤t_task, "foo".into(), UnlinkKind::Directory, false)
2049 .unwrap_err(),
2050 errno!(EBUSY),
2051 );
2052
2053 ns2.root()
2055 .unlink(locked, ¤t_task, "foo".into(), UnlinkKind::Directory, false)
2056 .expect("unlink failed");
2057
2058 assert_eq!(
2060 ns1.root()
2061 .unlink(locked, ¤t_task, "foo".into(), UnlinkKind::Directory, false)
2062 .unwrap_err(),
2063 errno!(ENOENT),
2064 );
2065 })
2066 .await;
2067 }
2068
2069 #[::fuchsia::test]
2070 async fn test_rename_mounted_directory() {
2071 spawn_kernel_and_run(async |locked, current_task| {
2072 let kernel = current_task.kernel();
2073 let root_fs = TmpFs::new_fs(locked, &kernel);
2074 let ns1 = Namespace::new(root_fs.clone());
2075 let ns2 = Namespace::new(root_fs.clone());
2076 let _foo_node = root_fs.root().create_dir(locked, ¤t_task, "foo".into()).unwrap();
2077 let _bar_node = root_fs.root().create_dir(locked, ¤t_task, "bar".into()).unwrap();
2078 let _baz_node = root_fs.root().create_dir(locked, ¤t_task, "baz".into()).unwrap();
2079 let mut context = LookupContext::default();
2080 let foo_dir =
2081 ns1.root().lookup_child(locked, ¤t_task, &mut context, "foo".into()).unwrap();
2082
2083 let foofs = TmpFs::new_fs(locked, &kernel);
2084 foo_dir.mount(WhatToMount::Fs(foofs), MountFlags::empty()).unwrap();
2085
2086 let root = ns1.root();
2088 assert_eq!(
2089 NamespaceNode::rename(
2090 locked,
2091 ¤t_task,
2092 &root,
2093 "bar".into(),
2094 &root,
2095 "foo".into(),
2096 RenameFlags::empty()
2097 )
2098 .unwrap_err(),
2099 errno!(EBUSY),
2100 );
2101 assert_eq!(
2103 NamespaceNode::rename(
2104 locked,
2105 ¤t_task,
2106 &root,
2107 "foo".into(),
2108 &root,
2109 "bar".into(),
2110 RenameFlags::empty()
2111 )
2112 .unwrap_err(),
2113 errno!(EBUSY),
2114 );
2115
2116 let root = ns2.root();
2118
2119 NamespaceNode::rename(
2121 locked,
2122 ¤t_task,
2123 &root,
2124 "foo".into(),
2125 &root,
2126 "bar".into(),
2127 RenameFlags::empty(),
2128 )
2129 .expect("rename failed");
2130
2131 NamespaceNode::rename(
2133 locked,
2134 ¤t_task,
2135 &root,
2136 "baz".into(),
2137 &root,
2138 "bar".into(),
2139 RenameFlags::empty(),
2140 )
2141 .expect("rename failed");
2142
2143 assert_eq!(
2145 ns1.root()
2146 .lookup_child(locked, ¤t_task, &mut context, "foo".into())
2147 .unwrap_err(),
2148 errno!(ENOENT)
2149 );
2150 assert_eq!(
2151 ns1.root()
2152 .lookup_child(locked, ¤t_task, &mut context, "baz".into())
2153 .unwrap_err(),
2154 errno!(ENOENT)
2155 );
2156 })
2157 .await;
2158 }
2159
2160 #[::fuchsia::test]
2163 async fn test_lookup_with_symlink_chain() {
2164 spawn_kernel_and_run(async |locked, current_task| {
2165 let kernel = current_task.kernel();
2167 let root_fs = TmpFs::new_fs(locked, &kernel);
2168 let root_node = Arc::clone(root_fs.root());
2169 let _first_subdir_node = root_node
2170 .create_dir(locked, ¤t_task, "first_subdir".into())
2171 .expect("failed to mkdir dev");
2172 let _second_subdir_node = root_node
2173 .create_dir(locked, ¤t_task, "second_subdir".into())
2174 .expect("failed to mkdir dev");
2175
2176 let first_subdir_fs = TmpFs::new_fs(locked, &kernel);
2178 let second_subdir_fs = TmpFs::new_fs(locked, &kernel);
2179
2180 let ns = Namespace::new(root_fs);
2181 let mut context = LookupContext::default();
2182 let first_subdir = ns
2183 .root()
2184 .lookup_child(locked, ¤t_task, &mut context, "first_subdir".into())
2185 .expect("failed to lookup first_subdir");
2186 first_subdir
2187 .mount(WhatToMount::Fs(first_subdir_fs), MountFlags::empty())
2188 .expect("failed to mount first_subdir fs node");
2189 let second_subdir = ns
2190 .root()
2191 .lookup_child(locked, ¤t_task, &mut context, "second_subdir".into())
2192 .expect("failed to lookup second_subdir");
2193 second_subdir
2194 .mount(WhatToMount::Fs(second_subdir_fs), MountFlags::empty())
2195 .expect("failed to mount second_subdir fs node");
2196
2197 let real_file_node = first_subdir
2206 .create_node(
2207 locked,
2208 ¤t_task,
2209 "real_file".into(),
2210 mode!(IFREG, 0o777),
2211 DeviceType::NONE,
2212 )
2213 .expect("failed to create real_file");
2214 first_subdir
2215 .create_symlink(locked, ¤t_task, "path_symlink".into(), "real_file".into())
2216 .expect("failed to create path_symlink");
2217
2218 let mut no_follow_lookup_context = LookupContext::new(SymlinkMode::NoFollow);
2219 let path_symlink_node = first_subdir
2220 .lookup_child(
2221 locked,
2222 ¤t_task,
2223 &mut no_follow_lookup_context,
2224 "path_symlink".into(),
2225 )
2226 .expect("Failed to lookup path_symlink");
2227
2228 let node_symlink_node = second_subdir.entry.node.fs().create_node_and_allocate_node_id(
2232 CallbackSymlinkNode::new(move || {
2233 let node = path_symlink_node.clone();
2234 Ok(SymlinkTarget::Node(node))
2235 }),
2236 FsNodeInfo::new(mode!(IFLNK, 0o777), current_task.current_fscred()),
2237 );
2238 second_subdir
2239 .entry
2240 .create_entry(
2241 locked,
2242 ¤t_task,
2243 &MountInfo::detached(),
2244 "node_symlink".into(),
2245 move |_locked, _dir, _mount, _name| Ok(node_symlink_node),
2246 )
2247 .expect("failed to create node_symlink entry");
2248
2249 let mut follow_lookup_context = LookupContext::new(SymlinkMode::Follow);
2251 let node_symlink_resolution = second_subdir
2252 .lookup_child(
2253 locked,
2254 ¤t_task,
2255 &mut follow_lookup_context,
2256 "node_symlink".into(),
2257 )
2258 .expect("lookup with symlink chain failed");
2259
2260 assert!(node_symlink_resolution.entry.node.ino == real_file_node.entry.node.ino);
2262 })
2263 .await;
2264 }
2265}