// starnix_core/vfs/namespace.rs
1// Copyright 2021 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5use crate::mutable_state::{state_accessor, state_implementation};
6use crate::security;
7use crate::task::{CurrentTask, EventHandler, Kernel, Task, WaitCanceler, Waiter};
8use crate::time::utc;
9use crate::vfs::fs_registry::FsRegistry;
10use crate::vfs::pseudo::dynamic_file::{DynamicFile, DynamicFileBuf, DynamicFileSource};
11use crate::vfs::pseudo::simple_file::SimpleFileNode;
12use crate::vfs::socket::{SocketAddress, SocketHandle, UnixSocket};
13use crate::vfs::{
14    CheckAccessReason, DirEntry, DirEntryHandle, FileHandle, FileObject, FileOps, FileSystemHandle,
15    FileSystemOptions, FileWriteGuardMode, FsNode, FsNodeHandle, FsNodeOps, FsStr, FsString,
16    PathBuilder, RenameFlags, SymlinkTarget, UnlinkKind, fileops_impl_dataless,
17    fileops_impl_delegate_read_write_and_seek, fileops_impl_nonseekable, fileops_impl_noop_sync,
18    fs_node_impl_not_dir,
19};
20use fuchsia_rcu::RcuReadScope;
21use macro_rules_attribute::apply;
22use ref_cast::RefCast;
23use starnix_logging::log_warn;
24use starnix_rcu::RcuHashMap;
25use starnix_sync::{
26    BeforeFsNodeAppend, FileOpsCore, LockBefore, LockEqualOrBefore, Locked, Mutex, RwLock, Unlocked,
27};
28use starnix_types::ownership::WeakRef;
29use starnix_uapi::arc_key::{ArcKey, PtrKey, WeakKey};
30use starnix_uapi::auth::UserAndOrGroupId;
31use starnix_uapi::device_type::DeviceType;
32use starnix_uapi::errors::Errno;
33use starnix_uapi::file_mode::{AccessCheck, FileMode};
34use starnix_uapi::inotify_mask::InotifyMask;
35use starnix_uapi::mount_flags::MountFlags;
36use starnix_uapi::open_flags::OpenFlags;
37use starnix_uapi::unmount_flags::UnmountFlags;
38use starnix_uapi::vfs::{FdEvents, ResolveFlags};
39use starnix_uapi::{NAME_MAX, errno, error};
40use std::borrow::Borrow;
41use std::collections::HashSet;
42use std::fmt;
43use std::hash::{Hash, Hasher};
44use std::ops::{Deref, DerefMut};
45use std::sync::{Arc, Weak};
46
/// A mount namespace.
///
/// The namespace records at which entries filesystems are mounted.
#[derive(Debug)]
pub struct Namespace {
    /// The mount at the root of this namespace; the top of the mount tree.
    root_mount: MountHandle,

    // Unique ID of this namespace.
    pub id: u64,
}
57
impl Namespace {
    /// Creates a new namespace whose root mount is `fs`, mounted with empty flags.
    pub fn new(fs: FileSystemHandle) -> Arc<Namespace> {
        Self::new_with_flags(fs, MountFlags::empty())
    }

    /// Creates a new namespace whose root mount is `fs`, mounted with `flags`.
    ///
    /// Panics if the filesystem's kernel has already been dropped.
    pub fn new_with_flags(fs: FileSystemHandle, flags: MountFlags) -> Arc<Namespace> {
        let kernel = fs.kernel.upgrade().expect("can't create namespace without a kernel");
        let root_mount = Mount::new(WhatToMount::Fs(fs), flags);
        Arc::new(Self { root_mount, id: kernel.get_next_namespace_id() })
    }

    /// Returns a `NamespaceNode` referring to the root of this namespace.
    pub fn root(&self) -> NamespaceNode {
        self.root_mount.root()
    }

    /// Creates a deep copy of this namespace: the whole mount tree is cloned recursively and
    /// the copy receives a fresh namespace id.
    pub fn clone_namespace(&self) -> Arc<Namespace> {
        let kernel =
            self.root_mount.fs.kernel.upgrade().expect("can't clone namespace without a kernel");
        Arc::new(Self {
            root_mount: self.root_mount.clone_mount_recursive(),
            id: kernel.get_next_namespace_id(),
        })
    }

    /// Assuming new_ns is a clone of the namespace that node is from, return the equivalent of
    /// node in new_ns. If this assumption is violated, returns None.
    pub fn translate_node(mut node: NamespaceNode, new_ns: &Namespace) -> Option<NamespaceNode> {
        // Collect the list of mountpoints that leads to this node's mount.
        // The walk goes leaf-to-root: each iteration steps to the parent mount.
        let mut mountpoints = vec![];
        let mut mount = node.mount;
        while let Some(mountpoint) = mount.as_ref().and_then(|m| m.read().mountpoint()) {
            mountpoints.push(mountpoint.entry);
            mount = mountpoint.mount;
        }

        // Follow the same path in the new namespace, replaying the recorded mountpoints in
        // reverse (root-to-leaf). If a mountpoint has no equivalent submount in `new_ns`, the
        // namespaces were not clones of each other and we bail out with None (via `?`).
        let mut mount = Arc::clone(&new_ns.root_mount);
        for mountpoint in mountpoints.iter().rev() {
            let next_mount =
                mount.read().submounts.get(ArcKey::ref_cast(mountpoint))?.mount.clone();
            mount = next_mount;
        }
        node.mount = Some(mount).into();
        Some(node)
    }
}
104
// Allows a namespace to be exposed as a file node (e.g. for /proc/pid/ns entries); opening it
// yields a `MountNamespaceFile` holding a strong reference to the namespace.
impl FsNodeOps for Arc<Namespace> {
    fs_node_impl_not_dir!();

    fn create_file_ops(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        _node: &FsNode,
        _current_task: &CurrentTask,
        _flags: OpenFlags,
    ) -> Result<Box<dyn FileOps>, Errno> {
        Ok(Box::new(MountNamespaceFile(self.clone())))
    }
}
118
/// An open file handle that keeps a mount namespace alive. The file itself has no data and is
/// not seekable; its purpose is to pin the `Namespace` via the contained strong reference.
pub struct MountNamespaceFile(pub Arc<Namespace>);

impl FileOps for MountNamespaceFile {
    fileops_impl_nonseekable!();
    fileops_impl_dataless!();
    fileops_impl_noop_sync!();
}
126
/// An empty struct that we use to track the number of active clients for a mount.
///
/// Each active client takes a reference to this object. The unmount operation fails
/// if there are any active clients of the mount.
/// (See `Mount::active_clients`, which reads `Arc::strong_count` on this marker.)
type MountClientMarker = Arc<()>;
132
/// An instance of a filesystem mounted in a namespace.
///
/// At a mount, path traversal switches from one filesystem to another.
/// The client sees a composed directory structure that glues together the
/// directories from the underlying FsNodes from those filesystems.
///
/// The mounts in a namespace form a mount tree, with `mountpoint` pointing to the parent and
/// `submounts` pointing to the children.
pub struct Mount {
    /// The directory entry within `fs` that serves as the root of this mount.
    root: DirEntryHandle,
    /// Per-mount flags (MS_RDONLY, MS_NOEXEC, ...); see `MountFlags::STORED_ON_MOUNT`.
    flags: Mutex<MountFlags>,
    /// The filesystem instance this mount exposes.
    fs: FileSystemHandle,

    /// A unique identifier for this mount reported in /proc/pid/mountinfo.
    id: u64,

    /// A count of the number of active clients.
    active_client_counter: MountClientMarker,

    // Lock ordering: mount -> submount
    state: RwLock<MountState>,
    // Mount used to contain a Weak<Namespace>. It no longer does because since the mount point
    // hash was moved from Namespace to Mount, nothing actually uses it. Now that
    // Namespace::clone_namespace() is implemented in terms of Mount::clone_mount_recursive, it
    // won't be trivial to add it back. I recommend turning the mountpoint field into an enum of
    // Mountpoint or Namespace, maybe called "parent", and then traverse up to the top of the tree
    // if you need to find a Mount's Namespace.
}
/// Shared handle to a `Mount`; mounts are reference-counted and linked into a tree.
type MountHandle = Arc<Mount>;
162
/// Public representation of the mount options.
#[derive(Clone, Debug)]
pub struct MountInfo {
    // `None` means the node is not attached to any mount (a "detached" node).
    handle: Option<MountHandle>,
}
168
169impl MountInfo {
170    /// `MountInfo` for a element that is not tied to a given mount. Mount flags will be considered
171    /// empty.
172    pub fn detached() -> Self {
173        None.into()
174    }
175
176    /// The mount flags of the represented mount.
177    pub fn flags(&self) -> MountFlags {
178        if let Some(handle) = &self.handle {
179            handle.flags()
180        } else {
181            // Consider not mounted node have the NOATIME flags.
182            MountFlags::NOATIME
183        }
184    }
185
186    /// Checks whether this `MountInfo` represents a writable file system mount.
187    pub fn check_readonly_filesystem(&self) -> Result<(), Errno> {
188        if self.flags().contains(MountFlags::RDONLY) {
189            return error!(EROFS);
190        }
191        Ok(())
192    }
193
194    /// Checks whether this `MountInfo` represents an executable file system mount.
195    pub fn check_noexec_filesystem(&self) -> Result<(), Errno> {
196        if self.flags().contains(MountFlags::NOEXEC) {
197            return error!(EACCES);
198        }
199        Ok(())
200    }
201}
202
// Dereferences to the underlying `Option<MountHandle>` so callers can use Option combinators
// directly on a `MountInfo`.
impl Deref for MountInfo {
    type Target = Option<MountHandle>;

    fn deref(&self) -> &Self::Target {
        &self.handle
    }
}
210
// Mutable counterpart of the `Deref` impl above: exposes the inner `Option<MountHandle>`.
impl DerefMut for MountInfo {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.handle
    }
}
216
217impl std::cmp::PartialEq for MountInfo {
218    fn eq(&self, other: &Self) -> bool {
219        self.handle.as_ref().map(Arc::as_ptr) == other.handle.as_ref().map(Arc::as_ptr)
220    }
221}
222
223impl std::cmp::Eq for MountInfo {}
224
225impl Into<MountInfo> for Option<MountHandle> {
226    fn into(self) -> MountInfo {
227        MountInfo { handle: self }
228    }
229}
230
/// Mutable state of a `Mount`: its position in the mount tree and its propagation-group links.
#[derive(Default)]
pub struct MountState {
    /// The namespace node that this mount is mounted on. This is a tuple instead of a
    /// NamespaceNode because the Mount pointer has to be weak because this is the pointer to the
    /// parent mount, the parent has a pointer to the children too, and making both strong would be
    /// a cycle.
    mountpoint: Option<(Weak<Mount>, DirEntryHandle)>,

    // The set is keyed by the mountpoints which are always descendants of this mount's root.
    // Conceptually, the set is more akin to a map: `DirEntry -> MountHandle`, but we use a set
    // instead because `Submount` has a drop implementation that needs both the key and value.
    //
    // Each directory entry can only have one mount attached. Mount shadowing works by using the
    // root of the inner mount as a mountpoint. For example, if filesystem A is mounted at /foo,
    // mounting filesystem B on /foo will create the mount as a child of the A mount, attached to
    // A's root, instead of the root mount.
    submounts: HashSet<Submount>,

    /// The membership of this mount in its peer group. Do not access directly. Instead use
    /// peer_group(), take_from_peer_group(), and set_peer_group().
    // TODO(tbodt): Refactor the links into, some kind of extra struct or something? This is hard
    // because setting this field requires the Arc<Mount>.
    peer_group_: Option<(Arc<PeerGroup>, PtrKey<Mount>)>,
    /// The membership of this mount in a PeerGroup's downstream. Do not access directly. Instead
    /// use upstream(), take_from_upstream(), and set_upstream().
    upstream_: Option<(Weak<PeerGroup>, PtrKey<Mount>)>,
}
258
/// A group of mounts. Setting MS_SHARED on a mount puts it in its own peer group. Any bind mounts
/// of a mount in the group are also added to the group. A mount created in any mount in a peer
/// group will be automatically propagated (recreated) in every other mount in the group.
#[derive(Default)]
struct PeerGroup {
    /// Unique peer-group id, allocated from the kernel (see `Kernel::get_next_peer_group_id`).
    id: u64,
    state: RwLock<PeerGroupState>,
}
/// Membership sets for a `PeerGroup`; weak keys so a dropped `Mount` does not keep its group
/// entries alive.
#[derive(Default)]
struct PeerGroupState {
    /// The peer mounts in this group.
    mounts: HashSet<WeakKey<Mount>>,
    /// Mounts receiving propagation from this group (MS_SLAVE-style downstream members).
    downstream: HashSet<WeakKey<Mount>>,
}
272
/// The source of a new mount: either the root of a filesystem, or an existing namespace node
/// (a bind mount).
pub enum WhatToMount {
    Fs(FileSystemHandle),
    Bind(NamespaceNode),
}
277
impl Mount {
    /// Creates a mount from `what`: for `Fs`, a fresh mount rooted at the filesystem's root;
    /// for `Bind`, a clone of the node's existing mount rooted at the node's entry.
    ///
    /// Panics if a bind mount is requested from a node with no mount (an anonymous node).
    pub fn new(what: WhatToMount, flags: MountFlags) -> MountHandle {
        match what {
            WhatToMount::Fs(fs) => Self::new_with_root(fs.root().clone(), flags),
            WhatToMount::Bind(node) => {
                let mount = node.mount.as_ref().expect("can't bind mount from an anonymous node");
                mount.clone_mount(&node.entry, flags)
            }
        }
    }

    /// Builds the `Mount` struct itself. Panics if `flags` contains anything outside
    /// `MountFlags::STORED_ON_MOUNT` — callers must strip other flags first.
    fn new_with_root(root: DirEntryHandle, flags: MountFlags) -> MountHandle {
        let known_flags = MountFlags::STORED_ON_MOUNT;
        assert!(
            !flags.intersects(!known_flags),
            "mount created with extra flags {:?}",
            flags - known_flags
        );
        let fs = root.node.fs();
        let kernel = fs.kernel.upgrade().expect("can't create mount without kernel");
        Arc::new(Self {
            id: kernel.get_next_mount_id(),
            flags: Mutex::new(flags),
            root,
            active_client_counter: Default::default(),
            fs,
            state: Default::default(),
        })
    }

    /// A namespace node referring to the root of the mount.
    pub fn root(self: &MountHandle) -> NamespaceNode {
        NamespaceNode::new(Arc::clone(self), Arc::clone(&self.root))
    }

    /// Create the specified mount as a child. Also propagate it to the mount's peer group.
    fn create_submount(
        self: &MountHandle,
        dir: &DirEntryHandle,
        what: WhatToMount,
        flags: MountFlags,
    ) {
        // TODO(tbodt): Making a copy here is necessary for lock ordering, because the peer group
        // lock nests inside all mount locks (it would be impractical to reverse this because you
        // need to lock a mount to get its peer group.) But it opens the door to race conditions
        // where if a peer are concurrently being added, the mount might not get propagated to the
        // new peer. The only true solution to this is bigger locks, somehow using the same lock
        // for the peer group and all of the mounts in the group. Since peer groups are fluid and
        // can have mounts constantly joining and leaving and then joining other groups, the only
        // sensible locking option is to use a single global lock for all mounts and peer groups.
        // This is almost impossible to express in rust. Help.
        //
        // Update: Also necessary to make a copy to prevent excess replication, see the comment on
        // the following Mount::new call.
        let peers = {
            let state = self.state.read();
            state.peer_group().map(|g| g.copy_propagation_targets()).unwrap_or_default()
        };

        // Create the mount after copying the peer groups, because in the case of creating a bind
        // mount inside itself, the new mount would get added to our peer group during the
        // Mount::new call, but we don't want to replicate into it already. For an example see
        // MountTest.QuizBRecursion.
        let mount = Mount::new(what, flags);

        // A mount created inside a shared mount is itself shared.
        if self.read().is_shared() {
            mount.write().make_shared();
        }

        // Replicate the new mount into every peer (the snapshot may include `self`; skip it).
        for peer in peers {
            if Arc::ptr_eq(self, &peer) {
                continue;
            }
            let clone = mount.clone_mount_recursive();
            peer.write().add_submount_internal(dir, clone);
        }

        self.write().add_submount_internal(dir, mount)
    }

    /// Removes the submount at `mount_hash_key` from this mount, propagating the unmount to the
    /// peer group per mount_namespaces(7). Fails with EINVAL if nothing is mounted there.
    fn remove_submount(self: &MountHandle, mount_hash_key: &ArcKey<DirEntry>) -> Result<(), Errno> {
        // create_submount explains why we need to make a copy of peers.
        let peers = {
            let state = self.state.read();
            state.peer_group().map(|g| g.copy_propagation_targets()).unwrap_or_default()
        };

        for peer in peers {
            if Arc::ptr_eq(self, &peer) {
                continue;
            }
            // mount_namespaces(7): If B is shared, then all most-recently-mounted mounts at b on
            // mounts that receive propagation from mount B and do not have submounts under them are
            // unmounted.
            let mut peer = peer.write();
            if let Some(submount) = peer.submounts.get(mount_hash_key) {
                if !submount.mount.read().submounts.is_empty() {
                    continue;
                }
            }
            // Failure to unmount in a peer is deliberately ignored; only the unmount on `self`
            // below determines the result.
            let _ = peer.remove_submount_internal(mount_hash_key);
        }

        self.write().remove_submount_internal(mount_hash_key)
    }

    /// Create a new mount with the same filesystem, flags, and peer group. Used to implement bind
    /// mounts.
    fn clone_mount(
        self: &MountHandle,
        new_root: &DirEntryHandle,
        flags: MountFlags,
    ) -> MountHandle {
        assert!(new_root.is_descendant_of(&self.root));
        // According to mount(2) on bind mounts, all flags other than MS_REC are ignored when doing
        // a bind mount.
        let clone = Self::new_with_root(Arc::clone(new_root), self.flags());

        if flags.contains(MountFlags::REC) {
            // This is two steps because the alternative (locking clone.state while iterating over
            // self.state.submounts) trips tracing_mutex. The lock ordering is parent -> child, and
            // if the clone is eventually made a child of self, this looks like an ordering
            // violation. I'm not convinced it's a real issue, but I can't convince myself it's not
            // either.
            let mut submounts = vec![];
            for Submount { dir, mount } in &self.state.read().submounts {
                submounts.push((dir.clone(), mount.clone_mount_recursive()));
            }
            let mut clone_state = clone.write();
            for (dir, submount) in submounts {
                clone_state.add_submount_internal(&dir, submount);
            }
        }

        // Put the clone in the same peer group
        let peer_group = self.state.read().peer_group().map(Arc::clone);
        if let Some(peer_group) = peer_group {
            clone.write().set_peer_group(peer_group);
        }

        clone
    }

    /// Do a clone of the full mount hierarchy below this mount. Used for creating mount
    /// namespaces and creating copies to use for propagation.
    fn clone_mount_recursive(self: &MountHandle) -> MountHandle {
        self.clone_mount(&self.root, MountFlags::REC)
    }

    /// Changes the propagation type of this mount (MS_SHARED / MS_PRIVATE / MS_SLAVE), and of
    /// its whole subtree when `recursive` is set. Unknown flags are logged and ignored.
    pub fn change_propagation(self: &MountHandle, flag: MountFlags, recursive: bool) {
        let mut state = self.write();
        match flag {
            MountFlags::SHARED => state.make_shared(),
            MountFlags::PRIVATE => state.make_private(),
            MountFlags::DOWNSTREAM => state.make_downstream(),
            _ => {
                log_warn!("mount propagation {:?}", flag);
                return;
            }
        }

        if recursive {
            for submount in &state.submounts {
                submount.mount.change_propagation(flag, recursive);
            }
        }
    }

    /// Returns a snapshot of the per-mount flags.
    fn flags(&self) -> MountFlags {
        *self.flags.lock()
    }

    /// Updates the stored mount flags, implementing the remount semantics for atime flags:
    /// unspecified atime flags are preserved (Linux >= 3.17), and MS_STRICTATIME is consumed
    /// after clearing the other atime flags rather than stored.
    pub fn update_flags(self: &MountHandle, mut flags: MountFlags) {
        flags &= MountFlags::STORED_ON_MOUNT;
        let atime_flags = MountFlags::NOATIME
            | MountFlags::NODIRATIME
            | MountFlags::RELATIME
            | MountFlags::STRICTATIME;
        let mut stored_flags = self.flags.lock();
        if !flags.intersects(atime_flags) {
            // Since Linux 3.17, if none of MS_NOATIME, MS_NODIRATIME,
            // MS_RELATIME, or MS_STRICTATIME is specified in mountflags, then
            // the remount operation preserves the existing values of these
            // flags (rather than defaulting to MS_RELATIME).
            flags |= *stored_flags & atime_flags;
        }
        // The "effect [of MS_STRICTATIME] is to clear the MS_NOATIME and MS_RELATIME flags."
        flags &= !MountFlags::STRICTATIME;
        *stored_flags = flags;
    }

    /// The number of active clients of this mount.
    ///
    /// The mount cannot be unmounted if there are any active clients.
    fn active_clients(&self) -> usize {
        // We need to subtract one for our own reference. We are not a real client.
        Arc::strong_count(&self.active_client_counter) - 1
    }

    /// Unmounts this mount from its parent. Without UMOUNT_DETACH, fails with EBUSY if the
    /// mount has active clients or submounts; fails with EINVAL if this mount has no parent
    /// (e.g. a namespace root).
    pub fn unmount(&self, flags: UnmountFlags) -> Result<(), Errno> {
        if !flags.contains(UnmountFlags::DETACH) {
            if self.active_clients() > 0 || !self.state.read().submounts.is_empty() {
                return error!(EBUSY);
            }
        }
        let mountpoint = self.state.read().mountpoint().ok_or_else(|| errno!(EINVAL))?;
        let parent_mount = mountpoint.mount.as_ref().expect("a mountpoint must be part of a mount");
        parent_mount.remove_submount(mountpoint.mount_hash_key())
    }

    /// Returns the security state of the fs.
    pub fn security_state(&self) -> &security::FileSystemState {
        &self.fs.security_state
    }

    /// Returns the name of the fs.
    pub fn fs_name(&self) -> &'static FsStr {
        self.fs.name()
    }

    // Generates read()/write() accessors for `state` (see mutable_state::state_accessor).
    state_accessor!(Mount, state, Arc<Mount>);
}
500
impl MountState {
    /// Returns true if there is a submount on top of `dir_entry`.
    pub fn has_submount(&self, dir_entry: &DirEntryHandle) -> bool {
        self.submounts.contains(ArcKey::ref_cast(dir_entry))
    }

    /// The NamespaceNode on which this Mount is mounted.
    ///
    /// Returns `None` for an unmounted mount (no parent) or if the parent mount has already
    /// been dropped (the parent pointer is weak).
    fn mountpoint(&self) -> Option<NamespaceNode> {
        let (mount, entry) = self.mountpoint.as_ref()?;
        Some(NamespaceNode::new(mount.upgrade()?, entry.clone()))
    }

    /// Return this mount's current peer group.
    fn peer_group(&self) -> Option<&Arc<PeerGroup>> {
        let (group, _) = self.peer_group_.as_ref()?;
        Some(group)
    }

    /// Remove this mount from its peer group and return the peer group.
    ///
    /// If this mount had an upstream, the upstream link is handed over to an arbitrary
    /// remaining member of the old group so propagation into the group keeps working.
    fn take_from_peer_group(&mut self) -> Option<Arc<PeerGroup>> {
        let (old_group, old_mount) = self.peer_group_.take()?;
        old_group.remove(old_mount);
        if let Some(upstream) = self.take_from_upstream() {
            let next_mount =
                old_group.state.read().mounts.iter().next().map(|w| w.0.upgrade().unwrap());
            if let Some(next_mount) = next_mount {
                // TODO(https://fxbug.dev/42065259): Fix the lock ordering here. We've locked next_mount
                // while self is locked, and since the propagation tree and mount tree are
                // separate, this could violate the mount -> submount order previously established.
                next_mount.write().set_upstream(upstream);
            }
        }
        Some(old_group)
    }

    /// The peer group this mount receives propagation from, if it is still alive.
    fn upstream(&self) -> Option<Arc<PeerGroup>> {
        self.upstream_.as_ref().and_then(|g| g.0.upgrade())
    }

    /// Remove this mount from its upstream peer group's downstream set and return that group.
    fn take_from_upstream(&mut self) -> Option<Arc<PeerGroup>> {
        let (old_upstream, old_mount) = self.upstream_.take()?;
        // TODO(tbodt): Reason about whether the upgrade() could possibly return None, and what we
        // should actually do in that case.
        let old_upstream = old_upstream.upgrade()?;
        old_upstream.remove_downstream(old_mount);
        Some(old_upstream)
    }
}
549
// Methods in this block additionally have access to `self.base: &Arc<Mount>` — the Mount that
// owns this state — via the state_implementation! macro.
#[apply(state_implementation!)]
impl MountState<Base = Mount, BaseType = Arc<Mount>> {
    /// Add a child mount *without propagating it to the peer group*. For internal use only.
    fn add_submount_internal(&mut self, dir: &DirEntryHandle, mount: MountHandle) {
        // Propagation can request a mount at a directory that is not under this mount's root;
        // such requests are silently ignored.
        if !dir.is_descendant_of(&self.base.root) {
            return;
        }

        let submount = mount.fs.kernel.upgrade().unwrap().mounts.register_mount(dir, mount.clone());
        let old_mountpoint =
            mount.state.write().mountpoint.replace((Arc::downgrade(self.base), Arc::clone(dir)));
        assert!(old_mountpoint.is_none(), "add_submount can only take a newly created mount");
        // Mount shadowing is implemented by mounting onto the root of the first mount, not by
        // creating two mounts on the same mountpoint.
        let old_mount = self.submounts.replace(submount);

        // In rare cases, mount propagation might result in a request to mount on a directory where
        // something is already mounted. MountTest.LotsOfShadowing will trigger this. Linux handles
        // this by inserting the new mount between the old mount and the current mount.
        if let Some(mut old_mount) = old_mount {
            // Previous state: self[dir] = old_mount
            // New state: self[dir] = new_mount, new_mount[new_mount.root] = old_mount
            // The new mount has already been inserted into self, now just update the old mount to
            // be a child of the new mount.
            old_mount.mount.write().mountpoint = Some((Arc::downgrade(&mount), Arc::clone(dir)));
            old_mount.dir = ArcKey(mount.root.clone());
            mount.write().submounts.insert(old_mount);
        }
    }

    /// Detach the submount keyed by `mount_hash_key`; EINVAL if there is none.
    fn remove_submount_internal(&mut self, mount_hash_key: &ArcKey<DirEntry>) -> Result<(), Errno> {
        if self.submounts.remove(mount_hash_key) { Ok(()) } else { error!(EINVAL) }
    }

    /// Set this mount's peer group.
    fn set_peer_group(&mut self, group: Arc<PeerGroup>) {
        // Leave the previous group (if any) before joining the new one.
        self.take_from_peer_group();
        group.add(self.base);
        self.peer_group_ = Some((group, Arc::as_ptr(self.base).into()));
    }

    /// Set the peer group this mount receives propagation from.
    fn set_upstream(&mut self, group: Arc<PeerGroup>) {
        self.take_from_upstream();
        group.add_downstream(self.base);
        self.upstream_ = Some((Arc::downgrade(&group), Arc::as_ptr(self.base).into()));
    }

    /// Is the mount in a peer group? Corresponds to MS_SHARED.
    pub fn is_shared(&self) -> bool {
        self.peer_group().is_some()
    }

    /// Put the mount in a peer group. Implements MS_SHARED.
    pub fn make_shared(&mut self) {
        // Already shared: MS_SHARED is idempotent, keep the existing group.
        if self.is_shared() {
            return;
        }
        let kernel =
            self.base.fs.kernel.upgrade().expect("can't create new peer group without kernel");
        self.set_peer_group(PeerGroup::new(kernel.get_next_peer_group_id()));
    }

    /// Take the mount out of its peer group, also remove upstream if any. Implements MS_PRIVATE.
    pub fn make_private(&mut self) {
        self.take_from_peer_group();
        self.take_from_upstream();
    }

    /// Take the mount out of its peer group and make it downstream instead. Implements
    /// MountFlags::DOWNSTREAM (MS_SLAVE).
    pub fn make_downstream(&mut self) {
        if let Some(peer_group) = self.take_from_peer_group() {
            self.set_upstream(peer_group);
        }
    }
}
626
impl PeerGroup {
    fn new(id: u64) -> Arc<Self> {
        Arc::new(Self { id, state: Default::default() })
    }

    /// Record `mount` as a member of this group (weak reference).
    fn add(&self, mount: &Arc<Mount>) {
        self.state.write().mounts.insert(WeakKey::from(mount));
    }

    /// Remove the member identified by its pointer key.
    fn remove(&self, mount: PtrKey<Mount>) {
        self.state.write().mounts.remove(&mount);
    }

    /// Record `mount` as receiving propagation from this group (weak reference).
    fn add_downstream(&self, mount: &Arc<Mount>) {
        self.state.write().downstream.insert(WeakKey::from(mount));
    }

    /// Remove the downstream mount identified by its pointer key.
    fn remove_downstream(&self, mount: PtrKey<Mount>) {
        self.state.write().downstream.remove(&mount);
    }

    /// Returns all mounts that should receive propagated mount events from this group:
    /// the group's own members plus, transitively, all downstream mounts.
    fn copy_propagation_targets(&self) -> Vec<MountHandle> {
        let mut buf = vec![];
        self.collect_propagation_targets(&mut buf);
        buf
    }

    fn collect_propagation_targets(&self, buf: &mut Vec<MountHandle>) {
        // Collect upgraded handles inside a short-lived read lock, then recurse without the
        // lock held (the recursion locks other PeerGroups' state).
        let downstream_mounts: Vec<_> = {
            let state = self.state.read();
            buf.extend(state.mounts.iter().filter_map(|m| m.0.upgrade()));
            state.downstream.iter().filter_map(|m| m.0.upgrade()).collect()
        };
        for mount in downstream_mounts {
            let peer_group = mount.read().peer_group().map(Arc::clone);
            match peer_group {
                Some(group) => group.collect_propagation_targets(buf),
                None => buf.push(mount),
            }
        }
    }
}
669
impl Drop for Mount {
    fn drop(&mut self) {
        // Unlink this mount from its peer group and upstream so the group's weak-key sets do
        // not accumulate dead entries. `get_mut` is lock-free: we have exclusive access in drop.
        let state = self.state.get_mut();
        state.take_from_peer_group();
        state.take_from_upstream();
    }
}
677
678impl fmt::Debug for Mount {
679    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
680        let state = self.state.read();
681        f.debug_struct("Mount")
682            .field("id", &(self as *const Mount))
683            .field("root", &self.root)
684            .field("mountpoint", &state.mountpoint)
685            .field("submounts", &state.submounts)
686            .finish()
687    }
688}
689
// Id allocators for mounts, peer groups, and namespaces; each delegates to a kernel-wide
// monotonic counter.
impl Kernel {
    pub fn get_next_mount_id(&self) -> u64 {
        self.next_mount_id.next()
    }

    pub fn get_next_peer_group_id(&self) -> u64 {
        self.next_peer_group_id.next()
    }

    pub fn get_next_namespace_id(&self) -> u64 {
        self.next_namespace_id.next()
    }
}
703
impl CurrentTask {
    /// Creates a filesystem instance of type `fs_type` with `options`, by looking the type up in
    /// the kernel's `FsRegistry`. Fails with ENODEV if no such filesystem type is registered.
    pub fn create_filesystem(
        &self,
        locked: &mut Locked<Unlocked>,
        fs_type: &FsStr,
        options: FileSystemOptions,
    ) -> Result<FileSystemHandle, Errno> {
        // Please register new file systems via //src/starnix/modules/lib.rs, even if the file
        // system is implemented inside starnix_core.
        //
        // Most file systems should be implemented as modules. The VFS provides various traits that
        // let starnix_core integrate file systems without needing to depend on the file systems
        // directly.
        self.kernel()
            .expando
            .get::<FsRegistry>()
            .create(locked, self, fs_type, options)
            .ok_or_else(|| errno!(ENODEV, fs_type))?
    }
}
724
// Writes to `sink` the mount flags and LSM mount options for the given `mount`.
// Used when generating /proc/pid/mounts-style output.
fn write_mount_info(task: &Task, sink: &mut DynamicFileBuf, mount: &Mount) -> Result<(), Errno> {
    write!(sink, "{}", mount.flags())?;
    security::sb_show_options(&task.kernel(), sink, &mount)
}
730
/// Content generator for /proc/pid/mounts: one line per mount visible from the task's root.
struct ProcMountsFileSource(WeakRef<Task>);

impl DynamicFileSource for ProcMountsFileSource {
    fn generate(
        &self,
        _current_task: &CurrentTask,
        sink: &mut DynamicFileBuf,
    ) -> Result<(), Errno> {
        // TODO(tbodt): We should figure out a way to have a real iterator instead of grabbing the
        // entire list in one go. Should we have a BTreeMap<u64, Weak<Mount>> in the Namespace?
        // Also has the benefit of correct (i.e. chronological) ordering. But then we have to do
        // extra work to maintain it.
        let task = Task::from_weak(&self.0)?;
        let root = task.fs().root();
        let ns = task.fs().namespace();
        for_each_mount(&ns.root_mount, &mut |mount| {
            // The root mount has no mountpoint; display it at its own root.
            let mountpoint = mount.read().mountpoint().unwrap_or_else(|| mount.root());
            // Mounts outside the task's chroot are not visible.
            if !mountpoint.is_descendant_of(&root) {
                return Ok(());
            }
            // Format: <source> <mountpoint> <fstype> <options> 0 0 (see proc(5)).
            write!(
                sink,
                "{} {} {} ",
                mount.fs.options.source_for_display(),
                mountpoint.path(&task),
                mount.fs.name(),
            )?;
            write_mount_info(&task, sink, mount)?;
            writeln!(sink, " 0 0")?;
            Ok(())
        })?;
        Ok(())
    }
}
765
/// The /proc/pid/mounts file: content is produced lazily by `ProcMountsFileSource`.
pub struct ProcMountsFile {
    dynamic_file: DynamicFile<ProcMountsFileSource>,
}

impl ProcMountsFile {
    /// Creates the FsNode for /proc/pid/mounts; each open builds a fresh dynamic file bound
    /// to (a weak reference to) `task`.
    pub fn new_node(task: WeakRef<Task>) -> impl FsNodeOps {
        SimpleFileNode::new(move |_, _| {
            Ok(Self { dynamic_file: DynamicFile::new(ProcMountsFileSource(task.clone())) })
        })
    }
}
777
impl FileOps for ProcMountsFile {
    // Reads/seeks are served by the inner DynamicFile.
    fileops_impl_delegate_read_write_and_seek!(self, self.dynamic_file);
    fileops_impl_noop_sync!();

    fn wait_async(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        _file: &FileObject,
        _current_task: &CurrentTask,
        waiter: &Waiter,
        _events: FdEvents,
        _handler: EventHandler,
    ) -> Option<WaitCanceler> {
        // Polling this file gives notifications when any change to mounts occurs. This is not
        // implemented yet, but stubbed for Android init.
        Some(waiter.fake_wait())
    }

    fn query_events(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        _file: &FileObject,
        _current_task: &CurrentTask,
    ) -> Result<FdEvents, Errno> {
        // No mount-change events are ever reported (see wait_async above).
        Ok(FdEvents::empty())
    }
}
805
/// Data source for `/proc/<pid>/mountinfo`. Holds a weak reference to the task whose
/// mount namespace is rendered.
#[derive(Clone)]
pub struct ProcMountinfoFile(WeakRef<Task>);
808impl ProcMountinfoFile {
809    pub fn new_node(task: WeakRef<Task>) -> impl FsNodeOps {
810        DynamicFile::new_node(Self(task))
811    }
812}
impl DynamicFileSource for ProcMountinfoFile {
    /// Writes the contents of `/proc/<pid>/mountinfo`: one line per mount reachable
    /// from the task's root, in the form
    /// `<id> <parent id> <dev> <root> <mountpoint> <options> [shared:N] [master:N] - <fstype> <source> <fs flags>`.
    fn generate(
        &self,
        _current_task: &CurrentTask,
        sink: &mut DynamicFileBuf,
    ) -> Result<(), Errno> {
        // Returns path to the `dir` from the root of the file system.
        fn path_from_fs_root(dir: &DirEntryHandle) -> FsString {
            let mut path = PathBuilder::new();
            if dir.is_dead() {
                // Return `/foo/dir//deleted` if the dir was deleted.
                path.prepend_element("/deleted".into());
            }
            // Walk parent links up to the filesystem root under a single RCU read scope.
            let scope = RcuReadScope::new();
            let mut current = dir.deref();
            while let Some(parent) = current.parent_ref(&scope) {
                path.prepend_element(current.local_name(&scope));
                current = parent;
            }
            path.build_absolute()
        }

        // TODO(tbodt): We should figure out a way to have a real iterator instead of grabbing the
        // entire list in one go. Should we have a BTreeMap<u64, Weak<Mount>> in the Namespace?
        // Also has the benefit of correct (i.e. chronological) ordering. But then we have to do
        // extra work to maintain it.
        let task = Task::from_weak(&self.0)?;
        let root = task.fs().root();
        let ns = task.fs().namespace();
        for_each_mount(&ns.root_mount, &mut |mount| {
            // Fall back to the mount's own root when it has no mountpoint.
            let mountpoint = mount.read().mountpoint().unwrap_or_else(|| mount.root());
            // Skip mounts that are not visible from this task's root (e.g. under chroot).
            if !mountpoint.is_descendant_of(&root) {
                return Ok(());
            }
            // Can't fail, mountpoint() and root() can't return a NamespaceNode with no mount
            let parent = mountpoint.mount.as_ref().unwrap();
            write!(
                sink,
                "{} {} {} {} {} ",
                mount.id,
                parent.id,
                mount.root.node.fs().dev_id,
                path_from_fs_root(&mount.root),
                mountpoint.path(&task),
            )?;
            // Mount flags plus security options (shared with /proc/<pid>/mounts).
            write_mount_info(&task, sink, mount)?;
            // Optional propagation fields: peer group and propagate-from master.
            if let Some(peer_group) = mount.read().peer_group() {
                write!(sink, " shared:{}", peer_group.id)?;
            }
            if let Some(upstream) = mount.read().upstream() {
                write!(sink, " master:{}", upstream.id)?;
            }
            writeln!(
                sink,
                " - {} {} {}",
                mount.fs.name(),
                mount.fs.options.source_for_display(),
                mount.fs.options.flags,
            )?;
            Ok(())
        })?;
        Ok(())
    }
}
877
878fn for_each_mount<E>(
879    mount: &MountHandle,
880    callback: &mut impl FnMut(&MountHandle) -> Result<(), E>,
881) -> Result<(), E> {
882    callback(mount)?;
883    // Collect list first to avoid self deadlock when ProcMountinfoFile::read_at tries to call
884    // NamespaceNode::path()
885    let submounts: Vec<_> = mount.read().submounts.iter().map(|s| s.mount.clone()).collect();
886    for submount in submounts {
887        for_each_mount(&submount, callback)?;
888    }
889    Ok(())
890}
891
/// The `SymlinkMode` enum encodes how symlinks are followed during path traversal.
///
/// The derived `Default` is `Follow`, which matches the behavior of most syscalls.
#[derive(Default, PartialEq, Eq, Copy, Clone, Debug)]
pub enum SymlinkMode {
    /// Follow a symlink at the end of a path resolution.
    #[default]
    Follow,

    /// Do not follow a symlink at the end of a path resolution.
    NoFollow,
}
902
/// The maximum number of symlink traversals that can be made during path resolution.
///
/// Matches Linux's `MAXSYMLINKS` limit of 40; exceeding it produces `ELOOP`.
pub const MAX_SYMLINK_FOLLOWS: u8 = 40;
905
/// The context passed during namespace lookups.
///
/// Namespace lookups need to mutate a shared context in order to correctly
/// count the number of remaining symlink traversals.
pub struct LookupContext {
    /// The SymlinkMode for the lookup.
    ///
    /// Determines whether a symlink at the end of the path is followed.
    pub symlink_mode: SymlinkMode,

    /// The number of symlinks remaining to follow.
    ///
    /// Each time path resolution calls readlink, this value is decremented;
    /// when it reaches zero the lookup fails with `ELOOP`.
    pub remaining_follows: u8,

    /// Whether the result of the lookup must be a directory.
    ///
    /// For example, if the path ends with a `/` or if userspace passes
    /// O_DIRECTORY. This flag can be set to true if the lookup encounters a
    /// symlink that ends with a `/`.
    pub must_be_directory: bool,

    /// Resolve flags passed to `openat2`. Empty if the lookup originated in any other syscall.
    pub resolve_flags: ResolveFlags,

    /// Base directory for the lookup. Set only when either `RESOLVE_BENEATH` or `RESOLVE_IN_ROOT`
    /// is passed to `openat2`.
    pub resolve_base: ResolveBase,
}
936
/// Used to specify base directory in `LookupContext` for lookups originating in the `openat2`
/// syscall with either `RESOLVE_BENEATH` or `RESOLVE_IN_ROOT` flag.
#[derive(Clone, Eq, PartialEq)]
pub enum ResolveBase {
    /// No base-directory restriction applies.
    None,

    /// The lookup is not allowed to traverse any node that's not beneath the specified node.
    Beneath(NamespaceNode),

    /// The lookup should be handled as if the specified node were the file-system root.
    InRoot(NamespaceNode),
}
949
950impl LookupContext {
951    pub fn new(symlink_mode: SymlinkMode) -> LookupContext {
952        LookupContext {
953            symlink_mode,
954            remaining_follows: MAX_SYMLINK_FOLLOWS,
955            must_be_directory: false,
956            resolve_flags: ResolveFlags::empty(),
957            resolve_base: ResolveBase::None,
958        }
959    }
960
961    pub fn with(&self, symlink_mode: SymlinkMode) -> LookupContext {
962        LookupContext { symlink_mode, resolve_base: self.resolve_base.clone(), ..*self }
963    }
964
965    pub fn update_for_path(&mut self, path: &FsStr) {
966        if path.last() == Some(&b'/') {
967            // The last path element must resolve to a directory. This is because a trailing slash
968            // was found in the path.
969            self.must_be_directory = true;
970            // If the last path element is a symlink, we should follow it.
971            // See https://pubs.opengroup.org/onlinepubs/9699919799/xrat/V4_xbd_chap03.html#tag_21_03_00_75
972            self.symlink_mode = SymlinkMode::Follow;
973        }
974    }
975}
976
977impl Default for LookupContext {
978    fn default() -> Self {
979        LookupContext::new(SymlinkMode::Follow)
980    }
981}
982
/// Whether the path is reachable from the given root.
///
/// Both variants carry the rendered path; see `into_path` to extract it regardless
/// of reachability.
pub enum PathWithReachability {
    /// The path is reachable from the given root.
    Reachable(FsString),

    /// The path is not reachable from the given root.
    Unreachable(FsString),
}
991
992impl PathWithReachability {
993    pub fn into_path(self) -> FsString {
994        match self {
995            PathWithReachability::Reachable(path) => path,
996            PathWithReachability::Unreachable(path) => path,
997        }
998    }
999}
1000
/// A node in a mount namespace.
///
/// This tree is a composite of the mount tree and the FsNode tree.
///
/// These nodes are used when traversing paths in a namespace in order to
/// present the client the directory structure that includes the mounted
/// filesystems.
///
/// Equality and hashing are by pointer identity of the mount and the entry.
#[derive(Clone)]
pub struct NamespaceNode {
    /// The mount where this namespace node is mounted.
    ///
    /// A given FsNode can be mounted in multiple places in a namespace. This
    /// field distinguishes between them.
    pub mount: MountInfo,

    /// The FsNode that corresponds to this namespace entry.
    pub entry: DirEntryHandle,
}
1019
1020impl NamespaceNode {
1021    pub fn new(mount: MountHandle, entry: DirEntryHandle) -> Self {
1022        Self { mount: Some(mount).into(), entry }
1023    }
1024
1025    /// Create a namespace node that is not mounted in a namespace.
1026    pub fn new_anonymous(entry: DirEntryHandle) -> Self {
1027        Self { mount: None.into(), entry }
1028    }
1029
1030    /// Create a namespace node that is not mounted in a namespace and that refers to a node that
1031    /// is not rooted in a hierarchy and has no name.
1032    pub fn new_anonymous_unrooted(current_task: &CurrentTask, node: FsNodeHandle) -> Self {
1033        let dir_entry = DirEntry::new_unrooted(node);
1034        let _ = security::fs_node_init_with_dentry_no_xattr(current_task, &dir_entry);
1035        Self::new_anonymous(dir_entry)
1036    }
1037
1038    /// Create a FileObject corresponding to this namespace node.
1039    ///
1040    /// This function is the primary way of instantiating FileObjects. Each
1041    /// FileObject records the NamespaceNode that created it in order to
1042    /// remember its path in the Namespace.
1043    pub fn open(
1044        &self,
1045        locked: &mut Locked<Unlocked>,
1046        current_task: &CurrentTask,
1047        flags: OpenFlags,
1048        access_check: AccessCheck,
1049    ) -> Result<FileHandle, Errno> {
1050        let ops = self.entry.node.open(locked, current_task, self, flags, access_check)?;
1051        FileObject::new(locked, current_task, ops, self.clone(), flags)
1052    }
1053
1054    /// Create or open a node in the file system.
1055    ///
1056    /// Works for any type of node other than a symlink.
1057    ///
1058    /// Will return an existing node unless `flags` contains `OpenFlags::EXCL`.
1059    pub fn open_create_node<L>(
1060        &self,
1061        locked: &mut Locked<L>,
1062        current_task: &CurrentTask,
1063        name: &FsStr,
1064        mode: FileMode,
1065        dev: DeviceType,
1066        flags: OpenFlags,
1067    ) -> Result<NamespaceNode, Errno>
1068    where
1069        L: LockEqualOrBefore<FileOpsCore>,
1070    {
1071        let owner = current_task.current_fscred();
1072        let mode = current_task.fs().apply_umask(mode);
1073        let create_fn =
1074            |locked: &mut Locked<L>, dir: &FsNodeHandle, mount: &MountInfo, name: &_| {
1075                dir.create_node(locked, current_task, mount, name, mode, dev, owner)
1076            };
1077        let entry = if flags.contains(OpenFlags::EXCL) {
1078            self.entry.create_entry(locked, current_task, &self.mount, name, create_fn)
1079        } else {
1080            self.entry.get_or_create_entry(locked, current_task, &self.mount, name, create_fn)
1081        }?;
1082        Ok(self.with_new_entry(entry))
1083    }
1084
1085    pub fn into_active(self) -> ActiveNamespaceNode {
1086        ActiveNamespaceNode::new(self)
1087    }
1088
1089    pub fn into_mapping(self, mode: Option<FileWriteGuardMode>) -> Result<Arc<FileMapping>, Errno> {
1090        self.into_active().into_mapping(mode)
1091    }
1092
1093    /// Create a node in the file system.
1094    ///
1095    /// Works for any type of node other than a symlink.
1096    ///
1097    /// Does not return an existing node.
1098    pub fn create_node<L>(
1099        &self,
1100        locked: &mut Locked<L>,
1101        current_task: &CurrentTask,
1102        name: &FsStr,
1103        mode: FileMode,
1104        dev: DeviceType,
1105    ) -> Result<NamespaceNode, Errno>
1106    where
1107        L: LockEqualOrBefore<FileOpsCore>,
1108    {
1109        let owner = current_task.current_fscred();
1110        let mode = current_task.fs().apply_umask(mode);
1111        let entry = self.entry.create_entry(
1112            locked,
1113            current_task,
1114            &self.mount,
1115            name,
1116            |locked, dir, mount, name| {
1117                dir.create_node(locked, current_task, mount, name, mode, dev, owner)
1118            },
1119        )?;
1120        Ok(self.with_new_entry(entry))
1121    }
1122
1123    /// Create a symlink in the file system.
1124    ///
1125    /// To create another type of node, use `create_node`.
1126    pub fn create_symlink<L>(
1127        &self,
1128        locked: &mut Locked<L>,
1129        current_task: &CurrentTask,
1130        name: &FsStr,
1131        target: &FsStr,
1132    ) -> Result<NamespaceNode, Errno>
1133    where
1134        L: LockEqualOrBefore<FileOpsCore>,
1135    {
1136        let owner = current_task.current_fscred();
1137        let entry = self.entry.create_entry(
1138            locked,
1139            current_task,
1140            &self.mount,
1141            name,
1142            |locked, dir, mount, name| {
1143                dir.create_symlink(locked, current_task, mount, name, target, owner)
1144            },
1145        )?;
1146        Ok(self.with_new_entry(entry))
1147    }
1148
1149    /// Creates an anonymous file.
1150    ///
1151    /// The FileMode::IFMT of the FileMode is always FileMode::IFREG.
1152    ///
1153    /// Used by O_TMPFILE.
1154    pub fn create_tmpfile<L>(
1155        &self,
1156        locked: &mut Locked<L>,
1157        current_task: &CurrentTask,
1158        mode: FileMode,
1159        flags: OpenFlags,
1160    ) -> Result<NamespaceNode, Errno>
1161    where
1162        L: LockEqualOrBefore<FileOpsCore>,
1163    {
1164        let owner = current_task.current_fscred();
1165        let mode = current_task.fs().apply_umask(mode);
1166        Ok(self.with_new_entry(self.entry.create_tmpfile(
1167            locked,
1168            current_task,
1169            &self.mount,
1170            mode,
1171            owner,
1172            flags,
1173        )?))
1174    }
1175
1176    pub fn link<L>(
1177        &self,
1178        locked: &mut Locked<L>,
1179        current_task: &CurrentTask,
1180        name: &FsStr,
1181        child: &FsNodeHandle,
1182    ) -> Result<NamespaceNode, Errno>
1183    where
1184        L: LockEqualOrBefore<FileOpsCore>,
1185    {
1186        let dir_entry = self.entry.create_entry(
1187            locked,
1188            current_task,
1189            &self.mount,
1190            name,
1191            |locked, dir, mount, name| dir.link(locked, current_task, mount, name, child),
1192        )?;
1193        Ok(self.with_new_entry(dir_entry))
1194    }
1195
1196    pub fn bind_socket<L>(
1197        &self,
1198        locked: &mut Locked<L>,
1199        current_task: &CurrentTask,
1200        name: &FsStr,
1201        socket: SocketHandle,
1202        socket_address: SocketAddress,
1203        mode: FileMode,
1204    ) -> Result<NamespaceNode, Errno>
1205    where
1206        L: LockEqualOrBefore<FileOpsCore>,
1207    {
1208        let dir_entry = self.entry.create_entry(
1209            locked,
1210            current_task,
1211            &self.mount,
1212            name,
1213            |locked, dir, mount, name| {
1214                let node = dir.create_node(
1215                    locked,
1216                    current_task,
1217                    mount,
1218                    name,
1219                    mode,
1220                    DeviceType::NONE,
1221                    current_task.current_fscred(),
1222                )?;
1223                if let Some(unix_socket) = socket.downcast_socket::<UnixSocket>() {
1224                    unix_socket.bind_socket_to_node(&socket, socket_address, &node)?;
1225                } else {
1226                    return error!(ENOTSUP);
1227                }
1228                Ok(node)
1229            },
1230        )?;
1231        Ok(self.with_new_entry(dir_entry))
1232    }
1233
1234    pub fn unlink<L>(
1235        &self,
1236        locked: &mut Locked<L>,
1237        current_task: &CurrentTask,
1238        name: &FsStr,
1239        kind: UnlinkKind,
1240        must_be_directory: bool,
1241    ) -> Result<(), Errno>
1242    where
1243        L: LockEqualOrBefore<FileOpsCore>,
1244    {
1245        if DirEntry::is_reserved_name(name) {
1246            match kind {
1247                UnlinkKind::Directory => {
1248                    if name == ".." {
1249                        error!(ENOTEMPTY)
1250                    } else if self.parent().is_none() {
1251                        // The client is attempting to remove the root.
1252                        error!(EBUSY)
1253                    } else {
1254                        error!(EINVAL)
1255                    }
1256                }
1257                UnlinkKind::NonDirectory => error!(ENOTDIR),
1258            }
1259        } else {
1260            self.entry.unlink(locked, current_task, &self.mount, name, kind, must_be_directory)
1261        }
1262    }
1263
1264    /// Traverse down a parent-to-child link in the namespace.
1265    pub fn lookup_child<L>(
1266        &self,
1267        locked: &mut Locked<L>,
1268        current_task: &CurrentTask,
1269        context: &mut LookupContext,
1270        basename: &FsStr,
1271    ) -> Result<NamespaceNode, Errno>
1272    where
1273        L: LockEqualOrBefore<FileOpsCore>,
1274    {
1275        if !self.entry.node.is_dir() {
1276            return error!(ENOTDIR);
1277        }
1278
1279        if basename.len() > NAME_MAX as usize {
1280            return error!(ENAMETOOLONG);
1281        }
1282
1283        let child = if basename.is_empty() || basename == "." {
1284            self.clone()
1285        } else if basename == ".." {
1286            let root = match &context.resolve_base {
1287                ResolveBase::None => current_task.fs().root(),
1288                ResolveBase::Beneath(node) => {
1289                    // Do not allow traversal out of the 'node'.
1290                    if *self == *node {
1291                        return error!(EXDEV);
1292                    }
1293                    current_task.fs().root()
1294                }
1295                ResolveBase::InRoot(root) => root.clone(),
1296            };
1297
1298            // Make sure this can't escape a chroot.
1299            if *self == root { root } else { self.parent().unwrap_or_else(|| self.clone()) }
1300        } else {
1301            let mut child = self.with_new_entry(self.entry.component_lookup(
1302                locked,
1303                current_task,
1304                &self.mount,
1305                basename,
1306            )?);
1307            while child.entry.node.is_lnk() {
1308                match context.symlink_mode {
1309                    SymlinkMode::NoFollow => {
1310                        break;
1311                    }
1312                    SymlinkMode::Follow => {
1313                        if context.remaining_follows == 0
1314                            || context.resolve_flags.contains(ResolveFlags::NO_SYMLINKS)
1315                        {
1316                            return error!(ELOOP);
1317                        }
1318                        context.remaining_follows -= 1;
1319                        child = match child.readlink(locked, current_task)? {
1320                            SymlinkTarget::Path(link_target) => {
1321                                let link_directory = if link_target[0] == b'/' {
1322                                    // If the path is absolute, we'll resolve the root directory.
1323                                    match &context.resolve_base {
1324                                        ResolveBase::None => current_task.fs().root(),
1325                                        ResolveBase::Beneath(_) => return error!(EXDEV),
1326                                        ResolveBase::InRoot(root) => root.clone(),
1327                                    }
1328                                } else {
1329                                    // If the path is not absolute, it's a relative directory. Let's
1330                                    // try to get the parent of the current child, or in the case
1331                                    // that the child is the root we can just use that directly.
1332                                    child.parent().unwrap_or(child)
1333                                };
1334                                current_task.lookup_path(
1335                                    locked,
1336                                    context,
1337                                    link_directory,
1338                                    link_target.as_ref(),
1339                                )?
1340                            }
1341                            SymlinkTarget::Node(node) => {
1342                                if context.resolve_flags.contains(ResolveFlags::NO_MAGICLINKS) {
1343                                    return error!(ELOOP);
1344                                }
1345                                node
1346                            }
1347                        }
1348                    }
1349                };
1350            }
1351
1352            child.enter_mount()
1353        };
1354
1355        if context.resolve_flags.contains(ResolveFlags::NO_XDEV) && child.mount != self.mount {
1356            return error!(EXDEV);
1357        }
1358
1359        if context.must_be_directory && !child.entry.node.is_dir() {
1360            return error!(ENOTDIR);
1361        }
1362
1363        Ok(child)
1364    }
1365
1366    /// Traverse up a child-to-parent link in the namespace.
1367    ///
1368    /// This traversal matches the child-to-parent link in the underlying
1369    /// FsNode except at mountpoints, where the link switches from one
1370    /// filesystem to another.
1371    pub fn parent(&self) -> Option<NamespaceNode> {
1372        let mountpoint_or_self = self.escape_mount();
1373        let parent = mountpoint_or_self.entry.parent()?;
1374        Some(mountpoint_or_self.with_new_entry(parent))
1375    }
1376
1377    /// Returns the parent, but does not escape mounts i.e. returns None if this node
1378    /// is the root of a mount.
1379    pub fn parent_within_mount(&self) -> Option<DirEntryHandle> {
1380        if let Ok(_) = self.mount_if_root() {
1381            return None;
1382        }
1383        self.entry.parent()
1384    }
1385
1386    /// Whether this namespace node is a descendant of the given node.
1387    ///
1388    /// Walks up the namespace node tree looking for ancestor. If ancestor is
1389    /// found, returns true. Otherwise, returns false.
1390    pub fn is_descendant_of(&self, ancestor: &NamespaceNode) -> bool {
1391        let ancestor = ancestor.escape_mount();
1392        let mut current = self.escape_mount();
1393        while current != ancestor {
1394            if let Some(parent) = current.parent() {
1395                current = parent.escape_mount();
1396            } else {
1397                return false;
1398            }
1399        }
1400        true
1401    }
1402
1403    /// If this is a mount point, return the root of the mount. Otherwise return self.
1404    fn enter_mount(&self) -> NamespaceNode {
1405        // While the child is a mountpoint, replace child with the mount's root.
1406        fn enter_one_mount(node: &NamespaceNode) -> Option<NamespaceNode> {
1407            if let Some(mount) = node.mount.deref() {
1408                if let Some(submount) =
1409                    mount.state.read().submounts.get(ArcKey::ref_cast(&node.entry))
1410                {
1411                    return Some(submount.mount.root());
1412                }
1413            }
1414            None
1415        }
1416        let mut inner = self.clone();
1417        while let Some(inner_root) = enter_one_mount(&inner) {
1418            inner = inner_root;
1419        }
1420        inner
1421    }
1422
1423    /// If this is the root of a mount, return the mount point. Otherwise return self.
1424    ///
1425    /// This is not exactly the same as parent(). If parent() is called on a root, it will escape
1426    /// the mount, but then return the parent of the mount point instead of the mount point.
1427    fn escape_mount(&self) -> NamespaceNode {
1428        let mut mountpoint_or_self = self.clone();
1429        while let Some(mountpoint) = mountpoint_or_self.mountpoint() {
1430            mountpoint_or_self = mountpoint;
1431        }
1432        mountpoint_or_self
1433    }
1434
1435    /// If this node is the root of a mount, return it. Otherwise EINVAL.
1436    pub fn mount_if_root(&self) -> Result<&MountHandle, Errno> {
1437        if let Some(mount) = self.mount.deref() {
1438            if Arc::ptr_eq(&self.entry, &mount.root) {
1439                return Ok(mount);
1440            }
1441        }
1442        error!(EINVAL)
1443    }
1444
1445    /// Returns the mountpoint at this location in the namespace.
1446    ///
1447    /// If this node is mounted in another node, this function returns the node
1448    /// at which this node is mounted. Otherwise, returns None.
1449    fn mountpoint(&self) -> Option<NamespaceNode> {
1450        self.mount_if_root().ok()?.read().mountpoint()
1451    }
1452
1453    /// The path from the task's root to this node.
1454    pub fn path(&self, task: &Task) -> FsString {
1455        self.path_from_root(Some(&task.fs().root())).into_path()
1456    }
1457
1458    /// The path from the root of the namespace to this node.
1459    pub fn path_escaping_chroot(&self) -> FsString {
1460        self.path_from_root(None).into_path()
1461    }
1462
1463    /// Returns the path to this node, accounting for a custom root.
1464    /// A task may have a custom root set by `chroot`.
1465    pub fn path_from_root(&self, root: Option<&NamespaceNode>) -> PathWithReachability {
1466        if self.mount.is_none() {
1467            return PathWithReachability::Reachable(self.entry.node.internal_name());
1468        }
1469
1470        let mut path = PathBuilder::new();
1471        let mut current = self.escape_mount();
1472        if let Some(root) = root {
1473            let scope = RcuReadScope::new();
1474            // The current node is expected to intersect with the custom root as we travel up the tree.
1475            let root = root.escape_mount();
1476            while current != root {
1477                if let Some(parent) = current.parent() {
1478                    path.prepend_element(current.entry.local_name(&scope));
1479                    current = parent.escape_mount();
1480                } else {
1481                    // This node hasn't intersected with the custom root and has reached the namespace root.
1482                    let mut absolute_path = path.build_absolute();
1483                    if self.entry.is_dead() {
1484                        absolute_path.extend_from_slice(b" (deleted)");
1485                    }
1486
1487                    return PathWithReachability::Unreachable(absolute_path);
1488                }
1489            }
1490        } else {
1491            // No custom root, so travel up the tree to the namespace root.
1492            let scope = RcuReadScope::new();
1493            while let Some(parent) = current.parent() {
1494                path.prepend_element(current.entry.local_name(&scope));
1495                current = parent.escape_mount();
1496            }
1497        }
1498
1499        let mut absolute_path = path.build_absolute();
1500        if self.entry.is_dead() {
1501            absolute_path.extend_from_slice(b" (deleted)");
1502        }
1503
1504        PathWithReachability::Reachable(absolute_path)
1505    }
1506
1507    pub fn mount(&self, what: WhatToMount, flags: MountFlags) -> Result<(), Errno> {
1508        let flags = flags & (MountFlags::STORED_ON_MOUNT | MountFlags::REC);
1509        let mountpoint = self.enter_mount();
1510        let mount = mountpoint.mount.as_ref().expect("a mountpoint must be part of a mount");
1511        mount.create_submount(&mountpoint.entry, what, flags);
1512        Ok(())
1513    }
1514
1515    /// If this is the root of a filesystem, unmount. Otherwise return EINVAL.
1516    pub fn unmount(&self, flags: UnmountFlags) -> Result<(), Errno> {
1517        let mount = self.enter_mount().mount_if_root()?.clone();
1518        mount.unmount(flags)
1519    }
1520
1521    pub fn rename<L>(
1522        locked: &mut Locked<L>,
1523        current_task: &CurrentTask,
1524        old_parent: &NamespaceNode,
1525        old_name: &FsStr,
1526        new_parent: &NamespaceNode,
1527        new_name: &FsStr,
1528        flags: RenameFlags,
1529    ) -> Result<(), Errno>
1530    where
1531        L: LockEqualOrBefore<FileOpsCore>,
1532    {
1533        DirEntry::rename(
1534            locked,
1535            current_task,
1536            &old_parent.entry,
1537            &old_parent.mount,
1538            old_name,
1539            &new_parent.entry,
1540            &new_parent.mount,
1541            new_name,
1542            flags,
1543        )
1544    }
1545
1546    fn with_new_entry(&self, entry: DirEntryHandle) -> NamespaceNode {
1547        Self { mount: self.mount.clone(), entry }
1548    }
1549
1550    fn mount_hash_key(&self) -> &ArcKey<DirEntry> {
1551        ArcKey::ref_cast(&self.entry)
1552    }
1553
1554    pub fn suid_and_sgid(&self, current_task: &CurrentTask) -> Result<UserAndOrGroupId, Errno> {
1555        if self.mount.flags().contains(MountFlags::NOSUID) {
1556            Ok(UserAndOrGroupId::default())
1557        } else {
1558            self.entry.node.info().suid_and_sgid(current_task, &self.entry.node)
1559        }
1560    }
1561
1562    pub fn update_atime(&self) {
1563        // Do not update the atime of this node if it is mounted with the NOATIME flag.
1564        if !self.mount.flags().contains(MountFlags::NOATIME) {
1565            self.entry.node.update_info(|info| {
1566                let now = utc::utc_now();
1567                info.time_access = now;
1568                info.pending_time_access_update = true;
1569            });
1570        }
1571    }
1572
1573    pub fn readlink<L>(
1574        &self,
1575        locked: &mut Locked<L>,
1576        current_task: &CurrentTask,
1577    ) -> Result<SymlinkTarget, Errno>
1578    where
1579        L: LockEqualOrBefore<FileOpsCore>,
1580    {
1581        self.update_atime();
1582        self.entry.node.readlink(locked, current_task)
1583    }
1584
1585    pub fn notify(&self, event_mask: InotifyMask) {
1586        if self.mount.is_some() {
1587            self.entry.notify(event_mask);
1588        }
1589    }
1590
1591    /// Check whether the node can be accessed in the current context with the specified access
1592    /// flags (read, write, or exec). Accounts for capabilities and whether the current user is the
1593    /// owner or is in the file's group.
1594    pub fn check_access<L>(
1595        &self,
1596        locked: &mut Locked<L>,
1597        current_task: &CurrentTask,
1598        permission_flags: impl Into<security::PermissionFlags>,
1599        reason: CheckAccessReason,
1600    ) -> Result<(), Errno>
1601    where
1602        L: LockEqualOrBefore<FileOpsCore>,
1603    {
1604        self.entry.node.check_access(
1605            locked,
1606            current_task,
1607            &self.mount,
1608            permission_flags,
1609            reason,
1610            self,
1611        )
1612    }
1613
1614    /// Checks if O_NOATIME is allowed,
1615    pub fn check_o_noatime_allowed(&self, current_task: &CurrentTask) -> Result<(), Errno> {
1616        self.entry.node.check_o_noatime_allowed(current_task)
1617    }
1618
1619    pub fn truncate<L>(
1620        &self,
1621        locked: &mut Locked<L>,
1622        current_task: &CurrentTask,
1623        length: u64,
1624    ) -> Result<(), Errno>
1625    where
1626        L: LockBefore<BeforeFsNodeAppend>,
1627    {
1628        self.entry.node.truncate(locked, current_task, &self.mount, length)?;
1629        self.entry.notify_ignoring_excl_unlink(InotifyMask::MODIFY);
1630        Ok(())
1631    }
1632}
1633
1634impl fmt::Debug for NamespaceNode {
1635    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1636        f.debug_struct("NamespaceNode")
1637            .field("path", &self.path_escaping_chroot())
1638            .field("mount", &self.mount)
1639            .field("entry", &self.entry)
1640            .finish()
1641    }
1642}
1643
1644// Eq/Hash impls intended for the MOUNT_POINTS hash
1645impl PartialEq for NamespaceNode {
1646    fn eq(&self, other: &Self) -> bool {
1647        self.mount.as_ref().map(Arc::as_ptr).eq(&other.mount.as_ref().map(Arc::as_ptr))
1648            && Arc::ptr_eq(&self.entry, &other.entry)
1649    }
1650}
1651impl Eq for NamespaceNode {}
1652impl Hash for NamespaceNode {
1653    fn hash<H: Hasher>(&self, state: &mut H) {
1654        self.mount.as_ref().map(Arc::as_ptr).hash(state);
1655        Arc::as_ptr(&self.entry).hash(state);
1656    }
1657}
1658
/// A namespace node that keeps the underlying mount busy.
///
/// Holding one of these keeps a clone of the mount's active-client marker
/// alive, preventing the mount from being removed while the node is in use.
#[derive(Debug, Clone)]
pub struct ActiveNamespaceNode {
    /// The underlying namespace node.
    name: NamespaceNode,

    /// Adds a reference to the mount client marker to prevent the mount from
    /// being removed while the NamespaceNode is active. Is None iff mount is
    /// None.
    _marker: Option<MountClientMarker>,
}
1670
1671impl ActiveNamespaceNode {
1672    pub fn new(name: NamespaceNode) -> Self {
1673        let marker = name.mount.as_ref().map(|mount| mount.active_client_counter.clone());
1674        Self { name, _marker: marker }
1675    }
1676
1677    pub fn to_passive(&self) -> NamespaceNode {
1678        self.deref().clone()
1679    }
1680
1681    pub fn into_mapping(self, mode: Option<FileWriteGuardMode>) -> Result<Arc<FileMapping>, Errno> {
1682        if let Some(mode) = mode {
1683            self.entry.node.write_guard_state.lock().acquire(mode)?;
1684        }
1685        Ok(Arc::new(FileMapping { name: self, mode }))
1686    }
1687}
1688
1689impl Deref for ActiveNamespaceNode {
1690    type Target = NamespaceNode;
1691
1692    fn deref(&self) -> &Self::Target {
1693        &self.name
1694    }
1695}
1696
1697impl PartialEq for ActiveNamespaceNode {
1698    fn eq(&self, other: &Self) -> bool {
1699        self.deref().eq(other.deref())
1700    }
1701}
1702impl Eq for ActiveNamespaceNode {}
1703impl Hash for ActiveNamespaceNode {
1704    fn hash<H: Hasher>(&self, state: &mut H) {
1705        self.deref().hash(state)
1706    }
1707}
1708
// Pairs an active namespace node with an optional write guard taken on its
// node; the guard is released when the mapping is dropped (see `Drop` below).
//
// NOTE(review): the derived `Clone` duplicates `mode` without re-acquiring
// the guard, so each clone would release it once on drop — verify that
// `FileMapping` is only ever shared via `Arc` (as `into_mapping` returns).
#[derive(Debug, Clone, PartialEq, Eq)]
#[must_use]
pub struct FileMapping {
    pub name: ActiveNamespaceNode,
    // Write-guard mode acquired in `into_mapping`, if any.
    mode: Option<FileWriteGuardMode>,
}
1715
1716impl Drop for FileMapping {
1717    fn drop(&mut self) {
1718        if let Some(mode) = self.mode {
1719            self.name.entry.node.write_guard_state.lock().release(mode);
1720        }
1721    }
1722}
1723
/// Tracks all mounts, keyed by mount point.
pub struct Mounts {
    // Maps a mount-point directory entry (held weakly, so this map does not
    // keep entries alive) to every mount attached at that entry.
    mounts: RcuHashMap<WeakKey<DirEntry>, Vec<ArcKey<Mount>>>,
}
1728
impl Mounts {
    /// Creates an empty mount registry.
    pub fn new() -> Self {
        Mounts { mounts: RcuHashMap::default() }
    }

    /// Registers the mount in the global mounts map.
    ///
    /// Returns a `Submount` RAII object that unregisters the mount when
    /// dropped.
    fn register_mount(&self, dir_entry: &Arc<DirEntry>, mount: MountHandle) -> Submount {
        let mut mounts = self.mounts.lock();
        let key = WeakKey::from(dir_entry);
        // First mount at this entry: flag the entry so lookups know to
        // consult this map. `get` appears to hand back an owned Vec that is
        // re-inserted below — TODO confirm against RcuHashMap's API.
        let mut vec = mounts.get(&key).unwrap_or_else(|| {
            dir_entry.set_has_mounts(true);
            Vec::new()
        });
        vec.push(ArcKey(mount.clone()));
        mounts.insert(key, vec);
        Submount { dir: ArcKey(dir_entry.clone()), mount }
    }

    /// Unregisters the mount.  This is called by `Submount::drop`.
    fn unregister_mount(&self, dir_entry: &Arc<DirEntry>, mount: &MountHandle) {
        let mut mounts = self.mounts.lock();
        let key = WeakKey::from(dir_entry);
        if let Some(mut vec) = mounts.get(&key) {
            // Registration precedes unregistration, so the mount must be
            // present; a miss here would be an invariant violation.
            let index = vec.iter().position(|e| e == ArcKey::ref_cast(mount)).unwrap();
            if vec.len() == 1 {
                // Last mount at this entry: remove the map entry and clear
                // the entry's has-mounts flag.
                mounts.remove(&key);
                dir_entry.set_has_mounts(false);
            } else {
                // Order within the per-entry Vec is not significant, so the
                // O(1) swap_remove is fine.
                vec.swap_remove(index);
                mounts.insert(key, vec);
            }
        }
    }

    /// Unmounts all mounts associated with `dir_entry`.  This is called when `dir_entry` is
    /// unlinked (which would normally result in EBUSY, but not if it isn't mounted in the local
    /// namespace).
    pub fn unmount(&self, dir_entry: &DirEntry) {
        // Lookup by raw pointer; presumably `PtrKey` matches the map's
        // `WeakKey` entries via a Borrow impl — see starnix_uapi::arc_key.
        let mounts = self.mounts.lock().remove(&PtrKey::from(dir_entry as *const _));
        if let Some(mounts) = mounts {
            for mount in mounts {
                // Ignore errors.
                let _ = mount.unmount(UnmountFlags::DETACH);
            }
        }
    }

    /// Drain mounts. For each drained mount, force a FileSystem unmount.
    // TODO(https://fxbug.dev/295073633): Graceful shutdown should try to first unmount the mounts
    // and only force a FileSystem unmount on failure.
    pub fn clear(&self) {
        for (_dir_entry, mounts) in self.mounts.lock().drain() {
            for mount in mounts {
                mount.fs.force_unmount_ops();
            }
        }
    }

    /// Syncs every distinct filesystem that currently backs a mount.
    ///
    /// Sync failures are logged and do not abort the remaining syncs.
    pub fn sync_all(
        &self,
        locked: &mut Locked<Unlocked>,
        current_task: &CurrentTask,
    ) -> Result<(), Errno> {
        let mut filesystems = Vec::new();
        {
            // Collect the filesystems under an RCU read scope, deduplicating
            // by pointer identity, then sync outside the scope so the
            // (potentially slow) sync calls don't extend it.
            let scope = RcuReadScope::new();
            let mut seen = HashSet::new();
            for (_dir_entry, m_list) in self.mounts.iter(&scope) {
                for m in m_list {
                    if seen.insert(Arc::as_ptr(&m.fs)) {
                        filesystems.push(m.fs.clone());
                    }
                }
            }
        }

        for fs in filesystems {
            if let Err(e) = fs.sync(locked, current_task) {
                log_warn!("sync failed for filesystem {:?}: {:?}", fs.name(), e);
            }
        }
        Ok(())
    }
}
1813
/// A RAII object that unregisters a mount when dropped.
#[derive(Debug)]
struct Submount {
    // The mount-point directory entry this mount is attached at.
    dir: ArcKey<DirEntry>,
    // The mount itself.
    mount: MountHandle,
}
1820
1821impl Drop for Submount {
1822    fn drop(&mut self) {
1823        self.mount.fs.kernel.upgrade().unwrap().mounts.unregister_mount(&self.dir, &self.mount)
1824    }
1825}
1826
1827/// Submount is stored in a mount's submounts hash set, which is keyed by the mountpoint.
1828impl Eq for Submount {}
1829impl PartialEq<Self> for Submount {
1830    fn eq(&self, other: &Self) -> bool {
1831        self.dir == other.dir
1832    }
1833}
1834impl Hash for Submount {
1835    fn hash<H: Hasher>(&self, state: &mut H) {
1836        self.dir.hash(state)
1837    }
1838}
1839
1840impl Borrow<ArcKey<DirEntry>> for Submount {
1841    fn borrow(&self) -> &ArcKey<DirEntry> {
1842        &self.dir
1843    }
1844}
1845
1846#[cfg(test)]
1847mod test {
1848    use crate::fs::tmpfs::TmpFs;
1849    use crate::testing::spawn_kernel_and_run;
1850    use crate::vfs::namespace::DeviceType;
1851    use crate::vfs::{
1852        CallbackSymlinkNode, FsNodeInfo, LookupContext, MountInfo, Namespace, NamespaceNode,
1853        RenameFlags, SymlinkMode, SymlinkTarget, UnlinkKind, WhatToMount,
1854    };
1855    use starnix_uapi::mount_flags::MountFlags;
1856    use starnix_uapi::{errno, mode};
1857    use std::sync::Arc;
1858
    // Verifies that mounting a filesystem at /dev makes its contents visible,
    // and that parent() traversal crosses the mount boundary back into the
    // outer filesystem.
    #[::fuchsia::test]
    async fn test_namespace() {
        spawn_kernel_and_run(async |locked, current_task| {
            let kernel = current_task.kernel();
            let root_fs = TmpFs::new_fs(locked, &kernel);
            let root_node = Arc::clone(root_fs.root());
            let _dev_node = root_node
                .create_dir(locked, &current_task, "dev".into())
                .expect("failed to mkdir dev");
            let dev_fs = TmpFs::new_fs(locked, &kernel);
            let dev_root_node = Arc::clone(dev_fs.root());
            let _dev_pts_node = dev_root_node
                .create_dir(locked, &current_task, "pts".into())
                .expect("failed to mkdir pts");

            let ns = Namespace::new(root_fs);
            let mut context = LookupContext::default();
            let dev = ns
                .root()
                .lookup_child(locked, &current_task, &mut context, "dev".into())
                .expect("failed to lookup dev");
            dev.mount(WhatToMount::Fs(dev_fs), MountFlags::empty())
                .expect("failed to mount dev root node");

            // Look up "dev" again: post-mount it resolves to the mounted
            // filesystem's root, under which "pts" is now visible.
            let mut context = LookupContext::default();
            let dev = ns
                .root()
                .lookup_child(locked, &current_task, &mut context, "dev".into())
                .expect("failed to lookup dev");
            let mut context = LookupContext::default();
            let pts = dev
                .lookup_child(locked, &current_task, &mut context, "pts".into())
                .expect("failed to lookup pts");
            let pts_parent =
                pts.parent().ok_or_else(|| errno!(ENOENT)).expect("failed to get parent of pts");
            assert!(Arc::ptr_eq(&pts_parent.entry, &dev.entry));

            let dev_parent =
                dev.parent().ok_or_else(|| errno!(ENOENT)).expect("failed to get parent of dev");
            assert!(Arc::ptr_eq(&dev_parent.entry, &ns.root().entry));
        })
        .await;
    }
1902
    // Verifies that a NamespaceNode obtained before a mount does not get
    // "upgraded" to the mounted filesystem: lookups through the stale node
    // keep resolving in the original filesystem and fail for paths that only
    // exist in the new mount.
    #[::fuchsia::test]
    async fn test_mount_does_not_upgrade() {
        spawn_kernel_and_run(async |locked, current_task| {
            let kernel = current_task.kernel();
            let root_fs = TmpFs::new_fs(locked, &kernel);
            let root_node = Arc::clone(root_fs.root());
            let _dev_node = root_node
                .create_dir(locked, &current_task, "dev".into())
                .expect("failed to mkdir dev");
            let dev_fs = TmpFs::new_fs(locked, &kernel);
            let dev_root_node = Arc::clone(dev_fs.root());
            let _dev_pts_node = dev_root_node
                .create_dir(locked, &current_task, "pts".into())
                .expect("failed to mkdir pts");

            let ns = Namespace::new(root_fs);
            let mut context = LookupContext::default();
            let dev = ns
                .root()
                .lookup_child(locked, &current_task, &mut context, "dev".into())
                .expect("failed to lookup dev");
            dev.mount(WhatToMount::Fs(dev_fs), MountFlags::empty())
                .expect("failed to mount dev root node");
            let mut context = LookupContext::default();
            let new_dev = ns
                .root()
                .lookup_child(locked, &current_task, &mut context, "dev".into())
                .expect("failed to lookup dev again");
            // The pre-mount and post-mount nodes are distinct.
            assert!(!Arc::ptr_eq(&dev.entry, &new_dev.entry));
            assert_ne!(&dev, &new_dev);

            let mut context = LookupContext::default();
            let _new_pts = new_dev
                .lookup_child(locked, &current_task, &mut context, "pts".into())
                .expect("failed to lookup pts");
            // "pts" only exists in the mounted fs, so the stale node misses.
            let mut context = LookupContext::default();
            assert!(dev.lookup_child(locked, &current_task, &mut context, "pts".into()).is_err());
        })
        .await;
    }
1943
    // Verifies that path_escaping_chroot() renders "/", "/dev" and "/dev/pts"
    // correctly, including across a mount boundary.
    #[::fuchsia::test]
    async fn test_path() {
        spawn_kernel_and_run(async |locked, current_task| {
            let kernel = current_task.kernel();
            let root_fs = TmpFs::new_fs(locked, &kernel);
            let root_node = Arc::clone(root_fs.root());
            let _dev_node = root_node
                .create_dir(locked, &current_task, "dev".into())
                .expect("failed to mkdir dev");
            let dev_fs = TmpFs::new_fs(locked, &kernel);
            let dev_root_node = Arc::clone(dev_fs.root());
            let _dev_pts_node = dev_root_node
                .create_dir(locked, &current_task, "pts".into())
                .expect("failed to mkdir pts");

            let ns = Namespace::new(root_fs);
            let mut context = LookupContext::default();
            let dev = ns
                .root()
                .lookup_child(locked, &current_task, &mut context, "dev".into())
                .expect("failed to lookup dev");
            dev.mount(WhatToMount::Fs(dev_fs), MountFlags::empty())
                .expect("failed to mount dev root node");

            let mut context = LookupContext::default();
            let dev = ns
                .root()
                .lookup_child(locked, &current_task, &mut context, "dev".into())
                .expect("failed to lookup dev");
            let mut context = LookupContext::default();
            let pts = dev
                .lookup_child(locked, &current_task, &mut context, "pts".into())
                .expect("failed to lookup pts");

            assert_eq!("/", ns.root().path_escaping_chroot());
            assert_eq!("/dev", dev.path_escaping_chroot());
            assert_eq!("/dev/pts", pts.path_escaping_chroot());
        })
        .await;
    }
1984
    // Verifies mount shadowing: mounting over an existing mount hides it in
    // this namespace, while a namespace cloned before the second mount still
    // sees the first.
    #[::fuchsia::test]
    async fn test_shadowing() {
        spawn_kernel_and_run(async |locked, current_task| {
            let kernel = current_task.kernel();
            let root_fs = TmpFs::new_fs(locked, &kernel);
            let ns = Namespace::new(root_fs.clone());
            let _foo_node = root_fs.root().create_dir(locked, &current_task, "foo".into()).unwrap();
            let mut context = LookupContext::default();
            let foo_dir =
                ns.root().lookup_child(locked, &current_task, &mut context, "foo".into()).unwrap();

            let foofs1 = TmpFs::new_fs(locked, &kernel);
            foo_dir.mount(WhatToMount::Fs(foofs1.clone()), MountFlags::empty()).unwrap();
            let mut context = LookupContext::default();
            assert!(Arc::ptr_eq(
                &ns.root()
                    .lookup_child(locked, &current_task, &mut context, "foo".into())
                    .unwrap()
                    .entry,
                foofs1.root()
            ));
            let foo_dir =
                ns.root().lookup_child(locked, &current_task, &mut context, "foo".into()).unwrap();

            // Clone before the second mount: the clone keeps seeing foofs1.
            let ns_clone = ns.clone_namespace();

            let foofs2 = TmpFs::new_fs(locked, &kernel);
            foo_dir.mount(WhatToMount::Fs(foofs2.clone()), MountFlags::empty()).unwrap();
            let mut context = LookupContext::default();
            assert!(Arc::ptr_eq(
                &ns.root()
                    .lookup_child(locked, &current_task, &mut context, "foo".into())
                    .unwrap()
                    .entry,
                foofs2.root()
            ));

            assert!(Arc::ptr_eq(
                &ns_clone
                    .root()
                    .lookup_child(
                        locked,
                        &current_task,
                        &mut LookupContext::default(),
                        "foo".into()
                    )
                    .unwrap()
                    .entry,
                foofs1.root()
            ));
        })
        .await;
    }
2038
    // Verifies that unlinking a directory that is a mount point fails with
    // EBUSY in the namespace containing the mount, but succeeds from another
    // namespace sharing the same filesystem (and then disappears everywhere).
    #[::fuchsia::test]
    async fn test_unlink_mounted_directory() {
        spawn_kernel_and_run(async |locked, current_task| {
            let kernel = current_task.kernel();
            let root_fs = TmpFs::new_fs(locked, &kernel);
            let ns1 = Namespace::new(root_fs.clone());
            let ns2 = Namespace::new(root_fs.clone());
            let _foo_node = root_fs.root().create_dir(locked, &current_task, "foo".into()).unwrap();
            let mut context = LookupContext::default();
            let foo_dir =
                ns1.root().lookup_child(locked, &current_task, &mut context, "foo".into()).unwrap();

            let foofs = TmpFs::new_fs(locked, &kernel);
            foo_dir.mount(WhatToMount::Fs(foofs), MountFlags::empty()).unwrap();

            // Trying to unlink from ns1 should fail.
            assert_eq!(
                ns1.root()
                    .unlink(locked, &current_task, "foo".into(), UnlinkKind::Directory, false)
                    .unwrap_err(),
                errno!(EBUSY),
            );

            // But unlinking from ns2 should succeed.
            ns2.root()
                .unlink(locked, &current_task, "foo".into(), UnlinkKind::Directory, false)
                .expect("unlink failed");

            // And it should no longer show up in ns1.
            assert_eq!(
                ns1.root()
                    .unlink(locked, &current_task, "foo".into(), UnlinkKind::Directory, false)
                    .unwrap_err(),
                errno!(ENOENT),
            );
        })
        .await;
    }
2077
    // Verifies that renames involving a mount point fail with EBUSY in the
    // namespace containing the mount, but succeed from another namespace
    // sharing the same filesystem — both renaming the mount point itself and
    // renaming over a directory that carries a mount.
    #[::fuchsia::test]
    async fn test_rename_mounted_directory() {
        spawn_kernel_and_run(async |locked, current_task| {
            let kernel = current_task.kernel();
            let root_fs = TmpFs::new_fs(locked, &kernel);
            let ns1 = Namespace::new(root_fs.clone());
            let ns2 = Namespace::new(root_fs.clone());
            let _foo_node = root_fs.root().create_dir(locked, &current_task, "foo".into()).unwrap();
            let _bar_node = root_fs.root().create_dir(locked, &current_task, "bar".into()).unwrap();
            let _baz_node = root_fs.root().create_dir(locked, &current_task, "baz".into()).unwrap();
            let mut context = LookupContext::default();
            let foo_dir =
                ns1.root().lookup_child(locked, &current_task, &mut context, "foo".into()).unwrap();

            let foofs = TmpFs::new_fs(locked, &kernel);
            foo_dir.mount(WhatToMount::Fs(foofs), MountFlags::empty()).unwrap();

            // Trying to rename over foo from ns1 should fail.
            let root = ns1.root();
            assert_eq!(
                NamespaceNode::rename(
                    locked,
                    &current_task,
                    &root,
                    "bar".into(),
                    &root,
                    "foo".into(),
                    RenameFlags::empty()
                )
                .unwrap_err(),
                errno!(EBUSY),
            );
            // Likewise the other way.
            assert_eq!(
                NamespaceNode::rename(
                    locked,
                    &current_task,
                    &root,
                    "foo".into(),
                    &root,
                    "bar".into(),
                    RenameFlags::empty()
                )
                .unwrap_err(),
                errno!(EBUSY),
            );

            // But renaming from ns2 should succeed.
            let root = ns2.root();

            // First rename the directory with the mount.
            NamespaceNode::rename(
                locked,
                &current_task,
                &root,
                "foo".into(),
                &root,
                "bar".into(),
                RenameFlags::empty(),
            )
            .expect("rename failed");

            // Renaming over a directory with a mount should also work.
            NamespaceNode::rename(
                locked,
                &current_task,
                &root,
                "baz".into(),
                &root,
                "bar".into(),
                RenameFlags::empty(),
            )
            .expect("rename failed");

            // "foo" and "baz" should no longer show up in ns1.
            assert_eq!(
                ns1.root()
                    .lookup_child(locked, &current_task, &mut context, "foo".into())
                    .unwrap_err(),
                errno!(ENOENT)
            );
            assert_eq!(
                ns1.root()
                    .lookup_child(locked, &current_task, &mut context, "baz".into())
                    .unwrap_err(),
                errno!(ENOENT)
            );
        })
        .await;
    }
2168
    /// Symlinks which need to be traversed across types (nodes and paths), as well as across
    /// owning directories, can be tricky to get right.
    ///
    /// Builds a path-style symlink chained behind a node-style symlink and
    /// checks that a following lookup resolves through both to the real file.
    #[::fuchsia::test]
    async fn test_lookup_with_symlink_chain() {
        spawn_kernel_and_run(async |locked, current_task| {
            // Set up the root filesystem
            let kernel = current_task.kernel();
            let root_fs = TmpFs::new_fs(locked, &kernel);
            let root_node = Arc::clone(root_fs.root());
            let _first_subdir_node = root_node
                .create_dir(locked, &current_task, "first_subdir".into())
                .expect("failed to mkdir dev");
            let _second_subdir_node = root_node
                .create_dir(locked, &current_task, "second_subdir".into())
                .expect("failed to mkdir dev");

            // Set up two subdirectories under the root filesystem
            let first_subdir_fs = TmpFs::new_fs(locked, &kernel);
            let second_subdir_fs = TmpFs::new_fs(locked, &kernel);

            let ns = Namespace::new(root_fs);
            let mut context = LookupContext::default();
            let first_subdir = ns
                .root()
                .lookup_child(locked, &current_task, &mut context, "first_subdir".into())
                .expect("failed to lookup first_subdir");
            first_subdir
                .mount(WhatToMount::Fs(first_subdir_fs), MountFlags::empty())
                .expect("failed to mount first_subdir fs node");
            let second_subdir = ns
                .root()
                .lookup_child(locked, &current_task, &mut context, "second_subdir".into())
                .expect("failed to lookup second_subdir");
            second_subdir
                .mount(WhatToMount::Fs(second_subdir_fs), MountFlags::empty())
                .expect("failed to mount second_subdir fs node");

            // Create the symlink structure. To trigger potential symlink traversal bugs, we're going
            // for the following directory structure:
            // / (root)
            //     + first_subdir/
            //         - real_file
            //         - path_symlink (-> real_file)
            //     + second_subdir/
            //         - node_symlink (-> path_symlink)
            let real_file_node = first_subdir
                .create_node(
                    locked,
                    &current_task,
                    "real_file".into(),
                    mode!(IFREG, 0o777),
                    DeviceType::NONE,
                )
                .expect("failed to create real_file");
            first_subdir
                .create_symlink(locked, &current_task, "path_symlink".into(), "real_file".into())
                .expect("failed to create path_symlink");

            // NoFollow so we capture the symlink node itself, not its target.
            let mut no_follow_lookup_context = LookupContext::new(SymlinkMode::NoFollow);
            let path_symlink_node = first_subdir
                .lookup_child(
                    locked,
                    &current_task,
                    &mut no_follow_lookup_context,
                    "path_symlink".into(),
                )
                .expect("Failed to lookup path_symlink");

            // The second symlink needs to be of type SymlinkTarget::Node in order to trip the sensitive
            // code path. There's no easy method for creating this type of symlink target, so we'll need
            // to construct a node from scratch and insert it into the directory manually.
            let node_symlink_node = second_subdir.entry.node.fs().create_node_and_allocate_node_id(
                CallbackSymlinkNode::new(move || {
                    let node = path_symlink_node.clone();
                    Ok(SymlinkTarget::Node(node))
                }),
                FsNodeInfo::new(mode!(IFLNK, 0o777), current_task.current_fscred()),
            );
            second_subdir
                .entry
                .create_entry(
                    locked,
                    &current_task,
                    &MountInfo::detached(),
                    "node_symlink".into(),
                    move |_locked, _dir, _mount, _name| Ok(node_symlink_node),
                )
                .expect("failed to create node_symlink entry");

            // Finally, exercise the lookup under test.
            let mut follow_lookup_context = LookupContext::new(SymlinkMode::Follow);
            let node_symlink_resolution = second_subdir
                .lookup_child(
                    locked,
                    &current_task,
                    &mut follow_lookup_context,
                    "node_symlink".into(),
                )
                .expect("lookup with symlink chain failed");

            // The lookup resolution should have correctly followed the symlinks to the real_file node.
            assert!(node_symlink_resolution.entry.node.ino == real_file_node.entry.node.ino);
        })
        .await;
    }
2274}