// Copyright 2022 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

5#![recursion_limit = "512"]
6
7use starnix_core::task::{CurrentTask, Kernel};
8use starnix_core::vfs::{
9    CacheMode, DirectoryEntryType, DirentSink, FileHandle, FileObject, FileOps, FileSystem,
10    FileSystemHandle, FileSystemOps, FsNode, FsNodeHandle, FsNodeOps, FsStr, FsString, MountInfo,
11    SeekTarget, ValueOrSize, WhatToMount, XattrOp, fileops_impl_directory, fileops_impl_noop_sync,
12    fs_node_impl_dir_readonly, unbounded_seek,
13};
14use starnix_sync::{FileOpsCore, LockEqualOrBefore, Locked, Unlocked};
15use starnix_uapi::errors::Errno;
16use starnix_uapi::mount_flags::{FileSystemFlags, MountpointFlags};
17use starnix_uapi::open_flags::OpenFlags;
18use starnix_uapi::{errno, ino_t, off_t, statfs};
19use std::collections::BTreeMap;
20use std::sync::Arc;
21
/// A deferred mount operation recorded while building a layered filesystem:
/// mount `fs` at `path` (an absolute path from the root) once the enclosing
/// `FileSystem` exists and the finalization callback runs.
struct LayeredMountAction {
    /// Absolute path (starting with '/') at which `fs` will be mounted.
    path: FsString,
    /// The filesystem to mount at `path`.
    fs: FileSystemHandle,
}
26
/// A callback used to complete the initialization of a `LayeredFs`.
///
/// After the `FileSystem` has been created by [`LayeredFsBuilder::build`], this closure
/// must be invoked to create the sub-mounts that layer the additional filesystems
/// at their specified paths.
///
/// The closure is consumed on invocation (`FnOnce`) and returns an error if any
/// path lookup or mount operation fails.
pub type LayeredFsMounts =
    Box<dyn FnOnce(&mut Locked<Unlocked>, &CurrentTask) -> Result<(), Errno>>;
34
/// `FileSystem` builder that allows a set of auxiliary `FileSystem`s to be mounted at specified
/// paths relative to the base filesystem, regardless of whether the base filesystem has directories
/// at those paths, that may be mounted-onto.
///
/// Auxiliary `FileSystem`s and their mount paths are provided via calls to `add()`, and the layered
/// filesystem created using `build()`.
pub struct LayeredFsBuilder {
    /// The base filesystem that operations not covered by a sub-mount delegate to.
    fs: FileSystemHandle,
    /// Nested builders keyed by directory-entry name; each entry becomes a
    /// sub-mount (and may itself carry further nested sub-mounts).
    subdirs: BTreeMap<FsString, LayeredFsBuilder>,
}
45
46fn split_path(path: &FsStr) -> Vec<&FsStr> {
47    path.split(|c| *c == b'/').map(<&FsStr>::from).collect()
48}
49
50impl LayeredFsBuilder {
51    /// Returns a `LayeredFsBuilder` with `root_fs` as the underlying base filesystem.
52    pub fn new(root_fs: FileSystemHandle) -> Self {
53        Self { fs: root_fs, subdirs: Default::default() }
54    }
55
56    /// Specifies that filesystem `fs` should be mounted at the specified `path` relative to the
57    /// base filesystem.
58    ///
59    /// `path` must specify an absolute path under the base filesystem (i.e. starting with "/").
60    /// If `path` has multiple components then intermediate components must already have been
61    /// added to the builder.
62    pub fn add(&mut self, path: &str, fs: FileSystemHandle) {
63        let path = FsStr::new(path);
64        assert_eq!(path[0], b'/');
65        let parts = split_path(&path[1..]);
66        assert!(!parts.is_empty());
67        let final_part = parts.len() - 1;
68
69        let mut parent = self;
70        for i in 0..final_part {
71            parent = parent.subdirs.get_mut(parts[i]).unwrap();
72        }
73
74        parent.subdirs.insert(parts[parts.len() - 1].into(), Self::new(fs));
75    }
76
77    /// Returns the new `FileSystem` handle, and a finalization callback that must be invoked to
78    /// set up the subordinate mount points.
79    ///
80    /// The underlying base `FileSystem` will be returned directly if no sub-mounts were specified
81    /// via `add()`. Otherwise a `LayeredFs` instance will be returned, to provide stub directory
82    /// entries for the sub-mounts to be mounted onto.
83    pub fn build<L>(
84        self,
85        locked: &mut Locked<L>,
86        kernel: &Kernel,
87    ) -> (FileSystemHandle, LayeredFsMounts)
88    where
89        L: LockEqualOrBefore<FileOpsCore>,
90    {
91        let (fs, actions) = self.build_internal(locked, kernel, Default::default());
92        let cb = Box::new(move |locked: &mut Locked<Unlocked>, current_task: &CurrentTask| {
93            for action in actions {
94                let mount_point = current_task
95                    .lookup_path_from_root(locked, action.path.as_ref())
96                    .map_err(|e| {
97                        Errno::with_context(
98                            e.code,
99                            format!("lookup path from root: {}", action.path),
100                        )
101                    })?;
102                mount_point.mount(WhatToMount::Fs(action.fs), MountpointFlags::empty()).map_err(
103                    |e| {
104                        Errno::with_context(e.code, format!("mount layered fs at: {}", action.path))
105                    },
106                )?;
107            }
108            Ok(())
109        });
110        (fs, cb)
111    }
112
113    fn build_internal<L>(
114        self,
115        locked: &mut Locked<L>,
116        kernel: &Kernel,
117        prefix: &FsStr,
118    ) -> (FileSystemHandle, Vec<LayeredMountAction>)
119    where
120        L: LockEqualOrBefore<FileOpsCore>,
121    {
122        if self.subdirs.is_empty() {
123            return (self.fs, Vec::new());
124        }
125
126        let names =
127            self.subdirs.iter().map(|(name, entry)| (name.clone(), entry.fs.clone())).collect();
128        let fs = LayeredFs::new_fs(locked, kernel, self.fs, names);
129
130        let mut mount_actions = Vec::new();
131        for (subpath, builder) in self.subdirs {
132            let path = FsString::from(format!("{}/{}", prefix, subpath));
133            let (fs, subdir_actions) = builder.build_internal(locked, kernel, path.as_ref());
134            mount_actions.push(LayeredMountAction { path, fs });
135            mount_actions.extend(subdir_actions.into_iter());
136        }
137
138        (fs, mount_actions)
139    }
140}
141
/// A filesystem that delegates most operations to a base filesystem, but
/// overlays a number of top-level directories that point to other filesystems.
struct LayeredFs {
    /// The filesystem handling everything not shadowed by `mappings`.
    base_fs: FileSystemHandle,
    /// Top-level directory name -> the filesystem presented under that name.
    mappings: BTreeMap<FsString, FileSystemHandle>,
}
148
149impl LayeredFs {
150    /// Build a new filesystem.
151    ///
152    /// `base_fs`: The base file system that this file system will delegate to.
153    /// `mappings`: The map of top level directory to filesystems that will be layered on top of
154    /// `base_fs`.
155    fn new_fs<L>(
156        locked: &mut Locked<L>,
157        kernel: &Kernel,
158        base_fs: FileSystemHandle,
159        mappings: BTreeMap<FsString, FileSystemHandle>,
160    ) -> FileSystemHandle
161    where
162        L: LockEqualOrBefore<FileOpsCore>,
163    {
164        let options = base_fs.options.clone();
165        let layered_fs = Arc::new(LayeredFs { base_fs, mappings });
166        let fs = FileSystem::new(
167            locked,
168            kernel,
169            CacheMode::Uncached,
170            LayeredFileSystemOps { fs: layered_fs.clone() },
171            options,
172        )
173        .expect("layeredfs constructed with valid options");
174        let root_ino = fs.allocate_ino();
175        fs.create_root(root_ino, LayeredNodeOps { fs: layered_fs });
176        fs
177    }
178}
179
/// `FileSystemOps` implementation that forwards filesystem-level queries
/// (statfs, name, read-only state) to the base filesystem of a `LayeredFs`.
struct LayeredFileSystemOps {
    /// Shared layered-filesystem state; only `base_fs` is consulted here.
    fs: Arc<LayeredFs>,
}
183
184impl FileSystemOps for LayeredFileSystemOps {
185    fn statfs(
186        &self,
187        locked: &mut Locked<FileOpsCore>,
188        _fs: &FileSystem,
189        current_task: &CurrentTask,
190    ) -> Result<statfs, Errno> {
191        self.fs.base_fs.statfs(locked, current_task)
192    }
193    fn name(&self) -> &'static FsStr {
194        self.fs.base_fs.name()
195    }
196    fn is_readonly(&self) -> bool {
197        self.fs.base_fs.options.flags & FileSystemFlags::RDONLY == FileSystemFlags::RDONLY
198    }
199}
200
/// `FsNodeOps` for the root node of a `LayeredFs`: lookups of mapped names
/// resolve to the layered filesystems' roots, everything else delegates to the
/// base filesystem's root node.
struct LayeredNodeOps {
    /// Shared layered-filesystem state (base filesystem + name mappings).
    fs: Arc<LayeredFs>,
}
204
impl FsNodeOps for LayeredNodeOps {
    fs_node_impl_dir_readonly!();

    /// Opens the layered root directory. The base filesystem's root is opened
    /// as an anonymous file and wrapped in `LayeredFileOps`, which merges the
    /// mapping entries into directory listings and seek offsets.
    fn create_file_ops(
        &self,
        locked: &mut Locked<FileOpsCore>,
        _node: &FsNode,
        current_task: &CurrentTask,
        flags: OpenFlags,
    ) -> Result<Box<dyn FileOps>, Errno> {
        Ok(Box::new(LayeredFileOps {
            fs: self.fs.clone(),
            root_file: self.fs.base_fs.root().open_anonymous(locked, current_task, flags)?,
        }))
    }

    /// Looks up `name` in the layered root. Mapped names shadow any same-named
    /// entry in the base filesystem and resolve to the mapped filesystem's
    /// root node; all other names fall through to the base root.
    fn lookup(
        &self,
        locked: &mut Locked<FileOpsCore>,
        _node: &FsNode,
        current_task: &CurrentTask,
        name: &FsStr,
    ) -> Result<FsNodeHandle, Errno> {
        if let Some(fs) = self.fs.mappings.get(name) {
            Ok(fs.root().node.clone())
        } else {
            self.fs.base_fs.root().node.lookup(locked, current_task, &MountInfo::detached(), name)
        }
    }

    /// Reads an extended attribute by delegating to the base root node's ops.
    ///
    /// NOTE(review): this calls `ops().get_xattr` on the base root directly,
    /// whereas `set_xattr`/`remove_xattr`/`list_xattrs` below go through the
    /// `FsNode` wrapper methods with a detached `MountInfo` — confirm the
    /// asymmetry is intentional.
    fn get_xattr(
        &self,
        locked: &mut Locked<FileOpsCore>,
        _node: &FsNode,
        current_task: &CurrentTask,
        name: &FsStr,
        max_size: usize,
    ) -> Result<ValueOrSize<FsString>, Errno> {
        self.fs.base_fs.root().node.ops().get_xattr(
            locked,
            &*self.fs.base_fs.root().node,
            current_task,
            name,
            max_size,
        )
    }

    /// Sets an extended attribute by delegating to the base filesystem's root
    /// node, using a detached mount (the layered root has no mount of its own
    /// to consult).
    fn set_xattr(
        &self,
        locked: &mut Locked<FileOpsCore>,
        _node: &FsNode,
        current_task: &CurrentTask,
        name: &FsStr,
        value: &FsStr,
        op: XattrOp,
    ) -> Result<(), Errno> {
        self.fs.base_fs.root().node.set_xattr(
            locked,
            current_task,
            &MountInfo::detached(),
            name,
            value,
            op,
        )
    }

    /// Removes an extended attribute via the base filesystem's root node.
    fn remove_xattr(
        &self,
        locked: &mut Locked<FileOpsCore>,
        _node: &FsNode,
        current_task: &CurrentTask,
        name: &FsStr,
    ) -> Result<(), Errno> {
        self.fs.base_fs.root().node.remove_xattr(locked, current_task, &MountInfo::detached(), name)
    }

    /// Lists extended attributes via the base filesystem's root node.
    fn list_xattrs(
        &self,
        locked: &mut Locked<FileOpsCore>,
        _node: &FsNode,
        current_task: &CurrentTask,
        max_size: usize,
    ) -> Result<ValueOrSize<Vec<FsString>>, Errno> {
        self.fs.base_fs.root().node.list_xattrs(locked, current_task, max_size)
    }
}
292
/// `FileOps` for an open handle on the layered root directory. Directory
/// offsets form a merged space: offsets `[0, mappings.len())` address the
/// synthetic mapping entries, and larger offsets address the base root file's
/// entries shifted up by `mappings.len()`.
struct LayeredFileOps {
    /// Shared layered-filesystem state (base filesystem + name mappings).
    fs: Arc<LayeredFs>,
    /// An anonymous open file on the base filesystem's root directory.
    root_file: FileHandle,
}
297
impl FileOps for LayeredFileOps {
    fileops_impl_directory!();
    fileops_impl_noop_sync!();

    /// Seeks within the merged offset space. Offsets below `mappings.len()`
    /// index the synthetic mapping entries and need no base-file adjustment;
    /// larger offsets are translated into the base root file's own offset
    /// space (by subtracting `mappings.len()`), applied there, then shifted
    /// back. The checked add guards against `off_t` overflow (EINVAL).
    fn seek(
        &self,
        locked: &mut Locked<FileOpsCore>,
        _file: &FileObject,
        current_task: &CurrentTask,
        current_offset: off_t,
        target: SeekTarget,
    ) -> Result<off_t, Errno> {
        let mut new_offset = unbounded_seek(current_offset, target)?;
        if new_offset >= self.fs.mappings.len() as off_t {
            new_offset = self
                .root_file
                .seek(
                    locked,
                    current_task,
                    SeekTarget::Set(new_offset - self.fs.mappings.len() as off_t),
                )?
                .checked_add(self.fs.mappings.len() as off_t)
                .ok_or_else(|| errno!(EINVAL))?;
        }
        Ok(new_offset)
    }

    /// Emits the mapping entries first, then the base directory's entries with
    /// offsets shifted by `mappings.len()`, suppressing base entries shadowed
    /// by a mapping.
    fn readdir(
        &self,
        locked: &mut Locked<FileOpsCore>,
        _file: &FileObject,
        current_task: &CurrentTask,
        sink: &mut dyn DirentSink,
    ) -> Result<(), Errno> {
        // Emit any mapping entries not yet consumed. If the sink is resuming
        // from the base-directory region (offset >= mappings.len()), the skip
        // exhausts the iterator and nothing is emitted here.
        for (key, fs) in self.fs.mappings.iter().skip(sink.offset() as usize) {
            sink.add(fs.root().node.ino, sink.offset() + 1, DirectoryEntryType::DIR, key.as_ref())?;
        }

        // Adapts the caller's sink for the base directory: shifts offsets into
        // the merged space and drops entries shadowed by a mapping.
        struct DirentSinkWrapper<'a> {
            sink: &'a mut dyn DirentSink,
            mappings: &'a BTreeMap<FsString, FileSystemHandle>,
            offset: &'a mut off_t,
        }

        impl<'a> DirentSink for DirentSinkWrapper<'a> {
            fn add(
                &mut self,
                inode_num: ino_t,
                offset: off_t,
                entry_type: DirectoryEntryType,
                name: &FsStr,
            ) -> Result<(), Errno> {
                // Skip base entries shadowed by a mapping; forwarded offsets
                // are shifted into the merged offset space.
                if !self.mappings.contains_key(name) {
                    self.sink.add(
                        inode_num,
                        offset + (self.mappings.len() as off_t),
                        entry_type,
                        name,
                    )?;
                }
                // Record the base-relative offset even for suppressed entries
                // so the base file's position advances past them.
                *self.offset = offset;
                Ok(())
            }
            fn offset(&self) -> off_t {
                *self.offset
            }
        }

        // The wrapper writes straight into the base file's persisted offset,
        // so a later readdir/seek resumes from the right place.
        let mut root_file_offset = self.root_file.offset.lock();
        let mut wrapper =
            DirentSinkWrapper { sink, mappings: &self.fs.mappings, offset: &mut root_file_offset };

        self.root_file.readdir(locked, current_task, &mut wrapper)
    }
}
373
#[cfg(test)]
mod test {
    use super::*;
    use starnix_core::fs::tmpfs::TmpFs;
    use starnix_core::testing::*;
    use starnix_sync::Unlocked;

    /// Reads the root directory of `fs` and returns every entry name reported
    /// by readdir (including "." and "..").
    fn get_root_entry_names(
        locked: &mut Locked<Unlocked>,
        current_task: &CurrentTask,
        fs: &FileSystem,
    ) -> Vec<Vec<u8>> {
        // Minimal sink that records names and tracks the readdir offset.
        struct DirentNameCapturer {
            pub names: Vec<Vec<u8>>,
            offset: off_t,
        }
        impl DirentSink for DirentNameCapturer {
            fn add(
                &mut self,
                _inode_num: ino_t,
                offset: off_t,
                _entry_type: DirectoryEntryType,
                name: &FsStr,
            ) -> Result<(), Errno> {
                self.names.push(name.to_vec());
                self.offset = offset;
                Ok(())
            }
            fn offset(&self) -> off_t {
                self.offset
            }
        }
        let mut sink = DirentNameCapturer { names: vec![], offset: 0 };
        fs.root()
            .open_anonymous(locked, current_task, OpenFlags::RDONLY)
            .expect("open")
            .readdir(locked, current_task, &mut sink)
            .expect("readdir");
        std::mem::take(&mut sink.names)
    }

    /// Verifies that readdir on a `LayeredFs` merges mapping names with the
    /// base entries, with mappings shadowing same-named base entries ("d1"
    /// appears once, not twice).
    #[::fuchsia::test]
    async fn test_remove_duplicates() {
        #[allow(deprecated, reason = "pre-existing usage")]
        let (kernel, current_task, locked) = create_kernel_task_and_unlocked();
        let base = TmpFs::new_fs(locked, &kernel);
        base.root().create_dir_for_testing(locked, &current_task, "d1".into()).expect("create_dir");
        base.root().create_dir_for_testing(locked, &current_task, "d2".into()).expect("create_dir");
        let base_entries = get_root_entry_names(locked, &current_task, &base);
        assert_eq!(base_entries.len(), 4);
        assert!(base_entries.contains(&b".".to_vec()));
        assert!(base_entries.contains(&b"..".to_vec()));
        assert!(base_entries.contains(&b"d1".to_vec()));
        assert!(base_entries.contains(&b"d2".to_vec()));

        // Layer tmpfs1 over the existing "d1" and tmpfs2 at the new "d3".
        let tmpfs1 = TmpFs::new_fs(locked, &kernel);
        let tmpfs2 = TmpFs::new_fs(locked, &kernel);
        let layered_fs = LayeredFs::new_fs(
            locked,
            &kernel,
            base,
            BTreeMap::from([("d1".into(), tmpfs1), ("d3".into(), tmpfs2)]),
        );
        let layered_fs_entries = get_root_entry_names(locked, &current_task, &layered_fs);
        assert_eq!(layered_fs_entries.len(), 5);
        assert!(layered_fs_entries.contains(&b".".to_vec()));
        assert!(layered_fs_entries.contains(&b"..".to_vec()));
        assert!(layered_fs_entries.contains(&b"d1".to_vec()));
        assert!(layered_fs_entries.contains(&b"d2".to_vec()));
        assert!(layered_fs_entries.contains(&b"d3".to_vec()));
    }
}
445}