// starnix_modules_layeredfs/lib.rs

1// Copyright 2022 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#![recursion_limit = "512"]
6
7use starnix_core::task::{CurrentTask, Kernel};
8use starnix_core::vfs::{
9    CacheMode, DirectoryEntryType, DirentSink, FileHandle, FileObject, FileOps, FileSystem,
10    FileSystemHandle, FileSystemOps, FsNode, FsNodeHandle, FsNodeOps, FsStr, FsString, MountInfo,
11    SeekTarget, ValueOrSize, XattrOp, fileops_impl_directory, fileops_impl_noop_sync,
12    fs_node_impl_dir_readonly, unbounded_seek,
13};
14use starnix_sync::{FileOpsCore, LockEqualOrBefore, Locked};
15use starnix_uapi::errors::Errno;
16use starnix_uapi::open_flags::OpenFlags;
17use starnix_uapi::{errno, ino_t, off_t, statfs};
18use std::collections::BTreeMap;
19use std::sync::Arc;
20
/// A filesystem that will delegate most operations to a base one, but have a number of top level
/// directories that point to other filesystems.
pub struct LayeredFs {
    // The filesystem that handles every name not covered by `mappings`.
    base_fs: FileSystemHandle,
    // Top-level name -> filesystem layered at that name. Lookups for these names
    // resolve to the mapped filesystem's root instead of consulting `base_fs`.
    mappings: BTreeMap<FsString, FileSystemHandle>,
}
27
28impl LayeredFs {
29    /// Build a new filesystem.
30    ///
31    /// `base_fs`: The base file system that this file system will delegate to.
32    /// `mappings`: The map of top level directory to filesystems that will be layered on top of
33    /// `base_fs`.
34    pub fn new_fs<L>(
35        locked: &mut Locked<L>,
36        kernel: &Kernel,
37        base_fs: FileSystemHandle,
38        mappings: BTreeMap<FsString, FileSystemHandle>,
39    ) -> FileSystemHandle
40    where
41        L: LockEqualOrBefore<FileOpsCore>,
42    {
43        let options = base_fs.options.clone();
44        let layered_fs = Arc::new(LayeredFs { base_fs, mappings });
45        let fs = FileSystem::new(
46            locked,
47            kernel,
48            CacheMode::Uncached,
49            LayeredFileSystemOps { fs: layered_fs.clone() },
50            options,
51        )
52        .expect("layeredfs constructed with valid options");
53        let root_ino = fs.allocate_ino();
54        fs.create_root(root_ino, LayeredNodeOps { fs: layered_fs });
55        fs
56    }
57}
58
// `FileSystemOps` for a `LayeredFs`: filesystem-level queries (statfs, name) are
// forwarded to the base filesystem.
struct LayeredFileSystemOps {
    fs: Arc<LayeredFs>,
}
62
63impl FileSystemOps for LayeredFileSystemOps {
64    fn statfs(
65        &self,
66        locked: &mut Locked<FileOpsCore>,
67        _fs: &FileSystem,
68        current_task: &CurrentTask,
69    ) -> Result<statfs, Errno> {
70        self.fs.base_fs.statfs(locked, current_task)
71    }
72    fn name(&self) -> &'static FsStr {
73        self.fs.base_fs.name()
74    }
75}
76
// `FsNodeOps` for the root directory of a `LayeredFs`: lookups first consult the
// mappings, then fall back to the base filesystem's root node.
struct LayeredNodeOps {
    fs: Arc<LayeredFs>,
}
80
81impl FsNodeOps for LayeredNodeOps {
82    fs_node_impl_dir_readonly!();
83
84    fn create_file_ops(
85        &self,
86        locked: &mut Locked<FileOpsCore>,
87        _node: &FsNode,
88        current_task: &CurrentTask,
89        flags: OpenFlags,
90    ) -> Result<Box<dyn FileOps>, Errno> {
91        Ok(Box::new(LayeredFileOps {
92            fs: self.fs.clone(),
93            root_file: self.fs.base_fs.root().open_anonymous(locked, current_task, flags)?,
94        }))
95    }
96
97    fn lookup(
98        &self,
99        locked: &mut Locked<FileOpsCore>,
100        _node: &FsNode,
101        current_task: &CurrentTask,
102        name: &FsStr,
103    ) -> Result<FsNodeHandle, Errno> {
104        if let Some(fs) = self.fs.mappings.get(name) {
105            Ok(fs.root().node.clone())
106        } else {
107            self.fs.base_fs.root().node.lookup(locked, current_task, &MountInfo::detached(), name)
108        }
109    }
110
111    fn get_xattr(
112        &self,
113        locked: &mut Locked<FileOpsCore>,
114        _node: &FsNode,
115        current_task: &CurrentTask,
116        name: &FsStr,
117        max_size: usize,
118    ) -> Result<ValueOrSize<FsString>, Errno> {
119        self.fs.base_fs.root().node.ops().get_xattr(
120            locked,
121            &*self.fs.base_fs.root().node,
122            current_task,
123            name,
124            max_size,
125        )
126    }
127
128    /// Set an extended attribute on the node.
129    fn set_xattr(
130        &self,
131        locked: &mut Locked<FileOpsCore>,
132        _node: &FsNode,
133        current_task: &CurrentTask,
134        name: &FsStr,
135        value: &FsStr,
136        op: XattrOp,
137    ) -> Result<(), Errno> {
138        self.fs.base_fs.root().node.set_xattr(
139            locked,
140            current_task,
141            &MountInfo::detached(),
142            name,
143            value,
144            op,
145        )
146    }
147
148    fn remove_xattr(
149        &self,
150        locked: &mut Locked<FileOpsCore>,
151        _node: &FsNode,
152        current_task: &CurrentTask,
153        name: &FsStr,
154    ) -> Result<(), Errno> {
155        self.fs.base_fs.root().node.remove_xattr(locked, current_task, &MountInfo::detached(), name)
156    }
157
158    fn list_xattrs(
159        &self,
160        locked: &mut Locked<FileOpsCore>,
161        _node: &FsNode,
162        current_task: &CurrentTask,
163        max_size: usize,
164    ) -> Result<ValueOrSize<Vec<FsString>>, Errno> {
165        self.fs.base_fs.root().node.list_xattrs(locked, current_task, max_size)
166    }
167}
168
// `FileOps` for the open root directory of a `LayeredFs`.
struct LayeredFileOps {
    fs: Arc<LayeredFs>,
    // An anonymous open of the base filesystem's root directory, used to
    // enumerate the base entries after the mapped ones.
    root_file: FileHandle,
}
173
impl FileOps for LayeredFileOps {
    fileops_impl_directory!();
    fileops_impl_noop_sync!();

    /// Seek within the layered root directory.
    ///
    /// The directory's offset space is: [0, mappings.len()) for the layered-in
    /// top-level entries, and everything at or past mappings.len() maps (shifted
    /// down by mappings.len()) onto the base root directory file.
    fn seek(
        &self,
        locked: &mut Locked<FileOpsCore>,
        _file: &FileObject,
        current_task: &CurrentTask,
        current_offset: off_t,
        target: SeekTarget,
    ) -> Result<off_t, Errno> {
        let mut new_offset = unbounded_seek(current_offset, target)?;
        if new_offset >= self.fs.mappings.len() as off_t {
            // Forward the remainder of the seek to the base directory file, then
            // shift the resulting offset back into the layered offset space.
            // checked_add guards against off_t overflow.
            new_offset = self
                .root_file
                .seek(
                    locked,
                    current_task,
                    SeekTarget::Set(new_offset - self.fs.mappings.len() as off_t),
                )?
                .checked_add(self.fs.mappings.len() as off_t)
                .ok_or_else(|| errno!(EINVAL))?;
        }
        // NOTE(review): when new_offset lands inside the mapped range, the base
        // file's internal offset is left untouched — confirm a rewind into the
        // mapped range followed by readdir is intended to resume the base file
        // from its previous position.
        Ok(new_offset)
    }

    /// Enumerate the mapped top-level entries first, then the base directory's
    /// entries (shifted past the mapped range), skipping base names shadowed by
    /// a mapping.
    fn readdir(
        &self,
        locked: &mut Locked<FileOpsCore>,
        _file: &FileObject,
        current_task: &CurrentTask,
        sink: &mut dyn DirentSink,
    ) -> Result<(), Errno> {
        // Emit the mapped directories, resuming from the sink's current offset.
        // The offsets handed to the sink are 1-based positions within `mappings`.
        for (key, fs) in self.fs.mappings.iter().skip(sink.offset() as usize) {
            sink.add(fs.root().node.ino, sink.offset() + 1, DirectoryEntryType::DIR, key.as_ref())?;
        }

        // Adapter that forwards base-directory entries to `sink`, shifting their
        // offsets past the mapped range and dropping names shadowed by a mapping.
        struct DirentSinkWrapper<'a> {
            sink: &'a mut dyn DirentSink,
            mappings: &'a BTreeMap<FsString, FileSystemHandle>,
            // Tracks the base file's own (unshifted) offset.
            offset: &'a mut off_t,
        }

        impl<'a> DirentSink for DirentSinkWrapper<'a> {
            fn add(
                &mut self,
                inode_num: ino_t,
                offset: off_t,
                entry_type: DirectoryEntryType,
                name: &FsStr,
            ) -> Result<(), Errno> {
                // Skip entries shadowed by a mapping: they were already emitted
                // above from `mappings`.
                if !self.mappings.contains_key(name) {
                    self.sink.add(
                        inode_num,
                        offset + (self.mappings.len() as off_t),
                        entry_type,
                        name,
                    )?;
                }
                // Advance the base offset even for skipped entries so they are
                // not revisited on the next readdir call.
                *self.offset = offset;
                Ok(())
            }
            fn offset(&self) -> off_t {
                *self.offset
            }
        }

        // Hold the base file's offset lock for the whole pass so the wrapper can
        // update it in place as entries are consumed.
        let mut root_file_offset = self.root_file.offset.lock();
        let mut wrapper =
            DirentSinkWrapper { sink, mappings: &self.fs.mappings, offset: &mut root_file_offset };

        self.root_file.readdir(locked, current_task, &mut wrapper)
    }
}
249
#[cfg(test)]
mod test {
    use super::*;
    use starnix_core::fs::tmpfs::TmpFs;
    use starnix_core::testing::*;
    use starnix_sync::Unlocked;

    /// Collect the entry names produced by a single readdir pass over the root
    /// directory of `fs`.
    fn get_root_entry_names(
        locked: &mut Locked<Unlocked>,
        current_task: &CurrentTask,
        fs: &FileSystem,
    ) -> Vec<Vec<u8>> {
        // Sink that records every name it is offered.
        struct DirentNameCapturer {
            pub names: Vec<Vec<u8>>,
            offset: off_t,
        }
        impl DirentSink for DirentNameCapturer {
            fn add(
                &mut self,
                _inode_num: ino_t,
                offset: off_t,
                _entry_type: DirectoryEntryType,
                name: &FsStr,
            ) -> Result<(), Errno> {
                self.names.push(name.to_vec());
                self.offset = offset;
                Ok(())
            }
            fn offset(&self) -> off_t {
                self.offset
            }
        }
        let mut capturer = DirentNameCapturer { names: vec![], offset: 0 };
        let dir = fs.root().open_anonymous(locked, current_task, OpenFlags::RDONLY).expect("open");
        dir.readdir(locked, current_task, &mut capturer).expect("readdir");
        capturer.names
    }

    #[::fuchsia::test]
    async fn test_remove_duplicates() {
        #[allow(deprecated, reason = "pre-existing usage")]
        let (kernel, current_task, locked) = create_kernel_task_and_unlocked();

        // Base filesystem with two plain directories.
        let base = TmpFs::new_fs(locked, &kernel);
        for name in ["d1", "d2"] {
            base.root()
                .create_dir_for_testing(locked, &current_task, name.into())
                .expect("create_dir");
        }
        let base_entries = get_root_entry_names(locked, &current_task, &base);
        assert_eq!(base_entries.len(), 4);
        for expected in [&b"."[..], b"..", b"d1", b"d2"] {
            assert!(base_entries.contains(&expected.to_vec()));
        }

        // Layer "d1" (shadowing the base entry) and "d3" (new) over the base.
        let tmpfs1 = TmpFs::new_fs(locked, &kernel);
        let tmpfs2 = TmpFs::new_fs(locked, &kernel);
        let layered_fs = LayeredFs::new_fs(
            locked,
            &kernel,
            base,
            BTreeMap::from([("d1".into(), tmpfs1), ("d3".into(), tmpfs2)]),
        );

        // The shadowed "d1" must appear exactly once: 5 entries, not 6.
        let layered_entries = get_root_entry_names(locked, &current_task, &layered_fs);
        assert_eq!(layered_entries.len(), 5);
        for expected in [&b"."[..], b"..", b"d1", b"d2", b"d3"] {
            assert!(layered_entries.contains(&expected.to_vec()));
        }
    }
}