// starnix_core/fs/fuchsia/remote.rs
1// Copyright 2021 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5use crate::fs::fuchsia::RemoteUnixDomainSocket;
6use crate::fs::fuchsia::remote_volume::RemoteVolume;
7use crate::fs::fuchsia::sync_file::{SyncFence, SyncFile, SyncPoint, Timeline};
8use crate::mm::memory::MemoryObject;
9use crate::mm::{ProtectionFlags, VMEX_RESOURCE};
10use crate::security;
11use crate::task::{CurrentTask, Kernel};
12use crate::vfs::buffers::{InputBuffer, OutputBuffer, with_iovec_segments};
13use crate::vfs::file_server::serve_file_tagged;
14use crate::vfs::fsverity::FsVerityState;
15use crate::vfs::socket::{Socket, SocketFile, ZxioBackedSocket};
16use crate::vfs::{
17    Anon, AppendLockGuard, CacheMode, DEFAULT_BYTES_PER_BLOCK, DirectoryEntryType, DirentSink,
18    FallocMode, FileHandle, FileObject, FileOps, FileSystem, FileSystemHandle, FileSystemOps,
19    FileSystemOptions, FsNode, FsNodeHandle, FsNodeInfo, FsNodeOps, FsStr, FsString, SeekTarget,
20    SymlinkTarget, XattrOp, XattrStorage, default_ioctl, default_seek, fileops_impl_directory,
21    fileops_impl_nonseekable, fileops_impl_noop_sync, fileops_impl_seekable, fs_node_impl_not_dir,
22    fs_node_impl_symlink, fs_node_impl_xattr_delegate,
23};
24use bstr::ByteSlice;
25use fidl::endpoints::DiscoverableProtocolMarker as _;
26use fuchsia_runtime::UtcInstant;
27use linux_uapi::SYNC_IOC_MAGIC;
28use once_cell::sync::OnceCell;
29use smallvec::{SmallVec, smallvec};
30use starnix_crypt::EncryptionKeyId;
31use starnix_logging::{CATEGORY_STARNIX_MM, impossible_error, log_warn, trace_duration};
32use starnix_sync::{
33    FileOpsCore, LockEqualOrBefore, Locked, RwLock, RwLockReadGuard, RwLockWriteGuard, Unlocked,
34};
35use starnix_syscalls::{SyscallArg, SyscallResult};
36use starnix_types::vfs::default_statfs;
37use starnix_uapi::auth::{Credentials, FsCred};
38use starnix_uapi::device_type::DeviceType;
39use starnix_uapi::errors::Errno;
40use starnix_uapi::file_mode::FileMode;
41use starnix_uapi::mount_flags::MountFlags;
42use starnix_uapi::open_flags::OpenFlags;
43use starnix_uapi::{
44    __kernel_fsid_t, errno, error, from_status_like_fdio, fsverity_descriptor, mode, off_t, statfs,
45};
46use std::ops::ControlFlow;
47use std::sync::Arc;
48use std::sync::atomic::{AtomicU32, Ordering};
49use sync_io_client::{RemoteIo, create_with_on_representation};
50use syncio::zxio::{
51    ZXIO_NODE_PROTOCOL_DIRECTORY, ZXIO_NODE_PROTOCOL_FILE, ZXIO_NODE_PROTOCOL_SYMLINK,
52    ZXIO_OBJECT_TYPE_DATAGRAM_SOCKET, ZXIO_OBJECT_TYPE_NONE, ZXIO_OBJECT_TYPE_PACKET_SOCKET,
53    ZXIO_OBJECT_TYPE_RAW_SOCKET, ZXIO_OBJECT_TYPE_STREAM_SOCKET,
54    ZXIO_OBJECT_TYPE_SYNCHRONOUS_DATAGRAM_SOCKET, zxio_node_attr,
55};
56use syncio::{
57    AllocateMode, XattrSetMode, Zxio, zxio_fsverity_descriptor_t, zxio_node_attr_has_t,
58    zxio_node_attributes_t,
59};
60use zx::{Counter, HandleBased as _};
61use {
62    fidl_fuchsia_io as fio, fidl_fuchsia_starnix_binder as fbinder,
63    fidl_fuchsia_unknown as funknown,
64};
65
66fn is_special(file_info: &fio::FileInfo) -> bool {
67    matches!(
68        file_info,
69        fio::FileInfo {
70            attributes:
71                Some(fio::NodeAttributes2 {
72                    mutable_attributes: fio::MutableNodeAttributes { mode: Some(mode), .. },
73                    ..
74                }),
75            ..
76        } if {
77            let mode = FileMode::from_bits(*mode);
78            mode.is_chr() || mode.is_blk() || mode.is_fifo() || mode.is_sock()
79        }
80    )
81}
82
83pub fn new_remote_fs(
84    locked: &mut Locked<Unlocked>,
85    current_task: &CurrentTask,
86    options: FileSystemOptions,
87) -> Result<FileSystemHandle, Errno> {
88    let kernel = current_task.kernel();
89    let requested_path = std::str::from_utf8(&options.source)
90        .map_err(|_| errno!(EINVAL, "source path is not utf8"))?;
91    let mut create_flags =
92        fio::PERM_READABLE | fio::Flags::FLAG_MAYBE_CREATE | fio::Flags::PROTOCOL_DIRECTORY;
93    if !options.flags.contains(MountFlags::RDONLY) {
94        create_flags |= fio::PERM_WRITABLE;
95    }
96    let (root_proxy, subdir) = kernel.open_ns_dir(requested_path, create_flags)?;
97
98    let subdir = if subdir.is_empty() { ".".to_string() } else { subdir };
99    let mut open_rights = fio::PERM_READABLE;
100    if !options.flags.contains(MountFlags::RDONLY) {
101        open_rights |= fio::PERM_WRITABLE;
102    }
103    let mut subdir_options = options;
104    subdir_options.source = subdir.into();
105    new_remotefs_in_root(locked, kernel, &root_proxy, subdir_options, open_rights)
106}
107
108/// Create a filesystem to access the content of the fuchsia directory available
109/// at `options.source` inside `root`.
110pub fn new_remotefs_in_root<L>(
111    locked: &mut Locked<L>,
112    kernel: &Kernel,
113    root: &fio::DirectorySynchronousProxy,
114    options: FileSystemOptions,
115    rights: fio::Flags,
116) -> Result<FileSystemHandle, Errno>
117where
118    L: LockEqualOrBefore<FileOpsCore>,
119{
120    let root = syncio::directory_open_directory_async(
121        root,
122        std::str::from_utf8(&options.source)
123            .map_err(|_| errno!(EINVAL, "source path is not utf8"))?,
124        rights,
125    )
126    .map_err(|e| errno!(EIO, format!("Failed to open root: {e}")))?;
127    RemoteFs::new_fs(locked, kernel, root.into_channel(), options, rights)
128}
129
/// `FileSystemOps` backed by a remote fuchsia.io directory hierarchy.
pub struct RemoteFs {
    // If true, trust the remote file system's IDs (which requires that the remote file system does
    // not span mounts).  This must be true to properly support hard links.  If this is false, the
    // same node can end up having different IDs as it leaves and reenters the node cache.
    // TODO(https://fxbug.dev/42081972): At the time of writing, package directories do not have
    // unique IDs so this *must* be false in that case.
    use_remote_ids: bool,

    // Synchronous connection to the remote root directory, used for
    // filesystem-wide operations (statfs, sync).
    root_proxy: fio::DirectorySynchronousProxy,

    // The rights used for the root node.
    root_rights: fio::Flags,
}
143
144impl RemoteFs {
145    /// Returns a reference to a RemoteFs given a reference to a FileSystem.
146    ///
147    /// # Panics
148    ///
149    /// This will panic if `fs`'s ops aren't `RemoteFs`, so this should only be called when this is
150    /// known to be the case.
151    fn from_fs(fs: &FileSystem) -> &RemoteFs {
152        if let Some(remote_vol) = fs.downcast_ops::<RemoteVolume>() {
153            remote_vol.remotefs()
154        } else {
155            fs.downcast_ops::<RemoteFs>().unwrap()
156        }
157    }
158}
159
// "f.io" interpreted big-endian; reported as the filesystem magic in statfs
// when the remote does not supply one.
const REMOTE_FS_MAGIC: u32 = u32::from_be_bytes(*b"f.io");
// Function numbers (within SYNC_IOC_MAGIC) for the sync-file ioctl API.
const SYNC_IOC_FILE_INFO: u8 = 4;
const SYNC_IOC_MERGE: u8 = 3;
163
impl FileSystemOps for RemoteFs {
    /// Builds a `statfs` from the remote's `QueryFilesystem` response, falling
    /// back to defaults when the remote does not support the query.
    fn statfs(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        _fs: &FileSystem,
        _current_task: &CurrentTask,
    ) -> Result<statfs, Errno> {
        let (status, info) = self
            .root_proxy
            .query_filesystem(zx::MonotonicInstant::INFINITE)
            .map_err(|_| errno!(EIO))?;
        // Not all remote filesystems support `QueryFilesystem`, many return ZX_ERR_NOT_SUPPORTED.
        if status == 0 {
            if let Some(info) = info {
                // Convert byte counts to block counts, saturating at i64::MAX;
                // report zeros if the remote claims a zero block size.
                let (total_blocks, free_blocks) = if info.block_size > 0 {
                    (
                        (info.total_bytes / u64::from(info.block_size))
                            .try_into()
                            .unwrap_or(i64::MAX),
                        ((info.total_bytes.saturating_sub(info.used_bytes))
                            / u64::from(info.block_size))
                        .try_into()
                        .unwrap_or(i64::MAX),
                    )
                } else {
                    (0, 0)
                };

                // Split the 64-bit filesystem id into the two 32-bit halves of
                // the kernel fsid structure (low word first).
                let fsid = __kernel_fsid_t {
                    val: [
                        (info.fs_id & 0xffffffff) as i32,
                        ((info.fs_id >> 32) & 0xffffffff) as i32,
                    ],
                };

                return Ok(statfs {
                    f_type: info.fs_type as i64,
                    f_bsize: info.block_size.into(),
                    f_blocks: total_blocks,
                    f_bfree: free_blocks,
                    f_bavail: free_blocks,
                    f_files: info.total_nodes.try_into().unwrap_or(i64::MAX),
                    f_ffree: (info.total_nodes.saturating_sub(info.used_nodes))
                        .try_into()
                        .unwrap_or(i64::MAX),
                    f_fsid: fsid,
                    f_namelen: info.max_filename_size.try_into().unwrap_or(0),
                    f_frsize: info.block_size.into(),
                    ..statfs::default()
                });
            }
        }
        Ok(default_statfs(REMOTE_FS_MAGIC))
    }

    fn name(&self) -> &'static FsStr {
        "remotefs".into()
    }

    // Remote IDs are only exposed when they are known to be unique; see the
    // comment on `use_remote_ids` in the struct definition.
    fn uses_external_node_ids(&self) -> bool {
        self.use_remote_ids
    }

    /// Performs the rename on the remote filesystem, marking every affected
    /// node's cached info dirty around the call (see `will_dirty`).
    fn rename(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        _fs: &FileSystem,
        current_task: &CurrentTask,
        old_parent: &FsNodeHandle,
        old_name: &FsStr,
        new_parent: &FsNodeHandle,
        new_name: &FsStr,
        renamed: &FsNodeHandle,
        replaced: Option<&FsNodeHandle>,
    ) -> Result<(), Errno> {
        // Renames should fail if the src or target directory is encrypted and locked.
        old_parent.fail_if_locked(current_task)?;
        new_parent.fail_if_locked(current_task)?;

        // Both parents must be plain RemoteNodes; anything else means the
        // rename crosses filesystems.
        let Some((old_parent_ops, new_parent_ops)) =
            old_parent.downcast_ops::<RemoteNode>().zip(new_parent.downcast_ops::<RemoteNode>())
        else {
            return error!(EXDEV);
        };

        // Collect every node whose cached info the rename may invalidate.
        let mut nodes: SmallVec<[&FsNode; 4]> =
            smallvec![&***old_parent, &***new_parent, &***renamed];
        if let Some(r) = replaced {
            nodes.push(r);
        }

        will_dirty(&nodes, || {
            old_parent_ops
                .node
                .io
                .rename(get_name_str(old_name)?, &new_parent_ops.node.io, get_name_str(new_name)?)
                .map_err(map_sync_io_client_error)
        })
    }

    /// Syncs the whole remote filesystem via the root directory connection.
    fn sync(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        _fs: &FileSystem,
        _current_task: &CurrentTask,
    ) -> Result<(), Errno> {
        self.root_proxy
            .sync(zx::MonotonicInstant::INFINITE)
            .map_err(|_| errno!(EIO))?
            .map_err(|status| map_sync_error(zx::Status::from_raw(status)))
    }

    // The remote server maintains node timestamps itself.
    fn manages_timestamps(&self) -> bool {
        true
    }
}
280
/// `sync_io_client::Factory` that produces the `FsNodeOps` implementation for
/// each kind of node discovered on the remote filesystem.
struct Factory {
    // When true, files are always wrapped as RemoteSpecialNode regardless of
    // what `is_special` says about their mode.
    assume_special: bool,
    // Initial dirty flag passed to each node's BaseNode; see `will_dirty` for
    // the info-state semantics.
    start_dirty: bool,
}
285
286impl sync_io_client::Factory for Factory {
287    type Result = Box<dyn FsNodeOps>;
288
289    fn create_node(self, io: RemoteIo) -> Self::Result {
290        Box::new(RemoteNode::new(io, self.start_dirty))
291    }
292
293    fn create_directory(self, io: RemoteIo) -> Self::Result {
294        Box::new(RemoteNode::new(io, self.start_dirty))
295    }
296
297    fn create_file(self, io: RemoteIo, info: &fio::FileInfo) -> Self::Result {
298        if self.assume_special || is_special(info) {
299            Box::new(RemoteSpecialNode { node: BaseNode::new(io, self.start_dirty) })
300        } else {
301            Box::new(RemoteNode::new(io, self.start_dirty))
302        }
303    }
304
305    fn create_symlink(self, io: RemoteIo, target: Vec<u8>) -> Self::Result {
306        Box::new(RemoteSymlink::new(BaseNode::new(io, self.start_dirty), target))
307    }
308}
309
impl RemoteFs {
    /// Connects to the remote root directory and builds the `RemoteFs` ops
    /// together with the root node's `FsNodeOps`, its initial attributes, and
    /// its wrapping key id (when the remote reports one).
    pub(super) fn new(
        root: zx::Channel,
        root_rights: fio::Flags,
    ) -> Result<(RemoteFs, Box<dyn FsNodeOps>, zxio_node_attributes_t, Option<[u8; 16]>), Errno>
    {
        let (client_end, server_end) = zx::Channel::create();
        let root_proxy = fio::DirectorySynchronousProxy::new(root);
        // Re-open "." requesting an OnRepresentation event that carries the
        // id and wrapping-key-id attributes for the root node.
        root_proxy
            .open(
                ".",
                fio::Flags::PROTOCOL_DIRECTORY
                    | fio::PERM_READABLE
                    | fio::Flags::PERM_INHERIT_WRITE
                    | fio::Flags::PERM_INHERIT_EXECUTE
                    | fio::Flags::FLAG_SEND_REPRESENTATION,
                &fio::Options {
                    attributes: Some(
                        fio::NodeAttributesQuery::ID | fio::NodeAttributesQuery::WRAPPING_KEY_ID,
                    ),
                    ..Default::default()
                },
                server_end,
            )
            .map_err(|_| errno!(EIO))?;

        // Use remote IDs if the filesystem is Fxfs which we know will give us unique IDs.  Hard
        // links need to resolve to the same underlying FsNode, so we can only support hard links if
        // the remote file system will give us unique IDs.  The IDs are also used as the key in
        // caches, so we can't use remote IDs if the remote filesystem is not guaranteed to provide
        // unique IDs, or if the remote filesystem spans multiple filesystems.
        let (status, info) =
            root_proxy.query_filesystem(zx::MonotonicInstant::INFINITE).map_err(|_| errno!(EIO))?;

        // Be tolerant of errors here; many filesystems return `ZX_ERR_NOT_SUPPORTED`.
        let use_remote_ids = status == 0
            && info
                .map(|i| i.fs_type == fidl_fuchsia_fs::VfsType::Fxfs.into_primitive())
                .unwrap_or(false);

        // The OnRepresentation response will return an initial set of `attrs`, so create the node
        // with `start_dirty=false`.
        let (remote_node, attrs, _) = create_with_on_representation(
            client_end.into(),
            Factory { assume_special: false, start_dirty: false },
        )
        .map_err(map_sync_io_client_error)?;

        Ok((
            RemoteFs { use_remote_ids, root_proxy, root_rights },
            remote_node,
            attrs,
            // Only propagate the wrapping key id when the remote actually set it.
            attrs.has.wrapping_key_id.then_some(attrs.wrapping_key_id),
        ))
    }

    /// Creates a cached `FileSystem` rooted at the directory behind `root`.
    ///
    /// The mount is forced read-only when `rights` lacks write permission. The
    /// root node's id comes from the remote when remote IDs are trusted, and is
    /// otherwise allocated locally.
    pub fn new_fs<L>(
        locked: &mut Locked<L>,
        kernel: &Kernel,
        root: zx::Channel,
        mut options: FileSystemOptions,
        rights: fio::Flags,
    ) -> Result<FileSystemHandle, Errno>
    where
        L: LockEqualOrBefore<FileOpsCore>,
    {
        let (remotefs, root_node, attrs, wrapping_key_id) = RemoteFs::new(root, rights)?;

        if !rights.contains(fio::PERM_WRITABLE) {
            options.flags |= MountFlags::RDONLY;
        }
        let use_remote_ids = remotefs.use_remote_ids;
        let fs = FileSystem::new(
            locked,
            kernel,
            CacheMode::Cached(kernel.fs_cache_config()),
            remotefs,
            options,
        )?;

        // Seed the root node info from the attributes delivered in OnRepresentation.
        let mut info =
            FsNodeInfo { wrapping_key_id, ..FsNodeInfo::new(mode!(IFDIR, 0o777), FsCred::root()) };
        update_info_from_attrs(&mut info, &attrs);

        if use_remote_ids {
            fs.create_root_with_info(attrs.id, root_node, info);
        } else {
            let root_ino = fs.allocate_ino();
            fs.create_root_with_info(root_ino, root_node, info);
        }

        Ok(fs)
    }

    // Whether remote node IDs are trusted; see the struct field comment.
    pub(super) fn use_remote_ids(&self) -> bool {
        self.use_remote_ids
    }
}
408
/// All nodes compose `BaseNode`.
///
/// The concrete node types (`RemoteNode`, `RemoteSpecialNode`, `RemoteSymlink`)
/// each embed one of these; the `TryFrom` impl below extracts it.
///
/// NOTE: If new node types are created, the `TryFrom` implementation needs updating below.
struct BaseNode {
    /// The underlying I/O object for this remote node.
    io: RemoteIo,

    /// The number of active dirty operations on this node and whether the node info is in sync.
    /// See the `will_dirty` function for semantics.
    info_state: InfoState,
}
420
impl BaseNode {
    /// Wraps `io`; `dirty` seeds the initial `InfoState` (see `will_dirty`).
    fn new(io: RemoteIo, dirty: bool) -> Self {
        Self { io, info_state: InfoState::new(dirty) }
    }

    /// Returns a read guard over `info`, letting `info_state` decide whether
    /// the cached attributes must first be re-fetched from the remote node.
    fn fetch_and_refresh_info<'a>(
        &self,
        info: &'a RwLock<FsNodeInfo>,
    ) -> Result<RwLockReadGuard<'a, FsNodeInfo>, Errno> {
        self.info_state.maybe_refresh(
            info,
            // Refresh path: query the remote attributes and merge them in.
            |info| {
                let mut query = NODE_INFO_ATTRIBUTES;
                // Include the pending access-time update in the query when one
                // is outstanding for this node.
                if info.read().pending_time_access_update {
                    query |= fio::NodeAttributesQuery::PENDING_ACCESS_TIME_UPDATE;
                }
                let attrs = self.io.attr_get_zxio(query).map_err(map_sync_io_client_error)?;
                let mut info = info.write();
                update_info_from_attrs(&mut info, &attrs);
                info.pending_time_access_update = false;
                // Downgrade so the caller only ever receives a read guard.
                Ok(RwLockWriteGuard::downgrade(info))
            },
            // Fast path: cached info is current; just take the read lock.
            |info| Ok(info.read()),
        )
    }
}
447
448impl<'a> TryFrom<&'a FsNode> for &'a BaseNode {
449    type Error = ();
450    fn try_from(value: &FsNode) -> Result<&BaseNode, ()> {
451        value
452            .downcast_ops::<RemoteNode>()
453            .map(|n| &n.node)
454            .or_else(|| value.downcast_ops::<RemoteSpecialNode>().map(|n| &n.node))
455            .or_else(|| value.downcast_ops::<RemoteSymlink>().map(|n| &n.node))
456            .ok_or(())
457    }
458}
459
/// This is the most common type of node.  It is used for files and directories.  Symlinks and
/// special nodes use RemoteSymlink and RemoteSpecialNode respectively.
struct RemoteNode {
    // The shared per-node state and remote I/O connection.
    node: BaseNode,
}
465
466impl RemoteNode {
467    fn new(io: RemoteIo, dirty: bool) -> Self {
468        Self { node: BaseNode::new(io, dirty) }
469    }
470}
471
472/// Creates a file handle from a zx::NullableHandle.
473///
474/// The handle must be a channel, socket, vmo or debuglog object.  If the handle is a channel, then
475/// the channel must implement the `fuchsia.unknown/Queryable` protocol.  Not all protocols are
476/// supported; files and directories are, but symlinks are not.
477///
478/// The resulting object will be owned by root, and will have permissions derived from the `flags`
479/// used to open this object. This is not the same as the permissions set if the object was created
480/// using Starnix itself. We use this mainly for interfacing with objects created outside of Starnix
481/// where these flags represent the desired permissions already.
482pub fn new_remote_file<L>(
483    locked: &mut Locked<L>,
484    current_task: &CurrentTask,
485    handle: zx::NullableHandle,
486    flags: OpenFlags,
487) -> Result<FileHandle, Errno>
488where
489    L: LockEqualOrBefore<FileOpsCore>,
490{
491    let remote_creds = current_task.current_creds().clone();
492    let (attrs, ops) = remote_file_attrs_and_ops(current_task, handle.into(), remote_creds)?;
493    let mut rights = fio::Flags::empty();
494    if flags.can_read() {
495        rights |= fio::PERM_READABLE;
496    }
497    if flags.can_write() {
498        rights |= fio::PERM_WRITABLE;
499    }
500    let mode = get_mode(&attrs, rights);
501    // TODO: https://fxbug.dev/407611229 - Give these nodes valid labels.
502    let mut info = FsNodeInfo::new(mode, FsCred::root());
503    update_info_from_attrs(&mut info, &attrs);
504    Ok(Anon::new_private_file_extended(locked, current_task, ops, flags, "[fuchsia:remote]", info))
505}
506
507/// Creates a FileOps from a zx::NullableHandle.
508///
509/// The handle must satisfy the same requirements as `new_remote_file`.
510pub fn new_remote_file_ops(
511    current_task: &CurrentTask,
512    handle: zx::NullableHandle,
513    creds: Arc<Credentials>,
514) -> Result<Box<dyn FileOps>, Errno> {
515    let (_, ops) = remote_file_attrs_and_ops(current_task, handle, creds)?;
516    Ok(ops)
517}
518
/// Resolves a handle to its node attributes plus the `FileOps` implementation
/// appropriate for its protocol; see `new_remote_file` for the requirements on
/// `handle`.
fn remote_file_attrs_and_ops(
    current_task: &CurrentTask,
    mut handle: zx::NullableHandle,
    remote_creds: Arc<Credentials>,
) -> Result<(zxio_node_attr, Box<dyn FileOps>), Errno> {
    let handle_type =
        handle.basic_info().map_err(|status| from_status_like_fdio!(status))?.object_type;

    if handle_type == zx::ObjectType::CHANNEL {
        // Channels must implement fuchsia.unknown/Queryable; dispatch on the
        // protocol name it reports.
        let channel = zx::Channel::from(handle);
        let queryable = funknown::QueryableSynchronousProxy::new(channel);
        let protocol = queryable.query(zx::MonotonicInstant::INFINITE).map_err(|_| errno!(EIO))?;
        const UNIX_DOMAIN_SOCKET_PROTOCOL: &[u8] =
            fbinder::UnixDomainSocketMarker::PROTOCOL_NAME.as_bytes();
        const FILE_PROTOCOL: &[u8] = fio::FileMarker::PROTOCOL_NAME.as_bytes();
        const DIRECTORY_PROTOCOL: &[u8] = fio::DirectoryMarker::PROTOCOL_NAME.as_bytes();
        match &protocol[..] {
            UNIX_DOMAIN_SOCKET_PROTOCOL => {
                // Wrap the channel as a Unix domain socket; the mode is
                // synthesized since sockets carry no remote attributes.
                let socket_ops =
                    RemoteUnixDomainSocket::new(queryable.into_channel(), remote_creds)?;
                let socket = Socket::new_with_ops(Box::new(socket_ops))?;
                let file_ops = SocketFile::new(socket);
                let attr = zxio_node_attr {
                    has: zxio_node_attr_has_t { mode: true, ..zxio_node_attr_has_t::default() },
                    mode: 0o777 | FileMode::IFSOCK.bits(),
                    ..zxio_node_attr::default()
                };
                return Ok((attr, file_ops));
            }
            FILE_PROTOCOL => {
                // Files: grab the optional stream from Describe and read the
                // real attributes from the remote.
                let file_proxy = fio::FileSynchronousProxy::from(queryable.into_channel());
                let info =
                    file_proxy.describe(zx::MonotonicInstant::INFINITE).map_err(|_| errno!(EIO))?;
                let io = RemoteIo::with_stream(
                    file_proxy.into_channel().into(),
                    info.stream.unwrap_or_else(|| zx::NullableHandle::invalid().into()),
                );
                let attr = io
                    .attr_get_zxio(MODE_ATTRIBUTES | NODE_INFO_ATTRIBUTES)
                    .map_err(map_sync_io_client_error)?;
                return Ok((attr, Box::new(AnonymousRemoteFileObject::new(io))));
            }
            DIRECTORY_PROTOCOL => {
                let io = RemoteIo::new(queryable.into_channel().into());
                let attr = io
                    .attr_get_zxio(MODE_ATTRIBUTES | NODE_INFO_ATTRIBUTES)
                    .map_err(map_sync_io_client_error)?;
                return Ok((
                    attr,
                    Box::new(RemoteDirectoryObject::new(io.into_proxy().into_channel().into())),
                ));
            }
            _ => {
                handle = queryable.into_channel().into_handle();
                // Fall through for zxio.
            }
        }
    } else if handle_type == zx::ObjectType::COUNTER {
        // Counters have no remote attributes at all.
        let attr = zxio_node_attr::default();
        let file_ops = Box::new(RemoteCounter::new(handle.into()));
        return Ok((attr, file_ops));
    }

    // Otherwise, use zxio based objects.

    // NOTE: If it's a channel, this will repeat the query, which is something we can optimize if we
    // need to.
    let zxio = Zxio::create(handle).map_err(|status| from_status_like_fdio!(status))?;
    let mut attrs = zxio
        .attr_get(zxio_node_attr_has_t {
            protocols: true,
            content_size: true,
            storage_size: true,
            link_count: true,
            object_type: true,
            ..Default::default()
        })
        .map_err(|status| from_status_like_fdio!(status))?;
    // Dispatch on the zircon handle type and the zxio object type together.
    let ops: Box<dyn FileOps> = match (handle_type, attrs.object_type) {
        (zx::ObjectType::VMO, _) | (zx::ObjectType::DEBUGLOG, _) | (_, ZXIO_OBJECT_TYPE_NONE) => {
            Box::new(RemoteZxioFileObject::new(zxio))
        }
        (zx::ObjectType::SOCKET, _)
        | (_, ZXIO_OBJECT_TYPE_SYNCHRONOUS_DATAGRAM_SOCKET)
        | (_, ZXIO_OBJECT_TYPE_DATAGRAM_SOCKET)
        | (_, ZXIO_OBJECT_TYPE_STREAM_SOCKET)
        | (_, ZXIO_OBJECT_TYPE_RAW_SOCKET)
        | (_, ZXIO_OBJECT_TYPE_PACKET_SOCKET) => {
            // Any socket flavor: wrap as a zxio-backed socket and force the
            // mode to report a socket file type.
            let socket_ops = ZxioBackedSocket::new_with_zxio(current_task, zxio);
            let socket = Socket::new_with_ops(Box::new(socket_ops))?;
            attrs.has.mode = true;
            attrs.mode = FileMode::IFSOCK.bits();
            SocketFile::new(socket)
        }
        _ => return error!(ENOTSUP),
    };
    Ok((attrs, ops))
}
617
/// Wraps a Zircon socket in a Starnix file handle for pipe-like use.
///
/// This simply delegates to `new_remote_file` with the socket handle; see that
/// function for the semantics of `flags` and the resulting permissions.
pub fn create_fuchsia_pipe<L>(
    locked: &mut Locked<L>,
    current_task: &CurrentTask,
    socket: zx::Socket,
    flags: OpenFlags,
) -> Result<FileHandle, Errno>
where
    L: LockEqualOrBefore<FileOpsCore>,
{
    new_remote_file(locked, current_task, socket.into(), flags)
}
629
// This only needs to include attributes that can be out of date.  There are other attributes that
// we read when we first look up the node (see `lookup`).
//
// Queried on every `BaseNode::fetch_and_refresh_info`; keep this list in sync
// with what `update_info_from_attrs` knows how to apply.
const NODE_INFO_ATTRIBUTES: fio::NodeAttributesQuery = fio::NodeAttributesQuery::CONTENT_SIZE
    .union(fio::NodeAttributesQuery::STORAGE_SIZE)
    .union(fio::NodeAttributesQuery::LINK_COUNT)
    .union(fio::NodeAttributesQuery::MODIFICATION_TIME)
    .union(fio::NodeAttributesQuery::CHANGE_TIME)
    .union(fio::NodeAttributesQuery::ACCESS_TIME);
638
639/// Updates info from attrs if they are set.
640///
641// Keep in sync with `NODE_INFO_ATTRIBUTES`.
642pub(super) fn update_info_from_attrs(info: &mut FsNodeInfo, attrs: &zxio_node_attributes_t) {
643    // TODO - store these in FsNodeState and convert on fstat
644    if attrs.has.content_size {
645        info.size = attrs.content_size.try_into().unwrap_or(std::usize::MAX);
646    }
647    if attrs.has.storage_size {
648        info.blocks = usize::try_from(attrs.storage_size)
649            .unwrap_or(std::usize::MAX)
650            .div_ceil(DEFAULT_BYTES_PER_BLOCK)
651    }
652    info.blksize = DEFAULT_BYTES_PER_BLOCK;
653    if attrs.has.link_count {
654        info.link_count = attrs.link_count.try_into().unwrap_or(std::usize::MAX);
655    }
656    if attrs.has.modification_time {
657        info.time_modify =
658            UtcInstant::from_nanos(attrs.modification_time.try_into().unwrap_or(i64::MAX));
659    }
660    if attrs.has.change_time {
661        info.time_status_change =
662            UtcInstant::from_nanos(attrs.change_time.try_into().unwrap_or(i64::MAX));
663    }
664    if attrs.has.access_time {
665        info.time_access = UtcInstant::from_nanos(attrs.access_time.try_into().unwrap_or(i64::MAX));
666    }
667}
668
// Attributes `get_mode` needs to synthesize a POSIX mode for a node.
const MODE_ATTRIBUTES: fio::NodeAttributesQuery =
    fio::NodeAttributesQuery::PROTOCOLS.union(fio::NodeAttributesQuery::MODE);
671
672// NOTE: Keep in sync with `MODE_ATTRIBUTES`.
673fn get_mode(attrs: &zxio_node_attributes_t, rights: fio::Flags) -> FileMode {
674    if attrs.protocols & ZXIO_NODE_PROTOCOL_SYMLINK != 0 {
675        // We don't set the mode for symbolic links , so we synthesize it instead.
676        FileMode::IFLNK | FileMode::ALLOW_ALL
677    } else if attrs.has.mode {
678        // If the filesystem supports POSIX mode bits, use that directly.
679        FileMode::from_bits(attrs.mode)
680    } else {
681        // The filesystem doesn't support the `mode` attribute, so synthesize it from the protocols
682        // this node supports, and the rights used to open it.
683        let is_directory =
684            attrs.protocols & ZXIO_NODE_PROTOCOL_DIRECTORY == ZXIO_NODE_PROTOCOL_DIRECTORY;
685        let mode = if is_directory { FileMode::IFDIR } else { FileMode::IFREG };
686        let mut permissions = FileMode::EMPTY;
687        if rights.contains(fio::PERM_READABLE) {
688            permissions |= FileMode::IRUSR;
689        }
690        if rights.contains(fio::PERM_WRITABLE) {
691            permissions |= FileMode::IWUSR;
692        }
693        if rights.contains(fio::PERM_EXECUTABLE) {
694            permissions |= FileMode::IXUSR;
695        }
696        // Make sure the same permissions are granted to user, group, and other.
697        permissions |= FileMode::from_bits((permissions.bits() >> 3) | (permissions.bits() >> 6));
698        mode | permissions
699    }
700}
701
702fn get_name_str<'a>(name_bytes: &'a FsStr) -> Result<&'a str, Errno> {
703    std::str::from_utf8(name_bytes.as_ref()).map_err(|_| {
704        log_warn!("bad utf8 in pathname! remote filesystems can't handle this");
705        errno!(EINVAL)
706    })
707}
708
impl XattrStorage for BaseNode {
    /// Reads the named extended attribute, mapping a missing attribute to
    /// `ENODATA` as Linux callers expect.
    fn get_xattr(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        name: &FsStr,
    ) -> Result<FsString, Errno> {
        Ok(self
            .io
            .xattr_get(name)
            .map_err(|status| match status {
                zx::Status::NOT_FOUND => errno!(ENODATA),
                status => from_status_like_fdio!(status),
            })?
            .into())
    }

    /// Sets, creates, or replaces the named attribute per `op`, marking this
    /// node's cached info dirty around the remote call (see `will_dirty`).
    fn set_xattr(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        name: &FsStr,
        value: &FsStr,
        op: XattrOp,
    ) -> Result<(), Errno> {
        // Translate the Linux xattr operation into the zxio set mode.
        let mode = match op {
            XattrOp::Set => XattrSetMode::Set,
            XattrOp::Create => XattrSetMode::Create,
            XattrOp::Replace => XattrSetMode::Replace,
        };

        will_dirty(&[self], || {
            self.io.xattr_set(name, value, mode).map_err(|status| match status {
                zx::Status::NOT_FOUND => errno!(ENODATA),
                status => from_status_like_fdio!(status),
            })
        })
    }

    /// Removes the named attribute (dirtying cached info), mapping a missing
    /// attribute to `ENODATA`.
    fn remove_xattr(&self, _locked: &mut Locked<FileOpsCore>, name: &FsStr) -> Result<(), Errno> {
        will_dirty(&[self], || {
            self.io.xattr_remove(name).map_err(|status| match status {
                zx::Status::NOT_FOUND => errno!(ENODATA),
                _ => from_status_like_fdio!(status),
            })
        })
    }

    /// Lists all extended attribute names present on the remote node.
    fn list_xattrs(&self, _locked: &mut Locked<FileOpsCore>) -> Result<Vec<FsString>, Errno> {
        self.io
            .xattr_list()
            .map(|attrs| attrs.into_iter().map(FsString::new).collect::<Vec<_>>())
            .map_err(map_sync_io_client_error)
    }
}
762
763impl FsNodeOps for RemoteNode {
764    fs_node_impl_xattr_delegate!(self, self.node);
765
    /// Opens this node, returning `FileOps` appropriate to the node type.
    ///
    /// Directories get their own cloned connection (`RemoteDirectoryObject`) because the
    /// directory seek offset is state on the connection; regular files get a
    /// `RemoteFileObject` that reaches the node's shared connection on demand.
    ///
    /// # Errors
    ///
    /// `ENOKEY` for writable opens of locked encrypted directories or opens of locked
    /// encrypted files; errors from the fsverity writability check for writable opens of
    /// verity-protected files.
    fn create_file_ops(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        node: &FsNode,
        current_task: &CurrentTask,
        flags: OpenFlags,
    ) -> Result<Box<dyn FileOps>, Errno> {
        {
            // It is safe to read the cached node info here because the `wrapping_key_id` is
            // fetched when the node is first opened, and updated when set. We don't expect this to
            // change out from under Starnix.
            let node_info = node.info();
            if node_info.mode.is_dir() {
                if let Some(wrapping_key_id) = node_info.wrapping_key_id {
                    if flags.can_write() {
                        // Locked encrypted directories cannot be opened with write access.
                        let crypt_service =
                            node.fs().crypt_service().ok_or_else(|| errno!(ENOKEY))?;
                        if !crypt_service.contains_key(EncryptionKeyId::from(wrapping_key_id)) {
                            return error!(ENOKEY);
                        }
                    }
                }
                // For directories we need to clone the connection because we rely on the seek
                // offset.
                return Ok(Box::new(RemoteDirectoryObject::new(
                    self.node
                        .io
                        .clone_proxy()
                        .map(|p| p.into_channel().into())
                        .map_err(map_sync_io_client_error)?,
                )));
            }
        }

        // Locked encrypted files cannot be opened.
        node.fail_if_locked(current_task)?;

        // fsverity files cannot be opened in write mode, including while building.
        if flags.can_write() {
            node.fsverity.lock().check_writable()?;
        }

        Ok(Box::new(RemoteFileObject::default()))
    }
811
812    fn sync(&self, _node: &FsNode, _current_task: &CurrentTask) -> Result<(), Errno> {
813        self.node.io.sync().map_err(map_sync_io_client_error)
814    }
815
    /// Creates a regular or special file named `name` under this directory.
    ///
    /// The remote entry is created atomically with its mode/uid/gid/rdev attributes via
    /// `FLAG_MUST_CREATE`. For non-regular modes `assume_special` is set so the factory can
    /// hand back special-node ops (see `RemoteSpecialNode`).
    ///
    /// # Errors
    ///
    /// `EINVAL` for unsupported file types; `ENOKEY`-style errors if this directory is a
    /// locked encrypted directory; remote `open` failures mapped via fdio conventions.
    fn mknod(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        node: &FsNode,
        current_task: &CurrentTask,
        name: &FsStr,
        mode: FileMode,
        dev: DeviceType,
        owner: FsCred,
    ) -> Result<FsNodeHandle, Errno> {
        node.fail_if_locked(current_task)?;
        let name = get_name_str(name)?;

        let fs = node.fs();
        let fs_ops = RemoteFs::from_fs(&fs);

        // Only these file types may be created by mknod.
        if !(mode.is_reg() || mode.is_chr() || mode.is_blk() || mode.is_fifo() || mode.is_sock()) {
            return error!(EINVAL, name);
        }

        // Creation mutates the parent directory, so mark it dirty around the remote call.
        let (ops, attrs, _) = will_dirty(&[&self.node], || {
            self.node
                .io
                .open(
                    name,
                    fio::Flags::FLAG_MUST_CREATE
                        | fio::Flags::PROTOCOL_FILE
                        | fio::PERM_READABLE
                        | fio::PERM_WRITABLE,
                    Some(fio::MutableNodeAttributes {
                        mode: Some(mode.bits()),
                        uid: Some(owner.uid),
                        gid: Some(owner.gid),
                        rdev: Some(dev.bits()),
                        ..Default::default()
                    }),
                    fio::NodeAttributesQuery::ID | fio::NodeAttributesQuery::WRAPPING_KEY_ID,
                    Factory { assume_special: !mode.is_reg(), start_dirty: true },
                )
                .map_err(|status| from_status_like_fdio!(status, name))
        })?;

        // Use the remote-assigned inode number when the filesystem has stable remote ids;
        // otherwise allocate a local one.
        let node_id = if fs_ops.use_remote_ids { attrs.id } else { fs.allocate_ino() };

        let mut node_info = FsNodeInfo { rdev: dev, ..FsNodeInfo::new(mode, owner) };
        if attrs.has.wrapping_key_id {
            node_info.wrapping_key_id = Some(attrs.wrapping_key_id);
        }

        let child = fs.create_node(node_id, ops, node_info);
        Ok(child)
    }
868
869    fn mkdir(
870        &self,
871        _locked: &mut Locked<FileOpsCore>,
872        node: &FsNode,
873        current_task: &CurrentTask,
874        name: &FsStr,
875        mode: FileMode,
876        owner: FsCred,
877    ) -> Result<FsNodeHandle, Errno> {
878        node.fail_if_locked(current_task)?;
879        let name = get_name_str(name)?;
880
881        let fs = node.fs();
882        let fs_ops = RemoteFs::from_fs(&fs);
883
884        let mut node_id;
885        let (ops, attrs, _) = will_dirty(&[&self.node], || {
886            self.node
887                .io
888                .open(
889                    name,
890                    fio::Flags::FLAG_MUST_CREATE
891                        | fio::Flags::PROTOCOL_DIRECTORY
892                        | fio::PERM_READABLE
893                        | fio::PERM_WRITABLE,
894                    Some(fio::MutableNodeAttributes {
895                        mode: Some(mode.bits()),
896                        uid: Some(owner.uid),
897                        gid: Some(owner.gid),
898                        ..Default::default()
899                    }),
900                    fio::NodeAttributesQuery::ID | fio::NodeAttributesQuery::WRAPPING_KEY_ID,
901                    Factory { assume_special: false, start_dirty: true },
902                )
903                .map_err(|status| from_status_like_fdio!(status, name))
904        })?;
905        node_id = attrs.id;
906
907        if !fs_ops.use_remote_ids {
908            node_id = fs.allocate_ino();
909        }
910
911        let mut node_info = FsNodeInfo::new(mode, owner);
912        if attrs.has.wrapping_key_id {
913            node_info.wrapping_key_id = Some(attrs.wrapping_key_id);
914        }
915
916        let child = fs.create_node(node_id, ops, node_info);
917        Ok(child)
918    }
919
    /// Looks up `name` under this directory, returning a (possibly already cached) child node.
    ///
    /// Performs a single remote `open` that also fetches every attribute needed to build
    /// `FsNodeInfo`; then either creates a new cached `FsNode` or refreshes the existing one's
    /// info from the fetched attributes.
    fn lookup(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        node: &FsNode,
        current_task: &CurrentTask,
        name: &FsStr,
    ) -> Result<FsNodeHandle, Errno> {
        let name = get_name_str(name)?;

        let fs = node.fs();
        let fs_ops = RemoteFs::from_fs(&fs);

        // Request everything needed to populate `FsNodeInfo` up front so we only make one
        // round trip to the remote filesystem.
        let mut query = MODE_ATTRIBUTES
            | NODE_INFO_ATTRIBUTES
            | fio::NodeAttributesQuery::ID
            | fio::NodeAttributesQuery::UID
            | fio::NodeAttributesQuery::GID
            | fio::NodeAttributesQuery::RDEV
            | fio::NodeAttributesQuery::WRAPPING_KEY_ID
            | fio::NodeAttributesQuery::VERITY_ENABLED
            | fio::NodeAttributesQuery::CASEFOLD;

        // Only fetch the SELinux context for filesystems that label nodes via xattrs.
        if security::fs_is_xattr_labeled(node.fs()) {
            query |= fio::NodeAttributesQuery::SELINUX_CONTEXT;
        }
        let (ops, attrs, context) = self
            .node
            .io
            .open(
                name,
                fs_ops.root_rights,
                None,
                query,
                Factory {
                    assume_special: false,
                    // We can start not dirty because we call `update_info_from_attrs` below.
                    start_dirty: false,
                },
            )
            .map_err(|status| from_status_like_fdio!(status, name))?;
        // With remote ids enabled, an unknown remote id cannot be used as an inode number.
        let node_id = if fs_ops.use_remote_ids {
            if attrs.id == fio::INO_UNKNOWN {
                return error!(ENOTSUP);
            }
            attrs.id
        } else {
            fs.allocate_ino()
        };
        let owner = FsCred { uid: attrs.uid, gid: attrs.gid };
        let rdev = DeviceType::from_bits(attrs.rdev);
        let fsverity_enabled = attrs.fsverity_enabled;
        // fsverity should not be enabled for non-file nodes.
        if fsverity_enabled && (attrs.protocols & ZXIO_NODE_PROTOCOL_FILE == 0) {
            return error!(EINVAL);
        }
        let casefold = attrs.casefold;

        // `ops` is only consumed if a new node must be created; otherwise it is kept for the
        // symlink-refresh special case below.
        let mut ops = Some(ops);
        let node = fs.get_or_create_node(node_id, || {
            let child = FsNode::new_uncached(
                node_id,
                ops.take().unwrap(),
                &fs,
                FsNodeInfo {
                    rdev,
                    casefold,
                    wrapping_key_id: attrs.has.wrapping_key_id.then_some(attrs.wrapping_key_id),
                    ..FsNodeInfo::new(get_mode(&attrs, fs_ops.root_rights), owner)
                },
            );
            if fsverity_enabled {
                *child.fsverity.lock() = FsVerityState::FsVerity;
            }
            // This is valid to fail if we're using mount point labelling or the provided context
            // string is invalid.
            if let Some(fio::SelinuxContext::Data(data)) = &context {
                let _ = security::fs_node_notify_security_context(
                    current_task,
                    &child,
                    FsStr::new(&data),
                );
            }
            Ok(child)
        })?;

        node.update_info(|info| update_info_from_attrs(info, &attrs));

        // Encrypted symlinks that use fscrypt can be read as encrypted links when no key is
        // available.  When no key is available, directories will not cache their entries.  When,
        // the key is subsequently provided, the next time the symlink is read, we will come through
        // here, but since the node is cached, `get_or_create_node` will not create a new node
        // which, if we were to do nothing, would mean we'd keep the encrypted value for the target.
        // To address this, if no new node was created, we update the target of the existing node
        // here.  Once the key has been provided, the entry will be cached with the directory and
        // whilst the entry remains cached, `lookup` will not be called.
        if let Some(ops) = ops
            && let Some(new_symlink) = ops.as_any().downcast_ref::<RemoteSymlink>()
            && let Some(symlink) = node.downcast_ops::<RemoteSymlink>()
        {
            *symlink.target.write() = std::mem::take(&mut new_symlink.target.write());
        }

        Ok(node)
    }
1024
1025    fn truncate(
1026        &self,
1027        _locked: &mut Locked<FileOpsCore>,
1028        _guard: &AppendLockGuard<'_>,
1029        node: &FsNode,
1030        current_task: &CurrentTask,
1031        length: u64,
1032    ) -> Result<(), Errno> {
1033        node.fail_if_locked(current_task)?;
1034
1035        will_dirty(&[&self.node], || {
1036            self.node.io.truncate(length).map_err(|status| from_status_like_fdio!(status))
1037        })
1038    }
1039
1040    fn allocate(
1041        &self,
1042        _locked: &mut Locked<FileOpsCore>,
1043        _guard: &AppendLockGuard<'_>,
1044        node: &FsNode,
1045        current_task: &CurrentTask,
1046        mode: FallocMode,
1047        offset: u64,
1048        length: u64,
1049    ) -> Result<(), Errno> {
1050        match mode {
1051            FallocMode::Allocate { keep_size: false } => {
1052                node.fail_if_locked(current_task)?;
1053
1054                will_dirty(&[&self.node], || {
1055                    self.node
1056                        .io
1057                        .allocate(offset, length, AllocateMode::empty())
1058                        .map_err(|status| from_status_like_fdio!(status))
1059                })?;
1060                Ok(())
1061            }
1062            _ => error!(EINVAL),
1063        }
1064    }
1065
    /// Refreshes `info` from the remote node's current attributes and returns a read guard on
    /// the updated info. Delegates to `BaseNode::fetch_and_refresh_info`.
    fn fetch_and_refresh_info<'a>(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        _node: &FsNode,
        _current_task: &CurrentTask,
        info: &'a RwLock<FsNodeInfo>,
    ) -> Result<RwLockReadGuard<'a, FsNodeInfo>, Errno> {
        self.node.fetch_and_refresh_info(info)
    }
1075
    /// Pushes locally-changed attributes to the remote node.
    ///
    /// Only the fields flagged in `has` are sent; all others are left unset so the remote
    /// keeps its current values.
    fn update_attributes(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        _node: &FsNode,
        _current_task: &CurrentTask,
        info: &FsNodeInfo,
        has: zxio_node_attr_has_t,
    ) -> Result<(), Errno> {
        // Omit updating creation_time. By definition, there shouldn't be a change in creation_time.
        will_dirty(&[&self.node], || {
            self.node
                .io
                .attr_set(fio::MutableNodeAttributes {
                    // Timestamps cross the wire as nanoseconds in a u64.
                    modification_time: has
                        .modification_time
                        .then_some(info.time_modify.into_nanos() as u64),
                    access_time: has.access_time.then_some(info.time_access.into_nanos() as u64),
                    mode: has.mode.then_some(info.mode.bits()),
                    uid: has.uid.then_some(info.uid),
                    gid: has.gid.then_some(info.gid),
                    rdev: has.rdev.then_some(info.rdev.bits()),
                    casefold: has.casefold.then_some(info.casefold),
                    wrapping_key_id: if has.wrapping_key_id { info.wrapping_key_id } else { None },
                    ..Default::default()
                })
                .map_err(|status| from_status_like_fdio!(status))
        })
    }
1104
    /// Removes the directory entry `name` from this directory.
    fn unlink(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        node: &FsNode,
        _current_task: &CurrentTask,
        name: &FsStr,
        child: &FsNodeHandle,
    ) -> Result<(), Errno> {
        // We don't care about the child argument because 1. unlinking already takes the parent's
        // children lock, so we don't have to worry about conflicts on this path, and 2. the remote
        // filesystem tracks the link counts so we don't need to update them here.
        let name = get_name_str(name)?;
        // Both the parent directory and the child node are marked dirty around the removal.
        will_dirty(&[node, child], || {
            self.node
                .io
                .unlink(name, fio::UnlinkFlags::empty())
                .map_err(|status| from_status_like_fdio!(status))
        })
    }
1124
    /// Creates a symlink `name` -> `target` under this directory and registers the new node.
    fn create_symlink(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        node: &FsNode,
        current_task: &CurrentTask,
        name: &FsStr,
        target: &FsStr,
        owner: FsCred,
    ) -> Result<FsNodeHandle, Errno> {
        // Locked encrypted directories reject creation.
        node.fail_if_locked(current_task)?;

        let name = get_name_str(name)?;
        let io = will_dirty(&[&self.node], || {
            self.node
                .io
                .create_symlink(name, target)
                .map_err(|status| from_status_like_fdio!(status))
        })?;

        let fs = node.fs();
        let fs_ops = RemoteFs::from_fs(&fs);

        // NOTE(review): `unwrap_or_default()` yields inode 0 if the remote omits the id even
        // though `use_remote_ids` is set — presumably remotes with stable ids always report
        // one; confirm.
        let node_id = if fs_ops.use_remote_ids {
            io.attr_get(fio::NodeAttributesQuery::ID)
                .map_err(|status| from_status_like_fdio!(status))?
                .1
                .id
                .unwrap_or_default()
        } else {
            fs.allocate_ino()
        };
        Ok(fs.create_node(
            node_id,
            RemoteSymlink::new(BaseNode::new(io, true), target.as_bytes()),
            FsNodeInfo {
                // A symlink's size is the length of its target path.
                size: target.len(),
                ..FsNodeInfo::new(FileMode::IFLNK | FileMode::ALLOW_ALL, owner)
            },
        ))
    }
1165
1166    fn create_tmpfile(
1167        &self,
1168        node: &FsNode,
1169        _current_task: &CurrentTask,
1170        mode: FileMode,
1171        owner: FsCred,
1172    ) -> Result<FsNodeHandle, Errno> {
1173        let fs = node.fs();
1174        let fs_ops = RemoteFs::from_fs(&fs);
1175
1176        let mut node_id;
1177        if !mode.is_reg() {
1178            return error!(EINVAL);
1179        }
1180
1181        // `create_tmpfile` is used by O_TMPFILE. Note that
1182        // <https://man7.org/linux/man-pages/man2/open.2.html> states that if O_EXCL is specified
1183        // with O_TMPFILE, the temporary file created cannot be linked into the filesystem. Although
1184        // there exist fuchsia flags `fio::FLAG_TEMPORARY_AS_NOT_LINKABLE`, the starnix vfs already
1185        // handles this case and makes sure that the created file is not linkable. There is also no
1186        // current way of passing the open flags to this function.
1187        let (ops, attrs, _) = will_dirty(&[&self.node], || {
1188            self.node
1189                .io
1190                .open(
1191                    ".",
1192                    fio::Flags::PROTOCOL_FILE
1193                        | fio::Flags::FLAG_CREATE_AS_UNNAMED_TEMPORARY
1194                        | fio::PERM_READABLE
1195                        | fio::PERM_WRITABLE,
1196                    Some(fio::MutableNodeAttributes {
1197                        mode: Some(mode.bits()),
1198                        uid: Some(owner.uid),
1199                        gid: Some(owner.gid),
1200                        ..Default::default()
1201                    }),
1202                    fio::NodeAttributesQuery::ID,
1203                    Factory { assume_special: false, start_dirty: true },
1204                )
1205                .map_err(|status| from_status_like_fdio!(status))
1206        })?;
1207        node_id = attrs.id;
1208
1209        if !fs_ops.use_remote_ids {
1210            node_id = fs.allocate_ino();
1211        }
1212        Ok(fs.create_node(node_id, ops, FsNodeInfo::new(mode, owner)))
1213    }
1214
1215    fn link(
1216        &self,
1217        _locked: &mut Locked<FileOpsCore>,
1218        node: &FsNode,
1219        _current_task: &CurrentTask,
1220        name: &FsStr,
1221        child: &FsNodeHandle,
1222    ) -> Result<(), Errno> {
1223        if !RemoteFs::from_fs(&node.fs()).use_remote_ids {
1224            return error!(EPERM);
1225        }
1226        let name = get_name_str(name)?;
1227
1228        will_dirty(&[node, child], || {
1229            if let Some(child) = child.downcast_ops::<RemoteNode>() {
1230                child.node.io.link_into(&self.node.io, name).map_err(|status| match status {
1231                    zx::Status::BAD_STATE => errno!(EXDEV),
1232                    zx::Status::ACCESS_DENIED => errno!(ENOKEY),
1233                    s => from_status_like_fdio!(s),
1234                })
1235            } else if let Some(child) = child.downcast_ops::<RemoteSymlink>() {
1236                child.node.io.link_into(&self.node.io, name).map_err(|status| match status {
1237                    zx::Status::BAD_STATE => errno!(EXDEV),
1238                    zx::Status::ACCESS_DENIED => errno!(ENOKEY),
1239                    s => from_status_like_fdio!(s),
1240                })
1241            } else {
1242                error!(EXDEV)
1243            }
1244        })
1245    }
1246
    /// Called when this node is dropped from the node cache.
    fn forget(
        self: Box<Self>,
        _locked: &mut Locked<FileOpsCore>,
        _current_task: &CurrentTask,
        info: FsNodeInfo,
    ) -> Result<(), Errno> {
        // Before forgetting this node, update atime if we need to.
        if info.pending_time_access_update {
            // NOTE(review): querying PENDING_ACCESS_TIME_UPDATE appears to make the remote
            // apply the deferred atime update as a side effect — the returned attributes are
            // intentionally discarded. Confirm against the io client's contract.
            self.node
                .io
                .attr_get(fio::NodeAttributesQuery::PENDING_ACCESS_TIME_UPDATE)
                .map_err(|status| from_status_like_fdio!(status))?;
        }
        Ok(())
    }
1262
1263    fn enable_fsverity(
1264        &self,
1265        _locked: &mut Locked<FileOpsCore>,
1266        _node: &FsNode,
1267        _current_task: &CurrentTask,
1268        descriptor: &fsverity_descriptor,
1269    ) -> Result<(), Errno> {
1270        let descr = zxio_fsverity_descriptor_t {
1271            hash_algorithm: descriptor.hash_algorithm,
1272            salt_size: descriptor.salt_size,
1273            salt: descriptor.salt,
1274        };
1275        will_dirty(&[&self.node], || {
1276            self.node.io.enable_verity(&descr).map_err(|status| from_status_like_fdio!(status))
1277        })
1278    }
1279
    /// Builds a Linux `fsverity_descriptor` from the remote node's verity attributes.
    ///
    /// # Errors
    ///
    /// `ENODATA` when the remote does not report content size, verification options, or the
    /// root hash; `EIO` when the reported salt or root hash does not fit the descriptor's
    /// fixed-size fields.
    fn get_fsverity_descriptor(&self, log_blocksize: u8) -> Result<fsverity_descriptor, Errno> {
        let (_, attrs) = self
            .node
            .io
            .attr_get(
                fio::NodeAttributesQuery::CONTENT_SIZE
                    | fio::NodeAttributesQuery::OPTIONS
                    | fio::NodeAttributesQuery::ROOT_HASH,
            )
            .map_err(|status| from_status_like_fdio!(status))?;
        // All of content size, hash algorithm, salt, and root hash must be present.
        let fio::ImmutableNodeAttributes {
            content_size: Some(data_size),
            options:
                Some(fio::VerificationOptions {
                    hash_algorithm: Some(hash_algorithm),
                    salt: Some(salt),
                    ..
                }),
            root_hash: Some(root_hash),
            ..
        } = attrs
        else {
            return error!(ENODATA);
        };
        let mut descriptor = fsverity_descriptor {
            version: 1,
            hash_algorithm: hash_algorithm.into_primitive(),
            log_blocksize,
            __reserved_0x04: 0u32,
            data_size,
            ..Default::default()
        };
        // Reject values that would overflow the descriptor's fixed-size arrays.
        if salt.len() > std::mem::size_of_val(&descriptor.salt)
            || root_hash.len() > std::mem::size_of_val(&descriptor.root_hash)
        {
            return error!(EIO);
        }
        descriptor.salt_size = salt.len() as u8;
        descriptor.salt[..salt.len()].copy_from_slice(&salt);
        descriptor.root_hash[..root_hash.len()].copy_from_slice(&root_hash);
        Ok(descriptor)
    }
1322}
1323
/// Node ops for special files (fifo/socket/char/block device) created on the remote
/// filesystem via `mknod` with a non-regular mode.
struct RemoteSpecialNode {
    node: BaseNode,
}
1327
impl FsNodeOps for RemoteSpecialNode {
    fs_node_impl_not_dir!();
    fs_node_impl_xattr_delegate!(self, self.node);

    fn create_file_ops(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        _node: &FsNode,
        _current_task: &CurrentTask,
        _flags: OpenFlags,
    ) -> Result<Box<dyn FileOps>, Errno> {
        // NOTE(review): special nodes are presumably opened via their device/fifo/socket
        // handling elsewhere in the VFS, never through these ops — reaching here would be an
        // invariant violation, hence the panic. Confirm against the VFS open path.
        unreachable!("Special nodes cannot be opened.");
    }
}
1342
/// `FileOps` for remote directories. Each open wraps its own cloned connection because the
/// directory seek offset is state held on the connection itself.
struct RemoteDirectoryObject(sync_io_client::RemoteDirectory);

impl RemoteDirectoryObject {
    /// Wraps `proxy` in the sync io client's directory wrapper.
    fn new(proxy: fio::DirectorySynchronousProxy) -> Self {
        Self(sync_io_client::RemoteDirectory::new(proxy))
    }
}
1350
impl FileOps for RemoteDirectoryObject {
    fileops_impl_directory!();

    /// Seeks the directory stream by forwarding the computed absolute position to the remote
    /// connection (which owns the offset).
    fn seek(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        _file: &FileObject,
        _current_task: &CurrentTask,
        current_offset: off_t,
        target: SeekTarget,
    ) -> Result<off_t, Errno> {
        Ok(self
            .0
            .seek(default_seek(current_offset, target, || error!(EINVAL))? as u64)
            .map_err(map_sync_io_client_error)? as i64)
    }

    /// Streams directory entries into `sink`, translating remote dirent types to Linux ones.
    fn readdir(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        file: &FileObject,
        _current_task: &CurrentTask,
        sink: &mut dyn DirentSink,
    ) -> Result<(), Errno> {
        match self
            .0
            .readdir(|mut inode_num, entry_type, name| {
                // The remote cannot know the mount-local parent, so patch up `..`'s inode.
                if name == b".." {
                    inode_num = if let Some(parent) = file.name.parent_within_mount() {
                        parent.node.ino
                    } else {
                        // For the root .. should have the same inode number as .
                        file.name.entry.node.ino
                    };
                }
                let entry_type = match entry_type {
                    fio::DirentType::Directory => DirectoryEntryType::DIR,
                    fio::DirentType::File => DirectoryEntryType::REG,
                    fio::DirentType::Symlink => DirectoryEntryType::LNK,
                    _ => DirectoryEntryType::UNKNOWN,
                };
                // A sink error (e.g. full user buffer) stops the remote iteration early.
                match sink.add(inode_num, sink.offset() + 1, entry_type, name.into()) {
                    Ok(()) => ControlFlow::Continue(()),
                    Err(e) => ControlFlow::Break(e),
                }
            })
            .map_err(map_sync_io_client_error)?
        {
            None => Ok(()),
            Some(e) => Err(e),
        }
    }

    fn sync(&self, _file: &FileObject, _current_task: &CurrentTask) -> Result<(), Errno> {
        self.0.sync().map_err(map_sync_error)
    }

    fn to_handle(
        &self,
        _file: &FileObject,
        _current_task: &CurrentTask,
    ) -> Result<Option<zx::NullableHandle>, Errno> {
        // If we expose a handle to a directory to a Fuchsia component, we trust that it will not
        // modify the directory in a way that will confuse Starnix.
        self.0
            .clone_proxy()
            .map_err(map_sync_io_client_error)
            .map(|p| Some(p.into_channel().into()))
    }
}
1421
/// `FileOps` for regular files whose node is a `RemoteNode`; I/O reaches the node's
/// `RemoteIo` via `RemoteFileObject::io`.
#[derive(Default)]
pub struct RemoteFileObject {
    /// Cached read-only VMO handle.
    read_only_memory: OnceCell<Arc<MemoryObject>>,

    /// Cached read/exec VMO handle.
    read_exec_memory: OnceCell<Arc<MemoryObject>>,
}
1430
impl RemoteFileObject {
    /// Returns the `RemoteIo` connection of the file's backing node.
    ///
    /// # Panics
    ///
    /// This will panic if the node's ops are not `RemoteNode`; `AnonymousRemoteFileObject` should
    /// be used if this won't be the case.
    fn io(file: &FileObject) -> &RemoteIo {
        &file.node().downcast_ops::<RemoteNode>().unwrap().node.io
    }
}
1440
/// Shared I/O helpers over `RemoteIo`, used by both `RemoteFileObject` and
/// `AnonymousRemoteFileObject`.
trait RemoteIoExt {
    /// Reads from `offset` into `buffer`, preferring vectored I/O when supported.
    fn read_to_output_buffer(
        &self,
        offset: u64,
        buffer: &mut dyn OutputBuffer,
    ) -> Result<usize, Errno>;
    /// Writes `buffer` at `offset`, preferring vectored I/O when supported.
    fn write_from_input_buffer(
        &self,
        offset: u64,
        buffer: &mut dyn InputBuffer,
    ) -> Result<usize, Errno>;
    /// Fetches a VMO for the file's content with protections `prot` applied.
    fn fetch_remote_memory(&self, prot: ProtectionFlags) -> Result<Arc<MemoryObject>, Errno>;
}
1454
impl RemoteIoExt for RemoteIo {
    fn read_to_output_buffer(
        &self,
        offset: u64,
        buffer: &mut dyn OutputBuffer,
    ) -> Result<usize, Errno> {
        // Fast path: hand the caller's iovec segments directly to the remote.
        if self.supports_vectored()
            && let Some(actual) = with_iovec_segments(buffer, |iovecs| {
                // SAFETY: The iovecs are known to point to userspace, so any damage we do here is
                // limited to userspace.  Zircon will catch faults and return an error.
                unsafe { self.readv(offset, iovecs).map_err(map_stream_error) }
            })
        {
            let actual = actual?;
            // SAFETY: we successfully read `actual` bytes directly to the user's buffer
            // segments.
            unsafe { buffer.advance(actual) }?;
            Ok(actual)
        } else {
            // Slow path: read into an intermediate buffer and copy into `buffer`.
            self.read(
                offset,
                buffer.available(),
                |data| buffer.write(&data),
                map_sync_io_client_error,
            )
        }
    }

    fn write_from_input_buffer(
        &self,
        offset: u64,
        buffer: &mut dyn InputBuffer,
    ) -> Result<usize, Errno> {
        // Fast path: vectored write straight from the caller's segments; otherwise peek the
        // whole buffer and write it in one call.
        let actual = if self.supports_vectored()
            && let Some(actual) = with_iovec_segments(buffer, |iovecs| {
                self.writev(offset, iovecs).map_err(map_stream_error)
            }) {
            actual?
        } else {
            self.write(offset as u64, &buffer.peek_all()?).map_err(map_sync_io_client_error)?
        };
        // Only advance by what was actually written.
        buffer.advance(actual)?;
        Ok(actual)
    }

    fn fetch_remote_memory(&self, prot: ProtectionFlags) -> Result<Arc<MemoryObject>, Errno> {
        // Fetch the VMO without EXEC rights first; EXEC is added below via the vmex resource.
        let without_exec = self
            .vmo_get(prot.to_vmar_flags() - zx::VmarFlags::PERM_EXECUTE)
            .map_err(|status| from_status_like_fdio!(status))?;
        let all_flags = if prot.contains(ProtectionFlags::EXEC) {
            without_exec.replace_as_executable(&VMEX_RESOURCE).map_err(impossible_error)?
        } else {
            without_exec
        };
        Ok(Arc::new(MemoryObject::from(all_flags)))
    }
}
1512
impl FileOps for RemoteFileObject {
    fileops_impl_seekable!();

    fn read(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        file: &FileObject,
        _current_task: &CurrentTask,
        offset: usize,
        data: &mut dyn OutputBuffer,
    ) -> Result<usize, Errno> {
        Self::io(file).read_to_output_buffer(offset as u64, data)
    }

    fn write(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        file: &FileObject,
        _current_task: &CurrentTask,
        offset: usize,
        data: &mut dyn InputBuffer,
    ) -> Result<usize, Errno> {
        // Writing mutates the node, so mark its cached info dirty around the call.
        will_dirty(&[&***file.node()], || {
            Self::io(file).write_from_input_buffer(offset as u64, data)
        })
    }

    /// Returns a VMO for mapping the file, caching the read-only and read/exec cases.
    fn get_memory(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        file: &FileObject,
        _current_task: &CurrentTask,
        _length: Option<usize>,
        prot: ProtectionFlags,
    ) -> Result<Arc<MemoryObject>, Errno> {
        trace_duration!(CATEGORY_STARNIX_MM, "RemoteFileGetVmo");
        // Only the two common non-writable protection combinations are cached; anything else
        // is fetched fresh on every call.
        let memory_cache = if prot == (ProtectionFlags::READ | ProtectionFlags::EXEC) {
            Some(&self.read_exec_memory)
        } else if prot == ProtectionFlags::READ {
            Some(&self.read_only_memory)
        } else {
            None
        };

        let io = Self::io(file);

        memory_cache
            .map(|c| c.get_or_try_init(|| io.fetch_remote_memory(prot)).cloned())
            .unwrap_or_else(|| io.fetch_remote_memory(prot))
    }

    fn to_handle(
        &self,
        file: &FileObject,
        current_task: &CurrentTask,
    ) -> Result<Option<zx::NullableHandle>, Errno> {
        // To avoid cache coherency and security issues, we proxy remote files via the Starnix file
        // server.  This will incur a performance penalty which we can optimize later if we need to.
        serve_file_tagged(current_task, file, current_task.current_creds().clone(), "remote_files")
            .map(|c| Some(c.0.into_handle().into()))
    }

    fn ioctl(
        &self,
        locked: &mut Locked<Unlocked>,
        file: &FileObject,
        current_task: &CurrentTask,
        request: u32,
        arg: SyscallArg,
    ) -> Result<SyscallResult, Errno> {
        // No remote-file-specific ioctls; defer to the generic handling.
        default_ioctl(file, locked, current_task, request, arg)
    }
}
1586
/// A file object that is not attached to a `RemoteFs`, which means it stores its own `RemoteIo`.
pub struct AnonymousRemoteFileObject {
    /// The connection used for all I/O on this file.
    io: RemoteIo,

    /// Cached read-only VMO handle.
    read_only_memory: OnceCell<Arc<MemoryObject>>,

    /// Cached read/exec VMO handle.
    read_exec_memory: OnceCell<Arc<MemoryObject>>,
}

impl AnonymousRemoteFileObject {
    /// Creates the file object with empty VMO caches.
    fn new(io: RemoteIo) -> Self {
        Self { io, read_only_memory: Default::default(), read_exec_memory: Default::default() }
    }
}
1603
1604impl FileOps for AnonymousRemoteFileObject {
1605    fileops_impl_seekable!();
1606
    fn read(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        _file: &FileObject,
        _current_task: &CurrentTask,
        offset: usize,
        data: &mut dyn OutputBuffer,
    ) -> Result<usize, Errno> {
        // I/O goes through this object's own connection rather than a node's.
        self.io.read_to_output_buffer(offset as u64, data)
    }
1617
    fn write(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        _file: &FileObject,
        _current_task: &CurrentTask,
        offset: usize,
        data: &mut dyn InputBuffer,
    ) -> Result<usize, Errno> {
        // As this is an anonymous file, there's no point marking the node info dirty because this
        // isn't backed by `RemoteNode` or `RemoteSymlink`.
        self.io.write_from_input_buffer(offset as u64, data)
    }
1630
1631    fn get_memory(
1632        &self,
1633        _locked: &mut Locked<FileOpsCore>,
1634        _file: &FileObject,
1635        _current_task: &CurrentTask,
1636        _length: Option<usize>,
1637        prot: ProtectionFlags,
1638    ) -> Result<Arc<MemoryObject>, Errno> {
1639        trace_duration!(CATEGORY_STARNIX_MM, "RemoteFileGetVmo");
1640        let memory_cache = if prot == (ProtectionFlags::READ | ProtectionFlags::EXEC) {
1641            Some(&self.read_exec_memory)
1642        } else if prot == ProtectionFlags::READ {
1643            Some(&self.read_only_memory)
1644        } else {
1645            None
1646        };
1647
1648        memory_cache
1649            .map(|c| c.get_or_try_init(|| self.io.fetch_remote_memory(prot)).cloned())
1650            .unwrap_or_else(|| self.io.fetch_remote_memory(prot))
1651    }
1652
1653    fn to_handle(
1654        &self,
1655        _file: &FileObject,
1656        _current_task: &CurrentTask,
1657    ) -> Result<Option<zx::NullableHandle>, Errno> {
1658        // This is an anonymous file (not backed by `RemoteNode`).  Any external updates to the
1659        // file's attributes will not be tracked by Starnix.
1660        self.io
1661            .clone_proxy()
1662            .map_err(map_sync_io_client_error)
1663            .map(|p| Some(p.into_channel().into()))
1664    }
1665
1666    fn sync(&self, _file: &FileObject, _current_task: &CurrentTask) -> Result<(), Errno> {
1667        self.io.sync().map_err(map_sync_io_client_error)
1668    }
1669
1670    fn ioctl(
1671        &self,
1672        locked: &mut Locked<Unlocked>,
1673        file: &FileObject,
1674        current_task: &CurrentTask,
1675        request: u32,
1676        arg: SyscallArg,
1677    ) -> Result<SyscallResult, Errno> {
1678        default_ioctl(file, locked, current_task, request, arg)
1679    }
1680}
1681
/// A file object backed directly by a `Zxio` object, used for positional reads/writes and VMO
/// retrieval.
pub struct RemoteZxioFileObject {
    /// The underlying Zircon I/O object.  This is shared, so we must take care not to use any
    /// stateful methods on the underlying object (reading and writing is fine).
    zxio: Zxio,

    /// Cached read-only VMO handle.
    read_only_memory: OnceCell<Arc<MemoryObject>>,

    /// Cached read/exec VMO handle.
    read_exec_memory: OnceCell<Arc<MemoryObject>>,
}
1693
1694impl RemoteZxioFileObject {
1695    fn new(zxio: Zxio) -> RemoteZxioFileObject {
1696        RemoteZxioFileObject {
1697            zxio,
1698            read_only_memory: Default::default(),
1699            read_exec_memory: Default::default(),
1700        }
1701    }
1702
1703    fn fetch_remote_memory(&self, prot: ProtectionFlags) -> Result<Arc<MemoryObject>, Errno> {
1704        let without_exec = self
1705            .zxio
1706            .vmo_get(prot.to_vmar_flags() - zx::VmarFlags::PERM_EXECUTE)
1707            .map_err(|status| from_status_like_fdio!(status))?;
1708        let all_flags = if prot.contains(ProtectionFlags::EXEC) {
1709            without_exec.replace_as_executable(&VMEX_RESOURCE).map_err(impossible_error)?
1710        } else {
1711            without_exec
1712        };
1713        Ok(Arc::new(MemoryObject::from(all_flags)))
1714    }
1715}
1716
// NOTE: `zxio` is shared (see struct comment), so only positional, stateless operations
// (`readv_at`/`writev_at`/`read_at`/`write_at`) are used here.
impl FileOps for RemoteZxioFileObject {
    fileops_impl_seekable!();

    fn read(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        _file: &FileObject,
        _current_task: &CurrentTask,
        offset: usize,
        data: &mut dyn OutputBuffer,
    ) -> Result<usize, Errno> {
        let offset = offset as u64;
        // Fast path: pass the output buffer's segments directly to zxio as iovecs, avoiding an
        // intermediate copy.  `with_iovec_segments` yields `None` when this representation isn't
        // possible; the slow path below handles that case.
        let read_bytes = with_iovec_segments::<_, syncio::zxio::zx_iovec, _>(data, |iovecs| {
            // SAFETY: The iovecs are valid for writing because they come from OutputBuffer.
            unsafe { self.zxio.readv_at(offset, iovecs).map_err(map_stream_error) }
        });

        match read_bytes {
            Some(actual) => {
                let actual = actual?;
                // SAFETY: we successfully read `actual` bytes
                // directly to the user's buffer segments.
                unsafe { data.advance(actual) }?;
                Ok(actual)
            }
            None => {
                // Perform the (slower) operation by using an intermediate buffer.
                let total = data.available();
                let mut bytes = vec![0u8; total];
                let actual = self
                    .zxio
                    .read_at(offset, &mut bytes)
                    .map_err(|status| from_status_like_fdio!(status))?;
                data.write_all(&bytes[0..actual])
            }
        }
    }

    fn write(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        _file: &FileObject,
        _current_task: &CurrentTask,
        offset: usize,
        data: &mut dyn InputBuffer,
    ) -> Result<usize, Errno> {
        let offset = offset as u64;
        // Fast path: pass the input buffer's segments directly to zxio as iovecs (mirrors `read`).
        let write_bytes = with_iovec_segments::<_, syncio::zxio::zx_iovec, _>(data, |iovecs| {
            // SAFETY: The iovecs are valid for reading because they come from InputBuffer.
            unsafe { self.zxio.writev_at(offset, iovecs).map_err(map_stream_error) }
        });

        match write_bytes {
            Some(actual) => {
                let actual = actual?;
                data.advance(actual)?;
                Ok(actual)
            }
            None => {
                // Perform the (slower) operation by using an intermediate buffer.
                // Peek first and only advance by the number of bytes actually written.
                let bytes = data.peek_all()?;
                let actual = self
                    .zxio
                    .write_at(offset, &bytes)
                    .map_err(|status| from_status_like_fdio!(status))?;
                data.advance(actual)?;
                Ok(actual)
            }
        }
    }

    fn get_memory(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        _file: &FileObject,
        _current_task: &CurrentTask,
        _length: Option<usize>,
        prot: ProtectionFlags,
    ) -> Result<Arc<MemoryObject>, Errno> {
        trace_duration!(CATEGORY_STARNIX_MM, "RemoteFileGetVmo");
        // Only the two common protection combinations get a cache slot; other combinations are
        // fetched fresh every time.
        let memory_cache = if prot == (ProtectionFlags::READ | ProtectionFlags::EXEC) {
            Some(&self.read_exec_memory)
        } else if prot == ProtectionFlags::READ {
            Some(&self.read_only_memory)
        } else {
            None
        };

        memory_cache
            .map(|c| c.get_or_try_init(|| self.fetch_remote_memory(prot)).cloned())
            .unwrap_or_else(|| self.fetch_remote_memory(prot))
    }

    fn to_handle(
        &self,
        _file: &FileObject,
        _current_task: &CurrentTask,
    ) -> Result<Option<zx::NullableHandle>, Errno> {
        // Hand out a duplicate of the underlying zxio handle.
        self.zxio.clone_handle().map(Some).map_err(|status| from_status_like_fdio!(status))
    }

    fn sync(&self, _file: &FileObject, _current_task: &CurrentTask) -> Result<(), Errno> {
        self.zxio.sync().map_err(map_sync_error)
    }

    fn ioctl(
        &self,
        locked: &mut Locked<Unlocked>,
        file: &FileObject,
        current_task: &CurrentTask,
        request: u32,
        arg: SyscallArg,
    ) -> Result<SyscallResult, Errno> {
        // No file-specific ioctls; delegate to the generic handler.
        default_ioctl(file, locked, current_task, request, arg)
    }
}
1833
/// A remote node representing a symbolic link.
struct RemoteSymlink {
    /// Shared base-node state; provides xattr storage and node-info refresh (see the delegate
    /// calls in the `FsNodeOps` impl).
    node: BaseNode,
    /// The symlink's target path, as raw bytes.
    target: RwLock<Box<[u8]>>,
}
1838
1839impl RemoteSymlink {
1840    fn new(node: BaseNode, target: impl Into<Box<[u8]>>) -> Self {
1841        Self { node, target: RwLock::new(target.into()) }
1842    }
1843}
1844
impl FsNodeOps for RemoteSymlink {
    fs_node_impl_symlink!();
    fs_node_impl_xattr_delegate!(self, self.node);

    /// Returns the cached symlink target as a path.
    fn readlink(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        _node: &FsNode,
        _current_task: &CurrentTask,
    ) -> Result<SymlinkTarget, Errno> {
        Ok(SymlinkTarget::Path(FsString::new(self.target.read().to_vec())))
    }

    fn fetch_and_refresh_info<'a>(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        _node: &FsNode,
        _current_task: &CurrentTask,
        info: &'a RwLock<FsNodeInfo>,
    ) -> Result<RwLockReadGuard<'a, FsNodeInfo>, Errno> {
        // Delegate to the shared base-node refresh logic.
        self.node.fetch_and_refresh_info(info)
    }

    fn forget(
        self: Box<Self>,
        _locked: &mut Locked<FileOpsCore>,
        _current_task: &CurrentTask,
        info: FsNodeInfo,
    ) -> Result<(), Errno> {
        // Before forgetting this node, update atime if we need to.
        // NOTE(review): this relies on querying PENDING_ACCESS_TIME_UPDATE having the side effect
        // of flushing the pending atime on the server -- confirm against the fuchsia.io contract.
        if info.pending_time_access_update {
            self.node
                .io
                .attr_get(fio::NodeAttributesQuery::PENDING_ACCESS_TIME_UPDATE)
                .map_err(|status| from_status_like_fdio!(status))?;
        }
        Ok(())
    }
}
1884
/// A file object wrapping a Zircon `Counter` handle.  Reads and writes are not supported; the
/// counter is exposed through sync-file ioctls (see the `FileOps` impl).
pub struct RemoteCounter {
    // The wrapped Zircon counter handle.
    counter: Counter,
}
1888
1889impl RemoteCounter {
1890    fn new(counter: Counter) -> Self {
1891        Self { counter }
1892    }
1893
1894    pub fn duplicate_handle(&self) -> Result<Counter, Errno> {
1895        self.counter.duplicate_handle(zx::Rights::SAME_RIGHTS).map_err(impossible_error)
1896    }
1897}
1898
1899impl FileOps for RemoteCounter {
1900    fileops_impl_nonseekable!();
1901    fileops_impl_noop_sync!();
1902
1903    fn read(
1904        &self,
1905        _locked: &mut Locked<FileOpsCore>,
1906        _file: &FileObject,
1907        _current_task: &CurrentTask,
1908        _offset: usize,
1909        _data: &mut dyn OutputBuffer,
1910    ) -> Result<usize, Errno> {
1911        error!(ENOTSUP)
1912    }
1913
1914    fn write(
1915        &self,
1916        _locked: &mut Locked<FileOpsCore>,
1917        _file: &FileObject,
1918        _current_task: &CurrentTask,
1919        _offset: usize,
1920        _data: &mut dyn InputBuffer,
1921    ) -> Result<usize, Errno> {
1922        error!(ENOTSUP)
1923    }
1924
1925    fn ioctl(
1926        &self,
1927        locked: &mut Locked<Unlocked>,
1928        file: &FileObject,
1929        current_task: &CurrentTask,
1930        request: u32,
1931        arg: SyscallArg,
1932    ) -> Result<SyscallResult, Errno> {
1933        let ioctl_type = (request >> 8) as u8;
1934        let ioctl_number = request as u8;
1935        if ioctl_type == SYNC_IOC_MAGIC
1936            && (ioctl_number == SYNC_IOC_FILE_INFO || ioctl_number == SYNC_IOC_MERGE)
1937        {
1938            let mut sync_points: Vec<SyncPoint> = vec![];
1939            let counter = self.duplicate_handle()?;
1940            sync_points.push(SyncPoint::new(Timeline::Hwc, counter.into()));
1941            let sync_file_name: &[u8; 32] = b"remote counter\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0";
1942            let sync_file = SyncFile::new(*sync_file_name, SyncFence { sync_points });
1943            return sync_file.ioctl(locked, file, current_task, request, arg);
1944        }
1945
1946        error!(EINVAL)
1947    }
1948}
1949
1950#[track_caller]
1951fn map_sync_error(status: zx::Status) -> Errno {
1952    match status {
1953        zx::Status::NO_RESOURCES | zx::Status::NO_MEMORY | zx::Status::NO_SPACE => {
1954            errno!(ENOSPC)
1955        }
1956        zx::Status::INVALID_ARGS | zx::Status::NOT_FILE => errno!(EINVAL),
1957        zx::Status::BAD_HANDLE => errno!(EBADFD),
1958        zx::Status::NOT_SUPPORTED => errno!(ENOTSUP),
1959        zx::Status::INTERRUPTED_RETRY => errno!(EINTR),
1960        _ => errno!(EIO),
1961    }
1962}
1963
1964#[track_caller]
1965fn map_stream_error(status: zx::Status) -> Errno {
1966    match status {
1967        // zx::Stream may return invalid args or not found error because of invalid zx_iovec buffer
1968        // pointers.
1969        zx::Status::INVALID_ARGS | zx::Status::NOT_FOUND => errno!(EFAULT),
1970        status => from_status_like_fdio!(status),
1971    }
1972}
1973
/// Maps a Zircon status from the sync I/O client onto an errno.  No special-casing is needed;
/// the generic fdio-style mapping applies.
#[track_caller]
fn map_sync_io_client_error(status: zx::Status) -> Errno {
    from_status_like_fdio!(status)
}
1978
/// Used to keep track of whether node info is in sync or dirty so that we can avoid communicating
/// externally if we think the node information is in sync.
// The two top bits are special (see below).  The remaining bits are a count of the number of
// in-flight dirty operations.
struct InfoState(AtomicU32);
1984
impl InfoState {
    /// When this bit is set and the PENDING_REFRESH bit is *not* set, the node information is in
    /// sync with the external node.
    const IN_SYNC: u32 = 0x8000_0000;

    /// When this bit is set in `info_state`, it means the node information is currently being
    /// refreshed.
    const PENDING_REFRESH: u32 = 0x4000_0000;

    /// Creates the state: a dirty node starts with no bits set; a clean one starts `IN_SYNC`.
    fn new(dirty: bool) -> Self {
        Self(AtomicU32::new(if dirty { 0 } else { Self::IN_SYNC }))
    }

    /// This guard should be taken whilst an operation that might result in dirty node information
    /// is in flight.
    fn dirty_op_guard(&self) -> DirtyOpGuard<'_> {
        // Increment the count indicating a dirty operation is in flight and also clear the
        // `IN_SYNC` bit to indicate the node information will need refreshing from its external
        // source.
        let mut current = self.0.load(Ordering::Relaxed);
        loop {
            // Ensure the in-flight count can't saturate into the flag bits.
            assert!(current | Self::IN_SYNC | Self::PENDING_REFRESH != u32::MAX); // Check overflow
            match self.0.compare_exchange_weak(
                current,
                (current & !Self::IN_SYNC) + 1,
                Ordering::Relaxed,
                Ordering::Relaxed,
            ) {
                Ok(_) => break,
                Err(old) => current = old,
            }
        }
        DirtyOpGuard(self)
    }

    /// Calls `refresh` if node information needs to be refreshed, or `not_needed` if node
    /// information does not need refreshing.
    fn maybe_refresh<'a, T: 'a>(
        &self,
        info: &'a RwLock<FsNodeInfo>,
        refresh: impl FnOnce(&'a RwLock<FsNodeInfo>) -> Result<T, Errno>,
        not_needed: impl FnOnce(&'a RwLock<FsNodeInfo>) -> Result<T, Errno>,
    ) -> Result<T, Errno> {
        let mut current = self.0.load(Ordering::Relaxed);

        // If node information is dirty, and there are no pending dirty operations, and there is no
        // other thread currently refreshing node information, we can set the bits indicating that a
        // refresh is pending.  We want to set the `IN_SYNC` bit here in case `will_dirty` runs
        // before we're done.
        //
        // NOTE: Multiple threads can be refreshing at the same time, but only one of them will
        // succeed in setting the `PENDING_REFRESH` bit.
        while current == 0 {
            match self.0.compare_exchange_weak(
                0,
                Self::IN_SYNC | Self::PENDING_REFRESH,
                Ordering::Relaxed,
                Ordering::Relaxed,
            ) {
                Ok(_) => break,
                Err(old) => current = old,
            }
        }

        // Skip the update if the cached information is in sync and there are no pending dirty
        // operations.  If there's a pending atime update, we'll skip updating that now; it
        // shouldn't be necessary and we can do it later.
        if current == Self::IN_SYNC {
            return not_needed(info);
        }

        let result = refresh(info);

        // If we set the PENDING_REFRESH bit above, we must clear it now.
        // (`current == 0` means the CAS above succeeded, so this thread owns the bit.)
        if current == 0 {
            if result.is_ok() {
                self.0.fetch_and(!Self::PENDING_REFRESH, Ordering::Relaxed);
            } else {
                // If there was an error, we should also clear the IN_SYNC bit to indicate the node
                // information is still dirty.
                self.0.fetch_and(!(Self::IN_SYNC | Self::PENDING_REFRESH), Ordering::Relaxed);
            }
        }

        result
    }
}
2072
/// Guard representing one in-flight operation that may dirty node information; dropping it
/// decrements the in-flight count in the wrapped `InfoState`.
struct DirtyOpGuard<'a>(&'a InfoState);
2074
impl Drop for DirtyOpGuard<'_> {
    fn drop(&mut self) {
        // Decrement the count we took when we created the guard.
        self.0.0.fetch_sub(1, Ordering::Relaxed);
    }
}
2081
2082/// A wrapper to be used around calls that will end up making node info dirty.
2083fn will_dirty<'a, N: TryInto<&'a BaseNode> + Copy, T>(nodes: &[N], f: impl FnOnce() -> T) -> T {
2084    // We are about to execute an operation that will make the cached information for one or more
2085    // nodes out of date, and we must deal with races.  If we mark the node as dirty first, another
2086    // thread could sneak in and refresh the node information before this operation has finished,
2087    // and then the information would be out of date.  If we only mark the node as dirty afterwards,
2088    // there is a window between when the operation completes and when we mark the node as dirty
2089    // where another thread could observe the changes caused by this operation, but still see old
2090    // node information.  So, the approach we take is to mark the node as dirty before the operation
2091    // starts, but indicate that this operation is ongoing.  Any threads that try and retrieve node
2092    // information will fetch fresh information, but, importantly, they'll leave the node marked as
2093    // dirty.  Once this operation has finished, we'll indicate this operation is no longer
2094    // in-flight, and then the next time information is refreshed, we'll mark the node information
2095    // as being in sync.
2096
2097    let _guards: SmallVec<[_; 4]> = nodes
2098        .iter()
2099        .filter_map(|n| N::try_into(*n).ok())
2100        .map(|n| n.info_state.dirty_op_guard())
2101        .collect();
2102
2103    f()
2104}
2105
2106#[cfg(test)]
2107mod test {
2108    use super::*;
2109    use crate::mm::PAGE_SIZE;
2110    use crate::testing::*;
2111    use crate::vfs::buffers::{VecInputBuffer, VecOutputBuffer};
2112    use crate::vfs::socket::{SocketFile, SocketMessageFlags};
2113    use crate::vfs::{EpollFileObject, LookupContext, Namespace, SymlinkMode, TimeUpdateType};
2114    use assert_matches::assert_matches;
2115    use fidl::endpoints::{ServerEnd, create_request_stream};
2116    use flyweights::FlyByteStr;
2117    use fuchsia_runtime::UtcDuration;
2118    use futures::StreamExt;
2119    use fxfs_testing::{TestFixture, TestFixtureOptions};
2120    use starnix_uapi::auth::Credentials;
2121    use starnix_uapi::errors::EINVAL;
2122    use starnix_uapi::file_mode::{AccessCheck, mode};
2123    use starnix_uapi::ino_t;
2124    use starnix_uapi::open_flags::OpenFlags;
2125    use starnix_uapi::vfs::{EpollEvent, FdEvents};
2126    use std::sync::atomic::AtomicU32;
2127    use storage_device::DeviceHolder;
2128    use storage_device::fake_device::FakeDevice;
2129    use zx::HandleBased;
2130    use {fidl_fuchsia_io as fio, fuchsia_async as fasync};
2131
    /// Verifies that a datagram `zx::Socket` surfaces as a Unix-domain socket file and that a
    /// datagram written to the peer end can be received via `SocketFile::recvmsg`.
    #[::fuchsia::test]
    async fn test_remote_uds() {
        spawn_kernel_and_run(async |locked, current_task| {
            let (s1, s2) = zx::Socket::create_datagram();
            s2.write(&vec![0]).expect("write");
            let file = new_remote_file(locked, &current_task, s1.into(), OpenFlags::RDWR)
                .expect("new_remote_file");
            assert!(file.node().is_sock());
            let socket_ops = file.downcast_file::<SocketFile>().unwrap();
            let flags = SocketMessageFlags::CTRUNC
                | SocketMessageFlags::TRUNC
                | SocketMessageFlags::NOSIGNAL
                | SocketMessageFlags::CMSG_CLOEXEC;
            let mut buffer = VecOutputBuffer::new(1024);
            let info = socket_ops
                .recvmsg(locked, &current_task, &file, &mut buffer, flags, None)
                .expect("recvmsg");
            assert!(info.ancillary_data.is_empty());
            // The single zero byte written above should come back as a one-byte message.
            assert_eq!(info.message_length, 1);
        })
        .await;
    }
2154
    /// Mounts `/pkg` as a `RemoteFs` and exercises lookups: a missing entry reports ENOENT, an
    /// existing directory resolves, and a known file can be opened read-only.
    #[::fuchsia::test]
    async fn test_tree() {
        spawn_kernel_and_run(async |locked, current_task| {
            let kernel = current_task.kernel();
            let rights = fio::PERM_READABLE | fio::PERM_EXECUTABLE;
            let (server, client) = zx::Channel::create();
            fdio::open("/pkg", rights, server).expect("failed to open /pkg");
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                FileSystemOptions { source: FlyByteStr::new(b"/pkg"), ..Default::default() },
                rights,
            )
            .unwrap();
            let ns = Namespace::new(fs);
            let root = ns.root();
            let mut context = LookupContext::default();
            // "nib" does not exist in the package.
            assert_eq!(
                root.lookup_child(locked, &current_task, &mut context, "nib".into()).err(),
                Some(errno!(ENOENT))
            );
            let mut context = LookupContext::default();
            root.lookup_child(locked, &current_task, &mut context, "lib".into()).unwrap();

            // A multi-component path should resolve and the file should open read-only.
            let mut context = LookupContext::default();
            let _test_file = root
                .lookup_child(
                    locked,
                    &current_task,
                    &mut context,
                    "data/tests/hello_starnix".into(),
                )
                .unwrap()
                .open(locked, &current_task, OpenFlags::RDONLY, AccessCheck::default())
                .unwrap();
        })
        .await;
    }
2194
    /// Reads from a remote pipe whose peer has already written data; the read must return all the
    /// bytes that were written.
    #[::fuchsia::test]
    async fn test_blocking_io() {
        spawn_kernel_and_run(async |locked, current_task| {
            let (client, server) = zx::Socket::create_stream();
            let pipe = create_fuchsia_pipe(locked, &current_task, client, OpenFlags::RDWR).unwrap();

            let bytes = [0u8; 64];
            assert_eq!(bytes.len(), server.write(&bytes).unwrap());

            // The data was already written above, so this read completes without blocking.
            let bytes_read =
                pipe.read(locked, &current_task, &mut VecOutputBuffer::new(64)).unwrap();

            assert_eq!(bytes_read, bytes.len());
        })
        .await;
    }
2212
    /// Verifies epoll/query_events behavior on a remote pipe: initially writable only, readable
    /// after the peer writes, and back to writable-only after the data is drained.
    #[::fuchsia::test]
    async fn test_poll() {
        spawn_kernel_and_run(async |locked, current_task| {
            let (client, server) = zx::Socket::create_stream();
            let pipe = create_fuchsia_pipe(locked, &current_task, client, OpenFlags::RDWR)
                .expect("create_fuchsia_pipe");
            let server_zxio = Zxio::create(server.into_handle()).expect("Zxio::create");

            // Nothing written yet: only the writable events are signaled.
            assert_eq!(
                pipe.query_events(locked, &current_task),
                Ok(FdEvents::POLLOUT | FdEvents::POLLWRNORM)
            );

            let epoll_object = EpollFileObject::new_file(locked, &current_task);
            let epoll_file = epoll_object.downcast_file::<EpollFileObject>().unwrap();
            let event = EpollEvent::new(FdEvents::POLLIN, 0);
            epoll_file
                .add(locked, &current_task, &pipe, &epoll_object, event)
                .expect("poll_file.add");

            // No data yet, so the wait should report no ready files.
            let fds = epoll_file
                .wait(locked, &current_task, 1, zx::MonotonicInstant::ZERO)
                .expect("wait");
            assert!(fds.is_empty());

            assert_eq!(server_zxio.write(&[0]).expect("write"), 1);

            // After the peer writes one byte, the readable events appear as well.
            assert_eq!(
                pipe.query_events(locked, &current_task),
                Ok(FdEvents::POLLOUT
                    | FdEvents::POLLWRNORM
                    | FdEvents::POLLIN
                    | FdEvents::POLLRDNORM)
            );
            let fds = epoll_file
                .wait(locked, &current_task, 1, zx::MonotonicInstant::ZERO)
                .expect("wait");
            assert_eq!(fds.len(), 1);

            assert_eq!(
                pipe.read(locked, &current_task, &mut VecOutputBuffer::new(64)).expect("read"),
                1
            );

            // Draining the byte returns the pipe to its writable-only state.
            assert_eq!(
                pipe.query_events(locked, &current_task),
                Ok(FdEvents::POLLOUT | FdEvents::POLLWRNORM)
            );
            let fds = epoll_file
                .wait(locked, &current_task, 1, zx::MonotonicInstant::ZERO)
                .expect("wait");
            assert!(fds.is_empty());
        })
        .await;
    }
2268
    /// A remote channel to a directory should surface as a directory node that can be converted
    /// back into a handle.
    #[::fuchsia::test]
    async fn test_new_remote_directory() {
        spawn_kernel_and_run(async |locked, current_task| {
            let (server, client) = zx::Channel::create();
            fdio::open("/pkg", fio::PERM_READABLE | fio::PERM_EXECUTABLE, server)
                .expect("failed to open /pkg");

            let fd = new_remote_file(locked, &current_task, client.into(), OpenFlags::RDWR)
                .expect("new_remote_file");
            assert!(fd.node().is_dir());
            assert!(fd.to_handle(&current_task).expect("to_handle").is_some());
        })
        .await;
    }
2283
    /// A remote channel to a regular file should surface as a non-directory node that can be
    /// converted back into a handle.
    #[::fuchsia::test]
    async fn test_new_remote_file() {
        spawn_kernel_and_run(async |locked, current_task| {
            let (server, client) = zx::Channel::create();
            fdio::open("/pkg/meta/contents", fio::PERM_READABLE, server)
                .expect("failed to open /pkg/meta/contents");

            let fd = new_remote_file(locked, &current_task, client.into(), OpenFlags::RDONLY)
                .expect("new_remote_file");
            assert!(!fd.node().is_dir());
            assert!(fd.to_handle(&current_task).expect("to_handle").is_some());
        })
        .await;
    }
2298
    /// A Zircon counter handle should be accepted by `new_remote_file` and convertible back into a
    /// handle.
    #[::fuchsia::test]
    async fn test_new_remote_counter() {
        spawn_kernel_and_run(async |locked, current_task| {
            let counter = zx::Counter::create();

            let fd = new_remote_file(locked, &current_task, counter.into(), OpenFlags::RDONLY)
                .expect("new_remote_file");
            assert!(fd.to_handle(&current_task).expect("to_handle").is_some());
        })
        .await;
    }
2310
    /// A VMO handle should surface as a non-directory file node convertible back into a handle.
    #[::fuchsia::test]
    async fn test_new_remote_vmo() {
        spawn_kernel_and_run(async |locked, current_task| {
            let vmo = zx::Vmo::create(*PAGE_SIZE).expect("Vmo::create");
            let fd = new_remote_file(locked, &current_task, vmo.into(), OpenFlags::RDWR)
                .expect("new_remote_file");
            assert!(!fd.node().is_dir());
            assert!(fd.to_handle(&current_task).expect("to_handle").is_some());
        })
        .await;
    }
2322
2323    #[::fuchsia::test(threads = 2)]
2324    async fn test_symlink() {
2325        let fixture = TestFixture::new().await;
2326        let (server, client) = zx::Channel::create();
2327        fixture.root().clone(server.into()).expect("clone failed");
2328
2329        const LINK_PATH: &'static str = "symlink";
2330        const LINK_TARGET: &'static str = "私は「UTF8」です";
2331        // We expect the reported size of the symlink to be the length of the target, in bytes,
2332        // *without* a null terminator. Most Linux systems assume UTF-8 encoding.
2333        const LINK_SIZE: usize = 22;
2334        assert_eq!(LINK_SIZE, LINK_TARGET.len());
2335
2336        spawn_kernel_and_run(async move |locked, current_task| {
2337            let kernel = current_task.kernel();
2338            let fs = RemoteFs::new_fs(
2339                locked,
2340                &kernel,
2341                client,
2342                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
2343                fio::PERM_READABLE | fio::PERM_WRITABLE,
2344            )
2345            .expect("new_fs failed");
2346            let ns = Namespace::new(fs);
2347            let root = ns.root();
2348            let symlink_node = root
2349                .create_symlink(locked, &current_task, LINK_PATH.into(), LINK_TARGET.into())
2350                .expect("symlink failed");
2351            assert_matches!(&*symlink_node.entry.node.info(), FsNodeInfo { size: LINK_SIZE, .. });
2352
2353            let mut context = LookupContext::new(SymlinkMode::NoFollow);
2354            let child = root
2355                .lookup_child(locked, &current_task, &mut context, "symlink".into())
2356                .expect("lookup_child failed");
2357
2358            match child.readlink(locked, &current_task).expect("readlink failed") {
2359                SymlinkTarget::Path(path) => assert_eq!(path, LINK_TARGET),
2360                SymlinkTarget::Node(_) => panic!("readlink returned SymlinkTarget::Node"),
2361            }
2362            // Ensure the size stat reports matches what is expected.
2363            let stat_result = child.entry.node.stat(locked, &current_task).expect("stat failed");
2364            assert_eq!(stat_result.st_size as usize, LINK_SIZE);
2365        })
2366        .await;
2367
2368        // Simulate a second run to ensure the symlink was persisted correctly.
2369        let fixture = TestFixture::open(
2370            fixture.close().await,
2371            TestFixtureOptions { format: false, ..Default::default() },
2372        )
2373        .await;
2374        let (server, client) = zx::Channel::create();
2375        fixture.root().clone(server.into()).expect("clone failed after remount");
2376
2377        spawn_kernel_and_run(async move |locked, current_task| {
2378            let kernel = current_task.kernel();
2379            let fs = RemoteFs::new_fs(
2380                locked,
2381                &kernel,
2382                client,
2383                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
2384                fio::PERM_READABLE | fio::PERM_WRITABLE,
2385            )
2386            .expect("new_fs failed after remount");
2387            let ns = Namespace::new(fs);
2388            let root = ns.root();
2389            let mut context = LookupContext::new(SymlinkMode::NoFollow);
2390            let child = root
2391                .lookup_child(locked, &current_task, &mut context, "symlink".into())
2392                .expect("lookup_child failed after remount");
2393
2394            match child.readlink(locked, &current_task).expect("readlink failed after remount") {
2395                SymlinkTarget::Path(path) => assert_eq!(path, LINK_TARGET),
2396                SymlinkTarget::Node(_) => {
2397                    panic!("readlink returned SymlinkTarget::Node after remount")
2398                }
2399            }
            // Ensure the size that stat reports matches what is expected.
2401            let stat_result =
2402                child.entry.node.stat(locked, &current_task).expect("stat failed after remount");
2403            assert_eq!(stat_result.st_size as usize, LINK_SIZE);
2404        })
2405        .await;
2406
2407        fixture.close().await;
2408    }
2409
    // Verifies that file mode, owner uid/gid, and device numbers assigned at
    // creation time are persisted by the remote filesystem across a remount.
    #[::fuchsia::test]
    async fn test_mode_uid_gid_and_dev_persists() {
        // Deliberately unusual permission bits so any round-trip mismatch is obvious.
        const FILE_MODE: FileMode = mode!(IFREG, 0o467);
        const DIR_MODE: FileMode = mode!(IFDIR, 0o647);
        const BLK_MODE: FileMode = mode!(IFBLK, 0o746);

        let fixture = TestFixture::new().await;
        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed");

        // Simulate a first run of starnix.
        spawn_kernel_and_run(async move |locked, current_task| {
            let kernel = current_task.kernel();
            // Switch to a non-root effective/filesystem identity so the created
            // nodes are owned by uid 1 / gid 2 rather than the defaults.
            let creds = Credentials::clone(&current_task.current_creds());
            current_task.set_creds(Credentials { euid: 1, fsuid: 1, egid: 2, fsgid: 2, ..creds });
            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
                rights,
            )
            .expect("new_fs failed");
            let ns = Namespace::new(fs);
            // Clear the umask so the modes above are stored verbatim.
            current_task.fs().set_umask(FileMode::from_bits(0));
            ns.root()
                .create_node(locked, &current_task, "file".into(), FILE_MODE, DeviceType::NONE)
                .expect("create_node failed");
            ns.root()
                .create_node(locked, &current_task, "dir".into(), DIR_MODE, DeviceType::NONE)
                .expect("create_node failed");
            ns.root()
                .create_node(locked, &current_task, "dev".into(), BLK_MODE, DeviceType::RANDOM)
                .expect("create_node failed");
        })
        .await;

        // Simulate a second run.
        // Reopen the same backing volume without reformatting it (format: false).
        let fixture = TestFixture::open(
            fixture.close().await,
            TestFixtureOptions { format: false, ..Default::default() },
        )
        .await;

        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed");

        spawn_kernel_and_run(async move |locked, current_task| {
            let kernel = current_task.kernel();
            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
                rights,
            )
            .expect("new_fs failed");
            let ns = Namespace::new(fs);
            let mut context = LookupContext::new(SymlinkMode::NoFollow);
            // Every node must come back with exactly the mode/uid/gid/rdev set
            // during the first run.
            let child = ns
                .root()
                .lookup_child(locked, &current_task, &mut context, "file".into())
                .expect("lookup_child failed");
            assert_matches!(
                &*child.entry.node.info(),
                FsNodeInfo { mode: FILE_MODE, uid: 1, gid: 2, rdev: DeviceType::NONE, .. }
            );
            let child = ns
                .root()
                .lookup_child(locked, &current_task, &mut context, "dir".into())
                .expect("lookup_child failed");
            assert_matches!(
                &*child.entry.node.info(),
                FsNodeInfo { mode: DIR_MODE, uid: 1, gid: 2, rdev: DeviceType::NONE, .. }
            );
            let child = ns
                .root()
                .lookup_child(locked, &current_task, &mut context, "dev".into())
                .expect("lookup_child failed");
            assert_matches!(
                &*child.entry.node.info(),
                FsNodeInfo { mode: BLK_MODE, uid: 1, gid: 2, rdev: DeviceType::RANDOM, .. }
            );
        })
        .await;
        fixture.close().await;
    }
2499
2500    #[::fuchsia::test]
2501    async fn test_dot_dot_inode_numbers() {
2502        let fixture = TestFixture::new().await;
2503        let (server, client) = zx::Channel::create();
2504        fixture.root().clone(server.into()).expect("clone failed");
2505
2506        const MODE: FileMode = FileMode::from_bits(FileMode::IFDIR.bits() | 0o777);
2507
2508        spawn_kernel_and_run(async |locked, current_task| {
2509            let kernel = current_task.kernel();
2510            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
2511            let fs = RemoteFs::new_fs(
2512                locked,
2513                &kernel,
2514                client,
2515                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
2516                rights,
2517            )
2518            .expect("new_fs failed");
2519            let ns = Namespace::new(fs);
2520            current_task.fs().set_umask(FileMode::from_bits(0));
2521            let sub_dir1 = ns
2522                .root()
2523                .create_node(locked, &current_task, "dir".into(), MODE, DeviceType::NONE)
2524                .expect("create_node failed");
2525            let sub_dir2 = sub_dir1
2526                .create_node(locked, &current_task, "dir".into(), MODE, DeviceType::NONE)
2527                .expect("create_node failed");
2528
2529            let dir_handle = ns
2530                .root()
2531                .entry
2532                .open_anonymous(locked, &current_task, OpenFlags::RDONLY)
2533                .expect("open failed");
2534
2535            #[derive(Default)]
2536            struct Sink {
2537                offset: off_t,
2538                dot_dot_inode_num: u64,
2539            }
2540            impl DirentSink for Sink {
2541                fn add(
2542                    &mut self,
2543                    inode_num: ino_t,
2544                    offset: off_t,
2545                    entry_type: DirectoryEntryType,
2546                    name: &FsStr,
2547                ) -> Result<(), Errno> {
2548                    if name == ".." {
2549                        self.dot_dot_inode_num = inode_num;
2550                        assert_eq!(entry_type, DirectoryEntryType::DIR);
2551                    }
2552                    self.offset = offset;
2553                    Ok(())
2554                }
2555                fn offset(&self) -> off_t {
2556                    self.offset
2557                }
2558            }
2559            let mut sink = Sink::default();
2560            dir_handle.readdir(locked, &current_task, &mut sink).expect("readdir failed");
2561
2562            // inode_num for .. for the root should be the same as root.
2563            assert_eq!(sink.dot_dot_inode_num, ns.root().entry.node.ino);
2564
2565            let dir_handle = sub_dir1
2566                .entry
2567                .open_anonymous(locked, &current_task, OpenFlags::RDONLY)
2568                .expect("open failed");
2569            let mut sink = Sink::default();
2570            dir_handle.readdir(locked, &current_task, &mut sink).expect("readdir failed");
2571
2572            // inode_num for .. for the first sub directory should be the same as root.
2573            assert_eq!(sink.dot_dot_inode_num, ns.root().entry.node.ino);
2574
2575            let dir_handle = sub_dir2
2576                .entry
2577                .open_anonymous(locked, &current_task, OpenFlags::RDONLY)
2578                .expect("open failed");
2579            let mut sink = Sink::default();
2580            dir_handle.readdir(locked, &current_task, &mut sink).expect("readdir failed");
2581
2582            // inode_num for .. for the second subdir should be the first subdir.
2583            assert_eq!(sink.dot_dot_inode_num, sub_dir1.entry.node.ino);
2584        })
2585        .await;
2586        fixture.close().await;
2587    }
2588
2589    #[::fuchsia::test]
2590    async fn test_remote_special_node() {
2591        let fixture = TestFixture::new().await;
2592        let (server, client) = zx::Channel::create();
2593        fixture.root().clone(server.into()).expect("clone failed");
2594
2595        const FIFO_MODE: FileMode = FileMode::from_bits(FileMode::IFIFO.bits() | 0o777);
2596        const REG_MODE: FileMode = FileMode::from_bits(FileMode::IFREG.bits());
2597
2598        spawn_kernel_and_run(async |locked, current_task| {
2599            let kernel = current_task.kernel();
2600            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
2601            let fs = RemoteFs::new_fs(
2602                locked,
2603                &kernel,
2604                client,
2605                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
2606                rights,
2607            )
2608            .expect("new_fs failed");
2609            let ns = Namespace::new(fs);
2610            current_task.fs().set_umask(FileMode::from_bits(0));
2611            let root = ns.root();
2612
2613            // Create RemoteSpecialNode (e.g. FIFO)
2614            root.create_node(locked, &current_task, "fifo".into(), FIFO_MODE, DeviceType::NONE)
2615                .expect("create_node failed");
2616            let mut context = LookupContext::new(SymlinkMode::NoFollow);
2617            let fifo_node = root
2618                .lookup_child(locked, &current_task, &mut context, "fifo".into())
2619                .expect("lookup_child failed");
2620
2621            // Test that we get expected behaviour for RemoteSpecialNode operation, e.g.
2622            // test that truncate should return EINVAL
2623            match fifo_node.truncate(locked, &current_task, 0) {
2624                Ok(_) => {
2625                    panic!("truncate passed for special node")
2626                }
2627                Err(errno) if errno == EINVAL => {}
2628                Err(e) => {
2629                    panic!("truncate failed with error {:?}", e)
2630                }
2631            };
2632
2633            // Create regular RemoteNode
2634            root.create_node(locked, &current_task, "file".into(), REG_MODE, DeviceType::NONE)
2635                .expect("create_node failed");
2636            let mut context = LookupContext::new(SymlinkMode::NoFollow);
2637            let reg_node = root
2638                .lookup_child(locked, &current_task, &mut context, "file".into())
2639                .expect("lookup_child failed");
2640
2641            // We should be able to perform truncate on regular files
2642            reg_node.truncate(locked, &current_task, 0).expect("truncate failed");
2643        })
2644        .await;
2645        fixture.close().await;
2646    }
2647
    // Verifies that hard links created through RemoteFs are persisted: after a
    // remount both names must resolve to the same node.
    #[::fuchsia::test]
    async fn test_hard_link() {
        let fixture = TestFixture::new().await;
        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed");

        spawn_kernel_and_run(async move |locked, current_task| {
            let kernel = current_task.kernel();
            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
                rights,
            )
            .expect("new_fs failed");
            let ns = Namespace::new(fs);
            current_task.fs().set_umask(FileMode::from_bits(0));
            // Create "file1" and hard-link it into the root directory as "file2".
            let node = ns
                .root()
                .create_node(
                    locked,
                    &current_task,
                    "file1".into(),
                    mode!(IFREG, 0o666),
                    DeviceType::NONE,
                )
                .expect("create_node failed");
            ns.root()
                .entry
                .node
                .link(locked, &current_task, &ns.root().mount, "file2".into(), &node.entry.node)
                .expect("link failed");
        })
        .await;

        // Remount the same backing volume without reformatting so the links are
        // read back from disk rather than from any in-memory cache.
        let fixture = TestFixture::open(
            fixture.close().await,
            TestFixtureOptions { format: false, ..Default::default() },
        )
        .await;

        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed");

        spawn_kernel_and_run(async move |locked, current_task| {
            let kernel = current_task.kernel();
            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
                rights,
            )
            .expect("new_fs failed");
            let ns = Namespace::new(fs);
            let mut context = LookupContext::new(SymlinkMode::NoFollow);
            let child1 = ns
                .root()
                .lookup_child(locked, &current_task, &mut context, "file1".into())
                .expect("lookup_child failed");
            let child2 = ns
                .root()
                .lookup_child(locked, &current_task, &mut context, "file2".into())
                .expect("lookup_child failed");
            // Both names must resolve to the same in-memory FsNode, which shows
            // the hard link survived the remount.
            assert!(Arc::ptr_eq(&child1.entry.node, &child2.entry.node));
        })
        .await;
        fixture.close().await;
    }
2720
    // Verifies that a file with fsverity enabled can still be looked up after
    // the kernel is torn down and the volume remounted (i.e. when the node is
    // reconstructed from disk instead of served from cache).
    #[::fuchsia::test]
    async fn test_lookup_on_fsverity_enabled_file() {
        let fixture = TestFixture::new().await;
        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed");

        const MODE: FileMode = FileMode::from_bits(FileMode::IFREG.bits() | 0o467);

        spawn_kernel_and_run(async move |locked, current_task| {
            let kernel = current_task.kernel();
            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
                rights,
            )
            .expect("new_fs failed");
            let ns = Namespace::new(fs);
            current_task.fs().set_umask(FileMode::from_bits(0));
            let file = ns
                .root()
                .create_node(locked, &current_task, "file".into(), MODE, DeviceType::NONE)
                .expect("create_node failed");
            // Enable verity on the file.
            // NOTE(review): hash_algorithm 1 / log_blocksize 12 (4 KiB blocks)
            // presumably follow the fsverity UAPI conventions — confirm against
            // the fsverity_descriptor definition.
            let desc = fsverity_descriptor {
                version: 1,
                hash_algorithm: 1,
                salt_size: 32,
                log_blocksize: 12,
                ..Default::default()
            };
            file.entry
                .node
                .enable_fsverity(locked, current_task, &desc)
                .expect("enable fsverity failed");
        })
        .await;

        // Tear down the kernel and open the file again. The file should no longer be cached.
        // Test that lookup works as expected for an fsverity-enabled file.
        let fixture = TestFixture::open(
            fixture.close().await,
            TestFixtureOptions { format: false, ..Default::default() },
        )
        .await;
        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed");

        spawn_kernel_and_run(async move |locked, current_task| {
            let kernel = current_task.kernel();
            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
                rights,
            )
            .expect("new_fs failed");
            let ns = Namespace::new(fs);
            let mut context = LookupContext::new(SymlinkMode::NoFollow);
            // The lookup itself is the assertion: it must not fail for an
            // fsverity-enabled file.
            let _child = ns
                .root()
                .lookup_child(locked, &current_task, &mut context, "file".into())
                .expect("lookup_child failed");
        })
        .await;
        fixture.close().await;
    }
2792
    // Verifies that attribute updates made via chmod are persisted by the
    // remote filesystem across a remount.
    #[::fuchsia::test]
    async fn test_update_attributes_persists() {
        let fixture = TestFixture::new().await;
        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed");

        const MODE: FileMode = FileMode::from_bits(FileMode::IFREG.bits() | 0o467);

        spawn_kernel_and_run(async move |locked, current_task| {
            let kernel = current_task.kernel();
            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
                rights,
            )
            .expect("new_fs failed");
            let ns = Namespace::new(fs);
            current_task.fs().set_umask(FileMode::from_bits(0));
            let file = ns
                .root()
                .create_node(locked, &current_task, "file".into(), MODE, DeviceType::NONE)
                .expect("create_node failed");
            // Change the mode, this change should persist
            file.entry
                .node
                .chmod(locked, &current_task, &file.mount, MODE | FileMode::ALLOW_ALL)
                .expect("chmod failed");
        })
        .await;

        // Tear down the kernel and open the file again. Check that changes persisted.
        // (format: false keeps the backing volume's contents.)
        let fixture = TestFixture::open(
            fixture.close().await,
            TestFixtureOptions { format: false, ..Default::default() },
        )
        .await;
        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed");

        spawn_kernel_and_run(async move |locked, current_task| {
            let kernel = current_task.kernel();
            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
                rights,
            )
            .expect("new_fs failed");
            let ns = Namespace::new(fs);
            let mut context = LookupContext::new(SymlinkMode::NoFollow);
            let child = ns
                .root()
                .lookup_child(locked, &current_task, &mut context, "file".into())
                .expect("lookup_child failed");
            // The mode read back after remount must include the bits added by chmod.
            assert_eq!(child.entry.node.info().mode, MODE | FileMode::ALLOW_ALL);
        })
        .await;
        fixture.close().await;
    }
2857
2858    #[::fuchsia::test]
2859    async fn test_statfs() {
2860        let fixture = TestFixture::new().await;
2861        let (server, client) = zx::Channel::create();
2862        fixture.root().clone(server.into()).expect("clone failed");
2863
2864        spawn_kernel_and_run(async move |locked, current_task| {
2865            let kernel = current_task.kernel();
2866            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
2867            let fs = RemoteFs::new_fs(
2868                locked,
2869                &kernel,
2870                client,
2871                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
2872                rights,
2873            )
2874            .expect("new_fs failed");
2875
2876            let statfs = fs.statfs(locked, &current_task).expect("statfs failed");
2877            assert!(statfs.f_type != 0);
2878            assert!(statfs.f_bsize > 0);
2879            assert!(statfs.f_blocks > 0);
2880            assert!(statfs.f_bfree > 0 && statfs.f_bfree <= statfs.f_blocks);
2881            assert!(statfs.f_files > 0);
2882            assert!(statfs.f_ffree > 0 && statfs.f_ffree <= statfs.f_files);
2883            assert!(statfs.f_fsid.val[0] != 0 || statfs.f_fsid.val[1] != 0);
2884            assert!(statfs.f_namelen > 0);
2885            assert!(statfs.f_frsize > 0);
2886        })
2887        .await;
2888
2889        fixture.close().await;
2890    }
2891
2892    #[::fuchsia::test]
2893    async fn test_allocate() {
2894        let fixture = TestFixture::new().await;
2895        let (server, client) = zx::Channel::create();
2896        fixture.root().clone(server.into()).expect("clone failed");
2897
2898        spawn_kernel_and_run(async move |locked, current_task| {
2899            let kernel = current_task.kernel();
2900            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
2901            let fs = RemoteFs::new_fs(
2902                locked,
2903                &kernel,
2904                client,
2905                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
2906                rights,
2907            )
2908            .expect("new_fs failed");
2909            let ns = Namespace::new(fs);
2910            current_task.fs().set_umask(FileMode::from_bits(0));
2911            let root = ns.root();
2912
2913            const REG_MODE: FileMode = FileMode::from_bits(FileMode::IFREG.bits());
2914            root.create_node(locked, &current_task, "file".into(), REG_MODE, DeviceType::NONE)
2915                .expect("create_node failed");
2916            let mut context = LookupContext::new(SymlinkMode::NoFollow);
2917            let reg_node = root
2918                .lookup_child(locked, &current_task, &mut context, "file".into())
2919                .expect("lookup_child failed");
2920
2921            reg_node
2922                .entry
2923                .node
2924                .fallocate(locked, &current_task, FallocMode::Allocate { keep_size: false }, 0, 20)
2925                .expect("truncate failed");
2926        })
2927        .await;
2928        fixture.close().await;
2929    }
2930
2931    #[::fuchsia::test]
2932    async fn test_allocate_overflow() {
2933        let fixture = TestFixture::new().await;
2934        let (server, client) = zx::Channel::create();
2935        fixture.root().clone(server.into()).expect("clone failed");
2936
2937        spawn_kernel_and_run(async move |locked, current_task| {
2938            let kernel = current_task.kernel();
2939            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
2940            let fs = RemoteFs::new_fs(
2941                locked,
2942                &kernel,
2943                client,
2944                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
2945                rights,
2946            )
2947            .expect("new_fs failed");
2948            let ns = Namespace::new(fs);
2949            current_task.fs().set_umask(FileMode::from_bits(0));
2950            let root = ns.root();
2951
2952            const REG_MODE: FileMode = FileMode::from_bits(FileMode::IFREG.bits());
2953            root.create_node(locked, &current_task, "file".into(), REG_MODE, DeviceType::NONE)
2954                .expect("create_node failed");
2955            let mut context = LookupContext::new(SymlinkMode::NoFollow);
2956            let reg_node = root
2957                .lookup_child(locked, &current_task, &mut context, "file".into())
2958                .expect("lookup_child failed");
2959
2960            reg_node
2961                .entry
2962                .node
2963                .fallocate(
2964                    locked,
2965                    &current_task,
2966                    FallocMode::Allocate { keep_size: false },
2967                    1,
2968                    u64::MAX,
2969                )
2970                .expect_err("truncate unexpectedly passed");
2971        })
2972        .await;
2973        fixture.close().await;
2974    }
2975
    // Verifies that a file's modification time (mtime) is advanced by a write
    // and that the updated value is persisted across a remount.
    #[::fuchsia::test]
    async fn test_time_modify_persists() {
        let fixture = TestFixture::new().await;
        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed");

        const MODE: FileMode = FileMode::from_bits(FileMode::IFREG.bits() | 0o467);

        // The first kernel run returns the mtime observed right after the write.
        let last_modified = spawn_kernel_and_run(async move |locked, current_task| {
            let kernel = current_task.kernel();
            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
                rights,
            )
            .expect("new_fs failed");
            let ns: Arc<Namespace> = Namespace::new(fs);
            current_task.fs().set_umask(FileMode::from_bits(0));
            let child = ns
                .root()
                .create_node(locked, &current_task, "file".into(), MODE, DeviceType::NONE)
                .expect("create_node failed");
            // Write to file (this should update mtime (time_modify))
            let file = child
                .open(locked, &current_task, OpenFlags::RDWR, AccessCheck::default())
                .expect("open failed");
            // Call `fetch_and_refresh_info(..)` to refresh `time_modify` with the time managed by the
            // underlying filesystem
            let time_before_write = child
                .entry
                .node
                .fetch_and_refresh_info(locked, &current_task)
                .expect("fetch_and_refresh_info failed")
                .time_modify;
            let write_bytes: [u8; 5] = [1, 2, 3, 4, 5];
            let written = file
                .write(locked, &current_task, &mut VecInputBuffer::new(&write_bytes))
                .expect("write failed");
            assert_eq!(written, write_bytes.len());
            // Re-fetch: the write must have advanced mtime past the pre-write value.
            let last_modified = child
                .entry
                .node
                .fetch_and_refresh_info(locked, &current_task)
                .expect("fetch_and_refresh_info failed")
                .time_modify;
            assert!(last_modified > time_before_write);
            last_modified
        })
        .await;

        // Tear down the kernel and open the file again. Check that modification time is when we
        // last modified the contents of the file
        let fixture = TestFixture::open(
            fixture.close().await,
            TestFixtureOptions { format: false, ..Default::default() },
        )
        .await;
        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed");
        let refreshed_modified_time = spawn_kernel_and_run(async move |locked, current_task| {
            let kernel = current_task.kernel();
            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
                rights,
            )
            .expect("new_fs failed");
            let ns = Namespace::new(fs);
            let mut context = LookupContext::new(SymlinkMode::NoFollow);
            let child = ns
                .root()
                .lookup_child(locked, &current_task, &mut context, "file".into())
                .expect("lookup_child failed");
            // Read the persisted mtime back from the remounted filesystem.
            let last_modified = child
                .entry
                .node
                .fetch_and_refresh_info(locked, &current_task)
                .expect("fetch_and_refresh_info failed")
                .time_modify;
            last_modified
        })
        .await;
        // The persisted mtime must equal the one observed right after the write.
        assert_eq!(last_modified, refreshed_modified_time);

        fixture.close().await;
    }
3068
3069    #[::fuchsia::test]
3070    async fn test_update_atime_mtime() {
3071        let fixture = TestFixture::new().await;
3072        let (server, client) = zx::Channel::create();
3073        fixture.root().clone(server.into()).expect("clone failed");
3074
3075        const MODE: FileMode = FileMode::from_bits(FileMode::IFREG.bits() | 0o467);
3076
3077        spawn_kernel_and_run(async move |locked, current_task| {
3078            let kernel = current_task.kernel();
3079            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
3080            let fs = RemoteFs::new_fs(
3081                locked,
3082                &kernel,
3083                client,
3084                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
3085                rights,
3086            )
3087            .expect("new_fs failed");
3088            let ns: Arc<Namespace> = Namespace::new(fs);
3089            current_task.fs().set_umask(FileMode::from_bits(0));
3090            let child = ns
3091                .root()
3092                .create_node(locked, &current_task, "file".into(), MODE, DeviceType::NONE)
3093                .expect("create_node failed");
3094
3095            let info_original = child
3096                .entry
3097                .node
3098                .fetch_and_refresh_info(locked, &current_task)
3099                .expect("fetch_and_refresh_info failed")
3100                .clone();
3101
3102            child
3103                .entry
3104                .node
3105                .update_atime_mtime(
3106                    locked,
3107                    &current_task,
3108                    &child.mount,
3109                    TimeUpdateType::Time(UtcInstant::from_nanos(30)),
3110                    TimeUpdateType::Omit,
3111                )
3112                .expect("update_atime_mtime failed");
3113            let info_after_update = child
3114                .entry
3115                .node
3116                .fetch_and_refresh_info(locked, &current_task)
3117                .expect("fetch_and_refresh_info failed")
3118                .clone();
3119            assert_eq!(info_after_update.time_modify, info_original.time_modify);
3120            assert_eq!(info_after_update.time_access, UtcInstant::from_nanos(30));
3121
3122            child
3123                .entry
3124                .node
3125                .update_atime_mtime(
3126                    locked,
3127                    &current_task,
3128                    &child.mount,
3129                    TimeUpdateType::Omit,
3130                    TimeUpdateType::Time(UtcInstant::from_nanos(50)),
3131                )
3132                .expect("update_atime_mtime failed");
3133            let info_after_update2 = child
3134                .entry
3135                .node
3136                .fetch_and_refresh_info(locked, &current_task)
3137                .expect("fetch_and_refresh_info failed")
3138                .clone();
3139            assert_eq!(info_after_update2.time_modify, UtcInstant::from_nanos(50));
3140            assert_eq!(info_after_update2.time_access, UtcInstant::from_nanos(30));
3141        })
3142        .await;
3143        fixture.close().await;
3144    }
3145
3146    #[::fuchsia::test]
3147    async fn test_write_updates_mtime_ctime() {
3148        let fixture = TestFixture::new().await;
3149        let (server, client) = zx::Channel::create();
3150        fixture.root().clone(server.into()).expect("clone failed");
3151
3152        const MODE: FileMode = FileMode::from_bits(FileMode::IFREG.bits() | 0o467);
3153
3154        spawn_kernel_and_run(async move |locked, current_task| {
3155            let kernel = current_task.kernel();
3156            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
3157            let fs = RemoteFs::new_fs(
3158                locked,
3159                &kernel,
3160                client,
3161                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
3162                rights,
3163            )
3164            .expect("new_fs failed");
3165            let ns: Arc<Namespace> = Namespace::new(fs);
3166            current_task.fs().set_umask(FileMode::from_bits(0));
3167            let child = ns
3168                .root()
3169                .create_node(locked, &current_task, "file".into(), MODE, DeviceType::NONE)
3170                .expect("create_node failed");
3171            let file = child
3172                .open(locked, &current_task, OpenFlags::RDWR, AccessCheck::default())
3173                .expect("open failed");
3174            // Call `fetch_and_refresh_info(..)` to refresh ctime and mtime with the time managed by the
3175            // underlying filesystem
3176            let (ctime_before_write, mtime_before_write) = {
3177                let info = child
3178                    .entry
3179                    .node
3180                    .fetch_and_refresh_info(locked, &current_task)
3181                    .expect("fetch_and_refresh_info failed");
3182                (info.time_status_change, info.time_modify)
3183            };
3184
3185            // Writing to a file should update ctime and mtime
3186            let write_bytes: [u8; 5] = [1, 2, 3, 4, 5];
3187            let written = file
3188                .write(locked, &current_task, &mut VecInputBuffer::new(&write_bytes))
3189                .expect("write failed");
3190            assert_eq!(written, write_bytes.len());
3191
3192            // As Fxfs, the underlying filesystem in this test, can manage file timestamps,
3193            // we should not see an update in mtime and ctime without first refreshing the node with
3194            // the metadata from Fxfs.
3195            let (ctime_after_write_no_refresh, mtime_after_write_no_refresh) = {
3196                let info = child.entry.node.info();
3197                (info.time_status_change, info.time_modify)
3198            };
3199            assert_eq!(ctime_after_write_no_refresh, ctime_before_write);
3200            assert_eq!(mtime_after_write_no_refresh, mtime_before_write);
3201
3202            // Refresh information, we should see `info` with mtime and ctime from the remote
3203            // filesystem (assume this is true if the new timestamp values are greater than the ones
3204            // without the refresh).
3205            let (ctime_after_write_refresh, mtime_after_write_refresh) = {
3206                let info = child
3207                    .entry
3208                    .node
3209                    .fetch_and_refresh_info(locked, &current_task)
3210                    .expect("fetch_and_refresh_info failed");
3211                (info.time_status_change, info.time_modify)
3212            };
3213            assert_eq!(ctime_after_write_refresh, mtime_after_write_refresh);
3214            assert!(ctime_after_write_refresh > ctime_after_write_no_refresh);
3215        })
3216        .await;
3217        fixture.close().await;
3218    }
3219
3220    #[::fuchsia::test]
3221    async fn test_casefold_persists() {
3222        let fixture = TestFixture::new().await;
3223        let (server, client) = zx::Channel::create();
3224        fixture.root().clone(server.into()).expect("clone failed");
3225
3226        spawn_kernel_and_run(async move |locked, current_task| {
3227            let kernel = current_task.kernel();
3228            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
3229            let fs = RemoteFs::new_fs(
3230                locked,
3231                &kernel,
3232                client,
3233                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
3234                rights,
3235            )
3236            .expect("new_fs failed");
3237            let ns: Arc<Namespace> = Namespace::new(fs);
3238            let child = ns
3239                .root()
3240                .create_node(
3241                    locked,
3242                    &current_task,
3243                    "dir".into(),
3244                    FileMode::ALLOW_ALL.with_type(FileMode::IFDIR),
3245                    DeviceType::NONE,
3246                )
3247                .expect("create_node failed");
3248            child
3249                .entry
3250                .node
3251                .update_attributes(locked, &current_task, |info| {
3252                    info.casefold = true;
3253                    Ok(())
3254                })
3255                .expect("enable casefold")
3256        })
3257        .await;
3258
3259        // Tear down the kernel and open the dir again. Check that casefold is preserved.
3260        let fixture = TestFixture::open(
3261            fixture.close().await,
3262            TestFixtureOptions { format: false, ..Default::default() },
3263        )
3264        .await;
3265        let (server, client) = zx::Channel::create();
3266        fixture.root().clone(server.into()).expect("clone failed");
3267        let casefold = spawn_kernel_and_run(async move |locked, current_task| {
3268            let kernel = current_task.kernel();
3269            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
3270            let fs = RemoteFs::new_fs(
3271                locked,
3272                &kernel,
3273                client,
3274                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
3275                rights,
3276            )
3277            .expect("new_fs failed");
3278            let ns = Namespace::new(fs);
3279            let mut context = LookupContext::new(SymlinkMode::NoFollow);
3280            let child = ns
3281                .root()
3282                .lookup_child(locked, &current_task, &mut context, "dir".into())
3283                .expect("lookup_child failed");
3284            let casefold = child
3285                .entry
3286                .node
3287                .fetch_and_refresh_info(locked, &current_task)
3288                .expect("fetch_and_refresh_info failed")
3289                .casefold;
3290            casefold
3291        })
3292        .await;
3293        assert!(casefold);
3294
3295        fixture.close().await;
3296    }
3297
    // Under RELATIME, reads update the node's in-memory atime immediately, but the
    // value only reaches the remote filesystem when the mount is released. This test
    // reads through a first RELATIME mount, drops it, then remounts over `client2`
    // and checks the persisted atime caught up with the last in-memory value.
    #[::fuchsia::test]
    async fn test_pending_access_time() {
        const TEST_FILE: &str = "test_file";

        let fixture = TestFixture::new().await;
        // Two connections to the same root: one for each RemoteFs instance below.
        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed");
        let (server, client2) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed");

        spawn_kernel_and_run(async move |locked, current_task| {
            let kernel = current_task.kernel.clone();

            // The inner block scopes the first filesystem so it is dropped before the
            // remount; only the final in-memory atime escapes.
            let atime3 = {
                let fs = RemoteFs::new_fs(
                    locked,
                    &kernel,
                    client,
                    FileSystemOptions {
                        source: FlyByteStr::new(b"/"),
                        flags: MountFlags::RELATIME,
                        ..Default::default()
                    },
                    fio::PERM_READABLE | fio::PERM_WRITABLE,
                )
                .expect("new_fs failed");

                let ns = Namespace::new_with_flags(fs, MountFlags::RELATIME);
                let child = ns
                    .root()
                    .open_create_node(
                        locked,
                        &current_task,
                        TEST_FILE.into(),
                        FileMode::ALLOW_ALL.with_type(FileMode::IFREG),
                        DeviceType::NONE,
                        OpenFlags::empty(),
                    )
                    .expect("create_node failed");

                let atime1 = child.entry.node.info().time_access;

                // Ensure the clock can advance between the snapshot and the read.
                std::thread::sleep(std::time::Duration::from_micros(1));

                let file_handle = child
                    .open(locked, &current_task, OpenFlags::RDWR, AccessCheck::default())
                    .expect("open failed");

                file_handle
                    .read(locked, &current_task, &mut VecOutputBuffer::new(10))
                    .expect("read failed");

                // Expect atime to have changed.
                let atime2 = child.entry.node.info().time_access;
                assert!(atime2 > atime1);

                std::thread::sleep(std::time::Duration::from_micros(1));

                file_handle
                    .read(locked, &current_task, &mut VecOutputBuffer::new(10))
                    .expect("read failed");

                // And again.
                let atime3 = child.entry.node.info().time_access;
                assert!(atime3 > atime2);

                atime3
            };

            // Run deferred releases so the filesystem dropped at the end of the block
            // above is actually torn down before we remount.
            kernel.delayed_releaser.apply(locked.cast_locked(), current_task);

            // After dropping the filesystem, the atime should have been persistently updated.
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client2,
                FileSystemOptions {
                    source: FlyByteStr::new(b"/"),
                    flags: MountFlags::RELATIME,
                    ..Default::default()
                },
                fio::PERM_READABLE | fio::PERM_WRITABLE,
            )
            .expect("new_fs failed");

            let ns = Namespace::new_with_flags(fs, MountFlags::RELATIME);
            let child = ns
                .root()
                .lookup_child(
                    locked,
                    &current_task,
                    &mut LookupContext::new(Default::default()),
                    TEST_FILE.into(),
                )
                .expect("lookup_child failed");

            // The persisted atime must be at least the last value observed in memory.
            let atime4 = child.entry.node.info().time_access;

            assert!(atime4 >= atime3);
        })
        .await;

        fixture.close().await;
    }
3402
3403    #[::fuchsia::test]
3404    async fn test_read_chunking() {
3405        use futures::StreamExt;
3406        let (client, mut stream) = create_request_stream::<fio::FileMarker>();
3407        let content = vec![0xAB; (fio::MAX_TRANSFER_SIZE + 100) as usize];
3408        let content_clone = content.clone();
3409
3410        let _server_task = fasync::Task::spawn(async move {
3411            while let Some(Ok(request)) = stream.next().await {
3412                match request {
3413                    fio::FileRequest::ReadAt { count, offset, responder } => {
3414                        let start = offset as usize;
3415                        let end = std::cmp::min(start + count as usize, content_clone.len());
3416                        let data = if start < content_clone.len() {
3417                            &content_clone[start..end]
3418                        } else {
3419                            &[]
3420                        };
3421                        responder.send(Ok(data)).unwrap();
3422                    }
3423                    _ => panic!("Unexpected request: {:?}", request),
3424                }
3425            }
3426        });
3427
3428        fasync::unblock(move || {
3429            let io = RemoteIo::new(client.into_channel().into());
3430            let mut buffer = VecOutputBuffer::new(content.len());
3431            assert_eq!(
3432                io.read_to_output_buffer(0, &mut buffer).expect("read_at failed"),
3433                content.len()
3434            );
3435            assert_eq!(buffer.data(), content.as_slice());
3436        })
3437        .await;
3438    }
3439
3440    #[::fuchsia::test]
3441    async fn test_write_chunking() {
3442        let (client, mut stream) = create_request_stream::<fio::FileMarker>();
3443        let content = vec![0xCD; (fio::MAX_TRANSFER_SIZE + 100) as usize];
3444        let content2 = content.clone();
3445
3446        let server_task = fasync::Task::spawn(async move {
3447            let mut written = vec![0; content2.len()];
3448            while let Some(Ok(request)) = stream.next().await {
3449                match request {
3450                    fio::FileRequest::WriteAt { offset, data, responder, .. } => {
3451                        let offset = offset as usize;
3452                        written[offset..offset + data.len()].copy_from_slice(&data);
3453                        responder.send(Ok(data.len() as u64)).unwrap();
3454                    }
3455                    _ => panic!("Unexpected request: {:?}", request),
3456                }
3457            }
3458            assert_eq!(written, content2);
3459        });
3460
3461        fasync::unblock(move || {
3462            let io = RemoteIo::new(client.into_channel().into());
3463            let mut buffer = VecInputBuffer::new(&content);
3464            assert_eq!(
3465                io.write_from_input_buffer(0, &mut buffer).expect("write_at failed"),
3466                content.len()
3467            );
3468        })
3469        .await;
3470
3471        server_task.await;
3472    }
3473
    // Verifies BaseNode attribute caching: fetches are served from the cache while
    // the node is in sync, and hit the server (GetAttributes) while a dirtying
    // operation is in flight and once more after it completes.
    #[::fuchsia::test]
    async fn test_cached_attribute_refresh_behavior() {
        let (client, mut stream) = create_request_stream::<fio::FileMarker>();
        // Two-party barrier holding the fake server's Resize reply hostage: the first
        // wait signals "truncate started", the second releases it to complete.
        let barrier = Arc::new(std::sync::Barrier::new(2));
        let barrier_clone = barrier.clone();
        // Counts GetAttributes requests observed by the fake server.
        let get_attrs_count = Arc::new(AtomicU32::new(0));
        let get_attrs_count_clone = get_attrs_count.clone();

        let server_task = fasync::Task::spawn(async move {
            while let Some(Ok(request)) = stream.next().await {
                match request {
                    fio::FileRequest::GetAttributes { query: _, responder } => {
                        get_attrs_count_clone.fetch_add(1, Ordering::SeqCst);
                        let mutable_attrs = fio::MutableNodeAttributes { ..Default::default() };
                        let immutable_attrs = fio::ImmutableNodeAttributes {
                            id: Some(1),
                            link_count: Some(1),
                            ..Default::default()
                        };
                        responder.send(Ok((&mutable_attrs, &immutable_attrs))).unwrap();
                    }
                    fio::FileRequest::Resize { length: _, responder } => {
                        // Reply only after the main thread has passed the barrier
                        // twice, keeping the truncate (a dirty op) in flight meanwhile.
                        let barrier_clone = barrier_clone.clone();
                        fasync::Task::spawn(async move {
                            fasync::unblock(move || {
                                barrier_clone.wait();
                                barrier_clone.wait();
                            })
                            .await;
                            responder.send(Ok(())).unwrap();
                        })
                        .detach();
                    }
                    fio::FileRequest::Close { responder } => {
                        responder.send(Ok(())).unwrap();
                    }
                    _ => panic!("Unexpected request: {:?}", request),
                }
            }
        });

        fasync::unblock(move || {
            let io = RemoteIo::new(client.into_channel().into());
            // Second argument `false`: the node starts in sync (not dirty), so the
            // first fetch below is expected to come from the cache.
            let node = BaseNode::new(io, false);
            let info = RwLock::new(FsNodeInfo::default());

            // 1. Initial fetch. Should return cached info immediately.
            assert_eq!(get_attrs_count.load(Ordering::SeqCst), 0);
            {
                let _info = node.fetch_and_refresh_info(&info).expect("fetch failed");
            }
            assert_eq!(get_attrs_count.load(Ordering::SeqCst), 0);

            // 2. Spawn a thread to perform a dirty operation.
            std::thread::scope(|s| {
                s.spawn(|| {
                    // `will_dirty` presumably marks the node dirty for the duration of
                    // the closure; the truncate blocks server-side on the barrier.
                    will_dirty(&[&node], || {
                        node.io.truncate(0).expect("truncate failed");
                    });
                });

                // Wait for the operation to start.
                barrier.wait();

                // Now the node is dirty. Fetching attributes should trigger a request.
                {
                    let _info = node.fetch_and_refresh_info(&info).expect("fetch failed");
                }
                assert_eq!(get_attrs_count.load(Ordering::SeqCst), 1);

                // A second fetch should trigger another request.
                {
                    let _info = node.fetch_and_refresh_info(&info).expect("fetch failed");
                }
                assert_eq!(get_attrs_count.load(Ordering::SeqCst), 2);

                // Let the operation finish.
                barrier.wait();
            });

            // 3. Operation finished. The next fetch should trigger a request.
            {
                let _info = node.fetch_and_refresh_info(&info).expect("fetch failed");
            }
            assert_eq!(get_attrs_count.load(Ordering::SeqCst), 3);

            // 4. Subsequent fetch should return cached info.
            {
                let _info = node.fetch_and_refresh_info(&info).expect("fetch failed");
            }
            assert_eq!(get_attrs_count.load(Ordering::SeqCst), 3);
        })
        .await;

        server_task.await;
    }
3570
    // Exercises the race where a GetAttributes refresh is already in flight when a
    // dirtying operation (Resize) starts: a refresh that completes while the dirty op
    // is pending must not mark the cached info in sync, so subsequent fetches keep
    // going to the server until the dirty op has fully settled.
    #[::fuchsia::test]
    async fn test_attribute_refresh_during_concurrent_dirty_operation() {
        let (client, mut stream) = create_request_stream::<fio::FileMarker>();
        // Barrier pairs gating the fake server: one pair stalls the first
        // GetAttributes reply, the other stalls the Resize reply, so the test body
        // controls exactly when each in-flight request completes.
        let get_attrs_started = Arc::new(std::sync::Barrier::new(2));
        let get_attrs_started_clone = get_attrs_started.clone();
        let finish_get_attrs = Arc::new(std::sync::Barrier::new(2));
        let finish_get_attrs_clone = finish_get_attrs.clone();

        let resize_started = Arc::new(std::sync::Barrier::new(2));
        let resize_started_clone = resize_started.clone();
        let finish_resize = Arc::new(std::sync::Barrier::new(2));
        let finish_resize_clone = finish_resize.clone();

        // Counts GetAttributes requests observed by the fake server.
        let get_attrs_count = Arc::new(AtomicU32::new(0));
        let get_attrs_count_clone = get_attrs_count.clone();

        let server_task = fasync::Task::spawn(async move {
            while let Some(Ok(request)) = stream.next().await {
                match request {
                    fio::FileRequest::GetAttributes { query: _, responder } => {
                        let count = get_attrs_count_clone.fetch_add(1, Ordering::SeqCst);
                        let finish_get_attrs_clone = finish_get_attrs_clone.clone();
                        let get_attrs_started_clone = get_attrs_started_clone.clone();

                        fasync::Task::spawn(async move {
                            // Only the very first GetAttributes is held on the
                            // barriers; later requests respond immediately.
                            if count == 0 {
                                fasync::unblock(move || {
                                    get_attrs_started_clone.wait();
                                    finish_get_attrs_clone.wait();
                                })
                                .await;
                            }
                            let mutable_attrs = fio::MutableNodeAttributes { ..Default::default() };
                            let immutable_attrs = fio::ImmutableNodeAttributes {
                                id: Some(1),
                                link_count: Some(1),
                                ..Default::default()
                            };
                            responder.send(Ok((&mutable_attrs, &immutable_attrs))).unwrap();
                        })
                        .detach();
                    }
                    fio::FileRequest::Resize { length: _, responder } => {
                        // The resize replies only after `finish_resize` is released,
                        // keeping the dirty op in flight across steps 3 and 4 below.
                        let resize_started_clone = resize_started_clone.clone();
                        let finish_resize_clone = finish_resize_clone.clone();
                        fasync::Task::spawn(async move {
                            fasync::unblock(move || {
                                resize_started_clone.wait();
                                finish_resize_clone.wait();
                            })
                            .await;
                            responder.send(Ok(())).unwrap();
                        })
                        .detach();
                    }
                    fio::FileRequest::Close { responder } => {
                        responder.send(Ok(())).unwrap();
                    }
                    _ => panic!("Unexpected request: {:?}", request),
                }
            }
        });

        fasync::unblock(move || {
            let io = RemoteIo::new(client.into_channel().into());
            // Second argument `true`: the node starts dirty, so the first refresh
            // below goes to the server (and is the request held on the barriers).
            let node = BaseNode::new(io, true);
            let info = RwLock::new(FsNodeInfo::default());

            std::thread::scope(|s| {
                // 1. Start Refresh Thread
                let refresh_thread = s.spawn(|| {
                    let _info = node.fetch_and_refresh_info(&info).expect("fetch failed");
                });

                get_attrs_started.wait();

                // 2. Start Dirty Thread
                let dirty_thread = s.spawn(|| {
                    will_dirty(&[&node], || {
                        node.io.truncate(0).expect("truncate failed");
                    });
                });

                resize_started.wait();

                // 3. Allow GetAttributes to finish
                finish_get_attrs.wait();
                refresh_thread.join().unwrap();
                assert_eq!(get_attrs_count.load(Ordering::SeqCst), 1);

                // 4. Refresh #2 (Should fetch because dirty op is in flight)
                {
                    let _info = node.fetch_and_refresh_info(&info).expect("fetch failed");
                }
                assert_eq!(get_attrs_count.load(Ordering::SeqCst), 2);

                // 5. Allow Dirty Op to finish
                finish_resize.wait();
                dirty_thread.join().unwrap();

                // 6. Refresh #3 (Should fetch because dirty op finished, but state was 0)
                {
                    let _info = node.fetch_and_refresh_info(&info).expect("fetch failed");
                }
                assert_eq!(get_attrs_count.load(Ordering::SeqCst), 3);

                // 7. Refresh #4 (Should be cached)
                {
                    let _info = node.fetch_and_refresh_info(&info).expect("fetch failed");
                }
                assert_eq!(get_attrs_count.load(Ordering::SeqCst), 3);
            });
        })
        .await;

        server_task.await;
    }
3688
    // Verifies that a successful `update_attributes(..)` invalidates the node's
    // cached info: the first `fetch_and_refresh_info(..)` is answered from the cache
    // (no GetAttributes request reaches the server), while the fetch after the
    // update triggers exactly one GetAttributes.
    #[::fuchsia::test]
    async fn test_update_attributes_invalidates_cache() {
        let (client, mut stream) = create_request_stream::<fio::DirectoryMarker>();
        // Counts GetAttributes requests observed by the fake server.
        let get_attrs_count = Arc::new(AtomicU32::new(0));
        let get_attrs_count_clone = get_attrs_count.clone();

        let server_task = fasync::Task::spawn(async move {
            let mut sub_tasks = Vec::new();
            while let Some(Ok(request)) = stream.next().await {
                match request {
                    fio::DirectoryRequest::Open { path, object, flags, .. } => {
                        assert_eq!(path, ".", "Unexpected open() for non-self");
                        let get_attrs_count = get_attrs_count_clone.clone();
                        // Serve the re-opened connection on its own task so the outer
                        // loop keeps accepting requests on the original channel.
                        sub_tasks.push(fasync::Task::spawn(async move {
                            let (mut stream, control_handle) =
                                ServerEnd::<fio::DirectoryMarker>::new(object)
                                    .into_stream_and_control_handle();
                            assert!(flags.contains(fio::Flags::FLAG_SEND_REPRESENTATION));

                            // The Representation provides the initial attributes to cache.
                            let mutable_attributes =
                                fio::MutableNodeAttributes { ..Default::default() };
                            let immutable_attributes = fio::ImmutableNodeAttributes {
                                id: Some(1),
                                link_count: Some(1),
                                ..Default::default()
                            };
                            let info = fio::DirectoryInfo {
                                attributes: Some(fio::NodeAttributes2 {
                                    mutable_attributes,
                                    immutable_attributes,
                                }),
                                ..Default::default()
                            };
                            let _ = control_handle
                                .send_on_representation(fio::Representation::Directory(info));

                            while let Some(Ok(request)) = stream.next().await {
                                match request {
                                    fio::DirectoryRequest::GetAttributes {
                                        query: _,
                                        responder,
                                    } => {
                                        // Only reached when the client's cache was
                                        // invalidated — this is what the test counts.
                                        get_attrs_count.fetch_add(1, Ordering::SeqCst);
                                        let mutable_attrs =
                                            fio::MutableNodeAttributes { ..Default::default() };
                                        let immutable_attrs = fio::ImmutableNodeAttributes {
                                            id: Some(1),
                                            link_count: Some(1),
                                            ..Default::default()
                                        };
                                        responder
                                            .send(Ok((&mutable_attrs, &immutable_attrs)))
                                            .unwrap();
                                    }
                                    fio::DirectoryRequest::UpdateAttributes {
                                        payload: _,
                                        responder,
                                    } => {
                                        // Accept the update so the client-side
                                        // invalidation path completes successfully.
                                        responder.send(Ok(())).unwrap();
                                    }
                                    fio::DirectoryRequest::Close { responder } => {
                                        responder.send(Ok(())).unwrap();
                                    }
                                    _ => {
                                        panic!("Unexpected request: {:?}", request)
                                    }
                                }
                            }
                        }));
                    }
                    fio::DirectoryRequest::Close { responder } => {
                        responder.send(Ok(())).unwrap();
                    }
                    fio::DirectoryRequest::QueryFilesystem { responder } => {
                        responder.send(0i32, None).unwrap();
                    }
                    _ => panic!("Unexpected request: {:?}", request),
                }
            }

            // Drain every per-connection task before the server task exits.
            for sub_task in sub_tasks {
                let _ = sub_task.await;
            }
        });

        spawn_kernel_and_run(async move |locked, current_task| {
            let fs = RemoteFs::new_fs(
                locked,
                &current_task.kernel(),
                client.into_channel(),
                FileSystemOptions { source: FlyByteStr::new(b"."), ..Default::default() },
                fio::PERM_READABLE | fio::PERM_WRITABLE,
            )
            .expect("failed to mount test remote FS");

            // 1. Initial fetch. Should use cached info because the root node has start_dirty=false.
            {
                let _info = fs
                    .root()
                    .node
                    .fetch_and_refresh_info(locked, current_task)
                    .expect("fetch failed");
            }
            assert_eq!(get_attrs_count.load(Ordering::SeqCst), 0);

            // 2. Update attributes. This should dirty the node.
            fs.root()
                .node
                .update_attributes(locked, current_task, |attrs| {
                    attrs.time_modify += UtcDuration::from_seconds(1);
                    Ok(())
                })
                .expect("update_attributes failed");

            // 3. Fetch again. Should trigger a request.
            {
                let _info = fs
                    .root()
                    .node
                    .fetch_and_refresh_info(locked, current_task)
                    .expect("fetch failed");
            }
            assert_eq!(get_attrs_count.load(Ordering::SeqCst), 1);
        })
        .await;

        server_task.await;
    }
3818
3819    #[test]
3820    fn test_info_state_initial_state() {
3821        let state = InfoState::new(true); // dirty
3822        assert_eq!(state.0.load(Ordering::Relaxed), 0);
3823
3824        let state = InfoState::new(false); // in sync
3825        assert_eq!(state.0.load(Ordering::Relaxed), InfoState::IN_SYNC);
3826    }
3827
3828    #[test]
3829    fn test_info_state_dirty_op_guard() {
3830        let state = InfoState::new(false);
3831        {
3832            let _guard = state.dirty_op_guard();
3833            assert_eq!(state.0.load(Ordering::Relaxed), 1); // IN_SYNC bit cleared, count 1
3834        }
3835        assert_eq!(state.0.load(Ordering::Relaxed), 0);
3836    }
3837
3838    #[test]
3839    fn test_info_state_maybe_refresh_success() {
3840        let state = InfoState::new(true);
3841        let info = RwLock::new(FsNodeInfo::default());
3842
3843        let res = state.maybe_refresh(&info, |_| Ok(42), |_| unreachable!());
3844        assert_eq!(res.unwrap(), 42);
3845        assert_eq!(state.0.load(Ordering::Relaxed), InfoState::IN_SYNC);
3846    }
3847
3848    #[test]
3849    fn test_info_state_maybe_refresh_error() {
3850        let state = InfoState::new(true);
3851        let info = RwLock::new(FsNodeInfo::default());
3852
3853        let res: Result<u32, Errno> =
3854            state.maybe_refresh(&info, |_| error!(EIO), |_| unreachable!());
3855        assert!(res.is_err());
3856        assert_eq!(state.0.load(Ordering::Relaxed), 0); // Still dirty
3857    }
3858
3859    #[test]
3860    fn test_info_state_maybe_refresh_not_needed() {
3861        let state = InfoState::new(false); // in sync
3862        let info = RwLock::new(FsNodeInfo::default());
3863        let res = state.maybe_refresh(&info, |_| unreachable!(), |_| Ok(123));
3864        assert_eq!(res.unwrap(), 123);
3865    }
3866
3867    #[test]
3868    fn test_info_state_concurrent_dirty_op_during_refresh() {
3869        let state = InfoState::new(true);
3870        let info = RwLock::new(FsNodeInfo::default());
3871
3872        state
3873            .maybe_refresh(
3874                &info,
3875                |_| {
3876                    // Simulate a dirty op starting while refresh is in progress
3877                    let _guard = state.dirty_op_guard();
3878                    assert_eq!(state.0.load(Ordering::Relaxed), InfoState::PENDING_REFRESH | 1);
3879                    Ok(())
3880                },
3881                |_| unreachable!(),
3882            )
3883            .unwrap();
3884
3885        assert_eq!(state.0.load(Ordering::Relaxed), 0);
3886    }
3887
3888    #[::fuchsia::test]
3889    async fn test_sync() {
3890        let fixture = TestFixture::new().await;
3891        let (server, client) = zx::Channel::create();
3892        fixture.root().clone(server.into()).expect("clone failed");
3893
3894        spawn_kernel_and_run(async move |locked, current_task| {
3895            let kernel = current_task.kernel();
3896            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
3897            let fs = RemoteFs::new_fs(
3898                locked,
3899                &kernel,
3900                client,
3901                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
3902                rights,
3903            )
3904            .expect("new_fs failed");
3905            let ns = Namespace::new(fs);
3906            current_task.fs().set_umask(FileMode::from_bits(0));
3907            let root = ns.root();
3908
3909            const REG_MODE: FileMode = FileMode::from_bits(FileMode::IFREG.bits());
3910            root.create_node(locked, &current_task, "file".into(), REG_MODE, DeviceType::NONE)
3911                .expect("create_node failed");
3912            let mut context = LookupContext::new(SymlinkMode::NoFollow);
3913            let reg_node = root
3914                .lookup_child(locked, &current_task, &mut context, "file".into())
3915                .expect("lookup_child failed");
3916
3917            // sync should delegate to zxio and succeed
3918            reg_node
3919                .entry
3920                .node
3921                .ops()
3922                .sync(&reg_node.entry.node, &current_task)
3923                .expect("sync failed");
3924        })
3925        .await;
3926        fixture.close().await;
3927    }
3928
    #[::fuchsia::test]
    async fn test_msync_propagates_to_fxfs() {
        // Verifies that msync(MS_SYNC) on a MAP_SHARED mapping of a RemoteFs
        // file reaches the backing Fxfs instance: the fixture's
        // pre_commit_hook counter must advance across the msync call.
        use crate::mm::MemoryAccessor;
        use crate::mm::syscalls::{sys_mmap, sys_msync};
        use crate::vfs::FdFlags;
        use starnix_uapi::user_address::UserAddress;
        use starnix_uapi::{MAP_SHARED, MS_SYNC, PROT_READ, PROT_WRITE};
        use std::sync::atomic::{AtomicUsize, Ordering};

        // Counter to track Fxfs transactions; bumped once per transaction by
        // the pre_commit_hook installed below.
        let commit_count = Arc::new(AtomicUsize::new(0));
        let commit_count_clone = commit_count.clone();

        // Open fixture with pre_commit_hook so commits are observable.
        let fixture = TestFixture::open(
            DeviceHolder::new(FakeDevice::new(1024 * 1024, 512)),
            TestFixtureOptions {
                format: true,
                as_blob: false,
                encrypted: true,
                pre_commit_hook: Some(Box::new(move |_transaction| {
                    commit_count_clone.fetch_add(1, Ordering::SeqCst);
                    Ok(())
                })),
            },
        )
        .await;

        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone channel");

        spawn_kernel_and_run(async move |locked, current_task| {
            // Setup RemoteFs
            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
            let fs = RemoteFs::new_fs(
                locked,
                current_task.kernel(),
                client,
                FileSystemOptions { source: FlyByteStr::new(b"/test"), ..Default::default() },
                rights,
            )
            .expect("new_fs");
            let ns = Namespace::new(fs);
            let root = ns.root();

            // Create and Open a file, then register it as an fd so it can be
            // passed to sys_mmap.
            let node = root
                .create_node(
                    locked,
                    &current_task,
                    "test_file".into(),
                    mode!(IFREG, 0o666),
                    DeviceType::NONE,
                )
                .expect("create_node");
            let file_handle = node
                .open(locked, &current_task, OpenFlags::RDWR, AccessCheck::default())
                .expect("open");
            let fd = current_task
                .files
                .add(locked, current_task, file_handle, FdFlags::empty())
                .expect("add file");

            // Do mmap: a 4-page shared, read/write mapping at offset 0.
            let len = *PAGE_SIZE as usize * 4;
            let mmap_addr = sys_mmap(
                locked,
                current_task,
                UserAddress::default(),
                len,
                PROT_READ | PROT_WRITE,
                MAP_SHARED,
                fd,
                0,
            )
            .expect("mmap");

            // Modify memory (multiple pages): write one byte at the start of
            // each page so every page is dirtied and has data to flush.
            for i in 0..4 {
                let data = [0xAAu8; 1];
                current_task
                    .write_memory((mmap_addr + (i * *PAGE_SIZE as usize)).unwrap(), &data)
                    .expect("write memory");
            }

            // Capture commit count before msync so the assertion below only
            // measures commits caused by the msync itself.
            let commits_before_msync = commit_count.load(Ordering::SeqCst);

            // invoke msync()
            sys_msync(locked, current_task, mmap_addr, len, MS_SYNC).expect("msync");

            // Verify msync results: at least one Fxfs transaction must have
            // committed while msync was in flight.
            let final_commits = commit_count.load(Ordering::SeqCst);
            assert!(
                final_commits > commits_before_msync,
                "msync should trigger Fxfs transaction. commits: {} -> {}",
                commits_before_msync,
                final_commits
            );
        })
        .await;

        fixture.close().await;
    }
4033}