starnix_core/fs/fuchsia/
remote.rs

1// Copyright 2021 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5use crate::fs::fuchsia::RemoteUnixDomainSocket;
6use crate::fs::fuchsia::remote_volume::RemoteVolume;
7use crate::fs::fuchsia::sync_file::{SyncFence, SyncFile, SyncPoint, Timeline};
8use crate::mm::memory::MemoryObject;
9use crate::mm::{ProtectionFlags, VMEX_RESOURCE};
10use crate::security;
11use crate::task::{CurrentTask, FullCredentials, Kernel};
12use crate::vfs::buffers::{InputBuffer, OutputBuffer, with_iovec_segments};
13use crate::vfs::fsverity::FsVerityState;
14use crate::vfs::socket::{Socket, SocketFile, ZxioBackedSocket};
15use crate::vfs::{
16    Anon, AppendLockGuard, CacheMode, DEFAULT_BYTES_PER_BLOCK, DirectoryEntryType, DirentSink,
17    FallocMode, FileHandle, FileObject, FileOps, FileSystem, FileSystemHandle, FileSystemOps,
18    FileSystemOptions, FsNode, FsNodeHandle, FsNodeInfo, FsNodeOps, FsStr, FsString, SeekTarget,
19    SymlinkTarget, XattrOp, XattrStorage, default_ioctl, default_seek, fileops_impl_directory,
20    fileops_impl_nonseekable, fileops_impl_noop_sync, fileops_impl_seekable, fs_node_impl_not_dir,
21    fs_node_impl_symlink, fs_node_impl_xattr_delegate,
22};
23use bstr::ByteSlice;
24use fidl::endpoints::DiscoverableProtocolMarker as _;
25use fuchsia_runtime::UtcInstant;
26use linux_uapi::SYNC_IOC_MAGIC;
27use once_cell::sync::OnceCell;
28use starnix_crypt::EncryptionKeyId;
29use starnix_logging::{CATEGORY_STARNIX_MM, impossible_error, log_warn, trace_duration};
30use starnix_sync::{
31    FileOpsCore, LockEqualOrBefore, Locked, Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard,
32    Unlocked,
33};
34use starnix_syscalls::{SyscallArg, SyscallResult};
35use starnix_types::vfs::default_statfs;
36use starnix_uapi::auth::FsCred;
37use starnix_uapi::device_type::DeviceType;
38use starnix_uapi::errors::Errno;
39use starnix_uapi::file_mode::FileMode;
40use starnix_uapi::mount_flags::MountFlags;
41use starnix_uapi::open_flags::OpenFlags;
42use starnix_uapi::{
43    __kernel_fsid_t, errno, error, from_status_like_fdio, fsverity_descriptor, ino_t, mode, off_t,
44    statfs,
45};
46use std::mem::MaybeUninit;
47use std::sync::Arc;
48use syncio::zxio::{
49    ZXIO_NODE_PROTOCOL_DIRECTORY, ZXIO_NODE_PROTOCOL_FILE, ZXIO_NODE_PROTOCOL_SYMLINK,
50    ZXIO_OBJECT_TYPE_DATAGRAM_SOCKET, ZXIO_OBJECT_TYPE_DIR, ZXIO_OBJECT_TYPE_FILE,
51    ZXIO_OBJECT_TYPE_NONE, ZXIO_OBJECT_TYPE_PACKET_SOCKET, ZXIO_OBJECT_TYPE_RAW_SOCKET,
52    ZXIO_OBJECT_TYPE_STREAM_SOCKET, ZXIO_OBJECT_TYPE_SYNCHRONOUS_DATAGRAM_SOCKET, zxio_node_attr,
53};
54use syncio::{
55    AllocateMode, DirentIterator, SelinuxContextAttr, XattrSetMode, ZXIO_ROOT_HASH_LENGTH, Zxio,
56    ZxioDirent, ZxioOpenOptions, zxio_fsverity_descriptor_t, zxio_node_attr_has_t,
57    zxio_node_attributes_t,
58};
59use zx::{Counter, HandleBased};
60use {
61    fidl_fuchsia_io as fio, fidl_fuchsia_starnix_binder as fbinder,
62    fidl_fuchsia_unknown as funknown,
63};
64
65pub fn new_remote_fs(
66    locked: &mut Locked<Unlocked>,
67    current_task: &CurrentTask,
68    options: FileSystemOptions,
69) -> Result<FileSystemHandle, Errno> {
70    let kernel = current_task.kernel();
71    let requested_path = std::str::from_utf8(&options.source)
72        .map_err(|_| errno!(EINVAL, "source path is not utf8"))?;
73    let mut create_flags =
74        fio::PERM_READABLE | fio::Flags::FLAG_MAYBE_CREATE | fio::Flags::PROTOCOL_DIRECTORY;
75    if !options.flags.contains(MountFlags::RDONLY) {
76        create_flags |= fio::PERM_WRITABLE;
77    }
78    let (root_proxy, subdir) = kernel.open_ns_dir(requested_path, create_flags)?;
79
80    let subdir = if subdir.is_empty() { ".".to_string() } else { subdir };
81    let mut open_rights = fio::PERM_READABLE;
82    if !options.flags.contains(MountFlags::RDONLY) {
83        open_rights |= fio::PERM_WRITABLE;
84    }
85    let mut subdir_options = options;
86    subdir_options.source = subdir.into();
87    create_remotefs_filesystem(locked, kernel, &root_proxy, subdir_options, open_rights)
88}
89
90/// Create a filesystem to access the content of the fuchsia directory available at `fs_src` inside
91/// `pkg`.
92pub fn create_remotefs_filesystem<L>(
93    locked: &mut Locked<L>,
94    kernel: &Kernel,
95    root: &fio::DirectorySynchronousProxy,
96    options: FileSystemOptions,
97    rights: fio::Flags,
98) -> Result<FileSystemHandle, Errno>
99where
100    L: LockEqualOrBefore<FileOpsCore>,
101{
102    let root = syncio::directory_open_directory_async(
103        root,
104        std::str::from_utf8(&options.source)
105            .map_err(|_| errno!(EINVAL, "source path is not utf8"))?,
106        rights,
107    )
108    .map_err(|e| errno!(EIO, format!("Failed to open root: {e}")))?;
109    RemoteFs::new_fs(locked, kernel, root.into_channel(), options, rights)
110}
111
/// `FileSystemOps` implementation backed by a remote fuchsia.io directory.
pub struct RemoteFs {
    // If true, trust the remote file system's IDs (which requires that the remote file system does
    // not span mounts).  This must be true to properly support hard links.  If this is false, the
    // same node can end up having different IDs as it leaves and reenters the node cache.
    // TODO(https://fxbug.dev/42081972): At the time of writing, package directories do not have
    // unique IDs so this *must* be false in that case.
    use_remote_ids: bool,

    // Synchronous connection to the remote root directory, used for filesystem-wide
    // operations such as `QueryFilesystem` (see `statfs`).
    root_proxy: fio::DirectorySynchronousProxy,

    // The rights used for the root node.
    root_rights: fio::Flags,
}
125
126impl RemoteFs {
127    /// Returns a reference to a RemoteFs given a reference to a FileSystem.
128    ///
129    /// # Panics
130    ///
131    /// This will panic if `fs`'s ops aren't `RemoteFs`, so this should only be called when this is
132    /// known to be the case.
133    fn from_fs(fs: &FileSystem) -> &RemoteFs {
134        if let Some(remote_vol) = fs.downcast_ops::<RemoteVolume>() {
135            remote_vol.remotefs()
136        } else {
137            fs.downcast_ops::<RemoteFs>().unwrap()
138        }
139    }
140}
141
// Filesystem magic value reported via statfs for remote filesystems ("f.io" read big-endian).
const REMOTE_FS_MAGIC: u32 = u32::from_be_bytes(*b"f.io");
// Ioctl numbers used together with `SYNC_IOC_MAGIC` for the Linux sync_file API
// (presumably matching linux/sync_file.h; handled by ioctl code elsewhere in this file).
const SYNC_IOC_FILE_INFO: u8 = 4;
const SYNC_IOC_MERGE: u8 = 3;
145
impl FileSystemOps for RemoteFs {
    /// Reports filesystem statistics by querying the remote over FIDL, falling back to
    /// synthetic defaults when the remote does not support `QueryFilesystem`.
    fn statfs(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        _fs: &FileSystem,
        _current_task: &CurrentTask,
    ) -> Result<statfs, Errno> {
        let (status, info) = self
            .root_proxy
            .query_filesystem(zx::MonotonicInstant::INFINITE)
            .map_err(|_| errno!(EIO))?;
        // Not all remote filesystems support `QueryFilesystem`, many return ZX_ERR_NOT_SUPPORTED.
        if status == 0 {
            if let Some(info) = info {
                // Only derive block counts when the block size is known; conversions to the
                // signed statfs fields saturate at i64::MAX instead of overflowing.
                let (total_blocks, free_blocks) = if info.block_size > 0 {
                    (
                        (info.total_bytes / u64::from(info.block_size))
                            .try_into()
                            .unwrap_or(i64::MAX),
                        ((info.total_bytes.saturating_sub(info.used_bytes))
                            / u64::from(info.block_size))
                        .try_into()
                        .unwrap_or(i64::MAX),
                    )
                } else {
                    (0, 0)
                };

                // Split the 64-bit filesystem id across the two 32-bit words of the
                // kernel fsid structure (low word first).
                let fsid = __kernel_fsid_t {
                    val: [
                        (info.fs_id & 0xffffffff) as i32,
                        ((info.fs_id >> 32) & 0xffffffff) as i32,
                    ],
                };

                return Ok(statfs {
                    f_type: info.fs_type as i64,
                    f_bsize: info.block_size.into(),
                    f_blocks: total_blocks,
                    f_bfree: free_blocks,
                    f_bavail: free_blocks,
                    f_files: info.total_nodes.try_into().unwrap_or(i64::MAX),
                    f_ffree: (info.total_nodes.saturating_sub(info.used_nodes))
                        .try_into()
                        .unwrap_or(i64::MAX),
                    f_fsid: fsid,
                    f_namelen: info.max_filename_size.try_into().unwrap_or(0),
                    f_frsize: info.block_size.into(),
                    ..statfs::default()
                });
            }
        }
        // Remote couldn't (or wouldn't) report stats; synthesize defaults with our magic.
        Ok(default_statfs(REMOTE_FS_MAGIC))
    }

    fn name(&self) -> &'static FsStr {
        "remotefs".into()
    }

    // Node ids come from the remote filesystem when `use_remote_ids` is set; see the field
    // comment on `RemoteFs` for when that is safe.
    fn uses_external_node_ids(&self) -> bool {
        self.use_remote_ids
    }

    /// Renames `old_name` in `old_parent` to `new_name` in `new_parent` via the remote
    /// protocol. Both parents must be `RemoteNode`s; otherwise this is a cross-device
    /// rename and fails with `EXDEV`.
    fn rename(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        _fs: &FileSystem,
        current_task: &CurrentTask,
        old_parent: &FsNodeHandle,
        old_name: &FsStr,
        new_parent: &FsNodeHandle,
        new_name: &FsStr,
        _renamed: &FsNodeHandle,
        _replaced: Option<&FsNodeHandle>,
    ) -> Result<(), Errno> {
        // Renames should fail if the src or target directory is encrypted and locked.
        old_parent.fail_if_locked(current_task)?;
        new_parent.fail_if_locked(current_task)?;

        let Some(old_parent) = old_parent.downcast_ops::<RemoteNode>() else {
            return error!(EXDEV);
        };
        let Some(new_parent) = new_parent.downcast_ops::<RemoteNode>() else {
            return error!(EXDEV);
        };
        old_parent
            .zxio
            .rename(get_name_str(old_name)?, &new_parent.zxio, get_name_str(new_name)?)
            .map_err(|status| from_status_like_fdio!(status))
    }

    // Timestamps are maintained by the remote filesystem, not by the local VFS layer.
    fn manages_timestamps(&self) -> bool {
        true
    }
}
241
impl RemoteFs {
    /// Connects to the remote root directory over `root`, serving the resulting connection
    /// on `server_end`, and decides whether the remote's node ids can be trusted.
    pub(super) fn new(
        root: zx::Channel,
        server_end: zx::Channel,
        root_rights: fio::Flags,
    ) -> Result<RemoteFs, Errno> {
        // See if open3 works.  We assume that if open3 works on the root, it will work for all
        // descendent nodes in this filesystem.  At the time of writing, this is true for Fxfs.
        let root_proxy = fio::DirectorySynchronousProxy::new(root);
        root_proxy
            .open(
                ".",
                fio::Flags::PROTOCOL_DIRECTORY
                    | fio::PERM_READABLE
                    | fio::Flags::PERM_INHERIT_WRITE
                    | fio::Flags::PERM_INHERIT_EXECUTE
                    | fio::Flags::FLAG_SEND_REPRESENTATION,
                &fio::Options {
                    attributes: Some(fio::NodeAttributesQuery::ID),
                    ..Default::default()
                },
                server_end,
            )
            .map_err(|_| errno!(EIO))?;
        // Use remote IDs if the filesystem is Fxfs which we know will give us unique IDs.  Hard
        // links need to resolve to the same underlying FsNode, so we can only support hard links if
        // the remote file system will give us unique IDs.  The IDs are also used as the key in
        // caches, so we can't use remote IDs if the remote filesystem is not guaranteed to provide
        // unique IDs, or if the remote filesystem spans multiple filesystems.
        let (status, info) =
            root_proxy.query_filesystem(zx::MonotonicInstant::INFINITE).map_err(|_| errno!(EIO))?;
        // Be tolerant of errors here; many filesystems return `ZX_ERR_NOT_SUPPORTED`.
        let use_remote_ids = status == 0
            && info
                .map(|i| i.fs_type == fidl_fuchsia_fs::VfsType::Fxfs.into_primitive())
                .unwrap_or(false);
        Ok(RemoteFs { use_remote_ids, root_proxy, root_rights })
    }

    /// Builds a `FileSystem` rooted at the remote directory `root`.
    ///
    /// The mount is forced read-only when `rights` lacks write permission. The root node's
    /// id comes from the remote when remote ids are trusted (see `RemoteFs::new`), and is
    /// allocated locally otherwise.
    pub fn new_fs<L>(
        locked: &mut Locked<L>,
        kernel: &Kernel,
        root: zx::Channel,
        mut options: FileSystemOptions,
        rights: fio::Flags,
    ) -> Result<FileSystemHandle, Errno>
    where
        L: LockEqualOrBefore<FileOpsCore>,
    {
        let (client_end, server_end) = zx::Channel::create();
        let remotefs = RemoteFs::new(root, server_end, rights)?;
        // Request the id and wrapping key id alongside the open so the root's FsNodeInfo
        // can be populated without an extra round-trip.
        let mut attrs = zxio_node_attributes_t {
            has: zxio_node_attr_has_t { id: true, wrapping_key_id: true, ..Default::default() },
            ..Default::default()
        };
        let (remote_node, node_id) =
            match Zxio::create_with_on_representation(client_end.into(), Some(&mut attrs)) {
                Err(status) => return Err(from_status_like_fdio!(status)),
                Ok(zxio) => (RemoteNode { zxio }, attrs.id),
            };

        if !rights.contains(fio::PERM_WRITABLE) {
            options.flags |= MountFlags::RDONLY;
        }
        let use_remote_ids = remotefs.use_remote_ids;
        let fs = FileSystem::new(
            locked,
            kernel,
            CacheMode::Cached(kernel.fs_cache_config()),
            remotefs,
            options,
        )?;

        let mut info = FsNodeInfo::new(mode!(IFDIR, 0o777), FsCred::root());
        if attrs.has.wrapping_key_id {
            info.wrapping_key_id = Some(attrs.wrapping_key_id);
        }

        if use_remote_ids {
            fs.create_root_with_info(node_id, remote_node, info);
        } else {
            let root_ino = fs.allocate_ino();
            fs.create_root_with_info(root_ino, remote_node, info);
        }

        Ok(fs)
    }

    /// Whether this filesystem trusts and reuses the remote filesystem's node ids.
    pub fn use_remote_ids(&self) -> bool {
        self.use_remote_ids
    }
}
334
/// `FsNodeOps` implementation for a node backed by a remote fuchsia.io object.
pub struct RemoteNode {
    /// The underlying Zircon I/O object for this remote node.
    ///
    /// We delegate to the zxio library for actually doing I/O with remote
    /// objects, including fuchsia.io.Directory and fuchsia.io.File objects.
    /// This structure lets us share code with FDIO and other Fuchsia clients.
    zxio: syncio::Zxio,
}
343
344impl RemoteNode {
345    pub fn new(zxio: syncio::Zxio) -> Self {
346        Self { zxio }
347    }
348}
349
350/// Create a file handle from a zx::NullableHandle.
351///
352/// The handle must be a channel, socket, vmo or debuglog object.  If the handle is a channel, then
353/// the channel must implement the `fuchsia.unknown/Queryable` protocol.
354///
355/// The resulting object will be owned by root, and will have permissions derived from the `flags`
356/// used to open this object. This is not the same as the permissions set if the object was created
357/// using Starnix itself. We use this mainly for interfacing with objects created outside of Starnix
358/// where these flags represent the desired permissions already.
359pub fn new_remote_file<L>(
360    locked: &mut Locked<L>,
361    current_task: &CurrentTask,
362    handle: zx::NullableHandle,
363    flags: OpenFlags,
364) -> Result<FileHandle, Errno>
365where
366    L: LockEqualOrBefore<FileOpsCore>,
367{
368    let remote_creds = current_task.full_current_creds();
369    let (attrs, ops) = remote_file_attrs_and_ops(current_task, handle.into(), remote_creds)?;
370    let mut rights = fio::Flags::empty();
371    if flags.can_read() {
372        rights |= fio::PERM_READABLE;
373    }
374    if flags.can_write() {
375        rights |= fio::PERM_WRITABLE;
376    }
377    let mode = get_mode(&attrs, rights);
378    // TODO: https://fxbug.dev/407611229 - Give these nodes valid labels.
379    let mut info = FsNodeInfo::new(mode, FsCred::root());
380    update_info_from_attrs(&mut info, &attrs);
381    Ok(Anon::new_private_file_extended(locked, current_task, ops, flags, "[fuchsia:remote]", info))
382}
383
384// Create a FileOps from a zx::NullableHandle.
385//
386// The handle must satisfy the same requirements as `new_remote_file`.
387pub fn new_remote_file_ops(
388    current_task: &CurrentTask,
389    handle: zx::NullableHandle,
390    creds: FullCredentials,
391) -> Result<Box<dyn FileOps>, Errno> {
392    let (_, ops) = remote_file_attrs_and_ops(current_task, handle, creds)?;
393    Ok(ops)
394}
395
/// Inspects `handle` and builds the appropriate `FileOps` for it, together with the node
/// attributes used to synthesize the file's mode and info.
///
/// Channels are first probed for Starnix-specific protocols; counters get a dedicated
/// wrapper; everything else is handed to zxio and dispatched on its reported object type.
fn remote_file_attrs_and_ops(
    current_task: &CurrentTask,
    mut handle: zx::NullableHandle,
    remote_creds: FullCredentials,
) -> Result<(zxio_node_attr, Box<dyn FileOps>), Errno> {
    let handle_type =
        handle.basic_info().map_err(|status| from_status_like_fdio!(status))?.object_type;

    // Check whether the channel implements a Starnix specific protocol.
    if handle_type == zx::ObjectType::CHANNEL {
        let channel = zx::Channel::from(handle);
        let queryable = funknown::QueryableSynchronousProxy::new(channel);
        if let Ok(name) = queryable.query(zx::MonotonicInstant::INFINITE) {
            if name == fbinder::UnixDomainSocketMarker::PROTOCOL_NAME.as_bytes() {
                let socket_ops =
                    RemoteUnixDomainSocket::new(queryable.into_channel(), remote_creds)?;
                let socket = Socket::new_with_ops(Box::new(socket_ops))?;
                let file_ops = SocketFile::new(socket);
                // The remote has no attributes for this object; synthesize a world-accessible
                // socket mode.
                let attr = zxio_node_attr {
                    has: zxio_node_attr_has_t { mode: true, ..zxio_node_attr_has_t::default() },
                    mode: 0o777 | FileMode::IFSOCK.bits(),
                    ..zxio_node_attr::default()
                };
                return Ok((attr, file_ops));
            }
        };
        // Not a Starnix protocol: recover the raw handle so zxio can take over below.
        handle = queryable.into_channel().into_handle();
    } else if handle_type == zx::ObjectType::COUNTER {
        let attr = zxio_node_attr::default();
        let file_ops = Box::new(RemoteCounter::new(handle.into()));
        return Ok((attr, file_ops));
    }

    // Otherwise, use zxio based objects.
    let zxio = Zxio::create(handle).map_err(|status| from_status_like_fdio!(status))?;
    let mut attrs = zxio
        .attr_get(zxio_node_attr_has_t {
            protocols: true,
            abilities: true,
            content_size: true,
            storage_size: true,
            link_count: true,
            object_type: true,
            ..Default::default()
        })
        .map_err(|status| from_status_like_fdio!(status))?;
    // Dispatch on both the kernel object type and the type zxio reports: some kernel
    // objects (vmo, debuglog, socket) are handled regardless of what zxio says.
    let ops: Box<dyn FileOps> = match (handle_type, attrs.object_type) {
        (_, ZXIO_OBJECT_TYPE_DIR) => Box::new(RemoteDirectoryObject::new(zxio)),
        (zx::ObjectType::VMO, _)
        | (zx::ObjectType::DEBUGLOG, _)
        | (_, ZXIO_OBJECT_TYPE_FILE)
        | (_, ZXIO_OBJECT_TYPE_NONE) => Box::new(RemoteFileObject::new(zxio)),
        (zx::ObjectType::SOCKET, _)
        | (_, ZXIO_OBJECT_TYPE_SYNCHRONOUS_DATAGRAM_SOCKET)
        | (_, ZXIO_OBJECT_TYPE_DATAGRAM_SOCKET)
        | (_, ZXIO_OBJECT_TYPE_STREAM_SOCKET)
        | (_, ZXIO_OBJECT_TYPE_RAW_SOCKET)
        | (_, ZXIO_OBJECT_TYPE_PACKET_SOCKET) => {
            let socket_ops = ZxioBackedSocket::new_with_zxio(current_task, zxio);
            let socket = Socket::new_with_ops(Box::new(socket_ops))?;
            // Force a socket mode so callers see IFSOCK regardless of remote attributes.
            attrs.has.mode = true;
            attrs.mode = FileMode::IFSOCK.bits();
            SocketFile::new(socket)
        }
        _ => return error!(ENOTSUP),
    };
    Ok((attrs, ops))
}
464
/// Creates a Starnix file object wrapping a Zircon socket.
///
/// Thin convenience wrapper over [`new_remote_file`]; the resulting file has permissions
/// derived from `flags` (see that function for details).
pub fn create_fuchsia_pipe<L>(
    locked: &mut Locked<L>,
    current_task: &CurrentTask,
    socket: zx::Socket,
    flags: OpenFlags,
) -> Result<FileHandle, Errno>
where
    L: LockEqualOrBefore<FileOpsCore>,
{
    new_remote_file(locked, current_task, socket.into(), flags)
}
476
/// Fetches fresh attributes from the remote for `zxio` and folds them into the cached
/// `FsNodeInfo`, returning a read guard over the refreshed info.
fn fetch_and_refresh_info_impl<'a>(
    zxio: &syncio::Zxio,
    info: &'a RwLock<FsNodeInfo>,
) -> Result<RwLockReadGuard<'a, FsNodeInfo>, Errno> {
    let attrs = zxio
        .attr_get(zxio_node_attr_has_t {
            content_size: true,
            storage_size: true,
            link_count: true,
            modification_time: true,
            change_time: true,
            access_time: true,
            casefold: true,
            wrapping_key_id: true,
            // Forward any pending atime update so the remote can persist it.
            // NOTE(review): this read lock is dropped before the write lock below is taken,
            // so a flag set in between could be cleared without being forwarded — confirm
            // whether that race is acceptable.
            pending_access_time_update: info.read().pending_time_access_update,
            ..Default::default()
        })
        .map_err(|status| from_status_like_fdio!(status))?;
    let mut info = info.write();
    update_info_from_attrs(&mut info, &attrs);
    // The pending atime update (if any) has just been forwarded to the remote.
    info.pending_time_access_update = false;
    // Downgrade instead of unlock-and-relock so the returned guard observes exactly the
    // state written above.
    Ok(RwLockWriteGuard::downgrade(info))
}
500
501// Update info from attrs if they are set.
502pub fn update_info_from_attrs(info: &mut FsNodeInfo, attrs: &zxio_node_attributes_t) {
503    // TODO - store these in FsNodeState and convert on fstat
504    if attrs.has.content_size {
505        info.size = attrs.content_size.try_into().unwrap_or(std::usize::MAX);
506    }
507    if attrs.has.storage_size {
508        info.blocks = usize::try_from(attrs.storage_size)
509            .unwrap_or(std::usize::MAX)
510            .div_ceil(DEFAULT_BYTES_PER_BLOCK)
511    }
512    info.blksize = DEFAULT_BYTES_PER_BLOCK;
513    if attrs.has.link_count {
514        info.link_count = attrs.link_count.try_into().unwrap_or(std::usize::MAX);
515    }
516    if attrs.has.modification_time {
517        info.time_modify =
518            UtcInstant::from_nanos(attrs.modification_time.try_into().unwrap_or(i64::MAX));
519    }
520    if attrs.has.change_time {
521        info.time_status_change =
522            UtcInstant::from_nanos(attrs.change_time.try_into().unwrap_or(i64::MAX));
523    }
524    if attrs.has.access_time {
525        info.time_access = UtcInstant::from_nanos(attrs.access_time.try_into().unwrap_or(i64::MAX));
526    }
527    if attrs.has.wrapping_key_id {
528        info.wrapping_key_id = Some(attrs.wrapping_key_id);
529    }
530}
531
532fn get_mode(attrs: &zxio_node_attributes_t, rights: fio::Flags) -> FileMode {
533    if attrs.protocols & ZXIO_NODE_PROTOCOL_SYMLINK != 0 {
534        // We don't set the mode for symbolic links , so we synthesize it instead.
535        FileMode::IFLNK | FileMode::ALLOW_ALL
536    } else if attrs.has.mode {
537        // If the filesystem supports POSIX mode bits, use that directly.
538        FileMode::from_bits(attrs.mode)
539    } else {
540        // The filesystem doesn't support the `mode` attribute, so synthesize it from the protocols
541        // this node supports, and the rights used to open it.
542        let is_directory =
543            attrs.protocols & ZXIO_NODE_PROTOCOL_DIRECTORY == ZXIO_NODE_PROTOCOL_DIRECTORY;
544        let mode = if is_directory { FileMode::IFDIR } else { FileMode::IFREG };
545        let mut permissions = FileMode::EMPTY;
546        if rights.contains(fio::PERM_READABLE) {
547            permissions |= FileMode::IRUSR;
548        }
549        if rights.contains(fio::PERM_WRITABLE) {
550            permissions |= FileMode::IWUSR;
551        }
552        if rights.contains(fio::PERM_EXECUTABLE) {
553            permissions |= FileMode::IXUSR;
554        }
555        // Make sure the same permissions are granted to user, group, and other.
556        permissions |= FileMode::from_bits((permissions.bits() >> 3) | (permissions.bits() >> 6));
557        mode | permissions
558    }
559}
560
561fn get_name_str<'a>(name_bytes: &'a FsStr) -> Result<&'a str, Errno> {
562    std::str::from_utf8(name_bytes.as_ref()).map_err(|_| {
563        log_warn!("bad utf8 in pathname! remote filesystems can't handle this");
564        errno!(EINVAL)
565    })
566}
567
568impl XattrStorage for syncio::Zxio {
569    fn get_xattr(
570        &self,
571        _locked: &mut Locked<FileOpsCore>,
572        name: &FsStr,
573    ) -> Result<FsString, Errno> {
574        Ok(self
575            .xattr_get(name)
576            .map_err(|status| match status {
577                zx::Status::NOT_FOUND => errno!(ENODATA),
578                status => from_status_like_fdio!(status),
579            })?
580            .into())
581    }
582
583    fn set_xattr(
584        &self,
585        _locked: &mut Locked<FileOpsCore>,
586        name: &FsStr,
587        value: &FsStr,
588        op: XattrOp,
589    ) -> Result<(), Errno> {
590        let mode = match op {
591            XattrOp::Set => XattrSetMode::Set,
592            XattrOp::Create => XattrSetMode::Create,
593            XattrOp::Replace => XattrSetMode::Replace,
594        };
595
596        self.xattr_set(name, value, mode).map_err(|status| match status {
597            zx::Status::NOT_FOUND => errno!(ENODATA),
598            status => from_status_like_fdio!(status),
599        })
600    }
601
602    fn remove_xattr(&self, _locked: &mut Locked<FileOpsCore>, name: &FsStr) -> Result<(), Errno> {
603        self.xattr_remove(name).map_err(|status| match status {
604            zx::Status::NOT_FOUND => errno!(ENODATA),
605            _ => from_status_like_fdio!(status),
606        })
607    }
608
609    fn list_xattrs(&self, _locked: &mut Locked<FileOpsCore>) -> Result<Vec<FsString>, Errno> {
610        self.xattr_list()
611            .map(|attrs| attrs.into_iter().map(FsString::new).collect::<Vec<_>>())
612            .map_err(|status| from_status_like_fdio!(status))
613    }
614}
615
616impl FsNodeOps for RemoteNode {
617    fs_node_impl_xattr_delegate!(self, self.zxio);
618
619    fn create_file_ops(
620        &self,
621        _locked: &mut Locked<FileOpsCore>,
622        node: &FsNode,
623        current_task: &CurrentTask,
624        flags: OpenFlags,
625    ) -> Result<Box<dyn FileOps>, Errno> {
626        {
627            // It is safe to read the cached node info here because the `wrapping_key_id` is
628            // fetched when the node is first opened, and updated when set. We don't expect this to
629            // change out from under Starnix.
630            let node_info = node.info();
631            if node_info.mode.is_dir() {
632                if let Some(wrapping_key_id) = node_info.wrapping_key_id {
633                    if flags.can_write() {
634                        // Locked encrypted directories cannot be opened with write access.
635                        let crypt_service =
636                            node.fs().crypt_service().ok_or_else(|| errno!(ENOKEY))?;
637                        if !crypt_service.contains_key(EncryptionKeyId::from(wrapping_key_id)) {
638                            return error!(ENOKEY);
639                        }
640                    }
641                }
642                // For directories we need to deep-clone the connection because we rely on the seek
643                // offset.
644                return Ok(Box::new(RemoteDirectoryObject::new(
645                    self.zxio.deep_clone().map_err(|status| from_status_like_fdio!(status))?,
646                )));
647            }
648        }
649
650        // Locked encrypted files cannot be opened.
651        node.fail_if_locked(current_task)?;
652
653        // fsverity files cannot be opened in write mode, including while building.
654        if flags.can_write() {
655            node.fsverity.lock().check_writable()?;
656        }
657
658        // For files we can clone the `Zxio` because we don't rely on any per-connection state
659        // (i.e. the file offset).
660        Ok(Box::new(RemoteFileObject::new(self.zxio.clone())))
661    }
662
663    fn mknod(
664        &self,
665        _locked: &mut Locked<FileOpsCore>,
666        node: &FsNode,
667        current_task: &CurrentTask,
668        name: &FsStr,
669        mode: FileMode,
670        dev: DeviceType,
671        owner: FsCred,
672    ) -> Result<FsNodeHandle, Errno> {
673        node.fail_if_locked(current_task)?;
674        let name = get_name_str(name)?;
675
676        let fs = node.fs();
677        let fs_ops = RemoteFs::from_fs(&fs);
678
679        let zxio;
680        let mut node_id;
681        if !(mode.is_reg() || mode.is_chr() || mode.is_blk() || mode.is_fifo() || mode.is_sock()) {
682            return error!(EINVAL, name);
683        }
684        let mut attrs = zxio_node_attributes_t {
685            has: zxio_node_attr_has_t { id: true, wrapping_key_id: true, ..Default::default() },
686            ..Default::default()
687        };
688        zxio = self
689            .zxio
690            .open(
691                name,
692                fio::Flags::FLAG_MUST_CREATE
693                    | fio::Flags::PROTOCOL_FILE
694                    | fio::PERM_READABLE
695                    | fio::PERM_WRITABLE,
696                ZxioOpenOptions::new(
697                    Some(&mut attrs),
698                    Some(zxio_node_attributes_t {
699                        mode: mode.bits(),
700                        uid: owner.uid,
701                        gid: owner.gid,
702                        rdev: dev.bits(),
703                        has: zxio_node_attr_has_t {
704                            mode: true,
705                            uid: true,
706                            gid: true,
707                            rdev: true,
708                            ..Default::default()
709                        },
710                        ..Default::default()
711                    }),
712                ),
713            )
714            .map_err(|status| from_status_like_fdio!(status, name))?;
715        node_id = attrs.id;
716
717        let ops = if mode.is_reg() {
718            Box::new(RemoteNode { zxio }) as Box<dyn FsNodeOps>
719        } else {
720            Box::new(RemoteSpecialNode { zxio }) as Box<dyn FsNodeOps>
721        };
722
723        if !fs_ops.use_remote_ids {
724            node_id = fs.allocate_ino();
725        }
726
727        let mut node_info = FsNodeInfo { rdev: dev, ..FsNodeInfo::new(mode, owner) };
728        if attrs.has.wrapping_key_id {
729            node_info.wrapping_key_id = Some(attrs.wrapping_key_id);
730        }
731
732        let child = fs.create_node(node_id, ops, node_info);
733        Ok(child)
734    }
735
736    fn mkdir(
737        &self,
738        _locked: &mut Locked<FileOpsCore>,
739        node: &FsNode,
740        current_task: &CurrentTask,
741        name: &FsStr,
742        mode: FileMode,
743        owner: FsCred,
744    ) -> Result<FsNodeHandle, Errno> {
745        node.fail_if_locked(current_task)?;
746        let name = get_name_str(name)?;
747
748        let fs = node.fs();
749        let fs_ops = RemoteFs::from_fs(&fs);
750
751        let zxio;
752        let mut node_id;
753        let mut attrs = zxio_node_attributes_t {
754            has: zxio_node_attr_has_t { id: true, wrapping_key_id: true, ..Default::default() },
755            ..Default::default()
756        };
757        zxio = self
758            .zxio
759            .open(
760                name,
761                fio::Flags::FLAG_MUST_CREATE
762                    | fio::Flags::PROTOCOL_DIRECTORY
763                    | fio::PERM_READABLE
764                    | fio::PERM_WRITABLE,
765                ZxioOpenOptions::new(
766                    Some(&mut attrs),
767                    Some(zxio_node_attributes_t {
768                        mode: mode.bits(),
769                        uid: owner.uid,
770                        gid: owner.gid,
771                        has: zxio_node_attr_has_t {
772                            mode: true,
773                            uid: true,
774                            gid: true,
775                            ..Default::default()
776                        },
777                        ..Default::default()
778                    }),
779                ),
780            )
781            .map_err(|status| from_status_like_fdio!(status, name))?;
782        node_id = attrs.id;
783
784        let ops = RemoteNode { zxio };
785        if !fs_ops.use_remote_ids {
786            node_id = fs.allocate_ino();
787        }
788
789        let mut node_info = FsNodeInfo::new(mode, owner);
790        if attrs.has.wrapping_key_id {
791            node_info.wrapping_key_id = Some(attrs.wrapping_key_id);
792        }
793
794        let child = fs.create_node(node_id, ops, node_info);
795        Ok(child)
796    }
797
    /// Looks up `name` in this remote directory, returning the (possibly
    /// cached) `FsNode` for the child.  A single open call fetches the child's
    /// full attribute set so the node can be constructed without extra round
    /// trips.
    fn lookup(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        node: &FsNode,
        current_task: &CurrentTask,
        name: &FsStr,
    ) -> Result<FsNodeHandle, Errno> {
        let name = get_name_str(name)?;

        let fs = node.fs();
        let fs_ops = RemoteFs::from_fs(&fs);

        // Request every attribute we need to build the FsNodeInfo in one shot.
        let mut attrs = zxio_node_attributes_t {
            has: zxio_node_attr_has_t {
                protocols: true,
                abilities: true,
                mode: true,
                uid: true,
                gid: true,
                rdev: true,
                id: true,
                wrapping_key_id: true,
                fsverity_enabled: true,
                casefold: true,
                modification_time: true,
                change_time: true,
                access_time: true,
                ..Default::default()
            },
            ..Default::default()
        };
        let mut options = ZxioOpenOptions::new(Some(&mut attrs), None);
        // Stack buffer that the open call fills with the child's SELinux
        // context attribute (only requested when the fs is xattr-labeled).
        let mut selinux_context_buffer =
            MaybeUninit::<[u8; fio::MAX_SELINUX_CONTEXT_ATTRIBUTE_LEN as usize]>::uninit();
        let mut cached_context = security::fs_is_xattr_labeled(node.fs())
            .then(|| SelinuxContextAttr::new(&mut selinux_context_buffer));
        if let Some(buffer) = &mut cached_context {
            options = options.with_selinux_context_read(buffer).unwrap();
        }
        let zxio = self
            .zxio
            .open(name, fs_ops.root_rights, options)
            .map_err(|status| from_status_like_fdio!(status, name))?;
        // Keep a second handle around: if the node is already cached as a
        // symlink we refresh its connection below.
        let symlink_zxio = zxio.clone();
        let mode = get_mode(&attrs, fs_ops.root_rights);
        let node_id = if fs_ops.use_remote_ids {
            // Remote-id mode requires the server to report a real inode number.
            if attrs.id == fio::INO_UNKNOWN {
                return error!(ENOTSUP);
            }
            attrs.id
        } else {
            fs.allocate_ino()
        };
        let owner = FsCred { uid: attrs.uid, gid: attrs.gid };
        let rdev = DeviceType::from_bits(attrs.rdev);
        let fsverity_enabled = attrs.fsverity_enabled;
        // fsverity should not be enabled for non-file nodes.
        if fsverity_enabled && (attrs.protocols & ZXIO_NODE_PROTOCOL_FILE == 0) {
            return error!(EINVAL);
        }
        let casefold = attrs.casefold;
        // Remote timestamps are u64 nanoseconds; saturate rather than wrap
        // when converting to the signed UtcInstant representation.
        let time_modify =
            UtcInstant::from_nanos(attrs.modification_time.try_into().unwrap_or(i64::MAX));
        let time_status_change =
            UtcInstant::from_nanos(attrs.change_time.try_into().unwrap_or(i64::MAX));
        let time_access = UtcInstant::from_nanos(attrs.access_time.try_into().unwrap_or(i64::MAX));

        // The closure only runs when the node isn't already in the fs cache.
        let node = fs.get_or_create_node(node_id, || {
            // Pick node ops based on the file type encoded in `mode`.
            let ops = if mode.is_lnk() {
                Box::new(RemoteSymlink { zxio: Mutex::new(zxio) }) as Box<dyn FsNodeOps>
            } else if mode.is_reg() || mode.is_dir() {
                Box::new(RemoteNode { zxio }) as Box<dyn FsNodeOps>
            } else {
                Box::new(RemoteSpecialNode { zxio }) as Box<dyn FsNodeOps>
            };
            let wrapping_key_id = attrs.has.wrapping_key_id.then_some(attrs.wrapping_key_id);
            let child = FsNode::new_uncached(
                node_id,
                ops,
                &fs,
                FsNodeInfo {
                    rdev,
                    casefold,
                    time_status_change,
                    time_modify,
                    time_access,
                    wrapping_key_id,
                    ..FsNodeInfo::new(mode, owner)
                },
            );
            if fsverity_enabled {
                *child.fsverity.lock() = FsVerityState::FsVerity;
            }
            if let Some(buffer) = cached_context.as_ref().and_then(|buffer| buffer.get()) {
                // This is valid to fail if we're using mount point labelling or the
                // provided context string is invalid.
                let _ = security::fs_node_notify_security_context(
                    current_task,
                    &child,
                    FsStr::new(buffer),
                );
            }
            Ok(child)
        })?;
        // For a cached symlink node, swap in the freshly opened connection so
        // it doesn't keep using the handle it was created with.
        if let Some(symlink) = node.downcast_ops::<RemoteSymlink>() {
            let mut zxio_guard = symlink.zxio.lock();
            *zxio_guard = symlink_zxio;
        }
        Ok(node)
    }
908
909    fn truncate(
910        &self,
911        _locked: &mut Locked<FileOpsCore>,
912        _guard: &AppendLockGuard<'_>,
913        node: &FsNode,
914        current_task: &CurrentTask,
915        length: u64,
916    ) -> Result<(), Errno> {
917        node.fail_if_locked(current_task)?;
918        self.zxio.truncate(length).map_err(|status| from_status_like_fdio!(status))
919    }
920
921    fn allocate(
922        &self,
923        _locked: &mut Locked<FileOpsCore>,
924        _guard: &AppendLockGuard<'_>,
925        node: &FsNode,
926        current_task: &CurrentTask,
927        mode: FallocMode,
928        offset: u64,
929        length: u64,
930    ) -> Result<(), Errno> {
931        match mode {
932            FallocMode::Allocate { keep_size: false } => {
933                node.fail_if_locked(current_task)?;
934                self.zxio
935                    .allocate(offset, length, AllocateMode::empty())
936                    .map_err(|status| from_status_like_fdio!(status))?;
937                Ok(())
938            }
939            _ => error!(EINVAL),
940        }
941    }
942
    /// Refreshes `info` from the remote node's current attributes and returns
    /// a read guard over the updated snapshot.  Delegates to the shared
    /// `fetch_and_refresh_info_impl` helper.
    fn fetch_and_refresh_info<'a>(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        _node: &FsNode,
        _current_task: &CurrentTask,
        info: &'a RwLock<FsNodeInfo>,
    ) -> Result<RwLockReadGuard<'a, FsNodeInfo>, Errno> {
        fetch_and_refresh_info_impl(&self.zxio, info)
    }
952
953    fn update_attributes(
954        &self,
955        _locked: &mut Locked<FileOpsCore>,
956        _current_task: &CurrentTask,
957        info: &FsNodeInfo,
958        has: zxio_node_attr_has_t,
959    ) -> Result<(), Errno> {
960        // Omit updating creation_time. By definition, there shouldn't be a change in creation_time.
961        let mut mutable_node_attributes = zxio_node_attributes_t {
962            modification_time: info.time_modify.into_nanos() as u64,
963            access_time: info.time_access.into_nanos() as u64,
964            mode: info.mode.bits(),
965            uid: info.uid,
966            gid: info.gid,
967            rdev: info.rdev.bits(),
968            casefold: info.casefold,
969            has,
970            ..Default::default()
971        };
972        if let Some(id) = info.wrapping_key_id {
973            mutable_node_attributes.wrapping_key_id = id;
974        }
975        self.zxio
976            .attr_set(&mutable_node_attributes)
977            .map_err(|status| from_status_like_fdio!(status))
978    }
979
980    fn unlink(
981        &self,
982        _locked: &mut Locked<FileOpsCore>,
983        _node: &FsNode,
984        _current_task: &CurrentTask,
985        name: &FsStr,
986        _child: &FsNodeHandle,
987    ) -> Result<(), Errno> {
988        // We don't care about the _child argument because 1. unlinking already takes the parent's
989        // children lock, so we don't have to worry about conflicts on this path, and 2. the remote
990        // filesystem tracks the link counts so we don't need to update them here.
991        let name = get_name_str(name)?;
992        self.zxio
993            .unlink(name, fio::UnlinkFlags::empty())
994            .map_err(|status| from_status_like_fdio!(status))
995    }
996
997    fn create_symlink(
998        &self,
999        _locked: &mut Locked<FileOpsCore>,
1000        node: &FsNode,
1001        current_task: &CurrentTask,
1002        name: &FsStr,
1003        target: &FsStr,
1004        owner: FsCred,
1005    ) -> Result<FsNodeHandle, Errno> {
1006        node.fail_if_locked(current_task)?;
1007
1008        let name = get_name_str(name)?;
1009        let zxio = self
1010            .zxio
1011            .create_symlink(name, target)
1012            .map_err(|status| from_status_like_fdio!(status))?;
1013
1014        let fs = node.fs();
1015        let fs_ops = RemoteFs::from_fs(&fs);
1016
1017        let node_id = if fs_ops.use_remote_ids {
1018            let attrs = zxio
1019                .attr_get(zxio_node_attr_has_t { id: true, ..Default::default() })
1020                .map_err(|status| from_status_like_fdio!(status))?;
1021            attrs.id
1022        } else {
1023            fs.allocate_ino()
1024        };
1025        let symlink = fs.create_node(
1026            node_id,
1027            RemoteSymlink { zxio: Mutex::new(zxio) },
1028            FsNodeInfo {
1029                size: target.len(),
1030                ..FsNodeInfo::new(FileMode::IFLNK | FileMode::ALLOW_ALL, owner)
1031            },
1032        );
1033        Ok(symlink)
1034    }
1035
1036    fn create_tmpfile(
1037        &self,
1038        node: &FsNode,
1039        _current_task: &CurrentTask,
1040        mode: FileMode,
1041        owner: FsCred,
1042    ) -> Result<FsNodeHandle, Errno> {
1043        let fs = node.fs();
1044        let fs_ops = RemoteFs::from_fs(&fs);
1045
1046        let zxio;
1047        let mut node_id;
1048        if !mode.is_reg() {
1049            return error!(EINVAL);
1050        }
1051        let mut attrs = zxio_node_attributes_t {
1052            has: zxio_node_attr_has_t { id: true, ..Default::default() },
1053            ..Default::default()
1054        };
1055        // `create_tmpfile` is used by O_TMPFILE. Note that
1056        // <https://man7.org/linux/man-pages/man2/open.2.html> states that if O_EXCL is specified
1057        // with O_TMPFILE, the temporary file created cannot be linked into the filesystem. Although
1058        // there exist fuchsia flags `fio::FLAG_TEMPORARY_AS_NOT_LINKABLE`, the starnix vfs already
1059        // handles this case and makes sure that the created file is not linkable. There is also no
1060        // current way of passing the open flags to this function.
1061        zxio = self
1062            .zxio
1063            .open(
1064                ".",
1065                fio::Flags::PROTOCOL_FILE
1066                    | fio::Flags::FLAG_CREATE_AS_UNNAMED_TEMPORARY
1067                    | fio::PERM_READABLE
1068                    | fio::PERM_WRITABLE,
1069                ZxioOpenOptions::new(
1070                    Some(&mut attrs),
1071                    Some(zxio_node_attributes_t {
1072                        mode: mode.bits(),
1073                        uid: owner.uid,
1074                        gid: owner.gid,
1075                        has: zxio_node_attr_has_t {
1076                            mode: true,
1077                            uid: true,
1078                            gid: true,
1079                            ..Default::default()
1080                        },
1081                        ..Default::default()
1082                    }),
1083                ),
1084            )
1085            .map_err(|status| from_status_like_fdio!(status))?;
1086        node_id = attrs.id;
1087
1088        let ops = Box::new(RemoteNode { zxio }) as Box<dyn FsNodeOps>;
1089
1090        if !fs_ops.use_remote_ids {
1091            node_id = fs.allocate_ino();
1092        }
1093        let child = fs.create_node(node_id, ops, FsNodeInfo::new(mode, owner));
1094
1095        Ok(child)
1096    }
1097
1098    fn link(
1099        &self,
1100        _locked: &mut Locked<FileOpsCore>,
1101        node: &FsNode,
1102        _current_task: &CurrentTask,
1103        name: &FsStr,
1104        child: &FsNodeHandle,
1105    ) -> Result<(), Errno> {
1106        if !RemoteFs::from_fs(&node.fs()).use_remote_ids {
1107            return error!(EPERM);
1108        }
1109        let name = get_name_str(name)?;
1110        let link_into = |zxio: &syncio::Zxio| {
1111            zxio.link_into(&self.zxio, name).map_err(|status| match status {
1112                zx::Status::BAD_STATE => errno!(EXDEV),
1113                zx::Status::ACCESS_DENIED => errno!(ENOKEY),
1114                s => from_status_like_fdio!(s),
1115            })
1116        };
1117        if let Some(child) = child.downcast_ops::<RemoteNode>() {
1118            link_into(&child.zxio)
1119        } else if let Some(child) = child.downcast_ops::<RemoteSymlink>() {
1120            link_into(&child.zxio())
1121        } else {
1122            error!(EXDEV)
1123        }
1124    }
1125
1126    fn forget(
1127        self: Box<Self>,
1128        _locked: &mut Locked<FileOpsCore>,
1129        _current_task: &CurrentTask,
1130        info: FsNodeInfo,
1131    ) -> Result<(), Errno> {
1132        // Before forgetting this node, update atime if we need to.
1133        if info.pending_time_access_update {
1134            self.zxio
1135                .close_and_update_access_time()
1136                .map_err(|status| from_status_like_fdio!(status))?;
1137        }
1138        Ok(())
1139    }
1140
1141    fn enable_fsverity(&self, descriptor: &fsverity_descriptor) -> Result<(), Errno> {
1142        let descr = zxio_fsverity_descriptor_t {
1143            hash_algorithm: descriptor.hash_algorithm,
1144            salt_size: descriptor.salt_size,
1145            salt: descriptor.salt,
1146        };
1147        self.zxio.enable_verity(&descr).map_err(|status| from_status_like_fdio!(status))
1148    }
1149
1150    fn get_fsverity_descriptor(&self, log_blocksize: u8) -> Result<fsverity_descriptor, Errno> {
1151        let mut root_hash = [0; ZXIO_ROOT_HASH_LENGTH];
1152        let attrs = self
1153            .zxio
1154            .attr_get_with_root_hash(
1155                zxio_node_attr_has_t {
1156                    content_size: true,
1157                    fsverity_options: true,
1158                    fsverity_root_hash: true,
1159                    ..Default::default()
1160                },
1161                &mut root_hash,
1162            )
1163            .map_err(|status| match status {
1164                zx::Status::INVALID_ARGS => errno!(ENODATA),
1165                _ => from_status_like_fdio!(status),
1166            })?;
1167        return Ok(fsverity_descriptor {
1168            version: 1,
1169            hash_algorithm: attrs.fsverity_options.hash_alg,
1170            log_blocksize,
1171            salt_size: attrs.fsverity_options.salt_size as u8,
1172            __reserved_0x04: 0u32,
1173            data_size: attrs.content_size,
1174            root_hash,
1175            salt: attrs.fsverity_options.salt,
1176            __reserved: [0u8; 144],
1177        });
1178    }
1179}
1180
/// `FsNodeOps` for remote nodes that are neither regular files, directories,
/// nor symlinks (see the type selection in `lookup`).
struct RemoteSpecialNode {
    // Connection to the remote node; used here for xattr delegation only.
    zxio: syncio::Zxio,
}
1184
impl FsNodeOps for RemoteSpecialNode {
    fs_node_impl_not_dir!();
    // Extended attributes are serviced directly by the remote connection.
    fs_node_impl_xattr_delegate!(self, self.zxio);

    /// Special nodes are never opened through this path, so reaching this is
    /// a programming error rather than a user-visible failure.
    fn create_file_ops(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        _node: &FsNode,
        _current_task: &CurrentTask,
        _flags: OpenFlags,
    ) -> Result<Box<dyn FileOps>, Errno> {
        unreachable!("Special nodes cannot be opened.");
    }
}
1199
1200fn zxio_read_write_inner_map_error(status: zx::Status) -> Errno {
1201    match status {
1202        // zx::Stream may return invalid args or not found error because of
1203        // invalid zx_iovec buffer pointers.
1204        zx::Status::INVALID_ARGS | zx::Status::NOT_FOUND => errno!(EFAULT, ""),
1205        status => from_status_like_fdio!(status),
1206    }
1207}
1208
1209fn zxio_read_inner(
1210    data: &mut dyn OutputBuffer,
1211    unified_read_fn: impl FnOnce(&[syncio::zxio::zx_iovec]) -> Result<usize, zx::Status>,
1212    vmo_read_fn: impl FnOnce(&mut [u8]) -> Result<usize, zx::Status>,
1213) -> Result<usize, Errno> {
1214    let read_bytes = with_iovec_segments(data, |iovecs| {
1215        unified_read_fn(&iovecs).map_err(zxio_read_write_inner_map_error)
1216    });
1217
1218    match read_bytes {
1219        Some(actual) => {
1220            let actual = actual?;
1221            // SAFETY: we successfully read `actual` bytes
1222            // directly to the user's buffer segments.
1223            unsafe { data.advance(actual) }?;
1224            Ok(actual)
1225        }
1226        None => {
1227            // Perform the (slower) operation by using an intermediate buffer.
1228            let total = data.available();
1229            let mut bytes = vec![0u8; total];
1230            let actual =
1231                vmo_read_fn(&mut bytes).map_err(|status| from_status_like_fdio!(status))?;
1232            data.write_all(&bytes[0..actual])
1233        }
1234    }
1235}
1236
1237fn zxio_read_at(zxio: &Zxio, offset: usize, data: &mut dyn OutputBuffer) -> Result<usize, Errno> {
1238    let offset = offset as u64;
1239    zxio_read_inner(
1240        data,
1241        |iovecs| {
1242            // SAFETY: `zxio_read_inner` maps the returned error to an appropriate
1243            // `Errno` for userspace to handle. `data` only points to memory that
1244            // is allowed to be written to (Linux user-mode aspace or a valid
1245            // Starnix owned buffer).
1246            unsafe { zxio.readv_at(offset, iovecs) }
1247        },
1248        |bytes| zxio.read_at(offset, bytes),
1249    )
1250}
1251
1252fn zxio_write_inner(
1253    data: &mut dyn InputBuffer,
1254    unified_write_fn: impl FnOnce(&[syncio::zxio::zx_iovec]) -> Result<usize, zx::Status>,
1255    vmo_write_fn: impl FnOnce(&[u8]) -> Result<usize, zx::Status>,
1256) -> Result<usize, Errno> {
1257    let write_bytes = with_iovec_segments(data, |iovecs| {
1258        unified_write_fn(&iovecs).map_err(zxio_read_write_inner_map_error)
1259    });
1260
1261    match write_bytes {
1262        Some(actual) => {
1263            let actual = actual?;
1264            data.advance(actual)?;
1265            Ok(actual)
1266        }
1267        None => {
1268            // Perform the (slower) operation by using an intermediate buffer.
1269            let bytes = data.peek_all()?;
1270            let actual = vmo_write_fn(&bytes).map_err(|status| from_status_like_fdio!(status))?;
1271            data.advance(actual)?;
1272            Ok(actual)
1273        }
1274    }
1275}
1276
1277fn zxio_write_at(
1278    zxio: &Zxio,
1279    _current_task: &CurrentTask,
1280    offset: usize,
1281    data: &mut dyn InputBuffer,
1282) -> Result<usize, Errno> {
1283    let offset = offset as u64;
1284    zxio_write_inner(
1285        data,
1286        |iovecs| {
1287            // SAFETY: `zxio_write_inner` maps the returned error to an appropriate
1288            // `Errno` for userspace to handle.
1289            unsafe { zxio.writev_at(offset, iovecs) }
1290        },
1291        |bytes| zxio.write_at(offset, bytes),
1292    )
1293}
1294
/// Helper struct to track the context necessary to iterate over dir entries.
#[derive(Default)]
struct RemoteDirectoryIterator<'a> {
    // Lazily initialized by `get_or_init_iterator`; None until first use.
    iterator: Option<DirentIterator<'a>>,

    /// If the last attempt to write to the sink failed, this contains the entry that is pending to
    /// be added. This is also used to synthesize dot-dot.
    pending_entry: Entry,
}
1304
/// One step of directory iteration: a real remote entry, a synthesized `..`,
/// or the end of the listing.
#[derive(Default)]
enum Entry {
    // Indicates no more entries.
    #[default]
    None,

    // A real directory entry returned by the remote iterator.
    Some(ZxioDirent),

    // Indicates dot-dot should be synthesized.
    DotDot,
}
1316
1317impl Entry {
1318    fn take(&mut self) -> Entry {
1319        std::mem::replace(self, Entry::None)
1320    }
1321}
1322
1323impl From<Option<ZxioDirent>> for Entry {
1324    fn from(value: Option<ZxioDirent>) -> Self {
1325        match value {
1326            None => Entry::None,
1327            Some(x) => Entry::Some(x),
1328        }
1329    }
1330}
1331
1332impl<'a> RemoteDirectoryIterator<'a> {
1333    fn get_or_init_iterator(&mut self, zxio: &'a Zxio) -> Result<&mut DirentIterator<'a>, Errno> {
1334        if self.iterator.is_none() {
1335            let iterator =
1336                zxio.create_dirent_iterator().map_err(|status| from_status_like_fdio!(status))?;
1337            self.iterator = Some(iterator);
1338        }
1339        if let Some(iterator) = &mut self.iterator {
1340            return Ok(iterator);
1341        }
1342
1343        // Should be an impossible error, because we just created the iterator above.
1344        error!(EIO)
1345    }
1346
1347    /// Returns the next dir entry. If no more entries are found, returns None.  Returns an error if
1348    /// the iterator fails for other reasons described by the zxio library.
1349    pub fn next(&mut self, zxio: &'a Zxio) -> Result<Entry, Errno> {
1350        let mut next = self.pending_entry.take();
1351        if let Entry::None = next {
1352            next = self
1353                .get_or_init_iterator(zxio)?
1354                .next()
1355                .transpose()
1356                .map_err(|status| from_status_like_fdio!(status))?
1357                .into();
1358        }
1359        // We only want to synthesize .. if . exists because the . and .. entries get removed if the
1360        // directory is unlinked, so if the remote filesystem has removed ., we know to omit the
1361        // .. entry.
1362        match &next {
1363            Entry::Some(ZxioDirent { name, .. }) if name == "." => {
1364                self.pending_entry = Entry::DotDot;
1365            }
1366            _ => {}
1367        }
1368        Ok(next)
1369    }
1370}
1371
/// `FileOps` state for an open remote directory: the remote connection plus
/// the stateful dirent iterator used by `readdir`/`seek`.
struct RemoteDirectoryObject {
    // Guarded iterator state; `seek` and `readdir` both lock it.
    iterator: Mutex<RemoteDirectoryIterator<'static>>,

    // The underlying Zircon I/O object.  This *must* be dropped after `iterator` above because the
    // iterator has references to this object.  We use some unsafe code below to erase the lifetime
    // (hence the 'static above).
    zxio: Zxio,
}
1380
impl RemoteDirectoryObject {
    /// Wraps `zxio` with a fresh (uninitialized) directory iterator.
    pub fn new(zxio: Zxio) -> RemoteDirectoryObject {
        RemoteDirectoryObject { zxio, iterator: Mutex::new(RemoteDirectoryIterator::default()) }
    }

    /// Returns a reference to Zxio with the lifetime erased.
    ///
    /// # Safety
    ///
    /// The caller must uphold the lifetime requirements, which will be the case if this is only
    /// used for the contained iterator (`iterator` is dropped before `zxio`).
    unsafe fn zxio(&self) -> &'static Zxio {
        #[allow(clippy::undocumented_unsafe_blocks, reason = "2024 edition migration")]
        unsafe {
            &*(&self.zxio as *const Zxio)
        }
    }
}
1399
impl FileOps for RemoteDirectoryObject {
    fileops_impl_directory!();

    /// Seeks the directory offset, keeping the remote dirent iterator in sync
    /// with the new position.  The iterator can only move forward, so seeking
    /// backwards rewinds it and then re-advances from the start.
    fn seek(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        _file: &FileObject,
        _current_task: &CurrentTask,
        current_offset: off_t,
        target: SeekTarget,
    ) -> Result<off_t, Errno> {
        let mut iterator = self.iterator.lock();
        let new_offset = default_seek(current_offset, target, || error!(EINVAL))?;
        let mut iterator_position = current_offset;

        if new_offset < iterator_position {
            // Our iterator only goes forward, so reset it here.  Note: we *must* rewind it rather
            // than just create a new iterator because the remote end maintains the offset.
            if let Some(iterator) = &mut iterator.iterator {
                iterator.rewind().map_err(|status| from_status_like_fdio!(status))?;
            }
            iterator.pending_entry = Entry::None;
            iterator_position = 0;
        }

        // Advance the iterator to catch up with the offset.
        for i in iterator_position..new_offset {
            // SAFETY: See the comment on the `zxio` function above.  The iterator outlives this
            // function and the zxio object must outlive the iterator.
            match iterator.next(unsafe { self.zxio() }) {
                Ok(Entry::Some(_) | Entry::DotDot) => {}
                Ok(Entry::None) => break, // No more entries.
                Err(_) => {
                    // In order to keep the offset and the iterator in sync, set the new offset
                    // to be as far as we could get.
                    // Note that failing the seek here would also cause the iterator and the
                    // offset to not be in sync, because the iterator has already moved from
                    // where it was.
                    return Ok(i);
                }
            }
        }

        Ok(new_offset)
    }

    /// Streams directory entries into `sink`, synthesizing `..` and stashing
    /// any entry the sink refuses so it can be retried on the next call.
    fn readdir(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        file: &FileObject,
        _current_task: &CurrentTask,
        sink: &mut dyn DirentSink,
    ) -> Result<(), Errno> {
        // It is important to acquire the lock to the offset before the context, to avoid a deadlock
        // where seek() tries to modify the context.
        let mut iterator = self.iterator.lock();

        loop {
            // SAFETY: See the comment on the `zxio` function above.  The iterator outlives this
            // function and the zxio object must outlive the iterator.
            let entry = iterator.next(unsafe { self.zxio() })?;
            if let Err(e) = match &entry {
                Entry::Some(entry) => {
                    // Entries without an id cannot be reported to userspace.
                    let inode_num: ino_t = entry.id.ok_or_else(|| errno!(EIO))?;
                    let entry_type = if entry.is_dir() {
                        DirectoryEntryType::DIR
                    } else if entry.is_file() {
                        DirectoryEntryType::REG
                    } else {
                        DirectoryEntryType::UNKNOWN
                    };
                    sink.add(inode_num, sink.offset() + 1, entry_type, entry.name.as_bstr())
                }
                Entry::DotDot => {
                    let inode_num = if let Some(parent) = file.name.parent_within_mount() {
                        parent.node.ino
                    } else {
                        // For the root .. should have the same inode number as .
                        file.name.entry.node.ino
                    };
                    sink.add(inode_num, sink.offset() + 1, DirectoryEntryType::DIR, "..".into())
                }
                Entry::None => break,
            } {
                // The sink rejected the entry (e.g. buffer full): remember it
                // so the next readdir call emits it first.
                iterator.pending_entry = entry;
                return Err(e);
            }
        }
        Ok(())
    }

    /// Syncs the remote directory, mapping Zircon statuses onto the errno set
    /// Linux callers expect.
    fn sync(&self, _file: &FileObject, _current_task: &CurrentTask) -> Result<(), Errno> {
        self.zxio.sync().map_err(|status| match status {
            zx::Status::NO_RESOURCES | zx::Status::NO_MEMORY | zx::Status::NO_SPACE => {
                errno!(ENOSPC)
            }
            zx::Status::INVALID_ARGS | zx::Status::NOT_FILE => errno!(EINVAL),
            zx::Status::BAD_HANDLE => errno!(EBADFD),
            zx::Status::NOT_SUPPORTED => errno!(ENOTSUP),
            zx::Status::INTERRUPTED_RETRY => errno!(EINTR),
            _ => errno!(EIO),
        })
    }

    /// Duplicates the remote connection and releases it as a raw handle.
    fn to_handle(
        &self,
        _file: &FileObject,
        _current_task: &CurrentTask,
    ) -> Result<Option<zx::NullableHandle>, Errno> {
        self.zxio
            .deep_clone()
            .and_then(Zxio::release)
            .map(Some)
            .map_err(|status| from_status_like_fdio!(status))
    }
}
1516
/// `FileOps` state for an open remote regular file.
pub struct RemoteFileObject {
    /// The underlying Zircon I/O object.  This is shared, so we must take care not to use any
    /// stateful methods on the underlying object (reading and writing is fine).
    zxio: Zxio,

    /// Cached read-only VMO handle.
    read_only_memory: OnceCell<Arc<MemoryObject>>,

    /// Cached read/exec VMO handle.
    read_exec_memory: OnceCell<Arc<MemoryObject>>,
}
1528
1529impl RemoteFileObject {
1530    fn new(zxio: Zxio) -> RemoteFileObject {
1531        RemoteFileObject {
1532            zxio,
1533            read_only_memory: Default::default(),
1534            read_exec_memory: Default::default(),
1535        }
1536    }
1537
1538    fn fetch_remote_memory(&self, prot: ProtectionFlags) -> Result<Arc<MemoryObject>, Errno> {
1539        let without_exec = self
1540            .zxio
1541            .vmo_get(prot.to_vmar_flags() - zx::VmarFlags::PERM_EXECUTE)
1542            .map_err(|status| from_status_like_fdio!(status))?;
1543        let all_flags = if prot.contains(ProtectionFlags::EXEC) {
1544            without_exec.replace_as_executable(&VMEX_RESOURCE).map_err(impossible_error)?
1545        } else {
1546            without_exec
1547        };
1548        Ok(Arc::new(MemoryObject::from(all_flags)))
1549    }
1550}
1551
1552impl FileOps for RemoteFileObject {
1553    fileops_impl_seekable!();
1554
1555    fn read(
1556        &self,
1557        _locked: &mut Locked<FileOpsCore>,
1558        _file: &FileObject,
1559        _current_task: &CurrentTask,
1560        offset: usize,
1561        data: &mut dyn OutputBuffer,
1562    ) -> Result<usize, Errno> {
1563        zxio_read_at(&self.zxio, offset, data)
1564    }
1565
1566    fn write(
1567        &self,
1568        _locked: &mut Locked<FileOpsCore>,
1569        _file: &FileObject,
1570        current_task: &CurrentTask,
1571        offset: usize,
1572        data: &mut dyn InputBuffer,
1573    ) -> Result<usize, Errno> {
1574        zxio_write_at(&self.zxio, current_task, offset, data)
1575    }
1576
1577    fn get_memory(
1578        &self,
1579        _locked: &mut Locked<FileOpsCore>,
1580        _file: &FileObject,
1581        _current_task: &CurrentTask,
1582        _length: Option<usize>,
1583        prot: ProtectionFlags,
1584    ) -> Result<Arc<MemoryObject>, Errno> {
1585        trace_duration!(CATEGORY_STARNIX_MM, "RemoteFileGetVmo");
1586        let memory_cache = if prot == (ProtectionFlags::READ | ProtectionFlags::EXEC) {
1587            Some(&self.read_exec_memory)
1588        } else if prot == ProtectionFlags::READ {
1589            Some(&self.read_only_memory)
1590        } else {
1591            None
1592        };
1593
1594        memory_cache
1595            .map(|c| c.get_or_try_init(|| self.fetch_remote_memory(prot)).cloned())
1596            .unwrap_or_else(|| self.fetch_remote_memory(prot))
1597    }
1598
1599    fn to_handle(
1600        &self,
1601        _file: &FileObject,
1602        _current_task: &CurrentTask,
1603    ) -> Result<Option<zx::NullableHandle>, Errno> {
1604        self.zxio
1605            .deep_clone()
1606            .and_then(Zxio::release)
1607            .map(Some)
1608            .map_err(|status| from_status_like_fdio!(status))
1609    }
1610
    /// Flushes the remote file via zxio, translating Zircon statuses into the
    /// closest POSIX errno.
    fn sync(&self, _file: &FileObject, _current_task: &CurrentTask) -> Result<(), Errno> {
        self.zxio.sync().map_err(|status| match status {
            // Resource exhaustion maps to "no space left on device".
            zx::Status::NO_RESOURCES | zx::Status::NO_MEMORY | zx::Status::NO_SPACE => {
                errno!(ENOSPC)
            }
            zx::Status::INVALID_ARGS | zx::Status::NOT_FILE => errno!(EINVAL),
            zx::Status::BAD_HANDLE => errno!(EBADFD),
            zx::Status::NOT_SUPPORTED => errno!(ENOTSUP),
            zx::Status::INTERRUPTED_RETRY => errno!(EINTR),
            // Anything else is reported as a generic I/O error.
            _ => errno!(EIO),
        })
    }
1623
    /// No custom ioctls for remote files; defer to the shared default
    /// ioctl handler.
    fn ioctl(
        &self,
        locked: &mut Locked<Unlocked>,
        file: &FileObject,
        current_task: &CurrentTask,
        request: u32,
        arg: SyscallArg,
    ) -> Result<SyscallResult, Errno> {
        default_ioctl(file, locked, current_task, request, arg)
    }
1634}
1635
/// Node ops for a symbolic link backed by a remote (zxio) node.
struct RemoteSymlink {
    // Mutex-wrapped so a clone of the connection can be taken by value; see
    // `RemoteSymlink::zxio`.
    zxio: Mutex<syncio::Zxio>,
}
1639
1640impl RemoteSymlink {
1641    fn zxio(&self) -> syncio::Zxio {
1642        self.zxio.lock().clone()
1643    }
1644}
1645
1646impl FsNodeOps for RemoteSymlink {
1647    fs_node_impl_symlink!();
1648    fs_node_impl_xattr_delegate!(self, self.zxio());
1649
1650    fn readlink(
1651        &self,
1652        _locked: &mut Locked<FileOpsCore>,
1653        _node: &FsNode,
1654        _current_task: &CurrentTask,
1655    ) -> Result<SymlinkTarget, Errno> {
1656        Ok(SymlinkTarget::Path(
1657            self.zxio().read_link().map_err(|status| from_status_like_fdio!(status))?.into(),
1658        ))
1659    }
1660
1661    fn fetch_and_refresh_info<'a>(
1662        &self,
1663        _locked: &mut Locked<FileOpsCore>,
1664        _node: &FsNode,
1665        _current_task: &CurrentTask,
1666        info: &'a RwLock<FsNodeInfo>,
1667    ) -> Result<RwLockReadGuard<'a, FsNodeInfo>, Errno> {
1668        fetch_and_refresh_info_impl(&self.zxio(), info)
1669    }
1670
1671    fn forget(
1672        self: Box<Self>,
1673        _locked: &mut Locked<FileOpsCore>,
1674        _current_task: &CurrentTask,
1675        info: FsNodeInfo,
1676    ) -> Result<(), Errno> {
1677        // Before forgetting this node, update atime if we need to.
1678        if info.pending_time_access_update {
1679            self.zxio()
1680                .close_and_update_access_time()
1681                .map_err(|status| from_status_like_fdio!(status))?;
1682        }
1683        Ok(())
1684    }
1685}
1686
/// File ops wrapping a Zircon `Counter` handle, exposed to userspace
/// primarily through the sync-file ioctl interface (see the `FileOps` impl).
pub struct RemoteCounter {
    counter: Counter,
}
1690
impl RemoteCounter {
    /// Wraps an existing counter handle.
    fn new(counter: Counter) -> Self {
        Self { counter }
    }

    /// Duplicates the underlying counter handle with the same rights.
    /// Duplication failure is treated as impossible here (`impossible_error`
    /// panics rather than surfacing an errno).
    pub fn duplicate_handle(&self) -> Result<Counter, Errno> {
        self.counter.duplicate_handle(zx::Rights::SAME_RIGHTS).map_err(impossible_error)
    }
}
1700
1701impl FileOps for RemoteCounter {
1702    fileops_impl_nonseekable!();
1703    fileops_impl_noop_sync!();
1704
1705    fn read(
1706        &self,
1707        _locked: &mut Locked<FileOpsCore>,
1708        _file: &FileObject,
1709        _current_task: &CurrentTask,
1710        _offset: usize,
1711        _data: &mut dyn OutputBuffer,
1712    ) -> Result<usize, Errno> {
1713        error!(ENOTSUP)
1714    }
1715
1716    fn write(
1717        &self,
1718        _locked: &mut Locked<FileOpsCore>,
1719        _file: &FileObject,
1720        _current_task: &CurrentTask,
1721        _offset: usize,
1722        _data: &mut dyn InputBuffer,
1723    ) -> Result<usize, Errno> {
1724        error!(ENOTSUP)
1725    }
1726
1727    fn ioctl(
1728        &self,
1729        locked: &mut Locked<Unlocked>,
1730        file: &FileObject,
1731        current_task: &CurrentTask,
1732        request: u32,
1733        arg: SyscallArg,
1734    ) -> Result<SyscallResult, Errno> {
1735        let ioctl_type = (request >> 8) as u8;
1736        let ioctl_number = request as u8;
1737        if ioctl_type == SYNC_IOC_MAGIC
1738            && (ioctl_number == SYNC_IOC_FILE_INFO || ioctl_number == SYNC_IOC_MERGE)
1739        {
1740            let mut sync_points: Vec<SyncPoint> = vec![];
1741            let counter = self.duplicate_handle()?;
1742            sync_points.push(SyncPoint::new(Timeline::Hwc, counter.into()));
1743            let sync_file_name: &[u8; 32] = b"remote counter\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0";
1744            let sync_file = SyncFile::new(*sync_file_name, SyncFence { sync_points });
1745            return sync_file.ioctl(locked, file, current_task, request, arg);
1746        }
1747
1748        error!(EINVAL)
1749    }
1750}
1751
1752#[cfg(test)]
1753mod test {
1754    use super::*;
1755    use crate::mm::PAGE_SIZE;
1756    use crate::testing::*;
1757    use crate::vfs::buffers::{VecInputBuffer, VecOutputBuffer};
1758    use crate::vfs::socket::{SocketFile, SocketMessageFlags};
1759    use crate::vfs::{EpollFileObject, LookupContext, Namespace, SymlinkMode, TimeUpdateType};
1760    use assert_matches::assert_matches;
1761    use fidl_fuchsia_io as fio;
1762    use flyweights::FlyByteStr;
1763    use fxfs_testing::{TestFixture, TestFixtureOptions};
1764    use starnix_uapi::auth::Credentials;
1765    use starnix_uapi::errors::EINVAL;
1766    use starnix_uapi::file_mode::{AccessCheck, mode};
1767    use starnix_uapi::open_flags::OpenFlags;
1768    use starnix_uapi::vfs::{EpollEvent, FdEvents};
1769    use zx::HandleBased;
1770
1771    #[::fuchsia::test]
1772    async fn test_remote_uds() {
1773        spawn_kernel_and_run(async |locked, current_task| {
1774            let (s1, s2) = zx::Socket::create_datagram();
1775            s2.write(&vec![0]).expect("write");
1776            let file = new_remote_file(locked, &current_task, s1.into(), OpenFlags::RDWR)
1777                .expect("new_remote_file");
1778            assert!(file.node().is_sock());
1779            let socket_ops = file.downcast_file::<SocketFile>().unwrap();
1780            let flags = SocketMessageFlags::CTRUNC
1781                | SocketMessageFlags::TRUNC
1782                | SocketMessageFlags::NOSIGNAL
1783                | SocketMessageFlags::CMSG_CLOEXEC;
1784            let mut buffer = VecOutputBuffer::new(1024);
1785            let info = socket_ops
1786                .recvmsg(locked, &current_task, &file, &mut buffer, flags, None)
1787                .expect("recvmsg");
1788            assert!(info.ancillary_data.is_empty());
1789            assert_eq!(info.message_length, 1);
1790        })
1791        .await;
1792    }
1793
    /// Mounts this component's own package directory (`/pkg`) as a RemoteFs
    /// and exercises basic lookup: a missing entry fails with ENOENT, a known
    /// entry resolves, and a nested file can be opened read-only.
    #[::fuchsia::test]
    async fn test_tree() {
        spawn_kernel_and_run(async |locked, current_task| {
            let kernel = current_task.kernel();
            let rights = fio::PERM_READABLE | fio::PERM_EXECUTABLE;
            let (server, client) = zx::Channel::create();
            fdio::open("/pkg", rights, server).expect("failed to open /pkg");
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                FileSystemOptions { source: FlyByteStr::new(b"/pkg"), ..Default::default() },
                rights,
            )
            .unwrap();
            let ns = Namespace::new(fs);
            let root = ns.root();
            // A nonexistent entry must surface ENOENT.
            let mut context = LookupContext::default();
            assert_eq!(
                root.lookup_child(locked, &current_task, &mut context, "nib".into()).err(),
                Some(errno!(ENOENT))
            );
            // A known package directory must resolve.
            let mut context = LookupContext::default();
            root.lookup_child(locked, &current_task, &mut context, "lib".into()).unwrap();

            // A nested file must resolve and open read-only.
            let mut context = LookupContext::default();
            let _test_file = root
                .lookup_child(
                    locked,
                    &current_task,
                    &mut context,
                    "data/tests/hello_starnix".into(),
                )
                .unwrap()
                .open(locked, &current_task, OpenFlags::RDONLY, AccessCheck::default())
                .unwrap();
        })
        .await;
    }
1833
    /// Reading from a remote pipe returns data already queued by the peer
    /// without blocking.
    #[::fuchsia::test]
    async fn test_blocking_io() {
        spawn_kernel_and_run(async |locked, current_task| {
            let (client, server) = zx::Socket::create_stream();
            let pipe = create_fuchsia_pipe(locked, &current_task, client, OpenFlags::RDWR).unwrap();

            let bytes = [0u8; 64];
            assert_eq!(bytes.len(), server.write(&bytes).unwrap());

            // All 64 bytes are already queued, so this read completes
            // immediately rather than blocking.
            let bytes_read =
                pipe.read(locked, &current_task, &mut VecOutputBuffer::new(64)).unwrap();

            assert_eq!(bytes_read, bytes.len());
        })
        .await;
    }
1851
    /// Verifies event reporting for a remote pipe, both via direct
    /// `query_events` and via an epoll object: writable when empty, readable
    /// once the peer writes, and back to writable-only after draining.
    #[::fuchsia::test]
    async fn test_poll() {
        spawn_kernel_and_run(async |locked, current_task| {
            let (client, server) = zx::Socket::create_stream();
            let pipe = create_fuchsia_pipe(locked, &current_task, client, OpenFlags::RDWR)
                .expect("create_fuchsia_pipe");
            let server_zxio = Zxio::create(server.into_handle()).expect("Zxio::create");

            // Initially the pipe is only writable.
            assert_eq!(
                pipe.query_events(locked, &current_task),
                Ok(FdEvents::POLLOUT | FdEvents::POLLWRNORM)
            );

            // Register the pipe with an epoll object, waiting for POLLIN.
            let epoll_object = EpollFileObject::new_file(locked, &current_task);
            let epoll_file = epoll_object.downcast_file::<EpollFileObject>().unwrap();
            let event = EpollEvent::new(FdEvents::POLLIN, 0);
            epoll_file
                .add(locked, &current_task, &pipe, &epoll_object, event)
                .expect("poll_file.add");

            // Nothing readable yet, so a zero-timeout wait returns empty.
            let fds = epoll_file
                .wait(locked, &current_task, 1, zx::MonotonicInstant::ZERO)
                .expect("wait");
            assert!(fds.is_empty());

            // Make the pipe readable by writing a byte from the server end.
            assert_eq!(server_zxio.write(&[0]).expect("write"), 1);

            assert_eq!(
                pipe.query_events(locked, &current_task),
                Ok(FdEvents::POLLOUT
                    | FdEvents::POLLWRNORM
                    | FdEvents::POLLIN
                    | FdEvents::POLLRDNORM)
            );
            let fds = epoll_file
                .wait(locked, &current_task, 1, zx::MonotonicInstant::ZERO)
                .expect("wait");
            assert_eq!(fds.len(), 1);

            // Drain the byte; the pipe reverts to writable-only.
            assert_eq!(
                pipe.read(locked, &current_task, &mut VecOutputBuffer::new(64)).expect("read"),
                1
            );

            assert_eq!(
                pipe.query_events(locked, &current_task),
                Ok(FdEvents::POLLOUT | FdEvents::POLLWRNORM)
            );
            let fds = epoll_file
                .wait(locked, &current_task, 1, zx::MonotonicInstant::ZERO)
                .expect("wait");
            assert!(fds.is_empty());
        })
        .await;
    }
1907
    /// A directory channel handed to `new_remote_file` is recognized as a
    /// directory node and can be converted back into a handle.
    #[::fuchsia::test]
    async fn test_new_remote_directory() {
        spawn_kernel_and_run(async |locked, current_task| {
            let (server, client) = zx::Channel::create();
            fdio::open("/pkg", fio::PERM_READABLE | fio::PERM_EXECUTABLE, server)
                .expect("failed to open /pkg");

            let fd = new_remote_file(locked, &current_task, client.into(), OpenFlags::RDWR)
                .expect("new_remote_file");
            assert!(fd.node().is_dir());
            assert!(fd.to_handle(&current_task).expect("to_handle").is_some());
        })
        .await;
    }
1922
    /// A file channel handed to `new_remote_file` is recognized as a
    /// non-directory node and can be converted back into a handle.
    #[::fuchsia::test]
    async fn test_new_remote_file() {
        spawn_kernel_and_run(async |locked, current_task| {
            let (server, client) = zx::Channel::create();
            fdio::open("/pkg/meta/contents", fio::PERM_READABLE, server)
                .expect("failed to open /pkg/meta/contents");

            let fd = new_remote_file(locked, &current_task, client.into(), OpenFlags::RDONLY)
                .expect("new_remote_file");
            assert!(!fd.node().is_dir());
            assert!(fd.to_handle(&current_task).expect("to_handle").is_some());
        })
        .await;
    }
1937
    /// A Zircon counter handed to `new_remote_file` produces a file that can
    /// be converted back into a handle.
    #[::fuchsia::test]
    async fn test_new_remote_counter() {
        spawn_kernel_and_run(async |locked, current_task| {
            let counter = zx::Counter::create();

            let fd = new_remote_file(locked, &current_task, counter.into(), OpenFlags::RDONLY)
                .expect("new_remote_file");
            assert!(fd.to_handle(&current_task).expect("to_handle").is_some());
        })
        .await;
    }
1949
    /// A VMO handed to `new_remote_file` produces a non-directory file that
    /// can be converted back into a handle.
    #[::fuchsia::test]
    async fn test_new_remote_vmo() {
        spawn_kernel_and_run(async |locked, current_task| {
            let vmo = zx::Vmo::create(*PAGE_SIZE).expect("Vmo::create");
            let fd = new_remote_file(locked, &current_task, vmo.into(), OpenFlags::RDWR)
                .expect("new_remote_file");
            assert!(!fd.node().is_dir());
            assert!(fd.to_handle(&current_task).expect("to_handle").is_some());
        })
        .await;
    }
1961
    /// Creates a symlink with a multi-byte UTF-8 target on a RemoteFs, checks
    /// its reported size and readlink result, then remounts the fixture to
    /// verify the symlink persisted across a simulated restart.
    #[::fuchsia::test(threads = 2)]
    async fn test_symlink() {
        let fixture = TestFixture::new().await;
        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed");

        const LINK_PATH: &'static str = "symlink";
        const LINK_TARGET: &'static str = "私は「UTF8」です";
        // We expect the reported size of the symlink to be the length of the target, in bytes,
        // *without* a null terminator. Most Linux systems assume UTF-8 encoding.
        const LINK_SIZE: usize = 22;
        assert_eq!(LINK_SIZE, LINK_TARGET.len());

        spawn_kernel_and_run(async move |locked, current_task| {
            let kernel = current_task.kernel();
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
                fio::PERM_READABLE | fio::PERM_WRITABLE,
            )
            .expect("new_fs failed");
            let ns = Namespace::new(fs);
            let root = ns.root();
            let symlink_node = root
                .create_symlink(locked, &current_task, LINK_PATH.into(), LINK_TARGET.into())
                .expect("symlink failed");
            assert_matches!(&*symlink_node.entry.node.info(), FsNodeInfo { size: LINK_SIZE, .. });

            // Look the symlink up without following it so readlink applies.
            let mut context = LookupContext::new(SymlinkMode::NoFollow);
            let child = root
                .lookup_child(locked, &current_task, &mut context, "symlink".into())
                .expect("lookup_child failed");

            match child.readlink(locked, &current_task).expect("readlink failed") {
                SymlinkTarget::Path(path) => assert_eq!(path, LINK_TARGET),
                SymlinkTarget::Node(_) => panic!("readlink returned SymlinkTarget::Node"),
            }
            // Ensure the size that stat reports matches what is expected.
            let stat_result = child.entry.node.stat(locked, &current_task).expect("stat failed");
            assert_eq!(stat_result.st_size as usize, LINK_SIZE);
        })
        .await;

        // Simulate a second run to ensure the symlink was persisted correctly.
        let fixture = TestFixture::open(
            fixture.close().await,
            TestFixtureOptions { format: false, ..Default::default() },
        )
        .await;
        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed after remount");

        spawn_kernel_and_run(async move |locked, current_task| {
            let kernel = current_task.kernel();
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
                fio::PERM_READABLE | fio::PERM_WRITABLE,
            )
            .expect("new_fs failed after remount");
            let ns = Namespace::new(fs);
            let root = ns.root();
            let mut context = LookupContext::new(SymlinkMode::NoFollow);
            let child = root
                .lookup_child(locked, &current_task, &mut context, "symlink".into())
                .expect("lookup_child failed after remount");

            match child.readlink(locked, &current_task).expect("readlink failed after remount") {
                SymlinkTarget::Path(path) => assert_eq!(path, LINK_TARGET),
                SymlinkTarget::Node(_) => {
                    panic!("readlink returned SymlinkTarget::Node after remount")
                }
            }
            // Ensure the size that stat reports matches what is expected.
            let stat_result =
                child.entry.node.stat(locked, &current_task).expect("stat failed after remount");
            assert_eq!(stat_result.st_size as usize, LINK_SIZE);
        })
        .await;

        fixture.close().await;
    }
2048
    /// Creates a regular file, directory, and block device node with
    /// non-default modes under specific euid/egid credentials, then remounts
    /// and verifies mode, uid, gid, and rdev all persisted.
    #[::fuchsia::test]
    async fn test_mode_uid_gid_and_dev_persists() {
        const FILE_MODE: FileMode = mode!(IFREG, 0o467);
        const DIR_MODE: FileMode = mode!(IFDIR, 0o647);
        const BLK_MODE: FileMode = mode!(IFBLK, 0o746);

        let fixture = TestFixture::new().await;
        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed");

        // Simulate a first run of starnix.
        spawn_kernel_and_run(async move |locked, current_task| {
            let kernel = current_task.kernel();
            // Create the nodes as uid 1 / gid 2 so ownership persistence is
            // observable.
            let creds = Credentials::clone(&current_task.current_creds());
            current_task.set_creds(Credentials { euid: 1, fsuid: 1, egid: 2, fsgid: 2, ..creds });
            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
                rights,
            )
            .expect("new_fs failed");
            let ns = Namespace::new(fs);
            // Clear the umask so the literal modes above are applied.
            current_task.fs().set_umask(FileMode::from_bits(0));
            ns.root()
                .create_node(locked, &current_task, "file".into(), FILE_MODE, DeviceType::NONE)
                .expect("create_node failed");
            ns.root()
                .create_node(locked, &current_task, "dir".into(), DIR_MODE, DeviceType::NONE)
                .expect("create_node failed");
            ns.root()
                .create_node(locked, &current_task, "dev".into(), BLK_MODE, DeviceType::RANDOM)
                .expect("create_node failed");
        })
        .await;

        // Simulate a second run.
        let fixture = TestFixture::open(
            fixture.close().await,
            TestFixtureOptions { format: false, ..Default::default() },
        )
        .await;

        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed");

        spawn_kernel_and_run(async move |locked, current_task| {
            let kernel = current_task.kernel();
            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
                rights,
            )
            .expect("new_fs failed");
            let ns = Namespace::new(fs);
            let mut context = LookupContext::new(SymlinkMode::NoFollow);
            let child = ns
                .root()
                .lookup_child(locked, &current_task, &mut context, "file".into())
                .expect("lookup_child failed");
            assert_matches!(
                &*child.entry.node.info(),
                FsNodeInfo { mode: FILE_MODE, uid: 1, gid: 2, rdev: DeviceType::NONE, .. }
            );
            let child = ns
                .root()
                .lookup_child(locked, &current_task, &mut context, "dir".into())
                .expect("lookup_child failed");
            assert_matches!(
                &*child.entry.node.info(),
                FsNodeInfo { mode: DIR_MODE, uid: 1, gid: 2, rdev: DeviceType::NONE, .. }
            );
            let child = ns
                .root()
                .lookup_child(locked, &current_task, &mut context, "dev".into())
                .expect("lookup_child failed");
            assert_matches!(
                &*child.entry.node.info(),
                FsNodeInfo { mode: BLK_MODE, uid: 1, gid: 2, rdev: DeviceType::RANDOM, .. }
            );
        })
        .await;
        fixture.close().await;
    }
2138
    /// Verifies the inode number reported for the ".." directory entry: the
    /// root's ".." is the root itself, and a subdirectory's ".." is its
    /// parent.
    #[::fuchsia::test]
    async fn test_dot_dot_inode_numbers() {
        let fixture = TestFixture::new().await;
        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed");

        const MODE: FileMode = FileMode::from_bits(FileMode::IFDIR.bits() | 0o777);

        spawn_kernel_and_run(async |locked, current_task| {
            let kernel = current_task.kernel();
            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
                rights,
            )
            .expect("new_fs failed");
            let ns = Namespace::new(fs);
            current_task.fs().set_umask(FileMode::from_bits(0));
            // Build a two-level directory hierarchy: /dir/dir.
            let sub_dir1 = ns
                .root()
                .create_node(locked, &current_task, "dir".into(), MODE, DeviceType::NONE)
                .expect("create_node failed");
            let sub_dir2 = sub_dir1
                .create_node(locked, &current_task, "dir".into(), MODE, DeviceType::NONE)
                .expect("create_node failed");

            let dir_handle = ns
                .root()
                .entry
                .open_anonymous(locked, &current_task, OpenFlags::RDONLY)
                .expect("open failed");

            // DirentSink that records the inode number reported for "..".
            #[derive(Default)]
            struct Sink {
                offset: off_t,
                dot_dot_inode_num: u64,
            }
            impl DirentSink for Sink {
                fn add(
                    &mut self,
                    inode_num: ino_t,
                    offset: off_t,
                    entry_type: DirectoryEntryType,
                    name: &FsStr,
                ) -> Result<(), Errno> {
                    if name == ".." {
                        self.dot_dot_inode_num = inode_num;
                        assert_eq!(entry_type, DirectoryEntryType::DIR);
                    }
                    self.offset = offset;
                    Ok(())
                }
                fn offset(&self) -> off_t {
                    self.offset
                }
            }
            let mut sink = Sink::default();
            dir_handle.readdir(locked, &current_task, &mut sink).expect("readdir failed");

            // inode_num for .. for the root should be the same as root.
            assert_eq!(sink.dot_dot_inode_num, ns.root().entry.node.ino);

            let dir_handle = sub_dir1
                .entry
                .open_anonymous(locked, &current_task, OpenFlags::RDONLY)
                .expect("open failed");
            let mut sink = Sink::default();
            dir_handle.readdir(locked, &current_task, &mut sink).expect("readdir failed");

            // inode_num for .. for the first sub directory should be the same as root.
            assert_eq!(sink.dot_dot_inode_num, ns.root().entry.node.ino);

            let dir_handle = sub_dir2
                .entry
                .open_anonymous(locked, &current_task, OpenFlags::RDONLY)
                .expect("open failed");
            let mut sink = Sink::default();
            dir_handle.readdir(locked, &current_task, &mut sink).expect("readdir failed");

            // inode_num for .. for the second subdir should be the first subdir.
            assert_eq!(sink.dot_dot_inode_num, sub_dir1.entry.node.ino);
        })
        .await;
        fixture.close().await;
    }
2227
    /// Special nodes (here, a FIFO) stored on a RemoteFs must reject
    /// regular-file operations: truncate fails with EINVAL on the FIFO but
    /// succeeds on a regular file created alongside it.
    #[::fuchsia::test]
    async fn test_remote_special_node() {
        let fixture = TestFixture::new().await;
        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed");

        const FIFO_MODE: FileMode = FileMode::from_bits(FileMode::IFIFO.bits() | 0o777);
        const REG_MODE: FileMode = FileMode::from_bits(FileMode::IFREG.bits());

        spawn_kernel_and_run(async |locked, current_task| {
            let kernel = current_task.kernel();
            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
                rights,
            )
            .expect("new_fs failed");
            let ns = Namespace::new(fs);
            current_task.fs().set_umask(FileMode::from_bits(0));
            let root = ns.root();

            // Create RemoteSpecialNode (e.g. FIFO)
            root.create_node(locked, &current_task, "fifo".into(), FIFO_MODE, DeviceType::NONE)
                .expect("create_node failed");
            let mut context = LookupContext::new(SymlinkMode::NoFollow);
            let fifo_node = root
                .lookup_child(locked, &current_task, &mut context, "fifo".into())
                .expect("lookup_child failed");

            // Test that we get expected behaviour for RemoteSpecialNode operation, e.g.
            // test that truncate should return EINVAL
            match fifo_node.truncate(locked, &current_task, 0) {
                Ok(_) => {
                    panic!("truncate passed for special node")
                }
                Err(errno) if errno == EINVAL => {}
                Err(e) => {
                    panic!("truncate failed with error {:?}", e)
                }
            };

            // Create regular RemoteNode
            root.create_node(locked, &current_task, "file".into(), REG_MODE, DeviceType::NONE)
                .expect("create_node failed");
            let mut context = LookupContext::new(SymlinkMode::NoFollow);
            let reg_node = root
                .lookup_child(locked, &current_task, &mut context, "file".into())
                .expect("lookup_child failed");

            // We should be able to perform truncate on regular files
            reg_node.truncate(locked, &current_task, 0).expect("truncate failed");
        })
        .await;
        fixture.close().await;
    }
2286
    /// Creates a hard link on a RemoteFs, remounts the fixture, and verifies
    /// that both names resolve to the same in-memory node.
    #[::fuchsia::test]
    async fn test_hard_link() {
        let fixture = TestFixture::new().await;
        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed");

        spawn_kernel_and_run(async move |locked, current_task| {
            let kernel = current_task.kernel();
            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
                rights,
            )
            .expect("new_fs failed");
            let ns = Namespace::new(fs);
            current_task.fs().set_umask(FileMode::from_bits(0));
            let node = ns
                .root()
                .create_node(
                    locked,
                    &current_task,
                    "file1".into(),
                    mode!(IFREG, 0o666),
                    DeviceType::NONE,
                )
                .expect("create_node failed");
            // Link "file2" to the same underlying node as "file1".
            ns.root()
                .entry
                .node
                .link(locked, &current_task, &ns.root().mount, "file2".into(), &node.entry.node)
                .expect("link failed");
        })
        .await;

        // Remount without reformatting so the link must come from disk.
        let fixture = TestFixture::open(
            fixture.close().await,
            TestFixtureOptions { format: false, ..Default::default() },
        )
        .await;

        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed");

        spawn_kernel_and_run(async move |locked, current_task| {
            let kernel = current_task.kernel();
            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
                rights,
            )
            .expect("new_fs failed");
            let ns = Namespace::new(fs);
            let mut context = LookupContext::new(SymlinkMode::NoFollow);
            let child1 = ns
                .root()
                .lookup_child(locked, &current_task, &mut context, "file1".into())
                .expect("lookup_child failed");
            let child2 = ns
                .root()
                .lookup_child(locked, &current_task, &mut context, "file2".into())
                .expect("lookup_child failed");
            // Hard links must share a single FsNode instance.
            assert!(Arc::ptr_eq(&child1.entry.node, &child2.entry.node));
        })
        .await;
        fixture.close().await;
    }
2359
2360    #[::fuchsia::test]
2361    async fn test_lookup_on_fsverity_enabled_file() {
2362        let fixture = TestFixture::new().await;
2363        let (server, client) = zx::Channel::create();
2364        fixture.root().clone(server.into()).expect("clone failed");
2365
2366        const MODE: FileMode = FileMode::from_bits(FileMode::IFREG.bits() | 0o467);
2367
2368        spawn_kernel_and_run(async move |locked, current_task| {
2369            let kernel = current_task.kernel();
2370            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
2371            let fs = RemoteFs::new_fs(
2372                locked,
2373                &kernel,
2374                client,
2375                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
2376                rights,
2377            )
2378            .expect("new_fs failed");
2379            let ns = Namespace::new(fs);
2380            current_task.fs().set_umask(FileMode::from_bits(0));
2381            let file = ns
2382                .root()
2383                .create_node(locked, &current_task, "file".into(), MODE, DeviceType::NONE)
2384                .expect("create_node failed");
2385            // Enable verity on the file.
2386            let desc = fsverity_descriptor {
2387                version: 1,
2388                hash_algorithm: 1,
2389                salt_size: 32,
2390                log_blocksize: 12,
2391                ..Default::default()
2392            };
2393            file.entry.node.ops().enable_fsverity(&desc).expect("enable fsverity failed");
2394        })
2395        .await;
2396
2397        // Tear down the kernel and open the file again. The file should no longer be cached.
2398        // Test that lookup works as expected for an fsverity-enabled file.
2399        let fixture = TestFixture::open(
2400            fixture.close().await,
2401            TestFixtureOptions { format: false, ..Default::default() },
2402        )
2403        .await;
2404        let (server, client) = zx::Channel::create();
2405        fixture.root().clone(server.into()).expect("clone failed");
2406
2407        spawn_kernel_and_run(async move |locked, current_task| {
2408            let kernel = current_task.kernel();
2409            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
2410            let fs = RemoteFs::new_fs(
2411                locked,
2412                &kernel,
2413                client,
2414                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
2415                rights,
2416            )
2417            .expect("new_fs failed");
2418            let ns = Namespace::new(fs);
2419            let mut context = LookupContext::new(SymlinkMode::NoFollow);
2420            let _child = ns
2421                .root()
2422                .lookup_child(locked, &current_task, &mut context, "file".into())
2423                .expect("lookup_child failed");
2424        })
2425        .await;
2426        fixture.close().await;
2427    }
2428
2429    #[::fuchsia::test]
2430    async fn test_update_attributes_persists() {
2431        let fixture = TestFixture::new().await;
2432        let (server, client) = zx::Channel::create();
2433        fixture.root().clone(server.into()).expect("clone failed");
2434
2435        const MODE: FileMode = FileMode::from_bits(FileMode::IFREG.bits() | 0o467);
2436
2437        spawn_kernel_and_run(async move |locked, current_task| {
2438            let kernel = current_task.kernel();
2439            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
2440            let fs = RemoteFs::new_fs(
2441                locked,
2442                &kernel,
2443                client,
2444                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
2445                rights,
2446            )
2447            .expect("new_fs failed");
2448            let ns = Namespace::new(fs);
2449            current_task.fs().set_umask(FileMode::from_bits(0));
2450            let file = ns
2451                .root()
2452                .create_node(locked, &current_task, "file".into(), MODE, DeviceType::NONE)
2453                .expect("create_node failed");
2454            // Change the mode, this change should persist
2455            file.entry
2456                .node
2457                .chmod(locked, &current_task, &file.mount, MODE | FileMode::ALLOW_ALL)
2458                .expect("chmod failed");
2459        })
2460        .await;
2461
2462        // Tear down the kernel and open the file again. Check that changes persisted.
2463        let fixture = TestFixture::open(
2464            fixture.close().await,
2465            TestFixtureOptions { format: false, ..Default::default() },
2466        )
2467        .await;
2468        let (server, client) = zx::Channel::create();
2469        fixture.root().clone(server.into()).expect("clone failed");
2470
2471        spawn_kernel_and_run(async move |locked, current_task| {
2472            let kernel = current_task.kernel();
2473            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
2474            let fs = RemoteFs::new_fs(
2475                locked,
2476                &kernel,
2477                client,
2478                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
2479                rights,
2480            )
2481            .expect("new_fs failed");
2482            let ns = Namespace::new(fs);
2483            let mut context = LookupContext::new(SymlinkMode::NoFollow);
2484            let child = ns
2485                .root()
2486                .lookup_child(locked, &current_task, &mut context, "file".into())
2487                .expect("lookup_child failed");
2488            assert_eq!(child.entry.node.info().mode, MODE | FileMode::ALLOW_ALL);
2489        })
2490        .await;
2491        fixture.close().await;
2492    }
2493
2494    #[::fuchsia::test]
2495    async fn test_statfs() {
2496        let fixture = TestFixture::new().await;
2497        let (server, client) = zx::Channel::create();
2498        fixture.root().clone(server.into()).expect("clone failed");
2499
2500        spawn_kernel_and_run(async move |locked, current_task| {
2501            let kernel = current_task.kernel();
2502            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
2503            let fs = RemoteFs::new_fs(
2504                locked,
2505                &kernel,
2506                client,
2507                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
2508                rights,
2509            )
2510            .expect("new_fs failed");
2511
2512            let statfs = fs.statfs(locked, &current_task).expect("statfs failed");
2513            assert!(statfs.f_type != 0);
2514            assert!(statfs.f_bsize > 0);
2515            assert!(statfs.f_blocks > 0);
2516            assert!(statfs.f_bfree > 0 && statfs.f_bfree <= statfs.f_blocks);
2517            assert!(statfs.f_files > 0);
2518            assert!(statfs.f_ffree > 0 && statfs.f_ffree <= statfs.f_files);
2519            assert!(statfs.f_fsid.val[0] != 0 || statfs.f_fsid.val[1] != 0);
2520            assert!(statfs.f_namelen > 0);
2521            assert!(statfs.f_frsize > 0);
2522        })
2523        .await;
2524
2525        fixture.close().await;
2526    }
2527
2528    #[::fuchsia::test]
2529    async fn test_allocate() {
2530        let fixture = TestFixture::new().await;
2531        let (server, client) = zx::Channel::create();
2532        fixture.root().clone(server.into()).expect("clone failed");
2533
2534        spawn_kernel_and_run(async move |locked, current_task| {
2535            let kernel = current_task.kernel();
2536            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
2537            let fs = RemoteFs::new_fs(
2538                locked,
2539                &kernel,
2540                client,
2541                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
2542                rights,
2543            )
2544            .expect("new_fs failed");
2545            let ns = Namespace::new(fs);
2546            current_task.fs().set_umask(FileMode::from_bits(0));
2547            let root = ns.root();
2548
2549            const REG_MODE: FileMode = FileMode::from_bits(FileMode::IFREG.bits());
2550            root.create_node(locked, &current_task, "file".into(), REG_MODE, DeviceType::NONE)
2551                .expect("create_node failed");
2552            let mut context = LookupContext::new(SymlinkMode::NoFollow);
2553            let reg_node = root
2554                .lookup_child(locked, &current_task, &mut context, "file".into())
2555                .expect("lookup_child failed");
2556
2557            reg_node
2558                .entry
2559                .node
2560                .fallocate(locked, &current_task, FallocMode::Allocate { keep_size: false }, 0, 20)
2561                .expect("truncate failed");
2562        })
2563        .await;
2564        fixture.close().await;
2565    }
2566
2567    #[::fuchsia::test]
2568    async fn test_allocate_overflow() {
2569        let fixture = TestFixture::new().await;
2570        let (server, client) = zx::Channel::create();
2571        fixture.root().clone(server.into()).expect("clone failed");
2572
2573        spawn_kernel_and_run(async move |locked, current_task| {
2574            let kernel = current_task.kernel();
2575            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
2576            let fs = RemoteFs::new_fs(
2577                locked,
2578                &kernel,
2579                client,
2580                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
2581                rights,
2582            )
2583            .expect("new_fs failed");
2584            let ns = Namespace::new(fs);
2585            current_task.fs().set_umask(FileMode::from_bits(0));
2586            let root = ns.root();
2587
2588            const REG_MODE: FileMode = FileMode::from_bits(FileMode::IFREG.bits());
2589            root.create_node(locked, &current_task, "file".into(), REG_MODE, DeviceType::NONE)
2590                .expect("create_node failed");
2591            let mut context = LookupContext::new(SymlinkMode::NoFollow);
2592            let reg_node = root
2593                .lookup_child(locked, &current_task, &mut context, "file".into())
2594                .expect("lookup_child failed");
2595
2596            reg_node
2597                .entry
2598                .node
2599                .fallocate(
2600                    locked,
2601                    &current_task,
2602                    FallocMode::Allocate { keep_size: false },
2603                    1,
2604                    u64::MAX,
2605                )
2606                .expect_err("truncate unexpectedly passed");
2607        })
2608        .await;
2609        fixture.close().await;
2610    }
2611
    // Verifies that the mtime (time_modify) update caused by a write is persisted by
    // the remote filesystem and survives tearing down the kernel and remounting.
    #[::fuchsia::test]
    async fn test_time_modify_persists() {
        let fixture = TestFixture::new().await;
        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed");

        const MODE: FileMode = FileMode::from_bits(FileMode::IFREG.bits() | 0o467);

        // First kernel instance: create a file, write to it, and capture the resulting
        // mtime as reported by the remote filesystem.
        let last_modified = spawn_kernel_and_run(async move |locked, current_task| {
            let kernel = current_task.kernel();
            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
                rights,
            )
            .expect("new_fs failed");
            let ns: Arc<Namespace> = Namespace::new(fs);
            current_task.fs().set_umask(FileMode::from_bits(0));
            let child = ns
                .root()
                .create_node(locked, &current_task, "file".into(), MODE, DeviceType::NONE)
                .expect("create_node failed");
            // Write to file (this should update mtime (time_modify))
            let file = child
                .open(locked, &current_task, OpenFlags::RDWR, AccessCheck::default())
                .expect("open failed");
            // Call `fetch_and_refresh_info(..)` to refresh `time_modify` with the time managed by the
            // underlying filesystem
            let time_before_write = child
                .entry
                .node
                .fetch_and_refresh_info(locked, &current_task)
                .expect("fetch_and_refresh_info failed")
                .time_modify;
            let write_bytes: [u8; 5] = [1, 2, 3, 4, 5];
            let written = file
                .write(locked, &current_task, &mut VecInputBuffer::new(&write_bytes))
                .expect("write failed");
            assert_eq!(written, write_bytes.len());
            // Refresh again: the write must have advanced mtime past the pre-write value.
            let last_modified = child
                .entry
                .node
                .fetch_and_refresh_info(locked, &current_task)
                .expect("fetch_and_refresh_info failed")
                .time_modify;
            assert!(last_modified > time_before_write);
            // Return the observed mtime so it can be compared after the remount below.
            last_modified
        })
        .await;

        // Tear down the kernel and open the file again. Check that modification time is when we
        // last modified the contents of the file
        let fixture = TestFixture::open(
            fixture.close().await,
            TestFixtureOptions { format: false, ..Default::default() },
        )
        .await;
        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed");
        let refreshed_modified_time = spawn_kernel_and_run(async move |locked, current_task| {
            let kernel = current_task.kernel();
            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
                rights,
            )
            .expect("new_fs failed");
            let ns = Namespace::new(fs);
            let mut context = LookupContext::new(SymlinkMode::NoFollow);
            let child = ns
                .root()
                .lookup_child(locked, &current_task, &mut context, "file".into())
                .expect("lookup_child failed");
            // Fetch the persisted mtime from the freshly-mounted filesystem (the node
            // cannot be cached — this is a new kernel instance).
            let last_modified = child
                .entry
                .node
                .fetch_and_refresh_info(locked, &current_task)
                .expect("fetch_and_refresh_info failed")
                .time_modify;
            last_modified
        })
        .await;
        // The mtime observed before teardown must match what the remote filesystem
        // reports after the remount.
        assert_eq!(last_modified, refreshed_modified_time);

        fixture.close().await;
    }
2704
    // Verifies that `update_atime_mtime` can set atime and mtime independently:
    // updating one with `TimeUpdateType::Time` while passing `Omit` for the other
    // must leave the omitted timestamp untouched.
    #[::fuchsia::test]
    async fn test_update_atime_mtime() {
        let fixture = TestFixture::new().await;
        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed");

        const MODE: FileMode = FileMode::from_bits(FileMode::IFREG.bits() | 0o467);

        spawn_kernel_and_run(async move |locked, current_task| {
            let kernel = current_task.kernel();
            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
                rights,
            )
            .expect("new_fs failed");
            let ns: Arc<Namespace> = Namespace::new(fs);
            current_task.fs().set_umask(FileMode::from_bits(0));
            let child = ns
                .root()
                .create_node(locked, &current_task, "file".into(), MODE, DeviceType::NONE)
                .expect("create_node failed");

            // Snapshot the node's info before any explicit timestamp updates.
            let info_original = child
                .entry
                .node
                .fetch_and_refresh_info(locked, &current_task)
                .expect("fetch_and_refresh_info failed")
                .clone();

            // Set atime to a fixed value while omitting mtime.
            child
                .entry
                .node
                .update_atime_mtime(
                    locked,
                    &current_task,
                    &child.mount,
                    TimeUpdateType::Time(UtcInstant::from_nanos(30)),
                    TimeUpdateType::Omit,
                )
                .expect("update_atime_mtime failed");
            let info_after_update = child
                .entry
                .node
                .fetch_and_refresh_info(locked, &current_task)
                .expect("fetch_and_refresh_info failed")
                .clone();
            // mtime is unchanged; atime reflects the explicit value.
            assert_eq!(info_after_update.time_modify, info_original.time_modify);
            assert_eq!(info_after_update.time_access, UtcInstant::from_nanos(30));

            // Now the inverse: set mtime while omitting atime.
            child
                .entry
                .node
                .update_atime_mtime(
                    locked,
                    &current_task,
                    &child.mount,
                    TimeUpdateType::Omit,
                    TimeUpdateType::Time(UtcInstant::from_nanos(50)),
                )
                .expect("update_atime_mtime failed");
            let info_after_update2 = child
                .entry
                .node
                .fetch_and_refresh_info(locked, &current_task)
                .expect("fetch_and_refresh_info failed")
                .clone();
            // mtime reflects the explicit value; atime keeps the value set above.
            assert_eq!(info_after_update2.time_modify, UtcInstant::from_nanos(50));
            assert_eq!(info_after_update2.time_access, UtcInstant::from_nanos(30));
        })
        .await;
        fixture.close().await;
    }
2781
    // Verifies that a write updates both ctime and mtime on the remote filesystem,
    // and that those updates are only visible after refreshing the cached node info
    // via `fetch_and_refresh_info` (the local cache is not updated eagerly).
    #[::fuchsia::test]
    async fn test_write_updates_mtime_ctime() {
        let fixture = TestFixture::new().await;
        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed");

        const MODE: FileMode = FileMode::from_bits(FileMode::IFREG.bits() | 0o467);

        spawn_kernel_and_run(async move |locked, current_task| {
            let kernel = current_task.kernel();
            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
                rights,
            )
            .expect("new_fs failed");
            let ns: Arc<Namespace> = Namespace::new(fs);
            current_task.fs().set_umask(FileMode::from_bits(0));
            let child = ns
                .root()
                .create_node(locked, &current_task, "file".into(), MODE, DeviceType::NONE)
                .expect("create_node failed");
            let file = child
                .open(locked, &current_task, OpenFlags::RDWR, AccessCheck::default())
                .expect("open failed");
            // Call `fetch_and_refresh_info(..)` to refresh ctime and mtime with the time managed by the
            // underlying filesystem
            let (ctime_before_write, mtime_before_write) = {
                let info = child
                    .entry
                    .node
                    .fetch_and_refresh_info(locked, &current_task)
                    .expect("fetch_and_refresh_info failed");
                (info.time_status_change, info.time_modify)
            };

            // Writing to a file should update ctime and mtime
            let write_bytes: [u8; 5] = [1, 2, 3, 4, 5];
            let written = file
                .write(locked, &current_task, &mut VecInputBuffer::new(&write_bytes))
                .expect("write failed");
            assert_eq!(written, write_bytes.len());

            // As Fxfs, the underlying filesystem in this test, can manage file timestamps,
            // we should not see an update in mtime and ctime without first refreshing the node with
            // the metadata from Fxfs.
            let (ctime_after_write_no_refresh, mtime_after_write_no_refresh) = {
                // `info()` reads the locally-cached values only — no round trip to Fxfs.
                let info = child.entry.node.info();
                (info.time_status_change, info.time_modify)
            };
            assert_eq!(ctime_after_write_no_refresh, ctime_before_write);
            assert_eq!(mtime_after_write_no_refresh, mtime_before_write);

            // Refresh information, we should see `info` with mtime and ctime from the remote
            // filesystem (assume this is true if the new timestamp values are greater than the ones
            // without the refresh).
            let (ctime_after_write_refresh, mtime_after_write_refresh) = {
                let info = child
                    .entry
                    .node
                    .fetch_and_refresh_info(locked, &current_task)
                    .expect("fetch_and_refresh_info failed");
                (info.time_status_change, info.time_modify)
            };
            // The write updates both timestamps to the same instant, and both advanced
            // past the stale cached values.
            assert_eq!(ctime_after_write_refresh, mtime_after_write_refresh);
            assert!(ctime_after_write_refresh > ctime_after_write_no_refresh);
        })
        .await;
        fixture.close().await;
    }
2855
    // Verifies that enabling the casefold attribute on a directory is persisted by
    // the remote filesystem and survives a kernel teardown and remount.
    #[::fuchsia::test]
    async fn test_casefold_persists() {
        let fixture = TestFixture::new().await;
        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed");

        // First kernel instance: create a directory and turn on casefold.
        spawn_kernel_and_run(async move |locked, current_task| {
            let kernel = current_task.kernel();
            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
                rights,
            )
            .expect("new_fs failed");
            let ns: Arc<Namespace> = Namespace::new(fs);
            let child = ns
                .root()
                .create_node(
                    locked,
                    &current_task,
                    "dir".into(),
                    FileMode::ALLOW_ALL.with_type(FileMode::IFDIR),
                    DeviceType::NONE,
                )
                .expect("create_node failed");
            // Set the casefold flag through the generic attribute-update path.
            child
                .entry
                .node
                .update_attributes(locked, &current_task, |info| {
                    info.casefold = true;
                    Ok(())
                })
                .expect("enable casefold")
        })
        .await;

        // Tear down the kernel and open the dir again. Check that casefold is preserved.
        let fixture = TestFixture::open(
            fixture.close().await,
            TestFixtureOptions { format: false, ..Default::default() },
        )
        .await;
        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed");
        let casefold = spawn_kernel_and_run(async move |locked, current_task| {
            let kernel = current_task.kernel();
            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
                rights,
            )
            .expect("new_fs failed");
            let ns = Namespace::new(fs);
            let mut context = LookupContext::new(SymlinkMode::NoFollow);
            let child = ns
                .root()
                .lookup_child(locked, &current_task, &mut context, "dir".into())
                .expect("lookup_child failed");
            // Read the casefold flag back from the remote filesystem and return it
            // to the test body for the final assertion.
            let casefold = child
                .entry
                .node
                .fetch_and_refresh_info(locked, &current_task)
                .expect("fetch_and_refresh_info failed")
                .casefold;
            casefold
        })
        .await;
        assert!(casefold);

        fixture.close().await;
    }
2933
    // Verifies that an atime (time_access) update triggered by reading a file on a
    // RELATIME mount is persisted by the remote filesystem and survives a kernel
    // teardown and remount.
    #[::fuchsia::test]
    async fn test_update_time_access_persists() {
        const TEST_FILE: &str = "test_file";

        let fixture = TestFixture::new().await;
        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed");
        // Set up file.
        let info_after_read = spawn_kernel_and_run(async move |locked, current_task| {
            let kernel = current_task.kernel();
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                FileSystemOptions {
                    source: FlyByteStr::new(b"/"),
                    flags: MountFlags::RELATIME,
                    ..Default::default()
                },
                fio::PERM_READABLE | fio::PERM_WRITABLE,
            )
            .expect("new_fs failed");
            let ns = Namespace::new_with_flags(fs, MountFlags::RELATIME);
            let child = ns
                .root()
                .open_create_node(
                    locked,
                    &current_task,
                    TEST_FILE.into(),
                    FileMode::ALLOW_ALL.with_type(FileMode::IFREG),
                    DeviceType::NONE,
                    OpenFlags::empty(),
                )
                .expect("create_node failed");

            let file_handle = child
                .open(locked, &current_task, OpenFlags::RDWR, AccessCheck::default())
                .expect("open failed");

            // Expect atime to be updated as this is the first file access since the
            // last file modification or status change.
            file_handle
                .read(locked, &current_task, &mut VecOutputBuffer::new(10))
                .expect("read failed");

            // Call `fetch_and_refresh_info` to persist atime update.
            let info_after_read = child
                .entry
                .node
                .fetch_and_refresh_info(locked, &current_task)
                .expect("fetch_and_refresh_info failed")
                .clone();

            // Return the post-read info so atime can be compared after the remount.
            info_after_read
        })
        .await;

        // Tear down the kernel and open the file again. The file should no longer be cached.
        let fixture = TestFixture::open(
            fixture.close().await,
            TestFixtureOptions { format: false, ..Default::default() },
        )
        .await;

        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed");

        spawn_kernel_and_run(async move |locked, current_task| {
            let kernel = current_task.kernel();
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                FileSystemOptions {
                    source: FlyByteStr::new(b"/"),
                    flags: MountFlags::RELATIME,
                    ..Default::default()
                },
                fio::PERM_READABLE | fio::PERM_WRITABLE,
            )
            .expect("new_fs failed");
            let ns = Namespace::new_with_flags(fs, MountFlags::RELATIME);
            let mut context = LookupContext::new(SymlinkMode::NoFollow);
            let child = ns
                .root()
                .lookup_child(locked, &current_task, &mut context, TEST_FILE.into())
                .expect("lookup_child failed");

            // Get info - this should be refreshed with info that was persisted before
            // we tore down the kernel.
            let persisted_info = child
                .entry
                .node
                .fetch_and_refresh_info(locked, &current_task)
                .expect("fetch_and_refresh_info failed")
                .clone();
            assert_eq!(info_after_read.time_access, persisted_info.time_access);
        })
        .await;
        fixture.close().await;
    }
3035
    // Verifies RELATIME semantics for atime updates: a read only advances the
    // persisted atime when it is the first access since the last modification or
    // status change; subsequent reads leave atime unchanged until the file is
    // modified again.
    #[::fuchsia::test]
    async fn test_pending_access_time_updates() {
        const TEST_FILE: &str = "test_file";

        let fixture = TestFixture::new().await;
        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed");

        spawn_kernel_and_run(async move |locked, current_task| {
            // NOTE(review): other tests in this module use `current_task.kernel()`;
            // the field access + clone here is presumably equivalent — confirm.
            let kernel = current_task.kernel.clone();
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                FileSystemOptions {
                    source: FlyByteStr::new(b"/"),
                    flags: MountFlags::RELATIME,
                    ..Default::default()
                },
                fio::PERM_READABLE | fio::PERM_WRITABLE,
            )
            .expect("new_fs failed");

            let ns = Namespace::new_with_flags(fs, MountFlags::RELATIME);
            let child = ns
                .root()
                .open_create_node(
                    locked,
                    &current_task,
                    TEST_FILE.into(),
                    FileMode::ALLOW_ALL.with_type(FileMode::IFREG),
                    DeviceType::NONE,
                    OpenFlags::empty(),
                )
                .expect("create_node failed");

            let file_handle = child
                .open(locked, &current_task, OpenFlags::RDWR, AccessCheck::default())
                .expect("open failed");

            // Expect atime to be updated as this is the first file access since the last
            // file modification or status change.
            file_handle
                .read(locked, &current_task, &mut VecOutputBuffer::new(10))
                .expect("read failed");

            let atime_after_first_read = child
                .entry
                .node
                .fetch_and_refresh_info(locked, &current_task)
                .expect("fetch_and_refresh_info failed")
                .time_access;

            // Read again. Under RELATIME, this second read must not trigger another
            // persistent atime update, since atime was already refreshed after the
            // last modification/status change.
            file_handle
                .read(locked, &current_task, &mut VecOutputBuffer::new(10))
                .expect("read failed");

            let atime_after_second_read = child
                .entry
                .node
                .fetch_and_refresh_info(locked, &current_task)
                .expect("fetch_and_refresh_info failed")
                .time_access;
            assert_eq!(atime_after_first_read, atime_after_second_read);

            // Do another operation that will update ctime and/or mtime but not atime.
            let write_bytes: [u8; 5] = [1, 2, 3, 4, 5];
            let _written = file_handle
                .write(locked, &current_task, &mut VecInputBuffer::new(&write_bytes))
                .expect("write failed");

            // Read again (atime should be updated).
            file_handle
                .read(locked, &current_task, &mut VecOutputBuffer::new(10))
                .expect("read failed");

            // The write reset the relatime condition, so this read advanced atime.
            assert!(
                atime_after_second_read
                    < child
                        .entry
                        .node
                        .fetch_and_refresh_info(locked, &current_task)
                        .expect("fetch_and_refresh_info failed")
                        .time_access
            );
        })
        .await;
        fixture.close().await;
    }
3127}