starnix_core/fs/fuchsia/
remote.rs

1// Copyright 2021 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5use crate::fs::fuchsia::RemoteUnixDomainSocket;
6use crate::fs::fuchsia::remote_volume::RemoteVolume;
7use crate::fs::fuchsia::sync_file::{SyncFence, SyncFile, SyncPoint, Timeline};
8use crate::mm::memory::MemoryObject;
9use crate::mm::{ProtectionFlags, VMEX_RESOURCE};
10use crate::security;
11use crate::task::{CurrentTask, FullCredentials, Kernel};
12use crate::vfs::buffers::{InputBuffer, OutputBuffer, with_iovec_segments};
13use crate::vfs::fsverity::FsVerityState;
14use crate::vfs::socket::{Socket, SocketFile, ZxioBackedSocket};
15use crate::vfs::{
16    Anon, AppendLockGuard, CacheConfig, CacheMode, DEFAULT_BYTES_PER_BLOCK, DirectoryEntryType,
17    DirentSink, FallocMode, FileHandle, FileObject, FileOps, FileSystem, FileSystemHandle,
18    FileSystemOps, FileSystemOptions, FsNode, FsNodeHandle, FsNodeInfo, FsNodeOps, FsStr, FsString,
19    SeekTarget, SymlinkTarget, XattrOp, XattrStorage, default_ioctl, default_seek,
20    fileops_impl_directory, fileops_impl_nonseekable, fileops_impl_noop_sync,
21    fileops_impl_seekable, fs_node_impl_not_dir, fs_node_impl_symlink, fs_node_impl_xattr_delegate,
22};
23use bstr::ByteSlice;
24use fidl::endpoints::DiscoverableProtocolMarker as _;
25use fuchsia_runtime::UtcInstant;
26use linux_uapi::SYNC_IOC_MAGIC;
27use once_cell::sync::OnceCell;
28use starnix_crypt::EncryptionKeyId;
29use starnix_logging::{CATEGORY_STARNIX_MM, impossible_error, log_warn, trace_duration};
30use starnix_sync::{
31    FileOpsCore, LockEqualOrBefore, Locked, Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard,
32    Unlocked,
33};
34use starnix_syscalls::{SyscallArg, SyscallResult};
35use starnix_types::vfs::default_statfs;
36use starnix_uapi::auth::FsCred;
37use starnix_uapi::device_type::DeviceType;
38use starnix_uapi::errors::Errno;
39use starnix_uapi::file_mode::FileMode;
40use starnix_uapi::mount_flags::MountFlags;
41use starnix_uapi::open_flags::OpenFlags;
42use starnix_uapi::{
43    __kernel_fsid_t, errno, error, from_status_like_fdio, fsverity_descriptor, ino_t, off_t, statfs,
44};
45use std::mem::MaybeUninit;
46use std::sync::Arc;
47use syncio::zxio::{
48    ZXIO_NODE_PROTOCOL_DIRECTORY, ZXIO_NODE_PROTOCOL_FILE, ZXIO_NODE_PROTOCOL_SYMLINK,
49    ZXIO_OBJECT_TYPE_DATAGRAM_SOCKET, ZXIO_OBJECT_TYPE_DIR, ZXIO_OBJECT_TYPE_FILE,
50    ZXIO_OBJECT_TYPE_NONE, ZXIO_OBJECT_TYPE_PACKET_SOCKET, ZXIO_OBJECT_TYPE_RAW_SOCKET,
51    ZXIO_OBJECT_TYPE_STREAM_SOCKET, ZXIO_OBJECT_TYPE_SYNCHRONOUS_DATAGRAM_SOCKET, zxio_node_attr,
52};
53use syncio::{
54    AllocateMode, DirentIterator, SelinuxContextAttr, XattrSetMode, ZXIO_ROOT_HASH_LENGTH, Zxio,
55    ZxioDirent, ZxioOpenOptions, zxio_fsverity_descriptor_t, zxio_node_attr_has_t,
56    zxio_node_attributes_t,
57};
58use zx::{Counter, HandleBased};
59use {
60    fidl_fuchsia_io as fio, fidl_fuchsia_starnix_binder as fbinder,
61    fidl_fuchsia_unknown as funknown,
62};
63
64pub fn new_remote_fs(
65    locked: &mut Locked<Unlocked>,
66    current_task: &CurrentTask,
67    options: FileSystemOptions,
68) -> Result<FileSystemHandle, Errno> {
69    let kernel = current_task.kernel();
70    // TODO(379929394): After soft transition of fstab is complete, we should
71    // validate the requested_path is a non-empty, non-root path.
72    let requested_path = std::str::from_utf8(&options.source)
73        .map_err(|_| errno!(EINVAL, "source path is not utf8"))?;
74    let mut create_flags =
75        fio::PERM_READABLE | fio::Flags::FLAG_MAYBE_CREATE | fio::Flags::PROTOCOL_DIRECTORY;
76    if !options.flags.contains(MountFlags::RDONLY) {
77        create_flags |= fio::PERM_WRITABLE;
78    }
79    let (root_proxy, subdir) = kernel.open_ns_dir(requested_path, create_flags)?;
80
81    let subdir = if subdir.is_empty() { ".".to_string() } else { subdir };
82    let mut open_rights = fio::PERM_READABLE;
83    if !options.flags.contains(MountFlags::RDONLY) {
84        open_rights |= fio::PERM_WRITABLE;
85    }
86    let mut subdir_options = options;
87    subdir_options.source = subdir.into();
88    create_remotefs_filesystem(locked, kernel, &root_proxy, subdir_options, open_rights)
89}
90
91/// Create a filesystem to access the content of the fuchsia directory available at `fs_src` inside
92/// `pkg`.
93pub fn create_remotefs_filesystem<L>(
94    locked: &mut Locked<L>,
95    kernel: &Kernel,
96    root: &fio::DirectorySynchronousProxy,
97    options: FileSystemOptions,
98    rights: fio::Flags,
99) -> Result<FileSystemHandle, Errno>
100where
101    L: LockEqualOrBefore<FileOpsCore>,
102{
103    let root = syncio::directory_open_directory_async(
104        root,
105        std::str::from_utf8(&options.source)
106            .map_err(|_| errno!(EINVAL, "source path is not utf8"))?,
107        rights,
108    )
109    .map_err(|e| errno!(EIO, format!("Failed to open root: {e}")))?;
110    RemoteFs::new_fs(locked, kernel, root.into_channel(), options, rights)
111}
112
pub struct RemoteFs {
    // If true, trust the remote file system's IDs (which requires that the remote file system does
    // not span mounts).  This must be true to properly support hard links.  If this is false, the
    // same node can end up having different IDs as it leaves and reenters the node cache.
    // TODO(https://fxbug.dev/42081972): At the time of writing, package directories do not have
    // unique IDs so this *must* be false in that case.
    use_remote_ids: bool,

    // Synchronous connection to the remote root directory, used for filesystem-wide
    // operations (e.g. `QueryFilesystem` in `statfs`).
    root_proxy: fio::DirectorySynchronousProxy,
}
123
124impl RemoteFs {
125    /// Returns a reference to a RemoteFs given a reference to a FileSystem.
126    ///
127    /// # Panics
128    ///
129    /// This will panic if `fs`'s ops aren't `RemoteFs`, so this should only be called when this is
130    /// known to be the case.
131    fn from_fs(fs: &FileSystem) -> &RemoteFs {
132        if let Some(remote_vol) = fs.downcast_ops::<RemoteVolume>() {
133            remote_vol.remotefs()
134        } else {
135            fs.downcast_ops::<RemoteFs>().unwrap()
136        }
137    }
138}
139
/// Filesystem magic reported via `statfs` when the remote server supplies none:
/// the bytes "f.io" interpreted as a big-endian u32.
const REMOTE_FS_MAGIC: u32 = u32::from_be_bytes(*b"f.io");
/// sync_file ioctl function numbers (combined with `SYNC_IOC_MAGIC`).
/// NOTE(review): assumed to mirror Linux's `<linux/sync_file.h>` values — confirm against uapi.
const SYNC_IOC_FILE_INFO: u8 = 4;
const SYNC_IOC_MERGE: u8 = 3;
143
impl FileSystemOps for RemoteFs {
    /// Reports filesystem statistics by forwarding `fuchsia.io/Directory.QueryFilesystem`
    /// to the remote server, falling back to defaults when the query is unsupported.
    fn statfs(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        _fs: &FileSystem,
        _current_task: &CurrentTask,
    ) -> Result<statfs, Errno> {
        let (status, info) = self
            .root_proxy
            .query_filesystem(zx::MonotonicInstant::INFINITE)
            .map_err(|_| errno!(EIO))?;
        // Not all remote filesystems support `QueryFilesystem`, many return ZX_ERR_NOT_SUPPORTED.
        if status == 0 {
            if let Some(info) = info {
                // Convert byte counts into block counts, clamping to i64::MAX rather than
                // overflowing; a zero block size yields zero counts.
                let (total_blocks, free_blocks) = if info.block_size > 0 {
                    (
                        (info.total_bytes / u64::from(info.block_size))
                            .try_into()
                            .unwrap_or(i64::MAX),
                        ((info.total_bytes.saturating_sub(info.used_bytes))
                            / u64::from(info.block_size))
                        .try_into()
                        .unwrap_or(i64::MAX),
                    )
                } else {
                    (0, 0)
                };

                // Split the 64-bit filesystem ID into the two 32-bit words statfs expects.
                let fsid = __kernel_fsid_t {
                    val: [
                        (info.fs_id & 0xffffffff) as i32,
                        ((info.fs_id >> 32) & 0xffffffff) as i32,
                    ],
                };

                return Ok(statfs {
                    f_type: info.fs_type as i64,
                    f_bsize: info.block_size.into(),
                    f_blocks: total_blocks,
                    f_bfree: free_blocks,
                    f_bavail: free_blocks,
                    f_files: info.total_nodes.try_into().unwrap_or(i64::MAX),
                    f_ffree: (info.total_nodes.saturating_sub(info.used_nodes))
                        .try_into()
                        .unwrap_or(i64::MAX),
                    f_fsid: fsid,
                    f_namelen: info.max_filename_size.try_into().unwrap_or(0),
                    f_frsize: info.block_size.into(),
                    ..statfs::default()
                });
            }
        }
        Ok(default_statfs(REMOTE_FS_MAGIC))
    }

    fn name(&self) -> &'static FsStr {
        "remotefs".into()
    }

    /// Node IDs come from the remote server when `use_remote_ids` is set
    /// (see the field comment on `RemoteFs`).
    fn uses_external_node_ids(&self) -> bool {
        self.use_remote_ids
    }

    /// Renames `old_name` under `old_parent` to `new_name` under `new_parent` on the server.
    /// Returns EXDEV if either parent isn't a remote node (cross-filesystem rename).
    fn rename(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        _fs: &FileSystem,
        current_task: &CurrentTask,
        old_parent: &FsNodeHandle,
        old_name: &FsStr,
        new_parent: &FsNodeHandle,
        new_name: &FsStr,
        _renamed: &FsNodeHandle,
        _replaced: Option<&FsNodeHandle>,
    ) -> Result<(), Errno> {
        // Renames should fail if the src or target directory is encrypted and locked.
        old_parent.fail_if_locked(current_task)?;
        new_parent.fail_if_locked(current_task)?;

        let Some(old_parent) = old_parent.downcast_ops::<RemoteNode>() else {
            return error!(EXDEV);
        };
        let Some(new_parent) = new_parent.downcast_ops::<RemoteNode>() else {
            return error!(EXDEV);
        };
        old_parent
            .zxio
            .rename(get_name_str(old_name)?, &new_parent.zxio, get_name_str(new_name)?)
            .map_err(|status| from_status_like_fdio!(status))
    }

    /// The remote server maintains node timestamps itself.
    fn manages_timestamps(&self) -> bool {
        true
    }
}
239
impl RemoteFs {
    /// Connects to the remote filesystem whose root directory is served on `root`, and serves a
    /// new root connection on `server_end`.
    pub fn new(root: zx::Channel, server_end: zx::Channel) -> Result<RemoteFs, Errno> {
        // See if open3 works.  We assume that if open3 works on the root, it will work for all
        // descendent nodes in this filesystem.  At the time of writing, this is true for Fxfs.
        let root_proxy = fio::DirectorySynchronousProxy::new(root);
        root_proxy
            .open(
                ".",
                fio::Flags::PROTOCOL_DIRECTORY
                    | fio::PERM_READABLE
                    | fio::Flags::PERM_INHERIT_WRITE
                    | fio::Flags::PERM_INHERIT_EXECUTE
                    | fio::Flags::FLAG_SEND_REPRESENTATION,
                &fio::Options {
                    attributes: Some(fio::NodeAttributesQuery::ID),
                    ..Default::default()
                },
                server_end,
            )
            .map_err(|_| errno!(EIO))?;
        // Use remote IDs if the filesystem is Fxfs which we know will give us unique IDs.  Hard
        // links need to resolve to the same underlying FsNode, so we can only support hard links if
        // the remote file system will give us unique IDs.  The IDs are also used as the key in
        // caches, so we can't use remote IDs if the remote filesystem is not guaranteed to provide
        // unique IDs, or if the remote filesystem spans multiple filesystems.
        let (status, info) =
            root_proxy.query_filesystem(zx::MonotonicInstant::INFINITE).map_err(|_| errno!(EIO))?;
        // Be tolerant of errors here; many filesystems return `ZX_ERR_NOT_SUPPORTED`.
        let use_remote_ids = status == 0
            && info
                .map(|i| i.fs_type == fidl_fuchsia_fs::VfsType::Fxfs.into_primitive())
                .unwrap_or(false);
        Ok(RemoteFs { use_remote_ids, root_proxy })
    }

    /// Builds a `FileSystem` backed by the remote directory channel `root`.
    ///
    /// Forces the mount read-only when `rights` lacks write permission, and keys the root node by
    /// the server-provided ID only when remote IDs are trusted (see `use_remote_ids`).
    pub fn new_fs<L>(
        locked: &mut Locked<L>,
        kernel: &Kernel,
        root: zx::Channel,
        mut options: FileSystemOptions,
        rights: fio::Flags,
    ) -> Result<FileSystemHandle, Errno>
    where
        L: LockEqualOrBefore<FileOpsCore>,
    {
        let (client_end, server_end) = zx::Channel::create();
        let remotefs = RemoteFs::new(root, server_end)?;
        // Ask for the root's ID as part of the open handshake so the root node can be keyed.
        let mut attrs = zxio_node_attributes_t {
            has: zxio_node_attr_has_t { id: true, ..Default::default() },
            ..Default::default()
        };
        let (remote_node, node_id) =
            match Zxio::create_with_on_representation(client_end.into(), Some(&mut attrs)) {
                Err(status) => return Err(from_status_like_fdio!(status)),
                Ok(zxio) => (RemoteNode { zxio, rights }, attrs.id),
            };

        if !rights.contains(fio::PERM_WRITABLE) {
            options.flags |= MountFlags::RDONLY;
        }
        let use_remote_ids = remotefs.use_remote_ids;
        let fs = FileSystem::new(
            locked,
            kernel,
            CacheMode::Cached(CacheConfig::default()),
            remotefs,
            options,
        )?;
        if use_remote_ids {
            fs.create_root(node_id, remote_node);
        } else {
            // Remote IDs aren't trustworthy here; mint a local inode number instead.
            let root_ino = fs.allocate_ino();
            fs.create_root(root_ino, remote_node);
        }
        Ok(fs)
    }

    /// Whether node IDs reported by the remote server are used as inode numbers.
    pub fn use_remote_ids(&self) -> bool {
        self.use_remote_ids
    }
}
321
pub struct RemoteNode {
    /// The underlying Zircon I/O object for this remote node.
    ///
    /// We delegate to the zxio library for actually doing I/O with remote
    /// objects, including fuchsia.io.Directory and fuchsia.io.File objects.
    /// This structure lets us share code with FDIO and other Fuchsia clients.
    zxio: syncio::Zxio,

    /// The fuchsia.io rights for the dir handle. Subdirs will be opened with
    /// the same rights.  Also used to synthesize POSIX permission bits when the
    /// server doesn't report a mode (see `get_mode`).
    rights: fio::Flags,
}
334
impl RemoteNode {
    /// Wraps an existing `Zxio` connection; children will be opened with `rights`.
    pub fn new(zxio: syncio::Zxio, rights: fio::Flags) -> Self {
        Self { zxio, rights }
    }
}
340
341/// Create a file handle from a zx::NullableHandle.
342///
343/// The handle must be a channel, socket, vmo or debuglog object.  If the handle is a channel, then
344/// the channel must implement the `fuchsia.unknown/Queryable` protocol.
345///
346/// The resulting object will be owned by root, and will have permissions derived from the `flags`
347/// used to open this object. This is not the same as the permissions set if the object was created
348/// using Starnix itself. We use this mainly for interfacing with objects created outside of Starnix
349/// where these flags represent the desired permissions already.
350pub fn new_remote_file<L>(
351    locked: &mut Locked<L>,
352    current_task: &CurrentTask,
353    handle: zx::NullableHandle,
354    flags: OpenFlags,
355) -> Result<FileHandle, Errno>
356where
357    L: LockEqualOrBefore<FileOpsCore>,
358{
359    let remote_creds = current_task.full_current_creds();
360    let (attrs, ops) = remote_file_attrs_and_ops(current_task, handle.into(), remote_creds)?;
361    let mut rights = fio::Flags::empty();
362    if flags.can_read() {
363        rights |= fio::PERM_READABLE;
364    }
365    if flags.can_write() {
366        rights |= fio::PERM_WRITABLE;
367    }
368    let mode = get_mode(&attrs, rights);
369    // TODO: https://fxbug.dev/407611229 - Give these nodes valid labels.
370    let mut info = FsNodeInfo::new(mode, FsCred::root());
371    update_info_from_attrs(&mut info, &attrs);
372    Ok(Anon::new_private_file_extended(locked, current_task, ops, flags, "[fuchsia:remote]", info))
373}
374
375// Create a FileOps from a zx::NullableHandle.
376//
377// The handle must satisfy the same requirements as `new_remote_file`.
378pub fn new_remote_file_ops(
379    current_task: &CurrentTask,
380    handle: zx::NullableHandle,
381    creds: FullCredentials,
382) -> Result<Box<dyn FileOps>, Errno> {
383    let (_, ops) = remote_file_attrs_and_ops(current_task, handle, creds)?;
384    Ok(ops)
385}
386
/// Inspects `handle` and builds the matching `FileOps` plus node attributes.
///
/// Channels are first probed for the Starnix `UnixDomainSocket` protocol; counters get a
/// dedicated implementation; everything else is wrapped in a zxio object and dispatched on
/// its kernel and zxio object types.
fn remote_file_attrs_and_ops(
    current_task: &CurrentTask,
    mut handle: zx::NullableHandle,
    remote_creds: FullCredentials,
) -> Result<(zxio_node_attr, Box<dyn FileOps>), Errno> {
    let handle_type =
        handle.basic_info().map_err(|status| from_status_like_fdio!(status))?.object_type;

    // Check whether the channel implements a Starnix specific protocol.
    if handle_type == zx::ObjectType::CHANNEL {
        let channel = zx::Channel::from(handle);
        let queryable = funknown::QueryableSynchronousProxy::new(channel);
        if let Ok(name) = queryable.query(zx::MonotonicInstant::INFINITE) {
            if name == fbinder::UnixDomainSocketMarker::PROTOCOL_NAME.as_bytes() {
                let socket_ops =
                    RemoteUnixDomainSocket::new(queryable.into_channel(), remote_creds)?;
                let socket = Socket::new_with_ops(Box::new(socket_ops))?;
                let file_ops = SocketFile::new(socket);
                // Synthesize socket attributes; the remote end doesn't provide them.
                let attr = zxio_node_attr {
                    has: zxio_node_attr_has_t { mode: true, ..zxio_node_attr_has_t::default() },
                    mode: 0o777 | FileMode::IFSOCK.bits(),
                    ..zxio_node_attr::default()
                };
                return Ok((attr, file_ops));
            }
        };
        // Not a Starnix protocol: recover the raw handle and fall through to zxio handling.
        handle = queryable.into_channel().into_handle();
    } else if handle_type == zx::ObjectType::COUNTER {
        let attr = zxio_node_attr::default();
        let file_ops = Box::new(RemoteCounter::new(handle.into()));
        return Ok((attr, file_ops));
    }

    // Otherwise, use zxio based objects.
    let zxio = Zxio::create(handle).map_err(|status| from_status_like_fdio!(status))?;
    let mut attrs = zxio
        .attr_get(zxio_node_attr_has_t {
            protocols: true,
            abilities: true,
            content_size: true,
            storage_size: true,
            link_count: true,
            object_type: true,
            ..Default::default()
        })
        .map_err(|status| from_status_like_fdio!(status))?;
    // Dispatch on both the kernel object type and the zxio-reported object type.
    let ops: Box<dyn FileOps> = match (handle_type, attrs.object_type) {
        (_, ZXIO_OBJECT_TYPE_DIR) => Box::new(RemoteDirectoryObject::new(zxio)),
        (zx::ObjectType::VMO, _)
        | (zx::ObjectType::DEBUGLOG, _)
        | (_, ZXIO_OBJECT_TYPE_FILE)
        | (_, ZXIO_OBJECT_TYPE_NONE) => Box::new(RemoteFileObject::new(zxio)),
        (zx::ObjectType::SOCKET, _)
        | (_, ZXIO_OBJECT_TYPE_SYNCHRONOUS_DATAGRAM_SOCKET)
        | (_, ZXIO_OBJECT_TYPE_DATAGRAM_SOCKET)
        | (_, ZXIO_OBJECT_TYPE_STREAM_SOCKET)
        | (_, ZXIO_OBJECT_TYPE_RAW_SOCKET)
        | (_, ZXIO_OBJECT_TYPE_PACKET_SOCKET) => {
            let socket_ops = ZxioBackedSocket::new_with_zxio(current_task, zxio);
            let socket = Socket::new_with_ops(Box::new(socket_ops))?;
            // Mark the returned attributes as describing a socket node.
            attrs.has.mode = true;
            attrs.mode = FileMode::IFSOCK.bits();
            SocketFile::new(socket)
        }
        _ => return error!(ENOTSUP),
    };
    Ok((attrs, ops))
}
455
/// Wraps a Zircon socket as a Starnix file handle.
///
/// Equivalent to calling `new_remote_file` with the socket as the handle.
pub fn create_fuchsia_pipe<L>(
    locked: &mut Locked<L>,
    current_task: &CurrentTask,
    socket: zx::Socket,
    flags: OpenFlags,
) -> Result<FileHandle, Errno>
where
    L: LockEqualOrBefore<FileOpsCore>,
{
    new_remote_file(locked, current_task, socket.into(), flags)
}
467
/// Queries the remote node's current attributes and merges them into `info`.
///
/// Also forwards any locally-pending access-time update to the server and clears the local
/// pending flag.  Returns a read guard over the freshly-updated `FsNodeInfo`.
fn fetch_and_refresh_info_impl<'a>(
    zxio: &syncio::Zxio,
    info: &'a RwLock<FsNodeInfo>,
) -> Result<RwLockReadGuard<'a, FsNodeInfo>, Errno> {
    let attrs = zxio
        .attr_get(zxio_node_attr_has_t {
            content_size: true,
            storage_size: true,
            link_count: true,
            modification_time: true,
            change_time: true,
            access_time: true,
            casefold: true,
            wrapping_key_id: true,
            // Ask the server to apply a pending atime update if one is queued locally.
            // NOTE(review): the flag is read under a read lock here but cleared under the write
            // lock below; a flag set in between could be cleared without being sent — confirm
            // callers serialize these refreshes.
            pending_access_time_update: info.read().pending_time_access_update,
            ..Default::default()
        })
        .map_err(|status| from_status_like_fdio!(status))?;
    let mut info = info.write();
    update_info_from_attrs(&mut info, &attrs);
    info.pending_time_access_update = false;
    // Downgrade so the caller gets read access without releasing and re-acquiring the lock.
    Ok(RwLockWriteGuard::downgrade(info))
}
491
492// Update info from attrs if they are set.
493pub fn update_info_from_attrs(info: &mut FsNodeInfo, attrs: &zxio_node_attributes_t) {
494    // TODO - store these in FsNodeState and convert on fstat
495    if attrs.has.content_size {
496        info.size = attrs.content_size.try_into().unwrap_or(std::usize::MAX);
497    }
498    if attrs.has.storage_size {
499        info.blocks = usize::try_from(attrs.storage_size)
500            .unwrap_or(std::usize::MAX)
501            .div_ceil(DEFAULT_BYTES_PER_BLOCK)
502    }
503    info.blksize = DEFAULT_BYTES_PER_BLOCK;
504    if attrs.has.link_count {
505        info.link_count = attrs.link_count.try_into().unwrap_or(std::usize::MAX);
506    }
507    if attrs.has.modification_time {
508        info.time_modify =
509            UtcInstant::from_nanos(attrs.modification_time.try_into().unwrap_or(i64::MAX));
510    }
511    if attrs.has.change_time {
512        info.time_status_change =
513            UtcInstant::from_nanos(attrs.change_time.try_into().unwrap_or(i64::MAX));
514    }
515    if attrs.has.access_time {
516        info.time_access = UtcInstant::from_nanos(attrs.access_time.try_into().unwrap_or(i64::MAX));
517    }
518    if attrs.has.wrapping_key_id {
519        info.wrapping_key_id = Some(attrs.wrapping_key_id);
520    }
521}
522
523fn get_mode(attrs: &zxio_node_attributes_t, rights: fio::Flags) -> FileMode {
524    if attrs.protocols & ZXIO_NODE_PROTOCOL_SYMLINK != 0 {
525        // We don't set the mode for symbolic links , so we synthesize it instead.
526        FileMode::IFLNK | FileMode::ALLOW_ALL
527    } else if attrs.has.mode {
528        // If the filesystem supports POSIX mode bits, use that directly.
529        FileMode::from_bits(attrs.mode)
530    } else {
531        // The filesystem doesn't support the `mode` attribute, so synthesize it from the protocols
532        // this node supports, and the rights used to open it.
533        let is_directory =
534            attrs.protocols & ZXIO_NODE_PROTOCOL_DIRECTORY == ZXIO_NODE_PROTOCOL_DIRECTORY;
535        let mode = if is_directory { FileMode::IFDIR } else { FileMode::IFREG };
536        let mut permissions = FileMode::EMPTY;
537        if rights.contains(fio::PERM_READABLE) {
538            permissions |= FileMode::IRUSR;
539        }
540        if rights.contains(fio::PERM_WRITABLE) {
541            permissions |= FileMode::IWUSR;
542        }
543        if rights.contains(fio::PERM_EXECUTABLE) {
544            permissions |= FileMode::IXUSR;
545        }
546        // Make sure the same permissions are granted to user, group, and other.
547        permissions |= FileMode::from_bits((permissions.bits() >> 3) | (permissions.bits() >> 6));
548        mode | permissions
549    }
550}
551
552fn get_name_str<'a>(name_bytes: &'a FsStr) -> Result<&'a str, Errno> {
553    std::str::from_utf8(name_bytes.as_ref()).map_err(|_| {
554        log_warn!("bad utf8 in pathname! remote filesystems can't handle this");
555        errno!(EINVAL)
556    })
557}
558
559impl XattrStorage for syncio::Zxio {
560    fn get_xattr(
561        &self,
562        _locked: &mut Locked<FileOpsCore>,
563        name: &FsStr,
564    ) -> Result<FsString, Errno> {
565        Ok(self
566            .xattr_get(name)
567            .map_err(|status| match status {
568                zx::Status::NOT_FOUND => errno!(ENODATA),
569                status => from_status_like_fdio!(status),
570            })?
571            .into())
572    }
573
574    fn set_xattr(
575        &self,
576        _locked: &mut Locked<FileOpsCore>,
577        name: &FsStr,
578        value: &FsStr,
579        op: XattrOp,
580    ) -> Result<(), Errno> {
581        let mode = match op {
582            XattrOp::Set => XattrSetMode::Set,
583            XattrOp::Create => XattrSetMode::Create,
584            XattrOp::Replace => XattrSetMode::Replace,
585        };
586
587        self.xattr_set(name, value, mode).map_err(|status| match status {
588            zx::Status::NOT_FOUND => errno!(ENODATA),
589            status => from_status_like_fdio!(status),
590        })
591    }
592
593    fn remove_xattr(&self, _locked: &mut Locked<FileOpsCore>, name: &FsStr) -> Result<(), Errno> {
594        self.xattr_remove(name).map_err(|status| match status {
595            zx::Status::NOT_FOUND => errno!(ENODATA),
596            _ => from_status_like_fdio!(status),
597        })
598    }
599
600    fn list_xattrs(&self, _locked: &mut Locked<FileOpsCore>) -> Result<Vec<FsString>, Errno> {
601        self.xattr_list()
602            .map(|attrs| attrs.into_iter().map(FsString::new).collect::<Vec<_>>())
603            .map_err(|status| from_status_like_fdio!(status))
604    }
605}
606
607impl FsNodeOps for RemoteNode {
608    fs_node_impl_xattr_delegate!(self, self.zxio);
609
    /// Opens this node, returning `FileOps` appropriate to its type.
    ///
    /// Enforces encryption locking (directories opened for write and all regular files) and
    /// fsverity write restrictions before handing out the connection.
    fn create_file_ops(
        &self,
        locked: &mut Locked<FileOpsCore>,
        node: &FsNode,
        current_task: &CurrentTask,
        flags: OpenFlags,
    ) -> Result<Box<dyn FileOps>, Errno> {
        {
            // Refresh cached attributes so the mode/wrapping_key_id checks below see current state.
            let node_info = node.fetch_and_refresh_info(locked, current_task)?;
            if node_info.mode.is_dir() {
                if let Some(wrapping_key_id) = node_info.wrapping_key_id {
                    if flags.can_write() {
                        // Locked encrypted directories cannot be opened with write access.
                        let crypt_service =
                            node.fs().crypt_service().ok_or_else(|| errno!(ENOKEY))?;
                        if !crypt_service.contains_key(EncryptionKeyId::from(wrapping_key_id)) {
                            return error!(ENOKEY);
                        }
                    }
                }
                // For directories we need to deep-clone the connection because we rely on the seek
                // offset.
                return Ok(Box::new(RemoteDirectoryObject::new(
                    self.zxio.deep_clone().map_err(|status| from_status_like_fdio!(status))?,
                )));
            }
        }

        // Locked encrypted files cannot be opened.
        node.fail_if_locked(current_task)?;

        // fsverity files cannot be opened in write mode, including while building.
        if flags.can_write() {
            node.fsverity.lock().check_writable()?;
        }

        // For files we can clone the `Zxio` because we don't rely on any per-connection state
        // (i.e. the file offset).
        Ok(Box::new(RemoteFileObject::new(self.zxio.clone())))
    }
650
651    fn mknod(
652        &self,
653        _locked: &mut Locked<FileOpsCore>,
654        node: &FsNode,
655        current_task: &CurrentTask,
656        name: &FsStr,
657        mode: FileMode,
658        dev: DeviceType,
659        owner: FsCred,
660    ) -> Result<FsNodeHandle, Errno> {
661        node.fail_if_locked(current_task)?;
662        let name = get_name_str(name)?;
663
664        let fs = node.fs();
665        let fs_ops = RemoteFs::from_fs(&fs);
666
667        let zxio;
668        let mut node_id;
669        if !(mode.is_reg() || mode.is_chr() || mode.is_blk() || mode.is_fifo() || mode.is_sock()) {
670            return error!(EINVAL, name);
671        }
672        let mut attrs = zxio_node_attributes_t {
673            has: zxio_node_attr_has_t { id: true, ..Default::default() },
674            ..Default::default()
675        };
676        zxio = self
677            .zxio
678            .open(
679                name,
680                fio::Flags::FLAG_MUST_CREATE
681                    | fio::Flags::PROTOCOL_FILE
682                    | fio::PERM_READABLE
683                    | fio::PERM_WRITABLE,
684                ZxioOpenOptions::new(
685                    Some(&mut attrs),
686                    Some(zxio_node_attributes_t {
687                        mode: mode.bits(),
688                        uid: owner.uid,
689                        gid: owner.gid,
690                        rdev: dev.bits(),
691                        has: zxio_node_attr_has_t {
692                            mode: true,
693                            uid: true,
694                            gid: true,
695                            rdev: true,
696                            ..Default::default()
697                        },
698                        ..Default::default()
699                    }),
700                ),
701            )
702            .map_err(|status| from_status_like_fdio!(status, name))?;
703        node_id = attrs.id;
704
705        let ops = if mode.is_reg() {
706            Box::new(RemoteNode { zxio, rights: self.rights }) as Box<dyn FsNodeOps>
707        } else {
708            Box::new(RemoteSpecialNode { zxio }) as Box<dyn FsNodeOps>
709        };
710
711        if !fs_ops.use_remote_ids {
712            node_id = fs.allocate_ino();
713        }
714        let child =
715            fs.create_node(node_id, ops, FsNodeInfo { rdev: dev, ..FsNodeInfo::new(mode, owner) });
716        Ok(child)
717    }
718
719    fn mkdir(
720        &self,
721        _locked: &mut Locked<FileOpsCore>,
722        node: &FsNode,
723        current_task: &CurrentTask,
724        name: &FsStr,
725        mode: FileMode,
726        owner: FsCred,
727    ) -> Result<FsNodeHandle, Errno> {
728        node.fail_if_locked(current_task)?;
729        let name = get_name_str(name)?;
730
731        let fs = node.fs();
732        let fs_ops = RemoteFs::from_fs(&fs);
733
734        let zxio;
735        let mut node_id;
736        let mut attrs = zxio_node_attributes_t {
737            has: zxio_node_attr_has_t { id: true, ..Default::default() },
738            ..Default::default()
739        };
740        zxio = self
741            .zxio
742            .open(
743                name,
744                fio::Flags::FLAG_MUST_CREATE
745                    | fio::Flags::PROTOCOL_DIRECTORY
746                    | fio::PERM_READABLE
747                    | fio::PERM_WRITABLE,
748                ZxioOpenOptions::new(
749                    Some(&mut attrs),
750                    Some(zxio_node_attributes_t {
751                        mode: mode.bits(),
752                        uid: owner.uid,
753                        gid: owner.gid,
754                        has: zxio_node_attr_has_t {
755                            mode: true,
756                            uid: true,
757                            gid: true,
758                            ..Default::default()
759                        },
760                        ..Default::default()
761                    }),
762                ),
763            )
764            .map_err(|status| from_status_like_fdio!(status, name))?;
765        node_id = attrs.id;
766
767        let ops = RemoteNode { zxio, rights: self.rights };
768        if !fs_ops.use_remote_ids {
769            node_id = fs.allocate_ino();
770        }
771        let child = fs.create_node(node_id, ops, FsNodeInfo::new(mode, owner));
772        Ok(child)
773    }
774
    /// Looks up `name` in this remote directory and returns the corresponding `FsNode`,
    /// creating and caching it if it is not already known to the filesystem.
    ///
    /// A single `open` round trip both opens the child and fetches all the attributes needed
    /// to construct the node (protocols, ownership, times, fsverity/casefold state, etc.).
    fn lookup(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        node: &FsNode,
        current_task: &CurrentTask,
        name: &FsStr,
    ) -> Result<FsNodeHandle, Errno> {
        let name = get_name_str(name)?;

        let fs = node.fs();
        let fs_ops = RemoteFs::from_fs(&fs);

        // Request every attribute we need in the same request as the open.
        let mut attrs = zxio_node_attributes_t {
            has: zxio_node_attr_has_t {
                protocols: true,
                abilities: true,
                mode: true,
                uid: true,
                gid: true,
                rdev: true,
                id: true,
                fsverity_enabled: true,
                casefold: true,
                modification_time: true,
                change_time: true,
                access_time: true,
                ..Default::default()
            },
            ..Default::default()
        };
        let mut options = ZxioOpenOptions::new(Some(&mut attrs), None);
        // For xattr-labeled filesystems, also fetch the SELinux context attribute during the
        // open so that labeling the node does not require a second round trip.
        let mut selinux_context_buffer =
            MaybeUninit::<[u8; fio::MAX_SELINUX_CONTEXT_ATTRIBUTE_LEN as usize]>::uninit();
        let mut cached_context = security::fs_is_xattr_labeled(node.fs())
            .then(|| SelinuxContextAttr::new(&mut selinux_context_buffer));
        if let Some(buffer) = &mut cached_context {
            options = options.with_selinux_context_read(buffer).unwrap();
        }
        let zxio = self
            .zxio
            .open(name, self.rights, options)
            .map_err(|status| from_status_like_fdio!(status, name))?;
        // Kept so an already-cached symlink node can have its handle refreshed below.
        let symlink_zxio = zxio.clone();
        let mode = get_mode(&attrs, self.rights);
        // Use remote inode numbers when the filesystem provides them; otherwise mint a local
        // one. A remote end that claims to provide ids but reports INO_UNKNOWN is unsupported.
        let node_id = if fs_ops.use_remote_ids {
            if attrs.id == fio::INO_UNKNOWN {
                return error!(ENOTSUP);
            }
            attrs.id
        } else {
            fs.allocate_ino()
        };
        let owner = FsCred { uid: attrs.uid, gid: attrs.gid };
        let rdev = DeviceType::from_bits(attrs.rdev);
        let fsverity_enabled = attrs.fsverity_enabled;
        // fsverity should not be enabled for non-file nodes.
        if fsverity_enabled && (attrs.protocols & ZXIO_NODE_PROTOCOL_FILE == 0) {
            return error!(EINVAL);
        }
        let casefold = attrs.casefold;
        // Remote times are u64 nanoseconds; values that do not fit in i64 are clamped.
        let time_modify =
            UtcInstant::from_nanos(attrs.modification_time.try_into().unwrap_or(i64::MAX));
        let time_status_change =
            UtcInstant::from_nanos(attrs.change_time.try_into().unwrap_or(i64::MAX));
        let time_access = UtcInstant::from_nanos(attrs.access_time.try_into().unwrap_or(i64::MAX));

        let node = fs.get_or_create_node(node_id, || {
            // Pick the node ops from the reported mode: symlink, regular file/directory, or a
            // special node (anything else).
            let ops = if mode.is_lnk() {
                Box::new(RemoteSymlink { zxio: Mutex::new(zxio) }) as Box<dyn FsNodeOps>
            } else if mode.is_reg() || mode.is_dir() {
                Box::new(RemoteNode { zxio, rights: self.rights }) as Box<dyn FsNodeOps>
            } else {
                Box::new(RemoteSpecialNode { zxio }) as Box<dyn FsNodeOps>
            };
            let child = FsNode::new_uncached(
                node_id,
                ops,
                &fs,
                FsNodeInfo {
                    rdev,
                    casefold,
                    time_status_change,
                    time_modify,
                    time_access,
                    ..FsNodeInfo::new(mode, owner)
                },
            );
            if fsverity_enabled {
                *child.fsverity.lock() = FsVerityState::FsVerity;
            }
            if let Some(buffer) = cached_context.as_ref().and_then(|buffer| buffer.get()) {
                // This is valid to fail if we're using mount point labelling or the
                // provided context string is invalid.
                let _ = security::fs_node_notify_security_context(
                    current_task,
                    &child,
                    FsStr::new(buffer),
                );
            }
            Ok(child)
        })?;
        // If the node is a symlink (cached or just created), point it at the handle we just
        // opened. NOTE(review): presumably this keeps a cached symlink's handle fresh —
        // confirm the intent with the original authors.
        if let Some(symlink) = node.downcast_ops::<RemoteSymlink>() {
            let mut zxio_guard = symlink.zxio.lock();
            *zxio_guard = symlink_zxio;
        }
        Ok(node)
    }
882
883    fn truncate(
884        &self,
885        _locked: &mut Locked<FileOpsCore>,
886        _guard: &AppendLockGuard<'_>,
887        node: &FsNode,
888        current_task: &CurrentTask,
889        length: u64,
890    ) -> Result<(), Errno> {
891        node.fail_if_locked(current_task)?;
892        self.zxio.truncate(length).map_err(|status| from_status_like_fdio!(status))
893    }
894
895    fn allocate(
896        &self,
897        _locked: &mut Locked<FileOpsCore>,
898        _guard: &AppendLockGuard<'_>,
899        node: &FsNode,
900        current_task: &CurrentTask,
901        mode: FallocMode,
902        offset: u64,
903        length: u64,
904    ) -> Result<(), Errno> {
905        match mode {
906            FallocMode::Allocate { keep_size: false } => {
907                node.fail_if_locked(current_task)?;
908                self.zxio
909                    .allocate(offset, length, AllocateMode::empty())
910                    .map_err(|status| from_status_like_fdio!(status))?;
911                Ok(())
912            }
913            _ => error!(EINVAL),
914        }
915    }
916
    /// Refreshes `info` from the remote node's current attributes and returns a read guard
    /// over the refreshed data. Delegates to the shared `fetch_and_refresh_info_impl` helper.
    fn fetch_and_refresh_info<'a>(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        _node: &FsNode,
        _current_task: &CurrentTask,
        info: &'a RwLock<FsNodeInfo>,
    ) -> Result<RwLockReadGuard<'a, FsNodeInfo>, Errno> {
        fetch_and_refresh_info_impl(&self.zxio, info)
    }
926
927    fn update_attributes(
928        &self,
929        _locked: &mut Locked<FileOpsCore>,
930        _current_task: &CurrentTask,
931        info: &FsNodeInfo,
932        has: zxio_node_attr_has_t,
933    ) -> Result<(), Errno> {
934        // Omit updating creation_time. By definition, there shouldn't be a change in creation_time.
935        let mut mutable_node_attributes = zxio_node_attributes_t {
936            modification_time: info.time_modify.into_nanos() as u64,
937            access_time: info.time_access.into_nanos() as u64,
938            mode: info.mode.bits(),
939            uid: info.uid,
940            gid: info.gid,
941            rdev: info.rdev.bits(),
942            casefold: info.casefold,
943            has,
944            ..Default::default()
945        };
946        if let Some(id) = info.wrapping_key_id {
947            mutable_node_attributes.wrapping_key_id = id;
948        }
949        self.zxio
950            .attr_set(&mutable_node_attributes)
951            .map_err(|status| from_status_like_fdio!(status))
952    }
953
954    fn unlink(
955        &self,
956        _locked: &mut Locked<FileOpsCore>,
957        _node: &FsNode,
958        _current_task: &CurrentTask,
959        name: &FsStr,
960        _child: &FsNodeHandle,
961    ) -> Result<(), Errno> {
962        // We don't care about the _child argument because 1. unlinking already takes the parent's
963        // children lock, so we don't have to worry about conflicts on this path, and 2. the remote
964        // filesystem tracks the link counts so we don't need to update them here.
965        let name = get_name_str(name)?;
966        self.zxio
967            .unlink(name, fio::UnlinkFlags::empty())
968            .map_err(|status| from_status_like_fdio!(status))
969    }
970
971    fn create_symlink(
972        &self,
973        _locked: &mut Locked<FileOpsCore>,
974        node: &FsNode,
975        current_task: &CurrentTask,
976        name: &FsStr,
977        target: &FsStr,
978        owner: FsCred,
979    ) -> Result<FsNodeHandle, Errno> {
980        node.fail_if_locked(current_task)?;
981
982        let name = get_name_str(name)?;
983        let zxio = self
984            .zxio
985            .create_symlink(name, target)
986            .map_err(|status| from_status_like_fdio!(status))?;
987
988        let fs = node.fs();
989        let fs_ops = RemoteFs::from_fs(&fs);
990
991        let node_id = if fs_ops.use_remote_ids {
992            let attrs = zxio
993                .attr_get(zxio_node_attr_has_t { id: true, ..Default::default() })
994                .map_err(|status| from_status_like_fdio!(status))?;
995            attrs.id
996        } else {
997            fs.allocate_ino()
998        };
999        let symlink = fs.create_node(
1000            node_id,
1001            RemoteSymlink { zxio: Mutex::new(zxio) },
1002            FsNodeInfo {
1003                size: target.len(),
1004                ..FsNodeInfo::new(FileMode::IFLNK | FileMode::ALLOW_ALL, owner)
1005            },
1006        );
1007        Ok(symlink)
1008    }
1009
    /// Creates an unnamed temporary regular file (the O_TMPFILE path) in this directory.
    ///
    /// Only regular-file modes are accepted; the file is created on the remote end in a
    /// single open request that also fetches its id and seeds mode/uid/gid.
    fn create_tmpfile(
        &self,
        node: &FsNode,
        _current_task: &CurrentTask,
        mode: FileMode,
        owner: FsCred,
    ) -> Result<FsNodeHandle, Errno> {
        let fs = node.fs();
        let fs_ops = RemoteFs::from_fs(&fs);

        let zxio;
        let mut node_id;
        // O_TMPFILE only makes sense for regular files.
        if !mode.is_reg() {
            return error!(EINVAL);
        }
        // Request the remote id alongside the create.
        let mut attrs = zxio_node_attributes_t {
            has: zxio_node_attr_has_t { id: true, ..Default::default() },
            ..Default::default()
        };
        // `create_tmpfile` is used by O_TMPFILE. Note that
        // <https://man7.org/linux/man-pages/man2/open.2.html> states that if O_EXCL is specified
        // with O_TMPFILE, the temporary file created cannot be linked into the filesystem. Although
        // there exist fuchsia flags `fio::FLAG_TEMPORARY_AS_NOT_LINKABLE`, the starnix vfs already
        // handles this case and makes sure that the created file is not linkable. There is also no
        // current way of passing the open flags to this function.
        zxio = self
            .zxio
            .open(
                ".",
                fio::Flags::PROTOCOL_FILE
                    | fio::Flags::FLAG_CREATE_AS_UNNAMED_TEMPORARY
                    | self.rights,
                ZxioOpenOptions::new(
                    Some(&mut attrs),
                    Some(zxio_node_attributes_t {
                        mode: mode.bits(),
                        uid: owner.uid,
                        gid: owner.gid,
                        has: zxio_node_attr_has_t {
                            mode: true,
                            uid: true,
                            gid: true,
                            ..Default::default()
                        },
                        ..Default::default()
                    }),
                ),
            )
            .map_err(|status| from_status_like_fdio!(status))?;
        node_id = attrs.id;

        let ops = Box::new(RemoteNode { zxio, rights: self.rights }) as Box<dyn FsNodeOps>;

        // When the filesystem does not provide its own inode numbers, replace the remote id
        // with a locally-allocated one.
        if !fs_ops.use_remote_ids {
            node_id = fs.allocate_ino();
        }
        let child = fs.create_node(node_id, ops, FsNodeInfo::new(mode, owner));

        Ok(child)
    }
1070
1071    fn link(
1072        &self,
1073        _locked: &mut Locked<FileOpsCore>,
1074        node: &FsNode,
1075        _current_task: &CurrentTask,
1076        name: &FsStr,
1077        child: &FsNodeHandle,
1078    ) -> Result<(), Errno> {
1079        if !RemoteFs::from_fs(&node.fs()).use_remote_ids {
1080            return error!(EPERM);
1081        }
1082        let name = get_name_str(name)?;
1083        let link_into = |zxio: &syncio::Zxio| {
1084            zxio.link_into(&self.zxio, name).map_err(|status| match status {
1085                zx::Status::BAD_STATE => errno!(EXDEV),
1086                zx::Status::ACCESS_DENIED => errno!(ENOKEY),
1087                s => from_status_like_fdio!(s),
1088            })
1089        };
1090        if let Some(child) = child.downcast_ops::<RemoteNode>() {
1091            link_into(&child.zxio)
1092        } else if let Some(child) = child.downcast_ops::<RemoteSymlink>() {
1093            link_into(&child.zxio())
1094        } else {
1095            error!(EXDEV)
1096        }
1097    }
1098
1099    fn forget(
1100        self: Box<Self>,
1101        _locked: &mut Locked<FileOpsCore>,
1102        _current_task: &CurrentTask,
1103        info: FsNodeInfo,
1104    ) -> Result<(), Errno> {
1105        // Before forgetting this node, update atime if we need to.
1106        if info.pending_time_access_update {
1107            self.zxio
1108                .close_and_update_access_time()
1109                .map_err(|status| from_status_like_fdio!(status))?;
1110        }
1111        Ok(())
1112    }
1113
1114    fn enable_fsverity(&self, descriptor: &fsverity_descriptor) -> Result<(), Errno> {
1115        let descr = zxio_fsverity_descriptor_t {
1116            hash_algorithm: descriptor.hash_algorithm,
1117            salt_size: descriptor.salt_size,
1118            salt: descriptor.salt,
1119        };
1120        self.zxio.enable_verity(&descr).map_err(|status| from_status_like_fdio!(status))
1121    }
1122
1123    fn get_fsverity_descriptor(&self, log_blocksize: u8) -> Result<fsverity_descriptor, Errno> {
1124        let mut root_hash = [0; ZXIO_ROOT_HASH_LENGTH];
1125        let attrs = self
1126            .zxio
1127            .attr_get_with_root_hash(
1128                zxio_node_attr_has_t {
1129                    content_size: true,
1130                    fsverity_options: true,
1131                    fsverity_root_hash: true,
1132                    ..Default::default()
1133                },
1134                &mut root_hash,
1135            )
1136            .map_err(|status| match status {
1137                zx::Status::INVALID_ARGS => errno!(ENODATA),
1138                _ => from_status_like_fdio!(status),
1139            })?;
1140        return Ok(fsverity_descriptor {
1141            version: 1,
1142            hash_algorithm: attrs.fsverity_options.hash_alg,
1143            log_blocksize,
1144            salt_size: attrs.fsverity_options.salt_size as u8,
1145            __reserved_0x04: 0u32,
1146            data_size: attrs.content_size,
1147            root_hash,
1148            salt: attrs.fsverity_options.salt,
1149            __reserved: [0u8; 144],
1150        });
1151    }
1152}
1153
/// `FsNodeOps` for remote nodes that are neither regular files, directories, nor symlinks
/// (see the mode dispatch in `lookup`).
struct RemoteSpecialNode {
    // Handle to the remote object; used for xattr access (see the xattr delegate macro in
    // the impl below), since special nodes are never opened through `create_file_ops`.
    zxio: syncio::Zxio,
}
1157
impl FsNodeOps for RemoteSpecialNode {
    fs_node_impl_not_dir!();
    fs_node_impl_xattr_delegate!(self, self.zxio);

    fn create_file_ops(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        _node: &FsNode,
        _current_task: &CurrentTask,
        _flags: OpenFlags,
    ) -> Result<Box<dyn FileOps>, Errno> {
        // NOTE(review): opening a special node is expected to be dispatched elsewhere in the
        // VFS before reaching these ops, so this path should never execute — confirm.
        unreachable!("Special nodes cannot be opened.");
    }
}
1172
1173fn zxio_read_write_inner_map_error(status: zx::Status) -> Errno {
1174    match status {
1175        // zx::Stream may return invalid args or not found error because of
1176        // invalid zx_iovec buffer pointers.
1177        zx::Status::INVALID_ARGS | zx::Status::NOT_FOUND => errno!(EFAULT, ""),
1178        status => from_status_like_fdio!(status),
1179    }
1180}
1181
/// Reads into `data`, preferring the zero-copy path.
///
/// When `data` can be exposed as iovec segments, `unified_read_fn` reads directly into the
/// caller's buffers; otherwise the read falls back to `vmo_read_fn` through an intermediate
/// heap buffer sized to `data.available()`. Returns the number of bytes read.
fn zxio_read_inner(
    data: &mut dyn OutputBuffer,
    unified_read_fn: impl FnOnce(&[syncio::zxio::zx_iovec]) -> Result<usize, zx::Status>,
    vmo_read_fn: impl FnOnce(&mut [u8]) -> Result<usize, zx::Status>,
) -> Result<usize, Errno> {
    // `with_iovec_segments` returns `None` when `data` cannot be represented as iovecs.
    let read_bytes = with_iovec_segments(data, |iovecs| {
        unified_read_fn(&iovecs).map_err(zxio_read_write_inner_map_error)
    });

    match read_bytes {
        Some(actual) => {
            let actual = actual?;
            // SAFETY: we successfully read `actual` bytes
            // directly to the user's buffer segments.
            unsafe { data.advance(actual) }?;
            Ok(actual)
        }
        None => {
            // Perform the (slower) operation by using an intermediate buffer.
            let total = data.available();
            let mut bytes = vec![0u8; total];
            let actual =
                vmo_read_fn(&mut bytes).map_err(|status| from_status_like_fdio!(status))?;
            data.write_all(&bytes[0..actual])
        }
    }
}
1209
/// Positional read from `zxio` at `offset` into `data`, using the iovec fast path when
/// possible and falling back to `read_at` through an intermediate buffer otherwise.
fn zxio_read_at(zxio: &Zxio, offset: usize, data: &mut dyn OutputBuffer) -> Result<usize, Errno> {
    let offset = offset as u64;
    zxio_read_inner(
        data,
        |iovecs| {
            // SAFETY: `zxio_read_inner` maps the returned error to an appropriate
            // `Errno` for userspace to handle. `data` only points to memory that
            // is allowed to be written to (Linux user-mode aspace or a valid
            // Starnix owned buffer).
            unsafe { zxio.readv_at(offset, iovecs) }
        },
        |bytes| zxio.read_at(offset, bytes),
    )
}
1224
1225fn zxio_write_inner(
1226    data: &mut dyn InputBuffer,
1227    unified_write_fn: impl FnOnce(&[syncio::zxio::zx_iovec]) -> Result<usize, zx::Status>,
1228    vmo_write_fn: impl FnOnce(&[u8]) -> Result<usize, zx::Status>,
1229) -> Result<usize, Errno> {
1230    let write_bytes = with_iovec_segments(data, |iovecs| {
1231        unified_write_fn(&iovecs).map_err(zxio_read_write_inner_map_error)
1232    });
1233
1234    match write_bytes {
1235        Some(actual) => {
1236            let actual = actual?;
1237            data.advance(actual)?;
1238            Ok(actual)
1239        }
1240        None => {
1241            // Perform the (slower) operation by using an intermediate buffer.
1242            let bytes = data.peek_all()?;
1243            let actual = vmo_write_fn(&bytes).map_err(|status| from_status_like_fdio!(status))?;
1244            data.advance(actual)?;
1245            Ok(actual)
1246        }
1247    }
1248}
1249
/// Positional write of `data` into `zxio` at `offset`, using the iovec fast path when
/// possible and falling back to `write_at` through an intermediate buffer otherwise.
/// (`_current_task` is currently unused.)
fn zxio_write_at(
    zxio: &Zxio,
    _current_task: &CurrentTask,
    offset: usize,
    data: &mut dyn InputBuffer,
) -> Result<usize, Errno> {
    let offset = offset as u64;
    zxio_write_inner(
        data,
        |iovecs| {
            // SAFETY: `zxio_write_inner` maps the returned error to an appropriate
            // `Errno` for userspace to handle.
            unsafe { zxio.writev_at(offset, iovecs) }
        },
        |bytes| zxio.write_at(offset, bytes),
    )
}
1267
/// Helper struct to track the context necessary to iterate over dir entries.
#[derive(Default)]
struct RemoteDirectoryIterator<'a> {
    // Lazily created remote dirent iterator; `None` until first use (see
    // `get_or_init_iterator`).
    iterator: Option<DirentIterator<'a>>,

    /// If the last attempt to write to the sink failed, this contains the entry that is pending to
    /// be added. This is also used to synthesize dot-dot.
    pending_entry: Entry,
}
1277
/// One step of directory iteration: a real remote entry, a synthesized "..", or exhaustion.
#[derive(Default)]
enum Entry {
    // Indicates no more entries.
    #[default]
    None,

    // A real entry produced by the remote dirent iterator.
    Some(ZxioDirent),

    // Indicates dot-dot should be synthesized.
    DotDot,
}
1289
1290impl Entry {
1291    fn take(&mut self) -> Entry {
1292        std::mem::replace(self, Entry::None)
1293    }
1294}
1295
1296impl From<Option<ZxioDirent>> for Entry {
1297    fn from(value: Option<ZxioDirent>) -> Self {
1298        match value {
1299            None => Entry::None,
1300            Some(x) => Entry::Some(x),
1301        }
1302    }
1303}
1304
1305impl<'a> RemoteDirectoryIterator<'a> {
1306    fn get_or_init_iterator(&mut self, zxio: &'a Zxio) -> Result<&mut DirentIterator<'a>, Errno> {
1307        if self.iterator.is_none() {
1308            let iterator =
1309                zxio.create_dirent_iterator().map_err(|status| from_status_like_fdio!(status))?;
1310            self.iterator = Some(iterator);
1311        }
1312        if let Some(iterator) = &mut self.iterator {
1313            return Ok(iterator);
1314        }
1315
1316        // Should be an impossible error, because we just created the iterator above.
1317        error!(EIO)
1318    }
1319
1320    /// Returns the next dir entry. If no more entries are found, returns None.  Returns an error if
1321    /// the iterator fails for other reasons described by the zxio library.
1322    pub fn next(&mut self, zxio: &'a Zxio) -> Result<Entry, Errno> {
1323        let mut next = self.pending_entry.take();
1324        if let Entry::None = next {
1325            next = self
1326                .get_or_init_iterator(zxio)?
1327                .next()
1328                .transpose()
1329                .map_err(|status| from_status_like_fdio!(status))?
1330                .into();
1331        }
1332        // We only want to synthesize .. if . exists because the . and .. entries get removed if the
1333        // directory is unlinked, so if the remote filesystem has removed ., we know to omit the
1334        // .. entry.
1335        match &next {
1336            Entry::Some(ZxioDirent { name, .. }) if name == "." => {
1337                self.pending_entry = Entry::DotDot;
1338            }
1339            _ => {}
1340        }
1341        Ok(next)
1342    }
1343}
1344
/// `FileOps` state for an open remote directory.
struct RemoteDirectoryObject {
    // Iteration state shared by `readdir` and `seek`; behind a mutex because FileOps methods
    // take `&self`.
    iterator: Mutex<RemoteDirectoryIterator<'static>>,

    // The underlying Zircon I/O object.  This *must* be dropped after `iterator` above because the
    // iterator has references to this object.  We use some unsafe code below to erase the lifetime
    // (hence the 'static above).
    zxio: Zxio,
}
1353
impl RemoteDirectoryObject {
    /// Wraps `zxio` with a fresh, not-yet-started directory iterator.
    pub fn new(zxio: Zxio) -> RemoteDirectoryObject {
        RemoteDirectoryObject { zxio, iterator: Mutex::new(RemoteDirectoryIterator::default()) }
    }

    /// Returns a reference to Zxio with the lifetime erased.
    ///
    /// # Safety
    ///
    /// The caller must uphold the lifetime requirements, which will be the case if this is only
    /// used for the contained iterator (`iterator` is dropped before `zxio`).
    unsafe fn zxio(&self) -> &'static Zxio {
        #[allow(clippy::undocumented_unsafe_blocks, reason = "2024 edition migration")]
        unsafe {
            &*(&self.zxio as *const Zxio)
        }
    }
}
1372
/// Directory `FileOps`: seeking and enumeration are built on a forward-only remote dirent
/// iterator.
impl FileOps for RemoteDirectoryObject {
    fileops_impl_directory!();

    /// Seeks the directory position.
    ///
    /// The remote iterator only advances, so a backwards seek rewinds it (the remote end
    /// maintains the offset) and then replays entries until the requested position.
    fn seek(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        _file: &FileObject,
        _current_task: &CurrentTask,
        current_offset: off_t,
        target: SeekTarget,
    ) -> Result<off_t, Errno> {
        let mut iterator = self.iterator.lock();
        let new_offset = default_seek(current_offset, target, || error!(EINVAL))?;
        let mut iterator_position = current_offset;

        if new_offset < iterator_position {
            // Our iterator only goes forward, so reset it here.  Note: we *must* rewind it rather
            // than just create a new iterator because the remote end maintains the offset.
            if let Some(iterator) = &mut iterator.iterator {
                iterator.rewind().map_err(|status| from_status_like_fdio!(status))?;
            }
            iterator.pending_entry = Entry::None;
            iterator_position = 0;
        }

        // Advance the iterator to catch up with the offset.
        for i in iterator_position..new_offset {
            // SAFETY: See the comment on the `zxio` function above.  The iterator outlives this
            // function and the zxio object must outlive the iterator.
            match iterator.next(unsafe { self.zxio() }) {
                Ok(Entry::Some(_) | Entry::DotDot) => {}
                Ok(Entry::None) => break, // No more entries.
                Err(_) => {
                    // In order to keep the offset and the iterator in sync, set the new offset
                    // to be as far as we could get.
                    // Note that failing the seek here would also cause the iterator and the
                    // offset to not be in sync, because the iterator has already moved from
                    // where it was.
                    return Ok(i);
                }
            }
        }

        Ok(new_offset)
    }

    /// Streams directory entries into `sink`.
    ///
    /// If the sink rejects an entry, the entry is stashed in `pending_entry` so the next
    /// `readdir` call resumes with it rather than losing it.
    fn readdir(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        file: &FileObject,
        _current_task: &CurrentTask,
        sink: &mut dyn DirentSink,
    ) -> Result<(), Errno> {
        // It is important to acquire the lock to the offset before the context, to avoid a deadlock
        // where seek() tries to modify the context.
        let mut iterator = self.iterator.lock();

        loop {
            // SAFETY: See the comment on the `zxio` function above.  The iterator outlives this
            // function and the zxio object must outlive the iterator.
            let entry = iterator.next(unsafe { self.zxio() })?;
            if let Err(e) = match &entry {
                Entry::Some(entry) => {
                    let inode_num: ino_t = entry.id.ok_or_else(|| errno!(EIO))?;
                    let entry_type = if entry.is_dir() {
                        DirectoryEntryType::DIR
                    } else if entry.is_file() {
                        DirectoryEntryType::REG
                    } else {
                        DirectoryEntryType::UNKNOWN
                    };
                    sink.add(inode_num, sink.offset() + 1, entry_type, entry.name.as_bstr())
                }
                Entry::DotDot => {
                    let inode_num = if let Some(parent) = file.name.parent_within_mount() {
                        parent.node.ino
                    } else {
                        // For the root .. should have the same inode number as .
                        file.name.entry.node.ino
                    };
                    sink.add(inode_num, sink.offset() + 1, DirectoryEntryType::DIR, "..".into())
                }
                Entry::None => break,
            } {
                iterator.pending_entry = entry;
                return Err(e);
            }
        }
        Ok(())
    }

    /// Syncs the remote directory, translating Zircon statuses to the errno values callers
    /// expect from fsync(2).
    fn sync(&self, _file: &FileObject, _current_task: &CurrentTask) -> Result<(), Errno> {
        self.zxio.sync().map_err(|status| match status {
            zx::Status::NO_RESOURCES | zx::Status::NO_MEMORY | zx::Status::NO_SPACE => {
                errno!(ENOSPC)
            }
            zx::Status::INVALID_ARGS | zx::Status::NOT_FILE => errno!(EINVAL),
            zx::Status::BAD_HANDLE => errno!(EBADFD),
            zx::Status::NOT_SUPPORTED => errno!(ENOTSUP),
            zx::Status::INTERRUPTED_RETRY => errno!(EINTR),
            _ => errno!(EIO),
        })
    }

    /// Clones the underlying remote object and releases it as a raw Zircon handle.
    fn to_handle(
        &self,
        _file: &FileObject,
        _current_task: &CurrentTask,
    ) -> Result<Option<zx::NullableHandle>, Errno> {
        self.zxio
            .deep_clone()
            .and_then(Zxio::release)
            .map(Some)
            .map_err(|status| from_status_like_fdio!(status))
    }
}
1489
/// `FileOps` state for an open remote regular file.
pub struct RemoteFileObject {
    /// The underlying Zircon I/O object.  This is shared, so we must take care not to use any
    /// stateful methods on the underlying object (reading and writing is fine).
    zxio: Zxio,

    /// Cached read-only VMO handle.
    read_only_memory: OnceCell<Arc<MemoryObject>>,

    /// Cached read/exec VMO handle.
    read_exec_memory: OnceCell<Arc<MemoryObject>>,
}
1501
1502impl RemoteFileObject {
1503    fn new(zxio: Zxio) -> RemoteFileObject {
1504        RemoteFileObject {
1505            zxio,
1506            read_only_memory: Default::default(),
1507            read_exec_memory: Default::default(),
1508        }
1509    }
1510
1511    fn fetch_remote_memory(&self, prot: ProtectionFlags) -> Result<Arc<MemoryObject>, Errno> {
1512        let without_exec = self
1513            .zxio
1514            .vmo_get(prot.to_vmar_flags() - zx::VmarFlags::PERM_EXECUTE)
1515            .map_err(|status| from_status_like_fdio!(status))?;
1516        let all_flags = if prot.contains(ProtectionFlags::EXEC) {
1517            without_exec.replace_as_executable(&VMEX_RESOURCE).map_err(impossible_error)?
1518        } else {
1519            without_exec
1520        };
1521        Ok(Arc::new(MemoryObject::from(all_flags)))
1522    }
1523}
1524
impl FileOps for RemoteFileObject {
    fileops_impl_seekable!();

    /// Positional read at `offset` into `data` (see `zxio_read_at`).
    fn read(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        _file: &FileObject,
        _current_task: &CurrentTask,
        offset: usize,
        data: &mut dyn OutputBuffer,
    ) -> Result<usize, Errno> {
        zxio_read_at(&self.zxio, offset, data)
    }

    /// Positional write of `data` at `offset` (see `zxio_write_at`).
    fn write(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        _file: &FileObject,
        current_task: &CurrentTask,
        offset: usize,
        data: &mut dyn InputBuffer,
    ) -> Result<usize, Errno> {
        zxio_write_at(&self.zxio, current_task, offset, data)
    }

    /// Returns a `MemoryObject` suitable for mapping the file with protection `prot`.
    ///
    /// READ-only and READ|EXEC requests are served from per-object caches (one VMO each);
    /// any other protection combination fetches a fresh VMO on every call.
    fn get_memory(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        _file: &FileObject,
        _current_task: &CurrentTask,
        _length: Option<usize>,
        prot: ProtectionFlags,
    ) -> Result<Arc<MemoryObject>, Errno> {
        trace_duration!(CATEGORY_STARNIX_MM, "RemoteFileGetVmo");
        let memory_cache = if prot == (ProtectionFlags::READ | ProtectionFlags::EXEC) {
            Some(&self.read_exec_memory)
        } else if prot == ProtectionFlags::READ {
            Some(&self.read_only_memory)
        } else {
            None
        };

        memory_cache
            .map(|c| c.get_or_try_init(|| self.fetch_remote_memory(prot)).cloned())
            .unwrap_or_else(|| self.fetch_remote_memory(prot))
    }

    /// Clones the underlying remote object and releases it as a raw Zircon handle.
    fn to_handle(
        &self,
        _file: &FileObject,
        _current_task: &CurrentTask,
    ) -> Result<Option<zx::NullableHandle>, Errno> {
        self.zxio
            .deep_clone()
            .and_then(Zxio::release)
            .map(Some)
            .map_err(|status| from_status_like_fdio!(status))
    }

    /// Syncs the remote file, translating Zircon statuses to the errno values callers expect
    /// from fsync(2).
    fn sync(&self, _file: &FileObject, _current_task: &CurrentTask) -> Result<(), Errno> {
        self.zxio.sync().map_err(|status| match status {
            zx::Status::NO_RESOURCES | zx::Status::NO_MEMORY | zx::Status::NO_SPACE => {
                errno!(ENOSPC)
            }
            zx::Status::INVALID_ARGS | zx::Status::NOT_FILE => errno!(EINVAL),
            zx::Status::BAD_HANDLE => errno!(EBADFD),
            zx::Status::NOT_SUPPORTED => errno!(ENOTSUP),
            zx::Status::INTERRUPTED_RETRY => errno!(EINTR),
            _ => errno!(EIO),
        })
    }

    /// Delegates to `default_ioctl` for generic ioctl handling.
    fn ioctl(
        &self,
        locked: &mut Locked<Unlocked>,
        file: &FileObject,
        current_task: &CurrentTask,
        request: u32,
        arg: SyscallArg,
    ) -> Result<SyscallResult, Errno> {
        default_ioctl(file, locked, current_task, request, arg)
    }
}
1608
/// An `FsNodeOps` implementation for a symbolic link that lives on a remote
/// (Fuchsia) filesystem.
struct RemoteSymlink {
    // Connection to the remote node. Accessors clone the connection out from
    // under the lock (see `RemoteSymlink::zxio`).
    zxio: Mutex<syncio::Zxio>,
}
1612
1613impl RemoteSymlink {
1614    fn zxio(&self) -> syncio::Zxio {
1615        self.zxio.lock().clone()
1616    }
1617}
1618
1619impl FsNodeOps for RemoteSymlink {
1620    fs_node_impl_symlink!();
1621    fs_node_impl_xattr_delegate!(self, self.zxio());
1622
1623    fn readlink(
1624        &self,
1625        _locked: &mut Locked<FileOpsCore>,
1626        _node: &FsNode,
1627        _current_task: &CurrentTask,
1628    ) -> Result<SymlinkTarget, Errno> {
1629        Ok(SymlinkTarget::Path(
1630            self.zxio().read_link().map_err(|status| from_status_like_fdio!(status))?.into(),
1631        ))
1632    }
1633
1634    fn fetch_and_refresh_info<'a>(
1635        &self,
1636        _locked: &mut Locked<FileOpsCore>,
1637        _node: &FsNode,
1638        _current_task: &CurrentTask,
1639        info: &'a RwLock<FsNodeInfo>,
1640    ) -> Result<RwLockReadGuard<'a, FsNodeInfo>, Errno> {
1641        fetch_and_refresh_info_impl(&self.zxio(), info)
1642    }
1643
1644    fn forget(
1645        self: Box<Self>,
1646        _locked: &mut Locked<FileOpsCore>,
1647        _current_task: &CurrentTask,
1648        info: FsNodeInfo,
1649    ) -> Result<(), Errno> {
1650        // Before forgetting this node, update atime if we need to.
1651        if info.pending_time_access_update {
1652            self.zxio()
1653                .close_and_update_access_time()
1654                .map_err(|status| from_status_like_fdio!(status))?;
1655        }
1656        Ok(())
1657    }
1658}
1659
/// A file object backed by a Zircon counter handle. Its `FileOps` impl
/// rejects read/write and only services the sync-file ioctls, which wrap a
/// duplicate of the counter in a `SyncFile`.
pub struct RemoteCounter {
    counter: Counter,
}
1663
impl RemoteCounter {
    /// Wraps a Zircon counter in a `RemoteCounter`.
    fn new(counter: Counter) -> Self {
        Self { counter }
    }

    /// Duplicates the underlying counter handle with identical rights.
    ///
    /// Duplicating a valid handle is not expected to fail, so any error is
    /// routed through `impossible_error`.
    pub fn duplicate_handle(&self) -> Result<Counter, Errno> {
        self.counter.duplicate_handle(zx::Rights::SAME_RIGHTS).map_err(impossible_error)
    }
}
1673
1674impl FileOps for RemoteCounter {
1675    fileops_impl_nonseekable!();
1676    fileops_impl_noop_sync!();
1677
1678    fn read(
1679        &self,
1680        _locked: &mut Locked<FileOpsCore>,
1681        _file: &FileObject,
1682        _current_task: &CurrentTask,
1683        _offset: usize,
1684        _data: &mut dyn OutputBuffer,
1685    ) -> Result<usize, Errno> {
1686        error!(ENOTSUP)
1687    }
1688
1689    fn write(
1690        &self,
1691        _locked: &mut Locked<FileOpsCore>,
1692        _file: &FileObject,
1693        _current_task: &CurrentTask,
1694        _offset: usize,
1695        _data: &mut dyn InputBuffer,
1696    ) -> Result<usize, Errno> {
1697        error!(ENOTSUP)
1698    }
1699
1700    fn ioctl(
1701        &self,
1702        locked: &mut Locked<Unlocked>,
1703        file: &FileObject,
1704        current_task: &CurrentTask,
1705        request: u32,
1706        arg: SyscallArg,
1707    ) -> Result<SyscallResult, Errno> {
1708        let ioctl_type = (request >> 8) as u8;
1709        let ioctl_number = request as u8;
1710        if ioctl_type == SYNC_IOC_MAGIC
1711            && (ioctl_number == SYNC_IOC_FILE_INFO || ioctl_number == SYNC_IOC_MERGE)
1712        {
1713            let mut sync_points: Vec<SyncPoint> = vec![];
1714            let counter = self.duplicate_handle()?;
1715            sync_points.push(SyncPoint::new(Timeline::Hwc, counter.into()));
1716            let sync_file_name: &[u8; 32] = b"remote counter\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0";
1717            let sync_file = SyncFile::new(*sync_file_name, SyncFence { sync_points });
1718            return sync_file.ioctl(locked, file, current_task, request, arg);
1719        }
1720
1721        error!(EINVAL)
1722    }
1723}
1724
1725#[cfg(test)]
1726mod test {
1727    use super::*;
1728    use crate::mm::PAGE_SIZE;
1729    use crate::testing::*;
1730    use crate::vfs::buffers::{VecInputBuffer, VecOutputBuffer};
1731    use crate::vfs::socket::{SocketFile, SocketMessageFlags};
1732    use crate::vfs::{EpollFileObject, LookupContext, Namespace, SymlinkMode, TimeUpdateType};
1733    use assert_matches::assert_matches;
1734    use fidl_fuchsia_io as fio;
1735    use flyweights::FlyByteStr;
1736    use fxfs_testing::{TestFixture, TestFixtureOptions};
1737    use starnix_uapi::auth::Credentials;
1738    use starnix_uapi::errors::EINVAL;
1739    use starnix_uapi::file_mode::{AccessCheck, mode};
1740    use starnix_uapi::open_flags::OpenFlags;
1741    use starnix_uapi::vfs::{EpollEvent, FdEvents};
1742    use zx::HandleBased;
1743
1744    #[::fuchsia::test]
1745    async fn test_remote_uds() {
1746        spawn_kernel_and_run(async |locked, current_task| {
1747            let (s1, s2) = zx::Socket::create_datagram();
1748            s2.write(&vec![0]).expect("write");
1749            let file = new_remote_file(locked, &current_task, s1.into(), OpenFlags::RDWR)
1750                .expect("new_remote_file");
1751            assert!(file.node().is_sock());
1752            let socket_ops = file.downcast_file::<SocketFile>().unwrap();
1753            let flags = SocketMessageFlags::CTRUNC
1754                | SocketMessageFlags::TRUNC
1755                | SocketMessageFlags::NOSIGNAL
1756                | SocketMessageFlags::CMSG_CLOEXEC;
1757            let mut buffer = VecOutputBuffer::new(1024);
1758            let info = socket_ops
1759                .recvmsg(locked, &current_task, &file, &mut buffer, flags, None)
1760                .expect("recvmsg");
1761            assert!(info.ancillary_data.is_empty());
1762            assert_eq!(info.message_length, 1);
1763        })
1764        .await;
1765    }
1766
1767    #[::fuchsia::test]
1768    async fn test_tree() {
1769        spawn_kernel_and_run(async |locked, current_task| {
1770            let kernel = current_task.kernel();
1771            let rights = fio::PERM_READABLE | fio::PERM_EXECUTABLE;
1772            let (server, client) = zx::Channel::create();
1773            fdio::open("/pkg", rights, server).expect("failed to open /pkg");
1774            let fs = RemoteFs::new_fs(
1775                locked,
1776                &kernel,
1777                client,
1778                FileSystemOptions { source: FlyByteStr::new(b"/pkg"), ..Default::default() },
1779                rights,
1780            )
1781            .unwrap();
1782            let ns = Namespace::new(fs);
1783            let root = ns.root();
1784            let mut context = LookupContext::default();
1785            assert_eq!(
1786                root.lookup_child(locked, &current_task, &mut context, "nib".into()).err(),
1787                Some(errno!(ENOENT))
1788            );
1789            let mut context = LookupContext::default();
1790            root.lookup_child(locked, &current_task, &mut context, "lib".into()).unwrap();
1791
1792            let mut context = LookupContext::default();
1793            let _test_file = root
1794                .lookup_child(
1795                    locked,
1796                    &current_task,
1797                    &mut context,
1798                    "data/tests/hello_starnix".into(),
1799                )
1800                .unwrap()
1801                .open(locked, &current_task, OpenFlags::RDONLY, AccessCheck::default())
1802                .unwrap();
1803        })
1804        .await;
1805    }
1806
    #[::fuchsia::test]
    async fn test_blocking_io() {
        spawn_kernel_and_run(async |locked, current_task| {
            let (client, server) = zx::Socket::create_stream();
            let pipe = create_fuchsia_pipe(locked, &current_task, client, OpenFlags::RDWR).unwrap();

            let bytes = [0u8; 64];
            assert_eq!(bytes.len(), server.write(&bytes).unwrap());

            // The server end was written first, so this read completes
            // immediately without blocking.
            let bytes_read =
                pipe.read(locked, &current_task, &mut VecOutputBuffer::new(64)).unwrap();

            assert_eq!(bytes_read, bytes.len());
        })
        .await;
    }
1824
    #[::fuchsia::test]
    async fn test_poll() {
        spawn_kernel_and_run(async |locked, current_task| {
            let (client, server) = zx::Socket::create_stream();
            let pipe = create_fuchsia_pipe(locked, &current_task, client, OpenFlags::RDWR)
                .expect("create_fuchsia_pipe");
            let server_zxio = Zxio::create(server.into_handle()).expect("Zxio::create");

            // With no data queued, the pipe is writable but not readable.
            assert_eq!(
                pipe.query_events(locked, &current_task),
                Ok(FdEvents::POLLOUT | FdEvents::POLLWRNORM)
            );

            // Register the pipe with an epoll instance, watching for POLLIN.
            let epoll_object = EpollFileObject::new_file(locked, &current_task);
            let epoll_file = epoll_object.downcast_file::<EpollFileObject>().unwrap();
            let event = EpollEvent::new(FdEvents::POLLIN, 0);
            epoll_file
                .add(locked, &current_task, &pipe, &epoll_object, event)
                .expect("poll_file.add");

            // Nothing to read yet, so a zero-timeout wait reports no events.
            let fds = epoll_file
                .wait(locked, &current_task, 1, zx::MonotonicInstant::ZERO)
                .expect("wait");
            assert!(fds.is_empty());

            // Queue one byte on the server end to make the pipe readable.
            assert_eq!(server_zxio.write(&[0]).expect("write"), 1);

            assert_eq!(
                pipe.query_events(locked, &current_task),
                Ok(FdEvents::POLLOUT
                    | FdEvents::POLLWRNORM
                    | FdEvents::POLLIN
                    | FdEvents::POLLRDNORM)
            );
            let fds = epoll_file
                .wait(locked, &current_task, 1, zx::MonotonicInstant::ZERO)
                .expect("wait");
            assert_eq!(fds.len(), 1);

            // Drain the byte; the pipe should become not-readable again.
            assert_eq!(
                pipe.read(locked, &current_task, &mut VecOutputBuffer::new(64)).expect("read"),
                1
            );

            assert_eq!(
                pipe.query_events(locked, &current_task),
                Ok(FdEvents::POLLOUT | FdEvents::POLLWRNORM)
            );
            let fds = epoll_file
                .wait(locked, &current_task, 1, zx::MonotonicInstant::ZERO)
                .expect("wait");
            assert!(fds.is_empty());
        })
        .await;
    }
1880
1881    #[::fuchsia::test]
1882    async fn test_new_remote_directory() {
1883        spawn_kernel_and_run(async |locked, current_task| {
1884            let (server, client) = zx::Channel::create();
1885            fdio::open("/pkg", fio::PERM_READABLE | fio::PERM_EXECUTABLE, server)
1886                .expect("failed to open /pkg");
1887
1888            let fd = new_remote_file(locked, &current_task, client.into(), OpenFlags::RDWR)
1889                .expect("new_remote_file");
1890            assert!(fd.node().is_dir());
1891            assert!(fd.to_handle(&current_task).expect("to_handle").is_some());
1892        })
1893        .await;
1894    }
1895
1896    #[::fuchsia::test]
1897    async fn test_new_remote_file() {
1898        spawn_kernel_and_run(async |locked, current_task| {
1899            let (server, client) = zx::Channel::create();
1900            fdio::open("/pkg/meta/contents", fio::PERM_READABLE, server)
1901                .expect("failed to open /pkg/meta/contents");
1902
1903            let fd = new_remote_file(locked, &current_task, client.into(), OpenFlags::RDONLY)
1904                .expect("new_remote_file");
1905            assert!(!fd.node().is_dir());
1906            assert!(fd.to_handle(&current_task).expect("to_handle").is_some());
1907        })
1908        .await;
1909    }
1910
1911    #[::fuchsia::test]
1912    async fn test_new_remote_counter() {
1913        spawn_kernel_and_run(async |locked, current_task| {
1914            let counter = zx::Counter::create();
1915
1916            let fd = new_remote_file(locked, &current_task, counter.into(), OpenFlags::RDONLY)
1917                .expect("new_remote_file");
1918            assert!(fd.to_handle(&current_task).expect("to_handle").is_some());
1919        })
1920        .await;
1921    }
1922
1923    #[::fuchsia::test]
1924    async fn test_new_remote_vmo() {
1925        spawn_kernel_and_run(async |locked, current_task| {
1926            let vmo = zx::Vmo::create(*PAGE_SIZE).expect("Vmo::create");
1927            let fd = new_remote_file(locked, &current_task, vmo.into(), OpenFlags::RDWR)
1928                .expect("new_remote_file");
1929            assert!(!fd.node().is_dir());
1930            assert!(fd.to_handle(&current_task).expect("to_handle").is_some());
1931        })
1932        .await;
1933    }
1934
    #[::fuchsia::test(threads = 2)]
    async fn test_symlink() {
        let fixture = TestFixture::new().await;
        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed");

        const LINK_PATH: &'static str = "symlink";
        const LINK_TARGET: &'static str = "私は「UTF8」です";
        // We expect the reported size of the symlink to be the length of the target, in bytes,
        // *without* a null terminator. Most Linux systems assume UTF-8 encoding.
        const LINK_SIZE: usize = 22;
        assert_eq!(LINK_SIZE, LINK_TARGET.len());

        // First run: create the symlink, read it back, and check the target
        // and the size reported via node info and stat.
        spawn_kernel_and_run(async move |locked, current_task| {
            let kernel = current_task.kernel();
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
                fio::PERM_READABLE | fio::PERM_WRITABLE,
            )
            .expect("new_fs failed");
            let ns = Namespace::new(fs);
            let root = ns.root();
            let symlink_node = root
                .create_symlink(locked, &current_task, LINK_PATH.into(), LINK_TARGET.into())
                .expect("symlink failed");
            assert_matches!(&*symlink_node.entry.node.info(), FsNodeInfo { size: LINK_SIZE, .. });

            // Look the link up with NoFollow so we get the link itself.
            let mut context = LookupContext::new(SymlinkMode::NoFollow);
            let child = root
                .lookup_child(locked, &current_task, &mut context, "symlink".into())
                .expect("lookup_child failed");

            match child.readlink(locked, &current_task).expect("readlink failed") {
                SymlinkTarget::Path(path) => assert_eq!(path, LINK_TARGET),
                SymlinkTarget::Node(_) => panic!("readlink returned SymlinkTarget::Node"),
            }
            // Ensure the size stat reports matches what is expected.
            let stat_result = child.entry.node.stat(locked, &current_task).expect("stat failed");
            assert_eq!(stat_result.st_size as usize, LINK_SIZE);
        })
        .await;

        // Simulate a second run to ensure the symlink was persisted correctly.
        let fixture = TestFixture::open(
            fixture.close().await,
            TestFixtureOptions { format: false, ..Default::default() },
        )
        .await;
        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed after remount");

        spawn_kernel_and_run(async move |locked, current_task| {
            let kernel = current_task.kernel();
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
                fio::PERM_READABLE | fio::PERM_WRITABLE,
            )
            .expect("new_fs failed after remount");
            let ns = Namespace::new(fs);
            let root = ns.root();
            let mut context = LookupContext::new(SymlinkMode::NoFollow);
            let child = root
                .lookup_child(locked, &current_task, &mut context, "symlink".into())
                .expect("lookup_child failed after remount");

            match child.readlink(locked, &current_task).expect("readlink failed after remount") {
                SymlinkTarget::Path(path) => assert_eq!(path, LINK_TARGET),
                SymlinkTarget::Node(_) => {
                    panic!("readlink returned SymlinkTarget::Node after remount")
                }
            }
            // Ensure the size stat reports matches what is expected.
            let stat_result =
                child.entry.node.stat(locked, &current_task).expect("stat failed after remount");
            assert_eq!(stat_result.st_size as usize, LINK_SIZE);
        })
        .await;

        fixture.close().await;
    }
2021
    #[::fuchsia::test]
    async fn test_mode_uid_gid_and_dev_persists() {
        const FILE_MODE: FileMode = mode!(IFREG, 0o467);
        const DIR_MODE: FileMode = mode!(IFDIR, 0o647);
        const BLK_MODE: FileMode = mode!(IFBLK, 0o746);

        let fixture = TestFixture::new().await;
        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed");

        // Simulate a first run of starnix.
        spawn_kernel_and_run(async move |locked, current_task| {
            let kernel = current_task.kernel();
            // Use non-root creds so created nodes get a distinctive owner
            // (uid=1, gid=2) that can be checked for after remount.
            current_task.set_creds(Credentials {
                euid: 1,
                fsuid: 1,
                egid: 2,
                fsgid: 2,
                ..current_task.current_creds()
            });
            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
                rights,
            )
            .expect("new_fs failed");
            let ns = Namespace::new(fs);
            // Clear the umask so the exact modes above are applied verbatim.
            current_task.fs().set_umask(FileMode::from_bits(0));
            ns.root()
                .create_node(locked, &current_task, "file".into(), FILE_MODE, DeviceType::NONE)
                .expect("create_node failed");
            ns.root()
                .create_node(locked, &current_task, "dir".into(), DIR_MODE, DeviceType::NONE)
                .expect("create_node failed");
            ns.root()
                .create_node(locked, &current_task, "dev".into(), BLK_MODE, DeviceType::RANDOM)
                .expect("create_node failed");
        })
        .await;

        // Simulate a second run.
        let fixture = TestFixture::open(
            fixture.close().await,
            TestFixtureOptions { format: false, ..Default::default() },
        )
        .await;

        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed");

        spawn_kernel_and_run(async move |locked, current_task| {
            let kernel = current_task.kernel();
            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
                rights,
            )
            .expect("new_fs failed");
            let ns = Namespace::new(fs);
            let mut context = LookupContext::new(SymlinkMode::NoFollow);
            // Each node's mode, owner, and device type must have survived
            // the remount.
            let child = ns
                .root()
                .lookup_child(locked, &current_task, &mut context, "file".into())
                .expect("lookup_child failed");
            assert_matches!(
                &*child.entry.node.info(),
                FsNodeInfo { mode: FILE_MODE, uid: 1, gid: 2, rdev: DeviceType::NONE, .. }
            );
            let child = ns
                .root()
                .lookup_child(locked, &current_task, &mut context, "dir".into())
                .expect("lookup_child failed");
            assert_matches!(
                &*child.entry.node.info(),
                FsNodeInfo { mode: DIR_MODE, uid: 1, gid: 2, rdev: DeviceType::NONE, .. }
            );
            let child = ns
                .root()
                .lookup_child(locked, &current_task, &mut context, "dev".into())
                .expect("lookup_child failed");
            assert_matches!(
                &*child.entry.node.info(),
                FsNodeInfo { mode: BLK_MODE, uid: 1, gid: 2, rdev: DeviceType::RANDOM, .. }
            );
        })
        .await;
        fixture.close().await;
    }
2116
    #[::fuchsia::test]
    async fn test_dot_dot_inode_numbers() {
        let fixture = TestFixture::new().await;
        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed");

        const MODE: FileMode = FileMode::from_bits(FileMode::IFDIR.bits() | 0o777);

        spawn_kernel_and_run(async |locked, current_task| {
            let kernel = current_task.kernel();
            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
                rights,
            )
            .expect("new_fs failed");
            let ns = Namespace::new(fs);
            current_task.fs().set_umask(FileMode::from_bits(0));
            // Build a two-level hierarchy: /dir and /dir/dir.
            let sub_dir1 = ns
                .root()
                .create_node(locked, &current_task, "dir".into(), MODE, DeviceType::NONE)
                .expect("create_node failed");
            let sub_dir2 = sub_dir1
                .create_node(locked, &current_task, "dir".into(), MODE, DeviceType::NONE)
                .expect("create_node failed");

            let dir_handle = ns
                .root()
                .entry
                .open_anonymous(locked, &current_task, OpenFlags::RDONLY)
                .expect("open failed");

            // A DirentSink that remembers the inode number reported for the
            // ".." entry while consuming the rest of the directory listing.
            #[derive(Default)]
            struct Sink {
                offset: off_t,
                dot_dot_inode_num: u64,
            }
            impl DirentSink for Sink {
                fn add(
                    &mut self,
                    inode_num: ino_t,
                    offset: off_t,
                    entry_type: DirectoryEntryType,
                    name: &FsStr,
                ) -> Result<(), Errno> {
                    if name == ".." {
                        self.dot_dot_inode_num = inode_num;
                        assert_eq!(entry_type, DirectoryEntryType::DIR);
                    }
                    self.offset = offset;
                    Ok(())
                }
                fn offset(&self) -> off_t {
                    self.offset
                }
            }
            let mut sink = Sink::default();
            dir_handle.readdir(locked, &current_task, &mut sink).expect("readdir failed");

            // inode_num for .. for the root should be the same as root.
            assert_eq!(sink.dot_dot_inode_num, ns.root().entry.node.ino);

            let dir_handle = sub_dir1
                .entry
                .open_anonymous(locked, &current_task, OpenFlags::RDONLY)
                .expect("open failed");
            let mut sink = Sink::default();
            dir_handle.readdir(locked, &current_task, &mut sink).expect("readdir failed");

            // inode_num for .. for the first sub directory should be the same as root.
            assert_eq!(sink.dot_dot_inode_num, ns.root().entry.node.ino);

            let dir_handle = sub_dir2
                .entry
                .open_anonymous(locked, &current_task, OpenFlags::RDONLY)
                .expect("open failed");
            let mut sink = Sink::default();
            dir_handle.readdir(locked, &current_task, &mut sink).expect("readdir failed");

            // inode_num for .. for the second subdir should be the first subdir.
            assert_eq!(sink.dot_dot_inode_num, sub_dir1.entry.node.ino);
        })
        .await;
        fixture.close().await;
    }
2205
2206    #[::fuchsia::test]
2207    async fn test_remote_special_node() {
2208        let fixture = TestFixture::new().await;
2209        let (server, client) = zx::Channel::create();
2210        fixture.root().clone(server.into()).expect("clone failed");
2211
2212        const FIFO_MODE: FileMode = FileMode::from_bits(FileMode::IFIFO.bits() | 0o777);
2213        const REG_MODE: FileMode = FileMode::from_bits(FileMode::IFREG.bits());
2214
2215        spawn_kernel_and_run(async |locked, current_task| {
2216            let kernel = current_task.kernel();
2217            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
2218            let fs = RemoteFs::new_fs(
2219                locked,
2220                &kernel,
2221                client,
2222                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
2223                rights,
2224            )
2225            .expect("new_fs failed");
2226            let ns = Namespace::new(fs);
2227            current_task.fs().set_umask(FileMode::from_bits(0));
2228            let root = ns.root();
2229
2230            // Create RemoteSpecialNode (e.g. FIFO)
2231            root.create_node(locked, &current_task, "fifo".into(), FIFO_MODE, DeviceType::NONE)
2232                .expect("create_node failed");
2233            let mut context = LookupContext::new(SymlinkMode::NoFollow);
2234            let fifo_node = root
2235                .lookup_child(locked, &current_task, &mut context, "fifo".into())
2236                .expect("lookup_child failed");
2237
2238            // Test that we get expected behaviour for RemoteSpecialNode operation, e.g.
2239            // test that truncate should return EINVAL
2240            match fifo_node.truncate(locked, &current_task, 0) {
2241                Ok(_) => {
2242                    panic!("truncate passed for special node")
2243                }
2244                Err(errno) if errno == EINVAL => {}
2245                Err(e) => {
2246                    panic!("truncate failed with error {:?}", e)
2247                }
2248            };
2249
2250            // Create regular RemoteNode
2251            root.create_node(locked, &current_task, "file".into(), REG_MODE, DeviceType::NONE)
2252                .expect("create_node failed");
2253            let mut context = LookupContext::new(SymlinkMode::NoFollow);
2254            let reg_node = root
2255                .lookup_child(locked, &current_task, &mut context, "file".into())
2256                .expect("lookup_child failed");
2257
2258            // We should be able to perform truncate on regular files
2259            reg_node.truncate(locked, &current_task, 0).expect("truncate failed");
2260        })
2261        .await;
2262        fixture.close().await;
2263    }
2264
    #[::fuchsia::test]
    async fn test_hard_link() {
        let fixture = TestFixture::new().await;
        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed");

        // First run: create "file1" and hard-link it as "file2".
        spawn_kernel_and_run(async move |locked, current_task| {
            let kernel = current_task.kernel();
            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
                rights,
            )
            .expect("new_fs failed");
            let ns = Namespace::new(fs);
            current_task.fs().set_umask(FileMode::from_bits(0));
            let node = ns
                .root()
                .create_node(
                    locked,
                    &current_task,
                    "file1".into(),
                    mode!(IFREG, 0o666),
                    DeviceType::NONE,
                )
                .expect("create_node failed");
            ns.root()
                .entry
                .node
                .link(locked, &current_task, &ns.root().mount, "file2".into(), &node.entry.node)
                .expect("link failed");
        })
        .await;

        // Remount to ensure the link was persisted rather than just cached.
        let fixture = TestFixture::open(
            fixture.close().await,
            TestFixtureOptions { format: false, ..Default::default() },
        )
        .await;

        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed");

        // Second run: both names must resolve to the same underlying node.
        spawn_kernel_and_run(async move |locked, current_task| {
            let kernel = current_task.kernel();
            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
                rights,
            )
            .expect("new_fs failed");
            let ns = Namespace::new(fs);
            let mut context = LookupContext::new(SymlinkMode::NoFollow);
            let child1 = ns
                .root()
                .lookup_child(locked, &current_task, &mut context, "file1".into())
                .expect("lookup_child failed");
            let child2 = ns
                .root()
                .lookup_child(locked, &current_task, &mut context, "file2".into())
                .expect("lookup_child failed");
            assert!(Arc::ptr_eq(&child1.entry.node, &child2.entry.node));
        })
        .await;
        fixture.close().await;
    }
2337
    #[::fuchsia::test]
    async fn test_lookup_on_fsverity_enabled_file() {
        let fixture = TestFixture::new().await;
        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed");

        const MODE: FileMode = FileMode::from_bits(FileMode::IFREG.bits() | 0o467);

        // First run: create a file and turn fsverity on for it.
        spawn_kernel_and_run(async move |locked, current_task| {
            let kernel = current_task.kernel();
            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
                rights,
            )
            .expect("new_fs failed");
            let ns = Namespace::new(fs);
            current_task.fs().set_umask(FileMode::from_bits(0));
            let file = ns
                .root()
                .create_node(locked, &current_task, "file".into(), MODE, DeviceType::NONE)
                .expect("create_node failed");
            // Enable verity on the file.
            let desc = fsverity_descriptor {
                version: 1,
                hash_algorithm: 1,
                salt_size: 32,
                log_blocksize: 12,
                ..Default::default()
            };
            file.entry.node.ops().enable_fsverity(&desc).expect("enable fsverity failed");
        })
        .await;

        // Tear down the kernel and open the file again. The file should no longer be cached.
        // Test that lookup works as expected for an fsverity-enabled file.
        let fixture = TestFixture::open(
            fixture.close().await,
            TestFixtureOptions { format: false, ..Default::default() },
        )
        .await;
        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed");

        spawn_kernel_and_run(async move |locked, current_task| {
            let kernel = current_task.kernel();
            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
                rights,
            )
            .expect("new_fs failed");
            let ns = Namespace::new(fs);
            let mut context = LookupContext::new(SymlinkMode::NoFollow);
            let _child = ns
                .root()
                .lookup_child(locked, &current_task, &mut context, "file".into())
                .expect("lookup_child failed");
        })
        .await;
        fixture.close().await;
    }
2406
    // Verifies that an attribute change made through `chmod` is written back to the remote
    // filesystem: change the mode, tear down the kernel, re-open the same on-disk
    // filesystem (`format: false`), and check the mode survived.
    #[::fuchsia::test]
    async fn test_update_attributes_persists() {
        let fixture = TestFixture::new().await;
        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed");

        // Regular file with a non-default permission mode.
        const MODE: FileMode = FileMode::from_bits(FileMode::IFREG.bits() | 0o467);

        spawn_kernel_and_run(async move |locked, current_task| {
            let kernel = current_task.kernel();
            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
                rights,
            )
            .expect("new_fs failed");
            let ns = Namespace::new(fs);
            // Clear the umask so `create_node` applies MODE exactly.
            current_task.fs().set_umask(FileMode::from_bits(0));
            let file = ns
                .root()
                .create_node(locked, &current_task, "file".into(), MODE, DeviceType::NONE)
                .expect("create_node failed");
            // Change the mode, this change should persist
            file.entry
                .node
                .chmod(locked, &current_task, &file.mount, MODE | FileMode::ALLOW_ALL)
                .expect("chmod failed");
        })
        .await;

        // Tear down the kernel and open the file again. Check that changes persisted.
        let fixture = TestFixture::open(
            fixture.close().await,
            TestFixtureOptions { format: false, ..Default::default() },
        )
        .await;
        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed");

        spawn_kernel_and_run(async move |locked, current_task| {
            let kernel = current_task.kernel();
            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
                rights,
            )
            .expect("new_fs failed");
            let ns = Namespace::new(fs);
            let mut context = LookupContext::new(SymlinkMode::NoFollow);
            let child = ns
                .root()
                .lookup_child(locked, &current_task, &mut context, "file".into())
                .expect("lookup_child failed");
            // The mode read back from the freshly-opened filesystem must include the
            // chmod made before the kernel was torn down.
            assert_eq!(child.entry.node.info().mode, MODE | FileMode::ALLOW_ALL);
        })
        .await;
        fixture.close().await;
    }
2471
    // Verifies that `statfs` on a `RemoteFs` reports plausible, internally-consistent
    // values rather than zeros or defaults.
    #[::fuchsia::test]
    async fn test_statfs() {
        let fixture = TestFixture::new().await;
        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed");

        spawn_kernel_and_run(async move |locked, current_task| {
            let kernel = current_task.kernel();
            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
                rights,
            )
            .expect("new_fs failed");

            let statfs = fs.statfs(locked, &current_task).expect("statfs failed");
            // Basic sanity: non-zero filesystem type and block geometry.
            assert!(statfs.f_type != 0);
            assert!(statfs.f_bsize > 0);
            assert!(statfs.f_blocks > 0);
            // Free counts must be positive and bounded by the corresponding totals.
            assert!(statfs.f_bfree > 0 && statfs.f_bfree <= statfs.f_blocks);
            assert!(statfs.f_files > 0);
            assert!(statfs.f_ffree > 0 && statfs.f_ffree <= statfs.f_files);
            // The filesystem id must not be all-zero.
            assert!(statfs.f_fsid.val[0] != 0 || statfs.f_fsid.val[1] != 0);
            assert!(statfs.f_namelen > 0);
            assert!(statfs.f_frsize > 0);
        })
        .await;

        fixture.close().await;
    }
2505
2506    #[::fuchsia::test]
2507    async fn test_allocate() {
2508        let fixture = TestFixture::new().await;
2509        let (server, client) = zx::Channel::create();
2510        fixture.root().clone(server.into()).expect("clone failed");
2511
2512        spawn_kernel_and_run(async move |locked, current_task| {
2513            let kernel = current_task.kernel();
2514            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
2515            let fs = RemoteFs::new_fs(
2516                locked,
2517                &kernel,
2518                client,
2519                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
2520                rights,
2521            )
2522            .expect("new_fs failed");
2523            let ns = Namespace::new(fs);
2524            current_task.fs().set_umask(FileMode::from_bits(0));
2525            let root = ns.root();
2526
2527            const REG_MODE: FileMode = FileMode::from_bits(FileMode::IFREG.bits());
2528            root.create_node(locked, &current_task, "file".into(), REG_MODE, DeviceType::NONE)
2529                .expect("create_node failed");
2530            let mut context = LookupContext::new(SymlinkMode::NoFollow);
2531            let reg_node = root
2532                .lookup_child(locked, &current_task, &mut context, "file".into())
2533                .expect("lookup_child failed");
2534
2535            reg_node
2536                .entry
2537                .node
2538                .fallocate(locked, &current_task, FallocMode::Allocate { keep_size: false }, 0, 20)
2539                .expect("truncate failed");
2540        })
2541        .await;
2542        fixture.close().await;
2543    }
2544
2545    #[::fuchsia::test]
2546    async fn test_allocate_overflow() {
2547        let fixture = TestFixture::new().await;
2548        let (server, client) = zx::Channel::create();
2549        fixture.root().clone(server.into()).expect("clone failed");
2550
2551        spawn_kernel_and_run(async move |locked, current_task| {
2552            let kernel = current_task.kernel();
2553            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
2554            let fs = RemoteFs::new_fs(
2555                locked,
2556                &kernel,
2557                client,
2558                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
2559                rights,
2560            )
2561            .expect("new_fs failed");
2562            let ns = Namespace::new(fs);
2563            current_task.fs().set_umask(FileMode::from_bits(0));
2564            let root = ns.root();
2565
2566            const REG_MODE: FileMode = FileMode::from_bits(FileMode::IFREG.bits());
2567            root.create_node(locked, &current_task, "file".into(), REG_MODE, DeviceType::NONE)
2568                .expect("create_node failed");
2569            let mut context = LookupContext::new(SymlinkMode::NoFollow);
2570            let reg_node = root
2571                .lookup_child(locked, &current_task, &mut context, "file".into())
2572                .expect("lookup_child failed");
2573
2574            reg_node
2575                .entry
2576                .node
2577                .fallocate(
2578                    locked,
2579                    &current_task,
2580                    FallocMode::Allocate { keep_size: false },
2581                    1,
2582                    u64::MAX,
2583                )
2584                .expect_err("truncate unexpectedly passed");
2585        })
2586        .await;
2587        fixture.close().await;
2588    }
2589
    // Verifies that mtime (`time_modify`) is advanced by a write and that the new value is
    // persisted by the remote filesystem across a kernel teardown and remount.
    #[::fuchsia::test]
    async fn test_time_modify_persists() {
        let fixture = TestFixture::new().await;
        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed");

        // Regular file with a non-default permission mode.
        const MODE: FileMode = FileMode::from_bits(FileMode::IFREG.bits() | 0o467);

        let last_modified = spawn_kernel_and_run(async move |locked, current_task| {
            let kernel = current_task.kernel();
            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
                rights,
            )
            .expect("new_fs failed");
            let ns: Arc<Namespace> = Namespace::new(fs);
            // Clear the umask so `create_node` applies MODE exactly.
            current_task.fs().set_umask(FileMode::from_bits(0));
            let child = ns
                .root()
                .create_node(locked, &current_task, "file".into(), MODE, DeviceType::NONE)
                .expect("create_node failed");
            // Write to file (this should update mtime (time_modify))
            let file = child
                .open(locked, &current_task, OpenFlags::RDWR, AccessCheck::default())
                .expect("open failed");
            // Call `fetch_and_refresh_info(..)` to refresh `time_modify` with the time managed by the
            // underlying filesystem
            let time_before_write = child
                .entry
                .node
                .fetch_and_refresh_info(locked, &current_task)
                .expect("fetch_and_refresh_info failed")
                .time_modify;
            let write_bytes: [u8; 5] = [1, 2, 3, 4, 5];
            let written = file
                .write(locked, &current_task, &mut VecInputBuffer::new(&write_bytes))
                .expect("write failed");
            assert_eq!(written, write_bytes.len());
            // Refresh again; mtime must have moved forward as a result of the write.
            let last_modified = child
                .entry
                .node
                .fetch_and_refresh_info(locked, &current_task)
                .expect("fetch_and_refresh_info failed")
                .time_modify;
            assert!(last_modified > time_before_write);
            // Return the observed mtime so it can be compared after remount.
            last_modified
        })
        .await;

        // Tear down the kernel and open the file again. Check that modification time is when we
        // last modified the contents of the file
        let fixture = TestFixture::open(
            fixture.close().await,
            TestFixtureOptions { format: false, ..Default::default() },
        )
        .await;
        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed");
        let refreshed_modified_time = spawn_kernel_and_run(async move |locked, current_task| {
            let kernel = current_task.kernel();
            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
                rights,
            )
            .expect("new_fs failed");
            let ns = Namespace::new(fs);
            let mut context = LookupContext::new(SymlinkMode::NoFollow);
            let child = ns
                .root()
                .lookup_child(locked, &current_task, &mut context, "file".into())
                .expect("lookup_child failed");
            // Read the persisted mtime back from the freshly-opened filesystem.
            let last_modified = child
                .entry
                .node
                .fetch_and_refresh_info(locked, &current_task)
                .expect("fetch_and_refresh_info failed")
                .time_modify;
            last_modified
        })
        .await;
        // The mtime observed before teardown must match the one persisted on disk.
        assert_eq!(last_modified, refreshed_modified_time);

        fixture.close().await;
    }
2682
    // Verifies `update_atime_mtime`: setting one timestamp with `TimeUpdateType::Time`
    // while passing `TimeUpdateType::Omit` for the other updates only the requested
    // timestamp and leaves the omitted one untouched.
    #[::fuchsia::test]
    async fn test_update_atime_mtime() {
        let fixture = TestFixture::new().await;
        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed");

        // Regular file with a non-default permission mode.
        const MODE: FileMode = FileMode::from_bits(FileMode::IFREG.bits() | 0o467);

        spawn_kernel_and_run(async move |locked, current_task| {
            let kernel = current_task.kernel();
            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
                rights,
            )
            .expect("new_fs failed");
            let ns: Arc<Namespace> = Namespace::new(fs);
            // Clear the umask so `create_node` applies MODE exactly.
            current_task.fs().set_umask(FileMode::from_bits(0));
            let child = ns
                .root()
                .create_node(locked, &current_task, "file".into(), MODE, DeviceType::NONE)
                .expect("create_node failed");

            // Snapshot the timestamps before any explicit update.
            let info_original = child
                .entry
                .node
                .fetch_and_refresh_info(locked, &current_task)
                .expect("fetch_and_refresh_info failed")
                .clone();

            // Set atime to 30ns; mtime is omitted and must not change.
            child
                .entry
                .node
                .update_atime_mtime(
                    locked,
                    &current_task,
                    &child.mount,
                    TimeUpdateType::Time(UtcInstant::from_nanos(30)),
                    TimeUpdateType::Omit,
                )
                .expect("update_atime_mtime failed");
            let info_after_update = child
                .entry
                .node
                .fetch_and_refresh_info(locked, &current_task)
                .expect("fetch_and_refresh_info failed")
                .clone();
            assert_eq!(info_after_update.time_modify, info_original.time_modify);
            assert_eq!(info_after_update.time_access, UtcInstant::from_nanos(30));

            // Now set mtime to 50ns; atime is omitted and must keep the 30ns value.
            child
                .entry
                .node
                .update_atime_mtime(
                    locked,
                    &current_task,
                    &child.mount,
                    TimeUpdateType::Omit,
                    TimeUpdateType::Time(UtcInstant::from_nanos(50)),
                )
                .expect("update_atime_mtime failed");
            let info_after_update2 = child
                .entry
                .node
                .fetch_and_refresh_info(locked, &current_task)
                .expect("fetch_and_refresh_info failed")
                .clone();
            assert_eq!(info_after_update2.time_modify, UtcInstant::from_nanos(50));
            assert_eq!(info_after_update2.time_access, UtcInstant::from_nanos(30));
        })
        .await;
        fixture.close().await;
    }
2759
    // Verifies that a write advances both ctime and mtime on the remote filesystem, and
    // that the new values become visible only after `fetch_and_refresh_info` pulls the
    // remote-managed timestamps — a plain `info()` read returns the stale cached values.
    #[::fuchsia::test]
    async fn test_write_updates_mtime_ctime() {
        let fixture = TestFixture::new().await;
        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed");

        // Regular file with a non-default permission mode.
        const MODE: FileMode = FileMode::from_bits(FileMode::IFREG.bits() | 0o467);

        spawn_kernel_and_run(async move |locked, current_task| {
            let kernel = current_task.kernel();
            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
                rights,
            )
            .expect("new_fs failed");
            let ns: Arc<Namespace> = Namespace::new(fs);
            // Clear the umask so `create_node` applies MODE exactly.
            current_task.fs().set_umask(FileMode::from_bits(0));
            let child = ns
                .root()
                .create_node(locked, &current_task, "file".into(), MODE, DeviceType::NONE)
                .expect("create_node failed");
            let file = child
                .open(locked, &current_task, OpenFlags::RDWR, AccessCheck::default())
                .expect("open failed");
            // Call `fetch_and_refresh_info(..)` to refresh ctime and mtime with the time managed by the
            // underlying filesystem
            let (ctime_before_write, mtime_before_write) = {
                let info = child
                    .entry
                    .node
                    .fetch_and_refresh_info(locked, &current_task)
                    .expect("fetch_and_refresh_info failed");
                (info.time_status_change, info.time_modify)
            };

            // Writing to a file should update ctime and mtime
            let write_bytes: [u8; 5] = [1, 2, 3, 4, 5];
            let written = file
                .write(locked, &current_task, &mut VecInputBuffer::new(&write_bytes))
                .expect("write failed");
            assert_eq!(written, write_bytes.len());

            // As Fxfs, the underlying filesystem in this test, can manage file timestamps,
            // we should not see an update in mtime and ctime without first refreshing the node with
            // the metadata from Fxfs.
            let (ctime_after_write_no_refresh, mtime_after_write_no_refresh) = {
                // Plain `info()` reads the cached copy only — no round-trip to the remote fs.
                let info = child.entry.node.info();
                (info.time_status_change, info.time_modify)
            };
            assert_eq!(ctime_after_write_no_refresh, ctime_before_write);
            assert_eq!(mtime_after_write_no_refresh, mtime_before_write);

            // Refresh information, we should see `info` with mtime and ctime from the remote
            // filesystem (assume this is true if the new timestamp values are greater than the ones
            // without the refresh).
            let (ctime_after_write_refresh, mtime_after_write_refresh) = {
                let info = child
                    .entry
                    .node
                    .fetch_and_refresh_info(locked, &current_task)
                    .expect("fetch_and_refresh_info failed");
                (info.time_status_change, info.time_modify)
            };
            // The write updates both timestamps to the same instant, and both must have
            // advanced past the pre-refresh (cached) values.
            assert_eq!(ctime_after_write_refresh, mtime_after_write_refresh);
            assert!(ctime_after_write_refresh > ctime_after_write_no_refresh);
        })
        .await;
        fixture.close().await;
    }
2833
    // Verifies that enabling the `casefold` attribute on a directory is persisted by the
    // remote filesystem across a kernel teardown and remount.
    #[::fuchsia::test]
    async fn test_casefold_persists() {
        let fixture = TestFixture::new().await;
        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed");

        spawn_kernel_and_run(async move |locked, current_task| {
            let kernel = current_task.kernel();
            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
                rights,
            )
            .expect("new_fs failed");
            let ns: Arc<Namespace> = Namespace::new(fs);
            // Create a directory to carry the casefold flag.
            let child = ns
                .root()
                .create_node(
                    locked,
                    &current_task,
                    "dir".into(),
                    FileMode::ALLOW_ALL.with_type(FileMode::IFDIR),
                    DeviceType::NONE,
                )
                .expect("create_node failed");
            // Turn on casefold via the generic attribute-update path.
            child
                .entry
                .node
                .update_attributes(locked, &current_task, |info| {
                    info.casefold = true;
                    Ok(())
                })
                .expect("enable casefold")
        })
        .await;

        // Tear down the kernel and open the dir again. Check that casefold is preserved.
        let fixture = TestFixture::open(
            fixture.close().await,
            TestFixtureOptions { format: false, ..Default::default() },
        )
        .await;
        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed");
        let casefold = spawn_kernel_and_run(async move |locked, current_task| {
            let kernel = current_task.kernel();
            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
                rights,
            )
            .expect("new_fs failed");
            let ns = Namespace::new(fs);
            let mut context = LookupContext::new(SymlinkMode::NoFollow);
            let child = ns
                .root()
                .lookup_child(locked, &current_task, &mut context, "dir".into())
                .expect("lookup_child failed");
            // Read the casefold flag back from the freshly-opened filesystem.
            let casefold = child
                .entry
                .node
                .fetch_and_refresh_info(locked, &current_task)
                .expect("fetch_and_refresh_info failed")
                .casefold;
            casefold
        })
        .await;
        assert!(casefold);

        fixture.close().await;
    }
2911
    // Verifies that an atime update triggered by a read (under a RELATIME mount) is
    // persisted by the remote filesystem across a kernel teardown and remount.
    #[::fuchsia::test]
    async fn test_update_time_access_persists() {
        const TEST_FILE: &str = "test_file";

        let fixture = TestFixture::new().await;
        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed");
        // Set up file.
        let info_after_read = spawn_kernel_and_run(async move |locked, current_task| {
            let kernel = current_task.kernel();
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                // Mount with RELATIME so reads update atime only relative to mtime/ctime.
                FileSystemOptions {
                    source: FlyByteStr::new(b"/"),
                    flags: MountFlags::RELATIME,
                    ..Default::default()
                },
                fio::PERM_READABLE | fio::PERM_WRITABLE,
            )
            .expect("new_fs failed");
            let ns = Namespace::new_with_flags(fs, MountFlags::RELATIME);
            let child = ns
                .root()
                .open_create_node(
                    locked,
                    &current_task,
                    TEST_FILE.into(),
                    FileMode::ALLOW_ALL.with_type(FileMode::IFREG),
                    DeviceType::NONE,
                    OpenFlags::empty(),
                )
                .expect("create_node failed");

            let file_handle = child
                .open(locked, &current_task, OpenFlags::RDWR, AccessCheck::default())
                .expect("open failed");

            // Expect atime to be updated as this is the first file access since the
            // last file modification or status change.
            file_handle
                .read(locked, &current_task, &mut VecOutputBuffer::new(10))
                .expect("read failed");

            // Call `fetch_and_refresh_info` to persist atime update.
            let info_after_read = child
                .entry
                .node
                .fetch_and_refresh_info(locked, &current_task)
                .expect("fetch_and_refresh_info failed")
                .clone();

            // Return the refreshed info so atime can be compared after remount.
            info_after_read
        })
        .await;

        // Tear down the kernel and open the file again. The file should no longer be cached.
        let fixture = TestFixture::open(
            fixture.close().await,
            TestFixtureOptions { format: false, ..Default::default() },
        )
        .await;

        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed");

        spawn_kernel_and_run(async move |locked, current_task| {
            let kernel = current_task.kernel();
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                FileSystemOptions {
                    source: FlyByteStr::new(b"/"),
                    flags: MountFlags::RELATIME,
                    ..Default::default()
                },
                fio::PERM_READABLE | fio::PERM_WRITABLE,
            )
            .expect("new_fs failed");
            let ns = Namespace::new_with_flags(fs, MountFlags::RELATIME);
            let mut context = LookupContext::new(SymlinkMode::NoFollow);
            let child = ns
                .root()
                .lookup_child(locked, &current_task, &mut context, TEST_FILE.into())
                .expect("lookup_child failed");

            // Get info - this should be refreshed with info that was persisted before
            // we tore down the kernel.
            let persisted_info = child
                .entry
                .node
                .fetch_and_refresh_info(locked, &current_task)
                .expect("fetch_and_refresh_info failed")
                .clone();
            assert_eq!(info_after_read.time_access, persisted_info.time_access);
        })
        .await;
        fixture.close().await;
    }
3013
    // Verifies RELATIME semantics for atime updates: the first read after a modification
    // updates atime, an immediately-following read does not, and a read after a write
    // (which bumps mtime/ctime) updates atime again.
    #[::fuchsia::test]
    async fn test_pending_access_time_updates() {
        const TEST_FILE: &str = "test_file";

        let fixture = TestFixture::new().await;
        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed");

        spawn_kernel_and_run(async move |locked, current_task| {
            let kernel = current_task.kernel.clone();
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                // Mount with RELATIME so atime updates depend on mtime/ctime.
                FileSystemOptions {
                    source: FlyByteStr::new(b"/"),
                    flags: MountFlags::RELATIME,
                    ..Default::default()
                },
                fio::PERM_READABLE | fio::PERM_WRITABLE,
            )
            .expect("new_fs failed");

            let ns = Namespace::new_with_flags(fs, MountFlags::RELATIME);
            let child = ns
                .root()
                .open_create_node(
                    locked,
                    &current_task,
                    TEST_FILE.into(),
                    FileMode::ALLOW_ALL.with_type(FileMode::IFREG),
                    DeviceType::NONE,
                    OpenFlags::empty(),
                )
                .expect("create_node failed");

            let file_handle = child
                .open(locked, &current_task, OpenFlags::RDWR, AccessCheck::default())
                .expect("open failed");

            // Expect atime to be updated as this is the first file access since the last
            // file modification or status change.
            file_handle
                .read(locked, &current_task, &mut VecOutputBuffer::new(10))
                .expect("read failed");

            let atime_after_first_read = child
                .entry
                .node
                .fetch_and_refresh_info(locked, &current_task)
                .expect("fetch_and_refresh_info failed")
                .time_access;

            // Read again. Under RELATIME, a second read with no intervening modification
            // or status change does not update atime.
            file_handle
                .read(locked, &current_task, &mut VecOutputBuffer::new(10))
                .expect("read failed");

            let atime_after_second_read = child
                .entry
                .node
                .fetch_and_refresh_info(locked, &current_task)
                .expect("fetch_and_refresh_info failed")
                .time_access;
            assert_eq!(atime_after_first_read, atime_after_second_read);

            // Do another operation that will update ctime and/or mtime but not atime.
            let write_bytes: [u8; 5] = [1, 2, 3, 4, 5];
            let _written = file_handle
                .write(locked, &current_task, &mut VecInputBuffer::new(&write_bytes))
                .expect("write failed");

            // Read again (atime should be updated).
            file_handle
                .read(locked, &current_task, &mut VecOutputBuffer::new(10))
                .expect("read failed");

            assert!(
                atime_after_second_read
                    < child
                        .entry
                        .node
                        .fetch_and_refresh_info(locked, &current_task)
                        .expect("fetch_and_refresh_info failed")
                        .time_access
            );
        })
        .await;
        fixture.close().await;
    }
3105}