starnix_core/fs/fuchsia/
remote.rs

1// Copyright 2021 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5use crate::fs::fuchsia::RemoteUnixDomainSocket;
6use crate::fs::fuchsia::remote_volume::RemoteVolume;
7use crate::fs::fuchsia::sync_file::{SyncFence, SyncFile, SyncPoint, Timeline};
8use crate::mm::memory::MemoryObject;
9use crate::mm::{ProtectionFlags, VMEX_RESOURCE};
10use crate::security;
11use crate::task::{CurrentTask, FullCredentials, Kernel};
12use crate::vfs::buffers::{InputBuffer, OutputBuffer, with_iovec_segments};
13use crate::vfs::fsverity::FsVerityState;
14use crate::vfs::socket::{Socket, SocketFile, ZxioBackedSocket};
15use crate::vfs::{
16    Anon, AppendLockGuard, CacheMode, DEFAULT_BYTES_PER_BLOCK, DirectoryEntryType, DirentSink,
17    FallocMode, FileHandle, FileObject, FileOps, FileSystem, FileSystemHandle, FileSystemOps,
18    FileSystemOptions, FsNode, FsNodeHandle, FsNodeInfo, FsNodeOps, FsStr, FsString, SeekTarget,
19    SymlinkTarget, XattrOp, XattrStorage, default_ioctl, default_seek, fileops_impl_directory,
20    fileops_impl_nonseekable, fileops_impl_noop_sync, fileops_impl_seekable, fs_node_impl_not_dir,
21    fs_node_impl_symlink, fs_node_impl_xattr_delegate,
22};
23use bstr::ByteSlice;
24use fidl::endpoints::DiscoverableProtocolMarker as _;
25use fuchsia_runtime::UtcInstant;
26use linux_uapi::SYNC_IOC_MAGIC;
27use once_cell::sync::OnceCell;
28use starnix_crypt::EncryptionKeyId;
29use starnix_logging::{CATEGORY_STARNIX_MM, impossible_error, log_warn, trace_duration};
30use starnix_sync::{
31    FileOpsCore, LockEqualOrBefore, Locked, Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard,
32    Unlocked,
33};
34use starnix_syscalls::{SyscallArg, SyscallResult};
35use starnix_types::vfs::default_statfs;
36use starnix_uapi::auth::FsCred;
37use starnix_uapi::device_type::DeviceType;
38use starnix_uapi::errors::Errno;
39use starnix_uapi::file_mode::FileMode;
40use starnix_uapi::mount_flags::MountFlags;
41use starnix_uapi::open_flags::OpenFlags;
42use starnix_uapi::{
43    __kernel_fsid_t, errno, error, from_status_like_fdio, fsverity_descriptor, ino_t, off_t, statfs,
44};
45use std::mem::MaybeUninit;
46use std::sync::Arc;
47use syncio::zxio::{
48    ZXIO_NODE_PROTOCOL_DIRECTORY, ZXIO_NODE_PROTOCOL_FILE, ZXIO_NODE_PROTOCOL_SYMLINK,
49    ZXIO_OBJECT_TYPE_DATAGRAM_SOCKET, ZXIO_OBJECT_TYPE_DIR, ZXIO_OBJECT_TYPE_FILE,
50    ZXIO_OBJECT_TYPE_NONE, ZXIO_OBJECT_TYPE_PACKET_SOCKET, ZXIO_OBJECT_TYPE_RAW_SOCKET,
51    ZXIO_OBJECT_TYPE_STREAM_SOCKET, ZXIO_OBJECT_TYPE_SYNCHRONOUS_DATAGRAM_SOCKET, zxio_node_attr,
52};
53use syncio::{
54    AllocateMode, DirentIterator, SelinuxContextAttr, XattrSetMode, ZXIO_ROOT_HASH_LENGTH, Zxio,
55    ZxioDirent, ZxioOpenOptions, zxio_fsverity_descriptor_t, zxio_node_attr_has_t,
56    zxio_node_attributes_t,
57};
58use zx::{Counter, HandleBased};
59use {
60    fidl_fuchsia_io as fio, fidl_fuchsia_starnix_binder as fbinder,
61    fidl_fuchsia_unknown as funknown,
62};
63
64pub fn new_remote_fs(
65    locked: &mut Locked<Unlocked>,
66    current_task: &CurrentTask,
67    options: FileSystemOptions,
68) -> Result<FileSystemHandle, Errno> {
69    let kernel = current_task.kernel();
70    let requested_path = std::str::from_utf8(&options.source)
71        .map_err(|_| errno!(EINVAL, "source path is not utf8"))?;
72    let mut create_flags =
73        fio::PERM_READABLE | fio::Flags::FLAG_MAYBE_CREATE | fio::Flags::PROTOCOL_DIRECTORY;
74    if !options.flags.contains(MountFlags::RDONLY) {
75        create_flags |= fio::PERM_WRITABLE;
76    }
77    let (root_proxy, subdir) = kernel.open_ns_dir(requested_path, create_flags)?;
78
79    let subdir = if subdir.is_empty() { ".".to_string() } else { subdir };
80    let mut open_rights = fio::PERM_READABLE;
81    if !options.flags.contains(MountFlags::RDONLY) {
82        open_rights |= fio::PERM_WRITABLE;
83    }
84    let mut subdir_options = options;
85    subdir_options.source = subdir.into();
86    create_remotefs_filesystem(locked, kernel, &root_proxy, subdir_options, open_rights)
87}
88
89/// Create a filesystem to access the content of the fuchsia directory available at `fs_src` inside
90/// `pkg`.
91pub fn create_remotefs_filesystem<L>(
92    locked: &mut Locked<L>,
93    kernel: &Kernel,
94    root: &fio::DirectorySynchronousProxy,
95    options: FileSystemOptions,
96    rights: fio::Flags,
97) -> Result<FileSystemHandle, Errno>
98where
99    L: LockEqualOrBefore<FileOpsCore>,
100{
101    let root = syncio::directory_open_directory_async(
102        root,
103        std::str::from_utf8(&options.source)
104            .map_err(|_| errno!(EINVAL, "source path is not utf8"))?,
105        rights,
106    )
107    .map_err(|e| errno!(EIO, format!("Failed to open root: {e}")))?;
108    RemoteFs::new_fs(locked, kernel, root.into_channel(), options, rights)
109}
110
pub struct RemoteFs {
    // If true, trust the remote file system's IDs (which requires that the remote file system does
    // not span mounts).  This must be true to properly support hard links.  If this is false, the
    // same node can end up having different IDs as it leaves and reenters the node cache.
    // TODO(https://fxbug.dev/42081972): At the time of writing, package directories do not have
    // unique IDs so this *must* be false in that case.
    use_remote_ids: bool,

    // Synchronous connection to the root directory of the remote filesystem; used for
    // filesystem-wide operations such as `QueryFilesystem` (see `statfs`).
    root_proxy: fio::DirectorySynchronousProxy,
}
121
122impl RemoteFs {
123    /// Returns a reference to a RemoteFs given a reference to a FileSystem.
124    ///
125    /// # Panics
126    ///
127    /// This will panic if `fs`'s ops aren't `RemoteFs`, so this should only be called when this is
128    /// known to be the case.
129    fn from_fs(fs: &FileSystem) -> &RemoteFs {
130        if let Some(remote_vol) = fs.downcast_ops::<RemoteVolume>() {
131            remote_vol.remotefs()
132        } else {
133            fs.downcast_ops::<RemoteFs>().unwrap()
134        }
135    }
136}
137
// Filesystem magic reported by `statfs` for remotefs: the bytes "f.io" read big-endian.
const REMOTE_FS_MAGIC: u32 = u32::from_be_bytes(*b"f.io");
// ioctl numbers in the SYNC_IOC_MAGIC namespace; presumably these mirror Linux's sync_file
// uapi values (file-info query and fence merge) — confirm against linux_uapi.
const SYNC_IOC_FILE_INFO: u8 = 4;
const SYNC_IOC_MERGE: u8 = 3;
141
142impl FileSystemOps for RemoteFs {
143    fn statfs(
144        &self,
145        _locked: &mut Locked<FileOpsCore>,
146        _fs: &FileSystem,
147        _current_task: &CurrentTask,
148    ) -> Result<statfs, Errno> {
149        let (status, info) = self
150            .root_proxy
151            .query_filesystem(zx::MonotonicInstant::INFINITE)
152            .map_err(|_| errno!(EIO))?;
153        // Not all remote filesystems support `QueryFilesystem`, many return ZX_ERR_NOT_SUPPORTED.
154        if status == 0 {
155            if let Some(info) = info {
156                let (total_blocks, free_blocks) = if info.block_size > 0 {
157                    (
158                        (info.total_bytes / u64::from(info.block_size))
159                            .try_into()
160                            .unwrap_or(i64::MAX),
161                        ((info.total_bytes.saturating_sub(info.used_bytes))
162                            / u64::from(info.block_size))
163                        .try_into()
164                        .unwrap_or(i64::MAX),
165                    )
166                } else {
167                    (0, 0)
168                };
169
170                let fsid = __kernel_fsid_t {
171                    val: [
172                        (info.fs_id & 0xffffffff) as i32,
173                        ((info.fs_id >> 32) & 0xffffffff) as i32,
174                    ],
175                };
176
177                return Ok(statfs {
178                    f_type: info.fs_type as i64,
179                    f_bsize: info.block_size.into(),
180                    f_blocks: total_blocks,
181                    f_bfree: free_blocks,
182                    f_bavail: free_blocks,
183                    f_files: info.total_nodes.try_into().unwrap_or(i64::MAX),
184                    f_ffree: (info.total_nodes.saturating_sub(info.used_nodes))
185                        .try_into()
186                        .unwrap_or(i64::MAX),
187                    f_fsid: fsid,
188                    f_namelen: info.max_filename_size.try_into().unwrap_or(0),
189                    f_frsize: info.block_size.into(),
190                    ..statfs::default()
191                });
192            }
193        }
194        Ok(default_statfs(REMOTE_FS_MAGIC))
195    }
196
197    fn name(&self) -> &'static FsStr {
198        "remotefs".into()
199    }
200
201    fn uses_external_node_ids(&self) -> bool {
202        self.use_remote_ids
203    }
204
205    fn rename(
206        &self,
207        _locked: &mut Locked<FileOpsCore>,
208        _fs: &FileSystem,
209        current_task: &CurrentTask,
210        old_parent: &FsNodeHandle,
211        old_name: &FsStr,
212        new_parent: &FsNodeHandle,
213        new_name: &FsStr,
214        _renamed: &FsNodeHandle,
215        _replaced: Option<&FsNodeHandle>,
216    ) -> Result<(), Errno> {
217        // Renames should fail if the src or target directory is encrypted and locked.
218        old_parent.fail_if_locked(current_task)?;
219        new_parent.fail_if_locked(current_task)?;
220
221        let Some(old_parent) = old_parent.downcast_ops::<RemoteNode>() else {
222            return error!(EXDEV);
223        };
224        let Some(new_parent) = new_parent.downcast_ops::<RemoteNode>() else {
225            return error!(EXDEV);
226        };
227        old_parent
228            .zxio
229            .rename(get_name_str(old_name)?, &new_parent.zxio, get_name_str(new_name)?)
230            .map_err(|status| from_status_like_fdio!(status))
231    }
232
233    fn manages_timestamps(&self) -> bool {
234        true
235    }
236}
237
impl RemoteFs {
    /// Creates a `RemoteFs` from a `fuchsia.io/Directory` client channel `root`, forwarding a
    /// re-open of the root directory to `server_end`.
    ///
    /// Queries the remote filesystem to decide whether its node IDs can be trusted (see
    /// `use_remote_ids`).
    pub fn new(root: zx::Channel, server_end: zx::Channel) -> Result<RemoteFs, Errno> {
        // See if open3 works.  We assume that if open3 works on the root, it will work for all
        // descendent nodes in this filesystem.  At the time of writing, this is true for Fxfs.
        let root_proxy = fio::DirectorySynchronousProxy::new(root);
        root_proxy
            .open(
                ".",
                fio::Flags::PROTOCOL_DIRECTORY
                    | fio::PERM_READABLE
                    | fio::Flags::PERM_INHERIT_WRITE
                    | fio::Flags::PERM_INHERIT_EXECUTE
                    | fio::Flags::FLAG_SEND_REPRESENTATION,
                &fio::Options {
                    // Request the node id so the caller can use it as the root inode number.
                    attributes: Some(fio::NodeAttributesQuery::ID),
                    ..Default::default()
                },
                server_end,
            )
            .map_err(|_| errno!(EIO))?;
        // Use remote IDs if the filesystem is Fxfs which we know will give us unique IDs.  Hard
        // links need to resolve to the same underlying FsNode, so we can only support hard links if
        // the remote file system will give us unique IDs.  The IDs are also used as the key in
        // caches, so we can't use remote IDs if the remote filesystem is not guaranteed to provide
        // unique IDs, or if the remote filesystem spans multiple filesystems.
        let (status, info) =
            root_proxy.query_filesystem(zx::MonotonicInstant::INFINITE).map_err(|_| errno!(EIO))?;
        // Be tolerant of errors here; many filesystems return `ZX_ERR_NOT_SUPPORTED`.
        let use_remote_ids = status == 0
            && info
                .map(|i| i.fs_type == fidl_fuchsia_fs::VfsType::Fxfs.into_primitive())
                .unwrap_or(false);
        Ok(RemoteFs { use_remote_ids, root_proxy })
    }

    /// Builds the `FileSystem` for a remote filesystem rooted at `root` and registers its root
    /// node.  If `rights` lacks `PERM_WRITABLE`, the mount is forced read-only.
    pub fn new_fs<L>(
        locked: &mut Locked<L>,
        kernel: &Kernel,
        root: zx::Channel,
        mut options: FileSystemOptions,
        rights: fio::Flags,
    ) -> Result<FileSystemHandle, Errno>
    where
        L: LockEqualOrBefore<FileOpsCore>,
    {
        let (client_end, server_end) = zx::Channel::create();
        let remotefs = RemoteFs::new(root, server_end)?;
        // Ask for the root's id (delivered via the representation event requested in `new`).
        let mut attrs = zxio_node_attributes_t {
            has: zxio_node_attr_has_t { id: true, ..Default::default() },
            ..Default::default()
        };
        let (remote_node, node_id) =
            match Zxio::create_with_on_representation(client_end.into(), Some(&mut attrs)) {
                Err(status) => return Err(from_status_like_fdio!(status)),
                Ok(zxio) => (RemoteNode { zxio, rights }, attrs.id),
            };

        if !rights.contains(fio::PERM_WRITABLE) {
            options.flags |= MountFlags::RDONLY;
        }
        let use_remote_ids = remotefs.use_remote_ids;
        let fs = FileSystem::new(
            locked,
            kernel,
            CacheMode::Cached(kernel.fs_cache_config()),
            remotefs,
            options,
        )?;
        if use_remote_ids {
            fs.create_root(node_id, remote_node);
        } else {
            // Remote ids can't be trusted; allocate a local inode number for the root instead.
            let root_ino = fs.allocate_ino();
            fs.create_root(root_ino, remote_node);
        }
        Ok(fs)
    }

    /// Returns whether remote node IDs are used directly as inode numbers.
    pub fn use_remote_ids(&self) -> bool {
        self.use_remote_ids
    }
}
319
pub struct RemoteNode {
    /// The underlying Zircon I/O object for this remote node.
    ///
    /// We delegate to the zxio library for actually doing I/O with remote
    /// objects, including fuchsia.io.Directory and fuchsia.io.File objects.
    /// This structure lets us share code with FDIO and other Fuchsia clients.
    zxio: syncio::Zxio,

    /// The fuchsia.io rights for the dir handle. Subdirs will be opened with
    /// the same rights.  These rights also feed into synthesized POSIX modes
    /// for filesystems without mode support (see `get_mode`).
    rights: fio::Flags,
}
332
impl RemoteNode {
    /// Wraps an existing `zxio` connection as a `RemoteNode` with the given fuchsia.io `rights`.
    pub fn new(zxio: syncio::Zxio, rights: fio::Flags) -> Self {
        Self { zxio, rights }
    }
}
338
339/// Create a file handle from a zx::NullableHandle.
340///
341/// The handle must be a channel, socket, vmo or debuglog object.  If the handle is a channel, then
342/// the channel must implement the `fuchsia.unknown/Queryable` protocol.
343///
344/// The resulting object will be owned by root, and will have permissions derived from the `flags`
345/// used to open this object. This is not the same as the permissions set if the object was created
346/// using Starnix itself. We use this mainly for interfacing with objects created outside of Starnix
347/// where these flags represent the desired permissions already.
348pub fn new_remote_file<L>(
349    locked: &mut Locked<L>,
350    current_task: &CurrentTask,
351    handle: zx::NullableHandle,
352    flags: OpenFlags,
353) -> Result<FileHandle, Errno>
354where
355    L: LockEqualOrBefore<FileOpsCore>,
356{
357    let remote_creds = current_task.full_current_creds();
358    let (attrs, ops) = remote_file_attrs_and_ops(current_task, handle.into(), remote_creds)?;
359    let mut rights = fio::Flags::empty();
360    if flags.can_read() {
361        rights |= fio::PERM_READABLE;
362    }
363    if flags.can_write() {
364        rights |= fio::PERM_WRITABLE;
365    }
366    let mode = get_mode(&attrs, rights);
367    // TODO: https://fxbug.dev/407611229 - Give these nodes valid labels.
368    let mut info = FsNodeInfo::new(mode, FsCred::root());
369    update_info_from_attrs(&mut info, &attrs);
370    Ok(Anon::new_private_file_extended(locked, current_task, ops, flags, "[fuchsia:remote]", info))
371}
372
373// Create a FileOps from a zx::NullableHandle.
374//
375// The handle must satisfy the same requirements as `new_remote_file`.
376pub fn new_remote_file_ops(
377    current_task: &CurrentTask,
378    handle: zx::NullableHandle,
379    creds: FullCredentials,
380) -> Result<Box<dyn FileOps>, Errno> {
381    let (_, ops) = remote_file_attrs_and_ops(current_task, handle, creds)?;
382    Ok(ops)
383}
384
// Inspects `handle` and builds the appropriate `FileOps` for it, along with whatever node
// attributes could be determined, as described on `new_remote_file`.
fn remote_file_attrs_and_ops(
    current_task: &CurrentTask,
    mut handle: zx::NullableHandle,
    remote_creds: FullCredentials,
) -> Result<(zxio_node_attr, Box<dyn FileOps>), Errno> {
    let handle_type =
        handle.basic_info().map_err(|status| from_status_like_fdio!(status))?.object_type;

    // Check whether the channel implements a Starnix specific protocol.
    if handle_type == zx::ObjectType::CHANNEL {
        let channel = zx::Channel::from(handle);
        let queryable = funknown::QueryableSynchronousProxy::new(channel);
        if let Ok(name) = queryable.query(zx::MonotonicInstant::INFINITE) {
            if name == fbinder::UnixDomainSocketMarker::PROTOCOL_NAME.as_bytes() {
                // The remote speaks the Starnix unix-domain-socket protocol; wrap it in socket
                // file ops and synthesize socket attributes for it.
                let socket_ops =
                    RemoteUnixDomainSocket::new(queryable.into_channel(), remote_creds)?;
                let socket = Socket::new_with_ops(Box::new(socket_ops))?;
                let file_ops = SocketFile::new(socket);
                let attr = zxio_node_attr {
                    has: zxio_node_attr_has_t { mode: true, ..zxio_node_attr_has_t::default() },
                    mode: 0o777 | FileMode::IFSOCK.bits(),
                    ..zxio_node_attr::default()
                };
                return Ok((attr, file_ops));
            }
        };
        // Not a recognized Starnix protocol: reclaim the channel from the proxy and fall
        // through to the generic zxio handling below.
        handle = queryable.into_channel().into_handle();
    } else if handle_type == zx::ObjectType::COUNTER {
        // Counters get dedicated file ops; no attributes are available for them.
        let attr = zxio_node_attr::default();
        let file_ops = Box::new(RemoteCounter::new(handle.into()));
        return Ok((attr, file_ops));
    }

    // Otherwise, use zxio based objects.
    let zxio = Zxio::create(handle).map_err(|status| from_status_like_fdio!(status))?;
    let mut attrs = zxio
        .attr_get(zxio_node_attr_has_t {
            protocols: true,
            abilities: true,
            content_size: true,
            storage_size: true,
            link_count: true,
            object_type: true,
            ..Default::default()
        })
        .map_err(|status| from_status_like_fdio!(status))?;
    // Dispatch on both the kernel object type and the zxio object type: directories, files
    // (including raw VMOs and debuglogs), and the various socket flavors.
    let ops: Box<dyn FileOps> = match (handle_type, attrs.object_type) {
        (_, ZXIO_OBJECT_TYPE_DIR) => Box::new(RemoteDirectoryObject::new(zxio)),
        (zx::ObjectType::VMO, _)
        | (zx::ObjectType::DEBUGLOG, _)
        | (_, ZXIO_OBJECT_TYPE_FILE)
        | (_, ZXIO_OBJECT_TYPE_NONE) => Box::new(RemoteFileObject::new(zxio)),
        (zx::ObjectType::SOCKET, _)
        | (_, ZXIO_OBJECT_TYPE_SYNCHRONOUS_DATAGRAM_SOCKET)
        | (_, ZXIO_OBJECT_TYPE_DATAGRAM_SOCKET)
        | (_, ZXIO_OBJECT_TYPE_STREAM_SOCKET)
        | (_, ZXIO_OBJECT_TYPE_RAW_SOCKET)
        | (_, ZXIO_OBJECT_TYPE_PACKET_SOCKET) => {
            let socket_ops = ZxioBackedSocket::new_with_zxio(current_task, zxio);
            let socket = Socket::new_with_ops(Box::new(socket_ops))?;
            // Sockets don't carry a mode from the remote; synthesize IFSOCK.
            attrs.has.mode = true;
            attrs.mode = FileMode::IFSOCK.bits();
            SocketFile::new(socket)
        }
        _ => return error!(ENOTSUP),
    };
    Ok((attrs, ops))
}
453
/// Creates a `FileHandle` wrapping a Zircon socket.  Thin convenience wrapper around
/// `new_remote_file`.
pub fn create_fuchsia_pipe<L>(
    locked: &mut Locked<L>,
    current_task: &CurrentTask,
    socket: zx::Socket,
    flags: OpenFlags,
) -> Result<FileHandle, Errno>
where
    L: LockEqualOrBefore<FileOpsCore>,
{
    new_remote_file(locked, current_task, socket.into(), flags)
}
465
// Fetches fresh attributes from the remote node via `zxio`, folds them into `info`, and
// returns a read guard over the updated `FsNodeInfo`.
fn fetch_and_refresh_info_impl<'a>(
    zxio: &syncio::Zxio,
    info: &'a RwLock<FsNodeInfo>,
) -> Result<RwLockReadGuard<'a, FsNodeInfo>, Errno> {
    let attrs = zxio
        .attr_get(zxio_node_attr_has_t {
            content_size: true,
            storage_size: true,
            link_count: true,
            modification_time: true,
            change_time: true,
            access_time: true,
            casefold: true,
            wrapping_key_id: true,
            // Forward any pending atime update to the remote.  The read lock taken here is
            // released before the write lock below is acquired.
            pending_access_time_update: info.read().pending_time_access_update,
            ..Default::default()
        })
        .map_err(|status| from_status_like_fdio!(status))?;
    let mut info = info.write();
    update_info_from_attrs(&mut info, &attrs);
    // The pending update has just been communicated to the remote, so clear the flag.
    info.pending_time_access_update = false;
    // Downgrade rather than drop-and-reacquire so the caller observes the state written above.
    Ok(RwLockWriteGuard::downgrade(info))
}
489
490// Update info from attrs if they are set.
491pub fn update_info_from_attrs(info: &mut FsNodeInfo, attrs: &zxio_node_attributes_t) {
492    // TODO - store these in FsNodeState and convert on fstat
493    if attrs.has.content_size {
494        info.size = attrs.content_size.try_into().unwrap_or(std::usize::MAX);
495    }
496    if attrs.has.storage_size {
497        info.blocks = usize::try_from(attrs.storage_size)
498            .unwrap_or(std::usize::MAX)
499            .div_ceil(DEFAULT_BYTES_PER_BLOCK)
500    }
501    info.blksize = DEFAULT_BYTES_PER_BLOCK;
502    if attrs.has.link_count {
503        info.link_count = attrs.link_count.try_into().unwrap_or(std::usize::MAX);
504    }
505    if attrs.has.modification_time {
506        info.time_modify =
507            UtcInstant::from_nanos(attrs.modification_time.try_into().unwrap_or(i64::MAX));
508    }
509    if attrs.has.change_time {
510        info.time_status_change =
511            UtcInstant::from_nanos(attrs.change_time.try_into().unwrap_or(i64::MAX));
512    }
513    if attrs.has.access_time {
514        info.time_access = UtcInstant::from_nanos(attrs.access_time.try_into().unwrap_or(i64::MAX));
515    }
516    if attrs.has.wrapping_key_id {
517        info.wrapping_key_id = Some(attrs.wrapping_key_id);
518    }
519}
520
521fn get_mode(attrs: &zxio_node_attributes_t, rights: fio::Flags) -> FileMode {
522    if attrs.protocols & ZXIO_NODE_PROTOCOL_SYMLINK != 0 {
523        // We don't set the mode for symbolic links , so we synthesize it instead.
524        FileMode::IFLNK | FileMode::ALLOW_ALL
525    } else if attrs.has.mode {
526        // If the filesystem supports POSIX mode bits, use that directly.
527        FileMode::from_bits(attrs.mode)
528    } else {
529        // The filesystem doesn't support the `mode` attribute, so synthesize it from the protocols
530        // this node supports, and the rights used to open it.
531        let is_directory =
532            attrs.protocols & ZXIO_NODE_PROTOCOL_DIRECTORY == ZXIO_NODE_PROTOCOL_DIRECTORY;
533        let mode = if is_directory { FileMode::IFDIR } else { FileMode::IFREG };
534        let mut permissions = FileMode::EMPTY;
535        if rights.contains(fio::PERM_READABLE) {
536            permissions |= FileMode::IRUSR;
537        }
538        if rights.contains(fio::PERM_WRITABLE) {
539            permissions |= FileMode::IWUSR;
540        }
541        if rights.contains(fio::PERM_EXECUTABLE) {
542            permissions |= FileMode::IXUSR;
543        }
544        // Make sure the same permissions are granted to user, group, and other.
545        permissions |= FileMode::from_bits((permissions.bits() >> 3) | (permissions.bits() >> 6));
546        mode | permissions
547    }
548}
549
550fn get_name_str<'a>(name_bytes: &'a FsStr) -> Result<&'a str, Errno> {
551    std::str::from_utf8(name_bytes.as_ref()).map_err(|_| {
552        log_warn!("bad utf8 in pathname! remote filesystems can't handle this");
553        errno!(EINVAL)
554    })
555}
556
557impl XattrStorage for syncio::Zxio {
558    fn get_xattr(
559        &self,
560        _locked: &mut Locked<FileOpsCore>,
561        name: &FsStr,
562    ) -> Result<FsString, Errno> {
563        Ok(self
564            .xattr_get(name)
565            .map_err(|status| match status {
566                zx::Status::NOT_FOUND => errno!(ENODATA),
567                status => from_status_like_fdio!(status),
568            })?
569            .into())
570    }
571
572    fn set_xattr(
573        &self,
574        _locked: &mut Locked<FileOpsCore>,
575        name: &FsStr,
576        value: &FsStr,
577        op: XattrOp,
578    ) -> Result<(), Errno> {
579        let mode = match op {
580            XattrOp::Set => XattrSetMode::Set,
581            XattrOp::Create => XattrSetMode::Create,
582            XattrOp::Replace => XattrSetMode::Replace,
583        };
584
585        self.xattr_set(name, value, mode).map_err(|status| match status {
586            zx::Status::NOT_FOUND => errno!(ENODATA),
587            status => from_status_like_fdio!(status),
588        })
589    }
590
591    fn remove_xattr(&self, _locked: &mut Locked<FileOpsCore>, name: &FsStr) -> Result<(), Errno> {
592        self.xattr_remove(name).map_err(|status| match status {
593            zx::Status::NOT_FOUND => errno!(ENODATA),
594            _ => from_status_like_fdio!(status),
595        })
596    }
597
598    fn list_xattrs(&self, _locked: &mut Locked<FileOpsCore>) -> Result<Vec<FsString>, Errno> {
599        self.xattr_list()
600            .map(|attrs| attrs.into_iter().map(FsString::new).collect::<Vec<_>>())
601            .map_err(|status| from_status_like_fdio!(status))
602    }
603}
604
605impl FsNodeOps for RemoteNode {
606    fs_node_impl_xattr_delegate!(self, self.zxio);
607
    /// Builds `FileOps` for opening this node, enforcing encryption and fsverity restrictions
    /// before the open is allowed.
    fn create_file_ops(
        &self,
        locked: &mut Locked<FileOpsCore>,
        node: &FsNode,
        current_task: &CurrentTask,
        flags: OpenFlags,
    ) -> Result<Box<dyn FileOps>, Errno> {
        {
            // Refresh cached node info so the encryption state checked below is current.
            let node_info = node.fetch_and_refresh_info(locked, current_task)?;
            if node_info.mode.is_dir() {
                if let Some(wrapping_key_id) = node_info.wrapping_key_id {
                    if flags.can_write() {
                        // Locked encrypted directories cannot be opened with write access.
                        let crypt_service =
                            node.fs().crypt_service().ok_or_else(|| errno!(ENOKEY))?;
                        if !crypt_service.contains_key(EncryptionKeyId::from(wrapping_key_id)) {
                            return error!(ENOKEY);
                        }
                    }
                }
                // For directories we need to deep-clone the connection because we rely on the seek
                // offset.
                return Ok(Box::new(RemoteDirectoryObject::new(
                    self.zxio.deep_clone().map_err(|status| from_status_like_fdio!(status))?,
                )));
            }
        }

        // Locked encrypted files cannot be opened.
        node.fail_if_locked(current_task)?;

        // fsverity files cannot be opened in write mode, including while building.
        if flags.can_write() {
            node.fsverity.lock().check_writable()?;
        }

        // For files we can clone the `Zxio` because we don't rely on any per-connection state
        // (i.e. the file offset).
        Ok(Box::new(RemoteFileObject::new(self.zxio.clone())))
    }
648
649    fn mknod(
650        &self,
651        _locked: &mut Locked<FileOpsCore>,
652        node: &FsNode,
653        current_task: &CurrentTask,
654        name: &FsStr,
655        mode: FileMode,
656        dev: DeviceType,
657        owner: FsCred,
658    ) -> Result<FsNodeHandle, Errno> {
659        node.fail_if_locked(current_task)?;
660        let name = get_name_str(name)?;
661
662        let fs = node.fs();
663        let fs_ops = RemoteFs::from_fs(&fs);
664
665        let zxio;
666        let mut node_id;
667        if !(mode.is_reg() || mode.is_chr() || mode.is_blk() || mode.is_fifo() || mode.is_sock()) {
668            return error!(EINVAL, name);
669        }
670        let mut attrs = zxio_node_attributes_t {
671            has: zxio_node_attr_has_t { id: true, ..Default::default() },
672            ..Default::default()
673        };
674        zxio = self
675            .zxio
676            .open(
677                name,
678                fio::Flags::FLAG_MUST_CREATE
679                    | fio::Flags::PROTOCOL_FILE
680                    | fio::PERM_READABLE
681                    | fio::PERM_WRITABLE,
682                ZxioOpenOptions::new(
683                    Some(&mut attrs),
684                    Some(zxio_node_attributes_t {
685                        mode: mode.bits(),
686                        uid: owner.uid,
687                        gid: owner.gid,
688                        rdev: dev.bits(),
689                        has: zxio_node_attr_has_t {
690                            mode: true,
691                            uid: true,
692                            gid: true,
693                            rdev: true,
694                            ..Default::default()
695                        },
696                        ..Default::default()
697                    }),
698                ),
699            )
700            .map_err(|status| from_status_like_fdio!(status, name))?;
701        node_id = attrs.id;
702
703        let ops = if mode.is_reg() {
704            Box::new(RemoteNode { zxio, rights: self.rights }) as Box<dyn FsNodeOps>
705        } else {
706            Box::new(RemoteSpecialNode { zxio }) as Box<dyn FsNodeOps>
707        };
708
709        if !fs_ops.use_remote_ids {
710            node_id = fs.allocate_ino();
711        }
712        let child =
713            fs.create_node(node_id, ops, FsNodeInfo { rdev: dev, ..FsNodeInfo::new(mode, owner) });
714        Ok(child)
715    }
716
717    fn mkdir(
718        &self,
719        _locked: &mut Locked<FileOpsCore>,
720        node: &FsNode,
721        current_task: &CurrentTask,
722        name: &FsStr,
723        mode: FileMode,
724        owner: FsCred,
725    ) -> Result<FsNodeHandle, Errno> {
726        node.fail_if_locked(current_task)?;
727        let name = get_name_str(name)?;
728
729        let fs = node.fs();
730        let fs_ops = RemoteFs::from_fs(&fs);
731
732        let zxio;
733        let mut node_id;
734        let mut attrs = zxio_node_attributes_t {
735            has: zxio_node_attr_has_t { id: true, ..Default::default() },
736            ..Default::default()
737        };
738        zxio = self
739            .zxio
740            .open(
741                name,
742                fio::Flags::FLAG_MUST_CREATE
743                    | fio::Flags::PROTOCOL_DIRECTORY
744                    | fio::PERM_READABLE
745                    | fio::PERM_WRITABLE,
746                ZxioOpenOptions::new(
747                    Some(&mut attrs),
748                    Some(zxio_node_attributes_t {
749                        mode: mode.bits(),
750                        uid: owner.uid,
751                        gid: owner.gid,
752                        has: zxio_node_attr_has_t {
753                            mode: true,
754                            uid: true,
755                            gid: true,
756                            ..Default::default()
757                        },
758                        ..Default::default()
759                    }),
760                ),
761            )
762            .map_err(|status| from_status_like_fdio!(status, name))?;
763        node_id = attrs.id;
764
765        let ops = RemoteNode { zxio, rights: self.rights };
766        if !fs_ops.use_remote_ids {
767            node_id = fs.allocate_ino();
768        }
769        let child = fs.create_node(node_id, ops, FsNodeInfo::new(mode, owner));
770        Ok(child)
771    }
772
    /// Looks up `name` under this directory on the remote filesystem and returns the
    /// corresponding `FsNode`, creating and caching it via `get_or_create_node` if needed.
    ///
    /// The open fetches the full attribute set needed to populate `FsNodeInfo` in the same
    /// round trip, and the `FsNodeOps` installed depend on the reported mode: symlink,
    /// regular file/directory, or a special node for everything else.
    fn lookup(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        node: &FsNode,
        current_task: &CurrentTask,
        name: &FsStr,
    ) -> Result<FsNodeHandle, Errno> {
        let name = get_name_str(name)?;

        let fs = node.fs();
        let fs_ops = RemoteFs::from_fs(&fs);

        // Request every attribute we need to build the node in a single open call.
        let mut attrs = zxio_node_attributes_t {
            has: zxio_node_attr_has_t {
                protocols: true,
                abilities: true,
                mode: true,
                uid: true,
                gid: true,
                rdev: true,
                id: true,
                fsverity_enabled: true,
                casefold: true,
                modification_time: true,
                change_time: true,
                access_time: true,
                ..Default::default()
            },
            ..Default::default()
        };
        let mut options = ZxioOpenOptions::new(Some(&mut attrs), None);
        // When the filesystem uses xattr-based labelling, also fetch the SELinux context
        // attribute as part of the same open.
        let mut selinux_context_buffer =
            MaybeUninit::<[u8; fio::MAX_SELINUX_CONTEXT_ATTRIBUTE_LEN as usize]>::uninit();
        let mut cached_context = security::fs_is_xattr_labeled(node.fs())
            .then(|| SelinuxContextAttr::new(&mut selinux_context_buffer));
        if let Some(buffer) = &mut cached_context {
            options = options.with_selinux_context_read(buffer).unwrap();
        }
        let zxio = self
            .zxio
            .open(name, self.rights, options)
            .map_err(|status| from_status_like_fdio!(status, name))?;
        // Keep a second handle so an already-cached symlink node can be refreshed below.
        let symlink_zxio = zxio.clone();
        let mode = get_mode(&attrs, self.rights);
        // With remote ids the remote must report a usable id; otherwise mint a local one.
        let node_id = if fs_ops.use_remote_ids {
            if attrs.id == fio::INO_UNKNOWN {
                return error!(ENOTSUP);
            }
            attrs.id
        } else {
            fs.allocate_ino()
        };
        let owner = FsCred { uid: attrs.uid, gid: attrs.gid };
        let rdev = DeviceType::from_bits(attrs.rdev);
        let fsverity_enabled = attrs.fsverity_enabled;
        // fsverity should not be enabled for non-file nodes.
        if fsverity_enabled && (attrs.protocols & ZXIO_NODE_PROTOCOL_FILE == 0) {
            return error!(EINVAL);
        }
        let casefold = attrs.casefold;
        // Remote timestamps are u64 nanoseconds; values beyond i64::MAX are clamped.
        let time_modify =
            UtcInstant::from_nanos(attrs.modification_time.try_into().unwrap_or(i64::MAX));
        let time_status_change =
            UtcInstant::from_nanos(attrs.change_time.try_into().unwrap_or(i64::MAX));
        let time_access = UtcInstant::from_nanos(attrs.access_time.try_into().unwrap_or(i64::MAX));

        // Reuse an existing node with this id if the filesystem has one cached; otherwise
        // build a new one whose ops match the reported mode.
        let node = fs.get_or_create_node(node_id, || {
            let ops = if mode.is_lnk() {
                Box::new(RemoteSymlink { zxio: Mutex::new(zxio) }) as Box<dyn FsNodeOps>
            } else if mode.is_reg() || mode.is_dir() {
                Box::new(RemoteNode { zxio, rights: self.rights }) as Box<dyn FsNodeOps>
            } else {
                Box::new(RemoteSpecialNode { zxio }) as Box<dyn FsNodeOps>
            };
            let child = FsNode::new_uncached(
                node_id,
                ops,
                &fs,
                FsNodeInfo {
                    rdev,
                    casefold,
                    time_status_change,
                    time_modify,
                    time_access,
                    ..FsNodeInfo::new(mode, owner)
                },
            );
            if fsverity_enabled {
                *child.fsverity.lock() = FsVerityState::FsVerity;
            }
            if let Some(buffer) = cached_context.as_ref().and_then(|buffer| buffer.get()) {
                // This is valid to fail if we're using mount point labelling or the
                // provided context string is invalid.
                let _ = security::fs_node_notify_security_context(
                    current_task,
                    &child,
                    FsStr::new(buffer),
                );
            }
            Ok(child)
        })?;
        // If the node is a symlink (freshly created or cached), install the handle we just
        // opened so it tracks the current remote object.
        if let Some(symlink) = node.downcast_ops::<RemoteSymlink>() {
            let mut zxio_guard = symlink.zxio.lock();
            *zxio_guard = symlink_zxio;
        }
        Ok(node)
    }
880
881    fn truncate(
882        &self,
883        _locked: &mut Locked<FileOpsCore>,
884        _guard: &AppendLockGuard<'_>,
885        node: &FsNode,
886        current_task: &CurrentTask,
887        length: u64,
888    ) -> Result<(), Errno> {
889        node.fail_if_locked(current_task)?;
890        self.zxio.truncate(length).map_err(|status| from_status_like_fdio!(status))
891    }
892
893    fn allocate(
894        &self,
895        _locked: &mut Locked<FileOpsCore>,
896        _guard: &AppendLockGuard<'_>,
897        node: &FsNode,
898        current_task: &CurrentTask,
899        mode: FallocMode,
900        offset: u64,
901        length: u64,
902    ) -> Result<(), Errno> {
903        match mode {
904            FallocMode::Allocate { keep_size: false } => {
905                node.fail_if_locked(current_task)?;
906                self.zxio
907                    .allocate(offset, length, AllocateMode::empty())
908                    .map_err(|status| from_status_like_fdio!(status))?;
909                Ok(())
910            }
911            _ => error!(EINVAL),
912        }
913    }
914
    /// Refreshes `info` from the remote node and returns a read guard over it.
    ///
    /// Delegates to `fetch_and_refresh_info_impl` with this node's zxio handle.
    fn fetch_and_refresh_info<'a>(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        _node: &FsNode,
        _current_task: &CurrentTask,
        info: &'a RwLock<FsNodeInfo>,
    ) -> Result<RwLockReadGuard<'a, FsNodeInfo>, Errno> {
        fetch_and_refresh_info_impl(&self.zxio, info)
    }
924
925    fn update_attributes(
926        &self,
927        _locked: &mut Locked<FileOpsCore>,
928        _current_task: &CurrentTask,
929        info: &FsNodeInfo,
930        has: zxio_node_attr_has_t,
931    ) -> Result<(), Errno> {
932        // Omit updating creation_time. By definition, there shouldn't be a change in creation_time.
933        let mut mutable_node_attributes = zxio_node_attributes_t {
934            modification_time: info.time_modify.into_nanos() as u64,
935            access_time: info.time_access.into_nanos() as u64,
936            mode: info.mode.bits(),
937            uid: info.uid,
938            gid: info.gid,
939            rdev: info.rdev.bits(),
940            casefold: info.casefold,
941            has,
942            ..Default::default()
943        };
944        if let Some(id) = info.wrapping_key_id {
945            mutable_node_attributes.wrapping_key_id = id;
946        }
947        self.zxio
948            .attr_set(&mutable_node_attributes)
949            .map_err(|status| from_status_like_fdio!(status))
950    }
951
952    fn unlink(
953        &self,
954        _locked: &mut Locked<FileOpsCore>,
955        _node: &FsNode,
956        _current_task: &CurrentTask,
957        name: &FsStr,
958        _child: &FsNodeHandle,
959    ) -> Result<(), Errno> {
960        // We don't care about the _child argument because 1. unlinking already takes the parent's
961        // children lock, so we don't have to worry about conflicts on this path, and 2. the remote
962        // filesystem tracks the link counts so we don't need to update them here.
963        let name = get_name_str(name)?;
964        self.zxio
965            .unlink(name, fio::UnlinkFlags::empty())
966            .map_err(|status| from_status_like_fdio!(status))
967    }
968
969    fn create_symlink(
970        &self,
971        _locked: &mut Locked<FileOpsCore>,
972        node: &FsNode,
973        current_task: &CurrentTask,
974        name: &FsStr,
975        target: &FsStr,
976        owner: FsCred,
977    ) -> Result<FsNodeHandle, Errno> {
978        node.fail_if_locked(current_task)?;
979
980        let name = get_name_str(name)?;
981        let zxio = self
982            .zxio
983            .create_symlink(name, target)
984            .map_err(|status| from_status_like_fdio!(status))?;
985
986        let fs = node.fs();
987        let fs_ops = RemoteFs::from_fs(&fs);
988
989        let node_id = if fs_ops.use_remote_ids {
990            let attrs = zxio
991                .attr_get(zxio_node_attr_has_t { id: true, ..Default::default() })
992                .map_err(|status| from_status_like_fdio!(status))?;
993            attrs.id
994        } else {
995            fs.allocate_ino()
996        };
997        let symlink = fs.create_node(
998            node_id,
999            RemoteSymlink { zxio: Mutex::new(zxio) },
1000            FsNodeInfo {
1001                size: target.len(),
1002                ..FsNodeInfo::new(FileMode::IFLNK | FileMode::ALLOW_ALL, owner)
1003            },
1004        );
1005        Ok(symlink)
1006    }
1007
1008    fn create_tmpfile(
1009        &self,
1010        node: &FsNode,
1011        _current_task: &CurrentTask,
1012        mode: FileMode,
1013        owner: FsCred,
1014    ) -> Result<FsNodeHandle, Errno> {
1015        let fs = node.fs();
1016        let fs_ops = RemoteFs::from_fs(&fs);
1017
1018        let zxio;
1019        let mut node_id;
1020        if !mode.is_reg() {
1021            return error!(EINVAL);
1022        }
1023        let mut attrs = zxio_node_attributes_t {
1024            has: zxio_node_attr_has_t { id: true, ..Default::default() },
1025            ..Default::default()
1026        };
1027        // `create_tmpfile` is used by O_TMPFILE. Note that
1028        // <https://man7.org/linux/man-pages/man2/open.2.html> states that if O_EXCL is specified
1029        // with O_TMPFILE, the temporary file created cannot be linked into the filesystem. Although
1030        // there exist fuchsia flags `fio::FLAG_TEMPORARY_AS_NOT_LINKABLE`, the starnix vfs already
1031        // handles this case and makes sure that the created file is not linkable. There is also no
1032        // current way of passing the open flags to this function.
1033        zxio = self
1034            .zxio
1035            .open(
1036                ".",
1037                fio::Flags::PROTOCOL_FILE
1038                    | fio::Flags::FLAG_CREATE_AS_UNNAMED_TEMPORARY
1039                    | self.rights,
1040                ZxioOpenOptions::new(
1041                    Some(&mut attrs),
1042                    Some(zxio_node_attributes_t {
1043                        mode: mode.bits(),
1044                        uid: owner.uid,
1045                        gid: owner.gid,
1046                        has: zxio_node_attr_has_t {
1047                            mode: true,
1048                            uid: true,
1049                            gid: true,
1050                            ..Default::default()
1051                        },
1052                        ..Default::default()
1053                    }),
1054                ),
1055            )
1056            .map_err(|status| from_status_like_fdio!(status))?;
1057        node_id = attrs.id;
1058
1059        let ops = Box::new(RemoteNode { zxio, rights: self.rights }) as Box<dyn FsNodeOps>;
1060
1061        if !fs_ops.use_remote_ids {
1062            node_id = fs.allocate_ino();
1063        }
1064        let child = fs.create_node(node_id, ops, FsNodeInfo::new(mode, owner));
1065
1066        Ok(child)
1067    }
1068
1069    fn link(
1070        &self,
1071        _locked: &mut Locked<FileOpsCore>,
1072        node: &FsNode,
1073        _current_task: &CurrentTask,
1074        name: &FsStr,
1075        child: &FsNodeHandle,
1076    ) -> Result<(), Errno> {
1077        if !RemoteFs::from_fs(&node.fs()).use_remote_ids {
1078            return error!(EPERM);
1079        }
1080        let name = get_name_str(name)?;
1081        let link_into = |zxio: &syncio::Zxio| {
1082            zxio.link_into(&self.zxio, name).map_err(|status| match status {
1083                zx::Status::BAD_STATE => errno!(EXDEV),
1084                zx::Status::ACCESS_DENIED => errno!(ENOKEY),
1085                s => from_status_like_fdio!(s),
1086            })
1087        };
1088        if let Some(child) = child.downcast_ops::<RemoteNode>() {
1089            link_into(&child.zxio)
1090        } else if let Some(child) = child.downcast_ops::<RemoteSymlink>() {
1091            link_into(&child.zxio())
1092        } else {
1093            error!(EXDEV)
1094        }
1095    }
1096
1097    fn forget(
1098        self: Box<Self>,
1099        _locked: &mut Locked<FileOpsCore>,
1100        _current_task: &CurrentTask,
1101        info: FsNodeInfo,
1102    ) -> Result<(), Errno> {
1103        // Before forgetting this node, update atime if we need to.
1104        if info.pending_time_access_update {
1105            self.zxio
1106                .close_and_update_access_time()
1107                .map_err(|status| from_status_like_fdio!(status))?;
1108        }
1109        Ok(())
1110    }
1111
1112    fn enable_fsverity(&self, descriptor: &fsverity_descriptor) -> Result<(), Errno> {
1113        let descr = zxio_fsverity_descriptor_t {
1114            hash_algorithm: descriptor.hash_algorithm,
1115            salt_size: descriptor.salt_size,
1116            salt: descriptor.salt,
1117        };
1118        self.zxio.enable_verity(&descr).map_err(|status| from_status_like_fdio!(status))
1119    }
1120
1121    fn get_fsverity_descriptor(&self, log_blocksize: u8) -> Result<fsverity_descriptor, Errno> {
1122        let mut root_hash = [0; ZXIO_ROOT_HASH_LENGTH];
1123        let attrs = self
1124            .zxio
1125            .attr_get_with_root_hash(
1126                zxio_node_attr_has_t {
1127                    content_size: true,
1128                    fsverity_options: true,
1129                    fsverity_root_hash: true,
1130                    ..Default::default()
1131                },
1132                &mut root_hash,
1133            )
1134            .map_err(|status| match status {
1135                zx::Status::INVALID_ARGS => errno!(ENODATA),
1136                _ => from_status_like_fdio!(status),
1137            })?;
1138        return Ok(fsverity_descriptor {
1139            version: 1,
1140            hash_algorithm: attrs.fsverity_options.hash_alg,
1141            log_blocksize,
1142            salt_size: attrs.fsverity_options.salt_size as u8,
1143            __reserved_0x04: 0u32,
1144            data_size: attrs.content_size,
1145            root_hash,
1146            salt: attrs.fsverity_options.salt,
1147            __reserved: [0u8; 144],
1148        });
1149    }
1150}
1151
/// Ops for a remote node whose mode is not a symlink, regular file, or directory; `lookup`
/// falls back to this type for all other modes.
struct RemoteSpecialNode {
    // Remote handle; used by this impl only for delegating extended attributes.
    zxio: syncio::Zxio,
}
1155
impl FsNodeOps for RemoteSpecialNode {
    fs_node_impl_not_dir!();
    // Extended attribute operations are forwarded to the remote object.
    fs_node_impl_xattr_delegate!(self, self.zxio);

    /// Special nodes are never opened through these ops (see the panic message), so this
    /// path is unreachable by construction.
    fn create_file_ops(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        _node: &FsNode,
        _current_task: &CurrentTask,
        _flags: OpenFlags,
    ) -> Result<Box<dyn FileOps>, Errno> {
        unreachable!("Special nodes cannot be opened.");
    }
}
1170
1171fn zxio_read_write_inner_map_error(status: zx::Status) -> Errno {
1172    match status {
1173        // zx::Stream may return invalid args or not found error because of
1174        // invalid zx_iovec buffer pointers.
1175        zx::Status::INVALID_ARGS | zx::Status::NOT_FOUND => errno!(EFAULT, ""),
1176        status => from_status_like_fdio!(status),
1177    }
1178}
1179
/// Reads into `data`, preferring the vectorized `unified_read_fn` when the output buffer
/// exposes iovec segments, and falling back to `vmo_read_fn` through an intermediate heap
/// buffer otherwise.
///
/// Returns the number of bytes read, or an `Errno` mapped from the zxio status.
fn zxio_read_inner(
    data: &mut dyn OutputBuffer,
    unified_read_fn: impl FnOnce(&[syncio::zxio::zx_iovec]) -> Result<usize, zx::Status>,
    vmo_read_fn: impl FnOnce(&mut [u8]) -> Result<usize, zx::Status>,
) -> Result<usize, Errno> {
    // `None` here means the buffer could not be exposed as iovec segments; take the
    // copy-based fallback path below.
    let read_bytes = with_iovec_segments(data, |iovecs| {
        unified_read_fn(&iovecs).map_err(zxio_read_write_inner_map_error)
    });

    match read_bytes {
        Some(actual) => {
            let actual = actual?;
            // SAFETY: we successfully read `actual` bytes
            // directly to the user's buffer segments.
            unsafe { data.advance(actual) }?;
            Ok(actual)
        }
        None => {
            // Perform the (slower) operation by using an intermediate buffer.
            let total = data.available();
            let mut bytes = vec![0u8; total];
            let actual =
                vmo_read_fn(&mut bytes).map_err(|status| from_status_like_fdio!(status))?;
            data.write_all(&bytes[0..actual])
        }
    }
}
1207
/// Reads from `zxio` at `offset` into `data`, using the vectorized `readv_at` fast path
/// when possible (see `zxio_read_inner`).
fn zxio_read_at(zxio: &Zxio, offset: usize, data: &mut dyn OutputBuffer) -> Result<usize, Errno> {
    let offset = offset as u64;
    zxio_read_inner(
        data,
        |iovecs| {
            // SAFETY: `zxio_read_inner` maps the returned error to an appropriate
            // `Errno` for userspace to handle. `data` only points to memory that
            // is allowed to be written to (Linux user-mode aspace or a valid
            // Starnix owned buffer).
            unsafe { zxio.readv_at(offset, iovecs) }
        },
        |bytes| zxio.read_at(offset, bytes),
    )
}
1222
1223fn zxio_write_inner(
1224    data: &mut dyn InputBuffer,
1225    unified_write_fn: impl FnOnce(&[syncio::zxio::zx_iovec]) -> Result<usize, zx::Status>,
1226    vmo_write_fn: impl FnOnce(&[u8]) -> Result<usize, zx::Status>,
1227) -> Result<usize, Errno> {
1228    let write_bytes = with_iovec_segments(data, |iovecs| {
1229        unified_write_fn(&iovecs).map_err(zxio_read_write_inner_map_error)
1230    });
1231
1232    match write_bytes {
1233        Some(actual) => {
1234            let actual = actual?;
1235            data.advance(actual)?;
1236            Ok(actual)
1237        }
1238        None => {
1239            // Perform the (slower) operation by using an intermediate buffer.
1240            let bytes = data.peek_all()?;
1241            let actual = vmo_write_fn(&bytes).map_err(|status| from_status_like_fdio!(status))?;
1242            data.advance(actual)?;
1243            Ok(actual)
1244        }
1245    }
1246}
1247
/// Writes `data` to `zxio` at `offset`, using the vectorized `writev_at` fast path when
/// possible (see `zxio_write_inner`).
///
/// `_current_task` is unused in this implementation.
fn zxio_write_at(
    zxio: &Zxio,
    _current_task: &CurrentTask,
    offset: usize,
    data: &mut dyn InputBuffer,
) -> Result<usize, Errno> {
    let offset = offset as u64;
    zxio_write_inner(
        data,
        |iovecs| {
            // SAFETY: `zxio_write_inner` maps the returned error to an appropriate
            // `Errno` for userspace to handle.
            unsafe { zxio.writev_at(offset, iovecs) }
        },
        |bytes| zxio.write_at(offset, bytes),
    )
}
1265
/// Helper struct to track the context necessary to iterate over dir entries.
#[derive(Default)]
struct RemoteDirectoryIterator<'a> {
    // Lazily created zxio dirent iterator; None until first use (see
    // `get_or_init_iterator`).
    iterator: Option<DirentIterator<'a>>,

    /// If the last attempt to write to the sink failed, this contains the entry that is pending to
    /// be added. This is also used to synthesize dot-dot.
    pending_entry: Entry,
}
1275
// The result of advancing a `RemoteDirectoryIterator`.
#[derive(Default)]
enum Entry {
    // Indicates no more entries.
    #[default]
    None,

    // A real directory entry returned by the remote iterator.
    Some(ZxioDirent),

    // Indicates dot-dot should be synthesized.
    DotDot,
}
1287
1288impl Entry {
1289    fn take(&mut self) -> Entry {
1290        std::mem::replace(self, Entry::None)
1291    }
1292}
1293
1294impl From<Option<ZxioDirent>> for Entry {
1295    fn from(value: Option<ZxioDirent>) -> Self {
1296        match value {
1297            None => Entry::None,
1298            Some(x) => Entry::Some(x),
1299        }
1300    }
1301}
1302
1303impl<'a> RemoteDirectoryIterator<'a> {
1304    fn get_or_init_iterator(&mut self, zxio: &'a Zxio) -> Result<&mut DirentIterator<'a>, Errno> {
1305        if self.iterator.is_none() {
1306            let iterator =
1307                zxio.create_dirent_iterator().map_err(|status| from_status_like_fdio!(status))?;
1308            self.iterator = Some(iterator);
1309        }
1310        if let Some(iterator) = &mut self.iterator {
1311            return Ok(iterator);
1312        }
1313
1314        // Should be an impossible error, because we just created the iterator above.
1315        error!(EIO)
1316    }
1317
1318    /// Returns the next dir entry. If no more entries are found, returns None.  Returns an error if
1319    /// the iterator fails for other reasons described by the zxio library.
1320    pub fn next(&mut self, zxio: &'a Zxio) -> Result<Entry, Errno> {
1321        let mut next = self.pending_entry.take();
1322        if let Entry::None = next {
1323            next = self
1324                .get_or_init_iterator(zxio)?
1325                .next()
1326                .transpose()
1327                .map_err(|status| from_status_like_fdio!(status))?
1328                .into();
1329        }
1330        // We only want to synthesize .. if . exists because the . and .. entries get removed if the
1331        // directory is unlinked, so if the remote filesystem has removed ., we know to omit the
1332        // .. entry.
1333        match &next {
1334            Entry::Some(ZxioDirent { name, .. }) if name == "." => {
1335                self.pending_entry = Entry::DotDot;
1336            }
1337            _ => {}
1338        }
1339        Ok(next)
1340    }
1341}
1342
// `FileOps` for a remote directory: pairs the zxio handle with a lazily driven dirent
// iterator.
struct RemoteDirectoryObject {
    iterator: Mutex<RemoteDirectoryIterator<'static>>,

    // The underlying Zircon I/O object.  This *must* be dropped after `iterator` above because the
    // iterator has references to this object.  We use some unsafe code below to erase the lifetime
    // (hence the 'static above).
    zxio: Zxio,
}
1351
impl RemoteDirectoryObject {
    /// Wraps `zxio` with an initially empty dirent iterator.
    pub fn new(zxio: Zxio) -> RemoteDirectoryObject {
        RemoteDirectoryObject { zxio, iterator: Mutex::new(RemoteDirectoryIterator::default()) }
    }

    /// Returns a reference to Zxio with the lifetime erased.
    ///
    /// # Safety
    ///
    /// The caller must uphold the lifetime requirements, which will be the case if this is only
    /// used for the contained iterator (`iterator` is dropped before `zxio`).
    unsafe fn zxio(&self) -> &'static Zxio {
        #[allow(clippy::undocumented_unsafe_blocks, reason = "2024 edition migration")]
        unsafe {
            &*(&self.zxio as *const Zxio)
        }
    }
}
1370
impl FileOps for RemoteDirectoryObject {
    fileops_impl_directory!();

    /// Seeks the directory offset. The remote iterator only moves forward, so a backwards
    /// seek rewinds it and then replays entries up to the new offset.
    fn seek(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        _file: &FileObject,
        _current_task: &CurrentTask,
        current_offset: off_t,
        target: SeekTarget,
    ) -> Result<off_t, Errno> {
        let mut iterator = self.iterator.lock();
        let new_offset = default_seek(current_offset, target, || error!(EINVAL))?;
        let mut iterator_position = current_offset;

        if new_offset < iterator_position {
            // Our iterator only goes forward, so reset it here.  Note: we *must* rewind it rather
            // than just create a new iterator because the remote end maintains the offset.
            if let Some(iterator) = &mut iterator.iterator {
                iterator.rewind().map_err(|status| from_status_like_fdio!(status))?;
            }
            iterator.pending_entry = Entry::None;
            iterator_position = 0;
        }

        // Advance the iterator to catch up with the offset.
        for i in iterator_position..new_offset {
            // SAFETY: See the comment on the `zxio` function above.  The iterator outlives this
            // function and the zxio object must outlive the iterator.
            match iterator.next(unsafe { self.zxio() }) {
                Ok(Entry::Some(_) | Entry::DotDot) => {}
                Ok(Entry::None) => break, // No more entries.
                Err(_) => {
                    // In order to keep the offset and the iterator in sync, set the new offset
                    // to be as far as we could get.
                    // Note that failing the seek here would also cause the iterator and the
                    // offset to not be in sync, because the iterator has already moved from
                    // where it was.
                    return Ok(i);
                }
            }
        }

        Ok(new_offset)
    }

    /// Emits directory entries into `sink` from the current iterator position. An entry
    /// the sink rejects is stashed back as `pending_entry` so it is retried next time.
    fn readdir(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        file: &FileObject,
        _current_task: &CurrentTask,
        sink: &mut dyn DirentSink,
    ) -> Result<(), Errno> {
        // It is important to acquire the lock to the offset before the context, to avoid a deadlock
        // where seek() tries to modify the context.
        let mut iterator = self.iterator.lock();

        loop {
            // SAFETY: See the comment on the `zxio` function above.  The iterator outlives this
            // function and the zxio object must outlive the iterator.
            let entry = iterator.next(unsafe { self.zxio() })?;
            if let Err(e) = match &entry {
                Entry::Some(entry) => {
                    let inode_num: ino_t = entry.id.ok_or_else(|| errno!(EIO))?;
                    let entry_type = if entry.is_dir() {
                        DirectoryEntryType::DIR
                    } else if entry.is_file() {
                        DirectoryEntryType::REG
                    } else {
                        DirectoryEntryType::UNKNOWN
                    };
                    sink.add(inode_num, sink.offset() + 1, entry_type, entry.name.as_bstr())
                }
                Entry::DotDot => {
                    let inode_num = if let Some(parent) = file.name.parent_within_mount() {
                        parent.node.ino
                    } else {
                        // For the root .. should have the same inode number as .
                        file.name.entry.node.ino
                    };
                    sink.add(inode_num, sink.offset() + 1, DirectoryEntryType::DIR, "..".into())
                }
                Entry::None => break,
            } {
                iterator.pending_entry = entry;
                return Err(e);
            }
        }
        Ok(())
    }

    /// Syncs the remote directory, translating zx statuses to errno values.
    fn sync(&self, _file: &FileObject, _current_task: &CurrentTask) -> Result<(), Errno> {
        self.zxio.sync().map_err(|status| match status {
            zx::Status::NO_RESOURCES | zx::Status::NO_MEMORY | zx::Status::NO_SPACE => {
                errno!(ENOSPC)
            }
            zx::Status::INVALID_ARGS | zx::Status::NOT_FILE => errno!(EINVAL),
            zx::Status::BAD_HANDLE => errno!(EBADFD),
            zx::Status::NOT_SUPPORTED => errno!(ENOTSUP),
            zx::Status::INTERRUPTED_RETRY => errno!(EINTR),
            _ => errno!(EIO),
        })
    }

    /// Returns a handle to the remote object by deep-cloning the zxio object and
    /// releasing its underlying handle.
    fn to_handle(
        &self,
        _file: &FileObject,
        _current_task: &CurrentTask,
    ) -> Result<Option<zx::NullableHandle>, Errno> {
        self.zxio
            .deep_clone()
            .and_then(Zxio::release)
            .map(Some)
            .map_err(|status| from_status_like_fdio!(status))
    }
}
1487
/// `FileOps` for a remote regular file, caching VMO handles handed out by `get_memory`.
pub struct RemoteFileObject {
    /// The underlying Zircon I/O object.  This is shared, so we must take care not to use any
    /// stateful methods on the underlying object (reading and writing is fine).
    zxio: Zxio,

    /// Cached read-only VMO handle.
    read_only_memory: OnceCell<Arc<MemoryObject>>,

    /// Cached read/exec VMO handle.
    read_exec_memory: OnceCell<Arc<MemoryObject>>,
}
1499
1500impl RemoteFileObject {
1501    fn new(zxio: Zxio) -> RemoteFileObject {
1502        RemoteFileObject {
1503            zxio,
1504            read_only_memory: Default::default(),
1505            read_exec_memory: Default::default(),
1506        }
1507    }
1508
1509    fn fetch_remote_memory(&self, prot: ProtectionFlags) -> Result<Arc<MemoryObject>, Errno> {
1510        let without_exec = self
1511            .zxio
1512            .vmo_get(prot.to_vmar_flags() - zx::VmarFlags::PERM_EXECUTE)
1513            .map_err(|status| from_status_like_fdio!(status))?;
1514        let all_flags = if prot.contains(ProtectionFlags::EXEC) {
1515            without_exec.replace_as_executable(&VMEX_RESOURCE).map_err(impossible_error)?
1516        } else {
1517            without_exec
1518        };
1519        Ok(Arc::new(MemoryObject::from(all_flags)))
1520    }
1521}
1522
impl FileOps for RemoteFileObject {
    fileops_impl_seekable!();

    /// Reads from the remote file at `offset` into `data`.
    fn read(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        _file: &FileObject,
        _current_task: &CurrentTask,
        offset: usize,
        data: &mut dyn OutputBuffer,
    ) -> Result<usize, Errno> {
        zxio_read_at(&self.zxio, offset, data)
    }

    /// Writes `data` to the remote file at `offset`.
    fn write(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        _file: &FileObject,
        current_task: &CurrentTask,
        offset: usize,
        data: &mut dyn InputBuffer,
    ) -> Result<usize, Errno> {
        zxio_write_at(&self.zxio, current_task, offset, data)
    }

    /// Returns a memory object backing the file for the given protection.
    ///
    /// READ-only and READ|EXEC requests are cached (one fetch each); any other protection
    /// combination fetches a fresh memory object every call.
    fn get_memory(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        _file: &FileObject,
        _current_task: &CurrentTask,
        _length: Option<usize>,
        prot: ProtectionFlags,
    ) -> Result<Arc<MemoryObject>, Errno> {
        trace_duration!(CATEGORY_STARNIX_MM, "RemoteFileGetVmo");
        let memory_cache = if prot == (ProtectionFlags::READ | ProtectionFlags::EXEC) {
            Some(&self.read_exec_memory)
        } else if prot == ProtectionFlags::READ {
            Some(&self.read_only_memory)
        } else {
            None
        };

        memory_cache
            .map(|c| c.get_or_try_init(|| self.fetch_remote_memory(prot)).cloned())
            .unwrap_or_else(|| self.fetch_remote_memory(prot))
    }

    /// Returns a handle to the remote object by deep-cloning the zxio object and
    /// releasing its underlying handle.
    fn to_handle(
        &self,
        _file: &FileObject,
        _current_task: &CurrentTask,
    ) -> Result<Option<zx::NullableHandle>, Errno> {
        self.zxio
            .deep_clone()
            .and_then(Zxio::release)
            .map(Some)
            .map_err(|status| from_status_like_fdio!(status))
    }

    /// Syncs the remote file, translating zx statuses to errno values.
    fn sync(&self, _file: &FileObject, _current_task: &CurrentTask) -> Result<(), Errno> {
        self.zxio.sync().map_err(|status| match status {
            zx::Status::NO_RESOURCES | zx::Status::NO_MEMORY | zx::Status::NO_SPACE => {
                errno!(ENOSPC)
            }
            zx::Status::INVALID_ARGS | zx::Status::NOT_FILE => errno!(EINVAL),
            zx::Status::BAD_HANDLE => errno!(EBADFD),
            zx::Status::NOT_SUPPORTED => errno!(ENOTSUP),
            zx::Status::INTERRUPTED_RETRY => errno!(EINTR),
            _ => errno!(EIO),
        })
    }

    /// Delegates all ioctls to the default handler.
    fn ioctl(
        &self,
        locked: &mut Locked<Unlocked>,
        file: &FileObject,
        current_task: &CurrentTask,
        request: u32,
        arg: SyscallArg,
    ) -> Result<SyscallResult, Errno> {
        default_ioctl(file, locked, current_task, request, arg)
    }
}
1606
// `FsNodeOps` for a remote symbolic link.
struct RemoteSymlink {
    // Behind a Mutex because `lookup` may swap in a freshly opened handle for a cached
    // symlink node.
    zxio: Mutex<syncio::Zxio>,
}
1610
impl RemoteSymlink {
    /// Returns a clone of the underlying zxio handle, holding the lock only briefly.
    fn zxio(&self) -> syncio::Zxio {
        self.zxio.lock().clone()
    }
}
1616
impl FsNodeOps for RemoteSymlink {
    fs_node_impl_symlink!();
    // Extended-attribute operations are serviced by a fresh clone of the
    // remote connection.
    fs_node_impl_xattr_delegate!(self, self.zxio());

    /// Reads the link target from the remote node and returns it verbatim as
    /// a path (no resolution is performed here).
    fn readlink(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        _node: &FsNode,
        _current_task: &CurrentTask,
    ) -> Result<SymlinkTarget, Errno> {
        Ok(SymlinkTarget::Path(
            self.zxio().read_link().map_err(|status| from_status_like_fdio!(status))?.into(),
        ))
    }

    /// Re-fetches the node's attributes from the remote filesystem and
    /// returns a read guard over the refreshed info.
    fn fetch_and_refresh_info<'a>(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        _node: &FsNode,
        _current_task: &CurrentTask,
        info: &'a RwLock<FsNodeInfo>,
    ) -> Result<RwLockReadGuard<'a, FsNodeInfo>, Errno> {
        fetch_and_refresh_info_impl(&self.zxio(), info)
    }

    /// Releases this node, flushing a deferred access-time update to the
    /// remote filesystem if one is pending.
    fn forget(
        self: Box<Self>,
        _locked: &mut Locked<FileOpsCore>,
        _current_task: &CurrentTask,
        info: FsNodeInfo,
    ) -> Result<(), Errno> {
        // Before forgetting this node, update atime if we need to.
        if info.pending_time_access_update {
            self.zxio()
                .close_and_update_access_time()
                .map_err(|status| from_status_like_fdio!(status))?;
        }
        Ok(())
    }
}
1657
/// File ops backed by a Zircon counter object. The counter carries no byte
/// stream; it is exposed to userspace primarily through sync-file ioctls.
pub struct RemoteCounter {
    // The underlying Zircon counter handle.
    counter: Counter,
}
1661
1662impl RemoteCounter {
1663    fn new(counter: Counter) -> Self {
1664        Self { counter }
1665    }
1666
1667    pub fn duplicate_handle(&self) -> Result<Counter, Errno> {
1668        self.counter.duplicate_handle(zx::Rights::SAME_RIGHTS).map_err(impossible_error)
1669    }
1670}
1671
impl FileOps for RemoteCounter {
    fileops_impl_nonseekable!();
    fileops_impl_noop_sync!();

    /// Counters hold no readable byte stream.
    fn read(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        _file: &FileObject,
        _current_task: &CurrentTask,
        _offset: usize,
        _data: &mut dyn OutputBuffer,
    ) -> Result<usize, Errno> {
        error!(ENOTSUP)
    }

    /// Counters accept no written bytes.
    fn write(
        &self,
        _locked: &mut Locked<FileOpsCore>,
        _file: &FileObject,
        _current_task: &CurrentTask,
        _offset: usize,
        _data: &mut dyn InputBuffer,
    ) -> Result<usize, Errno> {
        error!(ENOTSUP)
    }

    /// Services the sync-file subset of ioctls by wrapping the counter in a
    /// `SyncFile`; all other requests fail with EINVAL.
    fn ioctl(
        &self,
        locked: &mut Locked<Unlocked>,
        file: &FileObject,
        current_task: &CurrentTask,
        request: u32,
        arg: SyscallArg,
    ) -> Result<SyscallResult, Errno> {
        // Decode the Linux _IOC layout: bits 8..16 hold the magic ("type")
        // byte, bits 0..8 the command number.
        let ioctl_type = (request >> 8) as u8;
        let ioctl_number = request as u8;
        if ioctl_type == SYNC_IOC_MAGIC
            && (ioctl_number == SYNC_IOC_FILE_INFO || ioctl_number == SYNC_IOC_MERGE)
        {
            // Build a one-point fence on the Hwc timeline from a duplicate of
            // this counter, then let SyncFile handle the actual request.
            let mut sync_points: Vec<SyncPoint> = vec![];
            let counter = self.duplicate_handle()?;
            sync_points.push(SyncPoint::new(Timeline::Hwc, counter.into()));
            // SyncFile names are fixed-size 32-byte buffers; pad with NULs.
            let sync_file_name: &[u8; 32] = b"remote counter\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0";
            let sync_file = SyncFile::new(*sync_file_name, SyncFence { sync_points });
            return sync_file.ioctl(locked, file, current_task, request, arg);
        }

        error!(EINVAL)
    }
}
1722
1723#[cfg(test)]
1724mod test {
1725    use super::*;
1726    use crate::mm::PAGE_SIZE;
1727    use crate::testing::*;
1728    use crate::vfs::buffers::{VecInputBuffer, VecOutputBuffer};
1729    use crate::vfs::socket::{SocketFile, SocketMessageFlags};
1730    use crate::vfs::{EpollFileObject, LookupContext, Namespace, SymlinkMode, TimeUpdateType};
1731    use assert_matches::assert_matches;
1732    use fidl_fuchsia_io as fio;
1733    use flyweights::FlyByteStr;
1734    use fxfs_testing::{TestFixture, TestFixtureOptions};
1735    use starnix_uapi::auth::Credentials;
1736    use starnix_uapi::errors::EINVAL;
1737    use starnix_uapi::file_mode::{AccessCheck, mode};
1738    use starnix_uapi::open_flags::OpenFlags;
1739    use starnix_uapi::vfs::{EpollEvent, FdEvents};
1740    use zx::HandleBased;
1741
1742    #[::fuchsia::test]
1743    async fn test_remote_uds() {
1744        spawn_kernel_and_run(async |locked, current_task| {
1745            let (s1, s2) = zx::Socket::create_datagram();
1746            s2.write(&vec![0]).expect("write");
1747            let file = new_remote_file(locked, &current_task, s1.into(), OpenFlags::RDWR)
1748                .expect("new_remote_file");
1749            assert!(file.node().is_sock());
1750            let socket_ops = file.downcast_file::<SocketFile>().unwrap();
1751            let flags = SocketMessageFlags::CTRUNC
1752                | SocketMessageFlags::TRUNC
1753                | SocketMessageFlags::NOSIGNAL
1754                | SocketMessageFlags::CMSG_CLOEXEC;
1755            let mut buffer = VecOutputBuffer::new(1024);
1756            let info = socket_ops
1757                .recvmsg(locked, &current_task, &file, &mut buffer, flags, None)
1758                .expect("recvmsg");
1759            assert!(info.ancillary_data.is_empty());
1760            assert_eq!(info.message_length, 1);
1761        })
1762        .await;
1763    }
1764
    /// Mounts the test package directory as a RemoteFs and checks basic path
    /// resolution: missing entries report ENOENT, existing entries resolve,
    /// and a multi-component path can be opened read-only.
    #[::fuchsia::test]
    async fn test_tree() {
        spawn_kernel_and_run(async |locked, current_task| {
            let kernel = current_task.kernel();
            let rights = fio::PERM_READABLE | fio::PERM_EXECUTABLE;
            let (server, client) = zx::Channel::create();
            fdio::open("/pkg", rights, server).expect("failed to open /pkg");
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                FileSystemOptions { source: FlyByteStr::new(b"/pkg"), ..Default::default() },
                rights,
            )
            .unwrap();
            let ns = Namespace::new(fs);
            let root = ns.root();
            // A name that does not exist in the package must yield ENOENT.
            let mut context = LookupContext::default();
            assert_eq!(
                root.lookup_child(locked, &current_task, &mut context, "nib".into()).err(),
                Some(errno!(ENOENT))
            );
            // "lib" is expected to exist in the test package.
            let mut context = LookupContext::default();
            root.lookup_child(locked, &current_task, &mut context, "lib".into()).unwrap();

            // A nested path resolves and the resulting node can be opened.
            let mut context = LookupContext::default();
            let _test_file = root
                .lookup_child(
                    locked,
                    &current_task,
                    &mut context,
                    "data/tests/hello_starnix".into(),
                )
                .unwrap()
                .open(locked, &current_task, OpenFlags::RDONLY, AccessCheck::default())
                .unwrap();
        })
        .await;
    }
1804
    /// Reads from a fuchsia-pipe-backed file after the peer has already
    /// written, and checks the full payload is returned.
    #[::fuchsia::test]
    async fn test_blocking_io() {
        spawn_kernel_and_run(async |locked, current_task| {
            let (client, server) = zx::Socket::create_stream();
            let pipe = create_fuchsia_pipe(locked, &current_task, client, OpenFlags::RDWR).unwrap();

            let bytes = [0u8; 64];
            assert_eq!(bytes.len(), server.write(&bytes).unwrap());

            // The 64 bytes were written above, so this read completes
            // immediately rather than blocking on the socket.
            let bytes_read =
                pipe.read(locked, &current_task, &mut VecOutputBuffer::new(64)).unwrap();

            assert_eq!(bytes_read, bytes.len());
        })
        .await;
    }
1822
    /// Exercises event reporting for a fuchsia-pipe file through both
    /// `query_events` and epoll as data is written and drained.
    #[::fuchsia::test]
    async fn test_poll() {
        spawn_kernel_and_run(async |locked, current_task| {
            let (client, server) = zx::Socket::create_stream();
            let pipe = create_fuchsia_pipe(locked, &current_task, client, OpenFlags::RDWR)
                .expect("create_fuchsia_pipe");
            let server_zxio = Zxio::create(server.into_handle()).expect("Zxio::create");

            // An empty stream socket is writable but not yet readable.
            assert_eq!(
                pipe.query_events(locked, &current_task),
                Ok(FdEvents::POLLOUT | FdEvents::POLLWRNORM)
            );

            let epoll_object = EpollFileObject::new_file(locked, &current_task);
            let epoll_file = epoll_object.downcast_file::<EpollFileObject>().unwrap();
            let event = EpollEvent::new(FdEvents::POLLIN, 0);
            epoll_file
                .add(locked, &current_task, &pipe, &epoll_object, event)
                .expect("poll_file.add");

            // Nothing to read yet, so the epoll wait reports no ready files.
            let fds = epoll_file
                .wait(locked, &current_task, 1, zx::MonotonicInstant::ZERO)
                .expect("wait");
            assert!(fds.is_empty());

            assert_eq!(server_zxio.write(&[0]).expect("write"), 1);

            // One byte is now pending: readable events appear and epoll
            // reports exactly one ready file.
            assert_eq!(
                pipe.query_events(locked, &current_task),
                Ok(FdEvents::POLLOUT
                    | FdEvents::POLLWRNORM
                    | FdEvents::POLLIN
                    | FdEvents::POLLRDNORM)
            );
            let fds = epoll_file
                .wait(locked, &current_task, 1, zx::MonotonicInstant::ZERO)
                .expect("wait");
            assert_eq!(fds.len(), 1);

            assert_eq!(
                pipe.read(locked, &current_task, &mut VecOutputBuffer::new(64)).expect("read"),
                1
            );

            // Draining the byte returns the pipe to the writable-only state.
            assert_eq!(
                pipe.query_events(locked, &current_task),
                Ok(FdEvents::POLLOUT | FdEvents::POLLWRNORM)
            );
            let fds = epoll_file
                .wait(locked, &current_task, 1, zx::MonotonicInstant::ZERO)
                .expect("wait");
            assert!(fds.is_empty());
        })
        .await;
    }
1878
1879    #[::fuchsia::test]
1880    async fn test_new_remote_directory() {
1881        spawn_kernel_and_run(async |locked, current_task| {
1882            let (server, client) = zx::Channel::create();
1883            fdio::open("/pkg", fio::PERM_READABLE | fio::PERM_EXECUTABLE, server)
1884                .expect("failed to open /pkg");
1885
1886            let fd = new_remote_file(locked, &current_task, client.into(), OpenFlags::RDWR)
1887                .expect("new_remote_file");
1888            assert!(fd.node().is_dir());
1889            assert!(fd.to_handle(&current_task).expect("to_handle").is_some());
1890        })
1891        .await;
1892    }
1893
1894    #[::fuchsia::test]
1895    async fn test_new_remote_file() {
1896        spawn_kernel_and_run(async |locked, current_task| {
1897            let (server, client) = zx::Channel::create();
1898            fdio::open("/pkg/meta/contents", fio::PERM_READABLE, server)
1899                .expect("failed to open /pkg/meta/contents");
1900
1901            let fd = new_remote_file(locked, &current_task, client.into(), OpenFlags::RDONLY)
1902                .expect("new_remote_file");
1903            assert!(!fd.node().is_dir());
1904            assert!(fd.to_handle(&current_task).expect("to_handle").is_some());
1905        })
1906        .await;
1907    }
1908
1909    #[::fuchsia::test]
1910    async fn test_new_remote_counter() {
1911        spawn_kernel_and_run(async |locked, current_task| {
1912            let counter = zx::Counter::create();
1913
1914            let fd = new_remote_file(locked, &current_task, counter.into(), OpenFlags::RDONLY)
1915                .expect("new_remote_file");
1916            assert!(fd.to_handle(&current_task).expect("to_handle").is_some());
1917        })
1918        .await;
1919    }
1920
1921    #[::fuchsia::test]
1922    async fn test_new_remote_vmo() {
1923        spawn_kernel_and_run(async |locked, current_task| {
1924            let vmo = zx::Vmo::create(*PAGE_SIZE).expect("Vmo::create");
1925            let fd = new_remote_file(locked, &current_task, vmo.into(), OpenFlags::RDWR)
1926                .expect("new_remote_file");
1927            assert!(!fd.node().is_dir());
1928            assert!(fd.to_handle(&current_task).expect("to_handle").is_some());
1929        })
1930        .await;
1931    }
1932
    /// Creates a symlink with a multi-byte UTF-8 target, verifies its size
    /// and target, then remounts the fixture to check the link persisted.
    #[::fuchsia::test(threads = 2)]
    async fn test_symlink() {
        let fixture = TestFixture::new().await;
        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed");

        const LINK_PATH: &'static str = "symlink";
        const LINK_TARGET: &'static str = "私は「UTF8」です";
        // We expect the reported size of the symlink to be the length of the target, in bytes,
        // *without* a null terminator. Most Linux systems assume UTF-8 encoding.
        const LINK_SIZE: usize = 22;
        assert_eq!(LINK_SIZE, LINK_TARGET.len());

        // First run: create the symlink and verify size, target, and stat.
        spawn_kernel_and_run(async move |locked, current_task| {
            let kernel = current_task.kernel();
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
                fio::PERM_READABLE | fio::PERM_WRITABLE,
            )
            .expect("new_fs failed");
            let ns = Namespace::new(fs);
            let root = ns.root();
            let symlink_node = root
                .create_symlink(locked, &current_task, LINK_PATH.into(), LINK_TARGET.into())
                .expect("symlink failed");
            assert_matches!(&*symlink_node.entry.node.info(), FsNodeInfo { size: LINK_SIZE, .. });

            // NoFollow so the lookup returns the link itself, not its target.
            let mut context = LookupContext::new(SymlinkMode::NoFollow);
            let child = root
                .lookup_child(locked, &current_task, &mut context, "symlink".into())
                .expect("lookup_child failed");

            match child.readlink(locked, &current_task).expect("readlink failed") {
                SymlinkTarget::Path(path) => assert_eq!(path, LINK_TARGET),
                SymlinkTarget::Node(_) => panic!("readlink returned SymlinkTarget::Node"),
            }
            // Ensure the size stat reports matches what is expected.
            let stat_result = child.entry.node.stat(locked, &current_task).expect("stat failed");
            assert_eq!(stat_result.st_size as usize, LINK_SIZE);
        })
        .await;

        // Simulate a second run to ensure the symlink was persisted correctly.
        let fixture = TestFixture::open(
            fixture.close().await,
            TestFixtureOptions { format: false, ..Default::default() },
        )
        .await;
        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed after remount");

        spawn_kernel_and_run(async move |locked, current_task| {
            let kernel = current_task.kernel();
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
                fio::PERM_READABLE | fio::PERM_WRITABLE,
            )
            .expect("new_fs failed after remount");
            let ns = Namespace::new(fs);
            let root = ns.root();
            let mut context = LookupContext::new(SymlinkMode::NoFollow);
            let child = root
                .lookup_child(locked, &current_task, &mut context, "symlink".into())
                .expect("lookup_child failed after remount");

            match child.readlink(locked, &current_task).expect("readlink failed after remount") {
                SymlinkTarget::Path(path) => assert_eq!(path, LINK_TARGET),
                SymlinkTarget::Node(_) => {
                    panic!("readlink returned SymlinkTarget::Node after remount")
                }
            }
            // Ensure the size stat reports matches what is expected.
            let stat_result =
                child.entry.node.stat(locked, &current_task).expect("stat failed after remount");
            assert_eq!(stat_result.st_size as usize, LINK_SIZE);
        })
        .await;

        fixture.close().await;
    }
2019
    /// Creates a file, a directory, and a block device node with specific
    /// modes/owners, then remounts the fixture and verifies mode, uid, gid,
    /// and rdev all round-trip through persistent storage.
    #[::fuchsia::test]
    async fn test_mode_uid_gid_and_dev_persists() {
        const FILE_MODE: FileMode = mode!(IFREG, 0o467);
        const DIR_MODE: FileMode = mode!(IFDIR, 0o647);
        const BLK_MODE: FileMode = mode!(IFBLK, 0o746);

        let fixture = TestFixture::new().await;
        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed");

        // Simulate a first run of starnix.
        spawn_kernel_and_run(async move |locked, current_task| {
            let kernel = current_task.kernel();
            // Use non-root credentials so ownership (uid=1, gid=2) is
            // recorded on the created nodes.
            let creds = Credentials::clone(&current_task.current_creds());
            current_task.set_creds(Credentials { euid: 1, fsuid: 1, egid: 2, fsgid: 2, ..creds });
            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
                rights,
            )
            .expect("new_fs failed");
            let ns = Namespace::new(fs);
            // Clear the umask so the literal modes above are stored.
            current_task.fs().set_umask(FileMode::from_bits(0));
            ns.root()
                .create_node(locked, &current_task, "file".into(), FILE_MODE, DeviceType::NONE)
                .expect("create_node failed");
            ns.root()
                .create_node(locked, &current_task, "dir".into(), DIR_MODE, DeviceType::NONE)
                .expect("create_node failed");
            ns.root()
                .create_node(locked, &current_task, "dev".into(), BLK_MODE, DeviceType::RANDOM)
                .expect("create_node failed");
        })
        .await;

        // Simulate a second run.
        let fixture = TestFixture::open(
            fixture.close().await,
            TestFixtureOptions { format: false, ..Default::default() },
        )
        .await;

        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed");

        spawn_kernel_and_run(async move |locked, current_task| {
            let kernel = current_task.kernel();
            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
                rights,
            )
            .expect("new_fs failed");
            let ns = Namespace::new(fs);
            let mut context = LookupContext::new(SymlinkMode::NoFollow);
            let child = ns
                .root()
                .lookup_child(locked, &current_task, &mut context, "file".into())
                .expect("lookup_child failed");
            assert_matches!(
                &*child.entry.node.info(),
                FsNodeInfo { mode: FILE_MODE, uid: 1, gid: 2, rdev: DeviceType::NONE, .. }
            );
            let child = ns
                .root()
                .lookup_child(locked, &current_task, &mut context, "dir".into())
                .expect("lookup_child failed");
            assert_matches!(
                &*child.entry.node.info(),
                FsNodeInfo { mode: DIR_MODE, uid: 1, gid: 2, rdev: DeviceType::NONE, .. }
            );
            let child = ns
                .root()
                .lookup_child(locked, &current_task, &mut context, "dev".into())
                .expect("lookup_child failed");
            assert_matches!(
                &*child.entry.node.info(),
                FsNodeInfo { mode: BLK_MODE, uid: 1, gid: 2, rdev: DeviceType::RANDOM, .. }
            );
        })
        .await;
        fixture.close().await;
    }
2109
    /// Verifies the inode number reported for ".." at each directory level:
    /// the root's ".." is the root itself, and a nested directory's ".." is
    /// its parent directory.
    #[::fuchsia::test]
    async fn test_dot_dot_inode_numbers() {
        let fixture = TestFixture::new().await;
        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed");

        const MODE: FileMode = FileMode::from_bits(FileMode::IFDIR.bits() | 0o777);

        spawn_kernel_and_run(async |locked, current_task| {
            let kernel = current_task.kernel();
            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
                rights,
            )
            .expect("new_fs failed");
            let ns = Namespace::new(fs);
            current_task.fs().set_umask(FileMode::from_bits(0));
            // Build a two-level directory tree: /dir/dir.
            let sub_dir1 = ns
                .root()
                .create_node(locked, &current_task, "dir".into(), MODE, DeviceType::NONE)
                .expect("create_node failed");
            let sub_dir2 = sub_dir1
                .create_node(locked, &current_task, "dir".into(), MODE, DeviceType::NONE)
                .expect("create_node failed");

            let dir_handle = ns
                .root()
                .entry
                .open_anonymous(locked, &current_task, OpenFlags::RDONLY)
                .expect("open failed");

            // A DirentSink that only records the inode number of the ".."
            // entry (and tracks the readdir offset as required).
            #[derive(Default)]
            struct Sink {
                offset: off_t,
                dot_dot_inode_num: u64,
            }
            impl DirentSink for Sink {
                fn add(
                    &mut self,
                    inode_num: ino_t,
                    offset: off_t,
                    entry_type: DirectoryEntryType,
                    name: &FsStr,
                ) -> Result<(), Errno> {
                    if name == ".." {
                        self.dot_dot_inode_num = inode_num;
                        assert_eq!(entry_type, DirectoryEntryType::DIR);
                    }
                    self.offset = offset;
                    Ok(())
                }
                fn offset(&self) -> off_t {
                    self.offset
                }
            }
            let mut sink = Sink::default();
            dir_handle.readdir(locked, &current_task, &mut sink).expect("readdir failed");

            // inode_num for .. for the root should be the same as root.
            assert_eq!(sink.dot_dot_inode_num, ns.root().entry.node.ino);

            let dir_handle = sub_dir1
                .entry
                .open_anonymous(locked, &current_task, OpenFlags::RDONLY)
                .expect("open failed");
            let mut sink = Sink::default();
            dir_handle.readdir(locked, &current_task, &mut sink).expect("readdir failed");

            // inode_num for .. for the first sub directory should be the same as root.
            assert_eq!(sink.dot_dot_inode_num, ns.root().entry.node.ino);

            let dir_handle = sub_dir2
                .entry
                .open_anonymous(locked, &current_task, OpenFlags::RDONLY)
                .expect("open failed");
            let mut sink = Sink::default();
            dir_handle.readdir(locked, &current_task, &mut sink).expect("readdir failed");

            // inode_num for .. for the second subdir should be the first subdir.
            assert_eq!(sink.dot_dot_inode_num, sub_dir1.entry.node.ino);
        })
        .await;
        fixture.close().await;
    }
2198
    /// Special nodes (e.g. FIFOs) stored on a remote filesystem must reject
    /// file-only operations: truncate fails with EINVAL on a FIFO but
    /// succeeds on a regular file.
    #[::fuchsia::test]
    async fn test_remote_special_node() {
        let fixture = TestFixture::new().await;
        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed");

        const FIFO_MODE: FileMode = FileMode::from_bits(FileMode::IFIFO.bits() | 0o777);
        const REG_MODE: FileMode = FileMode::from_bits(FileMode::IFREG.bits());

        spawn_kernel_and_run(async |locked, current_task| {
            let kernel = current_task.kernel();
            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
                rights,
            )
            .expect("new_fs failed");
            let ns = Namespace::new(fs);
            current_task.fs().set_umask(FileMode::from_bits(0));
            let root = ns.root();

            // Create RemoteSpecialNode (e.g. FIFO)
            root.create_node(locked, &current_task, "fifo".into(), FIFO_MODE, DeviceType::NONE)
                .expect("create_node failed");
            let mut context = LookupContext::new(SymlinkMode::NoFollow);
            let fifo_node = root
                .lookup_child(locked, &current_task, &mut context, "fifo".into())
                .expect("lookup_child failed");

            // Test that we get expected behaviour for RemoteSpecialNode operation, e.g.
            // test that truncate should return EINVAL
            match fifo_node.truncate(locked, &current_task, 0) {
                Ok(_) => {
                    panic!("truncate passed for special node")
                }
                Err(errno) if errno == EINVAL => {}
                Err(e) => {
                    panic!("truncate failed with error {:?}", e)
                }
            };

            // Create regular RemoteNode
            root.create_node(locked, &current_task, "file".into(), REG_MODE, DeviceType::NONE)
                .expect("create_node failed");
            let mut context = LookupContext::new(SymlinkMode::NoFollow);
            let reg_node = root
                .lookup_child(locked, &current_task, &mut context, "file".into())
                .expect("lookup_child failed");

            // We should be able to perform truncate on regular files
            reg_node.truncate(locked, &current_task, 0).expect("truncate failed");
        })
        .await;
        fixture.close().await;
    }
2257
    /// Creates a hard link, remounts the fixture, and verifies both names
    /// resolve to the same in-memory node (same inode) after the remount.
    #[::fuchsia::test]
    async fn test_hard_link() {
        let fixture = TestFixture::new().await;
        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed");

        // First run: create file1 and hard-link file2 to it.
        spawn_kernel_and_run(async move |locked, current_task| {
            let kernel = current_task.kernel();
            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
                rights,
            )
            .expect("new_fs failed");
            let ns = Namespace::new(fs);
            current_task.fs().set_umask(FileMode::from_bits(0));
            let node = ns
                .root()
                .create_node(
                    locked,
                    &current_task,
                    "file1".into(),
                    mode!(IFREG, 0o666),
                    DeviceType::NONE,
                )
                .expect("create_node failed");
            ns.root()
                .entry
                .node
                .link(locked, &current_task, &ns.root().mount, "file2".into(), &node.entry.node)
                .expect("link failed");
        })
        .await;

        // Remount without reformatting so persisted state is visible.
        let fixture = TestFixture::open(
            fixture.close().await,
            TestFixtureOptions { format: false, ..Default::default() },
        )
        .await;

        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed");

        spawn_kernel_and_run(async move |locked, current_task| {
            let kernel = current_task.kernel();
            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
                rights,
            )
            .expect("new_fs failed");
            let ns = Namespace::new(fs);
            let mut context = LookupContext::new(SymlinkMode::NoFollow);
            let child1 = ns
                .root()
                .lookup_child(locked, &current_task, &mut context, "file1".into())
                .expect("lookup_child failed");
            let child2 = ns
                .root()
                .lookup_child(locked, &current_task, &mut context, "file2".into())
                .expect("lookup_child failed");
            // Hard links share one inode, so both lookups must return the
            // exact same node object.
            assert!(Arc::ptr_eq(&child1.entry.node, &child2.entry.node));
        })
        .await;
        fixture.close().await;
    }
2330
    /// Enables fsverity on a file, then remounts the fixture (dropping all
    /// caches) and verifies that looking the file up again succeeds.
    #[::fuchsia::test]
    async fn test_lookup_on_fsverity_enabled_file() {
        let fixture = TestFixture::new().await;
        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed");

        const MODE: FileMode = FileMode::from_bits(FileMode::IFREG.bits() | 0o467);

        spawn_kernel_and_run(async move |locked, current_task| {
            let kernel = current_task.kernel();
            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
                rights,
            )
            .expect("new_fs failed");
            let ns = Namespace::new(fs);
            current_task.fs().set_umask(FileMode::from_bits(0));
            let file = ns
                .root()
                .create_node(locked, &current_task, "file".into(), MODE, DeviceType::NONE)
                .expect("create_node failed");
            // Enable verity on the file.
            let desc = fsverity_descriptor {
                version: 1,
                hash_algorithm: 1,
                salt_size: 32,
                log_blocksize: 12,
                ..Default::default()
            };
            file.entry.node.ops().enable_fsverity(&desc).expect("enable fsverity failed");
        })
        .await;

        // Tear down the kernel and open the file again. The file should no longer be cached.
        // Test that lookup works as expected for an fsverity-enabled file.
        let fixture = TestFixture::open(
            fixture.close().await,
            TestFixtureOptions { format: false, ..Default::default() },
        )
        .await;
        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed");

        spawn_kernel_and_run(async move |locked, current_task| {
            let kernel = current_task.kernel();
            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
                rights,
            )
            .expect("new_fs failed");
            let ns = Namespace::new(fs);
            let mut context = LookupContext::new(SymlinkMode::NoFollow);
            let _child = ns
                .root()
                .lookup_child(locked, &current_task, &mut context, "file".into())
                .expect("lookup_child failed");
        })
        .await;
        fixture.close().await;
    }
2399
2400    #[::fuchsia::test]
2401    async fn test_update_attributes_persists() {
2402        let fixture = TestFixture::new().await;
2403        let (server, client) = zx::Channel::create();
2404        fixture.root().clone(server.into()).expect("clone failed");
2405
2406        const MODE: FileMode = FileMode::from_bits(FileMode::IFREG.bits() | 0o467);
2407
2408        spawn_kernel_and_run(async move |locked, current_task| {
2409            let kernel = current_task.kernel();
2410            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
2411            let fs = RemoteFs::new_fs(
2412                locked,
2413                &kernel,
2414                client,
2415                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
2416                rights,
2417            )
2418            .expect("new_fs failed");
2419            let ns = Namespace::new(fs);
2420            current_task.fs().set_umask(FileMode::from_bits(0));
2421            let file = ns
2422                .root()
2423                .create_node(locked, &current_task, "file".into(), MODE, DeviceType::NONE)
2424                .expect("create_node failed");
2425            // Change the mode, this change should persist
2426            file.entry
2427                .node
2428                .chmod(locked, &current_task, &file.mount, MODE | FileMode::ALLOW_ALL)
2429                .expect("chmod failed");
2430        })
2431        .await;
2432
2433        // Tear down the kernel and open the file again. Check that changes persisted.
2434        let fixture = TestFixture::open(
2435            fixture.close().await,
2436            TestFixtureOptions { format: false, ..Default::default() },
2437        )
2438        .await;
2439        let (server, client) = zx::Channel::create();
2440        fixture.root().clone(server.into()).expect("clone failed");
2441
2442        spawn_kernel_and_run(async move |locked, current_task| {
2443            let kernel = current_task.kernel();
2444            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
2445            let fs = RemoteFs::new_fs(
2446                locked,
2447                &kernel,
2448                client,
2449                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
2450                rights,
2451            )
2452            .expect("new_fs failed");
2453            let ns = Namespace::new(fs);
2454            let mut context = LookupContext::new(SymlinkMode::NoFollow);
2455            let child = ns
2456                .root()
2457                .lookup_child(locked, &current_task, &mut context, "file".into())
2458                .expect("lookup_child failed");
2459            assert_eq!(child.entry.node.info().mode, MODE | FileMode::ALLOW_ALL);
2460        })
2461        .await;
2462        fixture.close().await;
2463    }
2464
2465    #[::fuchsia::test]
2466    async fn test_statfs() {
2467        let fixture = TestFixture::new().await;
2468        let (server, client) = zx::Channel::create();
2469        fixture.root().clone(server.into()).expect("clone failed");
2470
2471        spawn_kernel_and_run(async move |locked, current_task| {
2472            let kernel = current_task.kernel();
2473            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
2474            let fs = RemoteFs::new_fs(
2475                locked,
2476                &kernel,
2477                client,
2478                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
2479                rights,
2480            )
2481            .expect("new_fs failed");
2482
2483            let statfs = fs.statfs(locked, &current_task).expect("statfs failed");
2484            assert!(statfs.f_type != 0);
2485            assert!(statfs.f_bsize > 0);
2486            assert!(statfs.f_blocks > 0);
2487            assert!(statfs.f_bfree > 0 && statfs.f_bfree <= statfs.f_blocks);
2488            assert!(statfs.f_files > 0);
2489            assert!(statfs.f_ffree > 0 && statfs.f_ffree <= statfs.f_files);
2490            assert!(statfs.f_fsid.val[0] != 0 || statfs.f_fsid.val[1] != 0);
2491            assert!(statfs.f_namelen > 0);
2492            assert!(statfs.f_frsize > 0);
2493        })
2494        .await;
2495
2496        fixture.close().await;
2497    }
2498
2499    #[::fuchsia::test]
2500    async fn test_allocate() {
2501        let fixture = TestFixture::new().await;
2502        let (server, client) = zx::Channel::create();
2503        fixture.root().clone(server.into()).expect("clone failed");
2504
2505        spawn_kernel_and_run(async move |locked, current_task| {
2506            let kernel = current_task.kernel();
2507            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
2508            let fs = RemoteFs::new_fs(
2509                locked,
2510                &kernel,
2511                client,
2512                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
2513                rights,
2514            )
2515            .expect("new_fs failed");
2516            let ns = Namespace::new(fs);
2517            current_task.fs().set_umask(FileMode::from_bits(0));
2518            let root = ns.root();
2519
2520            const REG_MODE: FileMode = FileMode::from_bits(FileMode::IFREG.bits());
2521            root.create_node(locked, &current_task, "file".into(), REG_MODE, DeviceType::NONE)
2522                .expect("create_node failed");
2523            let mut context = LookupContext::new(SymlinkMode::NoFollow);
2524            let reg_node = root
2525                .lookup_child(locked, &current_task, &mut context, "file".into())
2526                .expect("lookup_child failed");
2527
2528            reg_node
2529                .entry
2530                .node
2531                .fallocate(locked, &current_task, FallocMode::Allocate { keep_size: false }, 0, 20)
2532                .expect("truncate failed");
2533        })
2534        .await;
2535        fixture.close().await;
2536    }
2537
2538    #[::fuchsia::test]
2539    async fn test_allocate_overflow() {
2540        let fixture = TestFixture::new().await;
2541        let (server, client) = zx::Channel::create();
2542        fixture.root().clone(server.into()).expect("clone failed");
2543
2544        spawn_kernel_and_run(async move |locked, current_task| {
2545            let kernel = current_task.kernel();
2546            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
2547            let fs = RemoteFs::new_fs(
2548                locked,
2549                &kernel,
2550                client,
2551                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
2552                rights,
2553            )
2554            .expect("new_fs failed");
2555            let ns = Namespace::new(fs);
2556            current_task.fs().set_umask(FileMode::from_bits(0));
2557            let root = ns.root();
2558
2559            const REG_MODE: FileMode = FileMode::from_bits(FileMode::IFREG.bits());
2560            root.create_node(locked, &current_task, "file".into(), REG_MODE, DeviceType::NONE)
2561                .expect("create_node failed");
2562            let mut context = LookupContext::new(SymlinkMode::NoFollow);
2563            let reg_node = root
2564                .lookup_child(locked, &current_task, &mut context, "file".into())
2565                .expect("lookup_child failed");
2566
2567            reg_node
2568                .entry
2569                .node
2570                .fallocate(
2571                    locked,
2572                    &current_task,
2573                    FallocMode::Allocate { keep_size: false },
2574                    1,
2575                    u64::MAX,
2576                )
2577                .expect_err("truncate unexpectedly passed");
2578        })
2579        .await;
2580        fixture.close().await;
2581    }
2582
    #[::fuchsia::test]
    async fn test_time_modify_persists() {
        let fixture = TestFixture::new().await;
        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed");

        // Regular file with mode 0467.
        const MODE: FileMode = FileMode::from_bits(FileMode::IFREG.bits() | 0o467);

        let last_modified = spawn_kernel_and_run(async move |locked, current_task| {
            let kernel = current_task.kernel();
            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
                rights,
            )
            .expect("new_fs failed");
            let ns: Arc<Namespace> = Namespace::new(fs);
            // Clear the umask so the created node gets exactly MODE.
            current_task.fs().set_umask(FileMode::from_bits(0));
            let child = ns
                .root()
                .create_node(locked, &current_task, "file".into(), MODE, DeviceType::NONE)
                .expect("create_node failed");
            // Write to file (this should update mtime (time_modify))
            let file = child
                .open(locked, &current_task, OpenFlags::RDWR, AccessCheck::default())
                .expect("open failed");
            // Call `fetch_and_refresh_info(..)` to refresh `time_modify` with the time managed by the
            // underlying filesystem
            let time_before_write = child
                .entry
                .node
                .fetch_and_refresh_info(locked, &current_task)
                .expect("fetch_and_refresh_info failed")
                .time_modify;
            let write_bytes: [u8; 5] = [1, 2, 3, 4, 5];
            let written = file
                .write(locked, &current_task, &mut VecInputBuffer::new(&write_bytes))
                .expect("write failed");
            assert_eq!(written, write_bytes.len());
            // Refresh again after the write; mtime must have advanced.
            let last_modified = child
                .entry
                .node
                .fetch_and_refresh_info(locked, &current_task)
                .expect("fetch_and_refresh_info failed")
                .time_modify;
            assert!(last_modified > time_before_write);
            // Hand the observed mtime back to the outer test body for comparison
            // after the kernel is torn down.
            last_modified
        })
        .await;

        // Tear down the kernel and open the file again. Check that modification time is when we
        // last modified the contents of the file
        let fixture = TestFixture::open(
            fixture.close().await,
            TestFixtureOptions { format: false, ..Default::default() },
        )
        .await;
        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed");
        let refreshed_modified_time = spawn_kernel_and_run(async move |locked, current_task| {
            let kernel = current_task.kernel();
            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
                rights,
            )
            .expect("new_fs failed");
            let ns = Namespace::new(fs);
            let mut context = LookupContext::new(SymlinkMode::NoFollow);
            let child = ns
                .root()
                .lookup_child(locked, &current_task, &mut context, "file".into())
                .expect("lookup_child failed");
            // Freshly-looked-up node: any mtime it reports must come from storage.
            let last_modified = child
                .entry
                .node
                .fetch_and_refresh_info(locked, &current_task)
                .expect("fetch_and_refresh_info failed")
                .time_modify;
            last_modified
        })
        .await;
        // The mtime recorded before the restart must survive the remount.
        assert_eq!(last_modified, refreshed_modified_time);

        fixture.close().await;
    }
2675
    #[::fuchsia::test]
    async fn test_update_atime_mtime() {
        let fixture = TestFixture::new().await;
        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed");

        // Regular file with mode 0467.
        const MODE: FileMode = FileMode::from_bits(FileMode::IFREG.bits() | 0o467);

        spawn_kernel_and_run(async move |locked, current_task| {
            let kernel = current_task.kernel();
            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
                rights,
            )
            .expect("new_fs failed");
            let ns: Arc<Namespace> = Namespace::new(fs);
            // Clear the umask so the created node gets exactly MODE.
            current_task.fs().set_umask(FileMode::from_bits(0));
            let child = ns
                .root()
                .create_node(locked, &current_task, "file".into(), MODE, DeviceType::NONE)
                .expect("create_node failed");

            // Baseline attributes before any explicit timestamp updates.
            let info_original = child
                .entry
                .node
                .fetch_and_refresh_info(locked, &current_task)
                .expect("fetch_and_refresh_info failed")
                .clone();

            // Set only atime (mtime is `Omit`); mtime must be left untouched.
            child
                .entry
                .node
                .update_atime_mtime(
                    locked,
                    &current_task,
                    &child.mount,
                    TimeUpdateType::Time(UtcInstant::from_nanos(30)),
                    TimeUpdateType::Omit,
                )
                .expect("update_atime_mtime failed");
            let info_after_update = child
                .entry
                .node
                .fetch_and_refresh_info(locked, &current_task)
                .expect("fetch_and_refresh_info failed")
                .clone();
            assert_eq!(info_after_update.time_modify, info_original.time_modify);
            assert_eq!(info_after_update.time_access, UtcInstant::from_nanos(30));

            // Set only mtime (atime is `Omit`); atime must keep the value set above.
            child
                .entry
                .node
                .update_atime_mtime(
                    locked,
                    &current_task,
                    &child.mount,
                    TimeUpdateType::Omit,
                    TimeUpdateType::Time(UtcInstant::from_nanos(50)),
                )
                .expect("update_atime_mtime failed");
            let info_after_update2 = child
                .entry
                .node
                .fetch_and_refresh_info(locked, &current_task)
                .expect("fetch_and_refresh_info failed")
                .clone();
            assert_eq!(info_after_update2.time_modify, UtcInstant::from_nanos(50));
            assert_eq!(info_after_update2.time_access, UtcInstant::from_nanos(30));
        })
        .await;
        fixture.close().await;
    }
2752
    #[::fuchsia::test]
    async fn test_write_updates_mtime_ctime() {
        let fixture = TestFixture::new().await;
        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed");

        // Regular file with mode 0467.
        const MODE: FileMode = FileMode::from_bits(FileMode::IFREG.bits() | 0o467);

        spawn_kernel_and_run(async move |locked, current_task| {
            let kernel = current_task.kernel();
            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
                rights,
            )
            .expect("new_fs failed");
            let ns: Arc<Namespace> = Namespace::new(fs);
            // Clear the umask so the created node gets exactly MODE.
            current_task.fs().set_umask(FileMode::from_bits(0));
            let child = ns
                .root()
                .create_node(locked, &current_task, "file".into(), MODE, DeviceType::NONE)
                .expect("create_node failed");
            let file = child
                .open(locked, &current_task, OpenFlags::RDWR, AccessCheck::default())
                .expect("open failed");
            // Call `fetch_and_refresh_info(..)` to refresh ctime and mtime with the time managed by the
            // underlying filesystem
            let (ctime_before_write, mtime_before_write) = {
                let info = child
                    .entry
                    .node
                    .fetch_and_refresh_info(locked, &current_task)
                    .expect("fetch_and_refresh_info failed");
                (info.time_status_change, info.time_modify)
            };

            // Writing to a file should update ctime and mtime
            let write_bytes: [u8; 5] = [1, 2, 3, 4, 5];
            let written = file
                .write(locked, &current_task, &mut VecInputBuffer::new(&write_bytes))
                .expect("write failed");
            assert_eq!(written, write_bytes.len());

            // As Fxfs, the underlying filesystem in this test, can manage file timestamps,
            // we should not see an update in mtime and ctime without first refreshing the node with
            // the metadata from Fxfs.
            // Note: `info()` reads the cached node info without contacting the remote fs.
            let (ctime_after_write_no_refresh, mtime_after_write_no_refresh) = {
                let info = child.entry.node.info();
                (info.time_status_change, info.time_modify)
            };
            assert_eq!(ctime_after_write_no_refresh, ctime_before_write);
            assert_eq!(mtime_after_write_no_refresh, mtime_before_write);

            // Refresh information, we should see `info` with mtime and ctime from the remote
            // filesystem (assume this is true if the new timestamp values are greater than the ones
            // without the refresh).
            let (ctime_after_write_refresh, mtime_after_write_refresh) = {
                let info = child
                    .entry
                    .node
                    .fetch_and_refresh_info(locked, &current_task)
                    .expect("fetch_and_refresh_info failed");
                (info.time_status_change, info.time_modify)
            };
            assert_eq!(ctime_after_write_refresh, mtime_after_write_refresh);
            assert!(ctime_after_write_refresh > ctime_after_write_no_refresh);
        })
        .await;
        fixture.close().await;
    }
2826
2827    #[::fuchsia::test]
2828    async fn test_casefold_persists() {
2829        let fixture = TestFixture::new().await;
2830        let (server, client) = zx::Channel::create();
2831        fixture.root().clone(server.into()).expect("clone failed");
2832
2833        spawn_kernel_and_run(async move |locked, current_task| {
2834            let kernel = current_task.kernel();
2835            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
2836            let fs = RemoteFs::new_fs(
2837                locked,
2838                &kernel,
2839                client,
2840                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
2841                rights,
2842            )
2843            .expect("new_fs failed");
2844            let ns: Arc<Namespace> = Namespace::new(fs);
2845            let child = ns
2846                .root()
2847                .create_node(
2848                    locked,
2849                    &current_task,
2850                    "dir".into(),
2851                    FileMode::ALLOW_ALL.with_type(FileMode::IFDIR),
2852                    DeviceType::NONE,
2853                )
2854                .expect("create_node failed");
2855            child
2856                .entry
2857                .node
2858                .update_attributes(locked, &current_task, |info| {
2859                    info.casefold = true;
2860                    Ok(())
2861                })
2862                .expect("enable casefold")
2863        })
2864        .await;
2865
2866        // Tear down the kernel and open the dir again. Check that casefold is preserved.
2867        let fixture = TestFixture::open(
2868            fixture.close().await,
2869            TestFixtureOptions { format: false, ..Default::default() },
2870        )
2871        .await;
2872        let (server, client) = zx::Channel::create();
2873        fixture.root().clone(server.into()).expect("clone failed");
2874        let casefold = spawn_kernel_and_run(async move |locked, current_task| {
2875            let kernel = current_task.kernel();
2876            let rights = fio::PERM_READABLE | fio::PERM_WRITABLE;
2877            let fs = RemoteFs::new_fs(
2878                locked,
2879                &kernel,
2880                client,
2881                FileSystemOptions { source: FlyByteStr::new(b"/"), ..Default::default() },
2882                rights,
2883            )
2884            .expect("new_fs failed");
2885            let ns = Namespace::new(fs);
2886            let mut context = LookupContext::new(SymlinkMode::NoFollow);
2887            let child = ns
2888                .root()
2889                .lookup_child(locked, &current_task, &mut context, "dir".into())
2890                .expect("lookup_child failed");
2891            let casefold = child
2892                .entry
2893                .node
2894                .fetch_and_refresh_info(locked, &current_task)
2895                .expect("fetch_and_refresh_info failed")
2896                .casefold;
2897            casefold
2898        })
2899        .await;
2900        assert!(casefold);
2901
2902        fixture.close().await;
2903    }
2904
2905    #[::fuchsia::test]
2906    async fn test_update_time_access_persists() {
2907        const TEST_FILE: &str = "test_file";
2908
2909        let fixture = TestFixture::new().await;
2910        let (server, client) = zx::Channel::create();
2911        fixture.root().clone(server.into()).expect("clone failed");
2912        // Set up file.
2913        let info_after_read = spawn_kernel_and_run(async move |locked, current_task| {
2914            let kernel = current_task.kernel();
2915            let fs = RemoteFs::new_fs(
2916                locked,
2917                &kernel,
2918                client,
2919                FileSystemOptions {
2920                    source: FlyByteStr::new(b"/"),
2921                    flags: MountFlags::RELATIME,
2922                    ..Default::default()
2923                },
2924                fio::PERM_READABLE | fio::PERM_WRITABLE,
2925            )
2926            .expect("new_fs failed");
2927            let ns = Namespace::new_with_flags(fs, MountFlags::RELATIME);
2928            let child = ns
2929                .root()
2930                .open_create_node(
2931                    locked,
2932                    &current_task,
2933                    TEST_FILE.into(),
2934                    FileMode::ALLOW_ALL.with_type(FileMode::IFREG),
2935                    DeviceType::NONE,
2936                    OpenFlags::empty(),
2937                )
2938                .expect("create_node failed");
2939
2940            let file_handle = child
2941                .open(locked, &current_task, OpenFlags::RDWR, AccessCheck::default())
2942                .expect("open failed");
2943
2944            // Expect atime to be updated as this is the first file access since the
2945            // last file modification or status change.
2946            file_handle
2947                .read(locked, &current_task, &mut VecOutputBuffer::new(10))
2948                .expect("read failed");
2949
2950            // Call `fetch_and_refresh_info` to persist atime update.
2951            let info_after_read = child
2952                .entry
2953                .node
2954                .fetch_and_refresh_info(locked, &current_task)
2955                .expect("fetch_and_refresh_info failed")
2956                .clone();
2957
2958            info_after_read
2959        })
2960        .await;
2961
2962        // Tear down the kernel and open the file again. The file should no longer be cached.
2963        let fixture = TestFixture::open(
2964            fixture.close().await,
2965            TestFixtureOptions { format: false, ..Default::default() },
2966        )
2967        .await;
2968
2969        let (server, client) = zx::Channel::create();
2970        fixture.root().clone(server.into()).expect("clone failed");
2971
2972        spawn_kernel_and_run(async move |locked, current_task| {
2973            let kernel = current_task.kernel();
2974            let fs = RemoteFs::new_fs(
2975                locked,
2976                &kernel,
2977                client,
2978                FileSystemOptions {
2979                    source: FlyByteStr::new(b"/"),
2980                    flags: MountFlags::RELATIME,
2981                    ..Default::default()
2982                },
2983                fio::PERM_READABLE | fio::PERM_WRITABLE,
2984            )
2985            .expect("new_fs failed");
2986            let ns = Namespace::new_with_flags(fs, MountFlags::RELATIME);
2987            let mut context = LookupContext::new(SymlinkMode::NoFollow);
2988            let child = ns
2989                .root()
2990                .lookup_child(locked, &current_task, &mut context, TEST_FILE.into())
2991                .expect("lookup_child failed");
2992
2993            // Get info - this should be refreshed with info that was persisted before
2994            // we tore down the kernel.
2995            let persisted_info = child
2996                .entry
2997                .node
2998                .fetch_and_refresh_info(locked, &current_task)
2999                .expect("fetch_and_refresh_info failed")
3000                .clone();
3001            assert_eq!(info_after_read.time_access, persisted_info.time_access);
3002        })
3003        .await;
3004        fixture.close().await;
3005    }
3006
    #[::fuchsia::test]
    async fn test_pending_access_time_updates() {
        const TEST_FILE: &str = "test_file";

        let fixture = TestFixture::new().await;
        let (server, client) = zx::Channel::create();
        fixture.root().clone(server.into()).expect("clone failed");

        spawn_kernel_and_run(async move |locked, current_task| {
            let kernel = current_task.kernel.clone();
            // Mount with RELATIME: atime is only updated when it is older than
            // mtime/ctime (or on the first access after a modification).
            let fs = RemoteFs::new_fs(
                locked,
                &kernel,
                client,
                FileSystemOptions {
                    source: FlyByteStr::new(b"/"),
                    flags: MountFlags::RELATIME,
                    ..Default::default()
                },
                fio::PERM_READABLE | fio::PERM_WRITABLE,
            )
            .expect("new_fs failed");

            let ns = Namespace::new_with_flags(fs, MountFlags::RELATIME);
            let child = ns
                .root()
                .open_create_node(
                    locked,
                    &current_task,
                    TEST_FILE.into(),
                    FileMode::ALLOW_ALL.with_type(FileMode::IFREG),
                    DeviceType::NONE,
                    OpenFlags::empty(),
                )
                .expect("create_node failed");

            let file_handle = child
                .open(locked, &current_task, OpenFlags::RDWR, AccessCheck::default())
                .expect("open failed");

            // Expect atime to be updated as this is the first file access since the last
            // file modification or status change.
            file_handle
                .read(locked, &current_task, &mut VecOutputBuffer::new(10))
                .expect("read failed");

            let atime_after_first_read = child
                .entry
                .node
                .fetch_and_refresh_info(locked, &current_task)
                .expect("fetch_and_refresh_info failed")
                .time_access;

            // Read again. Under RELATIME a second read with no intervening
            // modification or status change must not update atime.
            file_handle
                .read(locked, &current_task, &mut VecOutputBuffer::new(10))
                .expect("read failed");

            let atime_after_second_read = child
                .entry
                .node
                .fetch_and_refresh_info(locked, &current_task)
                .expect("fetch_and_refresh_info failed")
                .time_access;
            assert_eq!(atime_after_first_read, atime_after_second_read);

            // Do another operation that will update ctime and/or mtime but not atime.
            let write_bytes: [u8; 5] = [1, 2, 3, 4, 5];
            let _written = file_handle
                .write(locked, &current_task, &mut VecInputBuffer::new(&write_bytes))
                .expect("write failed");

            // Read again (atime should be updated because mtime/ctime now exceed it).
            file_handle
                .read(locked, &current_task, &mut VecOutputBuffer::new(10))
                .expect("read failed");

            assert!(
                atime_after_second_read
                    < child
                        .entry
                        .node
                        .fetch_and_refresh_info(locked, &current_task)
                        .expect("fetch_and_refresh_info failed")
                        .time_access
            );
        })
        .await;
        fixture.close().await;
    }
3098}