fs_management/
filesystem.rs

1// Copyright 2021 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5//! Contains the asynchronous version of [`Filesystem`][`crate::Filesystem`].
6
7use crate::error::{QueryError, ShutdownError};
8use crate::{ComponentType, FSConfig, Options};
9use anyhow::{Context, Error, anyhow, bail, ensure};
10use fidl::endpoints::{ClientEnd, ServerEnd, create_endpoints, create_proxy};
11use fidl_fuchsia_component::{self as fcomponent, RealmMarker};
12use fidl_fuchsia_fs::AdminMarker;
13use fidl_fuchsia_fs_startup::{CheckOptions, CreateOptions, MountOptions, StartupMarker};
14use fidl_fuchsia_hardware_block_volume::VolumeMarker;
15use fuchsia_component_client::{
16    connect_to_named_protocol_at_dir_root, connect_to_protocol, connect_to_protocol_at_dir_root,
17    connect_to_protocol_at_dir_svc, open_childs_exposed_directory,
18};
19use std::sync::Arc;
20use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
21use zx::{self as zx, AsHandleRef as _, Status};
22use {fidl_fuchsia_component_decl as fdecl, fidl_fuchsia_io as fio};
23
24/// Creates new connections to an instance of fuchsia.hardware.block.Block and similar protocols
25/// (Volume, Partition).
26///
27/// NOTE: It is important to understand the difference between `BlockConnector` and the actual
28/// protocols (e.g. a `ClientEnd<BlockMarker>` or `BlockProxy`): `BlockConnector` is used to *create
29/// new connections* to a Block.
30///
31/// It is not possible to directly convert a `ClientEnd<BlockMarker>` (or `BlockProxy`) into a
32/// `BlockConnector`, because Block is not cloneable.  To implement `BlockConnector`, you will need
33/// a way to generate new connections to a Block instance.  A few common implementations are
34/// provided below.
pub trait BlockConnector: Send + Sync {
    /// Routes `server_end` to a new connection to the underlying Volume instance.
    fn connect_channel_to_volume(&self, server_end: ServerEnd<VolumeMarker>) -> Result<(), Error>;
    /// Creates a new connection to the Volume instance, returning the client end.
    fn connect_volume(&self) -> Result<ClientEnd<VolumeMarker>, Error> {
        let (client, server) = fidl::endpoints::create_endpoints();
        self.connect_channel_to_volume(server)?;
        Ok(client)
    }
    /// Creates a new connection, exposed as the Partition protocol.  The channel is simply
    /// reinterpreted; this relies on Partition being composed into the Volume protocol.
    fn connect_partition(
        &self,
    ) -> Result<ClientEnd<fidl_fuchsia_hardware_block_partition::PartitionMarker>, Error> {
        self.connect_volume().map(|v| ClientEnd::new(v.into_channel()))
    }
    /// Creates a new connection, exposed as the Block protocol.  The channel is simply
    /// reinterpreted; this relies on Block being composed into the Volume protocol.
    fn connect_block(&self) -> Result<ClientEnd<fidl_fuchsia_hardware_block::BlockMarker>, Error> {
        self.connect_volume().map(|v| ClientEnd::new(v.into_channel()))
    }
}
51
/// Implements `BlockConnector` via a service dir.  Wraps `connect_to_named_protocol_at_dir_root`.
///
/// Field 0 is the service directory; field 1 is the path of the protocol within that directory.
#[derive(Clone, Debug)]
pub struct DirBasedBlockConnector(fio::DirectoryProxy, String);
55
56impl DirBasedBlockConnector {
57    pub fn new(dir: fio::DirectoryProxy, path: String) -> Self {
58        Self(dir, path)
59    }
60
61    pub fn path(&self) -> &str {
62        &self.1
63    }
64}
65
impl BlockConnector for DirBasedBlockConnector {
    fn connect_channel_to_volume(&self, server_end: ServerEnd<VolumeMarker>) -> Result<(), Error> {
        // Open the configured path as a service connection (not a file/directory node).
        self.0.open(
            self.path(),
            fio::Flags::PROTOCOL_SERVICE,
            &fio::Options::default(),
            server_end.into_channel(),
        )?;
        Ok(())
    }
}
77
impl BlockConnector for fidl_fuchsia_device::ControllerProxy {
    fn connect_channel_to_volume(&self, server_end: ServerEnd<VolumeMarker>) -> Result<(), Error> {
        // The device controller routes the channel to the device's FIDL protocol.
        let () = self.connect_to_device_fidl(server_end.into_channel())?;
        Ok(())
    }
}
84
impl BlockConnector for fidl_fuchsia_storage_partitions::PartitionServiceProxy {
    fn connect_channel_to_volume(&self, server_end: ServerEnd<VolumeMarker>) -> Result<(), Error> {
        // NOTE: Not recursion — this resolves to the inherent FIDL-generated
        // `connect_channel_to_volume` method on the service proxy, which takes precedence over
        // this trait method.
        self.connect_channel_to_volume(server_end)?;
        Ok(())
    }
}
91
92// NB: We have to be specific here; we cannot do a blanket impl for AsRef<T: BlockConnector> because
93// that would conflict with a downstream crate that implements AsRef for a concrete BlockConnector
94// defined here already.
95impl<T: BlockConnector> BlockConnector for Arc<T> {
96    fn connect_channel_to_volume(&self, server_end: ServerEnd<VolumeMarker>) -> Result<(), Error> {
97        self.as_ref().connect_channel_to_volume(server_end)
98    }
99}
100
// Allows any compatible closure to be used as a `BlockConnector` directly.
impl<F> BlockConnector for F
where
    F: Fn(ServerEnd<VolumeMarker>) -> Result<(), Error> + Send + Sync,
{
    fn connect_channel_to_volume(&self, server_end: ServerEnd<VolumeMarker>) -> Result<(), Error> {
        self(server_end)
    }
}
109
/// Asynchronously manages a block device for filesystem operations.
pub struct Filesystem {
    /// The filesystem struct keeps the FSConfig in a Box<dyn> instead of holding it directly for
    /// code size reasons. Using a type parameter instead would make monomorphized versions of the
    /// Filesystem impl block for each filesystem type, which duplicates several multi-kilobyte
    /// functions (get_component_exposed_dir and serve in particular) that are otherwise quite
    /// generic over config. Clients that want to be generic over filesystem type also pay the
    /// monomorphization cost, with some, like fshost, paying a lot.
    config: Box<dyn FSConfig>,
    /// Used to create new connections to the underlying block device.
    block_connector: Box<dyn BlockConnector>,
    /// The dynamically launched filesystem component, if one has been created.
    component: Option<Arc<DynamicComponentInstance>>,
}
122
// Used to disambiguate children in our component collection.  Incremented once per dynamic child
// created by this process; combined with the process koid to form a unique child name.
static COLLECTION_COUNTER: AtomicU64 = AtomicU64::new(0);
125
impl Filesystem {
    /// Returns a reference to the filesystem configuration.
    pub fn config(&self) -> &dyn FSConfig {
        self.config.as_ref()
    }

    /// Consumes this `Filesystem`, returning its boxed configuration.
    pub fn into_config(self) -> Box<dyn FSConfig> {
        self.config
    }

    /// Creates a new `Filesystem`.
    pub fn new<B: BlockConnector + 'static, FSC: FSConfig>(
        block_connector: B,
        config: FSC,
    ) -> Self {
        Self::from_boxed_config(Box::new(block_connector), Box::new(config))
    }

    /// Creates a new `Filesystem`.
    pub fn from_boxed_config(
        block_connector: Box<dyn BlockConnector>,
        config: Box<dyn FSConfig>,
    ) -> Self {
        Self { config, block_connector, component: None }
    }

    /// If the filesystem is a currently running component, returns its (relative) moniker.
    pub fn get_component_moniker(&self) -> Option<String> {
        Some(match self.config.options().component_type {
            ComponentType::StaticChild => self.config.options().component_name.to_string(),
            ComponentType::DynamicChild { .. } => {
                // Dynamic children only have a moniker once they have been launched.
                let component = self.component.as_ref()?;
                format!("{}:{}", component.collection, component.name)
            }
        })
    }

    /// Opens the exposed directory of the filesystem component, launching a dynamic child on
    /// first use (the launched child is cached in `self.component` and reused afterwards).
    async fn get_component_exposed_dir(&mut self) -> Result<fio::DirectoryProxy, Error> {
        let options = self.config.options();
        let component_name = options.component_name;
        match options.component_type {
            ComponentType::StaticChild => open_childs_exposed_directory(component_name, None).await,
            ComponentType::DynamicChild { collection_name } => {
                // Reuse the previously launched child, if any.
                if let Some(component) = &self.component {
                    return open_childs_exposed_directory(
                        component.name.clone(),
                        Some(component.collection.clone()),
                    )
                    .await;
                }

                // We need a unique name, so we pull in the process Koid here since it's possible
                // for the same binary in a component to be launched multiple times and we don't
                // want to collide with children created by other processes.
                let name = format!(
                    "{}-{}-{}",
                    component_name,
                    fuchsia_runtime::process_self().get_koid().unwrap().raw_koid(),
                    COLLECTION_COUNTER.fetch_add(1, Ordering::Relaxed)
                );

                let collection_ref = fdecl::CollectionRef { name: collection_name };
                // Two candidate component URLs, tried in order: a relative URL (resolved against
                // the current package) and an absolute fuchsia-boot URL.
                let child_decls = vec![
                    fdecl::Child {
                        name: Some(format!("{}-relative", name)),
                        url: Some(format!("#meta/{}.cm", component_name)),
                        startup: Some(fdecl::StartupMode::Lazy),
                        ..Default::default()
                    },
                    fdecl::Child {
                        name: Some(name),
                        url: Some(format!(
                            "fuchsia-boot:///{}#meta/{}.cm",
                            component_name, component_name
                        )),
                        startup: Some(fdecl::StartupMode::Lazy),
                        ..Default::default()
                    },
                ];
                let realm_proxy = connect_to_protocol::<RealmMarker>()?;
                for child_decl in child_decls {
                    // Launch a new component in our collection.
                    realm_proxy
                        .create_child(
                            &collection_ref,
                            &child_decl,
                            fcomponent::CreateChildArgs::default(),
                        )
                        .await?
                        .map_err(|e| anyhow!("create_child failed: {:?}", e))?;

                    let component = Arc::new(DynamicComponentInstance {
                        name: child_decl.name.unwrap(),
                        collection: collection_ref.name.clone(),
                        should_not_drop: AtomicBool::new(false),
                    });

                    // On failure, `component` is dropped here, which destroys the child we just
                    // created, and we fall through to the next candidate URL.
                    if let Ok(proxy) = open_childs_exposed_directory(
                        component.name.clone(),
                        Some(component.collection.clone()),
                    )
                    .await
                    {
                        self.component = Some(component);
                        return Ok(proxy);
                    }
                }
                Err(anyhow!("Failed to open exposed directory"))
            }
        }
    }

    /// Calls fuchsia.fs.startup/Startup.Format on the configured filesystem component.
    ///
    /// Which component is used and the options passed to it are controlled by the config this
    /// `Filesystem` was created with.
    ///
    /// See [`FSConfig`].
    ///
    /// # Errors
    ///
    /// Returns any errors from the Format method. Also returns an error if the startup protocol is
    /// not found, if it couldn't launch or find the filesystem component, or if it couldn't get
    /// the block device channel.
    pub async fn format(&mut self) -> Result<(), Error> {
        let channel = self.block_connector.connect_block()?;

        let exposed_dir = self.get_component_exposed_dir().await?;
        let proxy = connect_to_protocol_at_dir_root::<StartupMarker>(&exposed_dir)?;
        proxy
            .format(channel, &self.config().options().format_options)
            .await?
            .map_err(Status::from_raw)?;

        Ok(())
    }

    /// Calls fuchsia.fs.startup/Startup.Check on the configured filesystem component.
    ///
    /// Which component is used and the options passed to it are controlled by the config this
    /// `Filesystem` was created with.
    ///
    /// See [`FSConfig`].
    ///
    /// # Errors
    ///
    /// Returns any errors from the Check method. Also returns an error if the startup protocol is
    /// not found, if it couldn't launch or find the filesystem component, or if it couldn't get
    /// the block device channel.
    pub async fn fsck(&mut self) -> Result<(), Error> {
        let channel = self.block_connector.connect_block()?;
        let exposed_dir = self.get_component_exposed_dir().await?;
        let proxy = connect_to_protocol_at_dir_root::<StartupMarker>(&exposed_dir)?;
        proxy.check(channel, CheckOptions::default()).await?.map_err(Status::from_raw)?;
        Ok(())
    }

    /// Serves the filesystem on the block device and returns a [`ServingSingleVolumeFilesystem`]
    /// representing the running filesystem component.
    ///
    /// # Errors
    ///
    /// Returns [`Err`] if serving the filesystem failed.
    pub async fn serve(&mut self) -> Result<ServingSingleVolumeFilesystem, Error> {
        if self.config.is_multi_volume() {
            bail!("Can't serve a multivolume filesystem; use serve_multi_volume");
        }
        let Options { start_options, reuse_component_after_serving, .. } = self.config.options();

        let exposed_dir = self.get_component_exposed_dir().await?;
        let proxy = connect_to_protocol_at_dir_root::<StartupMarker>(&exposed_dir)?;
        proxy
            .start(self.block_connector.connect_block()?, &start_options)
            .await?
            .map_err(Status::from_raw)?;

        // Open the filesystem's root from the exposed dir; rights are inherited from the
        // connection rather than requested explicitly.
        let (root_dir, server_end) = create_endpoints::<fio::NodeMarker>();
        exposed_dir.open(
            "root",
            fio::PERM_READABLE | fio::Flags::PERM_INHERIT_WRITE | fio::Flags::PERM_INHERIT_EXECUTE,
            &Default::default(),
            server_end.into_channel(),
        )?;
        let component = self.component.clone();
        if !reuse_component_after_serving {
            // Give ownership of the dynamic child to the serving instance; a future serve will
            // launch a fresh component.
            self.component = None;
        }
        Ok(ServingSingleVolumeFilesystem {
            component,
            exposed_dir: Some(exposed_dir),
            root_dir: ClientEnd::<fio::DirectoryMarker>::new(root_dir.into_channel()).into_proxy(),
            binding: None,
        })
    }

    /// Serves the filesystem on the block device and returns a [`ServingMultiVolumeFilesystem`]
    /// representing the running filesystem component.  No volumes are opened; clients have to do
    /// that explicitly.
    ///
    /// # Errors
    ///
    /// Returns [`Err`] if serving the filesystem failed.
    pub async fn serve_multi_volume(&mut self) -> Result<ServingMultiVolumeFilesystem, Error> {
        if !self.config.is_multi_volume() {
            bail!("Can't serve_multi_volume a single-volume filesystem; use serve");
        }

        let exposed_dir = self.get_component_exposed_dir().await?;
        let proxy = connect_to_protocol_at_dir_root::<StartupMarker>(&exposed_dir)?;
        proxy
            .start(self.block_connector.connect_block()?, &self.config.options().start_options)
            .await?
            .map_err(Status::from_raw)?;

        Ok(ServingMultiVolumeFilesystem {
            component: self.component.clone(),
            exposed_dir: Some(exposed_dir),
        })
    }
}
345
// Destroys the child when dropped.
struct DynamicComponentInstance {
    name: String,
    collection: String,
    // When set (via `forget`), the Drop impl skips destroying the child.
    should_not_drop: AtomicBool,
}
352
impl DynamicComponentInstance {
    /// Relinquishes ownership: the child component will not be destroyed when this is dropped.
    fn forget(&self) {
        self.should_not_drop.store(true, Ordering::Relaxed);
    }
}
358
impl Drop for DynamicComponentInstance {
    fn drop(&mut self) {
        if self.should_not_drop.load(Ordering::Relaxed) {
            return;
        }
        // Best-effort: the destroy_child response future is dropped without being awaited
        // (Drop cannot await), so failures here are ignored.
        if let Ok(realm_proxy) = connect_to_protocol::<RealmMarker>() {
            let _ = realm_proxy.destroy_child(&fdecl::ChildRef {
                name: self.name.clone(),
                collection: Some(self.collection.clone()),
            });
        }
    }
}
372
/// Manages the binding of a `fuchsia_io::DirectoryProxy` into the local namespace.  When the object
/// is dropped, the binding is removed.
///
/// The inner `String` is the absolute namespace path that was bound.
#[derive(Default)]
pub struct NamespaceBinding(String);
377
impl NamespaceBinding {
    /// Binds a clone of `root_dir` at `path` in this process's namespace.  The binding is removed
    /// when the returned object is dropped.
    pub fn create(root_dir: &fio::DirectoryProxy, path: String) -> Result<NamespaceBinding, Error> {
        // Clone the directory connection so the caller keeps their proxy.
        let (client_end, server_end) = create_endpoints();
        root_dir.clone(ServerEnd::new(server_end.into_channel()))?;
        let namespace = fdio::Namespace::installed()?;
        namespace.bind(&path, client_end)?;
        Ok(Self(path))
    }
}
387
388impl std::ops::Deref for NamespaceBinding {
389    type Target = str;
390    fn deref(&self) -> &Self::Target {
391        &self.0
392    }
393}
394
impl Drop for NamespaceBinding {
    fn drop(&mut self) {
        // Best-effort unbind; errors are ignored since Drop cannot propagate them.
        if let Ok(namespace) = fdio::Namespace::installed() {
            let _ = namespace.unbind(&self.0);
        }
    }
}
402
// TODO(https://fxbug.dev/42174810): Soft migration; remove this after completion
/// Deprecated alias for [`ServingSingleVolumeFilesystem`]; prefer the new name in new code.
pub type ServingFilesystem = ServingSingleVolumeFilesystem;
405
/// Asynchronously manages a serving filesystem. Created from [`Filesystem::serve()`].
pub struct ServingSingleVolumeFilesystem {
    // Owns the dynamic child (if any); dropping it destroys the component unless forgotten.
    component: Option<Arc<DynamicComponentInstance>>,
    // exposed_dir will always be Some, except when the filesystem is shutting down.
    exposed_dir: Option<fio::DirectoryProxy>,
    root_dir: fio::DirectoryProxy,

    // The path in the local namespace that this filesystem is bound to (optional).
    binding: Option<NamespaceBinding>,
}
416
impl ServingSingleVolumeFilesystem {
    /// Returns a proxy to the exposed directory of the serving filesystem.
    pub fn exposed_dir(&self) -> &fio::DirectoryProxy {
        self.exposed_dir.as_ref().unwrap()
    }

    /// Returns a proxy to the root directory of the serving filesystem.
    pub fn root(&self) -> &fio::DirectoryProxy {
        &self.root_dir
    }

    /// Binds the root directory being served by this filesystem to a path in the local namespace.
    /// The path must be absolute, containing no "." nor ".." entries.  The binding will be dropped
    /// when self is dropped.  Only one binding is supported.
    ///
    /// # Errors
    ///
    /// Returns [`Err`] if binding failed.
    pub fn bind_to_path(&mut self, path: &str) -> Result<(), Error> {
        ensure!(self.binding.is_none(), "Already bound");
        self.binding = Some(NamespaceBinding::create(&self.root_dir, path.to_string())?);
        Ok(())
    }

    /// Returns the local namespace path the root directory is bound to, if any.
    pub fn bound_path(&self) -> Option<&str> {
        self.binding.as_deref()
    }

    /// Returns a [`FilesystemInfo`] object containing information about the serving filesystem.
    ///
    /// # Errors
    ///
    /// Returns [`Err`] if querying the filesystem failed.
    pub async fn query(&self) -> Result<Box<fio::FilesystemInfo>, QueryError> {
        let (status, info) = self.root_dir.query_filesystem().await?;
        Status::ok(status).map_err(QueryError::DirectoryQuery)?;
        info.ok_or(QueryError::DirectoryEmptyResult)
    }

    /// Take the exposed dir from this filesystem instance, dropping the management struct without
    /// shutting the filesystem down. This leaves the caller with the responsibility of shutting
    /// down the filesystem, and the filesystem component if necessary.
    pub fn take_exposed_dir(mut self) -> fio::DirectoryProxy {
        // `forget` keeps the dynamic child alive after `self.component` is dropped.
        self.component.take().expect("BUG: component missing").forget();
        self.exposed_dir.take().expect("BUG: exposed dir missing")
    }

    /// Attempts to shutdown the filesystem using the
    /// [`fidl_fuchsia_fs::AdminProxy::shutdown()`] FIDL method and waiting for the filesystem
    /// process to terminate.
    ///
    /// # Errors
    ///
    /// Returns [`Err`] if the shutdown failed or the filesystem process did not terminate.
    pub async fn shutdown(mut self) -> Result<(), ShutdownError> {
        // Take exposed_dir so the Drop impl does not attempt a second shutdown.
        connect_to_protocol_at_dir_root::<fidl_fuchsia_fs::AdminMarker>(
            &self.exposed_dir.take().expect("BUG: exposed dir missing"),
        )?
        .shutdown()
        .await?;
        Ok(())
    }

    /// Attempts to kill the filesystem process and waits for the process to terminate.
    ///
    /// # Errors
    ///
    /// Returns [`Err`] if the filesystem process could not be terminated. There is no way to
    /// recover the [`Filesystem`] from this error.
    pub async fn kill(self) -> Result<(), Error> {
        // For components, just shut down the filesystem.
        // TODO(https://fxbug.dev/293949323): Figure out a way to make this more abrupt - the use-cases are
        // either testing or when the filesystem isn't responding.
        self.shutdown().await?;
        Ok(())
    }
}
494
impl Drop for ServingSingleVolumeFilesystem {
    fn drop(&mut self) {
        // Make a best effort attempt to shut down to the filesystem, if we need to.
        // Note: the shutdown response future is dropped without being awaited (Drop cannot
        // await), so this only sends the request.
        if let Some(exposed_dir) = self.exposed_dir.take() {
            if let Ok(proxy) =
                connect_to_protocol_at_dir_root::<fidl_fuchsia_fs::AdminMarker>(&exposed_dir)
            {
                let _ = proxy.shutdown();
            }
        }
    }
}
507
/// Asynchronously manages a serving multivolume filesystem. Created from
/// [`Filesystem::serve_multi_volume()`].
pub struct ServingMultiVolumeFilesystem {
    // Owns the dynamic child (if any); dropping it destroys the component unless forgotten.
    component: Option<Arc<DynamicComponentInstance>>,
    // exposed_dir will always be Some, except in Self::shutdown.
    exposed_dir: Option<fio::DirectoryProxy>,
}
515
/// Represents an opened volume in a [`ServingMultiVolumeFilesystem`] instance.
pub struct ServingVolume {
    root_dir: fio::DirectoryProxy,
    // The local namespace binding for the volume's root, if `bind_to_path` was called.
    binding: Option<NamespaceBinding>,
    exposed_dir: fio::DirectoryProxy,
}
522
impl ServingVolume {
    /// Creates a `ServingVolume` from the volume's exposed directory, opening the volume's `root`
    /// directory in the process.
    fn new(exposed_dir: fio::DirectoryProxy) -> Result<Self, Error> {
        let (root_dir, server_end) = create_endpoints::<fio::NodeMarker>();
        exposed_dir.open(
            "root",
            fio::PERM_READABLE | fio::Flags::PERM_INHERIT_WRITE | fio::Flags::PERM_INHERIT_EXECUTE,
            &Default::default(),
            server_end.into_channel(),
        )?;
        Ok(ServingVolume {
            root_dir: ClientEnd::<fio::DirectoryMarker>::new(root_dir.into_channel()).into_proxy(),
            binding: None,
            exposed_dir,
        })
    }

    /// Returns a proxy to the root directory of the serving volume.
    pub fn root(&self) -> &fio::DirectoryProxy {
        &self.root_dir
    }

    /// Returns a proxy to the exposed directory of the serving volume.
    pub fn exposed_dir(&self) -> &fio::DirectoryProxy {
        &self.exposed_dir
    }

    /// Binds the root directory being served by this filesystem to a path in the local namespace.
    /// The path must be absolute, containing no "." nor ".." entries.  The binding will be dropped
    /// when self is dropped, or when unbind_path is called.  Only one binding is supported.
    ///
    /// # Errors
    ///
    /// Returns [`Err`] if binding failed, or if a binding already exists.
    pub fn bind_to_path(&mut self, path: &str) -> Result<(), Error> {
        ensure!(self.binding.is_none(), "Already bound");
        self.binding = Some(NamespaceBinding::create(&self.root_dir, path.to_string())?);
        Ok(())
    }

    /// Remove the namespace binding to the root directory being served by this volume, if there is
    /// one. If there is no binding, this function does nothing. After this, it is safe to call
    /// bind_to_path again.
    pub fn unbind_path(&mut self) {
        // Dropping the binding unbinds it from the namespace.
        let _ = self.binding.take();
    }

    /// Returns the local namespace path the volume's root is bound to, if any.
    pub fn bound_path(&self) -> Option<&str> {
        self.binding.as_deref()
    }

    /// Returns a [`FilesystemInfo`] object containing information about the serving volume.
    ///
    /// # Errors
    ///
    /// Returns [`Err`] if querying the filesystem failed.
    pub async fn query(&self) -> Result<Box<fio::FilesystemInfo>, QueryError> {
        let (status, info) = self.root_dir.query_filesystem().await?;
        Status::ok(status).map_err(QueryError::DirectoryQuery)?;
        info.ok_or(QueryError::DirectoryEmptyResult)
    }

    /// Attempts to shutdown the filesystem using the [`fidl_fuchsia_fs::AdminProxy::shutdown()`]
    /// FIDL method. Fails if the volume is not already open.
    pub async fn shutdown(self) -> Result<(), Error> {
        // Note: connects via the volume's exposed `svc` directory, unlike the filesystem-level
        // shutdown which connects at the exposed dir root.
        let admin_proxy = connect_to_protocol_at_dir_svc::<AdminMarker>(self.exposed_dir())?;
        admin_proxy.shutdown().await.context("failed to shutdown volume")?;
        Ok(())
    }
}
592
593impl ServingMultiVolumeFilesystem {
594    /// Returns whether the given volume exists.
595    pub async fn has_volume(&mut self, volume: &str) -> Result<bool, Error> {
596        let path = format!("volumes/{}", volume);
597        fuchsia_fs::directory::open_node(
598            self.exposed_dir.as_ref().unwrap(),
599            &path,
600            fio::Flags::PROTOCOL_NODE,
601        )
602        .await
603        .map(|_| true)
604        .or_else(|e| {
605            if let fuchsia_fs::node::OpenError::OpenError(status) = &e {
606                if *status == zx::Status::NOT_FOUND {
607                    return Ok(false);
608                }
609            }
610            Err(e.into())
611        })
612    }
613
614    /// Creates and mounts the volume.  Fails if the volume already exists.
615    /// If `options.crypt` is set, the volume will be encrypted using the provided Crypt instance.
616    /// If `options.as_blob` is set, creates a blob volume that is mounted as a blob filesystem.
617    pub async fn create_volume(
618        &self,
619        volume: &str,
620        create_options: CreateOptions,
621        options: MountOptions,
622    ) -> Result<ServingVolume, Error> {
623        let (exposed_dir, server) = create_proxy::<fio::DirectoryMarker>();
624        connect_to_protocol_at_dir_root::<fidl_fuchsia_fs_startup::VolumesMarker>(
625            self.exposed_dir.as_ref().unwrap(),
626        )?
627        .create(volume, server, create_options, options)
628        .await?
629        .map_err(|e| anyhow!(zx::Status::from_raw(e)))?;
630        ServingVolume::new(exposed_dir)
631    }
632
633    /// Deletes the volume. Fails if the volume is already mounted.
634    pub async fn remove_volume(&self, volume: &str) -> Result<(), Error> {
635        connect_to_protocol_at_dir_root::<fidl_fuchsia_fs_startup::VolumesMarker>(
636            self.exposed_dir.as_ref().unwrap(),
637        )?
638        .remove(volume)
639        .await?
640        .map_err(|e| anyhow!(zx::Status::from_raw(e)))
641    }
642
643    /// Mounts an existing volume.  Fails if the volume is already mounted or doesn't exist.
644    /// If `crypt` is set, the volume will be decrypted using the provided Crypt instance.
645    pub async fn open_volume(
646        &self,
647        volume: &str,
648        options: MountOptions,
649    ) -> Result<ServingVolume, Error> {
650        let (exposed_dir, server) = create_proxy::<fio::DirectoryMarker>();
651        let path = format!("volumes/{}", volume);
652        connect_to_named_protocol_at_dir_root::<fidl_fuchsia_fs_startup::VolumeMarker>(
653            self.exposed_dir.as_ref().unwrap(),
654            &path,
655        )?
656        .mount(server, options)
657        .await?
658        .map_err(|e| anyhow!(zx::Status::from_raw(e)))?;
659
660        ServingVolume::new(exposed_dir)
661    }
662
663    /// Sets the max byte limit for a volume. Fails if the volume is not mounted.
664    pub async fn set_byte_limit(&self, volume: &str, byte_limit: u64) -> Result<(), Error> {
665        if byte_limit == 0 {
666            return Ok(());
667        }
668        let path = format!("volumes/{}", volume);
669        connect_to_named_protocol_at_dir_root::<fidl_fuchsia_fs_startup::VolumeMarker>(
670            self.exposed_dir.as_ref().unwrap(),
671            &path,
672        )?
673        .set_limit(byte_limit)
674        .await?
675        .map_err(|e| anyhow!(zx::Status::from_raw(e)))
676    }
677
678    pub async fn check_volume(&self, volume: &str, options: CheckOptions) -> Result<(), Error> {
679        let path = format!("volumes/{}", volume);
680        connect_to_named_protocol_at_dir_root::<fidl_fuchsia_fs_startup::VolumeMarker>(
681            self.exposed_dir.as_ref().unwrap(),
682            &path,
683        )?
684        .check(options)
685        .await?
686        .map_err(|e| anyhow!(zx::Status::from_raw(e)))?;
687        Ok(())
688    }
689
690    /// Provides access to the internal |exposed_dir| for use in testing
691    /// callsites which need directory access.
692    pub fn exposed_dir(&self) -> &fio::DirectoryProxy {
693        self.exposed_dir.as_ref().expect("BUG: exposed dir missing")
694    }
695
696    /// Attempts to shutdown the filesystem using the [`fidl_fuchsia_fs::AdminProxy::shutdown()`]
697    /// FIDL method.
698    ///
699    /// # Errors
700    ///
701    /// Returns [`Err`] if the shutdown failed.
702    pub async fn shutdown(mut self) -> Result<(), ShutdownError> {
703        connect_to_protocol_at_dir_root::<fidl_fuchsia_fs::AdminMarker>(
704            // Take exposed_dir so we don't attempt to shut down again in Drop.
705            &self.exposed_dir.take().expect("BUG: exposed dir missing"),
706        )?
707        .shutdown()
708        .await?;
709        Ok(())
710    }
711
712    /// Take the exposed dir from this filesystem instance, dropping the management struct without
713    /// shutting the filesystem down. This leaves the caller with the responsibility of shutting
714    /// down the filesystem, and the filesystem component if necessary.
715    pub fn take_exposed_dir(mut self) -> fio::DirectoryProxy {
716        self.component.take().expect("BUG: missing component").forget();
717        self.exposed_dir.take().expect("BUG: exposed dir missing")
718    }
719}
720
impl Drop for ServingMultiVolumeFilesystem {
    fn drop(&mut self) {
        if let Some(exposed_dir) = self.exposed_dir.take() {
            // Make a best effort attempt to shut down to the filesystem.
            // Note: the shutdown response future is dropped without being awaited (Drop cannot
            // await), so this only sends the request.
            if let Ok(proxy) =
                connect_to_protocol_at_dir_root::<fidl_fuchsia_fs::AdminMarker>(&exposed_dir)
            {
                let _ = proxy.shutdown();
            }
        }
    }
}
733
734#[cfg(test)]
735mod tests {
736    use super::*;
737    use crate::{BlobCompression, BlobEvictionPolicy, Blobfs, F2fs, Fxfs, Minfs};
738    use fuchsia_async as fasync;
739    use ramdevice_client::RamdiskClient;
740    use std::io::{Read as _, Write as _};
741    use std::time::Duration;
742
    /// Creates a test ramdisk with `block_size`-byte blocks and 1 << 16 blocks.
    async fn ramdisk(block_size: u64) -> RamdiskClient {
        RamdiskClient::create(block_size, 1 << 16).await.unwrap()
    }
746
    /// Builds a `Filesystem` backed by `ramdisk`, taking ownership of the ramdisk's controller.
    async fn new_fs<FSC: FSConfig>(ramdisk: &mut RamdiskClient, config: FSC) -> Filesystem {
        Filesystem::new(ramdisk.take_controller().unwrap(), config)
    }
750
    #[fuchsia::test]
    async fn blobfs_custom_config() {
        let block_size = 512;
        let mut ramdisk = ramdisk(block_size).await;
        // Exercise non-default Blobfs options end-to-end through format/fsck/serve.
        let config = Blobfs {
            verbose: true,
            readonly: true,
            write_compression_algorithm: BlobCompression::Uncompressed,
            cache_eviction_policy_override: BlobEvictionPolicy::EvictImmediately,
            ..Default::default()
        };
        let mut blobfs = new_fs(&mut ramdisk, config).await;

        blobfs.format().await.expect("failed to format blobfs");
        blobfs.fsck().await.expect("failed to fsck blobfs");
        let _ = blobfs.serve().await.expect("failed to serve blobfs");

        ramdisk.destroy().await.expect("failed to destroy ramdisk");
    }
770
771    #[fuchsia::test]
772    async fn blobfs_format_fsck_success() {
773        let block_size = 512;
774        let mut ramdisk = ramdisk(block_size).await;
775        let mut blobfs = new_fs(&mut ramdisk, Blobfs::default()).await;
776
777        blobfs.format().await.expect("failed to format blobfs");
778        blobfs.fsck().await.expect("failed to fsck blobfs");
779
780        ramdisk.destroy().await.expect("failed to destroy ramdisk");
781    }
782
    // Writes a blob, checks space accounting via query(), restarts the filesystem, and
    // verifies the blob survives the restart with the same contents and accounting.
    #[fuchsia::test]
    async fn blobfs_format_serve_write_query_restart_read_shutdown() {
        let block_size = 512;
        let mut ramdisk = ramdisk(block_size).await;
        let mut blobfs = new_fs(&mut ramdisk, Blobfs::default()).await;

        blobfs.format().await.expect("failed to format blobfs");

        let serving = blobfs.serve().await.expect("failed to serve blobfs the first time");

        // snapshot of FilesystemInfo
        let fs_info1 =
            serving.query().await.expect("failed to query filesystem info after first serving");

        // pre-generated merkle test fixture data
        // (assumed to be the merkle root for `content` below — blobs are addressed by hash)
        let merkle = "be901a14ec42ee0a8ee220eb119294cdd40d26d573139ee3d51e4430e7d08c28";
        let content = String::from("test content").into_bytes();

        // Write the blob. The inner scope drops our file connection before usage is
        // re-queried below. The blob's size is set via resize() before the payload is written.
        {
            let test_file = fuchsia_fs::directory::open_file(
                serving.root(),
                merkle,
                fio::Flags::FLAG_MAYBE_CREATE | fio::PERM_WRITABLE,
            )
            .await
            .expect("failed to create test file");
            let () = test_file
                .resize(content.len() as u64)
                .await
                .expect("failed to send resize FIDL")
                .map_err(Status::from_raw)
                .expect("failed to resize file");
            let _: u64 = test_file
                .write(&content)
                .await
                .expect("failed to write to test file")
                .map_err(Status::from_raw)
                .expect("write error");
        }

        // check against the snapshot FilesystemInfo
        let fs_info2 = serving.query().await.expect("failed to query filesystem info after write");
        assert_eq!(
            fs_info2.used_bytes - fs_info1.used_bytes,
            fs_info2.block_size as u64 // assuming content < 8K
        );

        // Restart: shut down and serve again, then confirm the blob is still readable.
        serving.shutdown().await.expect("failed to shutdown blobfs the first time");
        let serving = blobfs.serve().await.expect("failed to serve blobfs the second time");
        {
            let test_file =
                fuchsia_fs::directory::open_file(serving.root(), merkle, fio::PERM_READABLE)
                    .await
                    .expect("failed to open test file");
            let read_content =
                fuchsia_fs::file::read(&test_file).await.expect("failed to read from test file");
            assert_eq!(content, read_content);
        }

        // once more check against the snapshot FilesystemInfo
        let fs_info3 = serving.query().await.expect("failed to query filesystem info after read");
        assert_eq!(
            fs_info3.used_bytes - fs_info1.used_bytes,
            fs_info3.block_size as u64 // assuming content < 8K
        );

        serving.shutdown().await.expect("failed to shutdown blobfs the second time");

        ramdisk.destroy().await.expect("failed to destroy ramdisk");
    }
853
    // Verifies that `bind_to_path` installs the served blobfs root into the process-local
    // namespace so it is reachable via synchronous `std::fs` APIs, and that shutting the
    // filesystem down removes the binding again.
    #[fuchsia::test]
    async fn blobfs_bind_to_path() {
        let block_size = 512;
        // Pre-generated merkle test fixture — assumed to match `test_content` below,
        // since blobfs addresses blobs by their merkle root.
        let merkle = "be901a14ec42ee0a8ee220eb119294cdd40d26d573139ee3d51e4430e7d08c28";
        let test_content = b"test content";
        let mut ramdisk = ramdisk(block_size).await;
        let mut blobfs = new_fs(&mut ramdisk, Blobfs::default()).await;

        blobfs.format().await.expect("failed to format blobfs");
        let mut serving = blobfs.serve().await.expect("failed to serve blobfs");
        serving.bind_to_path("/test-blobfs-path").expect("bind_to_path failed");
        let test_path = format!("/test-blobfs-path/{}", merkle);

        // Write the blob through the namespace binding. The scope closes the file handle
        // before it is re-opened for reading. The size is set before writing the payload.
        {
            let mut file = std::fs::File::create(&test_path).expect("failed to create test file");
            file.set_len(test_content.len() as u64).expect("failed to set size");
            file.write_all(test_content).expect("write bytes");
        }

        // Read the blob back through the same binding.
        {
            let mut file = std::fs::File::open(&test_path).expect("failed to open test file");
            let mut buf = Vec::new();
            file.read_to_end(&mut buf).expect("failed to read test file");
            assert_eq!(buf, test_content);
        }

        serving.shutdown().await.expect("failed to shutdown blobfs");

        // After shutdown the namespace entry must be gone.
        std::fs::File::open(&test_path).expect_err("test file was not unbound");
    }
884
885    #[fuchsia::test]
886    async fn minfs_custom_config() {
887        let block_size = 512;
888        let mut ramdisk = ramdisk(block_size).await;
889        let config = Minfs {
890            verbose: true,
891            readonly: true,
892            fsck_after_every_transaction: true,
893            ..Default::default()
894        };
895        let mut minfs = new_fs(&mut ramdisk, config).await;
896
897        minfs.format().await.expect("failed to format minfs");
898        minfs.fsck().await.expect("failed to fsck minfs");
899        let _ = minfs.serve().await.expect("failed to serve minfs");
900
901        ramdisk.destroy().await.expect("failed to destroy ramdisk");
902    }
903
904    #[fuchsia::test]
905    async fn minfs_format_fsck_success() {
906        let block_size = 8192;
907        let mut ramdisk = ramdisk(block_size).await;
908        let mut minfs = new_fs(&mut ramdisk, Minfs::default()).await;
909
910        minfs.format().await.expect("failed to format minfs");
911        minfs.fsck().await.expect("failed to fsck minfs");
912
913        ramdisk.destroy().await.expect("failed to destroy ramdisk");
914    }
915
916    #[fuchsia::test]
917    async fn minfs_format_serve_write_query_restart_read_shutdown() {
918        let block_size = 8192;
919        let mut ramdisk = ramdisk(block_size).await;
920        let mut minfs = new_fs(&mut ramdisk, Minfs::default()).await;
921
922        minfs.format().await.expect("failed to format minfs");
923        let serving = minfs.serve().await.expect("failed to serve minfs the first time");
924
925        // snapshot of FilesystemInfo
926        let fs_info1 =
927            serving.query().await.expect("failed to query filesystem info after first serving");
928
929        let filename = "test_file";
930        let content = String::from("test content").into_bytes();
931
932        {
933            let test_file = fuchsia_fs::directory::open_file(
934                serving.root(),
935                filename,
936                fio::Flags::FLAG_MAYBE_CREATE | fio::PERM_WRITABLE,
937            )
938            .await
939            .expect("failed to create test file");
940            let _: u64 = test_file
941                .write(&content)
942                .await
943                .expect("failed to write to test file")
944                .map_err(Status::from_raw)
945                .expect("write error");
946        }
947
948        // check against the snapshot FilesystemInfo
949        let fs_info2 = serving.query().await.expect("failed to query filesystem info after write");
950        assert_eq!(
951            fs_info2.used_bytes - fs_info1.used_bytes,
952            fs_info2.block_size as u64 // assuming content < 8K
953        );
954
955        serving.shutdown().await.expect("failed to shutdown minfs the first time");
956        let serving = minfs.serve().await.expect("failed to serve minfs the second time");
957
958        {
959            let test_file =
960                fuchsia_fs::directory::open_file(serving.root(), filename, fio::PERM_READABLE)
961                    .await
962                    .expect("failed to open test file");
963            let read_content =
964                fuchsia_fs::file::read(&test_file).await.expect("failed to read from test file");
965            assert_eq!(content, read_content);
966        }
967
968        // once more check against the snapshot FilesystemInfo
969        let fs_info3 = serving.query().await.expect("failed to query filesystem info after read");
970        assert_eq!(
971            fs_info3.used_bytes - fs_info1.used_bytes,
972            fs_info3.block_size as u64 // assuming content < 8K
973        );
974
975        let _ = serving.shutdown().await.expect("failed to shutdown minfs the second time");
976
977        ramdisk.destroy().await.expect("failed to destroy ramdisk");
978    }
979
980    #[fuchsia::test]
981    async fn minfs_bind_to_path() {
982        let block_size = 8192;
983        let test_content = b"test content";
984        let mut ramdisk = ramdisk(block_size).await;
985        let mut minfs = new_fs(&mut ramdisk, Minfs::default()).await;
986
987        minfs.format().await.expect("failed to format minfs");
988        let mut serving = minfs.serve().await.expect("failed to serve minfs");
989        serving.bind_to_path("/test-minfs-path").expect("bind_to_path failed");
990        let test_path = "/test-minfs-path/test_file";
991
992        {
993            let mut file = std::fs::File::create(test_path).expect("failed to create test file");
994            file.write_all(test_content).expect("write bytes");
995        }
996
997        {
998            let mut file = std::fs::File::open(test_path).expect("failed to open test file");
999            let mut buf = Vec::new();
1000            file.read_to_end(&mut buf).expect("failed to read test file");
1001            assert_eq!(buf, test_content);
1002        }
1003
1004        serving.shutdown().await.expect("failed to shutdown minfs");
1005
1006        std::fs::File::open(test_path).expect_err("test file was not unbound");
1007    }
1008
1009    #[fuchsia::test]
1010    async fn minfs_take_exposed_dir_does_not_drop() {
1011        let block_size = 512;
1012        let test_content = b"test content";
1013        let test_file_name = "test-file";
1014        let mut ramdisk = ramdisk(block_size).await;
1015        let mut minfs = new_fs(&mut ramdisk, Minfs::default()).await;
1016
1017        minfs.format().await.expect("failed to format fxfs");
1018
1019        let fs = minfs.serve().await.expect("failed to serve fxfs");
1020        let file = {
1021            let file = fuchsia_fs::directory::open_file(
1022                fs.root(),
1023                test_file_name,
1024                fio::Flags::FLAG_MAYBE_CREATE | fio::PERM_READABLE | fio::PERM_WRITABLE,
1025            )
1026            .await
1027            .unwrap();
1028            fuchsia_fs::file::write(&file, test_content).await.unwrap();
1029            file.close().await.expect("close fidl error").expect("close error");
1030            fuchsia_fs::directory::open_file(fs.root(), test_file_name, fio::PERM_READABLE)
1031                .await
1032                .unwrap()
1033        };
1034
1035        let exposed_dir = fs.take_exposed_dir();
1036
1037        assert_eq!(fuchsia_fs::file::read(&file).await.unwrap(), test_content);
1038
1039        connect_to_protocol_at_dir_root::<fidl_fuchsia_fs::AdminMarker>(&exposed_dir)
1040            .expect("connecting to admin marker")
1041            .shutdown()
1042            .await
1043            .expect("shutdown failed");
1044    }
1045
1046    #[fuchsia::test]
1047    async fn f2fs_format_fsck_success() {
1048        let block_size = 4096;
1049        let mut ramdisk = ramdisk(block_size).await;
1050        let mut f2fs = new_fs(&mut ramdisk, F2fs::default()).await;
1051
1052        f2fs.format().await.expect("failed to format f2fs");
1053        f2fs.fsck().await.expect("failed to fsck f2fs");
1054
1055        ramdisk.destroy().await.expect("failed to destroy ramdisk");
1056    }
1057
    // Writes a file, checks space accounting via query(), restarts the filesystem, and
    // verifies the file survives the restart with the same contents and accounting.
    // Unlike the blobfs/minfs variants, f2fs accounts two blocks per small file (see below).
    #[fuchsia::test]
    async fn f2fs_format_serve_write_query_restart_read_shutdown() {
        let block_size = 4096;
        let mut ramdisk = ramdisk(block_size).await;
        let mut f2fs = new_fs(&mut ramdisk, F2fs::default()).await;

        f2fs.format().await.expect("failed to format f2fs");
        let serving = f2fs.serve().await.expect("failed to serve f2fs the first time");

        // snapshot of FilesystemInfo
        let fs_info1 =
            serving.query().await.expect("failed to query filesystem info after first serving");

        let filename = "test_file";
        let content = String::from("test content").into_bytes();

        // Write the file; the scope drops our connection before usage is re-queried below.
        {
            let test_file = fuchsia_fs::directory::open_file(
                serving.root(),
                filename,
                fio::Flags::FLAG_MAYBE_CREATE | fio::PERM_WRITABLE,
            )
            .await
            .expect("failed to create test file");
            let _: u64 = test_file
                .write(&content)
                .await
                .expect("failed to write to test file")
                .map_err(Status::from_raw)
                .expect("write error");
        }

        // check against the snapshot FilesystemInfo
        let fs_info2 = serving.query().await.expect("failed to query filesystem info after write");
        // With zx::stream, f2fs doesn't support the inline data feature allowing file
        // inode blocks to include small data. This way requires keeping two copies of VMOs
        // for the same inline data
        // assuming content < 4K and its inode block.
        let expected_size2 = fs_info2.block_size * 2;
        assert_eq!(fs_info2.used_bytes - fs_info1.used_bytes, expected_size2 as u64);

        // Restart: shut down and serve again, then confirm the file is still readable.
        serving.shutdown().await.expect("failed to shutdown f2fs the first time");
        let serving = f2fs.serve().await.expect("failed to serve f2fs the second time");

        {
            let test_file =
                fuchsia_fs::directory::open_file(serving.root(), filename, fio::PERM_READABLE)
                    .await
                    .expect("failed to open test file");
            let read_content =
                fuchsia_fs::file::read(&test_file).await.expect("failed to read from test file");
            assert_eq!(content, read_content);
        }

        // once more check against the snapshot FilesystemInfo
        let fs_info3 = serving.query().await.expect("failed to query filesystem info after read");
        // assuming content < 4K and its inode block.
        let expected_size3 = fs_info3.block_size * 2;
        assert_eq!(fs_info3.used_bytes - fs_info1.used_bytes, expected_size3 as u64);

        serving.shutdown().await.expect("failed to shutdown f2fs the second time");
        f2fs.fsck().await.expect("failed to fsck f2fs after shutting down the second time");

        ramdisk.destroy().await.expect("failed to destroy ramdisk");
    }
1123
1124    #[fuchsia::test]
1125    async fn f2fs_bind_to_path() {
1126        let block_size = 4096;
1127        let test_content = b"test content";
1128        let mut ramdisk = ramdisk(block_size).await;
1129        let mut f2fs = new_fs(&mut ramdisk, F2fs::default()).await;
1130
1131        f2fs.format().await.expect("failed to format f2fs");
1132        let mut serving = f2fs.serve().await.expect("failed to serve f2fs");
1133        serving.bind_to_path("/test-f2fs-path").expect("bind_to_path failed");
1134        let test_path = "/test-f2fs-path/test_file";
1135
1136        {
1137            let mut file = std::fs::File::create(test_path).expect("failed to create test file");
1138            file.write_all(test_content).expect("write bytes");
1139        }
1140
1141        {
1142            let mut file = std::fs::File::open(test_path).expect("failed to open test file");
1143            let mut buf = Vec::new();
1144            file.read_to_end(&mut buf).expect("failed to read test file");
1145            assert_eq!(buf, test_content);
1146        }
1147
1148        serving.shutdown().await.expect("failed to shutdown f2fs");
1149
1150        std::fs::File::open(test_path).expect_err("test file was not unbound");
1151    }
1152
1153    // TODO(https://fxbug.dev/42174810): Re-enable this test; it depends on Fxfs failing repeated calls to
1154    // Start.
1155    #[ignore]
1156    #[fuchsia::test]
1157    async fn fxfs_shutdown_component_when_dropped() {
1158        let block_size = 512;
1159        let mut ramdisk = ramdisk(block_size).await;
1160        let mut fxfs = new_fs(&mut ramdisk, Fxfs::default()).await;
1161
1162        fxfs.format().await.expect("failed to format fxfs");
1163        {
1164            let _fs = fxfs.serve_multi_volume().await.expect("failed to serve fxfs");
1165
1166            // Serve should fail for the second time.
1167            assert!(
1168                fxfs.serve_multi_volume().await.is_err(),
1169                "serving succeeded when already mounted"
1170            );
1171        }
1172
1173        // Fxfs should get shut down when dropped, but it's asynchronous, so we need to loop here.
1174        let mut attempts = 0;
1175        loop {
1176            if let Ok(_) = fxfs.serve_multi_volume().await {
1177                break;
1178            }
1179            attempts += 1;
1180            assert!(attempts < 10);
1181            fasync::Timer::new(Duration::from_secs(1)).await;
1182        }
1183    }
1184
    // Exercises the multi-volume API: a volume must not exist before creation, opening a
    // nonexistent volume must fail, and after create_volume the volume is queryable and
    // reported by has_volume.
    #[fuchsia::test]
    async fn fxfs_open_volume() {
        let block_size = 512;
        let mut ramdisk = ramdisk(block_size).await;
        let mut fxfs = new_fs(&mut ramdisk, Fxfs::default()).await;

        fxfs.format().await.expect("failed to format fxfs");

        let mut fs = fxfs.serve_multi_volume().await.expect("failed to serve fxfs");

        // Freshly formatted: no volume named "foo" yet.
        assert_eq!(fs.has_volume("foo").await.expect("has_volume"), false);
        assert!(
            fs.open_volume("foo", MountOptions::default()).await.is_err(),
            "Opening nonexistent volume should fail"
        );

        // Creating the volume also mounts it; the returned handle must answer queries.
        let vol = fs
            .create_volume("foo", CreateOptions::default(), MountOptions::default())
            .await
            .expect("Create volume failed");
        vol.query().await.expect("Query volume failed");
        // TODO(https://fxbug.dev/42057878) Closing the volume is not synchronous. Immediately reopening the
        // volume will race with the asynchronous close and sometimes fail because the volume is
        // still mounted.
        // fs.open_volume("foo", MountOptions{crypt: None, as_blob: false}).await
        //    .expect("Open volume failed");
        assert_eq!(fs.has_volume("foo").await.expect("has_volume"), true);
    }
1213
    // Verifies that `take_exposed_dir` hands ownership of the exposed dir to the caller
    // without shutting the filesystem down: an open file connection must stay usable
    // afterwards, and the caller shuts the filesystem down explicitly via Admin.
    #[fuchsia::test]
    async fn fxfs_take_exposed_dir_does_not_drop() {
        let block_size = 512;
        let test_content = b"test content";
        let test_file_name = "test-file";
        let mut ramdisk = ramdisk(block_size).await;
        let mut fxfs = new_fs(&mut ramdisk, Fxfs::default()).await;

        fxfs.format().await.expect("failed to format fxfs");

        let fs = fxfs.serve_multi_volume().await.expect("failed to serve fxfs");
        // Create a volume and a file in it, then re-open the file read-only so we hold a
        // live connection into the filesystem across `take_exposed_dir`.
        let file = {
            let vol = fs
                .create_volume("foo", CreateOptions::default(), MountOptions::default())
                .await
                .expect("Create volume failed");
            let file = fuchsia_fs::directory::open_file(
                vol.root(),
                test_file_name,
                fio::Flags::FLAG_MAYBE_CREATE | fio::PERM_READABLE | fio::PERM_WRITABLE,
            )
            .await
            .unwrap();
            fuchsia_fs::file::write(&file, test_content).await.unwrap();
            file.close().await.expect("close fidl error").expect("close error");
            fuchsia_fs::directory::open_file(vol.root(), test_file_name, fio::PERM_READABLE)
                .await
                .unwrap()
        };

        let exposed_dir = fs.take_exposed_dir();

        // The management struct is gone, but the filesystem must still be serving.
        assert_eq!(fuchsia_fs::file::read(&file).await.unwrap(), test_content);

        // Shutdown is now the caller's responsibility; do it via the Admin protocol.
        connect_to_protocol_at_dir_root::<fidl_fuchsia_fs::AdminMarker>(&exposed_dir)
            .expect("connecting to admin marker")
            .shutdown()
            .await
            .expect("shutdown failed");
    }
1254}