fs_management/filesystem.rs

// Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! Contains the asynchronous version of [`Filesystem`][`crate::Filesystem`].

use crate::error::{QueryError, ShutdownError};
use crate::{ComponentType, FSConfig, Options};
use anyhow::{Context, Error, anyhow, bail, ensure};
use fidl::endpoints::{ClientEnd, ServerEnd, create_endpoints, create_proxy};
use fidl_fuchsia_component::{self as fcomponent, RealmMarker};
use fidl_fuchsia_fs::AdminMarker;
use fidl_fuchsia_fs_startup::{CheckOptions, CreateOptions, MountOptions, StartupMarker};
use fidl_fuchsia_hardware_block::BlockMarker;
use fuchsia_component_client::{
    connect_to_named_protocol_at_dir_root, connect_to_protocol, connect_to_protocol_at_dir_root,
    connect_to_protocol_at_dir_svc, open_childs_exposed_directory,
};
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
use zx::Status;
use {fidl_fuchsia_component_decl as fdecl, fidl_fuchsia_io as fio};

/// Creates new connections to an instance of fuchsia.hardware.block.Block and similar protocols
/// (Volume, Partition).
///
/// NOTE: It is important to understand the difference between `BlockConnector` and the actual
/// protocols (e.g. a `ClientEnd<BlockMarker>` or `BlockProxy`): `BlockConnector` is used to *create
/// new connections* to a Block.
///
/// It is not possible to directly convert a `ClientEnd<BlockMarker>` (or `BlockProxy`) into a
/// `BlockConnector`, because Block is not cloneable.  To implement `BlockConnector`, you will need
/// a way to generate new connections to a Block instance.  A few common implementations are
/// provided below.
pub trait BlockConnector: Send + Sync {
    /// Connects `server_end` to the block device.
    fn connect_channel_to_block(&self, server_end: ServerEnd<BlockMarker>) -> Result<(), Error>;

    /// Creates and returns a new connection to the block device.
    fn connect_block(&self) -> Result<ClientEnd<BlockMarker>, Error> {
        let (client, server) = fidl::endpoints::create_endpoints();
        self.connect_channel_to_block(server)?;
        Ok(client)
    }
}

/// Implements `BlockConnector` via a service dir.  Wraps `connect_to_named_protocol_at_dir_root`.
#[derive(Clone, Debug)]
pub struct DirBasedBlockConnector(fio::DirectoryProxy, String);

impl DirBasedBlockConnector {
    /// Creates a new [`DirBasedBlockConnector`].  It is expected that `path` within `dir` hosts the
    /// Volume protocol.
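    ///
    /// # Example
    ///
    /// A minimal sketch; `service_dir` and the path are hypothetical placeholders for a
    /// directory that serves the Block/Volume protocol at that path.
    ///
    /// ```ignore
    /// let connector = DirBasedBlockConnector::new(service_dir, "block-device/volume".to_string());
    /// let _client = connector.connect_block()?;
    /// ```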
    pub fn new(dir: fio::DirectoryProxy, path: String) -> Self {
        Self(dir, path)
    }

    /// Returns the directory proxy through which connections are made.
    pub fn dir(&self) -> &fio::DirectoryProxy {
        &self.0
    }

    /// Returns the path relative to the directory which hosts the volume protocol.
    pub fn path(&self) -> &str {
        &self.1
    }
}

impl BlockConnector for DirBasedBlockConnector {
    fn connect_channel_to_block(&self, server_end: ServerEnd<BlockMarker>) -> Result<(), Error> {
        self.0.open(
            self.path(),
            fio::Flags::PROTOCOL_SERVICE,
            &fio::Options::default(),
            server_end.into_channel(),
        )?;
        Ok(())
    }
}

impl BlockConnector for fidl_fuchsia_device::ControllerProxy {
    fn connect_channel_to_block(&self, server_end: ServerEnd<BlockMarker>) -> Result<(), Error> {
        let () = self.connect_to_device_fidl(server_end.into_channel())?;
        Ok(())
    }
}

impl BlockConnector for fidl_fuchsia_storage_partitions::PartitionServiceProxy {
    fn connect_channel_to_block(&self, server_end: ServerEnd<BlockMarker>) -> Result<(), Error> {
        self.connect_channel_to_volume(server_end)?;
        Ok(())
    }
}

// NB: We have to be specific here; we cannot do a blanket impl for AsRef<T: BlockConnector> because
// that would conflict with a downstream crate that implements AsRef for a concrete BlockConnector
// defined here already.
impl<T: BlockConnector> BlockConnector for Arc<T> {
    fn connect_channel_to_block(&self, server_end: ServerEnd<BlockMarker>) -> Result<(), Error> {
        self.as_ref().connect_channel_to_block(server_end)
    }
}

impl<F> BlockConnector for F
where
    F: Fn(ServerEnd<BlockMarker>) -> Result<(), Error> + Send + Sync,
{
    fn connect_channel_to_block(&self, server_end: ServerEnd<BlockMarker>) -> Result<(), Error> {
        self(server_end)
    }
}

/// Asynchronously manages a block device for filesystem operations.
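///
/// # Example
///
/// A minimal sketch of the typical single-volume flow, assuming `connector` is some
/// [`BlockConnector`] for the underlying block device:
///
/// ```ignore
/// let mut fs = Filesystem::new(connector, crate::Blobfs::default());
/// fs.format().await?;
/// fs.fsck().await?;
/// let serving = fs.serve().await?;
/// // ... use `serving.root()` or `serving.exposed_dir()` ...
/// serving.shutdown().await?;
/// ```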
pub struct Filesystem {
    /// The filesystem struct keeps the FSConfig in a Box<dyn> instead of holding it directly for
    /// code size reasons. Using a type parameter instead would make monomorphized versions of the
    /// Filesystem impl block for each filesystem type, which duplicates several multi-kilobyte
    /// functions (get_component_exposed_dir and serve in particular) that are otherwise quite
    /// generic over config. Clients that want to be generic over filesystem type also pay the
    /// monomorphization cost, with some, like fshost, paying a lot.
    config: Box<dyn FSConfig>,
    block_connector: Box<dyn BlockConnector>,
    component: Option<Arc<DynamicComponentInstance>>,
}

// Used to disambiguate children in our component collection.
static COLLECTION_COUNTER: AtomicU64 = AtomicU64::new(0);

impl Filesystem {
    /// Returns a reference to the filesystem configuration.
    pub fn config(&self) -> &dyn FSConfig {
        self.config.as_ref()
    }

    /// Consumes the `Filesystem`, returning its configuration.
    pub fn into_config(self) -> Box<dyn FSConfig> {
        self.config
    }

    /// Creates a new `Filesystem`.
    pub fn new<B: BlockConnector + 'static, FSC: FSConfig>(
        block_connector: B,
        config: FSC,
    ) -> Self {
        Self::from_boxed_config(Box::new(block_connector), Box::new(config))
    }

    /// Creates a new `Filesystem`.
    pub fn from_boxed_config(
        block_connector: Box<dyn BlockConnector>,
        config: Box<dyn FSConfig>,
    ) -> Self {
        Self { config, block_connector, component: None }
    }

    /// Returns the (relative) moniker of the filesystem component. This will start the component
    /// instance if it is not running.
    pub async fn get_component_moniker(&mut self) -> Result<String, Error> {
        let _ = self.get_component_exposed_dir().await?;
        Ok(match self.config.options().component_type {
            ComponentType::StaticChild => self.config.options().component_name.to_string(),
            ComponentType::DynamicChild { .. } => {
                let component = self.component.as_ref().unwrap();
                format!("{}:{}", component.collection, component.name)
            }
        })
    }

    async fn get_component_exposed_dir(&mut self) -> Result<fio::DirectoryProxy, Error> {
        let options = self.config.options();
        let component_name = options.component_name;
        match options.component_type {
            ComponentType::StaticChild => open_childs_exposed_directory(component_name, None).await,
            ComponentType::DynamicChild { collection_name } => {
                if let Some(component) = &self.component {
                    return open_childs_exposed_directory(
                        component.name.clone(),
                        Some(component.collection.clone()),
                    )
                    .await;
                }

                // We need a unique name, so we pull in the process Koid here since it's possible
                // for the same binary in a component to be launched multiple times and we don't
                // want to collide with children created by other processes.
                let name = format!(
                    "{}-{}-{}",
                    component_name,
                    fuchsia_runtime::process_self().koid().unwrap().raw_koid(),
                    COLLECTION_COUNTER.fetch_add(1, Ordering::Relaxed)
                );

                let collection_ref = fdecl::CollectionRef { name: collection_name };
                let child_decls = vec![
                    fdecl::Child {
                        name: Some(format!("{}-relative", name)),
                        url: Some(format!("#meta/{}.cm", component_name)),
                        startup: Some(fdecl::StartupMode::Lazy),
                        ..Default::default()
                    },
                    fdecl::Child {
                        name: Some(name),
                        url: Some(format!(
                            "fuchsia-boot:///{}#meta/{}.cm",
                            component_name, component_name
                        )),
                        startup: Some(fdecl::StartupMode::Lazy),
                        ..Default::default()
                    },
                ];
                let realm_proxy = connect_to_protocol::<RealmMarker>()?;
                for child_decl in child_decls {
                    // Launch a new component in our collection.
                    realm_proxy
                        .create_child(
                            &collection_ref,
                            &child_decl,
                            fcomponent::CreateChildArgs::default(),
                        )
                        .await?
                        .map_err(|e| anyhow!("create_child failed: {:?}", e))?;

                    let component = Arc::new(DynamicComponentInstance {
                        name: child_decl.name.unwrap(),
                        collection: collection_ref.name.clone(),
                        should_not_drop: AtomicBool::new(false),
                    });

                    if let Ok(proxy) = open_childs_exposed_directory(
                        component.name.clone(),
                        Some(component.collection.clone()),
                    )
                    .await
                    {
                        self.component = Some(component);
                        return Ok(proxy);
                    }
                }
                Err(anyhow!("Failed to open exposed directory"))
            }
        }
    }

    /// Calls fuchsia.fs.startup/Startup.Format on the configured filesystem component.
    ///
    /// Which component is used and the options passed to it are controlled by the config this
    /// `Filesystem` was created with.
    ///
    /// See [`FSConfig`].
    ///
    /// # Errors
    ///
    /// Returns any errors from the Format method. Also returns an error if the startup protocol is
    /// not found, if it couldn't launch or find the filesystem component, or if it couldn't get
    /// the block device channel.
    pub async fn format(&mut self) -> Result<(), Error> {
        let channel = self.block_connector.connect_block()?;

        let exposed_dir = self.get_component_exposed_dir().await?;
        let proxy = connect_to_protocol_at_dir_root::<StartupMarker>(&exposed_dir)?;
        proxy
            .format(channel, &self.config().options().format_options)
            .await?
            .map_err(Status::from_raw)?;

        Ok(())
    }

    /// Calls fuchsia.fs.startup/Startup.Check on the configured filesystem component.
    ///
    /// Which component is used and the options passed to it are controlled by the config this
    /// `Filesystem` was created with.
    ///
    /// See [`FSConfig`].
    ///
    /// # Errors
    ///
    /// Returns any errors from the Check method. Also returns an error if the startup protocol is
    /// not found, if it couldn't launch or find the filesystem component, or if it couldn't get
    /// the block device channel.
    pub async fn fsck(&mut self) -> Result<(), Error> {
        let channel = self.block_connector.connect_block()?;
        let exposed_dir = self.get_component_exposed_dir().await?;
        let proxy = connect_to_protocol_at_dir_root::<StartupMarker>(&exposed_dir)?;
        proxy.check(channel, CheckOptions::default()).await?.map_err(Status::from_raw)?;
        Ok(())
    }

    /// Serves the filesystem on the block device and returns a [`ServingSingleVolumeFilesystem`]
    /// representing the running filesystem component.
    ///
    /// # Errors
    ///
    /// Returns [`Err`] if serving the filesystem failed.
    pub async fn serve(mut self) -> Result<ServingSingleVolumeFilesystem, Error> {
        if self.config.is_multi_volume() {
            bail!("Can't serve a multivolume filesystem; use serve_multi_volume");
        }
        let Options { start_options, reuse_component_after_serving, .. } = self.config.options();

        let exposed_dir = self.get_component_exposed_dir().await?;
        let proxy = connect_to_protocol_at_dir_root::<StartupMarker>(&exposed_dir)?;
        proxy
            .start(self.block_connector.connect_block()?, &start_options)
            .await?
            .map_err(Status::from_raw)?;

        let (root_dir, server_end) = create_endpoints::<fio::NodeMarker>();
        exposed_dir.open(
            "root",
            fio::PERM_READABLE | fio::Flags::PERM_INHERIT_WRITE | fio::Flags::PERM_INHERIT_EXECUTE,
            &Default::default(),
            server_end.into_channel(),
        )?;
        let component = self.component.clone();
        if !reuse_component_after_serving {
            self.component = None;
        }
        Ok(ServingSingleVolumeFilesystem {
            component,
            exposed_dir: Some(exposed_dir),
            root_dir: ClientEnd::<fio::DirectoryMarker>::new(root_dir.into_channel()).into_proxy(),
            binding: None,
        })
    }

    /// Serves the filesystem on the block device and returns a [`ServingMultiVolumeFilesystem`]
    /// representing the running filesystem component.  No volumes are opened; clients have to do
    /// that explicitly.
    ///
    /// # Errors
    ///
    /// Returns [`Err`] if serving the filesystem failed.
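    ///
    /// # Example
    ///
    /// A minimal sketch, assuming `connector` is a [`BlockConnector`] for a device that is about
    /// to be formatted with Fxfs:
    ///
    /// ```ignore
    /// let mut fs = Filesystem::new(connector, crate::Fxfs::default());
    /// fs.format().await?;
    /// let serving = fs.serve_multi_volume().await?;
    /// let volume = serving
    ///     .create_volume("data", CreateOptions::default(), MountOptions::default())
    ///     .await?;
    /// // ... use `volume.root()` ...
    /// serving.shutdown().await?;
    /// ```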
    pub async fn serve_multi_volume(mut self) -> Result<ServingMultiVolumeFilesystem, Error> {
        if !self.config.is_multi_volume() {
            bail!("Can't serve_multi_volume a single-volume filesystem; use serve");
        }

        let exposed_dir = self.get_component_exposed_dir().await?;
        let proxy = connect_to_protocol_at_dir_root::<StartupMarker>(&exposed_dir)?;
        proxy
            .start(self.block_connector.connect_block()?, &self.config.options().start_options)
            .await?
            .map_err(Status::from_raw)?;

        Ok(ServingMultiVolumeFilesystem {
            component: self.component,
            exposed_dir: Some(exposed_dir),
        })
    }
}

// Destroys the child when dropped.
struct DynamicComponentInstance {
    name: String,
    collection: String,
    should_not_drop: AtomicBool,
}

impl DynamicComponentInstance {
    fn forget(&self) {
        self.should_not_drop.store(true, Ordering::Relaxed);
    }
}

impl Drop for DynamicComponentInstance {
    fn drop(&mut self) {
        if self.should_not_drop.load(Ordering::Relaxed) {
            return;
        }
        if let Ok(realm_proxy) = connect_to_protocol::<RealmMarker>() {
            let _ = realm_proxy.destroy_child(&fdecl::ChildRef {
                name: self.name.clone(),
                collection: Some(self.collection.clone()),
            });
        }
    }
}

/// Manages the binding of a `fuchsia_io::DirectoryProxy` into the local namespace.  When the object
/// is dropped, the binding is removed.
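///
/// # Example
///
/// A minimal sketch; the path is a hypothetical example:
///
/// ```ignore
/// let binding = NamespaceBinding::create(&root_dir, "/mnt/example".to_string())?;
/// assert_eq!(&*binding, "/mnt/example");
/// // Dropping `binding` unbinds "/mnt/example" from the local namespace.
/// ```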
#[derive(Default)]
pub struct NamespaceBinding(String);

impl NamespaceBinding {
    pub fn create(root_dir: &fio::DirectoryProxy, path: String) -> Result<NamespaceBinding, Error> {
        let (client_end, server_end) = create_endpoints();
        root_dir.clone(ServerEnd::new(server_end.into_channel()))?;
        let namespace = fdio::Namespace::installed()?;
        namespace.bind(&path, client_end)?;
        Ok(Self(path))
    }
}

impl std::ops::Deref for NamespaceBinding {
    type Target = str;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl Drop for NamespaceBinding {
    fn drop(&mut self) {
        if let Ok(namespace) = fdio::Namespace::installed() {
            let _ = namespace.unbind(&self.0);
        }
    }
}

// TODO(https://fxbug.dev/42174810): Soft migration; remove this after completion
pub type ServingFilesystem = ServingSingleVolumeFilesystem;

/// Asynchronously manages a serving filesystem. Created from [`Filesystem::serve()`].
pub struct ServingSingleVolumeFilesystem {
    component: Option<Arc<DynamicComponentInstance>>,
    // exposed_dir will always be Some, except when the filesystem is shutting down.
    exposed_dir: Option<fio::DirectoryProxy>,
    root_dir: fio::DirectoryProxy,

    // The path in the local namespace that this filesystem is bound to (optional).
    binding: Option<NamespaceBinding>,
}

impl ServingSingleVolumeFilesystem {
    /// Returns a proxy to the exposed directory of the serving filesystem.
    pub fn exposed_dir(&self) -> &fio::DirectoryProxy {
        self.exposed_dir.as_ref().unwrap()
    }

    /// Returns a proxy to the root directory of the serving filesystem.
    pub fn root(&self) -> &fio::DirectoryProxy {
        &self.root_dir
    }

    /// Binds the root directory being served by this filesystem to a path in the local namespace.
    /// The path must be absolute, containing no "." nor ".." entries.  The binding will be dropped
    /// when self is dropped.  Only one binding is supported.
    ///
    /// # Errors
    ///
    /// Returns [`Err`] if binding failed.
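    ///
    /// # Example
    ///
    /// A minimal sketch; the path is a hypothetical example:
    ///
    /// ```ignore
    /// serving.bind_to_path("/mnt/my-fs")?;
    /// std::fs::write("/mnt/my-fs/hello.txt", b"hello")?;
    /// ```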
    pub fn bind_to_path(&mut self, path: &str) -> Result<(), Error> {
        ensure!(self.binding.is_none(), "Already bound");
        self.binding = Some(NamespaceBinding::create(&self.root_dir, path.to_string())?);
        Ok(())
    }

    pub fn bound_path(&self) -> Option<&str> {
        self.binding.as_deref()
    }

    /// Returns a [`FilesystemInfo`] object containing information about the serving filesystem.
    ///
    /// # Errors
    ///
    /// Returns [`Err`] if querying the filesystem failed.
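    ///
    /// # Example
    ///
    /// A minimal sketch:
    ///
    /// ```ignore
    /// let info = serving.query().await?;
    /// println!("used {} of {} bytes", info.used_bytes, info.total_bytes);
    /// ```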
    pub async fn query(&self) -> Result<Box<fio::FilesystemInfo>, QueryError> {
        let (status, info) = self.root_dir.query_filesystem().await?;
        Status::ok(status).map_err(QueryError::DirectoryQuery)?;
        info.ok_or(QueryError::DirectoryEmptyResult)
    }

    /// Take the exposed dir from this filesystem instance, dropping the management struct without
    /// shutting the filesystem down. This leaves the caller with the responsibility of shutting
    /// down the filesystem, and the filesystem component if necessary.
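    ///
    /// # Example
    ///
    /// A minimal sketch of shutting the filesystem down manually afterwards:
    ///
    /// ```ignore
    /// let exposed_dir = serving.take_exposed_dir();
    /// // The caller now owns shutdown:
    /// connect_to_protocol_at_dir_root::<fidl_fuchsia_fs::AdminMarker>(&exposed_dir)?
    ///     .shutdown()
    ///     .await?;
    /// ```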
    pub fn take_exposed_dir(mut self) -> fio::DirectoryProxy {
        self.component.take().expect("BUG: component missing").forget();
        self.exposed_dir.take().expect("BUG: exposed dir missing")
    }

    /// Attempts to shut down the filesystem using the
    /// [`fidl_fuchsia_fs::AdminProxy::shutdown()`] FIDL method and waits for the filesystem
    /// process to terminate.
    ///
    /// # Errors
    ///
    /// Returns [`Err`] if the shutdown failed or the filesystem process did not terminate.
    pub async fn shutdown(mut self) -> Result<(), ShutdownError> {
        connect_to_protocol_at_dir_root::<fidl_fuchsia_fs::AdminMarker>(
            &self.exposed_dir.take().expect("BUG: exposed dir missing"),
        )?
        .shutdown()
        .await?;
        Ok(())
    }

    /// Attempts to kill the filesystem process and waits for the process to terminate.
    ///
    /// # Errors
    ///
    /// Returns [`Err`] if the filesystem process could not be terminated. There is no way to
    /// recover the [`Filesystem`] from this error.
    pub async fn kill(self) -> Result<(), Error> {
        // For components, just shut down the filesystem.
        // TODO(https://fxbug.dev/293949323): Figure out a way to make this more abrupt - the use-cases are
        // either testing or when the filesystem isn't responding.
        self.shutdown().await?;
        Ok(())
    }
}

impl Drop for ServingSingleVolumeFilesystem {
    fn drop(&mut self) {
        // Make a best effort attempt to shut down the filesystem, if we need to.
        if let Some(exposed_dir) = self.exposed_dir.take() {
            if let Ok(proxy) =
                connect_to_protocol_at_dir_root::<fidl_fuchsia_fs::AdminMarker>(&exposed_dir)
            {
                let _ = proxy.shutdown();
            }
        }
    }
}

/// Asynchronously manages a serving multivolume filesystem. Created from
/// [`Filesystem::serve_multi_volume()`].
pub struct ServingMultiVolumeFilesystem {
    component: Option<Arc<DynamicComponentInstance>>,
    // exposed_dir will always be Some, except in Self::shutdown.
    exposed_dir: Option<fio::DirectoryProxy>,
}

/// Represents an opened volume in a [`ServingMultiVolumeFilesystem`] instance.
pub struct ServingVolume {
    root_dir: fio::DirectoryProxy,
    binding: Option<NamespaceBinding>,
    exposed_dir: fio::DirectoryProxy,
}

impl ServingVolume {
    fn new(exposed_dir: fio::DirectoryProxy) -> Result<Self, Error> {
        let (root_dir, server_end) = create_endpoints::<fio::NodeMarker>();
        exposed_dir.open(
            "root",
            fio::PERM_READABLE | fio::Flags::PERM_INHERIT_WRITE | fio::Flags::PERM_INHERIT_EXECUTE,
            &Default::default(),
            server_end.into_channel(),
        )?;
        Ok(ServingVolume {
            root_dir: ClientEnd::<fio::DirectoryMarker>::new(root_dir.into_channel()).into_proxy(),
            binding: None,
            exposed_dir,
        })
    }

    /// Returns a proxy to the root directory of the serving volume.
    pub fn root(&self) -> &fio::DirectoryProxy {
        &self.root_dir
    }

    /// Returns a proxy to the exposed directory of the serving volume.
    pub fn exposed_dir(&self) -> &fio::DirectoryProxy {
        &self.exposed_dir
    }

    /// Binds the root directory being served by this filesystem to a path in the local namespace.
    /// The path must be absolute, containing no "." nor ".." entries.  The binding will be dropped
    /// when self is dropped, or when unbind_path is called.  Only one binding is supported.
    ///
    /// # Errors
    ///
    /// Returns [`Err`] if binding failed, or if a binding already exists.
    pub fn bind_to_path(&mut self, path: &str) -> Result<(), Error> {
        ensure!(self.binding.is_none(), "Already bound");
        self.binding = Some(NamespaceBinding::create(&self.root_dir, path.to_string())?);
        Ok(())
    }

    /// Remove the namespace binding to the root directory being served by this volume, if there is
    /// one. If there is no binding, this function does nothing. After this, it is safe to call
    /// bind_to_path again.
    pub fn unbind_path(&mut self) {
        let _ = self.binding.take();
    }

    pub fn bound_path(&self) -> Option<&str> {
        self.binding.as_deref()
    }

    /// Returns a [`FilesystemInfo`] object containing information about the serving volume.
    ///
    /// # Errors
    ///
    /// Returns [`Err`] if querying the filesystem failed.
    pub async fn query(&self) -> Result<Box<fio::FilesystemInfo>, QueryError> {
        let (status, info) = self.root_dir.query_filesystem().await?;
        Status::ok(status).map_err(QueryError::DirectoryQuery)?;
        info.ok_or(QueryError::DirectoryEmptyResult)
    }

    /// Attempts to shut down the filesystem using the [`fidl_fuchsia_fs::AdminProxy::shutdown()`]
    /// FIDL method. Fails if the volume is not already open.
    pub async fn shutdown(self) -> Result<(), Error> {
        let admin_proxy = connect_to_protocol_at_dir_svc::<AdminMarker>(self.exposed_dir())?;
        admin_proxy.shutdown().await.context("failed to shutdown volume")?;
        Ok(())
    }
}

impl ServingMultiVolumeFilesystem {
    /// Returns whether the given volume exists.
    pub async fn has_volume(&self, volume: &str) -> Result<bool, Error> {
        let path = format!("volumes/{}", volume);
        fuchsia_fs::directory::open_node(
            self.exposed_dir.as_ref().unwrap(),
            &path,
            fio::Flags::PROTOCOL_NODE,
        )
        .await
        .map(|_| true)
        .or_else(|e| {
            if let fuchsia_fs::node::OpenError::OpenError(status) = &e {
                if *status == zx::Status::NOT_FOUND {
                    return Ok(false);
                }
            }
            Err(e.into())
        })
    }

    /// Creates and mounts the volume.  Fails if the volume already exists.
    /// If `options.crypt` is set, the volume will be encrypted using the provided Crypt instance.
    /// If `options.as_blob` is set, creates a blob volume that is mounted as a blob filesystem.
    pub async fn create_volume(
        &self,
        volume: &str,
        create_options: CreateOptions,
        options: MountOptions,
    ) -> Result<ServingVolume, Error> {
        let (exposed_dir, server) = create_proxy::<fio::DirectoryMarker>();
        connect_to_protocol_at_dir_root::<fidl_fuchsia_fs_startup::VolumesMarker>(
            self.exposed_dir.as_ref().unwrap(),
        )?
        .create(volume, server, create_options, options)
        .await?
        .map_err(|e| anyhow!(zx::Status::from_raw(e)))?;
        ServingVolume::new(exposed_dir)
    }

    /// Deletes the volume. Fails if the volume is already mounted.
    pub async fn remove_volume(&self, volume: &str) -> Result<(), Error> {
        connect_to_protocol_at_dir_root::<fidl_fuchsia_fs_startup::VolumesMarker>(
            self.exposed_dir.as_ref().unwrap(),
        )?
        .remove(volume)
        .await?
        .map_err(|e| anyhow!(zx::Status::from_raw(e)))
    }

    /// Mounts an existing volume.  Fails if the volume is already mounted or doesn't exist.
    /// If `options.crypt` is set, the volume will be decrypted using the provided Crypt instance.
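    ///
    /// # Example
    ///
    /// A minimal sketch:
    ///
    /// ```ignore
    /// if serving.has_volume("data").await? {
    ///     let volume = serving.open_volume("data", MountOptions::default()).await?;
    ///     // ... use `volume.root()` ...
    /// }
    /// ```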
    pub async fn open_volume(
        &self,
        volume: &str,
        options: MountOptions,
    ) -> Result<ServingVolume, Error> {
        let (exposed_dir, server) = create_proxy::<fio::DirectoryMarker>();
        let path = format!("volumes/{}", volume);
        connect_to_named_protocol_at_dir_root::<fidl_fuchsia_fs_startup::VolumeMarker>(
            self.exposed_dir.as_ref().unwrap(),
            &path,
        )?
        .mount(server, options)
        .await?
        .map_err(|e| anyhow!(zx::Status::from_raw(e)))?;

        ServingVolume::new(exposed_dir)
    }

    /// Returns volume info for `volume`.
    pub async fn get_volume_info(
        &self,
        volume: &str,
    ) -> Result<fidl_fuchsia_fs_startup::VolumeInfo, Error> {
        let path = format!("volumes/{}", volume);
        connect_to_named_protocol_at_dir_root::<fidl_fuchsia_fs_startup::VolumeMarker>(
            self.exposed_dir.as_ref().unwrap(),
            &path,
        )?
        .get_info()
        .await?
        .map_err(|e| anyhow!(zx::Status::from_raw(e)))
    }

    /// Sets the max byte limit for a volume. Fails if the volume is not mounted.
    pub async fn set_byte_limit(&self, volume: &str, byte_limit: u64) -> Result<(), Error> {
        if byte_limit == 0 {
            return Ok(());
        }
        let path = format!("volumes/{}", volume);
        connect_to_named_protocol_at_dir_root::<fidl_fuchsia_fs_startup::VolumeMarker>(
            self.exposed_dir.as_ref().unwrap(),
            &path,
        )?
        .set_limit(byte_limit)
        .await?
        .map_err(|e| anyhow!(zx::Status::from_raw(e)))
    }

    /// Runs a consistency check on the given volume.
    pub async fn check_volume(&self, volume: &str, options: CheckOptions) -> Result<(), Error> {
        let path = format!("volumes/{}", volume);
        connect_to_named_protocol_at_dir_root::<fidl_fuchsia_fs_startup::VolumeMarker>(
            self.exposed_dir.as_ref().unwrap(),
            &path,
        )?
        .check(options)
        .await?
        .map_err(|e| anyhow!(zx::Status::from_raw(e)))?;
        Ok(())
    }

    /// Provides access to the internal `exposed_dir`, for use by testing callsites which need
    /// directory access.
    pub fn exposed_dir(&self) -> &fio::DirectoryProxy {
        self.exposed_dir.as_ref().expect("BUG: exposed dir missing")
    }

    /// Attempts to shut down the filesystem using the [`fidl_fuchsia_fs::AdminProxy::shutdown()`]
    /// FIDL method.
    ///
    /// # Errors
    ///
    /// Returns [`Err`] if the shutdown failed.
    pub async fn shutdown(mut self) -> Result<(), ShutdownError> {
        connect_to_protocol_at_dir_root::<fidl_fuchsia_fs::AdminMarker>(
            // Take exposed_dir so we don't attempt to shut down again in Drop.
            &self.exposed_dir.take().expect("BUG: exposed dir missing"),
        )?
        .shutdown()
        .await?;
        Ok(())
    }

    /// Take the exposed dir from this filesystem instance, dropping the management struct without
    /// shutting the filesystem down. This leaves the caller with the responsibility of shutting
    /// down the filesystem, and the filesystem component if necessary.
    pub fn take_exposed_dir(mut self) -> fio::DirectoryProxy {
        self.component.take().expect("BUG: missing component").forget();
        self.exposed_dir.take().expect("BUG: exposed dir missing")
    }

    /// Returns a list of volumes found in the filesystem.
    pub async fn list_volumes(&self) -> Result<Vec<String>, Error> {
        let volumes_dir = fuchsia_fs::directory::open_async::<fio::DirectoryMarker>(
            self.exposed_dir(),
            "volumes",
            fio::PERM_READABLE,
        )
        .unwrap();
        fuchsia_fs::directory::readdir(&volumes_dir)
            .await
            .map(|entries| entries.into_iter().map(|e| e.name).collect())
            .map_err(|e| anyhow!("failed to read volumes dir: {}", e))
    }
}

impl Drop for ServingMultiVolumeFilesystem {
    fn drop(&mut self) {
        if let Some(exposed_dir) = self.exposed_dir.take() {
            // Make a best effort attempt to shut down the filesystem.
            if let Ok(proxy) =
                connect_to_protocol_at_dir_root::<fidl_fuchsia_fs::AdminMarker>(&exposed_dir)
            {
                let _ = proxy.shutdown();
            }
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::{Blobfs, F2fs, Fxfs, Minfs};
    use delivery_blob::{CompressionMode, Type1Blob};
    use fidl_fuchsia_fxfs::{BlobCreatorMarker, BlobReaderMarker};
    use ramdevice_client::RamdiskClient;
    use std::io::{Read as _, Write as _};

    async fn ramdisk(block_size: u64) -> RamdiskClient {
        RamdiskClient::create(block_size, 1 << 16).await.unwrap()
    }

    async fn new_fs<FSC: FSConfig>(ramdisk: &RamdiskClient, config: FSC) -> Filesystem {
        Filesystem::new(ramdisk.open_controller().unwrap(), config)
    }

    #[fuchsia::test]
    async fn blobfs_custom_config() {
        let block_size = 512;
        let ramdisk = ramdisk(block_size).await;
        let config = Blobfs { verbose: true, readonly: true, ..Default::default() };
        let mut blobfs = new_fs(&ramdisk, config).await;

        blobfs.format().await.expect("failed to format blobfs");
        blobfs.fsck().await.expect("failed to fsck blobfs");
        let _ = blobfs.serve().await.expect("failed to serve blobfs");

        ramdisk.destroy().await.expect("failed to destroy ramdisk");
    }

    #[fuchsia::test]
    async fn blobfs_format_fsck_success() {
        let block_size = 512;
        let ramdisk = ramdisk(block_size).await;
        let mut blobfs = new_fs(&ramdisk, Blobfs::default()).await;

        blobfs.format().await.expect("failed to format blobfs");
        blobfs.fsck().await.expect("failed to fsck blobfs");

        ramdisk.destroy().await.expect("failed to destroy ramdisk");
    }

    #[fuchsia::test]
    async fn blobfs_format_serve_write_query_restart_read_shutdown() {
        let block_size = 512;
        let ramdisk = ramdisk(block_size).await;
        let mut blobfs = new_fs(&ramdisk, Blobfs::default()).await;

        blobfs.format().await.expect("failed to format blobfs");

        let serving = blobfs.serve().await.expect("failed to serve blobfs the first time");

        // snapshot of FilesystemInfo
        let fs_info1 =
            serving.query().await.expect("failed to query filesystem info after first serving");

        // pre-generated merkle test fixture data
        let content = b"test content";
        let merkle = fuchsia_merkle::root_from_slice(content);
        let delivery_blob = Type1Blob::generate(content, CompressionMode::Never);

        {
            let creator = fuchsia_component_client::connect_to_protocol_at_dir_root::<
                BlobCreatorMarker,
            >(serving.exposed_dir())
            .unwrap();
            let writer = creator.create(&merkle.into(), false).await.unwrap().unwrap();
            let mut writer =
                blob_writer::BlobWriter::create(writer.into_proxy(), delivery_blob.len() as u64)
                    .await
                    .unwrap();
            writer.write(&delivery_blob).await.unwrap();
        }

        // check against the snapshot FilesystemInfo
        let fs_info2 = serving.query().await.expect("failed to query filesystem info after write");
        assert_eq!(
            fs_info2.used_bytes - fs_info1.used_bytes,
            fs_info2.block_size as u64 // assuming content < 8K
        );

        serving.shutdown().await.expect("failed to shutdown blobfs the first time");
        let blobfs = new_fs(&ramdisk, Blobfs::default()).await;
        let serving = blobfs.serve().await.expect("failed to serve blobfs the second time");
        {
            let reader = fuchsia_component_client::connect_to_protocol_at_dir_root::<
                BlobReaderMarker,
            >(serving.exposed_dir())
            .unwrap();
            let vmo = reader.get_vmo(&merkle.into()).await.unwrap().unwrap();
            let read_content = vmo.read_to_vec::<u8>(0, content.len() as u64).unwrap();
            assert_eq!(read_content, content);
        }

        // once more check against the snapshot FilesystemInfo
        let fs_info3 = serving.query().await.expect("failed to query filesystem info after read");
        assert_eq!(
            fs_info3.used_bytes - fs_info1.used_bytes,
            fs_info3.block_size as u64 // assuming content < 8K
        );

        serving.shutdown().await.expect("failed to shutdown blobfs the second time");

        ramdisk.destroy().await.expect("failed to destroy ramdisk");
    }

    #[fuchsia::test]
    async fn blobfs_bind_to_path() {
        let block_size = 512;
        let test_content = b"test content";
        let merkle = fuchsia_merkle::root_from_slice(test_content);
        let delivery_blob = Type1Blob::generate(test_content, CompressionMode::Never);
        let ramdisk = ramdisk(block_size).await;
        let mut blobfs = new_fs(&ramdisk, Blobfs::default()).await;

        blobfs.format().await.expect("failed to format blobfs");
        let mut serving = blobfs.serve().await.expect("failed to serve blobfs");
        serving.bind_to_path("/test-blobfs-path").expect("bind_to_path failed");

        {
            let creator = fuchsia_component_client::connect_to_protocol_at_dir_root::<
                BlobCreatorMarker,
            >(serving.exposed_dir())
            .unwrap();
            let writer = creator.create(&merkle.into(), false).await.unwrap().unwrap();
            let mut writer =
                blob_writer::BlobWriter::create(writer.into_proxy(), delivery_blob.len() as u64)
                    .await
                    .unwrap();
            writer.write(&delivery_blob).await.unwrap();
        }

        let entries = std::fs::read_dir("/test-blobfs-path")
            .unwrap()
            .map(|entry| entry.unwrap().file_name().into_string().unwrap())
            .collect::<Vec<_>>();
        assert_eq!(entries, &[merkle.to_string()]);

        serving.shutdown().await.expect("failed to shutdown blobfs");
    }

    #[fuchsia::test]
    async fn minfs_custom_config() {
        let block_size = 512;
        let ramdisk = ramdisk(block_size).await;
        let config = Minfs {
            verbose: true,
            readonly: true,
            fsck_after_every_transaction: true,
            ..Default::default()
        };
        let mut minfs = new_fs(&ramdisk, config).await;

        minfs.format().await.expect("failed to format minfs");
        minfs.fsck().await.expect("failed to fsck minfs");
        let _ = minfs.serve().await.expect("failed to serve minfs");

        ramdisk.destroy().await.expect("failed to destroy ramdisk");
    }

    #[fuchsia::test]
    async fn minfs_format_fsck_success() {
        let block_size = 8192;
        let ramdisk = ramdisk(block_size).await;
        let mut minfs = new_fs(&ramdisk, Minfs::default()).await;

        minfs.format().await.expect("failed to format minfs");
        minfs.fsck().await.expect("failed to fsck minfs");

        ramdisk.destroy().await.expect("failed to destroy ramdisk");
    }

    #[fuchsia::test]
    async fn minfs_format_serve_write_query_restart_read_shutdown() {
        let block_size = 8192;
        let ramdisk = ramdisk(block_size).await;
        let mut minfs = new_fs(&ramdisk, Minfs::default()).await;

        minfs.format().await.expect("failed to format minfs");
        let serving = minfs.serve().await.expect("failed to serve minfs the first time");

        // snapshot of FilesystemInfo
        let fs_info1 =
            serving.query().await.expect("failed to query filesystem info after first serving");

        let filename = "test_file";
        let content = String::from("test content").into_bytes();

        {
            let test_file = fuchsia_fs::directory::open_file(
                serving.root(),
                filename,
                fio::Flags::FLAG_MAYBE_CREATE | fio::PERM_WRITABLE,
            )
            .await
            .expect("failed to create test file");
            let _: u64 = test_file
                .write(&content)
                .await
                .expect("failed to write to test file")
                .map_err(Status::from_raw)
                .expect("write error");
        }

        // check against the snapshot FilesystemInfo
        let fs_info2 = serving.query().await.expect("failed to query filesystem info after write");
        assert_eq!(
            fs_info2.used_bytes - fs_info1.used_bytes,
            fs_info2.block_size as u64 // assuming content < 8K
        );

        serving.shutdown().await.expect("failed to shutdown minfs the first time");
        let minfs = new_fs(&ramdisk, Minfs::default()).await;
        let serving = minfs.serve().await.expect("failed to serve minfs the second time");

        {
            let test_file =
                fuchsia_fs::directory::open_file(serving.root(), filename, fio::PERM_READABLE)
                    .await
                    .expect("failed to open test file");
            let read_content =
                fuchsia_fs::file::read(&test_file).await.expect("failed to read from test file");
            assert_eq!(content, read_content);
        }

        // once more check against the snapshot FilesystemInfo
        let fs_info3 = serving.query().await.expect("failed to query filesystem info after read");
        assert_eq!(
            fs_info3.used_bytes - fs_info1.used_bytes,
            fs_info3.block_size as u64 // assuming content < 8K
        );

        let _ = serving.shutdown().await.expect("failed to shutdown minfs the second time");

        ramdisk.destroy().await.expect("failed to destroy ramdisk");
    }

    #[fuchsia::test]
    async fn minfs_bind_to_path() {
        let block_size = 8192;
        let test_content = b"test content";
        let ramdisk = ramdisk(block_size).await;
        let mut minfs = new_fs(&ramdisk, Minfs::default()).await;

        minfs.format().await.expect("failed to format minfs");
        let mut serving = minfs.serve().await.expect("failed to serve minfs");
        serving.bind_to_path("/test-minfs-path").expect("bind_to_path failed");
        let test_path = "/test-minfs-path/test_file";

        {
            let mut file = std::fs::File::create(test_path).expect("failed to create test file");
            file.write_all(test_content).expect("write bytes");
        }

        {
            let mut file = std::fs::File::open(test_path).expect("failed to open test file");
            let mut buf = Vec::new();
            file.read_to_end(&mut buf).expect("failed to read test file");
            assert_eq!(buf, test_content);
        }

        serving.shutdown().await.expect("failed to shutdown minfs");

        std::fs::File::open(test_path).expect_err("test file was not unbound");
    }

    #[fuchsia::test]
    async fn minfs_take_exposed_dir_does_not_drop() {
        let block_size = 512;
        let test_content = b"test content";
        let test_file_name = "test-file";
        let ramdisk = ramdisk(block_size).await;
        let mut minfs = new_fs(&ramdisk, Minfs::default()).await;

        minfs.format().await.expect("failed to format minfs");

        let fs = minfs.serve().await.expect("failed to serve minfs");
        let file = {
            let file = fuchsia_fs::directory::open_file(
                fs.root(),
                test_file_name,
                fio::Flags::FLAG_MAYBE_CREATE | fio::PERM_READABLE | fio::PERM_WRITABLE,
            )
            .await
            .unwrap();
            fuchsia_fs::file::write(&file, test_content).await.unwrap();
            file.close().await.expect("close fidl error").expect("close error");
            fuchsia_fs::directory::open_file(fs.root(), test_file_name, fio::PERM_READABLE)
                .await
                .unwrap()
        };

        let exposed_dir = fs.take_exposed_dir();

        assert_eq!(fuchsia_fs::file::read(&file).await.unwrap(), test_content);

        connect_to_protocol_at_dir_root::<fidl_fuchsia_fs::AdminMarker>(&exposed_dir)
            .expect("connecting to admin marker")
            .shutdown()
            .await
            .expect("shutdown failed");
    }

    #[fuchsia::test]
    async fn f2fs_format_fsck_success() {
        let block_size = 4096;
        let ramdisk = ramdisk(block_size).await;
        let mut f2fs = new_fs(&ramdisk, F2fs::default()).await;

        f2fs.format().await.expect("failed to format f2fs");
        f2fs.fsck().await.expect("failed to fsck f2fs");

        ramdisk.destroy().await.expect("failed to destroy ramdisk");
    }

    #[fuchsia::test]
    async fn f2fs_format_serve_write_query_restart_read_shutdown() {
        let block_size = 4096;
        let ramdisk = ramdisk(block_size).await;
        let mut f2fs = new_fs(&ramdisk, F2fs::default()).await;

        f2fs.format().await.expect("failed to format f2fs");
        let serving = f2fs.serve().await.expect("failed to serve f2fs the first time");

        // snapshot of FilesystemInfo
        let fs_info1 =
            serving.query().await.expect("failed to query filesystem info after first serving");

        let filename = "test_file";
        let content = String::from("test content").into_bytes();

        {
            let test_file = fuchsia_fs::directory::open_file(
                serving.root(),
                filename,
                fio::Flags::FLAG_MAYBE_CREATE | fio::PERM_WRITABLE,
            )
            .await
            .expect("failed to create test file");
            let _: u64 = test_file
                .write(&content)
                .await
                .expect("failed to write to test file")
                .map_err(Status::from_raw)
                .expect("write error");
        }

        // check against the snapshot FilesystemInfo
        let fs_info2 = serving.query().await.expect("failed to query filesystem info after write");
        // With zx::stream, f2fs doesn't support the inline data feature, which allows file
        // inode blocks to include small data, because it would require keeping two copies of
        // VMOs for the same inline data.
        // assuming content < 4K and its inode block.
        let expected_size2 = fs_info2.block_size * 2;
        assert_eq!(fs_info2.used_bytes - fs_info1.used_bytes, expected_size2 as u64);

        serving.shutdown().await.expect("failed to shutdown f2fs the first time");
        let f2fs = new_fs(&ramdisk, F2fs::default()).await;
        let serving = f2fs.serve().await.expect("failed to serve f2fs the second time");

        {
            let test_file =
                fuchsia_fs::directory::open_file(serving.root(), filename, fio::PERM_READABLE)
                    .await
                    .expect("failed to open test file");
            let read_content =
                fuchsia_fs::file::read(&test_file).await.expect("failed to read from test file");
            assert_eq!(content, read_content);
        }

        // once more check against the snapshot FilesystemInfo
        let fs_info3 = serving.query().await.expect("failed to query filesystem info after read");
        // assuming content < 4K and its inode block.
        let expected_size3 = fs_info3.block_size * 2;
        assert_eq!(fs_info3.used_bytes - fs_info1.used_bytes, expected_size3 as u64);

        serving.shutdown().await.expect("failed to shutdown f2fs the second time");
        let mut f2fs = new_fs(&ramdisk, F2fs::default()).await;
        f2fs.fsck().await.expect("failed to fsck f2fs after shutting down the second time");

        ramdisk.destroy().await.expect("failed to destroy ramdisk");
    }

    #[fuchsia::test]
    async fn f2fs_bind_to_path() {
        let block_size = 4096;
        let test_content = b"test content";
        let ramdisk = ramdisk(block_size).await;
        let mut f2fs = new_fs(&ramdisk, F2fs::default()).await;

        f2fs.format().await.expect("failed to format f2fs");
        let mut serving = f2fs.serve().await.expect("failed to serve f2fs");
        serving.bind_to_path("/test-f2fs-path").expect("bind_to_path failed");
        let test_path = "/test-f2fs-path/test_file";

        {
            let mut file = std::fs::File::create(test_path).expect("failed to create test file");
            file.write_all(test_content).expect("write bytes");
        }

        {
            let mut file = std::fs::File::open(test_path).expect("failed to open test file");
            let mut buf = Vec::new();
            file.read_to_end(&mut buf).expect("failed to read test file");
            assert_eq!(buf, test_content);
        }

        serving.shutdown().await.expect("failed to shutdown f2fs");

        std::fs::File::open(test_path).expect_err("test file was not unbound");
    }

    #[fuchsia::test]
    async fn fxfs_open_volume() {
        let block_size = 512;
        let ramdisk = ramdisk(block_size).await;
        let mut fxfs = new_fs(&ramdisk, Fxfs::default()).await;

        fxfs.format().await.expect("failed to format fxfs");

        let fs = fxfs.serve_multi_volume().await.expect("failed to serve fxfs");

        assert_eq!(fs.has_volume("foo").await.expect("has_volume"), false);
        assert!(
            fs.open_volume("foo", MountOptions::default()).await.is_err(),
            "Opening nonexistent volume should fail"
        );

        let vol = fs
            .create_volume("foo", CreateOptions::default(), MountOptions::default())
            .await
            .expect("Create volume failed");
        vol.query().await.expect("Query volume failed");
        // TODO(https://fxbug.dev/42057878) Closing the volume is not synchronous. Immediately reopening the
        // volume will race with the asynchronous close and sometimes fail because the volume is
        // still mounted.
        // fs.open_volume("foo", MountOptions{crypt: None, as_blob: false}).await
        //    .expect("Open volume failed");
        assert_eq!(fs.has_volume("foo").await.expect("has_volume"), true);
    }

    #[fuchsia::test]
    async fn fxfs_take_exposed_dir_does_not_drop() {
        let block_size = 512;
        let test_content = b"test content";
        let test_file_name = "test-file";
        let ramdisk = ramdisk(block_size).await;
        let mut fxfs = new_fs(&ramdisk, Fxfs::default()).await;

        fxfs.format().await.expect("failed to format fxfs");

        let fs = fxfs.serve_multi_volume().await.expect("failed to serve fxfs");
        let file = {
            let vol = fs
                .create_volume("foo", CreateOptions::default(), MountOptions::default())
                .await
                .expect("Create volume failed");
            let file = fuchsia_fs::directory::open_file(
                vol.root(),
                test_file_name,
                fio::Flags::FLAG_MAYBE_CREATE | fio::PERM_READABLE | fio::PERM_WRITABLE,
            )
            .await
            .unwrap();
            fuchsia_fs::file::write(&file, test_content).await.unwrap();
            file.close().await.expect("close fidl error").expect("close error");
            fuchsia_fs::directory::open_file(vol.root(), test_file_name, fio::PERM_READABLE)
                .await
                .unwrap()
        };

        let exposed_dir = fs.take_exposed_dir();

        assert_eq!(fuchsia_fs::file::read(&file).await.unwrap(), test_content);

        connect_to_protocol_at_dir_root::<fidl_fuchsia_fs::AdminMarker>(&exposed_dir)
            .expect("connecting to admin marker")
            .shutdown()
            .await
            .expect("shutdown failed");
    }
}