// fxfs_platform/fuchsia/component.rs

1// Copyright 2022 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5use crate::fuchsia::debug::{FxfsDebug, handle_debug_request};
6use crate::fuchsia::errors::map_to_status;
7use crate::fuchsia::memory_pressure::MemoryPressureMonitor;
8use crate::fuchsia::volume::MemoryPressureConfig;
9use crate::fuchsia::volumes_directory::VolumesDirectory;
10use anyhow::{Context, Error, bail};
11use async_trait::async_trait;
12use block_client::{BlockClient as _, RemoteBlockClient};
13use fidl::endpoints::{ClientEnd, DiscoverableProtocolMarker, RequestStream};
14use fidl_fuchsia_fs::{AdminMarker, AdminRequest, AdminRequestStream};
15use fidl_fuchsia_fs_startup::{
16    CheckOptions, StartOptions, StartupMarker, StartupRequest, StartupRequestStream, VolumesMarker,
17    VolumesRequest, VolumesRequestStream,
18};
19use fidl_fuchsia_fxfs::{
20    DebugMarker, DebugRequestStream, VolumeInstallerMarker, VolumeInstallerRequest,
21    VolumeInstallerRequestStream,
22};
23use fidl_fuchsia_process_lifecycle::{LifecycleRequest, LifecycleRequestStream};
24use fidl_fuchsia_storage_block::BlockMarker;
25use fs_inspect::{FsInspect, FsInspectTree, InfoData, UsageData};
26use fuchsia_component_client::connect_to_protocol;
27use futures::TryStreamExt;
28use futures::lock::Mutex;
29use fxfs::filesystem::{FxFilesystem, FxFilesystemBuilder, MIN_BLOCK_SIZE, OpenFxFilesystem, mkfs};
30use fxfs::log::*;
31use fxfs::object_store::volume::root_volume;
32use fxfs::serialized_types::LATEST_VERSION;
33use fxfs::{fsck, metrics};
34use refaults_vmo::PageRefaultCounter;
35use std::ops::Deref;
36use std::sync::{Arc, Weak};
37use storage_device::DeviceHolder;
38use storage_device::block_device::BlockDevice;
39use vfs::directory::helper::DirectlyMutable;
40use vfs::execution_scope::ExecutionScope;
41use {
42    fidl_fuchsia_io as fio, fidl_fuchsia_memory_attribution as fattribution,
43    fuchsia_async as fasync,
44};
45
/// Filesystem name reported via `InfoData` (see `FsInspect::get_info_data`).
const FXFS_INFO_NAME: &str = "fxfs";
47
48pub fn map_to_raw_status(e: Error) -> zx::sys::zx_status_t {
49    map_to_status(e).into_raw()
50}
51
/// Connects a [`RemoteBlockClient`] over the given block-device client end.
pub async fn new_block_client(remote: ClientEnd<BlockMarker>) -> Result<RemoteBlockClient, Error> {
    Ok(RemoteBlockClient::new(remote.into_proxy()).await?)
}
55
56/// Runs Fxfs as a component.
57pub struct Component {
58    state: futures::lock::Mutex<State>,
59
60    // The execution scope of the pseudo filesystem.
61    scope: ExecutionScope,
62
63    // The root of the pseudo filesystem for the component.
64    export_dir: Arc<vfs::directory::immutable::Simple>,
65}
66
// Wrapper type to add `FsInspect` support to an `OpenFxFilesystem`.
// The second field is a unique filesystem instance id (the koid of a freshly
// created kernel event; see the `From` impl below).
struct InspectedFxFilesystem(OpenFxFilesystem, /*fs_id=*/ u64);
69
70impl From<OpenFxFilesystem> for InspectedFxFilesystem {
71    fn from(fs: OpenFxFilesystem) -> Self {
72        Self(fs, zx::Event::create().koid().unwrap().raw_koid())
73    }
74}
75
76impl Deref for InspectedFxFilesystem {
77    type Target = Arc<FxFilesystem>;
78    fn deref(&self) -> &Self::Target {
79        &self.0.deref()
80    }
81}
82
83#[async_trait]
84impl FsInspect for InspectedFxFilesystem {
85    fn get_info_data(&self) -> InfoData {
86        let earliest_version = self.0.super_block_header().earliest_version;
87        InfoData {
88            id: self.1,
89            fs_type: fidl_fuchsia_fs::VfsType::Fxfs.into_primitive().into(),
90            name: FXFS_INFO_NAME.into(),
91            version_major: LATEST_VERSION.major.into(),
92            version_minor: LATEST_VERSION.minor.into(),
93            block_size: self.0.block_size() as u64,
94            max_filename_length: fio::MAX_NAME_LENGTH,
95            oldest_version: Some(format!("{}.{}", earliest_version.major, earliest_version.minor)),
96        }
97    }
98
99    async fn get_usage_data(&self) -> UsageData {
100        let info = self.0.get_info();
101        UsageData {
102            total_bytes: info.total_bytes,
103            used_bytes: info.used_bytes,
104            // TODO(https://fxbug.dev/42175930): Should these be moved to per-volume nodes?
105            total_nodes: 0,
106            used_nodes: 0,
107        }
108    }
109}
110
// State held while a filesystem is mounted and being served.
struct RunningState {
    // We have to wrap this in an Arc, even though it itself basically just wraps an Arc, so that
    // FsInspectTree can reference `fs` as a Weak<dyn FsInspect>`.
    fs: Arc<InspectedFxFilesystem>,
    // Directory of mounted volumes; also exported as "volumes" in the
    // component's outgoing directory.
    volumes: Arc<VolumesDirectory>,
    // Held only to keep the debug services alive while running.
    _debug: Arc<FxfsDebug>,
    // Held only to keep inspect data published while running.
    _inspect_tree: Arc<FsInspectTree>,
}
119
// Lifecycle state of the component.
enum State {
    // The component is up but no filesystem has been started yet.
    ComponentStarted,
    // A filesystem is mounted and being served.
    Running(RunningState),
}
124
impl State {
    // Tears down the running filesystem (if any), transitioning back to
    // `ComponentStarted`.  No-op when not running.
    async fn stop(&mut self, outgoing_dir: &vfs::directory::immutable::Simple) {
        if let State::Running(RunningState { fs, volumes, .. }) =
            std::mem::replace(self, State::ComponentStarted)
        {
            info!("Stopping Fxfs runtime; remaining connections will be forcibly closed");
            // Remove the runtime-only nodes; they are re-added by the next
            // successful Start (see `handle_start`).
            let _ = outgoing_dir.remove_entry("volumes", /* must_be_directory: */ false);
            let _ = outgoing_dir.remove_entry("debug", /* must_be_directory: */ false);
            // Terminate volumes before closing the underlying filesystem.
            volumes.terminate().await;
            let _ = fs.deref().close().await;
        }
    }
}
138
139impl Component {
140    pub fn new() -> Arc<Self> {
141        Arc::new(Self {
142            state: Mutex::new(State::ComponentStarted),
143            scope: ExecutionScope::new(),
144            export_dir: vfs::directory::immutable::simple(),
145        })
146    }
147
    /// Runs Fxfs as a component: publishes the Startup, Volumes, Admin,
    /// VolumeInstaller and Debug protocols under `svc/` in `outgoing_dir`,
    /// optionally serves the process Lifecycle protocol, and then waits until
    /// the execution scope is shut down.
    pub async fn run(
        self: Arc<Self>,
        outgoing_dir: zx::Channel,
        lifecycle_channel: Option<zx::Channel>,
    ) -> Result<(), Error> {
        let svc_dir = vfs::directory::immutable::simple();
        self.export_dir.add_entry("svc", svc_dir.clone()).expect("Unable to create svc dir");

        // Each protocol handler captures only a weak reference to the
        // component so open connections do not keep it alive.
        let weak = Arc::downgrade(&self);
        svc_dir.add_entry(
            StartupMarker::PROTOCOL_NAME,
            vfs::service::host(move |requests| {
                let weak = weak.clone();
                async move {
                    if let Some(me) = weak.upgrade() {
                        let _ = me.handle_startup_requests(requests).await;
                    }
                }
            }),
        )?;
        let weak = Arc::downgrade(&self);
        svc_dir.add_entry(
            VolumesMarker::PROTOCOL_NAME,
            vfs::service::host(move |requests| {
                let weak = weak.clone();
                async move {
                    if let Some(me) = weak.upgrade() {
                        me.handle_volumes_requests(requests).await;
                    }
                }
            }),
        )?;

        let weak = Arc::downgrade(&self);
        svc_dir.add_entry(
            AdminMarker::PROTOCOL_NAME,
            vfs::service::host(move |requests| {
                let weak = weak.clone();
                async move {
                    if let Some(me) = weak.upgrade() {
                        let _ = me.handle_admin_requests(requests).await;
                    }
                }
            }),
        )?;

        let weak = Arc::downgrade(&self);
        svc_dir.add_entry(
            VolumeInstallerMarker::PROTOCOL_NAME,
            vfs::service::host(move |requests| {
                let weak = weak.clone();
                async move {
                    if let Some(me) = weak.upgrade() {
                        if let Err(error) = me.handle_volume_installer_requests(requests).await {
                            error!(error:?; "failed to handle VolumeInstaller requests");
                        }
                    }
                }
            }),
        )?;

        // TODO(b/315704445): Only enable in debug builds?
        let weak = Arc::downgrade(&self);
        svc_dir.add_entry(
            DebugMarker::PROTOCOL_NAME,
            vfs::service::host(move |requests| {
                let weak = weak.clone();
                async move {
                    if let Some(me) = weak.upgrade() {
                        let _ = me.handle_debug_requests(requests).await;
                    }
                }
            }),
        )?;

        // Serve the export directory over the provided channel.
        vfs::directory::serve_on(
            self.export_dir.clone(),
            fio::PERM_READABLE | fio::PERM_WRITABLE | fio::PERM_EXECUTABLE,
            self.scope.clone(),
            fidl::endpoints::ServerEnd::new(outgoing_dir),
        );

        if let Some(channel) = lifecycle_channel {
            let me = self.clone();
            self.scope.spawn(async move {
                if let Err(error) = me.handle_lifecycle_requests(channel).await {
                    warn!(error:?; "handle_lifecycle_requests");
                }
            });
        }

        // Block until all connections served on the scope are done.
        self.scope.wait().await;

        Ok(())
    }
244
    /// Dispatches fuchsia.fs.startup.Startup requests (Start/Format/Check) to
    /// their handlers, replying with a raw zx status on failure.
    async fn handle_startup_requests(&self, mut stream: StartupRequestStream) -> Result<(), Error> {
        while let Some(request) = stream.try_next().await? {
            match request {
                StartupRequest::Start { responder, device, options } => {
                    responder.send(self.handle_start(device, options).await.map_err(|error| {
                        error!(error:?; "handle_start failed");
                        map_to_raw_status(error)
                    }))?
                }
                StartupRequest::Format { responder, device, .. } => {
                    responder.send(self.handle_format(device).await.map_err(|error| {
                        error!(error:?; "handle_format failed");
                        map_to_raw_status(error)
                    }))?
                }
                StartupRequest::Check { responder, device, options } => {
                    responder.send(self.handle_check(device, options).await.map_err(|error| {
                        error!(error:?; "handle_check failed");
                        map_to_raw_status(error)
                    }))?
                }
            }
        }
        Ok(())
    }
270
    /// Handles Startup/Start: stops any running instance, opens the filesystem
    /// on `device` per `options`, wires up inspect/memory-pressure/debug
    /// support, publishes the "volumes" and "debug" directories, and records
    /// the new running state.
    async fn handle_start(
        &self,
        device: ClientEnd<BlockMarker>,
        options: StartOptions,
    ) -> Result<(), Error> {
        info!(options:?; "Received start request");
        let mut state = self.state.lock().await;
        // TODO(https://fxbug.dev/42174810): This is not very graceful.  It would be better for the client to
        // explicitly shut down all volumes first, and make this fail if there are remaining active
        // connections.  Fix the bug in fs_test which requires this.
        state.stop(&self.export_dir).await;
        let client = new_block_client(device).await?;

        // TODO(https://fxbug.dev/42063349) Add support for block sizes greater than the page size.
        assert!(client.block_size() <= zx::system_get_page_size());
        assert!((zx::system_get_page_size() as u64) == MIN_BLOCK_SIZE);

        let fs = FxFilesystemBuilder::new()
            .fsck_after_every_transaction(options.fsck_after_every_transaction.unwrap_or(false))
            .read_only(options.read_only.unwrap_or(false))
            .inline_crypto_enabled(options.inline_crypto_enabled.unwrap_or(false))
            .barriers_enabled(options.barriers_enabled.unwrap_or(false))
            .open(DeviceHolder::new(
                BlockDevice::new(client, options.read_only.unwrap_or(false)).await?,
            ))
            .await?;
        let root_volume = root_volume(fs.clone()).await?;
        // Wrap the filesystem so it can feed the inspect tree
        // (see `InspectedFxFilesystem`).
        let fs: Arc<InspectedFxFilesystem> = Arc::new(fs.into());
        let weak_fs = Arc::downgrade(&fs) as Weak<dyn FsInspect + Send + Sync>;
        let inspect_tree =
            Arc::new(FsInspectTree::new(weak_fs, fuchsia_inspect::component::inspector().root()));

        // Publish a VMO-backed page-refault counter to the memory-attribution
        // sink.
        let blob_resupplied_count = Arc::new(PageRefaultCounter::new()?);
        connect_to_protocol::<fattribution::PageRefaultSinkMarker>()?
            .send_page_refault_count(blob_resupplied_count.readonly_vmo()?)?;

        // Memory pressure awareness is best-effort: continue without it if the
        // monitor cannot be started.
        let mem_monitor = match MemoryPressureMonitor::start() {
            Ok(v) => Some(v),
            Err(error) => {
                warn!(
                    error:?;
                    "Failed to connect to memory pressure monitor. Running \
                     without pressure awareness."
                );
                None
            }
        };

        let volumes = VolumesDirectory::new(
            root_volume,
            Arc::downgrade(&inspect_tree),
            mem_monitor,
            blob_resupplied_count,
            MemoryPressureConfig::default(),
        )
        .await?;

        // These entries are removed again by `State::stop`.
        self.export_dir.add_entry_may_overwrite(
            "volumes",
            volumes.directory_node().clone(),
            /* overwrite: */ true,
        )?;

        let debug = FxfsDebug::new(&**fs, &volumes)?;
        self.export_dir.add_entry_may_overwrite("debug", debug.root(), true)?;

        fs.allocator().track_statistics(&metrics::detail(), "allocator");
        fs.journal().track_statistics(&metrics::detail(), "journal");
        fs.object_manager().track_statistics(&metrics::detail(), "object_manager");

        let info = fs.get_info();
        info!(
            device_size = info.total_bytes,
            used = info.used_bytes,
            free = info.total_bytes - info.used_bytes;
            "Mounted"
        );

        if let Some(profile_time) = options.startup_profiling_seconds {
            // Unwrap ok, shouldn't have anything else recording or replaying this early in startup.
            volumes
                .clone()
                .record_or_replay_profile(".boot".to_owned(), profile_time)
                .await
                .unwrap();
        }

        *state = State::Running(RunningState {
            fs,
            volumes,
            _debug: debug,
            _inspect_tree: inspect_tree,
        });

        Ok(())
    }
367
368    async fn handle_format(&self, device: ClientEnd<BlockMarker>) -> Result<(), Error> {
369        let device = DeviceHolder::new(
370            BlockDevice::new(new_block_client(device).await?, /* read_only: */ false).await?,
371        );
372        mkfs(device).await?;
373        info!("Formatted filesystem");
374        Ok(())
375    }
376
    /// Handles Startup/Check: runs fsck against the already-running filesystem
    /// if there is one, otherwise opens `device` read-only for the check.
    async fn handle_check(
        &self,
        device: ClientEnd<BlockMarker>,
        _options: CheckOptions,
    ) -> Result<(), Error> {
        let state = self.state.lock().await;
        // `fs_container` is only populated when we opened the device ourselves
        // and therefore must close it again afterwards.
        let (fs_container, fs) = match *state {
            State::ComponentStarted => {
                let client = new_block_client(device).await?;
                let fs_container = FxFilesystemBuilder::new()
                    .read_only(true)
                    .open(DeviceHolder::new(BlockDevice::new(client, /* read_only: */ true).await?))
                    .await?;
                let fs = fs_container.clone();
                (Some(fs_container), fs)
            }
            State::Running(RunningState { ref fs, .. }) => (None, fs.deref().deref().clone()),
        };
        let res = fsck::fsck(fs.clone()).await;
        // Close before propagating any fsck error.
        if let Some(fs_container) = fs_container {
            let _ = fs_container.close().await;
        }
        // TODO(b/311550633): Stash ok res in inspect.
        info!("handle_check for fs: {:?}", res?);
        Ok(())
    }
403
404    async fn handle_admin_requests(&self, mut stream: AdminRequestStream) -> Result<(), Error> {
405        while let Some(request) = stream.try_next().await.context("Reading request")? {
406            if self.handle_admin(request).await? {
407                break;
408            }
409        }
410        Ok(())
411    }
412
    // Handles a single fuchsia.fs.Admin request.
    // Returns true if we should close the connection.
    async fn handle_admin(&self, req: AdminRequest) -> Result<bool, Error> {
        match req {
            AdminRequest::Shutdown { responder } => {
                info!("Received shutdown request");
                self.shutdown().await;
                // Best-effort reply: the client may have already gone away.
                responder
                    .send()
                    .unwrap_or_else(|error| warn!(error:?; "Failed to send shutdown response"));
                return Ok(true);
            }
        }
    }
426
    /// Handles fuchsia.fxfs.Debug requests, providing live debugging internals of the running
    /// filesystem.  Bails out if no filesystem is running.
    async fn handle_debug_requests(&self, mut stream: DebugRequestStream) -> Result<(), Error> {
        while let Some(request) = stream.try_next().await.context("Reading request")? {
            // Note: the state lock is held for the duration of each request.
            let state = self.state.lock().await;
            let (fs, volumes) = match &*state {
                State::ComponentStarted => {
                    info!("Debug commands are not valid unless component is started.");
                    bail!("Component not started");
                }
                State::Running(RunningState { fs, volumes, .. }) => {
                    (fs.deref().deref().clone(), volumes.clone())
                }
            };
            handle_debug_request(fs, volumes, request).await?;
        }
        Ok(())
    }
445
446    async fn shutdown(&self) {
447        self.state.lock().await.stop(&self.export_dir).await;
448        info!("Filesystem terminated");
449    }
450
451    async fn handle_volumes_requests(&self, mut stream: VolumesRequestStream) {
452        let volumes =
453            if let State::Running(RunningState { volumes, .. }) = &*self.state.lock().await {
454                volumes.clone()
455            } else {
456                let _ = stream.into_inner().0.shutdown_with_epitaph(zx::Status::BAD_STATE);
457                return;
458            };
459        while let Ok(Some(request)) = stream.try_next().await {
460            match request {
461                VolumesRequest::Create {
462                    name,
463                    outgoing_directory,
464                    create_options,
465                    mount_options,
466                    responder,
467                } => {
468                    info!(
469                        name = name.as_str();
470                        "Create {}volume",
471                        if mount_options.crypt.is_some() { "encrypted " } else { "" }
472                    );
473                    responder
474                        .send(
475                            volumes
476                                .create_and_serve_volume(
477                                    &name,
478                                    outgoing_directory.into_channel().into(),
479                                    mount_options,
480                                    create_options,
481                                )
482                                .await
483                                .map_err(map_to_raw_status),
484                        )
485                        .unwrap_or_else(
486                            |error| warn!(error:?; "Failed to send volume creation response"),
487                        );
488                }
489                VolumesRequest::Remove { name, responder } => {
490                    info!(name = name.as_str(); "Remove volume");
491                    responder
492                        .send(volumes.remove_volume(&name).await.map_err(map_to_raw_status))
493                        .unwrap_or_else(
494                            |error| warn!(error:?; "Failed to send volume removal response"),
495                        );
496                }
497                VolumesRequest::GetInfo { responder } => {
498                    responder.send(Err(zx::Status::NOT_SUPPORTED.into_raw())).unwrap_or_else(
499                        |error| warn!(error:?; "Failed to send volume removal response"),
500                    )
501                }
502            }
503        }
504    }
505
506    async fn handle_lifecycle_requests(&self, lifecycle_channel: zx::Channel) -> Result<(), Error> {
507        let mut stream =
508            LifecycleRequestStream::from_channel(fasync::Channel::from_channel(lifecycle_channel));
509        match stream.try_next().await.context("Reading request")? {
510            Some(LifecycleRequest::Stop { .. }) => {
511                info!("Received Lifecycle::Stop request");
512                self.shutdown().await;
513            }
514            None => {}
515        }
516        Ok(())
517    }
518
    /// Serves fuchsia.fxfs.VolumeInstaller requests.  If no filesystem is
    /// running, the connection is closed with a BAD_STATE epitaph and an error
    /// is returned.
    async fn handle_volume_installer_requests(
        &self,
        mut stream: VolumeInstallerRequestStream,
    ) -> Result<(), Error> {
        let volumes =
            if let State::Running(RunningState { volumes, .. }) = &*self.state.lock().await {
                volumes.clone()
            } else {
                let _ = stream.into_inner().0.shutdown_with_epitaph(zx::Status::BAD_STATE);
                return Err(zx::Status::BAD_STATE).context("fxfs component not running");
            };
        while let Some(request) = stream.try_next().await.context("reading request")? {
            match request {
                VolumeInstallerRequest::Install { src, image_file, dst, responder } => {
                    let response = volumes.install_volume(&src, &image_file, &dst).await;
                    responder.send(response.map_err(|error| {
                        error!(error:?; "install failed");
                        map_to_raw_status(error)
                    }))?;
                }
            }
        }
        Ok(())
    }
543}
544
545#[cfg(test)]
546mod tests {
547    use super::{Component, new_block_client};
548    use fidl::endpoints::Proxy;
549    use fidl_fuchsia_fs::AdminMarker;
550    use fidl_fuchsia_fs_startup::{
551        CreateOptions, MountOptions, StartOptions, StartupMarker, VolumesMarker,
552    };
553    use fidl_fuchsia_process_lifecycle::{LifecycleMarker, LifecycleProxy};
554    use fuchsia_component_client::connect_to_protocol_at_dir_svc;
555    use fuchsia_fs::directory::readdir;
556    use futures::future::{BoxFuture, FusedFuture, FutureExt};
557    use futures::{pin_mut, select};
558    use fxfs::filesystem::FxFilesystem;
559    use fxfs::object_store::volume::root_volume;
560    use fxfs::object_store::{NewChildStoreOptions, StoreOptions};
561    use std::pin::Pin;
562    use std::sync::Arc;
563    use storage_device::DeviceHolder;
564    use storage_device::block_device::BlockDevice;
565    use vmo_backed_block_server::{
566        InitialContents, VmoBackedServer, VmoBackedServerOptions, VmoBackedServerTestingExt,
567    };
568    use {fidl_fuchsia_io as fio, fuchsia_async as fasync};
569
    /// Test scaffolding: creates a VMO-backed block device, formats it with an
    /// Fxfs filesystem containing an empty "default" volume, runs the
    /// component against it and starts the filesystem, then invokes `callback`
    /// with the component's outgoing directory and a Lifecycle proxy.  Returns
    /// the fused component future (which may still be pending) and the block
    /// server.
    async fn run_test(
        callback: impl Fn(&fio::DirectoryProxy, LifecycleProxy) -> BoxFuture<'static, ()>,
    ) -> (Pin<Box<impl FusedFuture>>, Arc<VmoBackedServer>) {
        const BLOCK_SIZE: u32 = 512;
        let block_server = Arc::new(
            VmoBackedServerOptions {
                block_size: BLOCK_SIZE,
                initial_contents: InitialContents::FromCapacity(BLOCK_SIZE as u64 * 16384),
                ..Default::default()
            }
            .build()
            .expect("build failed"),
        );

        // Pre-format the device with a "default" volume, then close it so the
        // component can re-open it below.
        {
            let fs = FxFilesystem::new_empty(DeviceHolder::new(
                BlockDevice::new(
                    new_block_client(block_server.connect())
                        .await
                        .expect("Unable to create block client"),
                    false,
                )
                .await
                .unwrap(),
            ))
            .await
            .expect("FxFilesystem::new_empty failed");
            {
                let root_volume = root_volume(fs.clone()).await.expect("Open root_volume failed");
                root_volume
                    .new_volume("default", NewChildStoreOptions::default())
                    .await
                    .expect("Create volume failed");
            }
            fs.close().await.expect("close failed");
        }

        let (client_end, server_end) = fidl::endpoints::create_proxy::<fio::DirectoryMarker>();

        let (lifecycle_client, lifecycle_server) =
            fidl::endpoints::create_proxy::<LifecycleMarker>();

        let mut component_task = Box::pin(
            async {
                Component::new()
                    .run(server_end.into_channel(), Some(lifecycle_server.into_channel()))
                    .await
                    .expect("Failed to run component");
            }
            .fuse(),
        );

        let startup_proxy = connect_to_protocol_at_dir_svc::<StartupMarker>(&client_end)
            .expect("Unable to connect to Startup protocol");
        let block_server_connection = block_server.connect();
        let task = async {
            startup_proxy
                .start(block_server_connection, &StartOptions::default())
                .await
                .expect("Start failed (FIDL)")
                .expect("Start failed");
            callback(&client_end, lifecycle_client).await;
        }
        .fuse();

        pin_mut!(task);

        // Poll the component alongside the test body until the body completes;
        // the component future may remain pending after the loop.
        loop {
            select! {
                () = component_task => {},
                () = task => break,
            }
        }

        (component_task, block_server)
    }
646
    /// Admin/Shutdown stops the filesystem but the component keeps serving its
    /// outgoing directory, so the component future must still be pending.
    #[fuchsia::test(threads = 2)]
    async fn test_shutdown() {
        let (component_task, _block_server) = run_test(|client, _| {
            let admin_proxy = connect_to_protocol_at_dir_svc::<AdminMarker>(client)
                .expect("Unable to connect to Admin protocol");
            async move {
                admin_proxy.shutdown().await.expect("shutdown failed");
            }
            .boxed()
        })
        .await;
        assert!(!component_task.is_terminated());
    }
660
    /// Lifecycle/Stop shuts the filesystem down; once handled, the lifecycle
    /// stream is dropped (closing the channel) and the component terminates.
    #[fuchsia::test(threads = 2)]
    async fn test_lifecycle_stop() {
        let (component_task, _block_server) = run_test(|_, lifecycle_client| {
            lifecycle_client.stop().expect("Stop failed");
            async move {
                // Wait for the component to drop its end of the channel.
                fasync::OnSignals::new(
                    &lifecycle_client.into_channel().expect("into_channel failed"),
                    zx::Signals::CHANNEL_PEER_CLOSED,
                )
                .await
                .expect("OnSignals failed");
            }
            .boxed()
        })
        .await;
        component_task.await;
    }
678
    /// Exercises the volume create/remove lifecycle: removing a mounted volume
    /// fails, duplicate names fail, and a removed name can be reused.
    #[fuchsia::test(threads = 2)]
    async fn test_create_and_remove() {
        let (component_task, _block_server) = run_test(|client, _| {
            let volumes_proxy = connect_to_protocol_at_dir_svc::<VolumesMarker>(client)
                .expect("Unable to connect to Volumes protocol");

            let fs_admin_proxy = connect_to_protocol_at_dir_svc::<AdminMarker>(client)
                .expect("Unable to connect to Admin protocol");

            async move {
                let (dir_proxy, server_end) =
                    fidl::endpoints::create_proxy::<fio::DirectoryMarker>();
                volumes_proxy
                    .create("test", server_end, CreateOptions::default(), MountOptions::default())
                    .await
                    .expect("fidl failed")
                    .expect("create failed");

                // This should fail whilst the volume is mounted.
                volumes_proxy
                    .remove("test")
                    .await
                    .expect("fidl failed")
                    .expect_err("remove succeeded");

                // Unmount the volume via its own Admin protocol.
                let volume_admin_proxy = connect_to_protocol_at_dir_svc::<AdminMarker>(&dir_proxy)
                    .expect("Unable to connect to Admin protocol");
                volume_admin_proxy.shutdown().await.expect("shutdown failed");

                // Creating another volume with the same name should fail.
                let (_dir_proxy, server_end) =
                    fidl::endpoints::create_proxy::<fio::DirectoryMarker>();
                volumes_proxy
                    .create("test", server_end, CreateOptions::default(), MountOptions::default())
                    .await
                    .expect("fidl failed")
                    .expect_err("create succeeded");

                volumes_proxy.remove("test").await.expect("fidl failed").expect("remove failed");

                // Removing a non-existent volume should fail.
                volumes_proxy
                    .remove("test")
                    .await
                    .expect("fidl failed")
                    .expect_err("remove failed");

                // Create the same volume again and it should now succeed.
                let (_dir_proxy, server_end) =
                    fidl::endpoints::create_proxy::<fio::DirectoryMarker>();
                volumes_proxy
                    .create("test", server_end, CreateOptions::default(), MountOptions::default())
                    .await
                    .expect("fidl failed")
                    .expect("create failed");

                fs_admin_proxy.shutdown().await.expect("shutdown failed");
            }
            .boxed()
        })
        .await;
        component_task.await;
    }
742
    /// Verifies that mounted volumes appear as entries under the exported
    /// "volumes" directory.
    #[fuchsia::test(threads = 2)]
    async fn test_volumes_enumeration() {
        let (component_task, _block_server) = run_test(|client, _| {
            let volumes_proxy = connect_to_protocol_at_dir_svc::<VolumesMarker>(client)
                .expect("Unable to connect to Volumes protocol");

            // Open the "volumes" directory published in the outgoing dir.
            let (volumes_dir_proxy, server_end) =
                fidl::endpoints::create_proxy::<fio::DirectoryMarker>();
            client
                .open("volumes", fio::PERM_READABLE, &Default::default(), server_end.into_channel())
                .expect("open failed");

            let fs_admin_proxy = connect_to_protocol_at_dir_svc::<AdminMarker>(client)
                .expect("Unable to connect to Admin protocol");

            async move {
                let (_dir_proxy, server_end) =
                    fidl::endpoints::create_proxy::<fio::DirectoryMarker>();
                volumes_proxy
                    .create("test", server_end, CreateOptions::default(), MountOptions::default())
                    .await
                    .expect("fidl failed")
                    .expect("create failed");

                // "default" was created by run_test; "test" just above.
                let entries = readdir(&volumes_dir_proxy).await.expect("readdir failed");
                let mut entry_names = entries.iter().map(|d| d.name.as_str()).collect::<Vec<_>>();
                entry_names.sort();
                assert_eq!(entry_names, ["default", "test"]);

                fs_admin_proxy.shutdown().await.expect("shutdown failed");
            }
            .boxed()
        })
        .await;
        component_task.await;
    }
779
780    #[fuchsia::test(threads = 2)]
781    async fn test_create_with_guid() {
782        let (component_task, block_server) = run_test(|client, _| {
783            let volumes_proxy = connect_to_protocol_at_dir_svc::<VolumesMarker>(client)
784                .expect("Unable to connect to Volumes protocol");
785            let admin_proxy = connect_to_protocol_at_dir_svc::<AdminMarker>(client)
786                .expect("Unable to connect to Admin protocol");
787            async move {
788                let (_dir_proxy, server_end) =
789                    fidl::endpoints::create_proxy::<fio::DirectoryMarker>();
790                let guid = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
791                volumes_proxy
792                    .create(
793                        "test_guid",
794                        server_end,
795                        CreateOptions { guid: Some(guid), ..Default::default() },
796                        MountOptions::default(),
797                    )
798                    .await
799                    .expect("fidl failed")
800                    .expect("create failed");
801                admin_proxy.shutdown().await.expect("shutdown failed");
802            }
803            .boxed()
804        })
805        .await;
806        component_task.await;
807
808        // Verify the GUID
809        let fs = FxFilesystem::open(DeviceHolder::new(
810            BlockDevice::new(
811                new_block_client(block_server.connect())
812                    .await
813                    .expect("Unable to create block client"),
814                false,
815            )
816            .await
817            .unwrap(),
818        ))
819        .await
820        .expect("FxFilesystem::open failed");
821        {
822            let root_volume = root_volume(fs.clone()).await.expect("Open root_volume failed");
823            let vol = root_volume
824                .volume("test_guid", StoreOptions::default())
825                .await
826                .expect("Open volume failed");
827            assert_eq!(vol.guid(), [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]);
828        }
829        fs.close().await.expect("close failed");
830    }
831}