// Copyright 2022 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
4
5use crate::fuchsia::debug::{FxfsDebug, handle_debug_request};
6use crate::fuchsia::errors::map_to_status;
7use crate::fuchsia::memory_pressure::MemoryPressureMonitor;
8use crate::fuchsia::power::FuchsiaPowerManager;
9use crate::fuchsia::volume::MemoryPressureConfig;
10use crate::fuchsia::volumes_directory::VolumesDirectory;
11use anyhow::{Context, Error, bail};
12use async_trait::async_trait;
13use block_client::{BlockClient as _, RemoteBlockClient};
14use fidl::endpoints::{ClientEnd, DiscoverableProtocolMarker, RequestStream};
15use fidl_fuchsia_fs::{AdminMarker, AdminRequest, AdminRequestStream};
16use fidl_fuchsia_fs_startup::{
17    CheckOptions, StartOptions, StartupMarker, StartupRequest, StartupRequestStream, VolumesMarker,
18    VolumesRequest, VolumesRequestStream,
19};
20use fidl_fuchsia_fxfs::{
21    DebugMarker, DebugRequestStream, VolumeInstallerMarker, VolumeInstallerRequest,
22    VolumeInstallerRequestStream,
23};
24use fidl_fuchsia_process_lifecycle::{LifecycleRequest, LifecycleRequestStream};
25use fidl_fuchsia_storage_block::BlockMarker;
26use fs_inspect::{FsInspect, FsInspectTree, InfoData, UsageData};
27use fuchsia_component_client::connect_to_protocol;
28use futures::TryStreamExt;
29use futures::lock::Mutex;
30use fxfs::filesystem::{FxFilesystem, FxFilesystemBuilder, MIN_BLOCK_SIZE, OpenFxFilesystem, mkfs};
31use fxfs::log::*;
32use fxfs::object_store::volume::root_volume;
33use fxfs::serialized_types::LATEST_VERSION;
34use fxfs::{fsck, metrics};
35use fxfs_trace::{TraceFutureExt, trace_future_args};
36use refaults_vmo::PageRefaultCounter;
37use std::ops::Deref;
38use std::sync::{Arc, Weak};
39use storage_device::DeviceHolder;
40use storage_device::block_device::BlockDevice;
41use vfs::directory::helper::DirectlyMutable;
42use vfs::execution_scope::ExecutionScope;
43use {
44    fidl_fuchsia_io as fio, fidl_fuchsia_memory_attribution as fattribution,
45    fuchsia_async as fasync,
46};
47
/// Filesystem name reported through `fuchsia.fs` info queries.
// `'static` is implied on const references; the explicit lifetime was redundant.
const FXFS_INFO_NAME: &str = "fxfs";
49
50pub fn map_to_raw_status(e: Error) -> zx::sys::zx_status_t {
51    map_to_status(e).into_raw()
52}
53
54pub async fn new_block_client(remote: ClientEnd<BlockMarker>) -> Result<RemoteBlockClient, Error> {
55    Ok(RemoteBlockClient::new(remote.into_proxy()).await?)
56}
57
/// Runs Fxfs as a component.
pub struct Component {
    // Current lifecycle state: started (idle) or running with a mounted
    // filesystem.  A `futures` mutex because it is held across `.await`s.
    state: futures::lock::Mutex<State>,

    // The execution scope of the pseudo filesystem.
    scope: ExecutionScope,

    // The root of the pseudo filesystem for the component.
    export_dir: Arc<vfs::directory::immutable::Simple>,
}
68
// Wrapper type to add `FsInspect` support to an `OpenFxFilesystem`.
struct InspectedFxFilesystem(OpenFxFilesystem, /*fs_id=*/ u64);

impl From<OpenFxFilesystem> for InspectedFxFilesystem {
    fn from(fs: OpenFxFilesystem) -> Self {
        // The koid of a freshly created event is used as a cheap,
        // system-unique filesystem id for inspect reporting.
        Self(fs, zx::Event::create().koid().unwrap().raw_koid())
    }
}
77
78impl Deref for InspectedFxFilesystem {
79    type Target = Arc<FxFilesystem>;
80    fn deref(&self) -> &Self::Target {
81        &self.0.deref()
82    }
83}
84
#[async_trait]
impl FsInspect for InspectedFxFilesystem {
    /// Static filesystem facts for the inspect tree (id, version, block size).
    fn get_info_data(&self) -> InfoData {
        let earliest_version = self.0.super_block_header().earliest_version;
        InfoData {
            // Unique id generated at mount time (see `From<OpenFxFilesystem>`).
            id: self.1,
            fs_type: fidl_fuchsia_fs::VfsType::Fxfs.into_primitive().into(),
            name: FXFS_INFO_NAME.into(),
            version_major: LATEST_VERSION.major.into(),
            version_minor: LATEST_VERSION.minor.into(),
            block_size: self.0.block_size() as u64,
            max_filename_length: fio::MAX_NAME_LENGTH,
            oldest_version: Some(format!("{}.{}", earliest_version.major, earliest_version.minor)),
        }
    }

    /// Current byte usage.  Node counts are not tracked at this level.
    async fn get_usage_data(&self) -> UsageData {
        let info = self.0.get_info();
        UsageData {
            total_bytes: info.total_bytes,
            used_bytes: info.used_bytes,
            // TODO(https://fxbug.dev/42175930): Should these be moved to per-volume nodes?
            total_nodes: 0,
            used_nodes: 0,
        }
    }
}
112
// Everything that exists only while a filesystem is mounted.
struct RunningState {
    // We have to wrap this in an Arc, even though it itself basically just wraps an Arc, so that
    // FsInspectTree can reference `fs` as a Weak<dyn FsInspect>`.
    fs: Arc<InspectedFxFilesystem>,
    // Serves all mounted volumes; also exported as the "volumes" entry.
    volumes: Arc<VolumesDirectory>,
    // Held only to keep the "debug" directory alive.
    _debug: Arc<FxfsDebug>,
    // Held only to keep the inspect hierarchy populated.
    _inspect_tree: Arc<FsInspectTree>,
}
121
// Component lifecycle: started but idle, or running a mounted filesystem.
enum State {
    ComponentStarted,
    Running(RunningState),
}
126
impl State {
    /// Tears down a running filesystem (no-op when already stopped): removes
    /// the "volumes" and "debug" entries from `outgoing_dir`, terminates all
    /// volumes, then closes the filesystem.  Leaves `self` as
    /// `ComponentStarted`.
    async fn stop(&mut self, outgoing_dir: &vfs::directory::immutable::Simple) {
        // `mem::replace` moves the running state out so it can be consumed
        // here while `self` immediately reflects the stopped state.
        if let State::Running(RunningState { fs, volumes, .. }) =
            std::mem::replace(self, State::ComponentStarted)
        {
            info!("Stopping Fxfs runtime; remaining connections will be forcibly closed");
            // Entries are removed first so no new connections arrive mid-teardown.
            let _ = outgoing_dir.remove_entry("volumes", /* must_be_directory: */ false);
            let _ = outgoing_dir.remove_entry("debug", /* must_be_directory: */ false);
            volumes.terminate().await;
            // Close errors are ignored; we are tearing down regardless.
            let _ = fs.deref().close().await;
        }
    }
}
140
141impl Component {
142    pub fn new() -> Arc<Self> {
143        Arc::new(Self {
144            state: Mutex::new(State::ComponentStarted),
145            scope: ExecutionScope::new(),
146            export_dir: vfs::directory::immutable::simple(),
147        })
148    }
149
    /// Runs Fxfs as a component.
    ///
    /// Publishes the `Startup`, `Volumes`, `Admin`, `VolumeInstaller` and
    /// `Debug` protocols under `svc/` in `outgoing_dir`, optionally serves the
    /// process `Lifecycle` protocol on `lifecycle_channel`, and returns only
    /// after the execution scope has drained (i.e. after shutdown).
    pub async fn run(
        self: Arc<Self>,
        outgoing_dir: zx::Channel,
        lifecycle_channel: Option<zx::Channel>,
    ) -> Result<(), Error> {
        let svc_dir = vfs::directory::immutable::simple();
        self.export_dir.add_entry("svc", svc_dir.clone()).expect("Unable to create svc dir");

        // Each protocol handler below captures only a weak reference to
        // `self`, so open connections do not keep the component alive.
        let weak = Arc::downgrade(&self);
        svc_dir.add_entry(
            StartupMarker::PROTOCOL_NAME,
            vfs::service::host(move |requests| {
                let weak = weak.clone();
                async move {
                    if let Some(me) = weak.upgrade() {
                        let _ = me.handle_startup_requests(requests).await;
                    }
                }
            }),
        )?;
        let weak = Arc::downgrade(&self);
        svc_dir.add_entry(
            VolumesMarker::PROTOCOL_NAME,
            vfs::service::host(move |requests| {
                let weak = weak.clone();
                async move {
                    if let Some(me) = weak.upgrade() {
                        me.handle_volumes_requests(requests).await;
                    }
                }
            }),
        )?;

        let weak = Arc::downgrade(&self);
        svc_dir.add_entry(
            AdminMarker::PROTOCOL_NAME,
            vfs::service::host(move |requests| {
                let weak = weak.clone();
                async move {
                    if let Some(me) = weak.upgrade() {
                        let _ = me.handle_admin_requests(requests).await;
                    }
                }
            }),
        )?;

        let weak = Arc::downgrade(&self);
        svc_dir.add_entry(
            VolumeInstallerMarker::PROTOCOL_NAME,
            vfs::service::host(move |requests| {
                let weak = weak.clone();
                async move {
                    if let Some(me) = weak.upgrade() {
                        if let Err(error) = me.handle_volume_installer_requests(requests).await {
                            error!(error:?; "failed to handle VolumeInstaller requests");
                        }
                    }
                }
            }),
        )?;

        // TODO(b/315704445): Only enable in debug builds?
        let weak = Arc::downgrade(&self);
        svc_dir.add_entry(
            DebugMarker::PROTOCOL_NAME,
            vfs::service::host(move |requests| {
                let weak = weak.clone();
                async move {
                    if let Some(me) = weak.upgrade() {
                        let _ = me.handle_debug_requests(requests).await;
                    }
                }
            }),
        )?;

        // Serve the export directory on the component's outgoing directory.
        vfs::directory::serve_on(
            self.export_dir.clone(),
            fio::PERM_READABLE | fio::PERM_WRITABLE | fio::PERM_EXECUTABLE,
            self.scope.clone(),
            fidl::endpoints::ServerEnd::new(outgoing_dir),
        );

        if let Some(channel) = lifecycle_channel {
            let me = self.clone();
            self.scope.spawn(async move {
                if let Err(error) = me.handle_lifecycle_requests(channel).await {
                    warn!(error:?; "handle_lifecycle_requests");
                }
            });
        }

        // Block until every task spawned on the scope has completed.
        self.scope.wait().await;

        Ok(())
    }
246
    /// Serves `fuchsia.fs.startup.Startup` requests (Start/Format/Check)
    /// until the stream ends.  Errors from the underlying handlers are logged
    /// and mapped to raw status codes in the FIDL reply.
    async fn handle_startup_requests(&self, mut stream: StartupRequestStream) -> Result<(), Error> {
        while let Some(request) = stream.try_next().await? {
            match request {
                StartupRequest::Start { responder, device, options } => {
                    async move {
                        responder.send(self.handle_start(device, options).await.map_err(|error| {
                            error!(error:?; "handle_start failed");
                            map_to_raw_status(error)
                        }))
                    }
                    .trace(trace_future_args!("Startup::Start"))
                    .await?
                }
                StartupRequest::Format { responder, device, .. } => {
                    async move {
                        responder.send(self.handle_format(device).await.map_err(|error| {
                            error!(error:?; "handle_format failed");
                            map_to_raw_status(error)
                        }))
                    }
                    .trace(trace_future_args!("Startup::Format"))
                    .await?
                }
                StartupRequest::Check { responder, device, options } => {
                    async move {
                        responder.send(self.handle_check(device, options).await.map_err(|error| {
                            error!(error:?; "handle_check failed");
                            map_to_raw_status(error)
                        }))
                    }
                    .trace(trace_future_args!("Startup::Check"))
                    .await?
                }
            }
        }
        Ok(())
    }
284
    /// Opens the filesystem on `device` and transitions to `State::Running`.
    ///
    /// Any previously running instance is stopped first.  Also wires up
    /// inspect, page-refault accounting, best-effort memory-pressure
    /// monitoring, and the "volumes"/"debug" entries in the export directory.
    async fn handle_start(
        &self,
        device: ClientEnd<BlockMarker>,
        options: StartOptions,
    ) -> Result<(), Error> {
        info!(options:?; "Received start request");
        // The state lock is held for the whole mount so concurrent
        // start/stop/check requests are serialized.
        let mut state = self.state.lock().await;
        // TODO(https://fxbug.dev/42174810): This is not very graceful.  It would be better for the client to
        // explicitly shut down all volumes first, and make this fail if there are remaining active
        // connections.  Fix the bug in fs_test which requires this.
        state.stop(&self.export_dir).await;
        let client = new_block_client(device).await?;

        // TODO(https://fxbug.dev/42063349) Add support for block sizes greater than the page size.
        assert!(client.block_size() <= zx::system_get_page_size());
        assert!((zx::system_get_page_size() as u64) == MIN_BLOCK_SIZE);

        let fs = FxFilesystemBuilder::new()
            .fsck_after_every_transaction(options.fsck_after_every_transaction.unwrap_or(false))
            .read_only(options.read_only.unwrap_or(false))
            .inline_crypto_enabled(options.inline_crypto_enabled.unwrap_or(false))
            .barriers_enabled(options.barriers_enabled.unwrap_or(false))
            .power_manager(FuchsiaPowerManager::new())
            .open(DeviceHolder::new(
                BlockDevice::new(client, options.read_only.unwrap_or(false)).await?,
            ))
            .await?;
        let root_volume = root_volume(fs.clone()).await?;
        // Wrap the filesystem so it can feed the inspect tree (see
        // `InspectedFxFilesystem`), then hand a weak ref to `FsInspectTree`.
        let fs: Arc<InspectedFxFilesystem> = Arc::new(fs.into());
        let weak_fs = Arc::downgrade(&fs) as Weak<dyn FsInspect + Send + Sync>;
        let inspect_tree =
            Arc::new(FsInspectTree::new(weak_fs, fuchsia_inspect::component::inspector().root()));

        // Publish a VMO-backed page-refault counter for memory attribution.
        let blob_resupplied_count = Arc::new(PageRefaultCounter::new()?);
        connect_to_protocol::<fattribution::PageRefaultSinkMarker>()?
            .send_page_refault_count(blob_resupplied_count.readonly_vmo()?)?;

        // Memory-pressure awareness is best-effort: run without it on failure.
        let mem_monitor = match MemoryPressureMonitor::start() {
            Ok(v) => Some(v),
            Err(error) => {
                warn!(
                    error:?;
                    "Failed to connect to memory pressure monitor. Running \
                     without pressure awareness."
                );
                None
            }
        };

        let volumes = VolumesDirectory::new(
            root_volume,
            Arc::downgrade(&inspect_tree),
            mem_monitor,
            blob_resupplied_count,
            MemoryPressureConfig::default(),
        )
        .await?;

        // `may_overwrite` because a stale entry can remain from a previous run.
        self.export_dir.add_entry_may_overwrite(
            "volumes",
            volumes.directory_node().clone(),
            /* overwrite: */ true,
        )?;

        let debug = FxfsDebug::new(&**fs, &volumes)?;
        self.export_dir.add_entry_may_overwrite("debug", debug.root(), true)?;

        fs.allocator().track_statistics(&metrics::detail(), "allocator");
        fs.journal().track_statistics(&metrics::detail(), "journal");
        fs.object_manager().track_statistics(&metrics::detail(), "object_manager");

        let info = fs.get_info();
        info!(
            device_size = info.total_bytes,
            used = info.used_bytes,
            free = info.total_bytes - info.used_bytes;
            "Mounted"
        );

        if let Some(profile_time) = options.startup_profiling_seconds {
            // Unwrap ok, shouldn't have anything else recording or replaying this early in startup.
            volumes.record_or_replay_profile(None, ".boot".to_owned(), profile_time).await.unwrap();
        }

        *state = State::Running(RunningState {
            fs,
            volumes,
            _debug: debug,
            _inspect_tree: inspect_tree,
        });

        Ok(())
    }
378
379    async fn handle_format(&self, device: ClientEnd<BlockMarker>) -> Result<(), Error> {
380        let device = DeviceHolder::new(
381            BlockDevice::new(new_block_client(device).await?, /* read_only: */ false).await?,
382        );
383        mkfs(device).await?;
384        info!("Formatted filesystem");
385        Ok(())
386    }
387
    /// Runs fsck.  If the component is already running, the mounted
    /// filesystem is checked in place; otherwise `device` is opened read-only
    /// for the duration of the check and closed again afterwards.
    async fn handle_check(
        &self,
        device: ClientEnd<BlockMarker>,
        _options: CheckOptions,
    ) -> Result<(), Error> {
        // Holding the state lock prevents a concurrent start/stop during fsck.
        let state = self.state.lock().await;
        let (fs_container, fs) = match *state {
            State::ComponentStarted => {
                let client = new_block_client(device).await?;
                let fs_container = FxFilesystemBuilder::new()
                    .read_only(true)
                    .open(DeviceHolder::new(BlockDevice::new(client, /* read_only: */ true).await?))
                    .await?;
                let fs = fs_container.clone();
                (Some(fs_container), fs)
            }
            State::Running(RunningState { ref fs, .. }) => (None, fs.deref().deref().clone()),
        };
        let res = fsck::fsck(fs.clone()).await;
        // Only close the filesystem if we opened it ourselves above.
        if let Some(fs_container) = fs_container {
            let _ = fs_container.close().await;
        }
        // TODO(b/311550633): Stash ok res in inspect.
        info!("handle_check for fs: {:?}", res?);
        Ok(())
    }
414
415    async fn handle_admin_requests(&self, mut stream: AdminRequestStream) -> Result<(), Error> {
416        while let Some(request) = stream.try_next().await.context("Reading request")? {
417            if self.handle_admin(request).await? {
418                break;
419            }
420        }
421        Ok(())
422    }
423
    // Returns true if we should close the connection.
    async fn handle_admin(&self, req: AdminRequest) -> Result<bool, Error> {
        match req {
            AdminRequest::Shutdown { responder } => {
                async move {
                    info!("Received shutdown request");
                    self.shutdown().await;
                    // Best-effort reply: the client may already have gone away.
                    responder
                        .send()
                        .unwrap_or_else(|error| warn!(error:?; "Failed to send shutdown response"));
                }
                .trace(trace_future_args!("Admin::Shutdown"))
                .await;
                return Ok(true);
            }
        }
    }
441
    /// Handles fuchsia.fxfs.Debug requests, providing live debugging internals of the running
    /// filesystem.  Bails out if the component has not been started.
    async fn handle_debug_requests(&self, mut stream: DebugRequestStream) -> Result<(), Error> {
        while let Some(request) = stream.try_next().await.context("Reading request")? {
            // The state lock is re-taken per request so an interleaved
            // stop/start is observed by subsequent requests.
            let state = self.state.lock().await;
            let (fs, volumes) = match &*state {
                State::ComponentStarted => {
                    info!("Debug commands are not valid unless component is started.");
                    bail!("Component not started");
                }
                State::Running(RunningState { fs, volumes, .. }) => {
                    (fs.deref().deref().clone(), volumes.clone())
                }
            };
            handle_debug_request(fs, volumes, request).await?;
        }
        Ok(())
    }
460
461    async fn shutdown(&self) {
462        self.state.lock().await.stop(&self.export_dir).await;
463        info!("Filesystem terminated");
464    }
465
466    async fn handle_volumes_requests(&self, mut stream: VolumesRequestStream) {
467        let volumes =
468            if let State::Running(RunningState { volumes, .. }) = &*self.state.lock().await {
469                volumes.clone()
470            } else {
471                let _ = stream.into_inner().0.shutdown_with_epitaph(zx::Status::BAD_STATE);
472                return;
473            };
474        while let Ok(Some(request)) = stream.try_next().await {
475            match request {
476                VolumesRequest::Create {
477                    name,
478                    outgoing_directory,
479                    create_options,
480                    mount_options,
481                    responder,
482                } => {
483                    async {
484                        info!(
485                            name = name.as_str();
486                            "Create {}volume",
487                            if mount_options.crypt.is_some() { "encrypted " } else { "" }
488                        );
489                        responder
490                            .send(
491                                volumes
492                                    .create_and_serve_volume(
493                                        &name,
494                                        outgoing_directory.into_channel().into(),
495                                        mount_options,
496                                        create_options,
497                                    )
498                                    .await
499                                    .map_err(map_to_raw_status),
500                            )
501                            .unwrap_or_else(
502                                |error| warn!(error:?; "Failed to send volume creation response"),
503                            );
504                    }
505                    .trace(trace_future_args!("Volumes::Create"))
506                    .await;
507                }
508                VolumesRequest::Remove { name, responder } => {
509                    async {
510                        info!(name = name.as_str(); "Remove volume");
511                        responder
512                            .send(volumes.remove_volume(&name).await.map_err(map_to_raw_status))
513                            .unwrap_or_else(
514                                |error| warn!(error:?; "Failed to send volume removal response"),
515                            );
516                    }
517                    .trace(trace_future_args!("Volumes::Remove"))
518                    .await;
519                }
520                VolumesRequest::GetInfo { responder } => {
521                    async move {
522                        responder.send(Err(zx::Status::NOT_SUPPORTED.into_raw())).unwrap_or_else(
523                            |error| warn!(error:?; "Failed to send volume removal response"),
524                        )
525                    }
526                    .trace(trace_future_args!("Volumes::GetInfo"))
527                    .await;
528                }
529            }
530        }
531    }
532
533    async fn handle_lifecycle_requests(&self, lifecycle_channel: zx::Channel) -> Result<(), Error> {
534        let mut stream =
535            LifecycleRequestStream::from_channel(fasync::Channel::from_channel(lifecycle_channel));
536        match stream.try_next().await.context("Reading request")? {
537            Some(LifecycleRequest::Stop { .. }) => {
538                info!("Received Lifecycle::Stop request");
539                self.shutdown().await;
540            }
541            None => {}
542        }
543        Ok(())
544    }
545
    /// Serves `fuchsia.fxfs.VolumeInstaller` requests.
    ///
    /// Requires the component to be running; otherwise the stream is closed
    /// with a `BAD_STATE` epitaph and an error is returned to the caller.
    async fn handle_volume_installer_requests(
        &self,
        mut stream: VolumeInstallerRequestStream,
    ) -> Result<(), Error> {
        let volumes =
            if let State::Running(RunningState { volumes, .. }) = &*self.state.lock().await {
                volumes.clone()
            } else {
                let _ = stream.into_inner().0.shutdown_with_epitaph(zx::Status::BAD_STATE);
                return Err(zx::Status::BAD_STATE).context("fxfs component not running");
            };
        while let Some(request) = stream.try_next().await.context("reading request")? {
            match request {
                VolumeInstallerRequest::Install { src, image_file, dst, responder } => {
                    async {
                        let response = volumes.install_volume(&src, &image_file, &dst).await;
                        responder.send(response.map_err(|error| {
                            error!(error:?; "install failed");
                            map_to_raw_status(error)
                        }))
                    }
                    .trace(trace_future_args!("VolumeInstaller::Install"))
                    .await?;
                }
            }
        }
        Ok(())
    }
574}
575
576#[cfg(test)]
577mod tests {
578    use super::{Component, new_block_client};
579    use fidl::endpoints::Proxy;
580    use fidl_fuchsia_fs::AdminMarker;
581    use fidl_fuchsia_fs_startup::{
582        CreateOptions, MountOptions, StartOptions, StartupMarker, VolumesMarker,
583    };
584    use fidl_fuchsia_process_lifecycle::{LifecycleMarker, LifecycleProxy};
585    use fuchsia_component_client::connect_to_protocol_at_dir_svc;
586    use fuchsia_fs::directory::readdir;
587    use futures::future::{BoxFuture, FusedFuture, FutureExt};
588    use futures::{pin_mut, select};
589    use fxfs::filesystem::FxFilesystem;
590    use fxfs::object_store::volume::root_volume;
591    use fxfs::object_store::{NewChildStoreOptions, StoreOptions};
592    use std::pin::Pin;
593    use std::sync::Arc;
594    use storage_device::DeviceHolder;
595    use storage_device::block_device::BlockDevice;
596    use vmo_backed_block_server::{
597        InitialContents, VmoBackedServer, VmoBackedServerOptions, VmoBackedServerTestingExt,
598    };
599    use {fidl_fuchsia_io as fio, fuchsia_async as fasync};
600
    /// Test harness: formats a VMO-backed block device with Fxfs (including a
    /// "default" volume), runs the component against it, issues Startup::Start,
    /// then invokes `callback` with the outgoing directory and lifecycle
    /// proxies.  Returns the still-pending component task plus the block
    /// server so tests can assert on post-conditions.
    async fn run_test(
        callback: impl Fn(&fio::DirectoryProxy, LifecycleProxy) -> BoxFuture<'static, ()>,
    ) -> (Pin<Box<impl FusedFuture>>, Arc<VmoBackedServer>) {
        const BLOCK_SIZE: u32 = 512;
        let block_server = Arc::new(
            VmoBackedServerOptions {
                block_size: BLOCK_SIZE,
                initial_contents: InitialContents::FromCapacity(BLOCK_SIZE as u64 * 16384),
                ..Default::default()
            }
            .build()
            .expect("build failed"),
        );

        // Pre-format the device and create a "default" volume, closing the
        // filesystem afterwards so the component can re-open it.
        {
            let fs = FxFilesystem::new_empty(DeviceHolder::new(
                BlockDevice::new(
                    new_block_client(block_server.connect())
                        .await
                        .expect("Unable to create block client"),
                    false,
                )
                .await
                .unwrap(),
            ))
            .await
            .expect("FxFilesystem::new_empty failed");
            {
                let root_volume = root_volume(fs.clone()).await.expect("Open root_volume failed");
                root_volume
                    .new_volume("default", NewChildStoreOptions::default())
                    .await
                    .expect("Create volume failed");
            }
            fs.close().await.expect("close failed");
        }

        let (client_end, server_end) = fidl::endpoints::create_proxy::<fio::DirectoryMarker>();

        let (lifecycle_client, lifecycle_server) =
            fidl::endpoints::create_proxy::<LifecycleMarker>();

        let mut component_task = Box::pin(
            async {
                Component::new()
                    .run(server_end.into_channel(), Some(lifecycle_server.into_channel()))
                    .await
                    .expect("Failed to run component");
            }
            .fuse(),
        );

        let startup_proxy = connect_to_protocol_at_dir_svc::<StartupMarker>(&client_end)
            .expect("Unable to connect to Startup protocol");
        let block_server_connection = block_server.connect();
        let task = async {
            startup_proxy
                .start(block_server_connection, &StartOptions::default())
                .await
                .expect("Start failed (FIDL)")
                .expect("Start failed");
            callback(&client_end, lifecycle_client).await;
        }
        .fuse();

        pin_mut!(task);

        // Drive the component task concurrently with the test body until the
        // test body completes.
        loop {
            select! {
                () = component_task => {},
                () = task => break,
            }
        }

        (component_task, block_server)
    }
677
    // Admin::Shutdown stops the filesystem but leaves the component task alive.
    #[fuchsia::test(threads = 2)]
    async fn test_shutdown() {
        let (component_task, _block_server) = run_test(|client, _| {
            let admin_proxy = connect_to_protocol_at_dir_svc::<AdminMarker>(client)
                .expect("Unable to connect to Admin protocol");
            async move {
                admin_proxy.shutdown().await.expect("shutdown failed");
            }
            .boxed()
        })
        .await;
        // Shutdown alone must not terminate the component.
        assert!(!component_task.is_terminated());
    }
691
    // Lifecycle::Stop closes the lifecycle channel and ends the component task.
    #[fuchsia::test(threads = 2)]
    async fn test_lifecycle_stop() {
        let (component_task, _block_server) = run_test(|_, lifecycle_client| {
            lifecycle_client.stop().expect("Stop failed");
            async move {
                // The component signals completion by closing the channel.
                fasync::OnSignals::new(
                    &lifecycle_client.into_channel().expect("into_channel failed"),
                    zx::Signals::CHANNEL_PEER_CLOSED,
                )
                .await
                .expect("OnSignals failed");
            }
            .boxed()
        })
        .await;
        component_task.await;
    }
709
    // Exercises volume create/remove, including the mounted-volume and
    // duplicate-name failure cases.
    #[fuchsia::test(threads = 2)]
    async fn test_create_and_remove() {
        let (component_task, _block_server) = run_test(|client, _| {
            let volumes_proxy = connect_to_protocol_at_dir_svc::<VolumesMarker>(client)
                .expect("Unable to connect to Volumes protocol");

            let fs_admin_proxy = connect_to_protocol_at_dir_svc::<AdminMarker>(client)
                .expect("Unable to connect to Admin protocol");

            async move {
                let (dir_proxy, server_end) =
                    fidl::endpoints::create_proxy::<fio::DirectoryMarker>();
                volumes_proxy
                    .create("test", server_end, CreateOptions::default(), MountOptions::default())
                    .await
                    .expect("fidl failed")
                    .expect("create failed");

                // This should fail whilst the volume is mounted.
                volumes_proxy
                    .remove("test")
                    .await
                    .expect("fidl failed")
                    .expect_err("remove succeeded");

                let volume_admin_proxy = connect_to_protocol_at_dir_svc::<AdminMarker>(&dir_proxy)
                    .expect("Unable to connect to Admin protocol");
                volume_admin_proxy.shutdown().await.expect("shutdown failed");

                // Creating another volume with the same name should fail.
                let (_dir_proxy, server_end) =
                    fidl::endpoints::create_proxy::<fio::DirectoryMarker>();
                volumes_proxy
                    .create("test", server_end, CreateOptions::default(), MountOptions::default())
                    .await
                    .expect("fidl failed")
                    .expect_err("create succeeded");

                volumes_proxy.remove("test").await.expect("fidl failed").expect("remove failed");

                // Removing a non-existent volume should fail.
                volumes_proxy
                    .remove("test")
                    .await
                    .expect("fidl failed")
                    .expect_err("remove failed");

                // Create the same volume again and it should now succeed.
                let (_dir_proxy, server_end) =
                    fidl::endpoints::create_proxy::<fio::DirectoryMarker>();
                volumes_proxy
                    .create("test", server_end, CreateOptions::default(), MountOptions::default())
                    .await
                    .expect("fidl failed")
                    .expect("create failed");

                fs_admin_proxy.shutdown().await.expect("shutdown failed");
            }
            .boxed()
        })
        .await;
        component_task.await;
    }
773
    // Newly created volumes appear alongside "default" in the exported
    // "volumes" directory.
    #[fuchsia::test(threads = 2)]
    async fn test_volumes_enumeration() {
        let (component_task, _block_server) = run_test(|client, _| {
            let volumes_proxy = connect_to_protocol_at_dir_svc::<VolumesMarker>(client)
                .expect("Unable to connect to Volumes protocol");

            let (volumes_dir_proxy, server_end) =
                fidl::endpoints::create_proxy::<fio::DirectoryMarker>();
            client
                .open("volumes", fio::PERM_READABLE, &Default::default(), server_end.into_channel())
                .expect("open failed");

            let fs_admin_proxy = connect_to_protocol_at_dir_svc::<AdminMarker>(client)
                .expect("Unable to connect to Admin protocol");

            async move {
                let (_dir_proxy, server_end) =
                    fidl::endpoints::create_proxy::<fio::DirectoryMarker>();
                volumes_proxy
                    .create("test", server_end, CreateOptions::default(), MountOptions::default())
                    .await
                    .expect("fidl failed")
                    .expect("create failed");

                let entries = readdir(&volumes_dir_proxy).await.expect("readdir failed");
                let mut entry_names = entries.iter().map(|d| d.name.as_str()).collect::<Vec<_>>();
                entry_names.sort();
                assert_eq!(entry_names, ["default", "test"]);

                fs_admin_proxy.shutdown().await.expect("shutdown failed");
            }
            .boxed()
        })
        .await;
        component_task.await;
    }
810
    // A GUID supplied in CreateOptions persists: re-open the device directly
    // and verify the stored GUID.
    #[fuchsia::test(threads = 2)]
    async fn test_create_with_guid() {
        let (component_task, block_server) = run_test(|client, _| {
            let volumes_proxy = connect_to_protocol_at_dir_svc::<VolumesMarker>(client)
                .expect("Unable to connect to Volumes protocol");
            let admin_proxy = connect_to_protocol_at_dir_svc::<AdminMarker>(client)
                .expect("Unable to connect to Admin protocol");
            async move {
                let (_dir_proxy, server_end) =
                    fidl::endpoints::create_proxy::<fio::DirectoryMarker>();
                let guid = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
                volumes_proxy
                    .create(
                        "test_guid",
                        server_end,
                        CreateOptions { guid: Some(guid), ..Default::default() },
                        MountOptions::default(),
                    )
                    .await
                    .expect("fidl failed")
                    .expect("create failed");
                admin_proxy.shutdown().await.expect("shutdown failed");
            }
            .boxed()
        })
        .await;
        component_task.await;

        // Verify the GUID
        let fs = FxFilesystem::open(DeviceHolder::new(
            BlockDevice::new(
                new_block_client(block_server.connect())
                    .await
                    .expect("Unable to create block client"),
                false,
            )
            .await
            .unwrap(),
        ))
        .await
        .expect("FxFilesystem::open failed");
        {
            let root_volume = root_volume(fs.clone()).await.expect("Open root_volume failed");
            let vol = root_volume
                .volume("test_guid", StoreOptions::default())
                .await
                .expect("Open volume failed");
            assert_eq!(vol.guid(), [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]);
        }
        fs.close().await.expect("close failed");
    }
862}