// fshost_test_fixture/lib.rs
// Copyright 2022 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

5use assert_matches::assert_matches;
6use diagnostics_assertions::assert_data_tree;
7use diagnostics_reader::ArchiveReader;
8use disk_builder::{Disk, DEFAULT_DATA_VOLUME_SIZE};
9use fidl::endpoints::{create_proxy, ServiceMarker as _};
10use fidl_fuchsia_fxfs::{
11    BlobReaderMarker, CryptManagementMarker, CryptManagementProxy, CryptMarker, CryptProxy,
12    KeyPurpose,
13};
14use fuchsia_component::client::connect_to_protocol_at_dir_root;
15use fuchsia_component_test::{Capability, ChildOptions, RealmBuilder, RealmInstance, Ref, Route};
16use fuchsia_driver_test::{DriverTestRealmBuilder, DriverTestRealmInstance};
17use futures::channel::mpsc;
18use futures::{FutureExt as _, StreamExt as _};
19use ramdevice_client::{RamdiskClient, RamdiskClientBuilder};
20use std::pin::pin;
21use std::time::Duration;
22use {
23    fidl_fuchsia_boot as fboot, fidl_fuchsia_driver_test as fdt,
24    fidl_fuchsia_feedback as ffeedback, fidl_fuchsia_hardware_block_volume as fvolume,
25    fidl_fuchsia_hardware_ramdisk as framdisk, fidl_fuchsia_io as fio, fuchsia_async as fasync,
26};
27
28pub mod disk_builder;
29pub mod fshost_builder;
30mod mocks;
31
32pub use disk_builder::write_test_blob;
33
// Filesystem magic numbers, compared against the `fs_type` field returned by
// `query_filesystem` (see `TestFixture::check_fs_type`).
// NOTE(review): these values look like the standard Fuchsia VFS type magics for
// each filesystem — confirm against the upstream fs_type definitions if they drift.
pub const VFS_TYPE_BLOBFS: u32 = 0x9e694d21;
pub const VFS_TYPE_MINFS: u32 = 0x6e694d21;
pub const VFS_TYPE_MEMFS: u32 = 0x3e694d21;
pub const VFS_TYPE_FXFS: u32 = 0x73667866;
pub const VFS_TYPE_F2FS: u32 = 0xfe694d21;
// Volume size limits used by tests when checking filesystem limits.
pub const BLOBFS_MAX_BYTES: u64 = 8765432;
// The data volume limit mirrors the disk builder's default data volume size.
pub const DATA_MAX_BYTES: u64 = DEFAULT_DATA_VOLUME_SIZE;
// Name of the starnix volume used by starnix-related tests.
pub const STARNIX_VOLUME_NAME: &str = "starnix_volume";
42
/// Rounds `offset` down to the nearest multiple of `block_size`.
///
/// `block_size` may be any type convertible into the offset's type (e.g. a
/// `u32` block size with a `u64` offset). Panics if `block_size` is zero for
/// integer types (remainder by zero).
pub fn round_down<
    T: Into<U>,
    U: Copy + std::ops::Rem<U, Output = U> + std::ops::Sub<U, Output = U>,
>(
    offset: U,
    block_size: T,
) -> U {
    let bs: U = block_size.into();
    let remainder = offset % bs;
    offset - remainder
}
53
/// Builder for [`TestFixture`]: configures disks, the fshost component, and realm
/// wiring before `build()` assembles and starts the test realm.
pub struct TestFixtureBuilder {
    /// Forwarded to `mocks::new_mocks` to simulate a netboot configuration.
    netboot: bool,
    /// When set, the `fuchsia.boot` Arguments/Items protocols are not routed to fshost.
    no_fuchsia_boot: bool,
    /// The main disk for the fixture, if any; added to the realm last (see `build`).
    disk: Option<Disk>,
    /// Extra disks added to the realm before the main disk (ordering is intentional;
    /// see the comment in `build`).
    extra_disks: Vec<Disk>,
    /// Configuration for the fshost component under test.
    fshost: fshost_builder::FshostBuilder,
    /// Optional disk image handed to the mocks as a ZBI ramdisk VMO.
    zbi_ramdisk: Option<disk_builder::DiskBuilder>,
    /// Selects how ramdisks are published (see `TestFixture::add_ramdisk`).
    storage_host: bool,
}
63
impl TestFixtureBuilder {
    /// Creates a builder for a fixture running the named fshost component.
    /// `storage_host` selects the ramdisk publishing mechanism used later by
    /// `TestFixture::add_ramdisk`.
    pub fn new(fshost_component_name: &'static str, storage_host: bool) -> Self {
        Self {
            netboot: false,
            no_fuchsia_boot: false,
            disk: None,
            extra_disks: vec![],
            fshost: fshost_builder::FshostBuilder::new(fshost_component_name),
            zbi_ramdisk: None,
            storage_host,
        }
    }

    /// Returns the fshost component builder for further configuration.
    pub fn fshost(&mut self) -> &mut fshost_builder::FshostBuilder {
        &mut self.fshost
    }

    /// Configures a freshly-initialized main disk; returns its builder for
    /// further customization. Replaces any previously configured main disk.
    pub fn with_disk(&mut self) -> &mut disk_builder::DiskBuilder {
        self.disk = Some(Disk::Builder(disk_builder::DiskBuilder::new()));
        self.disk.as_mut().unwrap().builder()
    }

    /// Appends an additional disk (added to the realm before the main disk);
    /// returns its builder for customization.
    pub fn with_extra_disk(&mut self) -> &mut disk_builder::DiskBuilder {
        self.extra_disks.push(Disk::Builder(disk_builder::DiskBuilder::new()));
        self.extra_disks.last_mut().unwrap().builder()
    }

    /// Configures an uninitialized (unformatted) main disk.
    pub fn with_uninitialized_disk(mut self) -> Self {
        self.disk = Some(Disk::Builder(disk_builder::DiskBuilder::uninitialized()));
        self
    }

    /// Uses a pre-existing disk (e.g. one returned by `TestFixture::tear_down`)
    /// as the main disk.
    pub fn with_disk_from(mut self, disk: Disk) -> Self {
        self.disk = Some(disk);
        self
    }

    /// Configures a disk image to be delivered to the mocks as a ZBI ramdisk VMO;
    /// returns its builder for customization.
    pub fn with_zbi_ramdisk(&mut self) -> &mut disk_builder::DiskBuilder {
        self.zbi_ramdisk = Some(disk_builder::DiskBuilder::new());
        self.zbi_ramdisk.as_mut().unwrap()
    }

    /// Enables netboot mode in the mocks.
    pub fn netboot(mut self) -> Self {
        self.netboot = true;
        self
    }

    /// Disables routing of the `fuchsia.boot` protocols to fshost.
    pub fn no_fuchsia_boot(mut self) -> Self {
        self.no_fuchsia_boot = true;
        self
    }

    /// Assembles the realm (fshost, mocks, driver test realm), routes the required
    /// capabilities, starts the driver test realm, and attaches the configured
    /// disks as ramdisks. Returns the running fixture.
    ///
    /// Panics (via `unwrap`/`expect`) on any realm-construction failure, which
    /// fails the enclosing test.
    pub async fn build(self) -> TestFixture {
        let builder = RealmBuilder::new().await.unwrap();
        let fshost = self.fshost.build(&builder).await;

        // Materialize the optional ZBI ramdisk image as a VMO for the mocks.
        let maybe_zbi_vmo = match self.zbi_ramdisk {
            Some(disk_builder) => Some(disk_builder.build_as_zbi_ramdisk().await),
            None => None,
        };
        // Crash reports filed through the mocked CrashReporter arrive on this channel.
        let (tx, crash_reports) = mpsc::channel(32);
        let mocks = mocks::new_mocks(self.netboot, maybe_zbi_vmo, tx).await;

        let mocks = builder
            .add_local_child("mocks", move |h| mocks(h).boxed(), ChildOptions::new())
            .await
            .unwrap();
        // Route the mocked crash reporter and /boot directory to fshost.
        builder
            .add_route(
                Route::new()
                    .capability(Capability::protocol::<ffeedback::CrashReporterMarker>())
                    .capability(Capability::directory("boot").rights(fio::R_STAR_DIR).path("/boot"))
                    .from(&mocks)
                    .to(&fshost),
            )
            .await
            .unwrap();
        // Boot arguments/items are only routed when the test wants them available.
        if !self.no_fuchsia_boot {
            builder
                .add_route(
                    Route::new()
                        .capability(Capability::protocol::<fboot::ArgumentsMarker>())
                        .capability(Capability::protocol::<fboot::ItemsMarker>())
                        .from(&mocks)
                        .to(&fshost),
                )
                .await
                .unwrap();
        }

        builder
            .add_route(
                Route::new()
                    .capability(Capability::dictionary("diagnostics"))
                    .from(Ref::parent())
                    .to(&fshost),
            )
            .await
            .unwrap();

        // Services the driver test realm must expose so ramdisks and block volumes
        // can be published to fshost and the test.
        let dtr_exposes = vec![
            fidl_fuchsia_component_test::Capability::Service(
                fidl_fuchsia_component_test::Service {
                    name: Some("fuchsia.hardware.ramdisk.Service".to_owned()),
                    ..Default::default()
                },
            ),
            fidl_fuchsia_component_test::Capability::Service(
                fidl_fuchsia_component_test::Service {
                    name: Some("fuchsia.hardware.block.volume.Service".to_owned()),
                    ..Default::default()
                },
            ),
        ];
        builder.driver_test_realm_setup().await.unwrap();
        builder.driver_test_realm_add_dtr_exposes(&dtr_exposes).await.unwrap();
        builder
            .add_route(
                Route::new()
                    .capability(Capability::directory("dev-topological").rights(fio::R_STAR_DIR))
                    .capability(Capability::service::<framdisk::ServiceMarker>())
                    .capability(Capability::service::<fvolume::ServiceMarker>())
                    .from(Ref::child(fuchsia_driver_test::COMPONENT_NAME))
                    .to(&fshost),
            )
            .await
            .unwrap();
        // Also expose the block device class directory to the test itself.
        builder
            .add_route(
                Route::new()
                    .capability(
                        Capability::directory("dev-class")
                            .rights(fio::R_STAR_DIR)
                            .subdir("block")
                            .as_("dev-class-block"),
                    )
                    .from(Ref::child(fuchsia_driver_test::COMPONENT_NAME))
                    .to(Ref::parent()),
            )
            .await
            .unwrap();

        let mut fixture = TestFixture {
            realm: builder.build().await.unwrap(),
            ramdisks: Vec::new(),
            main_disk: None,
            crash_reports,
            torn_down: TornDown(false),
            storage_host: self.storage_host,
        };

        log::info!(
            realm_name:? = fixture.realm.root.child_name();
            "built new test realm",
        );

        // Start the driver test realm with the software devices the ramdisk
        // clients will bind to.
        fixture
            .realm
            .driver_test_realm_start(fdt::RealmArgs {
                root_driver: Some("fuchsia-boot:///platform-bus#meta/platform-bus.cm".to_owned()),
                dtr_exposes: Some(dtr_exposes),
                software_devices: Some(vec![
                    fdt::SoftwareDevice {
                        device_name: "ram-disk".to_string(),
                        device_id: bind_fuchsia_platform::BIND_PLATFORM_DEV_DID_RAM_DISK,
                    },
                    fdt::SoftwareDevice {
                        device_name: "ram-nand".to_string(),
                        device_id: bind_fuchsia_platform::BIND_PLATFORM_DEV_DID_RAM_NAND,
                    },
                ]),
                ..Default::default()
            })
            .await
            .unwrap();

        // The order of adding disks matters here, unfortunately. fshost should not change behavior
        // based on the order disks appear, but because we take the first available that matches
        // whatever relevant criteria, it's useful to test that matchers don't get clogged up by
        // previous disks.
        // TODO(https://fxbug.dev/380353856): This type of testing should be irrelevant once the
        // block devices are determined by configuration options instead of heuristically.
        for disk in self.extra_disks.into_iter() {
            let (vmo, type_guid) = disk.into_vmo_and_type_guid().await;
            fixture.add_ramdisk(vmo, type_guid).await;
        }
        if let Some(disk) = self.disk {
            let (vmo, type_guid) = disk.into_vmo_and_type_guid().await;
            // Keep a slice child of the VMO so tear_down can return the main disk
            // contents to the test for reuse.
            let vmo_clone =
                vmo.create_child(zx::VmoChildOptions::SLICE, 0, vmo.get_size().unwrap()).unwrap();

            fixture.add_ramdisk(vmo, type_guid).await;
            fixture.main_disk = Some(Disk::Prebuilt(vmo_clone, type_guid));
        }

        fixture
    }
}
262
/// Drop guard that panics unless the fixture was explicitly torn down.
///
/// This lives in its own struct (rather than a `Drop` impl on `TestFixture`)
/// because `TestFixture::tear_down` consumes the fixture and calls
/// `realm.destroy`, which a `Drop` impl on the fixture itself would prevent.
struct TornDown(bool);

impl Drop for TornDown {
    fn drop(&mut self) {
        // tear_down is async and performs assertions, so the test must invoke it
        // explicitly from an async context; catch tests that forget.
        if !self.0 {
            panic!("fixture.tear_down() must be called");
        }
    }
}
274
/// A running fshost integration-test realm, produced by [`TestFixtureBuilder::build`].
/// Tests must call [`TestFixture::tear_down`] before dropping the fixture.
pub struct TestFixture {
    /// The component realm containing fshost, the mocks, and the driver test realm.
    pub realm: RealmInstance,
    /// Keeps the attached ramdisks alive for the lifetime of the fixture.
    pub ramdisks: Vec<RamdiskClient>,
    /// A slice-child clone of the main disk's VMO; handed back by `tear_down`.
    pub main_disk: Option<Disk>,
    /// Receives crash reports filed through the mocked `fuchsia.feedback.CrashReporter`.
    pub crash_reports: mpsc::Receiver<ffeedback::CrashReport>,
    // Drop guard asserting that `tear_down` was called (see `TornDown`).
    torn_down: TornDown,
    // Selects how `add_ramdisk` publishes ramdisks: via the ramdisk service
    // (storage-host mode) or via the topological /dev directory.
    storage_host: bool,
}
283
284impl TestFixture {
285    pub async fn tear_down(mut self) -> Option<Disk> {
286        log::info!(realm_name:? = self.realm.root.child_name(); "tearing down");
287        let disk = self.main_disk.take();
288        // Check the crash reports before destroying the realm because tearing down the realm can
289        // cause mounting errors that trigger a crash report.
290        assert_matches!(self.crash_reports.try_next(), Ok(None) | Err(_));
291        self.realm.destroy().await.unwrap();
292        self.torn_down.0 = true;
293        disk
294    }
295
296    pub fn exposed_dir(&self) -> &fio::DirectoryProxy {
297        self.realm.root.get_exposed_dir()
298    }
299
300    pub fn dir(&self, dir: &str, flags: fio::Flags) -> fio::DirectoryProxy {
301        let (dev, server) = create_proxy::<fio::DirectoryMarker>();
302        let flags = flags | fio::Flags::PROTOCOL_DIRECTORY;
303        self.realm
304            .root
305            .get_exposed_dir()
306            .open(dir, flags, &fio::Options::default(), server.into_channel())
307            .expect("open failed");
308        dev
309    }
310
311    pub async fn check_fs_type(&self, dir: &str, fs_type: u32) {
312        let (status, info) =
313            self.dir(dir, fio::Flags::empty()).query_filesystem().await.expect("query failed");
314        assert_eq!(zx::Status::from_raw(status), zx::Status::OK);
315        assert!(info.is_some());
316        let info_type = info.unwrap().fs_type;
317        assert_eq!(info_type, fs_type, "{:#08x} != {:#08x}", info_type, fs_type);
318    }
319
320    pub async fn check_test_blob(&self, use_fxblob: bool) {
321        let expected_blob_hash = fuchsia_merkle::from_slice(&disk_builder::BLOB_CONTENTS).root();
322        if use_fxblob {
323            let reader = connect_to_protocol_at_dir_root::<BlobReaderMarker>(
324                self.realm.root.get_exposed_dir(),
325            )
326            .expect("failed to connect to the BlobReader");
327            let _vmo = reader.get_vmo(&expected_blob_hash.into()).await.unwrap().unwrap();
328        } else {
329            let (blob, server_end) = create_proxy::<fio::FileMarker>();
330            let path = &format!("{}", expected_blob_hash);
331            self.dir("blob", fio::PERM_READABLE)
332                .open(path, fio::PERM_READABLE, &fio::Options::default(), server_end.into_channel())
333                .expect("open failed");
334            blob.query().await.expect("open file failed");
335        }
336    }
337
338    /// Check for the existence of a well-known set of test files in the data volume. These files
339    /// are placed by the disk builder if it formats the filesystem beforehand.
340    pub async fn check_test_data_file(&self) {
341        let (file, server) = create_proxy::<fio::NodeMarker>();
342        self.dir("data", fio::PERM_READABLE)
343            .open(".testdata", fio::PERM_READABLE, &fio::Options::default(), server.into_channel())
344            .expect("open failed");
345        file.get_attr().await.expect("get_attr failed - data was probably deleted!");
346
347        let data = self.dir("data", fio::PERM_READABLE);
348        fuchsia_fs::directory::open_file(&data, ".testdata", fio::PERM_READABLE).await.unwrap();
349
350        fuchsia_fs::directory::open_directory(&data, "ssh", fio::PERM_READABLE).await.unwrap();
351        fuchsia_fs::directory::open_directory(&data, "ssh/config", fio::PERM_READABLE)
352            .await
353            .unwrap();
354        fuchsia_fs::directory::open_directory(&data, "problems", fio::PERM_READABLE).await.unwrap();
355
356        let authorized_keys =
357            fuchsia_fs::directory::open_file(&data, "ssh/authorized_keys", fio::PERM_READABLE)
358                .await
359                .unwrap();
360        assert_eq!(
361            &fuchsia_fs::file::read_to_string(&authorized_keys).await.unwrap(),
362            "public key!"
363        );
364    }
365
366    /// Checks for the absence of the .testdata marker file, indicating the data filesystem was
367    /// reformatted.
368    pub async fn check_test_data_file_absent(&self) {
369        let (file, server) = create_proxy::<fio::NodeMarker>();
370        self.dir("data", fio::PERM_READABLE)
371            .open(".testdata", fio::PERM_READABLE, &fio::Options::default(), server.into_channel())
372            .expect("open failed");
373        file.get_attr().await.expect_err(".testdata should be absent");
374    }
375
376    async fn add_ramdisk(&mut self, vmo: zx::Vmo, type_guid: Option<[u8; 16]>) {
377        let mut ramdisk_builder = if self.storage_host {
378            RamdiskClientBuilder::new_with_vmo(vmo, Some(512)).use_v2().publish().ramdisk_service(
379                self.dir(framdisk::ServiceMarker::SERVICE_NAME, fio::Flags::empty()),
380            )
381        } else {
382            RamdiskClientBuilder::new_with_vmo(vmo, Some(512))
383                .dev_root(self.dir("dev-topological", fio::Flags::empty()))
384        };
385        if let Some(guid) = type_guid {
386            ramdisk_builder = ramdisk_builder.guid(guid);
387        }
388        let mut ramdisk = pin!(ramdisk_builder.build().fuse());
389
390        let ramdisk = futures::select_biased!(
391            res = ramdisk => res,
392            _ = fasync::Timer::new(Duration::from_secs(120))
393                .fuse() => panic!("Timed out waiting for RamdiskClient"),
394        )
395        .unwrap();
396        self.ramdisks.push(ramdisk);
397    }
398
399    pub async fn setup_starnix_crypt(&self) -> (CryptProxy, CryptManagementProxy) {
400        let crypt_management =
401            self.realm.root.connect_to_protocol_at_exposed_dir::<CryptManagementMarker>().expect(
402                "connect_to_protocol_at_exposed_dir failed for the CryptManagement protocol",
403            );
404        let crypt = self
405            .realm
406            .root
407            .connect_to_protocol_at_exposed_dir::<CryptMarker>()
408            .expect("connect_to_protocol_at_exposed_dir failed for the Crypt protocol");
409        let key = vec![0xABu8; 32];
410        crypt_management
411            .add_wrapping_key(&u128::to_le_bytes(0), key.as_slice())
412            .await
413            .expect("fidl transport error")
414            .expect("add wrapping key failed");
415        crypt_management
416            .add_wrapping_key(&u128::to_le_bytes(1), key.as_slice())
417            .await
418            .expect("fidl transport error")
419            .expect("add wrapping key failed");
420        crypt_management
421            .set_active_key(KeyPurpose::Data, &u128::to_le_bytes(0))
422            .await
423            .expect("fidl transport error")
424            .expect("set metadata key failed");
425        crypt_management
426            .set_active_key(KeyPurpose::Metadata, &u128::to_le_bytes(1))
427            .await
428            .expect("fidl transport error")
429            .expect("set metadata key failed");
430        (crypt, crypt_management)
431    }
432
433    /// This must be called if any crash reports are expected, since spurious reports will cause a
434    /// failure in TestFixture::tear_down.
435    pub async fn wait_for_crash_reports(
436        &mut self,
437        count: usize,
438        expected_program: &'_ str,
439        expected_signature: &'_ str,
440    ) {
441        log::info!("Waiting for {count} crash reports");
442        for _ in 0..count {
443            let report = self.crash_reports.next().await.expect("Sender closed");
444            assert_eq!(report.program_name.as_deref(), Some(expected_program));
445            assert_eq!(report.crash_signature.as_deref(), Some(expected_signature));
446        }
447        if count > 0 {
448            let selector =
449                format!("realm_builder\\:{}/test-fshost:root", self.realm.root.child_name());
450            log::info!("Checking inspect for corruption event, selector={selector}");
451            let tree = ArchiveReader::inspect()
452                .add_selector(selector)
453                .snapshot()
454                .await
455                .unwrap()
456                .into_iter()
457                .next()
458                .and_then(|result| result.payload)
459                .expect("expected one inspect hierarchy");
460
461            let format = || expected_program.to_string();
462
463            assert_data_tree!(tree, root: contains {
464                corruption_events: contains {
465                    format() => 1u64,
466                }
467            });
468        }
469    }
470}