// fshost_test_fixture/lib.rs
// Copyright 2022 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
4
5use assert_matches::assert_matches;
6use diagnostics_assertions::assert_data_tree;
7use diagnostics_reader::ArchiveReader;
8use disk_builder::Disk;
9use fidl::endpoints::{create_proxy, ServiceMarker as _};
10use fidl_fuchsia_fxfs::{
11    BlobReaderMarker, CryptManagementMarker, CryptManagementProxy, CryptMarker, CryptProxy,
12    KeyPurpose,
13};
14use fuchsia_component::client::connect_to_protocol_at_dir_root;
15use fuchsia_component_test::{Capability, ChildOptions, RealmBuilder, RealmInstance, Ref, Route};
16use fuchsia_driver_test::{DriverTestRealmBuilder, DriverTestRealmInstance};
17use futures::channel::mpsc;
18use futures::{FutureExt as _, StreamExt as _};
19use ramdevice_client::{RamdiskClient, RamdiskClientBuilder};
20use std::pin::pin;
21use std::time::Duration;
22use {
23    fidl_fuchsia_boot as fboot, fidl_fuchsia_driver_test as fdt,
24    fidl_fuchsia_feedback as ffeedback, fidl_fuchsia_hardware_block_volume as fvolume,
25    fidl_fuchsia_hardware_ramdisk as framdisk, fidl_fuchsia_io as fio, fuchsia_async as fasync,
26};
27
28pub mod disk_builder;
29pub mod fshost_builder;
30mod mocks;
31
32pub use disk_builder::write_test_blob;
33
// Expected `fs_type` magic values as reported by `fuchsia.io/Directory.QueryFilesystem`
// for each filesystem format; compared against in `TestFixture::check_fs_type`.
pub const VFS_TYPE_BLOBFS: u32 = 0x9e694d21;
pub const VFS_TYPE_MINFS: u32 = 0x6e694d21;
pub const VFS_TYPE_MEMFS: u32 = 0x3e694d21;
pub const VFS_TYPE_FXFS: u32 = 0x73667866; // ASCII "fxfs" read little-endian
pub const VFS_TYPE_F2FS: u32 = 0xfe694d21;
// Volume size limits for test disks. NOTE(review): presumably consumed by the disk
// builder when formatting volumes — confirm against disk_builder.rs.
pub const BLOBFS_MAX_BYTES: u64 = 8765432;
// DATA_MAX_BYTES must be greater than DEFAULT_F2FS_MIN_BYTES
// (defined in device/constants.rs) to ensure that when f2fs is
// the data filesystem format, we don't run out of space
pub const DATA_MAX_BYTES: u64 = 109876543;
// Name of the Starnix volume used by tests that exercise `setup_starnix_crypt`.
pub const STARNIX_VOLUME_NAME: &str = "starnix_volume";
45
/// Rounds `offset` down to the nearest multiple of `block_size`.
///
/// `block_size` may be any type losslessly convertible into the offset type
/// (e.g. a `u32` block size with a `u64` offset). The result is `offset`
/// minus its remainder modulo `block_size`.
pub fn round_down<
    T: Into<U>,
    U: Copy + std::ops::Rem<U, Output = U> + std::ops::Sub<U, Output = U>,
>(
    offset: U,
    block_size: T,
) -> U {
    let size: U = block_size.into();
    let remainder = offset % size;
    offset - remainder
}
56
/// Builder for [`TestFixture`]. Configures the disks, fshost instance, and mock
/// components of the test realm; consumed by [`TestFixtureBuilder::build`].
pub struct TestFixtureBuilder {
    // Passed through to the mocks (mocks::new_mocks) to simulate a netboot.
    netboot: bool,
    // When true, the fuchsia.boot Arguments/Items protocols are not routed to fshost.
    no_fuchsia_boot: bool,
    // The main disk; added to the realm after `extra_disks`.
    disk: Option<Disk>,
    // Additional disks, added to the realm before the main disk.
    extra_disks: Vec<Disk>,
    // Configuration for the fshost component under test.
    fshost: fshost_builder::FshostBuilder,
    // Optional disk delivered to fshost as a ZBI ramdisk VMO via the mocks.
    zbi_ramdisk: Option<disk_builder::DiskBuilder>,
    // Selects how ramdisks are published (see TestFixture::add_ramdisk).
    storage_host: bool,
}
66
impl TestFixtureBuilder {
    /// Creates a builder for a test fixture running the fshost component named
    /// `fshost_component_name`. `storage_host` selects how ramdisks are published
    /// into the realm (see `TestFixture::add_ramdisk`).
    pub fn new(fshost_component_name: &'static str, storage_host: bool) -> Self {
        Self {
            netboot: false,
            no_fuchsia_boot: false,
            disk: None,
            extra_disks: vec![],
            fshost: fshost_builder::FshostBuilder::new(fshost_component_name),
            zbi_ramdisk: None,
            storage_host,
        }
    }

    /// Returns the fshost builder for further configuration.
    pub fn fshost(&mut self) -> &mut fshost_builder::FshostBuilder {
        &mut self.fshost
    }

    /// Installs a fresh main disk (replacing any previously configured one) and
    /// returns its builder for configuration.
    pub fn with_disk(&mut self) -> &mut disk_builder::DiskBuilder {
        self.disk = Some(Disk::Builder(disk_builder::DiskBuilder::new()));
        self.disk.as_mut().unwrap().builder()
    }

    /// Appends an additional disk (added to the realm before the main disk) and
    /// returns its builder for configuration.
    pub fn with_extra_disk(&mut self) -> &mut disk_builder::DiskBuilder {
        self.extra_disks.push(Disk::Builder(disk_builder::DiskBuilder::new()));
        self.extra_disks.last_mut().unwrap().builder()
    }

    /// Uses an uninitialized (unformatted) main disk.
    pub fn with_uninitialized_disk(mut self) -> Self {
        self.disk = Some(Disk::Builder(disk_builder::DiskBuilder::uninitialized()));
        self
    }

    /// Uses `disk` as the main disk, e.g. one returned by `TestFixture::tear_down`.
    pub fn with_disk_from(mut self, disk: Disk) -> Self {
        self.disk = Some(disk);
        self
    }

    /// Configures a disk that is delivered to fshost as a ZBI ramdisk item (via the
    /// mocks) rather than as a block device; returns its builder for configuration.
    pub fn with_zbi_ramdisk(&mut self) -> &mut disk_builder::DiskBuilder {
        self.zbi_ramdisk = Some(disk_builder::DiskBuilder::new());
        self.zbi_ramdisk.as_mut().unwrap()
    }

    /// Simulates a netboot (forwarded to the mocks).
    pub fn netboot(mut self) -> Self {
        self.netboot = true;
        self
    }

    /// Skips routing the fuchsia.boot Arguments/Items protocols to fshost.
    pub fn no_fuchsia_boot(mut self) -> Self {
        self.no_fuchsia_boot = true;
        self
    }

    /// Assembles the realm (mocks, routes, driver test realm), starts it, and adds
    /// the configured disks. The returned fixture's `tear_down` must be called
    /// before it is dropped.
    pub async fn build(self) -> TestFixture {
        let builder = RealmBuilder::new().await.unwrap();
        let fshost = self.fshost.build(&builder).await;

        // Build the optional ZBI ramdisk VMO first; the mocks hand it to fshost.
        let maybe_zbi_vmo = match self.zbi_ramdisk {
            Some(disk_builder) => Some(disk_builder.build_as_zbi_ramdisk().await),
            None => None,
        };
        let (tx, crash_reports) = mpsc::channel(32);
        let mocks = mocks::new_mocks(self.netboot, maybe_zbi_vmo, tx).await;

        let mocks = builder
            .add_local_child("mocks", move |h| mocks(h).boxed(), ChildOptions::new())
            .await
            .unwrap();
        // fshost files crash reports through the mocked CrashReporter so the test
        // can observe them via `crash_reports`.
        builder
            .add_route(
                Route::new()
                    .capability(Capability::protocol::<ffeedback::CrashReporterMarker>())
                    .from(&mocks)
                    .to(&fshost),
            )
            .await
            .unwrap();
        if !self.no_fuchsia_boot {
            builder
                .add_route(
                    Route::new()
                        .capability(Capability::protocol::<fboot::ArgumentsMarker>())
                        .capability(Capability::protocol::<fboot::ItemsMarker>())
                        .from(&mocks)
                        .to(&fshost),
                )
                .await
                .unwrap();
        }

        builder
            .add_route(
                Route::new()
                    .capability(Capability::dictionary("diagnostics"))
                    .from(Ref::parent())
                    .to(&fshost),
            )
            .await
            .unwrap();

        // Services the driver test realm must expose so ramdisks and volumes are
        // reachable by both fshost and this fixture.
        let dtr_exposes = vec![
            fidl_fuchsia_component_test::Capability::Service(
                fidl_fuchsia_component_test::Service {
                    name: Some("fuchsia.hardware.ramdisk.Service".to_owned()),
                    ..Default::default()
                },
            ),
            fidl_fuchsia_component_test::Capability::Service(
                fidl_fuchsia_component_test::Service {
                    name: Some("fuchsia.hardware.block.volume.Service".to_owned()),
                    ..Default::default()
                },
            ),
        ];
        builder.driver_test_realm_setup().await.unwrap();
        builder.driver_test_realm_add_dtr_exposes(&dtr_exposes).await.unwrap();
        builder
            .add_route(
                Route::new()
                    .capability(Capability::directory("dev-topological").rights(fio::R_STAR_DIR))
                    .capability(Capability::service::<framdisk::ServiceMarker>())
                    .capability(Capability::service::<fvolume::ServiceMarker>())
                    .from(Ref::child(fuchsia_driver_test::COMPONENT_NAME))
                    .to(&fshost),
            )
            .await
            .unwrap();
        builder
            .add_route(
                Route::new()
                    .capability(
                        Capability::directory("dev-class")
                            .rights(fio::R_STAR_DIR)
                            .subdir("block")
                            .as_("dev-class-block"),
                    )
                    .from(Ref::child(fuchsia_driver_test::COMPONENT_NAME))
                    .to(Ref::parent()),
            )
            .await
            .unwrap();

        let mut fixture = TestFixture {
            realm: builder.build().await.unwrap(),
            ramdisks: Vec::new(),
            main_disk: None,
            crash_reports,
            torn_down: TornDown(false),
            storage_host: self.storage_host,
        };

        log::info!(
            realm_name:? = fixture.realm.root.child_name();
            "built new test realm",
        );

        fixture
            .realm
            .driver_test_realm_start(fdt::RealmArgs {
                root_driver: Some("fuchsia-boot:///platform-bus#meta/platform-bus.cm".to_owned()),
                dtr_exposes: Some(dtr_exposes),
                software_devices: Some(vec![
                    fdt::SoftwareDevice {
                        device_name: "ram-disk".to_string(),
                        device_id: bind_fuchsia_platform::BIND_PLATFORM_DEV_DID_RAM_DISK,
                    },
                    fdt::SoftwareDevice {
                        device_name: "ram-nand".to_string(),
                        device_id: bind_fuchsia_platform::BIND_PLATFORM_DEV_DID_RAM_NAND,
                    },
                ]),
                ..Default::default()
            })
            .await
            .unwrap();

        // The order of adding disks matters here, unfortunately. fshost should not change behavior
        // based on the order disks appear, but because we take the first available that matches
        // whatever relevant criteria, it's useful to test that matchers don't get clogged up by
        // previous disks.
        // TODO(https://fxbug.dev/380353856): This type of testing should be irrelevant once the
        // block devices are determined by configuration options instead of heuristically.
        for disk in self.extra_disks.into_iter() {
            let (vmo, type_guid) = disk.into_vmo_and_type_guid().await;
            fixture.add_ramdisk(vmo, type_guid).await;
        }
        if let Some(disk) = self.disk {
            let (vmo, type_guid) = disk.into_vmo_and_type_guid().await;
            // Keep a slice child of the VMO so tear_down can return the disk's
            // contents to the caller while the ramdisk owns the original.
            let vmo_clone =
                vmo.create_child(zx::VmoChildOptions::SLICE, 0, vmo.get_size().unwrap()).unwrap();

            fixture.add_ramdisk(vmo, type_guid).await;
            fixture.main_disk = Some(Disk::Prebuilt(vmo_clone, type_guid));
        }

        fixture
    }
}
264
/// Create a separate struct that does the drop-assert because fixture.tear_down can't call
/// realm.destroy if it has the drop impl itself.
///
/// The inner bool is set to `true` by `TestFixture::tear_down`; dropping while still
/// `false` triggers the panic in the `Drop` impl below.
struct TornDown(bool);
268
269impl Drop for TornDown {
270    fn drop(&mut self) {
271        // Because tear_down is async, it needs to be called by the test in an async context. It
272        // checks some properties so for correctness it must be called.
273        assert!(self.0, "fixture.tear_down() must be called");
274    }
275}
276
/// A running fshost test realm. Built by [`TestFixtureBuilder::build`]; tests must
/// call [`TestFixture::tear_down`] before dropping it.
pub struct TestFixture {
    // The component realm hosting fshost, the mocks, and the driver test realm.
    pub realm: RealmInstance,
    // Keeps the ramdisk clients alive for the lifetime of the fixture.
    pub ramdisks: Vec<RamdiskClient>,
    // Clone of the main disk's VMO; returned to the caller by `tear_down`.
    pub main_disk: Option<Disk>,
    // Receives crash reports filed by fshost through the mocked CrashReporter.
    pub crash_reports: mpsc::Receiver<ffeedback::CrashReport>,
    // Set by `tear_down`; its Drop impl asserts tear_down was called.
    torn_down: TornDown,
    // Selects how ramdisks are published (see `add_ramdisk`).
    storage_host: bool,
}
285
286impl TestFixture {
287    pub async fn tear_down(mut self) -> Option<Disk> {
288        log::info!(realm_name:? = self.realm.root.child_name(); "tearing down");
289        let disk = self.main_disk.take();
290        // Check the crash reports before destroying the realm because tearing down the realm can
291        // cause mounting errors that trigger a crash report.
292        assert_matches!(self.crash_reports.try_next(), Ok(None) | Err(_));
293        self.realm.destroy().await.unwrap();
294        self.torn_down.0 = true;
295        disk
296    }
297
298    pub fn exposed_dir(&self) -> &fio::DirectoryProxy {
299        self.realm.root.get_exposed_dir()
300    }
301
302    pub fn dir(&self, dir: &str, flags: fio::Flags) -> fio::DirectoryProxy {
303        let (dev, server) = create_proxy::<fio::DirectoryMarker>();
304        let flags = flags | fio::Flags::PROTOCOL_DIRECTORY;
305        self.realm
306            .root
307            .get_exposed_dir()
308            .open(dir, flags, &fio::Options::default(), server.into_channel())
309            .expect("open failed");
310        dev
311    }
312
313    pub async fn check_fs_type(&self, dir: &str, fs_type: u32) {
314        let (status, info) =
315            self.dir(dir, fio::Flags::empty()).query_filesystem().await.expect("query failed");
316        assert_eq!(zx::Status::from_raw(status), zx::Status::OK);
317        assert!(info.is_some());
318        let info_type = info.unwrap().fs_type;
319        assert_eq!(info_type, fs_type, "{:#08x} != {:#08x}", info_type, fs_type);
320    }
321
322    pub async fn check_test_blob(&self, use_fxblob: bool) {
323        let expected_blob_hash = fuchsia_merkle::from_slice(&disk_builder::BLOB_CONTENTS).root();
324        if use_fxblob {
325            let reader = connect_to_protocol_at_dir_root::<BlobReaderMarker>(
326                self.realm.root.get_exposed_dir(),
327            )
328            .expect("failed to connect to the BlobReader");
329            let _vmo = reader.get_vmo(&expected_blob_hash.into()).await.unwrap().unwrap();
330        } else {
331            let (blob, server_end) = create_proxy::<fio::FileMarker>();
332            let path = &format!("{}", expected_blob_hash);
333            self.dir("blob", fio::PERM_READABLE)
334                .open(path, fio::PERM_READABLE, &fio::Options::default(), server_end.into_channel())
335                .expect("open failed");
336            blob.query().await.expect("open file failed");
337        }
338    }
339
340    /// Check for the existence of a well-known set of test files in the data volume. These files
341    /// are placed by the disk builder if it formats the filesystem beforehand.
342    pub async fn check_test_data_file(&self) {
343        let (file, server) = create_proxy::<fio::NodeMarker>();
344        self.dir("data", fio::PERM_READABLE)
345            .open(".testdata", fio::PERM_READABLE, &fio::Options::default(), server.into_channel())
346            .expect("open failed");
347        file.get_attr().await.expect("get_attr failed - data was probably deleted!");
348
349        let data = self.dir("data", fio::PERM_READABLE);
350        fuchsia_fs::directory::open_file(&data, ".testdata", fio::PERM_READABLE).await.unwrap();
351
352        fuchsia_fs::directory::open_directory(&data, "ssh", fio::PERM_READABLE).await.unwrap();
353        fuchsia_fs::directory::open_directory(&data, "ssh/config", fio::PERM_READABLE)
354            .await
355            .unwrap();
356        fuchsia_fs::directory::open_directory(&data, "problems", fio::PERM_READABLE).await.unwrap();
357
358        let authorized_keys =
359            fuchsia_fs::directory::open_file(&data, "ssh/authorized_keys", fio::PERM_READABLE)
360                .await
361                .unwrap();
362        assert_eq!(
363            &fuchsia_fs::file::read_to_string(&authorized_keys).await.unwrap(),
364            "public key!"
365        );
366    }
367
368    /// Checks for the absence of the .testdata marker file, indicating the data filesystem was
369    /// reformatted.
370    pub async fn check_test_data_file_absent(&self) {
371        let (file, server) = create_proxy::<fio::NodeMarker>();
372        self.dir("data", fio::PERM_READABLE)
373            .open(".testdata", fio::PERM_READABLE, &fio::Options::default(), server.into_channel())
374            .expect("open failed");
375        file.get_attr().await.expect_err(".testdata should be absent");
376    }
377
378    async fn add_ramdisk(&mut self, vmo: zx::Vmo, type_guid: Option<[u8; 16]>) {
379        let mut ramdisk_builder = if self.storage_host {
380            RamdiskClientBuilder::new_with_vmo(vmo, Some(512)).use_v2().publish().ramdisk_service(
381                self.dir(framdisk::ServiceMarker::SERVICE_NAME, fio::Flags::empty()),
382            )
383        } else {
384            RamdiskClientBuilder::new_with_vmo(vmo, Some(512))
385                .dev_root(self.dir("dev-topological", fio::Flags::empty()))
386        };
387        if let Some(guid) = type_guid {
388            ramdisk_builder = ramdisk_builder.guid(guid);
389        }
390        let mut ramdisk = pin!(ramdisk_builder.build().fuse());
391
392        let ramdisk = futures::select_biased!(
393            res = ramdisk => res,
394            _ = fasync::Timer::new(Duration::from_secs(120))
395                .fuse() => panic!("Timed out waiting for RamdiskClient"),
396        )
397        .unwrap();
398        self.ramdisks.push(ramdisk);
399    }
400
401    pub async fn setup_starnix_crypt(&self) -> (CryptProxy, CryptManagementProxy) {
402        let crypt_management =
403            self.realm.root.connect_to_protocol_at_exposed_dir::<CryptManagementMarker>().expect(
404                "connect_to_protocol_at_exposed_dir failed for the CryptManagement protocol",
405            );
406        let crypt = self
407            .realm
408            .root
409            .connect_to_protocol_at_exposed_dir::<CryptMarker>()
410            .expect("connect_to_protocol_at_exposed_dir failed for the Crypt protocol");
411        let key = vec![0xABu8; 32];
412        crypt_management
413            .add_wrapping_key(&u128::to_le_bytes(0), key.as_slice())
414            .await
415            .expect("fidl transport error")
416            .expect("add wrapping key failed");
417        crypt_management
418            .add_wrapping_key(&u128::to_le_bytes(1), key.as_slice())
419            .await
420            .expect("fidl transport error")
421            .expect("add wrapping key failed");
422        crypt_management
423            .set_active_key(KeyPurpose::Data, &u128::to_le_bytes(0))
424            .await
425            .expect("fidl transport error")
426            .expect("set metadata key failed");
427        crypt_management
428            .set_active_key(KeyPurpose::Metadata, &u128::to_le_bytes(1))
429            .await
430            .expect("fidl transport error")
431            .expect("set metadata key failed");
432        (crypt, crypt_management)
433    }
434
435    /// This must be called if any crash reports are expected, since spurious reports will cause a
436    /// failure in TestFixture::tear_down.
437    pub async fn wait_for_crash_reports(
438        &mut self,
439        count: usize,
440        expected_program: &'_ str,
441        expected_signature: &'_ str,
442    ) {
443        log::info!("Waiting for {count} crash reports");
444        for _ in 0..count {
445            let report = self.crash_reports.next().await.expect("Sender closed");
446            assert_eq!(report.program_name.as_deref(), Some(expected_program));
447            assert_eq!(report.crash_signature.as_deref(), Some(expected_signature));
448        }
449        if count > 0 {
450            let selector =
451                format!("realm_builder\\:{}/test-fshost:root", self.realm.root.child_name());
452            log::info!("Checking inspect for corruption event, selector={selector}");
453            let tree = ArchiveReader::inspect()
454                .add_selector(selector)
455                .snapshot()
456                .await
457                .unwrap()
458                .into_iter()
459                .next()
460                .and_then(|result| result.payload)
461                .expect("expected one inspect hierarchy");
462
463            let format = || expected_program.to_string();
464
465            assert_data_tree!(tree, root: contains {
466                corruption_events: contains {
467                    format() => 1u64,
468                }
469            });
470        }
471    }
472}