// fshost_test_fixture/lib.rs

1// Copyright 2022 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5use assert_matches::assert_matches;
6use diagnostics_assertions::assert_data_tree;
7use diagnostics_reader::ArchiveReader;
8use disk_builder::Disk;
9use fidl::endpoints::{ServiceMarker as _, create_proxy};
10use fidl_fuchsia_fxfs::{BlobReaderMarker, CryptManagementProxy, CryptProxy, KeyPurpose};
11use fuchsia_component::client::{
12    connect_to_named_protocol_at_dir_root, connect_to_protocol_at_dir_root,
13};
14use fuchsia_component_test::{Capability, ChildOptions, RealmBuilder, RealmInstance, Ref, Route};
15use fuchsia_driver_test::{DriverTestRealmBuilder, DriverTestRealmInstance};
16use futures::channel::mpsc;
17use futures::{FutureExt as _, StreamExt as _};
18use ramdevice_client::{RamdiskClient, RamdiskClientBuilder};
19use std::pin::pin;
20use std::time::Duration;
21use {
22    fidl_fuchsia_boot as fboot, fidl_fuchsia_driver_test as fdt,
23    fidl_fuchsia_feedback as ffeedback, fidl_fuchsia_fshost_fxfsprovisioner as ffxfsprovisioner,
24    fidl_fuchsia_hardware_block_volume as fvolume, fidl_fuchsia_hardware_ramdisk as framdisk,
25    fidl_fuchsia_io as fio, fidl_fuchsia_security_keymint as fkeymint,
26    fidl_fuchsia_storage_partitions as fpartitions, fuchsia_async as fasync,
27};
28
29pub mod disk_builder;
30mod mocks;
31
32pub use disk_builder::write_blob;
33pub use fshost_assembly_config::{BlockDeviceConfig, BlockDeviceIdentifiers, BlockDeviceParent};
34
// Filesystem `fs_type` magic numbers, compared against the value reported by
// `query_filesystem` in `TestFixture::check_fs_type`.
pub const VFS_TYPE_BLOBFS: u32 = 0x9e694d21;
pub const VFS_TYPE_MINFS: u32 = 0x6e694d21;
pub const VFS_TYPE_MEMFS: u32 = 0x3e694d21;
pub const VFS_TYPE_FXFS: u32 = 0x73667866;
pub const VFS_TYPE_F2FS: u32 = 0xfe694d21;
// Name of the volume used by starnix-related tests.
pub const STARNIX_VOLUME_NAME: &str = "starnix_volume";
41
/// Rounds `offset` down to the nearest multiple of `block_size`.
///
/// `block_size` may be any type convertible into the offset's type; the
/// result is `offset - (offset % block_size)`.
pub fn round_down<
    T: Into<U>,
    U: Copy + std::ops::Rem<U, Output = U> + std::ops::Sub<U, Output = U>,
>(
    offset: U,
    block_size: T,
) -> U {
    let bs: U = block_size.into();
    let remainder = offset % bs;
    offset - remainder
}
52
/// Builder for [`TestFixture`]: configures the fshost component under test,
/// the disks attached to the realm, and mock behavior.
pub struct TestFixtureBuilder {
    // When true, the mocked fuchsia.boot Arguments/Items protocols are not
    // routed to fshost.
    no_fuchsia_boot: bool,
    // The main disk, if any; attached after `extra_disks` and returned from
    // `TestFixture::tear_down`.
    disk: Option<Disk>,
    // Additional disks, attached to the realm before the main disk.
    extra_disks: Vec<Disk>,
    // Builder for the fshost component under test.
    fshost: fshost_testing::FshostBuilder,
    // When set, built as a ZBI ramdisk VMO and handed to the mocks child
    // (which serves the boot protocols) instead of being attached as a disk.
    zbi_ramdisk: Option<disk_builder::DiskBuilder>,
    // Selects the storage-host (ramdisk service) wiring when attaching disks;
    // see `TestFixture::add_ramdisk`.
    storage_host: bool,
    // Makes the mocked FxfsProvisioner fail, for exercising error paths.
    force_fxfs_provisioner_failure: bool,
}
62
impl TestFixtureBuilder {
    /// Creates a builder which will run the fshost component named
    /// `fshost_component_name`. `storage_host` selects how ramdisks are wired
    /// into the realm when disks are attached (see `TestFixture::add_ramdisk`).
    pub fn new(fshost_component_name: &'static str, storage_host: bool) -> Self {
        Self {
            no_fuchsia_boot: false,
            disk: None,
            extra_disks: Vec::new(),
            fshost: fshost_testing::FshostBuilder::new(fshost_component_name),
            zbi_ramdisk: None,
            storage_host,
            force_fxfs_provisioner_failure: false,
        }
    }

    /// Returns the fshost component builder for further configuration.
    pub fn fshost(&mut self) -> &mut fshost_testing::FshostBuilder {
        &mut self.fshost
    }

    /// Configures a freshly initialized main disk, replacing any previously
    /// configured one, and returns its builder for customization.
    pub fn with_disk(&mut self) -> &mut disk_builder::DiskBuilder {
        self.disk = Some(Disk::Builder(disk_builder::DiskBuilder::new()));
        self.disk.as_mut().unwrap().builder()
    }

    /// Appends an extra disk (extra disks are attached to the realm before
    /// the main disk) and returns its builder for customization.
    pub fn with_extra_disk(&mut self) -> &mut disk_builder::DiskBuilder {
        self.extra_disks.push(Disk::Builder(disk_builder::DiskBuilder::new()));
        self.extra_disks.last_mut().unwrap().builder()
    }

    /// Configures an uninitialized (unformatted) main disk.
    pub fn with_uninitialized_disk(mut self) -> Self {
        self.disk = Some(Disk::Builder(disk_builder::DiskBuilder::uninitialized()));
        self
    }

    /// Uses `disk` as the main disk, e.g. a disk returned by a previous
    /// fixture's `tear_down`.
    pub fn with_disk_from(mut self, disk: Disk) -> Self {
        self.disk = Some(disk);
        self
    }

    /// Configures a disk that is built as a ZBI ramdisk and provided to the
    /// mocks child rather than attached as a block device; returns its
    /// builder for customization.
    pub fn with_zbi_ramdisk(&mut self) -> &mut disk_builder::DiskBuilder {
        self.zbi_ramdisk = Some(disk_builder::DiskBuilder::new());
        self.zbi_ramdisk.as_mut().unwrap()
    }

    /// Skips routing the mocked fuchsia.boot Arguments/Items protocols to
    /// fshost.
    pub fn no_fuchsia_boot(mut self) -> Self {
        self.no_fuchsia_boot = true;
        self
    }

    /// Sets the block device configuration passed to the fshost builder.
    pub fn with_device_config(mut self, device_config: Vec<BlockDeviceConfig>) -> Self {
        self.fshost.set_device_config(device_config);
        self
    }

    /// Sets the crypt policy passed to the fshost builder.
    pub fn with_crypt_policy(mut self, policy: crypt_policy::Policy) -> Self {
        self.fshost.set_crypt_policy(policy);
        self
    }

    /// Makes the mocked FxfsProvisioner fail, for exercising error paths.
    pub fn force_fxfs_provisioner_failure(mut self) -> Self {
        self.force_fxfs_provisioner_failure = true;
        self
    }

    /// Builds and starts the test realm: adds fshost, a local mocks child,
    /// and a driver test realm; routes the required capabilities between
    /// them; starts the driver test realm; and attaches the configured disks.
    pub async fn build(self) -> TestFixture {
        let builder = RealmBuilder::new().await.unwrap();
        let fshost = self.fshost.build(&builder).await;

        // If a ZBI ramdisk was configured, build its VMO now so the mocks can
        // serve it.
        let maybe_zbi_vmo = match self.zbi_ramdisk {
            Some(disk_builder) => Some(disk_builder.build_as_zbi_ramdisk().await),
            None => None,
        };
        // Crash reports filed through the mocked crash reporter are forwarded
        // over this channel to `TestFixture::crash_reports`.
        let (tx, crash_reports) = mpsc::channel(32);
        let mocks = mocks::new_mocks(maybe_zbi_vmo, tx, self.force_fxfs_provisioner_failure);

        let mocks = builder
            .add_local_child("mocks", move |h| mocks(h).boxed(), ChildOptions::new())
            .await
            .unwrap();
        // Mocked protocols fshost always receives.
        builder
            .add_route(
                Route::new()
                    .capability(Capability::protocol::<ffeedback::CrashReporterMarker>())
                    .capability(Capability::protocol::<ffxfsprovisioner::FxfsProvisionerMarker>())
                    .capability(Capability::protocol::<fkeymint::SealingKeysMarker>())
                    .from(&mocks)
                    .to(&fshost),
            )
            .await
            .unwrap();
        // Boot protocols are only routed unless the test opted out.
        if !self.no_fuchsia_boot {
            builder
                .add_route(
                    Route::new()
                        .capability(Capability::protocol::<fboot::ArgumentsMarker>())
                        .capability(Capability::protocol::<fboot::ItemsMarker>())
                        .from(&mocks)
                        .to(&fshost),
                )
                .await
                .unwrap();
        }

        builder
            .add_route(
                Route::new()
                    .capability(Capability::dictionary("diagnostics"))
                    .from(Ref::parent())
                    .to(&fshost),
            )
            .await
            .unwrap();

        // Services the driver test realm must expose so ramdisks and block
        // volumes are reachable from this fixture and from fshost.
        let dtr_exposes = vec![
            fidl_fuchsia_component_test::Capability::Service(
                fidl_fuchsia_component_test::Service {
                    name: Some("fuchsia.hardware.ramdisk.Service".to_owned()),
                    ..Default::default()
                },
            ),
            fidl_fuchsia_component_test::Capability::Service(
                fidl_fuchsia_component_test::Service {
                    name: Some("fuchsia.hardware.block.volume.Service".to_owned()),
                    ..Default::default()
                },
            ),
        ];
        builder.driver_test_realm_setup().await.unwrap();
        builder.driver_test_realm_add_dtr_exposes(&dtr_exposes).await.unwrap();
        builder
            .add_route(
                Route::new()
                    .capability(Capability::directory("dev-topological").rights(fio::R_STAR_DIR))
                    .capability(Capability::service::<fvolume::ServiceMarker>())
                    .from(Ref::child(fuchsia_driver_test::COMPONENT_NAME))
                    .to(&fshost),
            )
            .await
            .unwrap();
        // Expose the block device class directory to the test itself.
        builder
            .add_route(
                Route::new()
                    .capability(
                        Capability::directory("dev-class")
                            .rights(fio::R_STAR_DIR)
                            .subdir("block")
                            .as_("dev-class-block"),
                    )
                    .from(Ref::child(fuchsia_driver_test::COMPONENT_NAME))
                    .to(Ref::parent()),
            )
            .await
            .unwrap();

        let mut fixture = TestFixture {
            realm: builder.build().await.unwrap(),
            ramdisks: Vec::new(),
            main_disk: None,
            crash_reports,
            torn_down: TornDown(false),
            storage_host: self.storage_host,
        };

        log::info!(
            realm_name:? = fixture.realm.root.child_name();
            "built new test realm",
        );

        // Start the driver test realm with software ramdisk/ramnand devices so
        // ramdisks can be created later via `add_disk`/`add_main_disk`.
        fixture
            .realm
            .driver_test_realm_start(fdt::RealmArgs {
                root_driver: Some("fuchsia-boot:///platform-bus#meta/platform-bus.cm".to_owned()),
                dtr_exposes: Some(dtr_exposes),
                software_devices: Some(vec![
                    fdt::SoftwareDevice {
                        device_name: "ram-disk".to_string(),
                        device_id: bind_fuchsia_platform::BIND_PLATFORM_DEV_DID_RAM_DISK,
                    },
                    fdt::SoftwareDevice {
                        device_name: "ram-nand".to_string(),
                        device_id: bind_fuchsia_platform::BIND_PLATFORM_DEV_DID_RAM_NAND,
                    },
                ]),
                ..Default::default()
            })
            .await
            .unwrap();

        // The order of adding disks matters here, unfortunately. fshost should not change behavior
        // based on the order disks appear, but because we take the first available that matches
        // whatever relevant criteria, it's useful to test that matchers don't get clogged up by
        // previous disks.
        // TODO(https://fxbug.dev/380353856): This type of testing should be irrelevant once the
        // block devices are determined by configuration options instead of heuristically.
        for disk in self.extra_disks.into_iter() {
            fixture.add_disk(disk).await;
        }
        if let Some(disk) = self.disk {
            fixture.add_main_disk(disk).await;
        }

        fixture
    }
}
265
/// Create a separate struct that does the drop-assert because fixture.tear_down can't call
/// realm.destroy if it has the drop impl itself. The wrapped bool records
/// whether `TestFixture::tear_down` ran.
struct TornDown(bool);
269
270impl Drop for TornDown {
271    fn drop(&mut self) {
272        // Because tear_down is async, it needs to be called by the test in an async context. It
273        // checks some properties so for correctness it must be called.
274        assert!(self.0, "fixture.tear_down() must be called");
275    }
276}
277
/// A running test realm containing fshost, its mocks, and a driver test
/// realm. Tests must call [`TestFixture::tear_down`] before dropping it.
pub struct TestFixture {
    /// The component realm under test.
    pub realm: RealmInstance,
    /// Keeps the ramdisk clients backing attached disks alive for the
    /// fixture's lifetime.
    pub ramdisks: Vec<RamdiskClient>,
    /// A handle to the main disk's contents, handed back by `tear_down`.
    pub main_disk: Option<Disk>,
    /// Receives crash reports filed through the mocked crash reporter.
    pub crash_reports: mpsc::Receiver<ffeedback::CrashReport>,
    // Drop guard asserting that tear_down was called.
    torn_down: TornDown,
    // Selects the storage-host (ramdisk service) wiring in `add_ramdisk`.
    storage_host: bool,
}
286
287impl TestFixture {
288    pub async fn tear_down(mut self) -> Option<Disk> {
289        log::info!(realm_name:? = self.realm.root.child_name(); "tearing down");
290        let disk = self.main_disk.take();
291        // Check the crash reports before destroying the realm because tearing down the realm can
292        // cause mounting errors that trigger a crash report.
293        assert_matches!(self.crash_reports.try_next(), Ok(None) | Err(_));
294        self.realm.destroy().await.unwrap();
295        self.torn_down.0 = true;
296        disk
297    }
298
299    pub fn exposed_dir(&self) -> &fio::DirectoryProxy {
300        self.realm.root.get_exposed_dir()
301    }
302
303    pub fn dir(&self, dir: &str, flags: fio::Flags) -> fio::DirectoryProxy {
304        let (dev, server) = create_proxy::<fio::DirectoryMarker>();
305        let flags = flags | fio::Flags::PROTOCOL_DIRECTORY;
306        self.realm
307            .root
308            .get_exposed_dir()
309            .open(dir, flags, &fio::Options::default(), server.into_channel())
310            .expect("open failed");
311        dev
312    }
313
314    pub async fn check_fs_type(&self, dir: &str, fs_type: u32) {
315        let (status, info) =
316            self.dir(dir, fio::Flags::empty()).query_filesystem().await.expect("query failed");
317        assert_eq!(zx::Status::from_raw(status), zx::Status::OK);
318        assert!(info.is_some());
319        let info_type = info.unwrap().fs_type;
320        assert_eq!(info_type, fs_type, "{:#08x} != {:#08x}", info_type, fs_type);
321    }
322
323    pub async fn check_test_blob(&self, use_fxblob: bool) {
324        let expected_blob_hash = disk_builder::test_blob_hash();
325        if use_fxblob {
326            let reader = connect_to_protocol_at_dir_root::<BlobReaderMarker>(
327                self.realm.root.get_exposed_dir(),
328            )
329            .expect("failed to connect to the BlobReader");
330            let _vmo = reader.get_vmo(&expected_blob_hash.into()).await.unwrap().unwrap();
331        } else {
332            let (blob, server_end) = create_proxy::<fio::FileMarker>();
333            let path = &format!("{}", expected_blob_hash);
334            self.dir("blob", fio::PERM_READABLE)
335                .open(path, fio::PERM_READABLE, &fio::Options::default(), server_end.into_channel())
336                .expect("open failed");
337            blob.query().await.expect("open file failed");
338        }
339    }
340
341    /// Check for the existence of a well-known set of test files in the data volume. These files
342    /// are placed by the disk builder if it formats the filesystem beforehand.
343    pub async fn check_test_data_file(&self) {
344        let (file, server) = create_proxy::<fio::NodeMarker>();
345        self.dir("data", fio::PERM_READABLE)
346            .open(".testdata", fio::PERM_READABLE, &fio::Options::default(), server.into_channel())
347            .expect("open failed");
348        file.get_attributes(fio::NodeAttributesQuery::empty())
349            .await
350            .expect("Fidl transport error on get_attributes()")
351            .expect("get_attr failed - data was probably deleted!");
352
353        let data = self.dir("data", fio::PERM_READABLE);
354        fuchsia_fs::directory::open_file(&data, ".testdata", fio::PERM_READABLE).await.unwrap();
355
356        fuchsia_fs::directory::open_directory(&data, "ssh", fio::PERM_READABLE).await.unwrap();
357        fuchsia_fs::directory::open_directory(&data, "ssh/config", fio::PERM_READABLE)
358            .await
359            .unwrap();
360        fuchsia_fs::directory::open_directory(&data, "problems", fio::PERM_READABLE).await.unwrap();
361
362        let authorized_keys =
363            fuchsia_fs::directory::open_file(&data, "ssh/authorized_keys", fio::PERM_READABLE)
364                .await
365                .unwrap();
366        assert_eq!(
367            &fuchsia_fs::file::read_to_string(&authorized_keys).await.unwrap(),
368            "public key!"
369        );
370    }
371
372    /// Checks for the absence of the .testdata marker file, indicating the data filesystem was
373    /// reformatted.
374    pub async fn check_test_data_file_absent(&self) {
375        let err = fuchsia_fs::directory::open_file(
376            &self.dir("data", fio::PERM_READABLE),
377            ".testdata",
378            fio::PERM_READABLE,
379        )
380        .await
381        .expect_err("open_file failed");
382        assert!(err.is_not_found_error());
383    }
384
385    pub async fn add_main_disk(&mut self, disk: Disk) {
386        assert!(self.main_disk.is_none());
387        let (vmo, type_guid) = disk.into_vmo_and_type_guid().await;
388        let vmo_clone =
389            vmo.create_child(zx::VmoChildOptions::SLICE, 0, vmo.get_size().unwrap()).unwrap();
390
391        self.add_ramdisk(vmo, type_guid).await;
392        self.main_disk = Some(Disk::Prebuilt(vmo_clone, type_guid));
393    }
394
395    pub async fn add_disk(&mut self, disk: Disk) {
396        let (vmo, type_guid) = disk.into_vmo_and_type_guid().await;
397        self.add_ramdisk(vmo, type_guid).await;
398    }
399
400    async fn add_ramdisk(&mut self, vmo: zx::Vmo, type_guid: Option<[u8; 16]>) {
401        let mut ramdisk_builder = if self.storage_host {
402            RamdiskClientBuilder::new_with_vmo(vmo, Some(512)).use_v2().publish().ramdisk_service(
403                self.dir(framdisk::ServiceMarker::SERVICE_NAME, fio::Flags::empty()),
404            )
405        } else {
406            RamdiskClientBuilder::new_with_vmo(vmo, Some(512))
407                .dev_root(self.dir("dev-topological", fio::Flags::empty()))
408        };
409        if let Some(guid) = type_guid {
410            ramdisk_builder = ramdisk_builder.guid(guid);
411        }
412        let mut ramdisk = pin!(ramdisk_builder.build().fuse());
413
414        let ramdisk = futures::select_biased!(
415            res = ramdisk => res,
416            _ = fasync::Timer::new(Duration::from_secs(120))
417                .fuse() => panic!("Timed out waiting for RamdiskClient"),
418        )
419        .unwrap();
420        self.ramdisks.push(ramdisk);
421    }
422
423    pub fn connect_to_crypt(&self) -> CryptProxy {
424        self.realm
425            .root
426            .connect_to_protocol_at_exposed_dir()
427            .expect("connect_to_protocol_at_exposed_dir failed for the Crypt protocol")
428    }
429
430    pub async fn setup_starnix_crypt(&self) -> (CryptProxy, CryptManagementProxy) {
431        let crypt_management: CryptManagementProxy =
432            self.realm.root.connect_to_protocol_at_exposed_dir().expect(
433                "connect_to_protocol_at_exposed_dir failed for the CryptManagement protocol",
434            );
435        let crypt = self
436            .realm
437            .root
438            .connect_to_protocol_at_exposed_dir()
439            .expect("connect_to_protocol_at_exposed_dir failed for the Crypt protocol");
440        let key = vec![0xABu8; 32];
441        crypt_management
442            .add_wrapping_key(&u128::to_le_bytes(0), key.as_slice())
443            .await
444            .expect("fidl transport error")
445            .expect("add wrapping key failed");
446        crypt_management
447            .add_wrapping_key(&u128::to_le_bytes(1), key.as_slice())
448            .await
449            .expect("fidl transport error")
450            .expect("add wrapping key failed");
451        crypt_management
452            .set_active_key(KeyPurpose::Data, &u128::to_le_bytes(0))
453            .await
454            .expect("fidl transport error")
455            .expect("set metadata key failed");
456        crypt_management
457            .set_active_key(KeyPurpose::Metadata, &u128::to_le_bytes(1))
458            .await
459            .expect("fidl transport error")
460            .expect("set metadata key failed");
461        (crypt, crypt_management)
462    }
463
464    /// This must be called if any crash reports are expected, since spurious reports will cause a
465    /// failure in TestFixture::tear_down.
466    pub async fn wait_for_crash_reports(
467        &mut self,
468        count: usize,
469        expected_program: &'_ str,
470        expected_signature: &'_ str,
471    ) {
472        log::info!("Waiting for {count} crash reports");
473        for _ in 0..count {
474            let report = self.crash_reports.next().await.expect("Sender closed");
475            assert_eq!(report.program_name.as_deref(), Some(expected_program));
476            assert_eq!(report.crash_signature.as_deref(), Some(expected_signature));
477        }
478        if count > 0 {
479            let selector =
480                format!("realm_builder\\:{}/test-fshost:root", self.realm.root.child_name());
481            log::info!("Checking inspect for corruption event, selector={selector}");
482            let tree = ArchiveReader::inspect()
483                .add_selector(selector)
484                .snapshot()
485                .await
486                .unwrap()
487                .into_iter()
488                .next()
489                .and_then(|result| result.payload)
490                .expect("expected one inspect hierarchy");
491
492            let format = || expected_program.to_string();
493
494            assert_data_tree!(tree, root: contains {
495                corruption_events: contains {
496                    format() => 1u64,
497                }
498            });
499        }
500    }
501
502    // Check that the system partition table contains partitions with labels found in `expected`.
503    pub async fn check_system_partitions(&self, mut expected: Vec<&str>) {
504        let partitions =
505            self.dir(fpartitions::PartitionServiceMarker::SERVICE_NAME, fio::PERM_READABLE);
506        let entries =
507            fuchsia_fs::directory::readdir(&partitions).await.expect("Failed to read partitions");
508
509        assert_eq!(entries.len(), expected.len());
510
511        let mut found_partition_labels = Vec::new();
512        for entry in entries {
513            let endpoint_name = format!("{}/volume", entry.name);
514            let volume = connect_to_named_protocol_at_dir_root::<fvolume::VolumeMarker>(
515                &partitions,
516                &endpoint_name,
517            )
518            .expect("failed to connect to named protocol at dir root");
519            let (raw_status, label) = volume.get_name().await.expect("failed to call get_name");
520            zx::Status::ok(raw_status).expect("get_name status failed");
521            found_partition_labels.push(label.expect("partition label expected to be some value"));
522        }
523        found_partition_labels.sort();
524        expected.sort();
525        assert_eq!(found_partition_labels, expected);
526    }
527}