// fshost_test_fixture/lib.rs
// Copyright 2022 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
4
5use assert_matches::assert_matches;
6use diagnostics_assertions::assert_data_tree;
7use diagnostics_reader::ArchiveReader;
8use disk_builder::Disk;
9use fake_keymint::FakeKeymint;
10use fidl::endpoints::{ServiceMarker as _, create_proxy};
11use fidl_fuchsia_boot as fboot;
12use fidl_fuchsia_driver_test as fdt;
13use fidl_fuchsia_feedback as ffeedback;
14use fidl_fuchsia_fshost_fxfsprovisioner as ffxfsprovisioner;
15use fidl_fuchsia_fxfs::{BlobReaderMarker, CryptManagementProxy, CryptProxy, KeyPurpose};
16use fidl_fuchsia_hardware_block_volume as fvolume;
17use fidl_fuchsia_hardware_ramdisk as framdisk;
18use fidl_fuchsia_io as fio;
19use fidl_fuchsia_security_keymint as fkeymint;
20use fidl_fuchsia_storage_block as fblock;
21use fidl_fuchsia_storage_partitions as fpartitions;
22use fuchsia_async::{self as fasync, TimeoutExt as _};
23use fuchsia_component::client::{
24    connect_to_named_protocol_at_dir_root, connect_to_protocol_at_dir_root,
25};
26use fuchsia_component_test::{Capability, ChildOptions, RealmBuilder, RealmInstance, Ref, Route};
27use fuchsia_driver_test::{DriverTestRealmBuilder, DriverTestRealmInstance};
28use futures::channel::mpsc;
29use futures::{FutureExt as _, StreamExt as _};
30use ramdevice_client::{RamdiskClient, RamdiskClientBuilder};
31use std::pin::pin;
32use std::time::Duration;
33
34pub mod disk_builder;
35mod mocks;
36
37pub use disk_builder::write_blob;
38pub use fshost_assembly_config::{BlockDeviceConfig, BlockDeviceIdentifiers, BlockDeviceParent};
39
40pub const VFS_TYPE_BLOBFS: u32 = 0x9e694d21;
41pub const VFS_TYPE_MINFS: u32 = 0x6e694d21;
42pub const VFS_TYPE_MEMFS: u32 = 0x3e694d21;
43pub const VFS_TYPE_FXFS: u32 = 0x73667866;
44pub const VFS_TYPE_F2FS: u32 = 0xfe694d21;
45pub const STARNIX_VOLUME_NAME: &str = "starnix_volume";
46
47const DEFAULT_TIMEOUT: Duration = Duration::from_secs(60);
48
49async fn with_timeout<F: std::future::Future>(fut: F, name: impl Into<String>) -> F::Output {
50    let name = name.into();
51    fut.on_timeout(DEFAULT_TIMEOUT, move || panic!("{name} timed out after {DEFAULT_TIMEOUT:?}"))
52        .await
53}
54
55/// fshost will expose an alias of its fuchsia.hardware.block.volume.Service directory at this path.
56/// This allows tests to disambiguate service instances from the driver test realm, which are
57/// automatically aggregated.
58pub const FSHOST_VOLUME_SERVICE_DIR_NAME: &str = "VolumeService";
59
60pub fn round_down<
61    T: Into<U>,
62    U: Copy + std::ops::Rem<U, Output = U> + std::ops::Sub<U, Output = U>,
63>(
64    offset: U,
65    block_size: T,
66) -> U {
67    let block_size = block_size.into();
68    offset - offset % block_size
69}
70
71pub struct TestFixtureBuilder {
72    no_fuchsia_boot: bool,
73    disk: Option<Disk>,
74    extra_disks: Vec<Disk>,
75    fshost: fshost_testing::FshostBuilder,
76    zbi_ramdisk: Option<disk_builder::DiskBuilder>,
77    force_fxfs_provisioner_failure: bool,
78    keymint: std::sync::Arc<FakeKeymint>,
79    crypt_policy: crypt_policy::Policy,
80}
81
82impl TestFixtureBuilder {
83    pub fn new(fshost_component_name: &'static str) -> Self {
84        Self {
85            no_fuchsia_boot: false,
86            disk: None,
87            extra_disks: Vec::new(),
88            fshost: fshost_testing::FshostBuilder::new(fshost_component_name),
89            zbi_ramdisk: None,
90            force_fxfs_provisioner_failure: false,
91            keymint: std::sync::Arc::new(FakeKeymint::default()),
92            crypt_policy: crypt_policy::Policy::Null,
93        }
94    }
95
96    pub fn fshost(&mut self) -> &mut fshost_testing::FshostBuilder {
97        &mut self.fshost
98    }
99
100    pub fn keymint(&mut self) -> std::sync::Arc<FakeKeymint> {
101        self.keymint.clone()
102    }
103
104    pub fn with_keymint_instance(mut self, keymint: std::sync::Arc<FakeKeymint>) -> Self {
105        self.keymint = keymint.clone();
106        if let Some(Disk::Builder(ref mut disk_builder)) = self.disk {
107            disk_builder.with_keymint_instance(keymint.clone());
108        }
109        for disk in &mut self.extra_disks {
110            if let Disk::Builder(disk_builder) = disk {
111                disk_builder.with_keymint_instance(keymint.clone());
112            }
113        }
114        self
115    }
116
117    pub fn with_disk(&mut self) -> &mut disk_builder::DiskBuilder {
118        self.disk = Some(Disk::Builder(disk_builder::DiskBuilder::new()));
119        self.disk
120            .as_mut()
121            .unwrap()
122            .builder()
123            .with_crypt_policy(self.crypt_policy)
124            .with_keymint_instance(self.keymint.clone());
125        self.disk.as_mut().unwrap().builder()
126    }
127
128    pub fn with_extra_disk(&mut self) -> &mut disk_builder::DiskBuilder {
129        self.extra_disks.push(Disk::Builder(disk_builder::DiskBuilder::new()));
130        self.extra_disks
131            .last_mut()
132            .unwrap()
133            .builder()
134            .with_crypt_policy(self.crypt_policy)
135            .with_keymint_instance(self.keymint.clone());
136        self.extra_disks.last_mut().unwrap().builder()
137    }
138
139    pub fn with_uninitialized_disk(mut self) -> Self {
140        self.disk = Some(Disk::Builder(disk_builder::DiskBuilder::uninitialized()));
141        self
142    }
143
144    pub fn with_disk_from(mut self, disk: Disk) -> Self {
145        self.disk = Some(disk);
146        self
147    }
148
149    pub fn with_zbi_ramdisk(&mut self) -> &mut disk_builder::DiskBuilder {
150        self.zbi_ramdisk = Some(disk_builder::DiskBuilder::new());
151        self.zbi_ramdisk.as_mut().unwrap()
152    }
153
154    pub fn no_fuchsia_boot(mut self) -> Self {
155        self.no_fuchsia_boot = true;
156        self
157    }
158
159    pub fn with_device_config(mut self, device_config: Vec<BlockDeviceConfig>) -> Self {
160        self.fshost.set_device_config(device_config);
161        self
162    }
163
164    pub fn with_crypt_policy(mut self, policy: crypt_policy::Policy) -> Self {
165        self.fshost.set_crypt_policy(policy);
166        self.crypt_policy = policy;
167        if let Some(Disk::Builder(ref mut disk_builder)) = self.disk {
168            disk_builder.with_crypt_policy(policy);
169        }
170        for disk in &mut self.extra_disks {
171            if let Disk::Builder(disk_builder) = disk {
172                disk_builder.with_crypt_policy(policy);
173            }
174        }
175        self
176    }
177
178    pub fn force_fxfs_provisioner_failure(mut self) -> Self {
179        self.force_fxfs_provisioner_failure = true;
180        self
181    }
182
183    pub async fn build(self) -> TestFixture {
184        let builder = RealmBuilder::new().await.unwrap();
185        let fshost = self.fshost.build(&builder).await;
186        // Create a second alias which routes fshost's volume Service capability to the parent.
187        builder
188            .add_route(
189                Route::new()
190                    .capability(
191                        Capability::service::<fvolume::ServiceMarker>()
192                            .as_(FSHOST_VOLUME_SERVICE_DIR_NAME),
193                    )
194                    .from(&fshost)
195                    .to(Ref::parent()),
196            )
197            .await
198            .unwrap();
199
200        let maybe_zbi_vmo = match self.zbi_ramdisk {
201            Some(disk_builder) => Some(disk_builder.build_as_zbi_ramdisk().await),
202            None => None,
203        };
204        let (tx, crash_reports) = mpsc::channel(32);
205        let mocks = mocks::new_mocks(
206            maybe_zbi_vmo,
207            tx,
208            self.force_fxfs_provisioner_failure,
209            self.keymint.clone(),
210        );
211
212        let mocks = builder
213            .add_local_child("mocks", move |h| mocks(h).boxed(), ChildOptions::new())
214            .await
215            .unwrap();
216        builder
217            .add_route(
218                Route::new()
219                    .capability(Capability::protocol::<fkeymint::SealingKeysMarker>())
220                    .capability(Capability::protocol::<fkeymint::AdminMarker>())
221                    .from(&mocks)
222                    .to(Ref::parent()),
223            )
224            .await
225            .unwrap();
226        builder
227            .add_route(
228                Route::new()
229                    .capability(Capability::protocol::<ffeedback::CrashReporterMarker>())
230                    .capability(Capability::protocol::<ffxfsprovisioner::FxfsProvisionerMarker>())
231                    .capability(Capability::protocol::<fkeymint::SealingKeysMarker>())
232                    .capability(Capability::protocol::<fkeymint::AdminMarker>())
233                    .from(&mocks)
234                    .to(&fshost),
235            )
236            .await
237            .unwrap();
238        if !self.no_fuchsia_boot {
239            builder
240                .add_route(
241                    Route::new()
242                        .capability(Capability::protocol::<fboot::ArgumentsMarker>())
243                        .capability(Capability::protocol::<fboot::ItemsMarker>())
244                        .from(&mocks)
245                        .to(&fshost),
246                )
247                .await
248                .unwrap();
249        }
250
251        builder
252            .add_route(
253                Route::new()
254                    .capability(Capability::dictionary("diagnostics"))
255                    .from(Ref::parent())
256                    .to(&fshost),
257            )
258            .await
259            .unwrap();
260
261        let dtr_exposes = vec![
262            fidl_fuchsia_component_test::Capability::Service(
263                fidl_fuchsia_component_test::Service {
264                    name: Some("fuchsia.hardware.ramdisk.Service".to_owned()),
265                    ..Default::default()
266                },
267            ),
268            fidl_fuchsia_component_test::Capability::Service(
269                fidl_fuchsia_component_test::Service {
270                    name: Some("fuchsia.hardware.block.volume.Service".to_owned()),
271                    ..Default::default()
272                },
273            ),
274        ];
275        builder.driver_test_realm_setup().await.unwrap();
276        builder.driver_test_realm_add_dtr_exposes(&dtr_exposes).await.unwrap();
277        builder
278            .add_route(
279                Route::new()
280                    .capability(Capability::directory("dev-topological").rights(fio::R_STAR_DIR))
281                    .capability(Capability::service::<fvolume::ServiceMarker>())
282                    .from(Ref::child(fuchsia_driver_test::COMPONENT_NAME))
283                    .to(&fshost),
284            )
285            .await
286            .unwrap();
287        builder
288            .add_route(
289                Route::new()
290                    .capability(
291                        Capability::directory("dev-class")
292                            .rights(fio::R_STAR_DIR)
293                            .subdir("block")
294                            .as_("dev-class-block"),
295                    )
296                    .from(Ref::child(fuchsia_driver_test::COMPONENT_NAME))
297                    .to(Ref::parent()),
298            )
299            .await
300            .unwrap();
301
302        let mut fixture = TestFixture {
303            realm: builder.build().await.unwrap(),
304            ramdisks: Vec::new(),
305            main_disk: None,
306            crash_reports,
307            torn_down: TornDown(false),
308        };
309
310        log::info!(
311            realm_name:? = fixture.realm.root.child_name();
312            "built new test realm",
313        );
314
315        fixture
316            .realm
317            .driver_test_realm_start(fdt::RealmArgs {
318                root_driver: Some("fuchsia-boot:///platform-bus#meta/platform-bus.cm".to_owned()),
319                dtr_exposes: Some(dtr_exposes),
320                software_devices: Some(vec![
321                    fdt::SoftwareDevice {
322                        device_name: "ram-disk".to_string(),
323                        device_id: bind_fuchsia_platform::BIND_PLATFORM_DEV_DID_RAM_DISK,
324                    },
325                    fdt::SoftwareDevice {
326                        device_name: "ram-nand".to_string(),
327                        device_id: bind_fuchsia_platform::BIND_PLATFORM_DEV_DID_RAM_NAND,
328                    },
329                ]),
330                ..Default::default()
331            })
332            .await
333            .unwrap();
334
335        // The order of adding disks matters here, unfortunately. fshost should not change behavior
336        // based on the order disks appear, but because we take the first available that matches
337        // whatever relevant criteria, it's useful to test that matchers don't get clogged up by
338        // previous disks.
339        // TODO(https://fxbug.dev/380353856): This type of testing should be irrelevant once the
340        // block devices are determined by configuration options instead of heuristically.
341        for disk in self.extra_disks.into_iter() {
342            fixture.add_disk(disk).await;
343        }
344        if let Some(disk) = self.disk {
345            fixture.add_main_disk(disk).await;
346        }
347
348        fixture
349    }
350}
351
352/// Create a separate struct that does the drop-assert because fixture.tear_down can't call
353/// realm.destroy if it has the drop impl itself.
354struct TornDown(bool);
355
356impl Drop for TornDown {
357    fn drop(&mut self) {
358        // Because tear_down is async, it needs to be called by the test in an async context. It
359        // checks some properties so for correctness it must be called.
360        assert!(self.0, "fixture.tear_down() must be called");
361    }
362}
363
364pub struct TestFixture {
365    pub realm: RealmInstance,
366    pub ramdisks: Vec<RamdiskClient>,
367    pub main_disk: Option<Disk>,
368    pub crash_reports: mpsc::Receiver<ffeedback::CrashReport>,
369    torn_down: TornDown,
370}
371
372impl TestFixture {
373    pub async fn tear_down(mut self) -> Option<Disk> {
374        log::info!(realm_name:? = self.realm.root.child_name(); "tearing down");
375        let disk = self.main_disk.take();
376        // Check the crash reports before destroying the realm because tearing down the realm can
377        // cause mounting errors that trigger a crash report.
378        assert_matches!(self.crash_reports.try_next(), Ok(None) | Err(_));
379        self.realm.destroy().await.unwrap();
380        self.torn_down.0 = true;
381        disk
382    }
383
384    pub fn exposed_dir(&self) -> &fio::DirectoryProxy {
385        self.realm.root.get_exposed_dir()
386    }
387
388    pub fn dir(&self, dir: &str, flags: fio::Flags) -> fio::DirectoryProxy {
389        let (dev, server) = create_proxy::<fio::DirectoryMarker>();
390        let flags = flags | fio::Flags::PROTOCOL_DIRECTORY;
391        self.realm
392            .root
393            .get_exposed_dir()
394            .open(dir, flags, &fio::Options::default(), server.into_channel())
395            .expect("open failed");
396        dev
397    }
398
399    pub async fn check_fs_type(&self, dir: &str, fs_type: u32) {
400        let (status, info) = with_timeout(
401            self.dir(dir, fio::Flags::empty()).query_filesystem(),
402            format!("check_fs_type({dir})"),
403        )
404        .await
405        .expect("query failed");
406        assert_eq!(zx::Status::from_raw(status), zx::Status::OK);
407        assert!(info.is_some());
408        let info_type = info.unwrap().fs_type;
409        assert_eq!(info_type, fs_type, "{:#08x} != {:#08x}", info_type, fs_type);
410    }
411
412    pub async fn check_test_blob(&self) {
413        with_timeout(
414            async {
415                let expected_blob_hash = disk_builder::test_blob_hash();
416                let reader = connect_to_protocol_at_dir_root::<BlobReaderMarker>(
417                    self.realm.root.get_exposed_dir(),
418                )
419                .expect("failed to connect to the BlobReader");
420                let _vmo = reader
421                    .get_vmo(&expected_blob_hash.into())
422                    .await
423                    .expect("blob get_vmo fidl error")
424                    .unwrap_or_else(|e| match zx::Status::from_raw(e) {
425                        zx::Status::NOT_FOUND => panic!("Test blob not found - blobfs lost data!"),
426                        s => panic!("Error while opening test blob vmo: {s}"),
427                    });
428            },
429            "check_test_blob",
430        )
431        .await
432    }
433
434    /// Check for the existence of a well-known set of test files in the data volume. These files
435    /// are placed by the disk builder if it formats the filesystem beforehand.
436    pub async fn check_test_data_file(&self) {
437        with_timeout(
438            async {
439                let (file, server) = create_proxy::<fio::NodeMarker>();
440                self.dir("data", fio::PERM_READABLE)
441                    .open(
442                        ".testdata",
443                        fio::PERM_READABLE,
444                        &fio::Options::default(),
445                        server.into_channel(),
446                    )
447                    .expect("open failed");
448                file.get_attributes(fio::NodeAttributesQuery::empty())
449                    .await
450                    .expect("Fidl transport error on get_attributes()")
451                    .expect("get_attr failed - data was probably deleted!");
452
453                let data = self.dir("data", fio::PERM_READABLE);
454                fuchsia_fs::directory::open_file(&data, ".testdata", fio::PERM_READABLE)
455                    .await
456                    .unwrap();
457
458                fuchsia_fs::directory::open_directory(&data, "ssh", fio::PERM_READABLE)
459                    .await
460                    .unwrap();
461                fuchsia_fs::directory::open_directory(&data, "ssh/config", fio::PERM_READABLE)
462                    .await
463                    .unwrap();
464                fuchsia_fs::directory::open_directory(&data, "problems", fio::PERM_READABLE)
465                    .await
466                    .unwrap();
467
468                let authorized_keys = fuchsia_fs::directory::open_file(
469                    &data,
470                    "ssh/authorized_keys",
471                    fio::PERM_READABLE,
472                )
473                .await
474                .unwrap();
475                assert_eq!(
476                    &fuchsia_fs::file::read_to_string(&authorized_keys).await.unwrap(),
477                    "public key!"
478                );
479            },
480            "check_test_data_file",
481        )
482        .await
483    }
484
485    /// Checks for the absence of the .testdata marker file, indicating the data filesystem was
486    /// reformatted.
487    pub async fn check_test_data_file_absent(&self) {
488        let err = with_timeout(
489            fuchsia_fs::directory::open_file(
490                &self.dir("data", fio::PERM_READABLE),
491                ".testdata",
492                fio::PERM_READABLE,
493            ),
494            "check_test_data_file_absent",
495        )
496        .await
497        .expect_err("open_file failed");
498        assert!(err.is_not_found_error());
499    }
500
501    pub async fn add_main_disk(&mut self, disk: Disk) {
502        assert!(self.main_disk.is_none());
503        let (vmo, type_guid) = disk.into_vmo_and_type_guid().await;
504        let vmo_clone =
505            vmo.create_child(zx::VmoChildOptions::SLICE, 0, vmo.get_size().unwrap()).unwrap();
506
507        self.add_ramdisk(vmo, type_guid).await;
508        self.main_disk = Some(Disk::Prebuilt(vmo_clone, type_guid));
509    }
510
511    pub async fn add_disk(&mut self, disk: Disk) {
512        let (vmo, type_guid) = disk.into_vmo_and_type_guid().await;
513        self.add_ramdisk(vmo, type_guid).await;
514    }
515
516    async fn add_ramdisk(&mut self, vmo: zx::Vmo, type_guid: Option<[u8; 16]>) {
517        let mut ramdisk_builder = RamdiskClientBuilder::new_with_vmo(vmo, Some(512))
518            .publish()
519            .ramdisk_service(self.dir(framdisk::ServiceMarker::SERVICE_NAME, fio::Flags::empty()));
520        if let Some(guid) = type_guid {
521            ramdisk_builder = ramdisk_builder.guid(guid);
522        }
523        let mut ramdisk = pin!(ramdisk_builder.build().fuse());
524
525        let ramdisk = futures::select_biased!(
526            res = ramdisk => res,
527            _ = fasync::Timer::new(Duration::from_secs(120))
528                .fuse() => panic!("Timed out waiting for RamdiskClient"),
529        )
530        .unwrap();
531        self.ramdisks.push(ramdisk);
532    }
533
534    pub fn connect_to_crypt(&self) -> CryptProxy {
535        self.realm
536            .root
537            .connect_to_protocol_at_exposed_dir()
538            .expect("connect_to_protocol_at_exposed_dir failed for the Crypt protocol")
539    }
540
541    pub async fn setup_starnix_crypt(&self) -> (CryptProxy, CryptManagementProxy) {
542        let crypt_management: CryptManagementProxy =
543            self.realm.root.connect_to_protocol_at_exposed_dir().expect(
544                "connect_to_protocol_at_exposed_dir failed for the CryptManagement protocol",
545            );
546        let crypt = self
547            .realm
548            .root
549            .connect_to_protocol_at_exposed_dir()
550            .expect("connect_to_protocol_at_exposed_dir failed for the Crypt protocol");
551        let key = vec![0xABu8; 32];
552        crypt_management
553            .add_wrapping_key(&u128::to_le_bytes(0), key.as_slice())
554            .await
555            .expect("fidl transport error")
556            .expect("add wrapping key failed");
557        crypt_management
558            .add_wrapping_key(&u128::to_le_bytes(1), key.as_slice())
559            .await
560            .expect("fidl transport error")
561            .expect("add wrapping key failed");
562        crypt_management
563            .set_active_key(KeyPurpose::Data, &u128::to_le_bytes(0))
564            .await
565            .expect("fidl transport error")
566            .expect("set metadata key failed");
567        crypt_management
568            .set_active_key(KeyPurpose::Metadata, &u128::to_le_bytes(1))
569            .await
570            .expect("fidl transport error")
571            .expect("set metadata key failed");
572        (crypt, crypt_management)
573    }
574
575    /// This must be called if any crash reports are expected, since spurious reports will cause a
576    /// failure in TestFixture::tear_down.
577    pub async fn wait_for_crash_reports(
578        &mut self,
579        count: usize,
580        expected_program: &'_ str,
581        expected_signature: &'_ str,
582    ) {
583        log::info!("Waiting for {count} crash reports");
584        for _ in 0..count {
585            let report = self.crash_reports.next().await.expect("Sender closed");
586            assert_eq!(report.program_name.as_deref(), Some(expected_program));
587            assert_eq!(report.crash_signature.as_deref(), Some(expected_signature));
588        }
589        if count > 0 {
590            let selector =
591                format!("realm_builder\\:{}/test-fshost:root", self.realm.root.child_name());
592            log::info!("Checking inspect for corruption event, selector={selector}");
593            let tree = ArchiveReader::inspect()
594                .add_selector(selector)
595                .snapshot()
596                .await
597                .unwrap()
598                .into_iter()
599                .next()
600                .and_then(|result| result.payload)
601                .expect("expected one inspect hierarchy");
602
603            let format = || expected_program.to_string();
604
605            assert_data_tree!(tree, root: contains {
606                corruption_events: contains {
607                    format() => 1u64,
608                }
609            });
610        }
611    }
612
613    // Check that the system partition table contains partitions with labels found in `expected`.
614    pub async fn check_system_partitions(&self, mut expected: Vec<&str>) {
615        with_timeout(
616            async {
617                let partitions =
618                    self.dir(fpartitions::PartitionServiceMarker::SERVICE_NAME, fio::PERM_READABLE);
619                let entries = fuchsia_fs::directory::readdir(&partitions)
620                    .await
621                    .expect("Failed to read partitions");
622
623                assert_eq!(entries.len(), expected.len());
624
625                let mut found_partition_labels = Vec::new();
626                for entry in entries {
627                    let endpoint_name = format!("{}/volume", entry.name);
628                    let volume = connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(
629                        &partitions,
630                        &endpoint_name,
631                    )
632                    .expect("failed to connect to named protocol at dir root");
633                    let (raw_status, label) =
634                        volume.get_name().await.expect("failed to call get_name");
635                    zx::Status::ok(raw_status).expect("get_name status failed");
636                    found_partition_labels
637                        .push(label.expect("partition label expected to be some value"));
638                }
639                found_partition_labels.sort();
640                expected.sort();
641                assert_eq!(found_partition_labels, expected);
642            },
643            "check_system_partitions",
644        )
645        .await
646    }
647}