// fshost_test_fixture/lib.rs

1// Copyright 2022 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5use assert_matches::assert_matches;
6use diagnostics_assertions::assert_data_tree;
7use diagnostics_reader::ArchiveReader;
8use disk_builder::Disk;
9use fake_keymint::FakeKeymint;
10use fidl::endpoints::{ServiceMarker as _, create_proxy};
11use fidl_fuchsia_boot as fboot;
12use fidl_fuchsia_driver_test as fdt;
13use fidl_fuchsia_feedback as ffeedback;
14use fidl_fuchsia_fshost_fxfsprovisioner as ffxfsprovisioner;
15use fidl_fuchsia_fxfs::{BlobReaderMarker, CryptManagementProxy, CryptProxy, KeyPurpose};
16use fidl_fuchsia_hardware_block_volume as fvolume;
17use fidl_fuchsia_hardware_ramdisk as framdisk;
18use fidl_fuchsia_io as fio;
19use fidl_fuchsia_security_keymint as fkeymint;
20use fidl_fuchsia_storage_block as fblock;
21use fidl_fuchsia_storage_partitions as fpartitions;
22use fuchsia_async::{self as fasync, TimeoutExt as _};
23use fuchsia_component::client::{
24    connect_to_named_protocol_at_dir_root, connect_to_protocol_at_dir_root,
25};
26use fuchsia_component_test::{Capability, ChildOptions, RealmBuilder, RealmInstance, Ref, Route};
27use fuchsia_driver_test::{DriverTestRealmBuilder, DriverTestRealmInstance};
28use futures::channel::mpsc;
29use futures::{FutureExt as _, StreamExt as _};
30use ramdevice_client::{RamdiskClient, RamdiskClientBuilder};
31use std::pin::pin;
32use std::time::Duration;
33
34pub mod disk_builder;
35mod mocks;
36
37pub use disk_builder::write_blob;
38pub use fshost_assembly_config::{BlockDeviceConfig, BlockDeviceIdentifiers, BlockDeviceParent};
39
// Filesystem magic numbers as reported by `fuchsia.io/FilesystemInfo.fs_type`;
// compared against the queried value in `TestFixture::check_fs_type` to verify
// which filesystem is mounted at a given path.
pub const VFS_TYPE_BLOBFS: u32 = 0x9e694d21;
pub const VFS_TYPE_MINFS: u32 = 0x6e694d21;
pub const VFS_TYPE_MEMFS: u32 = 0x3e694d21;
pub const VFS_TYPE_FXFS: u32 = 0x73667866;
pub const VFS_TYPE_F2FS: u32 = 0xfe694d21;
// Well-known volume name used by tests; presumably matches the name fshost
// gives the Starnix volume — confirm against the fshost configuration.
pub const STARNIX_VOLUME_NAME: &str = "starnix_volume";

// Timeout applied by `with_timeout` to individual fixture operations.
const DEFAULT_TIMEOUT: Duration = Duration::from_secs(60);
48
49async fn with_timeout<F: std::future::Future>(fut: F, name: impl Into<String>) -> F::Output {
50    let name = name.into();
51    fut.on_timeout(DEFAULT_TIMEOUT, move || panic!("{name} timed out after {DEFAULT_TIMEOUT:?}"))
52        .await
53}
54
/// fshost will expose an alias of its fuchsia.hardware.block.volume.Service directory at this path.
/// This allows tests to disambiguate service instances from the driver test realm, which are
/// automatically aggregated.
///
/// The alias route is registered in `TestFixtureBuilder::build`.
pub const FSHOST_VOLUME_SERVICE_DIR_NAME: &str = "VolumeService";
59
/// Rounds `offset` down to the nearest multiple of `block_size`.
///
/// `block_size` may be any type convertible into the offset's type (e.g. a
/// `u32` block size with a `u64` offset).
pub fn round_down<
    T: Into<U>,
    U: Copy + std::ops::Rem<U, Output = U> + std::ops::Sub<U, Output = U>,
>(
    offset: U,
    block_size: T,
) -> U {
    let size: U = block_size.into();
    let remainder = offset % size;
    offset - remainder
}
70
/// Builder for a `TestFixture`: configures the fshost component under test,
/// its disks, and the mock services before the realm is constructed.
pub struct TestFixtureBuilder {
    // When true, `build` skips routing fuchsia.boot Arguments/Items from the
    // mocks to fshost.
    no_fuchsia_boot: bool,
    // The main disk; attached to the realm after all `extra_disks`.
    disk: Option<Disk>,
    // Additional disks, attached before the main disk (order matters for
    // fshost's matchers — see the comment in `build`).
    extra_disks: Vec<Disk>,
    // Configuration for the fshost component under test.
    fshost: fshost_testing::FshostBuilder,
    // If set, built as a ZBI ramdisk VMO and handed to the mocks.
    zbi_ramdisk: Option<disk_builder::DiskBuilder>,
    // Selects how ramdisks are published: via fuchsia.hardware.ramdisk.Service
    // (true) or the /dev topology (false) — see `TestFixture::add_ramdisk`.
    storage_host: bool,
    // Makes the mocked fxfs provisioner fail, for error-path tests.
    force_fxfs_provisioner_failure: bool,
    // Fake keymint shared between the mocks and the disk builders.
    keymint: std::sync::Arc<FakeKeymint>,
    // Crypt policy propagated to fshost and to every disk builder.
    crypt_policy: crypt_policy::Policy,
}
82
impl TestFixtureBuilder {
    /// Creates a builder for a fixture running the named fshost component.
    /// `storage_host` controls how ramdisks are later attached (service
    /// directory vs. /dev topology — see `TestFixture::add_ramdisk`).
    pub fn new(fshost_component_name: &'static str, storage_host: bool) -> Self {
        Self {
            no_fuchsia_boot: false,
            disk: None,
            extra_disks: Vec::new(),
            fshost: fshost_testing::FshostBuilder::new(fshost_component_name),
            zbi_ramdisk: None,
            storage_host,
            force_fxfs_provisioner_failure: false,
            keymint: std::sync::Arc::new(FakeKeymint::default()),
            crypt_policy: crypt_policy::Policy::Null,
        }
    }

    /// Mutable access to the fshost component configuration.
    pub fn fshost(&mut self) -> &mut fshost_testing::FshostBuilder {
        &mut self.fshost
    }

    /// Returns a shared handle to the fake keymint instance.
    pub fn keymint(&mut self) -> std::sync::Arc<FakeKeymint> {
        self.keymint.clone()
    }

    /// Replaces the fake keymint instance and propagates it to the main disk
    /// builder and any extra disk builders configured so far.
    pub fn with_keymint_instance(mut self, keymint: std::sync::Arc<FakeKeymint>) -> Self {
        self.keymint = keymint.clone();
        if let Some(Disk::Builder(ref mut disk_builder)) = self.disk {
            disk_builder.with_keymint_instance(keymint.clone());
        }
        for disk in &mut self.extra_disks {
            if let Disk::Builder(disk_builder) = disk {
                disk_builder.with_keymint_instance(keymint.clone());
            }
        }
        self
    }

    /// Configures the main disk (replacing any previous one) and returns its
    /// builder. The new disk inherits the current crypt policy and keymint.
    pub fn with_disk(&mut self) -> &mut disk_builder::DiskBuilder {
        self.disk = Some(Disk::Builder(disk_builder::DiskBuilder::new()));
        self.disk
            .as_mut()
            .unwrap()
            .builder()
            .with_crypt_policy(self.crypt_policy)
            .with_keymint_instance(self.keymint.clone());
        self.disk.as_mut().unwrap().builder()
    }

    /// Appends an additional disk and returns its builder. Extra disks are
    /// attached to the realm before the main disk (see `build`).
    pub fn with_extra_disk(&mut self) -> &mut disk_builder::DiskBuilder {
        self.extra_disks.push(Disk::Builder(disk_builder::DiskBuilder::new()));
        self.extra_disks
            .last_mut()
            .unwrap()
            .builder()
            .with_crypt_policy(self.crypt_policy)
            .with_keymint_instance(self.keymint.clone());
        self.extra_disks.last_mut().unwrap().builder()
    }

    /// Uses an uninitialized (unformatted) main disk. Note: unlike
    /// `with_disk`, the current crypt policy and keymint are not applied.
    pub fn with_uninitialized_disk(mut self) -> Self {
        self.disk = Some(Disk::Builder(disk_builder::DiskBuilder::uninitialized()));
        self
    }

    /// Uses a pre-existing disk (e.g. one returned from a previous fixture's
    /// `tear_down`) as the main disk.
    pub fn with_disk_from(mut self, disk: Disk) -> Self {
        self.disk = Some(disk);
        self
    }

    /// Configures a disk whose image is built as a ZBI ramdisk VMO and handed
    /// to the mocks (presumably served to fshost as a boot item — confirm in
    /// mocks.rs). Returns the disk builder for further configuration.
    pub fn with_zbi_ramdisk(&mut self) -> &mut disk_builder::DiskBuilder {
        self.zbi_ramdisk = Some(disk_builder::DiskBuilder::new());
        self.zbi_ramdisk.as_mut().unwrap()
    }

    /// Skips routing the fuchsia.boot Arguments/Items protocols from the
    /// mocks to fshost.
    pub fn no_fuchsia_boot(mut self) -> Self {
        self.no_fuchsia_boot = true;
        self
    }

    /// Sets the block device configuration passed to the fshost component.
    pub fn with_device_config(mut self, device_config: Vec<BlockDeviceConfig>) -> Self {
        self.fshost.set_device_config(device_config);
        self
    }

    /// Sets the crypt policy on fshost and propagates it to the main disk
    /// builder and any extra disk builders configured so far.
    pub fn with_crypt_policy(mut self, policy: crypt_policy::Policy) -> Self {
        self.fshost.set_crypt_policy(policy);
        self.crypt_policy = policy;
        if let Some(Disk::Builder(ref mut disk_builder)) = self.disk {
            disk_builder.with_crypt_policy(policy);
        }
        for disk in &mut self.extra_disks {
            if let Disk::Builder(disk_builder) = disk {
                disk_builder.with_crypt_policy(policy);
            }
        }
        self
    }

    /// Makes the mocked fxfs provisioner fail, for error-path testing.
    pub fn force_fxfs_provisioner_failure(mut self) -> Self {
        self.force_fxfs_provisioner_failure = true;
        self
    }

    /// Assembles and starts the test realm: the fshost component, the mock
    /// services (boot args/items, crash reporter, fxfs provisioner, keymint),
    /// the driver test realm, and finally the configured disks. Panics on any
    /// setup failure.
    pub async fn build(self) -> TestFixture {
        let builder = RealmBuilder::new().await.unwrap();
        let fshost = self.fshost.build(&builder).await;
        // Create a second alias which routes fshost's volume Service capability to the parent.
        builder
            .add_route(
                Route::new()
                    .capability(
                        Capability::service::<fvolume::ServiceMarker>()
                            .as_(FSHOST_VOLUME_SERVICE_DIR_NAME),
                    )
                    .from(&fshost)
                    .to(Ref::parent()),
            )
            .await
            .unwrap();

        // Explicit match because `Option::map` cannot await.
        let maybe_zbi_vmo = match self.zbi_ramdisk {
            Some(disk_builder) => Some(disk_builder.build_as_zbi_ramdisk().await),
            None => None,
        };
        // Crash reports filed through the mocked feedback service arrive on
        // `TestFixture::crash_reports`.
        let (tx, crash_reports) = mpsc::channel(32);
        let mocks = mocks::new_mocks(
            maybe_zbi_vmo,
            tx,
            self.force_fxfs_provisioner_failure,
            self.keymint.clone(),
        );

        let mocks = builder
            .add_local_child("mocks", move |h| mocks(h).boxed(), ChildOptions::new())
            .await
            .unwrap();
        // Expose the keymint protocols to the parent so tests can drive them
        // directly.
        builder
            .add_route(
                Route::new()
                    .capability(Capability::protocol::<fkeymint::SealingKeysMarker>())
                    .capability(Capability::protocol::<fkeymint::AdminMarker>())
                    .from(&mocks)
                    .to(Ref::parent()),
            )
            .await
            .unwrap();
        // Route the mocked services that fshost depends on.
        builder
            .add_route(
                Route::new()
                    .capability(Capability::protocol::<ffeedback::CrashReporterMarker>())
                    .capability(Capability::protocol::<ffxfsprovisioner::FxfsProvisionerMarker>())
                    .capability(Capability::protocol::<fkeymint::SealingKeysMarker>())
                    .capability(Capability::protocol::<fkeymint::AdminMarker>())
                    .from(&mocks)
                    .to(&fshost),
            )
            .await
            .unwrap();
        if !self.no_fuchsia_boot {
            builder
                .add_route(
                    Route::new()
                        .capability(Capability::protocol::<fboot::ArgumentsMarker>())
                        .capability(Capability::protocol::<fboot::ItemsMarker>())
                        .from(&mocks)
                        .to(&fshost),
                )
                .await
                .unwrap();
        }

        builder
            .add_route(
                Route::new()
                    .capability(Capability::dictionary("diagnostics"))
                    .from(Ref::parent())
                    .to(&fshost),
            )
            .await
            .unwrap();

        // Services the driver test realm must expose so ramdisks and volumes
        // are visible to the fixture and fshost.
        let dtr_exposes = vec![
            fidl_fuchsia_component_test::Capability::Service(
                fidl_fuchsia_component_test::Service {
                    name: Some("fuchsia.hardware.ramdisk.Service".to_owned()),
                    ..Default::default()
                },
            ),
            fidl_fuchsia_component_test::Capability::Service(
                fidl_fuchsia_component_test::Service {
                    name: Some("fuchsia.hardware.block.volume.Service".to_owned()),
                    ..Default::default()
                },
            ),
        ];
        builder.driver_test_realm_setup().await.unwrap();
        builder.driver_test_realm_add_dtr_exposes(&dtr_exposes).await.unwrap();
        builder
            .add_route(
                Route::new()
                    .capability(Capability::directory("dev-topological").rights(fio::R_STAR_DIR))
                    .capability(Capability::service::<fvolume::ServiceMarker>())
                    .from(Ref::child(fuchsia_driver_test::COMPONENT_NAME))
                    .to(&fshost),
            )
            .await
            .unwrap();
        builder
            .add_route(
                Route::new()
                    .capability(
                        Capability::directory("dev-class")
                            .rights(fio::R_STAR_DIR)
                            .subdir("block")
                            .as_("dev-class-block"),
                    )
                    .from(Ref::child(fuchsia_driver_test::COMPONENT_NAME))
                    .to(Ref::parent()),
            )
            .await
            .unwrap();

        let mut fixture = TestFixture {
            realm: builder.build().await.unwrap(),
            ramdisks: Vec::new(),
            main_disk: None,
            crash_reports,
            torn_down: TornDown(false),
            storage_host: self.storage_host,
        };

        log::info!(
            realm_name:? = fixture.realm.root.child_name();
            "built new test realm",
        );

        fixture
            .realm
            .driver_test_realm_start(fdt::RealmArgs {
                root_driver: Some("fuchsia-boot:///platform-bus#meta/platform-bus.cm".to_owned()),
                dtr_exposes: Some(dtr_exposes),
                software_devices: Some(vec![
                    fdt::SoftwareDevice {
                        device_name: "ram-disk".to_string(),
                        device_id: bind_fuchsia_platform::BIND_PLATFORM_DEV_DID_RAM_DISK,
                    },
                    fdt::SoftwareDevice {
                        device_name: "ram-nand".to_string(),
                        device_id: bind_fuchsia_platform::BIND_PLATFORM_DEV_DID_RAM_NAND,
                    },
                ]),
                ..Default::default()
            })
            .await
            .unwrap();

        // The order of adding disks matters here, unfortunately. fshost should not change behavior
        // based on the order disks appear, but because we take the first available that matches
        // whatever relevant criteria, it's useful to test that matchers don't get clogged up by
        // previous disks.
        // TODO(https://fxbug.dev/380353856): This type of testing should be irrelevant once the
        // block devices are determined by configuration options instead of heuristically.
        for disk in self.extra_disks.into_iter() {
            fixture.add_disk(disk).await;
        }
        if let Some(disk) = self.disk {
            fixture.add_main_disk(disk).await;
        }

        fixture
    }
}
354
/// Create a separate struct that does the drop-assert because fixture.tear_down can't call
/// realm.destroy if it has the drop impl itself.
///
/// The inner bool is flipped to `true` by `TestFixture::tear_down`.
struct TornDown(bool);
358
359impl Drop for TornDown {
360    fn drop(&mut self) {
361        // Because tear_down is async, it needs to be called by the test in an async context. It
362        // checks some properties so for correctness it must be called.
363        assert!(self.0, "fixture.tear_down() must be called");
364    }
365}
366
/// A running fshost test realm plus its attached ramdisks and mock endpoints.
/// Constructed via `TestFixtureBuilder::build`; tests must finish by calling
/// `tear_down`.
pub struct TestFixture {
    // The realm containing fshost, the mocks, and the driver test realm.
    pub realm: RealmInstance,
    // Keeps attached ramdisks alive for the lifetime of the fixture.
    pub ramdisks: Vec<RamdiskClient>,
    // A handle to (a slice child VMO of) the main disk, returned by tear_down.
    pub main_disk: Option<Disk>,
    // Receives crash reports filed through the mocked feedback service.
    pub crash_reports: mpsc::Receiver<ffeedback::CrashReport>,
    // Drop guard asserting that tear_down was called.
    torn_down: TornDown,
    // Selects the ramdisk publishing mechanism (see add_ramdisk).
    storage_host: bool,
}
375
376impl TestFixture {
377    pub async fn tear_down(mut self) -> Option<Disk> {
378        log::info!(realm_name:? = self.realm.root.child_name(); "tearing down");
379        let disk = self.main_disk.take();
380        // Check the crash reports before destroying the realm because tearing down the realm can
381        // cause mounting errors that trigger a crash report.
382        assert_matches!(self.crash_reports.try_next(), Ok(None) | Err(_));
383        self.realm.destroy().await.unwrap();
384        self.torn_down.0 = true;
385        disk
386    }
387
388    pub fn exposed_dir(&self) -> &fio::DirectoryProxy {
389        self.realm.root.get_exposed_dir()
390    }
391
392    pub fn dir(&self, dir: &str, flags: fio::Flags) -> fio::DirectoryProxy {
393        let (dev, server) = create_proxy::<fio::DirectoryMarker>();
394        let flags = flags | fio::Flags::PROTOCOL_DIRECTORY;
395        self.realm
396            .root
397            .get_exposed_dir()
398            .open(dir, flags, &fio::Options::default(), server.into_channel())
399            .expect("open failed");
400        dev
401    }
402
403    pub async fn check_fs_type(&self, dir: &str, fs_type: u32) {
404        let (status, info) = with_timeout(
405            self.dir(dir, fio::Flags::empty()).query_filesystem(),
406            format!("check_fs_type({dir})"),
407        )
408        .await
409        .expect("query failed");
410        assert_eq!(zx::Status::from_raw(status), zx::Status::OK);
411        assert!(info.is_some());
412        let info_type = info.unwrap().fs_type;
413        assert_eq!(info_type, fs_type, "{:#08x} != {:#08x}", info_type, fs_type);
414    }
415
416    pub async fn check_test_blob(&self) {
417        with_timeout(
418            async {
419                let expected_blob_hash = disk_builder::test_blob_hash();
420                let reader = connect_to_protocol_at_dir_root::<BlobReaderMarker>(
421                    self.realm.root.get_exposed_dir(),
422                )
423                .expect("failed to connect to the BlobReader");
424                let _vmo = reader
425                    .get_vmo(&expected_blob_hash.into())
426                    .await
427                    .expect("blob get_vmo fidl error")
428                    .unwrap_or_else(|e| match zx::Status::from_raw(e) {
429                        zx::Status::NOT_FOUND => panic!("Test blob not found - blobfs lost data!"),
430                        s => panic!("Error while opening test blob vmo: {s}"),
431                    });
432            },
433            "check_test_blob",
434        )
435        .await
436    }
437
438    /// Check for the existence of a well-known set of test files in the data volume. These files
439    /// are placed by the disk builder if it formats the filesystem beforehand.
440    pub async fn check_test_data_file(&self) {
441        with_timeout(
442            async {
443                let (file, server) = create_proxy::<fio::NodeMarker>();
444                self.dir("data", fio::PERM_READABLE)
445                    .open(
446                        ".testdata",
447                        fio::PERM_READABLE,
448                        &fio::Options::default(),
449                        server.into_channel(),
450                    )
451                    .expect("open failed");
452                file.get_attributes(fio::NodeAttributesQuery::empty())
453                    .await
454                    .expect("Fidl transport error on get_attributes()")
455                    .expect("get_attr failed - data was probably deleted!");
456
457                let data = self.dir("data", fio::PERM_READABLE);
458                fuchsia_fs::directory::open_file(&data, ".testdata", fio::PERM_READABLE)
459                    .await
460                    .unwrap();
461
462                fuchsia_fs::directory::open_directory(&data, "ssh", fio::PERM_READABLE)
463                    .await
464                    .unwrap();
465                fuchsia_fs::directory::open_directory(&data, "ssh/config", fio::PERM_READABLE)
466                    .await
467                    .unwrap();
468                fuchsia_fs::directory::open_directory(&data, "problems", fio::PERM_READABLE)
469                    .await
470                    .unwrap();
471
472                let authorized_keys = fuchsia_fs::directory::open_file(
473                    &data,
474                    "ssh/authorized_keys",
475                    fio::PERM_READABLE,
476                )
477                .await
478                .unwrap();
479                assert_eq!(
480                    &fuchsia_fs::file::read_to_string(&authorized_keys).await.unwrap(),
481                    "public key!"
482                );
483            },
484            "check_test_data_file",
485        )
486        .await
487    }
488
489    /// Checks for the absence of the .testdata marker file, indicating the data filesystem was
490    /// reformatted.
491    pub async fn check_test_data_file_absent(&self) {
492        let err = with_timeout(
493            fuchsia_fs::directory::open_file(
494                &self.dir("data", fio::PERM_READABLE),
495                ".testdata",
496                fio::PERM_READABLE,
497            ),
498            "check_test_data_file_absent",
499        )
500        .await
501        .expect_err("open_file failed");
502        assert!(err.is_not_found_error());
503    }
504
505    pub async fn add_main_disk(&mut self, disk: Disk) {
506        assert!(self.main_disk.is_none());
507        let (vmo, type_guid) = disk.into_vmo_and_type_guid().await;
508        let vmo_clone =
509            vmo.create_child(zx::VmoChildOptions::SLICE, 0, vmo.get_size().unwrap()).unwrap();
510
511        self.add_ramdisk(vmo, type_guid).await;
512        self.main_disk = Some(Disk::Prebuilt(vmo_clone, type_guid));
513    }
514
515    pub async fn add_disk(&mut self, disk: Disk) {
516        let (vmo, type_guid) = disk.into_vmo_and_type_guid().await;
517        self.add_ramdisk(vmo, type_guid).await;
518    }
519
520    async fn add_ramdisk(&mut self, vmo: zx::Vmo, type_guid: Option<[u8; 16]>) {
521        let mut ramdisk_builder = if self.storage_host {
522            RamdiskClientBuilder::new_with_vmo(vmo, Some(512)).use_v2().publish().ramdisk_service(
523                self.dir(framdisk::ServiceMarker::SERVICE_NAME, fio::Flags::empty()),
524            )
525        } else {
526            RamdiskClientBuilder::new_with_vmo(vmo, Some(512))
527                .dev_root(self.dir("dev-topological", fio::Flags::empty()))
528        };
529        if let Some(guid) = type_guid {
530            ramdisk_builder = ramdisk_builder.guid(guid);
531        }
532        let mut ramdisk = pin!(ramdisk_builder.build().fuse());
533
534        let ramdisk = futures::select_biased!(
535            res = ramdisk => res,
536            _ = fasync::Timer::new(Duration::from_secs(120))
537                .fuse() => panic!("Timed out waiting for RamdiskClient"),
538        )
539        .unwrap();
540        self.ramdisks.push(ramdisk);
541    }
542
543    pub fn connect_to_crypt(&self) -> CryptProxy {
544        self.realm
545            .root
546            .connect_to_protocol_at_exposed_dir()
547            .expect("connect_to_protocol_at_exposed_dir failed for the Crypt protocol")
548    }
549
550    pub async fn setup_starnix_crypt(&self) -> (CryptProxy, CryptManagementProxy) {
551        let crypt_management: CryptManagementProxy =
552            self.realm.root.connect_to_protocol_at_exposed_dir().expect(
553                "connect_to_protocol_at_exposed_dir failed for the CryptManagement protocol",
554            );
555        let crypt = self
556            .realm
557            .root
558            .connect_to_protocol_at_exposed_dir()
559            .expect("connect_to_protocol_at_exposed_dir failed for the Crypt protocol");
560        let key = vec![0xABu8; 32];
561        crypt_management
562            .add_wrapping_key(&u128::to_le_bytes(0), key.as_slice())
563            .await
564            .expect("fidl transport error")
565            .expect("add wrapping key failed");
566        crypt_management
567            .add_wrapping_key(&u128::to_le_bytes(1), key.as_slice())
568            .await
569            .expect("fidl transport error")
570            .expect("add wrapping key failed");
571        crypt_management
572            .set_active_key(KeyPurpose::Data, &u128::to_le_bytes(0))
573            .await
574            .expect("fidl transport error")
575            .expect("set metadata key failed");
576        crypt_management
577            .set_active_key(KeyPurpose::Metadata, &u128::to_le_bytes(1))
578            .await
579            .expect("fidl transport error")
580            .expect("set metadata key failed");
581        (crypt, crypt_management)
582    }
583
584    /// This must be called if any crash reports are expected, since spurious reports will cause a
585    /// failure in TestFixture::tear_down.
586    pub async fn wait_for_crash_reports(
587        &mut self,
588        count: usize,
589        expected_program: &'_ str,
590        expected_signature: &'_ str,
591    ) {
592        log::info!("Waiting for {count} crash reports");
593        for _ in 0..count {
594            let report = self.crash_reports.next().await.expect("Sender closed");
595            assert_eq!(report.program_name.as_deref(), Some(expected_program));
596            assert_eq!(report.crash_signature.as_deref(), Some(expected_signature));
597        }
598        if count > 0 {
599            let selector =
600                format!("realm_builder\\:{}/test-fshost:root", self.realm.root.child_name());
601            log::info!("Checking inspect for corruption event, selector={selector}");
602            let tree = ArchiveReader::inspect()
603                .add_selector(selector)
604                .snapshot()
605                .await
606                .unwrap()
607                .into_iter()
608                .next()
609                .and_then(|result| result.payload)
610                .expect("expected one inspect hierarchy");
611
612            let format = || expected_program.to_string();
613
614            assert_data_tree!(tree, root: contains {
615                corruption_events: contains {
616                    format() => 1u64,
617                }
618            });
619        }
620    }
621
622    // Check that the system partition table contains partitions with labels found in `expected`.
623    pub async fn check_system_partitions(&self, mut expected: Vec<&str>) {
624        with_timeout(
625            async {
626                let partitions =
627                    self.dir(fpartitions::PartitionServiceMarker::SERVICE_NAME, fio::PERM_READABLE);
628                let entries = fuchsia_fs::directory::readdir(&partitions)
629                    .await
630                    .expect("Failed to read partitions");
631
632                assert_eq!(entries.len(), expected.len());
633
634                let mut found_partition_labels = Vec::new();
635                for entry in entries {
636                    let endpoint_name = format!("{}/volume", entry.name);
637                    let volume = connect_to_named_protocol_at_dir_root::<fblock::BlockMarker>(
638                        &partitions,
639                        &endpoint_name,
640                    )
641                    .expect("failed to connect to named protocol at dir root");
642                    let (raw_status, label) =
643                        volume.get_name().await.expect("failed to call get_name");
644                    zx::Status::ok(raw_status).expect("get_name status failed");
645                    found_partition_labels
646                        .push(label.expect("partition label expected to be some value"));
647                }
648                found_partition_labels.sort();
649                expected.sort();
650                assert_eq!(found_partition_labels, expected);
651            },
652            "check_system_partitions",
653        )
654        .await
655    }
656}