// fxfs/object_store/journal/super_block.rs
// Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! We currently store two of these super-blocks (A/B) starting at offset 0 and 512kB.
//!
//! Immediately following the serialized `SuperBlockHeader` structure below is a stream of
//! serialized operations that are replayed into the root parent `ObjectStore`. Note that the root
//! parent object store exists entirely in RAM until serialized back into the super-block.
//!
//! Super-blocks are updated alternately with a monotonically increasing generation number.
//! At mount time, the super-block used is the valid `SuperBlock` with the highest generation
//! number.
//!
//! Note the asymmetry here regarding load/save:
//!   * We load a superblock from a Device/SuperBlockInstance and return a
//!     (SuperBlockHeader, ObjectStore) pair. The ObjectStore is populated directly from device.
//!   * We save a superblock from a (SuperBlockHeader, Vec<ObjectItem>) pair to a WriteObjectHandle.
//!
//! This asymmetry is required for consistency.
//! The Vec<ObjectItem> is produced by scanning the root_parent_store. This is the responsibility
//! of the journal code, which must hold a lock to avoid concurrent updates. However, this lock
//! must NOT be held when saving the superblock as additional extents may need to be allocated as
//! part of the save process.
use crate::errors::FxfsError;
use crate::filesystem::{ApplyContext, ApplyMode, FxFilesystem, JournalingObject};
use crate::log::*;
use crate::lsm_tree::types::LayerIterator;
use crate::lsm_tree::{LSMTree, LayerSet, Query};
use crate::metrics;
use crate::object_handle::ObjectHandle as _;
use crate::object_store::allocator::Reservation;
use crate::object_store::data_object_handle::{FileExtent, OverwriteOptions};
use crate::object_store::journal::bootstrap_handle::BootstrapObjectHandle;
use crate::object_store::journal::reader::{JournalReader, ReadResult};
use crate::object_store::journal::writer::JournalWriter;
use crate::object_store::journal::{BLOCK_SIZE, JournalCheckpoint, JournalCheckpointV32};
use crate::object_store::object_record::{
    ObjectItem, ObjectItemV40, ObjectItemV41, ObjectItemV43, ObjectItemV46, ObjectItemV47,
    ObjectItemV49, ObjectItemV50, ObjectItemV54,
};
use crate::object_store::transaction::{AssocObj, Options};
use crate::object_store::tree::MajorCompactable;
use crate::object_store::{
    DataObjectHandle, HandleOptions, HandleOwner, Mutation, ObjectKey, ObjectStore, ObjectValue,
};
use crate::range::RangeExt;
use crate::serialized_types::{
    EARLIEST_SUPPORTED_VERSION, FIRST_EXTENT_IN_SUPERBLOCK_VERSION, Migrate,
    SMALL_SUPERBLOCK_VERSION, Version, Versioned, VersionedLatest, migrate_nodefault,
    migrate_to_version,
};
use anyhow::{Context, Error, bail, ensure};
use fprint::TypeFingerprint;
use fuchsia_inspect::{Property as _, UintProperty};
use fuchsia_sync::Mutex;
use futures::FutureExt;
use rustc_hash::FxHashMap as HashMap;
use serde::{Deserialize, Serialize};
use std::collections::VecDeque;
use std::fmt;
use std::io::{Read, Write};
use std::ops::Range;
use std::sync::Arc;
use std::time::SystemTime;
use storage_device::Device;
use uuid::Uuid;
68
// These only exist in the root store.
const SUPER_BLOCK_A_OBJECT_ID: u64 = 1;
const SUPER_BLOCK_B_OBJECT_ID: u64 = 2;

/// The superblock is extended in units of `SUPER_BLOCK_CHUNK_SIZE` as required.
pub const SUPER_BLOCK_CHUNK_SIZE: u64 = 65536;

/// Each superblock is one block but may contain records that extend its own length.
pub(crate) const MIN_SUPER_BLOCK_SIZE: u64 = 4096;
/// The first 2 * 512 KiB on the disk used to be reserved for two A/B super-blocks.
const LEGACY_MIN_SUPER_BLOCK_SIZE: u64 = 524_288;

/// All superblocks start with the magic bytes "FxfsSupr".
const SUPER_BLOCK_MAGIC: &[u8; 8] = b"FxfsSupr";

/// An enum representing one of our super-block instances.
///
/// This provides hard-coded constants related to the location and properties of the super-blocks
/// that are required to bootstrap the filesystem.
#[derive(Copy, Clone, Debug)]
pub enum SuperBlockInstance {
    A,
    B,
}

impl SuperBlockInstance {
    /// Returns the next [SuperBlockInstance] for use in round-robining writes across super-blocks.
    pub fn next(&self) -> SuperBlockInstance {
        match self {
            SuperBlockInstance::A => SuperBlockInstance::B,
            SuperBlockInstance::B => SuperBlockInstance::A,
        }
    }

    /// Returns the object ID in the root store under which this super-block instance is stored.
    pub fn object_id(&self) -> u64 {
        match self {
            SuperBlockInstance::A => SUPER_BLOCK_A_OBJECT_ID,
            SuperBlockInstance::B => SUPER_BLOCK_B_OBJECT_ID,
        }
    }

    /// Returns the byte range where the first extent of the [SuperBlockInstance] is stored.
    /// (Note that a [SuperBlockInstance] may still have multiple extents.)
    pub fn first_extent(&self) -> Range<u64> {
        match self {
            SuperBlockInstance::A => 0..MIN_SUPER_BLOCK_SIZE,
            // B still begins at the 512 KiB offset where it has always lived (A's legacy
            // reservation ends there — see `legacy_first_extent`), so use the named constant
            // rather than a magic number.
            SuperBlockInstance::B => {
                LEGACY_MIN_SUPER_BLOCK_SIZE..LEGACY_MIN_SUPER_BLOCK_SIZE + MIN_SUPER_BLOCK_SIZE
            }
        }
    }

    /// We used to allocate 512kB to superblocks but this was almost always more than needed.
    pub fn legacy_first_extent(&self) -> Range<u64> {
        match self {
            SuperBlockInstance::A => 0..LEGACY_MIN_SUPER_BLOCK_SIZE,
            SuperBlockInstance::B => LEGACY_MIN_SUPER_BLOCK_SIZE..2 * LEGACY_MIN_SUPER_BLOCK_SIZE,
        }
    }
}
127
pub type SuperBlockHeader = SuperBlockHeaderV32;

/// The fixed header at the start of every super-block; everything needed to bootstrap replay of
/// the rest of the filesystem.
#[derive(
    Clone, Debug, Default, Eq, PartialEq, Serialize, Deserialize, TypeFingerprint, Versioned,
)]
pub struct SuperBlockHeaderV32 {
    /// The globally unique identifier for the filesystem.
    pub guid: UuidWrapperV32,

    /// There are two super-blocks which are used in an A/B configuration. The super-block with the
    /// greatest generation number is what is used when mounting an Fxfs image; the other is
    /// discarded.
    pub generation: u64,

    /// The root parent store is an in-memory only store and serves as the backing store for the
    /// root store and the journal.  The records for this store are serialized into the super-block
    /// and mutations are also recorded in the journal.
    pub root_parent_store_object_id: u64,

    /// The root parent needs a graveyard and there's nowhere else to store it other than in the
    /// super-block.
    pub root_parent_graveyard_directory_object_id: u64,

    /// The root object store contains all other metadata objects (including the allocator, the
    /// journal and the super-blocks) and is the parent for all other object stores.
    pub root_store_object_id: u64,

    /// This is in the root object store.
    pub allocator_object_id: u64,

    /// This is in the root parent object store.
    pub journal_object_id: u64,

    /// Start checkpoint for the journal file.
    pub journal_checkpoint: JournalCheckpointV32,

    /// Offset of the journal file when the super-block was written.  If no entry is present in
    /// journal_file_offsets for a particular object, then an object might have dependencies on the
    /// journal from super_block_journal_file_offset onwards, but not earlier.
    pub super_block_journal_file_offset: u64,

    /// object id -> journal file offset. Indicates where each object has been flushed to.
    pub journal_file_offsets: HashMap<u64, u64>,

    /// Records the amount of borrowed metadata space as applicable at
    /// `super_block_journal_file_offset`.
    pub borrowed_metadata_space: u64,

    /// The earliest version of Fxfs used to create any still-existing struct in the filesystem.
    ///
    /// Note: structs in the filesystem may have been made with various different versions of Fxfs.
    pub earliest_version: Version,
}
181
type UuidWrapper = UuidWrapperV32;

/// Newtype around [`Uuid`] so that we control its serialized form, debug output and type
/// fingerprint (see the trait impls below).
#[derive(Clone, Default, Eq, PartialEq)]
pub struct UuidWrapperV32(pub Uuid);

impl UuidWrapper {
    /// Wraps a freshly generated random (v4) UUID.
    fn new() -> Self {
        Self(Uuid::new_v4())
    }
    // Test-only all-zero UUID.
    #[cfg(test)]
    fn nil() -> Self {
        Self(Uuid::nil())
    }
}
195
196impl fmt::Debug for UuidWrapper {
197    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
198        // The UUID uniquely identifies the filesystem, so we should redact it so that we don't leak
199        // it in logs.
200        f.write_str("<redacted>")
201    }
202}
203
204impl TypeFingerprint for UuidWrapper {
205    fn fingerprint() -> String {
206        "<[u8;16]>".to_owned()
207    }
208}
209
// Uuid serializes like a slice, but SuperBlockHeader used to contain [u8; 16] and we want to remain
// compatible.
impl Serialize for UuidWrapper {
    fn serialize<S: serde::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        // Serialize the raw 16 bytes rather than Uuid's default representation.
        self.0.as_bytes().serialize(serializer)
    }
}

impl<'de> Deserialize<'de> for UuidWrapper {
    fn deserialize<D: serde::Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
        // Mirror of serialize: read exactly 16 bytes and rebuild the Uuid from them.
        <[u8; 16]>::deserialize(deserializer).map(|bytes| UuidWrapperV32(Uuid::from_bytes(bytes)))
    }
}
223
pub type SuperBlockRecord = SuperBlockRecordV54;

/// A single record in the stream that follows the serialized `SuperBlockHeader`.
#[allow(clippy::large_enum_variant)]
#[derive(Debug, Serialize, Deserialize, TypeFingerprint, Versioned)]
pub enum SuperBlockRecordV54 {
    // When reading the super-block we know the initial extent, but not subsequent extents, so these
    // records need to exist to allow us to completely read the super-block.
    Extent(Range<u64>),

    // Following the super-block header are ObjectItem records that are to be replayed into the root
    // parent object store.
    ObjectItem(ObjectItemV54),

    // Marks the end of the full super-block.
    End,
}
240
// Older on-disk versions of `SuperBlockRecord`.  Each version below is migrated stepwise to the
// next newer one (and ultimately to `SuperBlockRecordV54`) via the `Migrate` derive.
#[allow(clippy::large_enum_variant)]
#[derive(Migrate, Serialize, Deserialize, TypeFingerprint, Versioned)]
#[migrate_to_version(SuperBlockRecordV54)]
#[migrate_nodefault]
pub enum SuperBlockRecordV50 {
    Extent(Range<u64>),
    ObjectItem(ObjectItemV50),
    End,
}

#[allow(clippy::large_enum_variant)]
#[derive(Migrate, Serialize, Deserialize, TypeFingerprint, Versioned)]
#[migrate_to_version(SuperBlockRecordV50)]
pub enum SuperBlockRecordV49 {
    Extent(Range<u64>),
    ObjectItem(ObjectItemV49),
    End,
}

#[allow(clippy::large_enum_variant)]
#[derive(Migrate, Serialize, Deserialize, TypeFingerprint, Versioned)]
#[migrate_to_version(SuperBlockRecordV49)]
pub enum SuperBlockRecordV47 {
    Extent(Range<u64>),
    ObjectItem(ObjectItemV47),
    End,
}

#[allow(clippy::large_enum_variant)]
#[derive(Migrate, Serialize, Deserialize, TypeFingerprint, Versioned)]
#[migrate_to_version(SuperBlockRecordV47)]
pub enum SuperBlockRecordV46 {
    Extent(Range<u64>),
    ObjectItem(ObjectItemV46),
    End,
}

#[allow(clippy::large_enum_variant)]
#[derive(Migrate, Serialize, Deserialize, TypeFingerprint, Versioned)]
#[migrate_to_version(SuperBlockRecordV46)]
pub enum SuperBlockRecordV43 {
    Extent(Range<u64>),
    ObjectItem(ObjectItemV43),
    End,
}

#[derive(Migrate, Serialize, Deserialize, TypeFingerprint, Versioned)]
#[migrate_to_version(SuperBlockRecordV43)]
pub enum SuperBlockRecordV41 {
    Extent(Range<u64>),
    ObjectItem(ObjectItemV41),
    End,
}

#[derive(Migrate, Serialize, Deserialize, TypeFingerprint, Versioned)]
#[migrate_to_version(SuperBlockRecordV41)]
pub enum SuperBlockRecordV40 {
    Extent(Range<u64>),
    ObjectItem(ObjectItemV40),
    End,
}
302
/// Inspect metrics recorded each time a super-block is written (see `SuperBlockManager::save`).
struct SuperBlockMetrics {
    /// Time we wrote the most recent superblock in milliseconds since [`std::time::UNIX_EPOCH`].
    /// Uses [`std::time::SystemTime`] as the clock source.
    last_super_block_update_time_ms: UintProperty,

    /// Offset of the most recent superblock we wrote in the journal.
    last_super_block_offset: UintProperty,
}

impl Default for SuperBlockMetrics {
    fn default() -> Self {
        // Both properties are registered under the "detail" metrics node and start at zero.
        SuperBlockMetrics {
            last_super_block_update_time_ms: metrics::detail()
                .create_uint("last_super_block_update_time_ms", 0),
            last_super_block_offset: metrics::detail().create_uint("last_super_block_offset", 0),
        }
    }
}
321
322/// Reads an individual (A/B) super-block instance and root_parent_store from device.
323/// Users should use SuperBlockManager::load() instead.
324async fn read(
325    device: Arc<dyn Device>,
326    block_size: u64,
327    instance: SuperBlockInstance,
328) -> Result<(SuperBlockHeader, SuperBlockInstance, ObjectStore), Error> {
329    let (super_block_header, mut reader) = SuperBlockHeader::read_header(device.clone(), instance)
330        .await
331        .context("failed to read superblock")?;
332    let root_parent = ObjectStore::new_root_parent(
333        device,
334        block_size,
335        super_block_header.root_parent_store_object_id,
336    );
337    root_parent.set_graveyard_directory_object_id(
338        super_block_header.root_parent_graveyard_directory_object_id,
339    );
340
341    loop {
342        // TODO: Flatten a layer and move reader here?
343        let (mutation, sequence) = match reader.next_item().await? {
344            // RecordReader should filter out extent records.
345            SuperBlockRecord::Extent(_) => bail!("Unexpected extent record"),
346            SuperBlockRecord::ObjectItem(item) => {
347                (Mutation::insert_object(item.key, item.value), item.sequence)
348            }
349            SuperBlockRecord::End => break,
350        };
351        root_parent.apply_mutation(
352            mutation,
353            &ApplyContext {
354                mode: ApplyMode::Replay,
355                checkpoint: JournalCheckpoint { file_offset: sequence, ..Default::default() },
356            },
357            AssocObj::None,
358        )?;
359    }
360    Ok((super_block_header, instance, root_parent))
361}
362
363/// Write a super-block to the given file handle.
364/// Requires that the filesystem is fully loaded and writable as this may require allocation.
365async fn write<S: HandleOwner>(
366    super_block_header: &SuperBlockHeader,
367    items: LayerSet<ObjectKey, ObjectValue>,
368    handle: DataObjectHandle<S>,
369) -> Result<(), Error> {
370    let object_manager = handle.store().filesystem().object_manager().clone();
371    // TODO(https://fxbug.dev/42177407): Don't use the same code here for Journal and SuperBlock. They
372    // aren't the same things and it is already getting convoluted. e.g of diff stream content:
373    //   Superblock:  (Magic, Ver, Header(Ver), Extent(Ver)*, SuperBlockRecord(Ver)*, ...)
374    //   Journal:     (Ver, JournalRecord(Ver)*, RESET, Ver2, JournalRecord(Ver2)*, ...)
375    // We should abstract away the checksum code and implement these separately.
376
377    let mut writer =
378        SuperBlockWriter::new(handle, super_block_header, object_manager.metadata_reservation())
379            .await?;
380    let mut merger = items.merger();
381    let mut iter = LSMTree::major_iter(merger.query(Query::FullScan).await?).await?;
382    while let Some(item) = iter.get() {
383        writer.write_root_parent_item(item.cloned()).await?;
384        iter.advance().await?;
385    }
386    writer.finalize().await
387}
388
// Compacts and returns the *old* snapshot of the root_parent store.
// Must be performed whilst holding a writer lock.
pub fn compact_root_parent(
    root_parent_store: &ObjectStore,
) -> Result<LayerSet<ObjectKey, ObjectValue>, Error> {
    // The root parent always uses in-memory layers which shouldn't be async, so we can use
    // `now_or_never`.
    let tree = root_parent_store.tree();
    let layer_set = tree.layer_set();
    // Scope the merger/iterator so their borrows of `layer_set` end before it is returned by
    // value below.
    {
        let mut merger = layer_set.merger();
        let mut iter = LSMTree::major_iter(merger.query(Query::FullScan).now_or_never().unwrap()?)
            .now_or_never()
            .unwrap()?;
        // Copy every live item into a fresh mutable layer and swap it in; `layer_set` remains the
        // pre-compaction snapshot.
        let new_layer = LSMTree::new_mutable_layer();
        while let Some(item_ref) = iter.get() {
            new_layer.insert(item_ref.cloned())?;
            iter.advance().now_or_never().unwrap()?;
        }
        tree.set_mutable_layer(new_layer);
    }
    Ok(layer_set)
}
412
/// This encapsulates the A/B alternating super-block logic.
/// All super-block load/save operations should be via the methods on this type.
pub(super) struct SuperBlockManager {
    // The instance that the next call to `save` will overwrite.
    pub next_instance: Mutex<SuperBlockInstance>,
    // Inspect metrics updated after each successful save.
    metrics: SuperBlockMetrics,
}
419
impl SuperBlockManager {
    pub fn new() -> Self {
        Self { next_instance: Mutex::new(SuperBlockInstance::A), metrics: Default::default() }
    }

    /// Loads both A/B super-blocks and root_parent ObjectStores and returns the newest valid
    /// pair. Also ensures the next superblock updated via |save| will be the other instance.
    pub async fn load(
        &self,
        device: Arc<dyn Device>,
        block_size: u64,
    ) -> Result<(SuperBlockHeader, ObjectStore), Error> {
        // Superblocks consume a minimum of one block. We currently hard code the length of
        // this first extent. It should work with larger block sizes, but has not been tested.
        // TODO(https://fxbug.dev/42063349): Consider relaxing this.
        debug_assert!(MIN_SUPER_BLOCK_SIZE == block_size);

        // Read both instances concurrently; a single failure is tolerated as long as the other
        // instance is valid.
        let (super_block, current_super_block, root_parent) = match futures::join!(
            read(device.clone(), block_size, SuperBlockInstance::A),
            read(device.clone(), block_size, SuperBlockInstance::B)
        ) {
            (Err(e1), Err(e2)) => {
                bail!("Failed to load both superblocks due to {:?}\nand\n{:?}", e1, e2)
            }
            (Ok(result), Err(_)) => result,
            (Err(_), Ok(result)) => result,
            (Ok(result1), Ok(result2)) => {
                // Break the tie by taking the super-block with the greatest generation.
                // The signed wrapping subtraction keeps the comparison meaningful even if the
                // generation counter ever wraps.
                if (result2.0.generation as i64).wrapping_sub(result1.0.generation as i64) > 0 {
                    result2
                } else {
                    result1
                }
            }
        };
        info!(super_block:?, current_super_block:?; "loaded super-block");
        // The next save must target the *other* instance so one good copy is always retained.
        *self.next_instance.lock() = current_super_block.next();
        Ok((super_block, root_parent))
    }

    /// Writes the provided superblock and root_parent ObjectStore to the device.
    /// Requires that the filesystem is fully loaded and writable as this may require allocation.
    pub async fn save(
        &self,
        super_block_header: SuperBlockHeader,
        filesystem: Arc<FxFilesystem>,
        root_parent: LayerSet<ObjectKey, ObjectValue>,
    ) -> Result<(), Error> {
        let root_store = filesystem.root_store();
        // Claim the target instance and advance the round-robin inside a short-lived scope so the
        // lock isn't held across the awaits below.
        let object_id = {
            let mut next_instance = self.next_instance.lock();
            let object_id = next_instance.object_id();
            *next_instance = next_instance.next();
            object_id
        };
        let handle = ObjectStore::open_object(
            &root_store,
            object_id,
            HandleOptions { skip_journal_checks: true, ..Default::default() },
            None,
        )
        .await
        .context("Failed to open superblock object")?;
        write(&super_block_header, root_parent, handle).await?;
        // Record where and when this super-block landed, for diagnostics.
        self.metrics
            .last_super_block_offset
            .set(super_block_header.super_block_journal_file_offset);
        self.metrics.last_super_block_update_time_ms.set(
            SystemTime::now()
                .duration_since(SystemTime::UNIX_EPOCH)
                .unwrap()
                .as_millis()
                .try_into()
                .unwrap_or(0u64),
        );
        Ok(())
    }
}
498
impl SuperBlockHeader {
    /// Creates a new instance with random GUID.
    pub fn new(
        generation: u64,
        root_parent_store_object_id: u64,
        root_parent_graveyard_directory_object_id: u64,
        root_store_object_id: u64,
        allocator_object_id: u64,
        journal_object_id: u64,
        journal_checkpoint: JournalCheckpoint,
        earliest_version: Version,
    ) -> Self {
        SuperBlockHeader {
            guid: UuidWrapper::new(),
            generation,
            root_parent_store_object_id,
            root_parent_graveyard_directory_object_id,
            root_store_object_id,
            allocator_object_id,
            journal_object_id,
            journal_checkpoint,
            earliest_version,
            // Remaining fields (e.g. journal_file_offsets) start at their defaults.
            ..Default::default()
        }
    }

    /// Read the super-block header, and return it and a reader that produces the records that are
    /// to be replayed in to the root parent object store.
    async fn read_header(
        device: Arc<dyn Device>,
        target_super_block: SuperBlockInstance,
    ) -> Result<(SuperBlockHeader, RecordReader), Error> {
        // Bootstrapping: only the location of the first extent is known up front; later extents
        // are discovered from Extent records in the stream itself.
        let handle = BootstrapObjectHandle::new(
            target_super_block.object_id(),
            device,
            target_super_block.first_extent(),
        );
        let mut reader = JournalReader::new(handle, &JournalCheckpoint::default());
        reader.set_eof_ok();

        reader.fill_buf().await?;

        let mut super_block_header;
        let super_block_version;
        // The block below evaluates to `cursor.position()`, i.e. the number of bytes consumed
        // from the reader's buffer by the magic + header.
        reader.consume({
            let mut cursor = std::io::Cursor::new(reader.buffer());
            // Validate magic bytes.
            let mut magic_bytes: [u8; 8] = [0; 8];
            cursor.read_exact(&mut magic_bytes)?;
            if magic_bytes.as_slice() != SUPER_BLOCK_MAGIC.as_slice() {
                bail!("Invalid magic: {:?}", magic_bytes);
            }
            (super_block_header, super_block_version) =
                SuperBlockHeader::deserialize_with_version(&mut cursor)?;

            if super_block_version < EARLIEST_SUPPORTED_VERSION {
                bail!("Unsupported SuperBlock version: {:?}", super_block_version);
            }

            // NOTE: It is possible that data was written to the journal with an old version
            // but no compaction ever happened, so the journal version could potentially be older
            // than the layer file versions.
            if super_block_header.journal_checkpoint.version < EARLIEST_SUPPORTED_VERSION {
                bail!(
                    "Unsupported JournalCheckpoint version: {:?}",
                    super_block_header.journal_checkpoint.version
                );
            }

            if super_block_header.earliest_version < EARLIEST_SUPPORTED_VERSION {
                bail!(
                    "Filesystem contains struct with unsupported version: {:?}",
                    super_block_header.earliest_version
                );
            }

            cursor.position() as usize
        });

        // From version 45 superblocks describe their own extents (a noop here).
        // At version 44, superblocks assume a 4kb first extent.
        // Prior to version 44, superblocks assume a 512kb first extent.
        if super_block_version < SMALL_SUPERBLOCK_VERSION {
            reader.handle().push_extent(0, target_super_block.legacy_first_extent());
        } else if super_block_version < FIRST_EXTENT_IN_SUPERBLOCK_VERSION {
            reader.handle().push_extent(0, target_super_block.first_extent())
        }

        // If guid is zeroed (e.g. in a newly imaged system), assign one randomly.
        if super_block_header.guid.0.is_nil() {
            super_block_header.guid = UuidWrapper::new();
        }
        reader.set_version(super_block_version);
        Ok((super_block_header, RecordReader { reader }))
    }
}
595
/// Streams a super-block (header plus root parent items) out to a `DataObjectHandle`, allocating
/// additional extents on demand.
struct SuperBlockWriter<'a, S: HandleOwner> {
    handle: DataObjectHandle<S>,
    // Buffers and checksums the serialized records before they are flushed to `handle`.
    writer: JournalWriter,
    // Extents the super-block file already owns, consumed front-to-back before allocating more.
    existing_extents: VecDeque<FileExtent>,
    // Bytes of the file covered by extent records emitted so far.
    size: u64,
    // Reservation used when new extents must be allocated.
    reservation: &'a Reservation,
}
603
impl<'a, S: HandleOwner> SuperBlockWriter<'a, S> {
    /// Create a new writer, outputs FXFS magic, version and SuperBlockHeader.
    /// On success, the writer is ready to accept root parent store mutations.
    pub async fn new(
        handle: DataObjectHandle<S>,
        super_block_header: &SuperBlockHeader,
        reservation: &'a Reservation,
    ) -> Result<Self, Error> {
        let existing_extents = handle.device_extents().await?;
        let mut this = Self {
            handle,
            writer: JournalWriter::new(BLOCK_SIZE as usize, 0),
            existing_extents: existing_extents.into_iter().collect(),
            size: 0,
            reservation,
        };
        this.writer.write_all(SUPER_BLOCK_MAGIC)?;
        super_block_header.serialize_with_version(&mut this.writer)?;
        Ok(this)
    }

    /// Internal helper function to pull ranges from a list of existing extents and tack
    /// corresponding extent records onto the journal.
    fn try_extend_existing(&mut self, target_size: u64) -> Result<(), Error> {
        while self.size < target_size {
            if let Some(extent) = self.existing_extents.pop_front() {
                // Extents must tile the file contiguously from the current size.
                ensure!(
                    extent.logical_range().start == self.size,
                    "superblock file contains a hole."
                );
                self.size += extent.length();
                SuperBlockRecord::Extent(extent.device_range().clone())
                    .serialize_into(&mut self.writer)?;
            } else {
                break;
            }
        }
        Ok(())
    }

    /// Appends one root parent store item, first ensuring there is at least
    /// `SUPER_BLOCK_CHUNK_SIZE` bytes of headroom past the current write offset.
    pub async fn write_root_parent_item(&mut self, record: ObjectItem) -> Result<(), Error> {
        let min_len = self.writer.journal_file_checkpoint().file_offset + SUPER_BLOCK_CHUNK_SIZE;
        // Prefer reusing extents the file already owns before allocating new space.
        self.try_extend_existing(min_len)?;
        if min_len > self.size {
            // Need to allocate some more space.
            let mut transaction = self
                .handle
                .new_transaction_with_options(Options {
                    skip_journal_checks: true,
                    borrow_metadata_space: true,
                    allocator_reservation: Some(self.reservation),
                    ..Default::default()
                })
                .await?;
            let mut file_range = self.size..self.size + SUPER_BLOCK_CHUNK_SIZE;
            let allocated = self
                .handle
                .preallocate_range(&mut transaction, &mut file_range)
                .await
                .context("preallocate superblock")?;
            // Any remainder left in `file_range` means less than the full chunk was allocated.
            if file_range.start < file_range.end {
                bail!("preallocate_range returned too little space");
            }
            transaction.commit().await?;
            for device_range in allocated {
                self.size += device_range.end - device_range.start;
                SuperBlockRecord::Extent(device_range).serialize_into(&mut self.writer)?;
            }
        }
        SuperBlockRecord::ObjectItem(record).serialize_into(&mut self.writer)?;
        Ok(())
    }

    /// Writes the `End` record, flushes everything to the device and truncates the file to its
    /// final length.
    pub async fn finalize(mut self) -> Result<(), Error> {
        SuperBlockRecord::End.serialize_into(&mut self.writer)?;
        self.writer.pad_to_block()?;
        let mut buf = self.handle.allocate_buffer(self.writer.flushable_bytes()).await;
        let offset = self.writer.take_flushable(buf.as_mut());
        self.handle.overwrite(offset, buf.as_mut(), OverwriteOptions::default()).await?;
        // Keep one chunk of slack past the data, and never shrink below the minimum size.
        let len =
            std::cmp::max(MIN_SUPER_BLOCK_SIZE, self.writer.journal_file_checkpoint().file_offset)
                + SUPER_BLOCK_CHUNK_SIZE;
        self.handle
            .truncate_with_options(
                Options {
                    skip_journal_checks: true,
                    borrow_metadata_space: true,
                    ..Default::default()
                },
                len,
            )
            .await?;
        Ok(())
    }
}
699
700pub struct RecordReader {
701    reader: JournalReader,
702}
703
704impl RecordReader {
705    pub async fn next_item(&mut self) -> Result<SuperBlockRecord, Error> {
706        loop {
707            match self.reader.deserialize().await? {
708                ReadResult::Reset(_) => bail!("Unexpected reset"),
709                ReadResult::ChecksumMismatch => bail!("Checksum mismatch"),
710                ReadResult::Some(SuperBlockRecord::Extent(extent)) => {
711                    ensure!(extent.is_valid(), FxfsError::Inconsistent);
712                    self.reader.handle().push_extent(0, extent)
713                }
714                ReadResult::Some(x) => return Ok(x),
715            }
716        }
717    }
718}
719
720#[cfg(test)]
721mod tests {
722    use super::{
723        MIN_SUPER_BLOCK_SIZE, SUPER_BLOCK_CHUNK_SIZE, SUPER_BLOCK_MAGIC, SuperBlockHeader,
724        SuperBlockInstance, SuperBlockManager, SuperBlockRecord, UuidWrapper, compact_root_parent,
725        write,
726    };
727    use crate::filesystem::{FxFilesystem, OpenFxFilesystem, SyncOptions};
728    use crate::object_handle::ReadObjectHandle;
729    use crate::object_store::journal::JournalCheckpoint;
730    use crate::object_store::journal::writer::JournalWriter;
731    use crate::object_store::transaction::{Options, lock_keys};
732    use crate::object_store::{
733        DataObjectHandle, HandleOptions, ObjectHandle, ObjectKey, ObjectStore,
734    };
735    use crate::serialized_types::{LATEST_VERSION, Versioned, VersionedLatest};
736    use std::io::Write;
737    use storage_device::DeviceHolder;
738    use storage_device::fake_device::FakeDevice;
739
740    // We require 512kiB each for A/B super-blocks, 256kiB for the journal (128kiB before flush)
741    // and compactions require double the layer size to complete.
742    const TEST_DEVICE_BLOCK_SIZE: u32 = 512;
743    const TEST_DEVICE_BLOCK_COUNT: u64 = 16384;
744
    /// Creates a fresh filesystem on a fake device, reopens it, and returns it along with open
    /// handles to both (A/B) super-block objects in the root store.
    async fn filesystem_and_super_block_handles()
    -> (OpenFxFilesystem, DataObjectHandle<ObjectStore>, DataObjectHandle<ObjectStore>) {
        let device =
            DeviceHolder::new(FakeDevice::new(TEST_DEVICE_BLOCK_COUNT, TEST_DEVICE_BLOCK_SIZE));
        let fs = FxFilesystem::new_empty(device).await.expect("new_empty failed");
        // NOTE(review): close/reopen presumably ensures everything from mkfs is persisted before
        // the super-block objects are opened below — confirm.
        fs.close().await.expect("Close failed");
        let device = fs.take_device().await;
        device.reopen(false);
        let fs = FxFilesystem::open(device).await.expect("open failed");

        let handle_a = ObjectStore::open_object(
            &fs.object_manager().root_store(),
            SuperBlockInstance::A.object_id(),
            HandleOptions::default(),
            None,
        )
        .await
        .expect("open superblock failed");

        let handle_b = ObjectStore::open_object(
            &fs.object_manager().root_store(),
            SuperBlockInstance::B.object_id(),
            HandleOptions::default(),
            None,
        )
        .await
        .expect("open superblock failed");
        (fs, handle_a, handle_b)
    }
774
775    #[fuchsia::test]
776    async fn test_read_written_super_block() {
777        let (fs, _handle_a, _handle_b) = filesystem_and_super_block_handles().await;
778        const JOURNAL_OBJECT_ID: u64 = 5;
779
780        // Confirm that the (first) super-block is expected size.
781        // It should be MIN_SUPER_BLOCK_SIZE + SUPER_BLOCK_CHUNK_SIZE.
782        assert_eq!(
783            ObjectStore::open_object(
784                &fs.root_store(),
785                SuperBlockInstance::A.object_id(),
786                HandleOptions::default(),
787                None,
788            )
789            .await
790            .expect("open_object failed")
791            .get_size(),
792            MIN_SUPER_BLOCK_SIZE + SUPER_BLOCK_CHUNK_SIZE
793        );
794
795        // Create a large number of objects in the root parent store so that we test growing
796        // of the super-block file, requiring us to add extents.
797        let mut created_object_ids = vec![];
798        const NUM_ENTRIES: u64 = 16384;
799        for _ in 0..NUM_ENTRIES {
800            let mut transaction = fs
801                .clone()
802                .new_transaction(lock_keys![], Options::default())
803                .await
804                .expect("new_transaction failed");
805            created_object_ids.push(
806                ObjectStore::create_object(
807                    &fs.object_manager().root_parent_store(),
808                    &mut transaction,
809                    HandleOptions::default(),
810                    None,
811                )
812                .await
813                .expect("create_object failed")
814                .object_id(),
815            );
816            transaction.commit().await.expect("commit failed");
817        }
818
819        // Note here that DataObjectHandle caches the size given to it at construction.
820        // If we want to know the true size after a super-block has been written, we need
821        // a new handle.
822        assert!(
823            ObjectStore::open_object(
824                &fs.root_store(),
825                SuperBlockInstance::A.object_id(),
826                HandleOptions::default(),
827                None,
828            )
829            .await
830            .expect("open_object failed")
831            .get_size()
832                > MIN_SUPER_BLOCK_SIZE + SUPER_BLOCK_CHUNK_SIZE
833        );
834
835        let written_super_block_a =
836            SuperBlockHeader::read_header(fs.device(), SuperBlockInstance::A)
837                .await
838                .expect("read failed");
839        let written_super_block_b =
840            SuperBlockHeader::read_header(fs.device(), SuperBlockInstance::B)
841                .await
842                .expect("read failed");
843
844        // Check that a non-zero GUID has been assigned.
845        assert!(!written_super_block_a.0.guid.0.is_nil());
846
847        // Depending on specific offsets is fragile so we just validate the fields we believe
848        // to be stable.
849        assert_eq!(written_super_block_a.0.guid, written_super_block_b.0.guid);
850        assert_eq!(written_super_block_a.0.guid, written_super_block_b.0.guid);
851        assert!(written_super_block_a.0.generation != written_super_block_b.0.generation);
852        assert_eq!(
853            written_super_block_a.0.root_parent_store_object_id,
854            written_super_block_b.0.root_parent_store_object_id
855        );
856        assert_eq!(
857            written_super_block_a.0.root_parent_graveyard_directory_object_id,
858            written_super_block_b.0.root_parent_graveyard_directory_object_id
859        );
860        assert_eq!(written_super_block_a.0.root_store_object_id, fs.root_store().store_object_id());
861        assert_eq!(
862            written_super_block_a.0.root_store_object_id,
863            written_super_block_b.0.root_store_object_id
864        );
865        assert_eq!(written_super_block_a.0.allocator_object_id, fs.allocator().object_id());
866        assert_eq!(
867            written_super_block_a.0.allocator_object_id,
868            written_super_block_b.0.allocator_object_id
869        );
870        assert_eq!(written_super_block_a.0.journal_object_id, JOURNAL_OBJECT_ID);
871        assert_eq!(
872            written_super_block_a.0.journal_object_id,
873            written_super_block_b.0.journal_object_id
874        );
875        assert!(
876            written_super_block_a.0.journal_checkpoint.file_offset
877                != written_super_block_b.0.journal_checkpoint.file_offset
878        );
879        assert!(
880            written_super_block_a.0.super_block_journal_file_offset
881                != written_super_block_b.0.super_block_journal_file_offset
882        );
883        // Nb: We skip journal_file_offsets and borrowed metadata space checks.
884        assert_eq!(written_super_block_a.0.earliest_version, LATEST_VERSION);
885        assert_eq!(
886            written_super_block_a.0.earliest_version,
887            written_super_block_b.0.earliest_version
888        );
889
890        // Nb: Skip comparison of root_parent store contents because we have no way of anticipating
891        // the extent offsets and it is reasonable that a/b differ.
892
893        // Delete all the objects we just made.
894        for object_id in created_object_ids {
895            let mut transaction = fs
896                .clone()
897                .new_transaction(lock_keys![], Options::default())
898                .await
899                .expect("new_transaction failed");
900            fs.object_manager()
901                .root_parent_store()
902                .adjust_refs(&mut transaction, object_id, -1)
903                .await
904                .expect("adjust_refs failed");
905            transaction.commit().await.expect("commit failed");
906            fs.object_manager()
907                .root_parent_store()
908                .tombstone_object(object_id, Options::default())
909                .await
910                .expect("tombstone failed");
911        }
912        // Write some stuff to the root store to ensure we rotate the journal and produce new
913        // super blocks.
914        for _ in 0..NUM_ENTRIES {
915            let mut transaction = fs
916                .clone()
917                .new_transaction(lock_keys![], Options::default())
918                .await
919                .expect("new_transaction failed");
920            ObjectStore::create_object(
921                &fs.object_manager().root_store(),
922                &mut transaction,
923                HandleOptions::default(),
924                None,
925            )
926            .await
927            .expect("create_object failed");
928            transaction.commit().await.expect("commit failed");
929        }
930
931        assert_eq!(
932            ObjectStore::open_object(
933                &fs.root_store(),
934                SuperBlockInstance::A.object_id(),
935                HandleOptions::default(),
936                None,
937            )
938            .await
939            .expect("open_object failed")
940            .get_size(),
941            MIN_SUPER_BLOCK_SIZE + SUPER_BLOCK_CHUNK_SIZE
942        );
943    }
944
945    #[fuchsia::test]
946    async fn test_generation_comparison_wrapping() {
947        let device = DeviceHolder::new(FakeDevice::new(
948            TEST_DEVICE_BLOCK_COUNT,
949            MIN_SUPER_BLOCK_SIZE as u32,
950        ));
951        let fs = FxFilesystem::new_empty(device).await.expect("new_empty failed");
952        fs.close().await.expect("close");
953        let device = fs.take_device().await;
954        device.reopen(false);
955
956        // Helper to write a superblock with a specific generation to a specific instance.
957        // We need to clone the inner Arc to pass to the closure.
958        let device_arc = (*device).clone();
959        let write_sb = |instance: SuperBlockInstance, generation: u64| {
960            let device = device_arc.clone();
961            async move {
962                let mut super_block_header = SuperBlockHeader::new(
963                    1, // generation
964                    3, // root_parent_store_object_id
965                    4, // root_parent_graveyard_directory_object_id
966                    5, // root_store_object_id
967                    6, // allocator_object_id
968                    7, // journal_object_id
969                    JournalCheckpoint::default(),
970                    LATEST_VERSION,
971                );
972                super_block_header.generation = generation;
973                super_block_header.journal_checkpoint.version = LATEST_VERSION;
974
975                let mut writer = JournalWriter::new(MIN_SUPER_BLOCK_SIZE as usize, 0);
976                writer.write_all(SUPER_BLOCK_MAGIC).unwrap();
977                super_block_header.serialize_with_version(&mut writer).unwrap();
978                SuperBlockRecord::End.serialize_into(&mut writer).unwrap();
979                writer.pad_to_block().unwrap();
980
981                let mut buf = device.allocate_buffer(writer.flushable_bytes()).await;
982                writer.take_flushable(buf.as_mut());
983                device
984                    .write(instance.first_extent().start, buf.as_ref())
985                    .await
986                    .expect("write failed");
987            }
988        };
989
990        // Case 1: A has MAX, B has 0. B should be selected.
991        write_sb(SuperBlockInstance::A, u64::MAX).await;
992        write_sb(SuperBlockInstance::B, 0).await;
993        let manager = SuperBlockManager::new();
994        let (header, _) = manager
995            .load((*device).clone(), MIN_SUPER_BLOCK_SIZE as u64)
996            .await
997            .expect("load failed");
998        assert_eq!(header.generation, 0);
999
1000        // Case 2: A has 0, B has MAX. A should be selected.
1001        write_sb(SuperBlockInstance::A, 0).await;
1002        write_sb(SuperBlockInstance::B, u64::MAX).await;
1003        let manager = SuperBlockManager::new();
1004        let (header, _) = manager
1005            .load((*device).clone(), MIN_SUPER_BLOCK_SIZE as u64)
1006            .await
1007            .expect("load failed");
1008        assert_eq!(header.generation, 0);
1009
1010        // Case 3: A has 100, B has 200. B should be selected.
1011        write_sb(SuperBlockInstance::A, 100).await;
1012        write_sb(SuperBlockInstance::B, 200).await;
1013        let manager = SuperBlockManager::new();
1014        let (header, _) = manager
1015            .load((*device).clone(), MIN_SUPER_BLOCK_SIZE as u64)
1016            .await
1017            .expect("load failed");
1018        assert_eq!(header.generation, 200);
1019    }
1020
    #[fuchsia::test]
    async fn test_generation_wrapping_on_flush() {
        // Verifies that super-block generations wrap around u64::MAX during normal
        // journal flushes: we push the stored generations up to u64::MAX - 1 and
        // u64::MAX, generate enough work to force further super-block writes, and
        // then check that the newest generation has wrapped to a small value and
        // the filesystem still loads.
        let block_size = 4096;
        let mut device =
            DeviceHolder::new(FakeDevice::new(TEST_DEVICE_BLOCK_COUNT, block_size as u32));
        {
            // Format the device and commit one transaction so a valid image exists
            // to reopen below.
            let fs = FxFilesystem::new_empty(device).await.expect("new_empty failed");
            let root_store = fs.root_store();
            let mut transaction = fs
                .clone()
                .new_transaction(lock_keys![], Options::default())
                .await
                .expect("new_transaction failed");
            ObjectStore::create_object(
                &root_store,
                &mut transaction,
                HandleOptions::default(),
                None,
            )
            .await
            .expect("create_object failed");
            transaction.commit().await.expect("commit failed");
            fs.sync(SyncOptions::default()).await.expect("sync failed");
            fs.close().await.expect("close failed");
            device = fs.take_device().await;
        }
        device.reopen(false);

        // Load the current super-block so we have a header to rewrite with
        // near-wrapping generation numbers.
        let manager = SuperBlockManager::new();
        let (mut header, _) =
            manager.load((*device).clone(), block_size as u64).await.expect("load failed");

        {
            let fs = FxFilesystem::open(device).await.expect("open failed");
            // To test wrapping, we need to get into a state where the current generation is
            // u64::MAX. Since we have A and B, and wrapping comparison is used, we need to set them
            // up carefully. We will set A to u64::MAX - 1, and B to u64::MAX.
            // Then the next write will be to A, and should be 0.
            header.generation = u64::MAX - 1;
            manager
                .save(header.clone(), (*fs).clone(), fs.root_parent_store().tree().layer_set())
                .await
                .expect("save 1 failed");
            header.generation = u64::MAX;
            manager
                .save(header, (*fs).clone(), fs.root_parent_store().tree().layer_set())
                .await
                .expect("save 2 failed");
            fs.close().await.expect("close failed");
            device = fs.take_device().await;
            device.reopen(false);

            // Reopen on top of the maxed-out generations.
            let fs = FxFilesystem::open(device).await.expect("open failed");

            // Generate enough work to force journal flushes and hence further
            // super-block writes, whose generations must wrap past u64::MAX.
            let root_store = fs.root_store();
            for _ in 0..6000 {
                let mut transaction = fs
                    .clone()
                    .new_transaction(lock_keys![], Options::default())
                    .await
                    .expect("new_transaction failed");
                ObjectStore::create_object(
                    &root_store,
                    &mut transaction,
                    HandleOptions::default(),
                    None,
                )
                .await
                .expect("create_object failed");
                transaction.commit().await.expect("commit failed");
            }
            fs.sync(SyncOptions::default()).await.expect("sync failed");
            fs.close().await.expect("close failed");
            device = fs.take_device().await;
        }
        device.reopen(false);

        // The selected (newest) super-block should now carry a small, wrapped
        // generation number.
        let (header, _) =
            manager.load((*device).clone(), block_size as u64).await.expect("load failed");
        assert!(header.generation < 10);
    }
1102
1103    #[fuchsia::test]
1104    async fn test_guid_assign_on_read() {
1105        let (fs, handle_a, _handle_b) = filesystem_and_super_block_handles().await;
1106        const JOURNAL_OBJECT_ID: u64 = 5;
1107        let mut super_block_header_a = SuperBlockHeader::new(
1108            1,
1109            fs.object_manager().root_parent_store().store_object_id(),
1110            /* root_parent_graveyard_directory_object_id: */ 1000,
1111            fs.root_store().store_object_id(),
1112            fs.allocator().object_id(),
1113            JOURNAL_OBJECT_ID,
1114            JournalCheckpoint { file_offset: 1234, checksum: 5678, version: LATEST_VERSION },
1115            /* earliest_version: */ LATEST_VERSION,
1116        );
1117        // Ensure the superblock has no set GUID.
1118        super_block_header_a.guid = UuidWrapper::nil();
1119        write(
1120            &super_block_header_a,
1121            compact_root_parent(fs.object_manager().root_parent_store().as_ref())
1122                .expect("scan failed"),
1123            handle_a,
1124        )
1125        .await
1126        .expect("write failed");
1127        let super_block_header = SuperBlockHeader::read_header(fs.device(), SuperBlockInstance::A)
1128            .await
1129            .expect("read failed");
1130        // Ensure a GUID has been assigned.
1131        assert!(!super_block_header.0.guid.0.is_nil());
1132    }
1133
1134    #[fuchsia::test]
1135    async fn test_init_wipes_superblocks() {
1136        let device = DeviceHolder::new(FakeDevice::new(8192, TEST_DEVICE_BLOCK_SIZE));
1137
1138        let fs = FxFilesystem::new_empty(device).await.expect("new_empty failed");
1139        let root_store = fs.root_store();
1140        // Generate enough work to induce a journal flush and thus a new superblock being written.
1141        for _ in 0..6000 {
1142            let mut transaction = fs
1143                .clone()
1144                .new_transaction(lock_keys![], Options::default())
1145                .await
1146                .expect("new_transaction failed");
1147            ObjectStore::create_object(
1148                &root_store,
1149                &mut transaction,
1150                HandleOptions::default(),
1151                None,
1152            )
1153            .await
1154            .expect("create_object failed");
1155            transaction.commit().await.expect("commit failed");
1156        }
1157        fs.close().await.expect("Close failed");
1158        let device = fs.take_device().await;
1159        device.reopen(false);
1160
1161        SuperBlockHeader::read_header(device.clone(), SuperBlockInstance::A)
1162            .await
1163            .expect("read failed");
1164        let header = SuperBlockHeader::read_header(device.clone(), SuperBlockInstance::B)
1165            .await
1166            .expect("read failed");
1167
1168        let old_guid = header.0.guid;
1169
1170        // Re-initialize the filesystem.  The A and B blocks should be for the new FS.
1171        let fs = FxFilesystem::new_empty(device).await.expect("new_empty failed");
1172        fs.close().await.expect("Close failed");
1173        let device = fs.take_device().await;
1174        device.reopen(false);
1175
1176        let a = SuperBlockHeader::read_header(device.clone(), SuperBlockInstance::A)
1177            .await
1178            .expect("read failed");
1179        let b = SuperBlockHeader::read_header(device.clone(), SuperBlockInstance::B)
1180            .await
1181            .expect("read failed");
1182
1183        assert_eq!(a.0.guid, b.0.guid);
1184        assert_ne!(old_guid, a.0.guid);
1185    }
1186
1187    #[fuchsia::test]
1188    async fn test_alternating_super_blocks() {
1189        let device = DeviceHolder::new(FakeDevice::new(8192, TEST_DEVICE_BLOCK_SIZE));
1190
1191        let fs = FxFilesystem::new_empty(device).await.expect("new_empty failed");
1192        fs.close().await.expect("Close failed");
1193        let device = fs.take_device().await;
1194        device.reopen(false);
1195
1196        let (super_block_header_a, _) =
1197            SuperBlockHeader::read_header(device.clone(), SuperBlockInstance::A)
1198                .await
1199                .expect("read failed");
1200
1201        // The second super-block won't be valid at this time so there's no point reading it.
1202
1203        let fs = FxFilesystem::open(device).await.expect("open failed");
1204        let root_store = fs.root_store();
1205        // Generate enough work to induce a journal flush.
1206        for _ in 0..6000 {
1207            let mut transaction = fs
1208                .clone()
1209                .new_transaction(lock_keys![], Options::default())
1210                .await
1211                .expect("new_transaction failed");
1212            ObjectStore::create_object(
1213                &root_store,
1214                &mut transaction,
1215                HandleOptions::default(),
1216                None,
1217            )
1218            .await
1219            .expect("create_object failed");
1220            transaction.commit().await.expect("commit failed");
1221        }
1222        fs.close().await.expect("Close failed");
1223        let device = fs.take_device().await;
1224        device.reopen(false);
1225
1226        let (super_block_header_a_after, _) =
1227            SuperBlockHeader::read_header(device.clone(), SuperBlockInstance::A)
1228                .await
1229                .expect("read failed");
1230        let (super_block_header_b_after, _) =
1231            SuperBlockHeader::read_header(device.clone(), SuperBlockInstance::B)
1232                .await
1233                .expect("read failed");
1234
1235        // It's possible that multiple super-blocks were written, so cater for that.
1236
1237        // The sequence numbers should be one apart.
1238        assert_eq!(
1239            (super_block_header_b_after.generation as i64
1240                - super_block_header_a_after.generation as i64)
1241                .abs(),
1242            1
1243        );
1244
1245        // At least one super-block should have been written.
1246        assert!(
1247            std::cmp::max(
1248                super_block_header_a_after.generation,
1249                super_block_header_b_after.generation
1250            ) > super_block_header_a.generation
1251        );
1252
1253        // They should have the same oddness.
1254        assert_eq!(super_block_header_a_after.generation & 1, super_block_header_a.generation & 1);
1255    }
1256
1257    #[fuchsia::test]
1258    async fn test_root_parent_is_compacted() {
1259        let device = DeviceHolder::new(FakeDevice::new(8192, TEST_DEVICE_BLOCK_SIZE));
1260
1261        let fs = FxFilesystem::new_empty(device).await.expect("new_empty failed");
1262
1263        let mut transaction = fs
1264            .clone()
1265            .new_transaction(lock_keys![], Options::default())
1266            .await
1267            .expect("new_transaction failed");
1268        let store = fs.root_parent_store();
1269        let handle =
1270            ObjectStore::create_object(&store, &mut transaction, HandleOptions::default(), None)
1271                .await
1272                .expect("create_object failed");
1273        transaction.commit().await.expect("commit failed");
1274
1275        store
1276            .tombstone_object(handle.object_id(), Options::default())
1277            .await
1278            .expect("tombstone failed");
1279
1280        // Generate enough work to induce a journal flush.
1281        let root_store = fs.root_store();
1282        for _ in 0..6000 {
1283            let mut transaction = fs
1284                .clone()
1285                .new_transaction(lock_keys![], Options::default())
1286                .await
1287                .expect("new_transaction failed");
1288            ObjectStore::create_object(
1289                &root_store,
1290                &mut transaction,
1291                HandleOptions::default(),
1292                None,
1293            )
1294            .await
1295            .expect("create_object failed");
1296            transaction.commit().await.expect("commit failed");
1297        }
1298
1299        // The root parent store should have been compacted, so we shouldn't be able to find any
1300        // record referring to the object we tombstoned.
1301        assert_eq!(
1302            store.tree().find(&ObjectKey::object(handle.object_id())).await.expect("find failed"),
1303            None
1304        );
1305    }
1306}