1use crate::filesystem::FxFilesystem;
6use crate::fsck::errors::{FsckError, FsckFatal, FsckIssue, FsckWarning};
7use crate::log::*;
8use crate::lsm_tree::skip_list_layer::SkipListLayer;
9use crate::lsm_tree::types::{
10 BoxedLayerIterator, Item, Key, Layer, LayerIterator, OrdUpperBound, RangeKey, Value,
11};
12use crate::lsm_tree::Query;
13use crate::object_handle::INVALID_OBJECT_ID;
14use crate::object_store::allocator::{AllocatorKey, AllocatorValue, CoalescingIterator};
15use crate::object_store::journal::super_block::SuperBlockInstance;
16use crate::object_store::load_store_info;
17use crate::object_store::transaction::{lock_keys, LockKey};
18use crate::object_store::volume::root_volume;
19use anyhow::{anyhow, Context, Error};
20use futures::try_join;
21use fxfs_crypto::Crypt;
22use rustc_hash::FxHashSet as HashSet;
23use std::collections::BTreeMap;
24use std::iter::zip;
25use std::ops::Bound;
26use std::sync::atomic::{AtomicU64, Ordering};
27use std::sync::Arc;
28
29pub mod errors;
30
31mod store_scanner;
32
33#[cfg(test)]
34mod tests;
35
/// Number of buckets in the fragmentation histograms.  Slot 0 holds zero-sized entries;
/// slot `i` (i >= 1) covers counts in `[2^(i-1), 2^i)`, with the final slot absorbing
/// everything larger.
pub const NUM_FRAGMENTATION_HISTOGRAM_SLOTS: usize = 12;

/// Log2-bucketed histograms describing fragmentation observed during fsck.
#[derive(Default, Debug)]
pub struct FragmentationStats {
    /// Histogram of extent sizes (bucketed via `get_histogram_bucket_for_size`).
    pub extent_size: [u64; NUM_FRAGMENTATION_HISTOGRAM_SLOTS],
    /// Histogram of extent counts (bucketed via `get_histogram_bucket_for_count`).
    pub extent_count: [u64; NUM_FRAGMENTATION_HISTOGRAM_SLOTS],
    /// Histogram of free-space gap sizes between allocations, in bytes.
    pub free_space: [u64; NUM_FRAGMENTATION_HISTOGRAM_SLOTS],
}

impl FragmentationStats {
    /// Returns the histogram bucket for a size in bytes, assuming 4KiB blocks.
    pub fn get_histogram_bucket_for_size(size: u64) -> usize {
        Self::get_histogram_bucket_for_count(size / 4096)
    }
    /// Returns the histogram bucket for `count`: the bit-length of `count`
    /// (i.e. `floor(log2(count)) + 1`, or 0 for 0), saturated to the last slot.
    pub fn get_histogram_bucket_for_count(count: u64) -> usize {
        // `count` is unsigned, so only the upper bound needs enforcing.
        let log_count = (64 - count.leading_zeros()) as usize;
        log_count.min(NUM_FRAGMENTATION_HISTOGRAM_SLOTS - 1)
    }
}
59
/// The outcome of a successful fsck run, including statistics gathered during the scan.
#[derive(Default, Debug)]
pub struct FsckResult {
    /// Log2 histograms describing extent and free-space fragmentation.
    pub fragmentation: FragmentationStats,
}
65
/// Options controlling how fsck runs and how it reports issues.
pub struct FsckOptions<'a> {
    /// Fail the run (return `Err`) if any warnings were emitted.
    pub fail_on_warning: bool,
    /// Stop at the first error instead of continuing the scan.
    pub halt_on_error: bool,
    /// Also verify the contents of each layer file (slower).
    pub do_slow_passes: bool,
    /// Callback invoked for every issue found (warning, error or fatal).
    pub on_error: Box<dyn Fn(&FsckIssue) + Send + Sync + 'a>,
    /// Suppress informational log output.
    pub quiet: bool,
    /// Emit extra progress logging via `Fsck::verbose`.
    pub verbose: bool,
    /// Skip taking the filesystem write lock before scanning.
    pub no_lock: bool,
}
82
83impl Default for FsckOptions<'_> {
84 fn default() -> Self {
85 Self {
86 fail_on_warning: false,
87 halt_on_error: false,
88 do_slow_passes: true,
89 on_error: Box::new(FsckIssue::log),
90 quiet: false,
91 verbose: false,
92 no_lock: false,
93 }
94 }
95}
96
97pub async fn fsck(filesystem: Arc<FxFilesystem>) -> Result<FsckResult, Error> {
105 fsck_with_options(filesystem, &FsckOptions::default()).await
106}
107
/// Verifies the integrity of the filesystem's own metadata: the root parent store, the
/// root store, the allocator, and journal bookkeeping.  Child stores are only checked at
/// the metadata level here (via `check_child_store_metadata`); use
/// `fsck_volume_with_options` to verify a volume's contents.
///
/// Returns `Err` if any errors were found (or warnings, when `options.fail_on_warning`
/// is set); otherwise returns the accumulated `FsckResult`.
pub async fn fsck_with_options(
    filesystem: Arc<FxFilesystem>,
    options: &FsckOptions<'_>,
) -> Result<FsckResult, Error> {
    let mut result = FsckResult::default();

    if !options.quiet {
        info!("Starting fsck");
    }

    // Unless the caller opted out, hold the filesystem write lock for the whole scan.
    let _guard = if options.no_lock {
        None
    } else {
        Some(filesystem.lock_manager().write_lock(lock_keys![LockKey::Filesystem]).await)
    };

    let mut fsck = Fsck::new(options);

    let object_manager = filesystem.object_manager();
    let super_block_header = filesystem.super_block_header();

    // Object ids for which journal file offsets may legitimately be recorded; anything
    // else found in the journal's offsets is reported as an error below.
    let mut journal_checkpoint_ids: HashSet<u64> = HashSet::default();
    journal_checkpoint_ids.insert(super_block_header.allocator_object_id);
    journal_checkpoint_ids.insert(super_block_header.root_store_object_id);

    // Scan the root parent store; its root objects are the root store, the journal, and
    // the root store's parent objects.
    let mut root_objects =
        vec![super_block_header.root_store_object_id, super_block_header.journal_object_id];
    root_objects.append(&mut object_manager.root_store().parent_objects());
    fsck.verbose("Scanning root parent store...");
    store_scanner::scan_store(
        &fsck,
        object_manager.root_parent_store().as_ref(),
        &root_objects,
        &mut result,
    )
    .await?;
    fsck.verbose("Scanning root parent store done");

    // Collect the objects expected to be rooted in the root store: the allocator, both
    // super-block instances, and the root store's own root objects.
    let root_store = &object_manager.root_store();
    let mut root_store_root_objects = Vec::new();
    root_store_root_objects.append(&mut vec![
        super_block_header.allocator_object_id,
        SuperBlockInstance::A.object_id(),
        SuperBlockInstance::B.object_id(),
    ]);
    root_store_root_objects.append(&mut root_store.root_objects());

    // Walk the volume directory and check each child store's metadata, accumulating each
    // store's parent objects into `root_store_root_objects`.
    let root_volume = root_volume(filesystem.clone()).await?;
    let volume_directory = root_volume.volume_directory();
    let layer_set = volume_directory.store().tree().layer_set();
    let mut merger = layer_set.merger();
    let mut iter = volume_directory.iter(&mut merger).await?;

    while let Some((_, store_id, _)) = iter.get() {
        journal_checkpoint_ids.insert(store_id);
        fsck.check_child_store_metadata(
            filesystem.as_ref(),
            store_id,
            &mut root_store_root_objects,
        )
        .await?;
        iter.advance().await?;
    }

    let allocator = filesystem.allocator();
    root_store_root_objects.append(&mut allocator.parent_objects());

    // Slow pass: verify ordering/overlap invariants of every allocator layer file.
    if fsck.options.do_slow_passes {
        let layer_set = allocator.tree().immutable_layer_set();
        fsck.verbose(format!("Checking {} layers for allocator...", layer_set.layers.len()));
        for layer in layer_set.layers {
            if let Some(handle) = layer.handle() {
                fsck.verbose(format!(
                    "Layer file {} for allocator is {} bytes",
                    handle.object_id(),
                    handle.get_size()
                ));
            }
            fsck.check_layer_file_contents(
                allocator.object_id(),
                layer.handle().map(|h| h.object_id()).unwrap_or(INVALID_OBJECT_ID),
                layer.clone(),
            )
            .await?;
        }
        fsck.verbose("Checking layers done");
    }

    fsck.verbose("Scanning root object store...");
    store_scanner::scan_store(&fsck, root_store.as_ref(), &root_store_root_objects, &mut result)
        .await?;
    fsck.verbose("Scanning root object store done");

    // Cross-check the allocator's records against what the scans observed for the root
    // and root parent stores.
    fsck.verbose("Verifying allocations...");
    let mut store_ids = HashSet::default();
    store_ids.insert(root_store.store_object_id());
    store_ids.insert(object_manager.root_parent_store().store_object_id());
    fsck.verify_allocations(filesystem.as_ref(), &store_ids, &mut result).await?;
    fsck.verbose("Verifying allocations done");

    // Journal offsets recorded for objects we never encountered are errors.
    for object_id in object_manager.journal_file_offsets().0.keys() {
        if !journal_checkpoint_ids.contains(object_id) {
            fsck.error(FsckError::UnexpectedJournalFileOffset(*object_id))?;
        }
    }

    let errors = fsck.errors();
    let warnings = fsck.warnings();
    if errors > 0 || (fsck.options.fail_on_warning && warnings > 0) {
        Err(anyhow!("Fsck encountered {} errors, {} warnings", errors, warnings))
    } else {
        if warnings > 0 {
            warn!(count = warnings; "Fsck encountered warnings");
        } else {
            if !options.quiet {
                info!("No issues detected");
            }
        }
        Ok(result)
    }
}
240
241pub async fn fsck_volume(
245 filesystem: &FxFilesystem,
246 store_id: u64,
247 crypt: Option<Arc<dyn Crypt>>,
248) -> Result<FsckResult, Error> {
249 fsck_volume_with_options(filesystem, &FsckOptions::default(), store_id, crypt).await
250}
251
252pub async fn fsck_volume_with_options(
253 filesystem: &FxFilesystem,
254 options: &FsckOptions<'_>,
255 store_id: u64,
256 crypt: Option<Arc<dyn Crypt>>,
257) -> Result<FsckResult, Error> {
258 let mut result = FsckResult::default();
259 if !options.quiet {
260 info!(store_id:?; "Starting volume fsck");
261 }
262
263 let _guard = if options.no_lock {
264 None
265 } else {
266 Some(filesystem.lock_manager().write_lock(lock_keys![LockKey::Filesystem]).await)
267 };
268
269 let mut fsck = Fsck::new(options);
270 fsck.check_child_store(filesystem, store_id, crypt, &mut result).await?;
271 let mut store_ids = HashSet::default();
272 store_ids.insert(store_id);
273 fsck.verify_allocations(filesystem, &store_ids, &mut result).await?;
274
275 let errors = fsck.errors();
276 let warnings = fsck.warnings();
277 if errors > 0 || (fsck.options.fail_on_warning && warnings > 0) {
278 Err(anyhow!("Volume fsck encountered {} errors, {} warnings", errors, warnings))
279 } else {
280 if warnings > 0 {
281 warn!(count = warnings; "Volume fsck encountered warnings");
282 } else {
283 if !options.quiet {
284 info!("No issues detected");
285 }
286 }
287 Ok(result)
288 }
289}
290
/// Extension trait so the layer-file checks can ask whether two keys overlap.
trait KeyExt: PartialEq {
    /// Returns true if `self` and `other` overlap.
    fn overlaps(&self, other: &Self) -> bool;
}
294
// Blanket implementation: any `RangeKey` already knows how to compute overlap.
impl<K: RangeKey + PartialEq> KeyExt for K {
    fn overlaps(&self, other: &Self) -> bool {
        RangeKey::overlaps(self, other)
    }
}
300
/// State for a single fsck run: the options in force, the allocations observed during
/// scanning, and running issue counts.
struct Fsck<'a> {
    /// Options controlling behaviour and issue reporting.
    options: &'a FsckOptions<'a>,
    /// Extents recorded as in-use during scanning; compared against the allocator's own
    /// records in `verify_allocations`.
    allocations: Arc<SkipListLayer<AllocatorKey, AllocatorValue>>,
    /// Number of errors reported so far.
    errors: AtomicU64,
    /// Number of warnings reported so far.
    warnings: AtomicU64,
}
308
309impl<'a> Fsck<'a> {
310 fn new(options: &'a FsckOptions<'a>) -> Self {
311 Fsck {
312 options,
313 allocations: SkipListLayer::new(2048),
315 errors: AtomicU64::new(0),
316 warnings: AtomicU64::new(0),
317 }
318 }
319
320 fn verbose(&self, message: impl AsRef<str>) {
322 if self.options.verbose {
323 info!(message = message.as_ref(); "fsck");
324 }
325 }
326
327 fn errors(&self) -> u64 {
328 self.errors.load(Ordering::Relaxed)
329 }
330
331 fn warnings(&self) -> u64 {
332 self.warnings.load(Ordering::Relaxed)
333 }
334
335 fn assert<V>(&self, res: Result<V, Error>, error: FsckFatal) -> Result<V, Error> {
336 if res.is_err() {
337 (self.options.on_error)(&FsckIssue::Fatal(error.clone()));
338 return Err(anyhow!("{:?}", error)).context(res.err().unwrap());
339 }
340 res
341 }
342
343 fn warning(&self, error: FsckWarning) -> Result<(), Error> {
344 (self.options.on_error)(&FsckIssue::Warning(error));
345 self.warnings.fetch_add(1, Ordering::Relaxed);
346 Ok(())
347 }
348
349 fn error(&self, error: FsckError) -> Result<(), Error> {
350 (self.options.on_error)(&FsckIssue::Error(error.clone()));
351 self.errors.fetch_add(1, Ordering::Relaxed);
352 if self.options.halt_on_error {
353 Err(anyhow!("{:?}", error))
354 } else {
355 Ok(())
356 }
357 }
358
359 fn fatal(&self, error: FsckFatal) -> Result<(), Error> {
360 (self.options.on_error)(&FsckIssue::Fatal(error.clone()));
361 Err(anyhow!("{:?}", error))
362 }
363
364 async fn check_child_store_metadata(
366 &mut self,
367 filesystem: &FxFilesystem,
368 store_id: u64,
369 root_store_root_objects: &mut Vec<u64>,
370 ) -> Result<(), Error> {
371 let root_store = filesystem.root_store();
372
373 let info = self.assert(
375 load_store_info(&root_store, store_id).await,
376 FsckFatal::MalformedStore(store_id),
377 )?;
378 root_store_root_objects.append(&mut info.parent_objects());
379 Ok(())
380 }
381
382 async fn check_child_store(
383 &mut self,
384 filesystem: &FxFilesystem,
385 store_id: u64,
386 crypt: Option<Arc<dyn Crypt>>,
387 result: &mut FsckResult,
388 ) -> Result<(), Error> {
389 let store =
390 filesystem.object_manager().store(store_id).context("open_store failed").unwrap();
391
392 let _relock_guard;
393 if store.is_locked() {
394 if let Some(crypt) = &crypt {
395 store.unlock_read_only(crypt.clone()).await?;
396 _relock_guard = scopeguard::guard(store.clone(), |store| {
397 store.lock_read_only();
398 });
399 } else {
400 return Err(anyhow!("Invalid key"));
401 }
402 }
403
404 if self.options.do_slow_passes {
405 let layer_set = store.tree().immutable_layer_set();
406 for layer in layer_set.layers {
407 let (layer_object_id, layer_size) = if let Some(h) = layer.handle() {
408 (h.object_id(), h.get_size())
409 } else {
410 (0, 0)
411 };
412 self.verbose(format!(
413 "Layer file {} for store {} is {} bytes",
414 layer_object_id, store_id, layer_size,
415 ));
416 self.check_layer_file_contents(store_id, layer_object_id, layer.clone()).await?
417 }
418 }
419
420 store_scanner::scan_store(self, store.as_ref(), &store.root_objects(), result)
421 .await
422 .context("scan_store failed")
423 }
424
425 async fn check_layer_file_contents<
426 K: Key + KeyExt + OrdUpperBound + std::fmt::Debug,
427 V: Value + std::fmt::Debug,
428 >(
429 &self,
430 store_object_id: u64,
431 layer_file_object_id: u64,
432 layer: Arc<dyn Layer<K, V>>,
433 ) -> Result<(), Error> {
434 let mut iter: BoxedLayerIterator<'_, K, V> = self.assert(
435 layer.seek(Bound::Unbounded).await,
436 FsckFatal::MalformedLayerFile(store_object_id, layer_file_object_id),
437 )?;
438
439 let mut last_item: Option<Item<K, V>> = None;
440 while let Some(item) = iter.get() {
441 if let Some(last) = last_item {
442 if !last.key.cmp_upper_bound(&item.key).is_le() {
443 self.fatal(FsckFatal::MisOrderedLayerFile(
444 store_object_id,
445 layer_file_object_id,
446 ))?;
447 }
448 if last.key.overlaps(&item.key) {
449 self.fatal(FsckFatal::OverlappingKeysInLayerFile(
450 store_object_id,
451 layer_file_object_id,
452 item.into(),
453 last.as_item_ref().into(),
454 ))?;
455 }
456 }
457 last_item = Some(item.cloned());
458 self.assert(
459 iter.advance().await,
460 FsckFatal::MalformedLayerFile(store_object_id, layer_file_object_id),
461 )?;
462 }
463 Ok(())
464 }
465
466 async fn verify_allocations(
468 &self,
469 filesystem: &FxFilesystem,
470 store_object_ids: &HashSet<u64>,
471 result: &mut FsckResult,
472 ) -> Result<(), Error> {
473 let allocator = filesystem.allocator();
474 let layer_set = allocator.tree().layer_set();
475 let mut merger = layer_set.merger();
476 let mut stored_allocations = CoalescingIterator::new(
477 allocator.filter(merger.query(Query::FullScan).await?, true).await?,
478 )
479 .await
480 .expect("filter failed");
481 let mut observed_allocations =
482 CoalescingIterator::new(self.allocations.seek(Bound::Unbounded).await?).await?;
483 let mut observed_owner_allocated_bytes = BTreeMap::new();
484 let mut extra_allocations: Vec<errors::Allocation> = vec![];
485 let bs = filesystem.block_size();
486 let mut previous_allocation_end = 0;
487 while let Some(allocation) = stored_allocations.get() {
488 if allocation.key.device_range.start % bs > 0
489 || allocation.key.device_range.end % bs > 0
490 {
491 self.error(FsckError::MisalignedAllocation(allocation.into()))?;
492 } else if allocation.key.device_range.start >= allocation.key.device_range.end {
493 self.error(FsckError::MalformedAllocation(allocation.into()))?;
494 }
495 let owner_object_id = match allocation.value {
496 AllocatorValue::None => INVALID_OBJECT_ID,
497 AllocatorValue::Abs { owner_object_id, .. } => *owner_object_id,
498 };
499 let r = &allocation.key.device_range;
500
501 if allocation.value != &AllocatorValue::None {
503 if r.start > previous_allocation_end {
504 let size = r.start - previous_allocation_end;
505 result.fragmentation.free_space
506 [FragmentationStats::get_histogram_bucket_for_size(size)] += 1;
507 }
508 previous_allocation_end = r.end;
509 }
510
511 *observed_owner_allocated_bytes.entry(owner_object_id).or_insert(0) += r.end - r.start;
512 if !store_object_ids.contains(&owner_object_id) {
513 if filesystem.object_manager().store(owner_object_id).is_none() {
514 self.error(FsckError::AllocationForNonexistentOwner(allocation.into()))?;
515 }
516 stored_allocations.advance().await?;
517 continue;
518 }
519 match observed_allocations.get() {
521 None => extra_allocations.push(allocation.into()),
522 Some(observed_allocation) => {
523 if allocation.key.device_range.end <= observed_allocation.key.device_range.start
524 {
525 extra_allocations.push(allocation.into());
526 stored_allocations.advance().await?;
527 continue;
528 }
529 if observed_allocation.key.device_range.end <= allocation.key.device_range.start
530 {
531 self.error(FsckError::MissingAllocation(observed_allocation.into()))?;
532 observed_allocations.advance().await?;
533 continue;
534 }
535 if allocation.key != observed_allocation.key
537 || allocation.value != observed_allocation.value
538 {
539 self.error(FsckError::AllocationMismatch(
540 observed_allocation.into(),
541 allocation.into(),
542 ))?;
543 stored_allocations.advance().await?;
544 continue;
545 }
546 }
547 }
548 try_join!(stored_allocations.advance(), observed_allocations.advance())?;
549 }
550 let device_size =
551 filesystem.device().block_count() * filesystem.device().block_size() as u64;
552 if previous_allocation_end < device_size {
553 let size = device_size - previous_allocation_end;
554 result.fragmentation.free_space
555 [FragmentationStats::get_histogram_bucket_for_size(size)] += 1;
556 }
557 while let Some(allocation) = observed_allocations.get() {
558 self.error(FsckError::MissingAllocation(allocation.into()))?;
559 observed_allocations.advance().await?;
560 continue;
561 }
562 let expected_allocated_bytes = observed_owner_allocated_bytes.values().sum::<u64>();
563 self.verbose(format!(
564 "Found {} bytes allocated (expected {} bytes). Total device size is {} bytes.",
565 allocator.get_allocated_bytes(),
566 expected_allocated_bytes,
567 device_size,
568 ));
569 if !extra_allocations.is_empty() {
570 self.error(FsckError::ExtraAllocations(extra_allocations))?;
571 }
572 let owner_allocated_bytes = allocator
576 .get_owner_allocated_bytes()
577 .into_iter()
578 .filter(|(_, v)| *v > 0)
579 .collect::<BTreeMap<_, _>>();
580 if expected_allocated_bytes != allocator.get_allocated_bytes()
581 || observed_owner_allocated_bytes.len() != owner_allocated_bytes.len()
582 || zip(observed_owner_allocated_bytes.iter(), owner_allocated_bytes.iter())
583 .filter(|((k1, v1), (k2, v2))| (*k1, *v1) != (*k2, *v2))
584 .count()
585 != 0
586 {
587 self.error(FsckError::AllocatedBytesMismatch(
588 observed_owner_allocated_bytes.iter().map(|(k, v)| (*k, *v)).collect(),
589 owner_allocated_bytes.iter().map(|(k, v)| (*k, *v)).collect(),
590 ))?;
591 }
592 for (k, v) in allocator.owner_byte_limits() {
593 if !owner_allocated_bytes.contains_key(&k) {
594 self.warning(FsckWarning::LimitForNonExistentStore(k, v))?;
595 }
596 }
597 Ok(())
598 }
599}