// fxfs/lsm_tree.rs
1// Copyright 2021 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5mod bloom_filter;
6pub mod cache;
7pub mod merge;
8pub mod persistent_layer;
9pub mod skip_list_layer;
10pub mod types;
11
12use crate::drop_event::DropEvent;
13use crate::log::*;
14use crate::metrics::DurationMeasureScope;
15use crate::object_handle::{ReadObjectHandle, WriteBytes};
16use crate::serialized_types::{LATEST_VERSION, Version};
17
18use anyhow::Error;
19use cache::{ObjectCache, ObjectCacheResult};
20
21use fuchsia_inspect::HistogramProperty;
22use fuchsia_sync::RwLock;
23use persistent_layer::{PersistentLayer, PersistentLayerWriter};
24use skip_list_layer::SkipListLayer;
25use std::fmt;
26use std::sync::atomic::{AtomicUsize, Ordering};
27use std::sync::{Arc, Mutex};
28use types::{
29    Existence, Item, ItemRef, Key, Layer, LayerIterator, LayerKey, LayerWriter, MergeableKey,
30    OrdLowerBound, Value,
31};
32
33pub use merge::Query;
34
// Capacity passed to `SkipListLayer::new` for each new mutable layer (see
// `LSMTree::new_mutable_layer`).
const SKIP_LIST_LAYER_ITEMS: usize = 512;
36
37// For serialization.
38pub use persistent_layer::{
39    LayerHeader as PersistentLayerHeader, LayerHeaderV39 as PersistentLayerHeaderV39,
40    LayerInfo as PersistentLayerInfo, LayerInfoV39 as PersistentLayerInfoV39,
41};
42
43pub async fn layers_from_handles<K: Key, V: Value>(
44    handles: impl IntoIterator<Item = impl ReadObjectHandle + 'static>,
45) -> Result<Vec<Arc<dyn Layer<K, V>>>, Error> {
46    let mut layers = Vec::new();
47    for handle in handles {
48        layers.push(PersistentLayer::open(handle).await? as Arc<dyn Layer<K, V>>);
49    }
50    Ok(layers)
51}
52
/// The kind of mutation applied to the tree; passed to the `MutationCallback` observer.
#[derive(Eq, PartialEq, Debug)]
pub enum Operation {
    Insert,
    ReplaceOrInsert,
    MergeInto,
}
59
/// Optional hook invoked on every mutation (while the tree's read lock is held); primarily
/// intended for tests that want to observe the exact sequence of mutations.
pub type MutationCallback<K, V> = Option<Box<dyn Fn(Operation, &Item<K, V>) + Send + Sync>>;
61
/// State guarded by `LSMTree::data`: the writable top layer, the stack of immutable layers
/// (newest first — `seal` inserts at index 0), and the optional mutation observer.
struct Inner<K, V> {
    mutable_layer: Arc<SkipListLayer<K, V>>,
    layers: Vec<Arc<dyn Layer<K, V>>>,
    mutation_callback: MutationCallback<K, V>,
}
67
/// Number of buckets in the log2 layer-size histogram.
pub const LOG2_HISTOGRAM_BUCKETS: usize = 32;

/// Metrics related to LSM tree churn, layer depths, and compaction performance.
///
/// `Default` is derived: every field's derived default is zero (including the
/// `[u64; LOG2_HISTOGRAM_BUCKETS]` histogram), which is exactly what the previous
/// hand-written impl produced.
#[derive(Default)]
pub struct CompactionCounters {
    /// Total number of compaction events that merged layers together.
    pub compactions: u64,
    /// Total bytes written during compaction, useful for measuring write amplification.
    pub compaction_bytes_written: u64,
    /// Total duration spent compacting layers.
    pub compaction_time_ns: u64,
    /// Number of mutable layers sealed. Useful for measuring churn.
    pub total_layers_added: u64,
    /// The maximum depth of the LSM tree observed. An indicator of worst-case read amplification.
    pub max_layer_count: u64,
    /// Log2 histogram of the sizes of newly compacted layers, used to verify compaction heuristics.
    pub layer_size_histogram: [u64; LOG2_HISTOGRAM_BUCKETS],
}
98
99/// Global counters and metrics for an LSM tree's lifetime.
100pub struct TreeCounters {
101    /// Number of individual key-lookup attempts (reads) through the tree.
102    pub num_seeks: AtomicUsize,
103    /// Tracks the number of layer files we might have looked at across all seeks.
104    /// Used alongside `layer_files_skipped` to compute the effectiveness of bloom filters.
105    pub layer_files_total: AtomicUsize,
106    /// Tracks how many layer files we skipped searching thanks to the bloom filter rejecting them.
107    pub layer_files_skipped: AtomicUsize,
108    /// Embedded counters for mutable metrics that require locking.
109    pub compaction: Mutex<CompactionCounters>,
110}
111
112impl Default for TreeCounters {
113    fn default() -> Self {
114        Self {
115            num_seeks: AtomicUsize::new(0),
116            layer_files_total: AtomicUsize::new(0),
117            layer_files_skipped: AtomicUsize::new(0),
118            compaction: Mutex::new(CompactionCounters::default()),
119        }
120    }
121}
122
123/// Writes the items yielded by the iterator into the supplied object.
124#[fxfs_trace::trace]
125pub async fn compact_with_iterator<K: Key, V: Value, W: WriteBytes + Send>(
126    mut iterator: impl LayerIterator<K, V>,
127    num_items: usize,
128    writer: W,
129    block_size: u64,
130    mut yielder: Option<impl Yielder>,
131) -> Result<u64, Error> {
132    let mut writer = PersistentLayerWriter::<W, K, V>::new(writer, num_items, block_size).await?;
133    while let Some(item_ref) = iterator.get() {
134        debug!(item_ref:?; "compact: writing");
135        writer.write(item_ref).await?;
136        iterator.advance().await?;
137        if let Some(y) = yielder.as_mut() {
138            y.yield_now().await;
139        }
140    }
141    writer.flush().await?;
142
143    Ok(writer.bytes_written())
144}
145
/// LSMTree manages a tree of layers to provide a key/value store.  Each layer contains deltas on
/// the preceding layer.  The top layer is an in-memory mutable layer.  Layers can be compacted to
/// form a new combined layer.
pub struct LSMTree<K, V> {
    // All mutations take at least a read lock on `data`; `seal` takes a write lock to
    // guarantee no mutation is in flight when the mutable layer is swapped out.
    data: RwLock<Inner<K, V>>,
    // Merge function used whenever iterators over multiple layers are combined.
    merge_fn: merge::MergeFn<K, V>,
    // Cache consulted by `find` before touching any layer.
    cache: Box<dyn ObjectCache<K, V>>,
    // Lifetime counters; shared with `LayerSet`s handed out by this tree.
    counters: Arc<TreeCounters>,
}
155
#[fxfs_trace::trace]
// NOTE(review): the `'tree` lifetime parameter is not referenced by any method in this impl —
// confirm whether it is still required (it may be consumed by the `trace` macro).
impl<'tree, K: MergeableKey, V: Value> LSMTree<K, V> {
    /// Creates a new empty tree.
    pub fn new(merge_fn: merge::MergeFn<K, V>, cache: Box<dyn ObjectCache<K, V>>) -> Self {
        let counters = TreeCounters::default();
        // Even an empty tree contains its mutable layer, so observed depth starts at 1.
        counters.compaction.lock().unwrap().max_layer_count = 1;
        LSMTree {
            data: RwLock::new(Inner {
                mutable_layer: Self::new_mutable_layer(),
                layers: Vec::new(),
                mutation_callback: None,
            }),
            merge_fn,
            cache,
            counters: Arc::new(counters),
        }
    }

    /// Opens an existing tree from the provided handles to the layer objects.
    pub async fn open(
        merge_fn: merge::MergeFn<K, V>,
        handles: impl IntoIterator<Item = impl ReadObjectHandle + 'static>,
        cache: Box<dyn ObjectCache<K, V>>,
    ) -> Result<Self, Error> {
        let layers = layers_from_handles(handles).await?;
        // The +1 accounts for the fresh mutable layer created below.
        let max_layer_count = layers.len() as u64 + 1;
        let counters = TreeCounters::default();
        counters.compaction.lock().unwrap().max_layer_count = max_layer_count;
        Ok(LSMTree {
            data: RwLock::new(Inner {
                mutable_layer: Self::new_mutable_layer(),
                layers,
                mutation_callback: None,
            }),
            merge_fn,
            cache,
            counters: Arc::new(counters),
        })
    }

    /// Replaces the immutable layers.
    pub fn set_layers(&self, layers: Vec<Arc<dyn Layer<K, V>>>) {
        let mut data = self.data.write();
        data.layers = layers;
        // Track the high-water mark of tree depth (immutable layers + mutable layer).
        let layer_count = data.layers.len() + 1;
        let mut counters = self.counters.compaction.lock().unwrap();
        counters.max_layer_count = std::cmp::max(counters.max_layer_count, layer_count as u64);
    }

    /// Appends to the given layers at the end i.e. they should be base layers.  This is supposed
    /// to be used after replay when we are opening a tree and we have discovered the base layers.
    pub async fn append_layers(
        &self,
        handles: impl IntoIterator<Item = impl ReadObjectHandle + 'static>,
    ) -> Result<(), Error> {
        let mut layers = layers_from_handles(handles).await?;
        let mut data = self.data.write();
        data.layers.append(&mut layers);
        let layer_count = data.layers.len() + 1;
        let mut counters = self.counters.compaction.lock().unwrap();
        counters.max_layer_count = std::cmp::max(counters.max_layer_count, layer_count as u64);
        Ok(())
    }

    /// Resets the immutable layers.
    pub fn reset_immutable_layers(&self) {
        self.data.write().layers = Vec::new();
    }

    /// Seals the current mutable layer and creates a new one.
    pub fn seal(&self) {
        // We need to be sure there are no mutations currently in-progress.  This is currently
        // guaranteed by ensuring that all mutations take a read lock on `data`.
        let mut data = self.data.write();
        // The just-sealed layer becomes the newest immutable layer (index 0).
        let layer = std::mem::replace(&mut data.mutable_layer, Self::new_mutable_layer());
        data.layers.insert(0, layer);
        let layer_count = data.layers.len() + 1;
        let mut counters = self.counters.compaction.lock().unwrap();
        counters.max_layer_count = std::cmp::max(counters.max_layer_count, layer_count as u64);
        counters.total_layers_added += 1;
    }

    /// Resets the tree to an empty state.
    pub fn reset(&self) {
        // Note: the mutation callback (if any) and the counters are left untouched here.
        let mut data = self.data.write();
        data.layers = Vec::new();
        data.mutable_layer = Self::new_mutable_layer();
    }

    /// Records metrics for one completed compaction: running totals, a log2 histogram bucket
    /// for the new layer's size, and the layer stack depth after compaction.
    pub fn report_compaction_metrics(
        &self,
        bytes_written: u64,
        duration: std::time::Duration,
        layer_count: usize,
    ) {
        let mut counters = self.counters.compaction.lock().unwrap();
        counters.compactions += 1;
        counters.compaction_bytes_written += bytes_written;
        counters.compaction_time_ns += duration.as_nanos() as u64;

        // floor(log2(bytes_written)), clamped to the last bucket; zero bytes falls in bucket 0.
        let bucket = if bytes_written == 0 {
            0
        } else {
            std::cmp::min(LOG2_HISTOGRAM_BUCKETS - 1, 63 - bytes_written.leading_zeros() as usize)
        };
        counters.layer_size_histogram[bucket] += 1;

        crate::metrics::lsm_tree_metrics().compaction_layer_stack_depth.insert(layer_count as u64);
    }

    /// Returns the total number of bytes written by compactions over the tree's lifetime.
    pub fn compaction_bytes_written(&self) -> u64 {
        self.counters.compaction.lock().unwrap().compaction_bytes_written
    }

    /// Returns an empty layer-set for this tree.
    pub fn empty_layer_set(&self) -> LayerSet<K, V> {
        LayerSet { layers: Vec::new(), merge_fn: self.merge_fn, counters: self.counters.clone() }
    }

    /// Adds all the layers (including the mutable layer) to `layer_set`.
    pub fn add_all_layers_to_layer_set(&self, layer_set: &mut LayerSet<K, V>) {
        let data = self.data.read();
        layer_set.layers.reserve_exact(data.layers.len() + 1);
        // The mutable (newest) layer is pushed first, consistent with the newest-first
        // ordering of `data.layers`.
        layer_set
            .layers
            .push(LockedLayer::from(data.mutable_layer.clone() as Arc<dyn Layer<K, V>>));
        for layer in &data.layers {
            layer_set.layers.push(layer.clone().into());
        }
    }

    /// Returns a clone of the current set of layers (including the mutable layer), after which one
    /// can get an iterator.
    pub fn layer_set(&self) -> LayerSet<K, V> {
        let mut layer_set = self.empty_layer_set();
        self.add_all_layers_to_layer_set(&mut layer_set);
        layer_set
    }

    /// Returns the current set of immutable layers after which one can get an iterator (for e.g.
    /// compacting).  Since these layers are immutable, getting an iterator should not block
    /// anything else.
    pub fn immutable_layer_set(&self) -> LayerSet<K, V> {
        let data = self.data.read();
        let mut layers = Vec::with_capacity(data.layers.len());
        for layer in &data.layers {
            layers.push(layer.clone().into());
        }
        LayerSet { layers, merge_fn: self.merge_fn, counters: self.counters.clone() }
    }

    /// Inserts an item into the mutable layer.
    /// Returns error if item already exists.
    pub fn insert(&self, item: Item<K, V>) -> Result<(), Error> {
        let _measure = DurationMeasureScope::new(&crate::metrics::lsm_tree_metrics().insert);

        let key = item.key.clone();
        // Items carrying DELETED_MARKER are treated as absent for cache purposes.
        let val = if item.value == V::DELETED_MARKER { None } else { Some(item.value.clone()) };
        {
            // `seal` below relies on us holding a read lock whilst we do the mutation.
            let data = self.data.read();
            if let Some(mutation_callback) = data.mutation_callback.as_ref() {
                mutation_callback(Operation::Insert, &item);
            }
            data.mutable_layer.insert(item)?;
        }
        // Cache invalidation happens after the read lock is released.
        self.cache.invalidate(key, val);
        Ok(())
    }

    /// Replaces or inserts an item into the mutable layer.
    pub fn replace_or_insert(&self, item: Item<K, V>) {
        let _measure =
            DurationMeasureScope::new(&crate::metrics::lsm_tree_metrics().replace_or_insert);

        let key = item.key.clone();
        let val = if item.value == V::DELETED_MARKER { None } else { Some(item.value.clone()) };
        {
            // `seal` below relies on us holding a read lock whilst we do the mutation.
            let data = self.data.read();
            if let Some(mutation_callback) = data.mutation_callback.as_ref() {
                mutation_callback(Operation::ReplaceOrInsert, &item);
            }
            data.mutable_layer.replace_or_insert(item);
        }
        self.cache.invalidate(key, val);
    }

    /// Merges the given item into the mutable layer.
    pub fn merge_into(&self, item: Item<K, V>, lower_bound: &K) {
        let _measure = DurationMeasureScope::new(&crate::metrics::lsm_tree_metrics().merge_into);

        let key = item.key.clone();
        {
            // `seal` below relies on us holding a read lock whilst we do the mutation.
            let data = self.data.read();
            if let Some(mutation_callback) = data.mutation_callback.as_ref() {
                mutation_callback(Operation::MergeInto, &item);
            }
            data.mutable_layer.merge_into(item, lower_bound, self.merge_fn);
        }
        self.cache.invalidate(key, None);
    }

    /// Searches for an exact match for the given key. If the value is equal to
    /// `Value::DELETED_MARKER` the item is considered missing and will not be returned.
    pub async fn find(&self, search_key: &K) -> Result<Option<Item<K, V>>, Error>
    where
        K: Eq,
    {
        let _measure = DurationMeasureScope::new(&crate::metrics::lsm_tree_metrics().find);
        // It is important that the cache lookup is done prior to fetching the layer set as the
        // placeholder returned acts as a sort of lock for the validity of the item that may be
        // inserted later via that placeholder.
        let token = match self.cache.lookup_or_reserve(search_key) {
            ObjectCacheResult::Value(value) => {
                if value == V::DELETED_MARKER {
                    return Ok(None);
                } else {
                    return Ok(Some(Item::new(search_key.clone(), value)));
                }
            }
            ObjectCacheResult::Placeholder(token) => Some(token),
            ObjectCacheResult::NoCache => None,
        };
        let layer_set = self.layer_set();
        let mut merger = layer_set.merger();

        // On a miss (or deleted item) the placeholder token is simply dropped, uncompleted.
        Ok(match merger.query(Query::Point(search_key)).await?.get() {
            Some(ItemRef { key, value }) if key == search_key && *value != V::DELETED_MARKER => {
                if let Some(token) = token {
                    token.complete(Some(value));
                }
                Some(Item { key: key.clone(), value: value.clone() })
            }
            _ => None,
        })
    }

    /// Returns the current mutable layer.
    pub fn mutable_layer(&self) -> Arc<SkipListLayer<K, V>> {
        self.data.read().mutable_layer.clone()
    }

    /// Sets a mutation callback which is a callback that is triggered whenever any mutations are
    /// applied to the tree.  This might be useful for tests that want to record the precise
    /// sequence of mutations that are applied to the tree.
    pub fn set_mutation_callback(&self, mutation_callback: MutationCallback<K, V>) {
        self.data.write().mutation_callback = mutation_callback;
    }

    /// Returns the earliest version used by a layer in the tree.
    pub fn get_earliest_version(&self) -> Version {
        let mut earliest_version = LATEST_VERSION;
        for layer in self.layer_set().layers {
            let layer_version = layer.get_version();
            if layer_version < earliest_version {
                earliest_version = layer_version;
            }
        }
        return earliest_version;
    }

    /// Returns a new mutable layer.
    pub fn new_mutable_layer() -> Arc<SkipListLayer<K, V>> {
        SkipListLayer::new(SKIP_LIST_LAYER_ITEMS)
    }

    /// Replaces the mutable layer.
    pub fn set_mutable_layer(&self, layer: Arc<SkipListLayer<K, V>>) {
        self.data.write().mutable_layer = layer;
    }

    /// Records inspect data for the LSM tree into `node`.  Called lazily when inspect is queried.
    pub fn record_inspect_data(&self, root: &fuchsia_inspect::Node) {
        let layer_set = self.layer_set();
        root.record_child("layers", move |node| {
            let mut index = 0;
            for layer in layer_set.layers {
                node.record_child(format!("{index}"), move |node| {
                    layer.1.record_inspect_data(node)
                });
                index += 1;
            }
        });
        {
            let counters = self.counters.compaction.lock().unwrap();
            root.record_uint("num_seeks", self.counters.num_seeks.load(Ordering::Relaxed) as u64);
            // Percentage of candidate layer files the bloom filters let us skip, rounded up.
            root.record_uint("bloom_filter_success_percent", {
                let layer_files_total = self.counters.layer_files_total.load(Ordering::Relaxed);
                let layer_files_skipped = self.counters.layer_files_skipped.load(Ordering::Relaxed);
                if layer_files_total == 0 {
                    0
                } else {
                    (layer_files_skipped * 100).div_ceil(layer_files_total) as u64
                }
            });
            root.record_uint("compactions", counters.compactions);
            root.record_uint("compaction_bytes_written", counters.compaction_bytes_written);
            root.record_uint("compaction_time_ns", counters.compaction_time_ns);
            root.record_uint("total_layers_added", counters.total_layers_added);
            root.record_uint("max_layer_count", counters.max_layer_count);

            // Re-export the log2 histogram; bucket i covers sizes starting at 1 << i.
            let layer_sizes = root.create_uint_exponential_histogram(
                "layer_size_histogram_log2",
                fuchsia_inspect::ExponentialHistogramParams {
                    floor: 1,
                    initial_step: 1,
                    step_multiplier: 2,
                    buckets: LOG2_HISTOGRAM_BUCKETS,
                },
            );
            for (i, count) in counters.layer_size_histogram.iter().enumerate() {
                layer_sizes.insert_multiple(1u64 << i, *count as usize);
            }
            root.record(layer_sizes);
        }
    }
}
474
/// This is an RAII wrapper for a layer which holds a lock on the layer (via the Layer::lock
/// method).
// The DropEvent is the held lock; dropping it releases the lock on the wrapped layer.
pub struct LockedLayer<K, V>(Arc<DropEvent>, Arc<dyn Layer<K, V>>);
478
479impl<K, V> LockedLayer<K, V> {
480    pub async fn close_layer(self) {
481        let layer = self.1;
482        std::mem::drop(self.0);
483        layer.close().await;
484    }
485}
486
487impl<K, V> From<Arc<dyn Layer<K, V>>> for LockedLayer<K, V> {
488    fn from(layer: Arc<dyn Layer<K, V>>) -> Self {
489        let event = layer.lock().unwrap();
490        Self(event, layer)
491    }
492}
493
// Dereferences to the wrapped layer so callers can use `Layer` methods directly.
impl<K, V> std::ops::Deref for LockedLayer<K, V> {
    type Target = Arc<dyn Layer<K, V>>;

    fn deref(&self) -> &Self::Target {
        &self.1
    }
}
501
// Borrow the wrapped layer as a trait object (used e.g. by `LayerSet::merger`).
impl<K, V> AsRef<dyn Layer<K, V>> for LockedLayer<K, V> {
    fn as_ref(&self) -> &(dyn Layer<K, V> + 'static) {
        self.1.as_ref()
    }
}
507
/// A LayerSet provides a snapshot of the layers at a particular point in time, and allows you to
/// get an iterator.  Iterators borrow the layers so something needs to hold reference count.
pub struct LayerSet<K, V> {
    // Each entry holds the layer's lock for as long as this set is alive.
    pub layers: Vec<LockedLayer<K, V>>,
    merge_fn: merge::MergeFn<K, V>,
    counters: Arc<TreeCounters>,
}
515
516impl<K: Key + LayerKey + OrdLowerBound, V: Value> LayerSet<K, V> {
517    pub fn sum_len(&self) -> usize {
518        let mut size = 0;
519        for layer in &self.layers {
520            size += layer.len()
521        }
522        size
523    }
524
525    pub fn merger(&self) -> merge::Merger<'_, K, V> {
526        merge::Merger::new(
527            self.layers.iter().map(|x| x.as_ref()),
528            self.merge_fn,
529            self.counters.clone(),
530        )
531    }
532
533    /// See `Layer::key_exists`.
534    pub async fn key_exists(&self, key: &K) -> Result<Existence, Error> {
535        for l in &self.layers {
536            match l.key_exists(key).await? {
537                e @ (Existence::Exists | Existence::MaybeExists) => return Ok(e),
538                _ => {}
539            }
540        }
541        Ok(Existence::Missing)
542    }
543}
544
545impl<K, V> fmt::Debug for LayerSet<K, V> {
546    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
547        fmt.debug_list()
548            .entries(self.layers.iter().map(|l| {
549                if let Some(handle) = l.handle() {
550                    format!("{}", handle.object_id())
551                } else {
552                    format!("{:?}", Arc::as_ptr(l))
553                }
554            }))
555            .finish()
556    }
557}
558
/// A yielder can be used during compactions which are low priority.
pub trait Yielder: Send {
    // Awaited once per item written by `compact_with_iterator`.
    fn yield_now(&mut self) -> impl Future<Output = ()> + Send;
}
563
564#[cfg(test)]
565mod tests {
566    use super::{LSMTree, Yielder, compact_with_iterator};
567    use crate::drop_event::DropEvent;
568    use crate::lsm_tree::cache::{
569        NullCache, ObjectCache, ObjectCachePlaceholder, ObjectCacheResult,
570    };
571    use crate::lsm_tree::merge::{MergeLayerIterator, MergeResult};
572    use crate::lsm_tree::types::{
573        BoxedLayerIterator, Existence, FuzzyHash, Item, ItemRef, Key, Layer, LayerIterator,
574        LayerKey, OrdLowerBound, OrdUpperBound, SortByU64, Value,
575    };
576    use crate::lsm_tree::{Query, layers_from_handles};
577    use crate::object_handle::ObjectHandle;
578    use crate::serialized_types::{
579        LATEST_VERSION, Version, Versioned, VersionedLatest, versioned_type,
580    };
581    use crate::testing::fake_object::{FakeObject, FakeObjectHandle};
582    use crate::testing::writer::Writer;
583    use anyhow::{Error, anyhow};
584    use async_trait::async_trait;
585    use fprint::TypeFingerprint;
586    use fuchsia_sync::Mutex;
587    use fxfs_macros::FuzzyHash;
588    use rand::rng;
589    use rand::seq::SliceRandom;
590    use std::hash::Hash;
591    use std::sync::Arc;
592
    #[derive(
        Clone,
        Eq,
        PartialEq,
        Debug,
        Hash,
        FuzzyHash,
        serde::Serialize,
        serde::Deserialize,
        TypeFingerprint,
        Versioned,
    )]
    // Range-based key used by the tests below; `start` is the primary sort key.
    struct TestKey(std::ops::Range<u64>);

    versioned_type! { 1.. => TestKey }

    impl SortByU64 for TestKey {
        fn get_leading_u64(&self) -> u64 {
            self.0.start
        }
    }

    // Default LayerKey behaviour is sufficient for these tests.
    impl LayerKey for TestKey {}

    impl OrdUpperBound for TestKey {
        fn cmp_upper_bound(&self, other: &TestKey) -> std::cmp::Ordering {
            self.0.end.cmp(&other.0.end)
        }
    }

    impl OrdLowerBound for TestKey {
        fn cmp_lower_bound(&self, other: &Self) -> std::cmp::Ordering {
            self.0.start.cmp(&other.0.start)
        }
    }
628
    // Trivial merge function: always keep the left item as-is (no combining of items).
    fn emit_left_merge_fn(
        _left: &MergeLayerIterator<'_, TestKey, u64>,
        _right: &MergeLayerIterator<'_, TestKey, u64>,
    ) -> MergeResult<TestKey, u64> {
        MergeResult::EmitLeft
    }

    // For tests, a value of 0 stands in for "deleted".
    impl Value for u64 {
        const DELETED_MARKER: Self = 0;
    }

    // Yielder that returns immediately; used where compaction requires one but no
    // backoff behaviour is under test.
    struct NoOpYielder;
    impl Yielder for NoOpYielder {
        async fn yield_now(&mut self) {}
    }
644
    // Items inserted into the mutable layer come back in key order from a full-scan iterator.
    #[fuchsia::test]
    async fn test_iteration() {
        let tree = LSMTree::new(emit_left_merge_fn, Box::new(NullCache {}));
        let items = [Item::new(TestKey(1..1), 1), Item::new(TestKey(2..2), 2)];
        tree.insert(items[0].clone()).expect("insert error");
        tree.insert(items[1].clone()).expect("insert error");
        let layers = tree.layer_set();
        let mut merger = layers.merger();
        let mut iter = merger.query(Query::FullScan).await.expect("seek failed");
        let ItemRef { key, value, .. } = iter.get().expect("missing item");
        assert_eq!((key, value), (&items[0].key, &items[0].value));
        iter.advance().await.expect("advance failed");
        let ItemRef { key, value, .. } = iter.get().expect("missing item");
        assert_eq!((key, value), (&items[1].key, &items[1].value));
        iter.advance().await.expect("advance failed");
        assert!(iter.get().is_none());
    }
662
    // Two sealed layers compacted into one persistent layer still yield all items in order,
    // both via set_layers on the original tree and via a fresh LSMTree::open.
    #[fuchsia::test]
    async fn test_compact() {
        let tree = LSMTree::new(emit_left_merge_fn, Box::new(NullCache {}));
        let items = [
            Item::new(TestKey(1..1), 1),
            Item::new(TestKey(2..2), 2),
            Item::new(TestKey(3..3), 3),
            Item::new(TestKey(4..4), 4),
        ];
        tree.insert(items[0].clone()).expect("insert error");
        tree.insert(items[1].clone()).expect("insert error");
        tree.seal();
        tree.insert(items[2].clone()).expect("insert error");
        tree.insert(items[3].clone()).expect("insert error");
        tree.seal();
        let object = Arc::new(FakeObject::new());
        let handle = FakeObjectHandle::new(object.clone());
        {
            let layer_set = tree.immutable_layer_set();
            let mut merger = layer_set.merger();
            let iter = merger.query(Query::FullScan).await.expect("create merger");
            compact_with_iterator(
                iter,
                items.len(),
                Writer::new(&handle).await,
                handle.block_size(),
                Option::<NoOpYielder>::None,
            )
            .await
            .expect("compact failed");
        }
        tree.set_layers(layers_from_handles([handle]).await.expect("layers_from_handles failed"));
        let handle = FakeObjectHandle::new(object.clone());
        let tree = LSMTree::open(emit_left_merge_fn, [handle], Box::new(NullCache {}))
            .await
            .expect("open failed");

        let layers = tree.layer_set();
        let mut merger = layers.merger();
        let mut iter = merger.query(Query::FullScan).await.expect("seek failed");
        for i in 1..5 {
            let ItemRef { key, value, .. } = iter.get().expect("missing item");
            assert_eq!((key, value), (&TestKey(i..i), &i));
            iter.advance().await.expect("advance failed");
        }
        assert!(iter.get().is_none());
    }
710
    // find() locates items across both sealed and mutable layers, and misses cleanly.
    #[fuchsia::test]
    async fn test_find() {
        let items = [
            Item::new(TestKey(1..1), 1),
            Item::new(TestKey(2..2), 2),
            Item::new(TestKey(3..3), 3),
            Item::new(TestKey(4..4), 4),
        ];
        let tree = LSMTree::new(emit_left_merge_fn, Box::new(NullCache {}));
        tree.insert(items[0].clone()).expect("insert error");
        tree.insert(items[1].clone()).expect("insert error");
        tree.seal();
        tree.insert(items[2].clone()).expect("insert error");
        tree.insert(items[3].clone()).expect("insert error");

        let item = tree.find(&items[1].key).await.expect("find failed").expect("not found");
        assert_eq!(item, items[1]);
        assert!(tree.find(&TestKey(100..100)).await.expect("find failed").is_none());
    }
730
    // Items whose value equals DELETED_MARKER are reported as absent by find().
    #[fuchsia::test]
    async fn test_find_no_return_deleted_values() {
        let items = [Item::new(TestKey(1..1), 1), Item::new(TestKey(2..2), u64::DELETED_MARKER)];
        let tree = LSMTree::new(emit_left_merge_fn, Box::new(NullCache {}));
        tree.insert(items[0].clone()).expect("insert error");
        tree.insert(items[1].clone()).expect("insert error");

        let item = tree.find(&items[0].key).await.expect("find failed").expect("not found");
        assert_eq!(item, items[0]);
        assert!(tree.find(&items[1].key).await.expect("find failed").is_none());
    }
742
    // Sealing an empty mutable layer and compacting the (empty) immutable set must not lose
    // items subsequently inserted into the new mutable layer.
    #[fuchsia::test]
    async fn test_empty_seal() {
        let tree = LSMTree::new(emit_left_merge_fn, Box::new(NullCache {}));
        tree.seal();
        let item = Item::new(TestKey(1..1), 1);
        tree.insert(item.clone()).expect("insert error");
        let object = Arc::new(FakeObject::new());
        let handle = FakeObjectHandle::new(object.clone());
        {
            let layer_set = tree.immutable_layer_set();
            let mut merger = layer_set.merger();
            let iter = merger.query(Query::FullScan).await.expect("create merger");
            compact_with_iterator(
                iter,
                0,
                Writer::new(&handle).await,
                handle.block_size(),
                Option::<NoOpYielder>::None,
            )
            .await
            .expect("compact failed");
        }
        tree.set_layers(layers_from_handles([handle]).await.expect("layers_from_handles failed"));
        let found_item = tree.find(&item.key).await.expect("find failed").expect("not found");
        assert_eq!(found_item, item);
        assert!(tree.find(&TestKey(2..2)).await.expect("find failed").is_none());
    }
770
    // The filter() adaptor on a merged iterator only yields items matching the predicate.
    #[fuchsia::test]
    async fn test_filter() {
        let items = [
            Item::new(TestKey(1..1), 1),
            Item::new(TestKey(2..2), 2),
            Item::new(TestKey(3..3), 3),
            Item::new(TestKey(4..4), 4),
        ];
        let tree = LSMTree::new(emit_left_merge_fn, Box::new(NullCache {}));
        tree.insert(items[0].clone()).expect("insert error");
        tree.insert(items[1].clone()).expect("insert error");
        tree.insert(items[2].clone()).expect("insert error");
        tree.insert(items[3].clone()).expect("insert error");

        let layers = tree.layer_set();
        let mut merger = layers.merger();

        // Filter out odd keys (which also guarantees we skip the first key which is an edge case).
        let mut iter = merger
            .query(Query::FullScan)
            .await
            .expect("seek failed")
            .filter(|item: ItemRef<'_, TestKey, u64>| item.key.0.start % 2 == 0)
            .await
            .expect("filter failed");

        assert_eq!(iter.get(), Some(items[1].as_item_ref()));
        iter.advance().await.expect("advance failed");
        assert_eq!(iter.get(), Some(items[3].as_item_ref()));
        iter.advance().await.expect("advance failed");
        assert!(iter.get().is_none());
    }
803
804    #[fuchsia::test]
805    async fn test_insert_order_agnostic() {
806        let items = [
807            Item::new(TestKey(1..1), 1),
808            Item::new(TestKey(2..2), 2),
809            Item::new(TestKey(3..3), 3),
810            Item::new(TestKey(4..4), 4),
811            Item::new(TestKey(5..5), 5),
812            Item::new(TestKey(6..6), 6),
813        ];
814        let a = LSMTree::new(emit_left_merge_fn, Box::new(NullCache {}));
815        for item in &items {
816            a.insert(item.clone()).expect("insert error");
817        }
818        let b = LSMTree::new(emit_left_merge_fn, Box::new(NullCache {}));
819        let mut shuffled = items.clone();
820        shuffled.shuffle(&mut rng());
821        for item in &shuffled {
822            b.insert(item.clone()).expect("insert error");
823        }
824        let layers = a.layer_set();
825        let mut merger = layers.merger();
826        let mut iter_a = merger.query(Query::FullScan).await.expect("seek failed");
827        let layers = b.layer_set();
828        let mut merger = layers.merger();
829        let mut iter_b = merger.query(Query::FullScan).await.expect("seek failed");
830
831        for item in items {
832            assert_eq!(Some(item.as_item_ref()), iter_a.get());
833            assert_eq!(Some(item.as_item_ref()), iter_b.get());
834            iter_a.advance().await.expect("advance failed");
835            iter_b.advance().await.expect("advance failed");
836        }
837        assert!(iter_a.get().is_none());
838        assert!(iter_b.get().is_none());
839    }
840
    /// Shared state for [`AuditCache`]: counters recording how the cache is
    /// used, plus an optional canned result to serve from the next lookup.
    struct AuditCacheInner<'a, V: Value> {
        // Number of calls to `lookup_or_reserve`.
        lookups: u64,
        // Number of placeholders that were completed.
        completions: u64,
        // Number of calls to `invalidate`.
        invalidations: u64,
        // Number of placeholders dropped without being completed.
        drops: u64,
        // If set, the next lookup returns this instead of a placeholder.
        result: Option<ObjectCacheResult<'a, V>>,
    }
848
849    impl<V: Value> AuditCacheInner<'_, V> {
850        fn stats(&self) -> (u64, u64, u64, u64) {
851            (self.lookups, self.completions, self.invalidations, self.drops)
852        }
853    }
854
    /// Test cache that audits how the `LSMTree` drives its `ObjectCache`:
    /// every lookup, completion, invalidation and placeholder drop is counted
    /// in the shared `inner` state so tests can assert on the interaction.
    struct AuditCache<'a, V: Value> {
        inner: Arc<Mutex<AuditCacheInner<'a, V>>>,
    }
858
859    impl<V: Value> AuditCache<'_, V> {
860        fn new() -> Self {
861            Self {
862                inner: Arc::new(Mutex::new(AuditCacheInner {
863                    lookups: 0,
864                    completions: 0,
865                    invalidations: 0,
866                    drops: 0,
867                    result: None,
868                })),
869            }
870        }
871    }
872
    /// Placeholder handed out by `AuditCache::lookup_or_reserve`; bumps the
    /// shared counters when it is completed or dropped.
    struct AuditPlaceholder<'a, V: Value> {
        inner: Arc<Mutex<AuditCacheInner<'a, V>>>,
        // Set to true by `complete` so `Drop` doesn't count it as a drop.
        completed: Mutex<bool>,
    }
877
878    impl<V: Value> ObjectCachePlaceholder<V> for AuditPlaceholder<'_, V> {
879        fn complete(self: Box<Self>, _: Option<&V>) {
880            self.inner.lock().completions += 1;
881            *self.completed.lock() = true;
882        }
883    }
884
885    impl<V: Value> Drop for AuditPlaceholder<'_, V> {
886        fn drop(&mut self) {
887            if !*self.completed.lock() {
888                self.inner.lock().drops += 1;
889            }
890        }
891    }
892
893    impl<K: Key + std::cmp::PartialEq, V: Value> ObjectCache<K, V> for AuditCache<'_, V> {
894        fn lookup_or_reserve(&self, _key: &K) -> ObjectCacheResult<'_, V> {
895            {
896                let mut inner = self.inner.lock();
897                inner.lookups += 1;
898                if inner.result.is_some() {
899                    return std::mem::take(&mut inner.result).unwrap();
900                }
901            }
902            ObjectCacheResult::Placeholder(Box::new(AuditPlaceholder {
903                inner: self.inner.clone(),
904                completed: Mutex::new(false),
905            }))
906        }
907
908        fn invalidate(&self, _key: K, _value: Option<V>) {
909            self.inner.lock().invalidations += 1;
910        }
911    }
912
913    #[fuchsia::test]
914    async fn test_cache_handling() {
915        let item = Item::new(TestKey(1..1), 1);
916        let cache = Box::new(AuditCache::new());
917        let inner = cache.inner.clone();
918        let a = LSMTree::new(emit_left_merge_fn, cache);
919
920        // Zero counters.
921        assert_eq!(inner.lock().stats(), (0, 0, 0, 0));
922
923        // Look for an item, but don't find it. So no insertion. It is dropped.
924        assert!(a.find(&item.key).await.expect("Failed find").is_none());
925        assert_eq!(inner.lock().stats(), (1, 0, 0, 1));
926
927        // Insert attempts to invalidate.
928        let _ = a.insert(item.clone());
929        assert_eq!(inner.lock().stats(), (1, 0, 1, 1));
930
931        // Look for item, find it and insert into the cache.
932        assert_eq!(
933            a.find(&item.key).await.expect("Failed find").expect("Item should be found.").value,
934            item.value
935        );
936        assert_eq!(inner.lock().stats(), (2, 1, 1, 1));
937
938        // Insert or replace attempts to invalidate as well.
939        a.replace_or_insert(item.clone());
940        assert_eq!(inner.lock().stats(), (2, 1, 2, 1));
941    }
942
943    #[fuchsia::test]
944    async fn test_cache_hit() {
945        let item = Item::new(TestKey(1..1), 1);
946        let cache = Box::new(AuditCache::new());
947        let inner = cache.inner.clone();
948        let a = LSMTree::new(emit_left_merge_fn, cache);
949
950        // Zero counters.
951        assert_eq!(inner.lock().stats(), (0, 0, 0, 0));
952
953        // Insert attempts to invalidate.
954        let _ = a.insert(item.clone());
955        assert_eq!(inner.lock().stats(), (0, 0, 1, 0));
956
957        // Set up the item to find in the cache.
958        inner.lock().result = Some(ObjectCacheResult::Value(item.value.clone()));
959
960        // Look for item, find it in cache, so no insert.
961        assert_eq!(
962            a.find(&item.key).await.expect("Failed find").expect("Item should be found.").value,
963            item.value
964        );
965        assert_eq!(inner.lock().stats(), (1, 0, 1, 0));
966    }
967
968    #[fuchsia::test]
969    async fn test_cache_says_uncacheable() {
970        let item = Item::new(TestKey(1..1), 1);
971        let cache = Box::new(AuditCache::new());
972        let inner = cache.inner.clone();
973        let a = LSMTree::new(emit_left_merge_fn, cache);
974        let _ = a.insert(item.clone());
975
976        // One invalidation from the insert.
977        assert_eq!(inner.lock().stats(), (0, 0, 1, 0));
978
979        // Set up the NoCache response to find in the cache.
980        inner.lock().result = Some(ObjectCacheResult::NoCache);
981
982        // Look for item, it is uncacheable, so no insert.
983        assert_eq!(
984            a.find(&item.key).await.expect("Failed find").expect("Should find item").value,
985            item.value
986        );
987        assert_eq!(inner.lock().stats(), (1, 0, 1, 0));
988    }
989
    /// Layer whose `seek` always fails; used to exercise error handling in
    /// `LSMTree::find`.
    struct FailLayer {
        // Present until `close` runs; `lock` hands out clones of it.
        drop_event: Mutex<Option<Arc<DropEvent>>>,
    }
993
994    impl FailLayer {
995        fn new() -> Self {
996            Self { drop_event: Mutex::new(Some(Arc::new(DropEvent::new()))) }
997        }
998    }
999
1000    #[async_trait]
1001    impl<K: Key, V: Value> Layer<K, V> for FailLayer {
1002        async fn seek(
1003            &self,
1004            _bound: std::ops::Bound<&K>,
1005        ) -> Result<BoxedLayerIterator<'_, K, V>, Error> {
1006            Err(anyhow!("Purposely failed seek"))
1007        }
1008
1009        fn lock(&self) -> Option<Arc<DropEvent>> {
1010            self.drop_event.lock().clone()
1011        }
1012
1013        fn len(&self) -> usize {
1014            0
1015        }
1016
1017        async fn close(&self) {
1018            let listener = match std::mem::replace(&mut (*self.drop_event.lock()), None) {
1019                Some(drop_event) => drop_event.listen(),
1020                None => return,
1021            };
1022            listener.await;
1023        }
1024
1025        fn get_version(&self) -> Version {
1026            LATEST_VERSION
1027        }
1028
1029        async fn key_exists(&self, _key: &K) -> Result<Existence, Error> {
1030            unimplemented!();
1031        }
1032    }
1033
    /// Layer stub whose `key_exists` always returns a canned answer.
    struct MockLayer {
        // The fixed answer `key_exists` reports.
        exists_result: Existence,
        // Present until `close` runs; `lock` hands out clones of it.
        drop_event: Mutex<Option<Arc<DropEvent>>>,
    }
1038
1039    impl MockLayer {
1040        fn new(exists_result: Existence) -> Self {
1041            Self { exists_result, drop_event: Mutex::new(Some(Arc::new(DropEvent::new()))) }
1042        }
1043    }
1044
1045    #[async_trait]
1046    impl<K: Key, V: Value> Layer<K, V> for MockLayer {
1047        async fn seek(
1048            &self,
1049            _bound: std::ops::Bound<&K>,
1050        ) -> Result<BoxedLayerIterator<'_, K, V>, Error> {
1051            unimplemented!()
1052        }
1053
1054        fn lock(&self) -> Option<Arc<DropEvent>> {
1055            self.drop_event.lock().clone()
1056        }
1057
1058        fn len(&self) -> usize {
1059            0
1060        }
1061
1062        async fn close(&self) {
1063            let listener = match std::mem::replace(&mut (*self.drop_event.lock()), None) {
1064                Some(drop_event) => drop_event.listen(),
1065                None => return,
1066            };
1067            listener.await;
1068        }
1069
1070        fn get_version(&self) -> Version {
1071            LATEST_VERSION
1072        }
1073
1074        async fn key_exists(&self, _key: &K) -> Result<Existence, Error> {
1075            Ok(self.exists_result)
1076        }
1077    }
1078
1079    #[fuchsia::test]
1080    async fn test_layer_set_key_exists() {
1081        use super::LockedLayer;
1082
1083        let tree = LSMTree::new(emit_left_merge_fn, Box::new(NullCache {}));
1084        let mut layer_set = tree.empty_layer_set();
1085
1086        // Empty layer set should return Missing.
1087        assert_eq!(
1088            layer_set.key_exists(&TestKey(0..1)).await.expect("key_exists failed"),
1089            Existence::Missing
1090        );
1091
1092        // Add a layer that returns Missing.
1093        layer_set.layers.push(LockedLayer::from(
1094            Arc::new(MockLayer::new(Existence::Missing)) as Arc<dyn Layer<TestKey, u64>>
1095        ));
1096        assert_eq!(
1097            layer_set.key_exists(&TestKey(0..1)).await.expect("key_exists failed"),
1098            Existence::Missing
1099        );
1100
1101        // Add a layer that returns MaybeExists.
1102        layer_set.layers.push(LockedLayer::from(
1103            Arc::new(MockLayer::new(Existence::MaybeExists)) as Arc<dyn Layer<TestKey, u64>>
1104        ));
1105        assert_eq!(
1106            layer_set.key_exists(&TestKey(0..1)).await.expect("key_exists failed"),
1107            Existence::MaybeExists
1108        );
1109
1110        // Add a layer that returns Exists.
1111        layer_set.layers.insert(
1112            0,
1113            LockedLayer::from(
1114                Arc::new(MockLayer::new(Existence::Exists)) as Arc<dyn Layer<TestKey, u64>>
1115            ),
1116        );
1117        assert_eq!(
1118            layer_set.key_exists(&TestKey(0..1)).await.expect("key_exists failed"),
1119            Existence::Exists
1120        );
1121    }
1122
1123    #[fuchsia::test]
1124    async fn test_failed_lookup() {
1125        let cache = Box::new(AuditCache::new());
1126        let inner = cache.inner.clone();
1127        let a = LSMTree::new(emit_left_merge_fn, cache);
1128        a.set_layers(vec![Arc::new(FailLayer::new())]);
1129
1130        // Zero counters.
1131        assert_eq!(inner.lock().stats(), (0, 0, 0, 0));
1132
1133        // Lookup should fail and drop the placeholder.
1134        assert!(a.find(&TestKey(1..1)).await.is_err());
1135        assert_eq!(inner.lock().stats(), (1, 0, 0, 1));
1136    }
1137}
1138
#[cfg(fuzz)]
mod fuzz {
    //! Fuzzing entry point for `LSMTree`: drives a tree through an arbitrary
    //! sequence of insert/replace/merge/find/seal operations.
    use crate::lsm_tree::types::{
        FuzzyHash, Item, LayerKey, OrdLowerBound, OrdUpperBound, SortByU64, Value,
    };
    use crate::serialized_types::{
        LATEST_VERSION, Version, Versioned, VersionedLatest, versioned_type,
    };
    use arbitrary::Arbitrary;
    use fprint::TypeFingerprint;
    use fuzz::fuzz;
    use fxfs_macros::FuzzyHash;
    use std::hash::Hash;

    /// Minimal fuzzable key: a `u64` range, mirroring the unit-test key type.
    #[derive(
        Arbitrary,
        Clone,
        Eq,
        Hash,
        FuzzyHash,
        PartialEq,
        Debug,
        serde::Serialize,
        serde::Deserialize,
        TypeFingerprint,
        Versioned,
    )]
    struct TestKey(std::ops::Range<u64>);

    versioned_type! { 1.. => TestKey }

    impl Versioned for u64 {}
    versioned_type! { 1.. => u64 }

    impl LayerKey for TestKey {}

    impl SortByU64 for TestKey {
        fn get_leading_u64(&self) -> u64 {
            self.0.start
        }
    }

    // Upper bounds compare by the end of the range.
    impl OrdUpperBound for TestKey {
        fn cmp_upper_bound(&self, other: &TestKey) -> std::cmp::Ordering {
            self.0.end.cmp(&other.0.end)
        }
    }

    // Lower bounds compare by the start of the range.
    impl OrdLowerBound for TestKey {
        fn cmp_lower_bound(&self, other: &Self) -> std::cmp::Ordering {
            self.0.start.cmp(&other.0.start)
        }
    }

    impl Value for u64 {
        const DELETED_MARKER: Self = 0;
    }

    // Note: This code isn't really dead. It's used below in
    // `fuzz_lsm_tree_actions`. However, the `#[fuzz]` proc macro attribute
    // obfuscates the usage enough to confuse the compiler.
    #[allow(dead_code)]
    #[derive(Arbitrary)]
    enum FuzzAction {
        Insert(Item<TestKey, u64>),
        ReplaceOrInsert(Item<TestKey, u64>),
        MergeInto(Item<TestKey, u64>, TestKey),
        Find(TestKey),
        Seal,
    }

    /// Applies each fuzzer-generated action to a fresh tree in order; only
    /// `find` is expected to always succeed.
    #[fuzz]
    fn fuzz_lsm_tree_actions(actions: Vec<FuzzAction>) {
        use super::LSMTree;
        use super::cache::NullCache;
        use crate::lsm_tree::merge::{MergeLayerIterator, MergeResult};
        use futures::executor::block_on;

        fn emit_left_merge_fn(
            _left: &MergeLayerIterator<'_, TestKey, u64>,
            _right: &MergeLayerIterator<'_, TestKey, u64>,
        ) -> MergeResult<TestKey, u64> {
            MergeResult::EmitLeft
        }

        let tree = LSMTree::new(emit_left_merge_fn, Box::new(NullCache {}));
        for action in actions {
            match action {
                FuzzAction::Insert(item) => {
                    // Insert may legitimately fail (e.g. duplicate key).
                    let _ = tree.insert(item);
                }
                FuzzAction::ReplaceOrInsert(item) => {
                    tree.replace_or_insert(item);
                }
                FuzzAction::Find(key) => {
                    block_on(tree.find(&key)).expect("find failed");
                }
                FuzzAction::MergeInto(item, bound) => tree.merge_into(item, &bound),
                FuzzAction::Seal => tree.seal(),
            };
        }
    }
}