fxfs/lsm_tree/persistent_layer.rs

// Copyright 2024 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// PersistentLayer object format
//
// The layer is made up of 1 or more "blocks" whose size is some multiple of the block size used
// by the underlying handle.
//
// The persistent layer has 4 types of blocks:
//  - Header block
//  - Data block
//  - BloomFilter block
//  - Seek block (+LayerInfo)
//
// The structure of the file is as follows:
//
// blk#     contents
// 0        [Header]
// 1        [Data]
// 2        [Data]
// ...      [Data]
// L        [BloomFilter]
// L + 1    [BloomFilter]
// ...      [BloomFilter]
// M        [Seek]
// M + 1    [Seek]
// ...      [Seek]
// N        [Seek/LayerInfo]
//
// Generally, there will be an order of magnitude more Data blocks than Seek/BloomFilter blocks.
//
// Header contains a Version-prefixed LayerHeader struct.  This version is used for everything in
// the layer file.
//
// Data blocks contain a little-endian u16 item count at the start, then a series of serialized
// items, and a list of little-endian u16 offsets within the block for where each serialized item
// starts, excluding the first item (since it is at a known offset).  The list of offsets runs to
// the end of the block, and since the items are of variable length, there may be dead space
// between the two sections when the next item and its offset cannot fit into the block.
//
// |item_count|item|item|item|item|item|item|dead space|offset|offset|offset|offset|offset|
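//
// For example (a sketch with illustrative sizes), a 512-byte block holding three items of
// serialized sizes 10, 20 and 30 bytes would be laid out as:
//
// | 0x0003 | item0 @ 2 | item1 @ 12 | item2 @ 32 | zero padding | 0x000c | 0x0020 |
//
// where the two trailing u16s are the offsets of items 1 and 2 (the first item always starts
// right after the item count, so it has no entry).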
//
// BloomFilter blocks contain a bitmap which is used to probabilistically determine if a given key
// might exist in the layer file.  See `BloomFilter` for details on this structure.  Note that this
// can be absent from the file for small layer files.
//
// Seek/LayerInfo blocks contain both the seek table, and a single LayerInfo struct at the tail of
// the last block, with the LayerInfo's length written as a little-endian u64 at the very end.  The
// padding between the two structs is ignored but nominally is zeroed.  They share blocks to avoid
// wasting padding bytes.  Note that the seek table can be absent from the file for small layer
// files (but there will always be one block for the LayerInfo).
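//
// Illustrative layout of the final block (the seek table may also fill earlier whole blocks):
//
// |seek table entries...|zero padding|LayerInfo|LayerInfo length (u64)|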
//
// The seek table consists of a little-endian u64 for every data block except the first.  Each
// entry is the leading u64 of the first key in its block, so the entries are monotonically
// non-decreasing and there may be duplicates.  There are exactly as many seek blocks as are
// required to hold one entry fewer than the number of data blocks.
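//
// For example (illustrative): a layer whose five data blocks start with keys whose leading u64
// values are 0, 5, 5, 9 and 12 stores the four entries [5, 5, 9, 12]; on load, an implicit
// leading 0 is assumed for the first block.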

use crate::drop_event::DropEvent;
use crate::errors::FxfsError;
use crate::filesystem::MAX_BLOCK_SIZE;
use crate::log::*;
use crate::lsm_tree::bloom_filter::{BloomFilterReader, BloomFilterStats, BloomFilterWriter};
use crate::lsm_tree::types::{
    BoxedLayerIterator, FuzzyHash, Item, ItemRef, Key, Layer, LayerIterator, LayerValue,
    LayerWriter,
};
use crate::object_handle::{ObjectHandle, ReadObjectHandle, WriteBytes};
use crate::object_store::caching_object_handle::{CHUNK_SIZE, CachedChunk, CachingObjectHandle};
use crate::round::{round_down, round_up};
use crate::serialized_types::{LATEST_VERSION, Version, Versioned, VersionedLatest};
use anyhow::{Context, Error, anyhow, bail, ensure};
use async_trait::async_trait;
use byteorder::{ByteOrder, LittleEndian, ReadBytesExt, WriteBytesExt};
use fprint::TypeFingerprint;
use fuchsia_sync::Mutex;
use serde::{Deserialize, Serialize};
use static_assertions::const_assert;
use std::cmp::Ordering;
use std::io::{Read, Write as _};
use std::marker::PhantomData;
use std::ops::Bound;
use std::sync::Arc;

const PERSISTENT_LAYER_MAGIC: &[u8; 8] = b"FxfsLayr";

/// LayerHeader is stored in the first block of the persistent layer.
pub type LayerHeader = LayerHeaderV39;

#[derive(Debug, Serialize, Deserialize, TypeFingerprint, Versioned)]
pub struct LayerHeaderV39 {
    /// 'FxfsLayr'
    magic: [u8; 8],
    /// The block size used within this layer file. This is typically set at compaction time to the
    /// same block size as the underlying object handle.
    ///
    /// (Each block starts with a 2 byte item count so there is a 64k item limit per block,
    /// regardless of block size).
    block_size: u64,
}

/// The last block of each layer contains metadata for the rest of the layer.
pub type LayerInfo = LayerInfoV39;

#[derive(Debug, Serialize, Deserialize, TypeFingerprint, Versioned)]
pub struct LayerInfoV39 {
    /// How many items are in the layer file.  Mainly used for sizing bloom filters during
    /// compaction.
    num_items: usize,
    /// The number of data blocks in the layer file.
    num_data_blocks: u64,
    /// The size of the bloom filter in the layer file.  Not necessarily block-aligned.
    bloom_filter_size_bytes: usize,
    /// The seed for the nonces used in the bloom filter.
    bloom_filter_seed: u64,
    /// How many nonces to use for bloom filter hashing.
    bloom_filter_num_hashes: usize,
}

/// A handle to a persistent layer.
pub struct PersistentLayer<K, V> {
    // We retain a reference to the underlying object handle so we can hand out references to it for
    // `Layer::handle` when clients need it.  Internal reads should go through
    // `caching_object_handle` so they are cached.  Note that `CachingObjectHandle` used to
    // implement `ReadObjectHandle`, but that was removed so that `CachingObjectHandle` could hand
    // out data references rather than requiring copying to a buffer, which speeds up LSM tree
    // operations.
    object_handle: Arc<dyn ReadObjectHandle>,
    caching_object_handle: CachingObjectHandle<Arc<dyn ReadObjectHandle>>,
    version: Version,
    block_size: u64,
    data_size: u64,
    seek_table: Vec<u64>,
    num_items: usize,
    bloom_filter: Option<BloomFilterReader<K>>,
    bloom_filter_stats: Option<BloomFilterStats>,
    close_event: Mutex<Option<Arc<DropEvent>>>,
    _value_type: PhantomData<V>,
}

#[derive(Debug)]
struct BufferCursor {
    chunk: Option<CachedChunk>,
    pos: usize,
}

impl std::io::Read for BufferCursor {
    fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
        let chunk = if let Some(chunk) = &self.chunk {
            chunk
        } else {
            return Ok(0);
        };
        let to_read = std::cmp::min(buf.len(), chunk.len().saturating_sub(self.pos));
        if to_read > 0 {
            buf[..to_read].copy_from_slice(&chunk[self.pos..self.pos + to_read]);
            self.pos += to_read;
        }
        Ok(to_read)
    }
}

const MIN_BLOCK_SIZE: u64 = 512;

// For small layer files, don't bother with the bloom filter.  Arbitrarily chosen.
const MINIMUM_DATA_BLOCKS_FOR_BLOOM_FILTER: usize = 4;

// How many blocks we reserve for the header.  Data blocks start at this offset.
const NUM_HEADER_BLOCKS: u64 = 1;

/// The smallest possible (empty) layer file is always 2 blocks, one for the header and one for
/// LayerInfo.
const MINIMUM_LAYER_FILE_BLOCKS: u64 = 2;

// Put safety rails on the size of the bloom filter and seek table to avoid OOMing the system.
// It's more likely that tampering has occurred in these cases.
const MAX_BLOOM_FILTER_SIZE: usize = 64 * 1024 * 1024;
const MAX_SEEK_TABLE_SIZE: usize = 64 * 1024 * 1024;

// The following constants refer to sizes of metadata in the data blocks.
const PER_DATA_BLOCK_HEADER_SIZE: usize = 2;
const PER_DATA_BLOCK_SEEK_ENTRY_SIZE: usize = 2;
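
// For example (a worked sketch, not a limit enforced here): with a 512-byte block and fixed
// 18-byte items, a block holds at most 25 items, since only items after the first need an
// offset entry: 2 + 25 * 18 + 24 * 2 = 500 <= 512, while 26 items would need 520 bytes.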

// A key-only iterator, used while seeking through the tree.
struct KeyOnlyIterator<'iter, K: Key, V: LayerValue> {
    // Allocated out of |layer|.
    buffer: BufferCursor,

    layer: &'iter PersistentLayer<K, V>,

    // The position of the _next_ block to be read.
    pos: u64,

    // The item index in the current block.
    item_index: u16,

    // The number of items in the current block.
    item_count: u16,

    // The current key.
    key: Option<K>,

    // Set by a wrapping iterator once the value has been deserialized, so the KeyOnlyIterator knows
    // whether it is pointing at the next key or not.
    value_deserialized: bool,
}

impl<K: Key, V: LayerValue> KeyOnlyIterator<'_, K, V> {
    fn new<'iter>(layer: &'iter PersistentLayer<K, V>, pos: u64) -> KeyOnlyIterator<'iter, K, V> {
        assert!(pos % layer.block_size == 0);
        KeyOnlyIterator {
            layer,
            buffer: BufferCursor { chunk: None, pos: pos as usize % CHUNK_SIZE },
            pos,
            item_index: 0,
            item_count: 0,
            key: None,
            value_deserialized: false,
        }
    }

    // Repositions the iterator to point to the `index`'th item in the current block.
    // Returns an error if the index is out of range or the resulting offset contains an obviously
    // invalid value.
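    // The offset of item `index` (for `index` > 0) lives in the block's trailing offset list, at
    // byte `block_end - PER_DATA_BLOCK_SEEK_ENTRY_SIZE * (item_count - index)`.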
    fn seek_to_block_item(&mut self, index: u16) -> Result<(), Error> {
        ensure!(index < self.item_count, FxfsError::OutOfRange);
        if index == self.item_index && self.value_deserialized {
            // Fast-path when we are seeking in a linear manner, as is the case when advancing a
            // wrapping iterator that also deserializes the values.
            return Ok(());
        }
        let offset_in_block = if index == 0 {
            // First entry isn't actually recorded, it is at the start of the block after the item
            // count.
            PER_DATA_BLOCK_HEADER_SIZE
        } else {
            let old_buffer_pos = self.buffer.pos;
            self.buffer.pos = round_up(self.buffer.pos, self.layer.block_size as usize).unwrap()
                - (PER_DATA_BLOCK_SEEK_ENTRY_SIZE * (usize::from(self.item_count - index)));
            let res = self.buffer.read_u16::<LittleEndian>();
            self.buffer.pos = old_buffer_pos;
            let offset_in_block = res.context("Failed to read offset")? as usize;
            if offset_in_block >= self.layer.block_size as usize
                || offset_in_block <= PER_DATA_BLOCK_HEADER_SIZE
            {
                return Err(anyhow!(FxfsError::Inconsistent))
                    .context(format!("Offset {} is out of valid range.", offset_in_block));
            }
            offset_in_block
        };
        self.item_index = index;
        self.buffer.pos =
            round_down(self.buffer.pos, self.layer.block_size as usize) + offset_in_block;
        Ok(())
    }

    async fn advance(&mut self) -> Result<(), Error> {
        if self.item_index >= self.item_count {
            if self.pos >= self.layer.data_offset() + self.layer.data_size {
                self.key = None;
                return Ok(());
            }
            if self.buffer.chunk.is_none() || self.pos as usize % CHUNK_SIZE == 0 {
                self.buffer.chunk = Some(
                    self.layer
                        .caching_object_handle
                        .read(self.pos as usize)
                        .await
                        .context("Reading during advance")?,
                );
            }
            self.buffer.pos = self.pos as usize % CHUNK_SIZE;
            self.item_count = self.buffer.read_u16::<LittleEndian>()?;
            if self.item_count == 0 {
                bail!(
                    "Read block with zero item count (object: {}, offset: {})",
                    self.layer.object_handle.object_id(),
                    self.pos
                );
            }
            debug!(
                pos = self.pos,
                buf:? = self.buffer,
                object_size = self.layer.data_offset() + self.layer.data_size,
                oid = self.layer.object_handle.object_id();
                ""
            );
            self.pos += self.layer.block_size;
            self.item_index = 0;
            self.value_deserialized = true;
        }
        self.seek_to_block_item(self.item_index)?;
        self.key = Some(
            K::deserialize_from_version(self.buffer.by_ref(), self.layer.version)
                .context("Corrupt layer (key)")?,
        );
        self.item_index += 1;
        self.value_deserialized = false;
        Ok(())
    }

    fn get(&self) -> Option<&K> {
        self.key.as_ref()
    }
}

struct Iterator<'iter, K: Key, V: LayerValue> {
    inner: KeyOnlyIterator<'iter, K, V>,
    // The current item.
    item: Option<Item<K, V>>,
}

impl<'iter, K: Key, V: LayerValue> Iterator<'iter, K, V> {
    fn new(mut seek_iterator: KeyOnlyIterator<'iter, K, V>) -> Result<Self, Error> {
        let key = std::mem::take(&mut seek_iterator.key);
        let item = if let Some(key) = key {
            seek_iterator.value_deserialized = true;
            Some(Item {
                key,
                value: V::deserialize_from_version(
                    seek_iterator.buffer.by_ref(),
                    seek_iterator.layer.version,
                )
                .context("Corrupt layer (value)")?,
                sequence: seek_iterator
                    .buffer
                    .read_u64::<LittleEndian>()
                    .context("Corrupt layer (seq)")?,
            })
        } else {
            None
        };
        Ok(Self { inner: seek_iterator, item })
    }
}

#[async_trait]
impl<'iter, K: Key, V: LayerValue> LayerIterator<K, V> for Iterator<'iter, K, V> {
    async fn advance(&mut self) -> Result<(), Error> {
        self.inner.advance().await?;
        let key = std::mem::take(&mut self.inner.key);
        self.item = if let Some(key) = key {
            self.inner.value_deserialized = true;
            Some(Item {
                key,
                value: V::deserialize_from_version(
                    self.inner.buffer.by_ref(),
                    self.inner.layer.version,
                )
                .context("Corrupt layer (value)")?,
                sequence: self
                    .inner
                    .buffer
                    .read_u64::<LittleEndian>()
                    .context("Corrupt layer (seq)")?,
            })
        } else {
            None
        };
        Ok(())
    }

    fn get(&self) -> Option<ItemRef<'_, K, V>> {
        self.item.as_ref().map(<&Item<K, V>>::into)
    }
}

// Returns the size of the seek table in bytes.
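// For example (illustrative): a layer with 5 data blocks has 4 entries, i.e. 32 bytes.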
fn seek_table_size(num_data_blocks: u64) -> usize {
    // The first data block doesn't have an entry.
    let seek_table_entries = num_data_blocks.saturating_sub(1) as usize;
    if seek_table_entries == 0 {
        return 0;
    }
    let entry_size = std::mem::size_of::<u64>();
    seek_table_entries * entry_size
}

async fn load_seek_table(
    object_handle: &(impl ReadObjectHandle + 'static),
    seek_table_offset: u64,
    num_data_blocks: u64,
) -> Result<Vec<u64>, Error> {
    let seek_table_size = seek_table_size(num_data_blocks);
    if seek_table_size == 0 {
        return Ok(vec![]);
    }
    if seek_table_size > MAX_SEEK_TABLE_SIZE {
        return Err(anyhow!(FxfsError::NotSupported)).context("Seek table too large");
    }
    let mut buffer = object_handle.allocate_buffer(seek_table_size).await;
    let bytes_read = object_handle
        .read(seek_table_offset, buffer.as_mut())
        .await
        .context("Reading seek table blocks")?;
    ensure!(bytes_read == seek_table_size, "Short read");

    let mut seek_table = Vec::with_capacity(num_data_blocks as usize);
    // No entry for the first data block; assume a lower bound of 0.
    seek_table.push(0);
    let mut prev = 0;
    for chunk in buffer.as_slice().chunks_exact(std::mem::size_of::<u64>()) {
        let next = LittleEndian::read_u64(chunk);
        // Entries must be non-decreasing; if they aren't, either something's broken or we've gone
        // off the end and we're reading zeroes.
        if prev > next {
            return Err(anyhow!(FxfsError::Inconsistent))
                .context(format!("Seek table entry out of order, {:?} > {:?}", prev, next));
        }
        prev = next;
        seek_table.push(next);
    }
    Ok(seek_table)
}

async fn load_bloom_filter<K: FuzzyHash>(
    handle: &(impl ReadObjectHandle + 'static),
    bloom_filter_offset: u64,
    layer_info: &LayerInfo,
) -> Result<Option<BloomFilterReader<K>>, Error> {
    if layer_info.bloom_filter_size_bytes == 0 {
        return Ok(None);
    }
    if layer_info.bloom_filter_size_bytes > MAX_BLOOM_FILTER_SIZE {
        return Err(anyhow!(FxfsError::NotSupported)).context("Bloom filter too large");
    }
    let mut buffer = handle.allocate_buffer(layer_info.bloom_filter_size_bytes).await;
    handle.read(bloom_filter_offset, buffer.as_mut()).await.context("Failed to read")?;
    Ok(Some(BloomFilterReader::read(
        buffer.as_slice(),
        layer_info.bloom_filter_seed,
        layer_info.bloom_filter_num_hashes,
    )?))
}

impl<K: Key, V: LayerValue> PersistentLayer<K, V> {
    pub async fn open(handle: impl ReadObjectHandle + 'static) -> Result<Arc<Self>, Error> {
        let bs = handle.block_size();
        let mut buffer = handle.allocate_buffer(bs as usize).await;
        handle.read(0, buffer.as_mut()).await.context("Failed to read first block")?;
        let mut cursor = std::io::Cursor::new(buffer.as_slice());
        let version = Version::deserialize_from(&mut cursor)?;

        ensure!(version <= LATEST_VERSION, FxfsError::InvalidVersion);
        let header = LayerHeader::deserialize_from_version(&mut cursor, version)
            .context("Failed to deserialize header")?;
        if &header.magic != PERSISTENT_LAYER_MAGIC {
            return Err(anyhow!(FxfsError::Inconsistent).context("Invalid layer file magic"));
        }
        if header.block_size == 0 || !header.block_size.is_power_of_two() {
            return Err(anyhow!(FxfsError::Inconsistent))
                .context(format!("Invalid block size {}", header.block_size));
        }
        ensure!(header.block_size <= MAX_BLOCK_SIZE, FxfsError::NotSupported);
        let physical_block_size = handle.block_size();
        if header.block_size % physical_block_size != 0 {
            return Err(anyhow!(FxfsError::Inconsistent)).context(format!(
                "{} not a multiple of physical block size {}",
                header.block_size, physical_block_size
            ));
        }
        std::mem::drop(cursor);

        let bs = header.block_size as usize;
        if handle.get_size() < MINIMUM_LAYER_FILE_BLOCKS * bs as u64 {
            return Err(anyhow!(FxfsError::Inconsistent).context("Layer file too short"));
        }

        let layer_info = {
            let last_block_offset = handle
                .get_size()
                .checked_sub(header.block_size)
                .ok_or(FxfsError::Inconsistent)
                .context("Layer file unexpectedly short")?;
            handle
                .read(last_block_offset, buffer.subslice_mut(0..header.block_size as usize))
                .await
                .context("Failed to read layer info")?;
            let layer_info_len =
                LittleEndian::read_u64(&buffer.as_slice()[bs - std::mem::size_of::<u64>()..]);
            let layer_info_offset = bs
                .checked_sub(std::mem::size_of::<u64>() + layer_info_len as usize)
                .ok_or(FxfsError::Inconsistent)
                .context("Invalid layer info length")?;
            let mut cursor = std::io::Cursor::new(&buffer.as_slice()[layer_info_offset..]);
            LayerInfo::deserialize_from_version(&mut cursor, version)
                .context("Failed to deserialize LayerInfo")?
        };
        std::mem::drop(buffer);
        if layer_info.num_items == 0 && layer_info.num_data_blocks > 0 {
            return Err(anyhow!(FxfsError::Inconsistent))
                .context("Invalid num_items/num_data_blocks");
        }
        let total_blocks = handle.get_size() / header.block_size;
        let bloom_filter_blocks =
            round_up(layer_info.bloom_filter_size_bytes as u64, header.block_size)
                .unwrap_or(layer_info.bloom_filter_size_bytes as u64)
                / header.block_size;
        if layer_info.num_data_blocks + bloom_filter_blocks
            > total_blocks - MINIMUM_LAYER_FILE_BLOCKS
        {
            return Err(anyhow!(FxfsError::Inconsistent)).context("Invalid number of blocks");
        }

        let bloom_filter_offset =
            header.block_size * (NUM_HEADER_BLOCKS + layer_info.num_data_blocks);
        let bloom_filter = if version == LATEST_VERSION {
            load_bloom_filter(&handle, bloom_filter_offset, &layer_info)
                .await
                .context("Failed to load bloom filter")?
        } else {
            // Ignore the bloom filter for layer files in outdated versions.  We don't know whether
            // keys have changed formats or not (and therefore have different hash values), so we
            // must ignore the bloom filter and always query the layer.
            None
        };
        let bloom_filter_stats = bloom_filter.as_ref().map(|b| b.stats());

        let seek_offset = header.block_size
            * (NUM_HEADER_BLOCKS + layer_info.num_data_blocks + bloom_filter_blocks);
        let seek_table = load_seek_table(&handle, seek_offset, layer_info.num_data_blocks)
            .await
            .context("Failed to load seek table")?;

        let object_handle = Arc::new(handle) as Arc<dyn ReadObjectHandle>;
        let caching_object_handle = CachingObjectHandle::new(object_handle.clone());
        Ok(Arc::new(PersistentLayer {
            object_handle,
            caching_object_handle,
            version,
            block_size: header.block_size,
            data_size: layer_info.num_data_blocks * header.block_size,
            seek_table,
            num_items: layer_info.num_items,
            bloom_filter,
            bloom_filter_stats,
            close_event: Mutex::new(Some(Arc::new(DropEvent::new()))),
            _value_type: PhantomData::default(),
        }))
    }

    /// Whether the bloom filter for the layer file is consulted or not.  If this is false, then
    /// `maybe_contains_key` will always return true.
    /// Note that the persistent layer file may still have a bloom filter, but it might be ignored
    /// (e.g. for a layer file on an older version).
    pub fn has_bloom_filter(&self) -> bool {
        self.bloom_filter.is_some()
    }

    fn data_offset(&self) -> u64 {
        NUM_HEADER_BLOCKS * self.block_size
    }
}

#[async_trait]
impl<K: Key, V: LayerValue> Layer<K, V> for PersistentLayer<K, V> {
    fn handle(&self) -> Option<&dyn ReadObjectHandle> {
        Some(&self.object_handle)
    }

    fn purge_cached_data(&self) {
        self.caching_object_handle.purge();
    }

    async fn seek<'a>(&'a self, bound: Bound<&K>) -> Result<BoxedLayerIterator<'a, K, V>, Error> {
        let (key, excluded) = match bound {
            Bound::Unbounded => {
                let mut iterator = Iterator::new(KeyOnlyIterator::new(self, self.data_offset()))?;
                iterator.advance().await.context("Unbounded seek advance")?;
                return Ok(Box::new(iterator));
            }
            Bound::Included(k) => (k, false),
            Bound::Excluded(k) => (k, true),
        };
        let first_data_block_index = self.data_offset() / self.block_size;

        let (mut left_offset, mut right_offset) = {
            // We are searching for a range here, as multiple items can have the same value in
            // this approximate search. Since the values used are the smallest in the associated
            // block it means that if the value equals the target you should also search the
            // one before it. The goal is for table[left] < target < table[right].
            let target = key.get_leading_u64();
            // Because the first entry in the table is always 0, right_index will never be 0.
            let right_index = self.seek_table.as_slice().partition_point(|&x| x <= target) as u64;
            // Since partition_point will find the index of the first place where the predicate
            // is false, we subtract 1 to get the index where it was last true.
            let left_index = self.seek_table.as_slice()[..right_index as usize]
                .partition_point(|&x| x < target)
                .saturating_sub(1) as u64;
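            // For example (illustrative): with a seek_table of [0, 5, 5, 9] and a target of 5,
            // right_index is 3 (the first entry greater than the target) and left_index is 0, so
            // blocks 0 through 2 may hold the target key.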

            (
                (left_index + first_data_block_index) * self.block_size,
                (right_index + first_data_block_index) * self.block_size,
            )
        };
        let mut left = KeyOnlyIterator::new(self, left_offset);
        left.advance().await.context("Initial seek advance")?;
        match left.get() {
            None => return Ok(Box::new(Iterator::new(left)?)),
            Some(left_key) => match left_key.cmp_upper_bound(key) {
                Ordering::Greater => return Ok(Box::new(Iterator::new(left)?)),
                Ordering::Equal => {
                    if excluded {
                        left.advance().await?;
                    }
                    return Ok(Box::new(Iterator::new(left)?));
                }
                Ordering::Less => {}
            },
        }
        let mut right = None;
        while right_offset - left_offset > self.block_size {
            // Pick a block midway.
            let mid_offset =
                round_down(left_offset + (right_offset - left_offset) / 2, self.block_size);
            let mut iterator = KeyOnlyIterator::new(self, mid_offset);
            iterator.advance().await?;
            let iter_key: &K = iterator.get().unwrap();
            match iter_key.cmp_upper_bound(key) {
                Ordering::Greater => {
                    right_offset = mid_offset;
                    right = Some(iterator);
                }
                Ordering::Equal => {
                    if excluded {
                        iterator.advance().await?;
                    }
                    return Ok(Box::new(Iterator::new(iterator)?));
                }
                Ordering::Less => {
                    left_offset = mid_offset;
                    left = iterator;
                }
            }
        }

        // Finish the binary search on the block pointed to by `left`.
        let mut left_index = 0;
        let mut right_index = left.item_count;
        // If the size is zero then we don't touch the iterator.
        while left_index < (right_index - 1) {
            let mid_index = left_index + ((right_index - left_index) / 2);
            left.seek_to_block_item(mid_index).context("Read index offset for binary search")?;
            left.advance().await?;
            match left.get().unwrap().cmp_upper_bound(key) {
                Ordering::Greater => {
                    right_index = mid_index;
                }
                Ordering::Equal => {
                    if excluded {
                        left.advance().await?;
                    }
                    return Ok(Box::new(Iterator::new(left)?));
                }
                Ordering::Less => {
                    left_index = mid_index;
                }
            }
        }
        // When we don't find an exact match, we need to return the first entry *after* the
        // target key.  That might be the first entry in the next block (already pointed to by
        // the "right" iterator), but usually it's just the entry at the right index within the
        // "left" block.
        if right_index < left.item_count {
            left.seek_to_block_item(right_index)
                .context("Read index for offset of right pointer")?;
        } else if let Some(right) = right {
            return Ok(Box::new(Iterator::new(right)?));
        } else {
            // We want the end of the layer.  `right_index == left.item_count`, so `left_index ==
            // left.item_count - 1`, and the left iterator must be positioned on `left_index` since
            // we cannot have gone through the `Ordering::Greater` path above because `right_index`
            // would not be equal to `left.item_count` in that case, so all we need to do is advance
            // the iterator.
        }
        left.advance().await?;
        return Ok(Box::new(Iterator::new(left)?));
    }

    fn len(&self) -> usize {
        self.num_items
    }

    fn maybe_contains_key(&self, key: &K) -> bool {
        self.bloom_filter.as_ref().map_or(true, |f| f.maybe_contains(key))
    }

    fn lock(&self) -> Option<Arc<DropEvent>> {
        self.close_event.lock().clone()
    }

    async fn close(&self) {
        let listener = self.close_event.lock().take().expect("close already called").listen();
        listener.await;
    }

    fn get_version(&self) -> Version {
        return self.version;
    }

    fn record_inspect_data(self: Arc<Self>, node: &fuchsia_inspect::Node) {
        node.record_uint("num_items", self.num_items as u64);
        node.record_bool("persistent", true);
        node.record_uint("size", self.object_handle.get_size());
        if let Some(stats) = self.bloom_filter_stats.as_ref() {
            node.record_child("bloom_filter", move |node| {
                node.record_uint("size", stats.size as u64);
                node.record_uint("num_hashes", stats.num_hashes as u64);
                node.record_uint("fill_percentage", stats.fill_percentage as u64);
            });
        }
    }
}

// This ensures that item_count can't be overflowed below.
const_assert!(MAX_BLOCK_SIZE <= u16::MAX as u64 + 1);

// -- Writer support --

pub struct PersistentLayerWriter<W: WriteBytes, K: Key, V: LayerValue> {
    writer: W,
    block_size: u64,
    buf: Vec<u8>,
    buf_item_count: u16,
    item_count: usize,
    block_offsets: Vec<u16>,
    block_keys: Vec<u64>,
    bloom_filter: BloomFilterWriter<K>,
    _value: PhantomData<V>,
}

impl<W: WriteBytes, K: Key, V: LayerValue> PersistentLayerWriter<W, K, V> {
    /// Creates a new writer that will serialize items to the object accessible via |writer|
    /// (which provides a write interface to the object).
    pub async fn new(writer: W, num_items: usize, block_size: u64) -> Result<Self, Error> {
        Self::new_with_version(writer, num_items, block_size, LATEST_VERSION).await
    }

    async fn new_with_version(
        mut writer: W,
        num_items: usize,
        block_size: u64,
        version: Version,
    ) -> Result<Self, Error> {
        ensure!(block_size <= MAX_BLOCK_SIZE, FxfsError::NotSupported);
        ensure!(block_size >= MIN_BLOCK_SIZE, FxfsError::NotSupported);

        // Write the header block.
        let header = LayerHeader { magic: PERSISTENT_LAYER_MAGIC.clone(), block_size };
        let mut buf = vec![0u8; block_size as usize];
        {
            let mut cursor = std::io::Cursor::new(&mut buf[..]);
            version.serialize_into(&mut cursor)?;
            header.serialize_into(&mut cursor)?;
        }
        writer.write_bytes(&buf[..]).await?;

        let seed: u64 = rand::random();
        Ok(PersistentLayerWriter {
            writer,
            block_size,
            buf: Vec::new(),
            buf_item_count: 0,
            item_count: 0,
            block_offsets: Vec::new(),
            block_keys: Vec::new(),
            bloom_filter: BloomFilterWriter::new(seed, num_items),
            _value: PhantomData,
        })
    }

    /// Writes 'buf[..len]' out as a block.
    ///
    /// Blocks are fixed size, consisting of a 16-bit item count, data, zero padding
    /// and seek table at the end.
    async fn write_block(&mut self, len: usize) -> Result<(), Error> {
        if self.buf_item_count == 0 {
            return Ok(());
        }
        let seek_table_size = self.block_offsets.len() * PER_DATA_BLOCK_SEEK_ENTRY_SIZE;
        assert!(PER_DATA_BLOCK_HEADER_SIZE + seek_table_size + len <= self.block_size as usize);
        let mut cursor = std::io::Cursor::new(vec![0u8; self.block_size as usize]);
        cursor.write_u16::<LittleEndian>(self.buf_item_count)?;
        cursor.write_all(self.buf.drain(..len).as_ref())?;
        cursor.set_position(self.block_size - seek_table_size as u64);
        // Write the seek table. Entries are 2 bytes each and items are always at least 10 bytes.
        for &offset in &self.block_offsets {
            cursor.write_u16::<LittleEndian>(offset)?;
        }
        self.writer.write_bytes(cursor.get_ref()).await?;
        debug!(item_count = self.buf_item_count, byte_count = len; "wrote items");
        self.buf_item_count = 0;
        self.block_offsets.clear();
        Ok(())
    }

    // Assumes the writer is positioned to a new block.
    // Returns the size, in bytes, of the seek table.
    // Note that the writer will be positioned to exactly the end of the seek table, not to the end
    // of a block.
    async fn write_seek_table(&mut self) -> Result<usize, Error> {
        if self.block_keys.is_empty() {
            return Ok(0);
        }
        let size = self.block_keys.len() * std::mem::size_of::<u64>();
        self.buf.resize(size, 0);
        let mut len = 0;
        for key in &self.block_keys {
            LittleEndian::write_u64(&mut self.buf[len..len + std::mem::size_of::<u64>()], *key);
            len += std::mem::size_of::<u64>();
        }
        self.writer.write_bytes(&self.buf).await?;
        Ok(size)
    }

    // Assumes the writer is positioned to exactly the end of the seek table, which was
    // `seek_table_len` bytes.
    async fn write_info(
        &mut self,
        num_data_blocks: u64,
        bloom_filter_size_bytes: usize,
        seek_table_len: usize,
    ) -> Result<(), Error> {
        let block_size = self.writer.block_size() as usize;
        let layer_info = LayerInfo {
            num_items: self.item_count,
            num_data_blocks,
            bloom_filter_size_bytes,
            bloom_filter_seed: self.bloom_filter.seed(),
            bloom_filter_num_hashes: self.bloom_filter.num_hashes(),
        };
        let actual_len = {
            let mut cursor = std::io::Cursor::new(&mut self.buf);
            layer_info.serialize_into(&mut cursor)?;
            let layer_info_len = cursor.position();
            cursor.write_u64::<LittleEndian>(layer_info_len)?;
            cursor.position() as usize
        };

        // We want the LayerInfo to be at the end of the last block.  That might require creating a
        // new block if we don't have enough room.
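        // For example (illustrative numbers): with a 512-byte block, a seek_table_len of 100 and
        // an actual_len of 40, avail_in_block is 412, so we skip 372 bytes and the LayerInfo plus
        // its length marker ends exactly on the block boundary.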
        let avail_in_block = block_size - (seek_table_len % block_size);
        let to_skip = if avail_in_block < actual_len {
            block_size + avail_in_block - actual_len
        } else {
            avail_in_block - actual_len
        } as u64;
        self.writer.skip(to_skip).await?;
        self.writer.write_bytes(&self.buf[..actual_len]).await?;
        Ok(())
    }

    // Assumes the writer is positioned to a new block.
    // Returns the size of the bloom filter, in bytes.
    async fn write_bloom_filter(&mut self) -> Result<usize, Error> {
        if self.data_blocks() < MINIMUM_DATA_BLOCKS_FOR_BLOOM_FILTER {
            return Ok(0);
        }
        // TODO(https://fxbug.dev/323571978): Avoid bounce-buffering.
        let size = round_up(self.bloom_filter.serialized_size(), self.block_size as usize).unwrap();
        self.buf.resize(size, 0);
        let mut cursor = std::io::Cursor::new(&mut self.buf);
        self.bloom_filter.write(&mut cursor)?;
        self.writer.write_bytes(&self.buf).await?;
        Ok(self.bloom_filter.serialized_size())
    }

    // Returns the bloom filter writer.  Intended for testing, e.g. to gain access to the bloom
    // filter so it can then be corrupted.
    #[cfg(test)]
    pub(crate) fn bloom_filter(&mut self) -> &mut BloomFilterWriter<K> {
        &mut self.bloom_filter
    }

    fn data_blocks(&self) -> usize {
        if self.item_count == 0 { 0 } else { self.block_keys.len() + 1 }
    }
}

impl<W: WriteBytes + Send, K: Key, V: LayerValue> LayerWriter<K, V>
    for PersistentLayerWriter<W, K, V>
{
    async fn write(&mut self, item: ItemRef<'_, K, V>) -> Result<(), Error> {
        // Note the length before we write this item.
        let len = self.buf.len();
        item.key.serialize_into(&mut self.buf)?;
        item.value.serialize_into(&mut self.buf)?;
        self.buf.write_u64::<LittleEndian>(item.sequence)?;
        let mut added_offset = false;
        // Never record the first item. The offset is always the same.
        if self.buf_item_count > 0 {
            self.block_offsets.push(u16::try_from(len + PER_DATA_BLOCK_HEADER_SIZE).unwrap());
            added_offset = true;
        }

        // If writing the item took us over a block, flush the bytes in the buffer prior to this
        // item.
        if PER_DATA_BLOCK_HEADER_SIZE
            + self.buf.len()
            + (self.block_offsets.len() * PER_DATA_BLOCK_SEEK_ENTRY_SIZE)
            > self.block_size as usize - 1
        {
            if added_offset {
                // Drop the recently added offset from the list. The latest item will be the first
                // on the next block and have a known offset there.
                self.block_offsets.pop();
            }
            self.write_block(len).await?;

            // Note that this will not insert an entry for the first data block.
            self.block_keys.push(item.key.get_leading_u64());
        }

        self.bloom_filter.insert(&item.key);
        self.buf_item_count += 1;
        self.item_count += 1;
        Ok(())
    }

    async fn flush(&mut self) -> Result<(), Error> {
        self.write_block(self.buf.len()).await?;
        let data_blocks = self.data_blocks() as u64;
        let bloom_filter_len = self.write_bloom_filter().await?;
        let seek_table_len = self.write_seek_table().await?;
        self.write_info(data_blocks, bloom_filter_len, seek_table_len).await?;
        self.writer.complete().await
    }
}

impl<W: WriteBytes, K: Key, V: LayerValue> Drop for PersistentLayerWriter<W, K, V> {
    fn drop(&mut self) {
        if self.buf_item_count > 0 {
            warn!("Dropping unwritten items; did you forget to flush?");
        }
    }
}

#[cfg(test)]
mod tests {
    use super::{PersistentLayer, PersistentLayerWriter};
    use crate::filesystem::MAX_BLOCK_SIZE;
    use crate::lsm_tree::LayerIterator;
    use crate::lsm_tree::persistent_layer::MINIMUM_DATA_BLOCKS_FOR_BLOOM_FILTER;
    use crate::lsm_tree::types::{
        DefaultOrdUpperBound, FuzzyHash, Item, ItemRef, Layer, LayerKey, LayerWriter, MergeType,
        SortByU64,
    };
    use crate::object_handle::WriteBytes;
    use crate::round::round_up;
    use crate::serialized_types::{
        LATEST_VERSION, Version, Versioned, VersionedLatest, versioned_type,
    };
    use crate::testing::fake_object::{FakeObject, FakeObjectHandle};
    use crate::testing::writer::Writer;
    use fprint::TypeFingerprint;
    use fxfs_macros::FuzzyHash;
    use std::fmt::Debug;
    use std::hash::Hash;
    use std::ops::{Bound, Range};
    use std::sync::Arc;

    impl<W: WriteBytes> Debug for PersistentLayerWriter<W, i32, i32> {
        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> {
            f.debug_struct("PersistentLayerWriter")
                .field("block_size", &self.block_size)
                .field("item_count", &self.buf_item_count)
                .finish()
        }
    }

    #[fuchsia::test]
    async fn test_iterate_after_write() {
        const BLOCK_SIZE: u64 = 512;
        const ITEM_COUNT: i32 = 10000;

        let handle = FakeObjectHandle::new(Arc::new(FakeObject::new()));
        {
            let mut writer = PersistentLayerWriter::<_, i32, i32>::new(
                Writer::new(&handle).await,
                ITEM_COUNT as usize * 4,
                BLOCK_SIZE,
            )
            .await
            .expect("writer new");
            for i in 0..ITEM_COUNT {
                writer.write(Item::new(i, i).as_item_ref()).await.expect("write failed");
            }
            writer.flush().await.expect("flush failed");
        }
        let layer = PersistentLayer::<i32, i32>::open(handle).await.expect("new failed");
        let mut iterator = layer.seek(Bound::Unbounded).await.expect("seek failed");
        for i in 0..ITEM_COUNT {
            let ItemRef { key, value, .. } = iterator.get().expect("missing item");
            assert_eq!((key, value), (&i, &i));
            iterator.advance().await.expect("failed to advance");
        }
        assert!(iterator.get().is_none());
    }

    #[fuchsia::test]
    async fn test_seek_after_write() {
        const BLOCK_SIZE: u64 = 512;
        const ITEM_COUNT: i32 = 5000;

        let handle = FakeObjectHandle::new(Arc::new(FakeObject::new()));
        {
            let mut writer = PersistentLayerWriter::<_, i32, i32>::new(
                Writer::new(&handle).await,
                ITEM_COUNT as usize * 18,
                BLOCK_SIZE,
            )
            .await
            .expect("writer new");
            for i in 0..ITEM_COUNT {
                // Populate every other value as an item.
                writer.write(Item::new(i * 2, i * 2).as_item_ref()).await.expect("write failed");
            }
            writer.flush().await.expect("flush failed");
        }
        let layer = PersistentLayer::<i32, i32>::open(handle).await.expect("new failed");
        // Search for all values to check the in-between values.
        for i in 0..ITEM_COUNT * 2 {
            // We've written every other value, we expect to get either the exact value searched
            // for, or the next one after it. So round up to the nearest multiple of 2.
            let expected = round_up(i, 2).unwrap();
            let mut iterator = layer.seek(Bound::Included(&i)).await.expect("failed to seek");
            // We've written values up to (N-1)*2=2*N-2, so when looking for 2*N-1 we'll go off the
            // end of the layer and get back no item.
            if i >= (ITEM_COUNT * 2) - 1 {
                assert!(iterator.get().is_none());
            } else {
                let ItemRef { key, value, .. } = iterator.get().expect("missing item");
                assert_eq!((key, value), (&expected, &expected));
            }

            // Check that we can advance to the next item.
            iterator.advance().await.expect("failed to advance");
            // The highest value is 2*N-2, searching for 2*N-3 will find the last value, and
            // advancing will go off the end of the layer and return no item. If there was
            // previously no item, then it will latch and always return no item.
            if i >= (ITEM_COUNT * 2) - 3 {
                assert!(iterator.get().is_none());
            } else {
                let ItemRef { key, value, .. } = iterator.get().expect("missing item");
                let next = expected + 2;
                assert_eq!((key, value), (&next, &next));
            }
        }
    }

    #[fuchsia::test]
    async fn test_seek_unbounded() {
        const BLOCK_SIZE: u64 = 512;
        const ITEM_COUNT: i32 = 1000;

        let handle = FakeObjectHandle::new(Arc::new(FakeObject::new()));
        {
            let mut writer = PersistentLayerWriter::<_, i32, i32>::new(
                Writer::new(&handle).await,
                ITEM_COUNT as usize * 18,
                BLOCK_SIZE,
            )
            .await
            .expect("writer new");
            for i in 0..ITEM_COUNT {
                writer.write(Item::new(i, i).as_item_ref()).await.expect("write failed");
            }
            writer.flush().await.expect("flush failed");
        }
        let layer = PersistentLayer::<i32, i32>::open(handle).await.expect("new failed");
        let mut iterator = layer.seek(Bound::Unbounded).await.expect("failed to seek");
        let ItemRef { key, value, .. } = iterator.get().expect("missing item");
        assert_eq!((key, value), (&0, &0));

        // Check that we can advance to the next item.
        iterator.advance().await.expect("failed to advance");
        let ItemRef { key, value, .. } = iterator.get().expect("missing item");
        assert_eq!((key, value), (&1, &1));
    }

    #[fuchsia::test]
    async fn test_zero_items() {
        const BLOCK_SIZE: u64 = 512;

        let handle = FakeObjectHandle::new(Arc::new(FakeObject::new()));
        {
            let mut writer = PersistentLayerWriter::<_, i32, i32>::new(
                Writer::new(&handle).await,
                0,
                BLOCK_SIZE,
            )
            .await
            .expect("writer new");
            writer.flush().await.expect("flush failed");
        }

        let layer = PersistentLayer::<i32, i32>::open(handle).await.expect("new failed");
        let iterator = (layer.as_ref() as &dyn Layer<i32, i32>)
            .seek(Bound::Unbounded)
            .await
            .expect("seek failed");
        assert!(iterator.get().is_none())
    }

    #[fuchsia::test]
    async fn test_one_item() {
        const BLOCK_SIZE: u64 = 512;

        let handle = FakeObjectHandle::new(Arc::new(FakeObject::new()));
        {
            let mut writer = PersistentLayerWriter::<_, i32, i32>::new(
                Writer::new(&handle).await,
                1,
                BLOCK_SIZE,
            )
            .await
            .expect("writer new");
            writer.write(Item::new(42, 42).as_item_ref()).await.expect("write failed");
            writer.flush().await.expect("flush failed");
        }

        let layer = PersistentLayer::<i32, i32>::open(handle).await.expect("new failed");
        {
            let mut iterator = (layer.as_ref() as &dyn Layer<i32, i32>)
                .seek(Bound::Unbounded)
                .await
                .expect("seek failed");
            let ItemRef { key, value, .. } = iterator.get().expect("missing item");
            assert_eq!((key, value), (&42, &42));
            iterator.advance().await.expect("failed to advance");
            assert!(iterator.get().is_none())
        }
        {
            let mut iterator = (layer.as_ref() as &dyn Layer<i32, i32>)
                .seek(Bound::Included(&30))
                .await
                .expect("seek failed");
            let ItemRef { key, value, .. } = iterator.get().expect("missing item");
            assert_eq!((key, value), (&42, &42));
            iterator.advance().await.expect("failed to advance");
            assert!(iterator.get().is_none())
        }
        {
            let mut iterator = (layer.as_ref() as &dyn Layer<i32, i32>)
                .seek(Bound::Included(&42))
                .await
                .expect("seek failed");
            let ItemRef { key, value, .. } = iterator.get().expect("missing item");
            assert_eq!((key, value), (&42, &42));
            iterator.advance().await.expect("failed to advance");
            assert!(iterator.get().is_none())
        }
        {
            let iterator = (layer.as_ref() as &dyn Layer<i32, i32>)
                .seek(Bound::Included(&43))
                .await
                .expect("seek failed");
            assert!(iterator.get().is_none())
        }
    }

    #[fuchsia::test]
    async fn test_large_block_size() {
        // At the upper end of the supported size.
        const BLOCK_SIZE: u64 = MAX_BLOCK_SIZE;
        // Items will be 18 bytes each, so fill up a few blocks.
        const ITEM_COUNT: i32 = ((BLOCK_SIZE as i32) / 18) * 3;

        let handle =
            FakeObjectHandle::new_with_block_size(Arc::new(FakeObject::new()), BLOCK_SIZE as usize);
        {
            let mut writer = PersistentLayerWriter::<_, i32, i32>::new(
                Writer::new(&handle).await,
                ITEM_COUNT as usize * 18,
                BLOCK_SIZE,
            )
            .await
            .expect("writer new");
            // Use large values to force varint encoding to use consistent space.
            for i in 2000000000..(2000000000 + ITEM_COUNT) {
                writer.write(Item::new(i, i).as_item_ref()).await.expect("write failed");
            }
            writer.flush().await.expect("flush failed");
        }

        let layer = PersistentLayer::<i32, i32>::open(handle).await.expect("new failed");
        let mut iterator = layer.seek(Bound::Unbounded).await.expect("seek failed");
        for i in 2000000000..(2000000000 + ITEM_COUNT) {
            let ItemRef { key, value, .. } = iterator.get().expect("missing item");
            assert_eq!((key, value), (&i, &i));
            iterator.advance().await.expect("failed to advance");
        }
        assert!(iterator.get().is_none());
    }

    #[fuchsia::test]
    async fn test_overlarge_block_size() {
        // Double the maximum supported size.
        const BLOCK_SIZE: u64 = MAX_BLOCK_SIZE * 2;

        let handle =
            FakeObjectHandle::new_with_block_size(Arc::new(FakeObject::new()), BLOCK_SIZE as usize);
        PersistentLayerWriter::<_, i32, i32>::new(Writer::new(&handle).await, 0, BLOCK_SIZE)
            .await
            .expect_err("Creating writer with overlarge block size.");
    }

    #[fuchsia::test]
    async fn test_seek_bound_excluded() {
        const BLOCK_SIZE: u64 = 512;
        const ITEM_COUNT: i32 = 10000;

        let handle = FakeObjectHandle::new(Arc::new(FakeObject::new()));
        {
            let mut writer = PersistentLayerWriter::<_, i32, i32>::new(
                Writer::new(&handle).await,
                ITEM_COUNT as usize * 18,
                BLOCK_SIZE,
            )
            .await
            .expect("writer new");
            for i in 0..ITEM_COUNT {
                writer.write(Item::new(i, i).as_item_ref()).await.expect("write failed");
            }
            writer.flush().await.expect("flush failed");
        }
        let layer = PersistentLayer::<i32, i32>::open(handle).await.expect("new failed");

        for i in 9982..ITEM_COUNT {
            let mut iterator = layer.seek(Bound::Excluded(&i)).await.expect("failed to seek");
            let i_plus_one = i + 1;
            if i_plus_one < ITEM_COUNT {
                let ItemRef { key, value, .. } = iterator.get().expect("missing item");

                assert_eq!((key, value), (&i_plus_one, &i_plus_one));

                // Check that we can advance to the next item.
                iterator.advance().await.expect("failed to advance");
                let i_plus_two = i + 2;
                if i_plus_two < ITEM_COUNT {
                    let ItemRef { key, value, .. } = iterator.get().expect("missing item");
                    assert_eq!((key, value), (&i_plus_two, &i_plus_two));
                } else {
                    assert!(iterator.get().is_none());
                }
            } else {
                assert!(iterator.get().is_none());
            }
        }
    }

    #[derive(
        Clone,
        Eq,
        Hash,
        FuzzyHash,
        PartialEq,
        Debug,
        serde::Serialize,
        serde::Deserialize,
        TypeFingerprint,
        Versioned,
    )]
    struct TestKey(Range<u64>);
    versioned_type! { 1.. => TestKey }
    impl SortByU64 for TestKey {
        fn get_leading_u64(&self) -> u64 {
            self.0.start
        }
    }
    impl LayerKey for TestKey {
        fn merge_type(&self) -> crate::lsm_tree::types::MergeType {
            MergeType::OptimizedMerge
        }

        fn next_key(&self) -> Option<Self> {
            Some(TestKey(self.0.end..self.0.end + 1))
        }
    }
    impl Ord for TestKey {
        fn cmp(&self, other: &Self) -> std::cmp::Ordering {
            self.0.start.cmp(&other.0.start).then(self.0.end.cmp(&other.0.end))
        }
    }
    impl PartialOrd for TestKey {
        fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
            Some(self.cmp(other))
        }
    }
    impl DefaultOrdUpperBound for TestKey {}

    // Create a large spread of data across several blocks to ensure that no part of the range is
    // lost by the partial search using the layer seek table.
    #[fuchsia::test]
    async fn test_block_seek_duplicate_keys() {
        // At the lower end of the supported size.
        const BLOCK_SIZE: u64 = 512;
        // Items will be 37 bytes each: a max-length varint u64 is 9 bytes and there are 3 of
        // those (the key's start and end plus the value), then a fixed 8-byte sequence number,
        // plus 2 more bytes for the per-block offset entry.
        const ITEMS_TO_FILL_BLOCK: u64 = BLOCK_SIZE / 37;

        let mut to_find = Vec::new();

        let handle =
            FakeObjectHandle::new_with_block_size(Arc::new(FakeObject::new()), BLOCK_SIZE as usize);
        {
            let mut writer = PersistentLayerWriter::<_, TestKey, u64>::new(
                Writer::new(&handle).await,
                3 * BLOCK_SIZE as usize,
                BLOCK_SIZE,
            )
            .await
            .expect("writer new");

            // Make all values take up maximum space for varint encoding.
            let mut current_value = u32::MAX as u64 + 1;

            // First fill the front of the file with items sharing one leading u64, then probe
            // the start, middle and end of that range.
            {
                let items = ITEMS_TO_FILL_BLOCK * 3;
                for i in 0..items {
                    writer
                        .write(
                            Item::new(TestKey(current_value..current_value + i), current_value)
                                .as_item_ref(),
                        )
                        .await
                        .expect("write failed");
                }
                to_find.push(TestKey(current_value..current_value));
                to_find.push(TestKey(current_value..(current_value + (items / 2))));
                to_find.push(TestKey(current_value..current_value + (items - 1)));
                current_value += 1;
            }

            // Add some filler items, each with a distinct leading u64.
            {
                let items = ITEMS_TO_FILL_BLOCK * 3;
                for _ in 0..items {
                    writer
                        .write(
                            Item::new(TestKey(current_value..current_value), current_value)
                                .as_item_ref(),
                        )
                        .await
                        .expect("write failed");
                    current_value += 1;
                }
            }

            // Fill the middle with items sharing one leading u64, then probe the start, middle
            // and end of that range.
            {
                let items = ITEMS_TO_FILL_BLOCK * 3;
                for i in 0..items {
                    writer
                        .write(
                            Item::new(TestKey(current_value..current_value + i), current_value)
                                .as_item_ref(),
                        )
                        .await
                        .expect("write failed");
                }
                to_find.push(TestKey(current_value..current_value));
                to_find.push(TestKey(current_value..(current_value + (items / 2))));
                to_find.push(TestKey(current_value..current_value + (items - 1)));
                current_value += 1;
            }

            // Add some filler items, each with a distinct leading u64.
            {
                let items = ITEMS_TO_FILL_BLOCK * 3;
                for _ in 0..items {
                    writer
                        .write(
                            Item::new(TestKey(current_value..current_value), current_value)
                                .as_item_ref(),
                        )
                        .await
                        .expect("write failed");
                    current_value += 1;
                }
            }

            // Fill the end with items sharing one leading u64, then probe the start, middle
            // and end of that range.
            {
                let items = ITEMS_TO_FILL_BLOCK * 3;
                for i in 0..items {
                    writer
                        .write(
                            Item::new(TestKey(current_value..current_value + i), current_value)
                                .as_item_ref(),
                        )
                        .await
                        .expect("write failed");
                }
                to_find.push(TestKey(current_value..current_value));
                to_find.push(TestKey(current_value..(current_value + (items / 2))));
                to_find.push(TestKey(current_value..current_value + (items - 1)));
            }

            writer.flush().await.expect("flush failed");
        }

        let layer = PersistentLayer::<TestKey, u64>::open(handle).await.expect("new failed");
        for target in to_find {
            let iterator: Box<dyn LayerIterator<TestKey, u64>> =
                layer.seek(Bound::Included(&target)).await.expect("failed to seek");
            let ItemRef { key, .. } = iterator.get().expect("missing item");
            assert_eq!(&target, key);
        }
    }

    #[fuchsia::test]
    async fn test_two_seek_blocks() {
        // At the lower end of the supported block sizes.
        const BLOCK_SIZE: u64 = 512;
        // Items will be 37 bytes each: a max-length varint u64 is 9 bytes, and there are 3 of
        // those, plus a straight-encoded sequence number for another 8 bytes, then 2 more for
        // each item's offset entry in the block's offset list.
        const ITEMS_TO_FILL_BLOCK: u64 = BLOCK_SIZE / 37;
        // Add enough items to produce more data blocks than a single seek block can describe.
        // Seek table entries are 8 bytes, and the first data block gets no entry, so one extra
        // block's worth of items fills the table and one more overflows into a second block.
        const ITEM_COUNT: u64 = ITEMS_TO_FILL_BLOCK * ((BLOCK_SIZE / 8) + 2);
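        // With BLOCK_SIZE = 512 this works out to 13 * 66 = 858 items across 66 data blocks,
        // i.e. 65 seek entries, one more than the 64 (512 / 8) that fit in a single block.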

        let mut to_find = Vec::new();

        let handle =
            FakeObjectHandle::new_with_block_size(Arc::new(FakeObject::new()), BLOCK_SIZE as usize);
        {
            let mut writer = PersistentLayerWriter::<_, TestKey, u64>::new(
                Writer::new(&handle).await,
                ITEM_COUNT as usize * 18,
                BLOCK_SIZE,
            )
            .await
            .expect("writer new");

            // Make all values take up maximum space for varint encoding.
            let initial_value = u32::MAX as u64 + 1;
            for i in 0..ITEM_COUNT {
                writer
                    .write(
                        Item::new(TestKey(initial_value + i..initial_value + i), initial_value)
                            .as_item_ref(),
                    )
                    .await
                    .expect("write failed");
            }
            // Look at the start, middle and end.
            to_find.push(TestKey(initial_value..initial_value));
            let middle = initial_value + ITEM_COUNT / 2;
            to_find.push(TestKey(middle..middle));
            let end = initial_value + ITEM_COUNT - 1;
            to_find.push(TestKey(end..end));

            writer.flush().await.expect("flush failed");
        }

        let layer = PersistentLayer::<TestKey, u64>::open(handle).await.expect("new failed");
        for target in to_find {
            let iterator: Box<dyn LayerIterator<TestKey, u64>> =
                layer.seek(Bound::Included(&target)).await.expect("failed to seek");
            let ItemRef { key, .. } = iterator.get().expect("missing item");
            assert_eq!(&target, key);
        }
    }

    // Verifies behaviour when a seek block is filled exactly, ensuring that the resulting layer
    // file can still be opened and parsed afterwards.
    #[fuchsia::test]
    async fn test_full_seek_block() {
        const BLOCK_SIZE: u64 = 512;

        // Items will be 37 bytes each: a max-length varint u64 is 9 bytes, and there are 3 of
        // those, plus a straight-encoded sequence number for another 8 bytes, then 2 more for
        // each item's offset entry in the block's offset list.
        const ITEMS_TO_FILL_BLOCK: u64 = BLOCK_SIZE / 37;

        // How many entries there are in a seek table block.
        const SEEK_TABLE_ENTRIES: u64 = BLOCK_SIZE / 8;

        // Filling a seek block exactly would take one more data block's worth of items than
        // this; start deliberately low and sweep a range of item counts so that the boundary
        // of interest is definitely crossed.
        const START_ENTRIES_COUNT: u64 = ITEMS_TO_FILL_BLOCK * SEEK_TABLE_ENTRIES;
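        // With BLOCK_SIZE = 512 the sweep below runs from 13 * 64 = 832 items up to 858,
        // crossing the exactly-full point at 13 * 65 = 845 items (65 data blocks, which
        // produce a seek table of exactly 64 entries).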

        for entries in START_ENTRIES_COUNT..START_ENTRIES_COUNT + (ITEMS_TO_FILL_BLOCK * 2) {
            let handle = FakeObjectHandle::new_with_block_size(
                Arc::new(FakeObject::new()),
                BLOCK_SIZE as usize,
            );
            {
                let mut writer = PersistentLayerWriter::<_, TestKey, u64>::new(
                    Writer::new(&handle).await,
                    entries as usize,
                    BLOCK_SIZE,
                )
                .await
                .expect("writer new");

                // Make all values take up maximum space for varint encoding.
                let initial_value = u32::MAX as u64 + 1;
                for i in 0..entries {
                    writer
                        .write(
                            Item::new(TestKey(initial_value + i..initial_value + i), initial_value)
                                .as_item_ref(),
                        )
                        .await
                        .expect("write failed");
                }

                writer.flush().await.expect("flush failed");
            }
            PersistentLayer::<TestKey, u64>::open(handle).await.expect("new failed");
        }
    }

    #[fuchsia::test]
    async fn test_ignore_bloom_filter_on_older_versions() {
        const BLOCK_SIZE: u64 = 512;
        // Items will be 37 bytes each: a max-length varint u64 is 9 bytes, and there are 3 of
        // those, plus a straight-encoded sequence number for another 8 bytes, then 2 more for
        // each item's offset entry in the block's offset list.
        const ITEMS_TO_FILL_BLOCK: u64 = BLOCK_SIZE / 37;
        // Add enough items to create enough data blocks for a bloom filter to be necessary.
        const ITEM_COUNT: u64 =
            (1 + MINIMUM_DATA_BLOCKS_FOR_BLOOM_FILTER as u64) * ITEMS_TO_FILL_BLOCK;
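        // The extra block's worth of items guarantees the data block count exceeds
        // MINIMUM_DATA_BLOCKS_FOR_BLOOM_FILTER; the layer written at the older version
        // should report no bloom filter when opened, while the current one should.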

        let old_version_handle =
            FakeObjectHandle::new_with_block_size(Arc::new(FakeObject::new()), BLOCK_SIZE as usize);
        let current_version_handle =
            FakeObjectHandle::new_with_block_size(Arc::new(FakeObject::new()), BLOCK_SIZE as usize);
        {
            let mut old_version_writer =
                PersistentLayerWriter::<_, TestKey, u64>::new_with_version(
                    Writer::new(&old_version_handle).await,
                    ITEM_COUNT as usize,
                    BLOCK_SIZE,
                    Version { major: LATEST_VERSION.major - 1, minor: 0 },
                )
                .await
                .expect("writer new");
            let mut current_version_writer = PersistentLayerWriter::<_, TestKey, u64>::new(
                Writer::new(&current_version_handle).await,
                ITEM_COUNT as usize,
                BLOCK_SIZE,
            )
            .await
            .expect("writer new");

            // Make all values take up maximum space for varint encoding.
            let initial_value = u32::MAX as u64 + 1;
            for i in 0..ITEM_COUNT {
                old_version_writer
                    .write(
                        Item::new(TestKey(initial_value + i..initial_value + i), initial_value)
                            .as_item_ref(),
                    )
                    .await
                    .expect("write failed");
                current_version_writer
                    .write(
                        Item::new(TestKey(initial_value + i..initial_value + i), initial_value)
                            .as_item_ref(),
                    )
                    .await
                    .expect("write failed");
            }

            old_version_writer.flush().await.expect("flush failed");
            current_version_writer.flush().await.expect("flush failed");
        }

        let old_layer =
            PersistentLayer::<TestKey, u64>::open(old_version_handle).await.expect("open failed");
        let current_layer = PersistentLayer::<TestKey, u64>::open(current_version_handle)
            .await
            .expect("open failed");
        assert!(!old_layer.has_bloom_filter());
        assert!(current_layer.has_bloom_filter());
    }
}