heapdump_vmo/allocations_table_v1.rs

// Copyright 2023 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use static_assertions::const_assert;
use std::collections::HashSet;
use std::mem::{align_of, size_of};
use std::sync::atomic::AtomicU32;
use std::sync::atomic::Ordering::{Relaxed, SeqCst};

use crate::memory_mapped_vmo::{MemoryMappable, MemoryMappedVmo};
pub use crate::resources_table_v1::ResourceKey;

type NodeIndex = u32;
type AtomicNodeIndex = AtomicU32;
type BucketHeads = [AtomicNodeIndex; NUM_BUCKETS];
const NUM_BUCKETS: usize = 1 << 16;
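// Sentinel node index used to mark the end of a linked list.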
const NODE_INVALID: NodeIndex = NodeIndex::MAX;

/// Minimum memory alignment of an allocation table.
pub const MIN_ALIGNMENT: usize = align_of::<Node>();
const_assert!(MIN_ALIGNMENT % align_of::<BucketHeads>() == 0);

#[repr(C)]
#[derive(Debug)]
pub struct Node {
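    /// Index of the next node in the same bucket's linked list (or, for a free node, in the free
    /// list). NODE_INVALID marks the end of the list.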
    next: AtomicNodeIndex,
    pub address: u64,
    pub size: u64,
    pub timestamp: zx::MonotonicInstant,
    pub thread_info_key: ResourceKey,
    pub stack_trace_key: ResourceKey,
}

// SAFETY: Our accessor functions never access this type's memory non-atomically.
unsafe impl MemoryMappable for AtomicNodeIndex {}
unsafe impl MemoryMappable for Node {}

/// Computes the capacity (number of nodes) of a memory region given its size (bytes).
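///
/// The bucket heads occupy the beginning of the region and the remainder is divided into nodes.
/// For example, a region of `size_of::<BucketHeads>() + 3 * size_of::<Node>() + 1` bytes has a
/// capacity of exactly 3 nodes: the leftover byte cannot hold a fourth node.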
fn compute_nodes_count(num_bytes: usize) -> Result<usize, crate::Error> {
    let Some(nodes_size) = num_bytes.checked_sub(size_of::<BucketHeads>()) else {
        return Err(crate::Error::BufferTooSmall);
    };

    let num_nodes = nodes_size / size_of::<Node>();
    if num_nodes > NODE_INVALID as usize {
        return Err(crate::Error::BufferTooBig);
    }

    Ok(num_nodes)
}

/// Mediates write access to a VMO containing a hash table of allocated blocks indexed by block
/// address.
///
/// All the updates happen atomically so that, at any time, snapshotting the VMO always results in a
/// coherent snapshot of the table.
///
/// Hash collisions are handled by maintaining per-bucket linked lists. Specifically, an array of
/// list heads, one for each bucket, is stored at the beginning of the VMO. The remaining part of
/// the VMO contains the linked list nodes.
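///
/// # Example
///
/// A minimal usage sketch (not a doctest; `vmo_size`, `addr`, `size`, `thread_key` and
/// `stack_key` are placeholders):
///
/// ```ignore
/// let vmo = zx::Vmo::create(vmo_size).unwrap();
/// let mut writer = AllocationsTableWriter::new(&vmo).unwrap();
/// let inserted = writer
///     .insert_allocation(addr, size, thread_key, stack_key, zx::MonotonicInstant::ZERO)
///     .unwrap();
/// assert!(inserted);
/// assert_eq!(writer.erase_allocation(addr), Some(size));
/// ```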
pub struct AllocationsTableWriter {
    storage: MemoryMappedVmo,

    // Free nodes are managed by a simple watermark allocator and the capacity of the hash table
    // (i.e. the maximum number of nodes) is fixed.
    //
    // In order to make it possible to reuse nodes that have been freed, we also keep a linked list
    // of free nodes in addition to the watermark. When it is not empty, nodes are allocated by
    // popping the head of this list instead of incrementing the watermark.
    //
    // The watermark and the free list are not necessary for reading a snapshot. Therefore, we do
    // not need to store them in the VMO or to offer any snapshot guarantee about them, and they do
    // not need to be updated atomically.
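    //
    // For example, after allocating nodes 0, 1 and 2 (watermark = 3) and then freeing node 1, the
    // free list is [1]: the next allocation pops node 1 instead of advancing the watermark.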
    watermark: NodeIndex,
    max_num_nodes: usize,
    free_list_head: NodeIndex,
}

impl AllocationsTableWriter {
    /// Initializes a VMO as an empty table and creates an AllocationsTableWriter to write into it.
    ///
    /// # Safety
    /// The caller must guarantee that `vmo` is not accessed by others while the returned instance
    /// is alive.
    pub fn new(vmo: &zx::Vmo) -> Result<AllocationsTableWriter, crate::Error> {
        let storage = MemoryMappedVmo::new_readwrite(vmo)?;
        let max_num_nodes = compute_nodes_count(storage.vmo_size())?;

        let mut result = AllocationsTableWriter {
            storage,
            watermark: 0,
            max_num_nodes,
            free_list_head: NODE_INVALID,
        };

        // Clear the hash table.
        for bucket_index in 0..NUM_BUCKETS {
            result.bucket_head_at(bucket_index).store(NODE_INVALID, SeqCst);
        }

        Ok(result)
    }

    /// This is the hash function: it turns a block address into a bucket number.
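    ///
    /// For example, address 0x12345678 falls in bucket (0x12345678 >> 4) % NUM_BUCKETS = 0x4567.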
    fn compute_bucket_index(address: u64) -> usize {
        // TODO(fdurso): The hash values generated by this function are not uniformly distributed.
        let tmp = (address >> 4) as usize;
        tmp % NUM_BUCKETS
    }

    /// Returns a mutable reference to the head of a given bucket's linked list.
    fn bucket_head_at(&mut self, bucket_index: usize) -> &mut AtomicNodeIndex {
        // The bucket heads are stored at the beginning of the VMO.
        let bucket_heads = self.storage.get_object_mut::<BucketHeads>(0).unwrap();
        &mut bucket_heads[bucket_index]
    }

    /// Returns a mutable reference to a given node.
    fn node_at(&mut self, node_index: NodeIndex) -> &mut Node {
        // The nodes are stored consecutively immediately after the bucket heads.
        let byte_offset = size_of::<BucketHeads>() + node_index as usize * size_of::<Node>();
        self.storage.get_object_mut::<Node>(byte_offset).unwrap()
    }

    /// Inserts a new entry in the hash table.
    ///
    /// Returns Ok(true) if it has been inserted, Ok(false) if a previous entry with the same
    /// address already existed, or an error if no free nodes are available.
    pub fn insert_allocation(
        &mut self,
        address: u64,
        size: u64,
        thread_info_key: ResourceKey,
        stack_trace_key: ResourceKey,
        timestamp: zx::MonotonicInstant,
    ) -> Result<bool, crate::Error> {
        let bucket_index = Self::compute_bucket_index(address);
        let old_head = self.bucket_head_at(bucket_index).load(Relaxed);

        // Verify that no entry with the same address already exists.
        let mut curr_index = old_head;
        while curr_index != NODE_INVALID {
            let curr_data = self.node_at(curr_index);
            if curr_data.address == address {
                return Ok(false);
            }
            curr_index = curr_data.next.load(Relaxed);
        }

        // Insert a new entry at the head of the list.
        let new_index = self.pop_free_node()?;
        *self.node_at(new_index) = Node {
            address,
            size,
            timestamp,
            thread_info_key,
            stack_trace_key,
            next: AtomicNodeIndex::new(old_head),
        };
        self.bucket_head_at(bucket_index).store(new_index, SeqCst);
        Ok(true)
    }

    /// Removes an entry from the hash table and returns the value of the removed entry's size
    /// field.
    pub fn erase_allocation(&mut self, address: u64) -> Option<u64> {
        let bucket_index = Self::compute_bucket_index(address);

        // Search for the entry to be removed.
        let mut prev_index = None;
        let mut curr_index = self.bucket_head_at(bucket_index).load(Relaxed);
        while curr_index != NODE_INVALID {
            let curr_data = self.node_at(curr_index);
            let curr_data_size = curr_data.size;
            let curr_data_next = curr_data.next.load(Relaxed);

            // Is this the entry we were looking for?
            if curr_data.address == address {
                if let Some(prev_index) = prev_index {
                    self.node_at(prev_index).next.store(curr_data_next, SeqCst);
                } else {
                    self.bucket_head_at(bucket_index).store(curr_data_next, SeqCst);
                }
                self.push_free_node(curr_index);
                return Some(curr_data_size);
            }

            prev_index = Some(curr_index);
            curr_index = curr_data.next.load(Relaxed);
        }

        // Not found.
        None
    }

    /// Atomically updates metadata for the given address in the hash table.
    ///
    /// Returns Ok(Some(old_size)) if it has been updated, Ok(None) if no entry with the given
    /// address exists, or an error if no free nodes are available.
    pub fn replace_allocation(
        &mut self,
        address: u64,
        size: u64,
        thread_info_key: ResourceKey,
        stack_trace_key: ResourceKey,
        timestamp: zx::MonotonicInstant,
    ) -> Result<Option<u64>, crate::Error> {
        // Updates are implemented as an atomic insertion of a new node at the head followed by an
        // atomic deletion of the old node. Therefore, while an update is in progress, two nodes
        // with the same address may exist. `AllocationsTableReader` is aware of this, and it only
        // returns the first one (i.e. the newer one) in such a case.
        // Note: the body of this function has been split into two parts to test reads of the
        // intermediate state.
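        //
        // For example, when replacing address A, the bucket's list right after
        // `replace_allocation_begin` is [new A] -> ... -> [old A] -> tail: a concurrent snapshot
        // sees both nodes, but the reader only reports the newer one. `replace_allocation_end`
        // then unlinks [old A].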

        // Check preconditions and insert the new node.
        let Some((new_node, old_node, old_size)) = self.replace_allocation_begin(
            address,
            size,
            thread_info_key,
            stack_trace_key,
            timestamp,
        )?
        else {
            return Ok(None);
        };

        // Remove the old node.
        self.replace_allocation_end(new_node, old_node);
        Ok(Some(old_size))
    }

    /// First part of `replace_allocation`. Locates the old node and inserts the new one at the
    /// head.
    fn replace_allocation_begin(
        &mut self,
        address: u64,
        size: u64,
        thread_info_key: ResourceKey,
        stack_trace_key: ResourceKey,
        timestamp: zx::MonotonicInstant,
    ) -> Result<Option<(NodeIndex, NodeIndex, u64)>, crate::Error> {
        let bucket_index = Self::compute_bucket_index(address);
        let old_head = self.bucket_head_at(bucket_index).load(Relaxed);

        // Search for the existing entry with the requested address.
        let (old_index, old_size) = 'search: {
            let mut curr_index = old_head;
            while curr_index != NODE_INVALID {
                let curr_data = self.node_at(curr_index);
                if curr_data.address == address {
                    break 'search (curr_index, curr_data.size); // Found.
                }
                curr_index = curr_data.next.load(Relaxed);
            }

            return Ok(None); // Not found.
        };

        // Insert a new entry at the head of the list.
        let new_index = self.pop_free_node()?;
        *self.node_at(new_index) = Node {
            address,
            size,
            timestamp,
            thread_info_key,
            stack_trace_key,
            next: AtomicNodeIndex::new(old_head),
        };
        self.bucket_head_at(bucket_index).store(new_index, SeqCst);
        Ok(Some((new_index, old_index, old_size)))
    }

    /// Second part of `replace_allocation`. Removes the old node.
    fn replace_allocation_end(&mut self, new_node: NodeIndex, old_node: NodeIndex) {
        let tail = self.node_at(old_node).next.load(Relaxed);

        // Note: `replace_allocation_begin` always inserts the new node at the head; therefore, the
        // old node will always be one of its successors.
        let mut scan_node = new_node;
        loop {
            assert_ne!(scan_node, NODE_INVALID, "the old node must be a successor of the new node");
            let scan_data = self.node_at(scan_node);
            if scan_data.next.load(Relaxed) == old_node {
                scan_data.next.store(tail, SeqCst);
                self.push_free_node(old_node);
                return;
            }
            scan_node = scan_data.next.load(Relaxed);
        }
    }

    /// Inserts a node into the free list.
    fn push_free_node(&mut self, index: NodeIndex) {
        let current_head = self.free_list_head;

        let node_data = self.node_at(index);
        node_data.next.store(current_head, Relaxed);

        self.free_list_head = index;
    }

    /// Takes a node from the free list or allocates a new one if the free list is empty.
    fn pop_free_node(&mut self) -> Result<NodeIndex, crate::Error> {
        if self.free_list_head != NODE_INVALID {
            // Pop a node from the free list.
            let result = self.free_list_head;
            self.free_list_head = self.node_at(result).next.load(Relaxed);
            Ok(result)
        } else if (self.watermark as usize) < self.max_num_nodes {
            // Allocate one node with the watermark allocator.
            let result = self.watermark;
            self.watermark += 1;
            Ok(result)
        } else {
            // We are out of space.
            Err(crate::Error::OutOfSpace)
        }
    }
}

/// Mediates read access to a VMO written by AllocationsTableWriter.
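///
/// # Example
///
/// A minimal usage sketch (not a doctest; `snapshot_vmo` is a placeholder for a snapshot of a VMO
/// written by `AllocationsTableWriter`):
///
/// ```ignore
/// let reader = AllocationsTableReader::new(&snapshot_vmo).unwrap();
/// for node in reader.iter() {
///     let node = node.unwrap();
///     println!("block at {:#x}, {} bytes", node.address, node.size);
/// }
/// ```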
pub struct AllocationsTableReader {
    storage: MemoryMappedVmo,
    max_num_nodes: usize,
}

impl AllocationsTableReader {
    pub fn new(vmo: &zx::Vmo) -> Result<AllocationsTableReader, crate::Error> {
        let storage = MemoryMappedVmo::new_readonly(vmo)?;
        let max_num_nodes = compute_nodes_count(storage.vmo_size())?;

        Ok(AllocationsTableReader { storage, max_num_nodes })
    }

    /// Iterates over all the allocated blocks present in the hash table.
    pub fn iter(&self) -> impl Iterator<Item = Result<&Node, crate::Error>> {
        // The bucket heads are stored at the beginning of the VMO.
        let bucket_heads = self.storage.get_object::<BucketHeads>(0).unwrap();
        bucket_heads.iter().flat_map(|head| self.iterate_bucket(head.load(Relaxed)))
    }

    fn iterate_bucket(&self, head: NodeIndex) -> impl Iterator<Item = Result<&Node, crate::Error>> {
        let mut curr_index = head;
        let mut seen_addresses = HashSet::new();
        std::iter::from_fn(move || {
            while curr_index != NODE_INVALID {
                if (curr_index as usize) < self.max_num_nodes {
                    // The nodes are stored consecutively immediately after the bucket heads.
                    let byte_offset =
                        size_of::<BucketHeads>() + curr_index as usize * size_of::<Node>();
                    let curr_data = self.storage.get_object::<Node>(byte_offset).unwrap();
                    curr_index = curr_data.next.load(Relaxed);

                    // Only return the first occurrence of each address. This ensures that only the
                    // newer record is returned if an update is in progress.
                    if seen_addresses.insert(curr_data.address) {
                        return Some(Ok(curr_data));
                    }
                } else {
                    return Some(Err(crate::Error::InvalidInput));
                }
            }

            None
        })
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use assert_matches::assert_matches;
    use std::alloc::Layout;
    use std::collections::HashMap;

    // Some tests below use this constant to ensure that each bucket is used at least 10 times.
    const NUM_ITERATIONS: usize = NUM_BUCKETS * 10;

    // Ensure we allocate enough nodes to store all the blocks the tests need, plus some buffer.
    const NUM_NODES: usize = NUM_ITERATIONS + 100;

    // Placeholder values used in the tests below:
    const THREAD_INFO_RESOURCE_KEY_1: ResourceKey = ResourceKey::from_raw(0x1122);
    const THREAD_INFO_RESOURCE_KEY_2: ResourceKey = ResourceKey::from_raw(0x3344);
    const STACK_TRACE_RESOURCE_KEY_1: ResourceKey = ResourceKey::from_raw(0x1212);
    const STACK_TRACE_RESOURCE_KEY_2: ResourceKey = ResourceKey::from_raw(0x3434);

    struct TestStorage {
        vmo: zx::Vmo,
    }

    impl TestStorage {
        pub fn new(num_nodes: usize) -> TestStorage {
            let nodes_layout = Layout::array::<Node>(num_nodes).unwrap();
            let (layout, nodes_offset) = Layout::new::<BucketHeads>().extend(nodes_layout).unwrap();
            assert_eq!(nodes_offset, size_of::<BucketHeads>());

            let vmo = zx::Vmo::create(layout.size() as u64).unwrap();
            TestStorage { vmo }
        }

        fn create_writer(&self) -> AllocationsTableWriter {
            AllocationsTableWriter::new(&self.vmo).unwrap()
        }

        fn create_reader(&self) -> AllocationsTableReader {
            AllocationsTableReader::new(&self.vmo).unwrap()
        }
    }

    #[test]
    fn test_cannot_insert_twice() {
        let storage = TestStorage::new(NUM_NODES);
        let mut writer = storage.create_writer();

        let result = writer.insert_allocation(
            0x1234,
            0x5678,
            THREAD_INFO_RESOURCE_KEY_1,
            STACK_TRACE_RESOURCE_KEY_1,
            zx::MonotonicInstant::ZERO,
        );
        assert_eq!(result, Ok(true));

        let result = writer.insert_allocation(
            0x1234,
            0x5678,
            THREAD_INFO_RESOURCE_KEY_1,
            STACK_TRACE_RESOURCE_KEY_1,
            zx::MonotonicInstant::ZERO,
        );
        assert_eq!(result, Ok(false));
    }

    #[test]
    fn test_cannot_erase_twice() {
        let storage = TestStorage::new(NUM_NODES);
        let mut writer = storage.create_writer();

        let result = writer.insert_allocation(
            0x1234,
            0x5678,
            THREAD_INFO_RESOURCE_KEY_1,
            STACK_TRACE_RESOURCE_KEY_1,
            zx::MonotonicInstant::ZERO,
        );
        assert_eq!(result, Ok(true));

        let result = writer.erase_allocation(0x1234);
        assert_eq!(result, Some(0x5678));

        let result = writer.erase_allocation(0x1234);
        assert_eq!(result, None);
    }

    #[test]
    fn test_out_of_space() {
        let storage = TestStorage::new(NUM_NODES);
        let mut writer = storage.create_writer();

        // Test that inserting up to `NUM_NODES` works.
        for i in 0..NUM_NODES {
            let result = writer.insert_allocation(
                i as u64,
                1,
                THREAD_INFO_RESOURCE_KEY_1,
                STACK_TRACE_RESOURCE_KEY_1,
                zx::MonotonicInstant::ZERO,
            );
            assert_eq!(result, Ok(true));
        }

        // Test that inserting an extra node fails.
        let result = writer.insert_allocation(
            NUM_NODES as u64,
            1,
            THREAD_INFO_RESOURCE_KEY_1,
            STACK_TRACE_RESOURCE_KEY_1,
            zx::MonotonicInstant::ZERO,
        );
        assert_eq!(result, Err(crate::Error::OutOfSpace));

        // Test that removing an element and then inserting again succeeds.
        let result = writer.erase_allocation(0);
        assert_eq!(result, Some(1));
        let result = writer.insert_allocation(
            NUM_NODES as u64,
            1,
            THREAD_INFO_RESOURCE_KEY_1,
            STACK_TRACE_RESOURCE_KEY_1,
            zx::MonotonicInstant::ZERO,
        );
        assert_eq!(result, Ok(true));
    }

    #[test]
    fn test_loop_insert_then_erase() {
        let storage = TestStorage::new(NUM_NODES);
        let mut writer = storage.create_writer();

        for i in 0..NUM_ITERATIONS {
            let result = writer.insert_allocation(
                i as u64,
                1,
                THREAD_INFO_RESOURCE_KEY_1,
                STACK_TRACE_RESOURCE_KEY_1,
                zx::MonotonicInstant::ZERO,
            );
            assert_eq!(result, Ok(true), "failed to insert 0x{:x}", i);

            let result = writer.erase_allocation(i as u64);
            assert_eq!(result, Some(1), "failed to erase 0x{:x}", i);
        }
    }

    #[test]
    fn test_bulk_insert_then_erase_same_order() {
        let storage = TestStorage::new(NUM_NODES);
        let mut writer = storage.create_writer();

        for i in 0..NUM_ITERATIONS {
            let result = writer.insert_allocation(
                i as u64,
                1,
                THREAD_INFO_RESOURCE_KEY_1,
                STACK_TRACE_RESOURCE_KEY_1,
                zx::MonotonicInstant::ZERO,
            );
            assert_eq!(result, Ok(true), "failed to insert 0x{:x}", i);
        }
        for i in 0..NUM_ITERATIONS {
            let result = writer.erase_allocation(i as u64);
            assert_eq!(result, Some(1), "failed to erase 0x{:x}", i);
        }
    }

    #[test]
    fn test_bulk_insert_then_erase_reverse_order() {
        let storage = TestStorage::new(NUM_NODES);
        let mut writer = storage.create_writer();

        for i in 0..NUM_ITERATIONS {
            let result = writer.insert_allocation(
                i as u64,
                1,
                THREAD_INFO_RESOURCE_KEY_1,
                STACK_TRACE_RESOURCE_KEY_1,
                zx::MonotonicInstant::ZERO,
            );
            assert_eq!(result, Ok(true), "failed to insert 0x{:x}", i);
        }
        for i in (0..NUM_ITERATIONS).rev() {
            let result = writer.erase_allocation(i as u64);
            assert_eq!(result, Some(1), "failed to erase 0x{:x}", i);
        }
    }

    #[test]
    fn test_read_empty() {
        let storage = TestStorage::new(NUM_NODES);

        // Initialize the hash table.
        storage.create_writer();

        // Read it back and verify that it is empty.
        let reader = storage.create_reader();
        assert_eq!(reader.iter().count(), 0);
    }

    #[test]
    fn test_read_populated() {
        let storage = TestStorage::new(NUM_NODES);

        // Fill the hash table.
        let mut writer = storage.create_writer();
        let mut expected_map = HashMap::new();
        for i in 0..NUM_ITERATIONS as u64 {
            let thread_info_key =
                if i % 4 >= 2 { THREAD_INFO_RESOURCE_KEY_1 } else { THREAD_INFO_RESOURCE_KEY_2 };
            let stack_trace_key =
                if i % 2 == 0 { STACK_TRACE_RESOURCE_KEY_1 } else { STACK_TRACE_RESOURCE_KEY_2 };
            let timestamp =
                zx::MonotonicInstant::from_nanos((NUM_ITERATIONS as i64 / 2) - (i as i64)); // Test negative values too.
            let result =
                writer.insert_allocation(i, 1, thread_info_key, stack_trace_key, timestamp);
            assert_eq!(result, Ok(true), "failed to insert 0x{:x}", i);

            expected_map.insert(i, (1, thread_info_key, stack_trace_key, timestamp));
        }

        // Read it back and verify.
        let reader = storage.create_reader();
        let mut actual_map = HashMap::new();
        for node in reader.iter() {
            let Node { address, size, thread_info_key, stack_trace_key, timestamp, .. } =
                node.unwrap();
            assert!(
                actual_map
                    .insert(*address, (*size, *thread_info_key, *stack_trace_key, *timestamp))
                    .is_none(),
                "address 0x{:x} was read more than once",
                address
            );
        }

        assert_eq!(actual_map, expected_map);
    }

    #[test]
    fn test_read_bad_bucket_head() {
        let storage = TestStorage::new(NUM_NODES);

        // Initialize the hash table and corrupt one of the heads.
        let mut writer = storage.create_writer();
        writer.bucket_head_at(NUM_BUCKETS / 2).store(NODE_INVALID - 1, SeqCst);

        // Try to read it back and verify that the iterator returns an error.
        let reader = storage.create_reader();
        let contains_error = reader.iter().any(|e| e.is_err());

        assert!(contains_error);
    }

    #[test]
    fn test_read_bad_node_next() {
        let storage = TestStorage::new(NUM_NODES);

        // Initialize the hash table and insert a node with a bad next pointer.
        let mut writer = storage.create_writer();
        writer.bucket_head_at(NUM_BUCKETS / 2).store(0, SeqCst);
        *writer.node_at(0) = Node {
            next: AtomicNodeIndex::new(NODE_INVALID - 1),
            address: 0x1234,
            size: 0x5678,
            thread_info_key: THREAD_INFO_RESOURCE_KEY_1,
            stack_trace_key: STACK_TRACE_RESOURCE_KEY_1,
            timestamp: zx::MonotonicInstant::from_nanos(99999999),
        };

        // Try to read it back and verify that the iterator returns an error.
        let reader = storage.create_reader();
        let contains_error = reader.iter().any(|e| e.is_err());

        assert!(contains_error);
    }

    #[test]
    fn test_replace() {
        let storage = TestStorage::new(NUM_NODES);
        let mut writer = storage.create_writer();

        let result = writer.insert_allocation(
            0x1234,
            0x1111,
            THREAD_INFO_RESOURCE_KEY_1,
            STACK_TRACE_RESOURCE_KEY_1,
            zx::MonotonicInstant::ZERO,
        );
        assert_eq!(result, Ok(true));

        // Verify initial contents.
        let reader = storage.create_reader();
        let mut iter = reader.iter();
        assert_matches!(iter.next(), Some(Ok(Node { address: 0x1234, size: 0x1111, .. })));
        assert_matches!(iter.next(), None);

        let result = writer.replace_allocation_begin(
            0x1234,
            0x2222,
            THREAD_INFO_RESOURCE_KEY_2,
            STACK_TRACE_RESOURCE_KEY_2,
            zx::MonotonicInstant::ZERO,
        );
        let Ok(Some((new_node, old_node, old_size))) = result else {
            panic!("Update begin is supposed to succeed in this test, got {:?} instead", result)
        };
        assert_eq!(old_size, 0x1111);

        // Verify that intermediate contents already reflect the update.
        let reader = storage.create_reader();
        let mut iter = reader.iter();
        assert_matches!(iter.next(), Some(Ok(Node { address: 0x1234, size: 0x2222, .. })));
        assert_matches!(iter.next(), None);

        writer.replace_allocation_end(new_node, old_node);

        // Verify that final contents reflect the update.
        let reader = storage.create_reader();
        let mut iter = reader.iter();
        assert_matches!(iter.next(), Some(Ok(Node { address: 0x1234, size: 0x2222, .. })));
        assert_matches!(iter.next(), None);
    }

    #[test]
    fn test_cannot_replace_nonexisting() {
        let storage = TestStorage::new(NUM_NODES);
        let mut writer = storage.create_writer();

        let result = writer.replace_allocation(
            0x1234,
            0x5678,
            THREAD_INFO_RESOURCE_KEY_1,
            STACK_TRACE_RESOURCE_KEY_1,
            zx::MonotonicInstant::ZERO,
        );
        assert_eq!(result, Ok(None));
    }
}