// heapdump_vmo/allocations_table_v1.rs
1// Copyright 2023 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5use memory_mapped_vmo::MemoryMappedVmo;
6use static_assertions::const_assert;
7use std::collections::HashSet;
8use std::mem::{align_of, size_of};
9use std::ops::Deref;
10use std::sync::atomic::AtomicU32;
11use std::sync::atomic::Ordering::{Relaxed, SeqCst};
12use zerocopy::FromBytes;
13
14pub use crate::resources_table_v1::ResourceKey;
15
// Index of a `Node` within the VMO's node array; also used as the intrusive linked-list link.
type NodeIndex = u32;
// Array of per-bucket list heads, stored at offset 0 of the VMO.
type BucketHeads = [AtomicNodeIndex; NUM_BUCKETS];
const NUM_BUCKETS: usize = 1 << 16;
// Sentinel meaning "no node": an empty bucket or the end of a linked list.
const NODE_INVALID: NodeIndex = NodeIndex::MAX;

/// Minimum memory alignment of an allocation table.
pub const MIN_ALIGNMENT: usize = align_of::<Node>();
// The bucket heads sit at offset 0 and the nodes follow them; this guards the layout assumption
// relating the two alignments at compile time.
const_assert!(MIN_ALIGNMENT % align_of::<BucketHeads>() == 0);
24
// Define AtomicNodeIndex as a newtype so we can implement traits on it.
// `repr(transparent)` guarantees the same layout as the wrapped `AtomicU32`, which is what
// makes deriving `FromBytes` (reinterpreting VMO bytes as this type) sound.
#[repr(transparent)]
#[derive(Debug, FromBytes)]
struct AtomicNodeIndex(AtomicU32);
29
30impl AtomicNodeIndex {
31    const fn new(value: u32) -> AtomicNodeIndex {
32        AtomicNodeIndex(AtomicU32::new(value))
33    }
34}
35
// Delegate to the wrapped atomic so callers can use `load`/`store` directly on an
// `AtomicNodeIndex` without unwrapping the newtype at every call site.
impl Deref for AtomicNodeIndex {
    type Target = AtomicU32;

    fn deref(&self) -> &AtomicU32 {
        &self.0
    }
}
43
/// A single hash-table entry describing one tracked allocation.
///
/// `repr(C)` fixes the layout so the struct can be read back from a VMO snapshot.
#[repr(C)]
#[derive(Debug, FromBytes)]
pub struct Node {
    // Intrusive link: index of the next node in the same bucket (or in the free list while the
    // node is not in use), or NODE_INVALID at the end of the list.
    next: AtomicNodeIndex,
    pub address: u64,
    pub size: u64,
    pub timestamp: zx::MonotonicInstant,
    pub thread_info_key: ResourceKey,
    pub stack_trace_key: ResourceKey,
}
54
55/// Computes the capacity (number of nodes) of a memory region given its size (bytes).
56fn compute_nodes_count(num_bytes: usize) -> Result<usize, crate::Error> {
57    let Some(nodes_size) = num_bytes.checked_sub(size_of::<BucketHeads>()) else {
58        return Err(crate::Error::BufferTooSmall);
59    };
60
61    let num_nodes = nodes_size / size_of::<Node>();
62    if num_nodes > NODE_INVALID as usize {
63        return Err(crate::Error::BufferTooBig);
64    }
65
66    Ok(num_nodes)
67}
68
/// Mediates write access to a VMO containing a hash table of allocated blocks indexed by block
/// address.
///
/// All the updates happen atomically so that, at any time, snapshotting the VMO always results in a
/// coherent snapshot of the table.
///
/// Hash collisions are handled by maintaining per-bucket linked lists. Specifically, an array of
/// list heads, one for each bucket, is stored at the beginning of the VMO. The remaining part of
/// the VMO contains the linked list nodes.
pub struct AllocationsTableWriter {
    // Read-write mapping of the VMO holding the bucket heads followed by the node array.
    storage: MemoryMappedVmo,

    // Free nodes are managed by a simple watermark allocator and the capacity of the hash table
    // (i.e. the maximum number of nodes) is fixed.
    //
    // In order to make it possible to reuse nodes that have been freed, we also keep a linked list
    // of free nodes in addition to the watermark. When it is not empty, nodes are allocated by
    // popping the head of this list instead of incrementing the watermark.
    //
    // The watermark and the free list are not necessary for reading a snapshot. Therefore, we do
    // not need to store them in the VMO or to offer any snapshot guarantee about them, and they do
    // not need to be updated atomically.
    watermark: NodeIndex,
    max_num_nodes: usize,
    free_list_head: NodeIndex,
}
95
impl AllocationsTableWriter {
    /// Initializes a VMO as an empty table and creates an AllocationsTableWriter to write into it.
    ///
    /// # Safety
    /// The caller must guarantee that the `vmo` is not accessed by others while the returned
    /// instance is alive. However, it is always safe to take a snapshot and read that instead.
    pub unsafe fn new(vmo: &zx::Vmo) -> Result<AllocationsTableWriter, crate::Error> {
        let storage = unsafe { MemoryMappedVmo::new_readwrite(vmo)? };
        let max_num_nodes = compute_nodes_count(storage.vmo_size())?;

        let mut result = AllocationsTableWriter {
            storage,
            watermark: 0,
            max_num_nodes,
            free_list_head: NODE_INVALID,
        };

        // Clear the hash table: mark every bucket as empty.
        for bucket_index in 0..NUM_BUCKETS {
            result.bucket_head_at(bucket_index).store(NODE_INVALID, SeqCst);
        }

        Ok(result)
    }

    /// This is the hash function: it turns a block address into a bucket number.
    fn compute_bucket_index(address: u64) -> usize {
        // TODO(fdurso): The hash values generated by this function are not uniformly distributed.
        // Discarding the low 4 bits maps addresses that differ only within a 16-byte granule to
        // the same bucket.
        let tmp = (address >> 4) as usize;
        tmp % NUM_BUCKETS
    }

    /// Returns a mutable reference to the head of a given bucket's linked list.
    fn bucket_head_at(&mut self, bucket_index: usize) -> &mut AtomicNodeIndex {
        // The bucket heads are stored at the beginning of the VMO.
        let bucket_heads = self.storage.get_object_mut::<BucketHeads>(0).unwrap();
        &mut bucket_heads[bucket_index]
    }

    /// Returns a mutable reference to a given node.
    fn node_at(&mut self, node_index: NodeIndex) -> &mut Node {
        // The nodes are stored consecutively immediately after the bucket heads.
        let byte_offset = size_of::<BucketHeads>() + node_index as usize * size_of::<Node>();
        self.storage.get_object_mut::<Node>(byte_offset).unwrap()
    }

    /// Inserts a new entry in the hash table.
    ///
    /// Returns Ok(true) if it has been inserted, Ok(false) if a previous entry with the same
    /// address already existed, or an error if no free nodes are available.
    pub fn insert_allocation(
        &mut self,
        address: u64,
        size: u64,
        thread_info_key: ResourceKey,
        stack_trace_key: ResourceKey,
        timestamp: zx::MonotonicInstant,
    ) -> Result<bool, crate::Error> {
        let bucket_index = Self::compute_bucket_index(address);
        // Relaxed loads suffice here: `&mut self` means this is the only writer, and readers
        // only ever observe the table through VMO snapshots.
        let old_head = self.bucket_head_at(bucket_index).load(Relaxed);

        // Verify that no entry with the same address already exists.
        let mut curr_index = old_head;
        while curr_index != NODE_INVALID {
            let curr_data = self.node_at(curr_index);
            if curr_data.address == address {
                return Ok(false);
            }
            curr_index = curr_data.next.load(Relaxed);
        }

        // Insert a new entry at the head of the list. The node is fully written before the
        // bucket head is atomically redirected to it, so a snapshot never observes a
        // partially-initialized node.
        let new_index = self.pop_free_node()?;
        *self.node_at(new_index) = Node {
            address,
            size,
            timestamp,
            thread_info_key,
            stack_trace_key,
            next: AtomicNodeIndex::new(old_head),
        };
        self.bucket_head_at(bucket_index).store(new_index, SeqCst);
        Ok(true)
    }

    /// Removes an entry from the hash table and returns the value of the removed entry's size
    /// field.
    ///
    /// Returns None if no entry with the given address exists.
    pub fn erase_allocation(&mut self, address: u64) -> Option<u64> {
        let bucket_index = Self::compute_bucket_index(address);

        // Search the entry to be removed.
        let mut prev_index = None;
        let mut curr_index = self.bucket_head_at(bucket_index).load(Relaxed);
        while curr_index != NODE_INVALID {
            let curr_data = self.node_at(curr_index);
            let curr_data_size = curr_data.size;
            let curr_data_next = curr_data.next.load(Relaxed);

            // Is this the entry we were looking for?
            if curr_data.address == address {
                // Unlink with a single atomic store (either the predecessor's `next` or the
                // bucket head), so a snapshot sees the entry either fully linked or fully gone.
                if let Some(prev_index) = prev_index {
                    self.node_at(prev_index).next.store(curr_data_next, SeqCst);
                } else {
                    self.bucket_head_at(bucket_index).store(curr_data_next, SeqCst);
                }
                self.push_free_node(curr_index);
                return Some(curr_data_size);
            }

            prev_index = Some(curr_index);
            curr_index = curr_data.next.load(Relaxed);
        }

        // Not found.
        None
    }

    /// Atomically updates metadata for the given address in the hash table.
    ///
    /// Returns Ok(Some(old_size)) if it has been updated, Ok(None) if no entry with the given
    /// address exists, or an error if no free nodes are available.
    pub fn replace_allocation(
        &mut self,
        address: u64,
        size: u64,
        thread_info_key: ResourceKey,
        stack_trace_key: ResourceKey,
        timestamp: zx::MonotonicInstant,
    ) -> Result<Option<u64>, crate::Error> {
        // Updates are implemented as an atomic insertion of a new node at the head followed by an
        // atomic deletion of the old node. Therefore, while an update is in progress, two nodes
        // with the same address may exist. `AllocationsTableReader` is aware of this, and it
        // only returns the first one (i.e. the newer one) in such a case.
        // Note: the body of this function has been split in two parts to test reads of the
        // intermediate state.

        // Check preconditions and insert the new node.
        let Some((new_node, old_node, old_size)) = self.replace_allocation_begin(
            address,
            size,
            thread_info_key,
            stack_trace_key,
            timestamp,
        )?
        else {
            return Ok(None);
        };

        // Remove the old node.
        self.replace_allocation_end(new_node, old_node);
        Ok(Some(old_size))
    }

    /// First part of `replace_allocation`. Locates the old node and inserts the new one at the head
    fn replace_allocation_begin(
        &mut self,
        address: u64,
        size: u64,
        thread_info_key: ResourceKey,
        stack_trace_key: ResourceKey,
        timestamp: zx::MonotonicInstant,
    ) -> Result<Option<(NodeIndex, NodeIndex, u64)>, crate::Error> {
        let bucket_index = Self::compute_bucket_index(address);
        let old_head = self.bucket_head_at(bucket_index).load(Relaxed);

        // Search the existing entry with the requested address.
        let (old_index, old_size) = 'search: {
            let mut curr_index = old_head;
            while curr_index != NODE_INVALID {
                let curr_data = self.node_at(curr_index);
                if curr_data.address == address {
                    break 'search (curr_index, curr_data.size); // Found.
                }
                curr_index = curr_data.next.load(Relaxed);
            }

            return Ok(None); // Not found.
        };

        // Insert a new entry at the head of the list. As in `insert_allocation`, the node is
        // fully written before the head is redirected to it.
        let new_index = self.pop_free_node()?;
        *self.node_at(new_index) = Node {
            address,
            size,
            timestamp,
            thread_info_key,
            stack_trace_key,
            next: AtomicNodeIndex::new(old_head),
        };
        self.bucket_head_at(bucket_index).store(new_index, SeqCst);
        Ok(Some((new_index, old_index, old_size)))
    }

    // Second part of `replace_allocation`. Removes the old node.
    fn replace_allocation_end(&mut self, new_node: NodeIndex, old_node: NodeIndex) {
        let tail = self.node_at(old_node).next.load(Relaxed);

        // Note: `replace_allocation_begin` always inserts the new node at the head; therefore, the
        // old node will always be one of its successors.
        let mut scan_node = new_node;
        loop {
            assert_ne!(scan_node, NODE_INVALID, "the old node must be a successor of new node");
            let scan_data = self.node_at(scan_node);
            if scan_data.next.load(Relaxed) == old_node {
                // Unlink the old node with a single atomic store, then recycle it.
                scan_data.next.store(tail, SeqCst);
                self.push_free_node(old_node);
                return;
            }
            scan_node = scan_data.next.load(Relaxed);
        }
    }

    /// Inserts a node into the free list.
    fn push_free_node(&mut self, index: NodeIndex) {
        let current_head = self.free_list_head;

        // Reuse the node's `next` field as the free-list link. Relaxed is enough: the free list
        // is private to this writer and carries no snapshot guarantees (see the struct comment).
        let node_data = self.node_at(index);
        node_data.next.store(current_head, Relaxed);

        self.free_list_head = index;
    }

    /// Takes a node from the free list or allocates a new one if the free list is empty.
    ///
    /// Returns `Err(OutOfSpace)` when every node is in use.
    fn pop_free_node(&mut self) -> Result<NodeIndex, crate::Error> {
        if self.free_list_head != NODE_INVALID {
            // Pop a node from the free list.
            let result = self.free_list_head;
            self.free_list_head = self.node_at(result).next.load(Relaxed);
            Ok(result)
        } else if (self.watermark as usize) < self.max_num_nodes {
            // Allocate one node with the watermark allocator.
            let result = self.watermark;
            self.watermark += 1;
            Ok(result)
        } else {
            // We are out of space.
            Err(crate::Error::OutOfSpace)
        }
    }
}
336
/// Mediates read access to a VMO written by AllocationsTableWriter.
pub struct AllocationsTableReader {
    // Read-only mapping of the (snapshotted) VMO.
    storage: MemoryMappedVmo,
    // Capacity of the node array, used to validate node indices read from the VMO.
    max_num_nodes: usize,
}
342
343impl AllocationsTableReader {
344    /// # Safety
345    /// The caller must guarantee that the `vmo` is not accessed by others while the returned
346    /// instance is alive, usually by taking a snapshot of the VMO that AllocationsTableWriter
347    /// operates on and then reading the snapshot instead.
348    pub unsafe fn new(vmo: &zx::Vmo) -> Result<AllocationsTableReader, crate::Error> {
349        let storage = unsafe { MemoryMappedVmo::new_readonly(vmo)? };
350        let max_num_nodes = compute_nodes_count(storage.vmo_size())?;
351
352        Ok(AllocationsTableReader { storage, max_num_nodes })
353    }
354
355    /// Iterates all the allocated blocks that are present in the hash table.
356    pub fn iter(&self) -> impl Iterator<Item = Result<&Node, crate::Error>> {
357        // The bucket heads are stored at the beginning of the VMO.
358        let bucket_heads = self.storage.get_object::<BucketHeads>(0).unwrap();
359        bucket_heads.iter().map(|head| self.iterate_bucket(head.load(Relaxed))).flatten()
360    }
361
362    fn iterate_bucket(&self, head: NodeIndex) -> impl Iterator<Item = Result<&Node, crate::Error>> {
363        let mut curr_index = head;
364        let mut seen_addresses = HashSet::new();
365        std::iter::from_fn(move || {
366            while curr_index != NODE_INVALID {
367                if (curr_index as usize) < self.max_num_nodes {
368                    // The nodes are stored consecutively immediately after the bucket heads.
369                    let byte_offset =
370                        size_of::<BucketHeads>() + curr_index as usize * size_of::<Node>();
371                    let curr_data = self.storage.get_object::<Node>(byte_offset).unwrap();
372                    curr_index = curr_data.next.load(Relaxed);
373
374                    // Only return the first occurrence of each address. This ensures that only the
375                    // newer record is returned if an update is in progress.
376                    if seen_addresses.insert(curr_data.address) {
377                        return Some(Ok(curr_data));
378                    }
379                } else {
380                    return Some(Err(crate::Error::InvalidInput));
381                };
382            }
383
384            None
385        })
386    }
387}
388
#[cfg(test)]
mod tests {
    use super::*;
    use assert_matches::assert_matches;
    use std::alloc::Layout;
    use std::collections::HashMap;

    // Some tests below use this constant to ensure that each bucket has been used at least 10
    // times.
    const NUM_ITERATIONS: usize = NUM_BUCKETS * 10;

    // Ensure we allocate enough nodes to store all the blocks the tests need plus some buffer.
    const NUM_NODES: usize = NUM_ITERATIONS + 100;

    // Placeholder values used in the tests below:
    const THREAD_INFO_RESOURCE_KEY_1: ResourceKey = ResourceKey::from_raw(0x1122);
    const THREAD_INFO_RESOURCE_KEY_2: ResourceKey = ResourceKey::from_raw(0x3344);
    const STACK_TRACE_RESOURCE_KEY_1: ResourceKey = ResourceKey::from_raw(0x1212);
    const STACK_TRACE_RESOURCE_KEY_2: ResourceKey = ResourceKey::from_raw(0x3434);

    // Owns the VMO behind a writer so tests can also snapshot it into readers.
    struct TestStorage {
        vmo: zx::Vmo,
    }

    impl TestStorage {
        // Creates a VMO sized for `num_nodes` nodes and the writer backed by it.
        pub fn new(num_nodes: usize) -> (TestStorage, AllocationsTableWriter) {
            let nodes_layout = Layout::array::<Node>(num_nodes).unwrap();
            let (layout, nodes_offset) = Layout::new::<BucketHeads>().extend(nodes_layout).unwrap();
            assert_eq!(nodes_offset, size_of::<BucketHeads>());

            let vmo = zx::Vmo::create(layout.size() as u64).unwrap();

            // SAFETY: only one AllocationsTableWriter can ever be created.
            let writer = unsafe { AllocationsTableWriter::new(&vmo) }.unwrap();

            (TestStorage { vmo }, writer)
        }

        // Takes a read-only snapshot of the VMO and wraps it in a reader.
        fn create_reader(&self) -> AllocationsTableReader {
            let snapshot = self
                .vmo
                .create_child(
                    zx::VmoChildOptions::SNAPSHOT | zx::VmoChildOptions::NO_WRITE,
                    0,
                    self.vmo.get_size().unwrap(),
                )
                .unwrap();

            // SAFETY: Readers operate on the read-only VMO snapshot we just created.
            unsafe { AllocationsTableReader::new(&snapshot) }.unwrap()
        }
    }

    #[test]
    fn test_cannot_insert_twice() {
        let (_storage, mut writer) = TestStorage::new(NUM_NODES);

        let result = writer.insert_allocation(
            0x1234,
            0x5678,
            THREAD_INFO_RESOURCE_KEY_1,
            STACK_TRACE_RESOURCE_KEY_1,
            zx::MonotonicInstant::ZERO,
        );
        assert_eq!(result, Ok(true));

        // A second insertion at the same address must be rejected.
        let result = writer.insert_allocation(
            0x1234,
            0x5678,
            THREAD_INFO_RESOURCE_KEY_1,
            STACK_TRACE_RESOURCE_KEY_1,
            zx::MonotonicInstant::ZERO,
        );
        assert_eq!(result, Ok(false));
    }

    #[test]
    fn test_cannot_erase_twice() {
        let (_storage, mut writer) = TestStorage::new(NUM_NODES);

        let result = writer.insert_allocation(
            0x1234,
            0x5678,
            THREAD_INFO_RESOURCE_KEY_1,
            STACK_TRACE_RESOURCE_KEY_1,
            zx::MonotonicInstant::ZERO,
        );
        assert_eq!(result, Ok(true));

        // First erase returns the stored size; the second finds nothing.
        let result = writer.erase_allocation(0x1234);
        assert_eq!(result, Some(0x5678));

        let result = writer.erase_allocation(0x1234);
        assert_eq!(result, None);
    }

    #[test]
    fn test_out_of_space() {
        let (_storage, mut writer) = TestStorage::new(NUM_NODES);

        // Test that inserting up to `NUM_NODES` works.
        for i in 0..NUM_NODES {
            let result = writer.insert_allocation(
                i as u64,
                1,
                THREAD_INFO_RESOURCE_KEY_1,
                STACK_TRACE_RESOURCE_KEY_1,
                zx::MonotonicInstant::ZERO,
            );
            assert_eq!(result, Ok(true));
        }

        // Test that inserting an extra node fails.
        let result = writer.insert_allocation(
            NUM_NODES as u64,
            1,
            THREAD_INFO_RESOURCE_KEY_1,
            STACK_TRACE_RESOURCE_KEY_1,
            zx::MonotonicInstant::ZERO,
        );
        assert_eq!(result, Err(crate::Error::OutOfSpace));

        // Test that removing an element and then inserting again succeeds.
        let result = writer.erase_allocation(0);
        assert_eq!(result, Some(1));
        let result = writer.insert_allocation(
            NUM_NODES as u64,
            1,
            THREAD_INFO_RESOURCE_KEY_1,
            STACK_TRACE_RESOURCE_KEY_1,
            zx::MonotonicInstant::ZERO,
        );
        assert_eq!(result, Ok(true));
    }

    #[test]
    fn test_loop_insert_then_erase() {
        let (_storage, mut writer) = TestStorage::new(NUM_NODES);

        // Exercises node reuse: each iteration frees the node it just allocated.
        for i in 0..NUM_ITERATIONS {
            let result = writer.insert_allocation(
                i as u64,
                1,
                THREAD_INFO_RESOURCE_KEY_1,
                STACK_TRACE_RESOURCE_KEY_1,
                zx::MonotonicInstant::ZERO,
            );
            assert_eq!(result, Ok(true), "failed to insert 0x{:x}", i);

            let result = writer.erase_allocation(i as u64);
            assert_eq!(result, Some(1), "failed to erase 0x{:x}", i);
        }
    }

    #[test]
    fn test_bulk_insert_then_erase_same_order() {
        let (_storage, mut writer) = TestStorage::new(NUM_NODES);

        for i in 0..NUM_ITERATIONS {
            let result = writer.insert_allocation(
                i as u64,
                1,
                THREAD_INFO_RESOURCE_KEY_1,
                STACK_TRACE_RESOURCE_KEY_1,
                zx::MonotonicInstant::ZERO,
            );
            assert_eq!(result, Ok(true), "failed to insert 0x{:x}", i);
        }
        for i in 0..NUM_ITERATIONS {
            let result = writer.erase_allocation(i as u64);
            assert_eq!(result, Some(1), "failed to erase 0x{:x}", i);
        }
    }

    #[test]
    fn test_bulk_insert_then_erase_reverse_order() {
        let (_storage, mut writer) = TestStorage::new(NUM_NODES);

        for i in 0..NUM_ITERATIONS {
            let result = writer.insert_allocation(
                i as u64,
                1,
                THREAD_INFO_RESOURCE_KEY_1,
                STACK_TRACE_RESOURCE_KEY_1,
                zx::MonotonicInstant::ZERO,
            );
            assert_eq!(result, Ok(true), "failed to insert 0x{:x}", i);
        }
        // Erasing in reverse order exercises unlinking at different list positions.
        for i in (0..NUM_ITERATIONS).rev() {
            let result = writer.erase_allocation(i as u64);
            assert_eq!(result, Some(1), "failed to erase 0x{:x}", i);
        }
    }

    #[test]
    fn test_read_empty() {
        let (storage, _writer) = TestStorage::new(NUM_NODES);

        // Verify that it is empty.
        let reader = storage.create_reader();
        assert_eq!(reader.iter().count(), 0);
    }

    #[test]
    fn test_read_populated() {
        let (storage, mut writer) = TestStorage::new(NUM_NODES);

        // Fill the hash table.
        let mut expected_map = HashMap::new();
        for i in 0..NUM_ITERATIONS as u64 {
            let thread_info_key =
                if i % 4 >= 2 { THREAD_INFO_RESOURCE_KEY_1 } else { THREAD_INFO_RESOURCE_KEY_2 };
            let stack_trace_key =
                if i % 2 == 0 { STACK_TRACE_RESOURCE_KEY_1 } else { STACK_TRACE_RESOURCE_KEY_2 };
            let timestamp =
                zx::MonotonicInstant::from_nanos((NUM_ITERATIONS as i64 / 2) - (i as i64)); // test negative values too
            let result =
                writer.insert_allocation(i, 1, thread_info_key, stack_trace_key, timestamp);
            assert_eq!(result, Ok(true), "failed to insert 0x{:x}", i);

            expected_map.insert(i, (1, thread_info_key, stack_trace_key, timestamp));
        }

        // Read it back and verify.
        let reader = storage.create_reader();
        let mut actual_map = HashMap::new();
        for node in reader.iter() {
            let Node { address, size, thread_info_key, stack_trace_key, timestamp, .. } =
                node.unwrap();
            assert!(
                actual_map
                    .insert(*address, (*size, *thread_info_key, *stack_trace_key, *timestamp))
                    .is_none(),
                "address 0x{:x} was read more than once",
                address
            );
        }

        assert_eq!(actual_map, expected_map);
    }

    #[test]
    fn test_read_bad_bucket_head() {
        let (storage, mut writer) = TestStorage::new(NUM_NODES);

        // Initialize the hash table and corrupt one of the heads.
        // NODE_INVALID - 1 is far beyond the number of nodes allocated by these tests.
        writer.bucket_head_at(NUM_BUCKETS / 2).store(NODE_INVALID - 1, SeqCst);

        // Try to read it back and verify that the iterator returns an error.
        let reader = storage.create_reader();
        let contains_error = reader.iter().any(|e| e.is_err());

        assert!(contains_error);
    }

    #[test]
    fn test_read_bad_node_next() {
        let (storage, mut writer) = TestStorage::new(NUM_NODES);

        // Initialize the hash table and insert a node with a bad next pointer.
        writer.bucket_head_at(NUM_BUCKETS / 2).store(0, SeqCst);
        *writer.node_at(0) = Node {
            next: AtomicNodeIndex::new(NODE_INVALID - 1),
            address: 0x1234,
            size: 0x5678,
            thread_info_key: THREAD_INFO_RESOURCE_KEY_1,
            stack_trace_key: STACK_TRACE_RESOURCE_KEY_1,
            timestamp: zx::MonotonicInstant::from_nanos(99999999),
        };

        // Try to read it back and verify that the iterator returns an error.
        let reader = storage.create_reader();
        let contains_error = reader.iter().any(|e| e.is_err());

        assert!(contains_error);
    }

    #[test]
    fn test_replace() {
        let (storage, mut writer) = TestStorage::new(NUM_NODES);

        let result = writer.insert_allocation(
            0x1234,
            0x1111,
            THREAD_INFO_RESOURCE_KEY_1,
            STACK_TRACE_RESOURCE_KEY_1,
            zx::MonotonicInstant::ZERO,
        );
        assert_eq!(result, Ok(true));

        // Verify initial contents.
        let reader = storage.create_reader();
        let mut iter = reader.iter();
        assert_matches!(iter.next(), Some(Ok(Node { address: 0x1234, size: 0x1111, .. })));
        assert_matches!(iter.next(), None);

        let result = writer.replace_allocation_begin(
            0x1234,
            0x2222,
            THREAD_INFO_RESOURCE_KEY_2,
            STACK_TRACE_RESOURCE_KEY_2,
            zx::MonotonicInstant::ZERO,
        );
        let Ok(Some((new_node, old_node, old_size))) = result else {
            panic!("Update begin is supposed to succeed in this test, got {:?} instead", result)
        };
        assert_eq!(old_size, 0x1111);

        // Verify that intermediate contents already reflect the update: even though two nodes
        // for 0x1234 exist at this point, only the newer one must be visible.
        let reader = storage.create_reader();
        let mut iter = reader.iter();
        assert_matches!(iter.next(), Some(Ok(Node { address: 0x1234, size: 0x2222, .. })));
        assert_matches!(iter.next(), None);

        writer.replace_allocation_end(new_node, old_node);

        // Verify that final contents reflect the update.
        let reader = storage.create_reader();
        let mut iter = reader.iter();
        assert_matches!(iter.next(), Some(Ok(Node { address: 0x1234, size: 0x2222, .. })));
        assert_matches!(iter.next(), None);
    }

    #[test]
    fn test_cannot_replace_nonexisting() {
        let (_storage, mut writer) = TestStorage::new(NUM_NODES);

        let result = writer.replace_allocation(
            0x1234,
            0x5678,
            THREAD_INFO_RESOURCE_KEY_1,
            STACK_TRACE_RESOURCE_KEY_1,
            zx::MonotonicInstant::ZERO,
        );
        assert_eq!(result, Ok(None));
    }
}