heapdump_vmo/allocations_table_v1.rs

// Copyright 2023 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use static_assertions::const_assert;
use std::collections::HashSet;
use std::mem::{align_of, size_of};
use std::ops::Deref;
use std::sync::atomic::AtomicU32;
use std::sync::atomic::Ordering::{Relaxed, SeqCst};

use crate::memory_mapped_vmo::{MemoryMappable, MemoryMappedVmo};
pub use crate::resources_table_v1::ResourceKey;

type NodeIndex = u32;
type BucketHeads = [AtomicNodeIndex; NUM_BUCKETS];
const NUM_BUCKETS: usize = 1 << 16;
const NODE_INVALID: NodeIndex = NodeIndex::MAX;

/// Minimum memory alignment of an allocation table.
pub const MIN_ALIGNMENT: usize = align_of::<Node>();
const_assert!(MIN_ALIGNMENT % align_of::<BucketHeads>() == 0);

// Define AtomicNodeIndex as a newtype so we can implement traits on it.
#[repr(transparent)]
#[derive(Debug)]
struct AtomicNodeIndex(AtomicU32);

impl AtomicNodeIndex {
    const fn new(value: u32) -> AtomicNodeIndex {
        AtomicNodeIndex(AtomicU32::new(value))
    }
}

impl Deref for AtomicNodeIndex {
    type Target = AtomicU32;

    fn deref(&self) -> &AtomicU32 {
        &self.0
    }
}

// SAFETY: Our accessor functions never access this type's memory non-atomically.
unsafe impl MemoryMappable for AtomicNodeIndex {}

#[repr(C)]
#[derive(Debug)]
pub struct Node {
    next: AtomicNodeIndex,
    pub address: u64,
    pub size: u64,
    pub timestamp: zx::MonotonicInstant,
    pub thread_info_key: ResourceKey,
    pub stack_trace_key: ResourceKey,
}

unsafe impl MemoryMappable for Node {}

/// Computes the capacity (number of nodes) of a memory region given its size (bytes).
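///
/// Worked example (illustrative figures): with a 1 MiB region, the first 256 KiB
/// (`NUM_BUCKETS * size_of::<AtomicNodeIndex>()` bytes) is reserved for the bucket heads, and the
/// remaining 768 KiB is divided by `size_of::<Node>()` to obtain the node capacity.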
fn compute_nodes_count(num_bytes: usize) -> Result<usize, crate::Error> {
    let Some(nodes_size) = num_bytes.checked_sub(size_of::<BucketHeads>()) else {
        return Err(crate::Error::BufferTooSmall);
    };

    let num_nodes = nodes_size / size_of::<Node>();
    if num_nodes > NODE_INVALID as usize {
        return Err(crate::Error::BufferTooBig);
    }

    Ok(num_nodes)
}

/// Mediates write access to a VMO containing a hash table of allocated blocks indexed by block
/// address.
///
/// All the updates happen atomically so that, at any time, snapshotting the VMO always results in a
/// coherent snapshot of the table.
///
/// Hash collisions are handled by maintaining per-bucket linked lists. Specifically, an array of
/// list heads, one for each bucket, is stored at the beginning of the VMO. The remaining part of
/// the VMO contains the linked list nodes.
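///
/// # Example
///
/// A minimal usage sketch (illustrative only, not compiled; the resource keys below are
/// placeholder values):
///
/// ```ignore
/// let vmo = zx::Vmo::create(1024 * 1024)?;
/// // SAFETY: nothing else accesses `vmo` while the writer is alive.
/// let mut writer = unsafe { AllocationsTableWriter::new(&vmo)? };
///
/// let thread_key = ResourceKey::from_raw(0x1122); // placeholder
/// let stack_key = ResourceKey::from_raw(0x3344); // placeholder
/// writer.insert_allocation(0x1000, 64, thread_key, stack_key, zx::MonotonicInstant::ZERO)?;
/// assert_eq!(writer.erase_allocation(0x1000), Some(64));
/// ```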
pub struct AllocationsTableWriter {
    storage: MemoryMappedVmo,

    // Free nodes are managed by a simple watermark allocator and the capacity of the hash table
    // (i.e. the maximum number of nodes) is fixed.
    //
    // In order to make it possible to reuse nodes that have been freed, we also keep a linked list
    // of free nodes in addition to the watermark. When it is not empty, nodes are allocated by
    // popping the head of this list instead of incrementing the watermark.
    //
    // The watermark and the free list are not necessary for reading a snapshot. Therefore, we do
    // not need to store them in the VMO or to offer any snapshot guarantee about them, and they do
    // not need to be updated atomically.
    watermark: NodeIndex,
    max_num_nodes: usize,
    free_list_head: NodeIndex,
}

impl AllocationsTableWriter {
    /// Initializes a VMO as an empty table and creates an AllocationsTableWriter to write into it.
    ///
    /// # Safety
    /// The caller must guarantee that the `vmo` is not accessed by others while the returned
    /// instance is alive. However, it is always safe to take a snapshot and read that instead.
    pub unsafe fn new(vmo: &zx::Vmo) -> Result<AllocationsTableWriter, crate::Error> {
        let storage = MemoryMappedVmo::new_readwrite(vmo)?;
        let max_num_nodes = compute_nodes_count(storage.vmo_size())?;

        let mut result = AllocationsTableWriter {
            storage,
            watermark: 0,
            max_num_nodes,
            free_list_head: NODE_INVALID,
        };

        // Clear the hash table.
        for bucket_index in 0..NUM_BUCKETS {
            result.bucket_head_at(bucket_index).store(NODE_INVALID, SeqCst);
        }

        Ok(result)
    }

    /// This is the hash function: it turns a block address into a bucket number.
    fn compute_bucket_index(address: u64) -> usize {
        // TODO(fdurso): The hash values generated by this function are not uniformly distributed.
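        // For instance (illustrative): address 0x3fac_1230 >> 4 = 0x03fa_c123, and
        // 0x03fa_c123 % NUM_BUCKETS = 0xc123, so only address bits 4..20 select the bucket.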
        let tmp = (address >> 4) as usize;
        tmp % NUM_BUCKETS
    }

    /// Returns a mutable reference to the head of a given bucket's linked list.
    fn bucket_head_at(&mut self, bucket_index: usize) -> &mut AtomicNodeIndex {
        // The bucket heads are stored at the beginning of the VMO.
        let bucket_heads = self.storage.get_object_mut::<BucketHeads>(0).unwrap();
        &mut bucket_heads[bucket_index]
    }

    /// Returns a mutable reference to a given node.
    fn node_at(&mut self, node_index: NodeIndex) -> &mut Node {
        // The nodes are stored consecutively immediately after the bucket heads.
        let byte_offset = size_of::<BucketHeads>() + node_index as usize * size_of::<Node>();
        self.storage.get_object_mut::<Node>(byte_offset).unwrap()
    }

    /// Inserts a new entry in the hash table.
    ///
    /// Returns Ok(true) if it has been inserted, Ok(false) if a previous entry with the same
    /// address already existed, or an error if no free nodes are available.
    pub fn insert_allocation(
        &mut self,
        address: u64,
        size: u64,
        thread_info_key: ResourceKey,
        stack_trace_key: ResourceKey,
        timestamp: zx::MonotonicInstant,
    ) -> Result<bool, crate::Error> {
        let bucket_index = Self::compute_bucket_index(address);
        let old_head = self.bucket_head_at(bucket_index).load(Relaxed);

        // Verify that no entry with the same address already exists.
        let mut curr_index = old_head;
        while curr_index != NODE_INVALID {
            let curr_data = self.node_at(curr_index);
            if curr_data.address == address {
                return Ok(false);
            }
            curr_index = curr_data.next.load(Relaxed);
        }

        // Insert a new entry at the head of the list.
        let new_index = self.pop_free_node()?;
        *self.node_at(new_index) = Node {
            address,
            size,
            timestamp,
            thread_info_key,
            stack_trace_key,
            next: AtomicNodeIndex::new(old_head),
        };
        self.bucket_head_at(bucket_index).store(new_index, SeqCst);
        Ok(true)
    }

    /// Removes an entry from the hash table and returns the value of the removed entry's size
    /// field.
    pub fn erase_allocation(&mut self, address: u64) -> Option<u64> {
        let bucket_index = Self::compute_bucket_index(address);

        // Search for the entry to be removed.
        let mut prev_index = None;
        let mut curr_index = self.bucket_head_at(bucket_index).load(Relaxed);
        while curr_index != NODE_INVALID {
            let curr_data = self.node_at(curr_index);
            let curr_data_size = curr_data.size;
            let curr_data_next = curr_data.next.load(Relaxed);

            // Is this the entry we were looking for?
            if curr_data.address == address {
                if let Some(prev_index) = prev_index {
                    self.node_at(prev_index).next.store(curr_data_next, SeqCst);
                } else {
                    self.bucket_head_at(bucket_index).store(curr_data_next, SeqCst);
                }
                self.push_free_node(curr_index);
                return Some(curr_data_size);
            }

            prev_index = Some(curr_index);
            curr_index = curr_data.next.load(Relaxed);
        }

        // Not found.
        None
    }

    /// Atomically updates metadata for the given address in the hash table.
    ///
    /// Returns Ok(Some(old_size)) if it has been updated, Ok(None) if no entry with the given
    /// address exists, or an error if no free nodes are available.
    pub fn replace_allocation(
        &mut self,
        address: u64,
        size: u64,
        thread_info_key: ResourceKey,
        stack_trace_key: ResourceKey,
        timestamp: zx::MonotonicInstant,
    ) -> Result<Option<u64>, crate::Error> {
        // Updates are implemented as an atomic insertion of a new node at the head followed by an
        // atomic deletion of the old node. Therefore, while an update is in progress, two nodes
        // with the same address may exist. `AllocationsTableReader` is aware of this, and it
        // only returns the first one (i.e. the newer one) in such a case.
        // Note: the body of this function has been split into two parts to test reads of the
        // intermediate state.
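        //
        // Intermediate state (illustrative): while an update of address A is in progress, the
        // bucket list looks like `head -> [A, new data] -> ... -> [A, old data] -> tail`, and a
        // reader reports only the first [A] node because it deduplicates by address.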

        // Check preconditions and insert the new node.
        let Some((new_node, old_node, old_size)) = self.replace_allocation_begin(
            address,
            size,
            thread_info_key,
            stack_trace_key,
            timestamp,
        )?
        else {
            return Ok(None);
        };

        // Remove the old node.
        self.replace_allocation_end(new_node, old_node);
        Ok(Some(old_size))
    }

    /// First part of `replace_allocation`. Locates the old node and inserts the new one at the
    /// head.
    fn replace_allocation_begin(
        &mut self,
        address: u64,
        size: u64,
        thread_info_key: ResourceKey,
        stack_trace_key: ResourceKey,
        timestamp: zx::MonotonicInstant,
    ) -> Result<Option<(NodeIndex, NodeIndex, u64)>, crate::Error> {
        let bucket_index = Self::compute_bucket_index(address);
        let old_head = self.bucket_head_at(bucket_index).load(Relaxed);

        // Search for the existing entry with the requested address.
        let (old_index, old_size) = 'search: {
            let mut curr_index = old_head;
            while curr_index != NODE_INVALID {
                let curr_data = self.node_at(curr_index);
                if curr_data.address == address {
                    break 'search (curr_index, curr_data.size); // Found.
                }
                curr_index = curr_data.next.load(Relaxed);
            }

            return Ok(None); // Not found.
        };

        // Insert a new entry at the head of the list.
        let new_index = self.pop_free_node()?;
        *self.node_at(new_index) = Node {
            address,
            size,
            timestamp,
            thread_info_key,
            stack_trace_key,
            next: AtomicNodeIndex::new(old_head),
        };
        self.bucket_head_at(bucket_index).store(new_index, SeqCst);
        Ok(Some((new_index, old_index, old_size)))
    }

    /// Second part of `replace_allocation`. Removes the old node.
    fn replace_allocation_end(&mut self, new_node: NodeIndex, old_node: NodeIndex) {
        let tail = self.node_at(old_node).next.load(Relaxed);

        // Note: `replace_allocation_begin` always inserts the new node at the head; therefore, the
        // old node will always be one of its successors.
        let mut scan_node = new_node;
        loop {
            assert_ne!(scan_node, NODE_INVALID, "the old node must be a successor of new node");
            let scan_data = self.node_at(scan_node);
            if scan_data.next.load(Relaxed) == old_node {
                scan_data.next.store(tail, SeqCst);
                self.push_free_node(old_node);
                return;
            }
            scan_node = scan_data.next.load(Relaxed);
        }
    }

    /// Inserts a node into the free list.
    fn push_free_node(&mut self, index: NodeIndex) {
        let current_head = self.free_list_head;

        let node_data = self.node_at(index);
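        // The node's `next` field doubles as the free-list link. Relaxed ordering suffices here
        // because, as noted in the struct-level comment, the free list is private to this writer
        // and is never traversed through a snapshot.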
        node_data.next.store(current_head, Relaxed);

        self.free_list_head = index;
    }

    /// Takes a node from the free list or allocates a new one if the free list is empty.
    fn pop_free_node(&mut self) -> Result<NodeIndex, crate::Error> {
        if self.free_list_head != NODE_INVALID {
            // Pop a node from the free list.
            let result = self.free_list_head;
            self.free_list_head = self.node_at(result).next.load(Relaxed);
            Ok(result)
        } else if (self.watermark as usize) < self.max_num_nodes {
            // Allocate one node with the watermark allocator.
            let result = self.watermark;
            self.watermark += 1;
            Ok(result)
        } else {
            // We are out of space.
            Err(crate::Error::OutOfSpace)
        }
    }
}

/// Mediates read access to a VMO written by AllocationsTableWriter.
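///
/// # Example
///
/// A minimal iteration sketch (illustrative only, not compiled; `snapshot` is assumed to be a
/// read-only snapshot of the writer's VMO):
///
/// ```ignore
/// // SAFETY: `snapshot` is not accessed by anyone else while the reader is alive.
/// let reader = unsafe { AllocationsTableReader::new(&snapshot)? };
/// for node in reader.iter() {
///     let node = node?;
///     println!("block at {:#x}, {} bytes", node.address, node.size);
/// }
/// ```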
pub struct AllocationsTableReader {
    storage: MemoryMappedVmo,
    max_num_nodes: usize,
}

impl AllocationsTableReader {
    /// # Safety
    /// The caller must guarantee that the `vmo` is not accessed by others while the returned
    /// instance is alive, usually by taking a snapshot of the VMO that AllocationsTableWriter
    /// operates on and then reading the snapshot instead.
    pub unsafe fn new(vmo: &zx::Vmo) -> Result<AllocationsTableReader, crate::Error> {
        let storage = MemoryMappedVmo::new_readonly(vmo)?;
        let max_num_nodes = compute_nodes_count(storage.vmo_size())?;

        Ok(AllocationsTableReader { storage, max_num_nodes })
    }

    /// Iterates all the allocated blocks that are present in the hash table.
    pub fn iter(&self) -> impl Iterator<Item = Result<&Node, crate::Error>> {
        // The bucket heads are stored at the beginning of the VMO.
        let bucket_heads = self.storage.get_object::<BucketHeads>(0).unwrap();
        bucket_heads.iter().map(|head| self.iterate_bucket(head.load(Relaxed))).flatten()
    }

    fn iterate_bucket(&self, head: NodeIndex) -> impl Iterator<Item = Result<&Node, crate::Error>> {
        let mut curr_index = head;
        let mut seen_addresses = HashSet::new();
        std::iter::from_fn(move || {
            while curr_index != NODE_INVALID {
                if (curr_index as usize) < self.max_num_nodes {
                    // The nodes are stored consecutively immediately after the bucket heads.
                    let byte_offset =
                        size_of::<BucketHeads>() + curr_index as usize * size_of::<Node>();
                    let curr_data = self.storage.get_object::<Node>(byte_offset).unwrap();
                    curr_index = curr_data.next.load(Relaxed);

                    // Only return the first occurrence of each address. This ensures that only the
                    // newer record is returned if an update is in progress.
                    if seen_addresses.insert(curr_data.address) {
                        return Some(Ok(curr_data));
                    }
                } else {
                    return Some(Err(crate::Error::InvalidInput));
                };
            }

            None
        })
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use assert_matches::assert_matches;
    use std::alloc::Layout;
    use std::collections::HashMap;

    // Some tests below use this constant to ensure that each bucket is used at least 10 times.
    const NUM_ITERATIONS: usize = NUM_BUCKETS * 10;

    // Ensure we allocate enough nodes to store all the blocks the tests need plus some headroom.
    const NUM_NODES: usize = NUM_ITERATIONS + 100;

    // Placeholder values used in the tests below:
    const THREAD_INFO_RESOURCE_KEY_1: ResourceKey = ResourceKey::from_raw(0x1122);
    const THREAD_INFO_RESOURCE_KEY_2: ResourceKey = ResourceKey::from_raw(0x3344);
    const STACK_TRACE_RESOURCE_KEY_1: ResourceKey = ResourceKey::from_raw(0x1212);
    const STACK_TRACE_RESOURCE_KEY_2: ResourceKey = ResourceKey::from_raw(0x3434);

    struct TestStorage {
        vmo: zx::Vmo,
    }

    impl TestStorage {
        pub fn new(num_nodes: usize) -> (TestStorage, AllocationsTableWriter) {
            let nodes_layout = Layout::array::<Node>(num_nodes).unwrap();
            let (layout, nodes_offset) = Layout::new::<BucketHeads>().extend(nodes_layout).unwrap();
            assert_eq!(nodes_offset, size_of::<BucketHeads>());

            let vmo = zx::Vmo::create(layout.size() as u64).unwrap();

            // SAFETY: this is the only AllocationsTableWriter for this VMO, and nothing else
            // accesses the VMO while the writer is alive (readers use snapshots).
            let writer = unsafe { AllocationsTableWriter::new(&vmo) }.unwrap();

            (TestStorage { vmo }, writer)
        }

        fn create_reader(&self) -> AllocationsTableReader {
            let snapshot = self
                .vmo
                .create_child(
                    zx::VmoChildOptions::SNAPSHOT | zx::VmoChildOptions::NO_WRITE,
                    0,
                    self.vmo.get_size().unwrap(),
                )
                .unwrap();

            // SAFETY: Readers operate on the read-only VMO snapshot we just created.
            unsafe { AllocationsTableReader::new(&snapshot) }.unwrap()
        }
    }

    #[test]
    fn test_cannot_insert_twice() {
        let (_storage, mut writer) = TestStorage::new(NUM_NODES);

        let result = writer.insert_allocation(
            0x1234,
            0x5678,
            THREAD_INFO_RESOURCE_KEY_1,
            STACK_TRACE_RESOURCE_KEY_1,
            zx::MonotonicInstant::ZERO,
        );
        assert_eq!(result, Ok(true));

        let result = writer.insert_allocation(
            0x1234,
            0x5678,
            THREAD_INFO_RESOURCE_KEY_1,
            STACK_TRACE_RESOURCE_KEY_1,
            zx::MonotonicInstant::ZERO,
        );
        assert_eq!(result, Ok(false));
    }

    #[test]
    fn test_cannot_erase_twice() {
        let (_storage, mut writer) = TestStorage::new(NUM_NODES);

        let result = writer.insert_allocation(
            0x1234,
            0x5678,
            THREAD_INFO_RESOURCE_KEY_1,
            STACK_TRACE_RESOURCE_KEY_1,
            zx::MonotonicInstant::ZERO,
        );
        assert_eq!(result, Ok(true));

        let result = writer.erase_allocation(0x1234);
        assert_eq!(result, Some(0x5678));

        let result = writer.erase_allocation(0x1234);
        assert_eq!(result, None);
    }

    #[test]
    fn test_out_of_space() {
        let (_storage, mut writer) = TestStorage::new(NUM_NODES);

        // Test that inserting up to `NUM_NODES` works.
        for i in 0..NUM_NODES {
            let result = writer.insert_allocation(
                i as u64,
                1,
                THREAD_INFO_RESOURCE_KEY_1,
                STACK_TRACE_RESOURCE_KEY_1,
                zx::MonotonicInstant::ZERO,
            );
            assert_eq!(result, Ok(true));
        }

        // Test that inserting an extra node fails.
        let result = writer.insert_allocation(
            NUM_NODES as u64,
            1,
            THREAD_INFO_RESOURCE_KEY_1,
            STACK_TRACE_RESOURCE_KEY_1,
            zx::MonotonicInstant::ZERO,
        );
        assert_eq!(result, Err(crate::Error::OutOfSpace));

        // Test that removing an element and then inserting again succeeds.
        let result = writer.erase_allocation(0);
        assert_eq!(result, Some(1));
        let result = writer.insert_allocation(
            NUM_NODES as u64,
            1,
            THREAD_INFO_RESOURCE_KEY_1,
            STACK_TRACE_RESOURCE_KEY_1,
            zx::MonotonicInstant::ZERO,
        );
        assert_eq!(result, Ok(true));
    }

    #[test]
    fn test_loop_insert_then_erase() {
        let (_storage, mut writer) = TestStorage::new(NUM_NODES);

        for i in 0..NUM_ITERATIONS {
            let result = writer.insert_allocation(
                i as u64,
                1,
                THREAD_INFO_RESOURCE_KEY_1,
                STACK_TRACE_RESOURCE_KEY_1,
                zx::MonotonicInstant::ZERO,
            );
            assert_eq!(result, Ok(true), "failed to insert 0x{:x}", i);

            let result = writer.erase_allocation(i as u64);
            assert_eq!(result, Some(1), "failed to erase 0x{:x}", i);
        }
    }

    #[test]
    fn test_bulk_insert_then_erase_same_order() {
        let (_storage, mut writer) = TestStorage::new(NUM_NODES);

        for i in 0..NUM_ITERATIONS {
            let result = writer.insert_allocation(
                i as u64,
                1,
                THREAD_INFO_RESOURCE_KEY_1,
                STACK_TRACE_RESOURCE_KEY_1,
                zx::MonotonicInstant::ZERO,
            );
            assert_eq!(result, Ok(true), "failed to insert 0x{:x}", i);
        }
        for i in 0..NUM_ITERATIONS {
            let result = writer.erase_allocation(i as u64);
            assert_eq!(result, Some(1), "failed to erase 0x{:x}", i);
        }
    }

    #[test]
    fn test_bulk_insert_then_erase_reverse_order() {
        let (_storage, mut writer) = TestStorage::new(NUM_NODES);

        for i in 0..NUM_ITERATIONS {
            let result = writer.insert_allocation(
                i as u64,
                1,
                THREAD_INFO_RESOURCE_KEY_1,
                STACK_TRACE_RESOURCE_KEY_1,
                zx::MonotonicInstant::ZERO,
            );
            assert_eq!(result, Ok(true), "failed to insert 0x{:x}", i);
        }
        for i in (0..NUM_ITERATIONS).rev() {
            let result = writer.erase_allocation(i as u64);
            assert_eq!(result, Some(1), "failed to erase 0x{:x}", i);
        }
    }

    #[test]
    fn test_read_empty() {
        let (storage, _writer) = TestStorage::new(NUM_NODES);

        // Verify that it is empty.
        let reader = storage.create_reader();
        assert_eq!(reader.iter().count(), 0);
    }

    #[test]
    fn test_read_populated() {
        let (storage, mut writer) = TestStorage::new(NUM_NODES);

        // Fill the hash table.
        let mut expected_map = HashMap::new();
        for i in 0..NUM_ITERATIONS as u64 {
            let thread_info_key =
                if i % 4 >= 2 { THREAD_INFO_RESOURCE_KEY_1 } else { THREAD_INFO_RESOURCE_KEY_2 };
            let stack_trace_key =
                if i % 2 == 0 { STACK_TRACE_RESOURCE_KEY_1 } else { STACK_TRACE_RESOURCE_KEY_2 };
            let timestamp =
                zx::MonotonicInstant::from_nanos((NUM_ITERATIONS as i64 / 2) - (i as i64)); // test negative values too
            let result =
                writer.insert_allocation(i, 1, thread_info_key, stack_trace_key, timestamp);
            assert_eq!(result, Ok(true), "failed to insert 0x{:x}", i);

            expected_map.insert(i, (1, thread_info_key, stack_trace_key, timestamp));
        }

        // Read it back and verify.
        let reader = storage.create_reader();
        let mut actual_map = HashMap::new();
        for node in reader.iter() {
            let Node { address, size, thread_info_key, stack_trace_key, timestamp, .. } =
                node.unwrap();
            assert!(
                actual_map
                    .insert(*address, (*size, *thread_info_key, *stack_trace_key, *timestamp))
                    .is_none(),
                "address 0x{:x} was read more than once",
                address
            );
        }

        assert_eq!(actual_map, expected_map);
    }

    #[test]
    fn test_read_bad_bucket_head() {
        let (storage, mut writer) = TestStorage::new(NUM_NODES);

        // Initialize the hash table and corrupt one of the heads.
        writer.bucket_head_at(NUM_BUCKETS / 2).store(NODE_INVALID - 1, SeqCst);

        // Try to read it back and verify that the iterator returns an error.
        let reader = storage.create_reader();
        let contains_error = reader.iter().any(|e| e.is_err());

        assert!(contains_error);
    }

    #[test]
    fn test_read_bad_node_next() {
        let (storage, mut writer) = TestStorage::new(NUM_NODES);

        // Initialize the hash table and insert a node with a bad next pointer.
        writer.bucket_head_at(NUM_BUCKETS / 2).store(0, SeqCst);
        *writer.node_at(0) = Node {
            next: AtomicNodeIndex::new(NODE_INVALID - 1),
            address: 0x1234,
            size: 0x5678,
            thread_info_key: THREAD_INFO_RESOURCE_KEY_1,
            stack_trace_key: STACK_TRACE_RESOURCE_KEY_1,
            timestamp: zx::MonotonicInstant::from_nanos(99999999),
        };

        // Try to read it back and verify that the iterator returns an error.
        let reader = storage.create_reader();
        let contains_error = reader.iter().any(|e| e.is_err());

        assert!(contains_error);
    }

    #[test]
    fn test_replace() {
        let (storage, mut writer) = TestStorage::new(NUM_NODES);

        let result = writer.insert_allocation(
            0x1234,
            0x1111,
            THREAD_INFO_RESOURCE_KEY_1,
            STACK_TRACE_RESOURCE_KEY_1,
            zx::MonotonicInstant::ZERO,
        );
        assert_eq!(result, Ok(true));

        // Verify initial contents.
        let reader = storage.create_reader();
        let mut iter = reader.iter();
        assert_matches!(iter.next(), Some(Ok(Node { address: 0x1234, size: 0x1111, .. })));
        assert_matches!(iter.next(), None);

        let result = writer.replace_allocation_begin(
            0x1234,
            0x2222,
            THREAD_INFO_RESOURCE_KEY_2,
            STACK_TRACE_RESOURCE_KEY_2,
            zx::MonotonicInstant::ZERO,
        );
        let Ok(Some((new_node, old_node, old_size))) = result else {
            panic!("Update begin is supposed to succeed in this test, got {:?} instead", result)
        };
        assert_eq!(old_size, 0x1111);

        // Verify that intermediate contents already reflect the update.
        let reader = storage.create_reader();
        let mut iter = reader.iter();
        assert_matches!(iter.next(), Some(Ok(Node { address: 0x1234, size: 0x2222, .. })));
        assert_matches!(iter.next(), None);

        writer.replace_allocation_end(new_node, old_node);

        // Verify that final contents reflect the update.
        let reader = storage.create_reader();
        let mut iter = reader.iter();
        assert_matches!(iter.next(), Some(Ok(Node { address: 0x1234, size: 0x2222, .. })));
        assert_matches!(iter.next(), None);
    }

    #[test]
    fn test_cannot_replace_nonexisting() {
        let (_storage, mut writer) = TestStorage::new(NUM_NODES);

        let result = writer.replace_allocation(
            0x1234,
            0x5678,
            THREAD_INFO_RESOURCE_KEY_1,
            STACK_TRACE_RESOURCE_KEY_1,
            zx::MonotonicInstant::ZERO,
        );
        assert_eq!(result, Ok(None));
    }
}