1use memory_mapped_vmo::MemoryMappedVmo;
6use static_assertions::const_assert;
7use std::collections::HashSet;
8use std::mem::{align_of, size_of};
9use std::ops::Deref;
10use std::sync::atomic::AtomicU32;
11use std::sync::atomic::Ordering::{Relaxed, SeqCst};
12use zerocopy::FromBytes;
13
14pub use crate::resources_table_v1::ResourceKey;
15
/// Index of a `Node` within the node region of the backing buffer.
type NodeIndex = u32;
/// The hash table's bucket array: one atomic chain-head index per bucket,
/// stored at the very beginning of the memory-mapped VMO.
type BucketHeads = [AtomicNodeIndex; NUM_BUCKETS];
const NUM_BUCKETS: usize = 1 << 16;
/// Sentinel index meaning "no node": terminates bucket chains and the free list.
const NODE_INVALID: NodeIndex = NodeIndex::MAX;

/// Minimum alignment the backing buffer must have so that `Node`s are laid out
/// properly aligned.
pub const MIN_ALIGNMENT: usize = align_of::<Node>();
// The node region starts immediately after the bucket heads; this assert
// guarantees that an allocation aligned to MIN_ALIGNMENT also keeps the
// bucket-head array properly aligned.
const_assert!(MIN_ALIGNMENT % align_of::<BucketHeads>() == 0);
24
/// A `NodeIndex` stored with atomic access, so that a reader mapping a
/// snapshot of the VMO never observes a torn value.
#[repr(transparent)]
#[derive(Debug, FromBytes)]
struct AtomicNodeIndex(AtomicU32);
29
30impl AtomicNodeIndex {
31 const fn new(value: u32) -> AtomicNodeIndex {
32 AtomicNodeIndex(AtomicU32::new(value))
33 }
34}
35
36impl Deref for AtomicNodeIndex {
37 type Target = AtomicU32;
38
39 fn deref(&self) -> &AtomicU32 {
40 &self.0
41 }
42}
43
/// A single allocation record stored in the VMO.
///
/// A node is linked either into one hash-bucket chain (while it describes a
/// live allocation) or into the writer's free list (while unused).
#[repr(C)]
#[derive(Debug, FromBytes)]
pub struct Node {
    // Index of the next node in the chain this node belongs to (bucket chain
    // or free list); NODE_INVALID terminates the chain.
    next: AtomicNodeIndex,
    /// Address of the tracked allocation.
    pub address: u64,
    /// Size of the tracked allocation, in bytes.
    pub size: u64,
    /// Time associated with the allocation event.
    pub timestamp: zx::MonotonicInstant,
    /// Key of the resource describing the allocating thread.
    pub thread_info_key: ResourceKey,
    /// Key of the resource describing the allocation's stack trace.
    pub stack_trace_key: ResourceKey,
}
54
55fn compute_nodes_count(num_bytes: usize) -> Result<usize, crate::Error> {
57 let Some(nodes_size) = num_bytes.checked_sub(size_of::<BucketHeads>()) else {
58 return Err(crate::Error::BufferTooSmall);
59 };
60
61 let num_nodes = nodes_size / size_of::<Node>();
62 if num_nodes > NODE_INVALID as usize {
63 return Err(crate::Error::BufferTooBig);
64 }
65
66 Ok(num_nodes)
67}
68
/// Write-side view of the allocations table.
///
/// The table lives in a memory-mapped VMO laid out as a `BucketHeads` array
/// followed by an array of `Node`s. Buckets are chained hash lists keyed by
/// allocation address; recycled nodes are kept in a singly-linked free list.
pub struct AllocationsTableWriter {
    storage: MemoryMappedVmo,

    // Number of nodes handed out so far from the never-used region; every
    // node below the watermark is either in a bucket chain or on the free list.
    watermark: NodeIndex,
    // Total number of nodes that fit in `storage`.
    max_num_nodes: usize,
    // Head of the free list of recycled nodes (NODE_INVALID when empty).
    free_list_head: NodeIndex,
}
95
impl AllocationsTableWriter {
    /// Creates a writer on top of `vmo` and resets the table: every bucket is
    /// marked empty and all nodes become available for allocation.
    ///
    /// # Safety
    /// Inherits the requirements of `MemoryMappedVmo::new_readwrite`;
    /// NOTE(review): presumably the caller must also guarantee that no other
    /// writer operates on the same VMO — confirm at the call sites.
    pub unsafe fn new(vmo: &zx::Vmo) -> Result<AllocationsTableWriter, crate::Error> {
        let storage = unsafe { MemoryMappedVmo::new_readwrite(vmo)? };
        let max_num_nodes = compute_nodes_count(storage.vmo_size())?;

        let mut result = AllocationsTableWriter {
            storage,
            watermark: 0,
            max_num_nodes,
            free_list_head: NODE_INVALID,
        };

        // Start with every bucket chain empty.
        for bucket_index in 0..NUM_BUCKETS {
            result.bucket_head_at(bucket_index).store(NODE_INVALID, SeqCst);
        }

        Ok(result)
    }

    /// Maps an allocation address to the bucket responsible for it.
    fn compute_bucket_index(address: u64) -> usize {
        // Drop the low 4 bits before reducing modulo NUM_BUCKETS; presumably
        // because allocation addresses are at least 16-byte aligned, so those
        // bits carry no entropy — TODO(review): confirm.
        let tmp = (address >> 4) as usize;
        tmp % NUM_BUCKETS
    }

    /// Returns a mutable reference to the chain head of the given bucket.
    fn bucket_head_at(&mut self, bucket_index: usize) -> &mut AtomicNodeIndex {
        // The bucket-head array sits at offset 0 of the mapped VMO.
        let bucket_heads = self.storage.get_object_mut::<BucketHeads>(0).unwrap();
        &mut bucket_heads[bucket_index]
    }

    /// Returns a mutable reference to the node with the given index.
    fn node_at(&mut self, node_index: NodeIndex) -> &mut Node {
        // Nodes are stored sequentially, right after the bucket heads.
        let byte_offset = size_of::<BucketHeads>() + node_index as usize * size_of::<Node>();
        self.storage.get_object_mut::<Node>(byte_offset).unwrap()
    }

    /// Inserts a new allocation into the table.
    ///
    /// Returns `Ok(true)` on success, `Ok(false)` if an allocation with the
    /// same address is already present, or `Err(OutOfSpace)` if no node is
    /// available.
    pub fn insert_allocation(
        &mut self,
        address: u64,
        size: u64,
        thread_info_key: ResourceKey,
        stack_trace_key: ResourceKey,
        timestamp: zx::MonotonicInstant,
    ) -> Result<bool, crate::Error> {
        let bucket_index = Self::compute_bucket_index(address);
        let old_head = self.bucket_head_at(bucket_index).load(Relaxed);

        // Refuse to insert a duplicate address.
        let mut curr_index = old_head;
        while curr_index != NODE_INVALID {
            let curr_data = self.node_at(curr_index);
            if curr_data.address == address {
                return Ok(false);
            }
            curr_index = curr_data.next.load(Relaxed);
        }

        // Fully initialize the new node (including its `next` link to the old
        // head) before publishing it by updating the bucket head, so a reader
        // snapshotting the VMO always sees a consistent chain.
        let new_index = self.pop_free_node()?;
        *self.node_at(new_index) = Node {
            address,
            size,
            timestamp,
            thread_info_key,
            stack_trace_key,
            next: AtomicNodeIndex::new(old_head),
        };
        self.bucket_head_at(bucket_index).store(new_index, SeqCst);
        Ok(true)
    }

    /// Removes the allocation with the given address, if present, and returns
    /// its size (or `None` if the address is not in the table).
    pub fn erase_allocation(&mut self, address: u64) -> Option<u64> {
        let bucket_index = Self::compute_bucket_index(address);

        // Walk the chain, remembering the predecessor so we can unlink.
        let mut prev_index = None;
        let mut curr_index = self.bucket_head_at(bucket_index).load(Relaxed);
        while curr_index != NODE_INVALID {
            let curr_data = self.node_at(curr_index);
            let curr_data_size = curr_data.size;
            let curr_data_next = curr_data.next.load(Relaxed);

            if curr_data.address == address {
                // Unlink from the predecessor (or the bucket head if first),
                // then recycle the node.
                if let Some(prev_index) = prev_index {
                    self.node_at(prev_index).next.store(curr_data_next, SeqCst);
                } else {
                    self.bucket_head_at(bucket_index).store(curr_data_next, SeqCst);
                }
                self.push_free_node(curr_index);
                return Some(curr_data_size);
            }

            prev_index = Some(curr_index);
            curr_index = curr_data.next.load(Relaxed);
        }

        None
    }

    /// Replaces the allocation at `address` with new metadata, returning the
    /// old size (or `Ok(None)` if the address is not in the table).
    ///
    /// Implemented as a two-phase insert-then-unlink so that the allocation is
    /// never absent from the table at any intermediate step.
    pub fn replace_allocation(
        &mut self,
        address: u64,
        size: u64,
        thread_info_key: ResourceKey,
        stack_trace_key: ResourceKey,
        timestamp: zx::MonotonicInstant,
    ) -> Result<Option<u64>, crate::Error> {
        let Some((new_node, old_node, old_size)) = self.replace_allocation_begin(
            address,
            size,
            thread_info_key,
            stack_trace_key,
            timestamp,
        )?
        else {
            return Ok(None);
        };

        self.replace_allocation_end(new_node, old_node);
        Ok(Some(old_size))
    }

    /// First phase of a replacement: inserts a new node for `address` at the
    /// head of its bucket WITHOUT removing the old node yet, so the address
    /// stays continuously visible (briefly appearing twice in the chain).
    ///
    /// Returns `(new_node, old_node, old_size)`, or `Ok(None)` if the address
    /// is not present.
    fn replace_allocation_begin(
        &mut self,
        address: u64,
        size: u64,
        thread_info_key: ResourceKey,
        stack_trace_key: ResourceKey,
        timestamp: zx::MonotonicInstant,
    ) -> Result<Option<(NodeIndex, NodeIndex, u64)>, crate::Error> {
        let bucket_index = Self::compute_bucket_index(address);
        let old_head = self.bucket_head_at(bucket_index).load(Relaxed);

        // Locate the existing node for this address, if any.
        let (old_index, old_size) = 'search: {
            let mut curr_index = old_head;
            while curr_index != NODE_INVALID {
                let curr_data = self.node_at(curr_index);
                if curr_data.address == address {
                    break 'search (curr_index, curr_data.size);
                }
                curr_index = curr_data.next.load(Relaxed);
            }

            return Ok(None);
        };

        // As in insert_allocation: fully initialize the node first, then
        // publish it by storing the new bucket head.
        let new_index = self.pop_free_node()?;
        *self.node_at(new_index) = Node {
            address,
            size,
            timestamp,
            thread_info_key,
            stack_trace_key,
            next: AtomicNodeIndex::new(old_head),
        };
        self.bucket_head_at(bucket_index).store(new_index, SeqCst);
        Ok(Some((new_index, old_index, old_size)))
    }

    /// Second phase of a replacement: unlinks `old_node` from the chain that
    /// starts at `new_node` and recycles it.
    ///
    /// Panics if `old_node` is not reachable from `new_node` (an invariant
    /// guaranteed by `replace_allocation_begin`).
    fn replace_allocation_end(&mut self, new_node: NodeIndex, old_node: NodeIndex) {
        // Whatever follows the old node becomes the successor of its
        // predecessor.
        let tail = self.node_at(old_node).next.load(Relaxed);

        // Scan forward from the new node to find the old node's predecessor.
        let mut scan_node = new_node;
        loop {
            assert_ne!(scan_node, NODE_INVALID, "the old node must be a successor of new node");
            let scan_data = self.node_at(scan_node);
            if scan_data.next.load(Relaxed) == old_node {
                scan_data.next.store(tail, SeqCst);
                self.push_free_node(old_node);
                return;
            }
            scan_node = scan_data.next.load(Relaxed);
        }
    }

    /// Prepends `index` to the free list.
    fn push_free_node(&mut self, index: NodeIndex) {
        let current_head = self.free_list_head;

        // The node's `next` link is reused to chain the free list.
        let node_data = self.node_at(index);
        node_data.next.store(current_head, Relaxed);

        self.free_list_head = index;
    }

    /// Obtains a free node, either by popping the free list or by advancing
    /// the watermark into the never-used region.
    ///
    /// Returns `Err(OutOfSpace)` when the table is full.
    fn pop_free_node(&mut self) -> Result<NodeIndex, crate::Error> {
        if self.free_list_head != NODE_INVALID {
            // Reuse a previously-freed node.
            let result = self.free_list_head;
            self.free_list_head = self.node_at(result).next.load(Relaxed);
            Ok(result)
        } else if (self.watermark as usize) < self.max_num_nodes {
            // Hand out a never-used node.
            let result = self.watermark;
            self.watermark += 1;
            Ok(result)
        } else {
            Err(crate::Error::OutOfSpace)
        }
    }
}
336
/// Read-side view of the allocations table, typically built on a read-only
/// snapshot of the writer's VMO. The mapped data is treated as untrusted.
pub struct AllocationsTableReader {
    storage: MemoryMappedVmo,
    // Total number of nodes that fit in `storage`; used to range-check
    // node indices read from the (untrusted) data.
    max_num_nodes: usize,
}
342
343impl AllocationsTableReader {
344 pub unsafe fn new(vmo: &zx::Vmo) -> Result<AllocationsTableReader, crate::Error> {
349 let storage = unsafe { MemoryMappedVmo::new_readonly(vmo)? };
350 let max_num_nodes = compute_nodes_count(storage.vmo_size())?;
351
352 Ok(AllocationsTableReader { storage, max_num_nodes })
353 }
354
355 pub fn iter(&self) -> impl Iterator<Item = Result<&Node, crate::Error>> {
357 let bucket_heads = self.storage.get_object::<BucketHeads>(0).unwrap();
359 bucket_heads.iter().map(|head| self.iterate_bucket(head.load(Relaxed))).flatten()
360 }
361
362 fn iterate_bucket(&self, head: NodeIndex) -> impl Iterator<Item = Result<&Node, crate::Error>> {
363 let mut curr_index = head;
364 let mut seen_addresses = HashSet::new();
365 std::iter::from_fn(move || {
366 while curr_index != NODE_INVALID {
367 if (curr_index as usize) < self.max_num_nodes {
368 let byte_offset =
370 size_of::<BucketHeads>() + curr_index as usize * size_of::<Node>();
371 let curr_data = self.storage.get_object::<Node>(byte_offset).unwrap();
372 curr_index = curr_data.next.load(Relaxed);
373
374 if seen_addresses.insert(curr_data.address) {
377 return Some(Ok(curr_data));
378 }
379 } else {
380 return Some(Err(crate::Error::InvalidInput));
381 };
382 }
383
384 None
385 })
386 }
387}
388
#[cfg(test)]
mod tests {
    use super::*;
    use assert_matches::assert_matches;
    use std::alloc::Layout;
    use std::collections::HashMap;

    // Enough iterations to hit every bucket several times over.
    const NUM_ITERATIONS: usize = NUM_BUCKETS * 10;

    // Table capacity used by the tests, with some slack over NUM_ITERATIONS.
    const NUM_NODES: usize = NUM_ITERATIONS + 100;

    // Arbitrary resource keys used as payload by the tests below.
    const THREAD_INFO_RESOURCE_KEY_1: ResourceKey = ResourceKey::from_raw(0x1122);
    const THREAD_INFO_RESOURCE_KEY_2: ResourceKey = ResourceKey::from_raw(0x3344);
    const STACK_TRACE_RESOURCE_KEY_1: ResourceKey = ResourceKey::from_raw(0x1212);
    const STACK_TRACE_RESOURCE_KEY_2: ResourceKey = ResourceKey::from_raw(0x3434);

    // Owns the VMO that backs the table under test.
    struct TestStorage {
        vmo: zx::Vmo,
    }

    impl TestStorage {
        // Creates a VMO sized to hold `num_nodes` nodes (plus the bucket
        // heads) and a writer on top of it.
        pub fn new(num_nodes: usize) -> (TestStorage, AllocationsTableWriter) {
            let nodes_layout = Layout::array::<Node>(num_nodes).unwrap();
            let (layout, nodes_offset) = Layout::new::<BucketHeads>().extend(nodes_layout).unwrap();
            assert_eq!(nodes_offset, size_of::<BucketHeads>());

            let vmo = zx::Vmo::create(layout.size() as u64).unwrap();

            let writer = unsafe { AllocationsTableWriter::new(&vmo) }.unwrap();

            (TestStorage { vmo }, writer)
        }

        // Builds a reader on a read-only snapshot of the VMO, mirroring how a
        // real client would observe the table.
        fn create_reader(&self) -> AllocationsTableReader {
            let snapshot = self
                .vmo
                .create_child(
                    zx::VmoChildOptions::SNAPSHOT | zx::VmoChildOptions::NO_WRITE,
                    0,
                    self.vmo.get_size().unwrap(),
                )
                .unwrap();

            unsafe { AllocationsTableReader::new(&snapshot) }.unwrap()
        }
    }

    // Inserting the same address twice must be rejected (Ok(false)).
    #[test]
    fn test_cannot_insert_twice() {
        let (_storage, mut writer) = TestStorage::new(NUM_NODES);

        let result = writer.insert_allocation(
            0x1234,
            0x5678,
            THREAD_INFO_RESOURCE_KEY_1,
            STACK_TRACE_RESOURCE_KEY_1,
            zx::MonotonicInstant::ZERO,
        );
        assert_eq!(result, Ok(true));

        let result = writer.insert_allocation(
            0x1234,
            0x5678,
            THREAD_INFO_RESOURCE_KEY_1,
            STACK_TRACE_RESOURCE_KEY_1,
            zx::MonotonicInstant::ZERO,
        );
        assert_eq!(result, Ok(false));
    }

    // Erasing an address that was already erased must return None.
    #[test]
    fn test_cannot_erase_twice() {
        let (_storage, mut writer) = TestStorage::new(NUM_NODES);

        let result = writer.insert_allocation(
            0x1234,
            0x5678,
            THREAD_INFO_RESOURCE_KEY_1,
            STACK_TRACE_RESOURCE_KEY_1,
            zx::MonotonicInstant::ZERO,
        );
        assert_eq!(result, Ok(true));

        let result = writer.erase_allocation(0x1234);
        assert_eq!(result, Some(0x5678));

        let result = writer.erase_allocation(0x1234);
        assert_eq!(result, None);
    }

    // Filling the table must produce OutOfSpace, and erasing one entry must
    // make room for exactly one new insertion (free-list recycling).
    #[test]
    fn test_out_of_space() {
        let (_storage, mut writer) = TestStorage::new(NUM_NODES);

        for i in 0..NUM_NODES {
            let result = writer.insert_allocation(
                i as u64,
                1,
                THREAD_INFO_RESOURCE_KEY_1,
                STACK_TRACE_RESOURCE_KEY_1,
                zx::MonotonicInstant::ZERO,
            );
            assert_eq!(result, Ok(true));
        }

        let result = writer.insert_allocation(
            NUM_NODES as u64,
            1,
            THREAD_INFO_RESOURCE_KEY_1,
            STACK_TRACE_RESOURCE_KEY_1,
            zx::MonotonicInstant::ZERO,
        );
        assert_eq!(result, Err(crate::Error::OutOfSpace));

        let result = writer.erase_allocation(0);
        assert_eq!(result, Some(1));
        let result = writer.insert_allocation(
            NUM_NODES as u64,
            1,
            THREAD_INFO_RESOURCE_KEY_1,
            STACK_TRACE_RESOURCE_KEY_1,
            zx::MonotonicInstant::ZERO,
        );
        assert_eq!(result, Ok(true));
    }

    // Alternating insert/erase of a single entry across many addresses.
    #[test]
    fn test_loop_insert_then_erase() {
        let (_storage, mut writer) = TestStorage::new(NUM_NODES);

        for i in 0..NUM_ITERATIONS {
            let result = writer.insert_allocation(
                i as u64,
                1,
                THREAD_INFO_RESOURCE_KEY_1,
                STACK_TRACE_RESOURCE_KEY_1,
                zx::MonotonicInstant::ZERO,
            );
            assert_eq!(result, Ok(true), "failed to insert 0x{:x}", i);

            let result = writer.erase_allocation(i as u64);
            assert_eq!(result, Some(1), "failed to erase 0x{:x}", i);
        }
    }

    // Bulk insert followed by erasure in insertion order.
    #[test]
    fn test_bulk_insert_then_erase_same_order() {
        let (_storage, mut writer) = TestStorage::new(NUM_NODES);

        for i in 0..NUM_ITERATIONS {
            let result = writer.insert_allocation(
                i as u64,
                1,
                THREAD_INFO_RESOURCE_KEY_1,
                STACK_TRACE_RESOURCE_KEY_1,
                zx::MonotonicInstant::ZERO,
            );
            assert_eq!(result, Ok(true), "failed to insert 0x{:x}", i);
        }
        for i in 0..NUM_ITERATIONS {
            let result = writer.erase_allocation(i as u64);
            assert_eq!(result, Some(1), "failed to erase 0x{:x}", i);
        }
    }

    // Bulk insert followed by erasure in reverse order.
    #[test]
    fn test_bulk_insert_then_erase_reverse_order() {
        let (_storage, mut writer) = TestStorage::new(NUM_NODES);

        for i in 0..NUM_ITERATIONS {
            let result = writer.insert_allocation(
                i as u64,
                1,
                THREAD_INFO_RESOURCE_KEY_1,
                STACK_TRACE_RESOURCE_KEY_1,
                zx::MonotonicInstant::ZERO,
            );
            assert_eq!(result, Ok(true), "failed to insert 0x{:x}", i);
        }
        for i in (0..NUM_ITERATIONS).rev() {
            let result = writer.erase_allocation(i as u64);
            assert_eq!(result, Some(1), "failed to erase 0x{:x}", i);
        }
    }

    // A freshly-initialized table must read back empty.
    #[test]
    fn test_read_empty() {
        let (storage, _writer) = TestStorage::new(NUM_NODES);

        let reader = storage.create_reader();
        assert_eq!(reader.iter().count(), 0);
    }

    // Everything written (addresses, sizes, keys, timestamps) must be read
    // back exactly once with no duplicates or omissions.
    #[test]
    fn test_read_populated() {
        let (storage, mut writer) = TestStorage::new(NUM_NODES);

        let mut expected_map = HashMap::new();
        for i in 0..NUM_ITERATIONS as u64 {
            // Vary keys and timestamps so the payload is distinguishable.
            let thread_info_key =
                if i % 4 >= 2 { THREAD_INFO_RESOURCE_KEY_1 } else { THREAD_INFO_RESOURCE_KEY_2 };
            let stack_trace_key =
                if i % 2 == 0 { STACK_TRACE_RESOURCE_KEY_1 } else { STACK_TRACE_RESOURCE_KEY_2 };
            let timestamp =
                zx::MonotonicInstant::from_nanos((NUM_ITERATIONS as i64 / 2) - (i as i64));
            let result =
                writer.insert_allocation(i, 1, thread_info_key, stack_trace_key, timestamp);
            assert_eq!(result, Ok(true), "failed to insert 0x{:x}", i);

            expected_map.insert(i, (1, thread_info_key, stack_trace_key, timestamp));
        }

        let reader = storage.create_reader();
        let mut actual_map = HashMap::new();
        for node in reader.iter() {
            let Node { address, size, thread_info_key, stack_trace_key, timestamp, .. } =
                node.unwrap();
            assert!(
                actual_map
                    .insert(*address, (*size, *thread_info_key, *stack_trace_key, *timestamp))
                    .is_none(),
                "address 0x{:x} was read more than once",
                address
            );
        }

        assert_eq!(actual_map, expected_map);
    }

    // An out-of-range bucket head must surface as an error, not a crash.
    #[test]
    fn test_read_bad_bucket_head() {
        let (storage, mut writer) = TestStorage::new(NUM_NODES);

        writer.bucket_head_at(NUM_BUCKETS / 2).store(NODE_INVALID - 1, SeqCst);

        let reader = storage.create_reader();
        let contains_error = reader.iter().any(|e| e.is_err());

        assert!(contains_error);
    }

    // An out-of-range `next` link inside a node must surface as an error too.
    #[test]
    fn test_read_bad_node_next() {
        let (storage, mut writer) = TestStorage::new(NUM_NODES);

        writer.bucket_head_at(NUM_BUCKETS / 2).store(0, SeqCst);
        *writer.node_at(0) = Node {
            next: AtomicNodeIndex::new(NODE_INVALID - 1),
            address: 0x1234,
            size: 0x5678,
            thread_info_key: THREAD_INFO_RESOURCE_KEY_1,
            stack_trace_key: STACK_TRACE_RESOURCE_KEY_1,
            timestamp: zx::MonotonicInstant::from_nanos(99999999),
        };

        let reader = storage.create_reader();
        let contains_error = reader.iter().any(|e| e.is_err());

        assert!(contains_error);
    }

    // Between replace_allocation_begin and replace_allocation_end, readers
    // must already observe the new entry exactly once.
    #[test]
    fn test_replace() {
        let (storage, mut writer) = TestStorage::new(NUM_NODES);

        let result = writer.insert_allocation(
            0x1234,
            0x1111,
            THREAD_INFO_RESOURCE_KEY_1,
            STACK_TRACE_RESOURCE_KEY_1,
            zx::MonotonicInstant::ZERO,
        );
        assert_eq!(result, Ok(true));

        let reader = storage.create_reader();
        let mut iter = reader.iter();
        assert_matches!(iter.next(), Some(Ok(Node { address: 0x1234, size: 0x1111, .. })));
        assert_matches!(iter.next(), None);

        let result = writer.replace_allocation_begin(
            0x1234,
            0x2222,
            THREAD_INFO_RESOURCE_KEY_2,
            STACK_TRACE_RESOURCE_KEY_2,
            zx::MonotonicInstant::ZERO,
        );
        let Ok(Some((new_node, old_node, old_size))) = result else {
            panic!("Update begin is supposed to succeed in this test, got {:?} instead", result)
        };
        assert_eq!(old_size, 0x1111);

        // Mid-replacement snapshot: the new value is visible, deduplicated.
        let reader = storage.create_reader();
        let mut iter = reader.iter();
        assert_matches!(iter.next(), Some(Ok(Node { address: 0x1234, size: 0x2222, .. })));
        assert_matches!(iter.next(), None);

        writer.replace_allocation_end(new_node, old_node);

        // After completion the table still shows exactly the new value.
        let reader = storage.create_reader();
        let mut iter = reader.iter();
        assert_matches!(iter.next(), Some(Ok(Node { address: 0x1234, size: 0x2222, .. })));
        assert_matches!(iter.next(), None);
    }

    // Replacing an address that was never inserted must be a no-op.
    #[test]
    fn test_cannot_replace_nonexisting() {
        let (_storage, mut writer) = TestStorage::new(NUM_NODES);

        let result = writer.replace_allocation(
            0x1234,
            0x5678,
            THREAD_INFO_RESOURCE_KEY_1,
            STACK_TRACE_RESOURCE_KEY_1,
            zx::MonotonicInstant::ZERO,
        );
        assert_eq!(result, Ok(None));
    }
}