//! A hash table of allocation records stored in a memory-mapped VMO, with a
//! single writer that mutates it in place and readers that consume point-in-time
//! snapshots of it.

use static_assertions::const_assert;
use std::collections::HashSet;
use std::mem::{align_of, size_of};
use std::ops::Deref;
use std::sync::atomic::AtomicU32;
use std::sync::atomic::Ordering::{Relaxed, SeqCst};

use crate::memory_mapped_vmo::{MemoryMappable, MemoryMappedVmo};
pub use crate::resources_table_v1::ResourceKey;

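// The table is laid out as a fixed array of bucket heads followed by the nodes
// themselves. Nodes are referenced by index, with NODE_INVALID acting as the
// null value / list terminator.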
type NodeIndex = u32;
type BucketHeads = [AtomicNodeIndex; NUM_BUCKETS];
const NUM_BUCKETS: usize = 1 << 16;
const NODE_INVALID: NodeIndex = NodeIndex::MAX;

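/// The minimum alignment required by the memory that backs the table.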
pub const MIN_ALIGNMENT: usize = align_of::<Node>();
const_assert!(MIN_ALIGNMENT % align_of::<BucketHeads>() == 0);

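/// A `NodeIndex` that can be loaded and stored atomically.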
#[repr(transparent)]
#[derive(Debug)]
struct AtomicNodeIndex(AtomicU32);

impl AtomicNodeIndex {
    const fn new(value: u32) -> AtomicNodeIndex {
        AtomicNodeIndex(AtomicU32::new(value))
    }
}

impl Deref for AtomicNodeIndex {
    type Target = AtomicU32;

    fn deref(&self) -> &AtomicU32 {
        &self.0
    }
}

// SAFETY: `AtomicNodeIndex` is `repr(transparent)` over an `AtomicU32`, for which
// any bit pattern is a valid value.
unsafe impl MemoryMappable for AtomicNodeIndex {}

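/// An entry in the allocations table, describing a single live allocation.
///
/// Nodes in the same bucket are chained into a singly-linked list through `next`.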
#[repr(C)]
#[derive(Debug)]
pub struct Node {
    next: AtomicNodeIndex,
    pub address: u64,
    pub size: u64,
    pub timestamp: zx::MonotonicInstant,
    pub thread_info_key: ResourceKey,
    pub stack_trace_key: ResourceKey,
}

// SAFETY: `Node` is `repr(C)` and all of its fields are plain, fixed-size values.
unsafe impl MemoryMappable for Node {}

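/// Computes how many nodes fit in a buffer of the given size, after reserving
/// space for the bucket heads at its beginning.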
fn compute_nodes_count(num_bytes: usize) -> Result<usize, crate::Error> {
    let Some(nodes_size) = num_bytes.checked_sub(size_of::<BucketHeads>()) else {
        return Err(crate::Error::BufferTooSmall);
    };

    let num_nodes = nodes_size / size_of::<Node>();
    if num_nodes > NODE_INVALID as usize {
        return Err(crate::Error::BufferTooBig);
    }

    Ok(num_nodes)
}

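/// Mediates write access to an allocations table stored in a VMO.
///
/// Removed nodes are chained into a free list and reused before new nodes are
/// carved out of the never-used tail of the buffer (tracked by `watermark`).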
pub struct AllocationsTableWriter {
    storage: MemoryMappedVmo,

    // Index of the first node that has never been linked into any list.
    watermark: NodeIndex,
    max_num_nodes: usize,
    // Head of the free list, or NODE_INVALID if the free list is empty.
    free_list_head: NodeIndex,
}

impl AllocationsTableWriter {
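    /// Creates a new `AllocationsTableWriter` over the given VMO and initializes
    /// all of its buckets as empty.
    ///
    /// # Safety
    /// The caller must guarantee exclusive write access to the VMO for as long as
    /// the returned instance exists.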
    pub unsafe fn new(vmo: &zx::Vmo) -> Result<AllocationsTableWriter, crate::Error> {
        let storage = MemoryMappedVmo::new_readwrite(vmo)?;
        let max_num_nodes = compute_nodes_count(storage.vmo_size())?;

        let mut result = AllocationsTableWriter {
            storage,
            watermark: 0,
            max_num_nodes,
            free_list_head: NODE_INVALID,
        };

        // Mark all the buckets as empty.
        for bucket_index in 0..NUM_BUCKETS {
            result.bucket_head_at(bucket_index).store(NODE_INVALID, SeqCst);
        }

        Ok(result)
    }

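    /// Maps an allocation address to the bucket it belongs to.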
    fn compute_bucket_index(address: u64) -> usize {
        // Skip the lowest bits, which tend to always be zero due to allocator
        // alignment, then distribute the remaining bits over the buckets.
        let tmp = (address >> 4) as usize;
        tmp % NUM_BUCKETS
    }

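    /// Returns the head of the given bucket's linked list.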
    fn bucket_head_at(&mut self, bucket_index: usize) -> &mut AtomicNodeIndex {
        let bucket_heads = self.storage.get_object_mut::<BucketHeads>(0).unwrap();
        &mut bucket_heads[bucket_index]
    }

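    /// Returns the node stored at the given index.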
    fn node_at(&mut self, node_index: NodeIndex) -> &mut Node {
        let byte_offset = size_of::<BucketHeads>() + node_index as usize * size_of::<Node>();
        self.storage.get_object_mut::<Node>(byte_offset).unwrap()
    }

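    /// Inserts a new entry in the table.
    ///
    /// Returns `Ok(true)` on success or `Ok(false)` if an entry with the same
    /// address already exists.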
    pub fn insert_allocation(
        &mut self,
        address: u64,
        size: u64,
        thread_info_key: ResourceKey,
        stack_trace_key: ResourceKey,
        timestamp: zx::MonotonicInstant,
    ) -> Result<bool, crate::Error> {
        let bucket_index = Self::compute_bucket_index(address);
        let old_head = self.bucket_head_at(bucket_index).load(Relaxed);

        // Do not insert duplicate addresses.
        let mut curr_index = old_head;
        while curr_index != NODE_INVALID {
            let curr_data = self.node_at(curr_index);
            if curr_data.address == address {
                return Ok(false);
            }
            curr_index = curr_data.next.load(Relaxed);
        }

        // Fill a new node and publish it as the new head of the bucket's list.
        let new_index = self.pop_free_node()?;
        *self.node_at(new_index) = Node {
            address,
            size,
            timestamp,
            thread_info_key,
            stack_trace_key,
            next: AtomicNodeIndex::new(old_head),
        };
        self.bucket_head_at(bucket_index).store(new_index, SeqCst);
        Ok(true)
    }

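    /// Removes the entry with the given address, if present, and returns its size.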
    pub fn erase_allocation(&mut self, address: u64) -> Option<u64> {
        let bucket_index = Self::compute_bucket_index(address);

        // Scan the bucket's list for the node to remove.
        let mut prev_index = None;
        let mut curr_index = self.bucket_head_at(bucket_index).load(Relaxed);
        while curr_index != NODE_INVALID {
            let curr_data = self.node_at(curr_index);
            let curr_data_size = curr_data.size;
            let curr_data_next = curr_data.next.load(Relaxed);

            if curr_data.address == address {
                // Unlink the node from the bucket's list and recycle it.
                if let Some(prev_index) = prev_index {
                    self.node_at(prev_index).next.store(curr_data_next, SeqCst);
                } else {
                    self.bucket_head_at(bucket_index).store(curr_data_next, SeqCst);
                }
                self.push_free_node(curr_index);
                return Some(curr_data_size);
            }

            prev_index = Some(curr_index);
            curr_index = curr_data.next.load(Relaxed);
        }

        // The address was not found.
        None
    }

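    /// Replaces the entry with the given address with a new one, in such a way
    /// that concurrent readers never observe the address as missing.
    ///
    /// Returns the size stored in the old entry, or `Ok(None)` if the address is
    /// not present.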
    pub fn replace_allocation(
        &mut self,
        address: u64,
        size: u64,
        thread_info_key: ResourceKey,
        stack_trace_key: ResourceKey,
        timestamp: zx::MonotonicInstant,
    ) -> Result<Option<u64>, crate::Error> {
        // Insert the new node first, then unlink and recycle the old one.
        let Some((new_node, old_node, old_size)) = self.replace_allocation_begin(
            address,
            size,
            thread_info_key,
            stack_trace_key,
            timestamp,
        )?
        else {
            return Ok(None);
        };

        self.replace_allocation_end(new_node, old_node);
        Ok(Some(old_size))
    }

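    /// First half of a replace operation: inserts a new node at the head of the
    /// bucket's list while the old node is still linked.
    ///
    /// Returns the new node's index, the old node's index and the old node's size.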
    fn replace_allocation_begin(
        &mut self,
        address: u64,
        size: u64,
        thread_info_key: ResourceKey,
        stack_trace_key: ResourceKey,
        timestamp: zx::MonotonicInstant,
    ) -> Result<Option<(NodeIndex, NodeIndex, u64)>, crate::Error> {
        let bucket_index = Self::compute_bucket_index(address);
        let old_head = self.bucket_head_at(bucket_index).load(Relaxed);

        // Locate the node to be replaced.
        let (old_index, old_size) = 'search: {
            let mut curr_index = old_head;
            while curr_index != NODE_INVALID {
                let curr_data = self.node_at(curr_index);
                if curr_data.address == address {
                    break 'search (curr_index, curr_data.size);
                }
                curr_index = curr_data.next.load(Relaxed);
            }

            // The address was not found.
            return Ok(None);
        };

        // Fill a new node and publish it as the new head of the bucket's list.
        let new_index = self.pop_free_node()?;
        *self.node_at(new_index) = Node {
            address,
            size,
            timestamp,
            thread_info_key,
            stack_trace_key,
            next: AtomicNodeIndex::new(old_head),
        };
        self.bucket_head_at(bucket_index).store(new_index, SeqCst);
        Ok(Some((new_index, old_index, old_size)))
    }

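    /// Second half of a replace operation: unlinks the old node and recycles it.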
    fn replace_allocation_end(&mut self, new_node: NodeIndex, old_node: NodeIndex) {
        let tail = self.node_at(old_node).next.load(Relaxed);

        // Walk the list from the new node, find the node that links to the old
        // node, and make it skip over the old node.
        let mut scan_node = new_node;
        loop {
            assert_ne!(scan_node, NODE_INVALID, "the old node must be a successor of new node");
            let scan_data = self.node_at(scan_node);
            if scan_data.next.load(Relaxed) == old_node {
                scan_data.next.store(tail, SeqCst);
                self.push_free_node(old_node);
                return;
            }
            scan_node = scan_data.next.load(Relaxed);
        }
    }

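    /// Prepends a node to the free list.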
    fn push_free_node(&mut self, index: NodeIndex) {
        let current_head = self.free_list_head;

        let node_data = self.node_at(index);
        node_data.next.store(current_head, Relaxed);

        self.free_list_head = index;
    }

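    /// Obtains a free node, either by popping it from the free list or by
    /// advancing the watermark.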
    fn pop_free_node(&mut self) -> Result<NodeIndex, crate::Error> {
        if self.free_list_head != NODE_INVALID {
            // Reuse a node from the free list.
            let result = self.free_list_head;
            self.free_list_head = self.node_at(result).next.load(Relaxed);
            Ok(result)
        } else if (self.watermark as usize) < self.max_num_nodes {
            // Take a never-used node by advancing the watermark.
            let result = self.watermark;
            self.watermark += 1;
            Ok(result)
        } else {
            // All nodes are in use.
            Err(crate::Error::OutOfSpace)
        }
    }
}

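/// Mediates read access to an allocations table stored in a VMO, tolerating
/// malformed data (which is reported as `Error::InvalidInput`).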
pub struct AllocationsTableReader {
    storage: MemoryMappedVmo,
    max_num_nodes: usize,
}

impl AllocationsTableReader {
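    /// Creates a new `AllocationsTableReader` over the given VMO.
    ///
    /// # Safety
    /// The caller must guarantee that the VMO's contents do not change for as long
    /// as the returned instance exists (e.g. by mapping a snapshot).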
    pub unsafe fn new(vmo: &zx::Vmo) -> Result<AllocationsTableReader, crate::Error> {
        let storage = MemoryMappedVmo::new_readonly(vmo)?;
        let max_num_nodes = compute_nodes_count(storage.vmo_size())?;

        Ok(AllocationsTableReader { storage, max_num_nodes })
    }

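    /// Iterates over all the entries in the table.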
    pub fn iter(&self) -> impl Iterator<Item = Result<&Node, crate::Error>> {
        let bucket_heads = self.storage.get_object::<BucketHeads>(0).unwrap();
        bucket_heads.iter().flat_map(|head| self.iterate_bucket(head.load(Relaxed)))
    }

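    /// Iterates over the entries in a single bucket, reporting each address at
    /// most once and flagging out-of-range node indices as errors.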
    fn iterate_bucket(
        &self,
        head: NodeIndex,
    ) -> impl Iterator<Item = Result<&Node, crate::Error>> {
        let mut curr_index = head;
        let mut seen_addresses = HashSet::new();
        std::iter::from_fn(move || {
            while curr_index != NODE_INVALID {
                if (curr_index as usize) < self.max_num_nodes {
                    let byte_offset =
                        size_of::<BucketHeads>() + curr_index as usize * size_of::<Node>();
                    let curr_data = self.storage.get_object::<Node>(byte_offset).unwrap();
                    curr_index = curr_data.next.load(Relaxed);

                    // Skip duplicate addresses: while a replace operation is in
                    // progress, both the old and the new node are linked in the
                    // bucket, and only the most recent one must be reported.
                    if seen_addresses.insert(curr_data.address) {
                        return Some(Ok(curr_data));
                    }
                } else {
                    // The node index is out of range: the table is corrupted.
                    return Some(Err(crate::Error::InvalidInput));
                }
            }

            None
        })
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use assert_matches::assert_matches;
    use std::alloc::Layout;
    use std::collections::HashMap;

    // Insert enough allocations that every bucket is exercised several times.
    const NUM_ITERATIONS: usize = NUM_BUCKETS * 10;

    // Size the table with some spare capacity on top of NUM_ITERATIONS.
    const NUM_NODES: usize = NUM_ITERATIONS + 100;

    const THREAD_INFO_RESOURCE_KEY_1: ResourceKey = ResourceKey::from_raw(0x1122);
    const THREAD_INFO_RESOURCE_KEY_2: ResourceKey = ResourceKey::from_raw(0x3344);
    const STACK_TRACE_RESOURCE_KEY_1: ResourceKey = ResourceKey::from_raw(0x1212);
    const STACK_TRACE_RESOURCE_KEY_2: ResourceKey = ResourceKey::from_raw(0x3434);

    struct TestStorage {
        vmo: zx::Vmo,
    }

    impl TestStorage {
        pub fn new(num_nodes: usize) -> (TestStorage, AllocationsTableWriter) {
            let nodes_layout = Layout::array::<Node>(num_nodes).unwrap();
            let (layout, nodes_offset) = Layout::new::<BucketHeads>().extend(nodes_layout).unwrap();
            assert_eq!(nodes_offset, size_of::<BucketHeads>());

            let vmo = zx::Vmo::create(layout.size() as u64).unwrap();

            // SAFETY: this is the only writer of the newly created VMO.
            let writer = unsafe { AllocationsTableWriter::new(&vmo) }.unwrap();

            (TestStorage { vmo }, writer)
        }

        fn create_reader(&self) -> AllocationsTableReader {
            let snapshot = self
                .vmo
                .create_child(
                    zx::VmoChildOptions::SNAPSHOT | zx::VmoChildOptions::NO_WRITE,
                    0,
                    self.vmo.get_size().unwrap(),
                )
                .unwrap();

            // SAFETY: the snapshot child cannot be modified after creation.
            unsafe { AllocationsTableReader::new(&snapshot) }.unwrap()
        }
    }

    #[test]
    fn test_cannot_insert_twice() {
        let (_storage, mut writer) = TestStorage::new(NUM_NODES);

        let result = writer.insert_allocation(
            0x1234,
            0x5678,
            THREAD_INFO_RESOURCE_KEY_1,
            STACK_TRACE_RESOURCE_KEY_1,
            zx::MonotonicInstant::ZERO,
        );
        assert_eq!(result, Ok(true));

        let result = writer.insert_allocation(
            0x1234,
            0x5678,
            THREAD_INFO_RESOURCE_KEY_1,
            STACK_TRACE_RESOURCE_KEY_1,
            zx::MonotonicInstant::ZERO,
        );
        assert_eq!(result, Ok(false));
    }

    #[test]
    fn test_cannot_erase_twice() {
        let (_storage, mut writer) = TestStorage::new(NUM_NODES);

        let result = writer.insert_allocation(
            0x1234,
            0x5678,
            THREAD_INFO_RESOURCE_KEY_1,
            STACK_TRACE_RESOURCE_KEY_1,
            zx::MonotonicInstant::ZERO,
        );
        assert_eq!(result, Ok(true));

        let result = writer.erase_allocation(0x1234);
        assert_eq!(result, Some(0x5678));

        let result = writer.erase_allocation(0x1234);
        assert_eq!(result, None);
    }

    #[test]
    fn test_out_of_space() {
        let (_storage, mut writer) = TestStorage::new(NUM_NODES);

        // Fill the table completely.
        for i in 0..NUM_NODES {
            let result = writer.insert_allocation(
                i as u64,
                1,
                THREAD_INFO_RESOURCE_KEY_1,
                STACK_TRACE_RESOURCE_KEY_1,
                zx::MonotonicInstant::ZERO,
            );
            assert_eq!(result, Ok(true));
        }

        // Inserting into a full table must fail.
        let result = writer.insert_allocation(
            NUM_NODES as u64,
            1,
            THREAD_INFO_RESOURCE_KEY_1,
            STACK_TRACE_RESOURCE_KEY_1,
            zx::MonotonicInstant::ZERO,
        );
        assert_eq!(result, Err(crate::Error::OutOfSpace));

        // Erasing an entry must make it possible to insert again.
        let result = writer.erase_allocation(0);
        assert_eq!(result, Some(1));
        let result = writer.insert_allocation(
            NUM_NODES as u64,
            1,
            THREAD_INFO_RESOURCE_KEY_1,
            STACK_TRACE_RESOURCE_KEY_1,
            zx::MonotonicInstant::ZERO,
        );
        assert_eq!(result, Ok(true));
    }

    #[test]
    fn test_loop_insert_then_erase() {
        let (_storage, mut writer) = TestStorage::new(NUM_NODES);

        for i in 0..NUM_ITERATIONS {
            let result = writer.insert_allocation(
                i as u64,
                1,
                THREAD_INFO_RESOURCE_KEY_1,
                STACK_TRACE_RESOURCE_KEY_1,
                zx::MonotonicInstant::ZERO,
            );
            assert_eq!(result, Ok(true), "failed to insert 0x{:x}", i);

            let result = writer.erase_allocation(i as u64);
            assert_eq!(result, Some(1), "failed to erase 0x{:x}", i);
        }
    }

    #[test]
    fn test_bulk_insert_then_erase_same_order() {
        let (_storage, mut writer) = TestStorage::new(NUM_NODES);

        for i in 0..NUM_ITERATIONS {
            let result = writer.insert_allocation(
                i as u64,
                1,
                THREAD_INFO_RESOURCE_KEY_1,
                STACK_TRACE_RESOURCE_KEY_1,
                zx::MonotonicInstant::ZERO,
            );
            assert_eq!(result, Ok(true), "failed to insert 0x{:x}", i);
        }
        for i in 0..NUM_ITERATIONS {
            let result = writer.erase_allocation(i as u64);
            assert_eq!(result, Some(1), "failed to erase 0x{:x}", i);
        }
    }

    #[test]
    fn test_bulk_insert_then_erase_reverse_order() {
        let (_storage, mut writer) = TestStorage::new(NUM_NODES);

        for i in 0..NUM_ITERATIONS {
            let result = writer.insert_allocation(
                i as u64,
                1,
                THREAD_INFO_RESOURCE_KEY_1,
                STACK_TRACE_RESOURCE_KEY_1,
                zx::MonotonicInstant::ZERO,
            );
            assert_eq!(result, Ok(true), "failed to insert 0x{:x}", i);
        }
        for i in (0..NUM_ITERATIONS).rev() {
            let result = writer.erase_allocation(i as u64);
            assert_eq!(result, Some(1), "failed to erase 0x{:x}", i);
        }
    }

    #[test]
    fn test_read_empty() {
        let (storage, _writer) = TestStorage::new(NUM_NODES);

        let reader = storage.create_reader();
        assert_eq!(reader.iter().count(), 0);
    }

    #[test]
    fn test_read_populated() {
        let (storage, mut writer) = TestStorage::new(NUM_NODES);

        // Insert allocations with a mix of keys and timestamps.
        let mut expected_map = HashMap::new();
        for i in 0..NUM_ITERATIONS as u64 {
            let thread_info_key =
                if i % 4 >= 2 { THREAD_INFO_RESOURCE_KEY_1 } else { THREAD_INFO_RESOURCE_KEY_2 };
            let stack_trace_key =
                if i % 2 == 0 { STACK_TRACE_RESOURCE_KEY_1 } else { STACK_TRACE_RESOURCE_KEY_2 };
            let timestamp =
                zx::MonotonicInstant::from_nanos((NUM_ITERATIONS as i64 / 2) - (i as i64));
            let result =
                writer.insert_allocation(i, 1, thread_info_key, stack_trace_key, timestamp);
            assert_eq!(result, Ok(true), "failed to insert 0x{:x}", i);

            expected_map.insert(i, (1, thread_info_key, stack_trace_key, timestamp));
        }

        // Read all the allocations back, verifying that each is reported once.
        let reader = storage.create_reader();
        let mut actual_map = HashMap::new();
        for node in reader.iter() {
            let Node { address, size, thread_info_key, stack_trace_key, timestamp, .. } =
                node.unwrap();
            assert!(
                actual_map
                    .insert(*address, (*size, *thread_info_key, *stack_trace_key, *timestamp))
                    .is_none(),
                "address 0x{:x} was read more than once",
                address
            );
        }

        assert_eq!(actual_map, expected_map);
    }

    #[test]
    fn test_read_bad_bucket_head() {
        let (storage, mut writer) = TestStorage::new(NUM_NODES);

        // Corrupt a bucket head with an out-of-range node index.
        writer.bucket_head_at(NUM_BUCKETS / 2).store(NODE_INVALID - 1, SeqCst);

        let reader = storage.create_reader();
        let contains_error = reader.iter().any(|e| e.is_err());

        assert!(contains_error);
    }

    #[test]
    fn test_read_bad_node_next() {
        let (storage, mut writer) = TestStorage::new(NUM_NODES);

        // Craft a bucket whose single node has an out-of-range `next` index.
        writer.bucket_head_at(NUM_BUCKETS / 2).store(0, SeqCst);
        *writer.node_at(0) = Node {
            next: AtomicNodeIndex::new(NODE_INVALID - 1),
            address: 0x1234,
            size: 0x5678,
            thread_info_key: THREAD_INFO_RESOURCE_KEY_1,
            stack_trace_key: STACK_TRACE_RESOURCE_KEY_1,
            timestamp: zx::MonotonicInstant::from_nanos(99999999),
        };

        let reader = storage.create_reader();
        let contains_error = reader.iter().any(|e| e.is_err());

        assert!(contains_error);
    }

    #[test]
    fn test_replace() {
        let (storage, mut writer) = TestStorage::new(NUM_NODES);

        let result = writer.insert_allocation(
            0x1234,
            0x1111,
            THREAD_INFO_RESOURCE_KEY_1,
            STACK_TRACE_RESOURCE_KEY_1,
            zx::MonotonicInstant::ZERO,
        );
        assert_eq!(result, Ok(true));

        // The initial allocation must be visible.
        let reader = storage.create_reader();
        let mut iter = reader.iter();
        assert_matches!(iter.next(), Some(Ok(Node { address: 0x1234, size: 0x1111, .. })));
        assert_matches!(iter.next(), None);

        let result = writer.replace_allocation_begin(
            0x1234,
            0x2222,
            THREAD_INFO_RESOURCE_KEY_2,
            STACK_TRACE_RESOURCE_KEY_2,
            zx::MonotonicInstant::ZERO,
        );
        let Ok(Some((new_node, old_node, old_size))) = result else {
            panic!("Update begin is supposed to succeed in this test, got {:?} instead", result)
        };
        assert_eq!(old_size, 0x1111);

        // While the replacement is in progress, only the new node must be reported.
        let reader = storage.create_reader();
        let mut iter = reader.iter();
        assert_matches!(iter.next(), Some(Ok(Node { address: 0x1234, size: 0x2222, .. })));
        assert_matches!(iter.next(), None);

        writer.replace_allocation_end(new_node, old_node);

        // After the replacement is complete, the result must be unchanged.
        let reader = storage.create_reader();
        let mut iter = reader.iter();
        assert_matches!(iter.next(), Some(Ok(Node { address: 0x1234, size: 0x2222, .. })));
        assert_matches!(iter.next(), None);
    }

    #[test]
    fn test_cannot_replace_nonexisting() {
        let (_storage, mut writer) = TestStorage::new(NUM_NODES);

        let result = writer.replace_allocation(
            0x1234,
            0x5678,
            THREAD_INFO_RESOURCE_KEY_1,
            STACK_TRACE_RESOURCE_KEY_1,
            zx::MonotonicInstant::ZERO,
        );
        assert_eq!(result, Ok(None));
    }
}