use static_assertions::const_assert;
use std::collections::HashSet;
use std::mem::{align_of, size_of};
use std::sync::atomic::AtomicU32;
use std::sync::atomic::Ordering::{Relaxed, SeqCst};

use crate::memory_mapped_vmo::{MemoryMappable, MemoryMappedVmo};
pub use crate::resources_table_v1::ResourceKey;

type NodeIndex = u32;
type AtomicNodeIndex = AtomicU32;
type BucketHeads = [AtomicNodeIndex; NUM_BUCKETS];
const NUM_BUCKETS: usize = 1 << 16;
const NODE_INVALID: NodeIndex = NodeIndex::MAX;

pub const MIN_ALIGNMENT: usize = align_of::<Node>();
const_assert!(MIN_ALIGNMENT % align_of::<BucketHeads>() == 0);

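/// A single entry in the allocations hash table.
///
/// Nodes live in the memory-mapped VMO, immediately after the bucket heads array, and are linked
/// into per-bucket singly-linked chains through `next`, which holds the index of the next node or
/// `NODE_INVALID` at the end of a chain. `next` is atomic because readers, presumably mapping the
/// same VMO from another process, may walk a chain while the writer updates it.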
#[repr(C)]
#[derive(Debug)]
pub struct Node {
    next: AtomicNodeIndex,
    pub address: u64,
    pub size: u64,
    pub timestamp: zx::MonotonicInstant,
    pub thread_info_key: ResourceKey,
    pub stack_trace_key: ResourceKey,
}

unsafe impl MemoryMappable for AtomicNodeIndex {}
unsafe impl MemoryMappable for Node {}

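/// Computes how many nodes fit in a buffer of `num_bytes`, after reserving space for the bucket
/// heads array at its start. Fails if the buffer cannot even hold the bucket heads, or if it
/// would hold more nodes than a `NodeIndex` can address.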
fn compute_nodes_count(num_bytes: usize) -> Result<usize, crate::Error> {
    let Some(nodes_size) = num_bytes.checked_sub(size_of::<BucketHeads>()) else {
        return Err(crate::Error::BufferTooSmall);
    };

    let num_nodes = nodes_size / size_of::<Node>();
    if num_nodes > NODE_INVALID as usize {
        return Err(crate::Error::BufferTooBig);
    }

    Ok(num_nodes)
}

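/// Write endpoint of the allocations table: a fixed-capacity hash table (separate chaining over
/// `NUM_BUCKETS` buckets) keyed by allocation address and backed by a memory-mapped VMO.
///
/// Node storage is handed out from a `watermark` of never-used nodes plus a `free_list_head`
/// chain of recycled ones; both are kept in this struct only, so a single writer is assumed.
///
/// A minimal usage sketch (the `vmo`, resource keys and sizes below are hypothetical; see
/// `TestStorage::new` in the tests for a suitable VMO layout):
///
/// ```ignore
/// let mut writer = AllocationsTableWriter::new(&vmo)?;
/// writer.insert_allocation(0x1000, 64, thread_info_key, stack_trace_key, timestamp)?;
/// assert_eq!(writer.erase_allocation(0x1000), Some(64));
/// ```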
pub struct AllocationsTableWriter {
    storage: MemoryMappedVmo,

    watermark: NodeIndex,
    max_num_nodes: usize,
    free_list_head: NodeIndex,
}

impl AllocationsTableWriter {
    pub fn new(vmo: &zx::Vmo) -> Result<AllocationsTableWriter, crate::Error> {
        let storage = MemoryMappedVmo::new_readwrite(vmo)?;
        let max_num_nodes = compute_nodes_count(storage.vmo_size())?;

        let mut result = AllocationsTableWriter {
            storage,
            watermark: 0,
            max_num_nodes,
            free_list_head: NODE_INVALID,
        };

        for bucket_index in 0..NUM_BUCKETS {
            result.bucket_head_at(bucket_index).store(NODE_INVALID, SeqCst);
        }

        Ok(result)
    }

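    /// Maps an allocation address to its bucket. The low 4 bits are dropped before taking the
    /// modulo, presumably because they carry little entropy for aligned allocations.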
    fn compute_bucket_index(address: u64) -> usize {
        let tmp = (address >> 4) as usize;
        tmp % NUM_BUCKETS
    }

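    /// Returns a mutable reference to the head of the given bucket. The bucket heads array sits
    /// at offset 0 of the mapped VMO.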
    fn bucket_head_at(&mut self, bucket_index: usize) -> &mut AtomicNodeIndex {
        let bucket_heads = self.storage.get_object_mut::<BucketHeads>(0).unwrap();
        &mut bucket_heads[bucket_index]
    }

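    /// Returns a mutable reference to the node at `node_index`. Nodes are laid out contiguously
    /// right after the bucket heads array.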
    fn node_at(&mut self, node_index: NodeIndex) -> &mut Node {
        let byte_offset = size_of::<BucketHeads>() + node_index as usize * size_of::<Node>();
        self.storage.get_object_mut::<Node>(byte_offset).unwrap()
    }

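    /// Inserts a new allocation, unless one with the same address is already tracked.
    ///
    /// Returns `Ok(true)` on insertion and `Ok(false)` if the address was already present. The
    /// node is fully populated before the bucket head is published with a `SeqCst` store, which
    /// appears intended to keep concurrent readers from observing a partially-initialized entry.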
    pub fn insert_allocation(
        &mut self,
        address: u64,
        size: u64,
        thread_info_key: ResourceKey,
        stack_trace_key: ResourceKey,
        timestamp: zx::MonotonicInstant,
    ) -> Result<bool, crate::Error> {
        let bucket_index = Self::compute_bucket_index(address);
        let old_head = self.bucket_head_at(bucket_index).load(Relaxed);

        let mut curr_index = old_head;
        while curr_index != NODE_INVALID {
            let curr_data = self.node_at(curr_index);
            if curr_data.address == address {
                return Ok(false);
            }
            curr_index = curr_data.next.load(Relaxed);
        }

        let new_index = self.pop_free_node()?;
        *self.node_at(new_index) = Node {
            address,
            size,
            timestamp,
            thread_info_key,
            stack_trace_key,
            next: AtomicNodeIndex::new(old_head),
        };
        self.bucket_head_at(bucket_index).store(new_index, SeqCst);
        Ok(true)
    }

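    /// Removes the allocation with the given address, unlinking its node and recycling it onto
    /// the free list. Returns the size of the erased allocation, or `None` if the address is not
    /// tracked.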
    pub fn erase_allocation(&mut self, address: u64) -> Option<u64> {
        let bucket_index = Self::compute_bucket_index(address);

        let mut prev_index = None;
        let mut curr_index = self.bucket_head_at(bucket_index).load(Relaxed);
        while curr_index != NODE_INVALID {
            let curr_data = self.node_at(curr_index);
            let curr_data_size = curr_data.size;
            let curr_data_next = curr_data.next.load(Relaxed);

            if curr_data.address == address {
                if let Some(prev_index) = prev_index {
                    self.node_at(prev_index).next.store(curr_data_next, SeqCst);
                } else {
                    self.bucket_head_at(bucket_index).store(curr_data_next, SeqCst);
                }
                self.push_free_node(curr_index);
                return Some(curr_data_size);
            }

            prev_index = Some(curr_index);
            curr_index = curr_data_next;
        }

        None
    }

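    /// Replaces the entry for `address` with a new size, resource keys and timestamp, recycling
    /// the old node. Returns the previous size, or `Ok(None)` if the address is not tracked. The
    /// two-step implementation below appears designed to keep the entry visible to concurrent
    /// readers for the whole duration of the swap.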
    pub fn replace_allocation(
        &mut self,
        address: u64,
        size: u64,
        thread_info_key: ResourceKey,
        stack_trace_key: ResourceKey,
        timestamp: zx::MonotonicInstant,
    ) -> Result<Option<u64>, crate::Error> {
        let Some((new_node, old_node, old_size)) = self.replace_allocation_begin(
            address,
            size,
            thread_info_key,
            stack_trace_key,
            timestamp,
        )?
        else {
            return Ok(None);
        };

        self.replace_allocation_end(new_node, old_node);
        Ok(Some(old_size))
    }

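    /// First half of a replacement: links a new node for `address` at the head of the bucket
    /// while the old node is still reachable, so a reader walking the chain finds either the new
    /// entry or the old one (readers deduplicate by address). Returns the new and old node
    /// indices plus the old size, or `Ok(None)` if the address is not tracked.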
    fn replace_allocation_begin(
        &mut self,
        address: u64,
        size: u64,
        thread_info_key: ResourceKey,
        stack_trace_key: ResourceKey,
        timestamp: zx::MonotonicInstant,
    ) -> Result<Option<(NodeIndex, NodeIndex, u64)>, crate::Error> {
        let bucket_index = Self::compute_bucket_index(address);
        let old_head = self.bucket_head_at(bucket_index).load(Relaxed);

        let (old_index, old_size) = 'search: {
            let mut curr_index = old_head;
            while curr_index != NODE_INVALID {
                let curr_data = self.node_at(curr_index);
                if curr_data.address == address {
                    break 'search (curr_index, curr_data.size);
                }
                curr_index = curr_data.next.load(Relaxed);
            }

            return Ok(None);
        };

        let new_index = self.pop_free_node()?;
        *self.node_at(new_index) = Node {
            address,
            size,
            timestamp,
            thread_info_key,
            stack_trace_key,
            next: AtomicNodeIndex::new(old_head),
        };
        self.bucket_head_at(bucket_index).store(new_index, SeqCst);
        Ok(Some((new_index, old_index, old_size)))
    }

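    /// Second half of a replacement: unlinks `old_node` from the chain that now starts at
    /// `new_node` and recycles it onto the free list.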
    fn replace_allocation_end(&mut self, new_node: NodeIndex, old_node: NodeIndex) {
        let tail = self.node_at(old_node).next.load(Relaxed);

        let mut scan_node = new_node;
        loop {
            assert_ne!(scan_node, NODE_INVALID, "the old node must be a successor of new node");
            let scan_data = self.node_at(scan_node);
            if scan_data.next.load(Relaxed) == old_node {
                scan_data.next.store(tail, SeqCst);
                self.push_free_node(old_node);
                return;
            }
            scan_node = scan_data.next.load(Relaxed);
        }
    }

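    /// Prepends a node to the writer-private free list, reusing the node's `next` field as the
    /// free-list link. `Relaxed` ordering appears sufficient here because readers never traverse
    /// the free list, only the bucket chains.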
    fn push_free_node(&mut self, index: NodeIndex) {
        let current_head = self.free_list_head;

        let node_data = self.node_at(index);
        node_data.next.store(current_head, Relaxed);

        self.free_list_head = index;
    }

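    /// Obtains a node for a new entry: recycled nodes from the free list are preferred, otherwise
    /// the watermark is bumped to carve a never-used node out of the VMO. Fails with `OutOfSpace`
    /// once both sources are exhausted.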
    fn pop_free_node(&mut self) -> Result<NodeIndex, crate::Error> {
        if self.free_list_head != NODE_INVALID {
            let result = self.free_list_head;
            self.free_list_head = self.node_at(result).next.load(Relaxed);
            Ok(result)
        } else if (self.watermark as usize) < self.max_num_nodes {
            let result = self.watermark;
            self.watermark += 1;
            Ok(result)
        } else {
            Err(crate::Error::OutOfSpace)
        }
    }
}

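/// Read-only endpoint of the allocations table, mapping the same VMO as the writer. It walks the
/// bucket chains without any locking, which is presumably why it bounds-checks node indices and
/// deduplicates addresses instead of trusting the data to be self-consistent.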
pub struct AllocationsTableReader {
    storage: MemoryMappedVmo,
    max_num_nodes: usize,
}

impl AllocationsTableReader {
    pub fn new(vmo: &zx::Vmo) -> Result<AllocationsTableReader, crate::Error> {
        let storage = MemoryMappedVmo::new_readonly(vmo)?;
        let max_num_nodes = compute_nodes_count(storage.vmo_size())?;

        Ok(AllocationsTableReader { storage, max_num_nodes })
    }

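    /// Iterates over all stored allocations, visiting every bucket in turn. Entries are
    /// deduplicated by address, and an `Err(crate::Error::InvalidInput)` item is yielded if a
    /// chain contains an out-of-range node index.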
    pub fn iter(&self) -> impl Iterator<Item = Result<&Node, crate::Error>> {
        let bucket_heads = self.storage.get_object::<BucketHeads>(0).unwrap();
        bucket_heads.iter().flat_map(|head| self.iterate_bucket(head.load(Relaxed)))
    }

    fn iterate_bucket(&self, head: NodeIndex) -> impl Iterator<Item = Result<&Node, crate::Error>> {
        let mut curr_index = head;
        let mut seen_addresses = HashSet::new();
        std::iter::from_fn(move || {
            while curr_index != NODE_INVALID {
                if (curr_index as usize) < self.max_num_nodes {
                    let byte_offset =
                        size_of::<BucketHeads>() + curr_index as usize * size_of::<Node>();
                    let curr_data = self.storage.get_object::<Node>(byte_offset).unwrap();
                    curr_index = curr_data.next.load(Relaxed);

                    if seen_addresses.insert(curr_data.address) {
                        return Some(Ok(curr_data));
                    }
                } else {
                    return Some(Err(crate::Error::InvalidInput));
                }
            }

            None
        })
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use assert_matches::assert_matches;
    use std::alloc::Layout;
    use std::collections::HashMap;

    const NUM_ITERATIONS: usize = NUM_BUCKETS * 10;

    const NUM_NODES: usize = NUM_ITERATIONS + 100;

    const THREAD_INFO_RESOURCE_KEY_1: ResourceKey = ResourceKey::from_raw(0x1122);
    const THREAD_INFO_RESOURCE_KEY_2: ResourceKey = ResourceKey::from_raw(0x3344);
    const STACK_TRACE_RESOURCE_KEY_1: ResourceKey = ResourceKey::from_raw(0x1212);
    const STACK_TRACE_RESOURCE_KEY_2: ResourceKey = ResourceKey::from_raw(0x3434);

    struct TestStorage {
        vmo: zx::Vmo,
    }

    impl TestStorage {
        pub fn new(num_nodes: usize) -> TestStorage {
            let nodes_layout = Layout::array::<Node>(num_nodes).unwrap();
            let (layout, nodes_offset) = Layout::new::<BucketHeads>().extend(nodes_layout).unwrap();
            assert_eq!(nodes_offset, size_of::<BucketHeads>());

            let vmo = zx::Vmo::create(layout.size() as u64).unwrap();
            TestStorage { vmo }
        }

        fn create_writer(&self) -> AllocationsTableWriter {
            AllocationsTableWriter::new(&self.vmo).unwrap()
        }

        fn create_reader(&self) -> AllocationsTableReader {
            AllocationsTableReader::new(&self.vmo).unwrap()
        }
    }

    #[test]
    fn test_cannot_insert_twice() {
        let storage = TestStorage::new(NUM_NODES);
        let mut writer = storage.create_writer();

        let result = writer.insert_allocation(
            0x1234,
            0x5678,
            THREAD_INFO_RESOURCE_KEY_1,
            STACK_TRACE_RESOURCE_KEY_1,
            zx::MonotonicInstant::ZERO,
        );
        assert_eq!(result, Ok(true));

        let result = writer.insert_allocation(
            0x1234,
            0x5678,
            THREAD_INFO_RESOURCE_KEY_1,
            STACK_TRACE_RESOURCE_KEY_1,
            zx::MonotonicInstant::ZERO,
        );
        assert_eq!(result, Ok(false));
    }

    #[test]
    fn test_cannot_erase_twice() {
        let storage = TestStorage::new(NUM_NODES);
        let mut writer = storage.create_writer();

        let result = writer.insert_allocation(
            0x1234,
            0x5678,
            THREAD_INFO_RESOURCE_KEY_1,
            STACK_TRACE_RESOURCE_KEY_1,
            zx::MonotonicInstant::ZERO,
        );
        assert_eq!(result, Ok(true));

        let result = writer.erase_allocation(0x1234);
        assert_eq!(result, Some(0x5678));

        let result = writer.erase_allocation(0x1234);
        assert_eq!(result, None);
    }

    #[test]
    fn test_out_of_space() {
        let storage = TestStorage::new(NUM_NODES);
        let mut writer = storage.create_writer();

        for i in 0..NUM_NODES {
            let result = writer.insert_allocation(
                i as u64,
                1,
                THREAD_INFO_RESOURCE_KEY_1,
                STACK_TRACE_RESOURCE_KEY_1,
                zx::MonotonicInstant::ZERO,
            );
            assert_eq!(result, Ok(true));
        }

        let result = writer.insert_allocation(
            NUM_NODES as u64,
            1,
            THREAD_INFO_RESOURCE_KEY_1,
            STACK_TRACE_RESOURCE_KEY_1,
            zx::MonotonicInstant::ZERO,
        );
        assert_eq!(result, Err(crate::Error::OutOfSpace));

        let result = writer.erase_allocation(0);
        assert_eq!(result, Some(1));
        let result = writer.insert_allocation(
            NUM_NODES as u64,
            1,
            THREAD_INFO_RESOURCE_KEY_1,
            STACK_TRACE_RESOURCE_KEY_1,
            zx::MonotonicInstant::ZERO,
        );
        assert_eq!(result, Ok(true));
    }

    #[test]
    fn test_loop_insert_then_erase() {
        let storage = TestStorage::new(NUM_NODES);
        let mut writer = storage.create_writer();

        for i in 0..NUM_ITERATIONS {
            let result = writer.insert_allocation(
                i as u64,
                1,
                THREAD_INFO_RESOURCE_KEY_1,
                STACK_TRACE_RESOURCE_KEY_1,
                zx::MonotonicInstant::ZERO,
            );
            assert_eq!(result, Ok(true), "failed to insert 0x{:x}", i);

            let result = writer.erase_allocation(i as u64);
            assert_eq!(result, Some(1), "failed to erase 0x{:x}", i);
        }
    }

    #[test]
    fn test_bulk_insert_then_erase_same_order() {
        let storage = TestStorage::new(NUM_NODES);
        let mut writer = storage.create_writer();

        for i in 0..NUM_ITERATIONS {
            let result = writer.insert_allocation(
                i as u64,
                1,
                THREAD_INFO_RESOURCE_KEY_1,
                STACK_TRACE_RESOURCE_KEY_1,
                zx::MonotonicInstant::ZERO,
            );
            assert_eq!(result, Ok(true), "failed to insert 0x{:x}", i);
        }
        for i in 0..NUM_ITERATIONS {
            let result = writer.erase_allocation(i as u64);
            assert_eq!(result, Some(1), "failed to erase 0x{:x}", i);
        }
    }

    #[test]
    fn test_bulk_insert_then_erase_reverse_order() {
        let storage = TestStorage::new(NUM_NODES);
        let mut writer = storage.create_writer();

        for i in 0..NUM_ITERATIONS {
            let result = writer.insert_allocation(
                i as u64,
                1,
                THREAD_INFO_RESOURCE_KEY_1,
                STACK_TRACE_RESOURCE_KEY_1,
                zx::MonotonicInstant::ZERO,
            );
            assert_eq!(result, Ok(true), "failed to insert 0x{:x}", i);
        }
        for i in (0..NUM_ITERATIONS).rev() {
            let result = writer.erase_allocation(i as u64);
            assert_eq!(result, Some(1), "failed to erase 0x{:x}", i);
        }
    }

    #[test]
    fn test_read_empty() {
        let storage = TestStorage::new(NUM_NODES);

        storage.create_writer();

        let reader = storage.create_reader();
        assert_eq!(reader.iter().count(), 0);
    }

    #[test]
    fn test_read_populated() {
        let storage = TestStorage::new(NUM_NODES);

        let mut writer = storage.create_writer();
        let mut expected_map = HashMap::new();
        for i in 0..NUM_ITERATIONS as u64 {
            let thread_info_key =
                if i % 4 >= 2 { THREAD_INFO_RESOURCE_KEY_1 } else { THREAD_INFO_RESOURCE_KEY_2 };
            let stack_trace_key =
                if i % 2 == 0 { STACK_TRACE_RESOURCE_KEY_1 } else { STACK_TRACE_RESOURCE_KEY_2 };
            let timestamp =
                zx::MonotonicInstant::from_nanos((NUM_ITERATIONS as i64 / 2) - (i as i64));
            let result =
                writer.insert_allocation(i, 1, thread_info_key, stack_trace_key, timestamp);
            assert_eq!(result, Ok(true), "failed to insert 0x{:x}", i);

            expected_map.insert(i, (1, thread_info_key, stack_trace_key, timestamp));
        }

        let reader = storage.create_reader();
        let mut actual_map = HashMap::new();
        for node in reader.iter() {
            let Node { address, size, thread_info_key, stack_trace_key, timestamp, .. } =
                node.unwrap();
            assert!(
                actual_map
                    .insert(*address, (*size, *thread_info_key, *stack_trace_key, *timestamp))
                    .is_none(),
                "address 0x{:x} was read more than once",
                address
            );
        }

        assert_eq!(actual_map, expected_map);
    }

    #[test]
    fn test_read_bad_bucket_head() {
        let storage = TestStorage::new(NUM_NODES);

        let mut writer = storage.create_writer();
        writer.bucket_head_at(NUM_BUCKETS / 2).store(NODE_INVALID - 1, SeqCst);

        let reader = storage.create_reader();
        let contains_error = reader.iter().any(|e| e.is_err());

        assert!(contains_error);
    }

    #[test]
    fn test_read_bad_node_next() {
        let storage = TestStorage::new(NUM_NODES);

        let mut writer = storage.create_writer();
        writer.bucket_head_at(NUM_BUCKETS / 2).store(0, SeqCst);
        *writer.node_at(0) = Node {
            next: AtomicNodeIndex::new(NODE_INVALID - 1),
            address: 0x1234,
            size: 0x5678,
            thread_info_key: THREAD_INFO_RESOURCE_KEY_1,
            stack_trace_key: STACK_TRACE_RESOURCE_KEY_1,
            timestamp: zx::MonotonicInstant::from_nanos(99999999),
        };

        let reader = storage.create_reader();
        let contains_error = reader.iter().any(|e| e.is_err());

        assert!(contains_error);
    }

    #[test]
    fn test_replace() {
        let storage = TestStorage::new(NUM_NODES);
        let mut writer = storage.create_writer();

        let result = writer.insert_allocation(
            0x1234,
            0x1111,
            THREAD_INFO_RESOURCE_KEY_1,
            STACK_TRACE_RESOURCE_KEY_1,
            zx::MonotonicInstant::ZERO,
        );
        assert_eq!(result, Ok(true));

        let reader = storage.create_reader();
        let mut iter = reader.iter();
        assert_matches!(iter.next(), Some(Ok(Node { address: 0x1234, size: 0x1111, .. })));
        assert_matches!(iter.next(), None);

        let result = writer.replace_allocation_begin(
            0x1234,
            0x2222,
            THREAD_INFO_RESOURCE_KEY_2,
            STACK_TRACE_RESOURCE_KEY_2,
            zx::MonotonicInstant::ZERO,
        );
        let Ok(Some((new_node, old_node, old_size))) = result else {
            panic!("Update begin is supposed to succeed in this test, got {:?} instead", result)
        };
        assert_eq!(old_size, 0x1111);

        let reader = storage.create_reader();
        let mut iter = reader.iter();
        assert_matches!(iter.next(), Some(Ok(Node { address: 0x1234, size: 0x2222, .. })));
        assert_matches!(iter.next(), None);

        writer.replace_allocation_end(new_node, old_node);

        let reader = storage.create_reader();
        let mut iter = reader.iter();
        assert_matches!(iter.next(), Some(Ok(Node { address: 0x1234, size: 0x2222, .. })));
        assert_matches!(iter.next(), None);
    }

    #[test]
    fn test_cannot_replace_nonexisting() {
        let storage = TestStorage::new(NUM_NODES);
        let mut writer = storage.create_writer();

        let result = writer.replace_allocation(
            0x1234,
            0x5678,
            THREAD_INFO_RESOURCE_KEY_1,
            STACK_TRACE_RESOURCE_KEY_1,
            zx::MonotonicInstant::ZERO,
        );
        assert_eq!(result, Ok(None));
    }
}