use static_assertions::const_assert;
use std::collections::HashSet;
use std::mem::{align_of, size_of};
use std::sync::atomic::AtomicU32;
use std::sync::atomic::Ordering::{Relaxed, SeqCst};
use crate::memory_mapped_vmo::{MemoryMappable, MemoryMappedVmo};
pub use crate::resources_table_v1::ResourceKey;
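// Allocation records are stored as a pool of nodes that follows the bucket
// heads in the same VMO. Nodes are addressed by 32-bit indices; the maximum
// index value is reserved as the sentinel that terminates a bucket's chain.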
type NodeIndex = u32;
type AtomicNodeIndex = AtomicU32;
type BucketHeads = [AtomicNodeIndex; NUM_BUCKETS];
const NUM_BUCKETS: usize = 1 << 16;
const NODE_INVALID: NodeIndex = NodeIndex::MAX;
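// Mappings of the table must be aligned at least as strictly as a Node. The
// assertion below checks that this alignment also satisfies the bucket heads,
// which live at offset zero.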
pub const MIN_ALIGNMENT: usize = align_of::<Node>();
const_assert!(MIN_ALIGNMENT % align_of::<BucketHeads>() == 0);
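/// A single allocation record.
///
/// Nodes that hash to the same bucket are linked through `next` into a
/// singly-linked chain terminated by `NODE_INVALID`.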
#[repr(C)]
#[derive(Debug)]
pub struct Node {
next: AtomicNodeIndex,
pub address: u64,
pub size: u64,
pub timestamp: zx::MonotonicInstant,
pub thread_info_key: ResourceKey,
pub stack_trace_key: ResourceKey,
}
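// SAFETY: both types are assumed to consist of plain integer data (and
// atomics thereof), so any bit pattern found in shared memory is a valid
// value.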
unsafe impl MemoryMappable for AtomicNodeIndex {}
unsafe impl MemoryMappable for Node {}
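/// Computes how many nodes fit in a buffer of `num_bytes`, after reserving
/// space for the bucket heads at its start.
///
/// Fails if the buffer cannot hold the bucket heads at all, or if it would
/// hold more nodes than a `NodeIndex` can address.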
fn compute_nodes_count(num_bytes: usize) -> Result<usize, crate::Error> {
let Some(nodes_size) = num_bytes.checked_sub(size_of::<BucketHeads>()) else {
return Err(crate::Error::BufferTooSmall);
};
let num_nodes = nodes_size / size_of::<Node>();
if num_nodes > NODE_INVALID as usize {
return Err(crate::Error::BufferTooBig);
}
Ok(num_nodes)
}
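/// Write access to an allocations table stored in a VMO.
///
/// The writer owns a fixed-size pool of nodes and links them into per-bucket
/// chains keyed by a hash of the allocation address. Freed nodes are recycled
/// through a writer-private free list.
///
/// A minimal usage sketch (illustrative only; `vmo`, `thread_key` and
/// `stack_key` are assumed to be obtained elsewhere):
///
/// ```ignore
/// let mut writer = AllocationsTableWriter::new(&vmo)?;
/// writer.insert_allocation(0x1000, 64, thread_key, stack_key, zx::MonotonicInstant::ZERO)?;
/// assert_eq!(writer.erase_allocation(0x1000), Some(64));
/// ```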
pub struct AllocationsTableWriter {
storage: MemoryMappedVmo,
watermark: NodeIndex,
max_num_nodes: usize,
free_list_head: NodeIndex,
}
impl AllocationsTableWriter {
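/// Maps `vmo` read-write and initializes every bucket head to the empty
/// chain. Note that this discards whatever the VMO previously contained.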
pub fn new(vmo: &zx::Vmo) -> Result<AllocationsTableWriter, crate::Error> {
let storage = MemoryMappedVmo::new_readwrite(vmo)?;
let max_num_nodes = compute_nodes_count(storage.vmo_size())?;
let mut result = AllocationsTableWriter {
storage,
watermark: 0,
max_num_nodes,
free_list_head: NODE_INVALID,
};
for bucket_index in 0..NUM_BUCKETS {
result.bucket_head_at(bucket_index).store(NODE_INVALID, SeqCst);
}
Ok(result)
}
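// Maps an allocation address to its bucket. The low four bits are discarded
// first, presumably because allocations are at least 16-byte aligned and
// those bits would carry no entropy.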
fn compute_bucket_index(address: u64) -> usize {
((address >> 4) as usize) % NUM_BUCKETS
}
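// Returns the head of the given bucket's chain, stored at the start of the
// mapped VMO.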
fn bucket_head_at(&mut self, bucket_index: usize) -> &mut AtomicNodeIndex {
let bucket_heads = self.storage.get_object_mut::<BucketHeads>(0).unwrap();
&mut bucket_heads[bucket_index]
}
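// Returns the node at `node_index`, stored in the pool that follows the
// bucket heads.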
fn node_at(&mut self, node_index: NodeIndex) -> &mut Node {
let byte_offset = size_of::<BucketHeads>() + node_index as usize * size_of::<Node>();
self.storage.get_object_mut::<Node>(byte_offset).unwrap()
}
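/// Inserts a new allocation record unless one with the same address exists.
///
/// Returns `Ok(true)` on success, `Ok(false)` if the address is already
/// present, or an error if the node pool is exhausted. The new node is fully
/// initialized before the `SeqCst` store makes it reachable as the bucket
/// head.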
pub fn insert_allocation(
&mut self,
address: u64,
size: u64,
thread_info_key: ResourceKey,
stack_trace_key: ResourceKey,
timestamp: zx::MonotonicInstant,
) -> Result<bool, crate::Error> {
let bucket_index = Self::compute_bucket_index(address);
let old_head = self.bucket_head_at(bucket_index).load(Relaxed);
let mut curr_index = old_head;
while curr_index != NODE_INVALID {
let curr_data = self.node_at(curr_index);
if curr_data.address == address {
return Ok(false);
}
curr_index = curr_data.next.load(Relaxed);
}
let new_index = self.pop_free_node()?;
*self.node_at(new_index) = Node {
address,
size,
timestamp,
thread_info_key,
stack_trace_key,
next: AtomicNodeIndex::new(old_head),
};
self.bucket_head_at(bucket_index).store(new_index, SeqCst);
Ok(true)
}
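/// Removes the record with the given address and returns its size, or `None`
/// if the address is not in the table. The node is unlinked from its bucket's
/// chain and then recycled onto the free list.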
pub fn erase_allocation(&mut self, address: u64) -> Option<u64> {
let bucket_index = Self::compute_bucket_index(address);
let mut prev_index = None;
let mut curr_index = self.bucket_head_at(bucket_index).load(Relaxed);
while curr_index != NODE_INVALID {
let curr_data = self.node_at(curr_index);
let curr_data_size = curr_data.size;
let curr_data_next = curr_data.next.load(Relaxed);
if curr_data.address == address {
if let Some(prev_index) = prev_index {
self.node_at(prev_index).next.store(curr_data_next, SeqCst);
} else {
self.bucket_head_at(bucket_index).store(curr_data_next, SeqCst);
}
self.push_free_node(curr_index);
return Some(curr_data_size);
}
prev_index = Some(curr_index);
curr_index = curr_data.next.load(Relaxed);
}
None
}
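/// Replaces the record at `address` with new data and returns the old size,
/// or `Ok(None)` if no such record exists.
///
/// The work is split into a begin/end pair: between the two halves, both the
/// old and the new node are linked into the same chain, so the address never
/// disappears from concurrent readers (which deduplicate by address).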
pub fn replace_allocation(
&mut self,
address: u64,
size: u64,
thread_info_key: ResourceKey,
stack_trace_key: ResourceKey,
timestamp: zx::MonotonicInstant,
) -> Result<Option<u64>, crate::Error> {
let Some((new_node, old_node, old_size)) = self.replace_allocation_begin(
address,
size,
thread_info_key,
stack_trace_key,
timestamp,
)?
else {
return Ok(None);
};
self.replace_allocation_end(new_node, old_node);
Ok(Some(old_size))
}
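// First half of a replacement: locates the old node and prepends a fully
// initialized replacement to the same bucket chain. The old node stays linked
// until replace_allocation_end runs.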
fn replace_allocation_begin(
&mut self,
address: u64,
size: u64,
thread_info_key: ResourceKey,
stack_trace_key: ResourceKey,
timestamp: zx::MonotonicInstant,
) -> Result<Option<(NodeIndex, NodeIndex, u64)>, crate::Error> {
let bucket_index = Self::compute_bucket_index(address);
let old_head = self.bucket_head_at(bucket_index).load(Relaxed);
let (old_index, old_size) = 'search: {
let mut curr_index = old_head;
while curr_index != NODE_INVALID {
let curr_data = self.node_at(curr_index);
if curr_data.address == address {
break 'search (curr_index, curr_data.size);
}
curr_index = curr_data.next.load(Relaxed);
}
return Ok(None);
};
let new_index = self.pop_free_node()?;
*self.node_at(new_index) = Node {
address,
size,
timestamp,
thread_info_key,
stack_trace_key,
next: AtomicNodeIndex::new(old_head),
};
self.bucket_head_at(bucket_index).store(new_index, SeqCst);
Ok(Some((new_index, old_index, old_size)))
}
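// Second half of a replacement: walks the chain starting at the new node,
// splices the old node out, and recycles it. Panics if the old node is not
// reachable from the new one.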
fn replace_allocation_end(&mut self, new_node: NodeIndex, old_node: NodeIndex) {
let tail = self.node_at(old_node).next.load(Relaxed);
let mut scan_node = new_node;
loop {
assert_ne!(scan_node, NODE_INVALID, "the old node must be a successor of the new node");
let scan_data = self.node_at(scan_node);
if scan_data.next.load(Relaxed) == old_node {
scan_data.next.store(tail, SeqCst);
self.push_free_node(old_node);
return;
}
scan_node = scan_data.next.load(Relaxed);
}
}
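// Pushes a recycled node onto the free list. The list reuses the nodes' next
// fields, but its head lives in writer-private memory rather than in the VMO.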
fn push_free_node(&mut self, index: NodeIndex) {
let current_head = self.free_list_head;
let node_data = self.node_at(index);
node_data.next.store(current_head, Relaxed);
self.free_list_head = index;
}
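// Hands out a free node, preferring recycled nodes from the free list and
// otherwise advancing the watermark into never-used pool space.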
fn pop_free_node(&mut self) -> Result<NodeIndex, crate::Error> {
if self.free_list_head != NODE_INVALID {
let result = self.free_list_head;
self.free_list_head = self.node_at(result).next.load(Relaxed);
Ok(result)
} else if (self.watermark as usize) < self.max_num_nodes {
let result = self.watermark;
self.watermark += 1;
Ok(result)
} else {
Err(crate::Error::OutOfSpace)
}
}
}
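/// Read-only access to an allocations table stored in a VMO.
///
/// A minimal sketch of listing the current records (illustrative only; `vmo`
/// is assumed to come from the writing process):
///
/// ```ignore
/// let reader = AllocationsTableReader::new(&vmo)?;
/// for node in reader.iter() {
///     let node = node?;
///     println!("{:#x}: {} bytes", node.address, node.size);
/// }
/// ```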
pub struct AllocationsTableReader {
storage: MemoryMappedVmo,
max_num_nodes: usize,
}
impl AllocationsTableReader {
pub fn new(vmo: &zx::Vmo) -> Result<AllocationsTableReader, crate::Error> {
let storage = MemoryMappedVmo::new_readonly(vmo)?;
let max_num_nodes = compute_nodes_count(storage.vmo_size())?;
Ok(AllocationsTableReader { storage, max_num_nodes })
}
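/// Iterates over all allocation records, bucket by bucket. Corrupted data
/// (an out-of-range node index) is surfaced as an `Err` item.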
pub fn iter(&self) -> impl Iterator<Item = Result<&Node, crate::Error>> {
let bucket_heads = self.storage.get_object::<BucketHeads>(0).unwrap();
bucket_heads.iter().flat_map(|head| self.iterate_bucket(head.load(Relaxed)))
}
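// Walks a single bucket's chain. Addresses are deduplicated because, while a
// concurrent replacement is in progress, the old and the new node for the
// same address are both linked; indices outside the node pool are reported as
// InvalidInput.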
fn iterate_bucket(&self, head: NodeIndex) -> impl Iterator<Item = Result<&Node, crate::Error>> {
let mut curr_index = head;
let mut seen_addresses = HashSet::new();
std::iter::from_fn(move || {
while curr_index != NODE_INVALID {
if (curr_index as usize) < self.max_num_nodes {
let byte_offset =
size_of::<BucketHeads>() + curr_index as usize * size_of::<Node>();
let curr_data = self.storage.get_object::<Node>(byte_offset).unwrap();
curr_index = curr_data.next.load(Relaxed);
if seen_addresses.insert(curr_data.address) {
return Some(Ok(curr_data));
}
} else {
return Some(Err(crate::Error::InvalidInput));
}
}
None
})
}
}
#[cfg(test)]
mod tests {
use super::*;
use assert_matches::assert_matches;
use std::alloc::Layout;
use std::collections::HashMap;
const NUM_ITERATIONS: usize = NUM_BUCKETS * 10;
const NUM_NODES: usize = NUM_ITERATIONS + 100;
const THREAD_INFO_RESOURCE_KEY_1: ResourceKey = ResourceKey::from_raw(0x1122);
const THREAD_INFO_RESOURCE_KEY_2: ResourceKey = ResourceKey::from_raw(0x3344);
const STACK_TRACE_RESOURCE_KEY_1: ResourceKey = ResourceKey::from_raw(0x1212);
const STACK_TRACE_RESOURCE_KEY_2: ResourceKey = ResourceKey::from_raw(0x3434);
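// Test helper that creates a VMO sized to hold the bucket heads plus exactly
// `num_nodes` nodes.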
struct TestStorage {
vmo: zx::Vmo,
}
impl TestStorage {
pub fn new(num_nodes: usize) -> TestStorage {
let nodes_layout = Layout::array::<Node>(num_nodes).unwrap();
let (layout, nodes_offset) = Layout::new::<BucketHeads>().extend(nodes_layout).unwrap();
assert_eq!(nodes_offset, size_of::<BucketHeads>());
let vmo = zx::Vmo::create(layout.size() as u64).unwrap();
TestStorage { vmo }
}
fn create_writer(&self) -> AllocationsTableWriter {
AllocationsTableWriter::new(&self.vmo).unwrap()
}
fn create_reader(&self) -> AllocationsTableReader {
AllocationsTableReader::new(&self.vmo).unwrap()
}
}
#[test]
fn test_cannot_insert_twice() {
let storage = TestStorage::new(NUM_NODES);
let mut writer = storage.create_writer();
let result = writer.insert_allocation(
0x1234,
0x5678,
THREAD_INFO_RESOURCE_KEY_1,
STACK_TRACE_RESOURCE_KEY_1,
zx::MonotonicInstant::ZERO,
);
assert_eq!(result, Ok(true));
let result = writer.insert_allocation(
0x1234,
0x5678,
THREAD_INFO_RESOURCE_KEY_1,
STACK_TRACE_RESOURCE_KEY_1,
zx::MonotonicInstant::ZERO,
);
assert_eq!(result, Ok(false));
}
#[test]
fn test_cannot_erase_twice() {
let storage = TestStorage::new(NUM_NODES);
let mut writer = storage.create_writer();
let result = writer.insert_allocation(
0x1234,
0x5678,
THREAD_INFO_RESOURCE_KEY_1,
STACK_TRACE_RESOURCE_KEY_1,
zx::MonotonicInstant::ZERO,
);
assert_eq!(result, Ok(true));
let result = writer.erase_allocation(0x1234);
assert_eq!(result, Some(0x5678));
let result = writer.erase_allocation(0x1234);
assert_eq!(result, None);
}
#[test]
fn test_out_of_space() {
let storage = TestStorage::new(NUM_NODES);
let mut writer = storage.create_writer();
for i in 0..NUM_NODES {
let result = writer.insert_allocation(
i as u64,
1,
THREAD_INFO_RESOURCE_KEY_1,
STACK_TRACE_RESOURCE_KEY_1,
zx::MonotonicInstant::ZERO,
);
assert_eq!(result, Ok(true));
}
let result = writer.insert_allocation(
NUM_NODES as u64,
1,
THREAD_INFO_RESOURCE_KEY_1,
STACK_TRACE_RESOURCE_KEY_1,
zx::MonotonicInstant::ZERO,
);
assert_eq!(result, Err(crate::Error::OutOfSpace));
let result = writer.erase_allocation(0);
assert_eq!(result, Some(1));
let result = writer.insert_allocation(
NUM_NODES as u64,
1,
THREAD_INFO_RESOURCE_KEY_1,
STACK_TRACE_RESOURCE_KEY_1,
zx::MonotonicInstant::ZERO,
);
assert_eq!(result, Ok(true));
}
#[test]
fn test_loop_insert_then_erase() {
let storage = TestStorage::new(NUM_NODES);
let mut writer = storage.create_writer();
for i in 0..NUM_ITERATIONS {
let result = writer.insert_allocation(
i as u64,
1,
THREAD_INFO_RESOURCE_KEY_1,
STACK_TRACE_RESOURCE_KEY_1,
zx::MonotonicInstant::ZERO,
);
assert_eq!(result, Ok(true), "failed to insert 0x{:x}", i);
let result = writer.erase_allocation(i as u64);
assert_eq!(result, Some(1), "failed to erase 0x{:x}", i);
}
}
#[test]
fn test_bulk_insert_then_erase_same_order() {
let storage = TestStorage::new(NUM_NODES);
let mut writer = storage.create_writer();
for i in 0..NUM_ITERATIONS {
let result = writer.insert_allocation(
i as u64,
1,
THREAD_INFO_RESOURCE_KEY_1,
STACK_TRACE_RESOURCE_KEY_1,
zx::MonotonicInstant::ZERO,
);
assert_eq!(result, Ok(true), "failed to insert 0x{:x}", i);
}
for i in 0..NUM_ITERATIONS {
let result = writer.erase_allocation(i as u64);
assert_eq!(result, Some(1), "failed to erase 0x{:x}", i);
}
}
#[test]
fn test_bulk_insert_then_erase_reverse_order() {
let storage = TestStorage::new(NUM_NODES);
let mut writer = storage.create_writer();
for i in 0..NUM_ITERATIONS {
let result = writer.insert_allocation(
i as u64,
1,
THREAD_INFO_RESOURCE_KEY_1,
STACK_TRACE_RESOURCE_KEY_1,
zx::MonotonicInstant::ZERO,
);
assert_eq!(result, Ok(true), "failed to insert 0x{:x}", i);
}
for i in (0..NUM_ITERATIONS).rev() {
let result = writer.erase_allocation(i as u64);
assert_eq!(result, Some(1), "failed to erase 0x{:x}", i);
}
}
#[test]
fn test_read_empty() {
let storage = TestStorage::new(NUM_NODES);
storage.create_writer();
let reader = storage.create_reader();
assert_eq!(reader.iter().count(), 0);
}
#[test]
fn test_read_populated() {
let storage = TestStorage::new(NUM_NODES);
let mut writer = storage.create_writer();
let mut expected_map = HashMap::new();
for i in 0..NUM_ITERATIONS as u64 {
let thread_info_key =
if i % 4 >= 2 { THREAD_INFO_RESOURCE_KEY_1 } else { THREAD_INFO_RESOURCE_KEY_2 };
let stack_trace_key =
if i % 2 == 0 { STACK_TRACE_RESOURCE_KEY_1 } else { STACK_TRACE_RESOURCE_KEY_2 };
let timestamp =
zx::MonotonicInstant::from_nanos((NUM_ITERATIONS as i64 / 2) - (i as i64));
let result =
writer.insert_allocation(i, 1, thread_info_key, stack_trace_key, timestamp);
assert_eq!(result, Ok(true), "failed to insert 0x{:x}", i);
expected_map.insert(i, (1, thread_info_key, stack_trace_key, timestamp));
}
let reader = storage.create_reader();
let mut actual_map = HashMap::new();
for node in reader.iter() {
let Node { address, size, thread_info_key, stack_trace_key, timestamp, .. } =
node.unwrap();
assert!(
actual_map
.insert(*address, (*size, *thread_info_key, *stack_trace_key, *timestamp))
.is_none(),
"address 0x{:x} was read more than once",
address
);
}
assert_eq!(actual_map, expected_map);
}
#[test]
fn test_read_bad_bucket_head() {
let storage = TestStorage::new(NUM_NODES);
let mut writer = storage.create_writer();
writer.bucket_head_at(NUM_BUCKETS / 2).store(NODE_INVALID - 1, SeqCst);
let reader = storage.create_reader();
let contains_error = reader.iter().any(|e| e.is_err());
assert!(contains_error);
}
#[test]
fn test_read_bad_node_next() {
let storage = TestStorage::new(NUM_NODES);
let mut writer = storage.create_writer();
writer.bucket_head_at(NUM_BUCKETS / 2).store(0, SeqCst);
*writer.node_at(0) = Node {
next: AtomicNodeIndex::new(NODE_INVALID - 1),
address: 0x1234,
size: 0x5678,
thread_info_key: THREAD_INFO_RESOURCE_KEY_1,
stack_trace_key: STACK_TRACE_RESOURCE_KEY_1,
timestamp: zx::MonotonicInstant::from_nanos(99999999),
};
let reader = storage.create_reader();
let contains_error = reader.iter().any(|e| e.is_err());
assert!(contains_error);
}
#[test]
fn test_replace() {
let storage = TestStorage::new(NUM_NODES);
let mut writer = storage.create_writer();
let result = writer.insert_allocation(
0x1234,
0x1111,
THREAD_INFO_RESOURCE_KEY_1,
STACK_TRACE_RESOURCE_KEY_1,
zx::MonotonicInstant::ZERO,
);
assert_eq!(result, Ok(true));
let reader = storage.create_reader();
let mut iter = reader.iter();
assert_matches!(iter.next(), Some(Ok(Node { address: 0x1234, size: 0x1111, .. })));
assert_matches!(iter.next(), None);
let result = writer.replace_allocation_begin(
0x1234,
0x2222,
THREAD_INFO_RESOURCE_KEY_2,
STACK_TRACE_RESOURCE_KEY_2,
zx::MonotonicInstant::ZERO,
);
let Ok(Some((new_node, old_node, old_size))) = result else {
panic!("Update begin is supposed to succeed in this test, got {:?} instead", result)
};
assert_eq!(old_size, 0x1111);
let reader = storage.create_reader();
let mut iter = reader.iter();
assert_matches!(iter.next(), Some(Ok(Node { address: 0x1234, size: 0x2222, .. })));
assert_matches!(iter.next(), None);
writer.replace_allocation_end(new_node, old_node);
let reader = storage.create_reader();
let mut iter = reader.iter();
assert_matches!(iter.next(), Some(Ok(Node { address: 0x1234, size: 0x2222, .. })));
assert_matches!(iter.next(), None);
}
#[test]
fn test_cannot_replace_nonexisting() {
let storage = TestStorage::new(NUM_NODES);
let mut writer = storage.create_writer();
let result = writer.replace_allocation(
0x1234,
0x5678,
THREAD_INFO_RESOURCE_KEY_1,
STACK_TRACE_RESOURCE_KEY_1,
zx::MonotonicInstant::ZERO,
);
assert_eq!(result, Ok(None));
}
}