1use memory_mapped_vmo::MemoryMappedVmo;
6use static_assertions::const_assert;
7use std::mem::{align_of, size_of};
8use std::ops::Deref;
9use std::sync::atomic::{AtomicU32, Ordering};
10use zerocopy::FromBytes;
11
/// Index of a node slot within the shared buffer, in units of `size_of::<Node>()`.
pub type NodeIndex = u32;

/// Sentinel meaning "no node" (list terminator / absent link).
/// Index 0 is never a valid node: that slot holds the `Header`.
pub const NODE_INVALID: NodeIndex = 0;

/// Maximum number of stack frames stored in a single `Node`.
pub const MAX_FRAMES: usize = 128;
/// A `NodeIndex` that can be read/written atomically through the shared mapping.
///
/// `repr(transparent)` keeps the layout identical to the inner `AtomicU32`,
/// which is what allows it to be reinterpreted from raw VMO bytes
/// (`FromBytes`).
#[repr(transparent)]
#[derive(Debug, FromBytes)]
pub struct AtomicNodeIndex(AtomicU32);

impl AtomicNodeIndex {
    /// Creates a new `AtomicNodeIndex` initialized to `value`.
    pub const fn new(value: u32) -> AtomicNodeIndex {
        AtomicNodeIndex(AtomicU32::new(value))
    }
}
30
// Expose the inner `AtomicU32` API (`load`/`store`) directly on the wrapper.
impl Deref for AtomicNodeIndex {
    type Target = AtomicU32;

    fn deref(&self) -> &AtomicU32 {
        &self.0
    }
}
38
/// Metadata stored at the very beginning of the buffer (offset 0), occupying
/// the space that would otherwise belong to node slot 0.
#[repr(C)]
#[derive(Debug, FromBytes)]
pub struct Header {
    // Index of the first node of the linked list, or NODE_INVALID if empty.
    head: AtomicNodeIndex,
}
46
/// One entry of the doubly-linked list stored in the shared buffer.
#[repr(C)]
#[derive(Debug, FromBytes)]
pub struct Node {
    // Next node in the list (or the next free node, while this node sits on
    // the writer's free list); NODE_INVALID terminates the chain. Atomic
    // because readers traverse it while the writer relinks the list.
    next: AtomicNodeIndex,
    // Previous node in the list; NODE_INVALID at the head. Plain u32: nothing
    // in this file reads `prev` from the reader side.
    prev: u32,
    pub koid: u64,
    // Number of valid entries at the front of `frames`.
    pub count: u32,
    pub frames: [Frame; MAX_FRAMES],
}
62
// The header is stored in the space of node slot 0, so it must fit within one
// node-sized slot and must not require stricter alignment than a node.
const_assert!(size_of::<Header>() < size_of::<Node>());
const_assert!(align_of::<Header>() <= align_of::<Node>());
66
/// A single captured stack frame.
#[repr(C)]
#[derive(Debug, Clone, Copy, Default, FromBytes, PartialEq)]
pub struct Frame {
    // Program counter. — presumably a return address; confirm with the capture site.
    pub pc: u64,
    // Frame pointer at the time of capture.
    pub fp: u64,
 }
76
/// Write-side handle: owns the mapping and all list-mutation state.
pub struct StacktrackWriter {
    memory: MemoryMappedVmo,
    // Total number of node-sized slots in the buffer (slot 0 is the header).
    capacity: u32,
    // Head of the singly-linked free list of recycled nodes (NODE_INVALID if empty).
    free_head: NodeIndex,
    // First never-allocated slot; grows upward from 1 (slot 0 is the header).
    watermark: u32,
}
84
85impl StacktrackWriter {
86 pub unsafe fn new(vmo: &zx::Vmo) -> Result<Self, crate::Error> {
92 let memory = unsafe { MemoryMappedVmo::new_readwrite(vmo)? };
93
94 let capacity = compute_node_capacity(memory.vmo_size())?;
95
96 if capacity == 0 {
98 return Err(crate::Error::BufferTooSmall);
99 }
100
101 Ok(Self {
102 memory,
103 capacity: capacity as u32,
104 free_head: NODE_INVALID,
105 watermark: 1, })
107 }
108
109 fn get_header_mut(&mut self) -> &mut Header {
110 self.memory.get_object_mut(0).unwrap()
111 }
112
113 fn get_node_mut(&mut self, index: NodeIndex) -> &mut Node {
114 assert!(index != NODE_INVALID && index < self.capacity, "index out of bounds");
115 let offset = index as usize * size_of::<Node>();
116 self.memory.get_object_mut(offset).unwrap()
117 }
118
119 pub fn insert_at_head(
123 &mut self,
124 koid: u64,
125 frames: &[Frame],
126 ) -> Result<NodeIndex, crate::Error> {
127 let node_idx = if self.free_head != NODE_INVALID {
129 let node_idx = self.free_head;
130 let node = self.get_node_mut(node_idx);
131 let next_free_idx = node.next.load(Ordering::Relaxed);
132
133 assert_eq!(node.prev, NODE_INVALID, "free list is corrupted");
135 node.next.store(NODE_INVALID, Ordering::Relaxed);
136
137 self.free_head = next_free_idx;
138 node_idx
139 } else {
140 let idx = self.watermark;
141 if idx < self.capacity {
142 self.watermark += 1;
143 idx
144 } else {
145 return Err(crate::Error::OutOfSpace);
146 }
147 };
148
149 let old_head_idx = self.get_header_mut().head.load(Ordering::Relaxed);
151 let node = self.get_node_mut(node_idx);
152 node.koid = koid;
153 node.count = frames.len() as u32;
154 let count = frames.len().min(MAX_FRAMES);
155 node.frames[..count].copy_from_slice(&frames[..count]);
156 node.prev = NODE_INVALID;
157 node.next.store(old_head_idx, Ordering::Release);
158
159 if old_head_idx != NODE_INVALID {
162 let old_head = self.get_node_mut(old_head_idx);
163 old_head.prev = node_idx;
164 }
165
166 self.get_header_mut().head.store(node_idx, Ordering::Release);
169
170 Ok(node_idx)
171 }
172
173 pub fn remove(&mut self, node_idx: NodeIndex) {
175 let node = self.get_node_mut(node_idx);
176 let prev_idx = node.prev;
177 let next_idx = node.next.load(Ordering::Acquire);
178
179 if next_idx != NODE_INVALID {
181 let next_node = self.get_node_mut(next_idx);
182 next_node.prev = prev_idx;
183 }
184
185 if prev_idx != NODE_INVALID {
188 let prev = self.get_node_mut(prev_idx);
189 prev.next.store(next_idx, Ordering::Release);
190 } else {
191 self.get_header_mut().head.store(next_idx, Ordering::Release);
193 }
194
195 let next_free_idx = std::mem::replace(&mut self.free_head, node_idx);
197 let node = self.get_node_mut(node_idx);
198 node.next.store(next_free_idx, Ordering::Relaxed);
199 node.prev = NODE_INVALID;
200 }
201}
202
/// Read-side handle over a (typically snapshotted) VMO; never mutates it.
pub struct StacktrackReader {
    memory: MemoryMappedVmo,
    // Total number of node-sized slots in the buffer (slot 0 is the header).
    capacity: u32,
}
207
208impl StacktrackReader {
209 pub unsafe fn new(vmo: &zx::Vmo) -> Result<Self, crate::Error> {
214 let memory = unsafe { MemoryMappedVmo::new_readonly(vmo)? };
215
216 let capacity = compute_node_capacity(memory.vmo_size())?;
217
218 Ok(Self { memory, capacity: capacity as u32 })
219 }
220
221 fn get_header(&self) -> &Header {
222 self.memory.get_object(0).unwrap()
223 }
224
225 fn get_node(&self, index: NodeIndex) -> &Node {
226 assert!(index != 0 && index < self.capacity, "index out of bounds");
227 let offset = index as usize * size_of::<Node>();
228 self.memory.get_object(offset).unwrap()
229 }
230
231 pub fn iter(&self) -> StacktrackIterator<'_> {
232 let head_index = self.get_header().head.load(Ordering::Acquire);
234 StacktrackIterator { reader: self, next_index: head_index }
235 }
236}
237
/// Iterator over the nodes of the list, produced by `StacktrackReader::iter`.
pub struct StacktrackIterator<'a> {
    reader: &'a StacktrackReader,
    // Slot to yield next; NODE_INVALID once the list is exhausted.
    next_index: NodeIndex,
}
242
243impl<'a> Iterator for StacktrackIterator<'a> {
244 type Item = &'a Node;
245
246 fn next(&mut self) -> Option<Self::Item> {
247 if self.next_index == NODE_INVALID || self.next_index >= self.reader.capacity {
248 return None;
249 }
250
251 let node = self.reader.get_node(self.next_index);
252 self.next_index = node.next.load(Ordering::Acquire);
253 Some(node)
254 }
255}
256
257fn compute_node_capacity(num_bytes: usize) -> Result<usize, crate::Error> {
258 if num_bytes < size_of::<Node>() {
259 return Err(crate::Error::BufferTooSmall);
260 }
261 Ok(num_bytes / size_of::<Node>())
262}
263
#[cfg(test)]
mod tests {
    use super::*;
    use itertools::Itertools;

    const NUM_ITERATIONS: usize = 100;
    // Extra slack on top of the iteration count (slot 0 is lost to the header).
    const NUM_NODES: usize = NUM_ITERATIONS + 10;

    // Owns the VMO behind a writer and can create readers over snapshots of it.
    struct TestStorage {
        vmo: zx::Vmo,
    }

    impl TestStorage {
        pub fn new(num_nodes: usize) -> (TestStorage, StacktrackWriter) {
            let vmo_size = num_nodes * size_of::<Node>();
            let vmo = zx::Vmo::create(vmo_size as u64).unwrap();
            // SAFETY: the VMO was just created here and is not shared yet.
            let writer = unsafe { StacktrackWriter::new(&vmo).unwrap() };
            (TestStorage { vmo }, writer)
        }

        // Builds a reader over an immutable snapshot of the current contents.
        fn create_reader(&self) -> StacktrackReader {
            let snapshot = self
                .vmo
                .create_child(
                    zx::VmoChildOptions::SNAPSHOT | zx::VmoChildOptions::NO_WRITE,
                    0,
                    self.vmo.get_size().unwrap(),
                )
                .unwrap();
            // SAFETY: the snapshot child is created NO_WRITE, so its contents
            // cannot change while the reader is alive.
            unsafe { StacktrackReader::new(&snapshot).unwrap() }
        }
    }

    #[test]
    fn test_read_empty() {
        let (storage, _writer) = TestStorage::new(NUM_NODES);
        let reader = storage.create_reader();
        assert_eq!(reader.iter().count(), 0);
    }

    #[test]
    fn test_allocation_exhaustion() {
        let (_storage, mut writer) = TestStorage::new(10);
        let frames = [Frame::default()];

        // 10 slots minus the header slot leaves 9 usable nodes (indices 1..=9).
        for i in 1..=9 {
            assert_eq!(writer.insert_at_head(i as u64, &frames), Ok(i));
        }

        // The 10th insertion must fail: every slot is in use.
        assert_eq!(writer.insert_at_head(10, &frames), Err(crate::Error::OutOfSpace));

        writer.remove(9);

        // The freed slot is recycled through the free list.
        assert_eq!(writer.insert_at_head(11, &frames), Ok(9));
    }

    #[test]
    fn test_lifecycle() {
        let (storage, mut writer) = TestStorage::new(NUM_NODES);
        let frames = [Frame { pc: 0x123, fp: 0x456 }];

        assert_eq!(writer.insert_at_head(101, &frames), Ok(1));

        // A snapshot taken now sees exactly the one inserted node.
        {
            let reader = storage.create_reader();
            let nodes: Vec<_> = reader.iter().collect();
            assert_eq!(nodes.len(), 1);
            assert_eq!(nodes[0].koid, 101);
            assert_eq!(nodes[0].count, 1);
            assert_eq!(nodes[0].frames[0].pc, 0x123);
            assert_eq!(nodes[0].frames[0].fp, 0x456);
        }

        assert_eq!(writer.insert_at_head(102, &frames), Ok(2));

        writer.remove(1);

        // After removing node 1, only node 2 remains visible.
        {
            let reader = storage.create_reader();
            let nodes: Vec<_> = reader.iter().collect();
            assert_eq!(nodes.len(), 1);
            assert_eq!(nodes[0].koid, 102);
        }

        writer.remove(2);

        // Removing the last node leaves the list empty again.
        {
            let reader = storage.create_reader();
            assert_eq!(reader.iter().count(), 0);
        }
    }

    #[test]
    fn test_multiple_nodes() {
        let (storage, mut writer) = TestStorage::new(NUM_NODES);
        let frames = [Frame::default()];

        assert_eq!(writer.insert_at_head(101, &frames), Ok(1));
        assert_eq!(writer.insert_at_head(102, &frames), Ok(2));

        // Sort by koid: list order is newest-first, the test only checks membership.
        let reader = storage.create_reader();
        let nodes = reader.iter().map(|node| node.koid).sorted().collect_vec();
        assert_eq!(nodes, [101, 102]);
    }
}
379}