Skip to main content

stacktrack_vmo/
threads_table_v1.rs

1// Copyright 2026 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5use memory_mapped_vmo::MemoryMappedVmo;
6use static_assertions::const_assert;
7use std::mem::{align_of, size_of};
8use std::ops::Deref;
9use std::sync::atomic::{AtomicU32, Ordering};
10use zerocopy::FromBytes;
11
/// Index type used to refer to node slots within the VMO table.
pub type NodeIndex = u32;

/// The index used to represent NULL or invalid node.
/// This is also where the Header is stored.
pub const NODE_INVALID: NodeIndex = 0;

/// Maximum number of stack frames a single node can record.
pub const MAX_FRAMES: usize = 128;
19
/// Atomic wrapper around a `NodeIndex`.
///
/// Defined as a newtype over `AtomicU32` so we can implement traits on it;
/// `#[repr(transparent)]` keeps its layout identical to the wrapped atomic.
#[repr(transparent)]
#[derive(Debug, FromBytes)]
pub struct AtomicNodeIndex(AtomicU32);
24
25impl AtomicNodeIndex {
26    pub const fn new(value: u32) -> AtomicNodeIndex {
27        AtomicNodeIndex(AtomicU32::new(value))
28    }
29}
30
31impl Deref for AtomicNodeIndex {
32    type Target = AtomicU32;
33
34    fn deref(&self) -> &AtomicU32 {
35        &self.0
36    }
37}
38
/// VMO Header.
///
/// Stored in node slot 0 (`NODE_INVALID`), which is why that index can never
/// refer to a real node.
#[repr(C)]
#[derive(Debug, FromBytes)]
pub struct Header {
    /// Index of the first node in the list (NODE_INVALID when the list is empty).
    head: AtomicNodeIndex,
}
46
/// A node in the stack track VMO.
///
/// Nodes form a doubly-linked list. Readers only traverse `next`, so it is
/// atomic; `prev` is bookkeeping private to the single writer.
#[repr(C)]
#[derive(Debug, FromBytes)]
pub struct Node {
    /// Index of the next node. Atomic because concurrent readers follow it.
    next: AtomicNodeIndex,
    /// Index of the previous node. Only accessed by the writer.
    prev: u32,
    /// Thread KOID.
    pub koid: u64,
    /// Number of valid frames in `frames`.
    pub count: u32,
    /// Stack frames; only the first `count` entries are meaningful.
    pub frames: [Frame; MAX_FRAMES],
}
62
// Ensure Header fits in Node 0: the header reuses the slot of node index 0
// (NODE_INVALID), so it must be no larger and no more strictly aligned than a Node.
const_assert!(size_of::<Header>() < size_of::<Node>());
const_assert!(align_of::<Header>() <= align_of::<Node>());
66
/// Information about a stack frame.
///
/// Layout must stay in sync with the C++ instrumentation (see the LINT
/// directive below).
#[repr(C)]
#[derive(Debug, Clone, Copy, Default, FromBytes, PartialEq)]
pub struct Frame {
    // LINT.IfChange
    /// Program counter.
    pub pc: u64,
    /// Frame pointer.
    pub fp: u64,
    // LINT.ThenChange(//src/performance/memory/stacktrack/instrumentation/src/unwind.cc)
}
76
/// Writer for the Stacktrack VMO.
pub struct StacktrackWriter {
    /// Writable mapping of the VMO backing the table.
    memory: MemoryMappedVmo,
    /// Total number of node slots in the VMO (including slot 0, the header).
    capacity: u32,
    /// Head of the free list of removed nodes (NODE_INVALID when empty).
    free_head: NodeIndex,
    /// First never-allocated node index; grows as fresh slots are handed out.
    watermark: u32,
}
84
85impl StacktrackWriter {
86    /// Initializes a VMO as an empty table and creates a StacktrackWriter to write into it.
87    ///
88    /// # Safety
89    /// The caller must guarantee that the `vmo` is not accessed by others while the returned
90    /// instance is alive. However, it always safe to take a snapshot and read that instead.
91    pub unsafe fn new(vmo: &zx::Vmo) -> Result<Self, crate::Error> {
92        let memory = unsafe { MemoryMappedVmo::new_readwrite(vmo)? };
93
94        let capacity = compute_node_capacity(memory.vmo_size())?;
95
96        // Ensure we have at least one node for the header.
97        if capacity == 0 {
98            return Err(crate::Error::BufferTooSmall);
99        }
100
101        Ok(Self {
102            memory,
103            capacity: capacity as u32,
104            free_head: NODE_INVALID,
105            watermark: 1, // Start allocating after the header.
106        })
107    }
108
109    fn get_header_mut(&mut self) -> &mut Header {
110        self.memory.get_object_mut(0).unwrap()
111    }
112
113    fn get_node_mut(&mut self, index: NodeIndex) -> &mut Node {
114        assert!(index != NODE_INVALID && index < self.capacity, "index out of bounds");
115        let offset = index as usize * size_of::<Node>();
116        self.memory.get_object_mut(offset).unwrap()
117    }
118
119    /// Inserts a new node at the head of the linked-list.
120    ///
121    /// The index of the new node is returned on success.
122    pub fn insert_at_head(
123        &mut self,
124        koid: u64,
125        frames: &[Frame],
126    ) -> Result<NodeIndex, crate::Error> {
127        // Pop a free node from the free list.
128        let node_idx = if self.free_head != NODE_INVALID {
129            let node_idx = self.free_head;
130            let node = self.get_node_mut(node_idx);
131            let next_free_idx = node.next.load(Ordering::Relaxed);
132
133            // Ensure that the node we are about to use has no links to other nodes.
134            assert_eq!(node.prev, NODE_INVALID, "free list is corrupted");
135            node.next.store(NODE_INVALID, Ordering::Relaxed);
136
137            self.free_head = next_free_idx;
138            node_idx
139        } else {
140            let idx = self.watermark;
141            if idx < self.capacity {
142                self.watermark += 1;
143                idx
144            } else {
145                return Err(crate::Error::OutOfSpace);
146            }
147        };
148
149        // Set the new node's contents and links (at this point, it's not reachable yet).
150        let old_head_idx = self.get_header_mut().head.load(Ordering::Relaxed);
151        let node = self.get_node_mut(node_idx);
152        node.koid = koid;
153        node.count = frames.len() as u32;
154        let count = frames.len().min(MAX_FRAMES);
155        node.frames[..count].copy_from_slice(&frames[..count]);
156        node.prev = NODE_INVALID;
157        node.next.store(old_head_idx, Ordering::Release);
158
159        // Update the old head's link to the new node (the prev link is only used by the writer, so
160        // it doesn't need to be set atomically).
161        if old_head_idx != NODE_INVALID {
162            let old_head = self.get_node_mut(old_head_idx);
163            old_head.prev = node_idx;
164        }
165
166        // Update the head pointer. This is the operation that makes the new node reachable by the
167        // reader and it must be atomic.
168        self.get_header_mut().head.store(node_idx, Ordering::Release);
169
170        Ok(node_idx)
171    }
172
173    /// Remove the node at the given index.
174    pub fn remove(&mut self, node_idx: NodeIndex) {
175        let node = self.get_node_mut(node_idx);
176        let prev_idx = node.prev;
177        let next_idx = node.next.load(Ordering::Acquire);
178
179        // Update the next node's link to the previous node.
180        if next_idx != NODE_INVALID {
181            let next_node = self.get_node_mut(next_idx);
182            next_node.prev = prev_idx;
183        }
184
185        // Update the previous node's link to the next node. This is what makes the node
186        // unreachable.
187        if prev_idx != NODE_INVALID {
188            let prev = self.get_node_mut(prev_idx);
189            prev.next.store(next_idx, Ordering::Release);
190        } else {
191            // The node was the head, so update the head pointer.
192            self.get_header_mut().head.store(next_idx, Ordering::Release);
193        }
194
195        // Push the node into the free list.
196        let next_free_idx = std::mem::replace(&mut self.free_head, node_idx);
197        let node = self.get_node_mut(node_idx);
198        node.next.store(next_free_idx, Ordering::Relaxed);
199        node.prev = NODE_INVALID;
200    }
201}
202
/// Read-only view of a Stacktrack VMO (typically a snapshot).
pub struct StacktrackReader {
    /// Read-only mapping of the VMO being inspected.
    memory: MemoryMappedVmo,
    /// Total number of node slots in the mapped VMO.
    capacity: u32,
}
207
208impl StacktrackReader {
209    /// # Safety
210    /// The caller must guarantee that the `vmo` is not accessed by others while the returned
211    /// instance is alive, usually by taking a snapshot of the VMO that StacktrackWriter
212    /// operates on and then reading the snapshot instead.
213    pub unsafe fn new(vmo: &zx::Vmo) -> Result<Self, crate::Error> {
214        let memory = unsafe { MemoryMappedVmo::new_readonly(vmo)? };
215
216        let capacity = compute_node_capacity(memory.vmo_size())?;
217
218        Ok(Self { memory, capacity: capacity as u32 })
219    }
220
221    fn get_header(&self) -> &Header {
222        self.memory.get_object(0).unwrap()
223    }
224
225    fn get_node(&self, index: NodeIndex) -> &Node {
226        assert!(index != 0 && index < self.capacity, "index out of bounds");
227        let offset = index as usize * size_of::<Node>();
228        self.memory.get_object(offset).unwrap()
229    }
230
231    pub fn iter(&self) -> StacktrackIterator<'_> {
232        // We need header to start iteration.
233        let head_index = self.get_header().head.load(Ordering::Acquire);
234        StacktrackIterator { reader: self, next_index: head_index }
235    }
236}
237
/// Iterator over the nodes reachable from the list head of a reader.
pub struct StacktrackIterator<'a> {
    /// Reader whose mapped memory the yielded `&Node`s borrow from.
    reader: &'a StacktrackReader,
    /// Index of the node `next()` will yield, or NODE_INVALID at the end.
    next_index: NodeIndex,
}
242
243impl<'a> Iterator for StacktrackIterator<'a> {
244    type Item = &'a Node;
245
246    fn next(&mut self) -> Option<Self::Item> {
247        if self.next_index == NODE_INVALID || self.next_index >= self.reader.capacity {
248            return None;
249        }
250
251        let node = self.reader.get_node(self.next_index);
252        self.next_index = node.next.load(Ordering::Acquire);
253        Some(node)
254    }
255}
256
257fn compute_node_capacity(num_bytes: usize) -> Result<usize, crate::Error> {
258    if num_bytes < size_of::<Node>() {
259        return Err(crate::Error::BufferTooSmall);
260    }
261    Ok(num_bytes / size_of::<Node>())
262}
263
#[cfg(test)]
mod tests {
    use super::*;
    use itertools::Itertools;

    // Number of insertions performed by tests that fill the table.
    const NUM_ITERATIONS: usize = 100;
    // Table size with headroom beyond NUM_ITERATIONS (slot 0 holds the header).
    const NUM_NODES: usize = NUM_ITERATIONS + 10;

    // Owns the writer-side VMO so tests can snapshot it for readers.
    struct TestStorage {
        vmo: zx::Vmo,
    }

    impl TestStorage {
        // Creates a VMO sized for `num_nodes` node slots and a writer over it.
        pub fn new(num_nodes: usize) -> (TestStorage, StacktrackWriter) {
            let vmo_size = num_nodes * size_of::<Node>();
            let vmo = zx::Vmo::create(vmo_size as u64).unwrap();
            // SAFETY: the VMO is owned by this test; readers only see snapshots.
            let writer = unsafe { StacktrackWriter::new(&vmo).unwrap() };
            (TestStorage { vmo }, writer)
        }

        // Takes a read-only snapshot of the VMO and wraps it in a reader,
        // mirroring how production readers are expected to operate.
        fn create_reader(&self) -> StacktrackReader {
            let snapshot = self
                .vmo
                .create_child(
                    zx::VmoChildOptions::SNAPSHOT | zx::VmoChildOptions::NO_WRITE,
                    0,
                    self.vmo.get_size().unwrap(),
                )
                .unwrap();
            // SAFETY: the snapshot is private to this reader and never written.
            unsafe { StacktrackReader::new(&snapshot).unwrap() }
        }
    }

    // An empty table must yield an empty iteration.
    #[test]
    fn test_read_empty() {
        let (storage, _writer) = TestStorage::new(NUM_NODES);
        let reader = storage.create_reader();
        assert_eq!(reader.iter().count(), 0);
    }

    // Exhausting the table must fail gracefully and recover after a removal.
    #[test]
    fn test_allocation_exhaustion() {
        let (_storage, mut writer) = TestStorage::new(10);
        let frames = [Frame::default()];

        // Allocate all the available nodes. Indices will be 1-9.
        for i in 1..=9 {
            assert_eq!(writer.insert_at_head(i as u64, &frames), Ok(i));
        }

        // The next allocation should fail.
        assert_eq!(writer.insert_at_head(10, &frames), Err(crate::Error::OutOfSpace));

        // Free one.
        writer.remove(9);

        // Insertion should succeed now (reusing the freed slot).
        assert_eq!(writer.insert_at_head(11, &frames), Ok(9));
    }

    // End-to-end insert/read/remove lifecycle through writer and reader.
    #[test]
    fn test_lifecycle() {
        let (storage, mut writer) = TestStorage::new(NUM_NODES);
        let frames = [Frame { pc: 0x123, fp: 0x456 }];

        // Insert one node (takes index 1).
        assert_eq!(writer.insert_at_head(101, &frames), Ok(1));

        // Read back its contents.
        {
            let reader = storage.create_reader();
            let nodes: Vec<_> = reader.iter().collect();
            assert_eq!(nodes.len(), 1);
            assert_eq!(nodes[0].koid, 101);
            assert_eq!(nodes[0].count, 1);
            assert_eq!(nodes[0].frames[0].pc, 0x123);
            assert_eq!(nodes[0].frames[0].fp, 0x456);
        }

        // Insert another node (takes index 2)
        assert_eq!(writer.insert_at_head(102, &frames), Ok(2));

        // Remove the previous node (index 1).
        writer.remove(1);

        // Verify read (should see 102, which is at index 2)
        {
            let reader = storage.create_reader();
            let nodes: Vec<_> = reader.iter().collect();
            assert_eq!(nodes.len(), 1);
            assert_eq!(nodes[0].koid, 102);
        }

        // Remove the second node too (index 2).
        writer.remove(2);

        // Verify that the table is now empty.
        {
            let reader = storage.create_reader();
            assert_eq!(reader.iter().count(), 0);
        }
    }

    // Multiple live nodes are all reachable from the head (order-insensitive).
    #[test]
    fn test_multiple_nodes() {
        let (storage, mut writer) = TestStorage::new(NUM_NODES);
        let frames = [Frame::default()];

        assert_eq!(writer.insert_at_head(101, &frames), Ok(1));
        assert_eq!(writer.insert_at_head(102, &frames), Ok(2));

        let reader = storage.create_reader();
        let nodes = reader.iter().map(|node| node.koid).sorted().collect_vec();
        assert_eq!(nodes, [101, 102]);
    }
}
379}