// Copyright 2023 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use std::alloc::Layout;
use std::mem::{align_of, size_of, size_of_val};

use crate::memory_mapped_vmo::{MemoryMappable, MemoryMappedVmo};

/// An offset within the VMO.
type Offset = u32;
const OFFSET_INVALID: Offset = Offset::MAX;

// Stack traces are stored in compressed form as an array of u8 prefixed by a u16 stating its
// length. The u16 is guaranteed to be aligned.
type StackTraceLength = u16;

// Known stack traces are indexed by a hash table, whose linked list heads (one for each bucket)
// are stored at the beginning of the VMO.
//
// Note that the presence and format of the hash table are internal details of the current
// ResourcesTableWriter implementation: ResourcesTableReader does not depend on them.
const NUM_STACK_BUCKETS: usize = 1 << 13;
type StackBucketHeads = [Offset; NUM_STACK_BUCKETS];
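
// For reference, the writer lays the VMO out as follows (sizes not to scale):
//
//   [ stack bucket heads: NUM_STACK_BUCKETS x Offset ][ stack trace nodes and thread infos ... ]
//
// The region after the bucket heads is bump-allocated. Each stack trace node consists of a "next"
// Offset, a StackTraceLength, and the compressed stack trace bytes, stored back to back.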

/// A resource key is just an offset into the VMO.
#[derive(Clone, Copy, Eq, Debug, Hash, Ord, PartialEq, PartialOrd)]
#[repr(transparent)]
pub struct ResourceKey(Offset);

impl ResourceKey {
    /// Used by tests in this crate to construct placeholder values.
    #[cfg(test)]
    pub(crate) const fn from_raw(offset: Offset) -> ResourceKey {
        ResourceKey(offset)
    }

    pub const fn into_raw(self) -> Offset {
        self.0
    }
}

#[repr(C)]
#[derive(Debug)]
pub struct ThreadInfo {
    pub koid: zx::sys::zx_koid_t,
    pub name: zx::Name,
}

// SAFETY: ThreadInfo only contains memory-mappable types and we never parse the name in it.
unsafe impl MemoryMappable for ThreadInfo {}

/// Mediates write access to a VMO containing compressed stack traces and thread info structs.
///
/// Compressed stack traces are stored in a hash table for efficient deduplication. Thread
/// information structures are not deduplicated (it is expected that deduplication happens at a
/// higher level in the stack). Apart from the hash table's bucket heads, all data is immutable:
/// neither stack traces nor thread info structs can be modified or deleted after having been
/// inserted.
///
/// Hash collisions are handled by maintaining per-bucket linked lists. Specifically, an array of
/// list heads, one for each bucket, is stored at the beginning of the VMO. The remaining part of
/// the VMO contains the linked list nodes. Since nodes are immutable, insertions always happen at
/// the head of the list.
///
/// Thanks to the fact that inserted data is immutable, other readers are allowed to read data
/// from this VMO while ResourcesTableWriter is still alive. In particular, all insertion functions
/// return a ResourceKey, which is simply the offset of the just-inserted, now-immutable data and
/// can be used by ResourcesTableReader to read the data back without having to know about the
/// hash table.
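///
/// # Example
///
/// A minimal usage sketch (illustrative only; it needs a Fuchsia `zx::Vmo`, so the block is
/// marked `ignore` and is not compiled as a doctest):
///
/// ```ignore
/// let vmo = zx::Vmo::create(1024 * 1024)?;
/// let mut writer = ResourcesTableWriter::new(&vmo)?;
///
/// // Intern a compressed stack trace and record a thread.
/// let trace: &[u8] = &[1, 2, 3];
/// let (trace_key, _inserted) = writer.intern_compressed_stack_trace(trace)?;
/// let thread_key = writer.insert_thread_info(1234, &zx::Name::new_lossy("my-thread"))?;
///
/// // The returned keys can be resolved by a ResourcesTableReader on the same VMO.
/// let reader = ResourcesTableReader::new(&vmo)?;
/// assert_eq!(reader.get_compressed_stack_trace(trace_key)?, trace);
/// assert_eq!(reader.get_thread_info(thread_key)?.koid, 1234);
/// ```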
pub struct ResourcesTableWriter {
    storage: MemoryMappedVmo,
    watermark: usize, // Offset of the first unallocated byte.
}

impl ResourcesTableWriter {
    /// Initializes a VMO as an empty table and creates a ResourcesTableWriter to write into it.
    ///
    /// The caller must guarantee that the `vmo` is not accessed by others (unless they use
    /// ResourcesTableReader instances) while the returned instance is alive.
    pub fn new(vmo: &zx::Vmo) -> Result<ResourcesTableWriter, crate::Error> {
        let storage = MemoryMappedVmo::new_readwrite(vmo)?;
        if storage.vmo_size() < size_of::<StackBucketHeads>() {
            return Err(crate::Error::BufferTooSmall);
        } else if storage.vmo_size() - 1 > Offset::MAX as usize {
            return Err(crate::Error::BufferTooBig);
        }

        let mut result = ResourcesTableWriter { storage, watermark: size_of::<StackBucketHeads>() };

        // Clear the hash table.
        for bucket_index in 0..NUM_STACK_BUCKETS {
            *result.stack_bucket_head_at(bucket_index) = OFFSET_INVALID;
        }

        Ok(result)
    }

    /// Allocates space in the VMO and returns the base offset of the allocated range.
    fn allocate(&mut self, layout: Layout) -> Result<Offset, crate::Error> {
        // Forbid alignment requirements greater than the page size, as they would have
        // implications on how the VMO can be mapped for reading.
        if layout.align() > zx::system_get_page_size() as usize {
            return Err(crate::Error::InvalidInput);
        }
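
        // Align the current watermark up to the requested alignment (for example, a watermark of
        // 13 with an alignment of 8 yields a result_start of 16), then check that the allocation
        // fits within the VMO.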
        let result_start = (self.watermark + layout.align() - 1) & !(layout.align() - 1);
        let result_end = result_start + layout.size();

        if result_end <= self.storage.vmo_size() {
            self.watermark = result_end;
            Ok(result_start as Offset)
        } else {
            Err(crate::Error::OutOfSpace)
        }
    }

    /// This is the hash function for stack traces.
    fn compute_bucket_index(compressed_stack_trace: &[u8]) -> usize {
        let tmp = crc::crc32::checksum_ieee(compressed_stack_trace);
        tmp as usize % NUM_STACK_BUCKETS
    }

    /// Returns a mutable reference to the head of a given bucket's linked list.
    fn stack_bucket_head_at(&mut self, bucket_index: usize) -> &mut Offset {
        // The bucket heads are always stored at the beginning of the VMO.
        let bucket_heads = self.storage.get_object_mut::<StackBucketHeads>(0).unwrap();
        &mut bucket_heads[bucket_index]
    }

    /// Tries to find an already-inserted stack trace in the given bucket by scanning its linked
    /// list.
    fn find_in_bucket(
        &mut self,
        bucket_index: usize,
        compressed_stack_trace: &[u8],
    ) -> Option<Offset> {
        let mut curr = *self.stack_bucket_head_at(bucket_index);
        while curr != OFFSET_INVALID {
            // Read the "next" field in the current node and its compressed stack trace, which is
            // stored immediately afterwards.
            let curr_next: Offset = *self.storage.get_object(curr as usize).unwrap();
            let payload_offset = curr as usize + size_of_val(&curr_next);
            let curr_payload = get_compressed_stack_trace(&self.storage, payload_offset).unwrap();

            // Is this stack trace the one we were looking for?
            if *curr_payload == *compressed_stack_trace {
                return Some(curr);
            }

            curr = curr_next;
        }

        // Not found.
        None
    }

    fn insert_in_bucket(
        &mut self,
        bucket_index: usize,
        compressed_stack_trace: &[u8],
    ) -> Result<Offset, crate::Error> {
        // Allocate space for:
        // - The "next" field
        // - The stack trace length
        // - The actual stack trace
        let alloc_bytes =
            size_of::<Offset>() + size_of::<StackTraceLength>() + compressed_stack_trace.len();
        let alloc_align = align_of::<Offset>();
        let new = self.allocate(Layout::from_size_align(alloc_bytes, alloc_align).unwrap())?;

        let old_head = *self.stack_bucket_head_at(bucket_index);

        // Write them.
        *self.storage.get_object_mut(new as usize).unwrap() = old_head;
        set_compressed_stack_trace(
            &mut self.storage,
            new as usize + size_of::<Offset>(),
            compressed_stack_trace,
        )
        .unwrap();

        // Update the bucket's head pointer.
        *self.stack_bucket_head_at(bucket_index) = new;

        Ok(new)
    }

    /// Appends a compressed stack trace and returns its offset into the VMO.
    ///
    /// This function also applies deduplication: if a copy of the given stack trace is already
    /// present, the offset of the existing copy is returned without modifying the VMO contents.
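    ///
    /// Deduplication sketch (illustrative; assumes `writer` is an existing ResourcesTableWriter
    /// and is marked `ignore` because it needs a Fuchsia VMO):
    ///
    /// ```ignore
    /// let (key_a, inserted_a) = writer.intern_compressed_stack_trace(&[7, 7, 7])?;
    /// let (key_b, inserted_b) = writer.intern_compressed_stack_trace(&[7, 7, 7])?;
    /// assert!(inserted_a); // the first call actually wrote the stack trace
    /// assert!(!inserted_b); // the second call found the existing copy
    /// assert_eq!(key_a, key_b); // both keys point at the same stored bytes
    /// ```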
    pub fn intern_compressed_stack_trace(
        &mut self,
        compressed_stack_trace: &[u8],
    ) -> Result<(ResourceKey, bool), crate::Error> {
        // Verify that the length fits in StackTraceLength and return error if it does not.
        if compressed_stack_trace.len() > StackTraceLength::MAX as usize {
            return Err(crate::Error::BufferTooBig);
        }

        // Find/insert a StackNode and get its offset within the memory region.
        let bucket_index = Self::compute_bucket_index(compressed_stack_trace);
        let (offset, inserted) = match self.find_in_bucket(bucket_index, compressed_stack_trace) {
            Some(offset) => (offset, false),
            None => (self.insert_in_bucket(bucket_index, compressed_stack_trace)?, true),
        };

        // Adjust the returned offset to skip the "next" field (which is an internal
        // ResourcesTableWriter implementation detail) and point directly to the StackTraceLength
        // field (which is what ResourcesTableReader expects to receive).
        let resource_key = ResourceKey(offset + size_of::<Offset>() as Offset);
        Ok((resource_key, inserted))
    }

    /// Appends a thread information entry and returns its offset into the VMO.
    pub fn insert_thread_info(
        &mut self,
        koid: zx::sys::zx_koid_t,
        name: &zx::Name,
    ) -> Result<ResourceKey, crate::Error> {
        let offset = self.allocate(Layout::new::<ThreadInfo>())?;
        *self.storage.get_object_mut(offset as usize).unwrap() = ThreadInfo { koid, name: *name };
        Ok(ResourceKey(offset))
    }
}

/// Mediates read access to a VMO written by ResourcesTableWriter.
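///
/// A reading sketch (illustrative; assumes `trace_key` and `thread_key` were previously returned
/// by a ResourcesTableWriter operating on the same `vmo`; marked `ignore` because it needs a
/// Fuchsia VMO):
///
/// ```ignore
/// let reader = ResourcesTableReader::new(&vmo)?;
/// let stack_trace: &[u8] = reader.get_compressed_stack_trace(trace_key)?;
/// let thread_info: &ThreadInfo = reader.get_thread_info(thread_key)?;
/// ```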
pub struct ResourcesTableReader {
    storage: MemoryMappedVmo,
}

impl ResourcesTableReader {
    pub fn new(vmo: &zx::Vmo) -> Result<ResourcesTableReader, crate::Error> {
        let storage = MemoryMappedVmo::new_readonly(vmo)?;
        Ok(ResourcesTableReader { storage })
    }

    /// Gets the compressed stack trace identified by `resource_key`.
    pub fn get_compressed_stack_trace(
        &self,
        resource_key: ResourceKey,
    ) -> Result<&[u8], crate::Error> {
        let ResourceKey(offset) = resource_key;
        get_compressed_stack_trace(&self.storage, offset as usize)
    }

    /// Gets the thread info entry identified by `resource_key`.
    pub fn get_thread_info(&self, resource_key: ResourceKey) -> Result<&ThreadInfo, crate::Error> {
        let ResourceKey(offset) = resource_key;
        Ok(self.storage.get_object(offset as usize)?)
    }
}

fn get_compressed_stack_trace(
    storage: &MemoryMappedVmo,
    byte_offset: usize,
) -> Result<&[u8], crate::Error> {
    // Read the length.
    let header: StackTraceLength = *storage.get_object(byte_offset)?;

    // Get actual data, which is stored immediately after the length, as a slice.
    Ok(storage.get_slice(byte_offset + size_of_val(&header), header as usize)?)
}

fn set_compressed_stack_trace(
    storage: &mut MemoryMappedVmo,
    byte_offset: usize,
    compressed_stack_trace: &[u8],
) -> Result<(), crate::Error> {
    let header: StackTraceLength =
        compressed_stack_trace.len().try_into().map_err(|_| crate::Error::BufferTooBig)?;

    // Write the length.
    *storage.get_object_mut(byte_offset)? = header;

    // Write actual data immediately after the length.
    storage
        .get_slice_mut(byte_offset + size_of_val(&header), compressed_stack_trace.len())?
        .copy_from_slice(compressed_stack_trace);

    Ok(())
}

#[cfg(test)]
mod tests {
    use super::*;
    use assert_matches::assert_matches;

    // Some tests below use this constant to create a VMO with a known size.
    const VMO_SIZE: usize = 4 * 1024 * 1024; // 4 MiB

    struct TestStorage {
        vmo: zx::Vmo,
    }

    impl TestStorage {
        pub fn new(vmo_size: usize) -> TestStorage {
            let vmo = zx::Vmo::create(vmo_size as u64).unwrap();
            TestStorage { vmo }
        }

        fn create_writer(&self) -> ResourcesTableWriter {
            ResourcesTableWriter::new(&self.vmo).unwrap()
        }

        fn create_reader(&self) -> ResourcesTableReader {
            ResourcesTableReader::new(&self.vmo).unwrap()
        }
    }

    #[test]
    fn test_stack_trace_deduplication() {
        let storage = TestStorage::new(VMO_SIZE);
        let mut writer = storage.create_writer();

        // Insert many distinct stack traces and store the corresponding resource keys.
        // The number of stack traces is chosen so that at least one bucket contains at least three
        // stack traces.
        const COUNT: usize = 2 * NUM_STACK_BUCKETS + 1;
        let mut pairs = Vec::new();
        for i in 0..COUNT {
            // Generate a unique array of bytes and pretend it is a compressed stack trace.
            let stack_trace = i.to_ne_bytes();

            let (resource_key, inserted) =
                writer.intern_compressed_stack_trace(&stack_trace).unwrap();
            assert!(inserted, "expected true because the stack trace was not present");

            pairs.push((stack_trace, resource_key));
        }

        // Verify that trying to insert them again returns the same resource keys.
        for (stack_trace, expected_resource_key) in &pairs {
            let (actual_resource_key, inserted) =
                writer.intern_compressed_stack_trace(stack_trace).unwrap();
            assert!(!inserted, "expected false because the stack trace is already present");
            assert_eq!(actual_resource_key, *expected_resource_key);
        }

        // Verify that they can be read back.
        let reader = storage.create_reader();
        for (expected_stack_trace, resource_key) in &pairs {
            let actual_stack_trace = reader.get_compressed_stack_trace(*resource_key).unwrap();
            assert_eq!(actual_stack_trace, *expected_stack_trace);
        }
    }

    #[test]
    fn test_empty_stack_trace() {
        let storage = TestStorage::new(VMO_SIZE);
        let mut writer = storage.create_writer();

        // It must be possible to insert the empty stack trace.
        let (resource_key, inserted) = writer.intern_compressed_stack_trace(&[]).unwrap();
        assert!(inserted);

        // Verify that it can be read back correctly.
        let reader = storage.create_reader();
        let read_result = reader.get_compressed_stack_trace(resource_key).unwrap();
        assert_eq!(read_result, []);
    }

    #[test]
    fn test_long_stack_traces() {
        let storage = TestStorage::new(VMO_SIZE);
        let mut writer = storage.create_writer();

        // Inserting a stack trace whose length cannot be represented in the length field (u16)
        // should fail.
        let stack_trace_too_long = vec![0xAA; u16::MAX as usize + 1];
        let result = writer.intern_compressed_stack_trace(&stack_trace_too_long);
        assert_matches!(result, Err(crate::Error::BufferTooBig));

        // Inserting a stack trace with the maximum representable length should succeed.
        let stack_trace_max_len = vec![0x55; u16::MAX as usize];
        let (resource_key, _) =
            writer.intern_compressed_stack_trace(&stack_trace_max_len).unwrap();

        // And it must be possible to read it back.
        let reader = storage.create_reader();
        let read_result = reader.get_compressed_stack_trace(resource_key).unwrap();
        assert_eq!(read_result, stack_trace_max_len);
    }

    #[test]
    fn test_write_until_out_of_space() {
        let storage = TestStorage::new(VMO_SIZE);
        let mut writer = storage.create_writer();

        // Insert many distinct stack traces and verify that, at some point, we get an OutOfSpace
        // error. Instead of estimating exactly how many stack traces can fit, we just use VMO_SIZE
        // as an upper bound before declaring failure (each distinct stack trace obviously requires
        // at least one byte of storage).
        for i in 0..=VMO_SIZE {
            // Generate a unique array of bytes and pretend it is a compressed stack trace.
            let stack_trace = i.to_ne_bytes();

            if let Err(crate::Error::OutOfSpace) =
                writer.intern_compressed_stack_trace(&stack_trace)
            {
                return; // Test passed
            }
        }

        unreachable!("Inserted more than {} distinct stack traces", VMO_SIZE);
    }

    #[test]
    fn test_thread_info() {
        let storage = TestStorage::new(VMO_SIZE);
        let mut writer = storage.create_writer();

        // Insert a thread info struct with placeholder values (the name must be padded to the
        // expected length).
        const FAKE_KOID: zx::sys::zx_koid_t = 1234;
        const FAKE_NAME: zx::Name = zx::Name::new_lossy("fake-name");
        let resource_key = writer.insert_thread_info(FAKE_KOID, &FAKE_NAME).unwrap();

        // Verify that it can be read back correctly.
        let reader = storage.create_reader();
        let thread_info = reader.get_thread_info(resource_key).unwrap();
        assert_eq!(thread_info.koid, FAKE_KOID);
        assert_eq!(thread_info.name, FAKE_NAME);
    }
}