use std::alloc::Layout;
use std::mem::{align_of, size_of, size_of_val};

use crate::memory_mapped_vmo::{MemoryMappable, MemoryMappedVmo};

type Offset = u32;
const OFFSET_INVALID: Offset = Offset::MAX;

type StackTraceLength = u16;
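
// Stack traces are deduplicated through a fixed-size hash table stored at the
// beginning of the VMO: each bucket head holds the offset of the first node in
// a singly-linked list of stack traces whose hash falls into that bucket, or
// OFFSET_INVALID if the bucket is empty. Nodes are bump-allocated after the
// bucket heads array.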
const NUM_STACK_BUCKETS: usize = 1 << 13;
type StackBucketHeads = [Offset; NUM_STACK_BUCKETS];
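
/// Identifies a resource stored in the table. Internally, it is the byte
/// offset of the resource's data within the VMO.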
#[derive(Clone, Copy, Eq, Debug, Hash, Ord, PartialEq, PartialOrd)]
#[repr(transparent)]
pub struct ResourceKey(Offset);

impl ResourceKey {
    #[cfg(test)]
    pub(crate) const fn from_raw(offset: Offset) -> ResourceKey {
        ResourceKey(offset)
    }

    pub const fn into_raw(self) -> Offset {
        self.0
    }
}
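
/// A thread's koid and name, as stored in the resources table.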
#[repr(C)]
#[derive(Debug)]
pub struct ThreadInfo {
    pub koid: zx::sys::zx_koid_t,
    pub name: zx::Name,
}
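
// SAFETY: ThreadInfo is #[repr(C)] and contains only fixed-size, plain-old-data
// fields (assumption: this is what MemoryMappable requires of implementors).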
unsafe impl MemoryMappable for ThreadInfo {}
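
/// Writes deduplicated compressed stack traces and thread records into a VMO,
/// handing out `ResourceKey`s that a `ResourcesTableReader` mapping the same
/// VMO can later resolve.
///
/// A minimal usage sketch (the 4 MiB size is illustrative; any VMO large
/// enough to hold the bucket heads array works):
///
/// ```ignore
/// let vmo = zx::Vmo::create(4 * 1024 * 1024).unwrap();
/// let mut writer = unsafe { ResourcesTableWriter::new(&vmo) }.unwrap();
/// let (key, _newly_inserted) = writer.intern_compressed_stack_trace(&[1, 2, 3]).unwrap();
///
/// let reader = unsafe { ResourcesTableReader::new(&vmo) }.unwrap();
/// assert_eq!(reader.get_compressed_stack_trace(key).unwrap(), &[1, 2, 3]);
/// ```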
pub struct ResourcesTableWriter {
    storage: MemoryMappedVmo,
    watermark: usize,
}
impl ResourcesTableWriter {
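    /// Maps the given VMO for read/write access and initializes an empty
    /// table in it: every stack bucket head is set to `OFFSET_INVALID` and
    /// the allocation watermark is placed just past the bucket heads array.
    /// The VMO must be big enough to hold the bucket heads and small enough
    /// that every byte can be addressed by an `Offset`.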
    pub unsafe fn new(vmo: &zx::Vmo) -> Result<ResourcesTableWriter, crate::Error> {
        let storage = MemoryMappedVmo::new_readwrite(vmo)?;
        if storage.vmo_size() < size_of::<StackBucketHeads>() {
            return Err(crate::Error::BufferTooSmall);
        } else if storage.vmo_size() - 1 > Offset::MAX as usize {
            return Err(crate::Error::BufferTooBig);
        }

        let mut result = ResourcesTableWriter { storage, watermark: size_of::<StackBucketHeads>() };

        for bucket_index in 0..NUM_STACK_BUCKETS {
            *result.stack_bucket_head_at(bucket_index) = OFFSET_INVALID;
        }

        Ok(result)
    }
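
    /// Bump-allocates `layout.size()` bytes at the current watermark, rounded
    /// up to `layout.align()`, and returns the offset of the reserved range
    /// within the VMO. Alignments larger than the page size are rejected.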
    fn allocate(&mut self, layout: Layout) -> Result<Offset, crate::Error> {
        if layout.align() > zx::system_get_page_size() as usize {
            return Err(crate::Error::InvalidInput);
        }

        let result_start = (self.watermark + layout.align() - 1) & !(layout.align() - 1);
        let result_end = result_start + layout.size();

        if result_end <= self.storage.vmo_size() {
            self.watermark = result_end;
            Ok(result_start as Offset)
        } else {
            Err(crate::Error::OutOfSpace)
        }
    }
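
    /// Hashes a compressed stack trace into one of the `NUM_STACK_BUCKETS`
    /// buckets.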
    fn compute_bucket_index(compressed_stack_trace: &[u8]) -> usize {
        let tmp = crc::crc32::checksum_ieee(compressed_stack_trace);
        tmp as usize % NUM_STACK_BUCKETS
    }

    /// Returns a mutable reference to the head of the given bucket's linked
    /// list, which lives in the bucket heads array at offset 0 of the VMO.
    fn stack_bucket_head_at(&mut self, bucket_index: usize) -> &mut Offset {
        let bucket_heads = self.storage.get_object_mut::<StackBucketHeads>(0).unwrap();
        &mut bucket_heads[bucket_index]
    }
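
    /// Searches the given bucket for a node whose payload equals
    /// `compressed_stack_trace`, returning the node's offset if found.
    ///
    /// Each node in a bucket's singly-linked list is laid out as the offset of
    /// the next node (or `OFFSET_INVALID`), followed by a `StackTraceLength`
    /// header, followed by the compressed stack trace bytes.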
    fn find_in_bucket(
        &mut self,
        bucket_index: usize,
        compressed_stack_trace: &[u8],
    ) -> Option<Offset> {
        let mut curr = *self.stack_bucket_head_at(bucket_index);
        while curr != OFFSET_INVALID {
            let curr_next: Offset = *self.storage.get_object(curr as usize).unwrap();
            let payload_offset = curr as usize + size_of_val(&curr_next);
            let curr_payload = get_compressed_stack_trace(&self.storage, payload_offset).unwrap();

            if *curr_payload == *compressed_stack_trace {
                return Some(curr);
            }

            curr = curr_next;
        }

        None
    }
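
    /// Appends a new node containing `compressed_stack_trace` and prepends it
    /// to the given bucket's linked list, returning the new node's offset.
    /// Does not check whether the stack trace is already present.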
    fn insert_in_bucket(
        &mut self,
        bucket_index: usize,
        compressed_stack_trace: &[u8],
    ) -> Result<Offset, crate::Error> {
        let alloc_bytes =
            size_of::<Offset>() + size_of::<StackTraceLength>() + compressed_stack_trace.len();
        let alloc_align = align_of::<Offset>();
        let new = self.allocate(Layout::from_size_align(alloc_bytes, alloc_align).unwrap())?;

        let old_head = *self.stack_bucket_head_at(bucket_index);

        *self.storage.get_object_mut(new as usize).unwrap() = old_head;
        set_compressed_stack_trace(
            &mut self.storage,
            new as usize + size_of::<Offset>(),
            compressed_stack_trace,
        )
        .unwrap();

        *self.stack_bucket_head_at(bucket_index) = new;

        Ok(new)
    }
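
    /// Inserts a compressed stack trace into the table, deduplicating it
    /// against previously inserted ones.
    ///
    /// Returns the stack trace's `ResourceKey` and a boolean that is true if
    /// the stack trace was not already present.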
    pub fn intern_compressed_stack_trace(
        &mut self,
        compressed_stack_trace: &[u8],
    ) -> Result<(ResourceKey, bool), crate::Error> {
        if compressed_stack_trace.len() > StackTraceLength::MAX as usize {
            return Err(crate::Error::BufferTooBig);
        }

        let bucket_index = Self::compute_bucket_index(compressed_stack_trace);
        let (offset, inserted) = match self.find_in_bucket(bucket_index, compressed_stack_trace) {
            Some(offset) => (offset, false),
            None => (self.insert_in_bucket(bucket_index, compressed_stack_trace)?, true),
        };

        // The resource key points at the node's payload (the length header and
        // the stack trace bytes), skipping the link to the next node.
        let resource_key = ResourceKey(offset + size_of::<Offset>() as Offset);
        Ok((resource_key, inserted))
    }
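
    /// Stores a thread's koid and name in the table and returns its
    /// `ResourceKey`. Thread records are not deduplicated.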
    pub fn insert_thread_info(
        &mut self,
        koid: zx::sys::zx_koid_t,
        name: &zx::Name,
    ) -> Result<ResourceKey, crate::Error> {
        let offset = self.allocate(Layout::new::<ThreadInfo>())?;
        *self.storage.get_object_mut(offset as usize).unwrap() = ThreadInfo { koid, name: *name };
        Ok(ResourceKey(offset))
    }
}
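
/// Read-only counterpart of `ResourcesTableWriter`: resolves `ResourceKey`s
/// obtained from a writer operating on the same VMO.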
pub struct ResourcesTableReader {
    storage: MemoryMappedVmo,
}

impl ResourcesTableReader {
    pub unsafe fn new(vmo: &zx::Vmo) -> Result<ResourcesTableReader, crate::Error> {
        let storage = MemoryMappedVmo::new_readonly(vmo)?;
        Ok(ResourcesTableReader { storage })
    }

    /// Returns the compressed stack trace identified by `resource_key`.
    pub fn get_compressed_stack_trace(
        &self,
        resource_key: ResourceKey,
    ) -> Result<&[u8], crate::Error> {
        let ResourceKey(offset) = resource_key;
        get_compressed_stack_trace(&self.storage, offset as usize)
    }

    /// Returns the `ThreadInfo` record identified by `resource_key`.
    pub fn get_thread_info(&self, resource_key: ResourceKey) -> Result<&ThreadInfo, crate::Error> {
        let ResourceKey(offset) = resource_key;
        Ok(self.storage.get_object(offset as usize)?)
    }
}
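
// Compressed stack traces are stored as a `StackTraceLength` header followed
// by that many bytes of payload.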
fn get_compressed_stack_trace(
    storage: &MemoryMappedVmo,
    byte_offset: usize,
) -> Result<&[u8], crate::Error> {
    let header: StackTraceLength = *storage.get_object(byte_offset)?;

    Ok(storage.get_slice(byte_offset + size_of_val(&header), header as usize)?)
}

fn set_compressed_stack_trace(
    storage: &mut MemoryMappedVmo,
    byte_offset: usize,
    compressed_stack_trace: &[u8],
) -> Result<(), crate::Error> {
    let header: StackTraceLength =
        compressed_stack_trace.len().try_into().map_err(|_| crate::Error::BufferTooBig)?;

    *storage.get_object_mut(byte_offset)? = header;

    storage
        .get_slice_mut(byte_offset + size_of_val(&header), compressed_stack_trace.len())?
        .copy_from_slice(compressed_stack_trace);

    Ok(())
}
#[cfg(test)]
mod tests {
    use super::*;
    use assert_matches::assert_matches;

    const VMO_SIZE: usize = 4 * 1024 * 1024;

    struct TestStorage {
        vmo: zx::Vmo,
    }

    impl TestStorage {
        pub fn new(vmo_size: usize) -> (TestStorage, ResourcesTableWriter) {
            let vmo = zx::Vmo::create(vmo_size as u64).unwrap();

            let writer = unsafe { ResourcesTableWriter::new(&vmo) }.unwrap();

            (TestStorage { vmo }, writer)
        }

        fn create_reader(&self) -> ResourcesTableReader {
            unsafe { ResourcesTableReader::new(&self.vmo) }.unwrap()
        }
    }

    #[test]
    fn test_stack_trace_deduplication() {
        let (storage, mut writer) = TestStorage::new(VMO_SIZE);

        // Use more distinct stack traces than buckets so that some buckets
        // contain more than one entry.
        const COUNT: usize = 2 * NUM_STACK_BUCKETS + 1;

        let mut pairs = Vec::new();
        for i in 0..COUNT {
            let stack_trace = i.to_ne_bytes();

            let (resource_key, inserted) =
                writer.intern_compressed_stack_trace(&stack_trace).unwrap();
            assert!(inserted, "expected true because the stack trace was not present");

            pairs.push((stack_trace, resource_key));
        }

        for (stack_trace, expected_resource_key) in &pairs {
            let (actual_resource_key, inserted) =
                writer.intern_compressed_stack_trace(stack_trace).unwrap();
            assert!(!inserted, "expected false because the stack trace is already present");
            assert_eq!(actual_resource_key, *expected_resource_key);
        }

        let reader = storage.create_reader();
        for (expected_stack_trace, resource_key) in &pairs {
            let actual_stack_trace = reader.get_compressed_stack_trace(*resource_key).unwrap();
            assert_eq!(actual_stack_trace, *expected_stack_trace);
        }
    }

    #[test]
    fn test_empty_stack_trace() {
        let (storage, mut writer) = TestStorage::new(VMO_SIZE);

        let (resource_key, inserted) = writer.intern_compressed_stack_trace(&[]).unwrap();
        assert!(inserted);

        let reader = storage.create_reader();
        let read_result = reader.get_compressed_stack_trace(resource_key).unwrap();
        assert_eq!(read_result, []);
    }

    #[test]
    fn test_long_stack_traces() {
        let (storage, mut writer) = TestStorage::new(VMO_SIZE);

        let stack_trace_too_long = vec![0xAA; u16::MAX as usize + 1];
        let result = writer.intern_compressed_stack_trace(&stack_trace_too_long);
        assert_matches!(result, Err(crate::Error::BufferTooBig));

        let stack_trace_max_len = vec![0x55; u16::MAX as usize];
        let (resource_key, _) = writer.intern_compressed_stack_trace(&stack_trace_max_len).unwrap();

        let reader = storage.create_reader();
        let read_result = reader.get_compressed_stack_trace(resource_key).unwrap();
        assert_eq!(read_result, stack_trace_max_len);
    }

    #[test]
    fn test_write_until_out_of_space() {
        let (_storage, mut writer) = TestStorage::new(VMO_SIZE);

        // Insert distinct stack traces until the writer reports OutOfSpace.
        // The VMO cannot possibly hold VMO_SIZE + 1 of them, so the error must
        // happen before the loop finishes.
        for i in 0..=VMO_SIZE {
            let stack_trace = i.to_ne_bytes();

            if let Err(crate::Error::OutOfSpace) =
                writer.intern_compressed_stack_trace(&stack_trace)
            {
                return;
            }
        }

        unreachable!("Inserted more than {} distinct stack traces", VMO_SIZE);
    }

    #[test]
    fn test_thread_info() {
        let (storage, mut writer) = TestStorage::new(VMO_SIZE);

        const FAKE_KOID: zx::sys::zx_koid_t = 1234;
        const FAKE_NAME: zx::Name = zx::Name::new_lossy("fake-name");
        let resource_key = writer.insert_thread_info(FAKE_KOID, &FAKE_NAME).unwrap();

        let reader = storage.create_reader();
        let thread_info = reader.get_thread_info(resource_key).unwrap();
        assert_eq!(thread_info.koid, FAKE_KOID);
        assert_eq!(thread_info.name, FAKE_NAME);
    }
}