starnix_core/perf/event.rs

// Copyright 2024 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};

use crate::mm::memory::MemoryObject;
use crate::task::Kernel;
use crate::vfs::OutputBuffer;
use fuchsia_runtime::vmar_root_self;
use shared_buffer::SharedBuffer;
use starnix_sync::Mutex;
use starnix_types::PAGE_SIZE;
use starnix_uapi::errors::Errno;
use starnix_uapi::{errno, error, from_status_like_fdio};
use zerocopy::native_endian::{I32, U16, U32, U64};
use zerocopy::{Immutable, IntoBytes, Unaligned};
use zx::{BootInstant, BootTimeline};

// The default ring buffer size (2MB).
// TODO(https://fxbug.dev/357665908): This should be based on /sys/kernel/tracing/buffer_size_kb.
const DEFAULT_RING_BUFFER_SIZE_BYTES: u64 = 2097152;

// A page header consists of a u64 timestamp and a u64 commit field.
const PAGE_HEADER_SIZE: u64 = 2 * std::mem::size_of::<u64>() as u64;
const COMMIT_FIELD_OFFSET: u64 = std::mem::size_of::<u64>() as u64;
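
// For reference: with 8-byte u64 fields the page header occupies the first 16 bytes of each page
// (timestamp at bytes 0..8, commit at bytes 8..16), so events start at offset PAGE_HEADER_SIZE.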

// The event id for atrace events.
const FTRACE_PRINT_ID: U16 = U16::new(5);

// Used for inspect tracking.
const DROPPED_PAGES: &str = "dropped_pages";

#[repr(C)]
#[derive(Debug, Default, IntoBytes, Immutable, Unaligned)]
struct PrintEventHeader {
    common_type: U16,
    common_flags: u8,
    common_preempt_count: u8,
    common_pid: I32,
    ip: U64,
}

#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, Unaligned)]
struct PrintEvent {
    header: PrintEventHeader,
}

impl PrintEvent {
    fn new(pid: i32) -> Self {
        Self {
            header: PrintEventHeader {
                common_type: FTRACE_PRINT_ID,
                common_pid: I32::new(pid),
                // Perfetto doesn't care about any other field.
                ..Default::default()
            },
        }
    }

    fn size(&self) -> usize {
        std::mem::size_of::<PrintEventHeader>()
    }
}

#[repr(C)]
#[derive(Debug, Default, IntoBytes, PartialEq, Immutable, Unaligned)]
struct TraceEventHeader {
    // u32 where:
    //   type_or_length: bottom 5 bits. If 0, `data` is read for length. Always set to 0 for now.
    //   time_delta: top 27 bits
    time_delta: U32,

    // If type_or_length is 0, holds the length of the trace message.
    // We always write length here for simplicity.
    data: U32,
}

impl TraceEventHeader {
    fn new(size: usize) -> Self {
        // The size reported in the event's header includes the length field itself (a u32) plus
        // the size of the event data.
        let size = (std::mem::size_of::<u32>() + size) as u32;
        Self { time_delta: U32::new(0), data: U32::new(size) }
    }

    fn set_time_delta(&mut self, nanos: u32) {
        self.time_delta = U32::new(nanos << 5);
    }
}
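
// Packing example for the header above: the low 5 type_or_length bits stay 0 ("length is in
// `data`"), so a delta of, say, 100ns is stored as time_delta = 100 << 5 = 3200, while `data`
// holds the event length in bytes, including the 4-byte length field itself.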

#[repr(C)]
#[derive(Debug, IntoBytes, Immutable, Unaligned)]
pub struct TraceEvent {
    /// Common metadata among all trace event types.
    header: TraceEventHeader, // u64

    /// The event data.
    ///
    /// Atrace events are reported as PrintFtraceEvents. When we support multiple types of events,
    /// this can be updated to be more generic.
    event: PrintEvent,
}

impl TraceEvent {
    pub fn new(pid: i32, data_len: usize) -> Self {
        let event = PrintEvent::new(pid);
        // +1 because we append a trailing '\n' to the data when we serialize.
        let header = TraceEventHeader::new(event.size() + data_len + 1);
        Self { header, event }
    }

    fn size(&self) -> usize {
        // The header's data size doesn't include the time_delta size.
        std::mem::size_of::<u32>() + self.header.data.get() as usize
    }

    fn set_timestamp(&mut self, timestamp: BootInstant, prev_timestamp: BootInstant) {
        // Debug assert so that if this ever happens we notice it and can fix it. In release
        // builds, fall back to a delta of 0, which is less disruptive to the process and to the
        // resulting trace data.
        debug_assert!(timestamp >= prev_timestamp, "Timestamp must be >= prev_timestamp");
        let nanos: u32 = (timestamp - prev_timestamp).into_nanos().try_into().unwrap_or(0);
        self.header.set_time_delta(nanos);
    }
}
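
// Size example (matching create_trace_event in the tests): for pid 1234 and the 17-byte payload
// "B|1234|slice_name", the PrintEventHeader is 16 bytes, so `data` records 4 + (16 + 17 + 1) = 38
// bytes and TraceEvent::size() returns 4 + 38 = 42.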

struct TraceEventQueueMetadata {
    /// The offset where new reads happen in the ring buffer.
    head: u64,

    /// The offset of the end of the last committed event in the ring buffer.
    ///
    /// When a writer can preempt another writer, only the last writer to commit its event moves
    /// this commit page.
    commit: u64,

    /// The offset where new writes happen in the ring buffer. This can be later in the ring buffer
    /// compared to `commit` when a writer has reserved space for an event but not yet committed it.
    tail: u64,

    /// The max size of an event.
    max_event_size: u64,

    /// The timestamp of the last event in the queue. If the queue is empty, then the time the queue
    /// was created.
    prev_timestamp: BootInstant,

    /// If true, the queue has at least one full page of events to read.
    ///
    /// TODO(https://fxbug.dev/357665908): Support partial page reads.
    is_readable: bool,

    /// If true, overwrites old pages of events when the queue is full. Defaults to true.
    overwrite: bool,

    /// The number of pages of events dropped because the ring buffer was full and the queue is in
    /// overwrite mode.
    dropped_pages: u64,

    /// While tracing is in session, we map the trace buffer to avoid zx_vmo_write calls.
    mapping: Option<SharedBuffer>, // mapped_vmo requires a non-resizable VMO, so we use
                                   // SharedBuffer directly.
}
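
// A concrete picture of the offsets above (matching metadata_push_event_simple in the tests):
// a fresh queue has head = 0 and commit = tail = PAGE_HEADER_SIZE; reserve() moves only `tail`,
// commit() then advances `commit` to match, and read() advances `head` one page at a time.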

impl Drop for TraceEventQueueMetadata {
    fn drop(&mut self) {
        if let Some(ref mut buf) = self.mapping {
            let (addr, size): (*mut u8, usize) = buf.as_ptr_len();
            let addr = addr as usize;

            // Safety:
            //
            // The memory behind this `SharedBuffer` is only accessible
            // via `mapping` through this struct.
            unsafe {
                let _ = vmar_root_self().unmap(addr, size);
            }
        }
    }
}

impl TraceEventQueueMetadata {
    fn new() -> Self {
        Self {
            head: 0,
            commit: PAGE_HEADER_SIZE,
            tail: PAGE_HEADER_SIZE,
            max_event_size: *PAGE_SIZE - PAGE_HEADER_SIZE,
            prev_timestamp: BootInstant::get(),
            is_readable: false,
            overwrite: true,
            dropped_pages: 0,
            mapping: None,
        }
    }

    /// The offset of the head page in the `ring_buffer` VMO.
    fn head_page_offset(&self) -> u64 {
        self.head - (self.head % *PAGE_SIZE)
    }

    /// The offset of the commit page in the `ring_buffer` VMO.
    fn commit_page_offset(&self) -> u64 {
        self.commit - (self.commit % *PAGE_SIZE)
    }

    /// The offset of the tail page in the `ring_buffer` VMO.
    fn tail_page_offset(&self) -> u64 {
        self.tail - (self.tail % *PAGE_SIZE)
    }

    /// The offset of the `commit` field in the current commit page's page header.
    fn commit_field_offset(&self) -> u64 {
        self.commit_page_offset() + COMMIT_FIELD_OFFSET
    }

    /// Reserves space in the ring buffer to commit an event. Returns the offset of the start of the
    /// reserved space.
    ///
    /// If the current tail page doesn't have enough space to fit the event but the queue is not
    /// full or is in overwrite mode, returns the offset after the page header of the next page.
    ///
    /// The caller needs to handle clearing old events if the queue is in overwrite mode and the
    /// head page has moved forward one page.
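    ///
    /// For example (see metadata_push_event_next_page in the tests): with `tail` one byte before
    /// the end of the first page, reserving a 30-byte event skips the next page's header and
    /// returns `*PAGE_SIZE + PAGE_HEADER_SIZE`, leaving `tail` at
    /// `*PAGE_SIZE + PAGE_HEADER_SIZE + 30`.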
    fn reserve(&mut self, event_size: u64) -> Result<u64, Errno> {
        if event_size > self.max_event_size {
            return error!(EINVAL);
        }

        let prev_tail_page = self.tail_page_offset();
        let mut reserve_start = self.tail;
        let maybe_new_tail = (self.tail + event_size) % DEFAULT_RING_BUFFER_SIZE_BYTES;
        let maybe_new_tail_page = maybe_new_tail - (maybe_new_tail % *PAGE_SIZE);

        if prev_tail_page != maybe_new_tail_page {
            // From https://docs.kernel.org/trace/ring-buffer-design.html:
            // When the tail meets the head page, if the buffer is in overwrite mode, the head page
            // will be pushed ahead one, otherwise, the write will fail.
            if maybe_new_tail_page == self.head_page_offset() {
                if self.overwrite {
                    self.head += *PAGE_SIZE;
                    self.dropped_pages += 1;
                } else {
                    return error!(ENOMEM);
                }
            }

            // Start the reservation just after the next page's header; commit() makes the matching
            // adjustment when the event is committed.
            reserve_start = maybe_new_tail_page + PAGE_HEADER_SIZE;
        }
        self.tail = reserve_start + event_size;

        Ok(reserve_start)
    }

    /// Moves the commit offset forward to indicate a write has been committed. If the commit
    /// crosses a page boundary, it is moved past the new page's header to match the offset that
    /// reserve() handed out.
    fn commit(&mut self, event_size: u64) {
        let prev_commit_page = self.commit_page_offset();
        self.commit = (self.commit + event_size) % DEFAULT_RING_BUFFER_SIZE_BYTES;

        let new_commit_page = self.commit_page_offset();
        if prev_commit_page != new_commit_page {
            self.commit = new_commit_page + PAGE_HEADER_SIZE + event_size;
            // Allow reads once a full page of events is available.
            self.is_readable = true;
        }
    }

    /// Returns the offset of the page to read from. Moves the head page forward a page.
    fn read(&mut self) -> Result<u64, Errno> {
        if !self.is_readable {
            return error!(EAGAIN);
        }

        let head_page = self.head_page_offset();
        self.head = (self.head + *PAGE_SIZE) % DEFAULT_RING_BUFFER_SIZE_BYTES;

        // If the read meets the last commit, then there is nothing more to read.
        if self.head_page_offset() == self.commit_page_offset() {
            self.is_readable = false;
        }

        Ok(head_page)
    }
}

/// Stores all trace events.
pub struct TraceEventQueue {
    /// If true, atrace events written to /sys/kernel/tracing/trace_marker will be stored as
    /// TraceEvents in `ring_buffer`.
    tracing_enabled: AtomicBool,

    /// Metadata about `ring_buffer`.
    metadata: Mutex<TraceEventQueueMetadata>,

    /// The trace events.
    ///
    /// From https://docs.kernel.org/trace/ring-buffer-map.html, if this memory is mapped, it should
    /// start with a meta-page, but Perfetto doesn't seem to parse this.
    ///
    /// Each page in this VMO consists of:
    ///   A page header:
    ///     // The timestamp of the last event in the previous page. If this is the first page, then
    ///     // the timestamp when tracing was enabled. This is used with time_delta in each
    ///     // event header to calculate an event's timestamp.
    ///     timestamp: u64
    ///
    ///     // The size in bytes of events committed in this page.
    ///     commit: u64
    ///
    ///   // Each event must fit on the remainder of the page (i.e. be smaller than a page minus the
    ///   // size of the page header).
    ///   N trace events
    ring_buffer: MemoryObject,

    /// Inspect node used for diagnostics.
    tracefs_node: fuchsia_inspect::Node,
}

impl TraceEventQueue {
    pub fn new(inspect_node: &fuchsia_inspect::Node) -> Result<Self, Errno> {
        let tracefs_node = inspect_node.create_child("tracefs");
        let metadata = TraceEventQueueMetadata::new();
        let ring_buffer: MemoryObject = zx::Vmo::create_with_opts(zx::VmoOptions::RESIZABLE, 0)
            .map_err(|_| errno!(ENOMEM))?
            .into();
        let ring_buffer = ring_buffer.with_zx_name(b"starnix:tracefs");
        Ok(Self {
            tracing_enabled: AtomicBool::new(false),
            metadata: Mutex::new(metadata),
            ring_buffer,
            tracefs_node,
        })
    }

    /// Gets the TraceEventQueue from the kernel, or initializes a new one if not present.
    pub fn from(kernel: &Kernel) -> Arc<Self> {
        kernel.expando.get_or_init(|| {
            TraceEventQueue::new(&kernel.inspect_node)
                .expect("TraceEventQueue constructed with valid options")
        })
    }
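
    // Usage sketch (hypothetical caller; assumes some `kernel: &Kernel` in scope):
    //     let queue = TraceEventQueue::from(kernel);
    //     if queue.is_enabled() { /* push events */ }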

    pub fn is_enabled(&self) -> bool {
        self.tracing_enabled.load(Ordering::Relaxed)
    }

    pub fn enable(&self) -> Result<(), Errno> {
        // Use the metadata mutex to make sure the state of the metadata and the enabled flag
        // are changed at the same time.
        let mut metadata = self.metadata.lock();
        metadata.prev_timestamp = BootInstant::get();
        self.ring_buffer
            .set_size(DEFAULT_RING_BUFFER_SIZE_BYTES)
            .map_err(|e| from_status_like_fdio!(e))?;

        let vmo = self.ring_buffer.as_vmo().expect("Trace FS's memory must be VMO backed.");
        let addr = vmar_root_self()
            .map(
                0,
                &vmo,
                0,
                DEFAULT_RING_BUFFER_SIZE_BYTES as usize,
                zx::VmarFlags::PERM_READ | zx::VmarFlags::PERM_WRITE,
            )
            .map_err(|e| from_status_like_fdio!(e))?;

        // Safety:
        //
        // The memory behind this `SharedBuffer` is only accessible via
        // methods on `TraceEventQueue`.
        //
        // The underlying memory is accessible during any accesses to `SharedBuffer`:
        // - It is only unmapped on `drop`
        // - We don't expose the mapped address which might allow it to outlive the TraceEventQueue
        // - The underlying VMO is resizable, but we never resize while the memory is mapped.
        metadata.mapping = Some(unsafe {
            SharedBuffer::new(addr as *mut u8, DEFAULT_RING_BUFFER_SIZE_BYTES as usize)
        });

        self.initialize_page(0, metadata.prev_timestamp)?;
        self.tracing_enabled.store(true, Ordering::Relaxed);
        Ok(())
    }

    /// Disables the event queue and resets it to empty.
    /// The number of dropped pages is recorded for reading via tracefs.
    pub fn disable(&self) -> Result<(), Errno> {
        // Use the metadata mutex to make sure the state of the metadata and the enabled flag
        // are changed at the same time.
        let mut metadata = self.metadata.lock();
        self.tracefs_node.record_uint(DROPPED_PAGES, metadata.dropped_pages);
        *metadata = TraceEventQueueMetadata::new();
        self.ring_buffer.set_size(0).map_err(|e| from_status_like_fdio!(e))?;
        self.tracing_enabled.store(false, Ordering::Relaxed);

        Ok(())
    }
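
    // Typical lifecycle (as exercised by enable_disable_queue in the tests): enable() sizes and
    // maps the ring buffer, push_event() appends events, read() drains full pages, and disable()
    // records dropped_pages to inspect and resets the queue to empty.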

    /// Reads a page's worth of events. Currently only reads pages that are full.
    ///
    /// From https://docs.kernel.org/trace/ring-buffer-design.html, when memory is mapped, a reader
    /// page can be swapped with the header page to avoid copying memory.
    pub fn read(&self, buf: &mut dyn OutputBuffer) -> Result<usize, Errno> {
        // Read the offset, which also moves the read pointer forward in the metadata, then unlock.
        let offset = {
            let mut metadata = self.metadata.lock();
            metadata.read()?
        };

        // self.ring_buffer is VMO-backed memory, so reads past the allocated size return an error.
        // Enabling and disabling the queue can change the size of the ring_buffer, but that goes
        // through thread-safe kernel calls, so if this read races with disabling the queue, the
        // worst that can happen is an EAGAIN or ENOMEM error.
        buf.write_all(
            &self.ring_buffer.read_to_vec(offset, *PAGE_SIZE).map_err(|_| errno!(ENOMEM))?,
        )
    }
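
    // Read sketch (as in the tests): drain one full page into an output buffer.
    //     let mut buffer = VecOutputBuffer::new(*PAGE_SIZE as usize);
    //     let bytes_read = queue.read(&mut buffer)?;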

    /// Writes `event` into `ring_buffer`.
    /// If `event` does not fit in the current page, moves on to the next.
    ///
    /// Should eventually allow for a writer to preempt another writer.
    /// See https://docs.kernel.org/trace/ring-buffer-design.html.
    ///
    /// Returns the delta duration between this event and the previous event written.
    pub fn push_event(
        &self,
        mut event: TraceEvent,
        data: &[u8],
    ) -> Result<zx::Duration<BootTimeline>, Errno> {
        let mut metadata = self.metadata.lock();
        if metadata.mapping.is_none() {
            return Err(errno!(ENOMEM));
        };

        // The timestamp for the current event must not be earlier than metadata.prev_timestamp,
        // because the event header stores only the delta from the previous event, not the full
        // timestamp, and the delta is an unsigned 27-bit value that cannot represent a negative
        // difference.
        // To make sure this is the case, the timestamp and delta calculation are done while holding
        // the metadata lock. This could be refined, e.g. by using an atomic to hold the previous
        // timestamp or similar synchronization that guarantees the previous timestamp is never
        // advanced past this timestamp.
        let timestamp = BootInstant::get();

        event.set_timestamp(timestamp, metadata.prev_timestamp);

        // Get the offset of `ring_buffer` to write this event to.
        let old_tail_page = metadata.tail_page_offset();
        let offset = metadata.reserve(event.size() as u64)?;

        // Clear old events and reset the page header if we've moved to the next page.
        let new_tail_page = metadata.tail_page_offset();
        if new_tail_page != old_tail_page {
            self.initialize_page(new_tail_page, metadata.prev_timestamp)?;
        }

        // Write the event and update the commit offset.
        let bytes = event.as_bytes();
        if let Some(ref mapping) = metadata.mapping {
            mapping.write_at(offset as usize, bytes);
            mapping.write_at(offset as usize + bytes.len(), data);
            mapping.write_at(offset as usize + bytes.len() + data.len(), b"\n");
        }
        metadata.commit(event.size() as u64);

        // Update the page header's `commit` field with the new size of committed data on the page.
        let new_commit = ((metadata.commit % *PAGE_SIZE) - PAGE_HEADER_SIZE).to_le_bytes();
        let commit_offset = metadata.commit_field_offset() as usize;
        if let Some(ref mapping) = metadata.mapping {
            mapping.write_at(commit_offset, &new_commit);
        }

        let delta = timestamp - metadata.prev_timestamp;
        metadata.prev_timestamp = timestamp;

        Ok(delta)
    }
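
    // Push sketch (mirrors the tests; `queue` is an enabled TraceEventQueue):
    //     let data = b"B|1234|slice_name";
    //     let event = TraceEvent::new(1234, data.len());
    //     let _delta = queue.push_event(event, data)?;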

    /// Returns the timestamp of the previous event in `ring_buffer`.
    #[cfg(test)]
    fn prev_timestamp(&self) -> BootInstant {
        self.metadata.lock().prev_timestamp
    }

    /// Initializes a new page by setting the header's timestamp and clearing the rest of the page
    /// with 0's.
    fn initialize_page(&self, offset: u64, prev_timestamp: BootInstant) -> Result<(), Errno> {
        self.ring_buffer
            .write(&prev_timestamp.into_nanos().to_le_bytes(), offset)
            .map_err(|e| from_status_like_fdio!(e))?;
        let timestamp_size = std::mem::size_of::<BootInstant>() as u64;
        self.ring_buffer
            .op_range(zx::VmoOp::ZERO, offset + timestamp_size, *PAGE_SIZE - timestamp_size)
            .map_err(|e| from_status_like_fdio!(e))?;
        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use super::{
        DEFAULT_RING_BUFFER_SIZE_BYTES, PAGE_HEADER_SIZE, TraceEvent, TraceEventQueue,
        TraceEventQueueMetadata,
    };
    use crate::vfs::OutputBuffer;
    use crate::vfs::buffers::VecOutputBuffer;
    use starnix_types::PAGE_SIZE;
    use starnix_uapi::error;

    #[fuchsia::test]
    fn metadata_errors() {
        let mut metadata = TraceEventQueueMetadata::new();
        assert_eq!(metadata.read(), error!(EAGAIN));
        assert_eq!(metadata.reserve(*PAGE_SIZE), error!(EINVAL));
    }

    #[fuchsia::test]
    fn metadata_push_event_simple() {
        let mut metadata = TraceEventQueueMetadata::new();
        let event_size = 30;
        let reserved_offset = metadata.reserve(event_size).expect("reserve failed");
        assert_eq!(reserved_offset, PAGE_HEADER_SIZE);
        assert_eq!(metadata.head, 0);
        assert_eq!(metadata.commit, PAGE_HEADER_SIZE);
        assert_eq!(metadata.tail, PAGE_HEADER_SIZE + event_size);

        metadata.commit(event_size);
        assert_eq!(metadata.head, 0);
        assert_eq!(metadata.commit, PAGE_HEADER_SIZE + event_size);
        assert_eq!(metadata.tail, PAGE_HEADER_SIZE + event_size);
    }

    #[fuchsia::test]
    fn metadata_push_event_next_page() {
        let mut metadata = TraceEventQueueMetadata::new();
        // Set up pointers to be near the end of the page.
        metadata.commit = *PAGE_SIZE - 1;
        metadata.tail = *PAGE_SIZE - 1;

        // Reserving space for an event should only move the tail pointer.
        let event_size = 30;
        let reserved_offset = metadata.reserve(event_size).expect("reserve failed");
        assert_eq!(reserved_offset, *PAGE_SIZE + PAGE_HEADER_SIZE);
        assert_eq!(metadata.head, 0);
        assert_eq!(metadata.commit, *PAGE_SIZE - 1);
        assert_eq!(metadata.tail, *PAGE_SIZE + PAGE_HEADER_SIZE + event_size);

        // Committing an event should only move the commit pointer.
        metadata.commit(event_size);
        assert_eq!(metadata.head, 0);
        assert_eq!(metadata.commit, *PAGE_SIZE + PAGE_HEADER_SIZE + event_size);
        assert_eq!(metadata.tail, *PAGE_SIZE + PAGE_HEADER_SIZE + event_size);
    }

    #[fuchsia::test]
    fn metadata_reserve_full() {
        let mut metadata = TraceEventQueueMetadata::new();
        metadata.commit = DEFAULT_RING_BUFFER_SIZE_BYTES;
        metadata.tail = DEFAULT_RING_BUFFER_SIZE_BYTES;

        // If not overwriting, reserve should fail.
        metadata.overwrite = false;
        assert_eq!(metadata.reserve(30), error!(ENOMEM));

        // Otherwise, reserving should wrap around to the front of the ring buffer.
        metadata.overwrite = true;
        assert_eq!(metadata.reserve(30), Ok(PAGE_HEADER_SIZE));
        assert_eq!(metadata.head_page_offset(), *PAGE_SIZE);
        assert_eq!(metadata.dropped_pages, 1);
    }

    #[fuchsia::test]
    fn metadata_read_simple() {
        let mut metadata = TraceEventQueueMetadata::new();
        metadata.is_readable = true;

        assert_eq!(metadata.read(), Ok(0));
        assert_eq!(metadata.head, *PAGE_SIZE);
    }

    #[fuchsia::test]
    fn metadata_read_meets_commit() {
        let mut metadata = TraceEventQueueMetadata::new();
        metadata.is_readable = true;
        metadata.commit = *PAGE_SIZE + PAGE_HEADER_SIZE + 30;

        assert_eq!(metadata.read(), Ok(0));
        assert_eq!(metadata.head, *PAGE_SIZE);
        assert!(!metadata.is_readable);
        assert_eq!(metadata.read(), error!(EAGAIN));
    }

    #[fuchsia::test]
    fn read_empty_queue() {
        let inspect_node = fuchsia_inspect::Node::default();
        let queue = TraceEventQueue::new(&inspect_node).expect("create queue");
        let mut buffer = VecOutputBuffer::new(*PAGE_SIZE as usize);
        assert_eq!(queue.read(&mut buffer), error!(EAGAIN));
    }

    #[fuchsia::test]
    fn enable_disable_queue() {
        let inspect_node = fuchsia_inspect::Node::default();
        let queue = TraceEventQueue::new(&inspect_node).expect("create queue");
        assert_eq!(queue.ring_buffer.get_size(), 0);

        // Enable tracing and check the queue's state.
        assert!(queue.enable().is_ok());
        assert_eq!(queue.ring_buffer.get_size(), DEFAULT_RING_BUFFER_SIZE_BYTES);

        // Confirm we can push an event.
        let data = b"B|1234|slice_name";
        let event = TraceEvent::new(1234, data.len());
        let event_size = event.size() as u64;
        let result = queue.push_event(event, data);
        assert!(result.is_ok());
        assert!(result.ok().expect("delta").into_nanos() > 0);
        assert_eq!(queue.metadata.lock().commit, PAGE_HEADER_SIZE + event_size);

        // Disable tracing and check that the queue's state has been reset.
        assert!(queue.disable().is_ok());
        assert_eq!(queue.ring_buffer.get_size(), 0);
        assert_eq!(queue.metadata.lock().commit, PAGE_HEADER_SIZE);
    }

    #[fuchsia::test]
    fn create_trace_event() {
        // Create an event.
        let event = TraceEvent::new(1234, b"B|1234|slice_name".len());
        let event_size = event.size();
        assert_eq!(event_size, 42);
    }

    // This can be removed when we support reading incomplete pages.
    #[fuchsia::test]
    fn single_trace_event_fails_read() {
        let inspect_node = fuchsia_inspect::Node::default();
        let queue = TraceEventQueue::new(&inspect_node).expect("create queue");
        queue.enable().expect("enable queue");
        // Create an event.
        let data = b"B|1234|slice_name";
        let event = TraceEvent::new(1234, data.len());

        // Push the event into the queue.
        let result = queue.push_event(event, data);
        assert!(result.is_ok());
        assert!(result.ok().expect("delta").into_nanos() > 0);

        let mut buffer = VecOutputBuffer::new(*PAGE_SIZE as usize);
        assert_eq!(queue.read(&mut buffer), error!(EAGAIN));
    }

    #[fuchsia::test]
    fn page_overflow() {
        let inspect_node = fuchsia_inspect::Node::default();
        let queue = TraceEventQueue::new(&inspect_node).expect("create queue");
        queue.enable().expect("enable queue");
        let queue_start_timestamp = queue.prev_timestamp();
        let pid = 1234;
        let data = b"B|1234|loooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo\
        ooooooooooooooooooooooooooooooooooooooooooooooongevent";
        let expected_event = TraceEvent::new(pid, data.len());
        assert_eq!(expected_event.size(), 155);

        // Push the events into the queue: 26 fill the first page and the 27th spills onto the
        // next page, which makes the first page readable.
        for _ in 0..27 {
            let event = TraceEvent::new(pid, data.len());
            let result = queue.push_event(event, data);
            assert!(result.is_ok());
            assert!(result.ok().expect("delta").into_nanos() > 0);
        }

        // Read a page of data.
        let mut buffer = VecOutputBuffer::new(*PAGE_SIZE as usize);
        assert_eq!(queue.read(&mut buffer), Ok(*PAGE_SIZE as usize));
        assert_eq!(buffer.bytes_written() as u64, *PAGE_SIZE);

        let mut expected_page_header: Vec<u8> = vec![];
        expected_page_header
            .extend_from_slice(&(queue_start_timestamp.into_nanos() as u64).to_le_bytes());
        expected_page_header.extend_from_slice(&(expected_event.size() * 26).to_le_bytes());

        assert!(buffer.data().starts_with(&expected_page_header));

        // Try reading another page.
        let mut buffer = VecOutputBuffer::new(*PAGE_SIZE as usize);
        assert_eq!(queue.read(&mut buffer), error!(EAGAIN));
    }
}