use crate::args::{Arg, RawArg};
use crate::error::ParseWarning;
use crate::init::Ticks;
use crate::session::ResolveCtx;
use crate::thread::{ProcessKoid, ProcessRef, ThreadKoid, ThreadRef};
use crate::{trace_header, ParseError, ParseResult, Provider, SCHEDULING_RECORD_TYPE};
use nom::combinator::all_consuming;
use nom::number::complete::le_u64;
use nom::Parser;
use std::num::NonZero;

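// Sub-type values for scheduling records, matched against bits 60..=63 of the record header.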
const LEGACY_CONTEXT_SWITCH_SCHEDULING_TYPE: u8 = 0;
const CONTEXT_SWITCH_SCHEDULING_TYPE: u8 = 1;
const THREAD_WAKEUP_SCHEDULING_TYPE: u8 = 2;

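/// A scheduling record resolved against its trace session: a context switch between two threads
/// or a thread wakeup, in either the current or the legacy context switch layout.
///
/// A minimal sketch of consuming a resolved record (uses only the fields defined below):
///
/// ```ignore
/// fn describe(record: &SchedulingRecord) -> String {
///     match record {
///         SchedulingRecord::ContextSwitch(c) => {
///             format!("cpu {}: switch to thread {:?}", c.cpu_id, c.incoming_thread_id)
///         }
///         SchedulingRecord::ThreadWakeup(w) => {
///             format!("cpu {}: wake thread {:?}", w.cpu_id, w.waking_thread_id)
///         }
///         SchedulingRecord::LegacyContextSwitch(c) => {
///             format!("cpu {}: switch to thread {:?}", c.cpu_id, c.incoming_thread)
///         }
///     }
/// }
/// ```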
#[derive(Clone, Debug, PartialEq)]
pub enum SchedulingRecord {
    ContextSwitch(ContextSwitchEvent),
    ThreadWakeup(ThreadWakeupEvent),
    LegacyContextSwitch(LegacyContextSwitchEvent),
}

impl SchedulingRecord {
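    /// Returns the koid of the incoming process, which only legacy context switch records carry.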
    pub fn process(&self) -> Option<ProcessKoid> {
        match self {
            Self::LegacyContextSwitch(LegacyContextSwitchEvent { incoming_process, .. }) => {
                Some(*incoming_process)
            }
            Self::ContextSwitch(..) | Self::ThreadWakeup(..) => None,
        }
    }

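    /// Returns the koid of the thread the record is about: the incoming thread for context
    /// switches and the waking thread for thread wakeups.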
    pub fn thread(&self) -> ThreadKoid {
        match self {
            Self::LegacyContextSwitch(LegacyContextSwitchEvent { incoming_thread, .. }) => {
                *incoming_thread
            }
            Self::ContextSwitch(ContextSwitchEvent { incoming_thread_id, .. }) => {
                *incoming_thread_id
            }
            Self::ThreadWakeup(ThreadWakeupEvent { waking_thread_id, .. }) => *waking_thread_id,
        }
    }

    pub(super) fn resolve(ctx: &mut ResolveCtx, raw: RawSchedulingRecord<'_>) -> Option<Self> {
        match raw {
            RawSchedulingRecord::ContextSwitch(c) => {
                Some(Self::ContextSwitch(ContextSwitchEvent::resolve(ctx, c)))
            }
            RawSchedulingRecord::ThreadWakeup(t) => {
                Some(Self::ThreadWakeup(ThreadWakeupEvent::resolve(ctx, t)))
            }
            RawSchedulingRecord::LegacyContextSwitch(c) => {
                Some(Self::LegacyContextSwitch(LegacyContextSwitchEvent::resolve(ctx, c)))
            }
            RawSchedulingRecord::Unknown { raw_type, .. } => {
                ctx.add_warning(ParseWarning::UnknownSchedulingRecordType(raw_type));
                None
            }
        }
    }
}

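/// A scheduling record as parsed from the wire, before timestamps and references are resolved.
/// Unknown sub-types keep their raw type and bytes.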
#[derive(Debug, PartialEq)]
pub(super) enum RawSchedulingRecord<'a> {
    ContextSwitch(RawContextSwitchEvent<'a>),
    ThreadWakeup(RawThreadWakeupEvent<'a>),
    LegacyContextSwitch(RawLegacyContextSwitchEvent),
    Unknown { raw_type: u8, bytes: &'a [u8] },
}

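// A scheduling record's sub-type lives in bits 60..=63 of its header word. This minimal header
// is parsed first so `RawSchedulingRecord::parse` can dispatch to the right layout.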
trace_header! {
    SchedulingHeader (SCHEDULING_RECORD_TYPE) {
        u8, record_type: 60, 63;
    }
}

impl<'a> RawSchedulingRecord<'a> {
    pub(super) fn parse(buf: &'a [u8]) -> ParseResult<'a, Self> {
        use nom::combinator::map;
        let base_header = SchedulingHeader::parse(buf)?.1;
        match base_header.record_type() {
            LEGACY_CONTEXT_SWITCH_SCHEDULING_TYPE => {
                map(RawLegacyContextSwitchEvent::parse, Self::LegacyContextSwitch).parse(buf)
            }
            CONTEXT_SWITCH_SCHEDULING_TYPE => {
                map(RawContextSwitchEvent::parse, Self::ContextSwitch).parse(buf)
            }
            THREAD_WAKEUP_SCHEDULING_TYPE => {
                map(RawThreadWakeupEvent::parse, Self::ThreadWakeup).parse(buf)
            }
            unknown => {
                // Skip an unrecognized sub-type by its declared size, keeping the raw type and
                // bytes so `SchedulingRecord::resolve` can surface a warning instead of failing.
                let size_bytes = base_header.size_words() as usize * 8;
                if size_bytes <= buf.len() {
                    let (unknown_record, rem) = buf.split_at(size_bytes);
                    Ok((rem, Self::Unknown { raw_type: unknown, bytes: unknown_record }))
                } else {
                    Err(nom::Err::Incomplete(nom::Needed::Size(
                        NonZero::new(size_bytes - buf.len()).unwrap(),
                    )))
                }
            }
        }
    }
}

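/// A context switch from `outgoing_thread_id` to `incoming_thread_id` on `cpu_id`, with its
/// timestamp and arguments resolved.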
#[derive(Clone, Debug, PartialEq)]
pub struct ContextSwitchEvent {
    pub provider: Option<Provider>,
    pub cpu_id: u16,
    pub timestamp: i64,
    pub outgoing_thread_state: ThreadState,
    pub outgoing_thread_id: ThreadKoid,
    pub incoming_thread_id: ThreadKoid,
    pub args: Vec<Arg>,
}

impl ContextSwitchEvent {
    fn resolve(ctx: &mut ResolveCtx, raw: RawContextSwitchEvent<'_>) -> Self {
        Self {
            provider: ctx.current_provider(),
            cpu_id: raw.cpu_id,
            timestamp: ctx.resolve_ticks(raw.ticks),
            outgoing_thread_state: raw.outgoing_thread_state,
            outgoing_thread_id: ThreadKoid(raw.outgoing_thread_id),
            incoming_thread_id: ThreadKoid(raw.incoming_thread_id),
            args: Arg::resolve_n(ctx, raw.args),
        }
    }
}

#[derive(Debug, PartialEq)]
pub(super) struct RawContextSwitchEvent<'a> {
    cpu_id: u16,
    ticks: Ticks,
    outgoing_thread_state: ThreadState,
    outgoing_thread_id: u64,
    incoming_thread_id: u64,
    args: Vec<RawArg<'a>>,
}

impl<'a> RawContextSwitchEvent<'a> {
    fn parse(buf: &'a [u8]) -> ParseResult<'a, Self> {
        let (buf, header) = ContextSwitchHeader::parse(buf)?;
        if header.record_type() != CONTEXT_SWITCH_SCHEDULING_TYPE {
            return Err(nom::Err::Error(ParseError::WrongType {
                observed: header.record_type(),
                expected: CONTEXT_SWITCH_SCHEDULING_TYPE,
                context: "ContextSwitchEvent",
            }));
        }
        let (rem, payload) = header.take_payload(buf)?;
        let (payload, ticks) = Ticks::parse(payload)?;
        let (payload, outgoing_thread_id) = le_u64(payload)?;
        let (payload, incoming_thread_id) = le_u64(payload)?;
        let (empty, args) =
            all_consuming(|p| RawArg::parse_n(header.num_args(), p)).parse(payload)?;
        assert!(empty.is_empty(), "all_consuming must not return any remaining buffer");
        Ok((
            rem,
            Self {
                cpu_id: header.cpu_id(),
                outgoing_thread_state: ThreadState::parse(header.outgoing_thread_state()),
                ticks,
                outgoing_thread_id,
                incoming_thread_id,
                args,
            },
        ))
    }
}

trace_header! {
    ContextSwitchHeader (SCHEDULING_RECORD_TYPE) {
        u8, num_args: 16, 19;
        u16, cpu_id: 20, 35;
        u8, outgoing_thread_state: 36, 39;
        u8, record_type: 60, 63;
    }
}

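/// A wakeup of `waking_thread_id` on `cpu_id`, with its timestamp and arguments resolved.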
#[derive(Clone, Debug, PartialEq)]
pub struct ThreadWakeupEvent {
    pub provider: Option<Provider>,
    pub timestamp: i64,
    pub cpu_id: u16,
    pub waking_thread_id: ThreadKoid,
    pub args: Vec<Arg>,
}

impl ThreadWakeupEvent {
    fn resolve(ctx: &mut ResolveCtx, raw: RawThreadWakeupEvent<'_>) -> Self {
        Self {
            provider: ctx.current_provider(),
            timestamp: ctx.resolve_ticks(raw.ticks),
            cpu_id: raw.cpu_id,
            waking_thread_id: ThreadKoid(raw.waking_thread_id),
            args: Arg::resolve_n(ctx, raw.args),
        }
    }
}

#[derive(Debug, PartialEq)]
pub(super) struct RawThreadWakeupEvent<'a> {
    ticks: Ticks,
    cpu_id: u16,
    waking_thread_id: u64,
    args: Vec<RawArg<'a>>,
}

impl<'a> RawThreadWakeupEvent<'a> {
    fn parse(buf: &'a [u8]) -> ParseResult<'a, Self> {
        let (buf, header) = ThreadWakeupHeader::parse(buf)?;
        if header.record_type() != THREAD_WAKEUP_SCHEDULING_TYPE {
            return Err(nom::Err::Error(ParseError::WrongType {
                observed: header.record_type(),
                expected: THREAD_WAKEUP_SCHEDULING_TYPE,
                context: "ThreadWakeupEvent",
            }));
        }
        let (rem, payload) = header.take_payload(buf)?;
        let (payload, ticks) = Ticks::parse(payload)?;
        let (payload, waking_thread_id) = le_u64(payload)?;
        let (empty, args) =
            all_consuming(|p| RawArg::parse_n(header.num_args(), p)).parse(payload)?;
        assert!(empty.is_empty(), "all_consuming must not return any remaining buffer");
        Ok((rem, Self { ticks, cpu_id: header.cpu_id(), waking_thread_id, args }))
    }
}

trace_header! {
    ThreadWakeupHeader (SCHEDULING_RECORD_TYPE) {
        u8, num_args: 16, 19;
        u16, cpu_id: 20, 35;
        u8, record_type: 60, 63;
    }
}

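/// A context switch in the older record layout, which carries process/thread references and
/// priorities for both sides of the switch instead of argument records.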
#[derive(Clone, Debug, PartialEq)]
pub struct LegacyContextSwitchEvent {
    pub provider: Option<Provider>,
    pub timestamp: i64,
    pub cpu_id: u16,
    pub outgoing_thread_state: ThreadState,
    pub outgoing_process: ProcessKoid,
    pub outgoing_thread: ThreadKoid,
    pub outgoing_thread_priority: u8,
    pub incoming_process: ProcessKoid,
    pub incoming_thread: ThreadKoid,
    pub incoming_thread_priority: u8,
}

impl LegacyContextSwitchEvent {
    fn resolve(ctx: &mut ResolveCtx, raw: RawLegacyContextSwitchEvent) -> Self {
        Self {
            provider: ctx.current_provider(),
            timestamp: ctx.resolve_ticks(raw.ticks),
            cpu_id: raw.cpu_id,
            outgoing_thread_state: raw.outgoing_thread_state,
            outgoing_process: ctx.resolve_process(raw.outgoing_process),
            outgoing_thread: ctx.resolve_thread(raw.outgoing_thread),
            outgoing_thread_priority: raw.outgoing_thread_priority,
            incoming_process: ctx.resolve_process(raw.incoming_process),
            incoming_thread: ctx.resolve_thread(raw.incoming_thread),
            incoming_thread_priority: raw.incoming_thread_priority,
        }
    }
}

#[derive(Debug, PartialEq)]
pub(super) struct RawLegacyContextSwitchEvent {
    ticks: Ticks,
    cpu_id: u16,
    outgoing_thread_state: ThreadState,
    outgoing_process: ProcessRef,
    outgoing_thread: ThreadRef,
    outgoing_thread_priority: u8,
    incoming_process: ProcessRef,
    incoming_thread: ThreadRef,
    incoming_thread_priority: u8,
}

impl RawLegacyContextSwitchEvent {
    fn parse(buf: &[u8]) -> ParseResult<'_, Self> {
        let (buf, header) = LegacyContextSwitchHeader::parse(buf)?;
        if header.record_type() != LEGACY_CONTEXT_SWITCH_SCHEDULING_TYPE {
            return Err(nom::Err::Error(ParseError::WrongType {
                observed: header.record_type(),
                expected: LEGACY_CONTEXT_SWITCH_SCHEDULING_TYPE,
                context: "LegacyContextSwitchEvent",
            }));
        }
        let outgoing_thread_state = ThreadState::parse(header.outgoing_thread_state());
        let (rem, payload) = header.take_payload(buf)?;
        let (payload, ticks) = Ticks::parse(payload)?;
        // The legacy header only carries thread refs; the same ref value is passed for both the
        // process and thread lookups, which decide whether the koids follow inline in the payload.
        let (payload, outgoing_process) = ProcessRef::parse(header.outgoing_thread(), payload)?;
        let (payload, outgoing_thread) = ThreadRef::parse(header.outgoing_thread(), payload)?;
        let (payload, incoming_process) = ProcessRef::parse(header.incoming_thread(), payload)?;
        let (empty, incoming_thread) =
            all_consuming(|p| ThreadRef::parse(header.incoming_thread(), p)).parse(payload)?;
        assert!(empty.is_empty(), "all_consuming must not return any remaining buffer");

        Ok((
            rem,
            Self {
                ticks,
                cpu_id: header.cpu_id(),
                outgoing_thread_priority: header.outgoing_thread_priority(),
                incoming_thread_priority: header.incoming_thread_priority(),
                outgoing_thread_state,
                outgoing_process,
                outgoing_thread,
                incoming_process,
                incoming_thread,
            },
        ))
    }
}

trace_header! {
    LegacyContextSwitchHeader (SCHEDULING_RECORD_TYPE) {
        u16, cpu_id: 16, 23;
        u8, outgoing_thread_state: 24, 27;
        u8, outgoing_thread: 28, 35;
        u8, incoming_thread: 36, 43;
        u8, outgoing_thread_priority: 44, 51;
        u8, incoming_thread_priority: 52, 59;
        u8, record_type: 60, 63;
    }
}

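/// The state the outgoing thread was left in at a context switch, as encoded in the record
/// header. Unrecognized values are preserved as `Unknown`.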
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ThreadState {
    New,
    Running,
    Suspended,
    Blocked,
    Dying,
    Dead,
    Unknown(u8),
}

impl ThreadState {
    fn parse(raw: u8) -> Self {
        match raw {
            0 => Self::New,
            1 => Self::Running,
            2 => Self::Suspended,
            3 => Self::Blocked,
            4 => Self::Dying,
            5 => Self::Dead,
            unknown => Self::Unknown(unknown),
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::args::{I32Header, RawArgValue};
    use crate::fxt_builder::FxtBuilder;
    use crate::string::{StringRef, STRING_REF_INLINE_BIT};
    use crate::RawTraceRecord;

    #[test]
    fn context_switch_event() {
        let mut header = ContextSwitchHeader::empty();
        header.set_record_type(CONTEXT_SWITCH_SCHEDULING_TYPE);
        header.set_num_args(2);
        header.set_cpu_id(6);
        header.set_outgoing_thread_state(4);

        let first_arg_name = "incoming_weight";
        let mut first_arg_header = I32Header::empty();
        first_arg_header.set_name_ref(first_arg_name.len() as u16 | STRING_REF_INLINE_BIT);
        first_arg_header.set_value(12);

        let second_arg_name = "outgoing_weight";
        let mut second_arg_header = I32Header::empty();
        second_arg_header.set_name_ref(second_arg_name.len() as u16 | STRING_REF_INLINE_BIT);
        second_arg_header.set_value(14);

        assert_parses_to_record!(
            FxtBuilder::new(header)
                .atom(1024u64.to_le_bytes())
                .atom(5u64.to_le_bytes())
                .atom(8u64.to_le_bytes())
                .atom(FxtBuilder::new(first_arg_header).atom(first_arg_name).build())
                .atom(FxtBuilder::new(second_arg_header).atom(second_arg_name).build())
                .build(),
            RawTraceRecord::Scheduling(RawSchedulingRecord::ContextSwitch(RawContextSwitchEvent {
                cpu_id: 6,
                ticks: Ticks(1024),
                outgoing_thread_state: ThreadState::Dying,
                outgoing_thread_id: 5,
                incoming_thread_id: 8,
                args: vec![
                    RawArg {
                        name: StringRef::Inline(first_arg_name),
                        value: RawArgValue::Signed32(12),
                    },
                    RawArg {
                        name: StringRef::Inline(second_arg_name),
                        value: RawArgValue::Signed32(14),
                    },
                ],
            })),
        );
    }

    #[test]
    fn thread_wakeup_event() {
        let mut header = ThreadWakeupHeader::empty();
        header.set_record_type(THREAD_WAKEUP_SCHEDULING_TYPE);
        header.set_cpu_id(6);
        header.set_num_args(1);

        let arg_name = "weight";
        let mut arg_header = I32Header::empty();
        arg_header.set_name_ref(arg_name.len() as u16 | STRING_REF_INLINE_BIT);
        arg_header.set_value(12);

        assert_parses_to_record!(
            FxtBuilder::new(header)
                .atom(1024u64.to_le_bytes())
                .atom(5u64.to_le_bytes())
                .atom(FxtBuilder::new(arg_header).atom(arg_name).build())
                .build(),
            RawTraceRecord::Scheduling(RawSchedulingRecord::ThreadWakeup(RawThreadWakeupEvent {
                cpu_id: 6,
                ticks: Ticks(1024),
                waking_thread_id: 5,
                args: vec![RawArg {
                    name: StringRef::Inline(arg_name),
                    value: RawArgValue::Signed32(12),
                }]
            })),
        );
    }

    #[test]
    fn legacy_context_switch_event() {
        let mut header = LegacyContextSwitchHeader::empty();
        header.set_record_type(LEGACY_CONTEXT_SWITCH_SCHEDULING_TYPE);
        header.set_cpu_id(6);
        header.set_outgoing_thread_state(2);
        header.set_outgoing_thread_priority(10);
        header.set_incoming_thread_priority(11);

        assert_parses_to_record!(
            FxtBuilder::new(header)
                .atom(1024u64.to_le_bytes())
                .atom(25u64.to_le_bytes())
                .atom(26u64.to_le_bytes())
                .atom(100u64.to_le_bytes())
                .atom(101u64.to_le_bytes())
                .build(),
            RawTraceRecord::Scheduling(RawSchedulingRecord::LegacyContextSwitch(
                RawLegacyContextSwitchEvent {
                    ticks: Ticks(1024),
                    cpu_id: 6,
                    outgoing_thread_state: ThreadState::Suspended,
                    outgoing_process: ProcessRef::Inline(ProcessKoid(25)),
                    outgoing_thread: ThreadRef::Inline(ThreadKoid(26)),
                    outgoing_thread_priority: 10,
                    incoming_process: ProcessRef::Inline(ProcessKoid(100)),
                    incoming_thread: ThreadRef::Inline(ThreadKoid(101)),
                    incoming_thread_priority: 11,
                }
            ))
        );
    }
}