diagnostics_message/
lib.rs

1// Copyright 2021 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5use crate::error::MessageError;
6use byteorder::{ByteOrder, LittleEndian};
7use diagnostics_data::{
8    BuilderArgs, ExtendedMoniker, LogsData, LogsDataBuilder, LogsField, LogsProperty, Severity,
9};
10use diagnostics_log_encoding::{Argument, Record, Value};
11use flyweights::FlyStr;
12use libc::{c_char, c_int};
13use moniker::Moniker;
14use std::{mem, str};
15
16#[cfg(fuchsia_api_level_at_least = "HEAD")]
17use fidl_fuchsia_diagnostics as fdiagnostics;
18
19mod constants;
20pub mod error;
21pub mod ffi;
22pub use constants::*;
23
24#[cfg(test)]
25mod test;
26
/// Identity information for a log message's source: the moniker of the emitting
/// component together with the URL it was launched from.
#[derive(Clone)]
pub struct MonikerWithUrl {
    /// Moniker of the component that emitted the log.
    pub moniker: ExtendedMoniker,
    /// Component URL associated with the moniker.
    pub url: FlyStr,
}
32
33/// Transforms the given legacy log message (already parsed) into a `LogsData` containing the
34/// given identity information.
35pub fn from_logger(source: MonikerWithUrl, msg: LoggerMessage) -> LogsData {
36    let (raw_severity, severity) = Severity::parse_exact(msg.raw_severity);
37    let mut builder = LogsDataBuilder::new(BuilderArgs {
38        timestamp: msg.timestamp,
39        component_url: Some(source.url),
40        moniker: source.moniker,
41        severity,
42    })
43    .set_pid(msg.pid)
44    .set_tid(msg.tid)
45    .set_dropped(msg.dropped_logs)
46    .set_message(msg.message);
47    if let Some(raw_severity) = raw_severity {
48        builder = builder.set_raw_severity(raw_severity);
49    }
50    for tag in &msg.tags {
51        builder = builder.add_tag(tag.as_ref());
52    }
53    builder.build()
54}
55
/// Attribution metadata accompanying an FXT record: who emitted the log and how many
/// logs were rolled out before it.
struct ExtendedMetadata {
    // Moniker of the emitting component.
    moniker: ExtendedMoniker,
    // URL of the emitting component.
    url: FlyStr,
    // Count of logs rolled out (lost) before this record.
    rolled_out_logs: u64,
}
61
/// No-op stand-in for Archivist-argument parsing on API levels below HEAD, where the
/// archivist argument-name constants are unavailable: returns the builder unchanged
/// and reports zero consumed arguments.
#[cfg(fuchsia_api_level_less_than = "HEAD")]
fn parse_archivist_args<'a>(
    builder: LogsDataBuilder,
    _input: &'a Record<'a>,
) -> Result<(LogsDataBuilder, usize), MessageError> {
    Ok((builder, 0))
}
69
/// Consumes Archivist-attribution arguments (component URL, moniker, rolled-out count)
/// from the tail of `input`, applying each to `builder`.
///
/// Returns the updated builder together with the number of arguments consumed, so the
/// caller can skip them when processing the remaining (user-provided) arguments.
#[cfg(fuchsia_api_level_at_least = "HEAD")]
fn parse_archivist_args<'a>(
    mut builder: LogsDataBuilder,
    input: &'a Record<'a>,
) -> Result<(LogsDataBuilder, usize), MessageError> {
    let mut archivist_argument_count = 0;
    for argument in input.arguments.iter().rev() {
        // If Archivist records are expected, they should always be at the end.
        // If we see a non-archivist record, we can stop looking.
        match argument {
            Argument::Other { name, value } => {
                if name == fdiagnostics::COMPONENT_URL_ARG_NAME {
                    if let Value::Text(url) = value {
                        builder = builder.set_url(Some(FlyStr::new(url.as_ref())));
                        archivist_argument_count += 1;
                        continue;
                    }
                } else if name == fdiagnostics::MONIKER_ARG_NAME {
                    if let Value::Text(moniker) = value {
                        builder = builder.set_moniker(ExtendedMoniker::parse_str(moniker)?);
                        archivist_argument_count += 1;
                        continue;
                    }
                } else if name == fdiagnostics::ROLLED_OUT_ARG_NAME {
                    if let Value::UnsignedInt(count) = value {
                        builder = builder.set_rolled_out(*count);
                        archivist_argument_count += 1;
                        continue;
                    }
                }
                // An `Other` argument that isn't a recognized archivist argument (or that
                // carries an unexpected value type) ends the archivist suffix. Without
                // this break, archivist-named arguments appearing deeper in the list
                // would still be counted, causing the caller to truncate legitimate
                // user arguments.
                break;
            }
            _ => break,
        }
    }
    Ok((builder, archivist_argument_count))
}
106
/// Builds a `LogsData` from a decoded FXT `Record`.
///
/// When `source` is provided, attribution (moniker, URL, rolled-out count) comes from it;
/// otherwise the record's trailing Archivist arguments are consulted via
/// `parse_archivist_args` (a no-op below API level HEAD).
fn parse_logs_data<'a>(
    input: &'a Record<'a>,
    source: Option<ExtendedMetadata>,
) -> Result<LogsData, MessageError> {
    let (raw_severity, severity) = Severity::parse_exact(input.severity);
    // Remember whether attribution was supplied before `source` is consumed below.
    let has_attribution = source.is_some();

    let (maybe_moniker, maybe_url, maybe_rolled_out) = source
        .map(|value| (Some(value.moniker), Some(value.url), Some(value.rolled_out_logs)))
        .unwrap_or((None, None, None));

    let mut builder = LogsDataBuilder::new(BuilderArgs {
        component_url: maybe_url,
        // Stand-in moniker when no attribution was supplied; expected to be replaced by
        // `parse_archivist_args` below. NOTE(review): if the record carries no moniker
        // argument either, "placeholder" leaks into the output — confirm callers
        // guarantee one of the two.
        moniker: maybe_moniker.unwrap_or(ExtendedMoniker::ComponentInstance(
            Moniker::parse_str("placeholder").unwrap(),
        )),
        severity,
        timestamp: input.timestamp,
    });

    // Only record a rolled-out count when it is non-zero.
    if let Some(rolled_out) = maybe_rolled_out {
        if rolled_out > 0 {
            builder = builder.set_rolled_out(rolled_out);
        }
    }

    if let Some(raw_severity) = raw_severity {
        builder = builder.set_raw_severity(raw_severity);
    }
    // With explicit attribution there are no archivist arguments to strip; otherwise
    // count (and apply) the archivist suffix so it is excluded from the loop below.
    let archivist_argument_count = if has_attribution {
        0
    } else {
        let (new_builder, count) = parse_archivist_args(builder, input)?;
        builder = new_builder;
        count
    };

    // Translate every non-archivist argument into the corresponding builder call.
    for argument in input.arguments.iter().take(input.arguments.len() - archivist_argument_count) {
        match argument {
            Argument::Tag(tag) => {
                builder = builder.add_tag(tag.as_ref());
            }
            Argument::Pid(pid) => {
                builder = builder.set_pid(pid.raw_koid());
            }
            Argument::Tid(tid) => {
                builder = builder.set_tid(tid.raw_koid());
            }
            Argument::Dropped(dropped) => {
                builder = builder.set_dropped(*dropped);
            }
            Argument::File(file) => {
                builder = builder.set_file(file.as_ref());
            }
            Argument::Line(line) => {
                builder = builder.set_line(*line);
            }
            Argument::Message(msg) => {
                builder = builder.set_message(msg.as_ref());
            }
            // Arbitrary key/value arguments become structured keys on the log record.
            Argument::Other { value, name } => {
                let name = LogsField::Other(name.to_string());
                builder = builder.add_key(match value {
                    Value::SignedInt(v) => LogsProperty::Int(name, *v),
                    Value::UnsignedInt(v) => LogsProperty::Uint(name, *v),
                    Value::Floating(v) => LogsProperty::Double(name, *v),
                    Value::Text(v) => LogsProperty::String(name, v.to_string()),
                    Value::Boolean(v) => LogsProperty::Bool(name, *v),
                })
            }
        }
    }

    Ok(builder.build())
}
182
183/// Constructs a `LogsData` from the provided bytes, assuming the bytes
184/// are a a single FXT log record with a potentially extended metadata section.
185/// [log encoding] https://fuchsia.dev/fuchsia-src/reference/platform-spec/diagnostics/logs-encoding
186pub fn from_extended_record(bytes: &[u8]) -> Result<(LogsData, &[u8]), MessageError> {
187    let (input, remaining) = diagnostics_log_encoding::parse::parse_record(bytes)?;
188    let (source, new_remaining) = if remaining.len() >= 16 {
189        let moniker_len = u32::from_le_bytes(remaining[0..4].try_into().unwrap()) as usize;
190        let component_url_len = u32::from_le_bytes(remaining[4..8].try_into().unwrap()) as usize;
191        let rolled_out_logs = u64::from_le_bytes(remaining[8..16].try_into().unwrap());
192        let mut offset = 16;
193        let moniker = str::from_utf8(&remaining[offset..offset + moniker_len])?;
194        let moniker_padded_len = (moniker_len + 7) & !7;
195        offset += moniker_padded_len;
196        let url = str::from_utf8(&remaining[offset..offset + component_url_len])?;
197        let component_url_padded_len = (component_url_len + 7) & !7;
198        offset += component_url_padded_len;
199        (
200            Some(ExtendedMetadata {
201                moniker: ExtendedMoniker::parse_str(moniker)?,
202                url: FlyStr::new(url),
203                rolled_out_logs,
204            }),
205            &remaining[offset..],
206        )
207    } else {
208        (None, remaining)
209    };
210    let record = parse_logs_data(&input, source)?;
211    Ok((record, new_remaining))
212}
213
214/// Constructs a `LogsData` from the provided bytes, assuming the bytes
215/// are in the format specified as in the [log encoding].
216///
217/// [log encoding] https://fuchsia.dev/fuchsia-src/development/logs/encodings
218pub fn from_structured(source: MonikerWithUrl, bytes: &[u8]) -> Result<LogsData, MessageError> {
219    let (input, _remaining) = diagnostics_log_encoding::parse::parse_record(bytes)?;
220    let record = parse_logs_data(
221        &input,
222        Some(ExtendedMetadata { moniker: source.moniker, url: source.url, rolled_out_logs: 0 }),
223    )?;
224    Ok(record)
225}
226
/// A fully-parsed message in the legacy logger/syslog wire format.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct LoggerMessage {
    /// Timestamp carried by the packet header.
    pub timestamp: zx::BootInstant,
    /// Severity clamped into the `u8` range from the wire's signed 32-bit value.
    pub raw_severity: u8,
    /// Koid of the emitting process.
    pub pid: u64,
    /// Koid of the emitting thread.
    pub tid: u64,
    /// Number of bytes of the original packet consumed up to and including the
    /// message terminator.
    pub size_bytes: usize,
    /// Count of logs dropped before this message, per the packet header.
    pub dropped_logs: u64,
    /// The (lossily UTF-8 decoded) message body.
    pub message: Box<str>,
    /// The (lossily UTF-8 decoded) tags preceding the message.
    pub tags: Vec<Box<str>>,
}
238
239/// Parse the provided buffer as if it implements the [logger/syslog wire format].
240///
241/// Note that this is distinct from the parsing we perform for the debuglog log, which also
242/// takes a `&[u8]` and is why we don't implement this as `TryFrom`.
243///
244/// [logger/syslog wire format]: https://fuchsia.googlesource.com/fuchsia/+/HEAD/zircon/system/ulib/syslog/include/lib/syslog/wire_format.h
245impl TryFrom<&[u8]> for LoggerMessage {
246    type Error = MessageError;
247
248    fn try_from(bytes: &[u8]) -> Result<Self, Self::Error> {
249        if bytes.len() < MIN_PACKET_SIZE {
250            return Err(MessageError::ShortRead { len: bytes.len() });
251        }
252
253        let terminator = bytes[bytes.len() - 1];
254        if terminator != 0 {
255            return Err(MessageError::NotNullTerminated { terminator });
256        }
257
258        let pid = LittleEndian::read_u64(&bytes[..8]);
259        let tid = LittleEndian::read_u64(&bytes[8..16]);
260        let timestamp = zx::BootInstant::from_nanos(LittleEndian::read_i64(&bytes[16..24]));
261
262        let raw_severity = LittleEndian::read_i32(&bytes[24..28]);
263        let raw_severity = if raw_severity > (u8::MAX as i32) {
264            u8::MAX
265        } else if raw_severity < 0 {
266            0
267        } else {
268            u8::try_from(raw_severity).unwrap()
269        };
270        let dropped_logs = LittleEndian::read_u32(&bytes[28..METADATA_SIZE]) as u64;
271
272        // start reading tags after the header
273        let mut cursor = METADATA_SIZE;
274        let mut tag_len = bytes[cursor] as usize;
275        let mut tags = Vec::new();
276        while tag_len != 0 {
277            if tags.len() == MAX_TAGS {
278                return Err(MessageError::TooManyTags);
279            }
280
281            if tag_len > MAX_TAG_LEN - 1 {
282                return Err(MessageError::TagTooLong { index: tags.len(), len: tag_len });
283            }
284
285            if (cursor + tag_len + 1) > bytes.len() {
286                return Err(MessageError::OutOfBounds);
287            }
288
289            let tag_start = cursor + 1;
290            let tag_end = tag_start + tag_len;
291            let tag = String::from_utf8_lossy(&bytes[tag_start..tag_end]);
292            tags.push(tag.into());
293
294            cursor = tag_end;
295            tag_len = bytes[cursor] as usize;
296        }
297
298        let msg_start = cursor + 1;
299        let mut msg_end = cursor + 1;
300        while msg_end < bytes.len() {
301            if bytes[msg_end] > 0 {
302                msg_end += 1;
303                continue;
304            }
305            let message = String::from_utf8_lossy(&bytes[msg_start..msg_end]).into_owned();
306            let message_len = message.len();
307            let result = LoggerMessage {
308                timestamp,
309                raw_severity,
310                message: message.into_boxed_str(),
311                pid,
312                tid,
313                dropped_logs,
314                tags,
315                size_bytes: cursor + message_len + 1,
316            };
317            return Ok(result);
318        }
319
320        Err(MessageError::OutOfBounds)
321    }
322}
323
/// C-compatible severity value used by the legacy syslog wire format.
#[allow(non_camel_case_types)]
pub type fx_log_severity_t = c_int;
326
/// C layout of the fixed-size header of a legacy syslog packet.
#[repr(C)]
#[derive(Debug, Copy, Clone, Default, Eq, PartialEq)]
pub struct fx_log_metadata_t {
    /// Koid of the emitting process.
    pub pid: zx::sys::zx_koid_t,
    /// Koid of the emitting thread.
    pub tid: zx::sys::zx_koid_t,
    /// Timestamp of the message.
    pub time: zx::sys::zx_time_t,
    /// Severity of the message.
    pub severity: fx_log_severity_t,
    /// Count of logs dropped before this message.
    pub dropped_logs: u32,
}
336
/// C layout of a full legacy syslog packet: the fixed metadata header followed by the
/// tag/message payload.
#[repr(C)]
#[derive(Clone)]
pub struct fx_log_packet_t {
    /// Fixed-size metadata header.
    pub metadata: fx_log_metadata_t,
    // Contains concatenated tags and message and a null terminating character at
    // the end.
    // char(tag_len) + "tag1" + char(tag_len) + "tag2\0msg\0"
    pub data: [c_char; MAX_DATAGRAM_LEN - METADATA_SIZE],
}
346
347impl Default for fx_log_packet_t {
348    fn default() -> fx_log_packet_t {
349        fx_log_packet_t {
350            data: [0; MAX_DATAGRAM_LEN - METADATA_SIZE],
351            metadata: Default::default(),
352        }
353    }
354}
355
impl fx_log_packet_t {
    /// This struct has no padding bytes, but we can't use zerocopy because it needs const
    /// generics to support arrays this large.
    pub fn as_bytes(&self) -> &[u8] {
        // SAFETY: `Self` is `#[repr(C)]` and (per the comment above) has no padding
        // bytes, so every byte in `[self, self + size_of::<Self>())` is initialized.
        // The returned slice borrows `self` and therefore cannot outlive it.
        unsafe {
            std::slice::from_raw_parts(
                (self as *const Self) as *const u8,
                mem::size_of::<fx_log_packet_t>(),
            )
        }
    }

    /// Fills data with a single value for defined region.
    ///
    /// Panics if `region` is out of bounds of `data`.
    pub fn fill_data(&mut self, region: std::ops::Range<usize>, with: c_char) {
        self.data[region].iter_mut().for_each(|c| *c = with);
    }

    /// Copies bytes to data at specified offset.
    ///
    /// Panics if the destination range is out of bounds of `data` or if any byte
    /// fails to convert to `c_char`.
    pub fn add_data<T: std::convert::TryInto<c_char> + Copy>(&mut self, offset: usize, bytes: &[T])
    where
        <T as std::convert::TryInto<c_char>>::Error: std::fmt::Debug,
    {
        self.data[offset..(offset + bytes.len())]
            .iter_mut()
            .enumerate()
            .for_each(|(i, x)| *x = bytes[i].try_into().unwrap());
    }
}
383}