// Copyright 2020 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use anyhow::{anyhow, format_err, Error};
use fuchsia_component::client::connect_to_protocol;
use futures::channel::mpsc;
use futures::stream::StreamExt;
use injectable_time::TimeSource;
use log::{error, warn};
use std::cell::RefCell;
use {fidl_fuchsia_feedback as fidl_feedback, fuchsia_async as fasync};

// Name of the crash-report product we're filing against.
const CRASH_PRODUCT_NAME: &str = "FuchsiaDetect";

// CRASH_PROGRAM_NAME serves two purposes:
// 1) It is sent with the crash report. It may show up on the server as
//    "process type".
// 2) The on-device crash reporting program associates this string with the
//    "product" CRASH_PRODUCT_NAME we're requesting to file against, so we
//    only have to send the program name and not the product name with each
//    crash report request.
// This association is registered via a call to
// CrashReportingProductRegister.upsert_with_ack().
const CRASH_PROGRAM_NAME: &str = "triage_detect";
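
/// A request for Detect to file a crash report (snapshot) carrying the given crash signature.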
#[derive(Debug)]
pub struct SnapshotRequest {
    signature: String,
}

impl SnapshotRequest {
    pub fn new(signature: String) -> SnapshotRequest {
        SnapshotRequest { signature }
    }
}

/// The maximum number of pending crash report requests. This is needed because the FIDL API to
/// file a crash report does not return until the crash report has been fully generated, which can
/// take many seconds. Supporting pending crash reports means Detect can file a new crash report
/// for any other reason within that window, while the CrashReportHandler rate-limits requests to
/// the CrashReporter service.
const MAX_PENDING_CRASH_REPORTS: usize = 10;

/// A builder for constructing the CrashReportHandler node.
pub struct CrashReportHandlerBuilder<T: TimeSource> {
    proxy: Option<fidl_feedback::CrashReporterProxy>,
    max_pending_crash_reports: usize,
    time_source: T,
}

/// Logs an error message if the passed-in `result` is an error.
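///
/// A minimal usage sketch (`do_something_fallible` is a placeholder; see the call site in
/// `begin_crash_report_sender` below for real usage):
///
/// ```ignore
/// log_if_err!(do_something_fallible(), "Failed to do something");
/// ```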
#[macro_export]
macro_rules! log_if_err {
    ($result:expr, $log_prefix:expr) => {
        if let Err(e) = $result.as_ref() {
            log::error!("{}: {}", $log_prefix, e);
        }
    };
}

impl<T> CrashReportHandlerBuilder<T>
where
    T: TimeSource + 'static,
{
    pub fn new(time_source: T) -> Self {
        Self { time_source, max_pending_crash_reports: MAX_PENDING_CRASH_REPORTS, proxy: None }
    }

    pub async fn build(self) -> Result<CrashReportHandler, Error> {
        // The proxy is only pre-set for tests. If a proxy was not specified, this is a good time
        // to register our crash reporting product.
        if self.proxy.is_none() {
            let config_proxy =
                connect_to_protocol::<fidl_feedback::CrashReportingProductRegisterMarker>()?;
            let product_config = fidl_feedback::CrashReportingProduct {
                name: Some(CRASH_PRODUCT_NAME.to_string()),
                ..Default::default()
            };
            config_proxy.upsert_with_ack(CRASH_PROGRAM_NAME, &product_config).await?;
        }
        // Connect to the CrashReporter service if a proxy wasn't specified.
        let proxy =
            self.proxy.unwrap_or(connect_to_protocol::<fidl_feedback::CrashReporterMarker>()?);
        Ok(CrashReportHandler::new(proxy, self.time_source, self.max_pending_crash_reports))
    }
}

#[cfg(test)]
impl<T> CrashReportHandlerBuilder<T>
where
    T: TimeSource,
{
    fn with_proxy(mut self, proxy: fidl_feedback::CrashReporterProxy) -> Self {
        self.proxy = Some(proxy);
        self
    }

    fn with_max_pending_crash_reports(mut self, max: usize) -> Self {
        self.max_pending_crash_reports = max;
        self
    }
}

/// CrashReportHandler
/// Triggers a snapshot via FIDL
///
/// Summary: Provides a mechanism for filing crash reports.
///
/// FIDL dependencies:
///     - fuchsia.feedback.CrashReporter: CrashReportHandler uses this protocol to communicate
///       with the CrashReporter service in order to file crash reports.
///     - fuchsia.feedback.CrashReportingProductRegister: CrashReportHandler uses this protocol
///       to communicate with the CrashReportingProductRegister service in order to configure
///       the crash reporting product it will be filing on.
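///
/// # Example
///
/// A minimal usage sketch, assuming an async executor and a `TimeSource` implementation are
/// available (`time_source` and the signature string below are placeholders):
///
/// ```ignore
/// let handler = CrashReportHandlerBuilder::new(time_source).build().await?;
/// handler.request_snapshot(SnapshotRequest::new("example-signature".to_string()))?;
/// ```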
pub struct CrashReportHandler {
    /// The channel to send new crash report requests to the asynchronous crash report sender
    /// future. The maximum number of pending crash reports is implicitly enforced by the channel
    /// capacity.
    crash_report_sender: RefCell<mpsc::Sender<SnapshotRequest>>,
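    /// The capacity of the bounded channel, i.e. the maximum number of pending crash reports.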
    channel_size: usize,
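    /// The task that drains the channel and files reports with the CrashReporter service; held
    /// here so it stays alive for the lifetime of the handler.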
    _server_task: fasync::Task<()>,
}

impl CrashReportHandler {
    fn new<T>(proxy: fidl_feedback::CrashReporterProxy, time_source: T, channel_size: usize) -> Self
    where
        T: TimeSource + 'static,
    {
        // Set up the crash report sender that runs asynchronously
        let (channel, receiver) = mpsc::channel(channel_size);
        let server_task = Self::begin_crash_report_sender(proxy, receiver, time_source);
        Self { channel_size, crash_report_sender: RefCell::new(channel), _server_task: server_task }
    }

    /// Handle a FileCrashReport message by sending the specified crash report signature over the
    /// channel to the crash report sender.
    pub fn request_snapshot(&self, request: SnapshotRequest) -> Result<(), Error> {
        // Try to send the crash report signature over the channel. If the channel is full, return
        // an error
        match self.crash_report_sender.borrow_mut().try_send(request) {
            Ok(()) => Ok(()),
            Err(e) if e.is_full() => {
                warn!("Too many crash reports pending: {e}");
                Err(anyhow!("Pending crash reports exceeds max ({})", self.channel_size))
            }
            Err(e) => {
                warn!("Error sending crash report: {e}");
                Err(anyhow!("{e}"))
            }
        }
    }

    /// Spawn a Task that receives crash report signatures over the channel and uses
    /// the proxy to send a File FIDL request to the CrashReporter service with the specified
    /// signatures.
    fn begin_crash_report_sender<T>(
        proxy: fidl_feedback::CrashReporterProxy,
        mut receive_channel: mpsc::Receiver<SnapshotRequest>,
        time_source: T,
    ) -> fasync::Task<()>
    where
        T: TimeSource + 'static,
    {
        fasync::Task::local(async move {
            while let Some(request) = receive_channel.next().await {
                log_if_err!(
                    Self::send_crash_report(&proxy, request, &time_source).await,
                    "Failed to file crash report"
                );
            }
            error!("Crash reporter task ended. Crash reports will no longer be filed. This should not happen.")
        })
    }

    /// Send a File request to the CrashReporter service with the specified crash report signature.
    async fn send_crash_report<T: TimeSource>(
        proxy: &fidl_feedback::CrashReporterProxy,
        payload: SnapshotRequest,
        time_source: &T,
    ) -> Result<fidl_feedback::FileReportResults, Error> {
        warn!("Filing crash report, signature '{}'", payload.signature);
        let report = fidl_feedback::CrashReport {
            program_name: Some(CRASH_PROGRAM_NAME.to_string()),
            program_uptime: Some(time_source.now()),
            crash_signature: Some(payload.signature),
            is_fatal: Some(false),
            ..Default::default()
        };

        let result = proxy.file_report(report).await.map_err(|e| format_err!("IPC error: {e}"))?;
        result.map_err(|e| format_err!("Service error: {e:?}"))
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use assert_matches::assert_matches;
    use futures::TryStreamExt;
    use injectable_time::{FakeTime, IncrementingFakeTime};

    /// Tests that the node responds to the FileCrashReport message and that the expected crash
    /// report is received by the CrashReporter service.
    #[fuchsia::test]
    async fn test_crash_report_content() {
        // The crash report signature to use and verify against
        let crash_report_signature = "TestCrashReportSignature";

        // Set up the CrashReportHandler node
        let (proxy, mut stream) =
            fidl::endpoints::create_proxy_and_stream::<fidl_feedback::CrashReporterMarker>();
        let fake_time = FakeTime::new();
        fake_time.set_ticks(9876);
        let crash_report_handler =
            CrashReportHandlerBuilder::new(fake_time).with_proxy(proxy).build().await.unwrap();

        // File a crash report
        crash_report_handler
            .request_snapshot(SnapshotRequest::new(crash_report_signature.to_string()))
            .unwrap();

        // Verify the fake service receives the crash report with expected data
        if let Ok(Some(fidl_feedback::CrashReporterRequest::FileReport { responder: _, report })) =
            stream.try_next().await
        {
            assert_eq!(
                report,
                fidl_feedback::CrashReport {
                    program_name: Some(CRASH_PROGRAM_NAME.to_string()),
                    program_uptime: Some(9876),
                    crash_signature: Some(crash_report_signature.to_string()),
                    is_fatal: Some(false),
                    ..Default::default()
                }
            );
        } else {
            panic!("Did not receive a crash report");
        }
    }

    /// Tests that the number of pending crash reports is correctly bounded.
    #[fuchsia::test]
    async fn test_crash_report_pending_reports() {
        // Set up the proxy/stream and node outside of the large future used below. This way we can
        // still poll the stream after the future completes.
        let (proxy, mut stream) =
            fidl::endpoints::create_proxy_and_stream::<fidl_feedback::CrashReporterMarker>();
        let fake_time = IncrementingFakeTime::new(1000, std::time::Duration::from_nanos(1000));
        let crash_report_handler = CrashReportHandlerBuilder::new(fake_time)
            .with_proxy(proxy)
            .with_max_pending_crash_reports(1)
            .build()
            .await
            .unwrap();

        // The request stream is not serviced yet, so when the node makes the FIDL call to file
        // the crash report, the call will block indefinitely. This lets us test the pending
        // crash report counts.

        // The first FileCrashReport should succeed
        assert_matches!(
            crash_report_handler.request_snapshot(SnapshotRequest::new("TestCrash1".to_string())),
            Ok(())
        );

        // The second FileCrashReport should also succeed: since the first is now in progress,
        // this is the first "pending" report request
        assert_matches!(
            crash_report_handler.request_snapshot(SnapshotRequest::new("TestCrash2".to_string())),
            Ok(())
        );

        // Since the first request has not completed, and there is already one pending request,
        // this request should fail
        assert_matches!(
            crash_report_handler.request_snapshot(SnapshotRequest::new("TestCrash3".to_string())),
            Err(_)
        );

        // Verify the signature of the first crash report
        if let Ok(Some(fidl_feedback::CrashReporterRequest::FileReport { responder, report })) =
            stream.try_next().await
        {
            // Send a reply to allow the node to process the next crash report
            let _ = responder.send(Ok(&fidl_feedback::FileReportResults::default()));
            assert_eq!(
                report,
                fidl_feedback::CrashReport {
                    program_name: Some(CRASH_PROGRAM_NAME.to_string()),
                    program_uptime: Some(1000),
                    crash_signature: Some("TestCrash1".to_string()),
                    is_fatal: Some(false),
                    ..Default::default()
                }
            );
        } else {
            panic!("Did not receive a crash report");
        }

        // Verify the signature of the second crash report
        if let Ok(Some(fidl_feedback::CrashReporterRequest::FileReport { responder, report })) =
            stream.try_next().await
        {
            // Send a reply to allow the node to process the next crash report
            let _ = responder.send(Ok(&fidl_feedback::FileReportResults::default()));
            assert_eq!(
                report,
                fidl_feedback::CrashReport {
                    program_name: Some(CRASH_PROGRAM_NAME.to_string()),
                    program_uptime: Some(2000),
                    crash_signature: Some("TestCrash2".to_string()),
                    is_fatal: Some(false),
                    ..Default::default()
                }
            );
        } else {
            panic!("Did not receive a crash report");
        }
    }
}