inspect_runtime/
lib.rs

// Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.

//! # Inspect Runtime
//!
//! This library contains the necessary functions to serve inspect from a component.
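//!
//! ## Example
//!
//! A minimal sketch of publishing a `fuchsia_inspect::Inspector` (this assumes the component
//! has access to the `fuchsia.inspect.InspectSink` protocol in its namespace):
//!
//! ```ignore
//! use fuchsia_inspect::Inspector;
//! use inspect_runtime::{publish, PublishOptions};
//!
//! let inspector = Inspector::default();
//! inspector.root().record_int("answer", 42);
//!
//! // Keep the returned controller alive for as long as Inspect should stay published;
//! // dropping it stops the fuchsia.inspect.Tree server.
//! let _inspect_server = publish(&inspector, PublishOptions::default());
//! ```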

use fidl::endpoints::ClientEnd;
use fidl::AsHandleRef;
use fuchsia_component::client;
use fuchsia_inspect::Inspector;
use log::error;
use pin_project::pin_project;
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
use {fidl_fuchsia_inspect as finspect, fuchsia_async as fasync};

#[cfg(fuchsia_api_level_at_least = "HEAD")]
pub use finspect::EscrowToken;

pub mod service;

/// A setting for the fuchsia.inspect.Tree server that indicates how the server should send
/// the Inspector's VMO. For fallible methods of sending, a fallback is also set.
#[derive(Clone)]
pub enum TreeServerSendPreference {
    /// Frozen denotes sending a copy-on-write VMO.
    /// `on_failure` refers to failure behavior, as not all VMOs
    /// can be frozen. In particular, freezing a VMO requires writing to it,
    /// so if an Inspector is created with a read-only VMO, freezing will fail.
    ///
    /// Failure behavior should be one of Live or DeepCopy.
    ///
    /// Frozen { on_failure: Live } is the default value of TreeServerSendPreference.
    Frozen { on_failure: Box<TreeServerSendPreference> },

    /// Live denotes sending a live handle to the VMO.
    ///
    /// A client might want this behavior if they have time sensitive writes
    /// to the VMO, because copy-on-write behavior causes the initial write
    /// to a page to be around 1% slower.
    Live,

    /// DeepCopy will send a private copy of the VMO. This should probably
    /// not be a client's first choice, as Frozen(DeepCopy) will provide the
    /// same semantic behavior while possibly avoiding an expensive copy.
    ///
    /// A client might want this behavior if they have time sensitive writes
    /// to the VMO, because copy-on-write behavior causes the initial write
    /// to a page to be around 1% slower.
    DeepCopy,
}

impl TreeServerSendPreference {
    /// Create a new [`TreeServerSendPreference`] that sends a frozen/copy-on-write VMO of the tree,
    /// falling back to the specified `failure_mode` if a frozen VMO cannot be provided.
    ///
    /// # Arguments
    ///
    /// * `failure_mode` - Fallback behavior to use if freezing the Inspect VMO fails.
    ///
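    /// # Example
    ///
    /// A sketch of selecting a fallback (`DeepCopy` here is just one option):
    ///
    /// ```ignore
    /// let preference = TreeServerSendPreference::frozen_or(TreeServerSendPreference::DeepCopy);
    /// ```
    ///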
    pub fn frozen_or(failure_mode: TreeServerSendPreference) -> Self {
        TreeServerSendPreference::Frozen { on_failure: Box::new(failure_mode) }
    }
}

impl Default for TreeServerSendPreference {
    fn default() -> Self {
        TreeServerSendPreference::frozen_or(TreeServerSendPreference::Live)
    }
}

/// Optional settings for serving `fuchsia.inspect.Tree`
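///
/// # Example
///
/// A sketch of combining the builder options (the name and preference shown are illustrative):
///
/// ```ignore
/// use inspect_runtime::{PublishOptions, TreeServerSendPreference};
///
/// let options = PublishOptions::default()
///     .send_vmo_preference(TreeServerSendPreference::frozen_or(TreeServerSendPreference::DeepCopy))
///     .inspect_tree_name("config");
/// ```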
#[derive(Default)]
pub struct PublishOptions {
    /// This specifies how the VMO should be sent over the `fuchsia.inspect.Tree` server.
    ///
    /// Default behavior is
    /// `TreeServerSendPreference::Frozen { on_failure: TreeServerSendPreference::Live }`.
    pub(crate) vmo_preference: TreeServerSendPreference,

    /// A name which will show up in the metadata of snapshots
    /// taken from this `fuchsia.inspect.Tree` server. Defaults to
    /// fuchsia.inspect#DEFAULT_TREE_NAME.
    pub(crate) tree_name: Option<String>,

    /// Channel over which the InspectSink protocol will be used.
    pub(crate) inspect_sink_client: Option<ClientEnd<finspect::InspectSinkMarker>>,
}

impl PublishOptions {
    /// This specifies how the VMO should be sent over the `fuchsia.inspect.Tree` server.
    ///
    /// Default behavior is
    /// `TreeServerSendPreference::Frozen { on_failure: TreeServerSendPreference::Live }`.
    pub fn send_vmo_preference(mut self, preference: TreeServerSendPreference) -> Self {
        self.vmo_preference = preference;
        self
    }

    /// This sets an optional name which will show up in the metadata of snapshots
    /// taken from this `fuchsia.inspect.Tree` server.
    ///
    /// Defaults to fuchsia.inspect#DEFAULT_TREE_NAME.
    pub fn inspect_tree_name(mut self, name: impl Into<String>) -> Self {
        self.tree_name = Some(name.into());
        self
    }

    /// This allows the client to provide the InspectSink client channel.
    pub fn on_inspect_sink_client(
        mut self,
        client: ClientEnd<finspect::InspectSinkMarker>,
    ) -> Self {
        self.inspect_sink_client = Some(client);
        self
    }
}

/// Spawns a server handling `fuchsia.inspect.Tree` requests and publishes a handle to the
/// `fuchsia.inspect.Tree` using `fuchsia.inspect.InspectSink`.
///
/// To stop publishing Inspect, drop the returned controller.
///
/// `None` will be returned on FIDL failures. This includes:
/// * Failing to convert a FIDL endpoint for `fuchsia.inspect.Tree`'s `TreeMarker` into a stream
/// * Failing to connect to the `InspectSink` protocol
/// * Failing to send the connection over the wire
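///
/// # Example
///
/// A minimal sketch (this assumes `fuchsia.inspect.InspectSink` is routed to the component):
///
/// ```ignore
/// let inspector = fuchsia_inspect::Inspector::default();
/// if let Some(controller) = inspect_runtime::publish(&inspector, PublishOptions::default()) {
///     // Keep the controller alive; awaiting it joins the fuchsia.inspect.Tree server tasks.
///     controller.await;
/// }
/// ```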
#[must_use]
pub fn publish(
    inspector: &Inspector,
    options: PublishOptions,
) -> Option<PublishedInspectController> {
    let PublishOptions { vmo_preference, tree_name, inspect_sink_client } = options;
    let scope = fasync::Scope::new_with_name("inspect_runtime::publish");
    let tree = service::spawn_tree_server(inspector.clone(), vmo_preference, &scope);

    let inspect_sink = inspect_sink_client.map(|client| client.into_proxy()).or_else(|| {
        client::connect_to_protocol::<finspect::InspectSinkMarker>()
            .map_err(|err| error!(err:%; "failed to connect to fuchsia.inspect.InspectSink"))
            .ok()
    })?;

    // unwrap: safe since we have a valid tree handle coming from the server we spawn.
    let tree_koid = tree.basic_info().unwrap().koid;
    if let Err(err) = inspect_sink.publish(finspect::InspectSinkPublishRequest {
        tree: Some(tree),
        name: tree_name,
        ..finspect::InspectSinkPublishRequest::default()
    }) {
        error!(err:%; "failed to publish the fuchsia.inspect.Tree connection over InspectSink");
        return None;
    }

    Some(PublishedInspectController::new(inspector.clone(), scope, tree_koid))
}

/// Controller returned by [`publish`]. It keeps the `fuchsia.inspect.Tree` server alive;
/// drop it to stop publishing Inspect. Awaiting it completes once the server tasks finish.
#[pin_project]
pub struct PublishedInspectController {
    #[pin]
    scope: fasync::scope::Join,
    inspector: Inspector,
    tree_koid: zx::Koid,
}

/// Options for escrowing a frozen copy of the Inspect VMO via
/// [`PublishedInspectController::escrow_frozen`].
#[cfg(fuchsia_api_level_at_least = "HEAD")]
#[derive(Default)]
pub struct EscrowOptions {
    name: Option<String>,
    inspect_sink: Option<finspect::InspectSinkProxy>,
}

#[cfg(fuchsia_api_level_at_least = "HEAD")]
impl EscrowOptions {
    /// Sets the name with which the Inspect handle will be escrowed.
    pub fn name(mut self, name: impl Into<String>) -> Self {
        self.name = Some(name.into());
        self
    }

    /// Sets the inspect sink channel to use for escrowing.
    pub fn inspect_sink(mut self, proxy: finspect::InspectSinkProxy) -> Self {
        self.inspect_sink = Some(proxy);
        self
    }
}

impl PublishedInspectController {
    fn new(inspector: Inspector, scope: fasync::Scope, tree_koid: zx::Koid) -> Self {
        Self { inspector, scope: scope.join(), tree_koid }
    }

    /// Escrows a frozen copy of the associated Inspector's VMO, replacing the current live
    /// handle held by the server.
    ///
    /// This will not capture lazy nodes or properties.
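    ///
    /// # Example
    ///
    /// A sketch of escrowing at shutdown (the name shown is illustrative):
    ///
    /// ```ignore
    /// let _token = controller
    ///     .escrow_frozen(EscrowOptions::default().name("shutdown"))
    ///     .await;
    /// ```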
    #[cfg(fuchsia_api_level_at_least = "HEAD")]
    pub async fn escrow_frozen(self, opts: EscrowOptions) -> Option<EscrowToken> {
        let inspect_sink = match opts.inspect_sink {
            Some(proxy) => proxy,
            None => match client::connect_to_protocol::<finspect::InspectSinkMarker>() {
                Ok(inspect_sink) => inspect_sink,
                Err(err) => {
                    error!(err:%; "failed to connect to fuchsia.inspect.InspectSink");
                    return None;
                }
            },
        };
        let (ep0, ep1) = zx::EventPair::create();
        let Some(vmo) = self.inspector.frozen_vmo_copy() else {
            error!("failed to get a frozen vmo, aborting escrow");
            return None;
        };
        if let Err(err) = inspect_sink.escrow(finspect::InspectSinkEscrowRequest {
            vmo: Some(vmo),
            name: opts.name,
            token: Some(EscrowToken { token: ep0 }),
            tree: Some(self.tree_koid.raw_koid()),
            ..Default::default()
        }) {
            error!(err:%; "failed to escrow inspect data");
            return None;
        }
        self.scope.await;
        Some(EscrowToken { token: ep1 })
    }
}

impl Future for PublishedInspectController {
    type Output = ();

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let this = self.project();
        this.scope.poll(cx)
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use assert_matches::assert_matches;
    use component_events::events::{EventStream, Started};
    use component_events::matcher::EventMatcher;
    use diagnostics_assertions::assert_json_diff;
    use diagnostics_hierarchy::DiagnosticsHierarchy;
    use diagnostics_reader::ArchiveReader;
    use fidl::endpoints::RequestStream;
    use fidl_fuchsia_inspect::{InspectSinkRequest, InspectSinkRequestStream};
    use fuchsia_component_test::ScopedInstance;
    use fuchsia_inspect::reader::snapshot::Snapshot;
    use fuchsia_inspect::reader::{read, PartialNodeHierarchy};
    use fuchsia_inspect::InspectorConfig;

    use futures::{FutureExt, StreamExt};

    const TEST_PUBLISH_COMPONENT_URL: &str = "#meta/inspect_test_component.cm";

    #[fuchsia::test]
    async fn new_no_op() {
        let inspector = Inspector::new(InspectorConfig::default().no_op());
        assert!(!inspector.is_valid());

        // Ensure publish doesn't crash on a No-Op inspector.
        // If the server is running correctly, the returned controller future stays pending
        // (publish "hangs"); an error condition would resolve it immediately.
        assert_matches!(
            publish(&inspector, PublishOptions::default()).unwrap().now_or_never(),
            None
        );
    }

    #[fuchsia::test]
    async fn connect_to_service() -> Result<(), anyhow::Error> {
        let mut event_stream = EventStream::open().await.unwrap();

        let app = ScopedInstance::new_with_name(
            "interesting_name".into(),
            "coll".to_string(),
            TEST_PUBLISH_COMPONENT_URL.to_string(),
        )
        .await
        .expect("failed to create test component");

        let started_stream = EventMatcher::ok()
            .moniker_regex(app.child_name().to_owned())
            .wait::<Started>(&mut event_stream);

        app.connect_to_binder().expect("failed to connect to Binder protocol");

        started_stream.await.expect("failed to observe Started event");

        let hierarchy = ArchiveReader::inspect()
            .add_selector("coll\\:interesting_name:[name=tree-0]root")
            .snapshot()
            .await?
            .into_iter()
            .next()
            .and_then(|result| result.payload)
            .expect("one Inspect hierarchy");

        assert_json_diff!(hierarchy, root: {
            "tree-0": 0u64,
            int: 3i64,
            "lazy-node": {
                a: "test",
                child: {
                    double: 3.25,
                },
            }
        });

        Ok(())
    }

    #[fuchsia::test]
    async fn publish_new_no_op() {
        let inspector = Inspector::new(InspectorConfig::default().no_op());
        assert!(!inspector.is_valid());

        // Ensure publish doesn't crash on a No-Op inspector
        let _task = publish(&inspector, PublishOptions::default());
    }

    #[fuchsia::test]
    async fn publish_on_provided_channel() {
        let (client, server) = zx::Channel::create();
        let inspector = Inspector::default();
        inspector.root().record_string("hello", "world");
        let _inspect_sink_server_task = publish(
            &inspector,
            PublishOptions::default()
                .on_inspect_sink_client(ClientEnd::<finspect::InspectSinkMarker>::new(client)),
        );
        let mut request_stream =
            InspectSinkRequestStream::from_channel(fidl::AsyncChannel::from_channel(server));

        let tree = request_stream.next().await.unwrap();

        assert_matches!(tree, Ok(InspectSinkRequest::Publish {
            payload: finspect::InspectSinkPublishRequest { tree: Some(tree), .. }, ..}) => {
                let hierarchy = read(&tree.into_proxy()).await.unwrap();
                assert_json_diff!(hierarchy, root: {
                    hello: "world"
                });
            }
        );

        assert!(request_stream.next().await.is_none());
    }

    #[fuchsia::test]
    async fn controller_supports_escrowing_a_copy() {
        let inspector = Inspector::default();
        inspector.root().record_string("hello", "world");

        let (client, mut request_stream) = fidl::endpoints::create_request_stream();
        let controller =
            publish(&inspector, PublishOptions::default().on_inspect_sink_client(client))
                .expect("got controller");

        let request = request_stream.next().await.unwrap();
        let tree_koid = match request {
            Ok(InspectSinkRequest::Publish {
                payload: finspect::InspectSinkPublishRequest { tree: Some(tree), .. },
                ..
            }) => tree.basic_info().unwrap().koid,
            other => {
                panic!("unexpected request: {other:?}");
            }
        };
        let (proxy, mut request_stream) =
            fidl::endpoints::create_proxy_and_stream::<finspect::InspectSinkMarker>();
        let (client_token, request) = futures::future::join(
            controller.escrow_frozen(EscrowOptions {
                name: Some("test".into()),
                inspect_sink: Some(proxy),
            }),
            request_stream.next(),
        )
        .await;
        match request {
            Some(Ok(InspectSinkRequest::Escrow {
                payload:
                    finspect::InspectSinkEscrowRequest {
                        vmo: Some(vmo),
                        name: Some(name),
                        token: Some(EscrowToken { token }),
                        tree: Some(tree),
                        ..
                    },
                ..
            })) => {
                assert_eq!(name, "test");
                assert_eq!(tree, tree_koid.raw_koid());

                // An update to the Inspector isn't reflected here, since the escrowed VMO is a
                // frozen copy-on-write copy.
                inspector.root().record_string("hey", "not there");

                let snapshot = Snapshot::try_from(&vmo).expect("valid vmo");
                let hierarchy: DiagnosticsHierarchy =
                    PartialNodeHierarchy::try_from(snapshot).expect("valid snapshot").into();
                assert_json_diff!(hierarchy, root: {
                    hello: "world"
                });
                assert_eq!(
                    client_token.unwrap().token.basic_info().unwrap().koid,
                    token.basic_info().unwrap().related_koid
                );
            }
            other => {
                panic!("unexpected request: {other:?}");
            }
        };
    }
}