// inspect_runtime/lib.rs

// Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.

//! # Inspect Runtime
//!
//! This library contains the necessary functions to serve inspect from a component.
8use fidl::AsHandleRef;
9use fidl::endpoints::ClientEnd;
10use fuchsia_component_client::connect_to_protocol;
11use fuchsia_inspect::Inspector;
12use log::error;
13use pin_project::pin_project;
14use std::future::Future;
15use std::pin::{Pin, pin};
16use std::task::{Context, Poll};
17use {fidl_fuchsia_inspect as finspect, fuchsia_async as fasync};
18
19#[cfg(fuchsia_api_level_at_least = "HEAD")]
20pub use finspect::EscrowToken;
21
22pub mod service;
23
/// A setting for the fuchsia.inspect.Tree server that indicates how the server should send
/// the Inspector's VMO. For fallible methods of sending, a fallback is also set.
///
/// Use [`TreeServerSendPreference::frozen_or`] to build a `Frozen` preference with a
/// custom fallback; `Default` is `Frozen { on_failure: Live }`.
#[derive(Clone)]
pub enum TreeServerSendPreference {
    /// Frozen denotes sending a copy-on-write VMO.
    /// `on_failure` refers to failure behavior, as not all VMOs
    /// can be frozen. In particular, freezing a VMO requires writing to it,
    /// so if an Inspector is created with a read-only VMO, freezing will fail.
    ///
    /// Failure behavior should be one of Live or DeepCopy.
    ///
    /// Frozen { on_failure: Live } is the default value of TreeServerSendPreference.
    Frozen { on_failure: Box<TreeServerSendPreference> },

    /// Live denotes sending a live handle to the VMO.
    ///
    /// A client might want this behavior if they have time sensitive writes
    /// to the VMO, because copy-on-write behavior causes the initial write
    /// to a page to be around 1% slower.
    Live,

    /// DeepCopy will send a private copy of the VMO. This should probably
    /// not be a client's first choice, as Frozen(DeepCopy) will provide the
    /// same semantic behavior while possibly avoiding an expensive copy.
    ///
    /// A client might want this behavior if they have time sensitive writes
    /// to the VMO, because copy-on-write behavior causes the initial write
    /// to a page to be around 1% slower.
    DeepCopy,
}
54
55impl TreeServerSendPreference {
56    /// Create a new [`TreeServerSendPreference`] that sends a frozen/copy-on-write VMO of the tree,
57    /// falling back to the specified `failure_mode` if a frozen VMO cannot be provided.
58    ///
59    /// # Arguments
60    ///
61    /// * `failure_mode` - Fallback behavior to use if freezing the Inspect VMO fails.
62    ///
63    pub fn frozen_or(failure_mode: TreeServerSendPreference) -> Self {
64        TreeServerSendPreference::Frozen { on_failure: Box::new(failure_mode) }
65    }
66}
67
68impl Default for TreeServerSendPreference {
69    fn default() -> Self {
70        TreeServerSendPreference::frozen_or(TreeServerSendPreference::Live)
71    }
72}
73
/// Optional settings for serving `fuchsia.inspect.Tree`
#[derive(Default)]
pub struct PublishOptions {
    /// This specifies how the VMO should be sent over the `fuchsia.inspect.Tree` server.
    ///
    /// Default behavior is
    /// `TreeServerSendPreference::Frozen { on_failure: TreeServerSendPreference::Live }`.
    pub(crate) vmo_preference: TreeServerSendPreference,

    /// A name value which will show up in the metadata of snapshots
    /// taken from this `fuchsia.inspect.Tree` server. Defaults to
    /// fuchsia.inspect#DEFAULT_TREE_NAME.
    pub(crate) tree_name: Option<String>,

    /// Channel over which the InspectSink protocol will be used.
    pub(crate) inspect_sink_client: Option<ClientEnd<finspect::InspectSinkMarker>>,

    /// Scope on which the server will be spawned.
    pub(crate) custom_scope: Option<fasync::ScopeHandle>,

    /// If provided, `publish` will use this tree instead of creating a new one.
    pub(crate) tree: Option<TreeServerHandle>,
}
97
98impl PublishOptions {
99    /// This specifies how the VMO should be sent over the `fuchsia.inspect.Tree` server.
100    ///
101    /// Default behavior is
102    /// `TreeServerSendPreference::Frozen { on_failure: TreeServerSendPreference::Live }`.
103    pub fn send_vmo_preference(mut self, preference: TreeServerSendPreference) -> Self {
104        self.vmo_preference = preference;
105        self
106    }
107
108    /// This sets an optional name value which will show up in the metadata of snapshots
109    /// taken from this `fuchsia.inspect.Tree` server.
110    ///
111    /// Default behavior is an empty string.
112    pub fn inspect_tree_name(mut self, name: impl Into<String>) -> Self {
113        self.tree_name = Some(name.into());
114        self
115    }
116
117    /// Sets a custom fuchsia_async::Scope to use for serving Inspect.
118    pub fn custom_scope(mut self, scope: fasync::ScopeHandle) -> Self {
119        self.custom_scope = Some(scope);
120        self
121    }
122
123    /// This allows the client to provide the InspectSink client channel.
124    pub fn on_inspect_sink_client(
125        mut self,
126        client: ClientEnd<finspect::InspectSinkMarker>,
127    ) -> Self {
128        self.inspect_sink_client = Some(client);
129        self
130    }
131
132    /// Use the provided [`TreeServerHandle`] instead of creating a new one. Skips the
133    /// call to InspectSink.Publish, but still spawns a new Tree server to
134    /// handle incoming requests.
135    pub fn on_tree_server(mut self, tree: TreeServerHandle) -> Self {
136        self.tree = Some(tree);
137        self
138    }
139}
140
141/// Spawns a server handling `fuchsia.inspect.Tree` requests and a handle
142/// to the `fuchsia.inspect.Tree` is published using `fuchsia.inspect.InspectSink`.
143///
144/// Whenever the client wishes to stop publishing Inspect, the Controller may be dropped.
145///
146/// `None` will be returned on FIDL failures. This includes:
147/// * Failing to convert a FIDL endpoint for `fuchsia.inspect.Tree`'s `TreeMarker` into a stream
148/// * Failing to connect to the `InspectSink` protocol
149/// * Failing to send the connection over the wire
150#[must_use]
151pub fn publish(
152    inspector: &Inspector,
153    options: PublishOptions,
154) -> Option<PublishedInspectController> {
155    let PublishOptions { vmo_preference, tree_name, inspect_sink_client, custom_scope, tree } =
156        options;
157    let scope = custom_scope
158        .map(|handle| handle.new_child_with_name("inspect_runtime::publish"))
159        .unwrap_or_else(|| fasync::Scope::new_with_name("inspect_runtime::publish"));
160
161    if let Some(TreeServerHandle { client_koid: client, stream }) = tree {
162        service::spawn_tree_server_with_stream(inspector.clone(), vmo_preference, stream, &scope);
163        return Some(PublishedInspectController::new(inspector.clone(), scope, client));
164    }
165
166    let tree = service::spawn_tree_server(inspector.clone(), vmo_preference, &scope);
167
168    let inspect_sink = inspect_sink_client.map(|client| client.into_proxy()).or_else(|| {
169        connect_to_protocol::<finspect::InspectSinkMarker>()
170            .map_err(|err| error!(err:%; "failed to spawn the fuchsia.inspect.Tree server"))
171            .ok()
172    })?;
173
174    // unwrap: safe since we have a valid tree handle coming from the server we spawn.
175    let tree_koid = tree.get_koid().unwrap();
176    if let Err(err) = inspect_sink.publish(finspect::InspectSinkPublishRequest {
177        tree: Some(tree),
178        name: tree_name,
179        ..finspect::InspectSinkPublishRequest::default()
180    }) {
181        error!(err:%; "failed to spawn the fuchsia.inspect.Tree server");
182        return None;
183    }
184
185    Some(PublishedInspectController::new(inspector.clone(), scope, tree_koid))
186}
187
/// Options for fetching a VMO that was previously escrowed.
///
/// Consumed by [`fetch_escrow`].
#[derive(Debug, Default)]
pub struct FetchEscrowOptions {
    /// Channel over which the InspectSink protocol will be used.
    pub(crate) inspect_sink_client: Option<ClientEnd<finspect::InspectSinkMarker>>,

    /// If true, the escrowed Inspect tree will be replaced with a new one, and a handle
    /// to the new tree will be returned in [`FetchEscrowResult`].
    pub(crate) should_replace_with_tree: bool,
}
198
199impl FetchEscrowOptions {
200    /// Creates new default options for fetching an escrowed VMO.
201    pub fn new() -> Self {
202        Self::default()
203    }
204
205    /// This allows the client to provide the InspectSink client channel.
206    pub fn on_inspect_sink_client(
207        mut self,
208        client: ClientEnd<finspect::InspectSinkMarker>,
209    ) -> Self {
210        self.inspect_sink_client = Some(client);
211        self
212    }
213
214    /// If true, the escrowed Inspect tree will be replaced with a new one, and a handle
215    /// to the new tree will be returned in [`FetchEscrowResult`].
216    pub fn replace_with_tree(mut self) -> Self {
217        self.should_replace_with_tree = true;
218        self
219    }
220}
221
/// The result of fetching an escrowed VMO.
///
/// Returned by [`fetch_escrow`].
pub struct FetchEscrowResult {
    /// The VMO containing the escrowed Inspect data.
    pub vmo: zx::Vmo,
    /// A handle to the new Inspect Tree if one was requested
    /// (see [`FetchEscrowOptions::replace_with_tree`]).
    pub server: Option<TreeServerHandle>,
}
229
/// A handle to a `fuchsia.inspect.Tree` server.
pub struct TreeServerHandle {
    // Koid of the client end of the tree channel; `publish` hands it to the
    // PublishedInspectController, which uses it to identify this tree when escrowing.
    client_koid: zx::Koid,
    // The request stream that the spawned Tree server will handle.
    stream: finspect::TreeRequestStream,
}
235
/// Fetches a VMO that was previously escrowed.
///
/// This function connects to `fuchsia.inspect.InspectSink` and exchanges the provided
/// `escrow_token` for the VMO it represents.
///
/// If `FetchEscrowOptions::replace_with_tree` is set, a new `fuchsia.inspect.Tree` server
/// will be created to replace the one that was torn down when the VMO was originally escrowed.
/// A handle to this new tree will be returned.
#[cfg(fuchsia_api_level_at_least = "HEAD")]
pub async fn fetch_escrow(
    escrow_token: finspect::EscrowToken,
    options: FetchEscrowOptions,
) -> Result<FetchEscrowResult, anyhow::Error> {
    use anyhow::{Context as _, anyhow};

    let FetchEscrowOptions { inspect_sink_client, should_replace_with_tree } = options;

    // When a replacement tree was requested, create the endpoints up front so the client
    // end can ride along in the FetchEscrow request below.
    let mut replacement_client = None;
    let mut replacement_handle = None;
    if should_replace_with_tree {
        let (client, stream) = fidl::endpoints::create_request_stream::<finspect::TreeMarker>();
        // unwrap: safe since we have a valid tree handle coming from above.
        let client_koid = client.get_koid().unwrap();
        replacement_handle = Some(TreeServerHandle { client_koid, stream });
        replacement_client = Some(client);
    }

    let inspect_sink = if let Some(client) = inspect_sink_client {
        client.into_proxy()
    } else {
        connect_to_protocol::<finspect::InspectSinkMarker>()?
    };

    // Trade the escrow token for the VMO it represents.
    let response = inspect_sink
        .fetch_escrow(finspect::InspectSinkFetchEscrowRequest {
            token: Some(escrow_token),
            tree: replacement_client,
            ..Default::default()
        })
        .await
        .context("Failed to fetch escrow")?;
    let vmo = response.vmo.ok_or_else(|| {
        anyhow!("VMO missing from response; perhaps the provided escrow_token is invalid")
    })?;

    Ok(FetchEscrowResult { vmo, server: replacement_handle })
}
282
/// Controller for a published Inspect tree, returned by [`publish`].
///
/// As a `Future`, it resolves when the serving scope's tasks complete. Per the
/// [`publish`] docs, it may be dropped to stop publishing Inspect.
#[pin_project]
pub struct PublishedInspectController {
    // Join future for the scope on which the Tree server was spawned.
    #[pin]
    scope: fasync::scope::Join,
    inspector: Inspector,
    // Koid of the published tree client end; sent in escrow requests to identify
    // which tree is being replaced.
    tree_koid: zx::Koid,
}
290
/// Options for escrowing Inspect data via [`PublishedInspectController::escrow_frozen`].
#[cfg(fuchsia_api_level_at_least = "HEAD")]
#[derive(Default)]
pub struct EscrowOptions {
    // Name recorded with the escrowed data; forwarded in the escrow request.
    name: Option<String>,
    // If set, used instead of connecting to the InspectSink protocol.
    inspect_sink: Option<finspect::InspectSinkProxy>,
}
297
#[cfg(fuchsia_api_level_at_least = "HEAD")]
impl EscrowOptions {
    /// Sets the name with which the Inspect handle will be escrowed.
    pub fn name(self, name: impl Into<String>) -> Self {
        Self { name: Some(name.into()), ..self }
    }

    /// Sets the inspect sink channel to use for escrowing.
    pub fn inspect_sink(self, proxy: finspect::InspectSinkProxy) -> Self {
        Self { inspect_sink: Some(proxy), ..self }
    }
}
312
313impl PublishedInspectController {
314    fn new(inspector: Inspector, scope: fasync::Scope, tree_koid: zx::Koid) -> Self {
315        Self { inspector, scope: scope.join(), tree_koid }
316    }
317
318    /// Escrows a frozen copy of the VMO of the associated Inspector replacing the current live
319    /// handle in the server.
320    /// This will not capture lazy nodes or properties.
321    #[cfg(fuchsia_api_level_at_least = "HEAD")]
322    pub async fn escrow_frozen(self, opts: EscrowOptions) -> Option<EscrowToken> {
323        let inspect_sink = match opts.inspect_sink {
324            Some(proxy) => proxy,
325            None => match connect_to_protocol::<finspect::InspectSinkMarker>() {
326                Ok(inspect_sink) => inspect_sink,
327                Err(err) => {
328                    error!(err:%; "failed to spawn the fuchsia.inspect.Tree server");
329                    return None;
330                }
331            },
332        };
333        let (ep0, ep1) = zx::EventPair::create();
334        let Some(vmo) = self.inspector.frozen_vmo_copy() else {
335            error!("failed to get a frozen vmo, aborting escrow");
336            return None;
337        };
338        if let Err(err) = inspect_sink.escrow(finspect::InspectSinkEscrowRequest {
339            vmo: Some(vmo),
340            name: opts.name,
341            token: Some(EscrowToken { token: ep0 }),
342            tree: Some(self.tree_koid.raw_koid()),
343            ..Default::default()
344        }) {
345            error!(err:%; "failed to escrow inspect data");
346            return None;
347        }
348        self.scope.await;
349        Some(EscrowToken { token: ep1 })
350    }
351
352    /// Cancels the running published controller.
353    ///
354    /// The future resolves when no more serving tasks are running.
355    pub async fn cancel(self) {
356        let Self { scope, inspector: _, tree_koid: _ } = self;
357        let scope = pin!(scope);
358        scope.cancel().await;
359    }
360}
361
362impl Future for PublishedInspectController {
363    type Output = ();
364
365    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
366        let this = self.project();
367        this.scope.poll(cx)
368    }
369}
370
#[cfg(test)]
mod tests {
    use super::*;
    use assert_matches::assert_matches;
    use component_events::events::{EventStream, Started};
    use component_events::matcher::EventMatcher;
    use diagnostics_assertions::assert_json_diff;
    use diagnostics_hierarchy::DiagnosticsHierarchy;
    use diagnostics_reader::ArchiveReader;
    use fidl::endpoints::RequestStream;
    use fidl_fuchsia_inspect::{InspectSinkRequest, InspectSinkRequestStream};
    use fuchsia_component_test::ScopedInstance;
    use fuchsia_inspect::InspectorConfig;
    use fuchsia_inspect::reader::snapshot::Snapshot;
    use fuchsia_inspect::reader::{PartialNodeHierarchy, read};

    use futures::{FutureExt, StreamExt};

    // URL of the helper component (serving a known Inspect hierarchy) used by the
    // integration-style tests below.
    const TEST_PUBLISH_COMPONENT_URL: &str = "#meta/inspect_test_component.cm";

    // `publish` on a no-op Inspector must still hand back a controller whose future stays
    // pending, i.e. the server keeps running rather than failing immediately.
    #[fuchsia::test]
    async fn new_no_op() {
        let inspector = Inspector::new(InspectorConfig::default().no_op());
        assert!(!inspector.is_valid());

        // Ensure publish doesn't crash on a No-Op inspector.
        // The idea is that in this context, publish will hang if the server is running
        // correctly. That is, if there is an error condition, it will be immediate.
        assert_matches!(
            publish(&inspector, PublishOptions::default()).unwrap().now_or_never(),
            None
        );
    }

    // End-to-end check: launch the test component, wait for it to start, and read its
    // published Inspect hierarchy through the Archivist.
    #[fuchsia::test]
    async fn connect_to_service() -> Result<(), anyhow::Error> {
        let mut event_stream = EventStream::open().await.unwrap();

        let app = ScopedInstance::new_with_name(
            "interesting_name".into(),
            "coll".to_string(),
            TEST_PUBLISH_COMPONENT_URL.to_string(),
        )
        .await
        .expect("failed to create test component");

        let started_stream = EventMatcher::ok()
            .moniker_regex(app.child_name().to_owned())
            .wait::<Started>(&mut event_stream);

        // Binding starts the component; the Started event confirms it is running.
        app.connect_to_binder().expect("failed to connect to Binder protocol");

        started_stream.await.expect("failed to observe Started event");

        let hierarchy = ArchiveReader::inspect()
            .add_selector("coll\\:interesting_name:[name=tree-0]root")
            .snapshot()
            .await?
            .into_iter()
            .next()
            .and_then(|result| result.payload)
            .expect("one Inspect hierarchy");

        // Expected contents are defined by the inspect_test_component.
        assert_json_diff!(hierarchy, root: {
            "tree-0": 0u64,
            int: 3i64,
            "lazy-node": {
                a: "test",
                child: {
                    double: 3.25,
                },
            }
        });

        Ok(())
    }

    // `publish` must not panic when given a no-op Inspector.
    #[fuchsia::test]
    async fn publish_new_no_op() {
        let inspector = Inspector::new(InspectorConfig::default().no_op());
        assert!(!inspector.is_valid());

        // Ensure publish doesn't crash on a No-Op inspector
        let _task = publish(&inspector, PublishOptions::default());
    }

    // When a client-provided InspectSink channel is used, the Publish request must arrive
    // on it carrying a tree handle whose hierarchy matches the Inspector's contents.
    #[fuchsia::test]
    async fn publish_on_provided_channel() {
        let (client, server) = zx::Channel::create();
        let inspector = Inspector::default();
        inspector.root().record_string("hello", "world");
        let _inspect_sink_server_task = publish(
            &inspector,
            PublishOptions::default()
                .on_inspect_sink_client(ClientEnd::<finspect::InspectSinkMarker>::new(client)),
        );
        let mut request_stream =
            InspectSinkRequestStream::from_channel(fidl::AsyncChannel::from_channel(server));

        let tree = request_stream.next().await.unwrap();

        assert_matches!(tree, Ok(InspectSinkRequest::Publish {
            payload: finspect::InspectSinkPublishRequest { tree: Some(tree), .. }, ..}) => {
                let hierarchy = read(&tree.into_proxy()).await.unwrap();
                assert_json_diff!(hierarchy, root: {
                    hello: "world"
                });
            }
        );

        // publish drops its end of the InspectSink channel after the Publish request.
        assert!(request_stream.next().await.is_none());
    }

    // Cancelling the controller must tear down the Tree server: the tree channel closes.
    #[fuchsia::test]
    async fn cancel_published_controller() {
        let (client, server) = zx::Channel::create();
        let inspector = Inspector::default();
        inspector.root().record_string("hello", "world");
        let controller = publish(
            &inspector,
            PublishOptions::default()
                .on_inspect_sink_client(ClientEnd::<finspect::InspectSinkMarker>::new(client)),
        )
        .expect("create controller");
        let mut request_stream =
            InspectSinkRequestStream::from_channel(fidl::AsyncChannel::from_channel(server));

        let tree = request_stream.next().await.unwrap();

        let tree = assert_matches!(tree, Ok(InspectSinkRequest::Publish {
            payload: finspect::InspectSinkPublishRequest { tree: Some(tree), .. }, ..}) => tree
        );

        assert!(request_stream.next().await.is_none());

        controller.cancel().await;
        // The tree channel should observe peer-closed once serving stops.
        fidl::AsyncChannel::from_channel(tree.into_channel())
            .on_closed()
            .await
            .expect("wait closed");
    }

    // escrow_frozen must send an Escrow request carrying a frozen copy of the VMO, the
    // published tree's koid, and a token event pair related to the returned one.
    #[fuchsia::test]
    async fn controller_supports_escrowing_a_copy() {
        let inspector = Inspector::default();
        inspector.root().record_string("hello", "world");

        let (client, mut request_stream) = fidl::endpoints::create_request_stream();
        let controller =
            publish(&inspector, PublishOptions::default().on_inspect_sink_client(client))
                .expect("got controller");

        let request = request_stream.next().await.unwrap();
        let tree_koid = match request {
            Ok(InspectSinkRequest::Publish {
                payload: finspect::InspectSinkPublishRequest { tree: Some(tree), .. },
                ..
            }) => tree.basic_info().unwrap().koid,
            other => {
                panic!("unexpected request: {other:?}");
            }
        };
        let (proxy, mut request_stream) =
            fidl::endpoints::create_proxy_and_stream::<finspect::InspectSinkMarker>();
        // Drive escrow_frozen and the fake InspectSink server concurrently.
        let (client_token, request) = futures::future::join(
            controller.escrow_frozen(EscrowOptions {
                name: Some("test".into()),
                inspect_sink: Some(proxy),
            }),
            request_stream.next(),
        )
        .await;
        match request {
            Some(Ok(InspectSinkRequest::Escrow {
                payload:
                    finspect::InspectSinkEscrowRequest {
                        vmo: Some(vmo),
                        name: Some(name),
                        token: Some(EscrowToken { token }),
                        tree: Some(tree),
                        ..
                    },
                ..
            })) => {
                assert_eq!(name, "test");
                assert_eq!(tree, tree_koid.raw_koid());

                // An update to the inspector isn't reflected here, since it was CoW.
                inspector.root().record_string("hey", "not there");

                let snapshot = Snapshot::try_from(&vmo).expect("valid vmo");
                let hierarchy: DiagnosticsHierarchy =
                    PartialNodeHierarchy::try_from(snapshot).expect("valid snapshot").into();
                assert_json_diff!(hierarchy, root: {
                    hello: "world"
                });
                // The returned token must be the peer of the one sent to the server.
                assert_eq!(
                    client_token.unwrap().token.basic_info().unwrap().koid,
                    token.basic_info().unwrap().related_koid
                );
            }
            other => {
                panic!("unexpected request: {other:?}");
            }
        };
    }

    // fetch_escrow must forward the escrow token in a FetchEscrow request and surface the
    // VMO from the server's response.
    #[cfg(fuchsia_api_level_at_least = "HEAD")]
    #[fuchsia::test]
    async fn fetch_escrow_works() {
        let (client, mut request_stream) =
            fidl::endpoints::create_request_stream::<finspect::InspectSinkMarker>();
        let (_local_token, remote_token) = zx::EventPair::create();
        let token = EscrowToken { token: remote_token };
        let expected_koid = token.token.basic_info().unwrap().koid;

        let publisher_fut =
            fetch_escrow(token, FetchEscrowOptions::new().on_inspect_sink_client(client));

        let server_fut = async {
            let (payload, responder) = assert_matches!(
                request_stream.next().await,
                Some(Ok(InspectSinkRequest::FetchEscrow { payload, responder })) => (payload, responder)
            );
            let received_token = payload.token.unwrap();
            assert_eq!(received_token.token.basic_info().unwrap().koid, expected_koid);
            responder
                .send(finspect::InspectSinkFetchEscrowResponse {
                    vmo: Some(zx::Vmo::create(0).unwrap()),
                    ..Default::default()
                })
                .unwrap();
        };

        let (result, _) = futures::join!(publisher_fut, server_fut);
        assert!(result.is_ok());
    }
}