inspect_runtime/
lib.rs

1// Copyright 2021 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
3
4//! # Inspect Runtime
5//!
6//! This library contains the necessary functions to serve inspect from a component.
7
8use fidl::AsHandleRef;
9use fidl::endpoints::ClientEnd;
10use fuchsia_component_client::connect_to_protocol;
11use fuchsia_inspect::Inspector;
12use log::error;
13use pin_project::pin_project;
14use std::future::Future;
15use std::pin::{Pin, pin};
16use std::task::{Context, Poll};
17use {fidl_fuchsia_inspect as finspect, fuchsia_async as fasync};
18
19#[cfg(fuchsia_api_level_at_least = "HEAD")]
20pub use finspect::EscrowToken;
21
22pub mod service;
23
/// A setting for the fuchsia.inspect.Tree server that indicates how the server should send
/// the Inspector's VMO. For fallible methods of sending, a fallback is also set.
///
/// Construct the default fallible mode with [`TreeServerSendPreference::frozen_or`].
#[derive(Clone)]
pub enum TreeServerSendPreference {
    /// Frozen denotes sending a copy-on-write VMO.
    /// `on_failure` refers to failure behavior, as not all VMOs
    /// can be frozen. In particular, freezing a VMO requires writing to it,
    /// so if an Inspector is created with a read-only VMO, freezing will fail.
    ///
    /// Failure behavior should be one of Live or DeepCopy.
    ///
    /// Frozen { on_failure: Live } is the default value of TreeServerSendPreference.
    Frozen { on_failure: Box<TreeServerSendPreference> },

    /// Live denotes sending a live handle to the VMO.
    ///
    /// A client might want this behavior if they have time sensitive writes
    /// to the VMO, because copy-on-write behavior causes the initial write
    /// to a page to be around 1% slower.
    Live,

    /// DeepCopy will send a private copy of the VMO. This should probably
    /// not be a client's first choice, as Frozen(DeepCopy) will provide the
    /// same semantic behavior while possibly avoiding an expensive copy.
    ///
    /// A client might want this behavior if they have time sensitive writes
    /// to the VMO, because copy-on-write behavior causes the initial write
    /// to a page to be around 1% slower.
    DeepCopy,
}
54
55impl TreeServerSendPreference {
56    /// Create a new [`TreeServerSendPreference`] that sends a frozen/copy-on-write VMO of the tree,
57    /// falling back to the specified `failure_mode` if a frozen VMO cannot be provided.
58    ///
59    /// # Arguments
60    ///
61    /// * `failure_mode` - Fallback behavior to use if freezing the Inspect VMO fails.
62    ///
63    pub fn frozen_or(failure_mode: TreeServerSendPreference) -> Self {
64        TreeServerSendPreference::Frozen { on_failure: Box::new(failure_mode) }
65    }
66}
67
68impl Default for TreeServerSendPreference {
69    fn default() -> Self {
70        TreeServerSendPreference::frozen_or(TreeServerSendPreference::Live)
71    }
72}
73
/// Optional settings for serving `fuchsia.inspect.Tree`
#[derive(Default)]
pub struct PublishOptions {
    /// This specifies how the VMO should be sent over the `fuchsia.inspect.Tree` server.
    ///
    /// Default behavior is
    /// `TreeServerSendPreference::Frozen { on_failure: TreeServerSendPreference::Live }`.
    pub(crate) vmo_preference: TreeServerSendPreference,

    /// A name value which will show up in the metadata of snapshots
    /// taken from this `fuchsia.inspect.Tree` server. Defaults to
    /// fuchsia.inspect#DEFAULT_TREE_NAME.
    pub(crate) tree_name: Option<String>,

    /// Channel over which the InspectSink protocol will be used. When unset,
    /// `publish` connects to the protocol from the component's namespace.
    pub(crate) inspect_sink_client: Option<ClientEnd<finspect::InspectSinkMarker>>,

    /// Scope on which the server will be spawned. When unset, a new root scope is created.
    pub(crate) custom_scope: Option<fasync::ScopeHandle>,
}
94
95impl PublishOptions {
96    /// This specifies how the VMO should be sent over the `fuchsia.inspect.Tree` server.
97    ///
98    /// Default behavior is
99    /// `TreeServerSendPreference::Frozen { on_failure: TreeServerSendPreference::Live }`.
100    pub fn send_vmo_preference(mut self, preference: TreeServerSendPreference) -> Self {
101        self.vmo_preference = preference;
102        self
103    }
104
105    /// This sets an optional name value which will show up in the metadata of snapshots
106    /// taken from this `fuchsia.inspect.Tree` server.
107    ///
108    /// Default behavior is an empty string.
109    pub fn inspect_tree_name(mut self, name: impl Into<String>) -> Self {
110        self.tree_name = Some(name.into());
111        self
112    }
113
114    /// Sets a custom fuchsia_async::Scope to use for serving Inspect.
115    pub fn custom_scope(mut self, scope: fasync::ScopeHandle) -> Self {
116        self.custom_scope = Some(scope);
117        self
118    }
119
120    /// This allows the client to provide the InspectSink client channel.
121    pub fn on_inspect_sink_client(
122        mut self,
123        client: ClientEnd<finspect::InspectSinkMarker>,
124    ) -> Self {
125        self.inspect_sink_client = Some(client);
126        self
127    }
128}
129
130/// Spawns a server handling `fuchsia.inspect.Tree` requests and a handle
131/// to the `fuchsia.inspect.Tree` is published using `fuchsia.inspect.InspectSink`.
132///
133/// Whenever the client wishes to stop publishing Inspect, the Controller may be dropped.
134///
135/// `None` will be returned on FIDL failures. This includes:
136/// * Failing to convert a FIDL endpoint for `fuchsia.inspect.Tree`'s `TreeMarker` into a stream
137/// * Failing to connect to the `InspectSink` protocol
138/// * Failing to send the connection over the wire
139#[must_use]
140pub fn publish(
141    inspector: &Inspector,
142    options: PublishOptions,
143) -> Option<PublishedInspectController> {
144    let PublishOptions { vmo_preference, tree_name, inspect_sink_client, custom_scope } = options;
145    let scope = custom_scope
146        .map(|handle| handle.new_child_with_name("inspect_runtime::publish"))
147        .unwrap_or_else(|| fasync::Scope::new_with_name("inspect_runtime::publish"));
148    let tree = service::spawn_tree_server(inspector.clone(), vmo_preference, &scope);
149
150    let inspect_sink = inspect_sink_client.map(|client| client.into_proxy()).or_else(|| {
151        connect_to_protocol::<finspect::InspectSinkMarker>()
152            .map_err(|err| error!(err:%; "failed to spawn the fuchsia.inspect.Tree server"))
153            .ok()
154    })?;
155
156    // unwrap: safe since we have a valid tree handle coming from the server we spawn.
157    let tree_koid = tree.basic_info().unwrap().koid;
158    if let Err(err) = inspect_sink.publish(finspect::InspectSinkPublishRequest {
159        tree: Some(tree),
160        name: tree_name,
161        ..finspect::InspectSinkPublishRequest::default()
162    }) {
163        error!(err:%; "failed to spawn the fuchsia.inspect.Tree server");
164        return None;
165    }
166
167    Some(PublishedInspectController::new(inspector.clone(), scope, tree_koid))
168}
169
/// Controller for a published `fuchsia.inspect.Tree` server.
///
/// Dropping the controller stops publishing Inspect. As a `Future`, it resolves
/// once the serving scope has no more running tasks.
#[pin_project]
pub struct PublishedInspectController {
    // Join future over the scope the tree server was spawned on.
    #[pin]
    scope: fasync::scope::Join,
    // The Inspector whose VMO is being served; kept so a frozen copy can be escrowed.
    inspector: Inspector,
    // Koid of the tree handle sent over InspectSink; identifies the tree when escrowing.
    tree_koid: zx::Koid,
}
177
/// Options controlling how an Inspect VMO is escrowed via `fuchsia.inspect.InspectSink`.
#[cfg(fuchsia_api_level_at_least = "HEAD")]
#[derive(Default)]
pub struct EscrowOptions {
    // Name with which the Inspect handle will be escrowed.
    name: Option<String>,
    // Channel to use for escrowing; when `None`, a fresh InspectSink connection is made.
    inspect_sink: Option<finspect::InspectSinkProxy>,
}
184
#[cfg(fuchsia_api_level_at_least = "HEAD")]
impl EscrowOptions {
    /// Sets the name with which the Inspect handle will be escrowed.
    pub fn name(self, name: impl Into<String>) -> Self {
        Self { name: Some(name.into()), ..self }
    }

    /// Sets the inspect sink channel to use for escrowing.
    pub fn inspect_sink(self, proxy: finspect::InspectSinkProxy) -> Self {
        Self { inspect_sink: Some(proxy), ..self }
    }
}
199
200impl PublishedInspectController {
201    fn new(inspector: Inspector, scope: fasync::Scope, tree_koid: zx::Koid) -> Self {
202        Self { inspector, scope: scope.join(), tree_koid }
203    }
204
205    /// Escrows a frozen copy of the VMO of the associated Inspector replacing the current live
206    /// handle in the server.
207    /// This will not capture lazy nodes or properties.
208    #[cfg(fuchsia_api_level_at_least = "HEAD")]
209    pub async fn escrow_frozen(self, opts: EscrowOptions) -> Option<EscrowToken> {
210        let inspect_sink = match opts.inspect_sink {
211            Some(proxy) => proxy,
212            None => match connect_to_protocol::<finspect::InspectSinkMarker>() {
213                Ok(inspect_sink) => inspect_sink,
214                Err(err) => {
215                    error!(err:%; "failed to spawn the fuchsia.inspect.Tree server");
216                    return None;
217                }
218            },
219        };
220        let (ep0, ep1) = zx::EventPair::create();
221        let Some(vmo) = self.inspector.frozen_vmo_copy() else {
222            error!("failed to get a frozen vmo, aborting escrow");
223            return None;
224        };
225        if let Err(err) = inspect_sink.escrow(finspect::InspectSinkEscrowRequest {
226            vmo: Some(vmo),
227            name: opts.name,
228            token: Some(EscrowToken { token: ep0 }),
229            tree: Some(self.tree_koid.raw_koid()),
230            ..Default::default()
231        }) {
232            error!(err:%; "failed to escrow inspect data");
233            return None;
234        }
235        self.scope.await;
236        Some(EscrowToken { token: ep1 })
237    }
238
239    /// Cancels the running published controller.
240    ///
241    /// The future resolves when no more serving tasks are running.
242    pub async fn cancel(self) {
243        let Self { scope, inspector: _, tree_koid: _ } = self;
244        let scope = pin!(scope);
245        scope.cancel().await;
246    }
247}
248
249impl Future for PublishedInspectController {
250    type Output = ();
251
252    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
253        let this = self.project();
254        this.scope.poll(cx)
255    }
256}
257
#[cfg(test)]
mod tests {
    use super::*;
    use assert_matches::assert_matches;
    use component_events::events::{EventStream, Started};
    use component_events::matcher::EventMatcher;
    use diagnostics_assertions::assert_json_diff;
    use diagnostics_hierarchy::DiagnosticsHierarchy;
    use diagnostics_reader::ArchiveReader;
    use fidl::endpoints::RequestStream;
    use fidl_fuchsia_inspect::{InspectSinkRequest, InspectSinkRequestStream};
    use fuchsia_component_test::ScopedInstance;
    use fuchsia_inspect::InspectorConfig;
    use fuchsia_inspect::reader::snapshot::Snapshot;
    use fuchsia_inspect::reader::{PartialNodeHierarchy, read};

    use futures::{FutureExt, StreamExt};

    // Test component expected to publish a known Inspect hierarchy once started.
    const TEST_PUBLISH_COMPONENT_URL: &str = "#meta/inspect_test_component.cm";

    // publish() on a no-op Inspector must still return a controller whose future
    // stays pending (i.e. the server keeps running rather than erroring out).
    #[fuchsia::test]
    async fn new_no_op() {
        let inspector = Inspector::new(InspectorConfig::default().no_op());
        assert!(!inspector.is_valid());

        // Ensure publish doesn't crash on a No-Op inspector.
        // The idea is that in this context, publish will hang if the server is running
        // correctly. That is, if there is an error condition, it will be immediate.
        assert_matches!(
            publish(&inspector, PublishOptions::default()).unwrap().now_or_never(),
            None
        );
    }

    // End-to-end test: launch the test component, wait for its Started event, then
    // read its published hierarchy back through the archive.
    #[fuchsia::test]
    async fn connect_to_service() -> Result<(), anyhow::Error> {
        let mut event_stream = EventStream::open().await.unwrap();

        let app = ScopedInstance::new_with_name(
            "interesting_name".into(),
            "coll".to_string(),
            TEST_PUBLISH_COMPONENT_URL.to_string(),
        )
        .await
        .expect("failed to create test component");

        let started_stream = EventMatcher::ok()
            .moniker_regex(app.child_name().to_owned())
            .wait::<Started>(&mut event_stream);

        // Connecting to Binder starts the component.
        app.connect_to_binder().expect("failed to connect to Binder protocol");

        started_stream.await.expect("failed to observe Started event");

        // Select the tree published under the name "tree-0" for this instance.
        let hierarchy = ArchiveReader::inspect()
            .add_selector("coll\\:interesting_name:[name=tree-0]root")
            .snapshot()
            .await?
            .into_iter()
            .next()
            .and_then(|result| result.payload)
            .expect("one Inspect hierarchy");

        assert_json_diff!(hierarchy, root: {
            "tree-0": 0u64,
            int: 3i64,
            "lazy-node": {
                a: "test",
                child: {
                    double: 3.25,
                },
            }
        });

        Ok(())
    }

    #[fuchsia::test]
    async fn publish_new_no_op() {
        let inspector = Inspector::new(InspectorConfig::default().no_op());
        assert!(!inspector.is_valid());

        // Ensure publish doesn't crash on a No-Op inspector
        let _task = publish(&inspector, PublishOptions::default());
    }

    // publish() over a caller-provided InspectSink channel must send exactly one
    // Publish request carrying a tree handle that serves the inspector's data.
    #[fuchsia::test]
    async fn publish_on_provided_channel() {
        let (client, server) = zx::Channel::create();
        let inspector = Inspector::default();
        inspector.root().record_string("hello", "world");
        let _inspect_sink_server_task = publish(
            &inspector,
            PublishOptions::default()
                .on_inspect_sink_client(ClientEnd::<finspect::InspectSinkMarker>::new(client)),
        );
        let mut request_stream =
            InspectSinkRequestStream::from_channel(fidl::AsyncChannel::from_channel(server));

        let tree = request_stream.next().await.unwrap();

        assert_matches!(tree, Ok(InspectSinkRequest::Publish {
            payload: finspect::InspectSinkPublishRequest { tree: Some(tree), .. }, ..}) => {
                let hierarchy = read(&tree.into_proxy()).await.unwrap();
                assert_json_diff!(hierarchy, root: {
                    hello: "world"
                });
            }
        );

        // publish() sends exactly one request and then drops its end of the channel.
        assert!(request_stream.next().await.is_none());
    }

    // cancel() must stop the tree server: the tree channel closes once the controller
    // is cancelled.
    #[fuchsia::test]
    async fn cancel_published_controller() {
        let (client, server) = zx::Channel::create();
        let inspector = Inspector::default();
        inspector.root().record_string("hello", "world");
        let controller = publish(
            &inspector,
            PublishOptions::default()
                .on_inspect_sink_client(ClientEnd::<finspect::InspectSinkMarker>::new(client)),
        )
        .expect("create controller");
        let mut request_stream =
            InspectSinkRequestStream::from_channel(fidl::AsyncChannel::from_channel(server));

        let tree = request_stream.next().await.unwrap();

        let tree = assert_matches!(tree, Ok(InspectSinkRequest::Publish {
            payload: finspect::InspectSinkPublishRequest { tree: Some(tree), .. }, ..}) => tree
        );

        assert!(request_stream.next().await.is_none());

        controller.cancel().await;
        // The server task has stopped, so its end of the tree channel is closed.
        fidl::AsyncChannel::from_channel(tree.into_channel())
            .on_closed()
            .await
            .expect("wait closed");
    }

    // escrow_frozen() must send an Escrow request carrying a frozen (copy-on-write)
    // VMO snapshot, the configured name, the original tree's koid, and a token whose
    // peer is returned to the caller.
    #[fuchsia::test]
    async fn controller_supports_escrowing_a_copy() {
        let inspector = Inspector::default();
        inspector.root().record_string("hello", "world");

        let (client, mut request_stream) = fidl::endpoints::create_request_stream();
        let controller =
            publish(&inspector, PublishOptions::default().on_inspect_sink_client(client))
                .expect("got controller");

        // Capture the koid of the originally-published tree to compare against the
        // `tree` field of the Escrow request.
        let request = request_stream.next().await.unwrap();
        let tree_koid = match request {
            Ok(InspectSinkRequest::Publish {
                payload: finspect::InspectSinkPublishRequest { tree: Some(tree), .. },
                ..
            }) => tree.basic_info().unwrap().koid,
            other => {
                panic!("unexpected request: {other:?}");
            }
        };
        let (proxy, mut request_stream) =
            fidl::endpoints::create_proxy_and_stream::<finspect::InspectSinkMarker>();
        // escrow_frozen awaits the serving scope, so drive it concurrently with
        // receiving the Escrow request on the server side.
        let (client_token, request) = futures::future::join(
            controller.escrow_frozen(EscrowOptions {
                name: Some("test".into()),
                inspect_sink: Some(proxy),
            }),
            request_stream.next(),
        )
        .await;
        match request {
            Some(Ok(InspectSinkRequest::Escrow {
                payload:
                    finspect::InspectSinkEscrowRequest {
                        vmo: Some(vmo),
                        name: Some(name),
                        token: Some(EscrowToken { token }),
                        tree: Some(tree),
                        ..
                    },
                ..
            })) => {
                assert_eq!(name, "test");
                assert_eq!(tree, tree_koid.raw_koid());

                // An update to the inspector isn't reflected here, since it was CoW.
                inspector.root().record_string("hey", "not there");

                let snapshot = Snapshot::try_from(&vmo).expect("valid vmo");
                let hierarchy: DiagnosticsHierarchy =
                    PartialNodeHierarchy::try_from(snapshot).expect("valid snapshot").into();
                assert_json_diff!(hierarchy, root: {
                    hello: "world"
                });
                // The returned token must be the peer of the escrowed token.
                assert_eq!(
                    client_token.unwrap().token.basic_info().unwrap().koid,
                    token.basic_info().unwrap().related_koid
                );
            }
            other => {
                panic!("unexpected request: {other:?}");
            }
        };
    }
}
464}