1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
// Copyright 2022 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use anyhow::{anyhow, Context as _};
use async_utils::stream::FlattenUnorderedExt as _;
use component_events::events::{self};
use fidl::endpoints::Proxy as _;
use fidl::endpoints::{ControlHandle as _, RequestStream as _};
use fidl_fuchsia_component as fcomponent;
use fidl_fuchsia_component_runner as frunner;
use fidl_fuchsia_data as fdata;
use fidl_fuchsia_io as fio;
use fidl_fuchsia_netemul as fnetemul;
use fidl_fuchsia_sys2 as fsys2;
use fidl_fuchsia_test as ftest;
use fuchsia_component::client::{
    connect_to_named_protocol_at_dir_root, connect_to_protocol_at_dir_root,
};
use fuchsia_component::server::{ServiceFs, ServiceFsDir};
use fuchsia_zircon as zx;
use futures::{FutureExt as _, StreamExt as _, TryStreamExt as _};
use log::{error, info, warn};

mod config;

// Entry point: serves the `fuchsia.component.runner/ComponentRunner` protocol
// and dispatches every incoming request to `handle_runner_request`, logging
// (rather than propagating) per-request errors so one bad request does not
// tear down the runner.
#[fuchsia_async::run_singlethreaded]
async fn main() -> Result<(), anyhow::Error> {
    diagnostics_log::init!();
    info!("started");

    // Expose `ComponentRunner` in this component's outgoing `/svc` directory.
    // The service closure just yields each connection's request stream; the
    // streams are merged below.
    let mut fs = ServiceFs::new_local();
    let _: &mut ServiceFsDir<'_, _> =
        fs.dir("svc").add_fidl_service(|s: frunner::ComponentRunnerRequestStream| s);
    let _: &mut ServiceFs<_> = fs.take_and_serve_directory_handle()?;
    // Merge all connections' request streams into a single stream of requests
    // and handle each one.
    //
    // NOTE(review): `and_then` drives each `handle_runner_request` future to
    // completion as part of polling the stream, so request handling may
    // effectively be serialized even though the final combinator is
    // `for_each_concurrent` — confirm this is the intended concurrency.
    fs.fuse()
        .flatten_unordered()
        .map(|r| r.context("error reading request stream"))
        .and_then(handle_runner_request)
        .for_each_concurrent(None, |r| async {
            r.unwrap_or_else(|e| error!("error handling component runner requests: {:?}", e));
        })
        .await;

    Ok(())
}

// Performs any necessary test setup for the component described by `program`:
// reads the virtual network configuration from the program dictionary and
// applies it via the configurable netstacks routed into the test root's
// namespace, using the test root's `fuchsia.sys2/LifecycleController` to start
// non-test components once setup is complete.
//
// On success, returns a handle to the configured network environment (which
// must be kept alive for the duration of the test), along with the '/svc'
// directory from the test root's namespace.
async fn test_setup(
    program: fdata::Dictionary,
    namespace: Vec<frunner::ComponentNamespaceEntry>,
) -> Result<(config::NetworkEnvironment, fio::DirectoryProxy), anyhow::Error> {
    // Retrieve the '/svc' directory from the test root's namespace, so that we
    // can:
    // - access the `fuchsia.test/Suite` protocol from the test driver
    // - access any netstacks that need to be configured
    // - use the `fuchsia.sys2/LifecycleController` for the test root to start
    //   non-test components once test setup is complete
    let svc_dir = namespace
        .into_iter()
        .find_map(|frunner::ComponentNamespaceEntry { path, directory, .. }| {
            // `map_or`/`then_some` instead of `map(..).unwrap_or(..)` and
            // `then(|| ..)` (clippy: `map_unwrap_or`,
            // `unnecessary_lazy_evaluations`); `directory` is already owned,
            // so eager evaluation is free.
            path.map_or(false, |path| path == "/svc").then_some(directory)
        })
        .context("/svc directory not in namespace")?
        .context("directory field not set for /svc namespace entry")?
        .into_proxy()
        .context("client end into proxy")?;

    let lifecycle_controller =
        connect_to_protocol_at_dir_root::<fsys2::LifecycleControllerMarker>(&svc_dir)
            .context("connect to LifecycleController protocol")?;

    let network_environment = config::Config::load_from_program(program)
        .context("retrieving and parsing network configuration")?
        .apply(
            |name| {
                connect_to_named_protocol_at_dir_root::<fnetemul::ConfigurableNetstackMarker>(
                    &svc_dir, &name,
                )
                .context("connect to protocol")
            },
            lifecycle_controller,
        )
        .await
        .context("configuring networking environment")?;

    Ok((network_environment, svc_dir))
}

// Handles a single request on the
// `fuchsia.component.runner/ComponentRunner` protocol.
//
// For a `Start` request: performs test setup, serves the test component's
// outgoing directory (proxying `fuchsia.test/Suite` on success, or closing
// incoming `Suite` channels on setup failure), then waits for component
// manager to stop or kill the component before tearing down the network
// environment.
async fn handle_runner_request(
    request: frunner::ComponentRunnerRequest,
) -> Result<(), anyhow::Error> {
    match request {
        frunner::ComponentRunnerRequest::Start { start_info, controller, control_handle: _ } => {
            let frunner::ComponentStartInfo { resolved_url, program, ns, outgoing_dir, .. } =
                start_info;

            // All of these start-info fields are required to run the test;
            // fail fast if component manager omitted any of them.
            let resolved_url = resolved_url.context("component URL missing from start info")?;
            let program = program.context("program missing from start info")?;
            let namespace = ns.context("namespace missing from start info")?;
            let outgoing_dir =
                outgoing_dir.context("outgoing directory missing from start info")?;

            // Backs the test component's outgoing directory; what is served on
            // it depends on whether test setup succeeds below.
            let mut fs = ServiceFs::new_local();
            let (
                // Keep around the handles to the virtual networks and endpoints we created, so
                // that they're not cleaned up before test execution is complete.
                _network_environment,
                test_stopped_fut,
                component_epitaph,
            ) = match test_setup(program, namespace).await {
                Ok((env, svc_dir)) => {
                    // Retrieve the component event stream from the test root so we can observe its
                    // `destroyed` lifecycle event. The test root will only be destroyed once all
                    // its child components have stopped.
                    let connection =
                        connect_to_protocol_at_dir_root::<fcomponent::EventStreamMarker>(&svc_dir)
                            .context("connect to protocol")?;
                    connection
                        .wait_for_ready()
                        .await
                        .context("wait for event subscription to complete")?;
                    let mut event_stream = events::EventStream::new_v2(connection);
                    // Resolves once the test root (moniker ".") reports its
                    // `destroyed` event; awaited at the bottom of this function.
                    let test_stopped_fut = async move {
                        component_events::matcher::EventMatcher::ok()
                            .moniker(".")
                            .wait::<events::Destroyed>(&mut event_stream)
                            .await
                    };

                    // Proxy `fuchsia.test/Suite` requests at the test root's outgoing directory,
                    // where the test manager will expect to be able to access it, to the '/svc'
                    // directory in the test root's namespace, where the protocol was routed from
                    // the test driver.
                    //
                    // TODO(https://fxbug.dev/108786): Use Proxy::into_client_end when available.
                    let svc_dir = std::sync::Arc::new(fidl::endpoints::ClientEnd::new(
                        svc_dir.into_channel().expect("proxy into channel").into_zx_channel(),
                    ));
                    let _: &mut ServiceFsDir<'_, _> =
                        fs.dir("svc").add_proxy_service_to::<ftest::SuiteMarker, ()>(svc_dir);

                    (Some(env), Some(test_stopped_fut), zx::Status::OK)
                }
                Err(e) => {
                    error!("failed to set up test {}: {:?}", resolved_url, e);
                    // The runner could just bail when test setup fails, and in doing so, close both
                    // the `fuchsia.test/Suite` channel to test_manager and the component controller
                    // channel to component_manager. However, this would lead to a race between
                    // component shutdown and test failure.
                    //
                    // To synchronize these processes, continue to serve `fuchsia.test/Suite`
                    // (closing incoming request channels) until the runner receives a Stop request
                    // from component_manager, then shut down the component.
                    //
                    // TODO(https://fxbug.dev/94888): communicate the invalid component
                    // configuration to the test manager (via an epitaph, for example), rather than
                    // just closing the `fuchsia.test/Suite` protocol.
                    let _: &mut ServiceFsDir<'_, _> =
                        fs.dir("svc").add_fidl_service(|stream: ftest::SuiteRequestStream| {
                            stream.control_handle().shutdown()
                        });
                    (
                        None,
                        None,
                        zx::Status::from_raw(fcomponent::Error::InstanceCannotStart as i32),
                    )
                }
            };
            let serve_test_suite = fs
                .serve_connection(outgoing_dir)
                .context("serve connection on test component's outgoing dir")?
                .collect::<()>();

            let mut request_stream =
                controller.into_stream().context("server end into request stream")?;

            // Serve the outgoing directory while waiting for component manager
            // to send a request (or close the client end) on the component
            // controller channel. The service fs runs until all its connections
            // close, so its completing first indicates something went wrong.
            let request = futures::select! {
                () = serve_test_suite.fuse() => panic!("service fs closed unexpectedly"),
                request = request_stream.try_next() => request,
            };

            // If the component manager sent a stop or kill request (or dropped the client
            // end of the component controller channel), clean up any resources and, if the
            // client end wasn't dropped, signal that component execution has finished by
            // closing the channel with an epitaph.
            if let Some(request) = request.context("receive request")? {
                let control_handle = match request {
                    frunner::ComponentControllerRequest::Stop { control_handle } => {
                        info!("received stop request for component {}", resolved_url);
                        control_handle
                    }
                    frunner::ComponentControllerRequest::Kill { control_handle } => {
                        info!("received kill request for component {}", resolved_url);
                        control_handle
                    }
                };
                // `component_epitaph` is OK on successful setup, or
                // InstanceCannotStart if setup failed (chosen above).
                control_handle.shutdown_with_epitaph(component_epitaph);
                // TODO(https://fxbug.dev/81036): remove this once
                // `ControlHandle::shutdown_with_epitaph` actually closes the underlying
                // channel.
                drop(request_stream);
            } else {
                warn!("component manager dropped client end of component controller channel");
            }

            if let Some(fut) = test_stopped_fut {
                // Wait until we observe the test root's `destroyed` event to drop the handle to
                // the network environment, so that we are ensured the entire test realm has
                // completed orderly shutdown by the time we are removing interfaces. This
                // prevents spurious test failures from the virtual network being torn down
                // while some components in the test realm may still be running.
                match fut.await {
                    Ok(destroyed_event) => {
                        let events::DestroyedPayload {} = destroyed_event
                            .result()
                            .map_err(|e| anyhow!("error on component destroyed event: {:?}", e))?;
                    }
                    Err(e) => {
                        // Errors are sometimes expected because when the event stream subscriber
                        // (in this case, the test root) is destroyed, component manager will close
                        // the event stream, in which case we expect PEER_CLOSED.
                        //
                        // TODO(fxbug.dev/114562): PEER_CLOSED is not enough to distinguish between
                        // normal channel closure that happens on component destruction and an
                        // internal error in component manager that caused the channel to close
                        // unexpectedly. Ideally, it should be possible to precisely check whether
                        // the component was destroyed without error.
                        warn!("Missed destroy event: {}", e);
                    }
                }
            }
        }
    }
    Ok(())
}