elf_runner/
lib.rs

1// Copyright 2019 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5mod component;
6mod component_set;
7pub mod config;
8mod crash_handler;
9pub mod crash_info;
10pub mod error;
11mod logger;
12mod memory;
13pub mod process_launcher;
14mod runtime_dir;
15pub mod stdout;
16pub mod vdso_vmo;
17
18use self::component::{ElfComponent, ElfComponentInfo};
19use self::config::ElfProgramConfig;
20use self::error::{JobError, StartComponentError, StartInfoError};
21use self::runtime_dir::RuntimeDirBuilder;
22use self::stdout::bind_streams_to_syslog;
23use crate::component_set::ComponentSet;
24use crate::config::ElfProgramBadHandlesPolicy;
25use crate::crash_info::CrashRecords;
26use crate::memory::reporter::MemoryReporter;
27use crate::vdso_vmo::get_next_vdso_vmo;
28use ::routing::policy::ScopedPolicyChecker;
29use chrono::DateTime;
30use fidl::endpoints::{ClientEnd, ServerEnd};
31use fidl_fuchsia_component_runner::{
32    ComponentDiagnostics, ComponentTasks, Task as DiagnosticsTask,
33};
34use fidl_fuchsia_process_lifecycle::{LifecycleMarker, LifecycleProxy};
35use fuchsia_async::{self as fasync, TimeoutExt};
36use fuchsia_runtime::{HandleInfo, HandleType, UtcClock, duplicate_utc_clock_handle, job_default};
37use futures::TryStreamExt;
38use futures::channel::oneshot;
39use log::{trace, warn};
40use moniker::Moniker;
41use namespace::Namespace;
42use runner::StartInfo;
43use runner::component::StopInfo;
44use std::collections::{HashMap, HashSet};
45use std::mem;
46use std::path::Path;
47use std::sync::Arc;
48use vfs::execution_scope::ExecutionScope;
49use zx::HandleBased;
50use {
51    fidl_fuchsia_component as fcomp, fidl_fuchsia_component_runner as fcrunner,
52    fidl_fuchsia_io as fio, fidl_fuchsia_memory_attribution as fattribution,
53    fidl_fuchsia_process as fproc,
54};
55
// Maximum time that the runner will wait for the break_on_start eventpair to be signaled.
// This is set to prevent debuggers from blocking us for too long, either intentionally
// or unintentionally.
59const MAX_WAIT_BREAK_ON_START: zx::MonotonicDuration = zx::MonotonicDuration::from_millis(300);
60
61// Minimum timer slack amount and default mode. The amount should be large enough to allow for some
62// coalescing of timers, but small enough to ensure applications don't miss deadlines.
63//
64// TODO(https://fxbug.dev/42120293): For now, set the value to 50us to avoid delaying performance-critical
65// timers in Scenic and other system services.
66const TIMER_SLACK_DURATION: zx::MonotonicDuration = zx::MonotonicDuration::from_micros(50);
67
68// Rights used when duplicating the UTC clock handle.
69//
70// Formed out of:
71// * `zx::Rights::BASIC`, but
72// * with `zx::Rights::WRITE` stripped (UTC clock is normally read-only), and
73// * with `zx::Rights::INSPECT` added (so that `ZX_INFO_CLOCK_MAPPED_SIZE` can be queried).
74//
75// Rather than subtracting `WRITE` from `BASIC`, we build the rights explicitly to avoid
76// including unintended rights by accident.
77//
78// The bitwise `|` operator for `bitflags` is implemented through the `std::ops::BitOr` trait,
79// which cannot be used in a const context. The workaround is to bitwise OR the raw bits.
80const DUPLICATE_CLOCK_RIGHTS: zx::Rights = zx::Rights::from_bits_truncate(
81    zx::Rights::READ.bits() // BASIC
82        | zx::Rights::WAIT.bits() // BASIC
83        | zx::Rights::DUPLICATE.bits() // BASIC
84        | zx::Rights::TRANSFER.bits() // BASIC
85        | zx::Rights::INSPECT.bits()
86        // Allows calls to zx_clock_read_mappable and zx_clock_get_details_mappable.
87        // Since "regular" read and details only require READ, and mappable read
88        // and details read the clock the same way, it seems safe to include MAP
89        // in this set of rights.
90        | zx::Rights::MAP.bits(),
91);
92
// Mapping of component moniker prefixes to "acceptable" exit codes.
//
// There are some ELF programs that, upon exit, produce certain exit codes that
// are a "normal" part of operation. The most interesting of these from Fuchsia's
// perspective is the sshd binary, which returns a 255 exit code when the client
// hangs up unexpectedly (e.g. sending SIGINT to a running ssh process).
//
// Due to how `ffx` interacts with the Target over ssh in a user-interactive mode,
// it is commonplace for the user to SIGINT their locally running ffx processes.
// That SIGINTs the ssh process running on the Host, which produces misleading
// logs on the Target implying that sshd has had an error (when in fact there is none).
//
// If this list grows significantly (not expected), we may consider adding
// this as a formal configuration option somewhere. That said, this is (currently)
// only for suppressing diagnostic logs, so this is unlikely.
108static MONIKER_PREFIXES_TO_ACCEPTABLE_EXIT_CODES: std::sync::LazyLock<
109    HashMap<&'static str, Vec<i64>>,
110> = std::sync::LazyLock::new(|| {
111    let mut m = HashMap::new();
112    m.insert("core/sshd-host/shell:sshd-", vec![255]);
113    m
114});
115
117/// Runs components with ELF binaries.
118pub struct ElfRunner {
119    /// Each ELF component run by this runner will live inside a job that is a
120    /// child of this job.
121    job: zx::Job,
122
123    launcher_connector: process_launcher::Connector,
124
125    /// If `utc_clock` is populated then that Clock's handle will
126    /// be passed into the newly created process. Otherwise, the UTC
127    /// clock will be duplicated from current process' process table.
128    /// The latter is typically the case in unit tests and nested
129    /// component managers.
130    utc_clock: Option<Arc<UtcClock>>,
131
132    crash_records: CrashRecords,
133
134    /// Tracks the ELF components that are currently running under this runner.
135    components: Arc<ComponentSet>,
136
    /// Handles reporting memory changes to an observer.
    memory_reporter: MemoryReporter,

    /// Tasks that support the runner are launched in this scope.
    scope: ExecutionScope,
142
143    /// Environment variables to be injected in the form KEY=VALUE.
144    /// Values are shadowed by identical keys found in the component manifest.
145    additional_environ: Vec<String>,
146}
147
148/// The job for a component.
149pub enum Job {
150    Single(zx::Job),
151    Multiple { parent: zx::Job, child: zx::Job },
152}
153
154impl Job {
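    /// Returns the outermost job created for the component. The runner registers its
    /// crash-recording exception handler on this job.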
155    fn top(&self) -> &zx::Job {
156        match self {
157            Job::Single(job) => job,
158            Job::Multiple { parent, child: _ } => parent,
159        }
160    }
161
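    /// Returns the job that directly holds the component's process.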
162    fn proc(&self) -> &zx::Job {
163        match self {
164            Job::Single(job) => job,
165            Job::Multiple { parent: _, child } => child,
166        }
167    }
168}
169
/// Resources and data needed to launch a component, generated by
/// [`ElfComponentLaunchInfo::new`]. This is a public type so other runners may use it to
/// assemble the launch info independently of [`ElfRunner`].
173#[derive(Debug)]
174pub struct ElfComponentLaunchInfo {
175    pub ns: Namespace,
176    pub handle_infos: Vec<fproc::HandleInfo>,
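    /// A duplicate of the UTC clock handed to the component's process, retained so the
    /// runner can wait for the clock to start.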
177    pub utc_clock: UtcClock,
178    pub lifecycle_client: Option<LifecycleProxy>,
179    pub outgoing_directory: Option<ClientEnd<fio::DirectoryMarker>>,
180    pub local_scope: ExecutionScope,
181}
182
183impl ElfComponentLaunchInfo {
184    pub fn new(
185        start_info: &mut StartInfo,
186        program_config: &ElfProgramConfig,
187        utc_clock: Option<&UtcClock>,
188    ) -> Result<Self, StartComponentError> {
189        // Convert the directories into proxies, so we can find "/pkg" and open "lib" and bin_path
190        let namespace = mem::replace(&mut start_info.namespace, Default::default());
191        let ns = namespace::Namespace::try_from(namespace)
192            .map_err(StartComponentError::NamespaceError)?;
193
194        let config_vmo =
195            start_info.encoded_config.take().map(runner::get_config_vmo).transpose()?;
196
197        let next_vdso = program_config.use_next_vdso.then(get_next_vdso_vmo).transpose()?;
198
199        let (lifecycle_client, lifecycle_server) = if program_config.notify_lifecycle_stop {
200            // Creating a channel is not expected to fail.
201            let (client, server) = fidl::endpoints::create_proxy::<LifecycleMarker>();
202            (Some(client), Some(server.into_channel()))
203        } else {
204            (None, None)
205        };
206
207        // Take the UTC clock handle out of `start_info.numbered_handles`, if available.
208        let utc_handle = start_info
209            .numbered_handles
210            .iter()
211            .position(|handles| handles.id == HandleInfo::new(HandleType::ClockUtc, 0).as_raw())
212            .map(|position| start_info.numbered_handles.swap_remove(position).handle);
213
214        let utc_clock = if let Some(handle) = utc_handle {
215            zx::Clock::from(handle)
216        } else {
217            Self::duplicate_utc_clock(utc_clock)
218                .map_err(StartComponentError::UtcClockDuplicateFailed)?
219        };
220
221        // Duplicate the clock handle again, used later to wait for the clock to start, while
222        // the original handle is passed to the process.
223        let utc_clock_dup = utc_clock
224            .duplicate_handle(zx::Rights::SAME_RIGHTS)
225            .map_err(StartComponentError::UtcClockDuplicateFailed)?;
226
227        // If the component supports memory attribution, clone its outgoing directory connection
228        // so that we may later connect to it.
229        let outgoing_directory = if program_config.memory_attribution {
230            let Some(outgoing_dir) = start_info.outgoing_dir.take() else {
231                return Err(StartComponentError::StartInfoError(
232                    StartInfoError::MissingOutgoingDir,
233                ));
234            };
235            let (outgoing_dir_client, outgoing_dir_server) = fidl::endpoints::create_endpoints();
236            start_info.outgoing_dir = Some(outgoing_dir_server);
237            fdio::open_at(
238                outgoing_dir_client.channel(),
239                ".",
240                fio::Flags::PROTOCOL_DIRECTORY
241                    | fio::PERM_READABLE
242                    | fio::PERM_WRITABLE
243                    | fio::PERM_EXECUTABLE,
244                outgoing_dir.into_channel(),
245            )
246            .unwrap();
247            Some(outgoing_dir_client)
248        } else {
249            None
250        };
251
252        // Create procarg handles.
253        let mut handle_infos = Self::create_handle_infos(
254            start_info.outgoing_dir.take().map(|dir| dir.into_channel()),
255            lifecycle_server,
256            utc_clock,
257            next_vdso,
258            config_vmo,
259        );
260
261        // Add stdout and stderr handles that forward to syslog.
262        let (local_scope, stdout_and_stderr_handles) =
263            bind_streams_to_syslog(&ns, program_config.stdout_sink, program_config.stderr_sink);
264        handle_infos.extend(stdout_and_stderr_handles);
265
266        // Add any external numbered handles.
267        let numbered_handles = mem::replace(&mut start_info.numbered_handles, Default::default());
268        handle_infos.extend(numbered_handles);
269
270        // If the program escrowed a dictionary, give it back via `numbered_handles`.
271        if let Some(escrowed_dictionary) = start_info.escrowed_dictionary.take() {
272            handle_infos.push(fproc::HandleInfo {
273                handle: escrowed_dictionary.token.into_handle().into(),
274                id: HandleInfo::new(HandleType::EscrowedDictionary, 0).as_raw(),
275            });
276        } else if let Some(escrowed_dictionary_handle) =
277            start_info.escrowed_dictionary_handle.take()
278        {
279            handle_infos.push(fproc::HandleInfo {
280                handle: escrowed_dictionary_handle.into(),
281                id: HandleInfo::new(HandleType::EscrowedDictionary, 0).as_raw(),
282            });
283        }
284
285        Ok(Self {
286            ns,
287            handle_infos,
288            utc_clock: utc_clock_dup,
289            local_scope,
290            lifecycle_client,
291            outgoing_directory,
292        })
293    }
294
295    fn create_handle_infos(
296        outgoing_dir: Option<zx::Channel>,
297        lifecycle_server: Option<zx::Channel>,
298        utc_clock: UtcClock,
299        next_vdso: Option<zx::Vmo>,
300        config_vmo: Option<zx::Vmo>,
301    ) -> Vec<fproc::HandleInfo> {
302        let mut handle_infos = vec![];
303
304        if let Some(outgoing_dir) = outgoing_dir {
305            handle_infos.push(fproc::HandleInfo {
306                handle: outgoing_dir.into_handle(),
307                id: HandleInfo::new(HandleType::DirectoryRequest, 0).as_raw(),
308            });
309        }
310
311        if let Some(lifecycle_chan) = lifecycle_server {
312            handle_infos.push(fproc::HandleInfo {
313                handle: lifecycle_chan.into_handle(),
314                id: HandleInfo::new(HandleType::Lifecycle, 0).as_raw(),
315            })
316        };
317
318        handle_infos.push(fproc::HandleInfo {
319            handle: utc_clock.into_handle(),
320            id: HandleInfo::new(HandleType::ClockUtc, 0).as_raw(),
321        });
322
323        if let Some(next_vdso) = next_vdso {
324            handle_infos.push(fproc::HandleInfo {
325                handle: next_vdso.into_handle(),
326                id: HandleInfo::new(HandleType::VdsoVmo, 0).as_raw(),
327            });
328        }
329
330        if let Some(config_vmo) = config_vmo {
331            handle_infos.push(fproc::HandleInfo {
332                handle: config_vmo.into_handle(),
333                id: HandleInfo::new(HandleType::ComponentConfigVmo, 0).as_raw(),
334            });
335        }
336
337        handle_infos
338    }
339
    /// Returns a UTC clock handle.
    ///
    /// Duplicates `utc_clock` if provided, or the UTC clock assigned to the current process.
343    fn duplicate_utc_clock(utc_clock: Option<&UtcClock>) -> Result<UtcClock, zx::Status> {
344        if let Some(utc_clock) = utc_clock {
345            utc_clock.duplicate_handle(DUPLICATE_CLOCK_RIGHTS)
346        } else {
347            duplicate_utc_clock_handle(DUPLICATE_CLOCK_RIGHTS)
348        }
349    }
350}
351
352/// Merges environment slices, prioritizing `right` over `left`.
353///
354/// Keys are determined by the first `=` delimiter, falling back to the
355/// full string if missing. Non-shadowed `left` entries are returned first.
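///
/// For example, merging `["A=1", "B=2"]` (left) with `["B=3"]` (right) yields
/// `["A=1", "B=3"]`.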
356fn merge_environ(left: &[String], right: &[String]) -> Vec<String> {
357    fn get_key(kv: &str) -> &str {
358        kv.split('=').next().unwrap_or(kv)
359    }
360    let right_keys: HashSet<&str> = right.iter().map(|kv| get_key(kv.as_str())).collect();
361    let environ: Vec<String> = left
362        .iter()
363        .filter(|&kv| !right_keys.contains(get_key(kv.as_str())))
364        .chain(right.iter())
365        .cloned()
366        .collect();
367    environ
368}
369
370impl ElfRunner {
371    pub fn new(
372        job: zx::Job,
373        launcher_connector: process_launcher::Connector,
374        utc_clock: Option<Arc<UtcClock>>,
375        crash_records: CrashRecords,
376        additional_environ: Vec<String>,
377    ) -> ElfRunner {
378        let scope = ExecutionScope::new();
379        let components = ComponentSet::new(scope.clone());
380        let memory_reporter = MemoryReporter::new(components.clone());
381        ElfRunner {
382            job,
383            launcher_connector,
384            utc_clock,
385            crash_records,
386            components,
387            memory_reporter,
388            scope,
389            additional_environ,
390        }
391    }
392
393    /// Creates a job for a component.
394    fn create_job(&self, program_config: &ElfProgramConfig) -> Result<Job, JobError> {
395        let job = self.job.create_child_job().map_err(JobError::CreateChild)?;
396
397        // Set timer slack.
398        //
399        // Why Late and not Center or Early? Timers firing a little later than requested is not
400        // uncommon in non-realtime systems. Programs are generally tolerant of some
401        // delays. However, timers firing before their deadline can be unexpected and lead to bugs.
402        job.set_policy(zx::JobPolicy::TimerSlack(
403            TIMER_SLACK_DURATION,
404            zx::JobDefaultTimerMode::Late,
405        ))
406        .map_err(JobError::SetPolicy)?;
407
408        // Prevent direct creation of processes.
409        //
410        // The kernel-level mechanisms for creating processes are very low-level. We require that
411        // all processes be created via fuchsia.process.Launcher in order for the platform to
412        // maintain change-control over how processes are created.
413        if !program_config.create_raw_processes {
414            job.set_policy(zx::JobPolicy::Basic(
415                zx::JobPolicyOption::Absolute,
416                vec![(zx::JobCondition::NewProcess, zx::JobAction::Deny)],
417            ))
418            .map_err(JobError::SetPolicy)?;
419        }
420
        // By default, deny the job policy that allows ambiently marking VMOs executable, i.e.
        // calling vmo_replace_as_executable without an appropriate resource handle.
423        if !program_config.ambient_mark_vmo_exec {
424            job.set_policy(zx::JobPolicy::Basic(
425                zx::JobPolicyOption::Absolute,
426                vec![(zx::JobCondition::AmbientMarkVmoExec, zx::JobAction::Deny)],
427            ))
428            .map_err(JobError::SetPolicy)?;
429        }
430
431        if let Some(job_policy_bad_handles) = &program_config.job_policy_bad_handles {
432            let action = match job_policy_bad_handles {
433                ElfProgramBadHandlesPolicy::DenyException => zx::JobAction::DenyException,
434                ElfProgramBadHandlesPolicy::AllowException => zx::JobAction::AllowException,
435            };
436            job.set_policy(zx::JobPolicy::Basic(
437                zx::JobPolicyOption::Absolute,
438                vec![(zx::JobCondition::BadHandle, action)],
439            ))
440            .map_err(JobError::SetPolicy)?;
441        }
442
443        Ok(if program_config.job_with_available_exception_channel {
            // Create a new job to hold the process because the component wants
            // the process to be a direct child of a job that has its exception
            // channel available for taking. Note that we (the ELF runner) use
            // a job's exception channel for crash recording, so we create a new
            // job underneath the original job to hold the process.
449            let child = job.create_child_job().map_err(JobError::CreateChild)?;
450            Job::Multiple { parent: job, child }
451        } else {
452            Job::Single(job)
453        })
454    }
455
456    pub async fn start_component(
457        &self,
458        start_info: fcrunner::ComponentStartInfo,
459        checker: &ScopedPolicyChecker,
460    ) -> Result<ElfComponent, StartComponentError> {
461        let start_info: StartInfo =
462            start_info.try_into().map_err(StartInfoError::StartInfoError)?;
463
464        let resolved_url = start_info.resolved_url.clone();
465
        // This also checks the relevant security policy for the config it parses, using the
        // provided PolicyChecker.
468        let program_config = ElfProgramConfig::parse_and_check(&start_info.program, Some(checker))
469            .map_err(|err| {
470                StartComponentError::StartInfoError(StartInfoError::ProgramError(err))
471            })?;
472
473        let main_process_critical = program_config.main_process_critical;
474        let res: Result<ElfComponent, StartComponentError> = self
475            .start_component_helper(start_info, Some(checker.scope.clone()), program_config)
476            .await;
477        match res {
478            Err(e) if main_process_critical => {
479                panic!(
480                    "failed to launch component with a critical process ({:?}): {:?}",
481                    &resolved_url, e
482                )
483            }
484            x => x,
485        }
486    }
487
488    async fn start_component_helper(
489        &self,
490        mut start_info: StartInfo,
491        moniker: Option<Moniker>,
492        program_config: ElfProgramConfig,
493    ) -> Result<ElfComponent, StartComponentError> {
494        let moniker = moniker.unwrap_or_else(|| Moniker::root());
495
496        // Fail early if there are clock issues.
497        let boot_clock = zx::Clock::<zx::MonotonicTimeline, zx::BootTimeline>::create(
498            zx::ClockOpts::CONTINUOUS,
499            /*backstop=*/ None,
500        )
501        .map_err(StartComponentError::BootClockCreateFailed)?;
502
503        let ElfComponentLaunchInfo {
504            ns,
505            handle_infos,
506            utc_clock,
507            lifecycle_client,
508            outgoing_directory,
509            local_scope,
510        } = ElfComponentLaunchInfo::new(
511            &mut start_info,
512            &program_config,
513            self.utc_clock.as_ref().map(|c| &**c),
514        )?;
515
516        let resolved_url = &start_info.resolved_url;
517
518        // Create a job for this component that will contain its process.
519        let job = self.create_job(&program_config)?;
520
521        crash_handler::run_exceptions_server(
522            &self.scope,
523            job.top(),
524            moniker.clone(),
525            resolved_url.clone(),
526            self.crash_records.clone(),
527        )
528        .map_err(StartComponentError::ExceptionRegistrationFailed)?;
529
530        // Create and serve the runtime dir.
531        let runtime_dir_server_end = start_info
532            .runtime_dir
533            .ok_or(StartComponentError::StartInfoError(StartInfoError::MissingRuntimeDir))?;
534        let job_koid = job.proc().koid().map_err(StartComponentError::JobGetKoidFailed)?.raw_koid();
535
536        let runtime_dir = RuntimeDirBuilder::new(runtime_dir_server_end)
537            .args(program_config.args.clone())
538            .job_id(job_koid)
539            .serve();
540
541        // Configure the process launcher.
542        let proc_job_dup = job
543            .proc()
544            .duplicate_handle(zx::Rights::SAME_RIGHTS)
545            .map_err(StartComponentError::JobDuplicateFailed)?;
546
547        let name = Path::new(resolved_url)
548            .file_name()
549            .and_then(|filename| filename.to_str())
550            .ok_or_else(|| {
551                StartComponentError::StartInfoError(StartInfoError::BadResolvedUrl(
552                    resolved_url.clone(),
553                ))
554            })?;
555
556        // Wait on break_on_start with a timeout and don't fail.
557        if let Some(break_on_start) = start_info.break_on_start {
558            fasync::OnSignals::new(&break_on_start, zx::Signals::OBJECT_PEER_CLOSED)
559                .on_timeout(MAX_WAIT_BREAK_ON_START, || Err(zx::Status::TIMED_OUT))
560                .await
561                .err()
562                .map(|error| warn!(moniker:%, error:%; "Failed to wait break_on_start"));
563        }
564
565        let environs = merge_environ(
566            &self.additional_environ,
567            program_config.environ.as_deref().unwrap_or_default(),
568        );
569
570        // Connect to `fuchsia.process.Launcher`.
571        let launcher = self
572            .launcher_connector
573            .connect()
574            .map_err(|err| StartComponentError::ProcessLauncherConnectError(err.into()))?;
575
576        let launch_info =
577            runner::component::configure_launcher(runner::component::LauncherConfigArgs {
578                bin_path: &program_config.binary,
579                name,
580                options: program_config.process_options(),
581                args: Some(program_config.args.clone()),
582                ns,
583                job: Some(proc_job_dup),
584                handle_infos: Some(handle_infos),
585                name_infos: None,
586                environs: (!environs.is_empty()).then_some(environs),
587                launcher: &launcher,
588                loader_proxy_chan: None,
589                executable_vmo: None,
590            })
591            .await?;
592
593        // Launch the process.
594        let (status, process) = launcher
595            .launch(launch_info)
596            .await
597            .map_err(StartComponentError::ProcessLauncherFidlError)?;
598        zx::Status::ok(status).map_err(StartComponentError::CreateProcessFailed)?;
599        let process = process.unwrap(); // Process is present iff status is OK.
600        if program_config.main_process_critical {
601            job_default()
602                .set_critical(zx::JobCriticalOptions::RETCODE_NONZERO, &process)
603                .map_err(StartComponentError::ProcessMarkCriticalFailed)
604                .expect("failed to set process as critical");
605        }
606
607        let pid = process.koid().map_err(StartComponentError::ProcessGetKoidFailed)?.raw_koid();
608
609        // Add process ID to the runtime dir.
610        runtime_dir.add_process_id(pid);
611
612        fuchsia_trace::instant!(
613            c"component:start",
614            c"elf",
615            fuchsia_trace::Scope::Thread,
616            "moniker" => format!("{}", moniker).as_str(),
617            "url" => resolved_url.as_str(),
618            "pid" => pid
619        );
620
621        // Add process start time to the runtime dir.
622        let process_start_instant_mono =
623            process.info().map_err(StartComponentError::ProcessInfoFailed)?.start_time;
624        runtime_dir.add_process_start_time(process_start_instant_mono.into_nanos());
625
626        // Add UTC estimate of the process start time to the runtime dir.
627        let utc_clock_started = fasync::OnSignals::new(&utc_clock, zx::Signals::CLOCK_STARTED)
628            .on_timeout(zx::MonotonicInstant::after(zx::MonotonicDuration::default()), || {
629                Err(zx::Status::TIMED_OUT)
630            })
631            .await
632            .is_ok();
633
634        // The clock transformations needed to map a timestamp on a monotonic timeline
635        // to a timestamp on the UTC timeline.
636        let mono_to_clock_transformation =
637            boot_clock.get_details().map(|details| details.reference_to_synthetic).ok();
638        let boot_to_utc_transformation = utc_clock_started
639            .then(|| utc_clock.get_details().map(|details| details.reference_to_synthetic).ok())
640            .flatten();
641
642        if let Some(clock_transformation) = boot_to_utc_transformation {
643            // This composes two transformations, to get from a timestamp expressed in
644            // nanoseconds on the monotonic timeline, to our best estimate of the
645            // corresponding UTC date-time.
646            //
647            // The clock transformations are computed before they are applied. If
648            // a suspend intervenes exactly between the computation and application,
649            // the timelines will drift away during sleep, causing a wrong date-time
650            // to be exposed in `runtime_dir`.
651            //
652            // This should not be a huge issue in practice, as the chances of that
653            // happening are vanishingly small.
654            let maybe_time_utc = mono_to_clock_transformation
655                .map(|t| t.apply(process_start_instant_mono))
656                .map(|time_boot| clock_transformation.apply(time_boot));
657
658            if let Some(utc_timestamp) = maybe_time_utc {
659                let utc_time_ns = utc_timestamp.into_nanos();
660                let seconds = (utc_time_ns / 1_000_000_000) as i64;
661                let nanos = (utc_time_ns % 1_000_000_000) as u32;
662                let dt = DateTime::from_timestamp(seconds, nanos).unwrap();
663
                // If any of the above values are unavailable (unlikely), then this
                // estimate is simply not added to the runtime dir.
666                runtime_dir.add_process_start_time_utc_estimate(dt.to_string())
667            }
668        };
669
670        Ok(ElfComponent::new(
671            runtime_dir,
672            moniker,
673            job,
674            process,
675            lifecycle_client,
676            program_config.main_process_critical,
677            local_scope,
678            resolved_url.clone(),
679            outgoing_directory,
680            program_config,
681            start_info.component_instance.ok_or(StartComponentError::StartInfoError(
682                StartInfoError::MissingComponentInstanceToken,
683            ))?,
684        ))
685    }
686
687    pub fn get_scoped_runner(
688        self: Arc<Self>,
689        checker: ScopedPolicyChecker,
690    ) -> Arc<ScopedElfRunner> {
691        Arc::new(ScopedElfRunner { runner: self, checker })
692    }
693
694    pub fn serve_memory_reporter(&self, stream: fattribution::ProviderRequestStream) {
695        self.memory_reporter.serve(stream);
696    }
697}
698
699pub struct ScopedElfRunner {
700    runner: Arc<ElfRunner>,
701    checker: ScopedPolicyChecker,
702}
703
704impl ScopedElfRunner {
705    pub fn serve(&self, mut stream: fcrunner::ComponentRunnerRequestStream) {
706        let runner = self.runner.clone();
707        let checker = self.checker.clone();
708        self.scope().spawn(async move {
709            while let Ok(Some(request)) = stream.try_next().await {
710                match request {
711                    fcrunner::ComponentRunnerRequest::Start { start_info, controller, .. } => {
712                        start(&runner, checker.clone(), start_info, controller).await;
713                    }
714                    fcrunner::ComponentRunnerRequest::_UnknownMethod { ordinal, .. } => {
715                        warn!(ordinal:%; "Unknown ComponentRunner request");
716                    }
717                }
718            }
719        });
720    }
721
722    pub async fn start(
723        &self,
724        start_info: fcrunner::ComponentStartInfo,
725        server_end: ServerEnd<fcrunner::ComponentControllerMarker>,
726    ) {
727        start(&self.runner, self.checker.clone(), start_info, server_end).await
728    }
729
730    pub(crate) fn scope(&self) -> &ExecutionScope {
731        &self.runner.scope
732    }
733}
734
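/// Returns true if `code` is an acceptable exit code for the component at `moniker`,
/// i.e. the moniker starts with one of the prefixes in
/// `MONIKER_PREFIXES_TO_ACCEPTABLE_EXIT_CODES` and the code is listed for that prefix
/// (e.g. a component under `core/sshd-host/shell:sshd-` exiting with 255).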
735fn is_acceptable_exit_code(moniker: &Moniker, code: i64) -> bool {
736    let moniker_name = moniker.to_string();
737    MONIKER_PREFIXES_TO_ACCEPTABLE_EXIT_CODES
738        .iter()
739        .any(|(prefix, codes)| moniker_name.starts_with(*prefix) && codes.contains(&code))
740}
741
742/// Starts a component by creating a new Job and Process for the component.
743async fn start(
744    runner: &ElfRunner,
745    checker: ScopedPolicyChecker,
746    start_info: fcrunner::ComponentStartInfo,
747    server_end: ServerEnd<fcrunner::ComponentControllerMarker>,
748) {
749    let resolved_url = start_info.resolved_url.clone().unwrap_or_else(|| "<unknown>".to_string());
750
751    let elf_component = match runner.start_component(start_info, &checker).await {
752        Ok(elf_component) => elf_component,
753        Err(err) => {
754            runner::component::report_start_error(
755                err.as_zx_status(),
756                format!("{}", err),
757                &resolved_url,
758                server_end,
759            );
760            return;
761        }
762    };
763    let elf_component_moniker = elf_component.info().get_moniker().clone();
764
765    let (termination_tx, termination_rx) = oneshot::channel::<StopInfo>();
    // This future waits for a StopInfo from the channel and returns it, or an
    // Error::Internal StopInfo if the channel is closed unexpectedly.
768    let termination_fn = Box::pin(async move {
769        termination_rx
770            .await
771            .unwrap_or_else(|_| {
772                warn!("epitaph oneshot channel closed unexpectedly");
773                StopInfo::from_error(fcomp::Error::Internal, None)
774            })
775            .into()
776    });
777
778    let Some(proc_copy) = elf_component.copy_process() else {
779        runner::component::report_start_error(
780            zx::Status::from_raw(
781                i32::try_from(fcomp::Error::InstanceCannotStart.into_primitive()).unwrap(),
782            ),
783            "Component unexpectedly had no process".to_string(),
784            &resolved_url,
785            server_end,
786        );
787        return;
788    };
789
790    let component_diagnostics = elf_component
791        .info()
792        .copy_job_for_diagnostics()
793        .map(|job| ComponentDiagnostics {
794            tasks: Some(ComponentTasks {
795                component_task: Some(DiagnosticsTask::Job(job.into())),
796                ..Default::default()
797            }),
798            ..Default::default()
799        })
800        .map_err(|error| {
801            warn!(error:%; "Failed to copy job for diagnostics");
802            ()
803        })
804        .ok();
805
806    let (server_stream, control) = server_end.into_stream_and_control_handle();
807
808    // Spawn a future that watches for the process to exit
809    runner.scope.spawn({
810        let resolved_url = resolved_url.clone();
811        async move {
812            fasync::OnSignals::new(&proc_copy.as_handle_ref(), zx::Signals::PROCESS_TERMINATED)
813                .await
814                .map(|_: fidl::Signals| ()) // Discard.
815                .unwrap_or_else(|error| warn!(error:%; "error creating signal handler"));
816            // Process exit code '0' is considered a clean return.
817            // TODO(https://fxbug.dev/42134825) If we create an epitaph that indicates
818            // intentional, non-zero exit, use that for all non-0 exit
819            // codes.
820            let stop_info = match proc_copy.info() {
821                Ok(zx::ProcessInfo { return_code, .. }) => {
822                    match return_code {
823                        0 => StopInfo::from_ok(Some(return_code)),
                        // Don't log SYSCALL_KILL codes because they are expected in the course
                        // of normal operation. When elf_runner processes a `Kill` method call for
                        // a component, it makes a zx_task_kill syscall which sets this return code.
827                        zx::sys::ZX_TASK_RETCODE_SYSCALL_KILL => StopInfo::from_error(
828                            fcomp::Error::InstanceDied.into(),
829                            Some(return_code),
830                        ),
831                        _ => {
832                            if is_acceptable_exit_code(&elf_component_moniker, return_code) {
833                                trace!(url:% = resolved_url, return_code:%; "component terminated with an acceptable non-zero exit code");
834                            } else {
835                                warn!(url:% = resolved_url, return_code:%;
836                                    "process terminated with abnormal return code");
837                            }
838                            StopInfo::from_error(fcomp::Error::InstanceDied, Some(return_code))
839                        }
840                    }
841                }
842                Err(error) => {
843                    warn!(error:%; "Unable to query process info");
844                    StopInfo::from_error(fcomp::Error::Internal, None)
845                }
846            };
847            termination_tx.send(stop_info).unwrap_or_else(|_| warn!("error sending done signal"));
848        }
849    });
850
851    let mut elf_component = elf_component;
852    runner.components.clone().add(&mut elf_component);
853
    // Create a future which owns and serves the controller
    // channel. The `termination_fn` future completes when the
    // component's main process exits. The controller then sets the
    // epitaph on the controller channel, closes it, and stops
    // serving the protocol.
859    runner.scope.spawn(async move {
860        if let Some(component_diagnostics) = component_diagnostics {
861            control.send_on_publish_diagnostics(component_diagnostics).unwrap_or_else(
862                |error| warn!(url:% = resolved_url, error:%; "sending diagnostics failed"),
863            );
864        }
865        runner::component::Controller::new(elf_component, server_stream, control)
866            .serve(termination_fn)
867            .await;
868    });
869}
870
871#[cfg(test)]
872mod tests {
873    use super::runtime_dir::RuntimeDirectory;
874    use super::*;
875    use anyhow::{Context, Error};
876    use assert_matches::assert_matches;
877    use cm_config::{AllowlistEntryBuilder, JobPolicyAllowlists, SecurityPolicy};
878    use fidl::endpoints::{DiscoverableProtocolMarker, Proxy, create_endpoints, create_proxy};
879    use fidl_connector::Connect;
880    use fidl_fuchsia_component_runner::Task as DiagnosticsTask;
881    use fidl_fuchsia_logger::{LogSinkMarker, LogSinkRequestStream};
882    use fidl_fuchsia_process_lifecycle::LifecycleProxy;
883    use fidl_test_util::spawn_stream_handler;
884    use fuchsia_component::server::{ServiceFs, ServiceObjLocal};
885    use futures::channel::mpsc;
886    use futures::lock::Mutex;
887    use futures::{StreamExt, join};
888    use runner::component::Controllable;
889    use std::str::FromStr;
890    use std::task::Poll;
891    use test_case::test_case;
892    use zx::{AsHandleRef, Task};
893    use {
894        fidl_fuchsia_component as fcomp, fidl_fuchsia_component_runner as fcrunner,
895        fidl_fuchsia_data as fdata, fidl_fuchsia_io as fio, fuchsia_async as fasync,
896    };
897
898    pub enum MockServiceRequest {
899        LogSink(LogSinkRequestStream),
900    }
901
902    pub type MockServiceFs<'a> = ServiceFs<ServiceObjLocal<'a, MockServiceRequest>>;
903
    /// Create a new local fs and install a mock LogSink service into it.
    /// Returns the created directory and corresponding namespace entries.
906    pub fn create_fs_with_mock_logsink()
907    -> Result<(MockServiceFs<'static>, Vec<fcrunner::ComponentNamespaceEntry>), Error> {
908        let (dir_client, dir_server) = create_endpoints::<fio::DirectoryMarker>();
909
910        let mut dir = ServiceFs::new_local();
911        dir.add_fidl_service_at(LogSinkMarker::PROTOCOL_NAME, MockServiceRequest::LogSink);
912        dir.serve_connection(dir_server).context("Failed to add serving channel.")?;
913
914        let namespace = vec![fcrunner::ComponentNamespaceEntry {
915            path: Some("/svc".to_string()),
916            directory: Some(dir_client),
917            ..Default::default()
918        }];
919
920        Ok((dir, namespace))
921    }
922
    // Provide a UTC clock to avoid reusing the system UTC clock in tests, which may
    // limit the changes that are allowed to be made to this code. We create this clock
    // here and start it at the current time.
926    pub fn new_utc_clock_for_tests() -> Arc<UtcClock> {
927        let reference_now = zx::BootInstant::get();
928        let system_utc_clock = duplicate_utc_clock_handle(zx::Rights::SAME_RIGHTS).unwrap();
929        let utc_now = system_utc_clock.read().unwrap();
930
931        let utc_clock_for_tests =
932            Arc::new(UtcClock::create(zx::ClockOpts::MAPPABLE, /*backstop=*/ None).unwrap());
933        // This will start the test-only UTC clock.
934        utc_clock_for_tests
935            .update(zx::ClockUpdate::builder().absolute_value(reference_now, utc_now.into()))
936            .unwrap();
937        utc_clock_for_tests
938    }
939
940    pub fn new_elf_runner_for_test() -> Arc<ElfRunner> {
941        Arc::new(ElfRunner::new(
942            job_default().duplicate(zx::Rights::SAME_RIGHTS).unwrap(),
943            Box::new(process_launcher::BuiltInConnector {}),
944            Some(new_utc_clock_for_tests()),
945            CrashRecords::new(),
946            vec![],
947        ))
948    }
949
950    fn namespace_entry(path: &str, flags: fio::Flags) -> fcrunner::ComponentNamespaceEntry {
        // Get a handle to the directory at `path` in this process's namespace.
952        let ns_path = path.to_string();
953        let ns_dir = fuchsia_fs::directory::open_in_namespace(path, flags).unwrap();
954        let client_end = ns_dir.into_client_end().unwrap();
955        fcrunner::ComponentNamespaceEntry {
956            path: Some(ns_path),
957            directory: Some(client_end),
958            ..Default::default()
959        }
960    }
961
962    fn pkg_dir_namespace_entry() -> fcrunner::ComponentNamespaceEntry {
963        namespace_entry("/pkg", fio::PERM_READABLE | fio::PERM_EXECUTABLE)
964    }
965
966    fn svc_dir_namespace_entry() -> fcrunner::ComponentNamespaceEntry {
967        namespace_entry("/svc", fio::PERM_READABLE)
968    }
969
970    fn hello_world_startinfo(
971        runtime_dir: ServerEnd<fio::DirectoryMarker>,
972    ) -> fcrunner::ComponentStartInfo {
973        let ns = vec![pkg_dir_namespace_entry()];
974
975        fcrunner::ComponentStartInfo {
976            resolved_url: Some(
977                "fuchsia-pkg://fuchsia.com/elf_runner_tests#meta/hello-world-rust.cm".to_string(),
978            ),
979            program: Some(fdata::Dictionary {
980                entries: Some(vec![
981                    fdata::DictionaryEntry {
982                        key: "args".to_string(),
983                        value: Some(Box::new(fdata::DictionaryValue::StrVec(vec![
984                            "foo".to_string(),
985                            "bar".to_string(),
986                        ]))),
987                    },
988                    fdata::DictionaryEntry {
989                        key: "binary".to_string(),
990                        value: Some(Box::new(fdata::DictionaryValue::Str(
991                            "bin/hello_world_rust".to_string(),
992                        ))),
993                    },
994                ]),
995                ..Default::default()
996            }),
997            ns: Some(ns),
998            outgoing_dir: None,
999            runtime_dir: Some(runtime_dir),
1000            component_instance: Some(zx::Event::create()),
1001            ..Default::default()
1002        }
1003    }
1004
1005    /// ComponentStartInfo that points to a non-existent binary.
1006    fn invalid_binary_startinfo(
1007        runtime_dir: ServerEnd<fio::DirectoryMarker>,
1008    ) -> fcrunner::ComponentStartInfo {
1009        let ns = vec![pkg_dir_namespace_entry()];
1010
1011        fcrunner::ComponentStartInfo {
1012            resolved_url: Some(
1013                "fuchsia-pkg://fuchsia.com/elf_runner_tests#meta/does-not-exist.cm".to_string(),
1014            ),
1015            program: Some(fdata::Dictionary {
1016                entries: Some(vec![fdata::DictionaryEntry {
1017                    key: "binary".to_string(),
1018                    value: Some(Box::new(fdata::DictionaryValue::Str(
1019                        "bin/does_not_exist".to_string(),
1020                    ))),
1021                }]),
1022                ..Default::default()
1023            }),
1024            ns: Some(ns),
1025            outgoing_dir: None,
1026            runtime_dir: Some(runtime_dir),
1027            component_instance: Some(zx::Event::create()),
1028            ..Default::default()
1029        }
1030    }
1031
1032    /// Creates start info for a component which runs until told to exit. The
1033    /// ComponentController protocol can be used to stop the component when the
1034    /// test is done inspecting the launched component.
1035    pub fn lifecycle_startinfo(
1036        runtime_dir: ServerEnd<fio::DirectoryMarker>,
1037    ) -> fcrunner::ComponentStartInfo {
1038        let ns = vec![pkg_dir_namespace_entry()];
1039
1040        fcrunner::ComponentStartInfo {
1041            resolved_url: Some(
1042                "fuchsia-pkg://fuchsia.com/lifecycle-example#meta/lifecycle.cm".to_string(),
1043            ),
1044            program: Some(fdata::Dictionary {
1045                entries: Some(vec![
1046                    fdata::DictionaryEntry {
1047                        key: "args".to_string(),
1048                        value: Some(Box::new(fdata::DictionaryValue::StrVec(vec![
1049                            "foo".to_string(),
1050                            "bar".to_string(),
1051                        ]))),
1052                    },
1053                    fdata::DictionaryEntry {
1054                        key: "binary".to_string(),
1055                        value: Some(Box::new(fdata::DictionaryValue::Str(
1056                            "bin/lifecycle_placeholder".to_string(),
1057                        ))),
1058                    },
1059                    fdata::DictionaryEntry {
1060                        key: "lifecycle.stop_event".to_string(),
1061                        value: Some(Box::new(fdata::DictionaryValue::Str("notify".to_string()))),
1062                    },
1063                ]),
1064                ..Default::default()
1065            }),
1066            ns: Some(ns),
1067            outgoing_dir: None,
1068            runtime_dir: Some(runtime_dir),
1069            component_instance: Some(zx::Event::create()),
1070            ..Default::default()
1071        }
1072    }
1073
1074    fn create_child_process(job: &zx::Job, name: &str) -> zx::Process {
1075        let (process, _vmar) = job
1076            .create_child_process(zx::ProcessOptions::empty(), name.as_bytes())
1077            .expect("could not create process");
1078        process
1079    }
1080
1081    fn make_default_elf_component(
1082        lifecycle_client: Option<LifecycleProxy>,
1083        critical: bool,
1084    ) -> (scoped_task::Scoped<zx::Job>, ElfComponent) {
1085        let job = scoped_task::create_child_job().expect("failed to make child job");
1086        let process = create_child_process(&job, "test_process");
1087        let job_copy =
1088            job.duplicate_handle(zx::Rights::SAME_RIGHTS).expect("job handle duplication failed");
1089        let component = ElfComponent::new(
1090            RuntimeDirectory::empty(),
1091            Moniker::default(),
1092            Job::Single(job_copy),
1093            process,
1094            lifecycle_client,
1095            critical,
1096            ExecutionScope::new(),
1097            "".to_string(),
1098            None,
1099            Default::default(),
1100            zx::Event::create(),
1101        );
1102        (job, component)
1103    }
1104
1105    // TODO(https://fxbug.dev/42073224): A variation of this is used in a couple of places. We should consider
1106    // refactoring this into a test util file.
1107    async fn read_file<'a>(root_proxy: &'a fio::DirectoryProxy, path: &'a str) -> String {
1108        let file_proxy =
1109            fuchsia_fs::directory::open_file_async(&root_proxy, path, fuchsia_fs::PERM_READABLE)
1110                .expect("Failed to open file.");
1111        let res = fuchsia_fs::file::read_to_string(&file_proxy).await;
1112        res.expect("Unable to read file.")
1113    }
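
    // A minimal illustrative test (not from the original suite): merge_environ should let
    // entries on the right shadow left entries that share the same KEY.
    #[fuchsia::test]
    fn merge_environ_right_shadows_left() {
        let left = vec!["A=1".to_string(), "B=2".to_string()];
        let right = vec!["B=3".to_string()];
        assert_eq!(merge_environ(&left, &right), vec!["A=1".to_string(), "B=3".to_string()]);
    }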
1114
1115    #[fuchsia::test]
1116    async fn test_runtime_dir_entries() -> Result<(), Error> {
1117        let (runtime_dir, runtime_dir_server) = create_proxy::<fio::DirectoryMarker>();
1118        let start_info = lifecycle_startinfo(runtime_dir_server);
1119
1120        let runner = new_elf_runner_for_test();
1121        let runner = runner.get_scoped_runner(ScopedPolicyChecker::new(
1122            Arc::new(SecurityPolicy::default()),
1123            Moniker::root(),
1124        ));
1125        let (controller, server_controller) = create_proxy::<fcrunner::ComponentControllerMarker>();
1126
1127        runner.start(start_info, server_controller).await;
1128
1129        // Verify that args are added to the runtime directory.
1130        assert_eq!("foo", read_file(&runtime_dir, "args/0").await);
1131        assert_eq!("bar", read_file(&runtime_dir, "args/1").await);
1132
1133        // Process Id, Process Start Time, Job Id will vary with every run of this test. Here we
1134        // verify that they exist in the runtime directory, they can be parsed as integers,
1135        // they're greater than zero and they are not the same value. Those are about the only
1136        // invariants we can verify across test runs.
1137        let process_id = read_file(&runtime_dir, "elf/process_id").await.parse::<u64>()?;
1138        let process_start_time =
1139            read_file(&runtime_dir, "elf/process_start_time").await.parse::<i64>()?;
1140        let process_start_time_utc_estimate =
1141            read_file(&runtime_dir, "elf/process_start_time_utc_estimate").await;
1142        let job_id = read_file(&runtime_dir, "elf/job_id").await.parse::<u64>()?;
1143        assert!(process_id > 0);
1144        assert!(process_start_time > 0);
1145        assert!(process_start_time_utc_estimate.contains("UTC"));
1146        assert!(job_id > 0);
1147        assert_ne!(process_id, job_id);
1148
1149        controller.stop().expect("Stop request failed");
1150        // Wait for the process to exit so the test doesn't pagefault due to an invalid stdout
1151        // handle.
1152        controller.on_closed().await.expect("failed waiting for channel to close");
1153        Ok(())
1154    }
1155
1156    #[fuchsia::test]
1157    async fn test_kill_component() -> Result<(), Error> {
1158        let (job, mut component) = make_default_elf_component(None, false);
1159
1160        let job_info = job.info()?;
1161        assert!(!job_info.exited);
1162
1163        component.kill().await;
1164
1165        let h = job.as_handle_ref();
1166        fasync::OnSignals::new(&h, zx::Signals::TASK_TERMINATED)
1167            .await
1168            .expect("failed waiting for termination signal");
1169
1170        let job_info = job.info()?;
1171        assert!(job_info.exited);
1172        Ok(())
1173    }
1174
1175    #[fuchsia::test]
1176    fn test_stop_critical_component() -> Result<(), Error> {
1177        let mut exec = fasync::TestExecutor::new();
1178        // Presence of the Lifecycle channel isn't used by ElfComponent to sense
1179        // component exit, but it does modify the stop behavior and this is
1180        // what we want to test.
1181        let (lifecycle_client, _lifecycle_server) = create_proxy::<LifecycleMarker>();
1182        let (job, mut component) = make_default_elf_component(Some(lifecycle_client), true);
1183        let process = component.copy_process().unwrap();
1184        let job_info = job.info()?;
1185        assert!(!job_info.exited);
1186
        // Ask the runner to stop the component; it returns a future which
        // completes when the component closes its side of the lifecycle
        // channel.
1190        let mut completes_when_stopped = component.stop();
1191
1192        // The returned future shouldn't complete because we're holding the
1193        // lifecycle channel open.
1194        match exec.run_until_stalled(&mut completes_when_stopped) {
1195            Poll::Ready(_) => {
1196                panic!("runner should still be waiting for lifecycle channel to stop");
1197            }
1198            _ => {}
1199        }
1200        assert_eq!(process.kill(), Ok(()));
1201
1202        exec.run_singlethreaded(&mut completes_when_stopped);
1203
1204        // Check that the runner killed the job hosting the exited component.
1205        let h = job.as_handle_ref();
1206        let termination_fut = async move {
1207            fasync::OnSignals::new(&h, zx::Signals::TASK_TERMINATED)
1208                .await
1209                .expect("failed waiting for termination signal");
1210        };
1211        exec.run_singlethreaded(termination_fut);
1212
1213        let job_info = job.info()?;
1214        assert!(job_info.exited);
1215        Ok(())
1216    }
1217
1218    #[fuchsia::test]
1219    fn test_stop_noncritical_component() -> Result<(), Error> {
1220        let mut exec = fasync::TestExecutor::new();
1221        // Presence of the Lifecycle channel isn't used by ElfComponent to sense
1222        // component exit, but it does modify the stop behavior and this is
1223        // what we want to test.
1224        let (lifecycle_client, lifecycle_server) = create_proxy::<LifecycleMarker>();
1225        let (job, mut component) = make_default_elf_component(Some(lifecycle_client), false);
1226
1227        let job_info = job.info()?;
1228        assert!(!job_info.exited);
1229
        // Ask the runner to stop the component; it returns a future which
        // completes when the component closes its side of the lifecycle
        // channel.
1233        let mut completes_when_stopped = component.stop();
1234
1235        // The returned future shouldn't complete because we're holding the
1236        // lifecycle channel open.
1237        match exec.run_until_stalled(&mut completes_when_stopped) {
1238            Poll::Ready(_) => {
1239                panic!("runner should still be waiting for lifecycle channel to stop");
1240            }
1241            _ => {}
1242        }
1243        drop(lifecycle_server);
1244
1245        match exec.run_until_stalled(&mut completes_when_stopped) {
1246            Poll::Ready(_) => {}
1247            _ => {
1248                panic!("runner future should have completed, lifecycle channel is closed.");
1249            }
1250        }
1251        // Check that the runner killed the job hosting the exited component.
1252        let h = job.as_handle_ref();
1253        let termination_fut = async move {
1254            fasync::OnSignals::new(&h, zx::Signals::TASK_TERMINATED)
1255                .await
1256                .expect("failed waiting for termination signal");
1257        };
1258        exec.run_singlethreaded(termination_fut);
1259
1260        let job_info = job.info()?;
1261        assert!(job_info.exited);
1262        Ok(())
1263    }
1264
1265    /// Stopping a component which doesn't have a lifecycle channel should be
1266    /// equivalent to killing a component directly.
1267    #[fuchsia::test]
1268    async fn test_stop_component_without_lifecycle() -> Result<(), Error> {
1269        let (job, mut component) = make_default_elf_component(None, false);
1270
1271        let job_info = job.info()?;
1272        assert!(!job_info.exited);
1273
1274        component.stop().await;
1275
1276        let h = job.as_handle_ref();
1277        fasync::OnSignals::new(&h, zx::Signals::TASK_TERMINATED)
1278            .await
1279            .expect("failed waiting for termination signal");
1280
1281        let job_info = job.info()?;
1282        assert!(job_info.exited);
1283        Ok(())
1284    }
1285
1286    #[fuchsia::test]
1287    async fn test_stop_critical_component_with_closed_lifecycle() -> Result<(), Error> {
1288        let (lifecycle_client, lifecycle_server) = create_proxy::<LifecycleMarker>();
1289        let (job, mut component) = make_default_elf_component(Some(lifecycle_client), true);
1290        let process = component.copy_process().unwrap();
1291        let job_info = job.info()?;
1292        assert!(!job_info.exited);
1293
1294        // Close the lifecycle channel
1295        drop(lifecycle_server);
1296        // Kill the process because this is what ElfComponent monitors to
1297        // determine if the component exited.
1298        process.kill()?;
1299        component.stop().await;
1300
1301        let h = job.as_handle_ref();
1302        fasync::OnSignals::new(&h, zx::Signals::TASK_TERMINATED)
1303            .await
1304            .expect("failed waiting for termination signal");
1305
1306        let job_info = job.info()?;
1307        assert!(job_info.exited);
1308        Ok(())
1309    }
1310
1311    #[fuchsia::test]
1312    async fn test_stop_noncritical_component_with_closed_lifecycle() -> Result<(), Error> {
1313        let (lifecycle_client, lifecycle_server) = create_proxy::<LifecycleMarker>();
1314        let (job, mut component) = make_default_elf_component(Some(lifecycle_client), false);
1315
1316        let job_info = job.info()?;
1317        assert!(!job_info.exited);
1318
1319        // Close the lifecycle channel
1320        drop(lifecycle_server);
1321        // The lifecycle channel is already closed, so stopping the component
1322        // should proceed without waiting on a notification from it.
1323        component.stop().await;
1324
1325        let h = job.as_handle_ref();
1326        fasync::OnSignals::new(&h, zx::Signals::TASK_TERMINATED)
1327            .await
1328            .expect("failed waiting for termination signal");
1329
1330        let job_info = job.info()?;
1331        assert!(job_info.exited);
1332        Ok(())
1333    }
1334
1335    /// Dropping the component should kill the job hosting it.
1336    #[fuchsia::test]
1337    async fn test_drop() -> Result<(), Error> {
1338        let (job, component) = make_default_elf_component(None, false);
1339
1340        let job_info = job.info()?;
1341        assert!(!job_info.exited);
1342
1343        drop(component);
1344
1345        let h = job.as_handle_ref();
1346        fasync::OnSignals::new(&h, zx::Signals::TASK_TERMINATED)
1347            .await
1348            .expect("failed waiting for termination signal");
1349
1350        let job_info = job.info()?;
1351        assert!(job_info.exited);
1352        Ok(())
1353    }
1354
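    /// Returns `start_info` with the `job_policy_ambient_mark_vmo_exec` program key set to
    /// "true", if the start info carries a program dictionary with entries.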
1355    fn with_mark_vmo_exec(
1356        mut start_info: fcrunner::ComponentStartInfo,
1357    ) -> fcrunner::ComponentStartInfo {
1358        start_info.program.as_mut().map(|dict| {
1359            dict.entries.as_mut().map(|entry| {
1360                entry.push(fdata::DictionaryEntry {
1361                    key: "job_policy_ambient_mark_vmo_exec".to_string(),
1362                    value: Some(Box::new(fdata::DictionaryValue::Str("true".to_string()))),
1363                });
1364                entry
1365            })
1366        });
1367        start_info
1368    }
1369
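    /// Returns `start_info` with the `main_process_critical` program key set to "true", if
    /// the start info carries a program dictionary with entries.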
1370    fn with_main_process_critical(
1371        mut start_info: fcrunner::ComponentStartInfo,
1372    ) -> fcrunner::ComponentStartInfo {
1373        start_info.program.as_mut().map(|dict| {
1374            dict.entries.as_mut().map(|entry| {
1375                entry.push(fdata::DictionaryEntry {
1376                    key: "main_process_critical".to_string(),
1377                    value: Some(Box::new(fdata::DictionaryValue::Str("true".to_string()))),
1378                });
1379                entry
1380            })
1381        });
1382        start_info
1383    }
1384
1385    #[fuchsia::test]
1386    async fn vmex_security_policy_denied() -> Result<(), Error> {
1387        let (_runtime_dir, runtime_dir_server) = create_endpoints::<fio::DirectoryMarker>();
1388        let start_info = with_mark_vmo_exec(lifecycle_startinfo(runtime_dir_server));
1389
1390        // Config does not allowlist any monikers to have access to the job policy.
1391        let runner = new_elf_runner_for_test();
1392        let runner = runner.get_scoped_runner(ScopedPolicyChecker::new(
1393            Arc::new(SecurityPolicy::default()),
1394            Moniker::root(),
1395        ));
1396        let (controller, server_controller) = create_proxy::<fcrunner::ComponentControllerMarker>();
1397
1398        // Attempting to start the component should fail, which we detect by looking for an
1399        // ACCESS_DENIED epitaph on the ComponentController's event stream.
1400        runner.start(start_info, server_controller).await;
1401        assert_matches!(
1402            controller.take_event_stream().try_next().await,
1403            Err(fidl::Error::ClientChannelClosed { status: zx::Status::ACCESS_DENIED, .. })
1404        );
1405
1406        Ok(())
1407    }
1408
1409    #[fuchsia::test]
1410    async fn vmex_security_policy_allowed() -> Result<(), Error> {
1411        let (runtime_dir, runtime_dir_server) = create_proxy::<fio::DirectoryMarker>();
1412        let start_info = with_mark_vmo_exec(lifecycle_startinfo(runtime_dir_server));
1413
1414        let policy = SecurityPolicy {
1415            job_policy: JobPolicyAllowlists {
1416                ambient_mark_vmo_exec: vec![AllowlistEntryBuilder::new().exact("foo").build()],
1417                ..Default::default()
1418            },
1419            ..Default::default()
1420        };
1421        let runner = new_elf_runner_for_test();
1422        let runner = runner.get_scoped_runner(ScopedPolicyChecker::new(
1423            Arc::new(policy),
1424            Moniker::try_from(["foo"]).unwrap(),
1425        ));
1426        let (controller, server_controller) = create_proxy::<fcrunner::ComponentControllerMarker>();
1427        runner.start(start_info, server_controller).await;
1428
1429        // Runtime dir won't exist if the component failed to start.
1430        let process_id = read_file(&runtime_dir, "elf/process_id").await.parse::<u64>()?;
1431        assert!(process_id > 0);
1432        // Component controller should get shut down normally; no ACCESS_DENIED epitaph.
1433        controller.kill().expect("kill failed");
1434
1435        // Expect the usual diagnostics event, then an OnStop event whose termination
1436        // status is InstanceDied and whose exit code matches a killed task, and finally
1437        // the event stream closing.
1438        let mut event_stream = controller.take_event_stream();
1439        expect_diagnostics_event(&mut event_stream).await;
1440
1441        let s = zx::Status::from_raw(
1442            i32::try_from(fcomp::Error::InstanceDied.into_primitive()).unwrap(),
1443        );
1444        expect_on_stop(&mut event_stream, s, Some(zx::sys::ZX_TASK_RETCODE_SYSCALL_KILL)).await;
1445        expect_channel_closed(&mut event_stream).await;
1446        Ok(())
1447    }
1448
1449    #[fuchsia::test]
1450    async fn critical_security_policy_denied() -> Result<(), Error> {
1451        let (_runtime_dir, runtime_dir_server) = create_endpoints::<fio::DirectoryMarker>();
1452        let start_info = with_main_process_critical(hello_world_startinfo(runtime_dir_server));
1453
1454        // Default policy does not allowlist any monikers to be marked as critical
1455        let runner = new_elf_runner_for_test();
1456        let runner = runner.get_scoped_runner(ScopedPolicyChecker::new(
1457            Arc::new(SecurityPolicy::default()),
1458            Moniker::root(),
1459        ));
1460        let (controller, server_controller) = create_proxy::<fcrunner::ComponentControllerMarker>();
1461
1462        // Attempting to start the component should fail, which we detect by looking for an
1463        // ACCESS_DENIED epitaph on the ComponentController's event stream.
1464        runner.start(start_info, server_controller).await;
1465        assert_matches!(
1466            controller.take_event_stream().try_next().await,
1467            Err(fidl::Error::ClientChannelClosed { status: zx::Status::ACCESS_DENIED, .. })
1468        );
1469
1470        Ok(())
1471    }
1472
1473    #[fuchsia::test]
1474    #[should_panic]
1475    async fn fail_to_launch_critical_component() {
1476        let (_runtime_dir, runtime_dir_server) = create_endpoints::<fio::DirectoryMarker>();
1477
1478        // ElfRunner should fail to start the component because this start_info points
1479        // to a binary that does not exist in the test package.
1480        let start_info = with_main_process_critical(invalid_binary_startinfo(runtime_dir_server));
1481
1482        // Components may only be marked main_process_critical if their moniker is
1483        // allowlisted, so make sure we permit this one.
1484        let policy = SecurityPolicy {
1485            job_policy: JobPolicyAllowlists {
1486                main_process_critical: vec![AllowlistEntryBuilder::new().build()],
1487                ..Default::default()
1488            },
1489            ..Default::default()
1490        };
1491        let runner = new_elf_runner_for_test();
1492        let runner =
1493            runner.get_scoped_runner(ScopedPolicyChecker::new(Arc::new(policy), Moniker::root()));
1494        let (controller, server_controller) = create_proxy::<fcrunner::ComponentControllerMarker>();
1495
1496        runner.start(start_info, server_controller).await;
1497
1498        controller
1499            .take_event_stream()
1500            .try_next()
1501            .await
1502            .map(|_: Option<fcrunner::ComponentControllerEvent>| ()) // Discard.
1503            .unwrap_or_else(|error| warn!(error:%; "error reading from event stream"));
1504    }
1505
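    /// Returns start info for the hello-world-rust component with `forward_stdout_to` and
    /// `forward_stderr_to` both set to "log".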
1506    fn hello_world_startinfo_forward_stdout_to_log(
1507        runtime_dir: ServerEnd<fio::DirectoryMarker>,
1508        mut ns: Vec<fcrunner::ComponentNamespaceEntry>,
1509    ) -> fcrunner::ComponentStartInfo {
1510        ns.push(pkg_dir_namespace_entry());
1511
1512        fcrunner::ComponentStartInfo {
1513            resolved_url: Some(
1514                "fuchsia-pkg://fuchsia.com/hello-world-rust#meta/hello-world-rust.cm".to_string(),
1515            ),
1516            program: Some(fdata::Dictionary {
1517                entries: Some(vec![
1518                    fdata::DictionaryEntry {
1519                        key: "binary".to_string(),
1520                        value: Some(Box::new(fdata::DictionaryValue::Str(
1521                            "bin/hello_world_rust".to_string(),
1522                        ))),
1523                    },
1524                    fdata::DictionaryEntry {
1525                        key: "forward_stdout_to".to_string(),
1526                        value: Some(Box::new(fdata::DictionaryValue::Str("log".to_string()))),
1527                    },
1528                    fdata::DictionaryEntry {
1529                        key: "forward_stderr_to".to_string(),
1530                        value: Some(Box::new(fdata::DictionaryValue::Str("log".to_string()))),
1531                    },
1532                ]),
1533                ..Default::default()
1534            }),
1535            ns: Some(ns),
1536            outgoing_dir: None,
1537            runtime_dir: Some(runtime_dir),
1538            component_instance: Some(zx::Event::create()),
1539            ..Default::default()
1540        }
1541    }
1542
1543    // TODO(https://fxbug.dev/42148789): The following function shares a lot of code with
1544    // the //src/sys/component_manager/src/model/namespace.rs tests. Shared
1545    // functionality should be refactored into a common test util lib.
1546    #[fuchsia::test]
1547    async fn enable_stdout_and_stderr_logging() -> Result<(), Error> {
1548        let (mut dir, ns) = create_fs_with_mock_logsink()?;
1549
1550        let run_component_fut = async move {
1551            let (_runtime_dir, runtime_dir_server) = create_endpoints::<fio::DirectoryMarker>();
1552            let start_info = hello_world_startinfo_forward_stdout_to_log(runtime_dir_server, ns);
1553
1554            let runner = new_elf_runner_for_test();
1555            let runner = runner.get_scoped_runner(ScopedPolicyChecker::new(
1556                Arc::new(SecurityPolicy::default()),
1557                Moniker::root(),
1558            ));
1559            let (client_controller, server_controller) =
1560                create_proxy::<fcrunner::ComponentControllerMarker>();
1561
1562            runner.start(start_info, server_controller).await;
1563            let mut event_stream = client_controller.take_event_stream();
1564            expect_diagnostics_event(&mut event_stream).await;
1565            expect_on_stop(&mut event_stream, zx::Status::OK, Some(0)).await;
1566            expect_channel_closed(&mut event_stream).await;
1567        };
1568
1569        // Just check the connection count; other integration tests cover decoding the actual logs.
1570        let service_fs_listener_fut = async {
1571            let mut requests = Vec::new();
1572            while let Some(MockServiceRequest::LogSink(r)) = dir.next().await {
1573                // The client is expecting us to send OnInit, but we're not testing that, so just
1574                // park the requests.
1575                requests.push(r);
1576            }
1577            requests.len()
1578        };
1579
1580        let connection_count = join!(run_component_fut, service_fs_listener_fut).1;
1581
1582        assert_eq!(connection_count, 1);
1583        Ok(())
1584    }
1585
1586    #[fuchsia::test]
1587    async fn on_publish_diagnostics_contains_job_handle() -> Result<(), Error> {
1588        let (runtime_dir, runtime_dir_server) = create_proxy::<fio::DirectoryMarker>();
1589        let start_info = lifecycle_startinfo(runtime_dir_server);
1590
1591        let runner = new_elf_runner_for_test();
1592        let runner = runner.get_scoped_runner(ScopedPolicyChecker::new(
1593            Arc::new(SecurityPolicy::default()),
1594            Moniker::root(),
1595        ));
1596        let (controller, server_controller) = create_proxy::<fcrunner::ComponentControllerMarker>();
1597
1598        runner.start(start_info, server_controller).await;
1599
1600        let job_id = read_file(&runtime_dir, "elf/job_id").await.parse::<u64>().unwrap();
1601        let mut event_stream = controller.take_event_stream();
1602        match event_stream.try_next().await {
1603            Ok(Some(fcrunner::ComponentControllerEvent::OnPublishDiagnostics {
1604                payload:
1605                    ComponentDiagnostics {
1606                        tasks:
1607                            Some(ComponentTasks {
1608                                component_task: Some(DiagnosticsTask::Job(job)), ..
1609                            }),
1610                        ..
1611                    },
1612            })) => {
1613                assert_eq!(job_id, job.koid().unwrap().raw_koid());
1614            }
1615            other => panic!("unexpected event result: {:?}", other),
1616        }
1617
1618        controller.stop().expect("Stop request failed");
1619        // Wait for the process to exit so the test doesn't pagefault due to an invalid stdout
1620        // handle.
1621        controller.on_closed().await.expect("failed waiting for channel to close");
1622
1623        Ok(())
1624    }
1625
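    /// Asserts that the next event on the stream is `OnPublishDiagnostics` carrying a job
    /// as the component task.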
1626    async fn expect_diagnostics_event(event_stream: &mut fcrunner::ComponentControllerEventStream) {
1627        let event = event_stream.try_next().await;
1628        assert_matches!(
1629            event,
1630            Ok(Some(fcrunner::ComponentControllerEvent::OnPublishDiagnostics {
1631                payload: ComponentDiagnostics {
1632                    tasks: Some(ComponentTasks {
1633                        component_task: Some(DiagnosticsTask::Job(_)),
1634                        ..
1635                    }),
1636                    ..
1637                },
1638            }))
1639        );
1640    }
1641
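    /// Asserts that the next event on the stream is `OnStop` with the given termination
    /// status and exit code.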
1642    async fn expect_on_stop(
1643        event_stream: &mut fcrunner::ComponentControllerEventStream,
1644        expected_status: zx::Status,
1645        expected_exit_code: Option<i64>,
1646    ) {
1647        let event = event_stream.try_next().await;
1648        assert_matches!(
1649            event,
1650            Ok(Some(fcrunner::ComponentControllerEvent::OnStop {
1651                payload: fcrunner::ComponentStopInfo { termination_status: Some(s), exit_code, .. },
1652            }))
1653            if s == expected_status.into_raw() &&
1654                exit_code == expected_exit_code
1655        );
1656    }
1657
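    /// Asserts that the event stream has ended, i.e. the controller channel was closed.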
1658    async fn expect_channel_closed(event_stream: &mut fcrunner::ComponentControllerEventStream) {
1659        let event = event_stream.try_next().await;
1660        match event {
1661            Ok(None) => {}
1662            other => panic!("Expected channel closed error, got {:?}", other),
1663        }
1664    }
1665
1666    /// An implementation of `fuchsia.process.Launcher` that sends the complete launch
1667    /// request payload back to the test through an mpsc channel.
1668    struct LauncherConnectorForTest {
1669        sender: mpsc::UnboundedSender<LaunchPayload>,
1670    }
1671
1672    /// Contains all the information passed to fuchsia.process.Launcher before and up to calling
1673    /// Launch/CreateWithoutStarting.
1674    #[derive(Default)]
1675    struct LaunchPayload {
1676        launch_info: Option<fproc::LaunchInfo>,
1677        args: Vec<Vec<u8>>,
1678        environ: Vec<Vec<u8>>,
1679        name_info: Vec<fproc::NameInfo>,
1680        handles: Vec<fproc::HandleInfo>,
1681        options: u32,
1682    }
1683
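    // Serves a mock launcher: each Add*/SetOptions request is accumulated into a
    // `LaunchPayload`, which is sent over the mpsc channel once `Launch` is called.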
1684    impl Connect for LauncherConnectorForTest {
1685        type Proxy = fproc::LauncherProxy;
1686
1687        fn connect(&self) -> Result<Self::Proxy, anyhow::Error> {
1688            let sender = self.sender.clone();
1689            let payload = Arc::new(Mutex::new(LaunchPayload::default()));
1690
1691            Ok(spawn_stream_handler(move |launcher_request| {
1692                let sender = sender.clone();
1693                let payload = payload.clone();
1694                async move {
1695                    let mut payload = payload.lock().await;
1696                    match launcher_request {
1697                        fproc::LauncherRequest::Launch { info, responder } => {
1698                            let process = create_child_process(&info.job, "test_process");
1699                            responder.send(zx::Status::OK.into_raw(), Some(process)).unwrap();
1700
1701                            let mut payload =
1702                                std::mem::replace(&mut *payload, LaunchPayload::default());
1703                            payload.launch_info = Some(info);
1704                            sender.unbounded_send(payload).unwrap();
1705                        }
1706                        fproc::LauncherRequest::CreateWithoutStarting { info: _, responder: _ } => {
1707                            unimplemented!()
1708                        }
1709                        fproc::LauncherRequest::AddArgs { mut args, control_handle: _ } => {
1710                            payload.args.append(&mut args);
1711                        }
1712                        fproc::LauncherRequest::AddEnvirons { mut environ, control_handle: _ } => {
1713                            payload.environ.append(&mut environ);
1714                        }
1715                        fproc::LauncherRequest::AddNames { mut names, control_handle: _ } => {
1716                            payload.name_info.append(&mut names);
1717                        }
1718                        fproc::LauncherRequest::AddHandles { mut handles, control_handle: _ } => {
1719                            payload.handles.append(&mut handles);
1720                        }
1721                        fproc::LauncherRequest::SetOptions { options, .. } => {
1722                            payload.options = options;
1723                        }
1724                    }
1725                }
1726            }))
1727        }
1728    }
1729
1730    #[fuchsia::test]
1731    async fn process_created_with_utc_clock_from_numbered_handles() -> Result<(), Error> {
1732        let (payload_tx, mut payload_rx) = mpsc::unbounded();
1733
1734        let connector = LauncherConnectorForTest { sender: payload_tx };
1735        let runner = ElfRunner::new(
1736            job_default().duplicate(zx::Rights::SAME_RIGHTS).unwrap(),
1737            Box::new(connector),
1738            Some(new_utc_clock_for_tests()),
1739            CrashRecords::new(),
1740            vec![],
1741        );
1742        let policy_checker = ScopedPolicyChecker::new(
1743            Arc::new(SecurityPolicy::default()),
1744            Moniker::try_from(["foo"]).unwrap(),
1745        );
1746
1747        // Create a clock and pass it to the component as the UTC clock through numbered_handles.
1748        let clock = zx::SyntheticClock::create(
1749            zx::ClockOpts::AUTO_START | zx::ClockOpts::MONOTONIC | zx::ClockOpts::MAPPABLE,
1750            None,
1751        )?;
1752        let clock_koid = clock.koid().unwrap();
1753
1754        let (_runtime_dir, runtime_dir_server) = create_proxy::<fio::DirectoryMarker>();
1755        let mut start_info = hello_world_startinfo(runtime_dir_server);
1756        start_info.numbered_handles = Some(vec![fproc::HandleInfo {
1757            handle: clock.into_handle(),
1758            id: HandleInfo::new(HandleType::ClockUtc, 0).as_raw(),
1759        }]);
1760
1761        // Start the component.
1762        let _ = runner
1763            .start_component(start_info, &policy_checker)
1764            .await
1765            .context("failed to start component")?;
1766
1767        let payload = payload_rx.next().await.unwrap();
1768        assert!(
1769            payload
1770                .handles
1771                .iter()
1772                .any(|handle_info| handle_info.handle.koid().unwrap() == clock_koid)
1773        );
1774
1775        Ok(())
1776    }
1777
1778    /// Test visiting running components using [`ComponentSet`].
1779    #[fuchsia::test]
1780    async fn test_enumerate_components() {
1781        use std::sync::atomic::{AtomicUsize, Ordering};
1782
1783        let (_runtime_dir, runtime_dir_server) = create_proxy::<fio::DirectoryMarker>();
1784        let start_info = lifecycle_startinfo(runtime_dir_server);
1785
1786        let runner = new_elf_runner_for_test();
1787        let components = runner.components.clone();
1788
1789        // Initially there are zero components.
1790        let count = Arc::new(AtomicUsize::new(0));
1791        components.clone().visit(|_, _| {
1792            count.fetch_add(1, Ordering::SeqCst);
1793        });
1794        assert_eq!(count.load(Ordering::SeqCst), 0);
1795
1796        // Run a component.
1797        let runner = runner.get_scoped_runner(ScopedPolicyChecker::new(
1798            Arc::new(SecurityPolicy::default()),
1799            Moniker::root(),
1800        ));
1801        let (controller, server_controller) = create_proxy::<fcrunner::ComponentControllerMarker>();
1802        runner.start(start_info, server_controller).await;
1803
1804        // There should now be one component in the set.
1805        let count = Arc::new(AtomicUsize::new(0));
1806        components.clone().visit(|elf_component: &ElfComponentInfo, _| {
1807            assert_eq!(
1808                elf_component.get_url().as_str(),
1809                "fuchsia-pkg://fuchsia.com/lifecycle-example#meta/lifecycle.cm"
1810            );
1811            count.fetch_add(1, Ordering::SeqCst);
1812        });
1813        assert_eq!(count.load(Ordering::SeqCst), 1);
1814
1815        // Stop the component.
1816        controller.stop().unwrap();
1817        controller.on_closed().await.unwrap();
1818
1819        // There should now be zero components in the set.
1820        // Keep retrying until the component is asynchronously deregistered.
1821        loop {
1822            let count = Arc::new(AtomicUsize::new(0));
1823            components.clone().visit(|_, _| {
1824                count.fetch_add(1, Ordering::SeqCst);
1825            });
1826            let count = count.load(Ordering::SeqCst);
1827            assert!(count == 0 || count == 1);
1828            if count == 0 {
1829                break;
1830            }
1831            // Yield to the executor once so that we do not starve the
1832            // asynchronous deregistration task.
1833            yield_to_executor().await;
1834        }
1835    }
1836
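    /// Yields to the executor exactly once: the future wakes itself and returns `Pending`
    /// on its first poll, then completes on the next poll.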
1837    async fn yield_to_executor() {
1838        let mut done = false;
1839        futures::future::poll_fn(|cx| {
1840            if done {
1841                Poll::Ready(())
1842            } else {
1843                done = true;
1844                cx.waker().wake_by_ref();
1845                Poll::Pending
1846            }
1847        })
1848        .await;
1849    }
1850
1851    /// Creates start info for a component which, when run, immediately escrows its
1852    /// outgoing directory and then exits.
1853    pub fn immediate_escrow_startinfo(
1854        outgoing_dir: ServerEnd<fio::DirectoryMarker>,
1855        runtime_dir: ServerEnd<fio::DirectoryMarker>,
1856    ) -> fcrunner::ComponentStartInfo {
1857        let ns = vec![
1858            pkg_dir_namespace_entry(),
1859            // Give the test component LogSink.
1860            svc_dir_namespace_entry(),
1861        ];
1862
1863        fcrunner::ComponentStartInfo {
1864            resolved_url: Some("#meta/immediate_escrow_component.cm".to_string()),
1865            program: Some(fdata::Dictionary {
1866                entries: Some(vec![
1867                    fdata::DictionaryEntry {
1868                        key: "binary".to_string(),
1869                        value: Some(Box::new(fdata::DictionaryValue::Str(
1870                            "bin/immediate_escrow".to_string(),
1871                        ))),
1872                    },
1873                    fdata::DictionaryEntry {
1874                        key: "lifecycle.stop_event".to_string(),
1875                        value: Some(Box::new(fdata::DictionaryValue::Str("notify".to_string()))),
1876                    },
1877                ]),
1878                ..Default::default()
1879            }),
1880            ns: Some(ns),
1881            outgoing_dir: Some(outgoing_dir),
1882            runtime_dir: Some(runtime_dir),
1883            component_instance: Some(zx::Event::create()),
1884            ..Default::default()
1885        }
1886    }
1887
1888    /// Test that an ELF component can send an `OnEscrow` event on its lifecycle
1889    /// channel and that this event is forwarded to the `ComponentController`.
1890    #[fuchsia::test]
1891    async fn test_lifecycle_on_escrow() {
1892        let (outgoing_dir_client, outgoing_dir_server) =
1893            fidl::endpoints::create_endpoints::<fio::DirectoryMarker>();
1894        let (_, runtime_dir_server) = fidl::endpoints::create_endpoints::<fio::DirectoryMarker>();
1895        let start_info = immediate_escrow_startinfo(outgoing_dir_server, runtime_dir_server);
1896
1897        let runner = new_elf_runner_for_test();
1898        let runner = runner.get_scoped_runner(ScopedPolicyChecker::new(
1899            Arc::new(SecurityPolicy::default()),
1900            Moniker::root(),
1901        ));
1902        let (controller, server_controller) = create_proxy::<fcrunner::ComponentControllerMarker>();
1903
1904        runner.start(start_info, server_controller).await;
1905
1906        let mut event_stream = controller.take_event_stream();
1907
1908        expect_diagnostics_event(&mut event_stream).await;
1909
1910        match event_stream.try_next().await {
1911            Ok(Some(fcrunner::ComponentControllerEvent::OnEscrow {
1912                payload: fcrunner::ComponentControllerOnEscrowRequest { outgoing_dir, .. },
1913            })) => {
1914                let outgoing_dir_server = outgoing_dir.unwrap();
1915
1916                assert_eq!(
1917                    outgoing_dir_client.as_handle_ref().basic_info().unwrap().koid,
1918                    outgoing_dir_server.as_handle_ref().basic_info().unwrap().related_koid
1919                );
1920            }
1921            other => panic!("unexpected event result: {:?}", other),
1922        }
1923
1924        expect_on_stop(&mut event_stream, zx::Status::OK, Some(0)).await;
1925        expect_channel_closed(&mut event_stream).await;
1926    }
1927
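    /// Returns start info for the exit-with-code test component, passing `exit_code` to it
    /// as the single program argument.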
1928    fn exit_with_code_startinfo(exit_code: i64) -> fcrunner::ComponentStartInfo {
1929        let (_runtime_dir, runtime_dir_server) = create_proxy::<fio::DirectoryMarker>();
1930        let ns = vec![pkg_dir_namespace_entry()];
1931
1932        fcrunner::ComponentStartInfo {
1933            resolved_url: Some(
1934                "fuchsia-pkg://fuchsia.com/elf_runner_tests#meta/exit-with-code.cm".to_string(),
1935            ),
1936            program: Some(fdata::Dictionary {
1937                entries: Some(vec![
1938                    fdata::DictionaryEntry {
1939                        key: "args".to_string(),
1940                        value: Some(Box::new(fdata::DictionaryValue::StrVec(vec![format!(
1941                            "{}",
1942                            exit_code
1943                        )]))),
1944                    },
1945                    fdata::DictionaryEntry {
1946                        key: "binary".to_string(),
1947                        value: Some(Box::new(fdata::DictionaryValue::Str(
1948                            "bin/exit_with_code".to_string(),
1949                        ))),
1950                    },
1951                ]),
1952                ..Default::default()
1953            }),
1954            ns: Some(ns),
1955            outgoing_dir: None,
1956            runtime_dir: Some(runtime_dir_server),
1957            component_instance: Some(zx::Event::create()),
1958            ..Default::default()
1959        }
1960    }
1961
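    /// Returns start info for the exit-with-code-from-env test component. When `exit_code`
    /// is `Some`, it is passed to the component via the `EXIT_CODE` environment variable.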
1962    fn exit_with_code_startinfo_from_env(exit_code: Option<i64>) -> fcrunner::ComponentStartInfo {
1963        let (_runtime_dir, runtime_dir_server) = create_proxy::<fio::DirectoryMarker>();
1964        let ns = vec![pkg_dir_namespace_entry()];
1965        let environ = match exit_code {
1966            Some(code) => vec![format!("EXIT_CODE={}", code)],
1967            None => vec![],
1968        };
1969        fcrunner::ComponentStartInfo {
1970            resolved_url: Some(
1971                "fuchsia-pkg://fuchsia.com/elf_runner_tests#meta/exit-with-code-from_env.cm"
1972                    .to_string(),
1973            ),
1974            program: Some(fdata::Dictionary {
1975                entries: Some(vec![
1976                    fdata::DictionaryEntry {
1977                        key: "environ".to_string(),
1978                        value: Some(Box::new(fdata::DictionaryValue::StrVec(environ))),
1979                    },
1980                    fdata::DictionaryEntry {
1981                        key: "binary".to_string(),
1982                        value: Some(Box::new(fdata::DictionaryValue::Str(
1983                            "bin/exit_with_code_from_env".to_string(),
1984                        ))),
1985                    },
1986                ]),
1987                ..Default::default()
1988            }),
1989            ns: Some(ns),
1990            outgoing_dir: None,
1991            runtime_dir: Some(runtime_dir_server),
1992            component_instance: Some(zx::Event::create()),
1993            ..Default::default()
1994        }
1995    }
1996
1997    #[test_case(exit_with_code_startinfo(0) ; "args")]
1998    #[test_case(exit_with_code_startinfo_from_env(Some(0)) ; "env")]
1999    #[fuchsia::test]
2000    async fn test_return_code_success(start_info: fcrunner::ComponentStartInfo) {
2001        let runner = new_elf_runner_for_test();
2002        let runner = runner.get_scoped_runner(ScopedPolicyChecker::new(
2003            Arc::new(SecurityPolicy::default()),
2004            Moniker::root(),
2005        ));
2006        let (controller, server_controller) = create_proxy::<fcrunner::ComponentControllerMarker>();
2007        runner.start(start_info, server_controller).await;
2008
2009        let mut event_stream = controller.take_event_stream();
2010        expect_diagnostics_event(&mut event_stream).await;
2011        expect_on_stop(&mut event_stream, zx::Status::OK, Some(0)).await;
2012        expect_channel_closed(&mut event_stream).await;
2013    }
2014
2015    #[test_case(exit_with_code_startinfo(123), vec![] ; "args")]
2016    #[test_case(exit_with_code_startinfo_from_env(Some(123)), vec![] ; "component_env")]
2017    #[test_case(exit_with_code_startinfo_from_env(Some(123)), vec!["EXIT_CODE=2"] ; "additional_env_shadowed")]
2018    #[test_case(exit_with_code_startinfo_from_env(None), vec!["EXIT_CODE=123"] ; "additional_env")]
2019    #[fuchsia::test]
2020    async fn test_return_code_failure(
2021        start_info: fcrunner::ComponentStartInfo,
2022        additional_environ: Vec<&str>,
2023    ) {
2024        let runner = Arc::new(ElfRunner::new(
2025            job_default().duplicate(zx::Rights::SAME_RIGHTS).unwrap(),
2026            Box::new(process_launcher::BuiltInConnector {}),
2027            Some(new_utc_clock_for_tests()),
2028            CrashRecords::new(),
2029            additional_environ.iter().map(|s| s.to_string()).collect(),
2030        ));
2031        let runner = runner.get_scoped_runner(ScopedPolicyChecker::new(
2032            Arc::new(SecurityPolicy::default()),
2033            Moniker::root(),
2034        ));
2035        let (controller, server_controller) = create_proxy::<fcrunner::ComponentControllerMarker>();
2036        runner.start(start_info, server_controller).await;
2037
2038        let mut event_stream = controller.take_event_stream();
2039        expect_diagnostics_event(&mut event_stream).await;
2040        let s = zx::Status::from_raw(
2041            i32::try_from(fcomp::Error::InstanceDied.into_primitive()).unwrap(),
2042        );
2043        expect_on_stop(&mut event_stream, s, Some(123)).await;
2044        expect_channel_closed(&mut event_stream).await;
2045    }
2046
2047    #[fuchsia::test]
2048    fn test_is_acceptable_exit_code() {
2049        // Test sshd with its acceptable code
2050        assert!(is_acceptable_exit_code(
2051            &Moniker::from_str("core/sshd-host/shell:sshd-1").expect("valid moniker"),
2052            255
2053        ));
2054
2055        // Test sshd with a non-acceptable code
2056        assert!(!is_acceptable_exit_code(
2057            &Moniker::from_str("core/sshd-host/shell:sshd-1").expect("valid moniker"),
2058            1
2059        ));
2060
2061        // Test a moniker that doesn't match
2062        assert!(!is_acceptable_exit_code(
2063            &Moniker::from_str("not_core/ssh-host/shell:sshd-1").expect("valid moniker"),
2064            255
2065        ));
2066
2067        // Test an unknown component with a code that happens to be acceptable for another component
2068        assert!(!is_acceptable_exit_code(
2069            &Moniker::from_str("foo/debug").expect("valid moniker"),
2070            255
2071        ));
2072    }
2073
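    /// Checks that `merge_environ` gives precedence to entries in its second argument,
    /// matching variables by name (the text before '=').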
2074    #[fuchsia::test]
2075    fn test_merge_environ() {
2076        assert_eq!(merge_environ(&vec![], &vec![]), Vec::<String>::new());
2077
2078        assert_eq!(
2079            merge_environ(&vec!["A".to_string(), "B=".to_string(), "C=c".to_string()], &vec![]),
2080            vec!["A".to_string(), "B=".to_string(), "C=c".to_string()]
2081        );
2082        assert_eq!(
2083            merge_environ(
2084                &vec!["A".to_string(), "B=".to_string(), "C=c".to_string()],
2085                &vec!["A".to_string(), "B".to_string(), "C".to_string()]
2086            ),
2087            vec!["A".to_string(), "B".to_string(), "C".to_string()]
2088        );
2089        assert_eq!(
2090            merge_environ(
2091                &vec!["A".to_string(), "B=".to_string(), "C=c".to_string()],
2092                &vec!["A=".to_string(), "B=".to_string(), "C=".to_string()]
2093            ),
2094            vec!["A=".to_string(), "B=".to_string(), "C=".to_string()]
2095        );
2096        assert_eq!(
2097            merge_environ(
2098                &vec!["A".to_string(), "B=".to_string(), "C=c".to_string()],
2099                &vec!["A=aa".to_string(), "B=bb".to_string(), "C=cc".to_string()]
2100            ),
2101            vec!["A=aa".to_string(), "B=bb".to_string(), "C=cc".to_string()]
2102        );
2103        assert_eq!(
2104            merge_environ(&vec![], &vec!["A".to_string(), "B=".to_string(), "C=c".to_string()]),
2105            vec!["A".to_string(), "B=".to_string(), "C=c".to_string()]
2106        );
2107    }
2108}