Skip to main content

alarms/
lib.rs

1// Copyright 2024 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5//! Alarm management subsystem.
6//!
7//! This subsystem serves the FIDL API `fuchsia.time.alarms/Wake`. To instantiate,
8//! you can use the following approach:
9//!
10//! ```ignore
11//! let proxy = client::connect_to_protocol::<ffhh::DeviceMarker>().map_err(
12//!    |e| error!("error: {}", e)).expect("add proper error handling");
13//!    let timer_loop = alarms::Handle::new(proxy);
14//! ```
15//!
16//! From here, use the standard approach with [ServiceFs::new] to expose the
17//! discoverable FIDL endpoint and call:
18//!
19//! ```ignore
20//! let stream: fidl_fuchsia_time_alarms::WakeRequestStream = ... ;
21//! alarms::serve(timer_loop, stream).await;
22//! // ...
23//! ```
24//!
25//! Of course, for everything to work well, your component will need appropriate
26//! capability routing.  Refer to capability routing docs for those details.
27
28mod emu;
29mod timers;
30
31use crate::emu::EmulationTimerOps;
32use anyhow::{Context, Result, anyhow};
33use async_trait::async_trait;
34use fidl::HandleBased;
35use fidl::encoding::ProxyChannelBox;
36use fidl::endpoints::RequestStream;
37use fuchsia_component::client::connect_to_named_protocol_at_dir_root;
38use fuchsia_inspect::{IntProperty, NumericProperty, Property};
39use futures::channel::mpsc;
40use futures::sink::SinkExt;
41use futures::{StreamExt, TryStreamExt};
42use log::{debug, error, warn};
43use scopeguard::defer;
44use std::cell::RefCell;
45use std::rc::Rc;
46use std::sync::LazyLock;
47use time_pretty::{MSEC_IN_NANOS, format_duration, format_timer};
48use zx::AsHandleRef;
49use {
50    fidl_fuchsia_hardware_hrtimer as ffhh, fidl_fuchsia_time_alarms as fta,
51    fuchsia_async as fasync, fuchsia_inspect as finspect, fuchsia_runtime as fxr,
52    fuchsia_trace as trace,
53};
54
// `i64::MAX` and `i32::MAX`, pre-widened to `u64` so they can be compared
// directly against the unsigned tick counts reported by the hrtimer driver.
static I64_MAX_AS_U64: LazyLock<u64> = LazyLock::new(|| i64::MAX.try_into().expect("infallible"));
static I32_MAX_AS_U64: LazyLock<u64> = LazyLock::new(|| i32::MAX.try_into().expect("infallible"));

/// The largest value of timer "ticks" that is still considered useful.
static MAX_USEFUL_TICKS: LazyLock<u64> = LazyLock::new(|| *I32_MAX_AS_U64);

/// The smallest value of "ticks" that we can program into the driver. To wit,
/// driver will reject "0" ticks, even though it probably shouldn't. See
/// for details: b/437177931.
static MIN_USEFUL_TICKS: u64 = 1;

/// The hrtimer ID used for scheduling wake alarms.  This ID is reused from
/// Starnix, and should eventually no longer be critical.
const MAIN_TIMER_ID: usize = 6;

/// This is what we consider a "long" delay in alarm operations.
const LONG_DELAY_NANOS: i64 = 2000 * MSEC_IN_NANOS;

/// How long `log_long_op!` waits on a future before logging a warning about a
/// long-running async operation.
const TIMEOUT_SECONDS: i64 = 40;
74
/// Waits on a future, but if the future takes longer than `TIMEOUT_SECONDS`, we log a warning and
/// a stack trace. A stack trace is requested at most once for each call.
macro_rules! log_long_op {
    ($fut:expr) => {{
        use futures::FutureExt;
        let fut = $fut;
        futures::pin_mut!(fut);
        // Set after the first timeout fires, so the (expensive) stack trace is
        // requested no more than once per macro invocation.
        let mut logged = false;
        loop {
            // A fresh timeout is armed on each iteration; the loop only exits
            // when the wrapped future itself completes.
            let timeout = fasync::Timer::new(zx::MonotonicDuration::from_seconds(TIMEOUT_SECONDS));
            futures::select! {
                res = fut.as_mut().fuse() => {
                    if logged {
                        log::warn!("unexpected blocking is now resolved: long-running async operation at {}:{}.",
                            file!(), line!());
                    }
                    // Propagate the wrapped future's output as the macro's value.
                    break res;
                }
                _ = timeout.fuse() => {
                    // Check logs for a `kBadState` status reported from the hrtimer driver.
                    log::warn!("unexpected blocking: long-running async op at {}:{}. Report to `componentId:1408151`",
                        file!(), line!());
                    if !logged {
                        // NOTE(review): `::debug` resolves to an external
                        // backtrace-requesting crate (not `log::debug`) —
                        // confirm against the build dependencies.
                        #[cfg(all(target_os = "fuchsia", not(doc)))]
                        ::debug::backtrace_request_all_threads();
                    }
                    logged = true;
                }
            }
        }
    }};
}
107
108/// Increments the value of an underlying inspect property during its lifetime.
109struct ScopedInc<'a> {
110    property: &'a IntProperty,
111}
112
113impl<'a> ScopedInc<'a> {
114    fn new(property: &'a IntProperty) -> Self {
115        property.add(1);
116        Self { property }
117    }
118}
119
120impl<'a> Drop for ScopedInc<'a> {
121    fn drop(&mut self) {
122        self.property.add(-1);
123    }
124}
125
126/// Compares two optional deadlines and returns true if the `before is different from `after.
127/// Nones compare as equal.
128fn is_deadline_changed(
129    before: Option<fasync::BootInstant>,
130    after: Option<fasync::BootInstant>,
131) -> bool {
132    match (before, after) {
133        (None, None) => false,
134        (None, Some(_)) | (Some(_), None) => true,
135        (Some(before), Some(after)) => before != after,
136    }
137}
138
/// Errors returnable from [TimerOps] calls.
///
/// Distinguishes faults reported by the hrtimer driver itself from faults in
/// the FIDL transport used to reach it.
#[derive(Debug, Clone)]
pub(crate) enum TimerOpsError {
    /// The driver reported an error.
    Driver(ffhh::DriverError),
    /// FIDL-specific RPC error.
    Fidl(fidl::Error),
}
147
148impl Into<fta::WakeAlarmsError> for TimerOpsError {
149    /// Compute the error that gets propagated to callers, depending on messages
150    /// from the driver.
151    fn into(self) -> fta::WakeAlarmsError {
152        match self {
153            TimerOpsError::Fidl(fidl::Error::ClientChannelClosed { .. }) => {
154                fta::WakeAlarmsError::DriverConnection
155            }
156            TimerOpsError::Driver(ffhh::DriverError::InternalError) => fta::WakeAlarmsError::Driver,
157            _ => fta::WakeAlarmsError::Internal,
158        }
159    }
160}
161
162impl TimerOpsError {
163    fn is_canceled(&self) -> bool {
164        match self {
165            TimerOpsError::Driver(ffhh::DriverError::Canceled) => true,
166            _ => false,
167        }
168    }
169}
170
/// The future type returned by [TimerOps::start_and_wait].
///
/// A pure marker trait: it adds nothing beyond the [std::future::Future]
/// supertrait, but gives the boxed return type a nameable bound.
trait SawResponseFut: std::future::Future<Output = Result<zx::EventPair, TimerOpsError>> {
    // nop
}
174
/// Abstracts away timer operations.
///
/// Implemented by both the hardware-backed [HardwareTimerOps] and the
/// emulated [EmulationTimerOps], so the rest of the subsystem does not need
/// to know which backend it is driving.
#[async_trait(?Send)]
pub(crate) trait TimerOps {
    /// Stop the timer with the specified ID.
    async fn stop(&self, id: u64);

    /// Examine the timer's properties, such as supported resolutions and tick
    /// counts.
    async fn get_timer_properties(&self) -> TimerConfig;

    /// Starts the timer with the given resolution and tick count, returning a
    /// future that resolves when the timer fires (or errors out).
    ///
    /// This method must return an actual future, to handle the borrow checker:
    /// making this async will assume that `self` remains borrowed, which will
    /// thwart attempts to move the return value of this call into a separate
    /// closure.
    fn start_and_wait(
        &self,
        id: u64,
        resolution: &ffhh::Resolution,
        ticks: u64,
        setup_event: zx::Event,
    ) -> std::pin::Pin<Box<dyn SawResponseFut>>;
}
197
198/// TimerOps backed by an actual hardware timer.
199struct HardwareTimerOps {
200    proxy: ffhh::DeviceProxy,
201}
202
203impl HardwareTimerOps {
204    fn new(proxy: ffhh::DeviceProxy) -> Box<Self> {
205        Box::new(Self { proxy })
206    }
207}
208
#[async_trait(?Send)]
impl TimerOps for HardwareTimerOps {
    /// Stops the hardware timer `id`.  Failures (FIDL or driver) are logged
    /// and otherwise ignored: there is nothing useful to do about them here.
    async fn stop(&self, id: u64) {
        let _ = self
            .proxy
            .stop(id)
            .await
            .map(|result| {
                // Inner layer: the driver's own status for the stop call.
                let _ = result.map_err(|e| warn!("stop_hrtimer: driver error: {:?}", e));
            })
            // Outer layer: FIDL transport errors.
            .map_err(|e| warn!("stop_hrtimer: could not stop prior timer: {}", e));
    }

    /// Queries the driver and distills its properties into a [TimerConfig].
    /// Returns [TimerConfig::new_empty] on any failure.
    async fn get_timer_properties(&self) -> TimerConfig {
        match log_long_op!(self.proxy.get_properties()) {
            Ok(p) => {
                let timers_properties = &p.timers_properties.expect("timers_properties must exist");
                debug!("get_timer_properties: got: {:?}", timers_properties);

                // Pick the correct hrtimer to use for wakes.
                let timer_index = if timers_properties.len() > MAIN_TIMER_ID {
                    // Mostly vim3, where we have pre-existing timer allocations
                    // that we don't need to change.
                    MAIN_TIMER_ID
                } else if timers_properties.len() > 0 {
                    // Newer devices that don't need to allocate timer IDs, and/or
                    // may not even have as many timers as vim3 does. But, at least
                    // one timer is needed.
                    0
                } else {
                    // Give up.
                    return TimerConfig::new_empty();
                };
                let main_timer_properties = &timers_properties[timer_index];
                debug!("alarms: main_timer_properties: {:?}", main_timer_properties);
                // Not sure whether it is useful to have more ticks than this, so limit it.
                let max_ticks: u64 = std::cmp::min(
                    main_timer_properties.max_ticks.unwrap_or(*MAX_USEFUL_TICKS),
                    *MAX_USEFUL_TICKS,
                );
                // Note: `resolutions` ends up holding at most one element (the
                // coarsest supported resolution), collected into a Vec.
                let resolutions = &main_timer_properties
                    .supported_resolutions
                    .as_ref()
                    .expect("supported_resolutions is populated")
                    .iter()
                    .last() //  Limits the resolution to the coarsest available.
                    .map(|r| match *r {
                        ffhh::Resolution::Duration(d) => d,
                        _ => {
                            error!(
                            "get_timer_properties: Unknown resolution type, returning millisecond."
                        );
                            MSEC_IN_NANOS
                        }
                    })
                    .map(|d| zx::BootDuration::from_nanos(d))
                    .into_iter() // Used with .last() above.
                    .collect::<Vec<_>>();
                let timer_id = main_timer_properties.id.expect("timer ID is always present");
                TimerConfig::new_from_data(timer_id, resolutions, max_ticks)
            }
            Err(e) => {
                error!("could not get timer properties: {:?}", e);
                TimerConfig::new_empty()
            }
        }
    }

    /// Issues the `StartAndWait` FIDL call and wraps the in-flight response in
    /// a [HwResponseFut] so it can outlive the borrow of `self`.
    fn start_and_wait(
        &self,
        id: u64,
        resolution: &ffhh::Resolution,
        ticks: u64,
        setup_event: zx::Event,
    ) -> std::pin::Pin<Box<dyn SawResponseFut>> {
        let inner = self.proxy.start_and_wait(id, resolution, ticks, setup_event);
        Box::pin(HwResponseFut { pinner: Box::pin(inner) })
    }
}
288
// Untangles the borrow checker issues that otherwise result from making
// TimerOps::start_and_wait an async function.
struct HwResponseFut {
    // The pinned, in-flight FIDL call to `Device.StartAndWait`; polled by the
    // `Future` impl below, which flattens its nested error types.
    pinner: std::pin::Pin<
        Box<
            fidl::client::QueryResponseFut<
                ffhh::DeviceStartAndWaitResult,
                fidl::encoding::DefaultFuchsiaResourceDialect,
            >,
        >,
    >,
}
301
302use std::task::Poll;
303impl SawResponseFut for HwResponseFut {}
304impl std::future::Future for HwResponseFut {
305    type Output = Result<zx::EventPair, TimerOpsError>;
306    fn poll(
307        mut self: std::pin::Pin<&mut Self>,
308        cx: &mut std::task::Context<'_>,
309    ) -> std::task::Poll<Self::Output> {
310        let inner_poll = self.pinner.as_mut().poll(cx);
311        match inner_poll {
312            Poll::Ready(result) => Poll::Ready(match result {
313                Ok(Ok(keep_alive)) => Ok(keep_alive),
314                Ok(Err(e)) => Err(TimerOpsError::Driver(e)),
315                Err(e) => Err(TimerOpsError::Fidl(e)),
316            }),
317            Poll::Pending => Poll::Pending,
318        }
319    }
320}
321
322/// Stops a currently running hardware timer.
323async fn stop_hrtimer(hrtimer: &Box<dyn TimerOps>, timer_config: &TimerConfig) {
324    trace::duration!("alarms", "hrtimer:stop", "id" => timer_config.id);
325    debug!("stop_hrtimer: stopping hardware timer: {}", timer_config.id);
326    hrtimer.stop(timer_config.id).await;
327    debug!("stop_hrtimer: stopped  hardware timer: {}", timer_config.id);
328}
329
// The default size of the channels created in this module.
// This is very unlikely to create bottlenecks; if a channel does fill up,
// senders presumably just wait for capacity — confirm against mpsc semantics.
const CHANNEL_SIZE: usize = 1000;
333
/// A type handed around between the concurrent loops run by this module.
#[derive(Debug)]
enum Cmd {
    /// Request a timer to be started.
    Start {
        /// The unique connection ID.
        conn_id: zx::Koid,
        /// A timestamp (presumably in the future), at which to expire the timer.
        deadline: timers::Deadline,
        // The API supports several modes. See fuchsia.time.alarms/Wake.fidl.
        //
        // Optional, because not always needed:
        //
        // * `mode` is required for hanging get API calls (e.g. `StartAndWait`), as we must signal
        //   when the alarm is scheduled.
        // * The calls such as `SetUtc` which return only upon scheduling do not need a `mode`, as
        //   the caller can wait for the call to return immediately.
        mode: Option<fta::SetMode>,
        /// An alarm identifier, chosen by the caller.
        alarm_id: String,
        /// A responder that will be called when the timer expires. The
        /// client end of the connection will block until we send something
        /// on this responder.
        ///
        /// This is packaged into a Rc... only because both the "happy path"
        /// and the error path must consume the responder.  This allows them
        /// to be consumed, without the responder needing to implement Default.
        responder: Rc<dyn timers::Responder>,
    },
    /// Request cancellation of a single timer.
    StopById {
        /// Signaled once the stop has been processed, so the requester can block on it.
        done: zx::Event,
        /// Identifies the timer to stop.
        timer_id: timers::Id,
    },
    /// A previously scheduled alarm has expired.
    Alarm {
        /// The deadline that the expired alarm was scheduled for.
        expired_deadline: fasync::BootInstant,
        /// Keeps the system awake while the expiry is being processed.
        keep_alive: fidl::EventPair,
    },
    /// An alarm expired, but the FIDL transport failed — no wake lease is held.
    AlarmFidlError {
        expired_deadline: fasync::BootInstant,
        error: fidl::Error,
    },
    /// An alarm expired, but the driver reported an error — no wake lease is held.
    AlarmDriverError {
        expired_deadline: fasync::BootInstant,
        error: ffhh::DriverError,

        // Added these for debugging details, otherwise not necessary.
        timer_config_id: u64,
        resolution_nanos: i64,
        ticks: u64,
    },
    /// The UTC clock transformation has been updated.
    UtcUpdated {
        // The new boot-to-utc clock transformation.
        transform: fxr::UtcClockTransform,
    },
}
390
391impl std::fmt::Display for Cmd {
392    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
393        match self {
394            Cmd::Start { conn_id, deadline, alarm_id, .. } => {
395                write!(
396                    f,
397                    "Start[alarm_id=\"{}\", conn_id={:?}, deadline={}]",
398                    alarm_id, conn_id, deadline,
399                )
400            }
401            Cmd::Alarm { expired_deadline, .. } => {
402                write!(f, "Alarm[deadline={}]", format_timer((*expired_deadline).into()))
403            }
404            Cmd::AlarmFidlError { expired_deadline, error } => {
405                write!(
406                    f,
407                    "FIDLError[deadline={}, err={}, NO_WAKE_LEASE!]",
408                    format_timer((*expired_deadline).into()),
409                    error
410                )
411            }
412            Cmd::AlarmDriverError { expired_deadline, error, .. } => {
413                write!(
414                    f,
415                    "DriverError[deadline={}, err={:?}, NO_WAKE_LEASE!]",
416                    format_timer((*expired_deadline).into()),
417                    error
418                )
419            }
420            Cmd::StopById { timer_id, done: _ } => {
421                write!(f, "StopById[timerId={}]", timer_id,)
422            }
423            Cmd::UtcUpdated { transform } => {
424                write!(f, "UtcUpdated[timerId={transform:?}]")
425            }
426        }
427    }
428}
429
430/// Extracts a KOID from the underlying channel of the provided stream.
431///
432/// This function deconstructs the provided stream to access the underlying
433/// channel and extract its KOID. It then reconstructs the stream and returns
434/// it to the caller along with the KOID.
435///
436/// # Args
437/// - `stream`: The `fta::WakeAlarmsRequestStream` to extract the KOID from.
438///
439/// # Returns
440/// A tuple containing the `zx::Koid` of the stream's channel and the
441/// reconstructed `fta::WakeAlarmsRequestStream`.
442pub fn get_stream_koid(
443    stream: fta::WakeAlarmsRequestStream,
444) -> (zx::Koid, fta::WakeAlarmsRequestStream) {
445    let (inner, is_terminated) = stream.into_inner();
446    let koid = inner.channel().as_channel().as_handle_ref().koid().expect("infallible");
447    let stream = fta::WakeAlarmsRequestStream::from_inner(inner, is_terminated);
448    (koid, stream)
449}
450
451/// Serves a single Wake API client.
452///
453/// This function processes incoming requests from a `fta::WakeAlarmsRequestStream`,
454/// handling each request by calling `handle_request`. It continues to process
455/// requests until the stream is exhausted.
456///
457/// # Args
458/// - `timer_loop`: A reference-counted pointer to the `Loop` that manages timers.
459/// - `requests`: The stream of incoming `fta::WakeAlarmsRequest` from a client.
460pub async fn serve(timer_loop: Rc<Loop>, requests: fta::WakeAlarmsRequestStream) {
461    let timer_loop = timer_loop.clone();
462    let timer_loop_send = || timer_loop.get_sender();
463    let (conn_id, mut requests) = get_stream_koid(requests);
464    let mut request_count = 0;
465    debug!("alarms::serve: opened connection: {:?}", conn_id);
466    while let Some(maybe_request) = requests.next().await {
467        request_count += 1;
468        debug!("alarms::serve: conn_id: {:?} incoming request: {}", conn_id, request_count);
469        match maybe_request {
470            Ok(request) => {
471                // Should return quickly.
472                handle_request(conn_id, timer_loop_send(), request).await;
473            }
474            Err(e) => {
475                warn!("alarms::serve: error in request: {:?}", e);
476            }
477        }
478        debug!("alarms::serve: conn_id: {:?} done request: {}", conn_id, request_count);
479    }
480    // Check if connection closure was intentional. It is way too easy to close
481    // a FIDL connection inadvertently if doing non-mainstream things with FIDL.
482    warn!("alarms::serve: CLOSED CONNECTION: conn_id: {:?}", conn_id);
483}
484
485async fn handle_cancel(alarm_id: String, conn_id: zx::Koid, cmd: &mut mpsc::Sender<Cmd>) {
486    let done = zx::Event::create();
487    let timer_id = timers::Id::new(alarm_id.clone(), conn_id);
488    if let Err(e) = cmd.send(Cmd::StopById { timer_id, done: clone_handle(&done) }).await {
489        warn!("handle_request: error while trying to cancel: {}: {:?}", alarm_id, e);
490    }
491    wait_signaled(&done).await;
492}
493
/// Processes a single Wake API request from a single client.
/// This function is expected to return quickly.
///
/// # Args
/// - `conn_id`: the unique identifier of the connection producing these requests.
/// - `cmd`: the outbound queue of commands to deliver to the timer manager.
/// - `request`: a single inbound Wake FIDL API request.
async fn handle_request(
    conn_id: zx::Koid,
    mut cmd: mpsc::Sender<Cmd>,
    request: fta::WakeAlarmsRequest,
) {
    match request {
        // Hanging-get style: the responder is held until the alarm fires.
        fta::WakeAlarmsRequest::SetAndWait { deadline, mode, alarm_id, responder } => {
            // Since responder is consumed by the happy path and the error path, but not both,
            // and because the responder does not implement Default, this is a way to
            // send it in two mutually exclusive directions.  Each direction will reverse
            // this wrapping once the responder makes it to the other side.
            //
            // Rc required because of sharing a noncopyable struct; RefCell required because
            // borrow_mut() is needed to move out; and Option is required so we can
            // use take() to replace the struct with None so it does not need to leave
            // a Default in its place.
            let responder = Rc::new(RefCell::new(Some(responder)));

            // Alarm is not scheduled yet!
            debug!(
                "handle_request: scheduling alarm_id: \"{}\"\n\tconn_id: {:?}\n\tdeadline: {}",
                alarm_id,
                conn_id,
                format_timer(deadline.into())
            );
            // Expected to return quickly.
            let deadline = timers::Deadline::Boot(deadline.into());
            if let Err(e) = log_long_op!(cmd.send(Cmd::Start {
                conn_id,
                deadline,
                mode: Some(mode),
                alarm_id: alarm_id.clone(),
                responder: responder.clone(),
            })) {
                warn!("handle_request: error while trying to schedule `{}`: {:?}", alarm_id, e);
                // Error path: reclaim the responder and report the failure.
                responder
                    .borrow_mut()
                    .take()
                    .expect("always present if call fails")
                    .send(Err(fta::WakeAlarmsError::Internal))
                    .unwrap();
            }
        }
        // Same as `SetAndWait`, but the deadline is on the UTC timeline.
        fta::WakeAlarmsRequest::SetAndWaitUtc { deadline, mode, alarm_id, responder } => {
            // Quickly get rid of the custom wake alarms deadline type.
            let deadline =
                timers::Deadline::Utc(fxr::UtcInstant::from_nanos(deadline.timestamp_utc));

            // The rest of this match branch is the same as for `SetAndWait`. However, the handling
            // is for now simple enough that we don't need to explore factoring common actions out.
            let responder = Rc::new(RefCell::new(Some(responder)));
            debug!(
                "handle_request: scheduling alarm_id UTC: \"{alarm_id}\"\n\tconn_id: {conn_id:?}\n\tdeadline: {deadline}",
            );

            if let Err(e) = log_long_op!(cmd.send(Cmd::Start {
                conn_id,
                deadline,
                mode: Some(mode),
                alarm_id: alarm_id.clone(),
                responder: responder.clone(),
            })) {
                warn!("handle_request: error while trying to schedule `{}`: {:?}", alarm_id, e);
                responder
                    .borrow_mut()
                    .take()
                    .expect("always present if call fails")
                    .send(Err(fta::WakeAlarmsError::Internal))
                    .unwrap();
            }
        }
        fta::WakeAlarmsRequest::Cancel { alarm_id, .. } => {
            // TODO: b/383062441 - make this into an async task so that we wait
            // less to schedule the next alarm.
            log_long_op!(handle_cancel(alarm_id, conn_id, &mut cmd));
        }
        // `Set` returns as soon as the alarm is scheduled; expiry is reported
        // separately through `notifier`.
        fta::WakeAlarmsRequest::Set { notifier, deadline, mode, alarm_id, responder } => {
            // Alarm is not scheduled yet!
            debug!(
                "handle_request: scheduling alarm_id: \"{alarm_id}\"\n\tconn_id: {conn_id:?}\n\tdeadline: {}",
                format_timer(deadline.into())
            );
            // Expected to return quickly.
            if let Err(e) = log_long_op!(cmd.send(Cmd::Start {
                conn_id,
                deadline: timers::Deadline::Boot(deadline.into()),
                mode: Some(mode),
                alarm_id: alarm_id.clone(),
                responder: Rc::new(RefCell::new(Some(notifier))),
            })) {
                warn!("handle_request: error while trying to schedule `{}`: {:?}", alarm_id, e);
                responder.send(Err(fta::WakeAlarmsError::Internal)).unwrap();
            } else {
                // Successfully scheduled the alarm.
                responder.send(Ok(())).unwrap();
            }
        }
        // Unknown methods are deliberately ignored.
        fta::WakeAlarmsRequest::_UnknownMethod { .. } => {}
    };
}
601
/// Represents a single alarm event processing loop.
///
/// One instance is created per each alarm-capable low-level device. The `Loop`
/// is responsible for managing the lifecycle of wake alarms, including their
/// creation, scheduling, and cancellation. It interacts with the underlying
/// hardware timer through a `TimerOps` trait object.
pub struct Loop {
    // Given to any clients that need to send messages to `_task`
    // via [get_sender].
    snd: mpsc::Sender<Cmd>,
}
613
impl Loop {
    /// Creates a new instance of `Loop`.
    ///
    /// This function initializes a new `Loop` with a connection to a low-level
    /// hardware timer device. It spawns two background tasks: one for the main
    /// timer event loop and another for monitoring UTC clock changes.
    ///
    /// # Args
    /// - `scope`: The `fasync::ScopeHandle` to spawn background tasks in.
    /// - `device_proxy`: A `ffhh::DeviceProxy` for communicating with the hardware timer.
    /// - `inspect`: A `finspect::Node` for recording diagnostics.
    /// - `utc_clock`: A `fxr::UtcClock` for tracking UTC time.
    ///
    /// # Returns
    /// A new instance of `Loop`.
    pub fn new(
        scope: fasync::ScopeHandle,
        device_proxy: ffhh::DeviceProxy,
        inspect: finspect::Node,
        utc_clock: fxr::UtcClock,
    ) -> Self {
        let hw_device_timer_ops = HardwareTimerOps::new(device_proxy);
        Loop::new_internal(scope, hw_device_timer_ops, inspect, utc_clock)
    }

    /// Creates a new instance of `Loop` with emulated wake alarms.
    ///
    /// This function is similar to `new`, but it uses an emulated timer instead
    /// of a real hardware timer. This is useful for testing environments where
    /// a hardware timer may not be available.
    ///
    /// # Args
    /// - `scope`: The `fasync::ScopeHandle` to spawn background tasks in.
    /// - `inspect`: A `finspect::Node` for recording diagnostics.
    /// - `utc_clock`: A `fxr::UtcClock` for tracking UTC time.
    ///
    /// # Returns
    /// A new instance of `Loop` with an emulated timer.
    pub fn new_emulated(
        scope: fasync::ScopeHandle,
        inspect: finspect::Node,
        utc_clock: fxr::UtcClock,
    ) -> Self {
        let timer_ops = Box::new(EmulationTimerOps::new());
        Loop::new_internal(scope, timer_ops, inspect, utc_clock)
    }

    // Shared constructor: wires the command channel and spawns the two
    // background tasks against the provided [TimerOps] backend.
    fn new_internal(
        scope: fasync::ScopeHandle,
        timer_ops: Box<dyn TimerOps>,
        inspect: finspect::Node,
        utc_clock: fxr::UtcClock,
    ) -> Self {
        // Snapshot of the current boot-to-UTC transform; subsequently kept
        // fresh via `Cmd::UtcUpdated` messages from the monitor task below.
        let utc_transform = Rc::new(RefCell::new(
            utc_clock.get_details().expect("has UTC clock READ capability").reference_to_synthetic,
        ));

        let (snd, rcv) = mpsc::channel(CHANNEL_SIZE);
        let loop_scope = scope.clone();

        // Main timer loop: owns the receiving end of the command channel.
        scope.spawn_local(wake_timer_loop(
            loop_scope,
            snd.clone(),
            rcv,
            timer_ops,
            inspect,
            utc_transform,
        ));
        // Forwards UTC clock updates into the timer loop above.
        scope.spawn_local(monitor_utc_clock_changes(utc_clock, snd.clone()));
        Self { snd }
    }

    /// Gets a copy of a channel through which async commands may be sent to
    /// the [Loop].
    fn get_sender(&self) -> mpsc::Sender<Cmd> {
        self.snd.clone()
    }
}
692
693// Forwards the clock transformation of an updated clock into the alarm manager, to allow
694// correcting the boot time deadlines of clocks on the UTC timeline.
695async fn monitor_utc_clock_changes(utc_clock: fxr::UtcClock, mut cmd: mpsc::Sender<Cmd>) {
696    let koid = utc_clock.as_handle_ref().koid();
697    log::info!("monitor_utc_clock_changes: entry");
698    loop {
699        // CLOCK_UPDATED signal is self-clearing.
700        fasync::OnSignals::new(utc_clock.as_handle_ref(), zx::Signals::CLOCK_UPDATED)
701            .await
702            .expect("UTC clock is readable");
703
704        let transform =
705            utc_clock.get_details().expect("UTC clock details are readable").reference_to_synthetic;
706        log::debug!("Received a UTC update: koid={koid:?}: {transform:?}");
707        if let Err(err) = cmd.send(Cmd::UtcUpdated { transform }).await {
708            // This is OK in tests.
709            log::warn!("monitor_utc_clock_changes: exit: {err:?}");
710            break;
711        }
712    }
713}
714
715/// Clones a handle infallibly with `zx::Rights::SAME_RIGHTS`.
716///
717/// This function duplicates a handle, preserving its rights. It will panic if
718/// the handle duplication fails, which is not expected to happen under normal
719/// circumstances.
720///
721/// # Args
722/// - `handle`: A reference to a handle-based object to be cloned.
723///
724/// # Returns
725/// A new handle with the same rights as the original.
726pub fn clone_handle<H: HandleBased>(handle: &H) -> H {
727    handle.duplicate_handle(zx::Rights::SAME_RIGHTS).expect("infallible")
728}
729
/// Waits until `handle` has its `EVENT_SIGNALED` signal raised.
async fn wait_signaled<H: HandleBased>(handle: &H) {
    fasync::OnSignals::new(handle, zx::Signals::EVENT_SIGNALED).await.expect("infallible");
}

/// Raises `EVENT_SIGNALED` on `event`, waking any [wait_signaled] waiters.
pub(crate) fn signal(event: &zx::Event) {
    event.signal(zx::Signals::NONE, zx::Signals::EVENT_SIGNALED).expect("infallible");
}
737
/// A [TimerDuration] represents a duration of time that can be expressed by
/// a discrete timer register.
///
/// This is a low-level representation of time duration, used in interaction with
/// hardware devices. It is therefore necessarily discretized, with adaptive
/// resolution, depending on the physical characteristics of the underlying
/// hardware timer that it models.
#[derive(Debug, Clone, Copy)]
struct TimerDuration {
    // The resolution of each one of the `ticks` below.
    resolution: zx::BootDuration,
    // The number of ticks that encodes time duration. Each "tick" represents
    // one unit of `resolution` above.
    ticks: u64,
}
753
/// This and the comparison traits below are used to allow TimerDuration
/// calculations in a compact form.
impl Eq for TimerDuration {}

impl std::cmp::PartialOrd for TimerDuration {
    /// Delegates to [Ord::cmp]; the ordering is total, so this always
    /// returns `Some`.
    fn partial_cmp(&self, other: &TimerDuration) -> Option<std::cmp::Ordering> {
        Some(self.cmp(other))
    }
}

impl std::cmp::PartialEq for TimerDuration {
    /// Equality goes through [Ord::cmp], so durations with different
    /// resolutions but the same total time compare equal.
    fn eq(&self, other: &Self) -> bool {
        self.cmp(other) == std::cmp::Ordering::Equal
    }
}
769
770impl std::cmp::Ord for TimerDuration {
771    /// Two [TimerDuration]s compare equal if they model exactly the same duration of time,
772    /// no matter the resolutions.
773    fn cmp(&self, other: &TimerDuration) -> std::cmp::Ordering {
774        let self_ticks_128: i128 = self.ticks as i128;
775        let self_resolution: i128 = self.resolution_as_nanos() as i128;
776        let self_nanos = self_resolution * self_ticks_128;
777
778        let other_ticks_128: i128 = other.ticks as i128;
779        let other_resolution: i128 = other.resolution_as_nanos() as i128;
780        let other_nanos = other_resolution * other_ticks_128;
781
782        self_nanos.cmp(&other_nanos)
783    }
784}
785
786impl std::fmt::Display for TimerDuration {
787    /// Human readable TimerDuration exposes both the tick count and the resolution,
788    /// in the format of "ticks x resolution", with an end result of
789    /// `10x5ms` for example.
790    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
791        let ticks = self.ticks;
792        let resolution = self.resolution();
793        // Example: 10x1ms
794        write!(f, "{}x{}", ticks, format_duration(resolution),)
795    }
796}
797
798impl TimerDuration {
799    /// The maximum representable TimerDuration that we allow.
800    fn max() -> Self {
801        TimerDuration::new(zx::BootDuration::from_nanos(1), *I64_MAX_AS_U64)
802    }
803
804    /// The zero [TimerDuration].
805    fn zero() -> Self {
806        TimerDuration::new(zx::BootDuration::from_nanos(1), 0)
807    }
808
809    /// Creates a new timer duration with the given parameters.
810    fn new(resolution: zx::BootDuration, ticks: u64) -> Self {
811        Self { resolution, ticks }
812    }
813
814    /// Creates a new timer duration using the resolution from `res_source` and
815    /// a specified number of ticks.
816    fn new_with_resolution(res_source: &TimerDuration, ticks: u64) -> Self {
817        Self::new(res_source.resolution, ticks)
818    }
819
820    /// Returns the time duration represented by this TimerDuration.
821    ///
822    /// Due to the way duration is expressed, the same time duration
823    /// can be represented in multiple ways.
824    fn duration(&self) -> zx::BootDuration {
825        let duration_as_nanos = self.resolution_as_nanos() * self.ticks;
826        let clamp_duration = std::cmp::min(*I32_MAX_AS_U64, duration_as_nanos);
827        zx::BootDuration::from_nanos(clamp_duration.try_into().expect("result was clamped"))
828    }
829
830    /// The resolution of this TimerDuration
831    fn resolution(&self) -> zx::BootDuration {
832        self.resolution
833    }
834
835    fn resolution_as_nanos(&self) -> u64 {
836        self.resolution().into_nanos().try_into().expect("resolution is never negative")
837    }
838
839    /// The number of ticks of this [TimerDuration].
840    fn ticks(&self) -> u64 {
841        self.ticks
842    }
843}
844
845impl From<zx::BootDuration> for TimerDuration {
846    fn from(d: zx::BootDuration) -> TimerDuration {
847        let nanos = d.into_nanos();
848        assert!(nanos >= 0);
849        let nanos_u64 = nanos.try_into().expect("guarded by assert");
850        TimerDuration::new(zx::BootDuration::from_nanos(1), nanos_u64)
851    }
852}
853
854impl std::ops::Div for TimerDuration {
855    type Output = u64;
856    fn div(self, rhs: Self) -> Self::Output {
857        let self_nanos = self.resolution_as_nanos() * self.ticks;
858        let rhs_nanos = rhs.resolution_as_nanos() * rhs.ticks;
859        self_nanos / rhs_nanos
860    }
861}
862
863impl std::ops::Mul<u64> for TimerDuration {
864    type Output = Self;
865    fn mul(self, rhs: u64) -> Self::Output {
866        Self::new(self.resolution, self.ticks * rhs)
867    }
868}
869
/// Contains the configuration of a specific timer.
#[derive(Debug)]
pub(crate) struct TimerConfig {
    /// The resolutions supported by this timer. Each entry is one possible
    /// duration for on timer "tick".  The resolution is picked when a timer
    /// request is sent.
    ///
    /// The resolutions MUST be sorted from finest (index 0) to coarsest.
    ///
    /// There MUST be at least one resolution.
    resolutions: Vec<zx::BootDuration>,
    /// The maximum count of "ticks" that the timer supports. The timer usually
    /// has a register that counts up or down based on a clock signal with
    /// the period specified by `resolutions`.  This is the maximum value that
    /// the counter can count to without overflowing.
    max_ticks: u64,
    /// The stable ID of the timer with the above configuration.
    id: u64,
}
889
890impl TimerConfig {
891    /// Creates a new timer config with supported timer resolutions and the max
892    /// ticks value for the timer's counter.
893    fn new_from_data(timer_id: u64, resolutions: &[zx::BootDuration], max_ticks: u64) -> Self {
894        debug!(
895            "TimerConfig: resolutions: {:?}, max_ticks: {}, timer_id: {}",
896            resolutions.iter().map(|r| format_duration(*r)).collect::<Vec<_>>(),
897            max_ticks,
898            timer_id
899        );
900        let resolutions = resolutions.iter().map(|d| *d).collect::<Vec<zx::BootDuration>>();
901        TimerConfig { resolutions, max_ticks, id: timer_id }
902    }
903
904    fn new_empty() -> Self {
905        error!("TimerConfig::new_empty() called, this is not OK.");
906        TimerConfig { resolutions: vec![], max_ticks: 0, id: 0 }
907    }
908
909    // Picks the most appropriate timer setting for it to fire as close as possible
910    // when `duration` expires.
911    //
912    // If duration is too far in the future for what the timer supports,
913    // return a smaller value, to allow the timer to be reprogrammed multiple
914    // times.
915    //
916    // If the available menu of resolutions is such that we can wake only after
917    // the intended deadline, begrudgingly return that option.
918    fn pick_setting(&self, duration: zx::BootDuration) -> TimerDuration {
919        assert!(self.resolutions.len() > 0, "there must be at least one supported resolution");
920
921        // Driver does not support zero ticks, so we must accept the finest resolution duration
922        // instead.
923        if duration <= zx::BootDuration::ZERO {
924            return TimerDuration::new(self.resolutions[0], 1);
925        }
926
927        //  0         |-------------->|<---------------|
928        //  |---------+---------------+----------------+---->
929        //  |---------^               |                |
930        //  | best positive slack     |                |
931        //  |-------------------------^ duration       |
932        //  |------------------------------------------^ best negative slack.
933        let mut best_positive_slack = TimerDuration::zero();
934        let mut best_negative_slack = TimerDuration::max();
935
936        if self.max_ticks == 0 {
937            return TimerDuration::new(zx::BootDuration::from_millis(1), 0);
938        }
939        let duration_slack: TimerDuration = duration.into();
940
941        for res1 in self.resolutions.iter() {
942            let smallest_unit = TimerDuration::new(*res1, 1);
943            let max_tick_at_res = TimerDuration::new(*res1, self.max_ticks);
944
945            let smallest_slack_larger_than_duration = smallest_unit > duration_slack;
946            let largest_slack_smaller_than_duration = max_tick_at_res < duration_slack;
947
948            if smallest_slack_larger_than_duration {
949                if duration_slack == TimerDuration::zero() {
950                    best_negative_slack = TimerDuration::zero();
951                } else if smallest_unit < best_negative_slack {
952                    best_negative_slack = smallest_unit;
953                }
954            }
955            if largest_slack_smaller_than_duration {
956                if max_tick_at_res > best_positive_slack
957                    || best_positive_slack == TimerDuration::zero()
958                {
959                    best_positive_slack = max_tick_at_res;
960                }
961            }
962
963            // "Regular" case.
964            if !smallest_slack_larger_than_duration && !largest_slack_smaller_than_duration {
965                // Check whether duration divides evenly into the available slack options
966                // for this resolution.  If it does, then that is the slack we're looking for.
967                let q = duration_slack / smallest_unit;
968                let d = smallest_unit * q;
969                if d == duration_slack {
970                    // Exact match, we can return right now.
971                    return d;
972                } else {
973                    // Not an exact match, so q ticks is before, but q+1 is after.
974                    if d > best_positive_slack {
975                        best_positive_slack = TimerDuration::new_with_resolution(&smallest_unit, q);
976                    }
977                    let d_plus = TimerDuration::new_with_resolution(&smallest_unit, q + 1);
978                    if d_plus < best_negative_slack {
979                        best_negative_slack = d_plus;
980                    }
981                }
982            }
983        }
984
985        let p_slack = duration - best_positive_slack.duration();
986        let n_slack = best_negative_slack.duration() - duration;
987
988        // If the closest approximation is 0ns, then we can not advance time, so we reject it.
989        // Otherwise pick the smallest slack.  Note that when we pick the best positive slack,
990        // we will wake *before* the actual deadline.  In multi-resolution counters, this enables
991        // us to pick a finer count in the next go.
992        let ret = if p_slack < n_slack && best_positive_slack.duration().into_nanos() > 0 {
993            best_positive_slack
994        } else {
995            best_negative_slack
996        };
997        debug!("TimerConfig: picked slack: {} for duration: {}", ret, format_duration(duration));
998        assert!(
999            ret.duration().into_nanos() >= 0,
1000            "ret: {}, p_slack: {}, n_slack: {}, orig.duration: {}\n\tbest_p_slack: {}\n\tbest_n_slack: {}\n\ttarget: {}\n\t 1: {} 2: {:?}, 3: {:?}",
1001            ret,
1002            format_duration(p_slack),
1003            format_duration(n_slack),
1004            format_duration(duration),
1005            best_positive_slack,
1006            best_negative_slack,
1007            duration_slack,
1008            p_slack != zx::BootDuration::ZERO,
1009            p_slack,
1010            zx::BootDuration::ZERO,
1011        );
1012        ret
1013    }
1014}
1015
1016async fn get_timer_properties(hrtimer: &Box<dyn TimerOps>) -> TimerConfig {
1017    debug!("get_timer_properties: requesting timer properties.");
1018    hrtimer.get_timer_properties().await
1019}
1020
/// The state of a single hardware timer that we must bookkeep.
struct TimerState {
    // The task waiting for the proximate timer to expire.  Held here to keep
    // the in-flight wait alive (a fasync::Task is canceled when dropped).
    task: fasync::Task<()>,
    // The deadline that the above task is waiting for.
    deadline: fasync::BootInstant,
}
1028
1029/// The command loop for timer interaction.  All changes to the wake alarm device programming
1030/// come in form of commands through `cmd`.
1031///
1032/// Args:
1033/// - `snd`: the send end of `cmd` below, a clone is given to each spawned sub-task.
1034/// - `cmds``: the input queue of alarm related commands.
1035/// - `timer_proxy`: the FIDL API proxy for interacting with the hardware device.
1036/// - `inspect`: the inspect node to record loop info into.
1037async fn wake_timer_loop(
1038    scope: fasync::ScopeHandle,
1039    snd: mpsc::Sender<Cmd>,
1040    mut cmds: mpsc::Receiver<Cmd>,
1041    timer_proxy: Box<dyn TimerOps>,
1042    inspect: finspect::Node,
1043    utc_transform: Rc<RefCell<fxr::UtcClockTransform>>,
1044) {
1045    debug!("wake_timer_loop: started");
1046
1047    let mut timers = timers::Heap::new(utc_transform.clone());
1048    let timer_config = get_timer_properties(&timer_proxy).await;
1049
1050    // Keeps the currently executing HrTimer closure.  This is not read from, but
1051    // keeps the timer task active.
1052    #[allow(clippy::collection_is_never_read)]
1053    let mut hrtimer_status: Option<TimerState> = None;
1054
1055    // Initialize inspect properties. This must be done only once.
1056    //
1057    // Take note that these properties are updated when the `cmds` loop runs.
1058    // This means that repeated reads while no `cmds` activity occurs will return
1059    // old readings.  This is to ensure a consistent ability to replay the last
1060    // loop run if needed.
1061    let now_prop = inspect.create_int("now_ns", 0);
1062    let now_formatted_prop = inspect.create_string("now_formatted", "");
1063    let pending_timers_count_prop = inspect.create_uint("pending_timers_count", 0);
1064    let pending_timers_prop = inspect.create_string("pending_timers", "");
1065    let _deadline_histogram_prop = inspect.create_int_exponential_histogram(
1066        "requested_deadlines_ns",
1067        finspect::ExponentialHistogramParams {
1068            floor: 0,
1069            initial_step: zx::BootDuration::from_micros(1).into_nanos(),
1070            // Allows capturing deadlines up to dozens of days.
1071            step_multiplier: 10,
1072            buckets: 16,
1073        },
1074    );
1075    let slack_histogram_prop = inspect.create_int_exponential_histogram(
1076        "slack_ns",
1077        finspect::ExponentialHistogramParams {
1078            floor: 0,
1079            initial_step: zx::BootDuration::from_micros(1).into_nanos(),
1080            step_multiplier: 10,
1081            buckets: 16,
1082        },
1083    );
1084    let schedule_delay_prop = inspect.create_int_exponential_histogram(
1085        "schedule_delay_ns",
1086        finspect::ExponentialHistogramParams {
1087            floor: 0,
1088            initial_step: zx::BootDuration::from_micros(1).into_nanos(),
1089            step_multiplier: 10,
1090            buckets: 16,
1091        },
1092    );
1093    let boot_deadlines_count_prop = inspect.create_uint("boot_deadlines_count", 0);
1094    let utc_deadlines_count_prop = inspect.create_uint("utc_deadlines_count", 0);
1095    // Internals of what was programmed into the wake alarms hardware.
1096    let hw_node = inspect.create_child("hardware");
1097    let current_hw_deadline_prop = hw_node.create_string("current_deadline", "");
1098    let remaining_until_alarm_prop = hw_node.create_string("remaining_until_alarm", "");
1099
1100    // Debug nodes for b/454085350.
1101    let debug_node = inspect.create_child("debug_node");
1102    let start_notify_setup_count = debug_node.create_int("start_notify_setup", 0);
1103    let start_count = debug_node.create_int("start_count", 0);
1104    let responder_count = debug_node.create_int("responder_count", 0);
1105    let stop_count = debug_node.create_int("stop", 0);
1106    let stop_responder_count = debug_node.create_int("stop_responder", 0);
1107    let stop_hrtimer_count = debug_node.create_int("stop_hrtimer", 0);
1108    let schedule_hrtimer_count = debug_node.create_int("schedule_hrtimer", 0);
1109    let alarm_count = debug_node.create_int("alarm", 0);
1110    let alarm_fidl_count = debug_node.create_int("alarm_fidl", 0);
1111    let alarm_driver_count = debug_node.create_int("alarm_driver", 0);
1112    let utc_update_count = debug_node.create_int("utc_update", 0);
1113    let status_count = debug_node.create_int("status", 0);
1114    let loop_count = debug_node.create_int("loop_count", 0);
1115
1116    let hrtimer_node = debug_node.create_child("hrtimer");
1117
1118    const LRU_CACHE_CAPACITY: usize = 100;
1119    let mut error_cache = lru_cache::LruCache::new(LRU_CACHE_CAPACITY);
1120
1121    while let Some(cmd) = cmds.next().await {
1122        let _i = ScopedInc::new(&loop_count);
1123        trace::duration!("alarms", "Cmd");
1124        // Use a consistent notion of "now" across commands.
1125        let now = fasync::BootInstant::now();
1126        now_prop.set(now.into_nanos());
1127        trace::instant!("alarms", "wake_timer_loop", trace::Scope::Process, "now" => now.into_nanos());
1128        match cmd {
1129            Cmd::Start { conn_id, deadline, mode, alarm_id, responder } => {
1130                let _i = ScopedInc::new(&start_count);
1131                trace::duration!("alarms", "Cmd::Start");
1132                fuchsia_trace::flow_step!(
1133                    "alarms",
1134                    "hrtimer_lifecycle",
1135                    timers::get_trace_id(&alarm_id)
1136                );
1137                // NOTE: hold keep_alive until all work is done.
1138                debug!(
1139                    "wake_timer_loop: START alarm_id: \"{}\", conn_id: {:?}\n\tdeadline: {}\n\tnow:      {}",
1140                    alarm_id,
1141                    conn_id,
1142                    deadline,
1143                    format_timer(now.into()),
1144                );
1145
1146                defer! {
1147                    let _i = ScopedInc::new(&start_notify_setup_count);
1148                    // This is the only option that requires further action.
1149                    if let Some(mode) = mode {
1150                        if let fta::SetMode::NotifySetupDone(setup_done) = mode {
1151                            // Must signal once the setup is completed.
1152                            signal(&setup_done);
1153                            debug!("wake_timer_loop: START: setup_done signaled");
1154                        };
1155                    }
1156                }
1157                let deadline_boot = deadline.as_boot(&*utc_transform.borrow());
1158
1159                // TODO: b/444236931: re-enable.
1160                //// Bookkeeping, record the incidence of deadline types.
1161                //deadline_histogram_prop.insert((deadline_boot - now).into_nanos());
1162                match deadline {
1163                    timers::Deadline::Boot(_) => boot_deadlines_count_prop.add(1),
1164                    timers::Deadline::Utc(_) => utc_deadlines_count_prop.add(1),
1165                };
1166
1167                if timers::Heap::expired(now, deadline_boot) {
1168                    trace::duration!("alarms", "Cmd::Start:immediate");
1169                    fuchsia_trace::flow_step!(
1170                        "alarms",
1171                        "hrtimer_lifecycle",
1172                        timers::get_trace_id(&alarm_id)
1173                    );
1174                    // A timer set into now or the past expires right away.
1175                    let (_lease, keep_alive) = zx::EventPair::create();
1176                    debug!(
1177                        "[{}] wake_timer_loop: bogus lease {:?}",
1178                        line!(),
1179                        &keep_alive.koid().unwrap()
1180                    );
1181
1182                    {
1183                        let _i1 = ScopedInc::new(&responder_count);
1184                        if let Err(e) = responder
1185                            .send(&alarm_id, Ok(keep_alive))
1186                            .expect("responder is always present")
1187                        {
1188                            error!(
1189                                "wake_timer_loop: conn_id: {conn_id:?}, alarm: {alarm_id}: could not notify, dropping: {e}",
1190                            );
1191                        } else {
1192                            debug!(
1193                                "wake_timer_loop: conn_id: {conn_id:?}, alarm: {alarm_id}: EXPIRED IMMEDIATELY\n\tdeadline({}) <= now({})\n\tfull deadline: {}",
1194                                format_timer(deadline_boot.into()),
1195                                format_timer(now.into()),
1196                                deadline,
1197                            )
1198                        }
1199                    }
1200                } else {
1201                    trace::duration!("alarms", "Cmd::Start:regular");
1202                    fuchsia_trace::flow_step!(
1203                        "alarms",
1204                        "hrtimer_lifecycle",
1205                        timers::get_trace_id(&alarm_id)
1206                    );
1207                    // A timer scheduled for the future gets inserted into the timer heap.
1208                    let was_empty = timers.is_empty();
1209
1210                    let deadline_before = timers.peek_deadline_as_boot();
1211                    let node = match deadline {
1212                        timers::Deadline::Boot(_) => {
1213                            timers.new_node_boot(deadline_boot, alarm_id, conn_id, responder)
1214                        }
1215                        timers::Deadline::Utc(d) => {
1216                            timers.new_node_utc(d, alarm_id, conn_id, responder)
1217                        }
1218                    };
1219                    timers.push(node);
1220                    let deadline_after = timers.peek_deadline_as_boot();
1221
1222                    let deadline_changed = is_deadline_changed(deadline_before, deadline_after);
1223                    let needs_cancel = !was_empty && deadline_changed;
1224                    let needs_reschedule = was_empty || deadline_changed;
1225
1226                    if needs_reschedule {
1227                        // Always schedule the proximate deadline.
1228                        let schedulable_deadline = deadline_after.unwrap_or(deadline_boot);
1229                        if needs_cancel {
1230                            log_long_op!(stop_hrtimer(&timer_proxy, &timer_config));
1231                        }
1232                        hrtimer_status = Some(
1233                            schedule_hrtimer(
1234                                scope.clone(),
1235                                now,
1236                                &timer_proxy,
1237                                schedulable_deadline,
1238                                snd.clone(),
1239                                &timer_config,
1240                                &schedule_delay_prop,
1241                                &hrtimer_node,
1242                            )
1243                            .await,
1244                        );
1245                    }
1246                }
1247            }
1248            Cmd::StopById { timer_id, done } => {
1249                let _i = ScopedInc::new(&stop_count);
1250                defer! {
1251                    signal(&done);
1252                }
1253                trace::duration!("alarms", "Cmd::StopById", "alarm_id" => timer_id.alarm());
1254                fuchsia_trace::flow_step!(
1255                    "alarms",
1256                    "hrtimer_lifecycle",
1257                    timers::get_trace_id(&timer_id.alarm())
1258                );
1259                debug!("wake_timer_loop: STOP timer: {}", timer_id);
1260                let deadline_before = timers.peek_deadline_as_boot();
1261
1262                if let Some(timer_node) = timers.remove_by_id(&timer_id) {
1263                    let deadline_after = timers.peek_deadline_as_boot();
1264
1265                    {
1266                        let _i = ScopedInc::new(&stop_responder_count);
1267                        if let Some(res) = timer_node
1268                            .get_responder()
1269                            .send(timer_node.id().alarm(), Err(fta::WakeAlarmsError::Dropped))
1270                        {
1271                            // We must reply to the responder to keep the connection open.
1272                            res.expect("infallible");
1273                        }
1274                    }
1275                    if is_deadline_changed(deadline_before, deadline_after) {
1276                        let _i = ScopedInc::new(&stop_hrtimer_count);
1277                        log_long_op!(stop_hrtimer(&timer_proxy, &timer_config));
1278                    }
1279                    if let Some(deadline) = deadline_after {
1280                        let _i = ScopedInc::new(&schedule_hrtimer_count);
1281                        // Reschedule the hardware timer if the removed timer is the earliest one,
1282                        // and another one exists.
1283                        let new_timer_state = schedule_hrtimer(
1284                            scope.clone(),
1285                            now,
1286                            &timer_proxy,
1287                            deadline,
1288                            snd.clone(),
1289                            &timer_config,
1290                            &schedule_delay_prop,
1291                            &hrtimer_node,
1292                        )
1293                        .await;
1294                        let old_hrtimer_status = hrtimer_status.replace(new_timer_state);
1295                        if let Some(task) = old_hrtimer_status.map(|ev| ev.task) {
1296                            // Allow the task to complete. Since this task should have been
1297                            // canceled or completed already, this call should not block for
1298                            // a long time.
1299                            log_long_op!(task);
1300                        }
1301                    } else {
1302                        // No next timer, clean up the hrtimer status.
1303                        hrtimer_status = None;
1304                    }
1305                } else {
1306                    debug!("wake_timer_loop: STOP: no active timer to stop: {}", timer_id);
1307                }
1308            }
1309            Cmd::Alarm { expired_deadline, keep_alive } => {
1310                let _i = ScopedInc::new(&alarm_count);
1311
1312                trace::duration!("alarms", "Cmd::Alarm");
1313                // Expire all eligible timers, based on "now".  This is because
1314                // we may have woken up earlier than the actual deadline. This
1315                // happens for example if the timer can not make the actual
1316                // deadline and needs to be re-programmed.
1317                debug!(
1318                    "wake_timer_loop: ALARM!!! reached deadline: {}, wakey-wakey! {:?}",
1319                    format_timer(expired_deadline.into()),
1320                    keep_alive.koid().unwrap(),
1321                );
1322                let expired_count =
1323                    notify_all(&mut timers, &keep_alive, now, None, &slack_histogram_prop)
1324                        .expect("notification succeeds");
1325                if expired_count == 0 {
1326                    // This could be a resolution switch, or a straggler notification.
1327                    // Either way, the hardware timer is still ticking, cancel it.
1328                    debug!("wake_timer_loop: no expired alarms, reset hrtimer state");
1329                    log_long_op!(stop_hrtimer(&timer_proxy, &timer_config));
1330                }
1331                // There is a timer to reschedule, do that now.
1332                hrtimer_status = match timers.peek_deadline_as_boot() {
1333                    None => None,
1334                    Some(deadline) => Some(
1335                        schedule_hrtimer(
1336                            scope.clone(),
1337                            now,
1338                            &timer_proxy,
1339                            deadline,
1340                            snd.clone(),
1341                            &timer_config,
1342                            &schedule_delay_prop,
1343                            &hrtimer_node,
1344                        )
1345                        .await,
1346                    ),
1347                }
1348            }
1349            Cmd::AlarmFidlError { expired_deadline, error } => {
1350                let _i = ScopedInc::new(&alarm_fidl_count);
1351
1352                trace::duration!("alarms", "Cmd::AlarmFidlError");
1353                // We do not have a wake lease, so the system may sleep before
1354                // we get to schedule a new timer. We have no way to avoid it
1355                // today.
1356                let error_string = format!("{}", error);
1357                if !error_cache.contains_key(&error_string) {
1358                    warn!(
1359                        "wake_timer_loop: FIDL error: {}, deadline: {}, now: {}",
1360                        error,
1361                        format_timer(expired_deadline.into()),
1362                        format_timer(now.into()),
1363                    );
1364                    error_cache.insert(error_string, ());
1365                }
1366                // Manufacture a fake lease to make the code below work.
1367                // Maybe use Option instead?
1368                let (_dummy_lease, peer) = zx::EventPair::create();
1369                debug!(
1370                    "bogus lease: {:?} fidl error [{}:{}]",
1371                    &peer.koid().unwrap(),
1372                    file!(),
1373                    line!()
1374                );
1375                notify_all(
1376                    &mut timers,
1377                    &peer,
1378                    now,
1379                    Some(TimerOpsError::Fidl(error)),
1380                    &slack_histogram_prop,
1381                )
1382                .expect("notification succeeds");
1383                hrtimer_status = match timers.peek_deadline_as_boot() {
1384                    None => None, // No remaining timers, nothing to schedule.
1385                    Some(deadline) => Some(
1386                        schedule_hrtimer(
1387                            scope.clone(),
1388                            now,
1389                            &timer_proxy,
1390                            deadline,
1391                            snd.clone(),
1392                            &timer_config,
1393                            &schedule_delay_prop,
1394                            &hrtimer_node,
1395                        )
1396                        .await,
1397                    ),
1398                }
1399            }
1400            Cmd::AlarmDriverError {
1401                expired_deadline,
1402                error,
1403                timer_config_id,
1404                resolution_nanos,
1405                ticks,
1406            } => {
1407                let _i = ScopedInc::new(&alarm_driver_count);
1408
1409                trace::duration!("alarms", "Cmd::AlarmDriverError");
1410                let (_dummy_lease, peer) = zx::EventPair::create();
1411                debug!(
1412                    "bogus lease: {:?} driver error. [{}:{}]",
1413                    &peer.koid().unwrap(),
1414                    file!(),
1415                    line!()
1416                );
1417                notify_all(
1418                    &mut timers,
1419                    &peer,
1420                    now,
1421                    Some(TimerOpsError::Driver(error)),
1422                    &slack_histogram_prop,
1423                )
1424                .expect("notification succeeds");
1425                match error {
1426                    fidl_fuchsia_hardware_hrtimer::DriverError::Canceled => {
1427                        // Nothing to do here, cancelation is handled in Stop code.
1428                        debug!(
1429                            "wake_timer_loop: CANCELED timer at deadline: {}",
1430                            format_timer(expired_deadline.into())
1431                        );
1432                    }
1433                    _ => {
1434                        error!(
1435                            "wake_timer_loop: DRIVER SAYS: {:?}, deadline: {}, now: {}\n\ttimer_id={}\n\tresolution={}\n\tticks={}",
1436                            error,
1437                            format_timer(expired_deadline.into()),
1438                            format_timer(now.into()),
1439                            timer_config_id,
1440                            resolution_nanos,
1441                            ticks,
1442                        );
1443                        // We do not have a wake lease, so the system may sleep before
1444                        // we get to schedule a new timer. We have no way to avoid it
1445                        // today.
1446                        hrtimer_status = match timers.peek_deadline_as_boot() {
1447                            None => None,
1448                            Some(deadline) => Some(
1449                                schedule_hrtimer(
1450                                    scope.clone(),
1451                                    now,
1452                                    &timer_proxy,
1453                                    deadline,
1454                                    snd.clone(),
1455                                    &timer_config,
1456                                    &schedule_delay_prop,
1457                                    &hrtimer_node,
1458                                )
1459                                .await,
1460                            ),
1461                        }
1462                    }
1463                }
1464            }
1465            Cmd::UtcUpdated { transform } => {
1466                let _i = ScopedInc::new(&utc_update_count);
1467
1468                trace::duration!("alarms", "Cmd::UtcUpdated");
1469                debug!("wake_timer_loop: applying new clock transform: {transform:?}");
1470
1471                // Assigning to this shared reference updates the deadlines of all
1472                // UTC timers.
1473                *utc_transform.borrow_mut() = transform;
1474
1475                // Reschedule the hardware timer with the now-current deadline if there is an
1476                // active timer.
1477                if hrtimer_status.is_some() {
1478                    log_long_op!(stop_hrtimer(&timer_proxy, &timer_config));
1479                    // Should we request a wake lock here?
1480                    hrtimer_status = match timers.peek_deadline_as_boot() {
1481                        None => None,
1482                        Some(deadline) => Some(
1483                            schedule_hrtimer(
1484                                scope.clone(),
1485                                now,
1486                                &timer_proxy,
1487                                deadline,
1488                                snd.clone(),
1489                                &timer_config,
1490                                &schedule_delay_prop,
1491                                &hrtimer_node,
1492                            )
1493                            .await,
1494                        ),
1495                    }
1496                }
1497            }
1498        }
1499
1500        {
1501            let _i = ScopedInc::new(&status_count);
1502
1503            // Print and record diagnostics after each iteration, record the
1504            // duration for performance awareness.  Note that iterations happen
1505            // only occasionally, so these stats can remain unchanged for a long
1506            // time.
1507            trace::duration!("timekeeper", "inspect");
1508            let now_formatted = format_timer(now.into());
1509            debug!("wake_timer_loop: now:                             {}", &now_formatted);
1510            now_formatted_prop.set(&now_formatted);
1511
1512            let pending_timers_count: u64 =
1513                timers.timer_count().try_into().expect("always convertible");
1514            debug!("wake_timer_loop: currently pending timer count:   {}", pending_timers_count);
1515            pending_timers_count_prop.set(pending_timers_count);
1516
1517            let pending_timers = format!("{}", timers);
1518            debug!("wake_timer_loop: currently pending timers:        \n\t{}", &timers);
1519            pending_timers_prop.set(&pending_timers);
1520
1521            let current_deadline: String = hrtimer_status
1522                .as_ref()
1523                .map(|s| format!("{}", format_timer(s.deadline.into())))
1524                .unwrap_or_else(|| "(none)".into());
1525            debug!("wake_timer_loop: current hardware timer deadline: {:?}", current_deadline);
1526            current_hw_deadline_prop.set(&current_deadline);
1527
1528            let remaining_duration_until_alarm = hrtimer_status
1529                .as_ref()
1530                .map(|s| format!("{}", format_duration((s.deadline - now).into())))
1531                .unwrap_or_else(|| "(none)".into());
1532            debug!(
1533                "wake_timer_loop: remaining duration until alarm:  {}",
1534                &remaining_duration_until_alarm
1535            );
1536            remaining_until_alarm_prop.set(&remaining_duration_until_alarm);
1537            debug!("---");
1538        }
1539    }
1540
1541    // Prod code should not see this loop ever exiting. the wake alarm manager
1542    // should run forever.
1543    log::info!("wake_timer_loop: exiting. This is only correct in test code.");
1544}
1545
1546/// Schedules a wake alarm.
1547///
1548/// # Args:
1549///
1550/// - `scope`: used to spawn async tasks.
1551/// - `now`: the time instant used as the value of current instant.
1552/// - `hrtimer`: the proxy for the hrtimer device driver.
1553/// - `deadline`: the time instant in the future at which the alarm should fire.
1554/// - `command_send`: the sender channel to use when the timer expires.
1555/// - `timer_config`: a configuration of the hardware timer showing supported resolutions and
1556///   max tick value.
1557/// - `schedule_delay_histogram`: inspect instrumentation.
1558/// - `debug_node`: used for keeping debug counters.
1559async fn schedule_hrtimer(
1560    scope: fasync::ScopeHandle,
1561    now: fasync::BootInstant,
1562    hrtimer: &Box<dyn TimerOps>,
1563    deadline: fasync::BootInstant,
1564    mut command_send: mpsc::Sender<Cmd>,
1565    timer_config: &TimerConfig,
1566    _schedule_delay_histogram: &finspect::IntExponentialHistogramProperty,
1567    debug_node: &finspect::Node,
1568) -> TimerState {
1569    let timeout = std::cmp::max(zx::BootDuration::ZERO, deadline - now);
1570    trace::duration!("alarms", "schedule_hrtimer", "timeout" => timeout.into_nanos());
1571    // When signaled, the hrtimer has been scheduled.
1572    let hrtimer_scheduled = zx::Event::create();
1573
1574    let schedule_count = debug_node.create_int("schedule", 0);
1575    let hrtimer_wait_count = debug_node.create_int("hrtimer_wait", 0);
1576    let wait_signaled_count = debug_node.create_int("wait_signaled", 0);
1577
1578    let _sc = ScopedInc::new(&schedule_count);
1579
1580    debug!(
1581        "schedule_hrtimer:\n\tnow: {}\n\tdeadline: {}\n\ttimeout: {}",
1582        format_timer(now.into()),
1583        format_timer(deadline.into()),
1584        format_duration(timeout),
1585    );
1586
1587    let slack = timer_config.pick_setting(timeout);
1588    let resolution_nanos = slack.resolution.into_nanos();
1589    // The driver will reject "0" ticks, even though it probably shouldn't. See for details:
1590    // b/437177931.
1591    let useful_ticks = std::cmp::max(MIN_USEFUL_TICKS, slack.ticks());
1592
1593    trace::instant!("alarms", "hrtimer:programmed",
1594        trace::Scope::Process,
1595        "resolution_ns" => resolution_nanos,
1596        "ticks" => useful_ticks
1597    );
1598    let timer_config_id = timer_config.id;
1599    let start_and_wait_fut = {
1600        let _sc = ScopedInc::new(&hrtimer_wait_count);
1601        hrtimer.start_and_wait(
1602            timer_config.id,
1603            &ffhh::Resolution::Duration(resolution_nanos),
1604            useful_ticks,
1605            clone_handle(&hrtimer_scheduled),
1606        )
1607    };
1608
1609    let hrtimer_scheduled_if_error = clone_handle(&hrtimer_scheduled);
1610    let hrtimer_task = scope.spawn_local(async move {
1611        debug!("hrtimer_task: waiting for hrtimer driver response");
1612        trace::instant!("alarms", "hrtimer:started", trace::Scope::Process);
1613        let response = start_and_wait_fut.await;
1614        trace::instant!("alarms", "hrtimer:response", trace::Scope::Process);
1615        match response {
1616            Err(TimerOpsError::Fidl(e)) => {
1617                defer! {
1618                    // Allow hrtimer_scheduled to proceed anyways.
1619                    signal(&hrtimer_scheduled_if_error);
1620                }
1621                trace::instant!("alarms", "hrtimer:response:fidl_error", trace::Scope::Process);
1622                command_send
1623                    .start_send(Cmd::AlarmFidlError { expired_deadline: now, error: e })
1624                    .unwrap();
1625                // BAD: no way to keep alive.
1626            }
1627            Err(TimerOpsError::Driver(e)) => {
1628                defer! {
1629                    // This should be idempotent if the error occurs after
1630                    // the timer was scheduled.
1631                    signal(&hrtimer_scheduled_if_error);
1632                }
1633                let driver_error_str = format!("{:?}", e);
1634                trace::instant!("alarms", "hrtimer:response:driver_error", trace::Scope::Process, "error" => &driver_error_str[..]);
1635                // This is very common. For example, a "timer canceled" event
1636                // will result in this code path being hit.
1637                debug!("schedule_hrtimer: hrtimer driver error: {:?}", e);
1638                command_send
1639                    .start_send(Cmd::AlarmDriverError {
1640                        expired_deadline: now,
1641                        error: e,
1642                        timer_config_id,
1643                        resolution_nanos,
1644                        ticks: useful_ticks,
1645                    })
1646                    .unwrap();
1647                // BAD: no way to keep alive.
1648            }
1649            Ok(keep_alive) => {
1650                trace::instant!("alarms", "hrtimer:response:alarm", trace::Scope::Process);
1651                debug!("hrtimer: got alarm response: {:?}", keep_alive);
1652                // May trigger sooner than the deadline.
1653                command_send
1654                    .start_send(Cmd::Alarm { expired_deadline: deadline, keep_alive })
1655                    .unwrap();
1656            }
1657        }
1658        debug!("hrtimer_task: exiting task.");
1659        trace::instant!("alarms", "hrtimer:task_exit", trace::Scope::Process);
1660    }).into();
1661    debug!("schedule_hrtimer: waiting for event to be signaled");
1662
1663    {
1664        let _i = ScopedInc::new(&wait_signaled_count);
1665        // We must wait here to ensure that the wake alarm has been scheduled.
1666        log_long_op!(wait_signaled(&hrtimer_scheduled));
1667    }
1668
1669    let now_after_signaled = fasync::BootInstant::now();
1670    let duration_until_scheduled: zx::BootDuration = (now_after_signaled - now).into();
1671    if duration_until_scheduled > zx::BootDuration::from_nanos(LONG_DELAY_NANOS) {
1672        trace::duration!("alarms", "schedule_hrtimer:unusual_duration",
1673            "duration" => duration_until_scheduled.into_nanos());
1674        warn!(
1675            "unusual duration until hrtimer scheduled: {}",
1676            format_duration(duration_until_scheduled)
1677        );
1678    }
1679    // TODO: b/444236931: re-enable.
1680    //schedule_delay_histogram.insert(duration_until_scheduled.into_nanos());
1681    debug!("schedule_hrtimer: hrtimer wake alarm has been scheduled.");
1682    TimerState { task: hrtimer_task, deadline }
1683}
1684
1685/// Notify all `timers` that `reference_instant` has been reached.
1686///
1687/// The notified `timers` are removed from the list of timers to notify.
1688///
1689/// Args:
1690/// - `timers`: the collection of currently available timers.
1691/// - `lease_prototype`: an EventPair used as a wake lease.
1692/// - `reference_instant`: the time instant used as a reference for alarm notification.
1693/// - `timer_ops_error`: if set, this is the error that happened while attempting to
1694///   schedule or trigger a timer in hardware.
1695fn notify_all(
1696    timers: &mut timers::Heap,
1697    lease_prototype: &zx::EventPair,
1698    reference_instant: fasync::BootInstant,
1699    timer_ops_error: Option<TimerOpsError>,
1700    _unusual_slack_histogram: &finspect::IntExponentialHistogramProperty,
1701) -> Result<usize> {
1702    trace::duration!("alarms", "notify_all");
1703    let now = fasync::BootInstant::now();
1704    let mut expired = 0;
1705    while let Some(timer_node) = timers.maybe_expire_earliest(reference_instant) {
1706        expired += 1;
1707        // How much later than requested did the notification happen.
1708        let deadline = timer_node.get_boot_deadline();
1709        let alarm = timer_node.id().alarm();
1710        let alarm_id = alarm.to_string();
1711        trace::duration!("alarms", "notify_all:notified", "alarm_id" => &*alarm_id);
1712        fuchsia_trace::flow_step!("alarms", "hrtimer_lifecycle", timers::get_trace_id(&alarm_id));
1713        let conn_id = timer_node.id().conn.clone();
1714        let slack: zx::BootDuration = deadline - now;
1715        if slack < zx::BootDuration::from_nanos(-LONG_DELAY_NANOS) {
1716            trace::duration!("alarms", "schedule_hrtimer:unusual_slack", "slack" => slack.into_nanos());
1717            // This alarm triggered noticeably later than it should have.
1718            warn!(
1719                "alarm id: {} had an unusually large slack: {}",
1720                alarm_id,
1721                format_duration(slack)
1722            );
1723        }
1724        if slack < zx::BootDuration::ZERO {
1725            // TODO: b/444236931: re-enable.
1726            //unusual_slack_histogram.insert(-slack.into_nanos());
1727        }
1728        if let Some(ref err) = timer_ops_error {
1729            // Canceled timers are getting notified with alarm, but not other
1730            // errors.
1731            if !err.is_canceled() {
1732                timer_node.get_responder().send(alarm, Err(err.clone().into()));
1733                continue;
1734            }
1735        }
1736        debug!(
1737            concat!(
1738                "wake_alarm_loop: ALARM alarm_id: \"{}\"\n\tdeadline: {},\n\tconn_id: {:?},\n\t",
1739                "reference_instant: {},\n\tnow: {},\n\tslack: {}",
1740            ),
1741            alarm_id,
1742            format_timer(deadline.into()),
1743            conn_id,
1744            format_timer(reference_instant.into()),
1745            format_timer(now.into()),
1746            format_duration(slack),
1747        );
1748        let lease = clone_handle(lease_prototype);
1749        trace::instant!("alarms", "notify", trace::Scope::Process, "alarm_id" => &alarm_id[..], "conn_id" => conn_id);
1750        if let Some(Err(e)) = timer_node.get_responder().send(alarm, Ok(lease)) {
1751            error!("could not signal responder: {:?}", e);
1752        }
1753        trace::instant!("alarms", "notified", trace::Scope::Process);
1754    }
1755    trace::instant!("alarms", "notify", trace::Scope::Process, "expired_count" => expired);
1756    debug!("notify_all: expired count: {}", expired);
1757    Ok(expired)
1758    // A new timer is not scheduled yet here.
1759}
1760
/// The hrtimer driver service directory.  hrtimer driver APIs appear as randomly
/// named files in this directory. They are expected to come and go, which is
/// why discovery watches this directory instead of using fixed paths.
const HRTIMER_DIRECTORY: &str = "/dev/class/hrtimer";
1764
1765/// Connects to the high resolution timer device driver.
1766///
1767/// This function watches the hrtimer device directory and connects to the first
1768/// available hrtimer device.
1769///
1770/// # Returns
1771/// A `Result` containing a `ffhh::DeviceProxy` on success, or an error if
1772/// the connection fails.
1773pub async fn connect_to_hrtimer_async() -> Result<ffhh::DeviceProxy> {
1774    debug!("connect_to_hrtimer: trying directory: {}", HRTIMER_DIRECTORY);
1775    let dir =
1776        fuchsia_fs::directory::open_in_namespace(HRTIMER_DIRECTORY, fidl_fuchsia_io::PERM_READABLE)
1777            .with_context(|| format!("Opening {}", HRTIMER_DIRECTORY))?;
1778    let path = device_watcher::watch_for_files(&dir)
1779        .await
1780        .with_context(|| format!("Watching for files in {}", HRTIMER_DIRECTORY))?
1781        .try_next()
1782        .await
1783        .with_context(|| format!("Getting a file from {}", HRTIMER_DIRECTORY))?;
1784    let path = path.ok_or_else(|| anyhow!("Could not find {}", HRTIMER_DIRECTORY))?;
1785    let path = path
1786        .to_str()
1787        .ok_or_else(|| anyhow!("Could not find a valid str for {}", HRTIMER_DIRECTORY))?;
1788    connect_to_named_protocol_at_dir_root::<ffhh::DeviceMarker>(&dir, path)
1789        .context("Failed to connect built-in service")
1790}
1791
1792#[cfg(test)]
1793mod tests {
1794    use super::*;
1795    use assert_matches::assert_matches;
1796    use diagnostics_assertions::{AnyProperty, assert_data_tree};
1797    use fuchsia_async::TestExecutor;
1798    use futures::select;
1799    use test_case::test_case;
1800    use test_util::{assert_gt, assert_lt};
1801
1802    fn fake_wake_lease() -> fidl_fuchsia_power_system::LeaseToken {
1803        let (_lease, peer) = zx::EventPair::create();
1804        peer
1805    }
1806
1807    #[test]
1808    fn timer_duration_no_overflow() {
1809        let duration1 = TimerDuration {
1810            resolution: zx::BootDuration::from_seconds(100_000_000),
1811            ticks: u64::MAX,
1812        };
1813        let duration2 = TimerDuration {
1814            resolution: zx::BootDuration::from_seconds(110_000_000),
1815            ticks: u64::MAX,
1816        };
1817        assert_eq!(duration1, duration1);
1818        assert_eq!(duration2, duration2);
1819
1820        assert_lt!(duration1, duration2);
1821        assert_gt!(duration2, duration1);
1822    }
1823
    // Pairs of TimerDuration values that must compare equal: the same total
    // duration expressed through different (resolution, ticks) factorings.
    #[test_case(
        TimerDuration::new(zx::BootDuration::from_nanos(1), 1),
        TimerDuration::new(zx::BootDuration::from_nanos(1), 1)
    )]
    #[test_case(
        TimerDuration::new(zx::BootDuration::from_nanos(1), 10),
        TimerDuration::new(zx::BootDuration::from_nanos(10), 1)
    )]
    #[test_case(
        TimerDuration::new(zx::BootDuration::from_nanos(10), 1),
        TimerDuration::new(zx::BootDuration::from_nanos(1), 10)
    )]
    #[test_case(
        TimerDuration::new(zx::BootDuration::from_micros(1), 1),
        TimerDuration::new(zx::BootDuration::from_nanos(1), 1000)
    )]
    fn test_slack_eq(one: TimerDuration, other: TimerDuration) {
        assert_eq!(one, other);
    }
1843
    // Pairs where the first TimerDuration denotes a strictly shorter total
    // duration than the second, so it must compare as less-than.
    #[test_case(
        TimerDuration::new(zx::BootDuration::from_nanos(1), 1),
        TimerDuration::new(zx::BootDuration::from_nanos(1), 2)
    )]
    #[test_case(
        TimerDuration::new(zx::BootDuration::from_nanos(1), 1),
        TimerDuration::new(zx::BootDuration::from_nanos(10), 1)
    )]
    fn test_slack_lt(one: TimerDuration, other: TimerDuration) {
        assert_lt!(one, other);
    }
1855
    // Pairs where the first TimerDuration denotes a strictly longer total
    // duration than the second, so it must compare as greater-than.
    #[test_case(
        TimerDuration::new(zx::BootDuration::from_nanos(1), 2),
        TimerDuration::new(zx::BootDuration::from_nanos(1), 1)
    )]
    #[test_case(
        TimerDuration::new(zx::BootDuration::from_nanos(10), 1),
        TimerDuration::new(zx::BootDuration::from_nanos(1), 1)
    )]
    fn test_slack_gt(one: TimerDuration, other: TimerDuration) {
        assert_gt!(one, other);
    }
1867
    // Exercises TimerConfig::pick_setting: given the supported resolutions and
    // the max tick count, it should pick the (resolution, ticks) pair that
    // best approximates the requested duration. Each case name describes the
    // expected outcome.
    #[test_case(
        vec![zx::BootDuration::from_nanos(1)],
        100,
        zx::BootDuration::from_nanos(0),
        TimerDuration::new(zx::BootDuration::from_nanos(1), 1) ; "0ns becomes 1ns"
    )]
    #[test_case(
        vec![zx::BootDuration::from_nanos(1)],
        100,
        zx::BootDuration::from_nanos(50),
        TimerDuration::new(zx::BootDuration::from_nanos(1), 50) ; "Exact at 50x1ns"
    )]
    #[test_case(
        vec![zx::BootDuration::from_nanos(2)],
        100,
        zx::BootDuration::from_nanos(50),
        TimerDuration::new(zx::BootDuration::from_nanos(2), 25) ; "Exact at 25x2ns"
    )]
    #[test_case(
        vec![zx::BootDuration::from_nanos(3)],
        100,
        zx::BootDuration::from_nanos(50),
        // The closest duration is 51ns.
        TimerDuration::new(zx::BootDuration::from_nanos(3), 17) ; "Inexact at 51ns"
    )]
    #[test_case(
        vec![
            zx::BootDuration::from_nanos(3),
            zx::BootDuration::from_nanos(4)
        ],
        100,
        zx::BootDuration::from_nanos(50),
        TimerDuration::new(zx::BootDuration::from_nanos(3), 17) ; "3ns is a better resolution"
    )]
    #[test_case(
        vec![
            zx::BootDuration::from_nanos(1000),
        ],
        100,
        zx::BootDuration::from_nanos(50),
        TimerDuration::new(zx::BootDuration::from_nanos(1000), 1) ;
        "950ns negative slack is the best we can do"
    )]
    #[test_case(
        vec![
            zx::BootDuration::from_nanos(1),
        ],
        10,
        zx::BootDuration::from_nanos(50),
        TimerDuration::new(zx::BootDuration::from_nanos(1), 10) ;
        "10ns positive slack is the best we can do"
    )]
    #[test_case(
        vec![
            zx::BootDuration::from_millis(1),
            zx::BootDuration::from_micros(100),
            zx::BootDuration::from_micros(10),
            zx::BootDuration::from_micros(1),
        ],
        20,  // Make only one of the resolutions above match.
        zx::BootDuration::from_micros(150),
        TimerDuration::new(zx::BootDuration::from_micros(10), 15) ;
        "Realistic case with resolutions from driver, should be 15us"
    )]
    #[test_case(
        vec![
            zx::BootDuration::from_millis(1),
            zx::BootDuration::from_micros(100),
            zx::BootDuration::from_micros(10),
            zx::BootDuration::from_micros(1),
        ],
        2000,  // Make only one of the resolutions above match.
        zx::BootDuration::from_micros(6000),
        TimerDuration::new(zx::BootDuration::from_millis(1), 6) ;
        "Coarser exact unit wins"
    )]
    #[test_case(
        vec![
            zx::BootDuration::from_millis(1),
            zx::BootDuration::from_millis(10),
            zx::BootDuration::from_millis(100),
        ],
        1000,
        zx::BootDuration::from_micros(-10),
        TimerDuration::new(zx::BootDuration::from_millis(1), 1) ;
        "Negative duration gets the smallest timer duration"
    )]
    #[test_case(
        vec![
            zx::BootDuration::from_millis(1),
            zx::BootDuration::from_millis(10),
            zx::BootDuration::from_millis(100),
        ],
        1000,
        zx::BootDuration::ZERO,
        TimerDuration::new(zx::BootDuration::from_millis(1), 1) ;
        "Zero duration gets the smallest timer duration"
    )]
    fn test_pick_setting(
        resolutions: Vec<zx::BootDuration>,
        max_ticks: u64,
        duration: zx::BootDuration,
        expected: TimerDuration,
    ) {
        let config = TimerConfig::new_from_data(MAIN_TIMER_ID as u64, &resolutions[..], max_ticks);
        let actual = config.pick_setting(duration);

        // .eq() does not work here, since we do not just require that the values
        // be equal, but also that the same resolution is used in both.
        assert_slack_eq(expected, actual);
    }
1979
1980    // TimerDuration assertion with human-friendly output in case of an error.
1981    fn assert_slack_eq(expected: TimerDuration, actual: TimerDuration) {
1982        let slack = expected.duration() - actual.duration();
1983        assert_eq!(
1984            actual.resolution(),
1985            expected.resolution(),
1986            "\n\texpected: {} ({})\n\tactual  : {} ({})\n\tslack: expected-actual={}",
1987            expected,
1988            format_duration(expected.duration()),
1989            actual,
1990            format_duration(actual.duration()),
1991            format_duration(slack)
1992        );
1993        assert_eq!(
1994            actual.ticks(),
1995            expected.ticks(),
1996            "\n\texpected: {} ({})\n\tactual  : {} ({})\n\tslack: expected-actual={}",
1997            expected,
1998            format_duration(expected.duration()),
1999            actual,
2000            format_duration(actual.duration()),
2001            format_duration(slack)
2002        );
2003    }
2004
    /// Commands accepted by the fake hrtimer driver used in the tests below.
    #[derive(Debug)]
    enum FakeCmd {
        /// Configures the timer properties that the fake driver reports.
        SetProperties {
            // Supported resolutions to advertise for each fake timer.
            resolutions: Vec<zx::BootDuration>,
            // Maximum tick count to advertise for each fake timer.
            max_ticks: i64,
            // Handle the fake driver hands back (cloned) when a wake alarm fires.
            keep_alive: zx::EventPair,
            // Signaled once the fake driver has applied the new properties.
            done: zx::Event,
        },
    }
2014
2015    use std::cell::RefCell;
2016    use std::rc::Rc;
2017
2018    // A fake that emulates some aspects of the hrtimer driver.
2019    //
2020    // Specifically it can be configured with different resolutions, and will
2021    // bomb out if any waiting methods are called twice in a succession, without
2022    // canceling the timer in between.
2023    fn fake_hrtimer_connection(
2024        scope: fasync::ScopeHandle,
2025        rcv: mpsc::Receiver<FakeCmd>,
2026    ) -> ffhh::DeviceProxy {
2027        debug!("fake_hrtimer_connection: entry.");
2028        let (hrtimer, mut stream) =
2029            fidl::endpoints::create_proxy_and_stream::<ffhh::DeviceMarker>();
2030        scope.clone().spawn_local(async move {
2031            let mut rcv = rcv.fuse();
2032            let timer_properties = Rc::new(RefCell::new(None));
2033            let wake_lease = Rc::new(RefCell::new(None));
2034
2035            // Set to true when the hardware timer is supposed to be running.
2036            // Hardware timer may not be reprogrammed without canceling it first,
2037            // make sure the tests fail the same way as production would.
2038            let timer_running = Rc::new(RefCell::new(false));
2039
2040            loop {
2041                let timer_properties = timer_properties.clone();
2042                let wake_lease = wake_lease.clone();
2043                select! {
2044                    cmd = rcv.next() => {
2045                        debug!("fake_hrtimer_connection: cmd: {:?}", cmd);
2046                        match cmd {
2047                            Some(FakeCmd::SetProperties{ resolutions, max_ticks, keep_alive, done}) => {
2048                                let mut timer_props = vec![];
2049                                for v in 0..10 {
2050                                    timer_props.push(ffhh::TimerProperties {
2051                                        supported_resolutions: Some(
2052                                            resolutions.iter()
2053                                                .map(|d| ffhh::Resolution::Duration(d.into_nanos())).collect()),
2054                                        max_ticks: Some(max_ticks.try_into().unwrap()),
2055                                        // start_and_wait method works.
2056                                        supports_wait: Some(true),
2057                                        id: Some(v),
2058                                        ..Default::default()
2059                                        },
2060                                    );
2061                                }
2062                                *timer_properties.borrow_mut() = Some(timer_props);
2063                                *wake_lease.borrow_mut() = Some(keep_alive);
2064                                debug!("set timer properties to: {:?}", timer_properties);
2065                                signal(&done);
2066                            }
2067                            e => {
2068                                panic!("unrecognized command: {:?}", e);
2069                            }
2070                        }
2071                        // Set some responses if we have them.
2072                    },
2073                    event = stream.next() => {
2074                        debug!("fake_hrtimer_connection: event: {:?}", event);
2075                        if let Some(Ok(event)) = event {
2076                            match event {
2077                                ffhh::DeviceRequest::Start { responder, .. } => {
2078                                    assert!(!*timer_running.borrow(), "invariant broken: timer may not be running here");
2079                                    *timer_running.borrow_mut() = true;
2080                                    responder.send(Ok(())).expect("");
2081                                }
2082                                ffhh::DeviceRequest::Stop { responder, .. } => {
2083                                    *timer_running.borrow_mut() = false;
2084                                    responder.send(Ok(())).expect("");
2085                                }
2086                                ffhh::DeviceRequest::GetTicksLeft { responder, .. } => {
2087                                    responder.send(Ok(1)).expect("");
2088                                }
2089                                ffhh::DeviceRequest::SetEvent { responder, .. } => {
2090                                    responder.send(Ok(())).expect("");
2091                                }
2092                                ffhh::DeviceRequest::StartAndWait { id, resolution, ticks, setup_event, responder, .. } => {
2093                                    assert!(!*timer_running.borrow(), "invariant broken: timer may not be running here");
2094                                    *timer_running.borrow_mut() = true;
2095                                    debug!("fake_hrtimer_connection: starting timer: \"{}\", resolution: {:?}, ticks: {}", id, resolution, ticks);
2096                                    let ticks: i64 = ticks.try_into().unwrap();
2097                                    let sleep_duration  = zx::BootDuration::from_nanos(ticks * match resolution {
2098                                        ffhh::Resolution::Duration(e) => e,
2099                                        _ => {
2100                                            error!("resolution has an unexpected value");
2101                                            1
2102                                        }
2103                                    });
2104                                    let timer_running_clone = timer_running.clone();
2105                                    scope.spawn_local(async move {
2106                                        // Signaling the setup event allows the client to proceed
2107                                        // with post-scheduling work.
2108                                        signal(&setup_event);
2109
2110                                        // Respond after the requested sleep time. In tests this will
2111                                        // be sleeping in fake time.
2112                                        fasync::Timer::new(sleep_duration).await;
2113                                        *timer_running_clone.borrow_mut() = false;
2114                                        responder.send(Ok(clone_handle(wake_lease.borrow().as_ref().unwrap()))).unwrap();
2115                                        debug!("StartAndWait: hrtimer expired");
2116                                    });
2117                                }
2118                                ffhh::DeviceRequest::StartAndWait2 { responder, .. } => {
2119                                    assert!(!*timer_running.borrow(), "invariant broken: timer may not be running here");
2120                                    *timer_running.borrow_mut() = true;
2121                                    responder.send(Err(ffhh::DriverError::InternalError)).expect("");
2122                                }
2123                                ffhh::DeviceRequest::GetProperties { responder, .. } => {
2124                                    if (*timer_properties).borrow().is_none() {
2125                                        error!("timer_properties is empty, this is not what you want!");
2126                                    }
2127                                    responder
2128                                        .send(ffhh::Properties {
2129                                            timers_properties: (*timer_properties).borrow().clone(),
2130                                            ..Default::default()
2131                                        })
2132                                        .expect("");
2133                                }
2134                                ffhh::DeviceRequest::ReadTimer { responder, .. } => {
2135                                    responder.send(Err(ffhh::DriverError::NotSupported)).expect("");
2136                                }
2137                                ffhh::DeviceRequest::ReadClock { responder, .. } => {
2138                                    responder.send(Err(ffhh::DriverError::NotSupported)).expect("");
2139                                }
2140                                ffhh::DeviceRequest::_UnknownMethod { .. } => todo!(),
2141                            }
2142                        }
2143                    },
2144                }
2145            }
2146        });
2147        hrtimer
2148    }
2149
2150    fn clone_utc_clock(orig: &fxr::UtcClock) -> fxr::UtcClock {
2151        orig.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap()
2152    }
2153
    /// Shared fixture for the wake alarm tests below.
    ///
    /// Bundles the proxy under test together with handles that must stay
    /// alive for the duration of the test (scope, command channel), plus the
    /// fake UTC clock the test can manipulate.
    struct TestContext {
        /// Proxy to the `fuchsia.time.alarms/WakeAlarms` server under test.
        wake_proxy: fta::WakeAlarmsProxy,
        /// Keeps the spawned server and fake hrtimer tasks alive; dropping it
        /// would cancel them mid-test.
        _scope: fasync::Scope,
        /// Keeps the command channel to the fake hrtimer open.
        _cmd_tx: mpsc::Sender<FakeCmd>,
        // Use to manipulate the UTC clock from the test.
        utc_clock: fxr::UtcClock,
        /// The backstop instant `utc_clock` was created with; tests compute
        /// UTC deadlines relative to this value.
        utc_backstop: fxr::UtcInstant,
    }
2162
    impl TestContext {
        /// Builds the full test fixture: a fake hrtimer device, an alarm
        /// manager [Loop] wired to it, and a served `WakeAlarms` connection.
        ///
        /// Returns only after the fake hrtimer has acknowledged its
        /// configuration, so tests may schedule alarms immediately.
        async fn new() -> Self {
            // Pin fake time to a known origin so deadlines are deterministic.
            TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(0)).await;

            let scope = fasync::Scope::new();
            let utc_backstop = fxr::UtcInstant::from_nanos(1000);
            let utc_clock =
                fxr::UtcClock::create(zx::ClockOpts::empty(), Some(utc_backstop)).unwrap();
            // The alarm loop gets its own clock handle; the test keeps the
            // original to move the UTC timeline.
            let utc_clone = clone_utc_clock(&utc_clock);
            let (mut cmd_tx, wake_proxy) = {
                let (tx, rx) = mpsc::channel::<FakeCmd>(0);
                let hrtimer_proxy = fake_hrtimer_connection(scope.to_handle(), rx);

                let inspector = finspect::component::inspector();
                let alarms = Rc::new(Loop::new(
                    scope.to_handle(),
                    hrtimer_proxy,
                    inspector.root().create_child("test"),
                    utc_clone,
                ));

                // Serve the WakeAlarms protocol on the test scope for the
                // lifetime of the fixture.
                let (proxy, stream) =
                    fidl::endpoints::create_proxy_and_stream::<fta::WakeAlarmsMarker>();
                scope.spawn_local(async move {
                    serve(alarms, stream).await;
                });
                (tx, proxy)
            };

            // Configure the fake hrtimer: a single 1ns resolution, 100 max
            // ticks, and a keep-alive handle it hands back on expiration.
            let (_wake_lease, peer) = zx::EventPair::create();
            let done = zx::Event::create();
            cmd_tx
                .start_send(FakeCmd::SetProperties {
                    resolutions: vec![zx::Duration::from_nanos(1)],
                    max_ticks: 100,
                    keep_alive: peer,
                    done: done.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap(),
                })
                .unwrap();

            // Wait until hrtimer configuration has completed.
            assert_matches!(fasync::OnSignals::new(done, zx::Signals::EVENT_SIGNALED).await, Ok(_));

            Self { wake_proxy, _scope: scope, _cmd_tx: cmd_tx, utc_clock, utc_backstop }
        }
    }
2209
    impl Drop for TestContext {
        fn drop(&mut self) {
            // Every test must consume all the fake-time timers it causes to
            // be set; a leftover timer at teardown means an alarm never fired
            // or was never cleaned up, which would be a bug in the test or
            // in the alarm manager.
            assert_matches!(TestExecutor::next_timer(), None, "Unexpected lingering timers");
        }
    }
2215
2216    #[fuchsia::test(allow_stalls = false)]
2217    async fn test_basic_timed_wait() {
2218        let ctx = TestContext::new().await;
2219
2220        let deadline = zx::BootInstant::from_nanos(100);
2221        let setup_done = zx::Event::create();
2222        let mut set_task = ctx.wake_proxy.set_and_wait(
2223            deadline.into(),
2224            fta::SetMode::NotifySetupDone(
2225                setup_done.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap(),
2226            ),
2227            "Hello".into(),
2228        );
2229
2230        assert_matches!(TestExecutor::poll_until_stalled(&mut set_task).await, Poll::Pending);
2231
2232        let mut setup_done_task = fasync::OnSignals::new(setup_done, zx::Signals::EVENT_SIGNALED);
2233        assert_matches!(
2234            TestExecutor::poll_until_stalled(&mut setup_done_task).await,
2235            Poll::Ready(Ok(_)),
2236            "Setup event not triggered after scheduling an alarm"
2237        );
2238
2239        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(100)).await;
2240        assert_matches!(TestExecutor::poll_until_stalled(set_task).await, Poll::Ready(Ok(Ok(_))));
2241    }
2242
2243    #[fuchsia::test(allow_stalls = false)]
2244    async fn test_basic_timed_wait_notify() {
2245        const ALARM_ID: &str = "Hello";
2246        let ctx = TestContext::new().await;
2247
2248        let (notifier_client, mut notifier_stream) =
2249            fidl::endpoints::create_request_stream::<fta::NotifierMarker>();
2250        let setup_done = zx::Event::create();
2251        assert_matches!(
2252            ctx.wake_proxy
2253                .set(
2254                    notifier_client,
2255                    fidl::BootInstant::from_nanos(2),
2256                    fta::SetMode::NotifySetupDone(
2257                        setup_done.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap()
2258                    ),
2259                    ALARM_ID,
2260                )
2261                .await,
2262            Ok(Ok(()))
2263        );
2264
2265        let mut done_task = fasync::OnSignals::new(setup_done, zx::Signals::EVENT_SIGNALED);
2266        assert_matches!(
2267            TestExecutor::poll_until_stalled(&mut done_task).await,
2268            Poll::Ready(Ok(_)),
2269            "Setup event not triggered after scheduling an alarm"
2270        );
2271
2272        let mut next_task = notifier_stream.next();
2273        assert_matches!(TestExecutor::poll_until_stalled(&mut next_task).await, Poll::Pending);
2274
2275        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(1)).await;
2276        assert_matches!(TestExecutor::poll_until_stalled(&mut next_task).await, Poll::Pending);
2277
2278        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(2)).await;
2279        assert_matches!(
2280            TestExecutor::poll_until_stalled(next_task).await,
2281            Poll::Ready(Some(Ok(fta::NotifierRequest::Notify { alarm_id, .. }))) if alarm_id == ALARM_ID
2282        );
2283    }
2284
2285    #[fuchsia::test(allow_stalls = false)]
2286    async fn test_two_alarms_same() {
2287        const DEADLINE_NANOS: i64 = 100;
2288
2289        let ctx = TestContext::new().await;
2290
2291        let mut set_task_1 = ctx.wake_proxy.set_and_wait(
2292            fidl::BootInstant::from_nanos(DEADLINE_NANOS),
2293            fta::SetMode::KeepAlive(fake_wake_lease()),
2294            "Hello1".into(),
2295        );
2296        let mut set_task_2 = ctx.wake_proxy.set_and_wait(
2297            fidl::BootInstant::from_nanos(DEADLINE_NANOS),
2298            fta::SetMode::KeepAlive(fake_wake_lease()),
2299            "Hello2".into(),
2300        );
2301
2302        assert_matches!(TestExecutor::poll_until_stalled(&mut set_task_1).await, Poll::Pending);
2303        assert_matches!(TestExecutor::poll_until_stalled(&mut set_task_2).await, Poll::Pending);
2304
2305        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(DEADLINE_NANOS)).await;
2306
2307        assert_matches!(
2308            TestExecutor::poll_until_stalled(&mut set_task_1).await,
2309            Poll::Ready(Ok(Ok(_)))
2310        );
2311        assert_matches!(
2312            TestExecutor::poll_until_stalled(&mut set_task_2).await,
2313            Poll::Ready(Ok(Ok(_)))
2314        );
2315    }
2316
2317    #[fuchsia::test(allow_stalls = false)]
2318    async fn test_two_alarms_same_notify() {
2319        const DEADLINE_NANOS: i64 = 100;
2320        const ALARM_ID_1: &str = "Hello1";
2321        const ALARM_ID_2: &str = "Hello2";
2322
2323        let ctx = TestContext::new().await;
2324
2325        let schedule = async |deadline_nanos: i64, alarm_id: &str| {
2326            let (notifier_client, notifier_stream) =
2327                fidl::endpoints::create_request_stream::<fta::NotifierMarker>();
2328            assert_matches!(
2329                ctx.wake_proxy
2330                    .set(
2331                        notifier_client,
2332                        fidl::BootInstant::from_nanos(deadline_nanos),
2333                        fta::SetMode::KeepAlive(fake_wake_lease()),
2334                        alarm_id,
2335                    )
2336                    .await,
2337                Ok(Ok(()))
2338            );
2339            notifier_stream
2340        };
2341
2342        let mut notifier_1 = schedule(DEADLINE_NANOS, ALARM_ID_1).await;
2343        let mut notifier_2 = schedule(DEADLINE_NANOS, ALARM_ID_2).await;
2344
2345        let mut next_task_1 = notifier_1.next();
2346        let mut next_task_2 = notifier_2.next();
2347
2348        assert_matches!(TestExecutor::poll_until_stalled(&mut next_task_1).await, Poll::Pending);
2349        assert_matches!(TestExecutor::poll_until_stalled(&mut next_task_2).await, Poll::Pending);
2350
2351        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(DEADLINE_NANOS)).await;
2352
2353        assert_matches!(
2354            TestExecutor::poll_until_stalled(&mut next_task_1).await,
2355            Poll::Ready(Some(Ok(fta::NotifierRequest::Notify { alarm_id, .. }))) if alarm_id == ALARM_ID_1
2356        );
2357        assert_matches!(
2358            TestExecutor::poll_until_stalled(&mut next_task_2).await,
2359            Poll::Ready(Some(Ok(fta::NotifierRequest::Notify { alarm_id, .. }))) if alarm_id == ALARM_ID_2
2360        );
2361
2362        assert_matches!(
2363            TestExecutor::poll_until_stalled(notifier_1.next()).await,
2364            Poll::Ready(None)
2365        );
2366        assert_matches!(
2367            TestExecutor::poll_until_stalled(notifier_2.next()).await,
2368            Poll::Ready(None)
2369        );
2370    }
2371
2372    #[test_case(100, 200 ; "push out")]
2373    #[test_case(200, 100 ; "pull in")]
2374    #[fuchsia::test(allow_stalls = false)]
2375    async fn test_two_alarms_different(
2376        // One timer scheduled at this instant (fake time starts from zero).
2377        first_deadline_nanos: i64,
2378        // Another timer scheduled at this instant.
2379        second_deadline_nanos: i64,
2380    ) {
2381        let ctx = TestContext::new().await;
2382
2383        let mut set_task_1 = ctx.wake_proxy.set_and_wait(
2384            fidl::BootInstant::from_nanos(first_deadline_nanos),
2385            fta::SetMode::KeepAlive(fake_wake_lease()),
2386            "Hello1".into(),
2387        );
2388        let mut set_task_2 = ctx.wake_proxy.set_and_wait(
2389            fidl::BootInstant::from_nanos(second_deadline_nanos),
2390            fta::SetMode::KeepAlive(fake_wake_lease()),
2391            "Hello2".into(),
2392        );
2393
2394        assert_matches!(TestExecutor::poll_until_stalled(&mut set_task_1).await, Poll::Pending);
2395        assert_matches!(TestExecutor::poll_until_stalled(&mut set_task_2).await, Poll::Pending);
2396
2397        // Sort alarms by their deadlines.
2398        let mut tasks = [(first_deadline_nanos, set_task_1), (second_deadline_nanos, set_task_2)];
2399        tasks.sort_by(|a, b| a.0.cmp(&b.0));
2400        let [mut first_task, mut second_task] = tasks;
2401
2402        // Alarms should fire in order of deadlines.
2403        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(first_task.0)).await;
2404        assert_matches!(
2405            TestExecutor::poll_until_stalled(&mut first_task.1).await,
2406            Poll::Ready(Ok(Ok(_)))
2407        );
2408        assert_matches!(TestExecutor::poll_until_stalled(&mut second_task.1).await, Poll::Pending);
2409
2410        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(second_task.0)).await;
2411        assert_matches!(
2412            TestExecutor::poll_until_stalled(&mut second_task.1).await,
2413            Poll::Ready(Ok(Ok(_)))
2414        );
2415    }
2416
2417    #[test_case(100, 200 ; "push out")]
2418    #[test_case(200, 100 ; "pull in")]
2419    #[fuchsia::test(allow_stalls = false)]
2420    async fn test_two_alarms_different_notify(
2421        // One timer scheduled at this instant (fake time starts from zero).
2422        first_deadline_nanos: i64,
2423        // Another timer scheduled at this instant.
2424        second_deadline_nanos: i64,
2425    ) {
2426        const ALARM_ID_1: &str = "Hello1";
2427        const ALARM_ID_2: &str = "Hello2";
2428
2429        let ctx = TestContext::new().await;
2430
2431        let schedule = async |deadline_nanos: i64, alarm_id: &str| {
2432            let (notifier_client, notifier_stream) =
2433                fidl::endpoints::create_request_stream::<fta::NotifierMarker>();
2434            assert_matches!(
2435                ctx.wake_proxy
2436                    .set(
2437                        notifier_client,
2438                        fidl::BootInstant::from_nanos(deadline_nanos),
2439                        fta::SetMode::KeepAlive(fake_wake_lease()),
2440                        alarm_id,
2441                    )
2442                    .await,
2443                Ok(Ok(()))
2444            );
2445            notifier_stream
2446        };
2447
2448        // Sort alarms by their deadlines.
2449        let mut notifier_all = futures::stream::select_all([
2450            schedule(first_deadline_nanos, ALARM_ID_1).await,
2451            schedule(second_deadline_nanos, ALARM_ID_2).await,
2452        ]);
2453        let [(early_ns, early_alarm), (later_ns, later_alarm)] = {
2454            let mut tasks =
2455                [(first_deadline_nanos, ALARM_ID_1), (second_deadline_nanos, ALARM_ID_2)];
2456            tasks.sort_by(|a, b| a.0.cmp(&b.0));
2457            tasks
2458        };
2459
2460        // Alarms should fire in order of deadlines.
2461        let mut next_task = notifier_all.next();
2462        assert_matches!(TestExecutor::poll_until_stalled(&mut next_task).await, Poll::Pending);
2463
2464        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(early_ns)).await;
2465        assert_matches!(
2466            TestExecutor::poll_until_stalled(next_task).await,
2467            Poll::Ready(Some(Ok(fta::NotifierRequest::Notify { alarm_id, .. }))) if alarm_id == early_alarm
2468        );
2469
2470        let mut next_task = notifier_all.next();
2471        assert_matches!(TestExecutor::poll_until_stalled(&mut next_task).await, Poll::Pending);
2472
2473        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(later_ns)).await;
2474        assert_matches!(
2475            TestExecutor::poll_until_stalled(next_task).await,
2476            Poll::Ready(Some(Ok(fta::NotifierRequest::Notify { alarm_id, .. }))) if alarm_id == later_alarm
2477        );
2478        assert_matches!(
2479            TestExecutor::poll_until_stalled(notifier_all.next()).await,
2480            Poll::Ready(None)
2481        );
2482    }
2483
2484    #[fuchsia::test(allow_stalls = false)]
2485    async fn test_alarm_immediate() {
2486        let ctx = TestContext::new().await;
2487        let mut set_task = ctx.wake_proxy.set_and_wait(
2488            fidl::BootInstant::INFINITE_PAST,
2489            fta::SetMode::KeepAlive(fake_wake_lease()),
2490            "Hello1".into(),
2491        );
2492        assert_matches!(
2493            TestExecutor::poll_until_stalled(&mut set_task).await,
2494            Poll::Ready(Ok(Ok(_)))
2495        );
2496    }
2497
2498    #[fuchsia::test(allow_stalls = false)]
2499    async fn test_alarm_immediate_notify() {
2500        const ALARM_ID: &str = "Hello";
2501        let ctx = TestContext::new().await;
2502
2503        let (notifier_client, mut notifier_stream) =
2504            fidl::endpoints::create_request_stream::<fta::NotifierMarker>();
2505
2506        let mut set_task = ctx.wake_proxy.set(
2507            notifier_client,
2508            fidl::BootInstant::INFINITE_PAST,
2509            fta::SetMode::KeepAlive(fake_wake_lease()),
2510            ALARM_ID,
2511        );
2512        assert_matches!(
2513            TestExecutor::poll_until_stalled(&mut set_task).await,
2514            Poll::Ready(Ok(Ok(_)))
2515        );
2516        assert_matches!(
2517            TestExecutor::poll_until_stalled(notifier_stream.next()).await,
2518            Poll::Ready(Some(Ok(fta::NotifierRequest::Notify { alarm_id, .. }))) if alarm_id == ALARM_ID
2519        );
2520    }
2521
    // Rescheduling a timer will cancel the earlier call and use the new
    // deadline for the later call.
    #[test_case(200, 100 ; "pull in")]
    #[test_case(100, 200 ; "push out")]
    #[test_case(100, 100 ; "replace with the same deadline")]
    #[fuchsia::test(allow_stalls = false)]
    async fn test_reschedule(initial_deadline_nanos: i64, override_deadline_nanos: i64) {
        const ALARM_ID: &str = "Hello";

        let ctx = TestContext::new().await;

        // Schedules ALARM_ID at `deadline_nanos`; returns the in-flight call
        // future plus the event signaled once hardware setup completes.
        let schedule = |deadline_nanos: i64| {
            let setup_done = zx::Event::create();
            let task = ctx.wake_proxy.set_and_wait(
                fidl::BootInstant::from_nanos(deadline_nanos),
                fta::SetMode::NotifySetupDone(
                    setup_done.duplicate_handle(zx::Rights::SAME_RIGHTS).unwrap(),
                ),
                ALARM_ID.into(),
            );
            (task, setup_done)
        };

        // Schedule the timer once and let it wait; setup must complete
        // before rescheduling so the two calls do not race.
        let (mut set_task_1, setup_done_1) = schedule(initial_deadline_nanos);
        fasync::OnSignals::new(setup_done_1, zx::Signals::EVENT_SIGNALED).await.unwrap();
        assert_matches!(TestExecutor::poll_until_stalled(&mut set_task_1).await, Poll::Pending);

        // Schedule the same alarm ID again with the overriding deadline.
        // This should cancel the earlier call.
        let (mut set_task_2, setup_done_2) = schedule(override_deadline_nanos);
        fasync::OnSignals::new(setup_done_2, zx::Signals::EVENT_SIGNALED).await.unwrap();
        // The superseded call resolves with a Dropped error, while the
        // replacement stays pending.
        assert_matches!(
            TestExecutor::poll_until_stalled(&mut set_task_1).await,
            Poll::Ready(Ok(Err(fta::WakeAlarmsError::Dropped)))
        );
        assert_matches!(TestExecutor::poll_until_stalled(&mut set_task_2).await, Poll::Pending);

        // The surviving call fires exactly at the overriding deadline:
        // still pending one nanosecond early, ready on time.
        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(override_deadline_nanos - 1))
            .await;
        assert_matches!(TestExecutor::poll_until_stalled(&mut set_task_2).await, Poll::Pending);
        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(override_deadline_nanos))
            .await;
        assert_matches!(
            TestExecutor::poll_until_stalled(&mut set_task_2).await,
            Poll::Ready(Ok(Ok(_)))
        );

        // The values in the inspector tree are fixed because the test
        // runs fully deterministically in fake time.
        assert_data_tree!(finspect::component::inspector(), root: {
            test: {
                hardware: {
                    // All alarms fired, so this should be "none".
                    current_deadline: "(none)",
                    remaining_until_alarm: "(none)",
                },
                now_formatted: format!("{override_deadline_nanos}ns ({override_deadline_nanos})"),
                now_ns: override_deadline_nanos,
                pending_timers: "Boot:\n\t\n\tUTC:\n\t",
                pending_timers_count: 0u64,
                requested_deadlines_ns: AnyProperty,
                schedule_delay_ns: AnyProperty,
                slack_ns: AnyProperty,
                boot_deadlines_count: AnyProperty,
                utc_deadlines_count: AnyProperty,
                debug_node: contains {},
            },
        });
    }
2594
2595    // Rescheduling a timer will send an error on the old notifier and use the
2596    // new notifier for the new deadline.
2597    #[fuchsia::test(allow_stalls = false)]
2598    async fn test_reschedule_notify() {
2599        const ALARM_ID: &str = "Hello";
2600        const INITIAL_DEADLINE_NANOS: i64 = 100;
2601        const OVERRIDE_DEADLINE_NANOS: i64 = 200;
2602
2603        let ctx = TestContext::new().await;
2604
2605        let schedule = async |deadline_nanos: i64| {
2606            let (notifier_client, notifier_stream) =
2607                fidl::endpoints::create_request_stream::<fta::NotifierMarker>();
2608            assert_matches!(
2609                ctx.wake_proxy
2610                    .set(
2611                        notifier_client,
2612                        fidl::BootInstant::from_nanos(deadline_nanos),
2613                        fta::SetMode::KeepAlive(fake_wake_lease()),
2614                        ALARM_ID.into(),
2615                    )
2616                    .await,
2617                Ok(Ok(()))
2618            );
2619            notifier_stream
2620        };
2621
2622        let mut notifier_1 = schedule(INITIAL_DEADLINE_NANOS).await;
2623        let mut next_task_1 = notifier_1.next();
2624        assert_matches!(TestExecutor::poll_until_stalled(&mut next_task_1).await, Poll::Pending);
2625
2626        let mut notifier_2 = schedule(OVERRIDE_DEADLINE_NANOS).await;
2627        let mut next_task_2 = notifier_2.next();
2628        assert_matches!(TestExecutor::poll_until_stalled(&mut next_task_2).await, Poll::Pending);
2629
2630        // First notifier is called with an error then closed.
2631        assert_matches!(
2632            TestExecutor::poll_until_stalled(&mut next_task_1).await,
2633            Poll::Ready(Some(Ok(fta::NotifierRequest::NotifyError { alarm_id, error, .. }))) if alarm_id == ALARM_ID && error == fta::WakeAlarmsError::Dropped
2634        );
2635        assert_matches!(
2636            TestExecutor::poll_until_stalled(notifier_1.next()).await,
2637            Poll::Ready(None)
2638        );
2639
2640        // Second notifier is called upon the new deadline then closed.
2641        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(OVERRIDE_DEADLINE_NANOS))
2642            .await;
2643        assert_matches!(
2644            TestExecutor::poll_until_stalled(next_task_2).await,
2645            Poll::Ready(Some(Ok(fta::NotifierRequest::Notify { alarm_id, .. }))) if alarm_id == ALARM_ID
2646        );
2647        assert_matches!(
2648            TestExecutor::poll_until_stalled(notifier_2.next()).await,
2649            Poll::Ready(None)
2650        );
2651    }
2652
2653    // If we get two scheduling FIDL errors one after another, the wake alarm
2654    // manager must not lock up.
2655    #[fuchsia::test(allow_stalls = false)]
2656    async fn test_fidl_error_on_reschedule() {
2657        const DEADLINE_NANOS: i64 = 100;
2658
2659        let (wake_proxy, _stream) =
2660            fidl::endpoints::create_proxy_and_stream::<fta::WakeAlarmsMarker>();
2661        drop(_stream);
2662
2663        assert_matches!(
2664            wake_proxy
2665                .set_and_wait(
2666                    zx::BootInstant::from_nanos(DEADLINE_NANOS).into(),
2667                    fta::SetMode::KeepAlive(fake_wake_lease()),
2668                    "hello1".into(),
2669                )
2670                .await,
2671            Err(fidl::Error::ClientChannelClosed { .. })
2672        );
2673
2674        assert_matches!(
2675            wake_proxy
2676                .set_and_wait(
2677                    zx::BootInstant::from_nanos(DEADLINE_NANOS).into(),
2678                    fta::SetMode::KeepAlive(fake_wake_lease()),
2679                    "hello2".into(),
2680                )
2681                .await,
2682            Err(fidl::Error::ClientChannelClosed { .. })
2683        );
2684    }
2685
2686    // Verify that if a UTC timer is scheduled in the future on the UTC timeline, then the
2687    // UTC clock is changed to move "now" beyond the timer's deadline, the timer fires.
2688    #[fuchsia::test(allow_stalls = false)]
2689    async fn test_set_and_wait_utc() {
2690        const ALARM_ID: &str = "Hello_set_and_wait_utc";
2691        let ctx = TestContext::new().await;
2692
2693        let now_boot = fasync::BootInstant::now();
2694        ctx.utc_clock
2695            .update(
2696                zx::ClockUpdate::builder()
2697                    .absolute_value(now_boot.into(), ctx.utc_backstop)
2698                    .build(),
2699            )
2700            .unwrap();
2701
2702        let timestamp_utc = ctx.utc_backstop + fxr::UtcDuration::from_nanos(2);
2703        let mut wake_fut = ctx.wake_proxy.set_and_wait_utc(
2704            &fta::InstantUtc { timestamp_utc: timestamp_utc.into_nanos() },
2705            fta::SetMode::KeepAlive(fake_wake_lease()),
2706            ALARM_ID,
2707        );
2708
2709        // Timer is not expired yet.
2710        assert_matches!(TestExecutor::poll_until_stalled(&mut wake_fut).await, Poll::Pending);
2711
2712        // Move the UTC timeline.
2713        ctx.utc_clock
2714            .update(
2715                zx::ClockUpdate::builder()
2716                    .absolute_value(
2717                        now_boot.into(),
2718                        ctx.utc_backstop + fxr::UtcDuration::from_nanos(100),
2719                    )
2720                    .build(),
2721            )
2722            .unwrap();
2723
2724        // See similar code in the test above.
2725        TestExecutor::advance_to(fasync::MonotonicInstant::from_nanos(1)).await;
2726        assert_matches!(TestExecutor::poll_until_stalled(wake_fut).await, Poll::Ready(_));
2727    }
2728}